#include <iostream>
#include <stdio.h>
#include <stdlib.h>   // declarations for rand() and srand()
#include <time.h>

extern "C" int mulWithCuda(float* c, float* a, float* b, int size, int kernelSize);

int main()
{
    int size = 10;
    float* a = (float*)malloc(size * size * sizeof(float));
    float* c = (float*)malloc(size * size * sizeof(float));   // GPU result
    float* d = (float*)malloc(size * size * sizeof(float));   // CPU result
    srand(time(NULL));
    for (int row = 0; row < size; ++row) {
        for (int col = 0; col < size; ++col) {
            a[col + row * size] = (float)rand() / (RAND_MAX / 10);
            c[row * size + col] = 0;
            d[row * size + col] = 0;
        }
    }

    int kernelSize = 5;
    float* b = (float*)malloc(kernelSize * kernelSize * sizeof(float));
    for (int i = 0; i < kernelSize * kernelSize; ++i) {
        b[i] = 1;
    }

    // Convolution on the CPU, accumulating into d.
    clock_t start = clock();
    for (int row = 0; row < size; ++row) {
        for (int col = 0; col < size; ++col) {
            for (int i = 0; i < kernelSize; ++i) {
                for (int j = 0; j < kernelSize; ++j) {
                    float v = 0;
                    // Shift to the top-left corner of the kernel window so the
                    // neighbouring element can be located.
                    int curRow = row - kernelSize / 2 + i;
                    int curCol = col - kernelSize / 2 + j;
                    if (curRow >= 0 && curCol >= 0 && curRow < size && curCol < size) {
                        v = *(a + curRow * size + curCol);
                    }
                    *(d + row * size + col) += *(b + i * kernelSize + j) * v;
                }
            }
        }
    }
    clock_t end = clock();
    double interval = double(end - start) / CLOCKS_PER_SEC;
    printf("CPU time: %lf\n", interval);

    // Convolution on the GPU.
    clock_t start1 = clock();
    mulWithCuda(c, a, b, size, kernelSize);
    clock_t end1 = clock();
    double interval1 = double(end1 - start1) / CLOCKS_PER_SEC;
    printf("GPU time: %lf\n", interval1);
    printf("Speedup: %lf\n", interval / interval1);

    for (int row = 0; row < size; ++row) {
        for (int col = 0; col < size; ++col) {
            printf("%f ", a[col + row * size]);
        }
        printf("\n");
    }
    printf("\n");
    for (int row = 0; row < kernelSize; ++row) {
        for (int col = 0; col < kernelSize; ++col) {
            printf("%f ", b[col + row * kernelSize]);
        }
        printf("\n");
    }
    printf("\n");
    printf("GPU result:\n");
    for (int row = 0; row < size; ++row) {
        for (int col = 0; col < size; ++col) {
            printf("%f ", c[col + row * size]);
        }
        printf("\n");
    }
    printf("\n");
    printf("CPU result:\n");
    for (int row = 0; row < size; ++row) {
        for (int col = 0; col < size; ++col) {
            printf("%f ", d[col + row * size]);
        }
        printf("\n");
    }
    printf("\n");

    free(a); free(b); free(c); free(d);
    return 0;
}
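Since main() prints both the CPU result (d) and the GPU result (c), it is easy to go one step further and compare them numerically instead of reading the two matrices by eye. Below is a minimal sketch of such a check; the function name compareResults and the tolerance eps are illustrative choices, not part of the original code.

#include <math.h>
#include <stdio.h>

// Compare the CPU and GPU results element by element and return the number of
// entries whose absolute difference exceeds eps.
int compareResults(const float* cpu, const float* gpu, int size, float eps)
{
    int mismatches = 0;
    for (int row = 0; row < size; ++row) {
        for (int col = 0; col < size; ++col) {
            float diff = fabsf(cpu[row * size + col] - gpu[row * size + col]);
            if (diff > eps) {
                printf("mismatch at (%d, %d): cpu=%f gpu=%f\n",
                       row, col, cpu[row * size + col], gpu[row * size + col]);
                ++mismatches;
            }
        }
    }
    return mismatches;
}

Calling something like compareResults(d, c, size, 1e-4f) just before the final return in main() would flag any element where the two implementations disagree.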
// ==== CUDA source: conv kernel and the mulWithCuda wrapper ====
#include <stdio.h>
#include <stdlib.h>
#include <math.h>

#define dim1 16
#define dim2 16
#define N 4

__global__ void conv(float* a, float* b, float* c, int size, int kernelSize)
{
    // Each thread handles one column and a batch of N consecutive rows.
    int row = (blockIdx.x * blockDim.x) + threadIdx.x;
    int col = (blockIdx.y * blockDim.y) + threadIdx.y;
    if (row * N < size && col < size)
    {
        for (int r = row * N; r < row * N + N && r < size; r++) {
            float sum = 0;
            for (int i = 0; i < kernelSize; ++i) {
                for (int j = 0; j < kernelSize; ++j) {
                    float v = 0;
                    // Shift to the top-left corner of the kernel window so the
                    // neighbouring element can be located.
                    int cR = r - kernelSize / 2 + i;
                    int cC = col - kernelSize / 2 + j;
                    if (cR >= 0 && cC >= 0 && cR < size && cC < size) {
                        v = a[cR * size + cC];
                    }
                    sum += b[i * kernelSize + j] * v;
                }
            }
            c[r * size + col] = sum;
        }
    }
}
extern "C" int mulWithCuda(float* c, float* a, float* b, int size, int kernelSize) { float* dev_a = 0; float* dev_b = 0; float* dev_c = 0; cudaError_t cudaStatus;// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = cudaSetDevice(0); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?"); goto Error; }// Allocate GPU buffers for three vectors (two input, one output) .
cudaStatus = cudaMalloc((void**)&dev_c, size * size * sizeof(float)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; }cudaStatus = cudaMalloc((void**)&dev_a, size * size * sizeof(float));
if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; }cudaStatus = cudaMalloc((void**)&dev_b, kernelSize * kernelSize * sizeof(float));
if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; }// Copy input vectors from host memory to GPU buffers.
cudaStatus = cudaMemcpy(dev_a, a, size * size * sizeof(float), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; }cudaStatus = cudaMemcpy(dev_b, b, kernelSize * kernelSize * sizeof(float), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } dim3 block(dim1, dim2); dim3 grid( (( size - 1 ) / dim1) /N + 1, (size - 1) / dim2 + 1); //dim3 grid(ceil(size/dimen1/N) , ceil(size / dimen2), 1); conv << <grid, block >> >(dev_a, dev_b, dev_c, size, kernelSize);// Check for any errors launching the kernel
cudaStatus = cudaGetLastError(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus)); goto Error; }// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch. cudaStatus = cudaDeviceSynchronize(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus); goto Error; }// Copy output vector from GPU buffer to host memory.
cudaStatus = cudaMemcpy(c, dev_c, size * size * sizeof(int), cudaMemcpyDeviceToHost); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; }Error:
cudaFree(dev_c); cudaFree(dev_a); cudaFree(dev_b);return cudaStatus;
}
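The GPU time printed in main() is measured with clock() around the whole mulWithCuda call, so it also includes device selection, allocation and the host/device copies. If only the kernel itself is of interest, CUDA events are the usual tool. The following is a minimal sketch; the event variable names and the placement around the conv launch are illustrative, not part of the original code.

// Time just the kernel launch with CUDA events (sketch).
cudaEvent_t evStart, evStop;
cudaEventCreate(&evStart);
cudaEventCreate(&evStop);

cudaEventRecord(evStart, 0);
conv<<<grid, block>>>(dev_a, dev_b, dev_c, size, kernelSize);
cudaEventRecord(evStop, 0);
cudaEventSynchronize(evStop);                 // wait until the kernel has finished

float ms = 0.0f;
cudaEventElapsedTime(&ms, evStart, evStop);   // elapsed time in milliseconds
printf("kernel time: %f ms\n", ms);

cudaEventDestroy(evStart);
cudaEventDestroy(evStop);

Measured this way, the copies and allocations no longer dominate the "GPU time", which matters for a tiny 10x10 example like the one above where the kernel itself finishes almost instantly.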
Reprinted from: http://qxio.baihongyu.com/