I wrote a short CUDA program that uses the highly optimized CUB library to demonstrate that a single core from an old quad-core Intel Q6600 processor (all four cores together should be able to reach ~30 GFLOPS) can beat an Nvidia 750 Ti (supposedly capable of 1306 GFLOPS single-precision) at an inclusive scan (i.e. a cumulative/prefix sum) over 100,000 elements. Why is this the case?
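For context, an inclusive scan keeps the running total at each position; std::partial_sum from the C++ standard library computes exactly this on the CPU:

#include <numeric>
#include <vector>
#include <cstdio>

int main()
{
    std::vector<float> in = { 3.0f, 1.0f, 4.0f, 1.5f };
    std::vector<float> out(in.size());
    // out[i] = in[0] + in[1] + ... + in[i]
    std::partial_sum(in.begin(), in.end(), out.begin());
    for (float v : out) printf("%g ", v); // prints: 3 4 8 9.5
    return 0;
}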
The source code is:
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <cub/cub.cuh>
#include <stdio.h>
#include <time.h>
#include <algorithm>
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true)
{
    if (code != cudaSuccess)
    {
        fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
        if (abort) exit(code);
    }
}
void fillArrayWithRandom(float* inputArray, int inputN)
{
    for (int i = 0; i < inputN; i++)
    {
        inputArray[i] = (float)rand() / float(RAND_MAX);
    }
}
void inclusiveSum_CPU(float *inputArray, float *inputSummedArray, int inputN)
{
    for (int i = 0; i < inputN; i++)
    {
        if (i > 0)
        {
            inputSummedArray[i] = inputSummedArray[i - 1] + inputArray[i];
        }
        else
        {
            inputSummedArray[i] = inputArray[i];
        }
    }
}
int main()
{
    int N = 100000; //one hundred thousand elements
    const int numSimulations = 10000;
    //Make Host Arrays
    float* testArray_CPU = (float *)malloc(sizeof(float)*N);
    fillArrayWithRandom(testArray_CPU, N);
    float* testArrayOutput_CPU = (float *)malloc(sizeof(float)*N);
    //Make GPU Arrays
    float* testArray_GPU;
    gpuErrchk(cudaMalloc(&testArray_GPU, N*sizeof(float)));
    gpuErrchk(cudaMemcpy(testArray_GPU, testArray_CPU, N*sizeof(float), cudaMemcpyHostToDevice));
    float* testArrayOutput_GPU;
    gpuErrchk(cudaMalloc(&testArrayOutput_GPU, N*sizeof(float)));
    //Initiate the benchmark variables
    clock_t begin_CPU, end_CPU;
    float time_spent_GPU, time_spent_CPU;
    //GPU prep
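    // CUB convention: when d_temp_storage is NULL, InclusiveSum performs no scan
    // and instead writes the required temporary storage size into temp_storage_bytes.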
    void *d_temp_storage = NULL;
    size_t temp_storage_bytes = 0;
    cub::DeviceScan::InclusiveSum(d_temp_storage, temp_storage_bytes, testArray_GPU, testArrayOutput_GPU, N);
    gpuErrchk(cudaMalloc(&d_temp_storage, temp_storage_bytes));
    //GPU Timing
    cudaEvent_t start, stop;
    gpuErrchk(cudaEventCreate(&start));
    gpuErrchk(cudaEventCreate(&stop));
    gpuErrchk(cudaEventRecord(start, 0));
    for (int i = 0; i < numSimulations; i++)
    {
        cub::DeviceScan::InclusiveSum(d_temp_storage, temp_storage_bytes, testArray_GPU, testArrayOutput_GPU, N);
    }
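    // The InclusiveSum calls above are asynchronous kernel launches; the
    // cudaDeviceSynchronize() below ensures all 10,000 scans have actually
    // finished before the stop event is recorded.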
    gpuErrchk(cudaDeviceSynchronize());
    gpuErrchk(cudaEventRecord(stop, 0));
    gpuErrchk(cudaEventSynchronize(stop));
    gpuErrchk(cudaEventElapsedTime(&time_spent_GPU, start, stop));
    cudaError_t error = cudaGetLastError();
    if (error != cudaSuccess)
    {
        printf("CUDA error: %s\n", cudaGetErrorString(error));
        exit(-1);
    }
    time_spent_GPU = time_spent_GPU / 1000.0f; //cudaEventElapsedTime reports milliseconds
    float avg_GPU = time_spent_GPU / numSimulations;
    printf("Avg. GPU Simulation Time: %.17g [sec/sim]\n", avg_GPU);
    //CPU Timing
    begin_CPU = clock();
    for (int i = 0; i < numSimulations; i++)
    {
        inclusiveSum_CPU(testArray_CPU, testArrayOutput_CPU, N);
    }
    end_CPU = clock();
    time_spent_CPU = (float)(end_CPU - begin_CPU) / CLOCKS_PER_SEC;
    float avg_CPU = time_spent_CPU / numSimulations;
    printf("Avg. CPU Simulation Time: %.17g [sec/sim]\n", avg_CPU);
    printf("GPU/CPU Timing: %.17gx\n", avg_GPU / avg_CPU);
    //Cleanup
    gpuErrchk(cudaEventDestroy(start));
    gpuErrchk(cudaEventDestroy(stop));
    gpuErrchk(cudaFree(d_temp_storage));
    gpuErrchk(cudaFree(testArray_GPU));
    gpuErrchk(cudaFree(testArrayOutput_GPU));
    free(testArray_CPU);
    free(testArrayOutput_CPU);
    return 0;
}
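Note that the benchmark never verifies that the two implementations agree. A minimal check, reusing the arrays from the program above (approxEqual is a hypothetical helper I'm sketching here, not part of CUB; it needs #include <math.h>):

// Hypothetical correctness check; float prefix sums accumulate rounding
// error, and summation order differs between CPU and GPU, so compare
// with a relative tolerance rather than exact equality.
bool approxEqual(const float* a, const float* b, int n, float relTol)
{
    for (int i = 0; i < n; i++)
    {
        if (fabsf(a[i] - b[i]) > relTol * (fabsf(b[i]) + 1e-6f))
            return false;
    }
    return true;
}
// Usage, after the timing loops in main:
//   float* gpuResult = (float *)malloc(sizeof(float)*N);
//   gpuErrchk(cudaMemcpy(gpuResult, testArrayOutput_GPU, N*sizeof(float), cudaMemcpyDeviceToHost));
//   printf("Results match: %d\n", approxEqual(gpuResult, testArrayOutput_CPU, N, 1e-4f));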
When I run it on my machine, this is the output:
Avg. GPU Simulation Time: 0.0011999999405816197 [sec/sim]
Avg. CPU Simulation Time: 0.00059999997029080987 [sec/sim]
GPU/CPU Timing: 2x
Also, here are my compile flags and build output:
1>------ Build started: Project: speedTest, Configuration: Debug Win32 ------
1> Compiling CUDA source file kernel.cu...
1>
1>  C:\Users\Owner\Documents\Visual Studio 2013\Projects\speedTest\speedTest>"C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v6.5\bin\nvcc.exe" -gencode=arch=compute_50,code="sm_50,compute_50" --use-local-env --cl-version 2013 -ccbin "C:\Program Files (x86)\Microsoft Visual Studio 12.0\VC\bin" -rdc=true -I"C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v6.5\include" -I"C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v6.5\include" -G --keep-dir Debug -maxrregcount=0 --machine 32 --compile -cudart static -g -DWIN32 -D_DEBUG -D_CONSOLE -D_MBCS -Xcompiler "/EHsc /W3 /nologo /Od /Zi /RTC1 /MDd " -o Debug\kernel.cu.obj "C:\Users\Owner\Documents\Visual Studio 2013\Projects\speedTest\speedTest\kernel.cu"
1> kernel.cu
1>
1>  C:\Users\Owner\Documents\Visual Studio 2013\Projects\speedTest\speedTest>"C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v6.5\bin\nvcc.exe" -dlink -o Debug\speedTest.device-link.obj -Xcompiler "/EHsc /W3 /nologo /Od /Zi /RTC1 /MDd " -L"C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v6.5\lib\Win32" cudart.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib -gencode=arch=compute_50,code=sm_50 -G --machine 32 Debug\kernel.cu.obj
1> cudart.lib
1> kernel32.lib
1> user32.lib
1> gdi32.lib
1> winspool.lib
1> comdlg32.lib
1> advapi32.lib
1> shell32.lib
1> ole32.lib
1> oleaut32.lib
1> uuid.lib
1> odbc32.lib
1> odbccp32.lib
1> kernel.cu.obj
1>  speedTest.vcxproj -> C:\Users\Owner\Documents\Visual Studio 2013\Projects\speedTest\Debug\speedTest.exe
1>  copy "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v6.5\bin\cudart*.dll" "C:\Users\Owner\Documents\Visual Studio 2013\Projects\speedTest\Debug"
1>  C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v6.5\bin\cudart32_65.dll
1>  C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v6.5\bin\cudart64_65.dll
1> 2 file(s) copied.
========== Build: 1 succeeded, 0 failed, 0 up-to-date, 0 skipped ==========
Thanks to Robert Crovella, it turns out I was building in the notoriously slow "Debug" configuration instead of "Release".
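For anyone who lands here with the same problem: the Debug command line above passes -G to nvcc (device-side debug info, which disables most GPU code optimization) and /Od /Zi /RTC1 /MDd to the host compiler. A Release-style invocation drops those in favor of optimization flags; roughly like this (a sketch based on the Debug flags above, not the exact command line Visual Studio emits):

nvcc.exe -gencode=arch=compute_50,code="sm_50,compute_50" --use-local-env --cl-version 2013 ^
    -ccbin "C:\Program Files (x86)\Microsoft Visual Studio 12.0\VC\bin" ^
    --machine 32 --compile -cudart static -DWIN32 -DNDEBUG -D_CONSOLE -D_MBCS ^
    -Xcompiler "/EHsc /W3 /nologo /O2 /MD" -o Release\kernel.cu.obj kernel.cu

With -G gone and host optimization (/O2) enabled, the GPU side of the benchmark is no longer crippled.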