Very slow matrix transpose operation with CUBLAS



I am trying to parallelize a matrix transpose operation using the CUBLAS library (with the cublasSgeam function). The output data is correct, but on average it takes about 150 times as long as my CPU version. Why?

CPU code (transposing a matrix with N = 5000, M = 140)

// Starting the timer
    float *matrixT = (float *) malloc (N * M * sizeof(float));
    for (int i = 0; i < N; i++)
        for (int j = 0; j < M; j++)
            matrixT[(j*N)+i] = matrix[(i*M)+j]; // matrix is obviously filled
//Ending the timer

GPU code (transposing a matrix with N = 5000, M = 140)

    float *h_matrixT , *d_matrixT , *d_matrix;
    h_matrixT = (float *) malloc (N * M * sizeof(float));
    cudaMalloc((void **)&d_matrixT , N * M * sizeof(float));
    cudaMalloc((void**)&d_matrix , N * M * sizeof(float));
    cudaMemcpy(d_matrix , matrix , N * M * sizeof(float) , cudaMemcpyHostToDevice);
//Starting the timer
    const float alpha = 1.0;
    const float beta  = 0.0;
    cublasHandle_t handle;
    cublasCreate(&handle);
    cublasSgeam(handle, CUBLAS_OP_T, CUBLAS_OP_N, N, M, &alpha, d_matrix, M, &beta, d_matrix, N, d_matrixT, N);
    cublasDestroy(handle);
//Ending the timer
    cudaMemcpy(h_matrixT , d_matrixT , N * M * sizeof(float) , cudaMemcpyDeviceToHost);

    cudaFree(d_matrix);
    cudaFree(d_matrixT);

Run times

CUBLAS: 148.461 ms

CPU: 0.986944 ms

PS: Running on a GeForce GTX 660 & an Intel Core i5 660

Run the code with a profiler to see where the time is actually being spent.
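For instance, with nvprof from the CUDA toolkit (a sketch; t469 is simply the executable built further below):

$ nvprof ./t469

Based on the measurements below, the per-call summary should show the transpose kernel itself finishing in well under a millisecond, with the bulk of the ~148 ms going to one-time context and library initialization.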

Move the cublasCreate call out of the timing region. It is picking up all sorts of CUDA and library start-up time, which should not be included in a benchmark of a single function (or, if you do intend to benchmark that way, then there is obviously little point in using the GPU to perform this one function: it will not speed it up, as you have discovered).

I would also suggest moving cublasDestroy out of the timing loop.

You will probably also want to include a cudaDeviceSynchronize(); before the final timer stop.

Here is a fully worked example, choosing M = 1000 and N = 1000, with the changes described above implemented:

$ cat t469.cu
#include <stdio.h>
#include <cublas_v2.h>
#include <time.h>
#include <sys/time.h>
#define uS_PER_SEC 1000000
#define uS_PER_mS 1000
#define N  1000
#define M 1000
int main(){
    timeval t1, t2;
    float *matrix = (float *) malloc (N * M * sizeof(float));
// Starting the timer
    gettimeofday(&t1, NULL);
    float *matrixT = (float *) malloc (N * M * sizeof(float));
    for (int i = 0; i < N; i++)
        for (int j = 0; j < M; j++)
            matrixT[(j*N)+i] = matrix[(i*M)+j]; // matrix is obviously filled
//Ending the timer
    gettimeofday(&t2, NULL);
    float et1 = (((t2.tv_sec*uS_PER_SEC)+t2.tv_usec) - ((t1.tv_sec*uS_PER_SEC)+t1.tv_usec))/(float)uS_PER_mS;
    printf("CPU time = %fmsn", et1);
    float *h_matrixT , *d_matrixT , *d_matrix;
    h_matrixT = (float *) malloc (N * M * sizeof(float));
    cudaMalloc((void **)&d_matrixT , N * M * sizeof(float));
    cudaMalloc((void**)&d_matrix , N * M * sizeof(float));
    cudaMemcpy(d_matrix , matrix , N * M * sizeof(float) , cudaMemcpyHostToDevice);
//Starting the timer
    gettimeofday(&t1, NULL);
    const float alpha = 1.0;
    const float beta  = 0.0;
    // gettimeofday(&t1, NULL); // (starting the timer here would include cublasCreate in the measurement)
    cublasHandle_t handle;
    cublasCreate(&handle);
    gettimeofday(&t1, NULL); // timer restarted here, after cublasCreate, so start-up cost is excluded
    cublasSgeam(handle, CUBLAS_OP_T, CUBLAS_OP_N, N, M, &alpha, d_matrix, M, &beta, d_matrix, N, d_matrixT, N);
    cudaDeviceSynchronize();
    gettimeofday(&t2, NULL);
    cublasDestroy(handle);
//Ending the timer
    float et2 = (((t2.tv_sec*uS_PER_SEC)+t2.tv_usec) - ((t1.tv_sec*uS_PER_SEC)+t1.tv_usec))/(float)uS_PER_mS;
    printf("GPU time = %fmsn", et2);
    cudaMemcpy(h_matrixT , d_matrixT , N * M * sizeof(float) , cudaMemcpyDeviceToHost);

    cudaFree(d_matrix);
    cudaFree(d_matrixT);
    return 0;
}
$ nvcc -O3 -arch=sm_20 -o t469 t469.cu -lcublas
$ ./t469
CPU time = 8.744000ms
GPU time = 0.327000ms
$

If instead I change the code above so that the timing starts before the cublasCreate call, I get this:

$ ./t469
CPU time = 9.475000ms
GPU time = 78.393997ms
$
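As a side note, device work is often timed with CUDA events instead of host timers, which removes the need to bracket the host clock reads with an explicit cudaDeviceSynchronize(). A minimal sketch, assuming the same handle, alpha/beta, and device pointers as in the example above:

    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);  // enqueue start marker on the default stream
    cublasSgeam(handle, CUBLAS_OP_T, CUBLAS_OP_N, N, M, &alpha, d_matrix, M, &beta, d_matrix, N, d_matrixT, N);
    cudaEventRecord(stop, 0);   // enqueue stop marker right after the transpose
    cudaEventSynchronize(stop); // wait until the stop event has completed
    float et_gpu = 0.0f;
    cudaEventElapsedTime(&et_gpu, start, stop); // elapsed time between events, in ms
    printf("GPU time = %fms\n", et_gpu);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);

Because both events are recorded on the same stream as the cublasSgeam call, the measured interval covers exactly the enqueued GPU work.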
