cudaMallocManaged(统一内存)与cuBLAS



我正在尝试将通过 cudaMallocManaged() 分配的统一内存与 cuBLAS 库一起使用。作为一个简单的例子,我执行一个矩阵乘向量的运算,并将结果存储在数组 results 中。然而,打印 results 数组时,得到的全是 0,而不是矩阵 mat 乘以向量 vec 的结果。
我使用的流程是:

  1. 使用 cudaMallocManaged() 分配内存
  2. 使用数据初始化阵列
  3. 分配cuBLAS句柄
  4. 调用cublasDgemv执行乘法,将结果存储在results

使用 new 分配内存、然后使用 cublasSetMatrix() 和 cublasSetVector() 复制数据时,此操作效果良好。

如何将统一内存与cuBLAS一起使用?

以下是最低限度的工作示例:

统一内存尝试(这会使 results 返回的全是 0):

#include <cuda.h>
#include <cuda_runtime.h>
#include <iostream>
#include <ctime>
#include "cublas_v2.h"
#define cudaErrChk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Check the status returned by a CUDA runtime call; on failure, print the
// error string together with the source location and (by default) exit.
//
//   code  - status returned by the wrapped CUDA runtime call
//   file  - source file name  (__FILE__, supplied by the macro)
//   line  - source line number (__LINE__, supplied by the macro)
//   abort - when true (default), terminate the process on error
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
    if (code != cudaSuccess)
    {
        // Fixed: the format string ended in a literal 'n' ("%dn") because the
        // backslash was lost; use "\n" so each report ends with a newline.
        fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
        if (abort) exit(code);
    }
}
// Translate a cuBLAS status code into its symbolic name and report failures.
//
// Call sites wrap cuBLAS calls directly -- e.g.
// cublasErrChk(cublasCreate(&handle)) -- and discard the returned string, so
// the original version silently swallowed every error.  Fixed: any status
// other than CUBLAS_STATUS_SUCCESS is now also printed to stderr; the
// function still returns the name string, so existing callers are unaffected.
static const char *cublasErrChk(cublasStatus_t error)
{
    const char *name;
    switch (error)
    {
    case CUBLAS_STATUS_SUCCESS:
        name = "CUBLAS_STATUS_SUCCESS"; break;
    case CUBLAS_STATUS_NOT_INITIALIZED:
        name = "CUBLAS_STATUS_NOT_INITIALIZED"; break;
    case CUBLAS_STATUS_ALLOC_FAILED:
        name = "CUBLAS_STATUS_ALLOC_FAILED"; break;
    case CUBLAS_STATUS_INVALID_VALUE:
        name = "CUBLAS_STATUS_INVALID_VALUE"; break;
    case CUBLAS_STATUS_ARCH_MISMATCH:
        name = "CUBLAS_STATUS_ARCH_MISMATCH"; break;
    case CUBLAS_STATUS_MAPPING_ERROR:
        name = "CUBLAS_STATUS_MAPPING_ERROR"; break;
    case CUBLAS_STATUS_EXECUTION_FAILED:
        name = "CUBLAS_STATUS_EXECUTION_FAILED"; break;
    case CUBLAS_STATUS_INTERNAL_ERROR:
        name = "CUBLAS_STATUS_INTERNAL_ERROR"; break;
    default:
        name = "<unknown>"; break;
    }
    if (error != CUBLAS_STATUS_SUCCESS)
        fprintf(stderr, "cuBLAS error: %s\n", name);
    return name;
}
// Minimal unified-memory example: results = mat * vec via cublasDgemv.
// This version intentionally omits the cudaDeviceSynchronize() after the
// gemv call -- that missing sync is the bug the surrounding text discusses
// (the host reads `results` before the GPU has written it).
int main() {
    size_t dims = 4;
    double *vec, *mat, *results;

    // Managed (unified) memory is accessible from both host and device.
    cudaErrChk( cudaMallocManaged(&vec, dims * sizeof(double)) );
    cudaErrChk( cudaMallocManaged(&mat, dims * dims * sizeof(double)) );
    cudaErrChk( cudaMallocManaged(&results, dims * sizeof(double)) );

    // Fixed: the original loops ran i = 1 .. dims, which skipped element 0
    // and wrote one past the end of each allocation.  (Also fixed the lost
    // "\n" escapes in the printf format strings.)
    printf("Vector:\n");
    for (size_t i = 0; i < dims; i++) {
        vec[i] = 0.5 * (i + 1);
        printf("%.2lf ", vec[i]);
    }

    printf("\n\nMatrix:\n");
    for (size_t i = 0; i < dims * dims; i++) {
        mat[i] = 1.0 * (i + 1);
        printf("%.2lf ", mat[i]);
        if ((i + 1) % dims == 0)   // row break after every dims-th value
            printf("\n");
    }
    printf("\n");

    cublasHandle_t handle;
    cublasErrChk( cublasCreate(&handle) );

    // Fixed: beta = 0 so the uninitialized contents of `results` are
    // overwritten rather than accumulated into (beta = 1 added garbage).
    double alpha = 1.0, beta = 0.0;

    // results = alpha * mat * vec + beta * results
    cublasErrChk(
        cublasDgemv(
            handle, CUBLAS_OP_N,
            dims, dims,
            &alpha,
            mat, dims,
            vec, 1,
            &beta,
            results, 1
        )
    );

    // BUG (deliberate, demonstrated by this example): cublasDgemv is
    // asynchronous, so without cudaDeviceSynchronize() here the host may
    // print stale values from the managed buffer.
    for (size_t i = 0; i < dims; i++)
        printf("%.2lf ", results[i]);
    printf("\n");

    cublasErrChk( cublasDestroy(handle) );  // release the cuBLAS handle
    cudaErrChk( cudaFree(vec) );
    cudaErrChk( cudaFree(mat) );
    cudaErrChk( cudaFree(results) );
    return 0;
}

常规分配 / cublasSetMatrix() 尝试:

#include <cuda.h>
#include <cuda_runtime.h>
#include <iostream>
#include <ctime>
#include "cublas_v2.h"
#define cudaErrChk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Check the status returned by a CUDA runtime call; on failure, print the
// error string together with the source location and (by default) exit.
//
//   code  - status returned by the wrapped CUDA runtime call
//   file  - source file name  (__FILE__, supplied by the macro)
//   line  - source line number (__LINE__, supplied by the macro)
//   abort - when true (default), terminate the process on error
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
    if (code != cudaSuccess)
    {
        // Fixed: the format string ended in a literal 'n' ("%dn") because the
        // backslash was lost; use "\n" so each report ends with a newline.
        fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
        if (abort) exit(code);
    }
}
// Translate a cuBLAS status code into its symbolic name and report failures.
//
// Call sites wrap cuBLAS calls directly -- e.g.
// cublasErrChk(cublasCreate(&handle)) -- and discard the returned string, so
// the original version silently swallowed every error.  Fixed: any status
// other than CUBLAS_STATUS_SUCCESS is now also printed to stderr; the
// function still returns the name string, so existing callers are unaffected.
static const char *cublasErrChk(cublasStatus_t error)
{
    const char *name;
    switch (error)
    {
    case CUBLAS_STATUS_SUCCESS:
        name = "CUBLAS_STATUS_SUCCESS"; break;
    case CUBLAS_STATUS_NOT_INITIALIZED:
        name = "CUBLAS_STATUS_NOT_INITIALIZED"; break;
    case CUBLAS_STATUS_ALLOC_FAILED:
        name = "CUBLAS_STATUS_ALLOC_FAILED"; break;
    case CUBLAS_STATUS_INVALID_VALUE:
        name = "CUBLAS_STATUS_INVALID_VALUE"; break;
    case CUBLAS_STATUS_ARCH_MISMATCH:
        name = "CUBLAS_STATUS_ARCH_MISMATCH"; break;
    case CUBLAS_STATUS_MAPPING_ERROR:
        name = "CUBLAS_STATUS_MAPPING_ERROR"; break;
    case CUBLAS_STATUS_EXECUTION_FAILED:
        name = "CUBLAS_STATUS_EXECUTION_FAILED"; break;
    case CUBLAS_STATUS_INTERNAL_ERROR:
        name = "CUBLAS_STATUS_INTERNAL_ERROR"; break;
    default:
        name = "<unknown>"; break;
    }
    if (error != CUBLAS_STATUS_SUCCESS)
        fprintf(stderr, "cuBLAS error: %s\n", name);
    return name;
}
// Reference version: explicit device buffers plus cublasSetVector /
// cublasSetMatrix copies, then results = mat * vec via cublasDgemv.
// cublasGetVector is a blocking copy, so no extra synchronization is
// needed before reading h_results on the host.
int main() {
    size_t dims = 4;

    // Host-side buffers.
    double *h_vec, *h_mat, *h_results;
    h_vec = new double[dims];
    h_mat = new double[dims * dims];
    h_results = new double[dims];

    // Fixed: the original loops ran i = 1 .. dims, which skipped element 0
    // and wrote one past the end of each buffer.  (Also fixed the lost
    // "\n" escapes in the printf format strings.)
    printf("Vector:\n");
    for (size_t i = 0; i < dims; i++) {
        h_vec[i] = 0.5 * (i + 1);
        printf("%.2lf ", h_vec[i]);
    }

    printf("\n\nMatrix:\n");
    for (size_t i = 0; i < dims * dims; i++) {
        h_mat[i] = 1.0 * (i + 1);
        printf("%.2lf ", h_mat[i]);
        if ((i + 1) % dims == 0)   // row break after every dims-th value
            printf("\n");
    }
    printf("\n");

    // Device-side buffers.
    double *d_vec, *d_mat, *d_results;
    cudaErrChk( cudaMalloc(&d_vec, dims * sizeof(double)) );
    cudaErrChk( cudaMalloc(&d_mat, dims * dims * sizeof(double)) );
    cudaErrChk( cudaMalloc(&d_results, dims * sizeof(double)) );

    cublasHandle_t handle;
    cublasErrChk( cublasCreate(&handle) );

    // Copy the input data explicitly to the GPU.
    cublasErrChk( cublasSetVector(dims, sizeof(*d_vec), h_vec, 1, d_vec, 1) );
    cublasErrChk( cublasSetMatrix(dims, dims, sizeof(double), h_mat, dims, d_mat, dims) );

    // Fixed: beta = 0 so the uninitialized device buffer d_results is
    // overwritten rather than accumulated into (beta = 1 added garbage).
    double alpha = 1.0, beta = 0.0;

    // d_results = alpha * d_mat * d_vec + beta * d_results
    cublasErrChk(
        cublasDgemv(
            handle, CUBLAS_OP_N,
            dims, dims,
            &alpha,
            d_mat, dims,
            d_vec, 1,
            &beta,
            d_results, 1
        )
    );

    // Blocking device-to-host copy of the result vector.
    cublasErrChk( cublasGetVector(dims, sizeof(*h_results), d_results, 1, h_results, 1) );
    for (size_t i = 0; i < dims; i++)
        printf("%.2lf ", h_results[i]);
    printf("\n");

    cublasErrChk( cublasDestroy(handle) );  // release the cuBLAS handle
    cudaErrChk( cudaFree(d_vec) );
    cudaErrChk( cudaFree(d_mat) );
    cudaErrChk( cudaFree(d_results) );
    delete [] h_vec;
    delete [] h_mat;
    delete [] h_results;
    return 0;
}

编译时使用
nvcc -o main main.cu -lcublas

正如 @talonmies 所指出的,问题是 cublasDgemv() 是异步调用,主机读取时结果尚未写回。这是通过在 cublasDgemv() 调用后添加 cudaDeviceSynchronize() 来修复的:

#include <cuda.h>
#include <cuda_runtime.h>
#include <iostream>
#include <ctime>
#include "cublas_v2.h"
#define cudaErrChk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Check the status returned by a CUDA runtime call; on failure, print the
// error string together with the source location and (by default) exit.
//
//   code  - status returned by the wrapped CUDA runtime call
//   file  - source file name  (__FILE__, supplied by the macro)
//   line  - source line number (__LINE__, supplied by the macro)
//   abort - when true (default), terminate the process on error
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
    if (code != cudaSuccess)
    {
        // Fixed: the format string ended in a literal 'n' ("%dn") because the
        // backslash was lost; use "\n" so each report ends with a newline.
        fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
        if (abort) exit(code);
    }
}
// Translate a cuBLAS status code into its symbolic name and report failures.
//
// Call sites wrap cuBLAS calls directly -- e.g.
// cublasErrChk(cublasCreate(&handle)) -- and discard the returned string, so
// the original version silently swallowed every error.  Fixed: any status
// other than CUBLAS_STATUS_SUCCESS is now also printed to stderr; the
// function still returns the name string, so existing callers are unaffected.
static const char *cublasErrChk(cublasStatus_t error)
{
    const char *name;
    switch (error)
    {
    case CUBLAS_STATUS_SUCCESS:
        name = "CUBLAS_STATUS_SUCCESS"; break;
    case CUBLAS_STATUS_NOT_INITIALIZED:
        name = "CUBLAS_STATUS_NOT_INITIALIZED"; break;
    case CUBLAS_STATUS_ALLOC_FAILED:
        name = "CUBLAS_STATUS_ALLOC_FAILED"; break;
    case CUBLAS_STATUS_INVALID_VALUE:
        name = "CUBLAS_STATUS_INVALID_VALUE"; break;
    case CUBLAS_STATUS_ARCH_MISMATCH:
        name = "CUBLAS_STATUS_ARCH_MISMATCH"; break;
    case CUBLAS_STATUS_MAPPING_ERROR:
        name = "CUBLAS_STATUS_MAPPING_ERROR"; break;
    case CUBLAS_STATUS_EXECUTION_FAILED:
        name = "CUBLAS_STATUS_EXECUTION_FAILED"; break;
    case CUBLAS_STATUS_INTERNAL_ERROR:
        name = "CUBLAS_STATUS_INTERNAL_ERROR"; break;
    default:
        name = "<unknown>"; break;
    }
    if (error != CUBLAS_STATUS_SUCCESS)
        fprintf(stderr, "cuBLAS error: %s\n", name);
    return name;
}
// Corrected unified-memory example: results = mat * vec via cublasDgemv,
// followed by cudaDeviceSynchronize() so the host does not read the managed
// buffer before the asynchronous gemv has finished writing it.
int main() {
    size_t dims = 4;
    double *vec, *mat, *results;

    // Managed (unified) memory is accessible from both host and device.
    cudaErrChk( cudaMallocManaged(&vec, dims * sizeof(double)) );
    cudaErrChk( cudaMallocManaged(&mat, dims * dims * sizeof(double)) );
    cudaErrChk( cudaMallocManaged(&results, dims * sizeof(double)) );

    // Fixed: the original loops ran i = 1 .. dims, which skipped element 0
    // and wrote one past the end of each allocation.  (Also fixed the lost
    // "\n" escapes in the printf format strings.)
    printf("Vector:\n");
    for (size_t i = 0; i < dims; i++) {
        vec[i] = 0.5 * (i + 1);
        printf("%.2lf ", vec[i]);
    }

    printf("\n\nMatrix:\n");
    for (size_t i = 0; i < dims * dims; i++) {
        mat[i] = 1.0 * (i + 1);
        printf("%.2lf ", mat[i]);
        if ((i + 1) % dims == 0)   // row break after every dims-th value
            printf("\n");
    }
    printf("\n");

    cublasHandle_t handle;
    cublasErrChk( cublasCreate(&handle) );

    // Fixed: beta = 0 so the uninitialized contents of `results` are
    // overwritten rather than accumulated into (beta = 1 added garbage).
    double alpha = 1.0, beta = 0.0;

    // results = alpha * mat * vec + beta * results
    cublasErrChk(
        cublasDgemv(
            handle, CUBLAS_OP_N,
            dims, dims,
            &alpha,
            vec == NULL ? NULL : mat, dims,
            vec, 1,
            &beta,
            results, 1
        )
    );

    // The fix: wait for the asynchronous gemv to complete before the host
    // reads `results`.  (Also fixed: the sync status is now checked.)
    cudaErrChk( cudaDeviceSynchronize() );

    for (size_t i = 0; i < dims; i++)
        printf("%.2lf ", results[i]);
    printf("\n");

    cublasErrChk( cublasDestroy(handle) );  // release the cuBLAS handle
    cudaErrChk( cudaFree(vec) );
    cudaErrChk( cudaFree(mat) );
    cudaErrChk( cudaFree(results) );
    return 0;
}

最新更新