While trying to GPU-accelerate a simple algorithm with OpenCV, I noticed that on my machine (Ubuntu 12.10, NVidia 9800GT, CUDA 4.2.9, g++ 4.7.2) the GPU version is actually slower than the CPU version. I used the following code for the test:
#include <opencv2/opencv.hpp>
#include <opencv2/gpu/gpu.hpp>

#include <chrono>
#include <iostream>

int main()
{
    using namespace cv;
    using namespace std;

    Mat img1(512, 512, CV_32FC3, Scalar(0.1f, 0.2f, 0.3f));
    Mat img2(128, 128, CV_32FC3, Scalar(0.2f, 0.3f, 0.4f));
    Mat img3(128, 128, CV_32FC3, Scalar(0.3f, 0.4f, 0.5f));

    // CPU version
    auto startCPU = chrono::high_resolution_clock::now();
    double resultCPU(0.0);
    cout << "CPU ... " << flush;
    for (int y(0); y < img2.rows; ++y)
    {
        for (int x(0); x < img2.cols; ++x)
        {
            Mat roi(img1(Rect(x, y, img2.cols, img2.rows)));
            Mat diff;
            absdiff(roi, img2, diff);
            Mat diffMult(diff.mul(img3));
            Scalar diffSum(sum(diff));
            double diffVal(diffSum[0] + diffSum[1] + diffSum[2]);
            resultCPU += diffVal;
        }
    }
    auto endCPU = chrono::high_resolution_clock::now();
    auto elapsedCPU = endCPU - startCPU;
    cout << "done. " << resultCPU << " - ticks: " << elapsedCPU.count() << endl;

    // GPU version: the images are uploaded once, before the timer starts
    gpu::GpuMat img1GPU(img1);
    gpu::GpuMat img2GPU(img2);
    gpu::GpuMat img3GPU(img3);
    gpu::GpuMat diffGPU;
    gpu::GpuMat diffMultGPU;
    gpu::GpuMat sumBuf;
    double resultGPU(0.0);
    auto startGPU = chrono::high_resolution_clock::now();
    cout << "GPU ... " << flush;
    for (int y(0); y < img2GPU.rows; ++y)
    {
        for (int x(0); x < img2GPU.cols; ++x)
        {
            gpu::GpuMat roiGPU(img1GPU, Rect(x, y, img2GPU.cols, img2GPU.rows));
            gpu::absdiff(roiGPU, img2GPU, diffGPU);
            gpu::multiply(diffGPU, img3GPU, diffMultGPU);
            Scalar diffSum(gpu::sum(diffMultGPU, sumBuf));
            double diffVal(diffSum[0] + diffSum[1] + diffSum[2]);
            resultGPU += diffVal;
        }
    }
    auto endGPU = chrono::high_resolution_clock::now();
    auto elapsedGPU = endGPU - startGPU;
    cout << "done. " << resultGPU << " - ticks: " << elapsedGPU.count() << endl;
}
My results are as follows:
CPU ... done. 8.05306e+07 - ticks: 4028470
GPU ... done. 3.22122e+07 - ticks: 5459935
In case it helps: my profiler (系统概述 1.1.8) tells me that most of the time is spent in cudaDeviceSynchronize.
Am I doing something fundamentally wrong in the way I use the OpenCV GPU functions, or is my GPU just slow?
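A minimal standalone sketch (not part of the test above, but using the same gpu module) that times one gpu::absdiff on a 128x128 image a few times could separate the fixed per-call overhead from the actual work:

#include <opencv2/opencv.hpp>
#include <opencv2/gpu/gpu.hpp>

#include <chrono>
#include <iostream>

int main()
{
    using namespace cv;
    using namespace std;

    Mat a(128, 128, CV_32FC3, Scalar(0.1f, 0.2f, 0.3f));
    Mat b(128, 128, CV_32FC3, Scalar(0.2f, 0.3f, 0.4f));

    // Uploading already creates the CUDA context, so it stays outside the timing.
    gpu::GpuMat aGPU(a), bGPU(b), cGPU;

    // The first call may still be slower due to lazy initialization; later calls
    // show the fixed launch-and-synchronize cost of a single tiny gpu:: call.
    for (int i(0); i < 5; ++i)
    {
        auto start = chrono::high_resolution_clock::now();
        gpu::absdiff(aGPU, bGPU, cGPU);
        auto end = chrono::high_resolution_clock::now();
        cout << "call " << i << ": " << (end - start).count() << " ticks" << endl;
    }
}

Each of the roughly 16000 iterations of the loop above makes three such gpu:: calls, and every one of them pays that fixed cost on top of the actual 128x128 arithmetic, which would explain a GPU version that ends up slower than the CPU one.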
Thanks to the comments from hub and Eric I was able to change my test in such a way that the GPU version actually is faster than the CPU version. The bug that made the checksums of the two versions differ has also been removed now. ;-)
#include <opencv2/opencv.hpp>
#include <opencv2/gpu/gpu.hpp>

#include <chrono>
#include <iostream>

int main()
{
    using namespace cv;
    using namespace std;

    Mat img1(512, 512, CV_32FC3, Scalar(1.0f, 2.0f, 3.0f));
    Mat img2(128, 128, CV_32FC3, Scalar(4.0f, 5.0f, 6.0f));
    Mat img3(128, 128, CV_32FC3, Scalar(7.0f, 8.0f, 9.0f));

    // CPU version: accumulate into a result image instead of a scalar
    Mat resultCPU(img2.rows, img2.cols, CV_32FC3, Scalar(0.0f, 0.0f, 0.0f));
    auto startCPU = chrono::high_resolution_clock::now();
    cout << "CPU ... " << flush;
    for (int y(0); y < img1.rows - img2.rows; ++y)
    {
        for (int x(0); x < img1.cols - img2.cols; ++x)
        {
            Mat roi(img1(Rect(x, y, img2.cols, img2.rows)));
            Mat diff;
            absdiff(roi, img2, diff);
            Mat diffMult(diff.mul(img3));
            resultCPU += diffMult;
        }
    }
    auto endCPU = chrono::high_resolution_clock::now();
    auto elapsedCPU = endCPU - startCPU;
    Scalar meanCPU(mean(resultCPU));
    cout << "done. " << meanCPU << " - ticks: " << elapsedCPU.count() << endl;

    // GPU version: upload once and preallocate all intermediate buffers
    gpu::GpuMat img1GPU(img1);
    gpu::GpuMat img2GPU(img2);
    gpu::GpuMat img3GPU(img3);
    gpu::GpuMat diffGPU(img2.rows, img2.cols, CV_32FC3);
    gpu::GpuMat diffMultGPU(img2.rows, img2.cols, CV_32FC3);
    gpu::GpuMat resultGPU(img2.rows, img2.cols, CV_32FC3, Scalar(0.0f, 0.0f, 0.0f));
    auto startGPU = chrono::high_resolution_clock::now();
    cout << "GPU ... " << flush;
    for (int y(0); y < img1GPU.rows - img2GPU.rows; ++y)
    {
        for (int x(0); x < img1GPU.cols - img2GPU.cols; ++x)
        {
            gpu::GpuMat roiGPU(img1GPU, Rect(x, y, img2GPU.cols, img2GPU.rows));
            gpu::absdiff(roiGPU, img2GPU, diffGPU);
            gpu::multiply(diffGPU, img3GPU, diffMultGPU);
            gpu::add(resultGPU, diffMultGPU, resultGPU);
        }
    }
    auto endGPU = chrono::high_resolution_clock::now();
    auto elapsedGPU = endGPU - startGPU;

    // Download the accumulated result and compare its mean with the CPU version
    Mat downloadedResultGPU(resultGPU);
    Scalar meanGPU(mean(downloadedResultGPU));
    cout << "done. " << meanGPU << " - ticks: " << elapsedGPU.count() << endl;
}
Output:
CPU ... done. [3.09658e+06, 3.53894e+06, 3.98131e+06, 0] - ticks: 34021332
GPU ... done. [3.09658e+06, 3.53894e+06, 3.98131e+06, 0] - ticks: 20609880
That is not quite the speedup I was hoping for, but my GPU is probably just not the best fit for this kind of work. Thanks, everyone.
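One thing that might still be worth trying (just a sketch, assuming an OpenCV 2.4-style gpu module where gpu::absdiff, gpu::multiply and gpu::add accept an optional gpu::Stream as their last parameter): enqueue the per-ROI operations on a stream and wait only once after the loop, so that not every single call blocks in cudaDeviceSynchronize. Whether this gains anything depends on how much of the time goes into synchronizing rather than launching kernels.

// Sketch only: same inner loop as above, but queued on a gpu::Stream.
// Operations on one stream execute in order, so reusing diffGPU and
// diffMultGPU across iterations stays safe.
gpu::Stream stream;
for (int y(0); y < img1GPU.rows - img2GPU.rows; ++y)
{
    for (int x(0); x < img1GPU.cols - img2GPU.cols; ++x)
    {
        gpu::GpuMat roiGPU(img1GPU, Rect(x, y, img2GPU.cols, img2GPU.rows));
        gpu::absdiff(roiGPU, img2GPU, diffGPU, stream);
        gpu::multiply(diffGPU, img3GPU, diffMultGPU, 1.0, -1, stream);
        gpu::add(resultGPU, diffMultGPU, resultGPU, gpu::GpuMat(), -1, stream);
    }
}
stream.waitForCompletion(); // synchronize once, after all work has been enqueued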