I have been looking for a tutorial or anything on how to make YOLO in C# use the GPU instead of the CPU. Everything I find says it works on both CPU and GPU, but nobody ever explains how to actually enable the GPU, and for me it always runs on the CPU. Below is the code I use with YOLOv5 in C#. It does not matter to me whether it uses YOLOv5 specifically, only that it uses the GPU. I did find that one tutorial, but I could not even find the download for NVIDIA cuDNN v7.6.3 for CUDA 10.1. It is really unclear how to use this with the GPU. Please help me :D
var image = pictureBox1.Image;
var scorer = new YoloScorer<YoloCocoP5Model>("Assets/Weights/yolov5n.onnx");
List<YoloPrediction> predictions = scorer.Predict(image);
var graphics = Graphics.FromImage(image);

foreach (var prediction in predictions) // iterate predictions to draw results
{
    using (MemoryStream ms = new MemoryStream())
    {
        pictureBox1.Image.Save(ms, ImageFormat.Png);
        prediction.Label.Color = Color.FromArgb(255, 255, 0, 0);
        double score = Math.Round(prediction.Score, 2);

        graphics.DrawRectangles(new Pen(prediction.Label.Color, 1),
            new[] { prediction.Rectangle });

        var (x, y) = (prediction.Rectangle.X - 3, prediction.Rectangle.Y - 23);

        graphics.DrawString($"{prediction.Label.Name} ({score})",
            new Font("Consolas", 16, GraphicsUnit.Pixel), new SolidBrush(prediction.Label.Color),
            new PointF(x, y));

        pictureBox1.Image = image;
    }
}
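First of all, the CUDA execution provider is only shipped in the GPU build of ONNX Runtime, so the project has to reference the Microsoft.ML.OnnxRuntime.Gpu NuGet package (not the CPU-only Microsoft.ML.OnnxRuntime), and the installed CUDA toolkit and cuDNN versions have to match your ONNX Runtime version; the compatibility table is on the CUDA EP documentation page linked in the code below. As a quick sanity check you can list which execution providers your runtime actually exposes. A minimal sketch, assuming a recent Microsoft.ML.OnnxRuntime version that has OrtEnv.GetAvailableProviders():

using Microsoft.ML.OnnxRuntime;

// If "CUDAExecutionProvider" is missing from this list, the GPU package or the
// native CUDA/cuDNN libraries are not being picked up and inference stays on the CPU.
foreach (var provider in OrtEnv.Instance().GetAvailableProviders())
{
    Console.WriteLine(provider); // e.g. CUDAExecutionProvider, CPUExecutionProvider
}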
Before you can use the scorer, you also need a SessionOptions instance that enables the CUDA execution provider.
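If the provider defaults are fine, a one-liner is enough (a minimal sketch; 0 is the CUDA device id):

options = SessionOptions.MakeSessionOptionWithCudaProvider(0); // Dispose this finally

If you want to tune the CUDA execution provider, build the options explicitly: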
// https://onnxruntime.ai/docs/execution-providers/CUDA-ExecutionProvider.html
bool initResult = false;

var cudaProviderOptions = new Microsoft.ML.OnnxRuntime.OrtCUDAProviderOptions(); // Dispose this finally

var providerOptionsDict = new Dictionary<string, string>();
providerOptionsDict["device_id"] = "0";
providerOptionsDict["gpu_mem_limit"] = "2147483648";
providerOptionsDict["arena_extend_strategy"] = "kSameAsRequested";

/*
cudnn_conv_algo_search
The type of search done for cuDNN convolution algorithms.
  EXHAUSTIVE (0) - expensive exhaustive benchmarking using cudnnFindConvolutionForwardAlgorithmEx
  HEURISTIC (1)  - lightweight heuristic based search using cudnnGetConvolutionForwardAlgorithm_v7
  DEFAULT (2)    - default algorithm using CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM
Default value: EXHAUSTIVE
*/
providerOptionsDict["cudnn_conv_algo_search"] = "DEFAULT";

/*
do_copy_in_default_stream
Whether to do copies in the default stream or use separate streams.
The recommended setting is true; if false, there are race conditions and possibly better performance.
Default value: true
*/
providerOptionsDict["do_copy_in_default_stream"] = "1";

/*
cudnn_conv_use_max_workspace
See "Tuning performance for convolution heavy models" in the CUDA EP documentation for details on what this flag does.
This flag is only supported by the V2 provider options, which OrtCUDAProviderOptions uses.
Default value: 0
*/
providerOptionsDict["cudnn_conv_use_max_workspace"] = "1";

/*
cudnn_conv1d_pad_to_nc1d
See "Convolution input padding in the CUDA EP" in the documentation for details on what this flag does.
This flag is only supported by the V2 provider options, which OrtCUDAProviderOptions uses.
Default value: 0
*/
providerOptionsDict["cudnn_conv1d_pad_to_nc1d"] = "1";

cudaProviderOptions.UpdateOptions(providerOptionsDict);

options = SessionOptions.MakeSessionOptionWithCudaProvider(cudaProviderOptions); // Dispose this finally

if (options != null)
{
    // check that the YOLO model ONNX file is accessible
    if (File.Exists(yoloModelFile))
    {
        scorer = new YoloScorer<YoloCocoP5Model>(yoloModelFile, options);
        initResult = true;
    }
    else
    {
        DebugMessage("Yolo model ONNX file (" + yoloModelFile + ") is missing!\r\n", 2);
    }
}
else
{
    DebugMessage("Yolo instance initializing error! Session options are empty!\r\n", 2);
}
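As the comments note, cudaProviderOptions and options should be disposed when you are done with the scorer (keep them as fields next to scorer if you dispose them later). A minimal cleanup sketch, assuming the Yolov5Net YoloScorer implements IDisposable in the version you use:

// Tear-down when the detector is no longer needed (names match the snippet above).
scorer?.Dispose();              // assumption: YoloScorer implements IDisposable
options?.Dispose();             // SessionOptions is IDisposable
cudaProviderOptions?.Dispose(); // OrtCUDAProviderOptions is IDisposable

If creating the session throws a DllNotFoundException or an error mentioning onnxruntime_providers_cuda, the installed CUDA/cuDNN versions typically do not match the ONNX Runtime build; check the requirements table on the documentation page linked above.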