我尝试使用DeviceMapModule和DeviceMap2Module进行简单的映射操作。令我惊讶的是,它比手工编写内核慢了大约2倍。对于手工编写的内核,我没有做任何特别的事情。我刚刚复制了Getting Started Square内核,并将其修改为sigmoid。
以下是两个版本的代码:
首先是慢速版本:
/// Slow version: sigmoid activation layer built on Alea GPU's generic
/// DeviceMapModule / DeviceMap2Module element-wise mapping helpers.
/// (Indentation restored — the pasted snippet had lost F#'s significant whitespace.)
type SigmoidModule(size) =
    inherit Module(size, size) // Note Module is my own abstract class that defines the forward/backward interface.

    // Backward step: y is the incoming gradient, s is the cached sigmoid output;
    // d(sigmoid)/dx expressed via the output is s * (1 - s).
    [<ReflectedDefinition; AOTCompile>]
    let sigmoid' s y = (1.0f-s) * s * y

    // Forward step: numerically standard sigmoid via the CUDA intrinsic expf.
    [<ReflectedDefinition; AOTCompile>]
    let sigmoid x = 1.0f / (1.0f + __nv_expf (-x))

    // Generic map modules; the quotations of the bindings above are compiled
    // ahead of time into element-wise kernels.
    let forwardMap = new DeviceMapModule<float32, float32>(GPUModuleTarget.DefaultWorker, <@ sigmoid @>)
    let backwardMap = new DeviceMap2Module<float32, float32, float32>(
                        GPUModuleTarget.DefaultWorker, <@ sigmoid' @>)

    // Output buffer; reallocated only when the batch size (row count) changes.
    let mutable activation = Matrix.ZeroCreate 1 size

    override m.Forward (fwd:Matrix) =
        assert(fwd.Cols = size)
        if activation.Rows <> fwd.Rows then
            activation.Data.Dispose()
            activation <- Matrix.ZeroCreate fwd.Rows fwd.Cols
        forwardMap.Map(activation.Data.Ptr, fwd.Data.Ptr, fwd.Cols * fwd.Rows)
        activation

    override m.Backward (dy:Matrix) =
        assert(dy.Cols = size)
        assert(activation.Rows = dy.Rows)
        // In-place: overwrites the cached activation with the gradient.
        backwardMap.Map(activation.Data.Ptr, activation.Data.Ptr, dy.Data.Ptr, dy.Cols * dy.Rows)
        activation

    interface IDisposable with
        member m.Dispose() =
            forwardMap.Dispose()
            backwardMap.Dispose()
            activation.Data.Dispose()
现在的快速版本:
/// Fast version: the same sigmoid layer, but with hand-written kernels
/// launched directly on the default worker instead of the generic map modules.
/// (Indentation restored — the pasted snippet had lost F#'s significant
/// whitespace — and an unused local `sigmoid'` binding was removed.)
type SigmoidModuleFast(size) =
    inherit Module(size, size)

    let worker = Worker.Default

    // Forward kernel: outputs.[i] <- sigmoid(inputs.[i]) for i in [0, n);
    // each thread advances by the total thread count (gridDim.x * blockDim.x).
    [<ReflectedDefinition; AOTCompile>]
    static let sigmoidKernel (outputs:deviceptr<float32>) (inputs:deviceptr<float32>) n =
        let start = blockIdx.x * blockDim.x + threadIdx.x
        let stride = gridDim.x * blockDim.x
        let sigmoid x = 1.0f / (1.0f + __nv_expf (-x))
        let mutable i = start
        while i < n do
            outputs.[i] <- sigmoid(inputs.[i])
            i <- i + stride

    // Backward kernel: outputs.[i] <- (1 - s) * s * dy.[i], where s is the
    // cached sigmoid output (derivative expressed in terms of the output).
    [<ReflectedDefinition; AOTCompile>]
    static let sigmoidPrimeKernel (outputs:deviceptr<float32>) (input:deviceptr<float32>) (dy:deviceptr<float32>) n =
        let start = blockIdx.x * blockDim.x + threadIdx.x
        let stride = gridDim.x * blockDim.x
        let mutable i = start
        while i < n do
            let s = input.[i]
            let y = dy.[i]
            outputs.[i] <- (1.0f-s) * s * y
            i <- i + stride

    // Output buffer; reallocated only when the batch size (row count) changes.
    let mutable activation = Matrix.ZeroCreate 1 size
    // Launch configuration; recomputed in Forward and reused by Backward.
    // NOTE(review): if Backward were ever called before the first Forward, this
    // initial value uses `size` threads in a single block, which would exceed
    // the per-block thread limit for size > 1024 — confirm callers always run
    // Forward first.
    let mutable lp = LaunchParam(1, size)

    override m.Forward (fwd:Matrix) =
        assert(fwd.Cols = size)
        if activation.Rows <> fwd.Rows then
            activation.Data.Dispose()
            activation <- Matrix.ZeroCreate fwd.Rows fwd.Cols
        let threads = fwd.Rows * fwd.Cols
        if threads < 1024 then
            // Small problem: one block is enough.
            lp <- LaunchParam(1, threads)
        else
            // Cap the grid at a small multiple of the SM count; the kernels'
            // striding loop covers any remaining elements.
            let blockSize = 256
            let numSm = worker.Device.Attributes.MULTIPROCESSOR_COUNT
            let gridSize = Math.Min(16 * numSm, divup threads blockSize)
            lp <- new LaunchParam(gridSize, blockSize)
        worker.Launch <@ sigmoidKernel @> lp activation.Data.Ptr fwd.Data.Ptr (fwd.Cols*fwd.Rows)
        activation

    override m.Backward (dy:Matrix) =
        assert(dy.Cols = size)
        assert(activation.Rows = dy.Rows)
        // In-place: overwrites the cached activation with the gradient,
        // reusing the launch parameters computed by the preceding Forward.
        worker.Launch <@ sigmoidPrimeKernel @> lp activation.Data.Ptr activation.Data.Ptr dy.Data.Ptr (dy.Cols*dy.Rows)
        activation

    interface IDisposable with
        member m.Dispose() =
            activation.Data.Dispose()
以下是关于我的系统的一些信息:
- 名称 Intel(R) Core(TM) i5-4590 CPU @ 3.30GHz
- 体系结构x64
- 频率3300 MHz
- 核心数 4
- 页面大小4096
- 总物理内存8050.00 MB
- 可用物理内存902.00 MB
- 混合图形已启用:否
- 版本名称Windows 10 Pro
- 版本号10.0.10586
- Nsight版本5.1.0.16062
Visual Studio 14.0版
GPU:NVidia GTX 980 Ti
- .NET CLR:.NET 4.6
我不是GPU编程专家,但有基本的理解。我是在评估Alea GPU时遇到这个问题的。
英伟达GTX 980 Ti有2816个核心。对于256的块大小,给出2816/256=11的网格大小。
Alea Community/Basic Developer许可最多允许使用2688个核心。如果Alea库将blockSize设置为256(这只是猜测),则网格大小为2688/256=10.5。
会不会是这项工作被拆成了两部分?第一次运行占满了2688核心的上限,然后剩余部分在2816-2688=128个核心上运行?