Normalizing a disparity image with Accelerate



I have a disparity image that I'm normalizing with the sample code below, but it is very slow. I need to speed it up with some kind of accelerator, such as a custom CIFilter or some other technique, but I don't know how. I'm currently running the code with a CIContext(), and it appears to run on the CPU (I'm not sure). Is there a way to run it on the GPU and speed it up without a custom CIFilter? Here is the current code:

extension CVPixelBuffer {
    func normalize() {
        let width = CVPixelBufferGetWidth(self)
        let height = CVPixelBufferGetHeight(self)

        CVPixelBufferLockBaseAddress(self, CVPixelBufferLockFlags(rawValue: 0))
        let baseAddr = CVPixelBufferGetBaseAddress(self)!
        let floatBuffer = baseAddr.assumingMemoryBound(to: Float.self)

        // First pass: find the extrema.
        var minPixel: Float = 1.0
        var maxPixel: Float = 0.0
        for y in 0 ..< height {
            for x in 0 ..< width {
                let pixel = floatBuffer[y * width + x]
                minPixel = min(pixel, minPixel)
                maxPixel = max(pixel, maxPixel)
            }
        }

        // Second pass: rescale every pixel into 0...1.
        let range = maxPixel - minPixel
        for y in 0 ..< height {
            for x in 0 ..< width {
                let pixel = floatBuffer[y * width + x]
                floatBuffer[y * width + x] = (pixel - minPixel) / range
            }
        }

        CVPixelBufferUnlockBaseAddress(self, CVPixelBufferLockFlags(rawValue: 0))
    }
}

The pixel values are Floats, so vDSP can also be used.

vDSP_minv and vDSP_maxv compute the extrema, and:

floatBuffer[y * width + x] = (pixel - minPixel) / range

can be replaced with vDSP_vasm (you would multiply by the reciprocal of range).
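
As a rough sketch of that idea (not code from this answer), the two loops could collapse into a handful of vDSP calls; here vDSP_vsadd and vDSP_vsmul stand in for vDSP_vasm, since they only need scalar operands:

import Accelerate

// Sketch: normalize `count` Float pixels in place with classic vDSP calls.
func normalizeInPlace(_ pixels: UnsafeMutablePointer<Float>, count: Int) {
    let n = vDSP_Length(count)
    var minPixel: Float = 0
    var maxPixel: Float = 0
    vDSP_minv(pixels, 1, &minPixel, n)               // vector minimum
    vDSP_maxv(pixels, 1, &maxPixel, n)               // vector maximum

    var negMin = -minPixel
    var scale = 1 / (maxPixel - minPixel)            // reciprocal of the range
    vDSP_vsadd(pixels, 1, &negMin, pixels, 1, n)     // pixel - minPixel
    vDSP_vsmul(pixels, 1, &scale, pixels, 1, n)      // ... * 1 / range
}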

It may also be useful to look at vDSP_normalize, which performs this calculation:

m = sum(A[n], 0 <= n < N) / N;
d = sqrt(sum(A[n]**2, 0 <= n < N) / N - m**2);
if (C)
{
    // Normalize.
    for (n = 0; n < N; ++n)
        C[n] = (A[n] - m) / d;
}
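
For reference, a vDSP_normalize call from Swift might look roughly like this (a sketch; note that it normalizes to zero mean and unit standard deviation rather than to a 0...1 range):

import Accelerate

let input: [Float] = [0.2, 0.5, 0.9, 0.4]
var output = [Float](repeating: 0, count: input.count)
var mean: Float = 0
var standardDeviation: Float = 0

// C[n] = (A[n] - m) / d, with m and d returned in `mean` and `standardDeviation`
vDSP_normalize(input, 1, &output, 1, &mean, &standardDeviation, vDSP_Length(input.count))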

For your use case, vImage is probably the best option. See option 3 in this answer.
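
One possible vImage route (a sketch only, not necessarily the option 3 referenced above) is to fold the min/max rescaling into a single conversion pass, for example when turning a planar Float disparity buffer into an 8-bit grayscale preview; the function name and the min/max parameters here are illustrative:

import Accelerate
import CoreVideo

// Map a planar Float pixel buffer to Planar8, rescaling [minValue, maxValue] to [0, 255].
// The caller owns (and must free) destination.data.
func grayscaleImage(fromDisparity pixelBuffer: CVPixelBuffer,
                    minValue: Float, maxValue: Float) -> vImage_Buffer? {
    CVPixelBufferLockBaseAddress(pixelBuffer, .readOnly)
    defer { CVPixelBufferUnlockBaseAddress(pixelBuffer, .readOnly) }
    guard let baseAddress = CVPixelBufferGetBaseAddress(pixelBuffer) else { return nil }

    var source = vImage_Buffer(data: baseAddress,
                               height: vImagePixelCount(CVPixelBufferGetHeight(pixelBuffer)),
                               width: vImagePixelCount(CVPixelBufferGetWidth(pixelBuffer)),
                               rowBytes: CVPixelBufferGetBytesPerRow(pixelBuffer))

    var destination = vImage_Buffer()
    guard vImageBuffer_Init(&destination, source.height, source.width, 8,
                            vImage_Flags(kvImageNoFlags)) == kvImageNoError else { return nil }

    let error = vImageConvert_PlanarFtoPlanar8(&source, &destination,
                                               maxValue, minValue,
                                               vImage_Flags(kvImageNoFlags))
    return error == kvImageNoError ? destination : nil
}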

There is also a way to do this in Core Image. I could imagine using the CIAreaMinMax filter to find the extrema and then some clever blending to perform the normalization. I can elaborate on that if you're interested.
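
To make that idea concrete, a rough sketch (my assumption of how it could be wired up, not the author's implementation) might use CIAreaMinMax to reduce the image to a 2x1 min/max image, read those two pixels back, and then rescale the original with CIColorMatrix:

import CoreImage

func normalizedImage(from disparity: CIImage, context: CIContext) -> CIImage? {
    // Reduce the image to a 2x1 result; the left pixel holds the minimum, the right the maximum.
    let minMaxFilter = CIFilter(name: "CIAreaMinMax", parameters: [
        kCIInputImageKey: disparity,
        kCIInputExtentKey: CIVector(cgRect: disparity.extent)
    ])
    guard let minMaxImage = minMaxFilter?.outputImage else { return nil }

    // Read the two RGBA float pixels back to the CPU.
    var pixels = [Float](repeating: 0, count: 8)
    context.render(minMaxImage,
                   toBitmap: &pixels,
                   rowBytes: 8 * MemoryLayout<Float>.size,
                   bounds: CGRect(x: 0, y: 0, width: 2, height: 1),
                   format: .RGBAf,
                   colorSpace: nil)
    let minValue = pixels[0]
    let maxValue = pixels[4]
    guard maxValue > minValue else { return nil }

    // Map [minValue, maxValue] to [0, 1] with a per-channel scale and bias.
    let scale = CGFloat(1 / (maxValue - minValue))
    let bias = CGFloat(-minValue / (maxValue - minValue))
    return disparity.applyingFilter("CIColorMatrix", parameters: [
        "inputRVector": CIVector(x: scale, y: 0, z: 0, w: 0),
        "inputGVector": CIVector(x: 0, y: scale, z: 0, w: 0),
        "inputBVector": CIVector(x: 0, y: 0, z: scale, w: 0),
        "inputBiasVector": CIVector(x: bias, y: bias, z: bias, w: 0)
    ])
}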

I used the Accelerate framework's vDSP vector functions to normalize the disparity. See the modified PhotoBrowse app on GitHub for a working demo.

Here is the relevant code, in two functions:

extension CVPixelBuffer {
    func vectorNormalize(targetVector: UnsafeMutableBufferPointer<Float>) -> [Float] {
        // range = max - min
        // normalized to 0..1 is (pixel - minPixel) / range
        // See "Using vDSP for Vector-based Arithmetic" in the vDSP section of the
        // Accelerate documentation, and the section "Vector extrema calculation".
        // static func maximum<U>(U) -> Float
        //     Returns the maximum element of a single-precision vector.
        // static func minimum<U>(U) -> Float
        //     Returns the minimum element of a single-precision vector.
        let maxValue = vDSP.maximum(targetVector)
        let minValue = vDSP.minimum(targetVector)
        let range = maxValue - minValue
        let negMinValue = -minValue

        // Adding the negative minimum is the same as subtracting the minimum.
        let subtractVector = vDSP.add(negMinValue, targetVector)
        let result = vDSP.divide(subtractVector, range)
        return result
    }

    func setUpNormalize() -> CVPixelBuffer {
        // Grayscale buffer of Float32 (Swift Float) pixels; returns the normalized CVPixelBuffer.
        CVPixelBufferLockBaseAddress(self, CVPixelBufferLockFlags(rawValue: 0))
        let width = CVPixelBufferGetWidthOfPlane(self, 0)
        let height = CVPixelBufferGetHeightOfPlane(self, 0)
        let count = width * height

        let bufferBaseAddress = CVPixelBufferGetBaseAddressOfPlane(self, 0) // UnsafeMutableRawPointer
        let pixelBufferBase = unsafeBitCast(bufferBaseAddress, to: UnsafeMutablePointer<Float>.self)

        // Work on a copy of the pixel data.
        let depthCopy = UnsafeMutablePointer<Float>.allocate(capacity: count)
        depthCopy.initialize(from: pixelBufferBase, count: count)
        let depthCopyBuffer = UnsafeMutableBufferPointer<Float>(start: depthCopy, count: count)

        let normalizedDisparity = vectorNormalize(targetVector: depthCopyBuffer)

        // Copy the normalized map back into the CVPixelBuffer.
        pixelBufferBase.initialize(from: normalizedDisparity, count: count)
        depthCopy.deallocate()

        CVPixelBufferUnlockBaseAddress(self, CVPixelBufferLockFlags(rawValue: 0))
        return self
    }
}
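
For context, a typical call site might look something like this (a sketch that assumes the disparity comes from AVDepthData; it is not part of the linked demo):

import AVFoundation

func normalizedDisparityMap(from depthData: AVDepthData) -> CVPixelBuffer {
    // setUpNormalize() assumes Float32 pixels, so convert the depth data first if needed.
    let disparityData = depthData.converting(toDepthDataType: kCVPixelFormatType_DisparityFloat32)
    return disparityData.depthDataMap.setUpNormalize()
}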
