Drawing a CGImageRef into YUV



I'm using the code from here to convert a CGImageRef into a CVPixelBufferRef on OS X:

Converting a UIImage to a CVImageBufferRef

However, I need the image drawn in YUV (kCVPixelFormatType_420YpCbCr8Planar) instead of RGB as it is now.

Is there a way to draw a CGImage directly into a YUV color space? If not, does anyone have an example of the best way to convert an RGB CVPixelBufferRef to YUV?

I understand the conversion formulas, but doing the conversion on the CPU is painfully slow.
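
For reference, the kind of per-pixel math I mean is roughly the following (BT.601 full-range coefficients; the helper name is just illustrative). Looping this over every pixel of every frame is what makes the CPU path too slow:

// Illustrative per-pixel RGB -> YCbCr conversion (BT.601, full range).
// Running this in a loop over every pixel on the CPU is the slow part.
static inline void rgbToYCbCr(uint8_t r, uint8_t g, uint8_t b,
                              uint8_t *y, uint8_t *cb, uint8_t *cr)
{
    *y  = (uint8_t)(        0.299    * r + 0.587    * g + 0.114    * b);
    *cb = (uint8_t)(128.0 - 0.168736 * r - 0.331264 * g + 0.5      * b);
    *cr = (uint8_t)(128.0 + 0.5      * r - 0.418688 * g - 0.081312 * b);
}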

This is a late answer, and not posted for votes or anything. There is an Accelerate framework approach to converting RGB to YUV; the code is a little involved, but since working examples are hard to find, it is included here and it does work. It is packaged as a class that extends CIFilter, but it is easy to adapt if you want to do something different. This code contains no memory leaks and should perform well when called repeatedly. Note that the implementation creates a CVPixelBufferRef and then sets all the properties needed so that a later call to vImageCVImageFormat_CreateWithCVPixelBuffer() works correctly. The code renders the RGB data into a CoreVideo buffer, then wraps the resulting YUV image and returns it like any other CoreImage filter.

//
//  CoreImageToYUVConverter.h
//
//  Make use of CoreImage to convert an RGB input image into YUV data where
//  UV is subsampled and Y is the same dimensions as the original data.
#import <Foundation/Foundation.h>
#import <CoreImage/CoreImage.h>
@interface CoreImageToYUVConverter : CIFilter
@property (nonatomic, retain) CIImage *inputImage;
// If there is an error while processing the filter, this value is
// set to non-nil. Otherwise it is set to nil.
@property (nonatomic, retain) NSError *error;
// Dimensions of the output image. Note that Y is 2x
// the dimensions of the u and v buffers, so the Y image
// must have even width and height.
@property (nonatomic, assign) CGSize size;
@end
// CoreImageToYUVConverter.m
#import "CoreImageToYUVConverter.h"
@import Accelerate;
@interface CoreImageToYUVConverter ()
@property (nonatomic, retain) CIContext *coreImageContext;
@property (nonatomic, copy) NSNumber *inputWidth;
@property (nonatomic, copy) NSNumber *inputAspectRatio;
@property (nonatomic, assign) CVPixelBufferRef pixelBuffer;
@end
@implementation CoreImageToYUVConverter
@synthesize coreImageContext = m_coreImageContext;
@synthesize pixelBuffer = m_pixelBuffer;

- (void) dealloc
{
  self.pixelBuffer = NULL;
}
// Setter for self.pixelBuffer, this logic holds on to a retain for the CoreVideo buffer
- (void) setPixelBuffer:(CVImageBufferRef)cvBufferRef
{
  if (cvBufferRef) {
    CFRetain(cvBufferRef);
  }
  if (self->m_pixelBuffer) {
    CFRelease(self->m_pixelBuffer);
  }
  self->m_pixelBuffer = cvBufferRef;
}
- (CIImage *)outputImage
{
  self.error = nil;
  NSParameterAssert(self.inputImage != nil && [self.inputImage isKindOfClass:[CIImage class]]);
  CIImage *inputImage = self.inputImage;
  [self renderIntoYUVBuffer:inputImage];
  CIImage *outCIImage = [CIImage imageWithCVImageBuffer:self.pixelBuffer];
  return outCIImage;
}
- (NSDictionary *)customAttributes
{
  return @{
           kCIInputWidthKey : @{kCIAttributeDefault : @(0), kCIAttributeType : kCIAttributeTypeScalar},
           kCIInputAspectRatioKey : @{kCIAttributeDefault : @(0), kCIAttributeType : kCIAttributeTypeScalar},
           };
}
- (void) renderIntoYUVBuffer:(CIImage*)inputImage
{
  CGRect imageExtent = inputImage.extent;
  int width = (int) imageExtent.size.width;
  int height = (int) imageExtent.size.height;
  // Extract a CGImageRef from CIImage, this will flatten pixels possibly from
  // multiple steps of a CoreImage chain.
  if (self.coreImageContext == nil) {
    CIContext *context = [CIContext contextWithOptions:nil];
    NSAssert(context != nil, @"CIContext contextWithOptions failed");
    self.coreImageContext = context;
  }
  CGImageRef inCGImageRef = [self.coreImageContext createCGImage:inputImage fromRect:imageExtent];
  NSDictionary *pixelAttributes = @{
                                    (__bridge NSString*)kCVPixelBufferIOSurfacePropertiesKey : @{},
                                    (__bridge NSString*)kCVPixelFormatOpenGLESCompatibility : @(YES),
                                    (__bridge NSString*)kCVPixelBufferCGImageCompatibilityKey : @(YES),
                                    (__bridge NSString*)kCVPixelBufferCGBitmapContextCompatibilityKey : @(YES),
                                    };
  CVPixelBufferRef cvPixelBuffer = NULL;
  uint32_t yuvImageFormatType;
  //yuvImageFormatType = kCVPixelFormatType_420YpCbCr8BiPlanarFullRange; // luma (0, 255)
  yuvImageFormatType = kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange; // luma (16, 235)
  CVReturn result = CVPixelBufferCreate(kCFAllocatorDefault,
                                        width,
                                        height,
                                        yuvImageFormatType,
                                        (__bridge CFDictionaryRef)(pixelAttributes),
                                        &cvPixelBuffer);
  NSAssert(result == kCVReturnSuccess, @"CVPixelBufferCreate failed");
  // FIXME: UHDTV : HEVC uses kCGColorSpaceITUR_2020
  CGColorSpaceRef yuvColorSpace = CGColorSpaceCreateWithName(kCGColorSpaceITUR_709);
  {
    // Attach colorspace info to pixel buffer
    //CFDataRef colorProfileData = CGColorSpaceCopyICCProfile(yuvColorSpace); // deprecated
    CFDataRef colorProfileData = CGColorSpaceCopyICCData(yuvColorSpace);
    NSDictionary *pbAttachments = @{
      (__bridge NSString*)kCVImageBufferYCbCrMatrixKey: (__bridge NSString*)kCVImageBufferYCbCrMatrix_ITU_R_709_2,
      (__bridge NSString*)kCVImageBufferColorPrimariesKey: (__bridge NSString*)kCVImageBufferColorPrimaries_ITU_R_709_2,
      (__bridge NSString*)kCVImageBufferTransferFunctionKey: (__bridge NSString*)kCVImageBufferTransferFunction_ITU_R_709_2,
      (__bridge NSString*)kCVImageBufferICCProfileKey: (__bridge NSData *)colorProfileData,
      (__bridge NSString*)kCVImageBufferChromaLocationTopFieldKey: (__bridge NSString*)kCVImageBufferChromaLocation_Center,
      (__bridge NSString*)kCVImageBufferAlphaChannelIsOpaque: (id)kCFBooleanTrue,
    };
    CVBufferRef pixelBuffer = cvPixelBuffer;
    CVBufferSetAttachments(pixelBuffer, (__bridge CFDictionaryRef)pbAttachments, kCVAttachmentMode_ShouldPropagate);
    // Drop ref to NSDictionary to enable explicit checking of ref count of colorProfileData, after the
    // release below the colorProfileData must be 1.
    pbAttachments = nil;
    CFRelease(colorProfileData);
  }
  // Note that this setter will implicitly release an earlier held ref to a pixel buffer
  self.pixelBuffer = cvPixelBuffer;
  vImageCVImageFormatRef cvImgFormatRef;
  cvImgFormatRef = vImageCVImageFormat_CreateWithCVPixelBuffer(cvPixelBuffer);
  // vImage_CGImageFormat for input RGB
  // FIXME: Need to select sRGB if running under MacOSX
  //CGColorSpaceRef defaultColorspaceRef = CGColorSpaceCreateDeviceRGB();
  // Default to sRGB on both MacOSX and iOS
  CGColorSpaceRef defaultColorspaceRef = NULL;
  vImage_CGImageFormat rgbCGImgFormat = {
    .bitsPerComponent = 8,
    .bitsPerPixel = 32,
    .bitmapInfo = (CGBitmapInfo)(kCGBitmapByteOrder32Host | kCGImageAlphaNoneSkipFirst),
    .colorSpace = defaultColorspaceRef,
  };
  // Copy input CoreGraphic image into a CoreVideo buffer
  vImage_Buffer sourceBuffer;
  const CGFloat backgroundColor = 0.0f;
  vImage_Flags flags = 0;
  flags = kvImagePrintDiagnosticsToConsole;
  vImage_Error err;
  err = vImageBuffer_InitWithCGImage(&sourceBuffer, &rgbCGImgFormat, &backgroundColor, inCGImageRef, flags);
  NSAssert(err == kvImageNoError, @"vImageBuffer_InitWithCGImage failed");
  err = vImageBuffer_CopyToCVPixelBuffer(&sourceBuffer, &rgbCGImgFormat, cvPixelBuffer, cvImgFormatRef, &backgroundColor, flags);
  NSAssert(err == kvImageNoError, @"error in vImageBuffer_CopyToCVPixelBuffer %d", (int)err);
  // Manually free() the allocated buffer
  free(sourceBuffer.data);
  vImageCVImageFormat_Release(cvImgFormatRef);
  CVPixelBufferRelease(cvPixelBuffer);
  CGColorSpaceRelease(yuvColorSpace);
  CGColorSpaceRelease(defaultColorspaceRef);
  CGImageRelease(inCGImageRef);
}
@end
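
For completeness, a minimal usage sketch might look like this (srcCGImage is just a placeholder for your own CGImageRef); the returned CIImage is backed by the YUV CVPixelBufferRef created inside the filter:

// Hypothetical usage of the filter above; srcCGImage is assumed to exist.
CoreImageToYUVConverter *converter = [[CoreImageToYUVConverter alloc] init];
converter.inputImage = [CIImage imageWithCGImage:srcCGImage];
CIImage *yuvImage = converter.outputImage; // CIImage wrapping the YUV CVPixelBufferRef
if (converter.error != nil) {
  NSLog(@"RGB -> YUV conversion failed: %@", converter.error);
}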

Use a VTPixelTransferSession:

CVPixelBufferRef converted_frame;
CVPixelBufferCreate(kCFAllocatorDefault, width, height, kCVPixelFormatType_420YpCbCr8Planar, 0, &converted_frame);
VTPixelTransferSessionTransferImage(_vtpt_ref, imageBuffer, converted_frame);

where imageBuffer is the source CVPixelBufferRef.
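
Here _vtpt_ref is assumed to be a VTPixelTransferSessionRef created beforehand, roughly like this (error handling trimmed):

#import <VideoToolbox/VideoToolbox.h>

// Create the transfer session once and reuse it for every frame.
VTPixelTransferSessionRef _vtpt_ref = NULL;
OSStatus status = VTPixelTransferSessionCreate(kCFAllocatorDefault, &_vtpt_ref);
// status should be noErr; when finished, call VTPixelTransferSessionInvalidate(_vtpt_ref)
// followed by CFRelease(_vtpt_ref).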
