GPUImage自定义OpenGL ES着色器生成黑色图像



在此基础上开发另一个OpenGL ES图像过滤器:

uniform sampler2D texture;
uniform float amount;
uniform vec2 texSize;
varying vec2 texCoord;
void main() {
    vec4 color = texture2D(texture, texCoord);
    vec4 orig = color;
    /* High pass filter: centre weighted 5.0, eight diagonal neighbours at
       -0.625 each (5.0 - 8.0 * 0.625 == 0.0), so flat regions cancel to
       zero and only edge detail survives. */
    vec4 highpass = color * 5.0;
    float dx = 1.0 / texSize.x;
    float dy = 1.0 / texSize.y;
    highpass += texture2D(texture, texCoord + vec2(-dx, -dy)) * -0.625;
    highpass += texture2D(texture, texCoord + vec2(dx, -dy)) * -0.625;
    highpass += texture2D(texture, texCoord + vec2(dx, dy)) * -0.625;
    highpass += texture2D(texture, texCoord + vec2(-dx, dy)) * -0.625;
    highpass += texture2D(texture, texCoord + vec2(-dx * 2.0, -dy * 2.0)) * -0.625;
    highpass += texture2D(texture, texCoord + vec2(dx * 2.0, -dy * 2.0)) * -0.625;
    highpass += texture2D(texture, texCoord + vec2(dx * 2.0, dy * 2.0)) * -0.625;
    highpass += texture2D(texture, texCoord + vec2(-dx * 2.0, dy * 2.0)) * -0.625;
    highpass.a = 1.0;
    /* Overlay blend of the high-pass result over the original, per channel. */
    vec3 overlay = vec3(1.0);
    if (highpass.r <= 0.5) {
        overlay.r = 2.0 * color.r * highpass.r;
    } else {
        overlay.r = 1.0 - 2.0 * (1.0 - color.r) * (1.0 - highpass.r);
    }
    if (highpass.g <= 0.5) {
        overlay.g = 2.0 * color.g * highpass.g;
    } else {
        overlay.g = 1.0 - 2.0 * (1.0 - color.g) * (1.0 - highpass.g);
    }
    if (highpass.b <= 0.5) {
        overlay.b = 2.0 * color.b * highpass.b;
    } else {
        overlay.b = 1.0 - 2.0 * (1.0 - color.b) * (1.0 - highpass.b);
    }
    color.rgb = (overlay * 0.8) + (orig.rgb * 0.2);
    /* Desaturated hard light.
       FIX: parenthesize the sum — previously only orig.b was divided by 3
       (operator precedence), so the "average" was far too bright. */
    vec3 desaturated = vec3((orig.r + orig.g + orig.b) / 3.0);
    if (desaturated.r <= 0.5) {
        color.rgb = 2.0 * color.rgb * desaturated;
    } else {
        color.rgb = vec3(1.0) - vec3(2.0) * (vec3(1.0) - color.rgb) * (vec3(1.0) - desaturated);
    }
    color = (orig * 0.6) + (color * 0.4);
    /* Pull the result partway back toward its own grey average to
       compensate for saturation gained in the hard-light step. */
    float average = (color.r + color.g + color.b) / 3.0;
    color.rgb += (average - color.rgb) * (1.0 - 1.0 / (1.001 - 0.45));
    /* Mix between original and effect by the user-controlled amount (0..1). */
    gl_FragColor = (color * amount) + (orig * (1.0 - amount));
}

根据我昨天的问题,我知道要为每个 float 和 vec 指定精度。这一次它编译得很好,但当我在 GPUImage 中应用该过滤器时(例如,把 clarity 的值设置为 0.8),图像会变黑。我的直觉告诉我这和纹理大小有关,但不知道 GPUImage 内部是如何处理的,我有点不知所措。

以下是我在Objective-C中的实现:

.h

#import <GPUImage/GPUImage.h>
@interface GPUImageClarityFilter : GPUImageFilter
{
    GLint clarityUniform;   // Handle for the "clarity" uniform in the fragment shader.
}
// Gives the image a gritty, surreal contrasty effect
// Value 0 to 1
@property (readwrite, nonatomic) GLfloat clarity;
@end

.m

#import "GPUImageClarityFilter.h"
#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE
NSString *const kGPUImageClarityFragmentShaderString = SHADER_STRING
(
 uniform sampler2D inputImageTexture;
 uniform lowp float clarity;
 uniform highp vec2 textureSize;
 varying highp vec2 textureCoordinate;
 void main() {
     highp vec4 color = texture2D(inputImageTexture, textureCoordinate);
     highp vec4 orig = color;
     /* High pass filter: centre at 5.0, eight neighbours at -0.625 each,
        so the weights sum to zero and flat regions cancel. */
     highp vec4 highpass = color * 5.0;
     highp float dx = 1.0 / textureSize.x;
     highp float dy = 1.0 / textureSize.y;
     highpass += texture2D(inputImageTexture, textureCoordinate + vec2(-dx, -dy)) * -0.625;
     highpass += texture2D(inputImageTexture, textureCoordinate + vec2(dx, -dy)) * -0.625;
     highpass += texture2D(inputImageTexture, textureCoordinate + vec2(dx, dy)) * -0.625;
     highpass += texture2D(inputImageTexture, textureCoordinate + vec2(-dx, dy)) * -0.625;
     highpass += texture2D(inputImageTexture, textureCoordinate + vec2(-dx * 2.0, -dy * 2.0)) * -0.625;
     highpass += texture2D(inputImageTexture, textureCoordinate + vec2(dx * 2.0, -dy * 2.0)) * -0.625;
     highpass += texture2D(inputImageTexture, textureCoordinate + vec2(dx * 2.0, dy * 2.0)) * -0.625;
     highpass += texture2D(inputImageTexture, textureCoordinate + vec2(-dx * 2.0, dy * 2.0)) * -0.625;
     highpass.a = 1.0;
     /* Overlay blend */
     highp vec3 overlay = vec3(1.0);
     if (highpass.r <= 0.5) {
         overlay.r = 2.0 * color.r * highpass.r;
     } else {
         overlay.r = 1.0 - 2.0 * (1.0 - color.r) * (1.0 - highpass.r);
     }
     if (highpass.g <= 0.5) {
         overlay.g = 2.0 * color.g * highpass.g;
     } else {
         overlay.g = 1.0 - 2.0 * (1.0 - color.g) * (1.0 - highpass.g);
     }
     if (highpass.b <= 0.5) {
         overlay.b = 2.0 * color.b * highpass.b;
     } else {
         overlay.b = 1.0 - 2.0 * (1.0 - color.b) * (1.0 - highpass.b);
     }
     color.rgb = (overlay * 0.8) + (orig.rgb * 0.2);
     /* Desaturated hard light.
        FIX: parenthesize the sum — previously only orig.b was divided by 3. */
     highp vec3 desaturated = vec3((orig.r + orig.g + orig.b) / 3.0);
     if (desaturated.r <= 0.5) {
         color.rgb = 2.0 * color.rgb * desaturated;
     } else {
         color.rgb = vec3(1.0) - vec3(2.0) * (vec3(1.0) - color.rgb) * (vec3(1.0) - desaturated);
     }
     color = (orig * 0.6) + (color * 0.4);
     /* Add back some color */
     highp float average = (color.r + color.g + color.b) / 3.0;
     color.rgb += (average - color.rgb) * (1.0 - 1.0 / (1.001 - 0.45));
     /* Blend between original and processed image by clarity (0..1). */
     gl_FragColor = (color * clarity) + (orig * (1.0 - clarity));
 }
);
#else
NSString *const kGPUImageClarityFragmentShaderString = SHADER_STRING
(
 uniform sampler2D inputImageTexture;
 uniform float clarity;
 uniform vec2 textureSize;
 varying vec2 textureCoordinate;
 void main() {
     vec4 color = texture2D(inputImageTexture, textureCoordinate);
     vec4 orig = color;
     /* High pass filter: centre at 5.0, eight neighbours at -0.625 each,
        so the weights sum to zero and flat regions cancel. */
     vec4 highpass = color * 5.0;
     float dx = 1.0 / textureSize.x;
     float dy = 1.0 / textureSize.y;
     highpass += texture2D(inputImageTexture, textureCoordinate + vec2(-dx, -dy)) * -0.625;
     highpass += texture2D(inputImageTexture, textureCoordinate + vec2(dx, -dy)) * -0.625;
     highpass += texture2D(inputImageTexture, textureCoordinate + vec2(dx, dy)) * -0.625;
     highpass += texture2D(inputImageTexture, textureCoordinate + vec2(-dx, dy)) * -0.625;
     highpass += texture2D(inputImageTexture, textureCoordinate + vec2(-dx * 2.0, -dy * 2.0)) * -0.625;
     highpass += texture2D(inputImageTexture, textureCoordinate + vec2(dx * 2.0, -dy * 2.0)) * -0.625;
     highpass += texture2D(inputImageTexture, textureCoordinate + vec2(dx * 2.0, dy * 2.0)) * -0.625;
     highpass += texture2D(inputImageTexture, textureCoordinate + vec2(-dx * 2.0, dy * 2.0)) * -0.625;
     highpass.a = 1.0;
     /* Overlay blend */
     vec3 overlay = vec3(1.0);
     if (highpass.r <= 0.5) {
         overlay.r = 2.0 * color.r * highpass.r;
     } else {
         overlay.r = 1.0 - 2.0 * (1.0 - color.r) * (1.0 - highpass.r);
     }
     if (highpass.g <= 0.5) {
         overlay.g = 2.0 * color.g * highpass.g;
     } else {
         overlay.g = 1.0 - 2.0 * (1.0 - color.g) * (1.0 - highpass.g);
     }
     if (highpass.b <= 0.5) {
         overlay.b = 2.0 * color.b * highpass.b;
     } else {
         overlay.b = 1.0 - 2.0 * (1.0 - color.b) * (1.0 - highpass.b);
     }
     color.rgb = (overlay * 0.8) + (orig.rgb * 0.2);
     /* Desaturated hard light.
        FIX: parenthesize the sum — previously only orig.b was divided by 3. */
     vec3 desaturated = vec3((orig.r + orig.g + orig.b) / 3.0);
     if (desaturated.r <= 0.5) {
         color.rgb = 2.0 * color.rgb * desaturated;
     } else {
         color.rgb = vec3(1.0) - vec3(2.0) * (vec3(1.0) - color.rgb) * (vec3(1.0) - desaturated);
     }
     color = (orig * 0.6) + (color * 0.4);
     /* Add back some color */
     float average = (color.r + color.g + color.b) / 3.0;
     color.rgb += (average - color.rgb) * (1.0 - 1.0 / (1.001 - 0.45));
     /* Blend between original and processed image by clarity (0..1). */
     gl_FragColor = (color * clarity) + (orig * (1.0 - clarity));
 }
);
#endif
@implementation GPUImageClarityFilter

@synthesize clarity = _clarity;

#pragma mark -
#pragma mark Initialization and teardown

- (id)init;
{
    if (!(self = [super initWithFragmentShaderFromString:kGPUImageClarityFragmentShaderString]))
    {
        return nil;
    }

    clarityUniform = [filterProgram uniformIndex:@"clarity"];
    self.clarity = 0.0;

    return self;
}

// FIX for the black output: textureSize is NOT one of the standard uniforms
// GPUImageFilter populates (only inputImageTexture / textureCoordinate are),
// so it stayed at its default of 0.0 and 1.0 / textureSize.x divided by zero
// in the fragment shader. Push the output frame size to the shader whenever
// the filter is configured for a size.
- (void)setupFilterForSize:(CGSize)filterFrameSize;
{
    [self setSize:filterFrameSize
       forUniform:[filterProgram uniformIndex:@"textureSize"]
          program:filterProgram];
}

#pragma mark -
#pragma mark Accessors

- (void)setClarity:(GLfloat)clarity;
{
    _clarity = clarity;
    // Forward the new value to the shader's "clarity" uniform.
    [self setFloat:_clarity forUniform:clarityUniform program:filterProgram];
}

@end

我想做的另一件事是应用GPUImage内置的低通和高通滤波器,但我觉得这最终会是一个相当笨拙的解决方案。

这可能是因为 textureSize 并不是 GPUImageFilter 默认提供的标准 uniform。inputImageTexture 和 textureCoordinate 才是该过滤器提供的标准输入,而 clarity 这个 uniform 看起来也已经正确提供了。

由于 textureSize 从未被设置,它将默认为 0.0。于是你的 1.0 / textureSize.x 计算就变成了除以零,而这在 iOS 的片段着色器中往往会导致输出整帧黑色。

你可以自行计算并设置这个 uniform,也可以考虑让自定义过滤器继承 GPUImage3x3TextureSamplingFilter。该过滤器基类会把 1.0 / textureSize.x 的结果作为 texelWidth uniform 传入着色器(垂直分量则有对应的 texelHeight),你不必自己计算。事实上,它还会预先计算周围 8 个像素的纹理坐标,因此你可以去掉上面的前四次偏移计算,把它们变成非依赖纹理读取(non-dependent texture read)。剩下的四次读取只需根据 2 * texelWidth 和 2 * texelHeight 计算坐标即可。

事实上,您可以将此操作分解为多个过程以节省计算,先进行小框模糊,然后进行叠加混合,再进行此过滤器的最后阶段。这可能会进一步加快速度。

因此,您可以覆盖

- (void)setupFilterForSize:(CGSize)filterFrameSize

这个方法,在其中设置宽度和高度因子(例如 texelWidth / texelHeight)。

最新更新