OpenCV L*a*b* to RGB conversion produces grayscale output



I was assigned to convert an image from the L*a*b* color space to RGB using OpenCV. To do that, I used the information provided here and here.

Edit: I was assigned to do this without the cvtColor function that ships with OpenCV.

I also tried implementing the formulas directly from here. I am still new to image processing and don't know whether my result is valid. I can see each channel, and the values of the RGB image are between 0 and 255, but when I merge the channels I get a grayscale image. I expected to get the original color image back after converting from L*a*b* to RGB. Is this normal?

    Mat image = imread(argv[1], CV_LOAD_IMAGE_UNCHANGED);
    Mat labimage = Mat::zeros(image.size(), image.type());     // Matrix to store the Lab image.
    cvtColor(image, labimage, CV_BGR2Lab);                     // Automatic BGR to Lab conversion.
    Mat lchannel  = Mat::zeros(image.size(), labimage.type()); // Matrix to store the L channel.
    Mat achannel  = Mat::zeros(image.size(), labimage.type()); // Matrix to store the a channel.
    Mat bchannel  = Mat::zeros(image.size(), labimage.type()); // Matrix to store the b channel.
    Mat bwchannel = Mat::zeros(image.size(), labimage.type()); // Matrix to store the grayscale channel.
    for(int x = 0; x < image.cols; x++){
        for(int y = 0; y < image.rows; y++){
            lchannel.at<Vec3b>(y,x)[0] = labimage.at<Vec3b>(y,x)[0];
            achannel.at<Vec3b>(y,x)[1] = labimage.at<Vec3b>(y,x)[1];
            bchannel.at<Vec3b>(y,x)[2] = labimage.at<Vec3b>(y,x)[2];
        }
    }
    Mat color = Mat::zeros(image.size(), labimage.type());
    double X, Y, Z, dX, dY, dZ;
    double R, G, B;
    double L, a, b;
    X = Y = Z = dX = dY = dZ = R = G = B = L = a = b = 0;
    for(int x = 0; x < image.cols; x++){
        for(int y = 0; y < image.rows; y++){
            L = (double)(lchannel.at<Vec3b>(y,x)[0] / 255.0) * 100.0;  // Range 0 to 100.
            a = (double)(achannel.at<Vec3b>(y,x)[1] / 255) * 128;      // Range -128 to 128.
            b = (double)(bchannel.at<Vec3b>(y,x)[2] / 255) * 128;      // Range -128 to 128.
            // Lab -> normalized XYZ (X, Y, Z are all in 0...1)
            Y = L * (1.0/116.0) + 16.0/116.0;
            X = a * (1.0/500.0) + Y;
            Z = b * (-1.0/200.0) + Y;
            X = X > 6.0/29.0 ? X * X * X : X * (108.0/841.0) - 432.0/24389.0;
            Y = L > 8.0 ? Y * Y * Y : L * (27.0/24389.0);
            Z = Z > 6.0/29.0 ? Z * Z * Z : Z * (108.0/841.0) - 432.0/24389.0;
            // normalized XYZ -> linear sRGB (in 0...1)
            R = X * (1219569.0/395920.0)     + Y * (-608687.0/395920.0)    + Z * (-107481.0/197960.0);
            G = X * (-80960619.0/87888100.0) + Y * (82435961.0/43944050.0) + Z * (3976797.0/87888100.0);
            B = X * (93813.0/1774030.0)      + Y * (-180961.0/887015.0)    + Z * (107481.0/93370.0);
            // linear sRGB -> gamma-compressed sRGB (in 0...1)
            R = R > 0.0031308 ? pow(R, 1.0 / 2.4) * 1.055 - 0.055 : R * 12.92;
            G = G > 0.0031308 ? pow(G, 1.0 / 2.4) * 1.055 - 0.055 : G * 12.92;
            B = B > 0.0031308 ? pow(B, 1.0 / 2.4) * 1.055 - 0.055 : B * 12.92;
            //printf("a0: %d\t L0: %d\t b0: %d\n", achannel.at<Vec3b>(y,x)[1], lchannel.at<Vec3b>(y,x)[0], bchannel.at<Vec3b>(y,x)[2]);
            //printf("a: %f\t L: %f\t b: %f\n", a, L, b);
            //printf("X: %f\t Y: %f\t Z: %f\n", X, Y, Z);
            //printf("R: %f\t G: %f\t B: %f\n", R, G, B);
            //cout << "R: " << R << " G: " << G << " B: " << B << endl;
            //string str = type2str(color.type());
            //cout << "Matrix type: " << str << endl;
            color.at<Vec3b>(y,x)[0] = R*255;
            color.at<Vec3b>(y,x)[1] = G*255;
            color.at<Vec3b>(y,x)[2] = B*255;
        }
    }
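
For reference, the OpenCV documentation says that for 8-bit images cvtColor stores the Lab channels as L*255/100 with a and b offset by +128. A minimal sketch of decoding one packed pixel back into the real L*a*b* ranges under that reading (my own helper, not the code above):

    #include <opencv2/opencv.hpp>
    using namespace cv;

    // Undo OpenCV's 8-bit Lab packing for a single pixel, per the cvtColor docs:
    // the stored byte for L is L*255/100, and a and b are stored with a +128 offset.
    static void decodeLabPixel(const Mat& lab8u, int y, int x,
                               double& L, double& a, double& b) {
        Vec3b px = lab8u.at<Vec3b>(y, x);
        L = px[0] * 100.0 / 255.0;   // back to 0..100
        a = px[1] - 128.0;           // back to roughly -128..127
        b = px[2] - 128.0;           // back to roughly -128..127
    }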

Am I doing this right, or did I misunderstand the information?

Please do not roll your own per-pixel loops; that is terribly inefficient.

Use cvtColor(src, dst, COLOR_Lab2BGR) instead.

(And, if I may say so, I prefer the documentation over SO answers.)
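
For reference, a minimal sketch of that suggestion (assuming an 8-bit BGR input image):

    #include <opencv2/opencv.hpp>
    using namespace cv;

    int main(int argc, char** argv) {
        Mat bgr = imread(argv[1], IMREAD_COLOR);   // 8-bit BGR image
        Mat lab, back;
        cvtColor(bgr, lab, COLOR_BGR2Lab);         // BGR -> Lab (L scaled to 0..255, a/b offset by 128)
        cvtColor(lab, back, COLOR_Lab2BGR);        // Lab -> BGR round trip
        imshow("Round trip", back);
        waitKey(0);
        return 0;
    }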

Never mind. I managed to solve it myself, quite happily. For anyone who is interested and runs into the same trouble I did, here is the algorithm and some code:

  1. Convert CIE-L*a*b* to XYZ. This is necessary because CIE-L*a*b* is not a linear color space, so there is no known direct conversion to RGB.

    void invf(cv::Mat& input, cv::Mat& output);  // forward declaration, defined below

    void CIElabtoXYZ(cv::Mat& image, cv::Mat& output){
        float WhitePoint[3] = {0.950456, 1, 1.088754};   // D65 reference white (Xn, Yn, Zn).
        Mat fX = Mat::zeros(image.size(), CV_32FC1);
        Mat fY = Mat::zeros(image.size(), CV_32FC1);
        Mat fZ = Mat::zeros(image.size(), CV_32FC1);
        Mat invfX = Mat::zeros(image.size(), CV_32FC1);
        Mat invfY = Mat::zeros(image.size(), CV_32FC1);
        Mat invfZ = Mat::zeros(image.size(), CV_32FC1);
        // fY = (L + 16) / 116, fX = fY + a / 500, fZ = fY - b / 200.
        for(int x = 0; x < image.rows; x++){
            for(int y = 0; y < image.cols; y++){
                fY.at<float>(x,y) = (image.at<Vec3f>(x,y)[0] + 16.0) / 116.0;
                fX.at<float>(x,y) = fY.at<float>(x,y) + image.at<Vec3f>(x,y)[1] / 500.0;
                fZ.at<float>(x,y) = fY.at<float>(x,y) - image.at<Vec3f>(x,y)[2] / 200.0;
            }
        }
        // Apply the inverse of the CIE f() function to each plane.
        invf(fX, invfX);
        invf(fY, invfY);
        invf(fZ, invfZ);
        // Scale by the reference white to obtain XYZ.
        for(int x = 0; x < image.rows; x++){
            for(int y = 0; y < image.cols; y++){
                output.at<Vec3f>(x,y)[0] = WhitePoint[0] * invfX.at<float>(x,y);
                output.at<Vec3f>(x,y)[1] = WhitePoint[1] * invfY.at<float>(x,y);
                output.at<Vec3f>(x,y)[2] = WhitePoint[2] * invfZ.at<float>(x,y);
            }
        }
    }
    // Inverse of the CIE f() function: t^3 if t^3 > 0.008856 (= (6/29)^3),
    // otherwise (t - 4/29) * 108/841 (108/841 = 3 * (6/29)^2).
    void invf(cv::Mat& input, cv::Mat& output){
        for(int x = 0; x < input.rows; x++){
            for(int y = 0; y < input.cols; y++){
                output.at<float>(x,y) = pow(input.at<float>(x,y), 3);
                if(output.at<float>(x,y) < 0.008856){
                    output.at<float>(x,y) = (input.at<float>(x,y) - 4.0/29.0) * (108.0/841.0);
                }
            }
        }
    }
    
  2. Convert XYZ to RGB (a short sketch that chains both steps together follows after this code).

    void XYZtoRGB(cv::Mat& input, cv::Mat& output){
        // Rows of T map (X, Y, Z) to linear R, G and B respectively (sRGB primaries, D65 white).
        float data[3][3] = {{3.240479, -1.53715, -0.498535}, {-0.969256, 1.875992, 0.041556}, {0.055648, -0.204043, 1.057311}};
        Mat T = Mat(3, 3, CV_32FC1, data);
        Mat R = Mat::zeros(input.size(), CV_32FC1);
        Mat G = Mat::zeros(input.size(), CV_32FC1);
        Mat B = Mat::zeros(input.size(), CV_32FC1);
        for(int x = 0; x < input.rows; x++){
            for(int y = 0; y < input.cols; y++){
                R.at<float>(x,y) = T.at<float>(0,0)*input.at<Vec3f>(x,y)[0] + T.at<float>(0,1)*input.at<Vec3f>(x,y)[1] + T.at<float>(0,2)*input.at<Vec3f>(x,y)[2];
                G.at<float>(x,y) = T.at<float>(1,0)*input.at<Vec3f>(x,y)[0] + T.at<float>(1,1)*input.at<Vec3f>(x,y)[1] + T.at<float>(1,2)*input.at<Vec3f>(x,y)[2];
                B.at<float>(x,y) = T.at<float>(2,0)*input.at<Vec3f>(x,y)[0] + T.at<float>(2,1)*input.at<Vec3f>(x,y)[1] + T.at<float>(2,2)*input.at<Vec3f>(x,y)[2];
            }
        }
        // Desaturate and rescale to constrain the resulting RGB values to [0,1].
        double RminVal, GminVal, BminVal;
        double RmaxVal, GmaxVal, BmaxVal;
        Point minLoc;
        Point maxLoc;
        minMaxLoc(R, &RminVal, &RmaxVal, &minLoc, &maxLoc);
        minMaxLoc(G, &GminVal, &GmaxVal, &minLoc, &maxLoc);
        minMaxLoc(B, &BminVal, &BmaxVal, &minLoc, &maxLoc);
        Mat matMin = Mat::zeros(1, 4, CV_32FC1), matMax = Mat::zeros(1, 4, CV_32FC1);
        matMin.at<float>(0,0) = RminVal; matMin.at<float>(0,1) = GminVal; matMin.at<float>(0,2) = BminVal; matMin.at<float>(0,3) = 0;
        double min, max;
        minMaxLoc(matMin, &min, &max, &minLoc, &maxLoc);
        float addWhite = -min;   // Lift everything so the smallest value becomes 0.
        matMax.at<float>(0,0) = RmaxVal + addWhite; matMax.at<float>(0,1) = GmaxVal + addWhite; matMax.at<float>(0,2) = BmaxVal + addWhite; matMax.at<float>(0,3) = 1;
        minMaxLoc(matMax, &min, &max, &minLoc, &maxLoc);
        float Scale = max;       // Divide by the largest value so everything fits in [0,1].
        for(int x = 0; x < input.rows; x++){
            for(int y = 0; y < input.cols; y++){
                // Written in BGR channel order so the result displays correctly with imshow.
                output.at<Vec3f>(x,y)[2] = (R.at<float>(x,y) + addWhite) / Scale;
                output.at<Vec3f>(x,y)[1] = (G.at<float>(x,y) + addWhite) / Scale;
                output.at<Vec3f>(x,y)[0] = (B.at<float>(x,y) + addWhite) / Scale;
            }
        }
        imshow("Unscaled RGB", output);
    }
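
A minimal sketch (my own wiring and variable names, not part of the answer itself) of how the two helpers above might be chained, assuming both functions are in scope and that the Lab input is CV_32FC3 with L in [0,100] and a, b in roughly [-127,127], which is what cvtColor produces for float images scaled to [0,1]:

    #include <iostream>
    #include <opencv2/opencv.hpp>
    using namespace cv;

    int main(int argc, char** argv) {
        // Quick sanity check: pure white (L = 100, a = b = 0) gives fX = fY = fZ = 1,
        // so XYZ should come back as the D65 white point and RGB as roughly (1, 1, 1).
        Mat whiteLab(1, 1, CV_32FC3, Scalar(100.0f, 0.0f, 0.0f));
        Mat whiteXYZ = Mat::zeros(1, 1, CV_32FC3);
        Mat whiteRGB = Mat::zeros(1, 1, CV_32FC3);
        CIElabtoXYZ(whiteLab, whiteXYZ);
        XYZtoRGB(whiteXYZ, whiteRGB);
        std::cout << "white XYZ: " << whiteXYZ.at<Vec3f>(0,0)
                  << "  white RGB: " << whiteRGB.at<Vec3f>(0,0) << std::endl;

        // Full round trip on a real image.
        Mat bgr = imread(argv[1], IMREAD_COLOR);
        Mat bgr32f, lab32f;
        bgr.convertTo(bgr32f, CV_32F, 1.0 / 255.0);    // float BGR in [0,1], as cvtColor expects
        cvtColor(bgr32f, lab32f, COLOR_BGR2Lab);       // L in [0,100], a and b in about [-127,127]

        Mat xyz = Mat::zeros(lab32f.size(), CV_32FC3);
        Mat rgb = Mat::zeros(lab32f.size(), CV_32FC3);
        CIElabtoXYZ(lab32f, xyz);                      // step 1 above
        XYZtoRGB(xyz, rgb);                            // step 2 above (writes in BGR channel order)

        imshow("Reconstructed", rgb);                  // 32-bit float images are displayed as [0,1]
        waitKey(0);
        return 0;
    }

Because XYZtoRGB rescales globally and skips the sRGB gamma step, the reconstructed image will not match the original exactly; midtones in particular will look darker.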
    
