I am trying to project a 3D surface in OpenGL on top of the camera view on Android. I use the OpenCV function solvePnP in native code to obtain the rotation and translation vectors of the device camera, and from these I compute the modelView and modelViewProjection matrices, following this link:
http://spottrlabs.blogspot.com/2012/07/opencv-and-opengl-not-always-friends.html
Finally, I pass these matrices to the Java code in a float array, but when I use them in my renderer, the surface is not projected on screen. I am sure the 3D surface itself is generated correctly, because when I fill modelView and modelViewProjection with arbitrary values I can see a projection. With the matrices computed from the solvePnP results, however, I get nothing. Can anyone tell me where I am going wrong? Here is my relevant code:
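For context, the r and t used below come from a solvePnP call along these lines (a minimal sketch; the point lists are placeholders, not my real correspondences):

// Minimal sketch of where r and t come from; the correspondence lists are
// placeholders for my real surface points and their detected pixels.
vector<Point3f> objectPoints; // known 3D points on the surface (placeholder)
vector<Point2f> imagePoints;  // matching 2D pixel locations (placeholder)
Mat r, t;
solvePnP(objectPoints, imagePoints, scaledCameraMatrix, Mat(), r, t);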
"Native code in cpp file"
Rodrigues(r, expandedR); // r is the rotation vector received from solvePnP
Mat Rt = Mat::zeros(4, 4, CV_64FC1);
for (int y = 0; y < 3; y++) {
    for (int x = 0; x < 3; x++) {
        Rt.at<double>(y, x) = expandedR.at<double>(y, x);
    }
    Rt.at<double>(y, 3) = t.at<double>(y, 0); // t is the translation vector from solvePnP
}
Rt.at<double>(3, 3) = 1.0;
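// Rt now holds the 4x4 camera pose in OpenCV conventions:
// [ R | t ]
// [ 0 | 1 ]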
// OpenGL's Y and Z axes point the opposite way to OpenCV's
Mat reverseYZ = Mat::eye(4, 4, CV_64FC1);
reverseYZ.at<double>(1, 1) = reverseYZ.at<double>(2, 2) = -1;
// Since we are in landscape mode, rotate the projection's x/y by 90 degrees
Mat rot2D = Mat::eye(4, 4, CV_64FC1);
rot2D.at<double>(0, 0) = rot2D.at<double>(1, 1) = 0;
rot2D.at<double>(0, 1) = 1;
rot2D.at<double>(1, 0) = -1;
Mat projMat = Mat::zeros(4, 4, CV_64FC1);
float fard = 10000, neard = 5; // far and near clipping planes
float imageWidth = 1024.0f;
float imageHeight = 576.0f;
projMat.at<double>(0, 0) = 2 * scaledCameraMatrix.at<double>(0, 0) / imageWidth;
projMat.at<double>(0, 2) = -1 + (2 * scaledCameraMatrix.at<double>(0, 2) / imageWidth);
projMat.at<double>(1, 1) = 2 * scaledCameraMatrix.at<double>(1, 1) / imageHeight;
projMat.at<double>(1, 2) = -1 + (2 * scaledCameraMatrix.at<double>(1, 2) / imageHeight);
projMat.at<double>(2, 2) = -(fard + neard) / (fard - neard);
projMat.at<double>(2, 3) = -2 * fard * neard / (fard - neard);
projMat.at<double>(3, 2) = -1;
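// projMat maps OpenGL-style camera coordinates (camera looking down -z)
// to clip space using fx, fy, cx, cy from scaledCameraMatrix, and remaps
// depth from the [neard, fard] range to [-1, 1] in NDC.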
Mat mvMat = reverseYZ * Rt;  // model-view: the OpenCV pose converted to OpenGL axes
projMat = rot2D * projMat;   // rotate the projection for the landscape screen
Mat mvp = projMat * mvMat;   // combined model-view-projection matrix
float arr[16];
float arr2[16];
int count = 0;
for (int i = 0; i < 4; i++) {
    for (int j = 0; j < 4; j++) {
        // Both matrices are copied row by row (row-major order)
        arr[count] = (float) mvp.at<double>(i, j);
        arr2[count++] = (float) mvMat.at<double>(i, j);
    }
}
// sendfloatArray carries mvp in floats 0-15 and mvMat in floats 16-31
env->SetFloatArrayRegion(sendfloatArray, 0, 16, &arr[0]);
env->SetFloatArrayRegion(sendfloatArray, 16, 16, &arr2[0]);
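To rule out a matrix problem before the data even reaches Java, I can project one surface vertex through mvp by hand and inspect the normalized device coordinates (a minimal sketch; the test vertex is a placeholder and printf stands in for whatever logging is available):

// Minimal sketch: push a placeholder vertex through mvp and check that it
// lands inside the [-1, 1] NDC cube after the perspective divide.
Mat testPoint = (Mat_<double>(4, 1) << 0.0, 0.0, 0.0, 1.0); // placeholder vertex
Mat clip = mvp * testPoint;
double w = clip.at<double>(3, 0);
printf("ndc: %f %f %f\n",
       clip.at<double>(0, 0) / w,
       clip.at<double>(1, 0) / w,
       clip.at<double>(2, 0) / w);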
Below is the code in my Renderer class:

// Pass in the model-view matrix (ma.mvMatrix, received from native code)
GLES20.glUniformMatrix4fv(mvMatrixUniform, 1, false, ma.mvMatrix, 0);
// Pass in the model-view-projection matrix (ma.mvpMatrix, received from native code)
GLES20.glUniformMatrix4fv(mvpMatrixUniform, 1, false, ma.mvpMatrix, 0);
Below is the code in my fragment shader:
precision mediump float; // Set the default precision to medium; we don't need
                         // high precision in the fragment shader.
varying vec4 v_Color;    // The color from the vertex shader, interpolated
                         // across the triangle per fragment.

// The entry point for our fragment shader.
void main() {
    gl_FragColor = v_Color;
}
Below is the code in my vertex shader:

uniform mat4 u_MVPMatrix; // A constant representing the combined model/view/projection matrix.
uniform mat4 u_MVMatrix; // A constant representing the combined model/view matrix.
attribute vec4 a_Position; // Per-vertex position information we will pass in.
attribute vec4 a_Color; // Per-vertex color information we will pass in.
varying vec4 v_Color; // This will be passed into the fragment shader.
// The entry point for our vertex shader.
void main() {
    // Pass through the color.
    v_Color = a_Color;

    // gl_Position is a special variable used to store the final position.
    // Multiply the vertex by the matrix to get the final point in normalized
    // screen coordinates.
    gl_Position = u_MVPMatrix * a_Position;
}