但是在匹配之后,如何定位匹配到的对象?



我在OpenCV中使用ORB来匹配源图像(我想要定位的对象)和dst图像(源图像可能出现在其中)之间的关键点。现在我的问题是:我已经得到了一些不错的匹配,但如何在dst图像中定位出源图像的位置?

如果有人能帮我一把,我将非常感激。

我的代码在这里,我已经使用findHomography(),但它不能找到正确的对象,甚至不能画一个四边形。帮我找出错误。

#include <opencv2/opencv.hpp>

#include <algorithm>
#include <cassert>
#include <cfloat>
#include <iostream>

using namespace std;
using namespace cv;
int main(int argc, char **argv)
{
    double t = (double)cvGetTickCount();
    cv::Mat object = cv::imread("card.jpg");
    cv::VideoCapture mCapture;  
    cv::Mat scene;  
    mCapture.open(0);  
    assert(mCapture.isOpened());  
    while (1)  
    {  
        mCapture >> scene;  
        if (!scene.empty())  
        {  
           // ORB
            cv::OrbFeatureDetector detector;
            cv::OrbDescriptorExtractor extractor;
            //Find keypoints
            cv::Mat descriptorsA, descriptorsB;
            std::vector<cv::KeyPoint> keypointsA, keypointsB;
            //int64 last = cv::getTickCount();
            detector.detect(object, keypointsA);
            detector.detect(scene, keypointsB);
            //printf("detection = %f[s]n", (cv::getTickCount() - last)/cv::getTickFrequency());
            //last = cv::getTickCount();
            extractor.compute(object, keypointsA, descriptorsA);
            extractor.compute(scene, keypointsB, descriptorsB);
            //printf("description = %f[s]n", (cv::getTickCount() - last)/cv::getTickFrequency());
            // Match
            std::vector<cv::DMatch> matches;
            cv::BFMatcher matcher(cv::NORM_HAMMING, true);
            matcher.match(descriptorsA, descriptorsB, matches);
            // min distance
            double min_dist = DBL_MAX;
            for (int i = 0; i < (int)matches.size(); i++)
            { 
                double dist = matches[i].distance;
                if (dist < min_dist) 
                    min_dist = dist;
            }
            if (min_dist < 1.0) 
                min_dist = 1.0;
            //std::cout<<min_dist<<std::endl;
            // save good matches
            const double threshold = 1.5 * min_dist;
            std::vector<cv::DMatch> matches_good;
            for (int i = 0; i < (int)matches.size(); i++) 
            {
                if (matches[i].distance < threshold) 
                {
                    matches_good.push_back(matches[i]);
                }
            }
            // show
            Mat matchImage;
            // localize points
            std::vector<cv::Point2f> object_points, scene_points;
            for( int i = 0; i < matches_good.size(); i++ )
            {
                //-- Get the keypoints from the good matches
                object_points.push_back( keypointsA[ matches_good[i].queryIdx ].pt );
                scene_points.push_back( keypointsB[ matches_good[i].trainIdx ].pt );
            }
            Mat H = findHomography(object_points, scene_points,  CV_RANSAC); 
            cv::drawMatches(object, keypointsA, scene, keypointsB, matches, matchImage, cv::Scalar::all(-1), cv::Scalar::all(-1), std::vector<char>(), cv::DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);
            std::vector<cv::Point2f> object_corners(4), scene_corners(4);
            object_corners[0] = cv::Point(0, 0),
            object_corners[1] = cv::Point(object.cols, 0);
            object_corners[2] = cv::Point(object.cols, object.rows);
            object_corners[3] = cv::Point(0, object.rows);
            cv::perspectiveTransform(object_corners, scene_corners, H);
            Mat diff;
            //diff = Mat(scene_corners) - Mat(scene_points);
            //cv::imshow("Diff",diff);
            // draw bounds
            cv::line(matchImage, scene_corners.at(0) + cv::Point2f(object.cols, 0), scene_corners.at(1) + cv::Point2f(object.cols, 0), cv::Scalar(0, 255, 0), 4);
            cv::line(matchImage, scene_corners.at(1) + cv::Point2f(object.cols, 0), scene_corners.at(2) + cv::Point2f(object.cols, 0), cv::Scalar(0, 255, 0), 4);
            cv::line(matchImage, scene_corners.at(2) + cv::Point2f(object.cols, 0), scene_corners.at(3) + cv::Point2f(object.cols, 0), cv::Scalar(0, 255, 0), 4);
            cv::line(matchImage, scene_corners.at(3) + cv::Point2f(object.cols, 0), scene_corners.at(0) + cv::Point2f(object.cols, 0), cv::Scalar(0, 255, 0), 4);
            cv::imshow("match", matchImage);
            //std::cout<<"match size:"<<matches.size()<<std::endl;

        }  
        int key = cv::waitKey(30);  
        if (key == 27)  
        {  
            return 1;  
        }  
    } 
    cv::waitKey(0);
    return 0;
}

您需要使用RANSAC或LMeds(最小平方中位数)等算法来估计位置,这些算法对噪声和异常值具有鲁棒性。

您需要使用warpPerspective()(或对角点使用perspectiveTransform())并将您找到的单应性矩阵应用于这两个图像。

相关内容

  • 没有找到相关文章

最新更新