This program is my "real-time color tracking in a bird's-eye-view system".
#include <sstream>
#include <string>
#include <iostream>
#include <vector>
#include <stdio.h>
#include <stdlib.h>
#include "stdafx.h"
#include "Fruit.h"
#include "opencv2\opencv.hpp"
#pragma comment(lib,"C:\opencv\build\x86\vc10\lib\opencv_core249d.lib")
#pragma comment(lib,"C:\opencv\build\x86\vc10\lib\opencv_imgproc249d.lib")
#pragma comment(lib,"C:\opencv\build\x86\vc10\lib\opencv_highgui249d.lib")
#pragma comment(lib,"C:\opencv\build\x86\vc10\lib\opencv_objdetect249d.lib")
#pragma comment(lib,"C:\opencv\build\x86\vc10\lib\opencv_ml249d.lib")
#pragma comment(lib,"C:\opencv\build\x86\vc10\lib\opencv_video249d.lib")
#pragma comment(lib,"C:\opencv\build\x86\vc10\lib\opencv_calib3d249d.lib")
using namespace cv;
using namespace std; //needed for string, vector and stringstream used below
//initial min and max HSV filter values.
//these will be changed using trackbars
int H_MIN = 0;
int H_MAX = 256;
int S_MIN = 0;
int S_MAX = 256;
int V_MIN = 0;
int V_MAX = 256;
//default capture width and height
const int FRAME_WIDTH = 640;
const int FRAME_HEIGHT = 480;
//max number of objects to be detected in frame
const int MAX_NUM_OBJECTS=50;
//minimum and maximum object area
const int MIN_OBJECT_AREA = 7*7;
const int MAX_OBJECT_AREA = FRAME_HEIGHT*FRAME_WIDTH/1.5;
//names that will appear at the top of each window
const string windowName = "Original Image";
const string windowName1 = "HSV Image";
const string windowName2 = "Thresholded Image";
const string windowName3 = "After Morphological Operations";
const string trackbarWindowName = "Trackbars";
void on_trackbar( int, void* )
{
//This function gets called whenever a
// trackbar position is changed
}
string intToString(int number){
std::stringstream ss;
ss << number;
return ss.str();
}
void createTrackbars(){
//create window for trackbars
namedWindow(trackbarWindowName,0);
//create memory to store trackbar name on window
char TrackbarName[50];
sprintf( TrackbarName, "H_MIN", H_MIN);
sprintf( TrackbarName, "H_MAX", H_MAX);
sprintf( TrackbarName, "S_MIN", S_MIN);
sprintf( TrackbarName, "S_MAX", S_MAX);
sprintf( TrackbarName, "V_MIN", V_MIN);
sprintf( TrackbarName, "V_MAX", V_MAX);
createTrackbar( "H_MIN", trackbarWindowName, &H_MIN, H_MAX, on_trackbar );
createTrackbar( "H_MAX", trackbarWindowName, &H_MAX, H_MAX, on_trackbar );
createTrackbar( "S_MIN", trackbarWindowName, &S_MIN, S_MAX, on_trackbar );
createTrackbar( "S_MAX", trackbarWindowName, &S_MAX, S_MAX, on_trackbar );
createTrackbar( "V_MIN", trackbarWindowName, &V_MIN, V_MAX, on_trackbar );
createTrackbar( "V_MAX", trackbarWindowName, &V_MAX, V_MAX, on_trackbar );
}
void drawObject(vector<Fruit> theFruits,Mat &frame){
for(int i =0; i<theFruits.size(); i++){
cv::circle(frame,cv::Point(theFruits.at(i).getXPos(),theFruits.at(i).getYPos()),10,cv::Scalar(0,0,255));
cv::putText(frame,intToString(theFruits.at(i).getXPos())+ " , " + intToString(theFruits.at(i).getYPos()),cv::Point(theFruits.at(i).getXPos(),theFruits.at(i).getYPos()+20),1,1,Scalar(0,255,0));
}
}
void morphOps(Mat &thresh){
//create structuring element that will be used to "dilate" and "erode" image.
//the element chosen here is a 3px by 3px rectangle
Mat erodeElement = getStructuringElement( MORPH_RECT,Size(3,3));
//dilate with a larger element to make sure the object is nicely visible
Mat dilateElement = getStructuringElement( MORPH_RECT,Size(8,8));
erode(thresh,thresh,erodeElement);
erode(thresh,thresh,erodeElement);
dilate(thresh,thresh,dilateElement);
dilate(thresh,thresh,dilateElement);
}
void trackFilteredObject(Mat threshold,Mat HSV, Mat &cameraFeed_BE){
vector <Fruit> apples;
Mat temp;
threshold.copyTo(temp);
//these two vectors needed for output of findContours
vector< vector<Point> > contours;
vector<Vec4i> hierarchy;
//find contours of filtered image using openCV findContours function
findContours(temp,contours,hierarchy,CV_RETR_CCOMP,CV_CHAIN_APPROX_SIMPLE );
//use moments method to find our filtered object
double refArea = 0;
bool objectFound = false;
if (hierarchy.size() > 0) {
int numObjects = hierarchy.size();
//if number of objects greater than MAX_NUM_OBJECTS we have a noisy filter
if(numObjects<MAX_NUM_OBJECTS){
for (int index = 0; index >= 0; index = hierarchy[index][0]) {
Moments moment = moments((cv::Mat)contours[index]);
double area = moment.m00;
if(area>MIN_OBJECT_AREA){
Fruit apple;
apple.setXPos(moment.m10/area);
apple.setYPos(moment.m01/area);
apples.push_back(apple);
objectFound = true;
}else objectFound = false;
}
//let user know you found an object
if(objectFound ==true){
//draw object location on screen
drawObject(apples,cameraFeed_BE);
}
}else putText(cameraFeed_BE,"TOO MUCH NOISE! ADJUST FILTER",Point(0,50),1,2,Scalar(0,0,255),2);
}
}
int main(int argc, char* argv[])
{
//if we would like to calibrate our filter values, set to true.
bool calibrationMode = true;
//Matrix to store each frame of the webcam feed
Mat cameraFeed;
Mat threshold;
Mat HSV;
if(calibrationMode){
//create slider bars for HSV filtering
createTrackbars();
}
//video capture object to acquire webcam feed
VideoCapture capture;
//open capture object at location zero (default location for webcam)
capture.open(0);
//set height and width of capture frame
capture.set(CV_CAP_PROP_FRAME_WIDTH,FRAME_WIDTH);
capture.set(CV_CAP_PROP_FRAME_HEIGHT,FRAME_HEIGHT);
if(!capture.isOpened()) //confirm camera opened
{
return -1;
}
//start an infinite loop where webcam feed is copied to cameraFeed matrix
//all of our operations will be performed within this loop
while(1){
do{
capture >> cameraFeed;
}while(cameraFeed.empty());
int key = 0;
IplImage* Frame ;
IplImage* birds_image;
CvMat *H;
float Z;
cvNamedWindow("Birds eye");
Frame =new IplImage(cameraFeed);
birds_image = cvCloneImage(Frame);
// EXAMPLE OF LOADING THESE MATRICES BACK IN:
CvMat *intrinsic = (CvMat*)cvLoad("Intrinsics.xml");
CvMat *distortion = (CvMat*)cvLoad("Distortion.xml");
// Build the undistort map which we will use for all subsequent frames.
IplImage* mapx = cvCreateImage( cvGetSize(Frame), IPL_DEPTH_32F, 1 );
IplImage* mapy = cvCreateImage( cvGetSize(Frame), IPL_DEPTH_32F, 1 );
cvInitUndistortMap(
intrinsic,
distortion,
mapx,
mapy
);
IplImage *t = cvCloneImage(Frame);
//cvShowImage( "Calibration", image ); // Show raw image
cvRemap( t, Frame, mapx, mapy ); // Undistort image
cvReleaseImage(&t);
H = (CvMat*)cvLoad("H.xml");
cvWarpPerspective(
Frame,
birds_image,
H,
CV_INTER_LINEAR | CV_WARP_INVERSE_MAP | CV_WARP_FILL_OUTLIERS
);
cvShowImage("Birds eye", birds_image);
Mat cameraFeed_BE = cvarrToMat(birds_image);
//store image to matrix
capture.read(cameraFeed_BE);
//convert frame from BGR to HSV colorspace
cvtColor(cameraFeed_BE,HSV,COLOR_BGR2HSV);
if(calibrationMode==true){
//if in calibration mode, we track objects based on the HSV slider values.
cvtColor(cameraFeed_BE,HSV,COLOR_BGR2HSV);
inRange(HSV,Scalar(H_MIN,S_MIN,V_MIN),Scalar(H_MAX,S_MAX,V_MAX),threshold);
morphOps(threshold);
imshow(windowName2,threshold);
trackFilteredObject(threshold,HSV,cameraFeed_BE);
}
//show frames
imshow(windowName2,threshold);
imshow(windowName,cameraFeed_BE);
imshow(windowName1,HSV);
//delay 30ms so that screen can refresh.
//image will not appear without this waitKey() command
if(cv::waitKey(30) >= 0) break;
}
return 0;
}
1. I think the Original Image, HSV Image and Thresholded Image windows should show the bird's-eye image, but in fact they don't, and I don't know why.
2. After running the program for about a minute, I get this error: (error screenshot)
I think this error appears when there is some memory leak, so I added this line:
cvReleaseImage(&birds_image);
but it did not help.
Can you tell me what you think about 1. and 2.?
As you pointed out, the error message is related to a memory error. You have the allocation
Frame = new IplImage(cameraFeed);
inside your infinite loop. That allocation (wrapping the RGB image from the webcam!) is never released: each pass through the loop writes a new value into the pointer, so the previous allocation is lost and can never be freed.
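As a minimal sketch of that cleanup, assuming you keep the loop structured as it is: release, at the end of every iteration (after the imshow() calls), each C-API object the loop body allocates; the cvCloneImage, cvCreateImage and cvLoad calls inside the loop allocate fresh buffers every frame as well. Note that the header created with new should be freed with delete rather than cvReleaseImage, because its pixel data belongs to cameraFeed. A more thorough restructuring is sketched further below.
// Sketch only: free everything allocated in this iteration, at the end of the while body.
delete Frame;                    // header from `new IplImage(cameraFeed)`; its data belongs to cameraFeed
cvReleaseImage(&birds_image);    // from cvCloneImage()
cvReleaseImage(&mapx);           // from cvCreateImage()
cvReleaseImage(&mapy);           // from cvCreateImage()
cvReleaseMat(&intrinsic);        // from cvLoad("Intrinsics.xml")
cvReleaseMat(&distortion);       // from cvLoad("Distortion.xml")
cvReleaseMat(&H);                // from cvLoad("H.xml")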
In addition, it would be a good idea to move the loading of the .xml files out of the infinite loop:
CvMat *intrinsic = (CvMat*)cvLoad("Intrinsics.xml");
CvMat *distortion = (CvMat*)cvLoad("Distortion.xml");
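Here is a sketch of that restructuring (untested, and assuming OpenCV 2.4's C interop, i.e. Mat's IplImage conversion operator, 8-bit 3-channel frames, and calibration files and frame size that do not change at runtime; "undistorted" is a name introduced here, the rest matches your code). Load Intrinsics.xml, Distortion.xml and H.xml once, build the undistort maps once, allocate the output images once, and keep only a stack header inside the loop so nothing is allocated per frame:
// Before the while loop: one-time loads and allocations.
CvMat* intrinsic  = (CvMat*)cvLoad("Intrinsics.xml");
CvMat* distortion = (CvMat*)cvLoad("Distortion.xml");
CvMat* H          = (CvMat*)cvLoad("H.xml");
IplImage* mapx = cvCreateImage(cvSize(FRAME_WIDTH, FRAME_HEIGHT), IPL_DEPTH_32F, 1);
IplImage* mapy = cvCreateImage(cvSize(FRAME_WIDTH, FRAME_HEIGHT), IPL_DEPTH_32F, 1);
cvInitUndistortMap(intrinsic, distortion, mapx, mapy);
IplImage* undistorted = cvCreateImage(cvSize(FRAME_WIDTH, FRAME_HEIGHT), IPL_DEPTH_8U, 3);
IplImage* birds_image = cvCreateImage(cvSize(FRAME_WIDTH, FRAME_HEIGHT), IPL_DEPTH_8U, 3);
cvNamedWindow("Birds eye");
while(1){
    do{ capture >> cameraFeed; }while(cameraFeed.empty());
    IplImage frameHeader = cameraFeed;            // header only, no copy, nothing to delete
    cvRemap(&frameHeader, undistorted, mapx, mapy);   // undistort the frame
    cvWarpPerspective(undistorted, birds_image, H,
        CV_INTER_LINEAR | CV_WARP_INVERSE_MAP | CV_WARP_FILL_OUTLIERS);
    cvShowImage("Birds eye", birds_image);
    Mat cameraFeed_BE = cvarrToMat(birds_image);  // shares data with birds_image
    // ... HSV conversion, inRange(), morphOps() and trackFilteredObject() as before ...
    if(cv::waitKey(30) >= 0) break;
}
// After the loop: release the one-time allocations.
cvReleaseImage(&undistorted);
cvReleaseImage(&birds_image);
cvReleaseImage(&mapx);
cvReleaseImage(&mapy);
cvReleaseMat(&intrinsic);
cvReleaseMat(&distortion);
cvReleaseMat(&H);
With this layout the only per-frame work left is the remap, the warp and the tracking itself, and every cvCreateImage/cvLoad is balanced by exactly one release after the loop.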