I don't know why, but for some reason the function is being told it received incorrect parameters. As far as I can tell every argument is correct, so I don't understand what is wrong. I would very much appreciate it if anyone could explain why this is not working.
#include <iostream>
#include <tuple>
#include <utility>
#include <vector>

#include "opencv2/video/tracking.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/objdetect/objdetect.hpp"
using namespace cv;
using namespace std;
// NOTE(review): this file-scope `i` is never used (detectAndDisplay declares
// its own local `i`); kept only to avoid touching file-scope names.
int i = 0;
// Termination criteria shared by cornerSubPix and calcOpticalFlowPyrLK:
// stop after 20 iterations or once refinement moves less than 0.03 px.
TermCriteria termcrit(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS,20,0.03);
// Search-window sizes: subPixWinSize for cornerSubPix, winSize for the LK pyramid.
Size subPixWinSize(10,10), winSize(31,31);
//detectAndDisplay - detect a face in `frame`, draw debug overlays, and
//collect good feature points inside the face region for LK tracking.
//
// @param frame  BGR camera frame; overlays are drawn into it in place.
// @return {{grayframe, prevGray}, {corners_now, corners_prev}}
//   .first.first    current grayscale frame
//   .first.second   "previous" grayscale frame (see NOTE in the body)
//   .second.first   feature points found in the current frame
//   .second.second  feature points found in the previous frame
//
// FIX: the signature used cv::Vector<Point2f> — an obsolete OpenCV class
// that does not convert to InputArray, which is why calcOpticalFlowPyrLK in
// main() kept "getting the incorrect parameters". std::vector<Point2f> is
// the type the OpenCV API expects everywhere.
tuple<pair<Mat, Mat>, pair<vector<Point2f>, vector<Point2f>> > detectAndDisplay ( Mat frame )
{
    Mat grayframe, prevGray;

    // FIX: the original compared frame.type() against the magic number 8 and
    // then called cvtColor(frame, frame, CV_8U). CV_8U (== 0) is a *depth*
    // constant, not a color-conversion code, so that cvtColor did something
    // entirely different. Depth conversion is convertTo's job.
    if (frame.depth() != CV_8U) {
        frame.convertTo(frame, CV_8U);
    }
    // FIX: VideoCapture delivers BGR frames, so CV_BGR2GRAY is the correct
    // conversion (CV_RGB2GRAY silently swaps the red/blue weights).
    cvtColor(frame, grayframe, CV_BGR2GRAY);

    vector<Point2f> corners_prev;
    vector<Point2f> corners_now;

    //-- 1. Load the cascade once — the original reloaded the XML from disk
    //      on every single frame (function-local statics keep the interface intact).
    static CascadeClassifier face_cascade;
    static bool cascade_ok = false;
    if (!cascade_ok) {
        String face_cascade_XML = "/Users/x/Documents/x/facedetect/facedetect/haarcascade_frontalface_alt.xml";
        cascade_ok = face_cascade.load(face_cascade_XML);
        if (!cascade_ok) {
            cout << "Cascade Error" << endl;
        }
    }

    Point center_of_frame(grayframe.size().width/2, grayframe.size().height/2);
    circle(grayframe, center_of_frame, 1, CV_RGB(0,255,255), 8, 8, 0);

    //-- Detect faces. detectMultiScale clears and refills `faces`, so the
    //   original's pre-sizing to one element was unnecessary.
    vector<Rect> faces;
    face_cascade.detectMultiScale(grayframe, faces, 1.1, 2, 0, Size(300, 300));

    if (!faces.empty()) {
        // FIX: the loop variable used to be named `faces` too, shadowing the
        // container it iterated over.
        for (const Rect& face : faces)
        {
            Point center_position_of_face((face.br().x + face.tl().x) / 2,
                                          (face.tl().y + face.br().y) / 2);
            Point corner_1(face.br().x, face.tl().y);   // top-right
            Point corner_2 = face.tl();                 // top-left
            Point corner_3 = face.br();                 // bottom-right
            Point corner_4(face.tl().x, face.br().y);   // bottom-left
            rectangle(frame, face, CV_RGB(0,255,0), 4, 8, 0);
            circle(frame, center_position_of_face, 8, CV_RGB(128,128,128), 8, 8, 0);
            circle(frame, corner_1, 1, CV_RGB(128,128,128), 8, 8, 0);
            circle(frame, corner_2, 1, CV_RGB(128,128,128), 8, 8, 0);
            circle(frame, corner_3, 1, CV_RGB(128,128,128), 8, 8, 0);
            circle(frame, corner_4, 1, CV_RGB(128,128,128), 8, 8, 0);
            line(frame, center_position_of_face, center_of_frame, CV_RGB(0,200,0), 1, 8);
        }

        // Restrict the feature search to the last detected face.
        Mat mask(frame.size(), CV_8UC1, Scalar(0));
        mask(faces.back()).setTo(Scalar(255));

        // NOTE(review): prevGray is a fresh local, so it is *always* empty
        // here and ends up a copy of grayframe — previous-frame state is
        // never carried between calls. Kept as-is to preserve behavior, but
        // real frame-to-frame tracking needs prevGray to persist (static or
        // caller-owned). TODO: confirm the intended design.
        if (prevGray.empty()) {
            grayframe.copyTo(prevGray);
        }

        goodFeaturesToTrack(prevGray, corners_prev, 300, 0.01, 10, mask, 3, 0, 0.04);
        goodFeaturesToTrack(grayframe, corners_now, 300, 0.01, 10, mask, 3, 0, 0.04);
        // Robustness: cornerSubPix asserts on an empty point list, so only
        // refine when goodFeaturesToTrack actually found something.
        if (!corners_prev.empty()) {
            cornerSubPix(prevGray, corners_prev, subPixWinSize, Size(-1,-1), termcrit);
        }
        if (!corners_now.empty()) {
            cornerSubPix(grayframe, corners_now, subPixWinSize, Size(-1,-1), termcrit);
        }

        // Visualize the refined feature points.
        for (const Point2f& p : corners_now) {
            circle(frame, p, 4, Scalar(0,255,0), -1, 8, 0);
        }
    }

    flip(frame, frame, 1);
    imshow("Facedetection", frame);
    return make_tuple(make_pair(grayframe, prevGray), make_pair(corners_now, corners_prev));
}
int main( int argc, const char** argv )
{
VideoCapture cam(0);
Mat frame;
tuple<pair<Mat, Mat>,pair<Vector<Point2f> , Vector<Point2f>> > grey_frames;
//-- 2. Read the video stream
if( !cam.isOpened() )
{
cout <<"Webcam error" << endl;
return 0;
}
else
{
cout << "Is face detected ?" << endl;
while(waitKey(1) != 'y')
{
cam.read(frame);
if( !frame.empty()){
grey_frames = detectAndDisplay(frame);
}
}
while(waitKey(1))
{
cam.read(frame);
//-- 3. Apply the classifier to the frame
if( !get<1>(grey_frames).second.empty())
{
vector<uchar> status;
vector<float> err;
Mat prevImg = get<0>(grey_frames).second;
Mat nextImg = get<0>(grey_frames).first;
Vector<Point2f> prevPts = get<1>(grey_frames).second;
Vector<Point2f> nextPts = get<1>(grey_frames).first;
calcOpticalFlowPyrLK(prevImg, nextImg, prevPts, nextPts, status, err); //---> here the error occurs.
flip(frame,frame,1);
imshow("tracking", frame);
}
}
}
return 0;
}