I use the calcOpticalFlowPyrLK
method, to track the Points I've matched through a recognition step in an augmented reality app for Android.
In a recent Question I asked for alternative methods or changes which would raise the performance of the algorithm. Now it has the performance which I aimed for, but the Points which are tracked by it, are totally wrong. Here a small example:
After recognition, I get "good" points for the corners of the object e.g.:
576.16235, 128.3963
572.71576, 129.0526
551.0733, 143.824
561.6874, 119.729095
After the first Iteration I already get strange results (note I did not move the camera):
286.5614, -150.73625
291.46445, -146.87875
218.38367, -198.0336
-26.965776, -447.23856
That repeats until the numbers of the corners a growing (or shrinking) so far, that my output only shows 0 for every corner.
Here is my code:
calcOpticalFlow:
int BruteForceMatcher::trackWithOpticalFlow(std::vector<cv::Mat> prevPyr, std::vector<cv::Mat> nextPyr, std::vector<cv::Point2f> &srcPoints, std::vector<cv::Point2f> &srcCorners){
std::vector<cv::Point2f> estPoints;
std::vector<cv::Point2f> estCorners;
std::vector<cv::Point2f> goodPoints;
std::vector<cv::Point2f> leftsrc;
std::vector<uchar> status;
std::vector<float> error;
//double opticalFlowT = cv::getTickCount(), homoT, perspT, tf = cv::getTickFrequency();
if(srcPoints.size() > 0) {
cv::calcOpticalFlowPyrLK(prevPyr, nextPyr, srcPoints, estPoints, status, error, cv::Size(7,7));
for (int i = 0; i < estPoints.size(); i++) {
if (!status[i] && error[i] < 20.f) {
//LOGW("ERROR : %f\n", error[i]);
goodPoints.push_back(estPoints[i] *= 4);
leftsrc.push_back(srcPoints[i] *= 4);
}
}
//opticalFlowT = cv::getTickCount() - opticalFlowT;
//LOGD("Time opticalFlow and Outlier removal: %f\n", opticalFlowT*1000./tf);
//LOGD("Left Points (est/src): %i, %i", goodPoints.size(), leftsrc.size());
if(goodPoints.size() <= 0){
//LOGD("No good Points calculated");
return 0;
}
//homoT = cv::getTickCount();
cv::Mat f = cv::findHomography(leftsrc, goodPoints);
//homoT = cv::getTickCount() - homoT;
//LOGD("Homography: %f\n", homoT*1000./tf);
if(cv::countNonZero(f) < 1){
//LOGD("Homography Matrix is empty!");
return 0;
}
//perspT = cv::getTickCount();
cv::perspectiveTransform(srcCorners, estCorners, f);
//perspT = cv::getTickCount() - perspT;
//LOGD("Perspective Transform: %f\n", perspT*1000./tf);
srcCorners.swap(estCorners);
estCorners.clear();
srcPoints.swap(goodPoints);
goodPoints.clear();
status.clear();
error.clear();
return srcPoints.size();
}
return 0;
}
And the Method which calls the opticalFlow Tracking and also builds the pyramids:
std::vector<cv::Point2f> findBruteForceMatches(cv::Mat img){
int matches = 0;
BruteForceMatcher *bruteForceMatcher = new BruteForceMatcher();
if(trackKLT){
LOGD("TRACK WITH KLT");
std::vector<cv::Mat> currPyr;
cv::resize(img, img, cv::Size(img.cols/4, img.rows/4));
cv::buildOpticalFlowPyramid(img, currPyr, cv::Size(9,9), 3);
double kltTime = (double) cv::getTickCount();
matches = bruteForceMatcher->trackWithOpticalFlow(prevPyr, currPyr, srcPoints, scene_corners);
kltTime = (double) cv::getTickCount() - kltTime;
LOGD("KLT Track Time: %f\n", kltTime*1000./tf);
//returningtime = cv::getTickCount();
if(matches > 10){
trackKLT = true;
prevPyr.swap(currPyr);
currPyr.clear();
delete bruteForceMatcher;
return scene_corners;
}else{
trackKLT = false;
prevPyr.clear();
srcPoints.clear();
scene_corners.clear();
delete bruteForceMatcher;
return scene_corners;
}
} else{
LOGD("RECOGNIZE OBJECT");
std::vector<cv::Point2f> ransacs;
ransacs.reserve(100);
double bfMatchTime = (double) cv::getTickCount();
matches = bruteForceMatcher->findMatchesBF(img, features2d, descriptors, scene_corners, ransacs);
bfMatchTime = (double) cv::getTickCount() - bfMatchTime;
LOGD("BruteForceMatch Time: %f\n", bfMatchTime*1000./tf);
if(matches > 10){
trackKLT = true;
cv::resize(img, img, cv::Size(img.cols/4, img.rows/4));
cv::buildOpticalFlowPyramid(img, prevPyr, cv::Size(9,9), 3);
for(int i = 0; i < ransacs.size(); i++){
ransacs[i] *= 0.25;
}
srcPoints.swap(ransacs);
ransacs.clear();
delete bruteForceMatcher;
return scene_corners;
}else{
img.release();
scene_corners.clear();
ransacs.clear();
delete bruteForceMatcher;
return scene_corners;
}
}
}