Match 2D keypoints to 3D triangulated points using three sets of descriptors?
I am having trouble with a feature matching workflow, and am looking for some help.
I have a stereo camera, and am triangulating points from it using feature matching. In frame 1, I match points between the left and right images, and triangulate them. In frame 2, I match points between frame 1 and frame 2, using the left images only.
Now, I need to find correspondences between the matched frame 2 keypoints and the triangulated 3D points, so I can pass them to cv::solvePnP.
My function is:
void Tracking::PnpTests()
{
cv::Mat rvec, tvec, rvec2, tvec2;
std::string frames = "00";
//sequential
boost::circular_buffer<cv::Mat> frameArray((2));
//storage for keypoints/descriptors
cv::Mat descCurrent;
cv::Mat descCurrentR;
cv::Mat descPrevious;
std::vector<cv::KeyPoint> keyPntsCurrent;
std::vector<cv::KeyPoint> keyPntsGoodL;
std::vector<cv::KeyPoint> keyPntsGoodR;
std::vector<cv::KeyPoint> keyPntsCurrentMatch;
std::vector<cv::KeyPoint> keyPntsCurrentR;
std::vector<cv::KeyPoint> keyPntsPrevious;
std::vector<cv::Point3f> Points3d;
cv::Mat descCurrentMatched;// = cv::Mat(descCurrent.rows, descCurrent.cols, cv::DataType<float>::type);
// Retrieve paths to images
vector<string> vstrImageLeft;
vector<string> vstrImageRight;
vector<double> vTimestamps;
LoadImages2(vstrImageLeft, vstrImageRight, vTimestamps);
const int nImages = vstrImageLeft.size();
cv::Size boardSize(8, 6);
//tringuulate stuff
std::vector<cv::Point3f> objectPointsTri;
std::vector<cv::Point3f> objectPointsGood;
std::vector<cv::KeyPoint> keyPntsTriReturn;
std::vector<cv::KeyPoint> keyPntsGood;
std::vector<cv::Point2f> projectedPoints;
std::vector<cv::DMatch> matchR;
std::vector<cv::DMatch> match;
// Main loop
int frameNumber = 0;
cv::Mat imLeft, imRight, imStored;
for (int ni = 0; ni < nImages; ni++)
{
imLeft = cv::imread("frames/left/" + vstrImageLeft[ni], CV_LOAD_IMAGE_UNCHANGED);
imRight = cv::imread("frames/right/" + vstrImageRight[ni], CV_LOAD_IMAGE_UNCHANGED);
if (imLeft.empty())
{
cerr << endl << "Failed to load image at: "
<< string(vstrImageLeft[ni]) << endl;
}
if (bFirstRun == false) // every run.
{
int64 t01 = cv::getTickCount();
//use features.
tFeatures->DetectKeypointsL(imLeft, descCurrent, keyPntsCurrent);
//knn brute force match to previous frame
match = tPointMatching->matchPointsOG2(descPrevious, descCurrent);
Mat img_matches2;
cv::drawMatches(Mat(imStored), keyPntsPrevious, Mat(imLeft), keyPntsCurrent, match, img_matches2);
cv::namedWindow("matches2", 0);
cv::imshow("matches2", img_matches2);
cv::waitKey(1);
//start tracker loop
if (match.size() >= 5)
{
objectPointsGood.clear();
keyPntsGood.clear();
for (cv::DMatch& m : match)
{
//use matched keys
cv::Point3f pos = objectPointsTri[m.trainIdx];
cv::KeyPoint img = keyPntsCurrent[m.queryIdx];
objectPointsGood.push_back(pos);
keyPntsGood.push_back(img);
}
//solve
if (objectPointsGood.size() != 0)
{
projectedPoints = tPnPSolvers->CvPnp(keyPntsGood,objectPointsGood, cameraMatrix, distCoeffs, rvec, tvec);
}
//flip
cv::Mat RotMat;
cv::Rodrigues(rvec, RotMat);
RotMat = RotMat.t();
tvec = -RotMat * tvec;
//project
for (int i = 0; i < projectedPoints.size(); i++)
{
cv::drawMarker(imLeft, cv::Point(projectedPoints[i].x, projectedPoints[i].y), cv::Scalar(0, 0, 255), cv::MARKER_CROSS, 50, 10);
}
}
}
if (bFirstRun == true) //first time, store previous frame and get keys
{
cameraMatrix.zeros(3, 3, cv::DataType<float>::type);
R.zeros(3, 3, cv::DataType<float>::type);
t.zeros(3, 1, cv::DataType<float>::type);
cv::FileStorage fs("CalibrationData.xml", cv::FileStorage::READ);
fs["cameraMatrix"] >> cameraMatrix;
fs["dist_coeffs"] >> distCoeffs;
tFeatures->DetectKeypointsL(imLeft, descCurrent, keyPntsCurrent); //Left image, left descriptors, left keypoints
tFeatures->DetectKeypointsR(imRight, descCurrentR, keyPntsCurrentR); //Right image, Right descriptors, Right keypoints
//KNNMATCH MATCHING / FILTER RESULTS.
std::vector<cv::DMatch> matchR ...