Hi,
I'm trying to implement AKAZE with opencv.js and opencv-wasm.
Here is my code:
const { cv, cvTranslateError } = require('opencv-wasm');
const Jimp = require('jimp');
const util = require('./util.js');
(async () => {
try {
console.log("Enter 1");
const jimpSrc = await Jimp.read("./images/how_to_play.png");
const jimpSrcToMatch = await Jimp.read("./images/scene.png");
console.log("Enter 2");
// initiate ORB detector, default number of features to retain is 500, bit too low
let akaze = new cv.AKAZE();
let img1 = await cv.matFromImageData(jimpSrc.bitmap);
let img2 = await cv.matFromImageData(jimpSrcToMatch.bitmap);
let res = new cv.Mat();
let mask1 = new cv.Mat();
let mask2 = new cv.Mat();
console.log("Enter 3");
cv.cvtColor(img1, img1, cv.COLOR_BGRA2GRAY, 0);
cv.cvtColor(img2, img2, cv.COLOR_BGRA2GRAY, 0);
// find descriptors and keypoints with AKAZE detector
let kp1 = new cv.KeyPointVector();
let kp2 = new cv.KeyPointVector();
let des1 = new cv.Mat();
let des2 = new cv.Mat();
akaze.detectAndCompute(img1, mask1, kp1, des1);
akaze.detectAndCompute(img2, mask2, kp2, des2);
// create BFMatcher object and and match descriptors
let matcher = new cv.BFMatcher();
let matches = new cv.DMatchVectorVector();
let filteredMatches = new cv.DMatchVector();
matcher.knnMatch(des1, des2, matches, 2);
// filter matches
for (let i = 0; i < matches.size(); ++i) {
let match = matches.get(i);
let dMatch1 = match.get(0);
let dMatch2 = match.get(1);
if (dMatch1.distance <= dMatch2.distance * 0.7) {
filteredMatches.push_back(dMatch1);
}
}
cv.drawMatches(img1, kp1, img2, kp2, filteredMatches, res);
util.saveMat(res, "matcher-output.png");
if (filteredMatches.size() > 13) {
let objectPoints = [];
let scenePoints = [];
for (let i = 0; i < filteredMatches.size(); ++i) {
let dMatch = filteredMatches.get(i);
objectPoints.push(
new cv.Point(kp1.get(dMatch.queryIdx).pt.x, kp1.get(dMatch.queryIdx).pt.y));
scenePoints.push(
new cv.Point(kp2.get(dMatch.trainIdx).pt.x, kp2.get(dMatch.trainIdx).pt.y));
}
let homography = cv.findHomography(
cv.matFromArray(objectPoints.length, 3, cv.CV_32F, objectPoints),
cv.matFromArray(scenePoints.length, 3, cv.CV_32F, scenePoints), cv.RANSAC, 3);
let sceneCorners = new cv.Mat(4, 1, cv.CV_32FC2);
//let objCorners = cv.matFromArray(4, 1, cv.CV_32FC2, [0, 0, img1.cols - 1, 0, img1.cols - 1, img1.rows - 1, 0, img1.rows - 1]);
let objCorners = new cv.Mat(4, 1, cv.CV_32FC2);
console.log("Transforming object corners to scene corners...");
// cv.perspectiveTransform(objCorners, sceneCorners, homography);
var view = objCorners.data32F;
view[0] = 0;
view[1] = 0;
view[2] = img1.cols;
view[3] = 0;
view[4] = img1.cols;
view[5] = img1.rows;
view[6] = 0;
view[7] = img1.rows;
cv.perspectiveTransform(objCorners, sceneCorners, homography);
var dataAns = sceneCorners.data32F;
var x1 = new cv.Point(dataAns[0], dataAns[1]);
var x2 = new cv.Point(dataAns[2], dataAns[3]);
var x3 = new cv.Point(dataAns[4], dataAns[5]);
var x4 = new cv.Point(dataAns[6], dataAns[7]);
cv.line(res, x1, x2,
new cv.Scalar(0, 0, 0), 10, cv.LINE_AA, 0);
cv.line(res, x2, x3,
new cv.Scalar(0, 0, 0), 10, cv.LINE_AA, 0);
cv.line(res, x3, x4,
new cv.Scalar(0, 0, 0), 10, cv.LINE_AA, 0);
cv.line(res, x4, x1,
new cv.Scalar(0, 0, 0), 10, cv.LINE_AA, 0);
}
} catch (err) {
console.log(cvTranslateError(cv, err));
}
})();
The problem is, that final corners coordinates x1, x2, x3, x4 are always like 1.00007678, 0.9999767623, -0.9878787.... Any ideas why?
P.S I'm well familiar with opencv for Java and Python.