Is there any option I can use while estimating the transformation so that no scaling is involved? I know the exact size of the images because they are scanned images, which means the transformation should not include any scaling.
Here is my code:
import cv2

if __name__ == "__main__":
    full_affine = False
    try_cuda = True
    match_conf = 0.3

    # ORB features + affine pairwise matcher from the stitching "detail" API
    finder = cv2.ORB.create()
    matcher = cv2.detail_AffineBestOf2NearestMatcher(full_affine, try_cuda, match_conf)

    source_img = cv2.imread("000001.jpg")
    target_img = cv2.imread("000000.jpg")
    source_feature = cv2.detail.computeImageFeatures2(featuresFinder=finder, image=source_img)
    target_feature = cv2.detail.computeImageFeatures2(featuresFinder=finder, image=target_img)

    # Tile metadata (load_tile_info and img_path are defined elsewhere in my script)
    source_info = load_tile_info("000001.xml", img_path, 1)
    target_info = load_tile_info("000000.xml", img_path, 0)

    matching_result = matcher.apply(source_feature, target_feature)

    print("matching_result.confidence")
    print(matching_result.confidence)
    print("matching_result.H")
    print(matching_result.H)
    print("matching_result.dst_img_idx")
    print(matching_result.dst_img_idx)
    print("matching_result.src_img_idx")
    print(matching_result.src_img_idx)
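
One approach I am considering (a sketch only, not something I have verified): keep the AffineBestOf2NearestMatcher as is, take the inlier matches it found, and re-fit a rotation-plus-translation transform with the scale fixed at 1 using a Kabsch-style least-squares fit. The helper below is hypothetical, and it assumes ImageFeatures.getKeypoints(), MatchesInfo.getMatches() and MatchesInfo.getInliers() are exposed in your OpenCV Python build (they are in recent 4.x releases).

import numpy as np

def rigid_from_matches(src_features, dst_features, matches_info):
    # Collect the coordinates of the geometrically consistent matches
    src_kp = src_features.getKeypoints()
    dst_kp = dst_features.getKeypoints()
    matches = matches_info.getMatches()
    inliers = matches_info.getInliers()
    src_pts = np.float64([src_kp[m.queryIdx].pt for m, ok in zip(matches, inliers) if ok])
    dst_pts = np.float64([dst_kp[m.trainIdx].pt for m, ok in zip(matches, inliers) if ok])

    # Kabsch fit: least-squares rotation + translation, scale forced to 1
    src_mean, dst_mean = src_pts.mean(axis=0), dst_pts.mean(axis=0)
    A = (src_pts - src_mean).T @ (dst_pts - dst_mean)
    U, _, Vt = np.linalg.svd(A)
    d = np.sign(np.linalg.det(Vt.T @ U.T))      # guard against a reflection
    R = Vt.T @ np.diag([1.0, d]) @ U.T
    t = dst_mean - R @ src_mean
    return np.hstack([R, t.reshape(2, 1)])      # 2x3 matrix, usable with cv2.warpAffine

Called as rigid_from_matches(source_feature, target_feature, matching_result), this would return a 2x3 matrix mapping source coordinates to target coordinates without any scaling, while still relying on the matcher's RANSAC inlier selection. Would something like this be reasonable, or is there a built-in option I am missing?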