Struggling with reprojectImageTo3D
Hi, everyone. I'm trying to triangulate some points lying on a plane in a setup which involves two cameras.
First of all, I solve the relative pose problem: I estimate the Essential Matrix with the 5-point algorithm (using RANSAC) on the undistorted points, then I recover the pose.
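Roughly, this is what that step looks like (pts1_und and pts2_und are placeholder names for the matched undistorted pixel coordinates, and K1 is assumed to be the intrinsic matrix of the reference camera):
import cv2
import numpy as np

# pts1_und, pts2_und: Nx2 matched, undistorted pixel coordinates (placeholder names)
E, inliers = cv2.findEssentialMat(pts1_und, pts2_und, K1,
                                  method=cv2.RANSAC, prob=0.999, threshold=1.0)
_, R, t, _ = cv2.recoverPose(E, pts1_und, pts2_und, K1, mask=inliers)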
Then, I rectify the stereo pairs the usual way:
R1, R2, Pn1, Pn2, Q, _, _ = cv2.stereoRectify(K1, dcoeffs1, K2, dcoeffs2,
                                              img1.shape[::-1], R, t,
                                              flags=cv2.CALIB_ZERO_DISPARITY,
                                              alpha=-1)
# Compute the rigid transform that OpenCV applies to world points (USEFUL LATER)
# so that the rectified reference camera becomes K_new[I|0]
tn_1 = np.zeros((3, 1))  # Cameras are never translated during rectification
G1_rect = np.block([[R1, tn_1], [np.zeros((1, 3)), 1.0]])
maps1 = cv2.initUndistortRectifyMap(K1, dcoeffs1, R1, Pn1, (1920, 1080), cv2.CV_32FC1)
maps2 = cv2.initUndistortRectifyMap(K2, dcoeffs2, R2, Pn2, (1920, 1080), cv2.CV_32FC1)
img1_remap = cv2.remap(img1, maps1[0], maps1[1], cv2.INTER_LANCZOS4)
img2_remap = cv2.remap(img2, maps2[0], maps2[1], cv2.INTER_LANCZOS4)
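As a quick sanity check on the rectification, corresponding features should now lie on the same row, so horizontal lines drawn across the side-by-side pair should cross the same details in both images (a minimal sketch, assuming both remapped images have the same size):
canvas = np.hstack([img1_remap, img2_remap])
for y in range(0, canvas.shape[0], 60):
    cv2.line(canvas, (0, y), (canvas.shape[1] - 1, y), (0, 255, 0), 1)
cv2.imwrite('rectification_check.png', canvas)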
Result of the rectification:
#....
# Now call a function that recognizes a known object (the target) in the images
# Find target
target_corners, _ = dt.detectTarget(img_scene1, img_target, 0.5)  # returns the 4 corners of the detected polygon
target_corners = target_corners[:,0,:]
# Compute the mask for the target cutout:
target_mask = mp.maskPolygon(target_corners, img_scene1.shape[::-1])  # Output: mask with the same dimensions as the image
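(mp.maskPolygon is my own helper; a minimal equivalent is sketched below, with the function name and argument order being placeholders.)
# Rough equivalent of the mask helper above (an assumption, not the actual mp.maskPolygon):
def mask_polygon(corners, size_wh):
    w, h = size_wh
    mask = np.zeros((h, w), dtype=np.uint8)
    cv2.fillPoly(mask, [np.int32(corners).reshape(-1, 1, 2)], 1)
    return mask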
Target found: https://imgur.com/QjYV8tp
# Compute disparity map
# https://docs.opencv.org/3.3.1/d2/d85/classcv_1_1StereoSGBM.html
window_size = 5
min_disp = 16
max_disp = 1024
num_disp = max_disp - min_disp  # Must be divisible by 16!
stereo = cv2.StereoSGBM_create(minDisparity=min_disp,
                               numDisparities=num_disp,
                               blockSize=window_size,
                               P1=8*3*window_size**2,
                               P2=32*3*window_size**2,
                               disp12MaxDiff=1,
                               uniquenessRatio=10,
                               speckleWindowSize=150,
                               speckleRange=2
                               )
print('Computing SGBM disparity...')
disp = stereo.compute(img_scene1, img_scene2).astype(np.float32) / 16.0
target_disparity = target_mask*disp
points = cv2.reprojectImageTo3D(target_disparity, Q)
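For intuition, reprojectImageTo3D applies Q to every pixel, [X, Y, Z, W]^T = Q @ [u, v, d(u, v), 1]^T, and the output point is (X, Y, Z) / W. A single-pixel sketch (the pixel below is arbitrary and assumed to lie inside the target mask with a valid disparity):
u, v = 960, 540                          # arbitrary pixel, just for illustration
d = target_disparity[v, u]
X, Y, Z, W = Q @ np.array([u, v, d, 1.0])
point_manual = np.array([X, Y, Z]) / W   # should match points[v, u]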
# DEBUG:
cv2.namedWindow('scene1', cv2.WINDOW_NORMAL)
cv2.resizeWindow('scene1', 800,450)
cv2.imshow('scene1', img_scene1)
cv2.namedWindow('disparity', cv2.WINDOW_NORMAL)
cv2.resizeWindow('disparity', 800,450)
cv2.imshow('disparity', (disp-min_disp)/num_disp)
cv2.namedWindow('target_disparity', cv2.WINDOW_NORMAL)
cv2.resizeWindow('target_disparity', 800,450)
cv2.imshow('target_disparity', target_mask*(disp-min_disp)/num_disp)
cv2.waitKey()
cv2.destroyAllWindows()
# Extract the matrix of the target's 3D points from the 3-channel image returned by reprojectImageTo3D()
mask_disp = disp > disp.min()
mask_inf = ~(np.isinf(points[:,:,0]) | np.isinf(points[:,:,1]) | np.isinf(points[:,:,2]))
mask_nan = ~(np.isnan(points[:,:,0]) | np.isnan(points[:,:,1]) | np.isnan(points[:,:,2]))
mask = mask_disp & mask_inf & mask_nan
pts3D = points[mask]
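Since pts3D is expressed in the rectified reference-camera frame, the G1_rect built above (the "USEFUL LATER" transform) can undo that rotation and bring the points back into the original camera-1 frame if needed. A sketch, relying on R1 being the rotation from the unrectified to the rectified first-camera frame and on the translation being zero:
# X_rect = R1 @ X_cam1  =>  X_cam1 = R1.T @ X_rect  (row-vector form: pts3D @ R1)
pts3D_cam1 = pts3D @ R1
# Equivalently, with the homogeneous transform built earlier:
# pts3D_h = np.hstack([pts3D, np.ones((pts3D.shape[0], 1))])
# pts3D_cam1 = (np.linalg.inv(G1_rect) @ pts3D_h.T)[:3].T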
Now I have a 3D reconstruction of the region of the images corresponding to the target. I noticed that OpenCV, during rectification, applies a rigid transform to the world points so that the original reference camera and the new (rectified) reference camera have the same extrinsics (R = eye(3) and t = [0,0,0]'). In fact, during rectification both cameras must be rotated, and I think OpenCV simply brings the new cameras back to a reference in which the rectified reference camera ...