def hypothesis_board_corner_positions(icam_intrinsics = None,
                                      idx_inliers     = None,
                                      **optimization_inputs):
    r'''Reports the 3D chessboard points observed by a camera at calibration time

SYNOPSIS

    model               = mrcal.cameramodel("xxx.cameramodel")
    optimization_inputs = model.optimization_inputs()

    # shape (Nobservations, Nheight, Nwidth, 3)
    pcam = mrcal.hypothesis_board_corner_positions(**optimization_inputs)[0]

    i_intrinsics = \
      optimization_inputs['indices_frame_camintrinsics_camextrinsics'][:,1]

    # shape (Nobservations,1,1,Nintrinsics)
    intrinsics = nps.mv(optimization_inputs['intrinsics'][i_intrinsics],-2,-4)

    optimization_inputs['observations_board'][...,:2] = \
        mrcal.project( pcam,
                       optimization_inputs['lensmodel'],
                       intrinsics )

    # optimization_inputs now contains perfect, noiseless board observations
    x = mrcal.optimizer_callback(**optimization_inputs)[1]
    print(nps.norm2(x[:mrcal.num_measurements_boards(**optimization_inputs)]))
    ==>
    0

The optimization routine generates hypothetical observations from a set of
parameters being evaluated, trying to match these hypothetical observations to
real observations. To facilitate analysis, this routine returns these
hypothetical coordinates of the chessboard corners being observed. This routine
reports the 3D points in the coordinate system of the observing camera.

The hypothetical points are constructed from

- The calibration object geometry
- The calibration object-reference transformation in
  optimization_inputs['frames_rt_toref']
- The camera extrinsics (reference-camera transformation) in
  optimization_inputs['extrinsics_rt_fromref']
- The table selecting the camera and calibration object frame for each
  observation in
  optimization_inputs['indices_frame_camintrinsics_camextrinsics']

ARGUMENTS

- icam_intrinsics: optional integer specifying which intrinsic camera in the
  optimization_inputs we're looking at. If omitted (or None), we report
  camera-coordinate points for all the cameras

- idx_inliers: optional numpy array of booleans of shape
  (Nobservations,object_height,object_width) to select the outliers manually.
  If omitted (or None), the outliers are selected automatically: idx_inliers =
  observations_board[...,2] > 0. This argument is available to pick common
  inliers from two separate solves. The given array is never modified.

- **optimization_inputs: a dict() of arguments passable to mrcal.optimize()
  and mrcal.optimizer_callback(). We use the geometric data. This dict is
  obtainable from a cameramodel object by calling
  cameramodel.optimization_inputs()

RETURNED VALUE

- An array of shape (Nobservations, Nheight, Nwidth, 3) containing the
  coordinates (in the coordinate system of each camera) of the chessboard
  corners, for ALL the cameras. These correspond to the observations in
  optimization_inputs['observations_board'], which also have shape
  (Nobservations, Nheight, Nwidth, 3)

- An array of shape (Nobservations_thiscamera, Nheight, Nwidth, 3) containing
  the coordinates (in the camera coordinate system) of the chessboard corners,
  for the particular camera requested in icam_intrinsics. If icam_intrinsics
  is None: this is the same array as the previous returned value

- an (N,3) array containing camera-frame 3D points observed at calibration
  time, and accepted by the solver as inliers. This is a subset of the 2nd
  returned array.

- an (N,3) array containing camera-frame 3D points observed at calibration
  time, but rejected by the solver as outliers. This is a subset of the 2nd
  returned array.

    '''

    observations_board = optimization_inputs.get('observations_board')
    if observations_board is None:
        # Bug fix: this used to 'return' the Exception object instead of
        # raising it, so callers silently received an Exception instance
        # instead of the documented tuple
        raise Exception("No board observations available")

    indices_frame_camintrinsics_camextrinsics = \
        optimization_inputs['indices_frame_camintrinsics_camextrinsics']

    # Calibration-object geometry, as given to the solver
    object_width_n  = observations_board.shape[-2]
    object_height_n = observations_board.shape[-3]
    object_spacing  = optimization_inputs['calibration_object_spacing']
    calobject_warp  = optimization_inputs.get('calobject_warp')

    # Board points in the board frame. shape (Nh,Nw,3)
    full_object = mrcal.ref_calibration_object(object_width_n,
                                               object_height_n,
                                               object_spacing,
                                               calobject_warp)

    # Board-to-reference transform for each observation. shape (Nobservations,4,3)
    frames_Rt_toref = \
        mrcal.Rt_from_rt( optimization_inputs['frames_rt_toref'] )\
        [ indices_frame_camintrinsics_camextrinsics[:,0] ]

    # Reference-to-camera transform for each observation. The extrinsics index
    # is -1 for a camera sitting at the reference; prepending identity_Rt()
    # and shifting the index by +1 handles that case uniformly.
    # shape (Nobservations,4,3)
    extrinsics_Rt_fromref = \
        nps.glue( mrcal.identity_Rt(),
                  mrcal.Rt_from_rt(optimization_inputs['extrinsics_rt_fromref']),
                  axis = -3 ) \
        [ indices_frame_camintrinsics_camextrinsics[:,2]+1 ]

    # Board-to-camera transform for each observation
    Rt_cam_frame = mrcal.compose_Rt( extrinsics_Rt_fromref,
                                     frames_Rt_toref )

    # shape (Nobservations,Nh,Nw,3)
    p_cam_calobjects = \
        mrcal.transform_point_Rt(nps.mv(Rt_cam_frame,-3,-5), full_object)

    # shape (Nobservations,Nheight,Nwidth)
    if idx_inliers is None:
        idx_inliers = observations_board[...,2] > 0
    else:
        # Bug fix: work on a copy, so the masking below never modifies the
        # caller's array
        idx_inliers = idx_inliers.copy()
    idx_outliers = ~idx_inliers

    if icam_intrinsics is None:
        return                                  \
            p_cam_calobjects,                   \
            p_cam_calobjects,                   \
            p_cam_calobjects[idx_inliers,  ...],\
            p_cam_calobjects[idx_outliers, ...]

    # The user asked for a specific camera. Separate out its data

    # shape (Nobservations,)
    idx_observations = \
        indices_frame_camintrinsics_camextrinsics[:,1] == icam_intrinsics

    # Restrict the inlier/outlier sets to this camera's observations
    idx_inliers [~idx_observations] = False
    idx_outliers[~idx_observations] = False

    return                                       \
        p_cam_calobjects,                        \
        p_cam_calobjects[idx_observations, ...], \
        p_cam_calobjects[idx_inliers,      ...], \
        p_cam_calobjects[idx_outliers,     ...]
confirm_equal(drt_drt, drt_drt_ref, msg='invert_rt drt/drt result') # in-place rt0_ref_copy = np.array(rt0_ref) drt_drt_copy = np.array(drt_drt) rt, drt_drt = mrcal.invert_rt(rt0_ref_copy, out=(rt0_ref_copy, drt_drt_copy), get_gradients=True) confirm_equal(rt, invert_rt(rt0_ref), msg='invert_rt with grad result written in-place') confirm_equal(drt_drt, drt_drt_ref, msg='invert_rt with grad drt/drt result written in-place') Rt2 = mrcal.compose_Rt(Rt0_ref, Rt1_ref, out=out43) confirm_equal(Rt2, compose_Rt(Rt0_ref, Rt1_ref), msg='compose_Rt result') # in-place Rt0_ref_copy = np.array(Rt0_ref) Rt1_ref_copy = np.array(Rt1_ref) Rt2 = mrcal.compose_Rt(Rt0_ref_copy, Rt1_ref_copy, out=Rt0_ref_copy) confirm_equal(Rt2, compose_Rt(Rt0_ref, Rt1_ref), msg='compose_Rt result written in-place to Rt0') Rt0_ref_copy = np.array(Rt0_ref) Rt1_ref_copy = np.array(Rt1_ref) Rt2 = mrcal.compose_Rt(Rt0_ref_copy, Rt1_ref_copy, out=Rt1_ref_copy) confirm_equal(Rt2, compose_Rt(Rt0_ref, Rt1_ref), msg='compose_Rt result written in-place to Rt1')
# (continuation of a rectified-system-construction call whose head is before
# this chunk)
el_fov_deg          = el_fov_deg,
pixels_per_deg_az   = -1./8.,
pixels_per_deg_el   = -1./4.,
rectification_model = lensmodel)

az0 = 0.
el0 = 0.

# The generated rectified models should satisfy mrcal's own validation; a
# validation failure raises, which we record as a test failure
try:
    mrcal.stereo._validate_models_rectified(models_rectified)
    testutils.confirm(True,
                      msg=f'Generated models pass validation ({lensmodel})')
except:
    # NOTE(review): a bare 'except' also swallows KeyboardInterrupt/SystemExit;
    # 'except Exception' would be safer here
    testutils.confirm(False,
                      msg=f'Generated models pass validation ({lensmodel})')

# Transform from the rectified system to camera0, and between the two
# rectified cameras
Rt_cam0_rect = mrcal.compose_Rt( model0.extrinsics_Rt_fromref(),
                                 models_rectified[0].extrinsics_Rt_toref())

Rt01_rectified = mrcal.compose_Rt( models_rectified[0].extrinsics_Rt_fromref(),
                                   models_rectified[1].extrinsics_Rt_toref())

testutils.confirm_equal(models_rectified[0].intrinsics()[0], lensmodel,
                        msg=f'model0 has the right lensmodel ({lensmodel})')
testutils.confirm_equal(models_rectified[1].intrinsics()[0], lensmodel,
                        msg=f'model1 has the right lensmodel ({lensmodel})')

testutils.confirm_equal(Rt_cam0_rect, mrcal.identity_Rt(),
                        msg=f'vanilla stereo has a vanilla geometry ({lensmodel})')

# The rectified baseline is the x component of the translation between the
# two rectified cameras
testutils.confirm_equal( Rt01_rectified[3,0],
                         nps.mag(rt01[3:]),
                         msg=f'vanilla stereo: baseline ({lensmodel})')
# (continuation of a synthetic-data-generation call whose head is before this
# chunk; this is the tail of the no-cache branch)
                       cull_left_of_center = args.cull_left_of_center,
                       allow_nonidentity_cam0_transform = False)
else:
    # Load previously-computed calibration data from the cache file instead of
    # regenerating it
    with open(cache_file, "rb") as f:
        (optimization_inputs_baseline,
         models_true,
         models_baseline,
         indices_frame_camintrinsics_camextrinsics,
         lensmodel,
         Nintrinsics,
         imagersizes,
         intrinsics_true,
         extrinsics_true_mounted,
         frames_true,
         observations_true,
         intrinsics_sampled,
         extrinsics_sampled_mounted,
         frames_sampled,
         calobject_warp_sampled) = pickle.load(f)

baseline_rt_ref_frame = optimization_inputs_baseline['frames_rt_toref']

icam0, icam1 = args.cameras

# True camera0 <- camera1 transform, built from the true extrinsics
Rt01_true = mrcal.compose_Rt(
    mrcal.Rt_from_rt(extrinsics_rt_fromref_true[icam0]),
    mrcal.invert_Rt(mrcal.Rt_from_rt(extrinsics_rt_fromref_true[icam1])))
Rt10_true = mrcal.invert_Rt(Rt01_true)

# The true triangulated points, represented in the local frame of each of the
# two cameras
# shape (Npoints,Ncameras,3)
p_triangulated_true_local = \
    nps.xchg( nps.cat( p_triangulated_true0,
                       mrcal.transform_point_Rt(Rt10_true,
                                                p_triangulated_true0) ),
              0, 1)

# Pixel coords at the perfect intersection
# shape (Npoints,Ncameras,2)
q_true = nps.xchg( np.array([ mrcal.project(p_triangulated_true_local[:,i,:],
                                            lensmodel,
                                            intrinsics_true[args.cameras[i]]) \
                              for i in range(2)]),
                   0, 1)
testutils.confirm_equal(np.std(x), pixel_uncertainty_stdev, eps=pixel_uncertainty_stdev * 0.1, msg="Residual have the expected distribution") # Checking the extrinsics. These aren't defined absolutely: each solve is free # to put the observed frames anywhere it likes. The projection-diff code # computes a transformation to address this. Here I simply look at the relative # transformations between cameras, which would cancel out any extra # transformations, AND since camera0 is fixed at the identity transformation, I # can simply look at each extrinsics transformation. for icam in range(1, len(models_ref)): Rt_extrinsics_err = \ mrcal.compose_Rt( models_solved[icam].extrinsics_Rt_fromref(), models_ref [icam].extrinsics_Rt_toref() ) testutils.confirm_equal( nps.mag(Rt_extrinsics_err[3, :]), 0.0, eps=0.05, msg=f"Recovered extrinsic translation for camera {icam}") testutils.confirm_equal( (np.trace(Rt_extrinsics_err[:3, :]) - 1) / 2., 1.0, eps=np.cos(1. * np.pi / 180.0), # 1 deg msg=f"Recovered extrinsic rotation for camera {icam}") Rt_frame_err = \ mrcal.compose_Rt( mrcal.Rt_from_rt(optimization_inputs['frames_rt_toref']),
def synthesize_board_observations(models,
                                  object_width_n, object_height_n,
                                  object_spacing, calobject_warp,
                                  rt_ref_boardcenter,
                                  rt_ref_boardcenter__noiseradius,
                                  Nframes,
                                  which = 'all-cameras-must-see-full-board'):
    r'''Produce synthetic chessboard observations

SYNOPSIS

    models = [mrcal.cameramodel("0.cameramodel"),
              mrcal.cameramodel("1.cameramodel"),]

    # shapes (Nframes, Ncameras, object_height_n, object_width_n, 2) and
    # (Nframes, 4, 3)
    q,Rt_ref_boardref = \
        mrcal.synthesize_board_observations( \
          models,

          # board geometry
          10,12,0.1,None,

          # mean board pose and the radius of the added uniform noise
          rt_ref_boardcenter,
          rt_ref_boardcenter__noiseradius,

          # How many frames we want
          100,

          which = 'some-cameras-must-see-half-board')

    # q now contains the synthetic pixel observations, but some of them will be
    # out of view. I construct an (x,y,weight) observations array, as expected
    # by the optimizer, and I set the weight for the out-of-view points to -1 to
    # tell the optimizer to ignore those points

    # Set the weights to 1 initially
    # shape (Nframes, Ncameras, object_height_n, object_width_n, 3)
    observations = nps.glue(q,
                            np.ones( q.shape[:-1] + (1,) ),
                            axis = -1)

    # shape (Ncameras, 1, 1, 2)
    imagersizes = nps.mv( nps.cat(*[ m.imagersize() for m in models ]),
                          -2, -4 )

    observations[ np.any( q              < 0, axis=-1 ), 2 ] = -1.
    observations[ np.any( q-imagersizes >= 0, axis=-1 ), 2 ] = -1.

Given a description of a calibration object and of the cameras observing it,
produces perfect pixel observations of the objects by those cameras. We return
a dense observation array: every corner observation from every chessboard pose
will be reported for every camera. Some of these observations MAY be
out-of-view, depending on the value of the 'which' argument; see description
below. The example above demonstrates how to mark such out-of-bounds
observations as outliers to tell the optimization to ignore these.

The "models" provides the intrinsics and extrinsics.

The calibration objects nominally have pose rt_ref_boardcenter in the
reference coordinate system, with each pose perturbed uniformly with radius
rt_ref_boardcenter__noiseradius. This is nonstandard since here I'm placing
the board origin at its center instead of the corner (as
mrcal.ref_calibration_object() does). But this is more appropriate to the
usage of this function. The returned Rt_ref_boardref transformation DOES use
the normal corner-referenced board geometry.

Returns the point observations and the chessboard poses that produced these
observations.

ARGUMENTS

- models: an array of mrcal.cameramodel objects, one for each camera we're
  simulating. This is the intrinsics and the extrinsics. Ncameras =
  len(models)

- object_width_n: the number of horizontal points in the calibration object
  grid

- object_height_n: the number of vertical points in the calibration object
  grid

- object_spacing: the distance between adjacent points in the calibration
  object. A square object is assumed, so the vertical and horizontal distances
  are assumed to be identical.

- calobject_warp: a description of the calibration board warping. None means
  "no warping": the object is flat. Otherwise this is an array of shape (2,).
  See the docs for ref_calibration_object() for the meaning of the values in
  this array.

- rt_ref_boardcenter: the nominal pose of the calibration object, in the
  reference coordinate system. This is an rt transformation from a
  center-referenced calibration object to the reference coordinate system

- rt_ref_boardcenter__noiseradius: the deviation-from-nominal for the
  chessboard pose for each frame. I add uniform noise to rt_ref_boardcenter,
  with each element sampled independently, with the radius given here.

- Nframes: how many frames of observations to return

- which: a string, defaulting to 'all-cameras-must-see-full-board'. Controls
  the requirements on the visibility of the returned points. Valid values:

  - 'all-cameras-must-see-full-board': We return only those chessboard poses
    that produce observations that are FULLY visible by ALL the cameras.

  - 'some-cameras-must-see-full-board': We return only those chessboard poses
    that produce observations that are FULLY visible by AT LEAST ONE camera.

  - 'all-cameras-must-see-half-board': We return only those chessboard poses
    that produce observations that are AT LEAST HALF visible by ALL the
    cameras.

  - 'some-cameras-must-see-half-board': We return only those chessboard poses
    that produce observations that are AT LEAST HALF visible by AT LEAST ONE
    camera.

RETURNED VALUES

We return a tuple:

- q: an array of shape (Nframes, Ncameras, object_height, object_width, 2)
  containing the pixel coordinates of the generated observations

- Rt_ref_boardref: an array of shape (Nframes, 4,3) containing the poses of
  the chessboards. This transforms the object returned by
  ref_calibration_object() to the pose that was projected, in the ref coord
  system

    '''

    # Can visualize results with this script:
    r'''
    r = np.array((30, 0, 0,), dtype=float) * np.pi/180.

    model = mrcal.cameramodel( intrinsics = ('LENSMODEL_PINHOLE',
                                             np.array((1000., 1000., 1000., 1000.,))),
                               imagersize = np.array((2000,2000)) )
    Rt_ref_boardref = \
        mrcal.synthesize_board_observations([model],
                                            5,20,0.1,None,
                                            nps.glue(r, np.array((0,0,3.)), axis=-1),
                                            np.array((0,0,0., 0,0,0)),
                                            1) [1]
    mrcal.show_geometry( models_or_extrinsics_rt_fromref = np.zeros((1,1,6), dtype=float),
                         frames_rt_toref                 = mrcal.rt_from_Rt(Rt_ref_boardref),
                         object_width_n                  = 20,
                         object_height_n                 = 5,
                         object_spacing                  = 0.1,
                         _set                            = 'xyplane 0',
                         wait                            = 1 )
    '''

    which_valid = ( 'all-cameras-must-see-full-board',
                    'some-cameras-must-see-full-board',
                    'all-cameras-must-see-half-board',
                    'some-cameras-must-see-half-board', )

    if not which in which_valid:
        raise Exception(f"'which' argument must be one of {which_valid}")

    Ncameras = len(models)

    # I move the board, and keep the cameras stationary.
    #
    # Camera coords: x,y with pixels, z forward
    # Board coords:  x,y in-plane. z forward (i.e. back towards the camera)

    # The center of the board is at the origin (ignoring warping)
    board_center = \
        np.array(( (object_width_n -1)*object_spacing/2.,
                   (object_height_n-1)*object_spacing/2.,
                   0 ))

    # Corner-referenced board grid, shifted so its center sits at the origin.
    # shape: (Nh,Nw,3)
    board_reference = \
        mrcal.ref_calibration_object(object_width_n,object_height_n,
                                     object_spacing,calobject_warp) - \
        board_center

    # Transformation from the board returned by ref_calibration_object() to
    # the one I use here. It's a shift to move the origin to the center of the
    # board
    Rt_boardref_origboardref = mrcal.identity_Rt()
    Rt_boardref_origboardref[3, :] = -board_center

    def get_observation_chunk():
        '''Make Nframes observations, and return them all, even the out-of-view ones'''

        # I compute the full random block in one shot. This is useful for
        # simulations that want to see identical poses when asking for N-1
        # random poses and when asking for the first N-1 of a set of N random
        # poses

        # shape (Nframes,6)
        randomblock = np.random.uniform(low=-1.0, high=1.0, size=(Nframes, 6))

        # Perturbed center-referenced board poses.
        # shape (Nframes,4,3)
        Rt_ref_boardref = \
            mrcal.Rt_from_rt( rt_ref_boardcenter +
                              randomblock * rt_ref_boardcenter__noiseradius )

        # Board points in the ref coord system.
        # shape (Nframes, Nh,Nw,3)
        boards_ref = mrcal.transform_point_Rt( # shape (Nframes, 1,1,4,3)
                                               nps.mv(Rt_ref_boardref, 0, -5),
                                               # shape ( Nh,Nw,3)
                                               board_reference)

        # I project full_board. Shape: (Nframes,Ncameras,Nh,Nw,2)
        q = \
            nps.mv( \
              nps.cat( \
                *[ mrcal.project( mrcal.transform_point_Rt(models[i].extrinsics_Rt_fromref(),
                                                           boards_ref),
                                  *models[i].intrinsics()) \
                   for i in range(Ncameras) ]),
                    0,1 )

        return q, Rt_ref_boardref

    def cull_out_of_view(q, Rt_ref_boardref, which):
        '''Keep only the frames that satisfy the visibility requirement in 'which' '''

        # q               has shape (Nframes,Ncameras,Nh,Nw,2)
        # Rt_ref_boardref has shape (Nframes,4,3)

        # I pick only those frames where at least one cameras sees the whole
        # board

        # In-bounds on the low side (x>=0 and y>=0).
        # shape (Nframes,Ncameras,Nh,Nw)
        mask_visible = (q[..., 0] >= 0) * (q[..., 1] >= 0)

        # ... and in-bounds on the high side, per-camera imager size
        for i in range(Ncameras):
            W, H = models[i].imagersize()
            mask_visible[:,i,...] *= \
                (q[:,i,:,:,0] <= W-1) * \
                (q[:,i,:,:,1] <= H-1)

        # Number of visible corners for each (frame,camera).
        # shape (Nframes, Ncameras)
        Nvisible = np.count_nonzero(mask_visible, axis=(-1, -2))

        Nh, Nw = q.shape[2:4]
        if which == 'all-cameras-must-see-full-board':
            iframe = np.all(Nvisible == Nh * Nw, axis=-1)
        elif which == 'some-cameras-must-see-full-board':
            iframe = np.any(Nvisible == Nh * Nw, axis=-1)
        elif which == 'all-cameras-must-see-half-board':
            iframe = np.all(Nvisible > Nh * Nw // 2, axis=-1)
        elif which == 'some-cameras-must-see-half-board':
            iframe = np.any(Nvisible > Nh * Nw // 2, axis=-1)
        else:
            raise Exception(
                "Unknown 'which' argument. This is a bug. I checked for the valid options at the top of this function"
            )

        # q               has shape (Nframes_inview,Ncameras,Nh,Nw,2)
        # Rt_ref_boardref has shape (Nframes_inview,4,3)
        return q[iframe, ...], Rt_ref_boardref[iframe, ...]

    # Accumulators, grown below until we have Nframes in-view frames.
    # shape (Nframes_sofar,Ncameras,Nh,Nw,2)
    q = np.zeros((0, Ncameras, object_height_n, object_width_n, 2),
                 dtype=float)
    # shape (Nframes_sofar,4,3)
    Rt_ref_boardref = np.zeros((0, 4, 3), dtype=float)

    # I keep creating data, until I get Nframes-worth of in-view observations.
    # NOTE(review): if 'which' can never be satisfied by the given geometry,
    # this loop never terminates
    while True:
        q_here, Rt_ref_boardref_here = get_observation_chunk()

        q_here, Rt_ref_boardref_here = \
            cull_out_of_view(q_here, Rt_ref_boardref_here, which)

        q = nps.glue(q, q_here, axis=-5)
        Rt_ref_boardref = nps.glue(Rt_ref_boardref, Rt_ref_boardref_here,
                                   axis=-3)
        if q.shape[0] >= Nframes:
            q               = q              [:Nframes, ...]
            Rt_ref_boardref = Rt_ref_boardref[:Nframes, ...]
            break

    # Convert back to the corner-referenced board poses before returning
    return q, mrcal.compose_Rt(Rt_ref_boardref, Rt_boardref_origboardref)
def image_transformation_map(model_from, model_to,
                             intrinsics_only                   = False,
                             distance                          = None,
                             plane_n                           = None,
                             plane_d                           = None,
                             mask_valid_intrinsics_region_from = False):
    r'''Compute a reprojection map between two models

SYNOPSIS

    model_orig = mrcal.cameramodel("xxx.cameramodel")
    image_orig = cv2.imread("image.jpg")

    model_pinhole = mrcal.pinhole_model_for_reprojection(model_orig,
                                                         fit = "corners")

    mapxy = mrcal.image_transformation_map(model_orig, model_pinhole,
                                           intrinsics_only = True)

    image_undistorted = mrcal.transform_image(image_orig, mapxy)

    # image_undistorted is now a pinhole-reprojected version of image_orig

Returns the transformation that describes a mapping

- from pixel coordinates of an image of a scene observed by model_to
- to pixel coordinates of an image of the same scene observed by model_from

This transformation can then be applied to a whole image by calling
mrcal.transform_image().

This function returns a transformation map in an (Nheight,Nwidth,2) array. The
image made by model_to will have shape (Nheight,Nwidth). Each pixel (x,y) in
this image corresponds to a pixel mapxy[y,x,:] in the image made by
model_from.

One application of this function is to validate the models in a stereo pair.
For instance, reprojecting one camera's image at distance=infinity should
produce exactly the same image that is observed by the other camera when
looking at very far objects, IF the intrinsics and rotation are correct. If
the images don't line up well, we know that some part of the models is off.
Similarly, we can use big planes (such as observations of the ground) and
plane_n, plane_d to validate.

This function has several modes of operation:

- intrinsics, extrinsics

  Used if not intrinsics_only and \
          plane_n is None     and \
          plane_d is None

  This is the default. For each pixel in the output, we use the full model to
  unproject a given distance out, and then use the full model to project back
  into the other camera.

- intrinsics only

  Used if intrinsics_only and \
          plane_n is None and \
          plane_d is None

  Similar, but the extrinsics are ignored. We unproject the pixels in one
  model, and project them into the other camera. The two camera coordinate
  systems are assumed to line up perfectly

- plane

  Used if plane_n is not None and
          plane_d is not None

  We map observations of a given plane in camera FROM coordinates to where
  this plane would be observed by camera TO. We unproject each pixel in one
  camera, compute the intersection point with the plane, and project that
  intersection point back to the other camera. This uses ALL the intrinsics,
  extrinsics and the plane representation. The plane is represented by a
  normal vector plane_n, and the distance to the normal plane_d. The plane is
  all points p such that inner(p,plane_n) = plane_d. plane_n does not need to
  be normalized; any scaling is compensated in plane_d.

ARGUMENTS

- model_from: the mrcal.cameramodel object describing the camera used to
  capture the input image. We always use the intrinsics. if not
  intrinsics_only: we use the extrinsics also

- model_to: the mrcal.cameramodel object describing the camera that would have
  captured the image we're producing. We always use the intrinsics. if not
  intrinsics_only: we use the extrinsics also

- intrinsics_only: optional boolean, defaulting to False. If False: we respect
  the relative transformation in the extrinsics of the camera models.

- distance: optional value, defaulting to None. Used only if not
  intrinsics_only. We reproject points in space a given distance out. If
  distance is None (the default), we look out to infinity. This is equivalent
  to using only the rotation component of the extrinsics, ignoring the
  translation.

- plane_n: optional numpy array of shape (3,); None by default. If given, we
  produce a transformation to map observations of a given plane to the same
  pixels in the source and target images. This argument describes the normal
  vector in the coordinate system of model_from. The plane is all points p
  such that inner(p,plane_n) = plane_d. plane_n does not need to be
  normalized; any scaling is compensated in plane_d. If given, plane_d should
  be given also, and intrinsics_only should be False. if given, we use the
  full intrinsics and extrinsics of both camera models

- plane_d: optional floating-point value; None by default. If given, we
  produce a transformation to map observations of a given plane to the same
  pixels in the source and target images. The plane is all points p such that
  inner(p,plane_n) = plane_d. plane_n does not need to be normalized; any
  scaling is compensated in plane_d. If given, plane_n should be given also,
  and intrinsics_only should be False. if given, we use the full intrinsics
  and extrinsics of both camera models

- mask_valid_intrinsics_region_from: optional boolean defaulting to False. If
  True, points outside the valid-intrinsics region in the FROM image are set
  to black, and thus do not appear in the output image

RETURNED VALUE

A numpy array of shape (Nheight,Nwidth,2) where Nheight and Nwidth represent
the imager dimensions of model_to. This array contains 32-bit floats, as
required by cv2.remap() (the function providing the internals of
mrcal.transform_image()). This array can be passed to mrcal.transform_image()

    '''

    # Argument validation: the three modes of operation are mutually exclusive
    if (plane_n is None and plane_d is not None) or \
       (plane_n is not None and plane_d is None):
        raise Exception(
            "plane_n and plane_d should both be None or neither should be None"
        )
    if plane_n is not None and \
       intrinsics_only:
        raise Exception(
            "We're looking at remapping a plane (plane_d, plane_n are not None), so intrinsics_only should be False"
        )

    if distance is not None and \
       (plane_n is not None or intrinsics_only):
        raise Exception(
            "'distance' makes sense only without plane_n/plane_d and without intrinsics_only"
        )

    if intrinsics_only:
        Rt_to_from = None
    else:
        # to-camera <- ref <- from-camera
        Rt_to_from = mrcal.compose_Rt(model_to.extrinsics_Rt_fromref(),
                                      model_from.extrinsics_Rt_toref())

    lensmodel_from, intrinsics_data_from = model_from.intrinsics()
    lensmodel_to,   intrinsics_data_to   = model_to.intrinsics()

    if re.match("LENSMODEL_OPENCV",lensmodel_from) and \
       lensmodel_to == "LENSMODEL_PINHOLE"         and \
       plane_n is None                             and \
       not mask_valid_intrinsics_region_from       and \
       distance is None:

        # This is a common special case. This branch works identically to the
        # other path (the other side of this "if" can always be used instead),
        # but the opencv-specific code is optimized and at one point ran faster
        # than the code on the other side.
        #
        # The mask_valid_intrinsics_region_from isn't implemented in this path.
        # It COULD be, then this faster path could be used
        import cv2

        fxy_from = intrinsics_data_from[0:2]
        cxy_from = intrinsics_data_from[2:4]
        cameraMatrix_from = np.array( ((fxy_from[0], 0, cxy_from[0]),
                                       (0, fxy_from[1], cxy_from[1]),
                                       (0, 0, 1)))

        fxy_to = intrinsics_data_to[0:2]
        cxy_to = intrinsics_data_to[2:4]
        cameraMatrix_to = np.array( ((fxy_to[0], 0, cxy_to[0]),
                                     (0, fxy_to[1], cxy_to[1]),
                                     (0, 0, 1)))

        output_shape = model_to.imagersize()
        distortion_coeffs = intrinsics_data_from[4:]

        if Rt_to_from is not None:
            R_to_from = Rt_to_from[:3, :]
            # trace(R)==3 only for the identity rotation (up to roundoff)
            if np.trace(R_to_from) > 3. - 1e-12:
                R_to_from = None  # identity, so I pass None
        else:
            R_to_from = None

        # cv2.initUndistortRectifyMap() returns two separate (H,W) maps;
        # glue them into the single (H,W,2) map this function documents
        return nps.glue( *[ nps.dummy(arr,-1) for arr in \
                            cv2.initUndistortRectifyMap(cameraMatrix_from, distortion_coeffs,
                                                        R_to_from,
                                                        cameraMatrix_to, tuple(output_shape),
                                                        cv2.CV_32FC1)],
                         axis = -1)

    W_from, H_from = model_from.imagersize()
    W_to,   H_to   = model_to.imagersize()

    # shape: (Nheight,Nwidth,2). Contains (x,y) rows
    grid = np.ascontiguousarray(nps.mv(
        nps.cat(*np.meshgrid(np.arange(W_to), np.arange(H_to))), 0, -1),
                                dtype=float)

    # Observation vectors in the to-camera coord system
    v = mrcal.unproject(grid, lensmodel_to, intrinsics_data_to)

    if plane_n is not None:

        R_to_from = Rt_to_from[:3, :]
        t_to_from = Rt_to_from[3, :]

        # The homography definition. Derived in many places. For instance in
        # "Motion and structure from motion in a piecewise planar environment"
        # by Olivier Faugeras, F. Lustman.
        A_to_from = plane_d * R_to_from + nps.outer(t_to_from, plane_n)
        A_from_to = np.linalg.inv(A_to_from)
        v = nps.matmult(v, nps.transpose(A_from_to))

    else:
        if Rt_to_from is not None:
            if distance is not None:
                # Reproject at the given finite distance: normalize v, scale,
                # and transform with the full Rt
                v = mrcal.transform_point_Rt(
                    mrcal.invert_Rt(Rt_to_from),
                    v / nps.dummy(nps.mag(v), -1) * distance)
            else:
                # distance==infinity: only the rotation matters.
                # matmult(v, R) == matmult(transpose(R), v) for row-vectors v:
                # this applies the inverse (from<-to) rotation
                R_to_from = Rt_to_from[:3, :]
                v = nps.matmult(v, R_to_from)

    mapxy = mrcal.project(v, lensmodel_from, intrinsics_data_from)

    if mask_valid_intrinsics_region_from:

        # Using matplotlib to compute the out-of-bounds points. It doesn't
        # support broadcasting, so I do that manually with a clump/reshape
        from matplotlib.path import Path
        region = Path(model_from.valid_intrinsics_region())
        is_inside = region.contains_points(nps.clump(mapxy, n=2)).reshape(
            mapxy.shape[:2])
        mapxy[~is_inside, :] = -1

    return mapxy.astype(np.float32)
testutils.confirm_equal(t_fit, t, eps=1e-2, msg='Procrustes fit t')

# Fit a rotation aligning noisy rotated vectors back to the originals
R_fit_vectors = \
    mrcal.align_procrustes_vectors_R01(nps.matmult( p, nps.transpose(R) ) + noise,
                                       p)
testutils.confirm_equal(R_fit_vectors, R, eps=1e-2,
                        msg='Procrustes fit R (vectors)')

# Round-tripping Rt -> rt -> invert -> Rt -> invert must recover Rt
testutils.confirm_equal(mrcal.invert_Rt(
    mrcal.Rt_from_rt(mrcal.invert_rt(mrcal.rt_from_Rt(Rt)))),
                        Rt,
                        msg='Rt/rt and invert')

# Composing a transform with its inverse must yield the identity
testutils.confirm_equal(mrcal.compose_Rt(Rt, mrcal.invert_Rt(Rt)),
                        nps.glue(np.eye(3), np.zeros((3, )), axis=-2),
                        msg='compose_Rt')
testutils.confirm_equal(mrcal.compose_rt(mrcal.rt_from_Rt(Rt),
                                         mrcal.invert_rt(mrcal.rt_from_Rt(Rt))),
                        np.zeros((6, )),
                        msg='compose_rt')

testutils.confirm_equal(mrcal.identity_Rt(),
                        nps.glue(np.eye(3), np.zeros((3, )), axis=-2),
                        msg='identity_Rt')
# (this statement continues past the end of this chunk)
testutils.confirm_equal(mrcal.identity_rt(), np.zeros((6, )),
# (the 'try:' this handler belongs to starts before this chunk)
# NOTE(review): bare 'except: pass' silently swallows ALL errors, including
# KeyboardInterrupt — presumably a best-effort cleanup; confirm intent
except:
    pass

# Generate the rectification maps
if kind != "narrow":
    azel_kwargs = dict(az_fov_deg=145., el_fov_deg=135., el0_deg=5)
else:
    azel_kwargs = dict(az_fov_deg=80., el_fov_deg=80., el0_deg=0)
rectification_maps, cookie = \
    mrcal.stereo_rectify_prepare(models, **azel_kwargs)

# Display the geometry of the two cameras in the stereo pair, and of the
# rectified system
Rt_cam0_stereo = cookie['Rt_cam0_stereo']
Rt_cam0_ref    = models[0].extrinsics_Rt_fromref()

# stereo <- ref transform, for passing to show_geometry() alongside the models
Rt_stereo_ref = mrcal.compose_Rt(mrcal.invert_Rt(Rt_cam0_stereo),
                                 Rt_cam0_ref)
rt_stereo_ref = mrcal.rt_from_Rt(Rt_stereo_ref)

# One hardcopy per output format; 'gp' dumps the gnuplot script itself
terminal = dict(
    pdf='pdf size 8in,6in noenhanced solid color font ",12"',
    svg='svg size 800,600 noenhanced solid dynamic font ",14"',
    png='pngcairo size 1024,768 transparent noenhanced crop font ",12"',
    gp='gp')
for extension in terminal.keys():
    mrcal.show_geometry(
        models + [rt_stereo_ref],
        ("camera0", "camera1", "stereo"),
        show_calobjects=False,
        _set=('xyplane at -0.5', 'view 60,30,1.7'),
        terminal=terminal[extension],
        hardcopy=f'/tmp/stereo-geometry-{kind}.{extension}')
def stereo_rectify_prepare(models,
                           az_fov_deg,
                           el_fov_deg,
                           az0_deg           = None,
                           el0_deg           = 0,
                           pixels_per_deg_az = None,
                           pixels_per_deg_el = None):

    r'''Precompute everything needed for stereo rectification and matching

SYNOPSIS

    import sys
    import mrcal
    import cv2
    import numpy as np

    # Read commandline arguments: model0 model1 image0 image1
    models = [ mrcal.cameramodel(sys.argv[1]),
               mrcal.cameramodel(sys.argv[2]), ]

    images = [ cv2.imread(sys.argv[i]) \
               for i in (3,4) ]

    # Prepare the stereo system
    rectification_maps,cookie = \
        mrcal.stereo_rectify_prepare(models,
                                     az_fov_deg = 120,
                                     el_fov_deg = 100)

    # Visualize the geometry of the two cameras and of the rotated stereo
    # coordinate system
    Rt_cam0_ref    = models[0].extrinsics_Rt_fromref()
    Rt_cam0_stereo = cookie['Rt_cam0_stereo']
    Rt_stereo_ref  = mrcal.compose_Rt( mrcal.invert_Rt(Rt_cam0_stereo),
                                       Rt_cam0_ref )
    rt_stereo_ref  = mrcal.rt_from_Rt(Rt_stereo_ref)

    mrcal.show_geometry( models + [ rt_stereo_ref ],
                         ( "camera0", "camera1", "stereo" ),
                         show_calobjects = False,
                         wait            = True )

    # Rectify the images
    images_rectified = \
      [ mrcal.transform_image(images[i], rectification_maps[i]) \
        for i in range(2) ]

    cv2.imwrite('/tmp/rectified0.jpg', images_rectified[0])
    cv2.imwrite('/tmp/rectified1.jpg', images_rectified[1])

    # Find stereo correspondences using OpenCV
    block_size = 3
    max_disp   = 160 # in pixels
    stereo = \
        cv2.StereoSGBM_create(minDisparity      = 0,
                              numDisparities    = max_disp,
                              blockSize         = block_size,
                              P1                = 8 *3*block_size*block_size,
                              P2                = 32*3*block_size*block_size,
                              uniquenessRatio   = 5,
                              disp12MaxDiff     = 1,
                              speckleWindowSize = 50,
                              speckleRange      = 1)
    disparity16 = stereo.compute(*images_rectified) # in pixels*16

    cv2.imwrite('/tmp/disparity.png',
                mrcal.apply_color_map(disparity16,
                                      0, max_disp*16.))

    # Convert the disparities to range to camera0
    r = mrcal.stereo_range( disparity16.astype(np.float32) / 16.,
                            **cookie )

    cv2.imwrite('/tmp/range.png', mrcal.apply_color_map(r, 5, 1000))

This function does the initial computation required to perform stereo matching,
and to get ranges from a stereo pair. It computes

- the pose of the rectified stereo coordinate system

- the azimuth/elevation grid used in the rectified images

- the rectification maps used to transform images into the rectified space

Using the results of one call to this function we can compute the stereo
disparities of many pairs of synchronized images.

This function is generic: the two cameras may have any lens models, any
resolution and any geometry. They don't even have to match. As long as there's
some non-zero baseline and some overlapping views, we can set up stereo matching
using this function.

The input images are transformed into a "rectified" space. Geometrically, the
rectified coordinate system sits at the origin of camera0, with a rotation. The
axes of the rectified coordinate system:

- x: from the origin of camera0 to the origin of camera1 (the baseline
  direction)

- y: completes the system from x,z

- z: the "forward" direction of the two cameras, with the component parallel to
  the baseline subtracted off

In a nominal geometry (the two cameras are square with each other, camera1
strictly to the right of camera0), the rectified coordinate system exactly
matches the coordinate system of camera0. The above formulation supports any
geometry, however, including vertical and/or forward/backward shifts. Vertical
stereo is supported.

Rectified images represent 3D planes intersecting the origins of the two
cameras. The tilt of each plane is the "elevation". While the left/right
direction inside each plane is the "azimuth". We generate rectified images where
each pixel coordinate represents (x = azimuth, y = elevation). Thus each row
scans the azimuths in a particular elevation, and thus each row in the two
rectified images represents the same plane in 3D, and matching features in each
row can produce a stereo disparity and a range.

In the rectified system, elevation is a rotation along the x axis, while azimuth
is a rotation normal to the resulting tilted plane.

We produce rectified images whose pixel coordinates are linear with azimuths and
elevations. This means that the azimuth angular resolution is constant
everywhere, even at the edges of a wide-angle image.

We return a set of transformation maps and a cookie. The maps can be used to
generate rectified images. These rectified images can be processed by any
stereo-matching routine to generate a disparity image. To interpret the
disparity image, call stereo_unproject() or stereo_range() with the cookie
returned here.

The cookie is a Python dict that describes the rectified space. It is guaranteed
to have the following keys:

- Rt_cam0_stereo: an Rt transformation to map a representation of points in the
  rectified coordinate system to a representation in the camera0 coordinate
  system

- baseline: the distance between the two cameras

- az_row: an array of shape (Naz,) describing the azimuths in each row of the
  disparity image

- el_col: an array of shape (Nel,1) describing the elevations in each column of
  the disparity image

ARGUMENTS

- models: an iterable of two mrcal.cameramodel objects representing the cameras
  in the stereo system. Any sane combination of lens models and resolutions and
  geometries is valid

- az_fov_deg: required value for the azimuth (along-the-baseline) field-of-view
  of the desired rectified view, in degrees

- el_fov_deg: required value for the elevation (across-the-baseline)
  field-of-view of the desired rectified view, in degrees

- az0_deg: optional value for the azimuth center of the rectified images. This
  is especially significant in a camera system with some forward/backward shift.
  That causes the baseline to no longer be perpendicular with the view axis of
  the cameras, and thus azimuth = 0 is no longer at the center of the input
  images. If omitted, we compute the center azimuth that aligns with the center
  of the cameras' view

- el0_deg: optional value for the elevation center of the rectified system.
  Defaults to 0.

- pixels_per_deg_az: optional value for the azimuth resolution of the rectified
  image. If omitted (or None), we use the resolution of the input image at
  (azimuth, elevation) = 0. If a resolution of <0 is requested, we use this as a
  scale factor on the resolution of the input image. For instance, to downsample
  by a factor of 2, pass pixels_per_deg_az = -0.5

- pixels_per_deg_el: same as pixels_per_deg_az but in the elevation direction

RETURNED VALUES

We return a tuple

- transformation maps: a tuple of length-2 containing transformation maps for
  each camera. Each map can be used to mrcal.transform_image() images to the
  rectified space

- cookie: a dict describing the rectified space. Intended as input to
  stereo_unproject() and stereo_range(). See the description above for more
  detail

    '''

    if len(models) != 2:
        raise Exception("I need exactly 2 camera models")

    # NOTE: normalizes in place AND returns its argument. Several spots below
    # rely on the in-place mutation
    def normalize(v):
        v /= nps.mag(v)
        return v

    def remove_projection(a, proj_base):
        r'''Returns the normalized component of a orthogonal to proj_base

        proj_base assumed normalized'''
        v = a - nps.inner(a,proj_base)*proj_base
        return normalize(v)

    ######## Compute the geometry of the rectified stereo system. This is a
    ######## rotation, centered at camera0. More or less we have axes:
    ########
    ######## x: from camera0 to camera1
    ######## y: completes the system from x,z
    ######## z: component of the cameras' viewing direction
    ########    normal to the baseline
    Rt_cam0_ref = models[0].extrinsics_Rt_fromref()
    Rt01 = mrcal.compose_Rt( Rt_cam0_ref,
                             models[1].extrinsics_Rt_toref())
    Rt10 = mrcal.invert_Rt(Rt01)

    # Rotation relating camera0 coords to the rectified camera coords. I fill in
    # each row separately
    R_stereo_cam0 = np.zeros((3,3), dtype=float)

    # right/down/forward are VIEWS into the rows of R_stereo_cam0; writing
    # right[:] etc below fills in the rotation matrix in place
    right   = R_stereo_cam0[0,:]
    down    = R_stereo_cam0[1,:]
    forward = R_stereo_cam0[2,:]

    # "right" of the rectified coord system: towards the origin of camera1 from
    # camera0, in camera0 coords
    right[:] = Rt01[3,:]
    baseline = nps.mag(right)
    right   /= baseline

    # "forward" for each of the two cameras, in the cam0 coord system
    forward0 = np.array((0,0,1.))
    forward1 = Rt01[:3,2]

    # "forward" of the rectified coord system, in camera0 coords. The mean of
    # the two non-right "forward" directions
    forward[:] = normalize( ( remove_projection(forward0,right) +
                              remove_projection(forward1,right) ) / 2. )

    # "down" of the rectified coord system, in camera0 coords. Completes the
    # right,down,forward coordinate system
    down[:] = np.cross(forward,right)

    R_cam0_stereo = nps.transpose(R_stereo_cam0)



    ######## Done with the geometry! Now to get the az/el grid. I need to figure
    ######## out the resolution and the extents

    if az0_deg is not None:
        az0 = az0_deg * np.pi/180.

    else:
        # In the rectified system az=0 sits perpendicular to the baseline.
        # Normally the cameras are looking out perpendicular to the baseline
        # also, so I center my azimuth samples around 0 to match the cameras'
        # field of view. But what if the geometry isn't square, and one camera
        # is behind the other? Like this:
        #
        #    camera
        #     view
        #       ^
        #       |
        #     \ | /
        #      \_/
        #       .     /
        #       .    /az=0
        #       .   /
        #       .  /
        #  baseline
        #       .
        #       . \ /
        #       .  \_/
        #
        # Here the center-of-view axis of each camera is not at all
        # perpendicular to the baseline. Thus I compute the mean "forward"
        # direction of the cameras in the rectified system, and set that as the
        # center azimuth az0.
        v0 = nps.matmult( forward0, R_cam0_stereo ).ravel()
        v1 = nps.matmult( forward1, R_cam0_stereo ).ravel()
        # flatten out the y component: azimuth is measured in the y=0 plane
        v0[1] = 0.0
        v1[1] = 0.0
        normalize(v0)
        normalize(v1)
        v = v0 + v1
        az0 = np.arctan2(v[0],v[2])

    el0 = el0_deg * np.pi/180.

    ####### Rectified image resolution
    if pixels_per_deg_az is None or pixels_per_deg_az < 0 or \
       pixels_per_deg_el is None or pixels_per_deg_el < 0:
        # I need to compute the resolution of the rectified images. I try to
        # match the resolution of the cameras. I just look at camera0. If you
        # have different cameras, pass in pixels_per_deg yourself :)
        #
        # I look at the center of the stereo field of view. There I have q =
        # project(v) where v is a unit projection vector. I compute dq/dth where
        # th is an angular perturbation applied to v.

        def rotation_any_v_to_z(v):
            r'''Return any rotation matrix that maps the given unit vector v to [0,0,1]'''
            z = v
            if np.abs(v[0]) < .9:
                x = np.array((1,0,0))
            else:
                x = np.array((0,1,0))
            x -= nps.inner(x,v)*v
            x /= nps.mag(x)
            y = np.cross(z,x)
            return nps.cat(x,y,z)

        v, dv_dazel = stereo_unproject(az0, el0, get_gradients = True)
        v0          = mrcal.rotate_point_R(R_cam0_stereo, v)
        dv0_dazel   = nps.matmult(R_cam0_stereo, dv_dazel)

        _,dq_dv0,_ = mrcal.project(v0, *models[0].intrinsics(), get_gradients = True)

        # I rotate my v to a coordinate system where u = rotate(v) is [0,0,1].
        # Then u = [a,b,0] are all orthogonal to v. So du/dth = [cos, sin, 0].
        # I then have dq/dth = dq/dv dv/du [cos, sin, 0]t
        # ---> dq/dth = dq/dv dv/du[:,:2] [cos, sin]t = M [cos,sin]t
        #
        # norm2(dq/dth) = [cos,sin] MtM [cos,sin]t is then an ellipse with the
        # eigenvalues of MtM giving me the best and worst sensitivities. I can
        # use mrcal.worst_direction_stdev() to find the densest direction. But I
        # actually know the directions I care about, so I evaluate them
        # independently for the az and el directions

        # Ruv = rotation_any_v_to_z(v0)
        # M = nps.matmult(dq_dv0, nps.transpose(Ruv[:2,:]))
        # # I pick the densest direction: highest |dq/dth|
        # pixels_per_rad = mrcal.worst_direction_stdev( nps.matmult( nps.transpose(M),M) )

        if pixels_per_deg_az is None or pixels_per_deg_az < 0:
            dq_daz = nps.inner( dq_dv0, dv0_dazel[:,0] )
            pixels_per_rad_az_have = nps.mag(dq_daz)

            if pixels_per_deg_az is not None:
                # negative pixels_per_deg_az requested means I use the requested
                # value as a scaling
                pixels_per_deg_az = -pixels_per_deg_az * pixels_per_rad_az_have*np.pi/180.
            else:
                pixels_per_deg_az = pixels_per_rad_az_have*np.pi/180.

        if pixels_per_deg_el is None or pixels_per_deg_el < 0:
            dq_del = nps.inner( dq_dv0, dv0_dazel[:,1] )
            pixels_per_rad_el_have = nps.mag(dq_del)

            if pixels_per_deg_el is not None:
                # negative pixels_per_deg_el requested means I use the requested
                # value as a scaling
                pixels_per_deg_el = -pixels_per_deg_el * pixels_per_rad_el_have*np.pi/180.
            else:
                pixels_per_deg_el = pixels_per_rad_el_have*np.pi/180.

    Naz = round(az_fov_deg*pixels_per_deg_az)
    Nel = round(el_fov_deg*pixels_per_deg_el)

    # Adjust the fov to keep the requested resolution and pixel counts
    az_fov_radius_deg = Naz / (2.*pixels_per_deg_az)
    el_fov_radius_deg = Nel / (2.*pixels_per_deg_el)

    # shape (Naz,)
    az = np.linspace(az0 - az_fov_radius_deg*np.pi/180.,
                     az0 + az_fov_radius_deg*np.pi/180.,
                     Naz)
    # shape (Nel,1)
    el = nps.dummy( np.linspace(el0 - el_fov_radius_deg*np.pi/180.,
                                el0 + el_fov_radius_deg*np.pi/180.,
                                Nel),
                    -1 )

    # v has shape (Nel,Naz,3)
    v = stereo_unproject(az, el)

    # rotate the rectified-space observation vectors into each camera's frame
    v0 = nps.matmult( nps.dummy(v,  -2), R_stereo_cam0 )[...,0,:]
    v1 = nps.matmult( nps.dummy(v0, -2), Rt01[:3,:]    )[...,0,:]

    cookie = \
        dict( Rt_cam0_stereo    = nps.glue(R_cam0_stereo, np.zeros((3,)), axis=-2),
              baseline          = baseline,
              az_row            = az,
              el_col            = el,

              # The caller should NOT assume these are available in the cookie:
              # some other rectification scheme may not produce linear az/el
              # maps
              pixels_per_deg_az = pixels_per_deg_az,
              pixels_per_deg_el = pixels_per_deg_el,
            )

    return \
        (mrcal.project( v0, *models[0].intrinsics()).astype(np.float32), \
         mrcal.project( v1, *models[1].intrinsics()).astype(np.float32)), \
        cookie
def image_transformation_map(model_from, model_to,
                             use_rotation                      = False,
                             plane_n                           = None,
                             plane_d                           = None,
                             mask_valid_intrinsics_region_from = False):

    r'''Compute a reprojection map between two models

SYNOPSIS

    model_orig = mrcal.cameramodel("xxx.cameramodel")
    image_orig = cv2.imread("image.jpg")

    model_pinhole = mrcal.pinhole_model_for_reprojection(model_orig,
                                                         fit = "corners")

    mapxy = mrcal.image_transformation_map(model_orig, model_pinhole)

    image_undistorted = mrcal.transform_image(image_orig, mapxy)

    # image_undistorted is now a pinhole-reprojected version of image_orig

Returns the transformation that describes a mapping

- from pixel coordinates of an image of a scene observed by model_to
- to pixel coordinates of an image of the same scene observed by model_from

This transformation can then be applied to a whole image by calling
mrcal.transform_image().

This function returns a transformation map in an (Nheight,Nwidth,2) array. The
image made by model_to will have shape (Nheight,Nwidth). Each pixel (x,y) in
this image corresponds to a pixel mapxy[y,x,:] in the image made by model_from.

This function has 3 modes of operation:

- intrinsics-only

  This is the default. Selected if

  - use_rotation = False
  - plane_n      = None
  - plane_d      = None

  All of the extrinsics are ignored. If the two cameras have the same
  orientation, then their observations of infinitely-far-away objects will line
  up exactly

- rotation

  This can be selected explicitly with

  - use_rotation = True
  - plane_n      = None
  - plane_d      = None

  Here we use the rotation component of the relative extrinsics. The relative
  translation is impossible to use without knowing what we're looking at, so IT
  IS ALWAYS IGNORED. If the relative orientation in the models matches reality,
  then the two cameras' observations of infinitely-far-away objects will line up
  exactly

- plane

  This is selected if

  - use_rotation = True
  - plane_n is not None
  - plane_d is not None

  We map observations of a given plane in camera FROM coordinates to where this
  plane would be observed by camera TO. This uses ALL the intrinsics, extrinsics
  and the plane representation. If all of these are correct, the observations of
  this plane would line up exactly in the remapped-camera-from image and the
  camera-to image. The plane is represented in camera-from coordinates by a
  normal vector plane_n, and the distance to the normal plane_d. The plane is
  all points p such that

      inner(p,plane_n) = plane_d

  plane_n does not need to be normalized; any scaling is compensated in plane_d.

ARGUMENTS

- model_from: the mrcal.cameramodel object describing the camera used to capture
  the input image

- model_to: the mrcal.cameramodel object describing the camera that would have
  captured the image we're producing

- use_rotation: optional boolean, defaulting to False. If True: we respect the
  relative rotation in the extrinsics of the camera models.

- plane_n: optional numpy array of shape (3,); None by default. If given, we
  produce a transformation to map observations of a given plane to the same
  pixels in the source and target images. This argument describes the normal
  vector in the coordinate system of model_from. The plane is all points p such
  that

      inner(p,plane_n) = plane_d

  plane_n does not need to be normalized; any scaling is compensated in plane_d.
  If given, plane_d should be given also, and use_rotation should be True. if
  given, we use the full intrinsics and extrinsics of both camera models

- plane_d: optional floating-point value; None by default. If given, we produce
  a transformation to map observations of a given plane to the same pixels in
  the source and target images. The plane is all points p such that

      inner(p,plane_n) = plane_d

  plane_n does not need to be normalized; any scaling is compensated in plane_d.
  If given, plane_n should be given also, and use_rotation should be True. if
  given, we use the full intrinsics and extrinsics of both camera models

- mask_valid_intrinsics_region_from: optional boolean defaulting to False. If
  True, points outside the valid-intrinsics region in the FROM image are set to
  black, and thus do not appear in the output image

RETURNED VALUE

A numpy array of shape (Nheight,Nwidth,2) where Nheight and Nwidth represent the
imager dimensions of model_to. This array contains 32-bit floats, as required by
cv2.remap() (the function providing the internals of mrcal.transform_image()).
This array can be passed to mrcal.transform_image()

    '''

    if (plane_n is      None and plane_d is not None) or \
       (plane_n is not None and plane_d is      None):
        raise Exception("plane_n and plane_d should both be None or neither should be None")
    if plane_n is not None and plane_d is not None and \
       not use_rotation:
        raise Exception("We're looking at remapping a plane (plane_d, plane_n are not None), so use_rotation should be True")

    Rt_to_from = None
    if use_rotation:
        Rt_to_r    = model_to.  extrinsics_Rt_fromref()
        Rt_r_from  = model_from.extrinsics_Rt_toref()
        Rt_to_from = mrcal.compose_Rt(Rt_to_r, Rt_r_from)

    lensmodel_from,intrinsics_data_from = model_from.intrinsics()
    lensmodel_to,  intrinsics_data_to   = model_to.  intrinsics()

    if re.match("LENSMODEL_OPENCV",lensmodel_from) and \
       lensmodel_to == "LENSMODEL_PINHOLE"         and \
       plane_n is None                             and \
       not mask_valid_intrinsics_region_from:

        # This is a common special case. This branch works identically to the
        # other path (the other side of this "if" can always be used instead),
        # but the opencv-specific code is optimized and at one point ran faster
        # than the code on the other side.
        #
        # The mask_valid_intrinsics_region_from isn't implemented in this path.
        # It COULD be, then this faster path could be used
        fxy_from = intrinsics_data_from[0:2]
        cxy_from = intrinsics_data_from[2:4]
        cameraMatrix_from = np.array(((fxy_from[0],           0, cxy_from[0]),
                                      (          0, fxy_from[1], cxy_from[1]),
                                      (          0,           0,           1)))

        fxy_to = intrinsics_data_to[0:2]
        cxy_to = intrinsics_data_to[2:4]
        cameraMatrix_to = np.array(((fxy_to[0],         0, cxy_to[0]),
                                    (        0, fxy_to[1], cxy_to[1]),
                                    (        0,         0,         1)))

        output_shape = model_to.imagersize()
        distortion_coeffs = intrinsics_data_from[4: ]

        if Rt_to_from is not None:
            R_to_from = Rt_to_from[:3,:]
            if np.trace(R_to_from) > 3. - 1e-12:
                R_to_from = None # identity, so I pass None
        else:
            R_to_from = None

        return nps.glue( *[ nps.dummy(arr,-1) for arr in \
                            cv2.initUndistortRectifyMap(cameraMatrix_from, distortion_coeffs,
                                                        R_to_from,
                                                        cameraMatrix_to, tuple(output_shape),
                                                        cv2.CV_32FC1)],
                         axis = -1)

    W_from,H_from = model_from.imagersize()
    W_to,  H_to   = model_to.  imagersize()

    # shape: (Nheight,Nwidth,2). Contains (x,y) rows
    grid = np.ascontiguousarray(nps.mv(nps.cat(*np.meshgrid(np.arange(W_to),
                                                            np.arange(H_to))),
                                       0,-1),
                                dtype = float)
    if lensmodel_to == "LENSMODEL_PINHOLE":
        # Faster path for the unproject. Nice, simple closed-form solution
        fxy_to = intrinsics_data_to[0:2]
        cxy_to = intrinsics_data_to[2:4]
        v = np.zeros( (grid.shape[0], grid.shape[1], 3), dtype=float)
        v[..., :2] = (grid-cxy_to)/fxy_to
        v[..., 2]  = 1
    elif lensmodel_to == "LENSMODEL_STEREOGRAPHIC":
        # Faster path for the unproject. Nice, simple closed-form solution
        v = mrcal.unproject_stereographic(grid, *intrinsics_data_to[:4])
    else:
        v = mrcal.unproject(grid, lensmodel_to, intrinsics_data_to)

    if plane_n is not None:

        R_to_from = Rt_to_from[:3,:]
        t_to_from = Rt_to_from[ 3,:]

        # The homography definition. Derived in many places. For instance in
        # "Motion and structure from motion in a piecewise planar environment"
        # by Olivier Faugeras, F. Lustman.
        A_to_from = plane_d * R_to_from + nps.outer(t_to_from, plane_n)
        A_from_to = np.linalg.inv(A_to_from)
        v = nps.matmult( v, nps.transpose(A_from_to) )

    else:
        if Rt_to_from is not None:
            R_to_from = Rt_to_from[:3,:]
            if np.trace(R_to_from) < 3. - 1e-12:
                # rotation isn't identity. apply
                v = nps.matmult(v, R_to_from)

    mapxy = mrcal.project( v, lensmodel_from, intrinsics_data_from )

    if mask_valid_intrinsics_region_from:

        # Using matplotlib to compute the out-of-bounds points. It doesn't
        # support broadcasting, so I do that manually with a clump/reshape
        from matplotlib.path import Path
        region = Path(model_from.valid_intrinsics_region())
        is_inside = region.contains_points(nps.clump(mapxy,n=2)).reshape(mapxy.shape[:2])
        mapxy[ ~is_inside, :] = -1

    return mapxy.astype(np.float32)
# NOTE(review): fragment of a poseutils in-place-output test. Rt, J_r, Rt0_ref,
# rt0_ref, Rt1_ref, rt1_ref, the out* preallocated arrays, grad() and the
# reference implementations (invert_Rt, invert_rt, compose_Rt, compose_rt,
# R_from_r) are all defined earlier, outside this chunk — TODO confirm.
J_r_ref = grad(R_from_r, rt0_ref[:3])
confirm_equal(Rt, Rt0_ref, msg='Rt_from_rt result')
confirm_equal(J_r, J_r_ref, msg='Rt_from_rt grad result')

# invert_Rt / invert_rt with preallocated output arrays must match the
# reference implementations
Rt = mrcal.invert_Rt(Rt0_ref, out=out43)
confirm_equal(Rt, invert_Rt(Rt0_ref), msg='invert_Rt result')

rt = mrcal.invert_rt(rt0_ref, out=out6)
confirm_equal(rt, invert_rt(rt0_ref), msg='invert_rt result')

# same, with gradients; the gradient is checked against a numerical gradient
rt, drt_drt = mrcal.invert_rt(rt0_ref, get_gradients=True, out=(out6, out66))
drt_drt_ref = grad(invert_rt, rt0_ref)
confirm_equal(rt, invert_rt(rt0_ref), msg='invert_rt with grad result')
confirm_equal(drt_drt, drt_drt_ref, msg='invert_rt drt/drt result')

Rt2 = mrcal.compose_Rt(Rt0_ref, Rt1_ref, out=out43)
confirm_equal(Rt2, compose_Rt(Rt0_ref, Rt1_ref), msg='compose_Rt result')

rt2 = mrcal.compose_rt(rt0_ref, rt1_ref, out=out6)
confirm_equal(rt2, compose_rt(rt0_ref, rt1_ref), msg='compose_rt result')

# _compose_rt() is not exercised by the python library, so I explicitly test it
# here
rt2 = _poseutils._compose_rt(rt0_ref, rt1_ref, out=out6)
confirm_equal(rt2, compose_rt(rt0_ref, rt1_ref),
              msg='compose_rt result; calling _compose_rt() directly')

rt2,drt2_drt0,drt2_drt1 = \
    mrcal.compose_rt(rt0_ref, rt1_ref,
                     get_gradients=True,
                     out = (out6, out66, out66a))
# NOTE(review): fragment of a triangulation-uncertainty test. models_baseline,
# Npoints, p_triangulated_true0 and args are defined earlier, outside this
# chunk — TODO confirm.

# Two camera pairs, selected by index into models_baseline
icameras = ((2, 1), (1, 0))
Nmodelpairs = len(icameras)

# shape (Nmodelpairs,2)
M = [[models_baseline[i] for i in icameras[0]],
     [models_baseline[i] for i in icameras[1]]]

# Synthesize the pixel observations of each true 3D point in both cameras of
# each pair. shape (Npoints,Nmodelpairs, 2,2)
q = np.zeros((Npoints, Nmodelpairs, 2, 2), dtype=float)
for ipt in range(Npoints):
    for imp in range(Nmodelpairs):
        # camera 0 of the pair observes the point directly (point is
        # represented in its frame)
        q[ipt, imp, 0, :] = mrcal.project(p_triangulated_true0[ipt],
                                          *M[imp][0].intrinsics())
        # camera 1 of the pair: transform the point from pair-camera-0 coords
        # into pair-camera-1 coords before projecting
        q[ipt, imp, 1, :] = mrcal.project(
            mrcal.transform_point_Rt(
                mrcal.compose_Rt(M[imp][1].extrinsics_Rt_fromref(),
                                 M[imp][0].extrinsics_Rt_toref()),
                p_triangulated_true0[ipt]),
            *M[imp][1].intrinsics())

p, \
Var_p0p1_calibration_big, \
Var_p0p1_observation_big, \
Var_p0p1_joint_big = \
    mrcal.triangulate( q, M,
                       q_calibration_stdev             = args.q_calibration_stdev,
                       q_observation_stdev             = args.q_observation_stdev,
                       q_observation_stdev_correlation = args.q_observation_stdev_correlation,
                       stabilize_coords                = args.stabilize_coords )

testutils.confirm_equal(p.shape, (Npoints, Nmodelpairs, 3),
                        msg="point array has the right shape")