def get_observation_chunk(): '''Make Nframes observations, and return them all, even the out-of-view ones''' # I compute the full random block in one shot. This is useful for # simulations that want to see identical poses when asking for N-1 # random poses and when asking for the first N-1 of a set of N random # poses # shape (Nframes,6) randomblock = np.random.uniform(low=-1.0, high=1.0, size=(Nframes, 6)) # shape(Nframes,4,3) Rt_ref_boardref = \ mrcal.Rt_from_rt( rt_ref_boardcenter + randomblock * rt_ref_boardcenter__noiseradius ) # shape = (Nframes, Nh,Nw,3) boards_ref = mrcal.transform_point_Rt( # shape (Nframes, 1,1,4,3) nps.mv(Rt_ref_boardref, 0, -5), # shape ( Nh,Nw,3) board_reference) # I project full_board. Shape: (Nframes,Ncameras,Nh,Nw,2) q = \ nps.mv( \ nps.cat( \ *[ mrcal.project( mrcal.transform_point_Rt(models[i].extrinsics_Rt_fromref(), boards_ref), *models[i].intrinsics()) \ for i in range(Ncameras) ]), 0,1 ) return q, Rt_ref_boardref
def _extrinsics_Rt(self, toref, Rt=None):
    r'''Get or set the extrinsics in this model, as an Rt transformation

    The pose is represented as a shape (4,3) numpy array: a (3,3) rotation
    matrix stacked on top of a (1,3) translation in the last row:

        R = Rt[:3,:]
        t = Rt[ 3,:]

    The transformation maps a point a to a point b = R*a + t:

        import numpysane as nps
        b = nps.matmult(a, nps.transpose(R)) + t

    If 'Rt' is None this is a getter; otherwise it is a setter, and we
    return True.

    'toref' is a boolean selecting the direction. If toref: the Rt
    transformation maps points in the coord system of THIS camera to the
    reference coord system. Otherwise it maps the opposite way.
    '''

    # The pose is stored internally as rt_fromref
    if Rt is None:
        # getter: expand the stored rt, flipping direction if requested
        Rt_fromref = mrcal.Rt_from_rt(self._extrinsics)
        return mrcal.invert_Rt(Rt_fromref) if toref else Rt_fromref

    # setter: normalize the input to the fromref direction, then store it
    Rt_fromref = mrcal.invert_Rt(Rt) if toref else Rt
    self._extrinsics = mrcal.rt_from_Rt(Rt_fromref)
    return True
# NOTE(review): test-script chunk. Fixtures (r0_ref, rt0_ref, Rt0_ref, the
# out* preallocated arrays, confirm_equal, grad, r_from_R, R_from_r,
# invert_Rt) are defined outside the visible region.

# Check mrcal.R_from_r() and its gradient against cv2.Rodrigues()
R, J_r = mrcal.R_from_r(r0_ref, get_gradients=True, out=(out33, out333))
Rref, J_r_ref = cv2.Rodrigues(r0_ref)
J_r_ref = nps.transpose(J_r_ref)  # fix opencv's weirdness. Now shape=(9,3)
J_r_ref = J_r_ref.reshape(3, 3, 3)
confirm_equal(R, Rref, msg='R_from_r result, comparing with cv2.Rodrigues')
confirm_equal(J_r, J_r_ref, msg='R_from_r J_r, comparing with cv2.Rodrigues')

# rt_from_Rt: plain result, then result + gradient vs a numerical gradient
rt = mrcal.rt_from_Rt(Rt0_ref, out=out6)
confirm_equal(rt, rt0_ref, msg='rt_from_Rt result')

rt, J_R = mrcal.rt_from_Rt(Rt0_ref, get_gradients=True, out=(out6, out333))
J_R_ref = grad(r_from_R, Rt0_ref[:3, :])
confirm_equal(rt, rt0_ref, msg='rt_from_Rt result')
confirm_equal(J_R, J_R_ref, msg='rt_from_Rt grad result')

# Rt_from_rt: plain result, then result + gradient vs a numerical gradient
Rt = mrcal.Rt_from_rt(rt0_ref, out=out43)
confirm_equal(Rt, Rt0_ref, msg='Rt_from_rt result')

Rt, J_r = mrcal.Rt_from_rt(rt0_ref, get_gradients=True, out=(out43, out333))
J_r_ref = grad(R_from_r, rt0_ref[:3])
confirm_equal(Rt, Rt0_ref, msg='Rt_from_rt result')
confirm_equal(J_r, J_r_ref, msg='Rt_from_rt grad result')

# invert_Rt into a separate output array...
Rt = mrcal.invert_Rt(Rt0_ref, out=out43)
confirm_equal(Rt, invert_Rt(Rt0_ref), msg='invert_Rt result')

# ...and in-place, with out aliasing the input (copy first, so the
# reference value invert_Rt(Rt0_ref) is computed from pristine data)
Rt0_ref_copy = np.array(Rt0_ref)
Rt = mrcal.invert_Rt(Rt0_ref_copy, out=Rt0_ref_copy)
confirm_equal(Rt, invert_Rt(Rt0_ref), msg='invert_Rt result written in-place')
def hypothesis_board_corner_positions(icam_intrinsics = None,
                                      idx_inliers     = None,
                                      **optimization_inputs):
    r'''Reports the 3D chessboard points observed by a camera at calibration time

SYNOPSIS

    model = mrcal.cameramodel("xxx.cameramodel")
    optimization_inputs = model.optimization_inputs()

    # shape (Nobservations, Nheight, Nwidth, 3)
    pcam = mrcal.hypothesis_board_corner_positions(**optimization_inputs)[0]

    i_intrinsics = \
      optimization_inputs['indices_frame_camintrinsics_camextrinsics'][:,1]

    # shape (Nobservations,1,1,Nintrinsics)
    intrinsics = nps.mv(optimization_inputs['intrinsics'][i_intrinsics],-2,-4)

    optimization_inputs['observations_board'][...,:2] = \
        mrcal.project( pcam,
                       optimization_inputs['lensmodel'],
                       intrinsics )

    # optimization_inputs now contains perfect, noiseless board observations
    x = mrcal.optimizer_callback(**optimization_inputs)[1]
    print(nps.norm2(x[:mrcal.num_measurements_boards(**optimization_inputs)]))
    ==> 0

The optimization routine generates hypothetical observations from a set of
parameters being evaluated, trying to match these hypothetical observations to
real observations. To facilitate analysis, this routine returns these
hypothetical coordinates of the chessboard corners being observed. This routine
reports the 3D points in the coordinate system of the observing camera.

The hypothetical points are constructed from

- The calibration object geometry
- The calibration object-reference transformation in
  optimization_inputs['frames_rt_toref']
- The camera extrinsics (reference-camera transformation) in
  optimization_inputs['extrinsics_rt_fromref']
- The table selecting the camera and calibration object frame for each
  observation in
  optimization_inputs['indices_frame_camintrinsics_camextrinsics']

ARGUMENTS

- icam_intrinsics: optional integer specifying which intrinsic camera in the
  optimization_inputs we're looking at. If omitted (or None), we report
  camera-coordinate points for all the cameras

- idx_inliers: optional numpy array of booleans of shape
  (Nobservations,object_height,object_width) to select the inliers manually. If
  omitted (or None), the inliers are selected automatically: idx_inliers =
  observations_board[...,2] > 0. This argument is available to pick common
  inliers from two separate solves.

- **optimization_inputs: a dict() of arguments passable to mrcal.optimize() and
  mrcal.optimizer_callback(). We use the geometric data. This dict is obtainable
  from a cameramodel object by calling cameramodel.optimization_inputs()

RETURNED VALUE

- An array of shape (Nobservations, Nheight, Nwidth, 3) containing the
  coordinates (in the coordinate system of each camera) of the chessboard
  corners, for ALL the cameras. These correspond to the observations in
  optimization_inputs['observations_board'], which also have shape
  (Nobservations, Nheight, Nwidth, 3)

- An array of shape (Nobservations_thiscamera, Nheight, Nwidth, 3) containing
  the coordinates (in the camera coordinate system) of the chessboard corners,
  for the particular camera requested in icam_intrinsics. If icam_intrinsics is
  None: this is the same array as the previous returned value

- an (N,3) array containing camera-frame 3D points observed at calibration
  time, and accepted by the solver as inliers. This is a subset of the 2nd
  returned array.

- an (N,3) array containing camera-frame 3D points observed at calibration
  time, but rejected by the solver as outliers. This is a subset of the 2nd
  returned array.

    '''

    observations_board = optimization_inputs.get('observations_board')
    if observations_board is None:
        # Bug fix: the original code did "return Exception(...)", silently
        # handing the exception object to the caller instead of raising it
        raise Exception("No board observations available")

    indices_frame_camintrinsics_camextrinsics = \
        optimization_inputs['indices_frame_camintrinsics_camextrinsics']

    object_width_n  = observations_board.shape[-2]
    object_height_n = observations_board.shape[-3]
    object_spacing  = optimization_inputs['calibration_object_spacing']
    calobject_warp  = optimization_inputs.get('calobject_warp')

    # shape (Nh,Nw,3): the ideal chessboard geometry, possibly warped
    full_object = mrcal.ref_calibration_object(object_width_n,
                                               object_height_n,
                                               object_spacing,
                                               calobject_warp)

    # Board-to-reference transform for each observation
    frames_Rt_toref = \
        mrcal.Rt_from_rt( optimization_inputs['frames_rt_toref'] ) \
        [ indices_frame_camintrinsics_camextrinsics[:,0] ]

    # Reference-to-camera transform for each observation. An extrinsics
    # index of -1 means "the camera sits at the reference", hence the
    # prepended identity transform and the +1 index shift
    extrinsics_Rt_fromref = \
        nps.glue( mrcal.identity_Rt(),
                  mrcal.Rt_from_rt(optimization_inputs['extrinsics_rt_fromref']),
                  axis = -3 ) \
        [ indices_frame_camintrinsics_camextrinsics[:,2]+1 ]

    # Board-to-camera transform for each observation
    Rt_cam_frame = mrcal.compose_Rt( extrinsics_Rt_fromref,
                                     frames_Rt_toref )

    # shape (Nobservations, Nheight, Nwidth, 3)
    p_cam_calobjects = \
        mrcal.transform_point_Rt(nps.mv(Rt_cam_frame,-3,-5), full_object)

    # shape (Nobservations,Nheight,Nwidth)
    if idx_inliers is None:
        idx_inliers = observations_board[...,2] > 0
    idx_outliers = ~idx_inliers

    if icam_intrinsics is None:
        return \
            p_cam_calobjects, \
            p_cam_calobjects, \
            p_cam_calobjects[idx_inliers,  ...], \
            p_cam_calobjects[idx_outliers, ...]

    # The user asked for a specific camera. Separate out its data

    # shape (Nobservations,)
    idx_observations = \
        indices_frame_camintrinsics_camextrinsics[:,1] == icam_intrinsics

    # Bug fix: work on a copy, so a caller-supplied idx_inliers array is
    # never clobbered in place. idx_outliers is always locally-created, so
    # mutating it is safe
    idx_inliers = idx_inliers.copy()
    idx_inliers [~idx_observations] = False
    idx_outliers[~idx_observations] = False

    return \
        p_cam_calobjects, \
        p_cam_calobjects[idx_observations, ...], \
        p_cam_calobjects[idx_inliers,      ...], \
        p_cam_calobjects[idx_outliers,     ...]
# NOTE(review): this chunk begins mid-call: the function being invoked and
# the 'if' header matching the 'else' below are outside the visible region.
# Indentation reconstructed; verify against the full file.
        cull_left_of_center             = args.cull_left_of_center,
        allow_nonidentity_cam0_transform = False)
else:
    # Re-use a previously-computed solve: unpickle everything the
    # generation path would have produced
    with open(cache_file, "rb") as f:
        (optimization_inputs_baseline,
         models_true,
         models_baseline,
         indices_frame_camintrinsics_camextrinsics,
         lensmodel,
         Nintrinsics,
         imagersizes,
         intrinsics_true,
         extrinsics_true_mounted,
         frames_true,
         observations_true,
         intrinsics_sampled,
         extrinsics_sampled_mounted,
         frames_sampled,
         calobject_warp_sampled) = pickle.load(f)

baseline_rt_ref_frame = optimization_inputs_baseline['frames_rt_toref']

icam0, icam1 = args.cameras

# True camera0-to-camera1 geometry: cam0->ref composed with ref->cam1,
# both derived from the true extrinsics
Rt01_true = mrcal.compose_Rt(mrcal.Rt_from_rt(extrinsics_rt_fromref_true[icam0]),
                             mrcal.invert_Rt(mrcal.Rt_from_rt(extrinsics_rt_fromref_true[icam1])))
Rt10_true = mrcal.invert_Rt(Rt01_true)

# shape (Npoints,Ncameras,3): the true points, in each camera's coords
p_triangulated_true_local = nps.xchg(nps.cat(p_triangulated_true0,
                                             mrcal.transform_point_Rt(Rt10_true, p_triangulated_true0)),
                                     0, 1)

# Pixel coords at the perfect intersection
# shape (Npoints,Ncameras,2)
q_true = nps.xchg(np.array([mrcal.project(p_triangulated_true_local[:,i,:],
                                          lensmodel,
                                          intrinsics_true[args.cameras[i]]) \
                            for i in range(2)]),
                  0, 1)
# NOTE(review): this chunk begins mid-statement; the computation of
# Rt_extrinsics_err and the enclosing 'for icam' loop header (implied by
# the f-strings below) are outside the visible region. Indentation
# reconstructed; verify against the full file.
                              models_ref [icam].extrinsics_Rt_toref() )

    # Translation error: magnitude of the last row of the error Rt
    testutils.confirm_equal( nps.mag(Rt_extrinsics_err[3, :]),
                             0.0,
                             eps=0.05,
                             msg=f"Recovered extrinsic translation for camera {icam}")

    # Rotation error: (trace(R)-1)/2 = cos(angle); compare against cos(1deg)
    testutils.confirm_equal( (np.trace(Rt_extrinsics_err[:3, :]) - 1) / 2.,
                             1.0,
                             eps=np.cos(1. * np.pi / 180.0),  # 1 deg
                             msg=f"Recovered extrinsic rotation for camera {icam}")

# Frame-pose error: recovered board-to-ref composed with the inverse of
# the reference board pose should be identity
Rt_frame_err = \
    mrcal.compose_Rt( mrcal.Rt_from_rt(optimization_inputs['frames_rt_toref']),
                      mrcal.invert_Rt(Rt_cam0_board_ref) )

testutils.confirm_equal(np.max(nps.mag(Rt_frame_err[..., 3, :])),
                        0.0,
                        eps=0.08,
                        msg="Recovered frame translation")
testutils.confirm_equal( np.min((nps.trace(Rt_frame_err[..., :3, :]) - 1) / 2.),
                         1.0,
                         eps=np.cos(1. * np.pi / 180.0),  # 1 deg
                         msg="Recovered frame rotation")

# Checking the intrinsics. Each intrinsics vector encodes an implicit
# transformation. I compute and apply this transformation when making my
# intrinsics comparisons. I make sure that within some distance of the pixel
# NOTE(review): this chunk begins mid-call (the align_procrustes_* call it
# closes starts outside the visible region) and ends mid-call.
                                     p)
R_fit = Rt_fit[:3, :]
t_fit = Rt_fit[3, :]
testutils.confirm_equal(R_fit, R, eps=1e-2, msg='Procrustes fit R')
testutils.confirm_equal(t_fit, t, eps=1e-2, msg='Procrustes fit t')

# Rotation-only Procrustes fit: align noisy rotated vectors back to p
R_fit_vectors = \
    mrcal.align_procrustes_vectors_R01(nps.matmult( p, nps.transpose(R) ) + noise,
                                       p)
testutils.confirm_equal(R_fit_vectors, R, eps=1e-2, msg='Procrustes fit R (vectors)')

# Round-trip: Rt -> rt -> invert -> Rt -> invert should reproduce Rt
testutils.confirm_equal(mrcal.invert_Rt(mrcal.Rt_from_rt(mrcal.invert_rt(mrcal.rt_from_Rt(Rt)))),
                        Rt,
                        msg='Rt/rt and invert')

# Composing a transform with its inverse gives identity
testutils.confirm_equal(mrcal.compose_Rt(Rt, mrcal.invert_Rt(Rt)),
                        nps.glue(np.eye(3), np.zeros((3, )), axis=-2),
                        msg='compose_Rt')
testutils.confirm_equal(mrcal.compose_rt(mrcal.rt_from_Rt(Rt),
                                         mrcal.invert_rt(mrcal.rt_from_Rt(Rt))),
                        np.zeros((6, )),
                        msg='compose_rt')

testutils.confirm_equal(mrcal.identity_Rt(),
                        nps.glue(np.eye(3), np.zeros((3, )), axis=-2),
# NOTE(review): this chunk begins mid-statement — the tail of a pickled
# tuple-unpack whose opening (and the enclosing 'with open(...)') lies
# outside the visible region. Indentation reconstructed.
     Nintrinsics,
     imagersizes,
     intrinsics_true,
     extrinsics_true_mounted,
     frames_true,
     observations_true,
     intrinsics_sampled,
     extrinsics_sampled_mounted,
     frames_sampled,
     calobject_warp_sampled) = pickle.load(f)

Npoints = p_triangulated_true.shape[0]

# True camera0-to-camera1 geometry: cam0->ref composed with ref->cam1
Rt01_true = mrcal.compose_Rt(mrcal.Rt_from_rt(extrinsics_rt_fromref_true[0]),
                             mrcal.invert_Rt(mrcal.Rt_from_rt(extrinsics_rt_fromref_true[1])))
Rt10_true = mrcal.invert_Rt(Rt01_true)

# shape (Npoints,Ncameras,3): true points in each camera's coord system
p_triangulated_true_local = nps.xchg( nps.cat( p_triangulated_true,
                                               mrcal.transform_point_Rt(Rt10_true,
                                                                        p_triangulated_true) ),
                                      0, 1)

# Pixel coords at the perfect intersection
# shape (Npoints,Ncameras,2)
q_true = nps.xchg( np.array([ mrcal.project(p_triangulated_true_local[:,i,:],
                                            lensmodel,
                                            intrinsics_true[i]) \
                              for i in range(len(intrinsics_true))]),
                   0, 1)
# NOTE(review): this chunk begins mid-array-literal (the np.array([...])
# opening is outside the visible region) and ends mid-call.
    0.0005275929652,
    0.01968883397,
    0.01482863541,
    -0.0562239888,
    0.0500223357, ])

# Set extrinsics via the toref Rt, then check the stored fromref rt
# round-trips to the original rt_0r
rt_0r = np.array([ 4e-1, -1e-2, 1e-3, -2., 3, -5., ])
Rt_r0 = mrcal.invert_Rt(mrcal.Rt_from_rt(rt_0r))
m.extrinsics_Rt_toref(Rt_r0)
testutils.confirm_equal(m.extrinsics_rt_fromref(), rt_0r)

# Let's make sure I can read and write empty and non-empty valid-intrinsics
# regions
m = mrcal.cameramodel(f"{testdir}/data/cam0.opencv8.cameramodel")
testutils.confirm_equal(m.valid_intrinsics_region(),
                        None,
                        "read 'valid_intrinsics_region is None' properly")

# Contours: open (not repeating the first vertex), closed, and empty
r_open   = np.array(((0, 0), (0, 10), (10, 10), (10, 0)))
r_closed = np.array(((0, 0), (0, 10), (10, 10), (10, 0), (0, 0)))
r_empty  = np.zeros((0, 2))

m.valid_intrinsics_region(r_open)
testutils.confirm_equal(