icam_extrinsics1, \
istate_e1,        \
istate_e0 =       \
    mrcal.triangulation._triangulation_uncertainty_internal(slices,
                                                            optimization_inputs_baseline,
                                                            0,0,
                                                            stabilize_coords = args.stabilize_coords)

########## Gradient check
dp_triangulated_di0_empirical = \
    grad(lambda i0: triangulate_nograd(i0, models_baseline[icam1].intrinsics()[1],
                                       models_baseline[icam0].extrinsics_rt_fromref(),
                                       models_baseline[icam0].extrinsics_rt_fromref(),
                                       models_baseline[icam1].extrinsics_rt_fromref(),
                                       baseline_rt_ref_frame,
                                       baseline_rt_ref_frame,
                                       q_true,
                                       lensmodel,
                                       stabilize_coords=args.stabilize_coords),
         models_baseline[icam0].intrinsics()[1],
         step=1e-5)

dp_triangulated_di1_empirical = \
    grad(lambda i1: triangulate_nograd(models_baseline[icam0].intrinsics()[1], i1,
                                       models_baseline[icam0].extrinsics_rt_fromref(),
                                       models_baseline[icam0].extrinsics_rt_fromref(),
                                       models_baseline[icam1].extrinsics_rt_fromref(),
                                       baseline_rt_ref_frame,
                                       baseline_rt_ref_frame,
              nps.glue(np.eye(3), np.zeros((3,)), axis=-2),
              msg='identity_Rt')
confirm_equal(mrcal.identity_r(out=out3),
              np.zeros((3,)),
              msg='identity_r')
confirm_equal(mrcal.identity_rt(out=out6),
              np.zeros((6,)),
              msg='identity_rt')

################# rotate_point_R

y = \
    mrcal.rotate_point_R(R0_ref, x, out = out3)
confirm_equal( y,
               nps.matmult(x, nps.transpose(R0_ref)),
               msg='rotate_point_R result')

y, J_R, J_x = \
    mrcal.rotate_point_R(R0_ref, x, get_gradients=True,
                         out = (out3,out333,out33))
J_R_ref = grad(lambda R: nps.matmult(x, nps.transpose(R)),
               R0_ref)
J_x_ref = R0_ref
confirm_equal( y,
               nps.matmult(x, nps.transpose(R0_ref)),
               msg='rotate_point_R result')
confirm_equal( J_R,
               J_R_ref,
               msg='rotate_point_R J_R')
confirm_equal( J_x,
               J_x_ref,
               msg='rotate_point_R J_x')

# In-place
R0_ref_copy = np.array(R0_ref)
x_copy      = np.array(x)
y = \
    mrcal.rotate_point_R(R0_ref_copy, x_copy, out = x_copy)
confirm_equal( y,
               nps.matmult(x, nps.transpose(R0_ref)),
               msg='rotate_point_R result written in-place into x')
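# All the gradient checks in these tests compare analytical gradients against
# a numerical-differentiation helper grad(). Its actual implementation lives
# in the shared test-support code and isn't shown in this excerpt; below is a
# minimal central-difference sketch of what it does. The name
# grad_central_differences() and the details are assumptions, not the real
# helper.
import numpy as np

def grad_central_differences(f, x, step=1e-6):
    '''Numerical Jacobian of f at x via central differences

    Returns an array of shape f(x).shape + x.shape: the output dimensions
    lead, the dimensions of the perturbed variable trail'''
    x  = np.array(x, dtype=float)
    f0 = np.array(f(x), dtype=float)
    J  = np.zeros(f0.shape + x.shape, dtype=float)
    # Perturb one element of x at a time
    for i in np.ndindex(*x.shape):
        dx    = np.zeros(x.shape)
        dx[i] = step
        J[(Ellipsis,) + i] = (np.array(f(x + dx)) - np.array(f(x - dx))) / (2.*step)
    return J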
def grad_normalized_broadcasted(q_ref, i_ref):
    return grad(lambda qi: \
                mrcal.unproject(qi[:2], intrinsics[0], qi[2:],
                                normalize=True),
                nps.glue(q_ref, i_ref, axis=-1))
def grad_broadcasted(p_ref, i_ref):
    return grad(lambda pi: mrcal.project(pi[:3], intrinsics[0], pi[3:]),
                nps.glue(p_ref, i_ref, axis=-1))
def grad_broadcasted(q_ref, i_ref):
    return grad(lambda qi: \
                mrcal.unproject_stereographic( \
                    mrcal.project_stereographic(
                        mrcal.unproject(qi[:2], intrinsics[0], qi[2:]))),
                nps.glue(q_ref, i_ref, axis=-1))
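# The stereographic round trip in the helper above is presumably there because
# unproject() reports an observation direction that is defined only up to
# scale: passing the result through project_stereographic() and
# unproject_stereographic() maps every vector on the same ray to one canonical
# representative, so the numerical gradient is taken of a well-defined
# function. A small illustration with made-up numbers (not part of the test):
import numpy as np
import mrcal

v_demo  = np.array((1., 2., 10.))        # some observation vector
n_demo  = mrcal.unproject_stereographic(
              mrcal.project_stereographic(v_demo))
n_demo5 = mrcal.unproject_stereographic(
              mrcal.project_stereographic(5. * v_demo))
# Scaling the input ray does not change the canonical representation
assert np.max(np.abs(n_demo - n_demo5)) < 1e-9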
def grad_broadcasted(q_ref, i_ref):
    return grad(lambda qi: mrcal.unproject(qi[:2], intrinsics[0], qi[2:]),
                nps.glue(q_ref, i_ref, axis=-1))
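# Each of these helpers glues its two arguments into one flat vector so that a
# single numerical-differentiation call returns the gradient with respect to
# both at once; the caller then slices the two blocks back apart. A
# hypothetical illustration for the unprojection helper directly above,
# assuming q_ref is a single (2,) pixel, i_ref is the (Nintrinsics,)
# intrinsics vector and unproject() returns a (3,) point:
dp_dqi = grad_broadcasted(q_ref, i_ref)   # shape (3, 2+Nintrinsics)
dp_dq  = dp_dqi[..., :2]                  # gradient with respect to the pixel q
dp_di  = dp_dqi[..., 2:]                  # gradient with respect to the intrinsics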
def test_geometry( Rt01, p, whatgeometry,
                   out_of_bounds   = False,
                   check_gradients = False ):

    R01 = Rt01[:3,:]
    t01 = Rt01[ 3,:]

    # p now has shape (Np,3). The leading dims have been flattened
    p = p.reshape( p.size // 3, 3)
    Np = len(p)

    # p has shape (Np,3)
    # v has shape (Np,2)
    v0local_noisy, v1local_noisy, v0_noisy, v1_noisy, q0_ref, q1_ref, q0_noisy, q1_noisy = \
        [ v[...,0,:] for v in \
          mrcal.synthetic_data._noisy_observation_vectors_for_triangulation(p, Rt01,
                                                                            model0.intrinsics(),
                                                                            model1.intrinsics(),
                                                                            1,
                                                                            sigma = 0.1) ]

    scenarios = \
        ( (mrcal.triangulate_geometric,       callback_l2_geometric,    v0_noisy,      v1_noisy,      t01),
          (mrcal.triangulate_leecivera_l1,    callback_l1_angle,        v0_noisy,      v1_noisy,      t01),
          (mrcal.triangulate_leecivera_linf,  callback_linf_angle,      v0_noisy,      v1_noisy,      t01),
          (mrcal.triangulate_leecivera_mid2,  None,                     v0_noisy,      v1_noisy,      t01),
          (mrcal.triangulate_leecivera_wmid2, None,                     v0_noisy,      v1_noisy,      t01),
          (mrcal.triangulate_lindstrom,       callback_l2_reprojection, v0local_noisy, v1local_noisy, Rt01),
        )

    for scenario in scenarios:

        f, callback = scenario[:2]
        args        = scenario[2:]

        result = f(*args, get_gradients=True)
        p_reported = result[0]

        what = f"{whatgeometry} {f.__name__}"

        if out_of_bounds:
            p_optimized = np.zeros(p_reported.shape)
        else:
            # Check all the gradients
            if check_gradients:
                grads = result[1:]
                for ip in range(Np):
                    args_cut = (args[0][ip], args[1][ip], args[2])
                    for ivar in range(len(args)):
                        grad_empirical = \
                            grad( lambda x: f( *args_cut[:ivar], x, *args_cut[ivar+1:]),
                                  args_cut[ivar],
                                  step = 1e-6)
                        testutils.confirm_equal( grads[ivar][ip],
                                                 grad_empirical,
                                                 relative  = True,
                                                 worstcase = True,
                                                 msg = f"{what}: grad(ip={ip}, ivar = {ivar})",
                                                 eps = 2e-2)

            if callback is not None:

                # I run an optimization to directly optimize the quantity each triangulation
                # routine is supposed to be optimizing, and then I compare
                p_optimized = \
                    nps.cat(*[ scipy.optimize.minimize(callback,
                                                       p_reported[ip], # seed from the "right" value
                                                       args   = (args[0][ip], args[1][ip], args[2]),
                                                       method = 'Nelder-Mead',
                                                       # options = dict(disp = True)
                                                       )['x'] \
                               for ip in range(Np) ])

                # print( f"{what} p reported,optimized:\n{nps.cat(p_reported, p_optimized)}" )
                # print( f"{what} p_err: {p_reported - p_optimized}" )
                # print( f"{what} optimum reported/optimized:\n{callback(p_reported, *args)/callback(p_optimized, *args)}" )

                testutils.confirm_equal( p_reported, p_optimized,
                                         relative  = True,
                                         worstcase = True,
                                         msg       = what,
                                         eps       = 1e-3)
            else:
                # No callback defined. Compare projected q
                q0 = mrcal.project(p_reported, *model0.intrinsics())
                q1 = mrcal.project( mrcal.transform_point_Rt( mrcal.invert_Rt(Rt01), p_reported),
                                    *model1.intrinsics())
                testutils.confirm_equal( q0, q0_ref,
                                         relative  = False,
                                         worstcase = True,
                                         msg       = f'{what} q0',
                                         eps       = 25.)
                testutils.confirm_equal( q1, q1_ref,
                                         relative  = False,
                                         worstcase = True,
                                         msg       = f'{what} q1',
                                         eps       = 25.)
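# The callback_... cost functions referenced in the scenarios aren't shown in
# this excerpt. To illustrate the pattern, a plausible cost for
# mrcal.triangulate_geometric() is the sum of squared distances from the
# candidate point p to the two observation rays (one through the camera-0
# origin along v0, one through t01 along v1, all in camera-0 coordinates).
# This is a sketch of what a callback like callback_l2_geometric could
# compute, not the test's actual implementation:
import numpy as np
import numpysane as nps

def callback_l2_geometric_sketch(p, v0, v1, t01):
    '''Sum of squared distances from candidate point p to the two rays'''
    def sq_dist_point_ray(pt, origin, v):
        v      = v / nps.mag(v)
        d      = pt - origin
        d_perp = d - nps.inner(d, v)*v   # component of d orthogonal to the ray
        return nps.norm2(d_perp)
    return sq_dist_point_ray(p, np.zeros((3,)), v0) + \
           sq_dist_point_ray(p, t01,            v1)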
                            q_projected,
                            msg=f"project(unproject()) is an identity",
                            worstcase=True,
                            relative=True)

testutils.confirm_equal( func_project(func_unproject(q_projected, fx, fy, cx, cy), fx, fy, cx, cy),
                         q_projected,
                         msg=f"project_{name}(unproject_{name}()) is an identity",
                         worstcase=True,
                         relative=True)

# Now gradients for project()
ipt = 1

_, dq_dp_reported = func_project(p[ipt], fx, fy, cx, cy, get_gradients=True)
dq_dp_observed = grad(lambda p: func_project(p, fx, fy, cx, cy),
                      p[ipt])
testutils.confirm_equal(dq_dp_reported,
                        dq_dp_observed,
                        msg=f"project_{name}() dq/dp",
                        worstcase=True,
                        relative=True)

_, dq_dp_reported, dq_di_reported = mrcal.project(p[ipt], *intrinsics, get_gradients=True)
dq_dp_observed = grad(lambda p: mrcal.project(p, *intrinsics),
                      p[ipt])
dq_di_observed = grad(lambda intrinsics_data: mrcal.project(p[ipt], intrinsics[0], intrinsics_data),
                      intrinsics[1])
testutils.confirm_equal(dq_dp_reported,
                        dq_dp_observed,
                        msg=f"project({name}) dq/dp",
# dp_triangulated_dt1r =
#   dp_triangulated_dt01 dt01_dt1r
nps.matmult( dp_triangulated_dt01,
             dt01_dt1r,
             out = dp_triangulated_dpstate[..., istate_e1+3:istate_e1+6])

# dp_triangulated_drtrf has shape (Npoints,Nframes,3,6). I reshape to (Npoints,3,Nframes*6)
dp_triangulated_dpstate[..., istate_f0:istate_f0+Nstate_frames] = \
    nps.clump(nps.xchg(dp_triangulated_drtrf,-2,-3), n=-2)

########## Gradient check
dp_triangulated_di0_empirical = \
    grad(lambda i0: triangulate_nograd([i0, models_baseline[1].intrinsics()[1]],
                                       [m.extrinsics_rt_fromref() for m in models_baseline],
                                       optimization_inputs_baseline['frames_rt_toref'],
                                       frames_true,
                                       q_true,
                                       lensmodel,
                                       stabilize_coords=args.stabilize_coords),
         models_baseline[0].intrinsics()[1])

dp_triangulated_di1_empirical = \
    grad(lambda i1: triangulate_nograd([models_baseline[0].intrinsics()[1], i1],
                                       [m.extrinsics_rt_fromref() for m in models_baseline],
                                       optimization_inputs_baseline['frames_rt_toref'],
                                       frames_true,
                                       q_true,
                                       lensmodel,
                                       stabilize_coords=args.stabilize_coords),
         models_baseline[1].intrinsics()[1])

dp_triangulated_de1_empirical = \
    grad(lambda e1: triangulate_nograd([m.intrinsics()[1] for m in models_baseline],
                                       [models_baseline[0].extrinsics_rt_fromref(), e1],
                                       optimization_inputs_baseline['frames_rt_toref'],
                                       frames_true,
                                       q_true,
                                       lensmodel,