def CooksD_query_test(J,p,x, f, query,fquery_ref, CooksD_nox, k_dima, k_cook, i=0):
    r'''Test the concept of CooksD for querying hypothetical data

    fquery_test = f(q) isn't true here. If it WERE true, the x of the query
    point would be 0 (we fit the model exactly), so the outlierness factor
    would be 0 also

    '''

    # current solve
    Nmeasurements,Nstate = J.shape

    query      = query     [i]
    fquery_ref = fquery_ref[i]

    # I add a new point, and reoptimize
    fquery  = func_hypothesis(query,p)
    xquery  = fquery - fquery_ref
    jquery  = model_matrix(query, len(p))

    J1      = nps.glue(J, jquery,     axis=-2)
    f1      = nps.glue(f, fquery_ref, axis=-1)
    p1      = nps.matmult( f1, nps.transpose(np.linalg.pinv(J1)))
    x1      = nps.matmult(p1, nps.transpose(J1)) - f1
    dx      = x1[:-1] - x
    dx_both = x1 - nps.glue(x,xquery, axis=-1)

    report_mismatch_relerr( nps.inner(dx_both,dx_both)*k_cook,
                            CooksD_nox['self_others'][i]*xquery*xquery,
                            "self_others query-CooksD computed analytically, explicitly")
    report_mismatch_relerr( nps.inner(dx,dx)*k_cook,
                            CooksD_nox['others'][i]*xquery*xquery,
                            "others query-CooksD computed analytically, explicitly")
def _align_procrustes_points_Rt01(p0, p1, weights):

    p0 = nps.transpose(p0)
    p1 = nps.transpose(p1)

    # I process Mt instead of M to not need to transpose anything later, and to
    # end up with contiguous-memory results
    Mt = nps.matmult((p0 - np.mean(p0, axis=-1)[..., np.newaxis]) * weights,
                     nps.transpose(p1 - np.mean(p1, axis=-1)[..., np.newaxis]))
    V, S, Ut = np.linalg.svd(Mt)

    R = nps.matmult(V, Ut)

    # det(R) is now +1 or -1. If it's -1, then this contains a mirror, and thus
    # is not a physical rotation. I compensate by negating the least-important
    # pair of singular vectors
    if np.linalg.det(R) < 0:
        V[:, 2] *= -1
        R = nps.matmult(V, Ut)

    # Now that I have my optimal R, I compute the optimal t. From before:
    #
    #   t = mean(a) - R mean(b)
    t = np.mean(p0, axis=-1)[..., np.newaxis] - \
        nps.matmult(R, np.mean(p1, axis=-1)[..., np.newaxis])

    return nps.glue(R, t.ravel(), axis=-2)
def CooksD_test(J, x, f, CooksD, k_dima, k_cook, i=0):
    r'''Test the computation of Cook's D

    I have an analytical expression for this computed in
    compute_outliernesses(). This explicitly computes the quantity represented
    by compute_outliernesses() to make sure that that analytical expression is
    correct

    '''

    # I reoptimize without measurement i
    Nmeasurements,Nstate = J.shape

    J1 = nps.glue(J[:i,:], J[(i+1):,:], axis=-2)
    f1 = nps.glue(f[:i  ], f[(i+1):  ], axis=-1)
    p1 = nps.matmult( f1, nps.transpose(np.linalg.pinv(J1)))
    x1 = nps.matmult(p1, nps.transpose(J)) - f
    dx = x1-x

    report_mismatch_relerr( nps.inner(dx,dx) * k_cook,
                            CooksD['self_others'][i],
                            "self_others CooksD computed analytically, explicitly")
    report_mismatch_relerr( (nps.inner(dx,dx) - dx[i]*dx[i]) * k_cook,
                            CooksD['others'][i],
                            "others CooksD computed analytically, explicitly")
def outlierness_test(J, x, f, outlierness, k_dima, k_cook, i=0):
    r'''Test the computation of outlierness

    I have an analytical expression for this computed in
    compute_outliernesses(). This explicitly computes the quantity represented
    by compute_outliernesses() to make sure that that analytical expression is
    correct

    '''

    # I reoptimize without measurement i
    E0 = nps.inner(x,x)

    J1 = nps.glue(J[:i,:], J[(i+1):,:], axis=-2)
    f1 = nps.glue(f[:i  ], f[(i+1):  ], axis=-1)
    p1 = nps.matmult( f1, nps.transpose(np.linalg.pinv(J1)))
    x1 = nps.matmult(p1, nps.transpose(J1)) - f1
    E1 = nps.inner(x1,x1)

    report_mismatch_relerr( (E0-E1) * k_dima,
                            outlierness['self_others'][i],
                            "self_others outlierness computed analytically, explicitly")
    report_mismatch_relerr( (E0-x[i]*x[i] - E1) * k_dima,
                            outlierness['others'][i],
                            "others outlierness computed analytically, explicitly")
def test_outer(self): r'''Testing the broadcasted outer product''' # comes from PDL. numpy has a reversed axis ordering convention from # PDL, so I transpose the array before comparing ref = nps.transpose( np.array([[[[[0,0,0,0,0],[0,1,2,3,4],[0,2,4,6,8],[0,3,6,9,12],[0,4,8,12,16]], [[25,30,35,40,45],[30,36,42,48,54],[35,42,49,56,63],[40,48,56,64,72],[45,54,63,72,81]], [[100,110,120,130,140],[110,121,132,143,154],[120,132,144,156,168],[130,143,156,169,182],[140,154,168,182,196]]], [[[0,0,0,0,0],[15,16,17,18,19],[30,32,34,36,38],[45,48,51,54,57],[60,64,68,72,76]], [[100,105,110,115,120],[120,126,132,138,144],[140,147,154,161,168],[160,168,176,184,192],[180,189,198,207,216]], [[250,260,270,280,290],[275,286,297,308,319],[300,312,324,336,348],[325,338,351,364,377],[350,364,378,392,406]]]], [[[[0,15,30,45,60],[0,16,32,48,64],[0,17,34,51,68],[0,18,36,54,72],[0,19,38,57,76]], [[100,120,140,160,180],[105,126,147,168,189],[110,132,154,176,198],[115,138,161,184,207],[120,144,168,192,216]], [[250,275,300,325,350],[260,286,312,338,364],[270,297,324,351,378],[280,308,336,364,392],[290,319,348,377,406]]], [[[225,240,255,270,285],[240,256,272,288,304],[255,272,289,306,323],[270,288,306,324,342],[285,304,323,342,361]], [[400,420,440,460,480],[420,441,462,483,504],[440,462,484,506,528],[460,483,506,529,552],[480,504,528,552,576]], [[625,650,675,700,725],[650,676,702,728,754],[675,702,729,756,783],[700,728,756,784,812],[725,754,783,812,841]]]], [[[[0,30,60,90,120],[0,31,62,93,124],[0,32,64,96,128],[0,33,66,99,132],[0,34,68,102,136]], [[175,210,245,280,315],[180,216,252,288,324],[185,222,259,296,333],[190,228,266,304,342],[195,234,273,312,351]], [[400,440,480,520,560],[410,451,492,533,574],[420,462,504,546,588],[430,473,516,559,602],[440,484,528,572,616]]], [[[450,480,510,540,570],[465,496,527,558,589],[480,512,544,576,608],[495,528,561,594,627],[510,544,578,612,646]], [[700,735,770,805,840],[720,756,792,828,864],[740,777,814,851,888],[760,798,836,874,912],[780,819,858,897,936]], [[1000,1040,1080,1120,1160],[1025,1066,1107,1148,1189],[1050,1092,1134,1176,1218],[1075,1118,1161,1204,1247],[1100,1144,1188,1232,1276]]]], [[[[0,45,90,135,180],[0,46,92,138,184],[0,47,94,141,188],[0,48,96,144,192],[0,49,98,147,196]], [[250,300,350,400,450],[255,306,357,408,459],[260,312,364,416,468],[265,318,371,424,477],[270,324,378,432,486]], [[550,605,660,715,770],[560,616,672,728,784],[570,627,684,741,798],[580,638,696,754,812],[590,649,708,767,826]]], [[[675,720,765,810,855],[690,736,782,828,874],[705,752,799,846,893],[720,768,816,864,912],[735,784,833,882,931]], [[1000,1050,1100,1150,1200],[1020,1071,1122,1173,1224],[1040,1092,1144,1196,1248],[1060,1113,1166,1219,1272],[1080,1134,1188,1242,1296]], [[1375,1430,1485,1540,1595],[1400,1456,1512,1568,1624],[1425,1482,1539,1596,1653],[1450,1508,1566,1624,1682],[1475,1534,1593,1652,1711]]]]])) self._check_output_modes( ref, nps.outer, arr(2,3,5), arr(4,1,3,5), dtype=float )
def invert_Rt(Rt):
    r'''Simple reference implementation

    b = Ra + t  -> a = R'b - R't

    '''
    R    = Rt[:3, :]
    tinv = -nps.matmult(Rt[3, :], R)
    return nps.glue(nps.transpose(R), tinv.ravel(), axis=-2)
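# A quick round-trip sanity check of invert_Rt() above (not from the original
# source; a minimal sketch assuming numpy/numpysane are imported as np/nps and
# invert_Rt() is in scope). Transforming a point by Rt and then by
# invert_Rt(Rt) should recover the original point.
import numpy as np
import numpysane as nps

# sample transform: rotation about the z axis by 30 degrees, plus a translation
c, s = np.cos(np.pi/6), np.sin(np.pi/6)
R  = np.array(((c,-s,0.), (s,c,0.), (0.,0.,1.)))
t  = np.array((1., 2., 3.))
Rt = nps.glue(R, t, axis=-2)

a      = np.array((0.3, -0.2, 1.7))
b      = nps.matmult(a, nps.transpose(R)) + t    # b = R a + t
Rt_inv = invert_Rt(Rt)
a_back = nps.matmult(b, nps.transpose(Rt_inv[:3,:])) + Rt_inv[3,:]
print(np.allclose(a, a_back))                    # expect True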
def model_matrix(q, order):
    r'''Returns the model matrix S for particular domain points

    Here the "order" is the number of parameters in the fit. Thus order==2
    means "linear" and order==3 means "quadratic"

    '''
    q = nps.atleast_dims(q,-1)
    return nps.transpose(nps.cat(*[q ** i for i in range(order)]))
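# Illustration only (not from the original source): what model_matrix() returns
# for a quadratic (order==3) fit, assuming numpy is imported as np. Each row is
# [1, q, q**2] for one domain point.
import numpy as np

q = np.array((0., 1., 2., 3.))
S = model_matrix(q, order=3)
print(S)
# [[1. 0. 0.]
#  [1. 1. 1.]
#  [1. 2. 4.]
#  [1. 3. 9.]]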
def _align_procrustes_vectors_R01(v0, v1, weights):

    v0 = nps.transpose(v0)
    v1 = nps.transpose(v1)

    # I process Mt instead of M to not need to transpose anything later, and to
    # end up with contiguous-memory results
    Mt = nps.matmult(v0 * weights, nps.transpose(v1))
    V, S, Ut = np.linalg.svd(Mt)

    R = nps.matmult(V, Ut)

    # det(R) is now +1 or -1. If it's -1, then this contains a mirror, and thus
    # is not a physical rotation. I compensate by negating the least-important
    # pair of singular vectors
    if np.linalg.det(R) < 0:
        V[:, 2] *= -1
        R = nps.matmult(V, Ut)

    return R
def get_cov_plot_args(q, Var, what):

    l, v   = sorted_eig(Var)
    l0, l1 = l
    v0, v1 = nps.transpose(v)

    major = np.sqrt(l0)
    minor = np.sqrt(l1)

    return \
        ((q[0], q[1],
          2*major, 2*minor, 180./np.pi*np.arctan2(v0[1],v0[0]),
          dict(_with='ellipses', tuplesize=5, legend=what)),)
def Var_df(J, squery, stdev):
    r'''Propagates noise in input to noise in f

    noise in input -> noise in params -> noise in f

    dp ~ M dm where M = inv(JtJ)Jt
    df = df/dp dp
    df/dp = squery

    Var(dm) = stdev^2 I ->

    Var(df) = stdev^2 squery inv(JtJ) Jt J inv(JtJ) squeryt =
            = stdev^2 squery inv(JtJ) squeryt

    This function broadcasts over squery
    '''
    return \
        nps.inner(squery,
                  nps.transpose(np.linalg.solve(nps.matmult(nps.transpose(J),J),
                                                nps.transpose(squery)))) *stdev*stdev
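# A small consistency check of Var_df() above (not from the original source; a
# sketch assuming model_matrix() and Var_df() are in scope and numpy is
# imported as np). Fitting a constant (order==1 model) to N observations makes
# the fitted value the sample mean, so its variance should come out to
# stdev**2 / N.
import numpy as np

N      = 10
stdev  = 2.0
q      = np.linspace(0., 1., N)
J      = model_matrix(q, order=1)                 # shape (N,1): a column of ones
squery = model_matrix(np.array((0.5,)), order=1)  # model row at the query point

print(Var_df(J, squery, stdev))                   # expect ~ stdev**2 / N = 0.4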
def plot_arg_covariance_ellipse(q_mean, Var, what):

    l, v   = sorted_eig(Var)
    l0, l1 = l
    v0, v1 = nps.transpose(v)

    major = np.sqrt(l0)
    minor = np.sqrt(l1)

    return \
        (q_mean[0], q_mean[1],
         2*major, 2*minor, 180./np.pi*np.arctan2(v0[1],v0[0]),
         dict(_with='ellipses', tuplesize=5, legend=what))
def compose_Rt(Rt0, Rt1):
    r'''Simple reference implementation

    b = R0 (R1 x + t1) + t0 =
      = R0 R1 x + R0 t1 + t0

    '''
    R0 = Rt0[:3, :]
    t0 = Rt0[ 3, :]
    R1 = Rt1[:3, :]
    t1 = Rt1[ 3, :]

    R2 = nps.matmult(R0, R1)
    t2 = nps.matmult(t1, nps.transpose(R0)) + t0
    return nps.glue(R2, t2.ravel(), axis=-2)
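# A quick consistency check tying compose_Rt() and invert_Rt() together (not
# from the original source; a sketch assuming both reference implementations
# above are in scope, with numpy/numpysane imported as np/nps). Composing a
# transform with its inverse should produce the identity Rt.
import numpy as np
import numpysane as nps

c, s = np.cos(0.2), np.sin(0.2)
R  = np.array(((1.,0.,0.), (0.,c,-s), (0.,s,c)))   # rotation about the x axis
Rt = nps.glue(R, np.array((-0.1, 0.4, 2.0)), axis=-2)

Rt_identity = compose_Rt(Rt, invert_Rt(Rt))
print(np.allclose(Rt_identity,
                  nps.glue(np.eye(3), np.zeros((3,)), axis=-2)))   # expect True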
def confirm_covariances_equal( var, var_ref,
                               what,
                               # scalar float to use for all the eigenvalues, or
                               # a list of length 3, to use in order from largest
                               # to smallest. None to skip that axis
                               eps_eigenvalues,
                               eps_eigenvectors_deg ):

    # First, the thing is symmetric, right?
    confirm_equal(nps.transpose(var), var,
                  worstcase=True,
                  msg=f"Var(dq) is symmetric for {what}")

    l_predicted, v_predicted = mrcal.utils._sorted_eig(var)
    l_observed,  v_observed  = mrcal.utils._sorted_eig(var_ref)

    eccentricity_predicted = l_predicted[-1] / l_predicted[-2]

    for i in range(var.shape[-1]):
        # check all the eigenvalues, in order from largest to smallest
        if isinstance(eps_eigenvalues, float):
            eps = eps_eigenvalues
        else:
            eps = eps_eigenvalues[i]

        if eps is None:
            continue

        confirm_equal(l_observed[-1 - i],
                      l_predicted[-1 - i],
                      eps=eps,
                      worstcase=True,
                      relative=True,
                      msg=f"Var(dq) worst[{i}] eigenvalue match for {what}")

    # I only check the eigenvector directions if the ellipse is sufficiently
    # non-circular. A circular ellipse has poorly-defined eigenvector directions
    if eccentricity_predicted > 2.:
        # I look at the direction of the largest ellipse axis only
        v0_predicted = v_predicted[:, -1]
        v0_observed  = v_observed [:, -1]

        confirm_equal(np.arcsin(nps.mag(np.cross(v0_observed, v0_predicted))) * 180. / np.pi,
                      0,
                      eps=eps_eigenvectors_deg,
                      worstcase=True,
                      msg=f"Var(dq) eigenvectors match for {what}")
def test_matmult(self):
    r'''Testing the broadcasted matrix multiplication'''
    self.assertValueShape( None, (4,2,3,5),
                           nps.matmult, arr(2,3,7), arr(4,1,7,5) )

    ref = np.array([[[[  42,  48,  54],
                      [ 114, 136, 158]],
                     [[ 114, 120, 126],
                      [ 378, 400, 422]]],
                    [[[ 186, 224, 262],
                      [ 258, 312, 366]],
                     [[ 642, 680, 718],
                      [ 906, 960, 1014]]]])
    self._check_output_modes( ref,
                              nps.matmult2, arr(2,1,2,4), arr(2,4,3),
                              dtype=float )

    ref2 = np.array([[[[  156.],
                       [  452.]],
                      [[  372.],
                       [ 1244.]]],
                     [[[  748.],
                       [ 1044.]],
                      [[ 2116.],
                       [ 2988.]]]])
    self._check_output_modes(ref2,
                             nps.matmult2, arr(2,1,2,4), nps.matmult2(arr(2,4,3), arr(3,1)))

    # not doing _check_output_modes() because matmult() doesn't take an
    # 'out' kwarg
    self.assertNumpyAlmostEqual(ref2,
                                nps.matmult(arr(2,1,2,4), arr(2,4,3), arr(3,1)))

    # checking the null-dimensionality logic
    A = arr(2,3)
    self._check_output_modes( nps.inner(nps.transpose(A), np.arange(2)),
                              nps.matmult2, np.arange(2), A )

    A = arr(3)
    self._check_output_modes( A*2,
                              nps.matmult2, np.array([2]), A )

    A = arr(3)
    self._check_output_modes( A*2,
                              nps.matmult2, np.array(2), A )
def outlierness_query_test(J,p,x, f, query,fquery_ref, outlierness_nox, k_dima, k_cook, i=0):
    r'''Test the concept of outlierness for querying hypothetical data

    fquery_test = f(q) isn't true here. If it WERE true, the x of the query
    point would be 0 (we fit the model exactly), so the outlierness factor
    would be 0 also

    '''

    # current solve
    E0 = nps.inner(x,x)

    query      = query     [i]
    fquery_ref = fquery_ref[i]

    # I add a new point, and reoptimize
    fquery = func_hypothesis(query,p)
    xquery = fquery - fquery_ref
    jquery = model_matrix(query, len(p))

    J1 = nps.glue(J, jquery,     axis=-2)
    f1 = nps.glue(f, fquery_ref, axis=-1)
    p1 = nps.matmult( f1, nps.transpose(np.linalg.pinv(J1)))
    x1 = nps.matmult(p1, nps.transpose(J1)) - f1
    E1 = nps.inner(x1,x1)

    report_mismatch_relerr( (x1[-1]*x1[-1]) * k_dima,
                            outlierness_nox['self'][i]*xquery*xquery,
                            "self query-outlierness computed analytically, explicitly")
    report_mismatch_relerr( (E1-x1[-1]*x1[-1] - E0) * k_dima,
                            outlierness_nox['others'][i]*xquery*xquery,
                            "others query-outlierness computed analytically, explicitly")
    report_mismatch_relerr( (E1 - E0) * k_dima,
                            outlierness_nox['self_others'][i]*xquery*xquery,
                            "self_others query-outlierness computed analytically, explicitly")
def _plot_arg_covariance_ellipse(q_mean, Var, what):

    # if the variance is 0, the ellipse is infinitely small, and I don't even
    # try to plot it. Gnuplot has the arguably-buggy behavior where drawing an
    # ellipse with major_diam = minor_diam = 0 plots a nominally-sized ellipse.
    if np.max(np.abs(Var)) == 0:
        return None

    l, v   = _sorted_eig(Var)
    l0, l1 = l
    v0, v1 = nps.transpose(v)

    major = np.sqrt(l0)
    minor = np.sqrt(l1)

    return \
        (q_mean[0], q_mean[1],
         2*major, 2*minor, 180./np.pi*np.arctan2(v0[1],v0[0]),
         dict(_with='ellipses', tuplesize=5, legend=what))
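# Hedged usage sketch (not from the original source): feeding the tuple
# returned by _plot_arg_covariance_ellipse() to gnuplotlib to draw a 1-sigma
# covariance ellipse. Assumes gnuplotlib is installed and a _sorted_eig()
# helper (as used above) is in scope.
import numpy as np
import gnuplotlib as gp

q_mean = np.array((10., 20.))
Var    = np.array(((4.0, 1.5),
                   (1.5, 1.0)))   # a sample 2x2 covariance

curve = _plot_arg_covariance_ellipse(q_mean, Var, '1-sigma observed')
if curve is not None:
    gp.plot(curve, square=True, wait=True)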
indices_point_camintrinsics_camextrinsics = \
    np.array(((0,1,-1),
              (1,0,-1),
              (1,1, 0),
              (2,0,-1),
              (2,1, 0)),
             dtype = np.int32)

points                     = 10. + 2. * linspace_shaped(3, 3)
observations_point_xy      = 1000. + 500. * linspace_shaped(5, 2)
observations_point_weights = np.array((0.9, 0.8, 0.9, 1.3, 1.8))

observations_point = \
    nps.glue(observations_point_xy,
             nps.transpose(observations_point_weights),
             axis = -1)

all_test_kwargs = (dict(do_optimize_intrinsics_core=False,
                        do_optimize_intrinsics_distortions=True,
                        do_optimize_extrinsics=False,
                        do_optimize_frames=False,
                        do_optimize_calobject_warp=False,
                        do_apply_regularization=True),

                   dict(do_optimize_intrinsics_core=True,
                        do_optimize_intrinsics_distortions=False,
                        do_optimize_extrinsics=False,
                        do_optimize_frames=False,
                        do_optimize_calobject_warp=False,
                        do_apply_regularization=True),

                   dict(do_optimize_intrinsics_core=False,
def invert_R(R):
    r'''Simple reference implementation

    '''
    return nps.transpose(R)
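# Trivial check of invert_R() above (not from the original source; a sketch
# assuming numpy/numpysane imported as np/nps): for a rotation matrix the
# transpose is the inverse, so R times invert_R(R) is the identity.
import numpy as np
import numpysane as nps

c, s = np.cos(1.0), np.sin(1.0)
R = np.array(((c,-s,0.), (s,c,0.), (0.,0.,1.)))
print(np.allclose(nps.matmult(R, invert_R(R)), np.eye(3)))   # expect True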
out6   = base[1, 4, :6, 1]
out66  = base[5, 3:9, 3:9, 2]
out66a = base[6, 3:9, 3:9, 2]

confirm_equal(mrcal.identity_R(out=out33),
              np.eye(3),
              msg='identity_R')
confirm_equal(mrcal.identity_Rt(out=out43),
              nps.glue(np.eye(3), np.zeros((3,),), axis=-2),
              msg='identity_Rt')
confirm_equal(mrcal.identity_r(out=out3),
              np.zeros((3,)),
              msg='identity_r')
confirm_equal(mrcal.identity_rt(out=out6),
              np.zeros((6,)),
              msg='identity_rt')

################# rotate_point_R
y = \
    mrcal.rotate_point_R(R0_ref, x, out = out3)
confirm_equal( y,
               nps.matmult(x, nps.transpose(R0_ref)),
               msg='rotate_point_R result')

y, J_R, J_x = \
    mrcal.rotate_point_R(R0_ref, x, get_gradients=True,
                         out = (out3,out333,out33))
J_R_ref = grad(lambda R: nps.matmult(x, nps.transpose(R)),
               R0_ref)
J_x_ref = R0_ref
confirm_equal( y,
               nps.matmult(x, nps.transpose(R0_ref)),
               msg='rotate_point_R result')
confirm_equal( J_R,
               J_R_ref,
               msg='rotate_point_R J_R')
confirm_equal( J_x,
               J_x_ref,
               msg='rotate_point_R J_x')

# In-place
R0_ref_copy = np.array(R0_ref)
def make_noisy_inputs(): r'''Construct incomplete, noisy observations to feed to the solver''' # The seed points array is the true array, but corrupted by noise. All the # points are observed at some point #print(repr((np.random.random(points.shape)-0.5)/3)) points_noise = np.array([[-0.16415198, 0.10697666, 0.07137079], [-0.02353459, 0.07269802, 0.05804911], [-0.05218085, -0.09302461, -0.16626839], [0.03649283, -0.04345566, -0.1589429], [-0.05530528, 0.03942736, -0.02755858], [-0.16252387, 0.07792151, -0.12200266], [-0.02611094, -0.13695699, 0.06799326]]) points_noisy = ref_p * (1. + points_noise) Ncamposes, Npoints = ref_p_cam.shape[:2] ipoints = indices_point_camintrinsics_camextrinsics[:, 0] icamposes = indices_point_camintrinsics_camextrinsics[:, 2] ref_q_cam_indexed = nps.clump(ref_q_cam, n=2)[icamposes * Npoints + ipoints, :] #print(repr(np.random.randn(*ref_q_cam_indexed.shape) * 1.0)) q_cam_noise = np.array([[-0.40162837, -0.60884836], [-0.65186956, -2.23240529], [0.40217293, -0.40160168], [2.05376895, -1.47389235], [-0.01090807, 0.35468639], [-0.37916168, -1.06052742], [-0.08546853, -2.69946391], [0.76133345, -1.38759769], [-1.05998307, -0.27779779], [-2.22203688, 1.47809028], [1.68526798, 0.83635394], [1.26203342, 2.58905488], [1.18282463, -0.41362789], [0.41615768, 2.06621809], [0.27271605, 1.19721072], [-1.48421641, 3.20841776], [1.10563011, 0.38313526], [0.25591618, -0.97987565], [-0.2431585, -1.34797656], [1.57805536, -0.26467537], [1.23762306, 0.94616712], [0.29441229, -0.78921128], [-1.33799634, -1.65173241], [-0.24854348, -0.14145806]]) q_cam_indexed_noisy = ref_q_cam_indexed + q_cam_noise observations = nps.glue(q_cam_indexed_noisy, nps.transpose( np.ones((q_cam_indexed_noisy.shape[0], ))), axis=-1) #print(repr((np.random.random(ref_extrinsics_rt_fromref.shape)-0.5)/10)) extrinsics_rt_fromref_noise = \ np.array([[-0.00781127, -0.04067386, -0.01039731, 0.02057068, -0.0461704 , 0.02112582], [-0.02466267, -0.01445134, -0.01290107, -0.01956848, 0.04604318, 0.0439563 ], [-0.02335697, 0.03171099, -0.00900416, -0.0346394 , -0.0392821 , 0.03892269], [ 0.00229462, -0.01716853, 0.01336239, -0.0228473 , -0.03919978, 0.02671576], [ 0.03782446, -0.016981 , 0.03949906, -0.03256744, 0.02496247, 0.02924358]]) extrinsics_rt_fromref_noisy = ref_extrinsics_rt_fromref * ( 1.0 + extrinsics_rt_fromref_noise) return extrinsics_rt_fromref_noisy, points_noisy, observations
def image_transformation_map(model_from, model_to, intrinsics_only=False, distance=None, plane_n=None, plane_d=None, mask_valid_intrinsics_region_from=False): r'''Compute a reprojection map between two models SYNOPSIS model_orig = mrcal.cameramodel("xxx.cameramodel") image_orig = cv2.imread("image.jpg") model_pinhole = mrcal.pinhole_model_for_reprojection(model_orig, fit = "corners") mapxy = mrcal.image_transformation_map(model_orig, model_pinhole, intrinsics_only = True) image_undistorted = mrcal.transform_image(image_orig, mapxy) # image_undistorted is now a pinhole-reprojected version of image_orig Returns the transformation that describes a mapping - from pixel coordinates of an image of a scene observed by model_to - to pixel coordinates of an image of the same scene observed by model_from This transformation can then be applied to a whole image by calling mrcal.transform_image(). This function returns a transformation map in an (Nheight,Nwidth,2) array. The image made by model_to will have shape (Nheight,Nwidth). Each pixel (x,y) in this image corresponds to a pixel mapxy[y,x,:] in the image made by model_from. One application of this function is to validate the models in a stereo pair. For instance, reprojecting one camera's image at distance=infinity should produce exactly the same image that is observed by the other camera when looking at very far objects, IF the intrinsics and rotation are correct. If the images don't line up well, we know that some part of the models is off. Similarly, we can use big planes (such as observations of the ground) and plane_n, plane_d to validate. This function has several modes of operation: - intrinsics, extrinsics Used if not intrinsics_only and \ plane_n is None and \ plane_d is None This is the default. For each pixel in the output, we use the full model to unproject a given distance out, and then use the full model to project back into the other camera. - intrinsics only Used if intrinsics_only and \ plane_n is None and \ plane_d is None Similar, but the extrinsics are ignored. We unproject the pixels in one model, and project the into the other camera. The two camera coordinate systems are assumed to line up perfectly - plane Used if plane_n is not None and plane_d is not None We map observations of a given plane in camera FROM coordinates coordinates to where this plane would be observed by camera TO. We unproject each pixel in one camera, compute the intersection point with the plane, and project that intersection point back to the other camera. This uses ALL the intrinsics, extrinsics and the plane representation. The plane is represented by a normal vector plane_n, and the distance to the normal plane_d. The plane is all points p such that inner(p,plane_n) = plane_d. plane_n does not need to be normalized; any scaling is compensated in plane_d. ARGUMENTS - model_from: the mrcal.cameramodel object describing the camera used to capture the input image. We always use the intrinsics. if not intrinsics_only: we use the extrinsics also - model_to: the mrcal.cameramodel object describing the camera that would have captured the image we're producing. We always use the intrinsics. if not intrinsics_only: we use the extrinsics also - intrinsics_only: optional boolean, defaulting to False. If False: we respect the relative transformation in the extrinsics of the camera models. - distance: optional value, defaulting to None. Used only if not intrinsics_only. We reproject points in space a given distance out. 
If distance is None (the default), we look out to infinity. This is equivalent to using only the rotation component of the extrinsics, ignoring the translation. - plane_n: optional numpy array of shape (3,); None by default. If given, we produce a transformation to map observations of a given plane to the same pixels in the source and target images. This argument describes the normal vector in the coordinate system of model_from. The plane is all points p such that inner(p,plane_n) = plane_d. plane_n does not need to be normalized; any scaling is compensated in plane_d. If given, plane_d should be given also, and intrinsics_only should be False. if given, we use the full intrinsics and extrinsics of both camera models - plane_d: optional floating-point value; None by default. If given, we produce a transformation to map observations of a given plane to the same pixels in the source and target images. The plane is all points p such that inner(p,plane_n) = plane_d. plane_n does not need to be normalized; any scaling is compensated in plane_d. If given, plane_n should be given also, and intrinsics_only should be False. if given, we use the full intrinsics and extrinsics of both camera models - mask_valid_intrinsics_region_from: optional boolean defaulting to False. If True, points outside the valid-intrinsics region in the FROM image are set to black, and thus do not appear in the output image RETURNED VALUE A numpy array of shape (Nheight,Nwidth,2) where Nheight and Nwidth represent the imager dimensions of model_to. This array contains 32-bit floats, as required by cv2.remap() (the function providing the internals of mrcal.transform_image()). This array can be passed to mrcal.transform_image() ''' if (plane_n is None and plane_d is not None) or \ (plane_n is not None and plane_d is None): raise Exception( "plane_n and plane_d should both be None or neither should be None" ) if plane_n is not None and \ intrinsics_only: raise Exception( "We're looking at remapping a plane (plane_d, plane_n are not None), so intrinsics_only should be False" ) if distance is not None and \ (plane_n is not None or intrinsics_only): raise Exception( "'distance' makes sense only without plane_n/plane_d and without intrinsics_only" ) if intrinsics_only: Rt_to_from = None else: Rt_to_from = mrcal.compose_Rt(model_to.extrinsics_Rt_fromref(), model_from.extrinsics_Rt_toref()) lensmodel_from, intrinsics_data_from = model_from.intrinsics() lensmodel_to, intrinsics_data_to = model_to.intrinsics() if re.match("LENSMODEL_OPENCV",lensmodel_from) and \ lensmodel_to == "LENSMODEL_PINHOLE" and \ plane_n is None and \ not mask_valid_intrinsics_region_from and \ distance is None: # This is a common special case. This branch works identically to the # other path (the other side of this "if" can always be used instead), # but the opencv-specific code is optimized and at one point ran faster # than the code on the other side. # # The mask_valid_intrinsics_region_from isn't implemented in this path. 
# It COULD be, then this faster path could be used import cv2 fxy_from = intrinsics_data_from[0:2] cxy_from = intrinsics_data_from[2:4] cameraMatrix_from = np.array( ((fxy_from[0], 0, cxy_from[0]), (0, fxy_from[1], cxy_from[1]), (0, 0, 1))) fxy_to = intrinsics_data_to[0:2] cxy_to = intrinsics_data_to[2:4] cameraMatrix_to = np.array( ((fxy_to[0], 0, cxy_to[0]), (0, fxy_to[1], cxy_to[1]), (0, 0, 1))) output_shape = model_to.imagersize() distortion_coeffs = intrinsics_data_from[4:] if Rt_to_from is not None: R_to_from = Rt_to_from[:3, :] if np.trace(R_to_from) > 3. - 1e-12: R_to_from = None # identity, so I pass None else: R_to_from = None return nps.glue( *[ nps.dummy(arr,-1) for arr in \ cv2.initUndistortRectifyMap(cameraMatrix_from, distortion_coeffs, R_to_from, cameraMatrix_to, tuple(output_shape), cv2.CV_32FC1)], axis = -1) W_from, H_from = model_from.imagersize() W_to, H_to = model_to.imagersize() # shape: (Nheight,Nwidth,2). Contains (x,y) rows grid = np.ascontiguousarray(nps.mv( nps.cat(*np.meshgrid(np.arange(W_to), np.arange(H_to))), 0, -1), dtype=float) v = mrcal.unproject(grid, lensmodel_to, intrinsics_data_to) if plane_n is not None: R_to_from = Rt_to_from[:3, :] t_to_from = Rt_to_from[3, :] # The homography definition. Derived in many places. For instance in # "Motion and structure from motion in a piecewise planar environment" # by Olivier Faugeras, F. Lustman. A_to_from = plane_d * R_to_from + nps.outer(t_to_from, plane_n) A_from_to = np.linalg.inv(A_to_from) v = nps.matmult(v, nps.transpose(A_from_to)) else: if Rt_to_from is not None: if distance is not None: v = mrcal.transform_point_Rt( mrcal.invert_Rt(Rt_to_from), v / nps.dummy(nps.mag(v), -1) * distance) else: R_to_from = Rt_to_from[:3, :] v = nps.matmult(v, R_to_from) mapxy = mrcal.project(v, lensmodel_from, intrinsics_data_from) if mask_valid_intrinsics_region_from: # Using matplotlib to compute the out-of-bounds points. It doesn't # support broadcasting, so I do that manually with a clump/reshape from matplotlib.path import Path region = Path(model_from.valid_intrinsics_region()) is_inside = region.contains_points(nps.clump(mapxy, n=2)).reshape( mapxy.shape[:2]) mapxy[~is_inside, :] = -1 return mapxy.astype(np.float32)
[0.9447233, 6.8439095, 9.6958398]]) R = Rt[:3, :] t = Rt[3, :] noise = np.array([[0.00035356, 0.00043613, 0.00006606], [0.00043968, 0.00043783, 0.00060678], [0.00063803, 0.00024423, 0.00010871], [0.00004966, 0.00053377, 0.00018905], [0.00007708, 0.00023529, 0.0002229], [0.00090558, 0.00072379, 0.00004062], [0.00072059, 0.00074467, 0.00044128], [0.00024228, 0.00058201, 0.00041458], [0.00018121, 0.00078172, 0.00016128], [0.00019021, 0.00001371, 0.00096808]]) Tp = nps.matmult(p, nps.transpose(R)) + t Rt_fit = \ mrcal.align_procrustes_points_Rt01(Tp + noise, p) R_fit = Rt_fit[:3, :] t_fit = Rt_fit[3, :] testutils.confirm_equal(R_fit, R, eps=1e-2, msg='Procrustes fit R') testutils.confirm_equal(t_fit, t, eps=1e-2, msg='Procrustes fit t') R_fit_vectors = \ mrcal.align_procrustes_vectors_R01(nps.matmult( p, nps.transpose(R) ) + noise, p) testutils.confirm_equal(R_fit_vectors, R, eps=1e-2, msg='Procrustes fit R (vectors)')
p_triangulated_sampled0 = triangulate_nograd( intrinsics_sampled[..., icam0, :], intrinsics_sampled[..., icam1, :], extrinsics_sampled_cam0, models_baseline[icam0].extrinsics_rt_fromref(), extrinsics_sampled_cam1, frames_sampled, baseline_rt_ref_frame, q_sampled, lensmodel, stabilize_coords=args.stabilize_coords) ranges = nps.mag(p_triangulated0) ranges_true = nps.mag(p_triangulated_true0) ranges_sampled = nps.transpose(nps.mag(p_triangulated_sampled0)) mean_ranges_sampled = ranges_sampled.mean(axis=-1) Var_ranges_sampled = ranges_sampled.var(axis=-1) # r = np.mag(p) # dr_dp = p/r # Var(r) = dr_dp var(p) dr_dpT # = p var(p) pT / norm2(p) Var_ranges_joint = np.zeros((Npoints, ), dtype=float) Var_ranges_calibration = np.zeros((Npoints, ), dtype=float) Var_ranges_observations = np.zeros((Npoints, ), dtype=float) for ipt in range(Npoints): Var_ranges_joint[ipt] = \ nps.matmult(p_triangulated0[ipt], Var_p_joint[ipt,:,ipt,:], nps.transpose(p_triangulated0[ipt]))[0] / nps.norm2(p_triangulated0[ipt]) Var_ranges_calibration[ipt] = \
###########################################################################
# We're supposed to be at the optimum. E = norm2(x) ~ norm2(x0 + J dp) =
# norm2(x0) + 2 dpt Jt x0 + norm2(J dp). At the optimum Jt x0 = 0 -> E =
# norm2(x0) + norm2(J dp). dE = norm2(J dp) = norm2(dx_predicted)
x_predicted  = x0 + dx_predicted
dE           = nps.norm2(x1) - nps.norm2(x0)
dE_predicted = nps.norm2(dx_predicted)
testutils.confirm_equal( dE_predicted, dE,
                         eps=1e-3,
                         relative=True,
                         msg="diff(E) predicted")

# At the optimum dE/dp = 0 -> xtJ = 0
xtJ0 = nps.inner(nps.transpose(J0), x0)
mrcal.pack_state(xtJ0, **optimization_inputs)
testutils.confirm_equal( xtJ0, 0,
                         eps=1.5e-2,
                         worstcase=True,
                         msg="dE/dp = 0 at the optimum: original")

###########################################################################
# I perturb my input observation vector qref by dqref.
noise_for_gradients = 1e-3

dqref, observations_perturbed = sample_dqref(baseline['observations_board'],
                                             noise_for_gradients)

optimization_inputs = copy.deepcopy(baseline)
optimization_inputs['observations_board'] = observations_perturbed
def unproject(q, lensmodel, intrinsics_data, normalize = False, get_gradients = False, out = None): r'''Unprojects pixel coordinates to observation vectors SYNOPSIS # q is a (...,2) array of pixel observations v = mrcal.unproject( q, lensmodel, intrinsics_data ) ### OR ### m = mrcal.cameramodel(...) v = mrcal.unproject( q, *m.intrinsics() ) Maps a set of 2D imager points q to a set of 3D vectors in camera coordinates that produced these pixel observations. Each 3D vector is unique only up-to-length, and the returned vectors aren't normalized by default. The default length of the returned vector is arbitrary, and selected for the convenience of the implementation. Pass normalize=True to always return unit vectors. This is the "reverse" direction, so an iterative nonlinear optimization is performed internally to compute this result. This is much slower than mrcal_project. For OpenCV distortions specifically, OpenCV has cvUndistortPoints() (and cv2.undistortPoints()), but these are inaccurate and we do not use them: https://github.com/opencv/opencv/issues/8811 Gradients are available by passing get_gradients=True. Since unproject() is implemented as an iterative solve around project(), the unproject() gradients are computed by manipulating the gradients reported by project() at the solution. The reported gradients are relative to whatever unproject() is reporting; the unprojection is unique only up-to-length, and the magnitude isn't fixed. So the gradients may include a component in the direction of the returned observation vector: this follows the arbitrary scaling used by unproject(). It is possible to pass normalize=True; we then return NORMALIZED observation vectors and the gradients of those NORMALIZED vectors. In that case, those gradients are guaranteed to be orthogonal to the observation vector. The vector normalization involves a bit more computation, so it isn't the default. NOTE: THE MAGNITUDE OF THE RETURNED VECTOR CHANGES IF get_gradients CHANGES. The reported gradients are correct relative to the output returned with get_gradients=True. Passing normalize=True can be used to smooth this out: unproject(..., normalize=True) returns the same vectors as unproject(..., normalize=True, get_gradients=True)[0] Broadcasting is fully supported across q and intrinsics_data. Models that have no gradients available cannot use mrcal_unproject() in C, but CAN still use this mrcal.unproject() Python routine: a slower routine is employed that uses numerical differences instead of analytical gradients. ARGUMENTS - q: array of dims (...,2); the pixel coordinates we're unprojecting - lensmodel: a string such as LENSMODEL_PINHOLE LENSMODEL_OPENCV4 LENSMODEL_CAHVOR LENSMODEL_SPLINED_STEREOGRAPHIC_order=3_Nx=16_Ny=12_fov_x_deg=100 - intrinsics_data: array of dims (Nintrinsics): (focal_x, focal_y, center_pixel_x, center_pixel_y, distortion0, distortion1, ...) The focal lengths are given in pixels. - normalize: optional boolean defaults to False. If True: normalize the output vectors - get_gradients: optional boolean that defaults to False. Whether we should compute and report the gradients. This affects what we return (see below). If not normalize, the magnitude of the reported vectors changes if get_gradients is turned on/off (see above) - out: optional argument specifying the destination. By default, new numpy array(s) are created and returned. To write the results into existing arrays, specify them with the 'out' kwarg. If not get_gradients: 'out' is the one numpy array we will write into. 
Else: 'out' is a tuple of all the output numpy arrays. If 'out' is given, we return the same arrays passed in. This is the standard behavior provided by numpysane_pywrap. RETURNED VALUE if not get_gradients: we return an (...,3) array of unprojected observation vectors. Not normalized by default; see description above if get_gradients: we return a tuple: - (...,3) array of unprojected observation vectors - (...,3,2) array of gradients of unprojected observation vectors in respect to pixel coordinates - (...,3,Nintrinsics) array of gradients of unprojected observation vectors in respect to the intrinsics ''' def apply_normalization_to_output_with_gradients(v,dv_dq,dv_di): # vn = v/mag(v) # dvn = dv (1/mag(v)) + v d(1/mag(v)) # = dv( 1/mag(v) - v vt / mag^3(v) ) # = dv( 1/mag(v) - vn vnt / mag(v) ) # = dv/mag(v) ( 1 - vn vnt ) # v has shape (...,3) # dv_dq has shape (...,3,2) # dv_di has shape (...,3,N) # shape (...,1) magv_recip = 1. / nps.dummy(nps.mag(v), -1) v *= magv_recip # shape (...,1,1) magv_recip = nps.dummy(magv_recip,-1) dv_dq *= magv_recip dv_dq -= nps.xchg( nps.matmult( nps.dummy(nps.xchg(dv_dq, -1,-2), -2), nps.dummy(nps.outer(v,v),-3) )[...,0,:], -1, -2) dv_di *= magv_recip dv_di -= nps.xchg( nps.matmult( nps.dummy(nps.xchg(dv_di, -1,-2), -2), nps.dummy(nps.outer(v,v),-3) )[...,0,:], -1, -2) # First, handle some trivial cases. I don't want to run the # optimization-based unproject() if I don't have to if lensmodel == 'LENSMODEL_PINHOLE' or \ lensmodel == 'LENSMODEL_LONLAT' or \ lensmodel == 'LENSMODEL_LATLON' or \ lensmodel == 'LENSMODEL_STEREOGRAPHIC': if lensmodel == 'LENSMODEL_PINHOLE': func = mrcal.unproject_pinhole always_normalized = False elif lensmodel == 'LENSMODEL_LONLAT': func = mrcal.unproject_lonlat always_normalized = True elif lensmodel == 'LENSMODEL_LATLON': func = mrcal.unproject_latlon always_normalized = True elif lensmodel == 'LENSMODEL_STEREOGRAPHIC': func = mrcal.unproject_stereographic always_normalized = False if not get_gradients: v = func(q, intrinsics_data, out = out) if normalize and not always_normalized: v /= nps.dummy(nps.mag(v), axis=-1) return v # shapes (...,2) fxy = intrinsics_data[..., :2] cxy = intrinsics_data[..., 2:] # shapes (...,3) and (...,3,2) v, dv_dq = \ func(q, intrinsics_data, get_gradients = True, out = None if out is None else (out[0],out[1])) # q = f l(v) + c # l(v) = (q-c)/f # # dl/dv dv/df = (c-q) / f^2 # dl/dv dv/dq = 1/f # -> dl/dv = 1 / ( f dv/dq ) # -> dv/df = (c-q) / (f^2 dl/dv) = (c-q) dv/dq / f # # dl/dv dv/dc = -1/f # -> dv/dc = -1 / (f dl/dv) = -1 / (f /( f dv/dq )) = -dv/dq dv_di_shape = dv_dq.shape[:-1] + (4,) if out is None: dv_di = np.zeros( dv_di_shape, dtype=float) else: if not (out[2].shape[-len(dv_di_shape):] == dv_di_shape and \ not any(np.array(out[2].shape[:-len(dv_di_shape)]) - 1)): raise Exception(f"Shape of out[2] doesn't match broadcasted shape for dv_di. Wanted {dv_di_shape}, but got {out[2].shape}") dv_di = out[2] dv_di *= 0 # dv/df dv_di[..., :2] += nps.dummy((cxy - q)/fxy, -2) * dv_dq # dv/dc dv_di[..., 2:] -= dv_dq if normalize and not always_normalized: apply_normalization_to_output_with_gradients(v,dv_dq,dv_di) return v,dv_dq,dv_di try: meta = mrcal.lensmodel_metadata_and_config(lensmodel) except: raise Exception(f"Invalid lens model '{lensmodel}': couldn't get the metadata") if meta['has_gradients']: # Main path. We have gradients. 
# # Internal function must have a different argument order so # that all the broadcasting stuff is in the leading arguments if not get_gradients: v = mrcal._mrcal_npsp._unproject(q, intrinsics_data, lensmodel=lensmodel, out=out) if normalize: # Explicitly handle nan and inf to set their normalized values # to 0. Otherwise I get a scary-looking warning from numpy i_vgood = \ np.isfinite(v[...,0]) * \ np.isfinite(v[...,1]) * \ np.isfinite(v[...,2]) v[~i_vgood] = np.array((0.,0.,1.)) v /= nps.dummy(nps.mag(v), -1) v[~i_vgood] = np.array((0.,0.,0.)) return v # We need to report gradients vs = mrcal._mrcal_npsp._unproject(q, intrinsics_data, lensmodel=lensmodel) # I have no gradients available for unproject(), and I need to invert a # non-square matrix to use the gradients from project(). I deal with this # with a stereographic mapping # # With a simple unprojection I have q -> v # Instead I now do q -> vs -> u -> v # I reproject vs, to produce a scaled v = k*vs. I'm assuming all # projections are central, so vs represents q just as well as v does. u # is a 2-vector, so dq_du is (2x2), and I can invert it u = mrcal.project_stereographic(vs) dv_du = np.zeros( vs.shape + (2,), dtype=float) v, dv_du = \ mrcal.unproject_stereographic(u, get_gradients = True, out = (vs if out is None else out[0], dv_du)) _,dq_dv,dq_di = mrcal.project(v, lensmodel, intrinsics_data, get_gradients = True) # shape (..., 2,2). Square. Invertible! dq_du = nps.matmult( dq_dv, dv_du ) # dv/dq = dv/du du/dq = # = dv/du inv(dq/du) # = transpose(inv(transpose(dq/du)) transpose(dv/du)) dv_dq = nps.transpose(np.linalg.solve( nps.transpose(dq_du), nps.transpose(dv_du) )) if out is not None: out[1] *= 0. out[1] += dv_dq dv_dq = out[1] # dv/di is a bit different. I have (q,i) -> v. I want to find out # how moving i affects v while keeping q constant. Taylor expansion # of projection: q = q0 + dq/dv dv + dq/di di. q is constant so # dq/dv dv + dq/di di = 0 -> dv/di = - dv/dq dq/di dv_di = nps.matmult(dv_dq, dq_di, out = None if out is None else out[2]) dv_di *= -1. if normalize: apply_normalization_to_output_with_gradients(v,dv_dq,dv_di) return v, dv_dq, dv_di # No projection gradients implemented in C. We should get here approximately # never. At this time, the only projection function that has no gradients # implemented is LENSMODEL_CAHVORE, which nobody is really expected to be # using. If these see use, real gradients should be implemented # # We compute the gradients numerically. This is a reimplementation of the C # code. It's barely maintained, and here for legacy compatibility only if get_gradients: raise Exception(f"unproject(..., get_gradients=True) is unsupported for models with no gradients, such as '{lensmodel}'") if q is None: return q if q.size == 0: s = q.shape return np.zeros(s[:-1] + (3,)) if out is not None: raise Exception(f"unproject(..., out) is unsupported if out is not None and we're using a model with no gradients, such as '{lensmodel}'") fxy = intrinsics_data[..., :2] cxy = intrinsics_data[..., 2:4] # undistort the q, by running an optimizer import scipy.optimize # I optimize each point separately because the internal optimization # algorithm doesn't know that each point is independent, so if I optimized # it all together, it would solve a dense linear system whose size is linear # in Npoints. 
The computation time thus would be much slower than # linear(Npoints) @nps.broadcast_define( ((2,),), ) def undistort_this(q0): def cost_no_gradients(vxy, *args, **kwargs): '''Optimization functions''' return \ mrcal.project(np.array((vxy[0],vxy[1],1.)), lensmodel, intrinsics_data) - \ q0 # seed assuming distortions aren't there vxy_seed = (q0 - cxy) / fxy # no gradients available result = scipy.optimize.least_squares(cost_no_gradients, vxy_seed, '3-point') vxy = result.x # This needs to be precise; if it isn't, I barf. Shouldn't happen # very often if np.sqrt(result.cost/2.) > 1e-3: if not unproject.__dict__.get('already_complained'): sys.stderr.write("WARNING: unproject() wasn't able to precisely compute some points. Returning nan for those. Will complain just once\n") unproject.already_complained = True return np.array((np.nan,np.nan)) return vxy vxy = undistort_this(q) # I append a 1. shape = (..., 3) v = nps.glue(vxy, np.ones( vxy.shape[:-1] + (1,) ), axis=-1) if normalize: v /= nps.dummy(nps.mag(v), -1) return v
def check_uncertainties_at(q0_baseline, idistance): distance = args.distances[idistance] # distance of "None" means I'll simulate a large distance, but compare # against a special-case distance of "infinity" if distance is None: distance = 1e5 atinfinity = True distancestr = "infinity" else: atinfinity = False distancestr = str(distance) # shape (Ncameras,3) p_cam_baseline = mrcal.unproject( q0_baseline, lensmodel, intrinsics_baseline, normalize=True) * distance # shape (Nsamples, Ncameras, 2) q_sampled = \ reproject_perturbed(q0_baseline, distance, intrinsics_baseline, extrinsics_baseline_mounted, frames_baseline, calobject_warp_baseline, intrinsics_sampled, extrinsics_sampled_mounted, frames_sampled, calobject_warp_sampled) # shape (Ncameras, 2) q_sampled_mean = np.mean(q_sampled, axis=-3) # shape (Ncameras, 2,2) Var_dq_observed = np.mean(nps.outer(q_sampled - q_sampled_mean, q_sampled - q_sampled_mean), axis=-4) # shape (Ncameras) worst_direction_stdev_observed = mrcal.worst_direction_stdev( Var_dq_observed) # shape (Ncameras, 2,2) Var_dq = \ nps.cat(*[ mrcal.projection_uncertainty( \ p_cam_baseline[icam], atinfinity = atinfinity, model = models_baseline[icam]) \ for icam in range(args.Ncameras) ]) # shape (Ncameras) worst_direction_stdev_predicted = mrcal.worst_direction_stdev(Var_dq) # q_sampled should be evenly distributed around q0_baseline. I can make eps # as tight as I want by increasing Nsamples testutils.confirm_equal( nps.mag(q_sampled_mean - q0_baseline), 0, eps=0.3, worstcase=True, msg= f"Sampled projections cluster around the sample point at distance = {distancestr}" ) # I accept 20% error. This is plenty good-enough. And I can get tighter matches # if I grab more samples testutils.confirm_equal( worst_direction_stdev_observed, worst_direction_stdev_predicted, eps=0.2, worstcase=True, relative=True, msg= f"Predicted worst-case projections match sampled observations at distance = {distancestr}" ) # I now compare the variances. The cross terms have lots of apparent error, # but it's more meaningful to compare the eigenvectors and eigenvalues, so I # just do that # First, the thing is symmetric, right? testutils.confirm_equal( nps.transpose(Var_dq), Var_dq, worstcase=True, msg=f"Var(dq) is symmetric at distance = {distancestr}") for icam in range(args.Ncameras): l_predicted, v = sorted_eig(Var_dq[icam]) v0_predicted = v[:, 0] l_observed, v = sorted_eig(Var_dq_observed[icam]) v0_observed = v[:, 0] testutils.confirm_equal( l_observed, l_predicted, eps=0.35, # high error tolerance. Nsamples is too low for better worstcase=True, relative=True, msg= f"Var(dq) eigenvalues match for camera {icam} at distance = {distancestr}" ) if icam == 3: # I only check the eigenvectors for camera 3. The other cameras have # isotropic covariances, so the eigenvectors aren't well defined. If # one isn't isotropic for some reason, the eigenvalue check will # fail testutils.confirm_equal( np.arcsin(nps.mag(np.cross(v0_observed, v0_predicted))) * 180. / np.pi, 0, eps=15, # high error tolerance. Nsamples is too low for better worstcase=True, msg= f"Var(dq) eigenvectors match for camera {icam} at distance = {distancestr}" ) # I don't bother checking v1. I already made sure the matrix is # symmetric. Thus the eigenvectors are orthogonal, so any angle offset # in v0 will be exactly the same in v1 return q_sampled, Var_dq
prect0 = mrcal.transform_point_Rt( mrcal.invert_Rt(Rt_cam0_rect), pcam0) prect1 = prect0 - Rt01_rectified[3,:] qrect0 = mrcal.project(prect0, *models_rectified[0].intrinsics()) qrect1 = mrcal.project(prect1, *models_rectified[1].intrinsics()) Naz,Nel = models_rectified[0].imagersize() row = np.arange(Naz, dtype=float) col = np.arange(Nel, dtype=float) rectification_maps = mrcal.rectification_maps((model0,model1), models_rectified) interp_rectification_map0x = \ scipy.interpolate.RectBivariateSpline(row, col, nps.transpose(rectification_maps[0][...,0])) interp_rectification_map0y = \ scipy.interpolate.RectBivariateSpline(row, col, nps.transpose(rectification_maps[0][...,1])) interp_rectification_map1x = \ scipy.interpolate.RectBivariateSpline(row, col, nps.transpose(rectification_maps[1][...,0])) interp_rectification_map1y = \ scipy.interpolate.RectBivariateSpline(row, col, nps.transpose(rectification_maps[1][...,1])) if lensmodel == 'LENSMODEL_LATLON': qcam0_from_map = \ nps.transpose( nps.cat( interp_rectification_map0x(*nps.transpose(qrect0), grid=False), interp_rectification_map0y(*nps.transpose(qrect0), grid=False) ) ) qcam1_from_map = \
################################
# some 3d stuff
################################

# gp.plot a sphere
gp.plot3d(x_3d, y_3d, z_3d,
          _with='points',
          title='sphere',
          square=True,
          legend='sphere',
          wait=1)

# sphere, ellipse together
gp.plot3d((x_3d * nps.transpose(np.array([[1, 2]])),
           y_3d * nps.transpose(np.array([[1, 2]])),
           z_3d,
           dict(legend=np.array(('sphere', 'ellipse')))),
          title='sphere, ellipse',
          square=True,
          _with='points',
          wait=1)

# similar, written to a png
gp.plot3d((x_3d * nps.transpose(np.array([[1, 2]])),
           y_3d * nps.transpose(np.array([[1, 2]])),
           z_3d,
           dict(legend=np.array(('sphere', 'ellipse')))),
          title='sphere, ellipse',
          square=True,
          _with='points',
          hardcopy='spheres.png',
Jsparse = csr_matrix((data, indices, indptr))
Jdense  = Jsparse.toarray()
Jdense_ref = \
    np.array(((1, 0, 2),
              (0, 0, 3),
              (4, 5, 6),
              (0, 7, 8)), dtype=float)
testutils.confirm_equal(Jdense, Jdense_ref,
                        relative  = True,
                        worstcase = True,
                        eps       = 1e-6,
                        msg       = "csr_matrix representation works as expected")

bt = np.array(((1., 5., 3.),
               (2., -2., -8)))

F  = mrcal.CHOLMOD_factorization(Jsparse)
xt = F.solve_xt_JtJ_bt(bt)

JtJ    = nps.matmult(nps.transpose(Jdense), Jdense)
xt_ref = nps.transpose(np.linalg.solve(JtJ, nps.transpose(bt)))

testutils.confirm_equal(xt, xt_ref,
                        relative  = True,
                        worstcase = True,
                        eps       = 1e-6,
                        msg       = "solve_xt_JtJ_bt produces the correct result")

testutils.finish()
def _write(f, m, note=None): r'''Writes a cameramodel as a .cahvor to a writeable file object''' if note is not None: for l in note.splitlines(): f.write('# ' + l + '\n') d = m.imagersize() f.write('Dimensions = {} {}\n'.format(int(d[0]), int(d[1]))) lensmodel,intrinsics = m.intrinsics() if lensmodel == 'LENSMODEL_CAHVOR': f.write("Model = CAHVOR = perspective, distortion\n") elif re.match('LENSMODEL_(OPENCV.*|PINHOLE)', lensmodel): f.write("Model = CAHV = perspective, linear\n") else: match = re.match('^LENSMODEL_CAHVORE_linearity=([0-9\.]+)$', lensmodel) if match is not None: f.write("Model = CAHVORE3,{} = general\n".format(match.group(1))) else: raise Exception("Don't know how to handle lens model '{}'".format(lensmodel)) fx,fy,cx,cy = intrinsics[:4] Rt_toref = m.extrinsics_Rt_toref() R_toref = Rt_toref[:3,:] t_toref = Rt_toref[ 3,:] C = t_toref A = R_toref[:,2] Hp = R_toref[:,0] Vp = R_toref[:,1] H = fx*Hp + A*cx V = fy*Vp + A*cy f.write(("{} =" + (" {:15.10f}" * 3) + "\n").format('C', *C)) f.write(("{} =" + (" {:15.10f}" * 3) + "\n").format('A', *A)) f.write(("{} =" + (" {:15.10f}" * 3) + "\n").format('H', *H)) f.write(("{} =" + (" {:15.10f}" * 3) + "\n").format('V', *V)) if re.match('^LENSMODEL_CAHVOR', lensmodel): # CAHVOR(E) alpha,beta,R0,R1,R2 = intrinsics[4:9] s_al,c_al,s_be,c_be = np.sin(alpha),np.cos(alpha),np.sin(beta),np.cos(beta) O = nps.matmult( R_toref, nps.transpose(np.array(( s_al*c_be, s_be, c_al*c_be ), dtype=float)) ).ravel() R = np.array((R0, R1, R2), dtype=float) f.write(("{} =" + (" {:15.10f}" * 3) + "\n").format('O', *O)) f.write(("{} =" + (" {:15.10f}" * 3) + "\n").format('R', *R)) if re.match('^LENSMODEL_CAHVORE', lensmodel): E = intrinsics[9:] f.write(("{} =" + (" {:15.10f}" * 3) + "\n").format('E', *E)) elif re.match('LENSMODEL_OPENCV*', lensmodel): Ndistortions = mrcal.lensmodel_num_params(lensmodel) - 4 f.write(("{} =" + (" {:15.10f}" * Ndistortions) + "\n").format(lensmodel, *intrinsics[4:])) elif lensmodel == 'LENSMODEL_PINHOLE': # the CAHV values we already wrote are all that's needed pass else: raise Exception(f"Cannot write lens model '{lensmodel}' to a .cahvor file. I only support PINHOLE, CAHVOR(E) and OPENCV model") c = m.valid_intrinsics_region() if c is not None: f.write("VALID_INTRINSICS_REGION = ") np.savetxt(f, c.ravel(), fmt='%.2f', newline=' ') f.write('\n') Hs,Vs,Hc,Vc = intrinsics[:4] f.write("Hs = {}\n".format(Hs)) f.write("Hc = {}\n".format(Hc)) f.write("Vs = {}\n".format(Vs)) f.write("Vc = {}\n".format(Vc)) f.write("# this is hard-coded\nTheta = {} (-90.0 deg)\n".format(-np.pi/2)) return True
def _read(s, name): r'''Reads a .cahvor file into a cameramodel The input is the .cahvor file contents as a string''' re_f = '[-+]?(?:\d+(?:\.\d*)?|\.\d+)(?:[eE][-+]?\d+)?' re_u = '\d+' re_d = '[-+]?\d+' re_s = '.+' # I parse all key=value lines into my dict as raw text. Further I # post-process some of these raw lines. x = {} for l in s.splitlines(): if re.match('^\s*#|^\s*$', l): continue m = re.match('\s*(\w+)\s*=\s*(.+?)\s*\n?$', l, flags=re.I) if m: key = m.group(1) if key in x: raise Exception("Reading '{}': key '{}' seen more than once".format(name, m.group(1))) value = m.group(2) # for compatibility if re.match('^DISTORTION', key): key = key.replace('DISTORTION', 'LENSMODEL') x[key] = value # Done reading. Any values that look like numbers, I convert to numbers. for i in x: if re.match('{}$'.format(re_f), x[i]): x[i] = float(x[i]) # I parse the fields I know I care about into numpy arrays for i in ('Dimensions','C','A','H','V','O','R','E', 'LENSMODEL_OPENCV4', 'LENSMODEL_OPENCV5', 'LENSMODEL_OPENCV8', 'LENSMODEL_OPENCV12', 'VALID_INTRINSICS_REGION'): if i in x: # Any data that's composed only of digits and whitespaces (no "."), # use integers if re.match('[0-9\s]+$', x[i]): totype = int else: totype = float x[i] = np.array( [ totype(v) for v in re.split('\s+', x[i])], dtype=totype) # Now I sanity-check the results and call it done for k in ('Dimensions','C','A','H','V'): if not k in x: raise Exception("Cahvor file '{}' incomplete. Missing values for: {}". format(name, k)) is_cahvor_or_cahvore = False if 'LENSMODEL_OPENCV12' in x: distortions = x["LENSMODEL_OPENCV12"] lensmodel = 'LENSMODEL_OPENCV12' elif 'LENSMODEL_OPENCV8' in x: distortions = x["LENSMODEL_OPENCV8"] lensmodel = 'LENSMODEL_OPENCV8' elif 'LENSMODEL_OPENCV5' in x: distortions = x["LENSMODEL_OPENCV5"] lensmodel = 'LENSMODEL_OPENCV5' elif 'LENSMODEL_OPENCV4' in x: distortions = x["LENSMODEL_OPENCV4"] lensmodel = 'LENSMODEL_OPENCV4' elif 'R' not in x: distortions = np.array(()) lensmodel = 'LENSMODEL_PINHOLE' else: is_cahvor_or_cahvore = True if 'VALID_INTRINSICS_REGION' in x: x['VALID_INTRINSICS_REGION'] = \ x['VALID_INTRINSICS_REGION'].reshape( len(x['VALID_INTRINSICS_REGION'])//2, 2) # get extrinsics from cahvor if 'Model' not in x: x['Model'] = '' m = re.match('CAHVORE3,([0-9\.e-]+)\s*=\s*general',x['Model']) if m: is_cahvore = True cahvore_linearity = float(m.group(1)) else: is_cahvore = False Hp,Vp = _HVs_HVc_HVp(x)[-2:] R_toref = nps.transpose( nps.cat( Hp, Vp, x['A'] )) t_toref = x['C'] if is_cahvor_or_cahvore: if 'O' not in x: alpha = 0 beta = 0 else: o = nps.matmult( x['O'], R_toref ) alpha = np.arctan2(o[0], o[2]) beta = np.arcsin( o[1] ) if is_cahvore: # CAHVORE if 'E' not in x: raise Exception('Cahvor file {} LOOKS like a cahvore, but lacks the E'.format(name)) R0,R1,R2 = x['R'].ravel() E0,E1,E2 = x['E'].ravel() distortions = np.array((alpha,beta,R0,R1,R2,E0,E1,E2), dtype=float) lensmodel = f'LENSMODEL_CAHVORE_linearity={cahvore_linearity}' else: # CAHVOR if 'E' in x: raise Exception('Cahvor file {} LOOKS like a cahvor, but has an E'.format(name)) if abs(beta) < 1e-8 and \ ( 'R' not in x or np.linalg.norm(x['R']) < 1e-8): # pinhole alpha = 0 beta = 0 else: R0,R1,R2 = x['R'].ravel() if alpha == 0 and beta == 0: distortions = np.array(()) lensmodel = 'LENSMODEL_PINHOLE' else: distortions = np.array((alpha,beta,R0,R1,R2), dtype=float) lensmodel = 'LENSMODEL_CAHVOR' m = mrcal.cameramodel(imagersize = x['Dimensions'].astype(np.int32), intrinsics = (lensmodel, nps.glue( np.array(_fxy_cxy(x), dtype=float), 
distortions, axis = -1)), valid_intrinsics_region = x.get('VALID_INTRINSICS_REGION'), extrinsics_Rt_toref = np.ascontiguousarray(nps.glue(R_toref,t_toref, axis=-2))) return m