def CooksD_query_test(J,p,x, f, query,fquery_ref, CooksD_nox, k_dima, k_cook, i=0): r'''Test the concept of CooksD for querying hypothetical data fquery_test = f(q) isn't true here. If it WERE true, the x of the query point would be 0 (we fit the model exactly), so the outlierness factor would be 0 also ''' # current solve Nmeasurements,Nstate = J.shape query = query [i] fquery_ref = fquery_ref[i] # I add a new point, and reoptimize fquery = func_hypothesis(query,p) xquery = fquery - fquery_ref jquery = model_matrix(query, len(p)) J1 = nps.glue(J, jquery, axis=-2) f1 = nps.glue(f, fquery_ref, axis=-1) p1 = nps.matmult( f1, nps.transpose(np.linalg.pinv(J1))) x1 = nps.matmult(p1, nps.transpose(J1)) - f1 dx = x1[:-1] - x dx_both = x1 - nps.glue(x,xquery, axis=-1) report_mismatch_relerr( nps.inner(dx_both,dx_both)*k_cook, CooksD_nox['self_others'][i]*xquery*xquery, "self_others query-CooksD computed analytically, explicitly") report_mismatch_relerr( nps.inner(dx,dx)*k_cook, CooksD_nox['others'][i]*xquery*xquery, "others query-CooksD computed analytically, explicitly")
def outlierness_test(J, x, f, outlierness, k_dima, k_cook, i=0): r'''Test the computation of outlierness I have an analytical expression for this computed in compute_outliernesses(). This explicitly computes the quantity represented by compute_outliernesses() to make sure that that analytical expression is correct ''' # I reoptimize without measurement i E0 = nps.inner(x,x) J1 = nps.glue(J[:i,:], J[(i+1):,:], axis=-2) f1 = nps.glue(f[:i ], f[(i+1): ], axis=-1) p1 = nps.matmult( f1, nps.transpose(np.linalg.pinv(J1))) x1 = nps.matmult(p1, nps.transpose(J1)) - f1 E1 = nps.inner(x1,x1) report_mismatch_relerr( (E0-E1) * k_dima, outlierness['self_others'][i], "self_others outlierness computed analytically, explicitly") report_mismatch_relerr( (E0-x[i]*x[i] - E1) * k_dima, outlierness['others'][i], "others outlierness computed analytically, explicitly")
def _align_procrustes_points_Rt01(p0, p1, weights):
    r'''Weighted Procrustes alignment of two point clouds

    Finds the rigid transform Rt (a (4,3) array: rotation on top of a
    translation row) that best maps points p1 to points p0 in the
    weighted-least-squares sense. Assumes p0 and p1 have shape (N,3) with
    one point per row, and weights has shape (N,) — TODO confirm against
    callers.
    '''
    p0 = nps.transpose(p0)
    p1 = nps.transpose(p1)

    # I process Mt instead of M to not need to transpose anything later, and to
    # end up with contiguous-memory results
    # Cross-covariance of the mean-subtracted, weighted point sets
    Mt = nps.matmult((p0 - np.mean(p0, axis=-1)[..., np.newaxis]) * weights,
                     nps.transpose(p1 - np.mean(p1, axis=-1)[..., np.newaxis]))
    V, S, Ut = np.linalg.svd(Mt)

    R = nps.matmult(V, Ut)

    # det(R) is now +1 or -1. If it's -1, then this contains a mirror, and thus
    # is not a physical rotation. I compensate by negating the least-important
    # pair of singular vectors
    if np.linalg.det(R) < 0:
        V[:, 2] *= -1
        R = nps.matmult(V, Ut)

    # Now that I have my optimal R, I compute the optimal t. From before:
    #
    #   t = mean(a) - R mean(b)
    t = np.mean(p0, axis=-1)[..., np.newaxis] - nps.matmult(
        R, np.mean(p1, axis=-1)[..., np.newaxis])

    return nps.glue(R, t.ravel(), axis=-2)
def CooksD_test(J, x, f, CooksD, k_dima, k_cook, i=0): r'''Test the computation of Cook's D I have an analytical expression for this computed in compute_outliernesses(). This explicitly computes the quantity represented by compute_outliernesses() to make sure that that analytical expression is correct ''' # I reoptimize without measurement i Nmeasurements,Nstate = J.shape J1 = nps.glue(J[:i,:], J[(i+1):,:], axis=-2) f1 = nps.glue(f[:i ], f[(i+1): ], axis=-1) p1 = nps.matmult( f1, nps.transpose(np.linalg.pinv(J1))) x1 = nps.matmult(p1, nps.transpose(J)) - f dx = x1-x report_mismatch_relerr( nps.inner(dx,dx) * k_cook, CooksD['self_others'][i], "self_others CooksD computed analytically, explicitly") report_mismatch_relerr( (nps.inner(dx,dx) - dx[i]*dx[i]) * k_cook, CooksD['others'][i], "others CooksD computed analytically, explicitly")
def apply_normalization_to_output_with_gradients(v,dv_dq,dv_di):
    r'''Normalize v to unit length, and update its gradients to match

    All three arrays are modified IN PLACE; nothing is returned. The math:

      vn  = v/mag(v)
      dvn = dv (1/mag(v)) + v d(1/mag(v))
          = dv( 1/mag(v) - v vt / mag^3(v) )
          = dv( 1/mag(v) - vn vnt / mag(v) )
          = dv/mag(v) ( 1 - vn vnt )

    Expected shapes (per the comments below):
      v      has shape (...,3)
      dv_dq  has shape (...,3,2)
      dv_di  has shape (...,3,N)
    '''

    # shape (...,1)
    magv_recip = 1. / nps.dummy(nps.mag(v), -1)
    v *= magv_recip    # v is now the unit vector vn

    # shape (...,1,1) — broadcastable against the (...,3,k) gradient arrays
    magv_recip = nps.dummy(magv_recip,-1)

    # dv_dq <- dv_dq/mag(v) - vn (vnt dv_dq/mag(v)); the xchg/dummy dance
    # applies the (1 - vn vnt) projection via broadcasted matmult
    dv_dq *= magv_recip
    dv_dq -= nps.xchg(nps.matmult( nps.dummy(nps.xchg(dv_dq, -1,-2), -2),
                                   nps.dummy(nps.outer(v,v),-3) )[...,0,:],
                      -1, -2)

    # same projection for the gradient with respect to the intrinsics
    dv_di *= magv_recip
    dv_di -= nps.xchg(nps.matmult( nps.dummy(nps.xchg(dv_di, -1,-2), -2),
                                   nps.dummy(nps.outer(v,v),-3) )[...,0,:],
                      -1, -2)
def compose_Rt(Rt0, Rt1):
    r'''Simple reference implementation of Rt composition

    Applying the result is equivalent to applying Rt1 first, then Rt0:

      b = R0 (R1 x + t1) + t0
        = (R0 R1) x + (R0 t1 + t0)
    '''
    R_a, t_a = Rt0[:3, :], Rt0[3, :]
    R_b, t_b = Rt1[:3, :], Rt1[3, :]

    # rotation of the composition: R0 R1
    R_ab = nps.matmult(R_a, R_b)
    # translation: R0 t1 + t0, computed as (t1 R0t) + t0 since t1 is a row vector
    t_ab = nps.matmult(t_b, nps.transpose(R_a)) + t_a

    return nps.glue(R_ab, t_ab.ravel(), axis=-2)
def invert_Rt(Rt):
    r'''Simple reference implementation of Rt inversion

    b = R a + t  ->  a = R' b - R' t
    '''
    R_fwd = Rt[:3, :]
    # -R' t, computed as -(t R) since Rt[3,:] is a row vector
    t_inv = -nps.matmult(Rt[3, :], R_fwd)
    return nps.glue(nps.transpose(R_fwd), t_inv.ravel(), axis=-2)
def invert_rt(rt):
    r'''Simple reference implementation of rt inversion

    b = R a + t  ->  a = R' b - R' t

    In Rodrigues form the inverse rotation is simply -r.
    '''
    r_fwd, t_fwd = rt[:3], rt[3:]
    # -R' t, computed as -(t R) since t_fwd is a row vector
    t_inv = -nps.matmult(t_fwd, R_from_r(r_fwd))
    return nps.glue(-r_fwd, t_inv.ravel(), axis=-1)
def _align_procrustes_vectors_R01(v0, v1, weights):
    r'''Weighted Procrustes alignment of two sets of direction vectors

    Finds the rotation R that best maps vectors v1 to vectors v0 in the
    weighted-least-squares sense. Same as _align_procrustes_points_Rt01(),
    but rotation-only: no mean subtraction and no translation. Assumes
    v0 and v1 have shape (N,3) with one vector per row — TODO confirm
    against callers.
    '''
    v0 = nps.transpose(v0)
    v1 = nps.transpose(v1)

    # I process Mt instead of M to not need to transpose anything later, and to
    # end up with contiguous-memory results
    Mt = nps.matmult(v0 * weights, nps.transpose(v1))
    V, S, Ut = np.linalg.svd(Mt)

    R = nps.matmult(V, Ut)

    # det(R) is now +1 or -1. If it's -1, then this contains a mirror, and thus
    # is not a physical rotation. I compensate by negating the least-important
    # pair of singular vectors
    if np.linalg.det(R) < 0:
        V[:, 2] *= -1
        R = nps.matmult(V, Ut)

    return R
def R_from_r(r):
    r'''Rotation matrix from a Rodrigues vector

    Simple reference implementation from wikipedia:

    https://en.wikipedia.org/wiki/Rodrigues%27_rotation_formula

    r is a length-3 Rodrigues vector: direction is the rotation axis,
    magnitude is the rotation angle in radians. Returns the (3,3)
    rotation matrix.
    '''

    th = nps.mag(r)   # rotation angle

    # Kth = th * K, where K is the unit-axis cross-product (skew) matrix
    Kth = np.array(((0,    -r[2],  r[1]),
                    (r[2],  0,    -r[0]),
                    (-r[1], r[0],  0)))
    if th > 1e-10:
        # normal path
        s = np.sin(th)
        c = np.cos(th)
        K = Kth  / th
        return np.eye(3) + s*K + (1. - c)*nps.matmult(K,K)

    # small th. Can't divide by it. But I can look at the limit.
    #
    # s*K = Kth * sin(th)/th -> Kth
    #
    # (1-c)*nps.matmult(K,K) = (1-c) Kth^2/th^2 -> Kth^2 s / 2th -> Kth^2/2
    return np.eye(3) + Kth + nps.matmult(Kth,Kth) / 2.
def test_matmult(self):
    r'''Testing the broadcasted matrix multiplication

    Checks nps.matmult2() / nps.matmult() for broadcasted shapes, for the
    various output modes, and for the degenerate (vector/scalar) dimension
    logic. arr() is a module-level fixture helper — presumably it builds a
    sequentially-filled array of the given shape; verify in its definition.
    '''
    # broadcasted shape check only: (2,3,7) x (4,1,7,5) -> (4,2,3,5)
    self.assertValueShape( None, (4,2,3,5), nps.matmult, arr(2,3,7), arr(4,1,7,5) )

    # expected product of arr(2,1,2,4) x arr(2,4,3), precomputed
    ref = np.array([[[[  42,   48,   54],
                      [ 114,  136,  158]],
                     [[ 114,  120,  126],
                      [ 378,  400,  422]]],
                    [[[ 186,  224,  262],
                      [ 258,  312,  366]],
                     [[ 642,  680,  718],
                      [ 906,  960, 1014]]]])
    self._check_output_modes( ref,
                              nps.matmult2, arr(2,1,2,4), arr(2,4,3),
                              dtype=float )

    # chain of two products, precomputed
    ref2 = np.array([[[[  156.],
                       [  452.]],
                      [[  372.],
                       [ 1244.]]],
                     [[[  748.],
                       [ 1044.]],
                      [[ 2116.],
                       [ 2988.]]]])
    self._check_output_modes(ref2,
                             nps.matmult2,
                             arr(2,1,2,4),
                             nps.matmult2(arr(2,4,3), arr(3,1)))

    # not doing _check_output_modes() because matmult() doesn't take an
    # 'out' kwarg
    self.assertNumpyAlmostEqual(ref2,
                                nps.matmult(arr(2,1,2,4),
                                            arr(2,4,3),
                                            arr(3,1)))

    # checking the null-dimensionality logic
    A = arr(2,3)
    self._check_output_modes( nps.inner(nps.transpose(A), np.arange(2)),
                              nps.matmult2,
                              np.arange(2), A )

    A = arr(3)
    self._check_output_modes( A*2,
                              nps.matmult2,
                              np.array([2]), A )

    A = arr(3)
    self._check_output_modes( A*2,
                              nps.matmult2,
                              np.array(2), A )
def outlierness_query_test(J,p,x, f, query,fquery_ref, outlierness_nox, k_dima, k_cook, i=0): r'''Test the concept of outlierness for querying hypothetical data fquery_test = f(q) isn't true here. If it WERE true, the x of the query point would be 0 (we fit the model exactly), so the outlierness factor would be 0 also ''' # current solve E0 = nps.inner(x,x) query = query [i] fquery_ref = fquery_ref[i] # I add a new point, and reoptimize fquery = func_hypothesis(query,p) xquery = fquery - fquery_ref jquery = model_matrix(query, len(p)) J1 = nps.glue(J, jquery, axis=-2) f1 = nps.glue(f, fquery_ref, axis=-1) p1 = nps.matmult( f1, nps.transpose(np.linalg.pinv(J1))) x1 = nps.matmult(p1, nps.transpose(J1)) - f1 E1 = nps.inner(x1,x1) report_mismatch_relerr( (x1[-1]*x1[-1]) * k_dima, outlierness_nox['self'][i]*xquery*xquery, "self query-outlierness computed analytically, explicitly") report_mismatch_relerr( (E1-x1[-1]*x1[-1] - E0) * k_dima, outlierness_nox['others'][i]*xquery*xquery, "others query-outlierness computed analytically, explicitly") report_mismatch_relerr( (E1 - E0) * k_dima, outlierness_nox['self_others'][i]*xquery*xquery, "self_others query-outlierness computed analytically, explicitly")
def Var_df(J, squery, stdev):
    r'''Propagates noise in input to noise in f

    noise in input -> noise in params -> noise in f

    dp ~ M dm where M = inv(JtJ)Jt
    df = df/dp dp
    df/dp = squery

    Var(dm) = stdev^2 I ->
    Var(df) = stdev^2 squery inv(JtJ) Jt J inv(JtJ) squeryt =
            = stdev^2 squery inv(JtJ) squeryt

    This function broadcasts over squery
    '''
    # np.linalg.solve() applies inv(JtJ) without forming the explicit
    # inverse; nps.inner() then broadcasts the quadratic form over the
    # rows of squery
    return \
        nps.inner(squery,
                  nps.transpose(np.linalg.solve(nps.matmult(nps.transpose(J),J),
                                                nps.transpose(squery)))) *stdev*stdev
def test_order(q,f, query, order):
    r'''Fit a polynomial model of the given order and run all the outlierness tests

    Fits f(q) with a least-squares polynomial of "order" parameters,
    cross-checks the analytical outlierness/Cook's-D expressions against
    explicit refits (at measurement index 10), and returns a dict of the
    solve and its metrics. Relies on the module-level noise_stdev.
    '''
    # I look at linear and quadratic models: a0 + a1 q + a2 q^2, with a2=0 for the
    # linear case. I use plain least squares. The parameter vector is [a0 a1 a2]t. S
    # = [1 q q^2], so the measurement vector x = S p - f. E = norm2(x). J = dx/dp =
    # S.
    #
    # Note the problem "order" is the number of parameters, so a linear model has
    # order==2
    p,J,x = fit(q,f,order)
    Nmeasurements,Nstate = J.shape

    # normalization factors for the two outlierness conventions
    k_dima = 1.0/Nmeasurements
    k_cook = 1.0/((Nstate + 1.0) * nps.inner(x,x)/(Nmeasurements - Nstate - 1.0))

    # at the optimum the gradient Jt x must vanish
    report_mismatch_abserr(np.linalg.norm(nps.matmult(x,J)), 0, "Jtx")

    squery  = model_matrix(query, order)
    fquery  = func_hypothesis(query, p)
    metrics = compute_outliernesses(J,x, squery, k_dima, k_cook)

    # explicit cross-checks of the analytical expressions; the query tests get
    # a small offset so the query residual is non-zero
    outlierness_test(J, x, f, metrics['dima']['outliers'], k_dima, k_cook, i=10)
    CooksD_test     (J, x, f, metrics['cook']['outliers'], k_dima, k_cook, i=10)
    outlierness_query_test(J,p,x,f, query, fquery + 1.2e-3,
                           metrics['dima']['query'], k_dima, k_cook, i=10 )
    CooksD_query_test     (J,p,x,f, query, fquery + 1.2e-3,
                           metrics['cook']['query'], k_dima, k_cook, i=10 )

    Vquery = Var_df(J, squery, noise_stdev)

    return \
        dict( p       = p,
              J       = J,
              x       = x,
              Vquery  = Vquery,
              squery  = squery,
              fquery  = fquery,
              metrics = metrics,
              k_dima  = k_dima,
              k_cook  = k_cook )
ranges = nps.mag(p_triangulated0) ranges_true = nps.mag(p_triangulated_true0) ranges_sampled = nps.transpose(nps.mag(p_triangulated_sampled0)) mean_ranges_sampled = ranges_sampled.mean(axis=-1) Var_ranges_sampled = ranges_sampled.var(axis=-1) # r = np.mag(p) # dr_dp = p/r # Var(r) = dr_dp var(p) dr_dpT # = p var(p) pT / norm2(p) Var_ranges_joint = np.zeros((Npoints, ), dtype=float) Var_ranges_calibration = np.zeros((Npoints, ), dtype=float) Var_ranges_observations = np.zeros((Npoints, ), dtype=float) for ipt in range(Npoints): Var_ranges_joint[ipt] = \ nps.matmult(p_triangulated0[ipt], Var_p_joint[ipt,:,ipt,:], nps.transpose(p_triangulated0[ipt]))[0] / nps.norm2(p_triangulated0[ipt]) Var_ranges_calibration[ipt] = \ nps.matmult(p_triangulated0[ipt], Var_p_calibration[ipt,:,ipt,:], nps.transpose(p_triangulated0[ipt]))[0] / nps.norm2(p_triangulated0[ipt]) Var_ranges_observations[ipt] = \ nps.matmult(p_triangulated0[ipt], Var_p_observation[ipt,:,:], nps.transpose(p_triangulated0[ipt]))[0] / nps.norm2(p_triangulated0[ipt]) diff = p_triangulated0[1] - p_triangulated0[0] distance = nps.mag(diff) distance_true = nps.mag(p_triangulated_true0[:, 0] - p_triangulated_true0[:, 1]) distance_sampled = nps.mag(p_triangulated_sampled0[:, 1, :] -
# dict(legend = np.array(('observed','predicted')), # _with = 'lines')), # (dx_observed-dx_predicted, # dict(legend = "err", # _with = "lines lw 2", # y2=1))) testutils.confirm_equal(dx_predicted, dx_observed, eps=1e-6, worstcase=True, msg="dx follows the prediction") # The effect on the # parameters should be dp = M dqref. Where M = inv(JtJ) Jobservationst W M = np.linalg.solve(nps.matmult(nps.transpose(J0), J0), nps.transpose(J0[:Nmeasurements_boards, :])) * w dp_predicted = nps.matmult(dqref.ravel(), nps.transpose(M)).ravel() istate0_frames = mrcal.state_index_frames(0, **baseline) istate0_calobject_warp = mrcal.state_index_calobject_warp(**baseline) istate0_extrinsics = mrcal.state_index_extrinsics(0, **baseline) if istate0_extrinsics is None: istate0_extrinsics = istate0_frames slice_intrinsics = slice(0, istate0_extrinsics) slice_extrinsics = slice(istate0_extrinsics, istate0_frames) slice_frames = slice(istate0_frames, istate0_calobject_warp) # These thresholds look terrible. And they are. But I'm pretty sure this is # working properly. Look at the plots:
def _read(s, name):
    r'''Reads a .cahvor file into a cameramodel

    The input is the .cahvor file contents as a string. "name" identifies the
    source (a filename, usually) and is used in diagnostics only. Returns a
    mrcal.cameramodel.

    Raises an Exception on duplicate keys, on missing required fields, or on
    inconsistent CAHVOR/CAHVORE data.
    '''

    # Regexes used below are raw strings: '\d', '\s' etc. in plain string
    # literals are invalid escape sequences (a warning in modern Python)
    re_f = r'[-+]?(?:\d+(?:\.\d*)?|\.\d+)(?:[eE][-+]?\d+)?'
    re_u = r'\d+'
    re_d = r'[-+]?\d+'
    re_s = r'.+'

    # I parse all key=value lines into my dict as raw text. Further I
    # post-process some of these raw lines.
    x = {}
    for l in s.splitlines():
        # skip comments and blank lines
        if re.match(r'^\s*#|^\s*$', l):
            continue

        m = re.match(r'\s*(\w+)\s*=\s*(.+?)\s*\n?$', l, flags=re.I)
        if m:
            key = m.group(1)
            if key in x:
                raise Exception("Reading '{}': key '{}' seen more than once".format(name,
                                                                                    m.group(1)))
            value = m.group(2)

            # for compatibility
            if re.match('^DISTORTION', key):
                key = key.replace('DISTORTION', 'LENSMODEL')

            x[key] = value

    # Done reading. Any values that look like numbers, I convert to numbers.
    for i in x:
        if re.match('{}$'.format(re_f), x[i]):
            x[i] = float(x[i])

    # I parse the fields I know I care about into numpy arrays
    for i in ('Dimensions','C','A','H','V','O','R','E',
              'LENSMODEL_OPENCV4',
              'LENSMODEL_OPENCV5',
              'LENSMODEL_OPENCV8',
              'LENSMODEL_OPENCV12',
              'VALID_INTRINSICS_REGION'):
        if i in x:
            # Any data that's composed only of digits and whitespaces (no "."),
            # use integers
            if re.match(r'[0-9\s]+$', x[i]): totype = int
            else:                            totype = float
            x[i] = np.array( [ totype(v) for v in re.split(r'\s+', x[i])],
                             dtype=totype)

    # Now I sanity-check the results and call it done
    for k in ('Dimensions','C','A','H','V'):
        if k not in x:
            raise Exception("Cahvor file '{}' incomplete. Missing values for: {}".
                            format(name, k))

    # Pick the lens model from the keys present. Plain OpenCV models are
    # stored directly; a CAHVOR/CAHVORE model is detected by the R key and
    # handled below
    is_cahvor_or_cahvore = False
    if 'LENSMODEL_OPENCV12' in x:
        distortions = x["LENSMODEL_OPENCV12"]
        lensmodel = 'LENSMODEL_OPENCV12'
    elif 'LENSMODEL_OPENCV8' in x:
        distortions = x["LENSMODEL_OPENCV8"]
        lensmodel = 'LENSMODEL_OPENCV8'
    elif 'LENSMODEL_OPENCV5' in x:
        distortions = x["LENSMODEL_OPENCV5"]
        lensmodel = 'LENSMODEL_OPENCV5'
    elif 'LENSMODEL_OPENCV4' in x:
        distortions = x["LENSMODEL_OPENCV4"]
        lensmodel = 'LENSMODEL_OPENCV4'
    elif 'R' not in x:
        distortions = np.array(())
        lensmodel = 'LENSMODEL_PINHOLE'
    else:
        is_cahvor_or_cahvore = True

    if 'VALID_INTRINSICS_REGION' in x:
        # stored flat; reshape to (N,2) points
        x['VALID_INTRINSICS_REGION'] = \
            x['VALID_INTRINSICS_REGION'].reshape( len(x['VALID_INTRINSICS_REGION'])//2, 2)

    # get extrinsics from cahvor
    if 'Model' not in x:
        x['Model'] = ''

    m = re.match(r'CAHVORE3,([0-9\.e-]+)\s*=\s*general',x['Model'])
    if m:
        is_cahvore = True
        cahvore_linearity = float(m.group(1))
    else:
        is_cahvore = False

    # Rotation to the reference frame comes from the H',V' directions and
    # the optical axis A; translation is the camera center C
    Hp,Vp = _HVs_HVc_HVp(x)[-2:]
    R_toref = nps.transpose( nps.cat( Hp,
                                      Vp,
                                      x['A'] ))
    t_toref = x['C']

    if is_cahvor_or_cahvore:
        if 'O' not in x:
            alpha = 0
            beta  = 0
        else:
            # O is the distortion axis; express as (alpha,beta) angles in the
            # camera frame
            o = nps.matmult( x['O'], R_toref )
            alpha = np.arctan2(o[0], o[2])
            beta  = np.arcsin( o[1] )

        if is_cahvore:
            # CAHVORE
            if 'E' not in x:
                raise Exception('Cahvor file {} LOOKS like a cahvore, but lacks the E'.format(name))
            R0,R1,R2 = x['R'].ravel()
            E0,E1,E2 = x['E'].ravel()
            distortions = np.array((alpha,beta,R0,R1,R2,E0,E1,E2), dtype=float)
            lensmodel = f'LENSMODEL_CAHVORE_linearity={cahvore_linearity}'
        else:
            # CAHVOR
            if 'E' in x:
                raise Exception('Cahvor file {} LOOKS like a cahvor, but has an E'.format(name))

            if abs(beta) < 1e-8 and \
               ( 'R' not in x or np.linalg.norm(x['R']) < 1e-8):
                # pinhole
                alpha = 0
                beta  = 0
            else:
                R0,R1,R2 = x['R'].ravel()

            if alpha == 0 and beta == 0:
                distortions = np.array(())
                lensmodel = 'LENSMODEL_PINHOLE'
            else:
                distortions = np.array((alpha,beta,R0,R1,R2), dtype=float)
                lensmodel = 'LENSMODEL_CAHVOR'

    m = mrcal.cameramodel(imagersize = x['Dimensions'].astype(np.int32),
                          intrinsics = (lensmodel,
                                        nps.glue( np.array(_fxy_cxy(x), dtype=float),
                                                  distortions,
                                                  axis = -1)),
                          valid_intrinsics_region = x.get('VALID_INTRINSICS_REGION'),
                          extrinsics_Rt_toref = np.ascontiguousarray(nps.glue(R_toref,t_toref, axis=-2)))
    return m
def stereo_rectify_prepare(models,
                           az_fov_deg,
                           el_fov_deg,
                           az0_deg           = None,
                           el0_deg           = 0,
                           pixels_per_deg_az = None,
                           pixels_per_deg_el = None):
    r'''Precompute everything needed for stereo rectification and matching

SYNOPSIS

    import sys
    import mrcal
    import cv2
    import numpy as np

    # Read commandline arguments: model0 model1 image0 image1
    models = [ mrcal.cameramodel(sys.argv[1]),
               mrcal.cameramodel(sys.argv[2]), ]

    images = [ cv2.imread(sys.argv[i]) \
               for i in (3,4) ]

    # Prepare the stereo system
    rectification_maps,cookie = \
        mrcal.stereo_rectify_prepare(models,
                                     az_fov_deg = 120,
                                     el_fov_deg = 100)

    # Visualize the geometry of the two cameras and of the rotated stereo
    # coordinate system
    Rt_cam0_ref    = models[0].extrinsics_Rt_fromref()
    Rt_cam0_stereo = cookie['Rt_cam0_stereo']
    Rt_stereo_ref  = mrcal.compose_Rt( mrcal.invert_Rt(Rt_cam0_stereo),
                                       Rt_cam0_ref )
    rt_stereo_ref  = mrcal.rt_from_Rt(Rt_stereo_ref)
    mrcal.show_geometry( models + [ rt_stereo_ref ],
                         ( "camera0", "camera1", "stereo" ),
                         show_calobjects = False,
                         wait            = True )

    # Rectify the images
    images_rectified = \
      [ mrcal.transform_image(images[i], rectification_maps[i]) \
        for i in range(2) ]

    cv2.imwrite('/tmp/rectified0.jpg', images_rectified[0])
    cv2.imwrite('/tmp/rectified1.jpg', images_rectified[1])

    # Find stereo correspondences using OpenCV
    block_size = 3
    max_disp   = 160 # in pixels
    stereo = \
        cv2.StereoSGBM_create(minDisparity      = 0,
                              numDisparities    = max_disp,
                              blockSize         = block_size,
                              P1                = 8 *3*block_size*block_size,
                              P2                = 32*3*block_size*block_size,
                              uniquenessRatio   = 5,
                              disp12MaxDiff     = 1,
                              speckleWindowSize = 50,
                              speckleRange      = 1)
    disparity16 = stereo.compute(*images_rectified) # in pixels*16

    cv2.imwrite('/tmp/disparity.png',
                mrcal.apply_color_map(disparity16, 0, max_disp*16.))

    # Convert the disparities to range to camera0
    r = mrcal.stereo_range( disparity16.astype(np.float32) / 16.,
                            **cookie )
    cv2.imwrite('/tmp/range.png', mrcal.apply_color_map(r, 5, 1000))

This function does the initial computation required to perform stereo matching,
and to get ranges from a stereo pair. It computes

- the pose of the rectified stereo coordinate system

- the azimuth/elevation grid used in the rectified images

- the rectification maps used to transform images into the rectified space

Using the results of one call to this function we can compute the stereo
disparities of many pairs of synchronized images.

This function is generic: the two cameras may have any lens models, any
resolution and any geometry. They don't even have to match. As long as there's
some non-zero baseline and some overlapping views, we can set up stereo matching
using this function.

The input images are tranformed into a "rectified" space. Geometrically, the
rectified coordinate system sits at the origin of camera0, with a rotation. The
axes of the rectified coordinate system:

- x: from the origin of camera0 to the origin of camera1 (the baseline
  direction)

- y: completes the system from x,z

- z: the "forward" direction of the two cameras, with the component parallel to
     the baseline subtracted off

In a nominal geometry (the two cameras are square with each other, camera1
strictly to the right of camera0), the rectified coordinate system exactly
matches the coordinate system of camera0. The above formulation supports any
geometry, however, including vertical and/or forward/backward shifts. Vertical
stereo is supported.

Rectified images represent 3D planes intersecting the origins of the two
cameras. The tilt of each plane is the "elevation". While the left/right
direction inside each plane is the "azimuth". We generate rectified images where
each pixel coordinate represents (x = azimuth, y = elevation). Thus each row
scans the azimuths in a particular elevation, and thus each row in the two
rectified images represents the same plane in 3D, and matching features in each
row can produce a stereo disparity and a range.

In the rectified system, elevation is a rotation along the x axis, while azimuth
is a rotation normal to the resulting tilted plane.

We produce rectified images whose pixel coordinates are linear with azimuths and
elevations. This means that the azimuth angular resolution is constant
everywhere, even at the edges of a wide-angle image.

We return a set of transformation maps and a cookie. The maps can be used to
generate rectified images. These rectified images can be processed by any
stereo-matching routine to generate a disparity image. To interpret the
disparity image, call stereo_unproject() or stereo_range() with the cookie
returned here.

The cookie is a Python dict that describes the rectified space. It is guaranteed
to have the following keys:

- Rt_cam0_stereo: an Rt transformation to map a representation of points in the
  rectified coordinate system to a representation in the camera0 coordinate
  system

- baseline: the distance between the two cameras

- az_row: an array of shape (Naz,) describing the azimuths in each row of the
  disparity image

- el_col: an array of shape (Nel,1) describing the elevations in each column of
  the disparity image

ARGUMENTS

- models: an iterable of two mrcal.cameramodel objects representing the cameras
  in the stereo system. Any sane combination of lens models and resolutions and
  geometries is valid

- az_fov_deg: required value for the azimuth (along-the-baseline) field-of-view
  of the desired rectified view, in degrees

- el_fov_deg: required value for the elevation (across-the-baseline)
  field-of-view of the desired rectified view, in degrees

- az0_deg: optional value for the azimuth center of the rectified images. This
  is especially significant in a camera system with some forward/backward
  shift. That causes the baseline to no longer be perpendicular with the view
  axis of the cameras, and thus azimuth = 0 is no longer at the center of the
  input images. If omitted, we compute the center azimuth that aligns with the
  center of the cameras' view

- el0_deg: optional value for the elevation center of the rectified system.
  Defaults to 0.

- pixels_per_deg_az: optional value for the azimuth resolution of the rectified
  image. If omitted (or None), we use the resolution of the input image at
  (azimuth, elevation) = 0. If a resolution of <0 is requested, we use this as
  a scale factor on the resolution of the input image. For instance, to
  downsample by a factor of 2, pass pixels_per_deg_az = -0.5

- pixels_per_deg_el: same as pixels_per_deg_az but in the elevation direction

RETURNED VALUES

We return a tuple

- transformation maps: a tuple of length-2 containing transformation maps for
  each camera. Each map can be used to mrcal.transform_image() images to the
  rectified space

- cookie: a dict describing the rectified space. Intended as input to
  stereo_unproject() and stereo_range(). See the description above for more
  detail

    '''

    if len(models) != 2:
        raise Exception("I need exactly 2 camera models")

    # normalizes in-place, and returns the same array
    def normalize(v):
        v /= nps.mag(v)
        return v

    def remove_projection(a, proj_base):
        r'''Returns the normalized component of a orthogonal to proj_base

        proj_base assumed normalized'''
        v = a - nps.inner(a,proj_base)*proj_base
        return normalize(v)

    ######## Compute the geometry of the rectified stereo system. This is a
    ######## rotation, centered at camera0. More or less we have axes:
    ########
    ######## x: from camera0 to camera1
    ######## y: completes the system from x,z
    ######## z: component of the cameras' viewing direction
    ########    normal to the baseline
    Rt_cam0_ref = models[0].extrinsics_Rt_fromref()
    Rt01 = mrcal.compose_Rt( Rt_cam0_ref,
                             models[1].extrinsics_Rt_toref())
    Rt10 = mrcal.invert_Rt(Rt01)

    # Rotation relating camera0 coords to the rectified camera coords. I fill in
    # each row separately
    R_stereo_cam0 = np.zeros((3,3), dtype=float)
    # views into the rows of R_stereo_cam0: writing to these fills the matrix
    right   = R_stereo_cam0[0,:]
    down    = R_stereo_cam0[1,:]
    forward = R_stereo_cam0[2,:]

    # "right" of the rectified coord system: towards the origin of camera1 from
    # camera0, in camera0 coords
    right[:] = Rt01[3,:]
    baseline = nps.mag(right)
    right   /= baseline

    # "forward" for each of the two cameras, in the cam0 coord system
    forward0 = np.array((0,0,1.))
    forward1 = Rt01[:3,2]

    # "forward" of the rectified coord system, in camera0 coords. The mean of
    # the two non-right "forward" directions
    forward[:] = normalize( ( remove_projection(forward0,right) +
                              remove_projection(forward1,right) ) / 2. )

    # "down" of the rectified coord system, in camera0 coords. Completes the
    # right,down,forward coordinate system
    down[:] = np.cross(forward,right)

    R_cam0_stereo = nps.transpose(R_stereo_cam0)

    ######## Done with the geometry! Now to get the az/el grid. I need to figure
    ######## out the resolution and the extents

    if az0_deg is not None:
        az0 = az0_deg * np.pi/180.
    else:
        # In the rectified system az=0 sits perpendicular to the baseline.
        # Normally the cameras are looking out perpendicular to the baseline
        # also, so I center my azimuth samples around 0 to match the cameras'
        # field of view. But what if the geometry isn't square, and one camera
        # is behind the other? Like this:
        #
        #    camera
        #     view
        #       ^
        #       |
        #    \  |  /
        #     \_/
        #      .    /
        #       .  /az=0
        #        ./
        #         .
        #  baseline  .
        #             .
        #            \   /
        #             \_/
        #
        # Here the center-of-view axis of each camera is not at all
        # perpendicular to the baseline. Thus I compute the mean "forward"
        # direction of the cameras in the rectified system, and set that as the
        # center azimuth az0.
        v0 = nps.matmult( forward0, R_cam0_stereo ).ravel()
        v1 = nps.matmult( forward1, R_cam0_stereo ).ravel()
        # project onto the azimuth (x-z) plane before averaging
        v0[1] = 0.0
        v1[1] = 0.0
        normalize(v0)
        normalize(v1)
        v = v0 + v1
        az0 = np.arctan2(v[0],v[2])

    el0 = el0_deg * np.pi/180.

    ####### Rectified image resolution
    if pixels_per_deg_az is None or pixels_per_deg_az < 0 or \
       pixels_per_deg_el is None or pixels_per_deg_el < 0:
        # I need to compute the resolution of the rectified images. I try to
        # match the resolution of the cameras. I just look at camera0. If you
        # have different cameras, pass in pixels_per_deg yourself :)
        #
        # I look at the center of the stereo field of view. There I have q =
        # project(v) where v is a unit projection vector. I compute dq/dth where
        # th is an angular perturbation applied to v.
        def rotation_any_v_to_z(v):
            r'''Return any rotation matrix that maps the given unit vector v to [0,0,1]'''
            z = v
            if np.abs(v[0]) < .9:
                x = np.array((1,0,0))
            else:
                x = np.array((0,1,0))
            x -= nps.inner(x,v)*v
            x /= nps.mag(x)
            y = np.cross(z,x)

            return nps.cat(x,y,z)

        # unit vector and gradients at the center of the rectified view
        v, dv_dazel = stereo_unproject(az0, el0, get_gradients = True)
        v0          = mrcal.rotate_point_R(R_cam0_stereo, v)
        dv0_dazel   = nps.matmult(R_cam0_stereo, dv_dazel)

        _,dq_dv0,_ = mrcal.project(v0, *models[0].intrinsics(), get_gradients = True)

        # I rotate my v to a coordinate system where u = rotate(v) is [0,0,1].
        # Then u = [a,b,0] are all orthogonal to v. So du/dth = [cos, sin, 0].
        # I then have dq/dth = dq/dv dv/du [cos, sin, 0]t
        # ---> dq/dth = dq/dv dv/du[:,:2] [cos, sin]t = M [cos,sin]t
        #
        # norm2(dq/dth) = [cos,sin] MtM [cos,sin]t is then an ellipse with the
        # eigenvalues of MtM giving me the best and worst sensitivities. I can
        # use mrcal.worst_direction_stdev() to find the densest direction. But I
        # actually know the directions I care about, so I evaluate them
        # independently for the az and el directions

        # Ruv = rotation_any_v_to_z(v0)
        # M = nps.matmult(dq_dv0, nps.transpose(Ruv[:2,:]))
        # # I pick the densest direction: highest |dq/dth|
        # pixels_per_rad = mrcal.worst_direction_stdev( nps.matmult( nps.transpose(M),M) )

        if pixels_per_deg_az is None or pixels_per_deg_az < 0:
            dq_daz = nps.inner( dq_dv0, dv0_dazel[:,0] )
            pixels_per_rad_az_have = nps.mag(dq_daz)

            if pixels_per_deg_az is not None:
                # negative pixels_per_deg_az requested means I use the requested
                # value as a scaling
                pixels_per_deg_az = -pixels_per_deg_az * pixels_per_rad_az_have*np.pi/180.
            else:
                pixels_per_deg_az = pixels_per_rad_az_have*np.pi/180.

        if pixels_per_deg_el is None or pixels_per_deg_el < 0:
            dq_del = nps.inner( dq_dv0, dv0_dazel[:,1] )
            pixels_per_rad_el_have = nps.mag(dq_del)

            if pixels_per_deg_el is not None:
                # negative pixels_per_deg_el requested means I use the requested
                # value as a scaling
                pixels_per_deg_el = -pixels_per_deg_el * pixels_per_rad_el_have*np.pi/180.
            else:
                pixels_per_deg_el = pixels_per_rad_el_have*np.pi/180.

    Naz = round(az_fov_deg*pixels_per_deg_az)
    Nel = round(el_fov_deg*pixels_per_deg_el)

    # Adjust the fov to keep the requested resolution and pixel counts
    az_fov_radius_deg = Naz / (2.*pixels_per_deg_az)
    el_fov_radius_deg = Nel / (2.*pixels_per_deg_el)

    # shape (Naz,)
    az = np.linspace(az0 - az_fov_radius_deg*np.pi/180.,
                     az0 + az_fov_radius_deg*np.pi/180.,
                     Naz)
    # shape (Nel,1)
    el = nps.dummy( np.linspace(el0 - el_fov_radius_deg*np.pi/180.,
                                el0 + el_fov_radius_deg*np.pi/180.,
                                Nel),
                    -1 )

    # v has shape (Nel,Naz,3)
    v = stereo_unproject(az, el)

    # the same grid of observation vectors, in each camera's coord system
    v0 = nps.matmult( nps.dummy(v,  -2), R_stereo_cam0 )[...,0,:]
    v1 = nps.matmult( nps.dummy(v0, -2), Rt01[:3,:]    )[...,0,:]

    cookie = \
        dict( Rt_cam0_stereo    = nps.glue(R_cam0_stereo, np.zeros((3,)), axis=-2),
              baseline          = baseline,
              az_row            = az,
              el_col            = el,

              # The caller should NOT assume these are available in the cookie:
              # some other rectification scheme may not produce linear az/el
              # maps
              pixels_per_deg_az = pixels_per_deg_az,
              pixels_per_deg_el = pixels_per_deg_el,
            )

    return \
        (mrcal.project( v0, *models[0].intrinsics()).astype(np.float32),
         mrcal.project( v1, *models[1].intrinsics()).astype(np.float32)), \
        cookie
out3 = base[1, 3, 6:9, 1] out6 = base[1, 4, :6, 1] out66 = base[5, 3:9, 3:9, 2] out66a = base[6, 3:9, 3:9, 2] confirm_equal(mrcal.identity_R(out=out33), np.eye(3), msg='identity_R') confirm_equal(mrcal.identity_Rt(out=out43), nps.glue(np.eye(3), np.zeros((3, ), ), axis=-2), msg='identity_Rt') confirm_equal(mrcal.identity_r(out=out3), np.zeros((3, )), msg='identity_r') confirm_equal(mrcal.identity_rt(out=out6), np.zeros((6, )), msg='identity_rt') y = \ mrcal.rotate_point_R(R0_ref, x, out = out3) confirm_equal(y, nps.matmult(x, nps.transpose(R0_ref)), msg='rotate_point_R result') y, J_R, J_x = \ mrcal.rotate_point_R(R0_ref, x, get_gradients=True, out = (out3,out333,out33)) J_R_ref = grad(lambda R: nps.matmult(x, nps.transpose(R)), R0_ref) J_x_ref = R0_ref confirm_equal(y, nps.matmult(x, nps.transpose(R0_ref)), msg='rotate_point_R result') confirm_equal(J_R, J_R_ref, msg='rotate_point_R J_R') confirm_equal(J_x, J_x_ref, msg='rotate_point_R J_x') y = mrcal.rotate_point_r(r0_ref, x, out=out3) confirm_equal(y,
def _triangulate(# shape (Ncameras, Nintrinsics)
                 intrinsics_data,
                 # shape (Ncameras, 6)
                 rt_cam_ref,
                 # shape (Nframes,6),
                 rt_ref_frame, rt_ref_frame_true,
                 # shape (..., Ncameras, 2)
                 q,
                 lensmodel, stabilize_coords, get_gradients):
    r'''Triangulate a pixel pair observed by a 2-camera system

    Unprojects both pixel observations q, rotates the camera-1 observation
    vectors into the camera-0 frame, and triangulates with
    mrcal.triangulate_leecivera_mid2(). The result is reported in camera-0
    coordinates.

    If stabilize_coords: the triangulated point is mapped through each frame
    transform (rt_ref_frame -> frame -> rt_ref_frame_true) and the results
    averaged, compensating for a shifted reference coordinate system.

    If get_gradients: additionally returns the gradients of the triangulated
    point in respect to all the inputs, chained by hand through each
    operation. Exactly two cameras are supported.
    '''
    # Validate that everything is consistent with Ncameras == 2: this routine
    # hard-codes the two-camera case
    if not ( intrinsics_data.ndim == 2 and intrinsics_data.shape[0] == 2 and \
             rt_cam_ref.shape == (2,6) and \
             rt_ref_frame.ndim == 2 and rt_ref_frame.shape[-1] == 6 and \
             q.shape[-2:] == (2,2 ) ):
        raise Exception("Arguments must have a consistent Ncameras == 2")

    # I now compute the same triangulation, but just at the un-perturbed
    # baseline, and keeping track of all the gradients
    rt0r = rt_cam_ref[0]
    rt1r = rt_cam_ref[1]

    if not get_gradients:

        rtr1          = mrcal.invert_rt(rt1r)
        # rt01 maps camera-1 coords to camera-0 coords
        rt01_baseline = mrcal.compose_rt(rt0r, rtr1)

        # all the v have shape (...,3)
        vlocal0 = \
            mrcal.unproject(q[...,0,:],
                            lensmodel, intrinsics_data[0])
        vlocal1 = \
            mrcal.unproject(q[...,1,:],
                            lensmodel, intrinsics_data[1])

        v0 = vlocal0
        v1 = \
            mrcal.rotate_point_r(rt01_baseline[:3], vlocal1)

        # p_triangulated has shape (..., 3)
        p_triangulated = \
            mrcal.triangulate_leecivera_mid2(v0, v1, rt01_baseline[3:])

        if stabilize_coords:

            # shape (..., Nframes, 3)
            p_frames_new = \
                mrcal.transform_point_rt(mrcal.invert_rt(rt_ref_frame),
                                         nps.dummy(p_triangulated,-2))

            # shape (..., Nframes, 3)
            p_refs = mrcal.transform_point_rt(rt_ref_frame_true, p_frames_new)

            # shape (..., 3): average over the Nframes estimates
            p_triangulated = np.mean(p_refs, axis=-2)

        return p_triangulated

    else:
        rtr1,drtr1_drt1r = mrcal.invert_rt(rt1r,
                                           get_gradients=True)
        rt01_baseline,drt01_drt0r, drt01_drtr1 = mrcal.compose_rt(rt0r, rtr1, get_gradients=True)

        # all the v have shape (...,3)
        vlocal0, dvlocal0_dq0, dvlocal0_dintrinsics0 = \
            mrcal.unproject(q[...,0,:],
                            lensmodel, intrinsics_data[0],
                            get_gradients = True)
        vlocal1, dvlocal1_dq1, dvlocal1_dintrinsics1 = \
            mrcal.unproject(q[...,1,:],
                            lensmodel, intrinsics_data[1],
                            get_gradients = True)

        v0 = vlocal0
        v1, dv1_dr01, dv1_dvlocal1 = \
            mrcal.rotate_point_r(rt01_baseline[:3], vlocal1,
                                 get_gradients=True)

        # p_triangulated has shape (..., 3)
        p_triangulated, dp_triangulated_dv0, dp_triangulated_dv1, dp_triangulated_dt01 = \
            mrcal.triangulate_leecivera_mid2(v0, v1, rt01_baseline[3:],
                                             get_gradients = True)

        shape_leading = dp_triangulated_dv0.shape[:-2]

        # Chain rule: dp/dq through the unprojections (and, for camera 1, the
        # rotation). Written into per-camera slices of one array
        dp_triangulated_dq = np.zeros(shape_leading + (3,) + q.shape[-2:], dtype=float)
        nps.matmult( dp_triangulated_dv0,
                     dvlocal0_dq0,
                     out = dp_triangulated_dq[..., 0, :])
        nps.matmult( dp_triangulated_dv1,
                     dv1_dvlocal1,
                     dvlocal1_dq1,
                     out = dp_triangulated_dq[..., 1, :])

        Nframes = len(rt_ref_frame)

        if stabilize_coords:

            # shape (Nframes,6)
            rt_frame_ref, drtfr_drtrf = \
                mrcal.invert_rt(rt_ref_frame, get_gradients=True)

            # shape (Nframes,6)
            rt_true_shifted, _, drt_drtfr = \
                mrcal.compose_rt(rt_ref_frame_true, rt_frame_ref,
                                 get_gradients=True)

            # shape (..., Nframes, 3)
            p_refs,dprefs_drt,dprefs_dptriangulated = \
                mrcal.transform_point_rt(rt_true_shifted,
                                         nps.dummy(p_triangulated,-2),
                                         get_gradients = True)

            # shape (..., 3)
            p_triangulated = np.mean(p_refs, axis=-2)

            # I have dpold/dx. dpnew/dx = dpnew/dpold dpold/dx
            # shape (...,3,3)
            dpnew_dpold = np.mean(dprefs_dptriangulated, axis=-3)

            dp_triangulated_dv0  = nps.matmult(dpnew_dpold, dp_triangulated_dv0)
            dp_triangulated_dv1  = nps.matmult(dpnew_dpold, dp_triangulated_dv1)
            dp_triangulated_dt01 = nps.matmult(dpnew_dpold, dp_triangulated_dt01)
            # dq gradient has the camera axis between the point axis and the
            # pixel axis, so swap it out of the way, multiply, swap back
            dp_triangulated_dq   = nps.xchg(nps.matmult( dpnew_dpold,
                                                         nps.xchg(dp_triangulated_dq,
                                                                  -2,-3)),
                                            -2,-3)

            # shape (..., Nframes,3,6)
            dp_triangulated_drtrf = \
                nps.matmult(dprefs_drt, drt_drtfr, drtfr_drtrf) / Nframes
        else:
            dp_triangulated_drtrf = np.zeros(shape_leading + (Nframes,3,6), dtype=float)

        return \
            p_triangulated, \
            drtr1_drt1r, \
            drt01_drt0r, drt01_drtr1, \
            dvlocal0_dintrinsics0, dvlocal1_dintrinsics1, \
            dv1_dr01, dv1_dvlocal1, \
            dp_triangulated_dv0, dp_triangulated_dv1, dp_triangulated_dt01, \
            dp_triangulated_drtrf, \
            dp_triangulated_dq
def unproject(q, lensmodel, intrinsics_data,
              normalize     = False,
              get_gradients = False,
              out           = None):
    r'''Unprojects pixel coordinates to observation vectors

SYNOPSIS

    # q is a (...,2) array of pixel observations
    v = mrcal.unproject( q,
                         lensmodel, intrinsics_data )

    ### OR ###

    m = mrcal.cameramodel(...)
    v = mrcal.unproject( q, *m.intrinsics() )

Maps a set of 2D imager points q to a set of 3D vectors in camera coordinates
that produced these pixel observations. Each 3D vector is unique only
up-to-length, and the returned vectors aren't normalized by default. The
default length of the returned vector is arbitrary, and selected for the
convenience of the implementation. Pass normalize=True to always return unit
vectors.

This is the "reverse" direction, so an iterative nonlinear optimization is
performed internally to compute this result. This is much slower than
mrcal_project. For OpenCV distortions specifically, OpenCV has
cvUndistortPoints() (and cv2.undistortPoints()), but these are inaccurate and
we do not use them: https://github.com/opencv/opencv/issues/8811

Gradients are available by passing get_gradients=True. Since unproject() is
implemented as an iterative solve around project(), the unproject() gradients
are computed by manipulating the gradients reported by project() at the
solution. The reported gradients are relative to whatever unproject() is
reporting; the unprojection is unique only up-to-length, and the magnitude
isn't fixed. So the gradients may include a component in the direction of the
returned observation vector: this follows the arbitrary scaling used by
unproject(). It is possible to pass normalize=True; we then return NORMALIZED
observation vectors and the gradients of those NORMALIZED vectors. In that
case, those gradients are guaranteed to be orthogonal to the observation
vector. The vector normalization involves a bit more computation, so it isn't
the default.

NOTE: THE MAGNITUDE OF THE RETURNED VECTOR CHANGES IF get_gradients CHANGES.
The reported gradients are correct relative to the output returned with
get_gradients=True. Passing normalize=True can be used to smooth this out:

    unproject(..., normalize=True)

returns the same vectors as

    unproject(..., normalize=True, get_gradients=True)[0]

Broadcasting is fully supported across q and intrinsics_data.

Models that have no gradients available cannot use mrcal_unproject() in C, but
CAN still use this mrcal.unproject() Python routine: a slower routine is
employed that uses numerical differences instead of analytical gradients.

ARGUMENTS

- q: array of dims (...,2); the pixel coordinates we're unprojecting

- lensmodel: a string such as

  LENSMODEL_PINHOLE
  LENSMODEL_OPENCV4
  LENSMODEL_CAHVOR
  LENSMODEL_SPLINED_STEREOGRAPHIC_order=3_Nx=16_Ny=12_fov_x_deg=100

- intrinsics_data: array of dims (Nintrinsics):

    (focal_x, focal_y, center_pixel_x, center_pixel_y, distortion0,
     distortion1, ...)

  The focal lengths are given in pixels.

- normalize: optional boolean defaults to False. If True: normalize the output
  vectors

- get_gradients: optional boolean that defaults to False. Whether we should
  compute and report the gradients. This affects what we return (see below).
  If not normalize, the magnitude of the reported vectors changes if
  get_gradients is turned on/off (see above)

- out: optional argument specifying the destination. By default, new numpy
  array(s) are created and returned. To write the results into existing
  arrays, specify them with the 'out' kwarg. If not get_gradients: 'out' is
  the one numpy array we will write into. Else: 'out' is a tuple of all the
  output numpy arrays. If 'out' is given, we return the same arrays passed
  in. This is the standard behavior provided by numpysane_pywrap.

RETURNED VALUE

if not get_gradients: we return an (...,3) array of unprojected observation
vectors. Not normalized by default; see description above

if get_gradients: we return a tuple:

  - (...,3) array of unprojected observation vectors
  - (...,3,2) array of gradients of unprojected observation vectors in respect
    to pixel coordinates
  - (...,3,Nintrinsics) array of gradients of unprojected observation vectors
    in respect to the intrinsics

    '''

    def apply_normalization_to_output_with_gradients(v,dv_dq,dv_di):
        # Normalizes v in-place, and propagates the normalization through the
        # gradients (also in-place). Derivation:
        #
        # vn = v/mag(v)
        # dvn = dv (1/mag(v)) + v d(1/mag(v))
        #     = dv( 1/mag(v) - v vt / mag^3(v) )
        #     = dv( 1/mag(v) - vn vnt / mag(v) )
        #     = dv/mag(v) ( 1 - vn vnt )
        #
        # v     has shape (...,3)
        # dv_dq has shape (...,3,2)
        # dv_di has shape (...,3,N)

        # shape (...,1)
        magv_recip = 1. / nps.dummy(nps.mag(v), -1)
        v *= magv_recip

        # shape (...,1,1)
        magv_recip = nps.dummy(magv_recip,-1)
        dv_dq *= magv_recip
        # subtract the component of each gradient along vn (the "vn vnt" term
        # above); the transposes put the gradient columns where matmult can
        # reach them
        dv_dq -= nps.xchg( nps.matmult( nps.dummy(nps.xchg(dv_dq, -1,-2), -2),
                                        nps.dummy(nps.outer(v,v),-3) )[...,0,:],
                           -1, -2)

        dv_di *= magv_recip
        dv_di -= nps.xchg( nps.matmult( nps.dummy(nps.xchg(dv_di, -1,-2), -2),
                                        nps.dummy(nps.outer(v,v),-3) )[...,0,:],
                           -1, -2)

    # First, handle some trivial cases. I don't want to run the
    # optimization-based unproject() if I don't have to
    if lensmodel == 'LENSMODEL_PINHOLE'       or \
       lensmodel == 'LENSMODEL_LONLAT'        or \
       lensmodel == 'LENSMODEL_LATLON'        or \
       lensmodel == 'LENSMODEL_STEREOGRAPHIC':

        if   lensmodel == 'LENSMODEL_PINHOLE':
            func = mrcal.unproject_pinhole
            always_normalized = False
        elif lensmodel == 'LENSMODEL_LONLAT':
            func = mrcal.unproject_lonlat
            always_normalized = True
        elif lensmodel == 'LENSMODEL_LATLON':
            func = mrcal.unproject_latlon
            always_normalized = True
        elif lensmodel == 'LENSMODEL_STEREOGRAPHIC':
            func = mrcal.unproject_stereographic
            always_normalized = False

        if not get_gradients:

            v = func(q, intrinsics_data, out = out)
            if normalize and not always_normalized:
                v /= nps.dummy(nps.mag(v), axis=-1)
            return v

        # We need to report gradients

        # shapes (...,2)
        fxy = intrinsics_data[..., :2]
        cxy = intrinsics_data[..., 2:]

        # shapes (...,3) and (...,3,2)
        v, dv_dq = \
            func(q, intrinsics_data,
                 get_gradients = True,
                 out = None if out is None else (out[0],out[1]))

        # These simple models have q = f l(v) + c, so the intrinsics
        # gradients follow from dv_dq:
        #
        # q = f l(v) + c
        # l(v) = (q-c)/f
        #
        # dl/dv dv/df = (c-q) / f^2
        # dl/dv dv/dq = 1/f
        # -> dl/dv = 1 / ( f dv/dq )
        # -> dv/df = (c-q) / (f^2 dl/dv) = (c-q) dv/dq / f
        #
        # dl/dv dv/dc = -1/f
        # -> dv/dc =  -1 / (f dl/dv) = -1 / (f /( f dv/dq )) = -dv/dq
        dv_di_shape = dv_dq.shape[:-1] + (4,)
        if out is None:
            dv_di = np.zeros( dv_di_shape, dtype=float)
        else:
            # out[2] must broadcast to the right shape: trailing dims match,
            # extra leading dims must all be 1
            if not (out[2].shape[-len(dv_di_shape):] == dv_di_shape and \
                    not any(np.array(out[2].shape[:-len(dv_di_shape)]) - 1)):
                raise Exception(f"Shape of out[2] doesn't match broadcasted shape for dv_di. Wanted {dv_di_shape}, but got {out[2].shape}")
            dv_di = out[2]
            dv_di *= 0

        # dv/df
        dv_di[..., :2] += nps.dummy((cxy - q)/fxy, -2) * dv_dq
        # dv/dc
        dv_di[..., 2:] -= dv_dq

        if normalize and not always_normalized:
            apply_normalization_to_output_with_gradients(v,dv_dq,dv_di)

        return v,dv_dq,dv_di

    try:
        meta = mrcal.lensmodel_metadata_and_config(lensmodel)
    except:
        # broad except is deliberate: any failure here means the model string
        # is unusable, and we report that uniformly
        raise Exception(f"Invalid lens model '{lensmodel}': couldn't get the metadata")

    if meta['has_gradients']:

        # Main path. We have gradients.
        #
        # Internal function must have a different argument order so
        # that all the broadcasting stuff is in the leading arguments
        if not get_gradients:
            v = mrcal._mrcal_npsp._unproject(q, intrinsics_data, lensmodel=lensmodel,
                                             out=out)
            if normalize:

                # Explicitly handle nan and inf to set their normalized values
                # to 0. Otherwise I get a scary-looking warning from numpy
                i_vgood = \
                    np.isfinite(v[...,0]) * \
                    np.isfinite(v[...,1]) * \
                    np.isfinite(v[...,2])

                v[~i_vgood] = np.array((0.,0.,1.))

                v /= nps.dummy(nps.mag(v), -1)

                v[~i_vgood] = np.array((0.,0.,0.))

            return v

        # We need to report gradients
        vs = mrcal._mrcal_npsp._unproject(q, intrinsics_data, lensmodel=lensmodel)

        # I have no gradients available for unproject(), and I need to invert a
        # non-square matrix to use the gradients from project(). I deal with this
        # with a stereographic mapping
        #
        # With a simple unprojection I have    q -> v
        # Instead I now do                     q -> vs -> u -> v

        # I reproject vs, to produce a scaled v = k*vs. I'm assuming all
        # projections are central, so vs represents q just as well as v does. u
        # is a 2-vector, so dq_du is (2x2), and I can invert it
        u = mrcal.project_stereographic(vs)
        dv_du = np.zeros( vs.shape + (2,), dtype=float)
        v, dv_du = \
            mrcal.unproject_stereographic(u,
                                          get_gradients = True,
                                          out = (vs if out is None else out[0],
                                                 dv_du))

        _,dq_dv,dq_di = mrcal.project(v,
                                      lensmodel, intrinsics_data,
                                      get_gradients = True)

        # shape (..., 2,2). Square. Invertible!
        dq_du = nps.matmult( dq_dv, dv_du )

        # dv/dq = dv/du du/dq =
        #       = dv/du inv(dq/du)
        #       = transpose(inv(transpose(dq/du)) transpose(dv/du))
        dv_dq = nps.transpose(np.linalg.solve( nps.transpose(dq_du),
                                               nps.transpose(dv_du) ))
        if out is not None:
            # Write into the caller's array, and alias it
            out[1] *= 0.
            out[1] += dv_dq
            dv_dq = out[1]

        # dv/di is a bit different. I have (q,i) -> v. I want to find out
        # how moving i affects v while keeping q constant. Taylor expansion
        # of projection: q = q0 + dq/dv dv + dq/di di. q is constant so
        # dq/dv dv + dq/di di = 0 -> dv/di = - dv/dq dq/di
        dv_di = nps.matmult(dv_dq, dq_di,
                            out = None if out is None else out[2])
        dv_di *= -1.

        if normalize:
            apply_normalization_to_output_with_gradients(v,dv_dq,dv_di)

        return v, dv_dq, dv_di

    # No projection gradients implemented in C. We should get here approximately
    # never. At this time, the only projection function that has no gradients
    # implemented is LENSMODEL_CAHVORE, which nobody is really expected to be
    # using. If these see use, real gradients should be implemented
    #
    # We compute the gradients numerically. This is a reimplementation of the C
    # code. It's barely maintained, and here for legacy compatibility only

    if get_gradients:
        raise Exception(f"unproject(..., get_gradients=True) is unsupported for models with no gradients, such as '{lensmodel}'")

    if q is None: return q
    if q.size == 0:
        s = q.shape
        return np.zeros(s[:-1] + (3,))

    if out is not None:
        raise Exception(f"unproject(..., out) is unsupported if out is not None and we're using a model with no gradients, such as '{lensmodel}'")

    fxy = intrinsics_data[...,  :2]
    cxy = intrinsics_data[..., 2:4]

    # undistort the q, by running an optimizer
    import scipy.optimize

    # I optimize each point separately because the internal optimization
    # algorithm doesn't know that each point is independent, so if I optimized
    # it all together, it would solve a dense linear system whose size is linear
    # in Npoints. The computation time thus would be much slower than
    # linear(Npoints)
    @nps.broadcast_define( ((2,),), )
    def undistort_this(q0):

        def cost_no_gradients(vxy, *args, **kwargs):
            '''Optimization functions'''
            return \
                mrcal.project(np.array((vxy[0],vxy[1],1.)), lensmodel, intrinsics_data) - \
                q0

        # seed assuming distortions aren't there
        vxy_seed = (q0 - cxy) / fxy

        # no gradients available
        result = scipy.optimize.least_squares(cost_no_gradients,
                                              vxy_seed,
                                              '3-point')

        vxy = result.x

        # This needs to be precise; if it isn't, I barf. Shouldn't happen
        # very often
        if np.sqrt(result.cost/2.) > 1e-3:
            if not unproject.__dict__.get('already_complained'):
                sys.stderr.write("WARNING: unproject() wasn't able to precisely compute some points. Returning nan for those. Will complain just once\n")
                unproject.already_complained = True
            return np.array((np.nan,np.nan))
        return vxy

    vxy = undistort_this(q)

    # I append a 1. shape = (..., 3)
    v = nps.glue(vxy, np.ones( vxy.shape[:-1] + (1,) ), axis=-1)
    if normalize:
        v /= nps.dummy(nps.mag(v), -1)
    return v
def image_transformation_map(model_from, model_to,
                             use_rotation                       = False,
                             plane_n                            = None,
                             plane_d                            = None,
                             mask_valid_intrinsics_region_from  = False):

    r'''Compute a reprojection map between two models

SYNOPSIS

    model_orig = mrcal.cameramodel("xxx.cameramodel")
    image_orig = cv2.imread("image.jpg")

    model_pinhole = mrcal.pinhole_model_for_reprojection(model_orig,
                                                         fit = "corners")

    mapxy = mrcal.image_transformation_map(model_orig, model_pinhole)

    image_undistorted = mrcal.transform_image(image_orig, mapxy)

    # image_undistorted is now a pinhole-reprojected version of image_orig

Returns the transformation that describes a mapping

- from pixel coordinates of an image of a scene observed by model_to
- to pixel coordinates of an image of the same scene observed by model_from

This transformation can then be applied to a whole image by calling
mrcal.transform_image().

This function returns a transformation map in an (Nheight,Nwidth,2) array. The
image made by model_to will have shape (Nheight,Nwidth). Each pixel (x,y) in
this image corresponds to a pixel mapxy[y,x,:] in the image made by model_from.

This function has 3 modes of operation:

- intrinsics-only

  This is the default. Selected if

  - use_rotation = False
  - plane_n      = None
  - plane_d      = None

  All of the extrinsics are ignored. If the two cameras have the same
  orientation, then their observations of infinitely-far-away objects will line
  up exactly

- rotation

  This can be selected explicitly with

  - use_rotation = True
  - plane_n      = None
  - plane_d      = None

  Here we use the rotation component of the relative extrinsics. The relative
  translation is impossible to use without knowing what we're looking at, so IT
  IS ALWAYS IGNORED. If the relative orientation in the models matches reality,
  then the two cameras' observations of infinitely-far-away objects will line
  up exactly

- plane

  This is selected if

  - use_rotation = True
  - plane_n is not None
  - plane_d is not None

  We map observations of a given plane in camera FROM coordinates to where
  this plane would be observed by camera TO. This uses ALL the intrinsics,
  extrinsics and the plane representation. If all of these are correct, the
  observations of this plane would line up exactly in the remapped-camera-from
  image and the camera-to image. The plane is represented in camera-from
  coordinates by a normal vector plane_n, and the distance to the normal plane
  plane_d. The plane is all points p such that inner(p,plane_n) = plane_d.
  plane_n does not need to be normalized; any scaling is compensated in
  plane_d.

ARGUMENTS

- model_from: the mrcal.cameramodel object describing the camera used to
  capture the input image

- model_to: the mrcal.cameramodel object describing the camera that would have
  captured the image we're producing

- use_rotation: optional boolean, defaulting to False. If True: we respect the
  relative rotation in the extrinsics of the camera models.

- plane_n: optional numpy array of shape (3,); None by default. If given, we
  produce a transformation to map observations of a given plane to the same
  pixels in the source and target images. This argument describes the normal
  vector in the coordinate system of model_from. The plane is all points p
  such that inner(p,plane_n) = plane_d. plane_n does not need to be
  normalized; any scaling is compensated in plane_d. If given, plane_d should
  be given also, and use_rotation should be True. if given, we use the full
  intrinsics and extrinsics of both camera models

- plane_d: optional floating-point value; None by default. If given, we
  produce a transformation to map observations of a given plane to the same
  pixels in the source and target images. The plane is all points p such that
  inner(p,plane_n) = plane_d. plane_n does not need to be normalized; any
  scaling is compensated in plane_d. If given, plane_n should be given also,
  and use_rotation should be True. if given, we use the full intrinsics and
  extrinsics of both camera models

- mask_valid_intrinsics_region_from: optional boolean defaulting to False. If
  True, points outside the valid-intrinsics region in the FROM image are set
  to black, and thus do not appear in the output image

RETURNED VALUE

A numpy array of shape (Nheight,Nwidth,2) where Nheight and Nwidth represent
the imager dimensions of model_to. This array contains 32-bit floats, as
required by cv2.remap() (the function providing the internals of
mrcal.transform_image()). This array can be passed to mrcal.transform_image()

    '''

    # plane_n and plane_d must be given together or not at all
    if (plane_n is      None and plane_d is not None) or \
       (plane_n is not  None and plane_d is     None):
        raise Exception("plane_n and plane_d should both be None or neither should be None")
    if plane_n is not None and plane_d is not None and \
       not use_rotation:
        raise Exception("We're looking at remapping a plane (plane_d, plane_n are not None), so use_rotation should be True")

    Rt_to_from = None
    if use_rotation:
        Rt_to_r    = model_to.  extrinsics_Rt_fromref()
        Rt_r_from  = model_from.extrinsics_Rt_toref()
        Rt_to_from = mrcal.compose_Rt(Rt_to_r, Rt_r_from)

    lensmodel_from,intrinsics_data_from = model_from.intrinsics()
    lensmodel_to,  intrinsics_data_to   = model_to.  intrinsics()

    if re.match("LENSMODEL_OPENCV",lensmodel_from) and \
       lensmodel_to == "LENSMODEL_PINHOLE"         and \
       plane_n is None                             and \
       not mask_valid_intrinsics_region_from:

        # This is a common special case. This branch works identically to the
        # other path (the other side of this "if" can always be used instead),
        # but the opencv-specific code is optimized and at one point ran faster
        # than the code on the other side.
        #
        # The mask_valid_intrinsics_region_from isn't implemented in this path.
        # It COULD be, then this faster path could be used
        fxy_from = intrinsics_data_from[0:2]
        cxy_from = intrinsics_data_from[2:4]
        cameraMatrix_from = np.array(((fxy_from[0],          0, cxy_from[0]),
                                      ( 0,         fxy_from[1], cxy_from[1]),
                                      ( 0,                   0,           1)))

        fxy_to = intrinsics_data_to[0:2]
        cxy_to = intrinsics_data_to[2:4]
        cameraMatrix_to = np.array(((fxy_to[0],        0, cxy_to[0]),
                                    ( 0,       fxy_to[1], cxy_to[1]),
                                    ( 0,               0,         1)))

        output_shape = model_to.imagersize()
        distortion_coeffs = intrinsics_data_from[4: ]

        if Rt_to_from is not None:
            R_to_from = Rt_to_from[:3,:]
            if np.trace(R_to_from) > 3. - 1e-12:
                R_to_from = None # identity, so I pass None
        else:
            R_to_from = None

        # cv2.initUndistortRectifyMap() returns two separate maps; glue them
        # into the single (Nheight,Nwidth,2) array we document
        return nps.glue( *[ nps.dummy(arr,-1) for arr in \
                            cv2.initUndistortRectifyMap(cameraMatrix_from, distortion_coeffs,
                                                        R_to_from,
                                                        cameraMatrix_to, tuple(output_shape),
                                                        cv2.CV_32FC1)],
                         axis = -1)

    W_from,H_from = model_from.imagersize()
    W_to,  H_to   = model_to.  imagersize()

    # shape: (Nheight,Nwidth,2). Contains (x,y) rows
    grid = np.ascontiguousarray(nps.mv(nps.cat(*np.meshgrid(np.arange(W_to),
                                                            np.arange(H_to))),
                                       0,-1),
                                dtype = float)

    if lensmodel_to == "LENSMODEL_PINHOLE":

        # Faster path for the unproject. Nice, simple closed-form solution
        fxy_to = intrinsics_data_to[0:2]
        cxy_to = intrinsics_data_to[2:4]
        v = np.zeros( (grid.shape[0], grid.shape[1], 3), dtype=float)
        v[..., :2] = (grid-cxy_to)/fxy_to
        v[...,  2] = 1
    elif lensmodel_to == "LENSMODEL_STEREOGRAPHIC":

        # Faster path for the unproject. Nice, simple closed-form solution
        v = mrcal.unproject_stereographic(grid, *intrinsics_data_to[:4])
    else:
        v = mrcal.unproject(grid, lensmodel_to, intrinsics_data_to)

    if plane_n is not None:

        R_to_from = Rt_to_from[:3,:]
        t_to_from = Rt_to_from[ 3,:]

        # The homography definition. Derived in many places. For instance in
        # "Motion and structure from motion in a piecewise planar environment"
        # by Olivier Faugeras, F. Lustman.
        A_to_from = plane_d * R_to_from + nps.outer(t_to_from, plane_n)
        A_from_to = np.linalg.inv(A_to_from)
        # row-vector form: v_from ~ v_to A_from_to^T
        v = nps.matmult( v, nps.transpose(A_from_to) )

    else:
        if Rt_to_from is not None:
            R_to_from = Rt_to_from[:3,:]
            if np.trace(R_to_from) < 3. - 1e-12:
                # rotation isn't identity. apply
                v = nps.matmult(v, R_to_from)

    mapxy = mrcal.project( v, lensmodel_from, intrinsics_data_from )

    if mask_valid_intrinsics_region_from:

        # Using matplotlib to compute the out-of-bounds points. It doesn't
        # support broadcasting, so I do that manually with a clump/reshape
        from matplotlib.path import Path
        region = Path(model_from.valid_intrinsics_region())
        is_inside = region.contains_points(nps.clump(mapxy,n=2)).reshape(mapxy.shape[:2])
        mapxy[ ~is_inside, :] = -1

    return mapxy.astype(np.float32)
def fit(q, f, order):
    '''Least-squares fit of a hypothesis model of the given order to data (q,f)

    Solves for the parameter vector p minimizing norm2(func_hypothesis(q,p)-f)
    via the pseudo-inverse of the model matrix.

    Returns (p, J, x): the fitted parameters, the jacobian (the model matrix
    itself, since the model is linear in p), and the residual vector
    '''
    S = model_matrix(q, order)
    # Linear model: the jacobian IS the model matrix
    pinvS_t = nps.transpose(np.linalg.pinv(S))
    p       = nps.matmult(f, pinvS_t)
    x       = func_hypothesis(q, p) - f
    return p, S, x
# Carve output destinations out of the preallocated scratch array "base".
# These are strided, non-contiguous views on purpose: the in-place (out=)
# paths must handle them
out6   = base[1,  4,  :6,  1]
out66  = base[5, 3:9, 3:9, 2]
out66a = base[6, 3:9, 3:9, 2]

# identity transforms written in-place must produce the canonical values
confirm_equal(mrcal.identity_R(out=out33),
              np.eye(3),
              msg='identity_R')
confirm_equal(mrcal.identity_Rt(out=out43),
              nps.glue(np.eye(3), np.zeros((3,),), axis=-2),
              msg='identity_Rt')
confirm_equal(mrcal.identity_r(out=out3),
              np.zeros((3,)),
              msg='identity_r')
confirm_equal(mrcal.identity_rt(out=out6),
              np.zeros((6,)),
              msg='identity_rt')

################# rotate_point_R

# Reference: rotating a point by R is x R^T in row-vector form
y = \
    mrcal.rotate_point_R(R0_ref, x, out = out3)
confirm_equal( y,
               nps.matmult(x, nps.transpose(R0_ref)),
               msg='rotate_point_R result')

y, J_R, J_x = \
    mrcal.rotate_point_R(R0_ref, x, get_gradients=True,
                         out = (out3,out333,out33))
# J_R checked against a numerical gradient; J_x is exactly R0_ref for a
# linear map
J_R_ref = grad(lambda R: nps.matmult(x, nps.transpose(R)),
               R0_ref)
J_x_ref = R0_ref
confirm_equal( y,
               nps.matmult(x, nps.transpose(R0_ref)),
               msg='rotate_point_R result')
confirm_equal( J_R,
               J_R_ref,
               msg='rotate_point_R J_R')
confirm_equal( J_x,
               J_x_ref,
               msg='rotate_point_R J_x')

# In-place
R0_ref_copy = np.array(R0_ref)
[0.9447233, 6.8439095, 9.6958398]]) R = Rt[:3, :] t = Rt[3, :] noise = np.array([[0.00035356, 0.00043613, 0.00006606], [0.00043968, 0.00043783, 0.00060678], [0.00063803, 0.00024423, 0.00010871], [0.00004966, 0.00053377, 0.00018905], [0.00007708, 0.00023529, 0.0002229], [0.00090558, 0.00072379, 0.00004062], [0.00072059, 0.00074467, 0.00044128], [0.00024228, 0.00058201, 0.00041458], [0.00018121, 0.00078172, 0.00016128], [0.00019021, 0.00001371, 0.00096808]]) Tp = nps.matmult(p, nps.transpose(R)) + t Rt_fit = \ mrcal.align_procrustes_points_Rt01(Tp + noise, p) R_fit = Rt_fit[:3, :] t_fit = Rt_fit[3, :] testutils.confirm_equal(R_fit, R, eps=1e-2, msg='Procrustes fit R') testutils.confirm_equal(t_fit, t, eps=1e-2, msg='Procrustes fit t') R_fit_vectors = \ mrcal.align_procrustes_vectors_R01(nps.matmult( p, nps.transpose(R) ) + noise, p) testutils.confirm_equal(R_fit_vectors, R, eps=1e-2, msg='Procrustes fit R (vectors)')
def func_hypothesis(q, p):
    '''Evaluate the hypothesis model at q for the parameter vector p

    The model is linear in p: f(q) = p S(q)^T, where S is the model matrix of
    order len(p)
    '''
    basis   = model_matrix(q, len(p))
    basis_t = nps.transpose(basis)
    return nps.matmult(p, basis_t)
def _write(f, m, note=None):
    r'''Writes a cameramodel as a .cahvor to a writeable file object

    f is an open, writeable text file object; m is the mrcal.cameramodel to
    serialize. An optional "note" string is emitted as leading '#' comment
    lines. Returns True on success; raises on an unsupported lens model.
    '''
    if note is not None:
        for l in note.splitlines():
            f.write('# ' + l + '\n')
    d = m.imagersize()
    f.write('Dimensions = {} {}\n'.format(int(d[0]), int(d[1])))

    lensmodel,intrinsics = m.intrinsics()
    if lensmodel == 'LENSMODEL_CAHVOR':
        f.write("Model = CAHVOR = perspective, distortion\n")
    elif re.match('LENSMODEL_(OPENCV.*|PINHOLE)', lensmodel):
        f.write("Model = CAHV = perspective, linear\n")
    else:
        match = re.match('^LENSMODEL_CAHVORE_linearity=([0-9\.]+)$', lensmodel)
        if match is not None:
            f.write("Model = CAHVORE3,{} = general\n".format(match.group(1)))
        else:
            raise Exception("Don't know how to handle lens model '{}'".format(lensmodel))

    fx,fy,cx,cy = intrinsics[:4]
    Rt_toref = m.extrinsics_Rt_toref()
    R_toref = Rt_toref[:3,:]
    t_toref = Rt_toref[ 3,:]

    # Build the CAHV vectors from the intrinsics core and the pose:
    # C is the camera position, A the optical axis, and H,V fold the focal
    # lengths and center pixel into the horizontal/vertical axes
    C = t_toref
    A = R_toref[:,2]
    Hp = R_toref[:,0]
    Vp = R_toref[:,1]
    H = fx*Hp + A*cx
    V = fy*Vp + A*cy

    f.write(("{} =" + (" {:15.10f}" * 3) + "\n").format('C', *C))
    f.write(("{} =" + (" {:15.10f}" * 3) + "\n").format('A', *A))
    f.write(("{} =" + (" {:15.10f}" * 3) + "\n").format('H', *H))
    f.write(("{} =" + (" {:15.10f}" * 3) + "\n").format('V', *V))

    if re.match('^LENSMODEL_CAHVOR', lensmodel):
        # CAHVOR(E): O is the distortion axis rotated into the reference
        # frame; R holds the radial distortion coefficients
        alpha,beta,R0,R1,R2 = intrinsics[4:9]

        s_al,c_al,s_be,c_be = np.sin(alpha),np.cos(alpha),np.sin(beta),np.cos(beta)
        O = nps.matmult( R_toref, nps.transpose(np.array(( s_al*c_be, s_be, c_al*c_be ), dtype=float)) ).ravel()
        R = np.array((R0, R1, R2), dtype=float)
        f.write(("{} =" + (" {:15.10f}" * 3) + "\n").format('O', *O))
        f.write(("{} =" + (" {:15.10f}" * 3) + "\n").format('R', *R))

        if re.match('^LENSMODEL_CAHVORE', lensmodel):
            E = intrinsics[9:]
            f.write(("{} =" + (" {:15.10f}" * 3) + "\n").format('E', *E))

    # NOTE(review): 'LENSMODEL_OPENCV*' is a regex, so it means
    # "LENSMODEL_OPENC" followed by zero-or-more "V". It still matches the
    # LENSMODEL_OPENCVn names, but 'LENSMODEL_OPENCV.*' was probably intended
    elif re.match('LENSMODEL_OPENCV*', lensmodel):
        Ndistortions = mrcal.lensmodel_num_params(lensmodel) - 4
        f.write(("{} =" + (" {:15.10f}" * Ndistortions) + "\n").format(lensmodel, *intrinsics[4:]))
    elif lensmodel == 'LENSMODEL_PINHOLE':
        # the CAHV values we already wrote are all that's needed
        pass
    else:
        raise Exception(f"Cannot write lens model '{lensmodel}' to a .cahvor file. I only support PINHOLE, CAHVOR(E) and OPENCV model")

    c = m.valid_intrinsics_region()
    if c is not None:
        f.write("VALID_INTRINSICS_REGION = ")
        np.savetxt(f, c.ravel(), fmt='%.2f', newline=' ')
        f.write('\n')

    # Redundant legacy scalars: repeat the intrinsics core in Hs/Hc/Vs/Vc form
    Hs,Vs,Hc,Vc = intrinsics[:4]
    f.write("Hs = {}\n".format(Hs))
    f.write("Hc = {}\n".format(Hc))
    f.write("Vs = {}\n".format(Vs))
    f.write("Vc = {}\n".format(Vc))
    f.write("# this is hard-coded\nTheta = {} (-90.0 deg)\n".format(-np.pi/2))

    return True
# Build a small sparse jacobian from raw CSR arrays (data, indices, indptr
# defined above), and confirm the dense view matches the hand-written
# reference
Jsparse = csr_matrix((data, indices, indptr))
Jdense  = Jsparse.toarray()
Jdense_ref = \
    np.array(((1, 0, 2),
              (0, 0, 3),
              (4, 5, 6),
              (0, 7, 8)), dtype=float)
testutils.confirm_equal(Jdense,
                        Jdense_ref,
                        relative  = True,
                        worstcase = True,
                        eps       = 1e-6,
                        msg       = "csr_matrix representation works as expected")

bt = np.array(((1., 5., 3.),
               (2., -2., -8)))

# Check the CHOLMOD solve of JtJ x = b against a dense np.linalg.solve()
# reference
F  = mrcal.CHOLMOD_factorization(Jsparse)
xt = F.solve_xt_JtJ_bt(bt)

JtJ    = nps.matmult(nps.transpose(Jdense), Jdense)
xt_ref = nps.transpose(np.linalg.solve(JtJ, nps.transpose(bt)))

testutils.confirm_equal(xt,
                        xt_ref,
                        relative  = True,
                        worstcase = True,
                        eps       = 1e-6,
                        msg       = "solve_xt_JtJ_bt produces the correct result")

testutils.finish()
def compute_outliernesses(J, x, jq, k_dima, k_cook):
    '''Computes all the outlierness/Cook's D metrics

    I have 8 things I can compute coming from 3 yes/no choices. These are all
    very similar, with two pairs actually coming out identical. I choose:

    - Are we detecting outliers, or looking at effects of a new query point?

    - Dima's outlierness factor or Cook's D

    - Look ONLY at the effect on the other variables, or on the other
      variables AND self?

    If we're detecting outliers, we REMOVE measurements from the dataset, and
    see what happens to the fit. If we're looking at effects of a new query
    point, we see what happend if we ADD measurements

    Dima's outlierness factor metric looks at what happens to the cost
    function E = norm2(x). Specifically I look at

      (norm2(x_before) - norm(x_after))/Nmeasurements

    Cook's D instead looks at

      (norm2(x_before - x_after)) * k

    for some constant k. Finally, we can decide whether to include the effects
    on the measurements we're adding/removing, or not.

    Note that here I only look at adding/removing SCALAR measurements

    =============

    This is similar-to, but not exactly-the-same-as Cook's D. I assume the
    least squares fit optimizes a cost function E = norm2(x). The outlierness
    factor I return is

      f = 1/Nmeasurements (E(outliers and inliers) - E(inliers only))

    For a scalar measurement, this solves to

      k = xo^2 / Nmeasurements
      B = 1.0/(jt inv(JtJ) j - 1)
      f = -k * B

    (see the comment in dogleg_getOutliernessFactors() for a description)

    Note that my metric is proportional to norm2(x_io) - norm2(x_i). This is
    NOT the same as Cook's distance, which is proportional to
    norm2(x_io - x_i). It's not yet obvious to me which is better

    There're several slightly-different definitions of Cook's D and of a
    rule-of-thumb threshold floating around on the internet. Wikipedia says:

      D = norm2(x_io - x_i)^2 / (Nstate * norm2(x_io)/(Nmeasurements - Nstate))
      D_threshold = 1

    An article https://www.nature.com/articles/nmeth.3812 says

      D = norm2(x_io - x_i)^2 / ((Nstate+1) * norm2(x_io)/(Nmeasurements -
      Nstate -1))
      D_threshold = 4/Nmeasurements

    Here I use the second definition. That definition expands to

      k = xo^2 / ((Nstate+1) * norm2(x_io)/(Nmeasurements - Nstate -1))
      B = 1.0/(jt inv(JtJ) j - 1)
      f = k * (B + B*B)

    '''

    Nmeasurements,Nstate = J.shape

    # The A values for each measurement: the leverage-like quantities
    # j inv(JtJ) jt for the existing measurements and for the query points
    Aoutliers = nps.inner(J, nps.transpose(np.linalg.pinv(J)))
    Aquery = nps.inner(jq, nps.transpose(np.linalg.solve(nps.matmult(nps.transpose(J),J),
                                                         nps.transpose(jq))))

    def dima():
        # NOTE(review): k is immediately overridden to 1, so the k_dima
        # normalization constant is deliberately ignored here — confirm
        k = k_dima
        k = 1

        # Here the metrics are linear, so self + others = self_others

        def outliers():
            B = 1.0 / (Aoutliers - 1.0)
            return dict( self        = k * x*x,
                         others      = k * x*x*(-B-1),
                         self_others = k * x*x*(-B  ))
        def query():
            B = 1.0 / (Aquery + 1.0)
            return dict( self        = k * (  B*B),
                         others      = k * (B-B*B),
                         self_others = k * (B))

        return dict(outliers = outliers(),
                    query    = query())

    def cook():
        # NOTE(review): as in dima(), k_cook is deliberately overridden to 1
        k = k_cook
        k = 1

        # Here the metrics maybe aren't linear (I need to think about it), so
        # maybe self + others != self_others. I thus am not returning the
        # "self" metric

        def outliers():
            B = 1.0 / (Aoutliers - 1.0)
            return dict( self_others = k * x*x*(B+B*B ) ,
                         others      = k * x*x*(-B-1))
        def query():
            B = 1.0 / (Aquery + 1.0)
            return dict( self_others = k * (1-B) ,
                         others      = k * (B-B*B))

        return dict(outliers = outliers(),
                    query    = query())

    return dict(cook = cook(),
                dima = dima())
def image_transformation_map(model_from, model_to,
                             intrinsics_only                   = False,
                             distance                          = None,
                             plane_n                           = None,
                             plane_d                           = None,
                             mask_valid_intrinsics_region_from = False):

    r'''Compute a reprojection map between two models

SYNOPSIS

    model_orig = mrcal.cameramodel("xxx.cameramodel")
    image_orig = cv2.imread("image.jpg")

    model_pinhole = mrcal.pinhole_model_for_reprojection(model_orig,
                                                         fit = "corners")

    mapxy = mrcal.image_transformation_map(model_orig, model_pinhole,
                                           intrinsics_only = True)

    image_undistorted = mrcal.transform_image(image_orig, mapxy)

    # image_undistorted is now a pinhole-reprojected version of image_orig

Returns the transformation that describes a mapping

- from pixel coordinates of an image of a scene observed by model_to
- to pixel coordinates of an image of the same scene observed by model_from

This transformation can then be applied to a whole image by calling
mrcal.transform_image().

This function returns a transformation map in an (Nheight,Nwidth,2) array. The
image made by model_to will have shape (Nheight,Nwidth). Each pixel (x,y) in
this image corresponds to a pixel mapxy[y,x,:] in the image made by model_from.

One application of this function is to validate the models in a stereo pair.
For instance, reprojecting one camera's image at distance=infinity should
produce exactly the same image that is observed by the other camera when
looking at very far objects, IF the intrinsics and rotation are correct. If the
images don't line up well, we know that some part of the models is off.
Similarly, we can use big planes (such as observations of the ground) and
plane_n, plane_d to validate.

This function has several modes of operation:

- intrinsics, extrinsics

  Used if not intrinsics_only and \
          plane_n is None     and \
          plane_d is None

  This is the default. For each pixel in the output, we use the full model to
  unproject a given distance out, and then use the full model to project back
  into the other camera.

- intrinsics only

  Used if intrinsics_only and \
          plane_n is None and \
          plane_d is None

  Similar, but the extrinsics are ignored. We unproject the pixels in one
  model, and project them into the other camera. The two camera coordinate
  systems are assumed to line up perfectly

- plane

  Used if plane_n is not None and plane_d is not None

  We map observations of a given plane in camera FROM coordinates to where
  this plane would be observed by camera TO. We unproject each pixel in one
  camera, compute the intersection point with the plane, and project that
  intersection point back to the other camera. This uses ALL the intrinsics,
  extrinsics and the plane representation. The plane is represented by a
  normal vector plane_n, and the distance to the normal plane_d. The plane is
  all points p such that

    inner(p,plane_n) = plane_d

  plane_n does not need to be normalized; any scaling is compensated in
  plane_d.

ARGUMENTS

- model_from: the mrcal.cameramodel object describing the camera used to
  capture the input image. We always use the intrinsics. if not
  intrinsics_only: we use the extrinsics also

- model_to: the mrcal.cameramodel object describing the camera that would have
  captured the image we're producing. We always use the intrinsics. if not
  intrinsics_only: we use the extrinsics also

- intrinsics_only: optional boolean, defaulting to False. If False: we respect
  the relative transformation in the extrinsics of the camera models.

- distance: optional value, defaulting to None. Used only if not
  intrinsics_only. We reproject points in space a given distance out. If
  distance is None (the default), we look out to infinity. This is equivalent
  to using only the rotation component of the extrinsics, ignoring the
  translation.

- plane_n: optional numpy array of shape (3,); None by default. If given, we
  produce a transformation to map observations of a given plane to the same
  pixels in the source and target images. This argument describes the normal
  vector in the coordinate system of model_from. The plane is all points p
  such that

    inner(p,plane_n) = plane_d

  plane_n does not need to be normalized; any scaling is compensated in
  plane_d. If given, plane_d should be given also, and intrinsics_only should
  be False. if given, we use the full intrinsics and extrinsics of both camera
  models

- plane_d: optional floating-point value; None by default. If given, we
  produce a transformation to map observations of a given plane to the same
  pixels in the source and target images. The plane is all points p such that

    inner(p,plane_n) = plane_d

  plane_n does not need to be normalized; any scaling is compensated in
  plane_d. If given, plane_n should be given also, and intrinsics_only should
  be False. if given, we use the full intrinsics and extrinsics of both camera
  models

- mask_valid_intrinsics_region_from: optional boolean defaulting to False. If
  True, points outside the valid-intrinsics region in the FROM image are set
  to black, and thus do not appear in the output image

RETURNED VALUE

A numpy array of shape (Nheight,Nwidth,2) where Nheight and Nwidth represent
the imager dimensions of model_to. This array contains 32-bit floats, as
required by cv2.remap() (the function providing the internals of
mrcal.transform_image()). This array can be passed to mrcal.transform_image()

    '''

    # Validate the argument combinations up-front: plane_n/plane_d come as a
    # pair, and are incompatible with intrinsics_only and with distance
    if (plane_n is None and plane_d is not None) or \
       (plane_n is not None and plane_d is None):
        raise Exception("plane_n and plane_d should both be None or neither should be None")
    if plane_n is not None and \
       intrinsics_only:
        raise Exception("We're looking at remapping a plane (plane_d, plane_n are not None), so intrinsics_only should be False")

    if distance is not None and \
       (plane_n is not None or intrinsics_only):
        raise Exception("'distance' makes sense only without plane_n/plane_d and without intrinsics_only")

    if intrinsics_only:
        Rt_to_from = None
    else:
        # Transform mapping points in the FROM camera frame to the TO camera
        # frame, composed through the common reference frame
        Rt_to_from = mrcal.compose_Rt(model_to.extrinsics_Rt_fromref(),
                                      model_from.extrinsics_Rt_toref())

    lensmodel_from, intrinsics_data_from = model_from.intrinsics()
    lensmodel_to,   intrinsics_data_to   = model_to.intrinsics()

    if re.match("LENSMODEL_OPENCV",lensmodel_from) and \
       lensmodel_to == "LENSMODEL_PINHOLE"         and \
       plane_n is None                             and \
       not mask_valid_intrinsics_region_from       and \
       distance is None:

        # This is a common special case. This branch works identically to the
        # other path (the other side of this "if" can always be used instead),
        # but the opencv-specific code is optimized and at one point ran faster
        # than the code on the other side.
        #
        # The mask_valid_intrinsics_region_from isn't implemented in this path.
        # It COULD be, then this faster path could be used
        import cv2

        fxy_from = intrinsics_data_from[0:2]
        cxy_from = intrinsics_data_from[2:4]
        cameraMatrix_from = np.array(((fxy_from[0],          0, cxy_from[0]),
                                      (          0, fxy_from[1], cxy_from[1]),
                                      (          0,          0,           1)))

        fxy_to = intrinsics_data_to[0:2]
        cxy_to = intrinsics_data_to[2:4]
        cameraMatrix_to = np.array(((fxy_to[0],        0, cxy_to[0]),
                                    (        0, fxy_to[1], cxy_to[1]),
                                    (        0,        0,         1)))

        output_shape = model_to.imagersize()
        distortion_coeffs = intrinsics_data_from[4:]

        if Rt_to_from is not None:
            R_to_from = Rt_to_from[:3,:]
            # trace(R) == 3 only for the identity rotation; allow a small
            # numerical tolerance
            if np.trace(R_to_from) > 3. - 1e-12:
                R_to_from = None # identity, so I pass None
        else:
            R_to_from = None

        # cv2 returns two separate (H,W) maps; glue them into (H,W,2)
        return nps.glue( *[ nps.dummy(arr,-1) for arr in \
                            cv2.initUndistortRectifyMap(cameraMatrix_from, distortion_coeffs,
                                                        R_to_from,
                                                        cameraMatrix_to, tuple(output_shape),
                                                        cv2.CV_32FC1)],
                         axis = -1)

    W_to, H_to = model_to.imagersize()

    # shape: (Nheight,Nwidth,2). Contains (x,y) rows
    grid = np.ascontiguousarray(nps.mv(nps.cat(*np.meshgrid(np.arange(W_to),
                                                            np.arange(H_to))),
                                       0, -1),
                                dtype = float)

    # Observation vectors in the TO camera frame, one per output pixel
    v = mrcal.unproject(grid, lensmodel_to, intrinsics_data_to)

    if plane_n is not None:

        R_to_from = Rt_to_from[:3,:]
        t_to_from = Rt_to_from[ 3,:]

        # The homography definition. Derived in many places. For instance in
        # "Motion and structure from motion in a piecewise planar environment"
        # by Olivier Faugeras, F. Lustman.
        A_to_from = plane_d * R_to_from + nps.outer(t_to_from, plane_n)
        A_from_to = np.linalg.inv(A_to_from)
        v = nps.matmult( v, nps.transpose(A_from_to) )

    else:
        if Rt_to_from is not None:
            if distance is not None:
                # Finite distance: normalize v, scale out to the requested
                # distance, and transform those points into the FROM frame
                v = mrcal.transform_point_Rt( mrcal.invert_Rt(Rt_to_from),
                                              v/nps.dummy(nps.mag(v), -1) * distance )
            else:
                # Infinity: rotation only. matmult(v, R) applies R^T to the
                # row-vectors in v, i.e. rotates TO-frame vectors into the
                # FROM frame
                R_to_from = Rt_to_from[:3,:]
                v = nps.matmult(v, R_to_from)

    mapxy = mrcal.project( v, lensmodel_from, intrinsics_data_from )

    if mask_valid_intrinsics_region_from:

        # Using matplotlib to compute the out-of-bounds points. It doesn't
        # support broadcasting, so I do that manually with a clump/reshape
        from matplotlib.path import Path
        region = Path(model_from.valid_intrinsics_region())
        is_inside = region.contains_points(nps.clump(mapxy,n=2)).reshape(mapxy.shape[:2])
        # Out-of-region pixels map to (-1,-1): out of bounds for cv2.remap(),
        # so they come out black
        mapxy[ ~is_inside, :] = -1

    return mapxy.astype(np.float32)
# I'm expecting the layout of a vanilla calibration problem, and I assume that # camera0 is at the reference below. Here I confirm that this assumption is # correct icam_extrinsics0 = mrcal.corresponding_icam_extrinsics(0, **optimization_inputs_baseline) icam_extrinsics1 = mrcal.corresponding_icam_extrinsics(1, **optimization_inputs_baseline) if not (icam_extrinsics0 < 0 and icam_extrinsics1 == 0): raise Exception("Vanilla calibration problem expected, but got something else instead. Among others, _triangulate() assumes the triangulated result is in cam0, which is the same as the ref coord system") istate_e1 = mrcal.state_index_extrinsics(icam_extrinsics1, **optimization_inputs_baseline) istate_f0 = mrcal.state_index_frames(0, **optimization_inputs_baseline) Nstate_frames = mrcal.num_states_frames(**optimization_inputs_baseline) # dp_triangulated_di0 = dp_triangulated_dv0 dvlocal0_di0 # dp_triangulated_di1 = dp_triangulated_dv1 dv1_dvlocal1 dvlocal1_di1 nps.matmult( dp_triangulated_dv0, dvlocal0_dintrinsics0, out = dp_triangulated_dpstate[..., istate_i0:istate_i0+Nintrinsics]) nps.matmult( dp_triangulated_dv1, dv1_dvlocal1, dvlocal1_dintrinsics1, out = dp_triangulated_dpstate[..., istate_i1:istate_i1+Nintrinsics]) # dp_triangulated_de0 doesn't exist: assuming vanilla calibration problem, so # there is no e0 # dp_triangulated_dr1r = # dp_triangulated_dv1 dv1_dr01 dr01_dr1r + # dp_triangulated_dt01 dt01_dr1r dr01_drr1 = drt01_drtr1[:3,:3] drr1_dr1r = drtr1_drt1r[:3,:3] dr01_dr1r = nps.matmult(dr01_drr1, drr1_dr1r)