def compute_rectification_homographies_sift(im1, im2, rpc1, rpc2, x, y, w, h):
    """
    Computes rectifying homographies for a ROI in a pair of Pleiades images.

    Args:
        im1, im2: paths to the two Pleiades images (usually jp2 or tif)
        rpc1, rpc2: two instances of the rpc_model.RPCModel class
        x, y, w, h: four integers defining the rectangular ROI in the first
            image. (x, y) is the top-left corner, and (w, h) are the
            dimensions of the rectangle.

    Returns:
        H1, H2: Two 3x3 matrices representing the rectifying homographies to
            be applied to the two images.
        disp_min, disp_max: horizontal disparity range, computed on a set of
            sift matches
    """
    # in brief: use ransac to estimate F from a set of sift matches, then use
    # loop-zhang to estimate rectifying homographies.
    matches = matches_from_sift_rpc_roi(im1, im2, rpc1, rpc2, x, y, w, h)
    p1 = matches[:, 0:2]
    p2 = matches[:, 2:4]

    # the matching points are translated to be centered in 0, in order to deal
    # with coordinates ranging from -1000 to 1000, and decrease imprecision
    # effects of the loop-zhang rectification. These effects may become very
    # important (~ 10 pixels error) when using coordinates around 20000.
    pp1, T1 = center_2d_points(p1)
    pp2, T2 = center_2d_points(p2)

    F = estimation.fundamental_matrix_ransac(np.hstack([pp1, pp2]))
    H1, H2 = estimation.loop_zhang(F, w, h)

    # compose with previous translations to get H1, H2 in the big images frame
    H1 = np.dot(H1, T1)
    H2 = np.dot(H2, T2)

    # for debug
    print "max, min, mean rectification error on sift matches ----------------"
    tmp = common.points_apply_homography(H1, p1)
    y1 = tmp[:, 1]
    tmp = common.points_apply_homography(H2, p2)
    y2 = tmp[:, 1]
    err = np.abs(y1 - y2)
    print np.max(err), np.min(err), np.mean(err)

    # pull back top-left corner of the ROI in the origin
    roi = [[x, y], [x+w, y], [x+w, y+h], [x, y+h]]
    pts = common.points_apply_homography(H1, roi)
    x0, y0 = common.bounding_box2D(pts)[0:2]
    T = common.matrix_translation(-x0, -y0)
    H1 = np.dot(T, H1)
    H2 = np.dot(T, H2)

    # add a horizontal translation to H2 to center the disparity range around
    # the origin
    H2 = register_horizontally(matches, H1, H2)
    disp_m, disp_M = update_disp_range(matches, H1, H2, w, h)

    return H1, H2, disp_m, disp_M
def corresponding_roi(rpc1, rpc2, x, y, w, h):
    """
    Uses RPC functions to determine the region of im2 associated to the
    specified ROI of im1.

    Args:
        rpc1, rpc2: two instances of the rpc_model.RPCModel class, or paths to
            the xml files
        x, y, w, h: four integers defining a rectangular region of interest
            (ROI) in the first view. (x, y) is the top-left corner, and (w, h)
            are the dimensions of the rectangle.

    Returns:
        four integers defining a ROI in the second view. This ROI is supposed
        to contain the projections of the 3D points that are visible in the
        input ROI.
    """
    # read rpc files
    if not isinstance(rpc1, rpc_model.RPCModel):
        rpc1 = rpc_model.RPCModel(rpc1)
    if not isinstance(rpc2, rpc_model.RPCModel):
        rpc2 = rpc_model.RPCModel(rpc2)
    m, M = altitude_range(rpc1, x, y, w, h, 0, 0)

    # build an array with vertices of the 3D ROI, obtained as {2D ROI} x [m, M]
    a = np.array([x, x, x, x, x+w, x+w, x+w, x+w])
    b = np.array([y, y, y+h, y+h, y, y, y+h, y+h])
    c = np.array([m, M, m, M, m, M, m, M])

    # corresponding points in im2
    xx, yy = find_corresponding_point(rpc1, rpc2, a, b, c)[0:2]

    # return coordinates of the bounding box in im2
    out = common.bounding_box2D(np.vstack([xx, yy]).T)
    return np.round(out)
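# A minimal usage sketch for corresponding_roi (illustrative only, not part of
# the original module): the RPC file paths and ROI coordinates below are
# hypothetical placeholders; a real call needs actual Pleiades RPC xml files.
x2, y2, w2, h2 = corresponding_roi('rpc_im1.xml', 'rpc_im2.xml',
                                   15000, 22000, 1000, 800)
# bounding box, in the second image, of the 3D points seen in the first ROI
print x2, y2, w2, h2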
def compute_rectification_homographies_sift(im1, im2, rpc1, rpc2, x, y, w, h):
    """
    Computes rectifying homographies for a ROI in a pair of Pleiades images.

    Args:
        im1, im2: paths to the two Pleiades images (usually jp2 or tif)
        rpc1, rpc2: two instances of the rpc_model.RPCModel class
        x, y, w, h: four integers defining the rectangular ROI in the first
            image. (x, y) is the top-left corner, and (w, h) are the
            dimensions of the rectangle.

    Returns:
        H1, H2: Two 3x3 matrices representing the rectifying homographies to
            be applied to the two images.
        disp_min, disp_max: horizontal disparity range, computed on a set of
            sift matches
    """
    # in brief: use ransac to estimate F from a set of sift matches, then use
    # loop-zhang to estimate rectifying homographies.
    matches = sift.matches_on_rpc_roi(im1, im2, rpc1, rpc2, x, y, w, h)
    p1 = matches[:, 0:2]
    p2 = matches[:, 2:4]

    # the matching points are translated to be centered in 0, in order to deal
    # with coordinates ranging from -1000 to 1000, and decrease imprecision
    # effects of the loop-zhang rectification. These effects may become very
    # important (~ 10 pixels error) when using coordinates around 20000.
    pp1, T1 = center_2d_points(p1)
    pp2, T2 = center_2d_points(p2)

    F = estimation.fundamental_matrix_ransac(np.hstack([pp1, pp2]))
    H1, H2 = estimation.loop_zhang(F, w, h)

    # compose with previous translations to get H1, H2 in the big images frame
    H1 = np.dot(H1, T1)
    H2 = np.dot(H2, T2)

    # for debug
    print "max, min, mean rectification error on sift matches ----------------"
    tmp = common.points_apply_homography(H1, p1)
    y1 = tmp[:, 1]
    tmp = common.points_apply_homography(H2, p2)
    y2 = tmp[:, 1]
    err = np.abs(y1 - y2)
    print np.max(err), np.min(err), np.mean(err)

    # pull back top-left corner of the ROI in the origin
    roi = [[x, y], [x + w, y], [x + w, y + h], [x, y + h]]
    pts = common.points_apply_homography(H1, roi)
    x0, y0 = common.bounding_box2D(pts)[0:2]
    T = common.matrix_translation(-x0, -y0)
    H1 = np.dot(T, H1)
    H2 = np.dot(T, H2)

    # add a horizontal translation to H2 to center the disparity range around
    # the origin
    H2 = register_horizontally(matches, H1, H2)
    disp_m, disp_M = update_disp_range(matches, H1, H2, w, h)

    return H1, H2, disp_m, disp_M
def rectification_homographies(matches, x, y, w, h):
    """
    Computes rectifying homographies from point matches for a given ROI.

    The affine fundamental matrix F is estimated with the gold-standard
    algorithm, then two rectifying similarities (rotation, zoom, translation)
    are computed directly from F.

    Args:
        matches: numpy array of shape (n, 4) containing a list of 2D point
            correspondences between the two images.
        x, y, w, h: four integers defining the rectangular ROI in the first
            image. (x, y) is the top-left corner, and (w, h) are the
            dimensions of the rectangle.

    Returns:
        S1, S2, F: three numpy arrays of shape (3, 3) representing the two
            rectifying similarities to be applied to the two images and the
            corresponding affine fundamental matrix.
    """
    # estimate the affine fundamental matrix with the Gold standard algorithm
    F = estimation.affine_fundamental_matrix(matches)

    # compute rectifying similarities
    S1, S2 = estimation.rectifying_similarities_from_affine_fundamental_matrix(F, True)

    if cfg["debug"]:
        y1 = common.points_apply_homography(S1, matches[:, :2])[:, 1]
        y2 = common.points_apply_homography(S2, matches[:, 2:])[:, 1]
        err = np.abs(y1 - y2)
        print "max, min, mean rectification error on point matches: ",
        print np.max(err), np.min(err), np.mean(err)

    # pull back top-left corner of the ROI to the origin
    pts = common.points_apply_homography(S1, [[x, y], [x + w, y],
                                              [x + w, y + h], [x, y + h]])
    x0, y0 = common.bounding_box2D(pts)[:2]
    T = common.matrix_translation(-x0, -y0)
    return np.dot(T, S1), np.dot(T, S2), F
def cost_function_linear(v, rpc1, rpc2, matches):
    """
    Objective function to minimize in order to correct the pointing error.

    Arguments:
        v: vector of size 4, containing the 4 parameters of the euclidean
            transformation we are looking for.
        rpc1, rpc2: two instances of the rpc_model.RPCModel class
        matches: 2D numpy array containing a list of matches. Each line
            contains one pair of points, ordered as x1 y1 x2 y2.
            The coordinate system is the one of the big images.
        alpha: relative weight of the error terms: e + alpha*(h-h0)^2. See
            paper for more explanations.

    Returns:
        The sum of pointing errors and altitude differences, as written in the
        paper formula (1).
    """
    print_params(v)

    # verify that parameters are in the bounding box
    if (np.abs(v[0]) > 200*np.pi or np.abs(v[1]) > 10000 or
            np.abs(v[2]) > 10000 or np.abs(v[3]) > 20000):
        print 'warning: cost_function is going too far'
        print v

    x, y, w, h = common.bounding_box2D(matches[:, 0:2])
    matches_rpc = rpc_utils.matches_from_rpc(rpc1, rpc2, x, y, w, h, 5)
    F = estimation.fundamental_matrix(matches_rpc)

    # transform the coordinates of points in the second image according to
    # matrix A, built from vector v
    A = euclidean_transform_matrix(v)
    p2 = common.points_apply_homography(A, matches[:, 2:4])

    return evaluation.fundamental_matrix_L1(F, np.hstack([matches[:, 0:2], p2]))
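# A minimal minimization sketch (illustrative only, not part of the original
# module): the pointing_accuracy module may use a different optimizer and
# initialization. scipy's Nelder-Mead simplex is assumed here, and rpc1, rpc2
# and matches are hypothetical inputs (two RPCModel instances and an Nx4
# array of sift matches).
from scipy.optimize import fmin

v0 = np.zeros(4)  # start from the identity transform (no rotation, no shift)
v_opt = fmin(cost_function_linear, v0, args=(rpc1, rpc2, matches))
A = euclidean_transform_matrix(v_opt)  # pointing correction matrix for im2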
def rectify_pair(im1, im2, rpc1, rpc2, x, y, w, h, out1, out2, A=None, m=None,
                 flag='rpc'):
    """
    Rectify a ROI in a pair of Pleiades images.

    Args:
        im1, im2: paths to the two Pleiades images (usually jp2 or tif)
        rpc1, rpc2: paths to the two xml files containing RPC data
        x, y, w, h: four integers defining the rectangular ROI in the first
            image. (x, y) is the top-left corner, and (w, h) are the
            dimensions of the rectangle.
        out1, out2: paths to the output crops
        A (optional): 3x3 numpy array containing the pointing error correction
            for im2. This matrix is usually estimated with the
            pointing_accuracy module.
        m (optional): Nx4 numpy array containing a list of sift matches, in
            the full image coordinates frame
        flag (default: 'rpc'): option to decide whether to use rpc or sift
            matches for the fundamental matrix estimation.

        This function uses the parameter subsampling_factor from the config
        module. If the factor z > 1 then the output images will be subsampled
        by a factor z. The output matrices H1, H2, and the ranges are also
        updated accordingly:
        Hi = Z*Hi with Z = diag(1/z, 1/z, 1) and
        disp_min = disp_min/z (resp _max)

    Returns:
        H1, H2: Two 3x3 matrices representing the rectifying homographies that
            have been applied to the two (big) images.
        disp_min, disp_max: horizontal disparity range
    """
    # read RPC data
    rpc1 = rpc_model.RPCModel(rpc1)
    rpc2 = rpc_model.RPCModel(rpc2)

    # compute rectifying homographies
    if flag == 'rpc':
        H1, H2, disp_min, disp_max = compute_rectification_homographies(
            im1, im2, rpc1, rpc2, x, y, w, h, A, m)
    else:
        H1, H2, disp_min, disp_max = compute_rectification_homographies_sift(
            im1, im2, rpc1, rpc2, x, y, w, h)

    # compute output images size
    roi = [[x, y], [x + w, y], [x + w, y + h], [x, y + h]]
    pts1 = common.points_apply_homography(H1, roi)
    x0, y0, w0, h0 = common.bounding_box2D(pts1)

    # check that the first homography maps the ROI in the positive quadrant
    np.testing.assert_allclose(np.round([x0, y0]), 0, atol=.01)

    # apply homographies and do the crops TODO XXX FIXME cleanup here
    #homography_cropper.crop_and_apply_homography(out1, im1, H1, w0, h0, cfg['subsampling_factor'], True)
    #homography_cropper.crop_and_apply_homography(out2, im2, H2, w0, h0, cfg['subsampling_factor'], True)
    common.image_apply_homography(out1, im1, H1, w0, h0)
    common.image_apply_homography(out2, im2, H2, w0, h0)

    # if subsampling_factor != 1 the homographies are altered to reflect the zoom
    if cfg['subsampling_factor'] != 1:
        from math import floor, ceil
        # update H1 and H2 to reflect the zoom
        Z = np.eye(3)
        Z[0, 0] = Z[1, 1] = 1.0 / cfg['subsampling_factor']
        H1 = np.dot(Z, H1)
        H2 = np.dot(Z, H2)
        disp_min = floor(disp_min / cfg['subsampling_factor'])
        disp_max = ceil(disp_max / cfg['subsampling_factor'])

    return H1, H2, disp_min, disp_max
def compute_rectification_homographies(im1, im2, rpc1, rpc2, x, y, w, h,
                                        A=None, m=None):
    """
    Computes rectifying homographies for a ROI in a pair of Pleiades images.

    Args:
        im1, im2: paths to the two Pleiades images (usually jp2 or tif)
        rpc1, rpc2: two instances of the rpc_model.RPCModel class
        x, y, w, h: four integers defining the rectangular ROI in the first
            image. (x, y) is the top-left corner, and (w, h) are the
            dimensions of the rectangle.
        A (optional): 3x3 numpy array containing the pointing error correction
            for im2. This matrix is usually estimated with the
            pointing_accuracy module.
        m (optional): Nx4 numpy array containing a list of matches.

    Returns:
        H1, H2: Two 3x3 matrices representing the rectifying homographies to
            be applied to the two images.
        disp_min, disp_max: horizontal disparity range, computed on a set of
            sift matches
    """
    # in brief: use 8-pts normalized algo to estimate F, then use loop-zhang to
    # estimate rectifying homographies.

    print "step 1: find virtual matches, and center them ----------------------"
    n = cfg['n_gcp_per_axis']
    rpc_matches = rpc_utils.matches_from_rpc(rpc1, rpc2, x, y, w, h, n)
    p1 = rpc_matches[:, 0:2]
    p2 = rpc_matches[:, 2:4]

    if A is not None:
        print "applying pointing error correction"
        # correct coordinates of points in im2, according to A
        p2 = common.points_apply_homography(np.linalg.inv(A), p2)

    # the matching points are translated to be centered in 0, in order to deal
    # with coordinates ranging from -1000 to 1000, and decrease imprecision
    # effects of the loop-zhang rectification. These effects may become very
    # important (~ 10 pixels error) when using coordinates around 20000.
    pp1, T1 = center_2d_points(p1)
    pp2, T2 = center_2d_points(p2)

    print "step 2: estimate F (Gold standard algorithm) -----------------------"
    F = estimation.affine_fundamental_matrix(np.hstack([pp1, pp2]))

    print "step 3: compute rectifying homographies (loop-zhang algorithm) -----"
    H1, H2 = estimation.loop_zhang(F, w, h)
    S1, S2 = estimation.rectifying_similarities_from_affine_fundamental_matrix(
        F, True)
    print "F\n", F, "\n"
    print "H1\n", H1, "\n"
    print "S1\n", S1, "\n"
    print "H2\n", H2, "\n"
    print "S2\n", S2, "\n"

    # compose with previous translations to get H1, H2 in the big images frame
    H1 = np.dot(H1, T1)
    H2 = np.dot(H2, T2)

    # for debug
    print "max, min, mean rectification error on rpc matches ------------------"
    tmp = common.points_apply_homography(H1, p1)
    y1 = tmp[:, 1]
    tmp = common.points_apply_homography(H2, p2)
    y2 = tmp[:, 1]
    err = np.abs(y1 - y2)
    print np.max(err), np.min(err), np.mean(err)

    print "step 4: pull back top-left corner of the ROI in the origin ---------"
    roi = [[x, y], [x + w, y], [x + w, y + h], [x, y + h]]
    pts = common.points_apply_homography(H1, roi)
    x0, y0 = common.bounding_box2D(pts)[0:2]
    T = common.matrix_translation(-x0, -y0)
    H1 = np.dot(T, H1)
    H2 = np.dot(T, H2)

    # add a horizontal translation to H2 to center the disparity range around
    # the origin, if sift matches are available
    if m is not None:
        print "step 5: horizontal registration --------------------------------"
        # filter sift matches with the known fundamental matrix
        # but first convert F for big images coordinate frame
        F = np.dot(T2.T, np.dot(F, T1))
        print '%d sift matches before epipolar constraint filtering' % len(m)
        m = filter_matches_epipolar_constraint(F, m, cfg['epipolar_thresh'])
        print '%d sift matches after epipolar constraint filtering' % len(m)
        if len(m) < 2:  # 0 or 1 sift match
            print 'rectification.compute_rectification_homographies: less than'
            print '2 sift matches after filtering by the epipolar constraint.'
            print 'This may be due to the pointing error, or to strong'
            print 'illumination changes between the input images.'
            print 'No registration will be performed.'
        else:
            H2 = register_horizontally(m, H1, H2)
            disp_m, disp_M = update_disp_range(m, H1, H2, w, h)
            print "SIFT disparity range: [%f,%f]" % (disp_m, disp_M)

    # expand disparity range with srtm according to cfg params
    print cfg['disp_range_method']
    if (cfg['disp_range_method'] == "srtm") or (m is None) or (len(m) < 2):
        disp_m, disp_M = rpc_utils.srtm_disp_range_estimation(
            rpc1, rpc2, x, y, w, h, H1, H2, A,
            cfg['disp_range_srtm_high_margin'],
            cfg['disp_range_srtm_low_margin'])
        print "SRTM disparity range: [%f,%f]" % (disp_m, disp_M)

    if ((cfg['disp_range_method'] == "wider_sift_srtm") and (m is not None)
            and (len(m) >= 2)):
        d_m, d_M = rpc_utils.srtm_disp_range_estimation(
            rpc1, rpc2, x, y, w, h, H1, H2, A,
            cfg['disp_range_srtm_high_margin'],
            cfg['disp_range_srtm_low_margin'])
        print "SRTM disparity range: [%f,%f]" % (d_m, d_M)
        disp_m = min(disp_m, d_m)
        disp_M = max(disp_M, d_M)

    print "Final disparity range: [%s, %s]" % (disp_m, disp_M)
    return H1, H2, disp_m, disp_M
def matches_from_projection_matrices_roi(im1, im2, rpc1, rpc2, x, y, w, h):
    """
    Computes a list of virtual matches between two Pleiades images, using
    their projection matrices.

    Args:
        im1, im2: paths to the two Pleiades images (usually jp2 or tif)
        rpc1, rpc2: paths to two text files containing the 3x4 projection
            matrices (loaded with np.loadtxt)
        x, y, w, h: four integers defining the rectangular ROI in the first
            image. (x, y) is the top-left corner, and (w, h) are the
            dimensions of the rectangle.

        This function uses the parameter subsampling_factor_registration from
        the config module. If factor > 1 then the registration is performed
        over subsampled images, but the resulting keypoints are then scaled
        back to conceal the subsampling.

    Returns:
        matches: 2D numpy array containing a list of matches. Each line
            contains one pair of points, ordered as x1 y1 x2 y2.
            The coordinate system is that of the big images.
            If no sift matches are found, then an exception is raised.
    """
    #m, M = rpc_utils.altitude_range(rpc1, x, y, w, h)
    m = 5
    M = 20

    # build an array with vertices of the 3D ROI, obtained as {2D ROI} x [m, M]
    # also include the midpoints because the 8 corners of the frustum alone
    # don't seem to work
    a = np.array([x, x, x, x, x+w, x+w, x+w, x+w, x+w/2, x+w/2, x+w/2, x+w/2,
                  x+w/2, x+w/2, x, x, x+w, x+w])
    b = np.array([y, y, y+h, y+h, y, y, y+h, y+h, y, y, y+h/2, y+h/2, y+h,
                  y+h, y+h/2, y+h/2, y+h/2, y+h/2])
    c = np.array([m, M, m, M, m, M, m, M, m, M, m, M, m, M, m, M, m, M])
    xx = np.zeros(len(a))
    yy = np.zeros(len(a))

    # corresponding points in im2
    P1 = np.loadtxt(rpc1)
    P2 = np.loadtxt(rpc2)

    # note: M is reused here as the left 3x3 block of P1 = [M | p4]
    M = P1[:, :3]
    p4 = P1[:, 3]
    m3 = M[2, :]
    inv_M = np.linalg.inv(M)

    v = np.vstack((a, b, c*0 + 1))
    for i in range(len(a)):
        v = np.array([a[i], b[i], 1])
        mu = c[i] / np.sign(np.linalg.det(M))
        # backproject the pixel (a, b) at projective depth mu, then project
        # the 3D point into the second image
        X3D = inv_M.dot(mu * v - p4)
        newpoints = P2.dot(np.hstack([X3D, 1]))
        xx[i] = newpoints[0] / newpoints[2]
        yy[i] = newpoints[1] / newpoints[2]

    print xx
    print yy

    matches = np.vstack([a, b, xx, yy]).T
    return matches

    ##### unreachable code below (kept from a previous, sift-based implementation)
    xx, yy = rpc_utils.find_corresponding_point(rpc1, rpc2, a, b, c)[0:2]

    # bounding box in im2
    x2, y2, w2, h2 = common.bounding_box2D(np.vstack([xx, yy]).T)  ## GF NOT USED
    x1, y1, w1, h1 = x, y, w, h
    x2, y2, w2, h2 = x, y, w, h

    # do crops, to apply sift on reasonably sized images
    crop1 = common.image_crop_LARGE(im1, x1, y1, w1, h1)
    crop2 = common.image_crop_LARGE(im2, x2, y2, w2, h2)
    T1 = common.matrix_translation(x1, y1)
    T2 = common.matrix_translation(x2, y2)

    # call sift matches for the images
    matches = matches_from_sift(crop1, crop2)

    if matches.size:
        # compensate coordinates for the crop and the zoom
        pts1 = common.points_apply_homography(T1, matches[:, 0:2])
        pts2 = common.points_apply_homography(T2, matches[:, 2:4])
        return np.hstack([pts1, pts2])
    else:
        raise Exception("no sift matches")
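# A small self-contained check (illustrative only, not part of the original
# module) of the backprojection formula used in the loop above: for a camera
# P = [M | p4] with invertible M, the point X = M^{-1}(mu*v - p4) projects
# back to mu*v, so reprojecting into the same camera recovers the pixel (a, b).
# The projection matrix below is synthetic.
import numpy as np

np.random.seed(0)
P1 = np.random.randn(3, 4)              # synthetic projection matrix [M | p4]
M, p4 = P1[:, :3], P1[:, 3]
v = np.array([123.0, 456.0, 1.0])       # homogeneous pixel (a, b, 1)
mu = 5.0 / np.sign(np.linalg.det(M))    # projective depth, as in the loop above
X = np.linalg.inv(M).dot(mu * v - p4)   # backprojected 3D point

q = P1.dot(np.hstack([X, 1]))           # P1 [X; 1] = M X + p4 = mu v
assert np.allclose(q / q[2], v)         # same pixel (a, b) is recovered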
def rectify_pair(im1, im2, rpc1, rpc2, x, y, w, h, out1, out2, A=None):
    """
    Rectify a ROI in a pair of Pleiades images.

    Args:
        im1, im2: paths to the two Pleiades images (usually jp2 or tif)
        rpc1, rpc2: paths to the two xml files containing RPC data
        x, y, w, h: four integers defining the rectangular ROI in the first
            image. (x, y) is the top-left corner, and (w, h) are the
            dimensions of the rectangle.
        out1, out2: paths to the output crops
        A (optional): 3x3 numpy array containing the pointing error correction
            for im2. This matrix is usually estimated with the
            pointing_accuracy module.

        This function uses the parameter subsampling_factor from the config
        module. If the factor z > 1 then the output images will be subsampled
        by a factor z. The output matrices H1, H2, and the ranges are also
        updated accordingly:
        Hi = Z*Hi with Z = diag(1/z, 1/z, 1) and
        disp_min = disp_min/z (resp _max)

    Returns:
        H1, H2: Two 3x3 matrices representing the rectifying homographies that
            have been applied to the two (big) images.
        disp_min, disp_max: horizontal disparity range
    """
    # compute rectifying homographies
    H1, H2, disp_min, disp_max = compute_rectification_homographies(
        im1, im2, rpc1, rpc2, x, y, w, h, A)

    # compute output images size
    roi = [[x, y], [x+w, y], [x+w, y+h], [x, y+h]]
    pts1 = common.points_apply_homography(H1, roi)
    x0, y0, w0, h0 = common.bounding_box2D(pts1)
    #x0, y0, w0, h0 = x, y, w, h

    # check that the first homography maps the ROI in the positive quadrant
    assert (round(x0) == 0)
    assert (round(y0) == 0)

    z = cfg['subsampling_factor']

    # apply homographies and do the crops
    # THIS STEP IS HERE TO PRODUCE THE MASKS WHERE THE IMAGE IS KNOWN
    # SURE THIS IS A CRAPPY WAY TO DO THIS, WE SHOULD DEFINITIVELY DO IT
    # SIMULTANEOUSLY WITH THE HOMOGRAPHIC TRANSFORMATION
    msk1 = common.tmpfile('.png')
    msk2 = common.tmpfile('.png')
    common.run('plambda %s "x 255" -o %s' % (im1, msk1))
    common.run('plambda %s "x 255" -o %s' % (im2, msk2))
    homography_cropper.crop_and_apply_homography(msk1, msk1, H1, w0, h0, z)
    homography_cropper.crop_and_apply_homography(msk2, msk2, H2, w0, h0, z)

    # FINALLY: apply homographies and do the crops of the images
    homography_cropper.crop_and_apply_homography(out1, im1, H1, w0, h0, z)
    homography_cropper.crop_and_apply_homography(out2, im2, H2, w0, h0, z)

    # COMBINE THE MASKS TO REMOVE THE POINTS THAT FALL OUTSIDE THE IMAGE
    common.run('plambda %s %s "x 200 > y nan if" -o %s' % (msk1, out1, out1))
    common.run('plambda %s %s "x 200 > y nan if" -o %s' % (msk2, out2, out2))

    # This also does the job but when z != 1 it fails (segfault: homography)
    # TODO: FIX homography, maybe code a new one
    # common.image_apply_homography(out1, im1, H1, w0, h0)
    # common.image_apply_homography(out2, im2, H2, w0, h0)

    # if subsampling_factor != 1 the homographies are altered to reflect the zoom
    if z != 1:
        from math import floor, ceil
        # update H1 and H2 to reflect the zoom
        Z = np.eye(3)
        Z[0, 0] = Z[1, 1] = 1.0 / z
        H1 = np.dot(Z, H1)
        H2 = np.dot(Z, H2)
        disp_min = floor(disp_min / z)
        disp_max = ceil(disp_max / z)
        w0 = w0 / z
        h0 = h0 / z

    return H1, H2, disp_min, disp_max
def compute_rectification_homographies(im1, im2, rpc1, rpc2, x, y, w, h, A=None):
    """
    Computes rectifying homographies for a ROI in a pair of Pleiades images.

    Args:
        im1, im2: paths to the two Pleiades images (usually jp2 or tif)
        rpc1, rpc2: two instances of the rpc_model.RPCModel class
        x, y, w, h: four integers defining the rectangular ROI in the first
            image. (x, y) is the top-left corner, and (w, h) are the
            dimensions of the rectangle.
        A (optional): 3x3 numpy array containing the pointing error correction
            for im2. This matrix is usually estimated with the
            pointing_accuracy module.

    Returns:
        H1, H2: Two 3x3 matrices representing the rectifying homographies to
            be applied to the two images.
        disp_min, disp_max: horizontal disparity range, computed on a set of
            sift matches
    """
    # in brief: use 8-pts normalized algo to estimate F, then use loop-zhang to
    # estimate rectifying homographies.

    print "step 1: find matches, and center them ------------------------------"
    sift_matches = matches_from_projection_matrices_roi(
        im1, im2, rpc1, rpc2, x + w/4, y + h/4, w*2/4, h*2/4)
    #sift_matches2 = matches_from_sift(im1, im2)
    #sift_matches = sift_matches2
    # import visualisation
    # print visualisation.plot_matches(im1, im2, sift_matches)
    p1 = sift_matches[:, 0:2]
    p2 = sift_matches[:, 2:4]

    # the matching points are translated to be centered in 0, in order to deal
    # with coordinates ranging from -1000 to 1000, and decrease imprecision
    # effects of the loop-zhang rectification. These effects may become very
    # important (~ 10 pixels error) when using coordinates around 20000.
    pp1, T1 = center_2d_points(p1)
    pp2, T2 = center_2d_points(p2)

    print "step 2: estimate F (8-points algorithm) ----------------------------"
    F = estimation.fundamental_matrix(np.hstack([pp1, pp2]))
    F = np.dot(T2.T, np.dot(F, T1))  # convert F for big images coordinate frame

    print "step 3: compute rectifying homographies (loop-zhang algorithm) -----"
    H1, H2 = estimation.loop_zhang(F, w, h)
    # ATTENTION: LOOP-ZHANG IMPLICITLY ASSUMES THAT F IS IN THE FINAL (CROPPED)
    # IMAGE GEOMETRY. THUS 0,0 IS THE UPPER LEFT CORNER OF THE IMAGE AND W, H
    # ARE USED TO ESTIMATE THE DISTORTION WITHIN THE REGION. BY CENTERING THE
    # COORDINATES OF THE PIXELS WE ARE CONSTRUCTING A RECTIFICATION THAT DOES
    # NOT TAKE INTO ACCOUNT THE CORRECT IMAGE PORTION.
    # compose with previous translations to get H1, H2 in the big images frame
    #H1 = np.dot(H1, T1)
    #H2 = np.dot(H2, T2)

    # for debug
    print "min, max, mean rectification error on rpc matches ------------------"
    tmp = common.points_apply_homography(H1, p1)
    y1 = tmp[:, 1]
    tmp = common.points_apply_homography(H2, p2)
    y2 = tmp[:, 1]
    err = np.abs(y1 - y2)
    print np.min(err), np.max(err), np.mean(err)

    print "step 4: pull back top-left corner of the ROI in the origin ---------"
    roi = [[x, y], [x+w, y], [x+w, y+h], [x, y+h]]
    pts = common.points_apply_homography(H1, roi)
    x0, y0 = common.bounding_box2D(pts)[0:2]
    T = common.matrix_translation(-x0, -y0)
    H1 = np.dot(T, H1)
    H2 = np.dot(T, H2)

    # add a horizontal translation to H2 to center the disparity range around
    # the origin, if sift matches are available
    print "step 5: horizontal registration ------------------------------------"
    sift_matches2 = matches_from_sift(im1, im2)

    # filter sift matches with the known fundamental matrix
    sift_matches2 = filter_matches_epipolar_constraint(F, sift_matches2,
                                                       cfg['epipolar_thresh'])
    if not len(sift_matches2):
        print """all the sift matches have been discarded by the epipolar
        constraint. This is probably due to the pointing error. Try with a
        bigger value for epipolar_thresh."""
        sys.exit()

    H2, disp_m, disp_M = register_horizontally(sift_matches2, H1, H2,
                                               do_scale_horizontally=True)
    disp_m, disp_M = update_minmax_range_extrapolating_registration_affinity(
        sift_matches2, H1, H2, w, h)

    return H1, H2, disp_m, disp_M
def rectify_pair(im1, im2, rpc1, rpc2, x, y, w, h, out1, out2, A=None,
                 sift_matches=None, method="rpc"):
    """
    Rectify a ROI in a pair of images.

    Args:
        im1, im2: paths to two image files
        rpc1, rpc2: paths to the two xml files containing RPC data
        x, y, w, h: four integers defining the rectangular ROI in the first
            image. (x, y) is the top-left corner, and (w, h) are the
            dimensions of the rectangle.
        out1, out2: paths to the output rectified crops
        A (optional): 3x3 numpy array containing the pointing error correction
            for im2. This matrix is usually estimated with the
            pointing_accuracy module.
        sift_matches (optional): Nx4 numpy array containing a list of sift
            matches, in the full image coordinates frame
        method (default: 'rpc'): option to decide whether to use rpc or sift
            matches for the fundamental matrix estimation.

        This function uses the parameter subsampling_factor from the config
        module. If the factor z > 1 then the output images will be subsampled
        by a factor z. The output matrices H1, H2, and the ranges are also
        updated accordingly:
        Hi = Z * Hi with Z = diag(1/z, 1/z, 1) and
        disp_min = disp_min / z (resp _max)

    Returns:
        H1, H2: Two 3x3 matrices representing the rectifying homographies that
            have been applied to the two original (large) images.
        disp_min, disp_max: horizontal disparity range
    """
    # read RPC data
    rpc1 = rpc_model.RPCModel(rpc1)
    rpc2 = rpc_model.RPCModel(rpc2)

    # compute real or virtual matches
    if method == "rpc":
        # find virtual matches from RPC camera models
        matches = rpc_utils.matches_from_rpc(rpc1, rpc2, x, y, w, h,
                                             cfg["n_gcp_per_axis"])

        # correct second image coordinates with the pointing correction matrix
        if A is not None:
            matches[:, 2:] = common.points_apply_homography(np.linalg.inv(A),
                                                            matches[:, 2:])
    else:
        matches = sift_matches

    # compute rectifying homographies
    H1, H2, F = rectification_homographies(matches, x, y, w, h)

    if cfg["register_with_shear"]:
        # compose H2 with a horizontal shear to reduce the disparity range
        a = np.mean(rpc_utils.altitude_range(rpc1, x, y, w, h))
        lon, lat, alt = rpc_utils.ground_control_points(rpc1, x, y, w, h, a, a, 4)
        x1, y1 = rpc1.inverse_estimate(lon, lat, alt)[:2]
        x2, y2 = rpc2.inverse_estimate(lon, lat, alt)[:2]
        m = np.vstack([x1, y1, x2, y2]).T
        m = np.vstack({tuple(row) for row in m})  # remove duplicates due to no alt range
        H2 = register_horizontally_shear(m, H1, H2)

    # compose H2 with a horizontal translation to center disp range around 0
    if sift_matches is not None:
        sift_matches = filter_matches_epipolar_constraint(F, sift_matches,
                                                          cfg["epipolar_thresh"])
        if len(sift_matches) < 10:
            print "WARNING: no registration with less than 10 matches"
        else:
            H2 = register_horizontally_translation(sift_matches, H1, H2)

    # compute disparity range
    disp_m, disp_M = disparity_range(rpc1, rpc2, x, y, w, h, H1, H2,
                                     sift_matches, A)

    # compute output images size
    roi = [[x, y], [x + w, y], [x + w, y + h], [x, y + h]]
    pts1 = common.points_apply_homography(H1, roi)
    x0, y0, w0, h0 = common.bounding_box2D(pts1)

    # check that the first homography maps the ROI in the positive quadrant
    np.testing.assert_allclose(np.round([x0, y0]), 0, atol=0.01)

    # apply homographies and do the crops TODO XXX FIXME cleanup here
    # homography_cropper.crop_and_apply_homography(out1, im1, H1, w0, h0, cfg['subsampling_factor'], True)
    # homography_cropper.crop_and_apply_homography(out2, im2, H2, w0, h0, cfg['subsampling_factor'], True)
    common.image_apply_homography(out1, im1, H1, w0, h0)
    common.image_apply_homography(out2, im2, H2, w0, h0)

    # if subsampling_factor != 1 the homographies are altered to reflect the zoom
    if cfg["subsampling_factor"] != 1:
        Z = np.eye(3)
        Z[0, 0] = Z[1, 1] = 1.0 / cfg["subsampling_factor"]
        H1 = np.dot(Z, H1)
        H2 = np.dot(Z, H2)
        disp_m = np.floor(disp_m / cfg["subsampling_factor"])
        disp_M = np.ceil(disp_M / cfg["subsampling_factor"])

    return H1, H2, disp_m, disp_M
def rectify_pair(im1, im2, rpc1, rpc2, x, y, w, h, out1, out2, A=None,
                 sift_matches=None, method='rpc'):
    """
    Rectify a ROI in a pair of images.

    Args:
        im1, im2: paths to two image files
        rpc1, rpc2: paths to the two xml files containing RPC data
        x, y, w, h: four integers defining the rectangular ROI in the first
            image. (x, y) is the top-left corner, and (w, h) are the
            dimensions of the rectangle.
        out1, out2: paths to the output rectified crops
        A (optional): 3x3 numpy array containing the pointing error correction
            for im2. This matrix is usually estimated with the
            pointing_accuracy module.
        sift_matches (optional): Nx4 numpy array containing a list of sift
            matches, in the full image coordinates frame
        method (default: 'rpc'): option to decide whether to use rpc or sift
            matches for the fundamental matrix estimation.

        This function uses the parameter subsampling_factor from the config
        module. If the factor z > 1 then the output images will be subsampled
        by a factor z. The output matrices H1, H2, and the ranges are also
        updated accordingly:
        Hi = Z * Hi with Z = diag(1/z, 1/z, 1) and
        disp_min = disp_min / z (resp _max)

    Returns:
        H1, H2: Two 3x3 matrices representing the rectifying homographies that
            have been applied to the two original (large) images.
        disp_min, disp_max: horizontal disparity range
    """
    # read RPC data
    rpc1 = rpc_model.RPCModel(rpc1)
    rpc2 = rpc_model.RPCModel(rpc2)

    # compute real or virtual matches
    if method == 'rpc':
        # find virtual matches from RPC camera models
        matches = rpc_utils.matches_from_rpc(rpc1, rpc2, x, y, w, h,
                                             cfg['n_gcp_per_axis'])

        # correct second image coordinates with the pointing correction matrix
        if A is not None:
            matches[:, 2:] = common.points_apply_homography(np.linalg.inv(A),
                                                            matches[:, 2:])
    else:
        matches = sift_matches

    # compute rectifying homographies
    H1, H2, F = rectification_homographies(matches, x, y, w, h)

    # compose H2 with a horizontal translation to center disp range around 0
    if sift_matches is not None:
        sift_matches = filter_matches_epipolar_constraint(F, sift_matches,
                                                          cfg['epipolar_thresh'])
        if len(sift_matches) < 10:
            print 'WARNING: no registration with less than 10 matches'
        else:
            H2 = register_horizontally(sift_matches, H1, H2)

    # compute disparity range
    disp_m, disp_M = disparity_range(rpc1, rpc2, x, y, w, h, H1, H2,
                                     sift_matches, A)

    # compute output images size
    roi = [[x, y], [x+w, y], [x+w, y+h], [x, y+h]]
    pts1 = common.points_apply_homography(H1, roi)
    x0, y0, w0, h0 = common.bounding_box2D(pts1)

    # check that the first homography maps the ROI in the positive quadrant
    np.testing.assert_allclose(np.round([x0, y0]), 0, atol=.01)

    # apply homographies and do the crops TODO XXX FIXME cleanup here
    #homography_cropper.crop_and_apply_homography(out1, im1, H1, w0, h0, cfg['subsampling_factor'], True)
    #homography_cropper.crop_and_apply_homography(out2, im2, H2, w0, h0, cfg['subsampling_factor'], True)
    common.image_apply_homography(out1, im1, H1, w0, h0)
    common.image_apply_homography(out2, im2, H2, w0, h0)

    # if subsampling_factor != 1 the homographies are altered to reflect the zoom
    if cfg['subsampling_factor'] != 1:
        Z = np.eye(3)
        Z[0, 0] = Z[1, 1] = 1.0 / cfg['subsampling_factor']
        H1 = np.dot(Z, H1)
        H2 = np.dot(Z, H2)
        disp_m = np.floor(disp_m / cfg['subsampling_factor'])
        disp_M = np.ceil(disp_M / cfg['subsampling_factor'])

    return H1, H2, disp_m, disp_M
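# A minimal end-to-end usage sketch for rectify_pair (illustrative only, not
# part of the original module): the image and RPC paths, the ROI and the
# output file names below are hypothetical placeholders.
H1, H2, disp_min, disp_max = rectify_pair('im1.tif', 'im2.tif',
                                          'rpc_im1.xml', 'rpc_im2.xml',
                                          15000, 22000, 1000, 1000,
                                          'rect1.tif', 'rect2.tif')
print 'disparity range on the rectified pair:', disp_min, disp_max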
def crop_and_apply_homography(im_out, im_in, H, w, h, subsampling_factor=1,
                              convert_to_gray=False):
    """
    Warps a piece of a Pleiades (panchro or ms) image with a homography.

    Args:
        im_out: path to the output image
        im_in: path to the input (tif) full Pleiades image
        H: numpy array containing the 3x3 homography matrix
        w, h: size of the output image
        subsampling_factor (optional, default=1): when set to z>1, will result
            in the application of the homography Z*H where Z = diag(1/z, 1/z, 1),
            so the output will be zoomed out by a factor z. The output image
            will be (w/z, h/z)
        convert_to_gray (optional, default False): if set to True, and if the
            input image has 4 channels, it is converted to gray before applying
            zoom and homographies.

    Returns:
        nothing

    The homography has to be used as: coord_out = H coord_in. The produced
    output image corresponds to coord_out in [0, w] x [0, h]. The warp is made
    by Pascal Monasse's binary named 'homography'.
    """
    # crop a piece of the big input image, to which the homography will be
    # applied
    # warning: as the crop uses integer coordinates, be careful to round off
    # (x0, y0) before modifying the homography. You want the crop and the
    # translation representing it to do exactly the same thing.
    pts = [[0, 0], [w, 0], [w, h], [0, h]]
    inv_H_pts = common.points_apply_homography(np.linalg.inv(H), pts)
    x0, y0, w0, h0 = common.bounding_box2D(inv_H_pts)
    x0, y0 = np.floor([x0, y0])
    w0, h0 = np.ceil([w0, h0])
    crop_fullres = common.image_crop_LARGE(im_in, x0, y0, w0, h0)

    # This filter is needed (for panchro images) because the original PLEIADES
    # SENSOR PERFECT images are aliased
    if (common.image_pix_dim(crop_fullres) == 1 and
            subsampling_factor == 1 and
            cfg['use_pleiades_unsharpening']):
        tmp = image_apply_pleiades_unsharpening_filter(crop_fullres)
        common.run('rm -f %s' % crop_fullres)
        crop_fullres = tmp

    # convert to gray
    if common.image_pix_dim(crop_fullres) == 4:
        if convert_to_gray:
            crop_fullres = common.pansharpened_to_panchro(crop_fullres)

    # compensate the homography with the translation induced by the
    # preliminary crop, then apply the homography and crop.
    H = np.dot(H, common.matrix_translation(x0, y0))

    # Since the objective is to compute a zoomed out homographic transformation
    # of the input image, to save computations we zoom out the image before
    # applying the homography. If Z is the matrix representing the zoom out and
    # H the homography matrix, this trick consists in applying Z*H*Z^{-1} to
    # the zoomed image Z*Im instead of applying Z*H to the original image Im.
    if subsampling_factor == 1:
        common.image_apply_homography(im_out, crop_fullres, H, w, h)
        return
    else:
        assert subsampling_factor >= 1

        # H becomes Z*H*Z^{-1}
        Z = np.eye(3)
        Z[0, 0] = Z[1, 1] = 1 / float(subsampling_factor)
        H = np.dot(Z, H)
        H = np.dot(H, np.linalg.inv(Z))

        # w and h are updated accordingly
        w = int(w / subsampling_factor)
        h = int(h / subsampling_factor)

        # the DCT zoom is NOT SAFE when the input image size is not a multiple
        # of the zoom factor
        tmpw, tmph = common.image_size(crop_fullres)
        tmpw, tmph = int(tmpw / subsampling_factor), int(tmph / subsampling_factor)
        crop_fullres_safe = common.image_crop_tif(crop_fullres, 0, 0,
                                                  tmpw * subsampling_factor,
                                                  tmph * subsampling_factor)
        common.run('rm -f %s' % crop_fullres)

        # zoom out the input image (crop_fullres)
        crop_zoom_out = common.image_safe_zoom_fft(crop_fullres_safe,
                                                   subsampling_factor)
        common.run('rm -f %s' % crop_fullres_safe)

        # apply the homography to the zoomed out crop
        common.image_apply_homography(im_out, crop_zoom_out, H, w, h)
        return
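# A small self-contained check (illustrative only, not part of the original
# module) of the Z*H*Z^{-1} trick used in crop_and_apply_homography: warping
# zoomed-out coordinates Z*p with Z*H*Z^{-1} gives the same result as zooming
# out the H-warped coordinates. The homography and pixel below are arbitrary
# example values.
import numpy as np

z = 2.0
Z = np.diag([1.0 / z, 1.0 / z, 1.0])           # zoom-out matrix
H = np.array([[1.01, 0.02, 300.0],
              [-0.01, 0.99, -150.0],
              [0.0, 0.0, 1.0]])                # arbitrary homography
p = np.array([123.0, 456.0, 1.0])              # homogeneous pixel

lhs = np.dot(np.dot(Z, np.dot(H, np.linalg.inv(Z))), np.dot(Z, p))
rhs = np.dot(Z, np.dot(H, p))
assert np.allclose(lhs, rhs)  # (Z H Z^{-1})(Z p) == Z (H p), by associativity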
def rectify_pair(im1, im2, rpc1, rpc2, x, y, w, h, out1, out2, A=None, m=None,
                 flag='rpc'):
    """
    Rectify a ROI in a pair of Pleiades images.

    Args:
        im1, im2: paths to the two Pleiades images (usually jp2 or tif)
        rpc1, rpc2: paths to the two xml files containing RPC data
        x, y, w, h: four integers defining the rectangular ROI in the first
            image. (x, y) is the top-left corner, and (w, h) are the
            dimensions of the rectangle.
        out1, out2: paths to the output crops
        A (optional): 3x3 numpy array containing the pointing error correction
            for im2. This matrix is usually estimated with the
            pointing_accuracy module.
        m (optional): Nx4 numpy array containing a list of sift matches, in
            the full image coordinates frame
        flag (default: 'rpc'): option to decide whether to use rpc or sift
            matches for the fundamental matrix estimation.

        This function uses the parameter subsampling_factor from the config
        module. If the factor z > 1 then the output images will be subsampled
        by a factor z. The output matrices H1, H2, and the ranges are also
        updated accordingly:
        Hi = Z*Hi with Z = diag(1/z, 1/z, 1) and
        disp_min = disp_min/z (resp _max)

    Returns:
        H1, H2: Two 3x3 matrices representing the rectifying homographies that
            have been applied to the two (big) images.
        disp_min, disp_max: horizontal disparity range
    """
    # read RPC data
    rpc1 = rpc_model.RPCModel(rpc1)
    rpc2 = rpc_model.RPCModel(rpc2)

    # compute rectifying homographies
    if flag == 'rpc':
        H1, H2, disp_min, disp_max = compute_rectification_homographies(
            im1, im2, rpc1, rpc2, x, y, w, h, A, m)
    else:
        H1, H2, disp_min, disp_max = compute_rectification_homographies_sift(
            im1, im2, rpc1, rpc2, x, y, w, h)

    # compute output images size
    roi = [[x, y], [x+w, y], [x+w, y+h], [x, y+h]]
    pts1 = common.points_apply_homography(H1, roi)
    x0, y0, w0, h0 = common.bounding_box2D(pts1)

    # check that the first homography maps the ROI in the positive quadrant
    np.testing.assert_allclose(np.round([x0, y0]), 0, atol=.01)

    # apply homographies and do the crops
    homography_cropper.crop_and_apply_homography(out1, im1, H1, w0, h0,
                                                 cfg['subsampling_factor'], True)
    homography_cropper.crop_and_apply_homography(out2, im2, H2, w0, h0,
                                                 cfg['subsampling_factor'], True)

    # if subsampling_factor != 1 the homographies are altered to reflect the zoom
    if cfg['subsampling_factor'] != 1:
        from math import floor, ceil
        # update H1 and H2 to reflect the zoom
        Z = np.eye(3)
        Z[0, 0] = Z[1, 1] = 1.0 / cfg['subsampling_factor']
        H1 = np.dot(Z, H1)
        H2 = np.dot(Z, H2)
        disp_min = floor(disp_min / cfg['subsampling_factor'])
        disp_max = ceil(disp_max / cfg['subsampling_factor'])

    return H1, H2, disp_min, disp_max