def local_translation(r1, r2, x, y, w, h, m):
    """
    Estimates the optimal translation to minimise the relative pointing error
    on a given tile.

    Args:
        r1, r2: two instances of the rpc_model.RPCModel class
        x, y, w, h: region of interest in the reference image (r1)
        m: Nx4 numpy array containing a list of matches, one per line. Each
            match is given by (p1, p2, q1, q2) where (p1, p2) is a point of
            the reference view and (q1, q2) is the corresponding point in the
            secondary view.

    Returns:
        3x3 numpy array containing the homogeneous representation of the
        optimal planar translation, to be applied to the secondary image in
        order to correct the pointing error.
    """
    # estimate the affine fundamental matrix between the two views
    n = cfg['n_gcp_per_axis']
    rpc_matches = rpc_utils.matches_from_rpc(r1, r2, x, y, w, h, n)
    F = estimation.affine_fundamental_matrix(rpc_matches)

    # compute the error vectors
    e = error_vectors(m, F, 'sec')

    # compute the median: as the vectors are collinear (because F is affine)
    # computing the median of each component independently is correct
    N = len(e)
    out_x = np.sort(e[:, 0])[int(N / 2)]
    out_y = np.sort(e[:, 1])[int(N / 2)]

    # the correction to be applied to the second view is the opposite
    A = np.array([[1, 0, -out_x],
                  [0, 1, -out_y],
                  [0, 0, 1]])
    return A
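
# --- illustrative sketch, not part of the original module -------------------
# Shows how the 3x3 matrix returned by local_translation() is meant to be
# used: it is a homogeneous translation that shifts pixels of the secondary
# image by minus the component-wise median pointing error. The error vectors
# below are made-up numbers, and only numpy is assumed.
def _demo_apply_pointing_correction():
    import numpy as np

    # hypothetical error vectors, standing in for error_vectors(m, F, 'sec')
    e = np.array([[0.8, -0.4],
                  [1.2, -0.6],
                  [1.0, -0.5]])

    # component-wise median, exactly as in local_translation()
    N = len(e)
    out_x = np.sort(e[:, 0])[int(N / 2)]
    out_y = np.sort(e[:, 1])[int(N / 2)]
    A = np.array([[1, 0, -out_x],
                  [0, 1, -out_y],
                  [0, 0, 1]])

    # applying A to a homogeneous pixel of the secondary view shifts it by
    # (-out_x, -out_y): here (100, 200) -> (99, 200.5)
    q = np.array([100.0, 200.0, 1.0])
    return A @ q
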
def matches_on_rpc_roi(im1, im2, rpc1, rpc2, x, y, w, h):
    """
    Compute a list of SIFT matches between two images on a given roi.

    The corresponding roi in the second image is determined using the rpc
    functions.

    Args:
        im1, im2: paths to two large tif images
        rpc1, rpc2: two instances of the rpcm.RPCModel class
        x, y, w, h: four integers defining the rectangular ROI in the first
            image. (x, y) is the top-left corner, and (w, h) are the
            dimensions of the rectangle.

    Returns:
        matches: 2D numpy array containing a list of matches. Each line
            contains one pair of points, ordered as x1 y1 x2 y2. The
            coordinate system is that of the full images.
    """
    x2, y2, w2, h2 = rpc_utils.corresponding_roi(rpc1, rpc2, x, y, w, h)

    # estimate an approximate affine fundamental matrix from the rpcs
    rpc_matches = rpc_utils.matches_from_rpc(rpc1, rpc2, x, y, w, h, 5)
    F = estimation.affine_fundamental_matrix(rpc_matches)

    # sift matching method
    method = 'relative' if cfg['relative_sift_match_thresh'] is True else 'absolute'

    # if less than 10 matches, lower thresh_dog. An alternative would be ASIFT
    thresh_dog = 0.0133
    for i in range(2):
        p1 = image_keypoints(im1, x, y, w, h, thresh_dog=thresh_dog)
        p2 = image_keypoints(im2, x2, y2, w2, h2, thresh_dog=thresh_dog)
        matches = keypoints_match(p1, p2, method, cfg['sift_match_thresh'], F,
                                  epipolar_threshold=cfg['max_pointing_error'],
                                  model='fundamental')
        if matches is not None and matches.ndim == 2 and matches.shape[0] > 10:
            break
        thresh_dog /= 2.0
    else:
        print("WARNING: sift.matches_on_rpc_roi: found no matches.")
        return None
    return matches
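
# --- illustrative sketch, not part of the original module -------------------
# Typical call pattern for matches_on_rpc_roi(). The image paths and ROI are
# hypothetical, and rpcm.rpc_from_geotiff() is assumed to be available to read
# the RPC models from the GeoTIFF headers; the function itself only needs two
# rpcm.RPCModel instances, however they are obtained.
def _demo_matches_on_rpc_roi():
    import rpcm

    im1, im2 = 'reference.tif', 'secondary.tif'  # hypothetical paths
    rpc1 = rpcm.rpc_from_geotiff(im1)
    rpc2 = rpcm.rpc_from_geotiff(im2)

    # 1000 x 1000 pixel tile whose top-left corner is at (2000, 3000) in im1
    matches = matches_on_rpc_roi(im1, im2, rpc1, rpc2,
                                 x=2000, y=3000, w=1000, h=1000)

    if matches is not None:
        # one row per match: x1, y1, x2, y2 in full-image coordinates
        p1, p2 = matches[:, :2], matches[:, 2:]
        print(len(matches), 'matches found')
    return matches
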
def rectification_homographies(matches, x, y, w, h):
    """
    Computes rectifying homographies from point matches for a given ROI.

    The affine fundamental matrix F is estimated with the gold-standard
    algorithm, then two rectifying similarities (rotation, zoom, translation)
    are computed directly from F.

    Args:
        matches: numpy array of shape (n, 4) containing a list of 2D point
            correspondences between the two images.
        x, y, w, h: four integers defining the rectangular ROI in the first
            image. (x, y) is the top-left corner, and (w, h) are the
            dimensions of the rectangle.

    Returns:
        S1, S2, F: three numpy arrays of shape (3, 3) representing the two
            rectifying similarities to be applied to the two images and the
            corresponding affine fundamental matrix.
    """
    # estimate the affine fundamental matrix with the Gold standard algorithm
    F = estimation.affine_fundamental_matrix(matches)

    # compute rectifying similarities
    S1, S2 = estimation.rectifying_similarities_from_affine_fundamental_matrix(F, cfg['debug'])

    if cfg['debug']:
        y1 = common.points_apply_homography(S1, matches[:, :2])[:, 1]
        y2 = common.points_apply_homography(S2, matches[:, 2:])[:, 1]
        err = np.abs(y1 - y2)
        print("max, min, mean rectification error on point matches: ", end=' ')
        print(np.max(err), np.min(err), np.mean(err))

    # pull back top-left corner of the ROI to the origin (plus margin)
    pts = common.points_apply_homography(S1, [[x, y], [x + w, y],
                                              [x + w, y + h], [x, y + h]])
    x0, y0 = common.bounding_box2D(pts)[:2]
    T = common.matrix_translation(-x0, -y0)
    return np.dot(T, S1), np.dot(T, S2), F
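
# --- illustrative sketch, not part of the original module -------------------
# Shows one way to consume the output of rectification_homographies(): warp
# both images with the rectifying similarities so that corresponding points
# end up on (almost) the same row. OpenCV is used here purely for
# illustration, not as the pipeline's actual resampling backend, and the
# output size (covering only the reference ROI) is a simplification.
def _demo_rectify_pair(img1, img2, matches, x, y, w, h):
    import cv2
    import numpy as np

    S1, S2, F = rectification_homographies(matches, x, y, w, h)

    # warp both views; afterwards epipolar lines are horizontal and aligned
    rect1 = cv2.warpPerspective(img1, S1, (w, h))
    rect2 = cv2.warpPerspective(img2, S2, (w, h))

    # sanity check: rectified matches should share nearly the same row
    # (S1, S2 are affine, so no homogeneous division is needed)
    ones = np.ones((len(matches), 1))
    y1 = (np.hstack([matches[:, :2], ones]) @ S1.T)[:, 1]
    y2 = (np.hstack([matches[:, 2:], ones]) @ S2.T)[:, 1]
    print('max vertical disparity after rectification:', np.abs(y1 - y2).max())
    return rect1, rect2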