Example 1
def rectifying_affine_transforms(rpc1, rpc2, aoi, z=0, register_ground=True):
    """
    Compute two affine transforms that rectify two images over a given AOI.

    Args:
        rpc1, rpc2 (rpc_model.RPCModel): two RPC camera models
        aoi (geojson.Polygon): area of interest
        z (float): base altitude of the AOI, used when approximating the rpc
            models and when projecting the AOI
        register_ground (bool): whether to apply a ground registration to the
            rectifying similarities

    Return:
        S1, S2 (2D arrays): two numpy arrays of shapes (3, 3) representing the
            rectifying affine transforms in homogeneous coordinates
        w, h (ints): minimal width and height of the rectified image crops
            needed to cover the AOI in both images
        P1, P2 (2D arrays): two numpy arrays of shapes (3, 3) representing the
            affine camera matrices used to approximate the rpc camera models
    """
    # longitude/latitude of the AOI center
    corners = np.asarray(aoi['coordinates'][0][:4])
    lon, lat = corners[:, 0].mean(), corners[:, 1].mean()

    # local affine approximations of the two rpc models at the AOI center
    P1 = rpc_affine_approximation(rpc1, (lon, lat, z))
    P2 = rpc_affine_approximation(rpc2, (lon, lat, z))

    # affine fundamental matrix and the associated rectifying similarities
    F = affine_fundamental_matrix(P1, P2)
    S1, S2 = rectifying_similarities(F)

    if register_ground:
        S1, S2 = ground_registration(aoi, z, P1, P2, S1, S2)

    # bounding boxes of the AOI in the two rectified image domains
    x1, y1, w1, h1 = utils.bounding_box_of_projected_aoi(rpc1, aoi, z=z,
                                                         homography=S1)
    x2, y2, w2, h2 = utils.bounding_box_of_projected_aoi(rpc2, aoi, z=z,
                                                         homography=S2)

    # shift both domains so the AOI starts at x = 0 and shares a common
    # vertical origin (rows must stay aligned across the two images)
    y0 = min(y1, y2)
    S1 = utils.matrix_translation(-x1, -y0) @ S1
    S2 = utils.matrix_translation(-x2, -y0) @ S2

    # smallest crop size that covers the AOI in both rectified images
    w = int(round(max(w1, w2)))
    h = int(round(max(y1 + h1, y2 + h2) - y0))
    return S1, S2, w, h, P1, P2
Example 2
def affine_crop(input_path, A, w, h):
    """
    Apply an affine transform to an image.

    Args:
        input_path (string): path or url to the input image
        A (numpy array): 3x3 array representing an affine transform in
            homogeneous coordinates
        w, h (ints): width and height of the output image

    Return:
        numpy array of shape (h, w) containing a subset of the transformed
        image. The subset is the rectangle between points 0, 0 and w, h.
    """
    # map the output rectangle corners back through A to find the region of
    # the input image that has to be read
    corners = [[0, 0], [w, 0], [w, h], [0, h]]
    A_inv = np.linalg.inv(A)
    x, y, w0, h0 = utils.bounding_box2D(utils.points_apply_homography(A_inv,
                                                                      corners))
    # round outwards to integer pixel coordinates
    x = int(np.floor(x))
    y = int(np.floor(y))
    w0 = int(np.ceil(w0))
    h0 = int(np.ceil(h0))

    # read only the needed window of band 1 in the input image
    with rasterio.open(input_path, 'r') as src:
        aoi = src.read(indexes=1, window=((y, y + h0), (x, x + w0)),  boundless=True)

    # account for the crop offset in the affine transform
    B = A @ utils.matrix_translation(x, y)

    # warp the crop (affine_transform maps output coordinates through the
    # inverse of the matrix it is given)
    out = ndimage.affine_transform(aoi.T, np.linalg.inv(B), output_shape=(w, h)).T
    return out
Example 3
def sift_roi(file1, file2, aoi, z):
    """
    Match SIFT keypoints between two images, restricted to an AOI.

    Args:
        file1, file2 (str): paths or urls to two GeoTIFF images
        aoi (geojson.Polygon): area of interest
        z (float): base altitude for the aoi

    Returns:
        two numpy arrays with the coordinates of the matching points in the
        original (full-size) image domains
    """
    # crop both images around the AOI, keeping the crop offsets
    crop1, ox1, oy1 = utils.crop_aoi(file1, aoi, z=z)
    crop2, ox2, oy2 = utils.crop_aoi(file2, aoi, z=z)

    # match SIFT keypoints between the two crops
    m1, m2 = match_pair(crop1, crop2)

    # translate the matches from crop coordinates back to full-image coordinates
    q1 = utils.points_apply_homography(utils.matrix_translation(ox1, oy1), m1)
    q2 = utils.points_apply_homography(utils.matrix_translation(ox2, oy2), m2)
    return q1, q2
Example 4
def pointing_error_correction(S1, S2, q1, q2):
    """
    Correct rectifying similarities for the pointing error.

    Args:
        S1, S2 (np.array): two 3x3 matrices representing the rectifying similarities
        q1, q2 (lists): two lists of matching keypoints

    Returns:
        two 3x3 matrices representing the corrected rectifying similarities
    """
    # map the matches into the rectified image domains
    r1 = utils.points_apply_homography(S1, q1)
    r2 = utils.points_apply_homography(S2, q2)

    # median vertical offset between rectified matches, split evenly between
    # the two images so that matching points end up on the same row
    dy = np.median(r2 - r1, axis=0)[1]
    S1 = utils.matrix_translation(0, dy / 2) @ S1
    S2 = utils.matrix_translation(0, -dy / 2) @ S2
    return S1, S2