Example #1
def alt_to_disp(rpc1, rpc2, x, y, alt, H1, H2, A=None):
    """
    Converts an altitude into a disparity.

    Args:
        rpc1: an instance of the rpc_model.RPCModel class for the reference
            image
        rpc2: an instance of the rpc_model.RPCModel class for the secondary
            image
        x, y: coordinates of the point in the reference image
        alt: altitude above the WGS84 ellipsoid (in meters) of the point
        H1, H2: rectifying homographies
        A (optional): pointing correction matrix

    Returns:
        the horizontal disparity of the (x, y) point of im1, assuming that the
        3-space point associated has altitude alt. The disparity is made
        horizontal thanks to the two rectifying homographies H1 and H2.
    """
    xx, yy = find_corresponding_point(rpc1, rpc2, x, y, alt)[0:2]
    p1 = np.vstack([x, y]).T
    p2 = np.vstack([xx, yy]).T

    if A is not None:
        print "rpc_utils.alt_to_disp: applying pointing error correction"
        # correct coordinates of points in im2, according to A
        p2 = common.points_apply_homography(np.linalg.inv(A), p2)

    p1 = common.points_apply_homography(H1, p1)
    p2 = common.points_apply_homography(H2, p2)
    # np.testing.assert_allclose(p1[:, 1], p2[:, 1], atol=0.1)
    disp = p2[:, 0] - p1[:, 0]
    return disp
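
All of the snippets on this page revolve around common.points_apply_homography. Its implementation is not reproduced here; a minimal sketch of what such a helper typically does (move to homogeneous coordinates, multiply by the 3x3 matrix, de-homogenize), under the assumption that points are passed as an Nx2 array, could be:

import numpy as np

def points_apply_homography_sketch(H, pts):
    """Apply a 3x3 homography H to an Nx2 array of 2D points (sketch only)."""
    pts = np.asarray(pts, dtype=float)
    # move to homogeneous coordinates by appending a column of ones
    hom = np.hstack([pts, np.ones((pts.shape[0], 1))])   # Nx3
    out = hom.dot(H.T)                                    # each row is H.dot(p)
    # de-homogenize: divide by the third coordinate
    return out[:, :2] / out[:, 2:3]
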
Example #2
def register_horizontally_shear(matches, H1, H2):
    """
    Adjust rectifying homographies with a shear to modify the disparity range.

    Args:
        matches: list of pairs of 2D points, stored as a Nx4 numpy array
        H1, H2: two homographies, stored as numpy 3x3 matrices

    Returns:
        H2: corrected homography H2

    The matches are provided in the original images coordinate system. By
    transforming these coordinates with the provided homographies, we obtain
    matches whose disparity is only along the x-axis.
    """
    # transform the matches according to the homographies
    p1 = common.points_apply_homography(H1, matches[:, :2])
    x1 = p1[:, 0]
    y1 = p1[:, 1]
    p2 = common.points_apply_homography(H2, matches[:, 2:])
    x2 = p2[:, 0]
    y2 = p2[:, 1]

    if cfg["debug"]:
        print "Residual vertical disparities: max, min, mean. Should be zero"
        print np.max(y2 - y1), np.min(y2 - y1), np.mean(y2 - y1)

    # we search the (s, b) vector that minimises \sum (x1 - (x2+s*y2+b))^2
    # it is a least squares minimisation problem
    A = np.vstack((y2, y2 * 0 + 1)).T
    B = x1 - x2
    s, b = np.linalg.lstsq(A, B)[0].flatten()

    # correct H2 with the estimated shear
    return np.dot(np.array([[1, s, b], [0, 1, 0], [0, 0, 1]]), H2)
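
The shear fit above solves min over (s, b) of \sum (x1 - (x2 + s*y2 + b))^2 as an ordinary least-squares problem. A small self-contained check with made-up points (not taken from the original code) showing that np.linalg.lstsq recovers a known shear and offset:

import numpy as np

# synthetic rectified abscissas: x1 = x2 + s*y2 + b for a known (s, b)
rng = np.random.RandomState(0)
x2 = rng.uniform(0, 500, 50)
y2 = rng.uniform(0, 500, 50)
s_true, b_true = 0.02, -3.5
x1 = x2 + s_true * y2 + b_true

# same least-squares system as in register_horizontally_shear
A = np.vstack((y2, np.ones_like(y2))).T
B = x1 - x2
s, b = np.linalg.lstsq(A, B, rcond=None)[0]
print(s, b)  # recovers approximately 0.02 and -3.5
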
Example #3
def compute_rectification_homographies_sift(im1, im2, rpc1, rpc2, x, y, w, h):
    """
    Computes rectifying homographies for a ROI in a pair of Pleiades images.

    Args:
        im1, im2: paths to the two Pleiades images (usually jp2 or tif)
        rpc1, rpc2: two instances of the rpc_model.RPCModel class
        x, y, w, h: four integers defining the rectangular ROI in the first
            image. (x, y) is the top-left corner, and (w, h) are the dimensions
            of the rectangle.

    Returns:
        H1, H2: Two 3x3 matrices representing the rectifying homographies to be
            applied to the two images.
        disp_min, disp_max: horizontal disparity range, computed on a set of
            sift matches
    """
    # in brief: use ransac to estimate F from a set of sift matches, then use
    # loop-zhang to estimate rectifying homographies.

    matches = matches_from_sift_rpc_roi(im1, im2, rpc1, rpc2, x, y, w, h)
    p1 = matches[:, 0:2]
    p2 = matches[:, 2:4]

    # the matching points are translated to be centered in 0, in order to deal
    # with coordinates ranging from -1000 to 1000, and decrease imprecision
    # effects of the loop-zhang rectification. These effects may become very
    # important (~ 10 pixels error) when using coordinates around 20000.
    pp1, T1 = center_2d_points(p1)
    pp2, T2 = center_2d_points(p2)

    F = estimation.fundamental_matrix_ransac(np.hstack([pp1, pp2]))
    H1, H2 = estimation.loop_zhang(F, w, h)

    # compose with previous translations to get H1, H2 in the big images frame
    H1 = np.dot(H1, T1)
    H2 = np.dot(H2, T2)

    # for debug
    print "max, min, mean rectification error on sift matches ----------------"
    tmp = common.points_apply_homography(H1, p1)
    y1 = tmp[:, 1]
    tmp = common.points_apply_homography(H2, p2)
    y2 = tmp[:, 1]
    err = np.abs(y1 - y2)
    print np.max(err), np.min(err), np.mean(err)

    # pull back top-left corner of the ROI in the origin
    roi = [[x, y], [x+w, y], [x+w, y+h], [x, y+h]]
    pts = common.points_apply_homography(H1, roi)
    x0, y0 = common.bounding_box2D(pts)[0:2]
    T = common.matrix_translation(-x0, -y0)
    H1 = np.dot(T, H1)
    H2 = np.dot(T, H2)

    # add a horizontal translation to H2 to center the disparity range around the origin
    H2 = register_horizontally(matches, H1, H2)
    disp_m, disp_M = update_disp_range(matches, H1, H2, w, h)

    return H1, H2, disp_m, disp_M
Example #4
def compute_rectification_homographies_sift(im1, im2, rpc1, rpc2, x, y, w, h):
    """
    Computes rectifying homographies for a ROI in a pair of Pleiades images.

    Args:
        im1, im2: paths to the two Pleiades images (usually jp2 or tif)
        rpc1, rpc2: two instances of the rpc_model.RPCModel class
        x, y, w, h: four integers defining the rectangular ROI in the first
            image. (x, y) is the top-left corner, and (w, h) are the dimensions
            of the rectangle.

    Returns:
        H1, H2: Two 3x3 matrices representing the rectifying homographies to be
            applied to the two images.
        disp_min, disp_max: horizontal disparity range, computed on a set of
            sift matches
    """
    # in brief: use ransac to estimate F from a set of sift matches, then use
    # loop-zhang to estimate rectifying homographies.

    matches = sift.matches_on_rpc_roi(im1, im2, rpc1, rpc2, x, y, w, h)
    p1 = matches[:, 0:2]
    p2 = matches[:, 2:4]

    # the matching points are translated to be centered in 0, in order to deal
    # with coordinates ranging from -1000 to 1000, and decrease imprecision
    # effects of the loop-zhang rectification. These effects may become very
    # important (~ 10 pixels error) when using coordinates around 20000.
    pp1, T1 = center_2d_points(p1)
    pp2, T2 = center_2d_points(p2)

    F = estimation.fundamental_matrix_ransac(np.hstack([pp1, pp2]))
    H1, H2 = estimation.loop_zhang(F, w, h)

    # compose with previous translations to get H1, H2 in the big images frame
    H1 = np.dot(H1, T1)
    H2 = np.dot(H2, T2)

    # for debug
    print "max, min, mean rectification error on sift matches ----------------"
    tmp = common.points_apply_homography(H1, p1)
    y1 = tmp[:, 1]
    tmp = common.points_apply_homography(H2, p2)
    y2 = tmp[:, 1]
    err = np.abs(y1 - y2)
    print np.max(err), np.min(err), np.mean(err)

    # pull back top-left corner of the ROI in the origin
    roi = [[x, y], [x + w, y], [x + w, y + h], [x, y + h]]
    pts = common.points_apply_homography(H1, roi)
    x0, y0 = common.bounding_box2D(pts)[0:2]
    T = common.matrix_translation(-x0, -y0)
    H1 = np.dot(T, H1)
    H2 = np.dot(T, H2)

    # add a horizontal translation to H2 to center the disparity range around the origin
    H2 = register_horizontally(matches, H1, H2)
    disp_m, disp_M = update_disp_range(matches, H1, H2, w, h)

    return H1, H2, disp_m, disp_M
Example #5
def update_disp_range(matches, H1, H2, w, h):
    """
    Update the disparity range considering the extrapolation of the affine
    registration estimated from the matches. Extrapolate over the whole
    region of interest.

    Args:
        matches: list of pairs of 2D points, stored as a Nx4 numpy array
        H1, H2: two rectifying homographies, stored as numpy 3x3 matrices
        w, h: width and height of the region of interest

    Returns:
        disp_min, disp_max: horizontal disparity range
    """
    # transform the matches according to the homographies
    pt1 = common.points_apply_homography(H1, matches[:, 0:2])
    x1 = pt1[:, 0]
    pt2 = common.points_apply_homography(H2, matches[:, 2:4])
    x2 = pt2[:, 0]
    y2 = pt2[:, 1]

    # estimate an affine transformation (tilt, shear and bias)
    # that maps pt1 on pt2
    A = np.vstack((x2, y2, y2*0+1)).T
    B = x1
    z = np.linalg.lstsq(A, B)[0]
    t, s, b = z[0:3]

    # corners of ROI
    xx2 = np.array([0, w, 0, w])
    yy2 = np.array([0, 0, h, h])

    # compute the max and min disparity values according to the estimated
    # model. The min/max disp values are necessarily obtained at the ROI
    # corners
    roi_disparities_by_the_affine_model = (xx2*t + yy2*s + b) - xx2
    max_roi = np.max(roi_disparities_by_the_affine_model)
    min_roi = np.min(roi_disparities_by_the_affine_model)

    # min/max disparities according to the keypoints
    max_kpt = np.max(x2 - x1)
    min_kpt = np.min(x2 - x1)

    # compute the range with the extracted min and max disparities
    dispx_min = np.floor(min(min_roi, min_kpt))
    dispx_max = np.floor(max(max_roi, max_kpt))

    # add a security margin to the disp range
    d = cfg['disp_range_extra_margin']
    if (dispx_min < 0):
        dispx_min = (1 + d) * dispx_min
    else:
        dispx_min = (1 - d) * dispx_min
    if (dispx_max > 0):
        dispx_max = (1 + d) * dispx_max
    else:
        dispx_max = (1 - d) * dispx_max

    return dispx_min, dispx_max
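
The affine model fitted above predicts x1 ≈ t*x2 + s*y2 + b, and the extrapolated disparities are evaluated at the four ROI corners exactly as in the function. A tiny standalone sketch of that corner evaluation, with made-up coefficients:

import numpy as np

def disp_range_affine_sketch(t, s, b, w, h):
    """Disparities predicted by the affine model x1 ~ t*x2 + s*y2 + b at the
    four ROI corners, computed as in update_disp_range (sketch only)."""
    xx = np.array([0, w, 0, w])
    yy = np.array([0, 0, h, h])
    disp = (xx * t + yy * s + b) - xx
    return disp.min(), disp.max()

# made-up coefficients: mild tilt, small shear, constant offset, 1000x800 ROI
print(disp_range_affine_sketch(1.001, 0.01, -8.0, 1000, 800))
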
Example #6
def update_disp_range(matches, H1, H2, w, h):
    """
    Update the disparity range considering the extrapolation of the affine
    registration estimated from the matches. Extrapolate over the whole
    region of interest.

    Args:
        matches: list of pairs of 2D points, stored as a Nx4 numpy array
        H1, H2: two rectifying homographies, stored as numpy 3x3 matrices
        w, h: width and height of the region of interest

    Returns:
        disp_min, disp_max: horizontal disparity range
    """
    # transform the matches according to the homographies
    pt1 = common.points_apply_homography(H1, matches[:, 0:2])
    x1 = pt1[:, 0]
    pt2 = common.points_apply_homography(H2, matches[:, 2:4])
    x2 = pt2[:, 0]
    y2 = pt2[:, 1]

    # estimate an affine transformation (tilt, shear and bias)
    # that maps pt1 on pt2
    A = np.vstack((x2, y2, y2 * 0 + 1)).T
    B = x1
    z = np.linalg.lstsq(A, B)[0]
    t, s, b = z[0:3]

    # corners of ROI
    xx2 = np.array([0, w, 0, w])
    yy2 = np.array([0, 0, h, h])

    # compute the max and min disparity values according to the estimated
    # model. The min/max disp values are necessarily obtained at the ROI
    # corners
    roi_disparities_by_the_affine_model = (xx2 * t + yy2 * s + b) - xx2
    max_roi = np.max(roi_disparities_by_the_affine_model)
    min_roi = np.min(roi_disparities_by_the_affine_model)

    # min/max disparities according to the keypoints
    max_kpt = np.max(x2 - x1)
    min_kpt = np.min(x2 - x1)

    # compute the range with the extracted min and max disparities
    dispx_min = np.floor(min(min_roi, min_kpt))
    dispx_max = np.floor(max(max_roi, max_kpt))

    # add a security margin to the disp range
    d = cfg['disp_range_extra_margin']
    if (dispx_min < 0):
        dispx_min = (1 + d) * dispx_min
    else:
        dispx_min = (1 - d) * dispx_min
    if (dispx_max > 0):
        dispx_max = (1 + d) * dispx_max
    else:
        dispx_max = (1 - d) * dispx_max

    return dispx_min, dispx_max
Example #7
def register_horizontally(matches, H1, H2, do_shear=False, flag='center'):
    """
    Adjust rectifying homographies to modify the disparity range.

    Args:
        matches: list of pairs of 2D points, stored as a Nx4 numpy array
        H1, H2: two homographies, stored as numpy 3x3 matrices
        do_shear: boolean flag indicating whether to minimize the shear on im2
            or not.
        flag: option needed to control how to modify the disparity range:
            'center': move the barycenter of disparities of matches to zero
            'positive': make all the disparities positive
            'negative': make all the disparities negative. Required for
                Hirschmuller stereo (java)

    Returns:
        H2: corrected homography H2

    The matches are provided in the original images coordinate system. By
    transforming these coordinates with the provided homographies, we obtain
    matches whose disparity is only along the x-axis. The second homography H2
    is corrected with a horizontal translation to obtain the desired property
    on the disparity range.
    """
    # transform the matches according to the homographies
    p1 = common.points_apply_homography(H1, matches[:, 0:2])
    x1 = p1[:, 0]
    y1 = p1[:, 1]
    p2 = common.points_apply_homography(H2, matches[:, 2:4])
    x2 = p2[:, 0]
    y2 = p2[:, 1]

    # for debug, print the vertical disparities. Should be zero.
    print "Residual vertical disparities: max, min, mean. Should be zero ------"
    print np.max(y2 - y1), np.min(y2 - y1), np.mean(y2 - y1)

    # shear correction
    # we search the (s, b) vector that minimises \sum (x1 - (x2+s*y2+b))^2
    # it is a least squares minimisation problem
    if do_shear:
        A = np.vstack((y2, y2*0+1)).T
        B = x1 - x2
        z = np.linalg.lstsq(A, B)[0]
        s = z[0]
        b = z[1]
        H2 = np.dot(np.array([[1, s, b], [0, 1, 0], [0, 0, 1]]), H2)
        x2 = x2 + s*y2 + b

    # compute the disparity offset according to selected option
    t = 0
    if (flag == 'center'):
        t = np.mean(x2 - x1)
    if (flag == 'positive'):
        t = np.min(x2 - x1)
    if (flag == 'negative'):
        t = np.max(x2 - x1)

    # correct H2 with a translation
    return np.dot(common.matrix_translation(-t, 0), H2)
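
A toy illustration (with made-up disparities, not from the original code) of what the three flag options do: composing H2 with matrix_translation(-t, 0) shifts every x2 by -t, so the disparities x2 - x1 become d - t:

import numpy as np

# made-up rectified abscissas of three matches
x1 = np.array([100.0, 250.0, 400.0])
x2 = np.array([ 90.0, 260.0, 395.0])
d = x2 - x1                                   # disparities: [-10, 10, -5]

for flag, t in [('center', np.mean(d)),
                ('positive', np.min(d)),
                ('negative', np.max(d))]:
    print(flag, d - t)
# 'center'   -> the mean disparity becomes zero
# 'positive' -> all disparities become >= 0
# 'negative' -> all disparities become <= 0
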
Example #8
def update_minmax_range_extrapolating_registration_affinity(matches, H1, H2,
                                                            w_roi, h_roi):
    """
    Update the disparity range considering the extrapolation of the affine
    registration transformation. Extrapolate up to the boundary of the region
    of interest.

    Args:
        matches: list of pairs of 2D points, stored as a Nx4 numpy array
        H1, H2: two homographies, stored as numpy 3x3 matrices
        w_roi, h_roi: width and height of the region of interest

    Returns:
        disp_min, disp_max: horizontal disparity range
    """
    # transform the matches according to the homographies
    pt1 = common.points_apply_homography(H1, matches[:, 0:2])
    x1 = pt1[:, 0]
    y1 = pt1[:, 1]
    pt2 = common.points_apply_homography(H2, matches[:, 2:4])
    x2 = pt2[:, 0]
    y2 = pt2[:, 1]

    # estimate an affine transformation (tilt, shear and bias)
    # from the matched keypoints
    A = np.vstack((x2, y2, y2*0+1)).T
#    A = x2[:, np.newaxis]
    b = x1
    z = np.linalg.lstsq(A, b)[0]
    t, s, dx = z[0:3]

    # corners of ROI
    xx2 = np.array([0, w_roi, 0, w_roi])
    yy2 = np.array([0, 0, h_roi, h_roi])

    # compute the max and min disparity values (according to
    # the estimated model) at the ROI corners
    roi_disparities_by_the_affine_model = (xx2*t + yy2*s + dx) - xx2
    maxb = np.max(roi_disparities_by_the_affine_model)
    minb = np.min(roi_disparities_by_the_affine_model)
    #print minb,maxb

    # compute the range with the extracted min and max disparities
    dispx_min = np.floor(minb + np.min(x2 - x1))
    dispx_max = np.ceil(maxb + np.max(x2 - x1))

    # add 20% security margin
    if (dispx_min < 0):
        dispx_min = 1.2 * dispx_min
    else:
        dispx_min = 0.8 * dispx_min
    if (dispx_max > 0):
        dispx_max = 1.2 * dispx_max
    else:
        dispx_max = 0.8 * dispx_max

    return dispx_min, dispx_max
Example #9
def test_affine_transformation():
    x = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])

    # list of transformations to be tested
    T = np.eye(3)
    I = np.eye(3)
    S = np.eye(3)
    A = np.eye(3)
    translations = []
    isometries = []
    similarities = []
    affinities = []

    for i in xrange(100):
        translations.append(T.copy())  # copy: T is mutated in place below
        isometries.append(I)
        similarities.append(S)
        affinities.append(A.copy())  # copy: A is mutated in place below
        T[0:2, 2] = np.random.random(2)
        I = rotation_matrix(2 * np.pi * np.random.random_sample())
        I[0:2, 2] = np.random.random(2)
        S = similarity_matrix(2 * np.pi * np.random.random_sample(),
                              np.random.random_sample())
        S[0:2, 2] = 100 * np.random.random(2)
        A[0:2, :] = np.random.random((2, 3))

    for B in translations + isometries + similarities + affinities:
        xx = common.points_apply_homography(B, x)
        E = estimation.affine_transformation(x, xx)
        assert_array_almost_equal(E, B)
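
The test relies on rotation_matrix and similarity_matrix helpers that are not shown on this page. Plausible minimal versions, written here only as assumptions consistent with how the test uses them, would be:

import numpy as np

def rotation_matrix(theta):
    """3x3 homogeneous matrix of a 2D rotation by angle theta (sketch)."""
    c, s = np.cos(theta), np.sin(theta)
    return np.array([[c, -s, 0.0],
                     [s,  c, 0.0],
                     [0.0, 0.0, 1.0]])

def similarity_matrix(theta, scale):
    """3x3 homogeneous matrix of a 2D similarity (rotation + scaling, sketch)."""
    S = rotation_matrix(theta)
    S[0:2, 0:2] *= scale
    return S
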
Example #10
def test_affine_transformation():
    x =  np.array([[0, 0], [0, 1], [1, 0], [1, 1]])

    # list of transformations to be tested
    T = np.eye(3)
    I = np.eye(3)
    S = np.eye(3)
    A = np.eye(3)
    translations = []
    isometries = []
    similarities = []
    affinities = []

    for i in xrange(100):
        translations.append(T.copy())  # copy: T is mutated in place below
        isometries.append(I)
        similarities.append(S)
        affinities.append(A.copy())  # copy: A is mutated in place below
        T[0:2, 2] = np.random.random(2)
        I = rotation_matrix(2*np.pi * np.random.random_sample())
        I[0:2, 2] = np.random.random(2)
        S = similarity_matrix(2*np.pi * np.random.random_sample(),
                np.random.random_sample())
        S[0:2, 2] = 100 * np.random.random(2)
        A[0:2, :] = np.random.random((2, 3))

    for B in translations + isometries + similarities + affinities:
        xx = common.points_apply_homography(B, x)
        E = estimation.affine_transformation(x, xx)
        assert_array_almost_equal(E, B)
Example #11
def matches_from_sift_rpc_roi(im1, im2, rpc1, rpc2, x, y, w, h):
    """
    Computes a list of sift matches between two Pleiades images.

    Args:
        im1, im2: paths to two large tif images
        rpc1, rpc2: two instances of the rpc_model.RPCModel class
        x, y, w, h: four integers defining the rectangular ROI in the first
            image. (x, y) is the top-left corner, and (w, h) are the dimensions
            of the rectangle.

    Returns:
        matches: 2D numpy array containing a list of matches. Each line
            contains one pair of points, ordered as x1 y1 x2 y2.
            The coordinate system is that of the big images.
    """
    x1, y1, w1, h1 = x, y, w, h
    x2, y2, w2, h2 = rpc_utils.corresponding_roi(rpc1, rpc2, x, y, w, h)

    p1 = common.image_sift_keypoints(im1, x1, y1, w1, h1, max_nb=2000)
    p2 = common.image_sift_keypoints(im2, x2, y2, w2, h2, max_nb=2000)
    matches = common.sift_keypoints_match(p1, p2, 'relative',
                                          cfg['sift_match_thresh'])

    # Below is an alternative to ASIFT: lower the thresh_dog for the sift calls.
    # Default value for thresh_dog is 0.0133
    thresh_dog = 0.0133
    nb_sift_tries = 1
    while (matches.shape[0] < 10 and nb_sift_tries < 6):
        nb_sift_tries += 1
        thresh_dog /= 2.0
        p1 = common.image_sift_keypoints(im1, x1, y1, w1, h1, None,
                                         '-thresh_dog %f' % thresh_dog)
        p2 = common.image_sift_keypoints(im2, x2, y2, w2, h2, None,
                                         '-thresh_dog %f' % thresh_dog)
        matches = common.sift_keypoints_match(p1, p2, 'relative',
                                              cfg['sift_match_thresh'])

    return matches
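    # NOTE: the block below is unreachable (the function has already returned
    # above); it also refers to T1 and T2, which are not defined in this function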

    if matches.size:
        # compensate coordinates for the crop and the zoom
        pts1 = common.points_apply_homography(T1, matches[:, 0:2])
        pts2 = common.points_apply_homography(T2, matches[:, 2:4])
        return np.hstack([pts1, pts2])
    else:
        return np.array([[]])
Example #12
def register_horizontally_translation(matches, H1, H2, flag="center"):
    """
    Adjust rectifying homographies with a translation to modify the disparity range.

    Args:
        matches: list of pairs of 2D points, stored as a Nx4 numpy array
        H1, H2: two homographies, stored as numpy 3x3 matrices
        flag: option needed to control how to modify the disparity range:
            'center': move the barycenter of disparities of matches to zero
            'positive': make all the disparities positive
            'negative': make all the disparities negative. Required for
                Hirschmuller stereo (java)

    Returns:
        H2: corrected homography H2

    The matches are provided in the original images coordinate system. By
    transforming these coordinates with the provided homographies, we obtain
    matches whose disparity is only along the x-axis. The second homography H2
    is corrected with a horizontal translation to obtain the desired property
    on the disparity range.
    """
    # transform the matches according to the homographies
    p1 = common.points_apply_homography(H1, matches[:, :2])
    x1 = p1[:, 0]
    y1 = p1[:, 1]
    p2 = common.points_apply_homography(H2, matches[:, 2:])
    x2 = p2[:, 0]
    y2 = p2[:, 1]

    # for debug, print the vertical disparities. Should be zero.
    if cfg["debug"]:
        print "Residual vertical disparities: max, min, mean. Should be zero"
        print np.max(y2 - y1), np.min(y2 - y1), np.mean(y2 - y1)

    # compute the disparity offset according to selected option
    t = 0
    if flag == "center":
        t = np.mean(x2 - x1)
    if flag == "positive":
        t = np.min(x2 - x1)
    if flag == "negative":
        t = np.max(x2 - x1)

    # correct H2 with a translation
    return np.dot(common.matrix_translation(-t, 0), H2)
Example #13
def cost_function(v, *args):
    """
    Objective function to minimize in order to correct the pointing error.

    Arguments:
        v: vector of size 3 or 4, containing the parameters of the euclidean
            transformation we are looking for.
        rpc1, rpc2: two instances of the rpc_model.RPCModel class
        matches: 2D numpy array containing a list of matches. Each line
            contains one pair of points, ordered as x1 y1 x2 y2.
            The coordinate system is the one of the big images.
        alpha: relative weight of the error terms: e + alpha*(h-h0)^2. See
            paper for more explanations.

    Returns:
        The sum of pointing errors and altitude differences, as written in the
        paper formula (1).
    """
    rpc1, rpc2, matches = args[0], args[1], args[2]
    if len(args) == 4:
        alpha = args[3]
    else:
        alpha = 0.01

    # verify that parameters are in the bounding box
    if (np.abs(v[0]) > 200*np.pi or
        np.abs(v[1]) > 10000 or
        np.abs(v[2]) > 10000):
        print 'warning: cost_function is going too far'
        print v

    if (len(v) > 3):
        if (np.abs(v[3]) > 20000):
            print 'warning: cost_function is going too far'
            print v

    # compute the altitudes from the matches without correction
    x1 = matches[:, 0]
    y1 = matches[:, 1]
    x2 = matches[:, 2]
    y2 = matches[:, 3]
    h0 = rpc_utils.compute_height(rpc1, rpc2, x1, y1, x2, y2)[0]

    # transform the coordinates of points in the second image according to
    # matrix A, built from vector v
    A = euclidean_transform_matrix(v)
    p2 = common.points_apply_homography(A, matches[:, 2:4])
    x2 = p2[:, 0]
    y2 = p2[:, 1]

    # compute the cost
    h, e = rpc_utils.compute_height(rpc1, rpc2, x1, y1, x2, y2)
    cost = np.sum((h - h0)**2)
    cost *= alpha
    cost += np.sum(e)

    #print cost
    return cost
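
cost_function builds the correction matrix A from the parameter vector v through euclidean_transform_matrix, which is not listed here. A sketch of the 3-parameter case only (rotation angle plus translation), given as an assumption about its meaning rather than the repository's actual implementation:

import numpy as np

def euclidean_transform_matrix_sketch(v):
    """3x3 homogeneous matrix of a planar euclidean transform parameterized by
    v = [theta, tx, ty]. Sketch only: the optional 4th parameter accepted by
    the real cost_function is not modeled here."""
    theta, tx, ty = v[0], v[1], v[2]
    c, s = np.cos(theta), np.sin(theta)
    return np.array([[c, -s, tx],
                     [s,  c, ty],
                     [0.0, 0.0, 1.0]])
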
Example #14
def matches_from_sift_rpc_roi(im1, im2, rpc1, rpc2, x, y, w, h):
    """
    Computes a list of sift matches between two Pleiades images.

    Args:
        im1, im2: paths to two large tif images
        rpc1, rpc2: two instances of the rpc_model.RPCModel class
        x, y, w, h: four integers defining the rectangular ROI in the first
            image. (x, y) is the top-left corner, and (w, h) are the dimensions
            of the rectangle.

    Returns:
        matches: 2D numpy array containing a list of matches. Each line
            contains one pair of points, ordered as x1 y1 x2 y2.
            The coordinate system is that of the big images.
    """
    x1, y1, w1, h1 = x, y, w, h
    x2, y2, w2, h2 = rpc_utils.corresponding_roi(rpc1, rpc2, x, y, w, h)

    p1 = common.image_sift_keypoints(im1, x1, y1, w1, h1, max_nb=2000)
    p2 = common.image_sift_keypoints(im2, x2, y2, w2, h2, max_nb=2000)
    matches = common.sift_keypoints_match(p1, p2, 'relative',
                                          cfg['sift_match_thresh'])

    # Below is an alternative to ASIFT: lower the thresh_dog for the sift calls.
    # Default value for thresh_dog is 0.0133
    thresh_dog = 0.0133
    nb_sift_tries = 1
    while (matches.shape[0] < 10 and nb_sift_tries < 6):
        nb_sift_tries += 1
        thresh_dog /= 2.0
        p1 = common.image_sift_keypoints(im1, x1, y1, w1, h1, None, '-thresh_dog %f' % thresh_dog)
        p2 = common.image_sift_keypoints(im2, x2, y2, w2, h2, None, '-thresh_dog %f' % thresh_dog)
        matches = common.sift_keypoints_match(p1, p2, 'relative',
                                              cfg['sift_match_thresh'])

    return matches
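    # NOTE: the block below is unreachable (the function has already returned
    # above); it also refers to T1 and T2, which are not defined in this function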

    if matches.size:
        # compensate coordinates for the crop and the zoom
        pts1 = common.points_apply_homography(T1, matches[:, 0:2])
        pts2 = common.points_apply_homography(T2, matches[:, 2:4])
        return np.hstack([pts1, pts2])
    else:
        return np.array([[]])
Example #15
def disparity_range_from_matches(matches, H1, H2, w, h):
    """
    Compute the disparity range of a ROI from a list of point matches.

    The estimation is based on the extrapolation of the affine registration
    estimated from the matches. The extrapolation is done on the whole region of
    interest.

    Args:
        matches: Nx4 numpy array containing a list of matches, in the full
            image coordinates frame, before rectification
        w, h: width and height of the rectangular ROI in the first image.
        H1, H2: two rectifying homographies, stored as numpy 3x3 matrices

    Returns:
        disp_min, disp_max: horizontal disparity range
    """
    # transform the matches according to the homographies
    p1 = common.points_apply_homography(H1, matches[:, :2])
    x1 = p1[:, 0]
    p2 = common.points_apply_homography(H2, matches[:, 2:])
    x2 = p2[:, 0]
    y2 = p2[:, 1]

    # estimate an affine transformation (tilt, shear and bias) mapping p1 on p2
    t, s, b = np.linalg.lstsq(np.vstack((x2, y2, y2*0+1)).T, x1)[0][:3]

    # compute the disparities for the affine model. The extrema are obtained at
    # the ROI corners
    xx = np.array([0, w, 0, w])
    yy = np.array([0, 0, h, h])
    disp_affine_model = (xx*t + yy*s + b) - xx

    # compute the final disparity range
    disp_min = np.floor(min(np.min(disp_affine_model), np.min(x2 - x1)))
    disp_max = np.ceil(max(np.max(disp_affine_model), np.max(x2 - x1)))

    # add a security margin to the disparity range
    disp_min *= (1 - np.sign(disp_min) * cfg['disp_range_extra_margin'])
    disp_max *= (1 + np.sign(disp_max) * cfg['disp_range_extra_margin'])
    return disp_min, disp_max
Example #16
def disparity_range_from_matches(matches, H1, H2, w, h):
    """
    Compute the disparity range of a ROI from a list of point matches.

    The estimation is based on the extrapolation of the affine registration
    estimated from the matches. The extrapolation is done on the whole region of
    interest.

    Args:
        matches: Nx4 numpy array containing a list of matches, in the full
            image coordinates frame, before rectification
        w, h: width and height of the rectangular ROI in the first image.
        H1, H2: two rectifying homographies, stored as numpy 3x3 matrices

    Returns:
        disp_min, disp_max: horizontal disparity range
    """
    # transform the matches according to the homographies
    p1 = common.points_apply_homography(H1, matches[:, :2])
    x1 = p1[:, 0]
    p2 = common.points_apply_homography(H2, matches[:, 2:])
    x2 = p2[:, 0]
    y2 = p2[:, 1]

    # estimate an affine transformation (tilt, shear and bias) mapping p1 on p2
    t, s, b = np.linalg.lstsq(np.vstack((x2, y2, y2 * 0 + 1)).T, x1)[0][:3]

    # compute the disparities for the affine model. The extrema are obtained at
    # the ROI corners
    xx = np.array([0, w, 0, w])
    yy = np.array([0, 0, h, h])
    disp_affine_model = (xx * t + yy * s + b) - xx

    # compute the final disparity range
    disp_min = np.floor(min(np.min(disp_affine_model), np.min(x2 - x1)))
    disp_max = np.ceil(max(np.max(disp_affine_model), np.max(x2 - x1)))

    # add a security margin to the disparity range
    disp_min *= 1 - np.sign(disp_min) * cfg["disp_range_extra_margin"]
    disp_max *= 1 + np.sign(disp_max) * cfg["disp_range_extra_margin"]
    return disp_min, disp_max
Example #17
def evaluation_from_estimated_F(im1,
                                im2,
                                rpc1,
                                rpc2,
                                x,
                                y,
                                w,
                                h,
                                A=None,
                                matches=None):
    """
    Measures the pointing error on a Pleiades' pair of images, affine approx.

    Args:
        im1, im2: paths to the two Pleiades images (usually jp2 or tif)
        rpc1, rpc2: two instances of the rpc_model.RPCModel class
        x, y, w, h: four integers defining the rectangular ROI in the first image.
            (x, y) is the top-left corner, and (w, h) are the dimensions of the
            rectangle.
        A (optional): 3x3 numpy array containing the pointing error correction
            for im2.
        matches (optional): Nx4 numpy array containing a list of matches to use
            to compute the pointing error

    Returns:
        the mean pointing error, in the direction orthogonal to the epipolar
        lines. This error is measured in pixels, and computed from an
        approximated fundamental matrix.
    """
    if matches is None:
        matches = sift.matches_on_rpc_roi(im1, im2, rpc1, rpc2, x, y, w, h)
    p1 = matches[:, 0:2]
    p2 = matches[:, 2:4]
    print '%d sift matches' % len(matches)

    # apply pointing correction matrix, if available
    if A is not None:
        p2 = common.points_apply_homography(A, p2)

    # estimate the fundamental matrix between the two views
    rpc_matches = rpc_utils.matches_from_rpc(rpc1, rpc2, x, y, w, h, 5)
    F = estimation.affine_fundamental_matrix(rpc_matches)

    # compute the mean displacement from epipolar lines
    d_sum = 0
    for i in range(len(p1)):
        x = np.array([p1[i, 0], p1[i, 1], 1])
        xx = np.array([p2[i, 0], p2[i, 1], 1])
        ll = F.dot(x)
        #d = np.sign(xx.dot(ll)) * evaluation.distance_point_to_line(xx, ll)
        d = evaluation.distance_point_to_line(xx, ll)
        d_sum += d
    return d_sum / len(p1)
Example #18
def evaluation_iterative(im1,
                         im2,
                         rpc1,
                         rpc2,
                         x,
                         y,
                         w,
                         h,
                         A=None,
                         matches=None):
    """
    Measures the maximal pointing error on a Pleiades' pair of images.

    Args:
        im1, im2: paths to the two Pleiades images (usually jp2 or tif)
        rpc1, rpc2: two instances of the rpc_model.RPCModel class
        x, y, w, h: four integers defining the rectangular ROI in the first
            image.  (x, y) is the top-left corner, and (w, h) are the dimensions
            of the rectangle.
        A (optional): 3x3 numpy array containing the pointing error correction
            for im2.
        matches (optional): Nx4 numpy array containing a list of matches to use
            to compute the pointing error

    Returns:
        the mean pointing error, in the direction orthogonal to the epipolar
        lines. This error is measured in pixels.
    """
    if matches is None:
        matches = sift.matches_on_rpc_roi(im1, im2, rpc1, rpc2, x, y, w, h)
    p1 = matches[:, 0:2]
    p2 = matches[:, 2:4]
    print '%d sift matches' % len(matches)

    # apply pointing correction matrix, if available
    if A is not None:
        p2 = common.points_apply_homography(A, p2)

    # compute the pointing error for each match
    x1 = p1[:, 0]
    y1 = p1[:, 1]
    x2 = p2[:, 0]
    y2 = p2[:, 1]
    e = rpc_utils.compute_height(rpc1, rpc2, x1, y1, x2, y2)[1]
    # matches = matches[e < 0.1, :]
    # visualisation.plot_matches_pleiades(im1, im2, matches)
    print "max, mean, min pointing error, from %d points:" % (len(matches))
    print np.max(e), np.mean(e), np.min(e)

    # return the mean error
    return np.mean(np.abs(e))
Example #19
def rectification_homographies(matches, x, y, w, h):
    """
    Computes rectifying homographies from point matches for a given ROI.

    The affine fundamental matrix F is estimated with the gold-standard
    algorithm, then two rectifying similarities (rotation, zoom, translation)
    are computed directly from F.

    Args:
        matches: numpy array of shape (n, 4) containing a list of 2D point
            correspondences between the two images.
        x, y, w, h: four integers defining the rectangular ROI in the first
            image. (x, y) is the top-left corner, and (w, h) are the dimensions
            of the rectangle.

    Returns:
        S1, S2, F: three numpy arrays of shape (3, 3) representing the
        two rectifying similarities to be applied to the two images and the
        corresponding affine fundamental matrix.
    """
    # estimate the affine fundamental matrix with the Gold standard algorithm
    F = estimation.affine_fundamental_matrix(matches)

    # compute rectifying similarities
    S1, S2 = estimation.rectifying_similarities_from_affine_fundamental_matrix(F, True)

    if cfg["debug"]:
        y1 = common.points_apply_homography(S1, matches[:, :2])[:, 1]
        y2 = common.points_apply_homography(S2, matches[:, 2:])[:, 1]
        err = np.abs(y1 - y2)
        print "max, min, mean rectification error on point matches: ",
        print np.max(err), np.min(err), np.mean(err)

    # pull back top-left corner of the ROI to the origin
    pts = common.points_apply_homography(S1, [[x, y], [x + w, y], [x + w, y + h], [x, y + h]])
    x0, y0 = common.bounding_box2D(pts)[:2]
    T = common.matrix_translation(-x0, -y0)
    return np.dot(T, S1), np.dot(T, S2), F
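
rectification_homographies also uses the helpers common.matrix_translation and common.bounding_box2D, whose code is not reproduced on this page. Minimal versions consistent with how they are called here (assumptions, not the repository's implementations):

import numpy as np

def matrix_translation(tx, ty):
    """3x3 homogeneous matrix of the 2D translation (tx, ty) (sketch)."""
    return np.array([[1.0, 0.0, tx],
                     [0.0, 1.0, ty],
                     [0.0, 0.0, 1.0]])

def bounding_box2D(pts):
    """(x, y, w, h) of the axis-aligned bounding box of a 2D point set (sketch)."""
    pts = np.asarray(pts, dtype=float)
    x, y = pts[:, 0].min(), pts[:, 1].min()
    return x, y, pts[:, 0].max() - x, pts[:, 1].max() - y
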
Example #20
def rectification_homographies(matches, x, y, w, h):
    """
    Computes rectifying homographies from point matches for a given ROI.

    The affine fundamental matrix F is estimated with the gold-standard
    algorithm, then two rectifying similarities (rotation, zoom, translation)
    are computed directly from F.

    Args:
        matches: numpy array of shape (n, 4) containing a list of 2D point
            correspondences between the two images.
        x, y, w, h: four integers defining the rectangular ROI in the first
            image. (x, y) is the top-left corner, and (w, h) are the dimensions
            of the rectangle.

    Returns:
        S1, S2, F: three numpy arrays of shape (3, 3) representing the
        two rectifying similarities to be applied to the two images and the
        corresponding affine fundamental matrix.
    """
    # estimate the affine fundamental matrix with the Gold standard algorithm
    F = estimation.affine_fundamental_matrix(matches)

    # compute rectifying similarities
    S1, S2 = estimation.rectifying_similarities_from_affine_fundamental_matrix(F, True)

    if cfg['debug']:
        y1 = common.points_apply_homography(S1, matches[:, :2])[:, 1]
        y2 = common.points_apply_homography(S2, matches[:, 2:])[:, 1]
        err = np.abs(y1 - y2)
        print "max, min, mean rectification error on point matches: ",
        print np.max(err), np.min(err), np.mean(err)

    # pull back top-left corner of the ROI to the origin
    pts = common.points_apply_homography(S1, [[x, y], [x+w, y], [x+w, y+h], [x, y+h]])
    x0, y0 = common.bounding_box2D(pts)[:2]
    T = common.matrix_translation(-x0, -y0)
    return np.dot(T, S1), np.dot(T, S2), F
Example #21
def plot_pointing_error_tile(im1, im2, rpc1, rpc2, x, y, w, h,
        matches_sift=None, f=100, out_files_pattern=None):
    """
    Args:
        im1, im2: path to full pleiades images
        rpc1, rpc2: paths to the associated rpc xml files
        x, y, w, h: four integers defining the rectangular tile in the reference
            image. (x, y) is the top-left corner, and (w, h) are the dimensions
            of the tile.
        f (optional, default is 100): exaggeration factor for the error vectors
        out_files_pattern (optional, default is None): pattern used to name the
            two output files (plots of the pointing error)

    Returns:
        nothing, but opens a display
    """
    # read rpcs
    r1 = rpc_model.RPCModel(rpc1)
    r2 = rpc_model.RPCModel(rpc2)

    # compute sift matches
    if matches_sift is None:
        matches_sift = pointing_accuracy.filtered_sift_matches_roi(im1, im2,
                r1, r2, x, y, w, h)

    # compute rpc matches
    matches_rpc = rpc_utils.matches_from_rpc(r1, r2, x, y, w, h, 5)

    # estimate affine fundamental matrix
    F = estimation.affine_fundamental_matrix(matches_rpc)

    # compute error vectors
    e = pointing_accuracy.error_vectors(matches_sift, F, 'ref')

    A = pointing_accuracy.local_translation(r1, r2, x, y, w, h, matches_sift)
    p = matches_sift[:, 0:2]
    q = matches_sift[:, 2:4]
    qq = common.points_apply_homography(A, q)
    ee = pointing_accuracy.error_vectors(np.hstack((p, qq)), F, 'ref')
    print pointing_accuracy.evaluation_from_estimated_F(im1, im2, r1, r2, x, y, w, h, None, matches_sift)
    print pointing_accuracy.evaluation_from_estimated_F(im1, im2, r1, r2, x, y, w, h, A, matches_sift)

    # plot the vectors: they go from the point x to the line (F.T)x'
    plot_vectors(p, -e, x, y, w, h, f, out_file='%s_before.png' % out_files_pattern)
    plot_vectors(p, -ee, x, y, w, h, f, out_file='%s_after.png' % out_files_pattern)
Example #22
def evaluation_from_estimated_F(im1, im2, rpc1, rpc2, x, y, w, h, A=None,
        matches=None):
    """
    Measures the pointing error on a Pleiades' pair of images, affine approx.

    Args:
        im1, im2: paths to the two Pleiades images (usually jp2 or tif)
        rpc1, rpc2: two instances of the rpc_model.RPCModel class
        x, y, w, h: four integers defining the rectangular ROI in the first image.
            (x, y) is the top-left corner, and (w, h) are the dimensions of the
            rectangle.
        A (optional): 3x3 numpy array containing the pointing error correction
            for im2.
        matches (optional): Nx4 numpy array containing a list of matches to use
            to compute the pointing error

    Returns:
        the mean pointing error, in the direction orthogonal to the epipolar
        lines. This error is measured in pixels, and computed from an
        approximated fundamental matrix.
    """
    if matches is None:
        matches = filtered_sift_matches_roi(im1, im2, rpc1, rpc2, x, y, w, h)
    p1 = matches[:, 0:2]
    p2 = matches[:, 2:4]
    print '%d sift matches' % len(matches)

    # apply pointing correction matrix, if available
    if A is not None:
        p2 = common.points_apply_homography(A, p2)

    # estimate the fundamental matrix between the two views
    rpc_matches = rpc_utils.matches_from_rpc(rpc1, rpc2, x, y, w, h, 5)
    F = estimation.affine_fundamental_matrix(rpc_matches)

    # compute the mean displacement from epipolar lines
    d_sum = 0
    for i in range(len(p1)):
        x  = np.array([p1[i, 0], p1[i, 1], 1])
        xx = np.array([p2[i, 0], p2[i, 1], 1])
        ll  = F.dot(x)
        #d = np.sign(xx.dot(ll)) * evaluation.distance_point_to_line(xx, ll)
        d = evaluation.distance_point_to_line(xx, ll)
        d_sum += d
    return d_sum/len(p1)
Example #23
def evaluation_iterative(im1, im2, rpc1, rpc2, x, y, w, h, A=None,
                         matches=None):
    """
    Measures the maximal pointing error on a Pleiades' pair of images.

    Args:
        im1, im2: paths to the two Pleiades images (usually jp2 or tif)
        rpc1, rpc2: two instances of the rpc_model.RPCModel class
        x, y, w, h: four integers defining the rectangular ROI in the first
            image.  (x, y) is the top-left corner, and (w, h) are the dimensions
            of the rectangle.
        A (optional): 3x3 numpy array containing the pointing error correction
            for im2.
        matches (optional): Nx4 numpy array containing a list of matches to use
            to compute the pointing error

    Returns:
        the mean pointing error, in the direction orthogonal to the epipolar
        lines. This error is measured in pixels.
    """
    if matches is None:
        matches = filtered_sift_matches_roi(im1, im2, rpc1, rpc2, x, y, w, h)
    p1 = matches[:, 0:2]
    p2 = matches[:, 2:4]
    print '%d sift matches' % len(matches)

    # apply pointing correction matrix, if available
    if A is not None:
        p2 = common.points_apply_homography(A, p2)

    # compute the pointing error for each match
    x1 = p1[:, 0]
    y1 = p1[:, 1]
    x2 = p2[:, 0]
    y2 = p2[:, 1]
    e = rpc_utils.compute_height(rpc1, rpc2, x1, y1, x2, y2)[1]
    # matches = matches[e < 0.1, :]
    # visualisation.plot_matches_pleiades(im1, im2, matches)
    print "max, mean, min pointing error, from %d points:" % (len(matches))
    print np.max(e), np.mean(e), np.min(e)

    # return the mean error
    return np.mean(np.abs(e))
Example #24
def cost_function_linear(v, rpc1, rpc2, matches):
    """
    Objective function to minimize in order to correct the pointing error.

    Arguments:
        v: vector of size 4, containing the 4 parameters of the euclidean
            transformation we are looking for.
        rpc1, rpc2: two instances of the rpc_model.RPCModel class
        matches: 2D numpy array containing a list of matches. Each line
            contains one pair of points, ordered as x1 y1 x2 y2.
            The coordinate system is the one of the big images.
        alpha: relative weight of the error terms: e + alpha*(h-h0)^2. See
            paper for more explanations.

    Returns:
        The sum of pointing errors and altitude differences, as written in the
        paper formula (1).
    """
    print_params(v)

    # verify that parameters are in the bounding box
    if (np.abs(v[0]) > 200*np.pi or
        np.abs(v[1]) > 10000 or
        np.abs(v[2]) > 10000 or
        np.abs(v[3]) > 20000):
        print 'warning: cost_function is going too far'
        print v

    x, y, w, h = common.bounding_box2D(matches[:, 0:2])
    matches_rpc = rpc_utils.matches_from_rpc(rpc1, rpc2, x, y, w, h, 5)
    F = estimation.fundamental_matrix(matches_rpc)

    # transform the coordinates of points in the second image according to
    # matrix A, built from vector v
    A = euclidean_transform_matrix(v)
    p2 = common.points_apply_homography(A, matches[:, 2:4])

    return evaluation.fundamental_matrix_L1(F, np.hstack([matches[:, 0:2], p2]))
Example #25
def matches_from_projection_matrices_roi(im1, im2, rpc1, rpc2, x, y, w, h):
    """
    Computes a list of sift matches between two Pleiades images.

    Args:
        im1, im2: paths to the two Pleiades images (usually jp2 or tif)
        rpc1, rpc2: two instances of the rpc_model.RPCModel class
        x, y, w, h: four integers defining the rectangular ROI in the first image.
            (x, y) is the top-left corner, and (w, h) are the dimensions of the
            rectangle.

        This function uses the parameter subsampling_factor_registration
        from the config module. If the factor is greater than 1 then the
        registration is performed over subsampled images, but the resulting
        keypoints are then scaled back to compensate for the subsampling.

    Returns:
        matches: 2D numpy array containing a list of matches. Each line
            contains one pair of points, ordered as x1 y1 x2 y2.
            The coordinate system is that of the big images.
            If no sift matches are found, then an exception is raised.
    """
    #m, M = rpc_utils.altitude_range(rpc1, x, y, w, h)
    m = 5
    M = 20

    # build an array with vertices of the 3D ROI, obtained as {2D ROI} x [m, M]
    # also include the midpoints because the 8 corners of the frustum alone don't seem to work
    a = np.array([x, x, x, x, x+w, x+w, x+w, x+w,
                  x+w/2, x+w/2, x+w/2, x+w/2, x+w/2, x+w/2,
                  x, x, x+w, x+w])
    b = np.array([y, y, y+h, y+h, y, y, y+h, y+h,
                  y, y, y+h/2, y+h/2, y+h, y+h,
                  y+h/2, y+h/2, y+h/2, y+h/2])
    c = np.array([m, M, m, M, m, M, m, M,
                  m, M, m, M, m, M,
                  m, M, m, M])

    xx = np.zeros(len(a))
    yy = np.zeros(len(a))

    # corresponding points in im2
    P1 = np.loadtxt(rpc1)
    P2 = np.loadtxt(rpc2)

    # note: the name M is reused here for the left 3x3 block of P1; it shadows
    # the max altitude defined above, which has already been captured in c
    M = P1[:, :3]
    p4 = P1[:, 3]
    m3 = M[2, :]  # third row of M (not used below)

    inv_M = np.linalg.inv(M)

    v = np.vstack((a, b, c * 0 + 1))  # overwritten inside the loop below

    for i in range(len(a)):
        v = np.array([a[i], b[i], 1])
        mu = c[i] / np.sign(np.linalg.det(M))

        X3D = inv_M.dot(mu * v - p4)

        # backproject
        newpoints = P2.dot(np.hstack([X3D, 1]))
        xx[i] = newpoints[0] / newpoints[2]
        yy[i] = newpoints[1] / newpoints[2]


    print xx
    print yy

    matches = np.vstack([a, b, xx, yy]).T
    return matches
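    # NOTE: everything below this return is unreachable dead code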

   ##### xx, yy = rpc_utils.find_corresponding_point(rpc1, rpc2, a, b, c)[0:2]


    # bounding box in im2
    x2, y2, w2, h2 = common.bounding_box2D(np.vstack([xx, yy]).T) ## GF NOT USED
    x1, y1, w1, h1 = x, y, w, h
    x2, y2, w2, h2 = x, y, w, h

    # do crops, to apply sift on reasonably sized images
    crop1 = common.image_crop_LARGE(im1, x1, y1, w1, h1)
    crop2 = common.image_crop_LARGE(im2, x2, y2, w2, h2)
    T1 = common.matrix_translation(x1, y1)
    T2 = common.matrix_translation(x2, y2)

    # call sift matches for the images
    matches = matches_from_sift(crop1, crop2)

    if matches.size:
        # compensate coordinates for the crop and the zoom
        pts1 = common.points_apply_homography(T1, matches[:, 0:2])
        pts2 = common.points_apply_homography(T2, matches[:, 2:4])

        return np.hstack([pts1, pts2])
    else:
        raise Exception("no sift matches")
Example #26
def rectify_pair(im1, im2, rpc1, rpc2, x, y, w, h, out1, out2, A=None):
    """
    Rectify a ROI in a pair of Pleiades images.

    Args:
        im1, im2: paths to the two Pleiades images (usually jp2 or tif)
        rpc1, rpc2: paths to the two xml files containing RPC data
        x, y, w, h: four integers defining the rectangular ROI in the first image.
            (x, y) is the top-left corner, and (w, h) are the dimensions of the
            rectangle.
        out1, out2: paths to the output crops
        A (optional): 3x3 numpy array containing the pointing error correction
            for im2. This matrix is usually estimated with the pointing_accuracy
            module.

        This function uses the parameter subsampling_factor from the config module.
        If the factor z > 1 then the output images will be subsampled by a factor z.
        The output matrices H1, H2, and the ranges are also updated accordingly:
        Hi = Z*Hi   with Z = diag(1/z,1/z,1)   and
        disp_min = disp_min/z  (resp _max)

    Returns:
        H1, H2: Two 3x3 matrices representing the rectifying homographies that
            have been applied to the two (big) images.
        disp_min, disp_max: horizontal disparity range
    """

    # compute rectifying homographies
    H1, H2, disp_min, disp_max = compute_rectification_homographies(im1, im2,
        rpc1, rpc2, x, y, w, h, A)

    ## compute output images size
    roi = [[x, y], [x+w, y], [x+w, y+h], [x, y+h]]
    pts1 = common.points_apply_homography(H1, roi)
    x0, y0, w0, h0 = common.bounding_box2D(pts1)
    #x0,y0,w0,h0 = x,y,w,h

    # check that the first homography maps the ROI in the positive quadrant
    assert (round(x0) == 0)
    assert (round(y0) == 0)

    z = cfg['subsampling_factor']

    # apply homographies and do the crops
    # THIS STEP IS HERE TO PRODUCE THE MASKS WHERE THE IMAGE IS KNOWN
    # SURE THIS IS A CRAPPY WAY TO DO THIS, WE SHOULD DEFINITIVELY DO IT
    # SIMULTANEOUSLY WITH THE HOMOGRAPHIC TRANSFORMATION
    msk1 = common.tmpfile('.png')
    msk2 = common.tmpfile('.png')
    common.run('plambda %s "x 255" -o %s' % (im1, msk1))
    common.run('plambda %s "x 255" -o %s' % (im2, msk2))
    homography_cropper.crop_and_apply_homography(msk1, msk1, H1, w0, h0, z)
    homography_cropper.crop_and_apply_homography(msk2, msk2, H2, w0, h0, z)
    # FINALLY : apply homographies and do the crops of the images
    homography_cropper.crop_and_apply_homography(out1, im1, H1, w0, h0, z)
    homography_cropper.crop_and_apply_homography(out2, im2, H2, w0, h0, z)
    # COMBINE THE MASK TO REMOVE THE POINTS THAT FALL OUTSIDE THE IMAGE
    common.run('plambda %s %s "x 200 > y nan if" -o %s' % (msk1, out1, out1))
    common.run('plambda %s %s "x 200 > y nan if" -o %s' % (msk2, out2, out2))

#    This also does the job but when z != 1 it fails (segfault: homography)
#    TODO: FIX homography, maybe code a new one
#    common.image_apply_homography(out1, im1, H1, w0, h0)
#    common.image_apply_homography(out2, im2, H2, w0, h0)

    # if the subsampling factor z is not 1, the homographies are altered to reflect the zoom
    if z != 1:
        from math import floor, ceil
        # update the H1 and H2 to reflect the zoom
        Z = np.eye(3)
        Z[0, 0] = Z[1, 1] = 1.0 / z

        H1 = np.dot(Z, H1)
        H2 = np.dot(Z, H2)
        disp_min = floor(disp_min / z)
        disp_max = ceil(disp_max / z)
        w0 = w0 / z
        h0 = h0 / z

    return H1, H2, disp_min, disp_max
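
When the subsampling factor z is not 1, the homographies and the disparity range are rescaled as stated in the docstring (Hi = Z*Hi with Z = diag(1/z, 1/z, 1), disparities divided by z). A tiny standalone illustration with made-up values:

import numpy as np
from math import floor, ceil

z = 2                                     # subsampling factor
H = np.array([[1.0, 0.0, 120.0],
              [0.0, 1.0, -40.0],
              [0.0, 0.0,   1.0]])         # some rectifying homography (made up)
disp_min, disp_max = -37.3, 18.6

Z = np.diag([1.0 / z, 1.0 / z, 1.0])
H_zoomed = Z.dot(H)                       # pixel coordinates divided by z
disp_min_z = floor(disp_min / z)          # -19
disp_max_z = ceil(disp_max / z)           # 10
print(H_zoomed, disp_min_z, disp_max_z)
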
Example #27
def compute_rectification_homographies(im1, im2, rpc1, rpc2, x, y, w, h, A=None):
    """
    Computes rectifying homographies for a ROI in a pair of Pleiades images.

    Args:
        im1, im2: paths to the two Pleiades images (usually jp2 or tif)
        rpc1, rpc2: two instances of the rpc_model.RPCModel class
        x, y, w, h: four integers defining the rectangular ROI in the first image.
            (x, y) is the top-left corner, and (w, h) are the dimensions of the
            rectangle.
        A (optional): 3x3 numpy array containing the pointing error correction
            for im2. This matrix is usually estimated with the pointing_accuracy
            module.

    Returns:
        H1, H2: Two 3x3 matrices representing the rectifying homographies to be applied
            to the two images.
        disp_min, disp_max: horizontal disparity range, computed on a set of
            sift matches
    """
    # in brief: use 8-pts normalized algo to estimate F, then use loop-zhang to
    # estimate rectifying homographies.

    print "step 1: find matches, and center them ------------------------------"
    sift_matches = matches_from_projection_matrices_roi(im1, im2, rpc1, rpc2, x+w/4, y+h/4, w*2/4, h*2/4)
    #sift_matches2 = matches_from_sift(im1, im2)
    #sift_matches = sift_matches2
#    import visualisation
#    print visualisation.plot_matches(im1,im2,sift_matches)

    p1 = sift_matches[:, 0:2]
    p2 = sift_matches[:, 2:4]


    # the matching points are translated to be centered in 0, in order to deal
    # with coordinates ranging from -1000 to 1000, and decrease imprecision
    # effects of the loop-zhang rectification. These effects may become very
    # important (~ 10 pixels error) when using coordinates around 20000.
    pp1, T1 = center_2d_points(p1)
    pp2, T2 = center_2d_points(p2)

    print "step 2: estimate F (8-points algorithm) ----------------------------"
    F = estimation.fundamental_matrix(np.hstack([pp1, pp2]))
    F = np.dot(T2.T, np.dot(F, T1)) # convert F for big images coordinate frame

    print "step 3: compute rectifying homographies (loop-zhang algorithm) -----"
    H1, H2 = estimation.loop_zhang(F, w, h)
    #### ATTENTION: LOOP-ZHANG IMPLICITLY ASSUMES THAT F IS IN THE FINAL (CROPPED)
    # IMAGE GEOMETRY. THUS 0,0 IS THE UPPER LEFT CORNER OF THE IMAGE AND W,H ARE
    # USED TO ESTIMATE THE DISTORTION WITHIN THE REGION. BY CENTERING THE COORDINATES
    # OF THE PIXELS WE ARE CONSTRUCTING A RECTIFICATION THAT DOES NOT TAKE INTO ACCOUNT THE
    # CORRECT IMAGE PORTION.
    # compose with previous translations to get H1, H2 in the big images frame
    #H1 = np.dot(H1, T1)
    #H2 = np.dot(H2, T2)

    # for debug
    print "min, max, mean rectification error on rpc matches ------------------"
    tmp = common.points_apply_homography(H1, p1)
    y1 = tmp[:, 1]
    tmp = common.points_apply_homography(H2, p2)
    y2 = tmp[:, 1]
    err = np.abs(y1 - y2)
    print np.min(err), np.max(err), np.mean(err)

#    print "step 4: pull back top-left corner of the ROI in the origin ---------"
    roi = [[x, y], [x+w, y], [x+w, y+h], [x, y+h]]
    pts = common.points_apply_homography(H1, roi)
    x0, y0 = common.bounding_box2D(pts)[0:2]
    T = common.matrix_translation(-x0, -y0)
    H1 = np.dot(T, H1)
    H2 = np.dot(T, H2)

    # add a horizontal translation to H2 to center the disparity range around
    # the origin, if sift matches are available
    print "step 5: horizontal registration ------------------------------------"
    sift_matches2 = matches_from_sift(im1, im2)

    # filter sift matches with the known fundamental matrix
    sift_matches2 = filter_matches_epipolar_constraint(F, sift_matches2,
            cfg['epipolar_thresh'])
    if not len(sift_matches2):
        print """all the sift matches have been discarded by the epipolar
        constraint. This is probably due to the pointing error. Try with a
        bigger value for epipolar_thresh."""
        sys.exit()

    H2, disp_m, disp_M = register_horizontally(sift_matches2, H1, H2, do_scale_horizontally=True)
    disp_m, disp_M = update_minmax_range_extrapolating_registration_affinity(sift_matches2,
        H1, H2, w, h)

    return H1, H2, disp_m, disp_M
Example #28
def compute_rectification_homographies(im1, im2, rpc1, rpc2, x, y, w, h, A=None,
                                       m=None):
    """
    Computes rectifying homographies for a ROI in a pair of Pleiades images.

    Args:
        im1, im2: paths to the two Pleiades images (usually jp2 or tif)
        rpc1, rpc2: two instances of the rpc_model.RPCModel class
        x, y, w, h: four integers defining the rectangular ROI in the first
            image. (x, y) is the top-left corner, and (w, h) are the dimensions
            of the rectangle.
        A (optional): 3x3 numpy array containing the pointing error correction
            for im2. This matrix is usually estimated with the pointing_accuracy
            module.
        m (optional): Nx4 numpy array containing a list of matches.

    Returns:
        H1, H2: Two 3x3 matrices representing the rectifying homographies to be
            applied to the two images.
        disp_min, disp_max: horizontal disparity range, computed on a set of
            sift matches
    """
    # in brief: use 8-pts normalized algo to estimate F, then use loop-zhang to
    # estimate rectifying homographies.

    print "step 1: find virtual matches, and center them ----------------------"
    n = cfg['n_gcp_per_axis']
    rpc_matches = rpc_utils.matches_from_rpc(rpc1, rpc2, x, y, w, h, n)
    p1 = rpc_matches[:, 0:2]
    p2 = rpc_matches[:, 2:4]

    if A is not None:
        print "applying pointing error correction"
        # correct coordinates of points in im2, according to A
        p2 = common.points_apply_homography(np.linalg.inv(A), p2)

    # the matching points are translated to be centered in 0, in order to deal
    # with coordinates ranging from -1000 to 1000, and decrease imprecision
    # effects of the loop-zhang rectification. These effects may become very
    # important (~ 10 pixels error) when using coordinates around 20000.
    pp1, T1 = center_2d_points(p1)
    pp2, T2 = center_2d_points(p2)

    print "step 2: estimate F (Gold standard algorithm) -----------------------"
    F = estimation.affine_fundamental_matrix(np.hstack([pp1, pp2]))

    print "step 3: compute rectifying homographies (loop-zhang algorithm) -----"
    H1, H2 = estimation.loop_zhang(F, w, h)
    S1, S2 = estimation.rectifying_similarities_from_affine_fundamental_matrix(
        F, True)
    print "F\n", F, "\n"
    print "H1\n", H1, "\n"
    print "S1\n", S1, "\n"
    print "H2\n", H2, "\n"
    print "S2\n", S2, "\n"
    # compose with previous translations to get H1, H2 in the big images frame
    H1 = np.dot(H1, T1)
    H2 = np.dot(H2, T2)

    # for debug
    print "max, min, mean rectification error on rpc matches ------------------"
    tmp = common.points_apply_homography(H1, p1)
    y1 = tmp[:, 1]
    tmp = common.points_apply_homography(H2, p2)
    y2 = tmp[:, 1]
    err = np.abs(y1 - y2)
    print np.max(err), np.min(err), np.mean(err)

    print "step 4: pull back top-left corner of the ROI in the origin ---------"
    roi = [[x, y], [x+w, y], [x+w, y+h], [x, y+h]]
    pts = common.points_apply_homography(H1, roi)
    x0, y0 = common.bounding_box2D(pts)[0:2]
    T = common.matrix_translation(-x0, -y0)
    H1 = np.dot(T, H1)
    H2 = np.dot(T, H2)

    # add a horizontal translation to H2 to center the disparity range around
    # the origin, if sift matches are available
    if m is not None:
        print "step 5: horizontal registration --------------------------------"
        # filter sift matches with the known fundamental matrix
        # but first convert F for big images coordinate frame
        F = np.dot(T2.T, np.dot(F, T1))
        print '%d sift matches before epipolar constraint filtering' % len(m)
        m = filter_matches_epipolar_constraint(F, m, cfg['epipolar_thresh'])
        print '%d sift matches after epipolar constraint filtering' % len(m)
        if len(m) < 2:
            # 0 or 1 sift match
            print 'rectification.compute_rectification_homographies: less than'
            print '2 sift matches after filtering by the epipolar constraint.'
            print 'This may be due to the pointing error, or to strong'
            print 'illumination changes between the input images.'
            print 'No registration will be performed.'
        else:
            H2 = register_horizontally(m, H1, H2)
            disp_m, disp_M = update_disp_range(m, H1, H2, w, h)
            print "SIFT disparity range:  [%f,%f]"%(disp_m,disp_M)

    # expand disparity range with srtm according to cfg params
    print cfg['disp_range_method']
    if (cfg['disp_range_method'] == "srtm") or (m is None) or (len(m) < 2):
        disp_m, disp_M = rpc_utils.srtm_disp_range_estimation(
            rpc1, rpc2, x, y, w, h, H1, H2, A,
            cfg['disp_range_srtm_high_margin'],
            cfg['disp_range_srtm_low_margin'])
        print "SRTM disparity range:  [%f,%f]"%(disp_m,disp_M)
    if ((cfg['disp_range_method'] == "wider_sift_srtm") and (m is not None) and
            (len(m) >= 2)):
        d_m, d_M = rpc_utils.srtm_disp_range_estimation(
            rpc1, rpc2, x, y, w, h, H1, H2, A,
            cfg['disp_range_srtm_high_margin'],
            cfg['disp_range_srtm_low_margin'])
        print "SRTM disparity range:  [%f,%f]"%(d_m,d_M)
        disp_m = min(disp_m, d_m)
        disp_M = max(disp_M, d_M)

    print "Final disparity range:  [%s, %s]" % (disp_m, disp_M)
    return H1, H2, disp_m, disp_M
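
A note on the step-5 line F = np.dot(T2.T, np.dot(F, T1)): since the centered
points satisfy pp = T p, the epipolar residual computed with the centered
fundamental matrix equals the residual of T2.T * F * T1 evaluated on the
original big-frame points. The sketch below only checks this identity
numerically; the matrices and points are arbitrary placeholders, not values
from the pipeline.

import numpy as np

F = np.random.randn(3, 3)         # stands in for the F estimated on centered points
T1 = np.array([[1., 0., -500.], [0., 1., -300.], [0., 0., 1.]])
T2 = np.array([[1., 0., -480.], [0., 1., -310.], [0., 0., 1.]])
p1 = np.array([520., 330., 1.])   # homogeneous points in the big image frames
p2 = np.array([495., 342., 1.])

# residual with centered points and the centered F ...
lhs = np.dot(np.dot(T2, p2), np.dot(F, np.dot(T1, p1)))
# ... equals the residual with big-frame points and T2.T * F * T1
rhs = np.dot(p2, np.dot(np.dot(T2.T, np.dot(F, T1)), p1))
np.testing.assert_allclose(lhs, rhs)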
Example #29
def rectify_pair(im1,
                 im2,
                 rpc1,
                 rpc2,
                 x,
                 y,
                 w,
                 h,
                 out1,
                 out2,
                 A=None,
                 m=None,
                 flag='rpc'):
    """
    Rectify a ROI in a pair of Pleiades images.

    Args:
        im1, im2: paths to the two Pleiades images (usually jp2 or tif)
        rpc1, rpc2: paths to the two xml files containing RPC data
        x, y, w, h: four integers defining the rectangular ROI in the first
            image.  (x, y) is the top-left corner, and (w, h) are the dimensions
            of the rectangle.
        out1, out2: paths to the output crops
        A (optional): 3x3 numpy array containing the pointing error correction
            for im2. This matrix is usually estimated with the pointing_accuracy
            module.
        m (optional): Nx4 numpy array containing a list of sift matches, in the
            full image coordinates frame
        flag (default: 'rpc'): option to decide whether to use rpc or sift
            matches for the fundamental matrix estimation.

        This function uses the parameter subsampling_factor from the
        config module.  If the factor z > 1 then the output images will
        be subsampled by a factor z.  The output matrices H1, H2, and the
        ranges are also updated accordingly:
        Hi = Z*Hi   with Z = diag(1/z,1/z,1)   and
        disp_min = disp_min/z  (resp _max)

    Returns:
        H1, H2: Two 3x3 matrices representing the rectifying homographies that
            have been applied to the two (big) images.
        disp_min, disp_max: horizontal disparity range
    """
    # read RPC data
    rpc1 = rpc_model.RPCModel(rpc1)
    rpc2 = rpc_model.RPCModel(rpc2)

    # compute rectifying homographies
    if flag == 'rpc':
        H1, H2, disp_min, disp_max = compute_rectification_homographies(
            im1, im2, rpc1, rpc2, x, y, w, h, A, m)
    else:
        H1, H2, disp_min, disp_max = compute_rectification_homographies_sift(
            im1, im2, rpc1, rpc2, x, y, w, h)

    # compute output images size
    roi = [[x, y], [x + w, y], [x + w, y + h], [x, y + h]]
    pts1 = common.points_apply_homography(H1, roi)
    x0, y0, w0, h0 = common.bounding_box2D(pts1)
    # check that the first homography maps the ROI in the positive quadrant
    np.testing.assert_allclose(np.round([x0, y0]), 0, atol=.01)

    # apply homographies and do the crops TODO XXX FIXME cleanup here
    #homography_cropper.crop_and_apply_homography(out1, im1, H1, w0, h0, cfg['subsampling_factor'], True)
    #homography_cropper.crop_and_apply_homography(out2, im2, H2, w0, h0, cfg['subsampling_factor'], True)
    common.image_apply_homography(out1, im1, H1, w0, h0)
    common.image_apply_homography(out2, im2, H2, w0, h0)

    # if subsampling_factor != 1, the homographies are altered to reflect the zoom
    if cfg['subsampling_factor'] != 1:
        from math import floor, ceil
        # update the H1 and H2 to reflect the zoom
        Z = np.eye(3)
        Z[0, 0] = Z[1, 1] = 1.0 / cfg['subsampling_factor']

        H1 = np.dot(Z, H1)
        H2 = np.dot(Z, H2)
        disp_min = floor(disp_min / cfg['subsampling_factor'])
        disp_max = ceil(disp_max / cfg['subsampling_factor'])

    return H1, H2, disp_min, disp_max
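
For orientation, a minimal usage sketch of rectify_pair. The file names and
ROI below are hypothetical placeholders, and the call assumes the function and
its dependencies are importable from the surrounding module; it only
illustrates the calling convention described in the docstring.

# hypothetical call; all paths and ROI values are placeholders
H1, H2, disp_min, disp_max = rectify_pair(
    'im1.tif', 'im2.tif',            # input Pleiades images
    'rpc1.xml', 'rpc2.xml',          # RPC metadata files
    2000, 3000, 512, 512,            # ROI (x, y, w, h) in the first image
    'rect1.tif', 'rect2.tif',        # output rectified crops
    A=None,                          # no pointing error correction
    m=None,                          # no precomputed sift matches
    flag='rpc')                      # estimate F from RPC virtual matches
print("disparity range: [%s, %s]" % (disp_min, disp_max))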
Example #30
def rectify_pair(im1, im2, rpc1, rpc2, x, y, w, h, out1, out2, A=None, sift_matches=None, method="rpc"):
    """
    Rectify a ROI in a pair of images.

    Args:
        im1, im2: paths to two image files
        rpc1, rpc2: paths to the two xml files containing RPC data
        x, y, w, h: four integers defining the rectangular ROI in the first
            image.  (x, y) is the top-left corner, and (w, h) are the dimensions
            of the rectangle.
        out1, out2: paths to the output rectified crops
        A (optional): 3x3 numpy array containing the pointing error correction
            for im2. This matrix is usually estimated with the pointing_accuracy
            module.
        sift_matches (optional): Nx4 numpy array containing a list of sift
            matches, in the full image coordinates frame
        method (default: 'rpc'): option to decide whether to use rpc or sift
            matches for the fundamental matrix estimation.

        This function uses the parameter subsampling_factor from the
        config module. If the factor z > 1 then the output images will
        be subsampled by a factor z. The output matrices H1, H2, and the
        ranges are also updated accordingly:
        Hi = Z * Hi with Z = diag(1/z, 1/z, 1) and
        disp_min = disp_min / z  (resp _max)

    Returns:
        H1, H2: Two 3x3 matrices representing the rectifying homographies that
        have been applied to the two original (large) images.
        disp_min, disp_max: horizontal disparity range
    """
    # read RPC data
    rpc1 = rpc_model.RPCModel(rpc1)
    rpc2 = rpc_model.RPCModel(rpc2)

    # compute real or virtual matches
    if method == "rpc":
        # find virtual matches from RPC camera models
        matches = rpc_utils.matches_from_rpc(rpc1, rpc2, x, y, w, h, cfg["n_gcp_per_axis"])

        # correct second image coordinates with the pointing correction matrix
        if A is not None:
            matches[:, 2:] = common.points_apply_homography(np.linalg.inv(A), matches[:, 2:])
    else:
        matches = sift_matches

    # compute rectifying homographies
    H1, H2, F = rectification_homographies(matches, x, y, w, h)

    if cfg["register_with_shear"]:
        # compose H2 with a horizontal shear to reduce the disparity range
        a = np.mean(rpc_utils.altitude_range(rpc1, x, y, w, h))
        lon, lat, alt = rpc_utils.ground_control_points(rpc1, x, y, w, h, a, a, 4)
        x1, y1 = rpc1.inverse_estimate(lon, lat, alt)[:2]
        x2, y2 = rpc2.inverse_estimate(lon, lat, alt)[:2]
        m = np.vstack([x1, y1, x2, y2]).T
        m = np.vstack({tuple(row) for row in m})  # remove duplicates due to no alt range
        H2 = register_horizontally_shear(m, H1, H2)

    # compose H2 with a horizontal translation to center disp range around 0
    if sift_matches is not None:
        sift_matches = filter_matches_epipolar_constraint(F, sift_matches, cfg["epipolar_thresh"])
        if len(sift_matches) < 10:
            print "WARNING: no registration with less than 10 matches"
        else:
            H2 = register_horizontally_translation(sift_matches, H1, H2)

    # compute disparity range
    disp_m, disp_M = disparity_range(rpc1, rpc2, x, y, w, h, H1, H2, sift_matches, A)

    # compute output images size
    roi = [[x, y], [x + w, y], [x + w, y + h], [x, y + h]]
    pts1 = common.points_apply_homography(H1, roi)
    x0, y0, w0, h0 = common.bounding_box2D(pts1)
    # check that the first homography maps the ROI in the positive quadrant
    np.testing.assert_allclose(np.round([x0, y0]), 0, atol=0.01)

    # apply homographies and do the crops TODO XXX FIXME cleanup here
    # homography_cropper.crop_and_apply_homography(out1, im1, H1, w0, h0, cfg['subsampling_factor'], True)
    # homography_cropper.crop_and_apply_homography(out2, im2, H2, w0, h0, cfg['subsampling_factor'], True)
    common.image_apply_homography(out1, im1, H1, w0, h0)
    common.image_apply_homography(out2, im2, H2, w0, h0)

    # if subsampling_factor != 1, the homographies are altered to reflect the zoom
    if cfg["subsampling_factor"] != 1:
        Z = np.eye(3)
        Z[0, 0] = Z[1, 1] = 1.0 / cfg["subsampling_factor"]

        H1 = np.dot(Z, H1)
        H2 = np.dot(Z, H2)
        disp_m = np.floor(disp_m / cfg["subsampling_factor"])
        disp_M = np.ceil(disp_M / cfg["subsampling_factor"])

    return H1, H2, disp_m, disp_M
Example #31
def register_horizontally(matches, H1, H2, do_shear=False, flag='center'):
    """
    Adjust rectifying homographies to modify the disparity range.

    Args:
        matches: list of pairs of 2D points, stored as a Nx4 numpy array
        H1, H2: two homographies, stored as numpy 3x3 matrices
        do_shear: boolean flag indicating whether to minimize the shear on im2
            or not.
        flag: option needed to control how to modify the disparity range:
            'center': move the barycenter of disparities of matches to zero
            'positive': make all the disparities positive
            'negative': make all the disparities negative. Required for
                Hirschmuller stereo (java)

    Returns:
        H2: corrected homography H2

    The matches are provided in the original images coordinate system. By
    transforming these coordinates with the provided homographies, we obtain
    matches whose disparity is only along the x-axis. The second homography H2
    is corrected with a horizontal translation to obtain the desired property
    on the disparity range.
    """
    # transform the matches according to the homographies
    pt1 = common.points_apply_homography(H1, matches[:, 0:2])
    x1 = pt1[:, 0]
    y1 = pt1[:, 1]
    pt2 = common.points_apply_homography(H2, matches[:, 2:4])
    x2 = pt2[:, 0]
    y2 = pt2[:, 1]

    # for debug, print the vertical disparities. Should be zero.
    print "Residual vertical disparities: max, min, mean. Should be zero ------"
    print np.max(y2 - y1), np.min(y2 - y1), np.mean(y2 - y1)

    # shear correction
    # we search the (s, b) vector that minimises \sum (x1 - (x2+s*y2+b))^2
    # it is a least squares minimisation problem
    if do_shear:
        A = np.vstack((y2, y2*0+1)).T
        B = x1 - x2
        z = np.linalg.lstsq(A, B)[0]
        s = z[0]
        b = z[1]
        H2 = np.dot(np.array([[1, s, b], [0, 1, 0], [0, 0, 1]]), H2)
        x2 = x2 + s*y2 + b

    # compute the disparity offset according to selected option
    if flag == 'center':
        t = np.mean(x2 - x1)
    elif flag == 'positive':
        t = np.min(x2 - x1)
    elif flag == 'negative':
        t = np.max(x2 - x1)
    elif flag == 'none':
        t = 0

    # correct H2 with a translation
    H2 = np.dot(common.matrix_translation(-t, 0), H2)
    return H2
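
To see what the flag options do, here is a tiny self-contained sketch on
synthetic rectified x-coordinates (an illustration with made-up numbers, not
code from the module). Subtracting t from x2 is exactly what composing H2 with
common.matrix_translation(-t, 0) achieves.

import numpy as np

x1 = np.array([10., 20., 30.])
x2 = np.array([14., 26., 41.])  # disparities x2 - x1 are [4, 6, 11]
d = x2 - x1

print(d - np.mean(d))   # 'center':   [-3. -1.  4.]  zero-mean disparities
print(d - np.min(d))    # 'positive': [ 0.  2.  7.]  all disparities >= 0
print(d - np.max(d))    # 'negative': [-7. -5.  0.]  all disparities <= 0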
Example #32
def compute_rectification_homographies(im1,
                                       im2,
                                       rpc1,
                                       rpc2,
                                       x,
                                       y,
                                       w,
                                       h,
                                       A=None,
                                       m=None):
    """
    Computes rectifying homographies for a ROI in a pair of Pleiades images.

    Args:
        im1, im2: paths to the two Pleiades images (usually jp2 or tif)
        rpc1, rpc2: two instances of the rpc_model.RPCModel class
        x, y, w, h: four integers defining the rectangular ROI in the first
            image. (x, y) is the top-left corner, and (w, h) are the dimensions
            of the rectangle.
        A (optional): 3x3 numpy array containing the pointing error correction
            for im2. This matrix is usually estimated with the pointing_accuracy
            module.
        m (optional): Nx4 numpy array containing a list of matches.

    Returns:
        H1, H2: Two 3x3 matrices representing the rectifying homographies to be
            applied to the two images.
        disp_min, disp_max: horizontal disparity range, computed on a set of
            sift matches
    """
    # in brief: use 8-pts normalized algo to estimate F, then use loop-zhang to
    # estimate rectifying homographies.

    print "step 1: find virtual matches, and center them ----------------------"
    n = cfg['n_gcp_per_axis']
    rpc_matches = rpc_utils.matches_from_rpc(rpc1, rpc2, x, y, w, h, n)
    p1 = rpc_matches[:, 0:2]
    p2 = rpc_matches[:, 2:4]

    if A is not None:
        print "applying pointing error correction"
        # correct coordinates of points in im2, according to A
        p2 = common.points_apply_homography(np.linalg.inv(A), p2)

    # the matching points are translated to be centered at 0, in order to deal
    # with coordinates ranging from -1000 to 1000, and decrease imprecision
    # effects of the loop-zhang rectification. These effects may become very
    # important (~ 10 pixels error) when using coordinates around 20000.
    pp1, T1 = center_2d_points(p1)
    pp2, T2 = center_2d_points(p2)

    print "step 2: estimate F (Gold standard algorithm) -----------------------"
    F = estimation.affine_fundamental_matrix(np.hstack([pp1, pp2]))

    print "step 3: compute rectifying homographies (loop-zhang algorithm) -----"
    H1, H2 = estimation.loop_zhang(F, w, h)
    S1, S2 = estimation.rectifying_similarities_from_affine_fundamental_matrix(
        F, True)
    print "F\n", F, "\n"
    print "H1\n", H1, "\n"
    print "S1\n", S1, "\n"
    print "H2\n", H2, "\n"
    print "S2\n", S2, "\n"
    # compose with previous translations to get H1, H2 in the big images frame
    H1 = np.dot(H1, T1)
    H2 = np.dot(H2, T2)

    # for debug
    print "max, min, mean rectification error on rpc matches ------------------"
    tmp = common.points_apply_homography(H1, p1)
    y1 = tmp[:, 1]
    tmp = common.points_apply_homography(H2, p2)
    y2 = tmp[:, 1]
    err = np.abs(y1 - y2)
    print np.max(err), np.min(err), np.mean(err)

    print "step 4: pull back top-left corner of the ROI in the origin ---------"
    roi = [[x, y], [x + w, y], [x + w, y + h], [x, y + h]]
    pts = common.points_apply_homography(H1, roi)
    x0, y0 = common.bounding_box2D(pts)[0:2]
    T = common.matrix_translation(-x0, -y0)
    H1 = np.dot(T, H1)
    H2 = np.dot(T, H2)

    # add a horizontal translation to H2 to center the disparity range around
    # the origin, if sift matches are available
    if m is not None:
        print "step 5: horizontal registration --------------------------------"
        # filter sift matches with the known fundamental matrix
        # but first convert F for big images coordinate frame
        F = np.dot(T2.T, np.dot(F, T1))
        print '%d sift matches before epipolar constraint filtering' % len(m)
        m = filter_matches_epipolar_constraint(F, m, cfg['epipolar_thresh'])
        print '%d sift matches after epipolar constraint filtering' % len(m)
        if len(m) < 2:
            # 0 or 1 sift match
            print 'rectification.compute_rectification_homographies: less than'
            print '2 sift matches after filtering by the epipolar constraint.'
            print 'This may be due to the pointing error, or to strong'
            print 'illumination changes between the input images.'
            print 'No registration will be performed.'
        else:
            H2 = register_horizontally(m, H1, H2)
            disp_m, disp_M = update_disp_range(m, H1, H2, w, h)
            print "SIFT disparity range:  [%f,%f]" % (disp_m, disp_M)

    # expand disparity range with srtm according to cfg params
    print cfg['disp_range_method']
    if (cfg['disp_range_method'] == "srtm") or (m is None) or (len(m) < 2):
        disp_m, disp_M = rpc_utils.srtm_disp_range_estimation(
            rpc1, rpc2, x, y, w, h, H1, H2, A,
            cfg['disp_range_srtm_high_margin'],
            cfg['disp_range_srtm_low_margin'])
        print "SRTM disparity range:  [%f,%f]" % (disp_m, disp_M)
    if ((cfg['disp_range_method'] == "wider_sift_srtm") and (m is not None)
            and (len(m) >= 2)):
        d_m, d_M = rpc_utils.srtm_disp_range_estimation(
            rpc1, rpc2, x, y, w, h, H1, H2, A,
            cfg['disp_range_srtm_high_margin'],
            cfg['disp_range_srtm_low_margin'])
        print "SRTM disparity range:  [%f,%f]" % (d_m, d_M)
        disp_m = min(disp_m, d_m)
        disp_M = max(disp_M, d_M)

    print "Final disparity range:  [%s, %s]" % (disp_m, disp_M)
    return H1, H2, disp_m, disp_M
Example #33
def crop_and_apply_homography(im_out,
                              im_in,
                              H,
                              w,
                              h,
                              subsampling_factor=1,
                              convert_to_gray=False):
    """
    Warps a piece of a Pleiades (panchro or ms) image with a homography.

    Args:
        im_out: path to the output image
        im_in: path to the input (tif) full Pleiades image
        H: numpy array containing the 3x3 homography matrix
        w, h: size of the output image
        subsampling_factor (optional, default=1): when set to z>1,
            will result in the application of the homography Z*H where Z =
            diag(1/z, 1/z, 1), so the output will be zoomed out by a factor z.
            The output image will be (w/z, h/z)
        convert_to_gray (optional, default False): if set to True, and if the
            input image has 4 channels, it is converted to gray before applying
            zoom and homographies.

    Returns:
        nothing

    The homography has to be used as: coord_out = H coord_in. The produced
    output image corresponds to coord_out in [0, w] x [0, h]. The warp is made
    by Pascal Monasse's binary named 'homography'.
    """

    # crop a piece of the big input image, to which the homography will be
    # applied
    # warning: as the crop uses integer coordinates, be careful to round off
    # (x0, y0) before modifying the homography. You want the crop and the
    # translation representing it to do exactly the same thing.
    pts = [[0, 0], [w, 0], [w, h], [0, h]]
    inv_H_pts = common.points_apply_homography(np.linalg.inv(H), pts)
    x0, y0, w0, h0 = common.bounding_box2D(inv_H_pts)
    x0, y0 = np.floor([x0, y0])
    w0, h0 = np.ceil([w0, h0])
    crop_fullres = common.image_crop_LARGE(im_in, x0, y0, w0, h0)

    # This filter is needed (for panchro images) because the original PLEIADES
    # SENSOR PERFECT images are aliased
    if (common.image_pix_dim(crop_fullres) == 1 and subsampling_factor == 1
            and cfg['use_pleiades_unsharpening']):
        tmp = image_apply_pleiades_unsharpening_filter(crop_fullres)
        common.run('rm -f %s' % crop_fullres)
        crop_fullres = tmp

    # convert to gray
    if common.image_pix_dim(crop_fullres) == 4:
        if convert_to_gray:
            crop_fullres = common.pansharpened_to_panchro(crop_fullres)

    # compensate the homography with the translation induced by the preliminary
    # crop, then apply the homography and crop.
    H = np.dot(H, common.matrix_translation(x0, y0))

    # Since the objective is to compute a zoomed out homographic transformation
    # of the input image, to save computations we zoom out the image before
    # applying the homography. If Z is the matrix representing the zoom out and
    # H the homography matrix, this trick consists in applying Z*H*Z^{-1} to
    # the zoomed image Z*Im instead of applying Z*H to the original image Im.
    if subsampling_factor == 1:
        common.image_apply_homography(im_out, crop_fullres, H, w, h)
        return

    else:
        assert (subsampling_factor >= 1)

        # H becomes Z*H*Z^{-1}
        Z = np.eye(3)
        Z[0, 0] = Z[1, 1] = 1 / float(subsampling_factor)
        H = np.dot(Z, H)
        H = np.dot(H, np.linalg.inv(Z))

        # w, and h are updated accordingly
        w = int(w / subsampling_factor)
        h = int(h / subsampling_factor)

        # the DCT zoom is NOT SAFE when the input image size is not a multiple
        # of the zoom factor
        tmpw, tmph = common.image_size(crop_fullres)
        tmpw, tmph = int(tmpw / subsampling_factor), int(tmph /
                                                         subsampling_factor)
        crop_fullres_safe = common.image_crop_tif(crop_fullres, 0, 0,
                                                  tmpw * subsampling_factor,
                                                  tmph * subsampling_factor)
        common.run('rm -f %s' % crop_fullres)

        # zoom out the input image (crop_fullres)
        crop_zoom_out = common.image_safe_zoom_fft(crop_fullres_safe,
                                                   subsampling_factor)
        common.run('rm -f %s' % crop_fullres_safe)

        # apply the homography to the zoomed out crop
        common.image_apply_homography(im_out, crop_zoom_out, H, w, h)
        return
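
The zoom trick used above can be checked in isolation: warping a
full-resolution point with Z*H lands on the same output coordinates as warping
the zoomed point Z*p with the conjugated homography Z*H*Z^{-1}. The sketch
below is only a numerical sanity check with arbitrary placeholder values.

import numpy as np

z = 2.0
Z = np.diag([1 / z, 1 / z, 1.])
H = np.array([[1.1, .05, 30.], [.02, .98, -12.], [0., 0., 1.]])
p = np.array([1234., 567., 1.])  # a full-resolution input point

direct = np.dot(np.dot(Z, H), p)                      # Z*H applied to p
conjugated = np.dot(np.dot(np.dot(Z, H), np.linalg.inv(Z)),
                    np.dot(Z, p))                     # Z*H*Z^{-1} applied to Z*p
np.testing.assert_allclose(direct, conjugated)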
Example #34
def rectify_pair(im1, im2, rpc1, rpc2, x, y, w, h, out1, out2, A=None,
                 sift_matches=None, method='rpc'):
    """
    Rectify a ROI in a pair of images.

    Args:
        im1, im2: paths to two image files
        rpc1, rpc2: paths to the two xml files containing RPC data
        x, y, w, h: four integers defining the rectangular ROI in the first
            image.  (x, y) is the top-left corner, and (w, h) are the dimensions
            of the rectangle.
        out1, out2: paths to the output rectified crops
        A (optional): 3x3 numpy array containing the pointing error correction
            for im2. This matrix is usually estimated with the pointing_accuracy
            module.
        sift_matches (optional): Nx4 numpy array containing a list of sift
            matches, in the full image coordinates frame
        method (default: 'rpc'): option to decide whether to use rpc or sift
            matches for the fundamental matrix estimation.

        This function uses the parameter subsampling_factor from the
        config module. If the factor z > 1 then the output images will
        be subsampled by a factor z. The output matrices H1, H2, and the
        ranges are also updated accordingly:
        Hi = Z * Hi with Z = diag(1/z, 1/z, 1) and
        disp_min = disp_min / z  (resp _max)

    Returns:
        H1, H2: Two 3x3 matrices representing the rectifying homographies that
        have been applied to the two original (large) images.
        disp_min, disp_max: horizontal disparity range
    """
    # read RPC data
    rpc1 = rpc_model.RPCModel(rpc1)
    rpc2 = rpc_model.RPCModel(rpc2)

    # compute real or virtual matches
    if method == 'rpc':
        # find virtual matches from RPC camera models
        matches = rpc_utils.matches_from_rpc(rpc1, rpc2, x, y, w, h,
                                             cfg['n_gcp_per_axis'])

        # correct second image coordinates with the pointing correction matrix
        if A is not None:
            matches[:, 2:] = common.points_apply_homography(np.linalg.inv(A),
                                                            matches[:, 2:])
    else:
        matches = sift_matches

    # compute rectifying homographies
    H1, H2, F = rectification_homographies(matches, x, y, w, h)

    # compose H2 with a horizontal translation to center disp range around 0
    if sift_matches is not None:
        sift_matches = filter_matches_epipolar_constraint(F, sift_matches,
                                                          cfg['epipolar_thresh'])
        if len(sift_matches) < 10:
            print 'WARNING: no registration with less than 10 matches'
        else:
            H2 = register_horizontally(sift_matches, H1, H2)

    # compute disparity range
    disp_m, disp_M = disparity_range(rpc1, rpc2, x, y, w, h, H1, H2,
                                     sift_matches, A)

    # compute output images size
    roi = [[x, y], [x+w, y], [x+w, y+h], [x, y+h]]
    pts1 = common.points_apply_homography(H1, roi)
    x0, y0, w0, h0 = common.bounding_box2D(pts1)
    # check that the first homography maps the ROI in the positive quadrant
    np.testing.assert_allclose(np.round([x0, y0]), 0, atol=.01)

    # apply homographies and do the crops TODO XXX FIXME cleanup here
    #homography_cropper.crop_and_apply_homography(out1, im1, H1, w0, h0, cfg['subsampling_factor'], True)
    #homography_cropper.crop_and_apply_homography(out2, im2, H2, w0, h0, cfg['subsampling_factor'], True)
    common.image_apply_homography(out1, im1, H1, w0, h0)
    common.image_apply_homography(out2, im2, H2, w0, h0)

    # if subsampling_factor != 1, the homographies are altered to reflect the zoom
    if cfg['subsampling_factor'] != 1:
        Z = np.eye(3)
        Z[0, 0] = Z[1, 1] = 1.0 / cfg['subsampling_factor']

        H1 = np.dot(Z, H1)
        H2 = np.dot(Z, H2)
        disp_m = np.floor(disp_m / cfg['subsampling_factor'])
        disp_M = np.ceil(disp_M / cfg['subsampling_factor'])

    return H1, H2, disp_m, disp_M
Example #35
def rectify_pair(im1, im2, rpc1, rpc2, x, y, w, h, out1, out2, A=None, m=None,
                 flag='rpc'):
    """
    Rectify a ROI in a pair of Pleiades images.

    Args:
        im1, im2: paths to the two Pleiades images (usually jp2 or tif)
        rpc1, rpc2: paths to the two xml files containing RPC data
        x, y, w, h: four integers defining the rectangular ROI in the first
            image.  (x, y) is the top-left corner, and (w, h) are the dimensions
            of the rectangle.
        out1, out2: paths to the output crops
        A (optional): 3x3 numpy array containing the pointing error correction
            for im2. This matrix is usually estimated with the pointing_accuracy
            module.
        m (optional): Nx4 numpy array containing a list of sift matches, in the
            full image coordinates frame
        flag (default: 'rpc'): option to decide whether to use rpc or sift
            matches for the fundamental matrix estimation.

        This function uses the parameter subsampling_factor from the
        config module.  If the factor z > 1 then the output images will
        be subsampled by a factor z.  The output matrices H1, H2, and the
        ranges are also updated accordingly:
        Hi = Z*Hi   with Z = diag(1/z,1/z,1)   and
        disp_min = disp_min/z  (resp _max)

    Returns:
        H1, H2: Two 3x3 matrices representing the rectifying homographies that
            have been applied to the two (big) images.
        disp_min, disp_max: horizontal disparity range
    """
    # read RPC data
    rpc1 = rpc_model.RPCModel(rpc1)
    rpc2 = rpc_model.RPCModel(rpc2)

    # compute rectifying homographies
    if flag == 'rpc':
        H1, H2, disp_min, disp_max = compute_rectification_homographies(
            im1, im2, rpc1, rpc2, x, y, w, h, A, m)
    else:
        H1, H2, disp_min, disp_max = compute_rectification_homographies_sift(
            im1, im2, rpc1, rpc2, x, y, w, h)

    # compute output images size
    roi = [[x, y], [x+w, y], [x+w, y+h], [x, y+h]]
    pts1 = common.points_apply_homography(H1, roi)
    x0, y0, w0, h0 = common.bounding_box2D(pts1)
    # check that the first homography maps the ROI in the positive quadrant
    np.testing.assert_allclose(np.round([x0, y0]), 0, atol=.01)

    # apply homographies and do the crops
    homography_cropper.crop_and_apply_homography(out1, im1, H1, w0, h0,
                                                 cfg['subsampling_factor'],
                                                 True)
    homography_cropper.crop_and_apply_homography(out2, im2, H2, w0, h0,
                                                 cfg['subsampling_factor'],
                                                 True)

    # if subsampling_factor != 1, the homographies are altered to reflect the zoom
    if cfg['subsampling_factor'] != 1:
        from math import floor, ceil
        # update the H1 and H2 to reflect the zoom
        Z = np.eye(3)
        Z[0, 0] = Z[1, 1] = 1.0 / cfg['subsampling_factor']

        H1 = np.dot(Z, H1)
        H2 = np.dot(Z, H2)
        disp_min = floor(disp_min / cfg['subsampling_factor'])
        disp_max = ceil(disp_max / cfg['subsampling_factor'])

    return H1, H2, disp_min, disp_max
Example #36
def crop_and_apply_homography(im_out, im_in, H, w, h, subsampling_factor=1,
        convert_to_gray=False):
    """
    Warps a piece of a Pleiades (panchro or ms) image with a homography.

    Args:
        im_out: path to the output image
        im_in: path to the input (tif) full Pleiades image
        H: numpy array containing the 3x3 homography matrix
        w, h: size of the output image
        subsampling_factor (optional, default=1): when set to z>1,
            will result in the application of the homography Z*H where Z =
            diag(1/z, 1/z, 1), so the output will be zoomed out by a factor z.
            The output image will be (w/z, h/z)
        convert_to_gray (optional, default False): if set to True, and if the
            input image has 4 channels, it is converted to gray before applying
            zoom and homographies.

    Returns:
        nothing

    The homography has to be used as: coord_out = H coord_in. The produced
    output image corresponds to coord_out in [0, w] x [0, h]. The warp is made
    by Pascal Monasse's binary named 'homography'.
    """

    # crop a piece of the big input image, to which the homography will be
    # applied
    # warning: as the crop uses integer coordinates, be careful to round off
    # (x0, y0) before modifying the homography. You want the crop and the
    # translation representing it to do exactly the same thing.
    pts = [[0, 0], [w, 0], [w, h], [0, h]]
    inv_H_pts = common.points_apply_homography(np.linalg.inv(H), pts)
    x0, y0, w0, h0 = common.bounding_box2D(inv_H_pts)
    x0, y0 = np.floor([x0, y0])
    w0, h0 = np.ceil([w0, h0])
    crop_fullres = common.image_crop_LARGE(im_in, x0, y0, w0, h0)

    # This filter is needed (for panchro images) because the original PLEIADES
    # SENSOR PERFECT images are aliased
    if (common.image_pix_dim(crop_fullres) == 1 and subsampling_factor == 1 and
            cfg['use_pleiades_unsharpening']):
        tmp = image_apply_pleiades_unsharpening_filter(crop_fullres)
        common.run('rm -f %s' % crop_fullres)
        crop_fullres = tmp

    # convert to gray
    if common.image_pix_dim(crop_fullres) == 4:
        if convert_to_gray:
            crop_fullres = common.pansharpened_to_panchro(crop_fullres)

    # compensate the homography with the translation induced by the preliminary
    # crop, then apply the homography and crop.
    H = np.dot(H, common.matrix_translation(x0, y0))

    # Since the objective is to compute a zoomed out homographic transformation
    # of the input image, to save computations we zoom out the image before
    # applying the homography. If Z is the matrix representing the zoom out and
    # H the homography matrix, this trick consists in applying Z*H*Z^{-1} to
    # the zoomed image Z*Im instead of applying Z*H to the original image Im.
    if subsampling_factor == 1:
        common.image_apply_homography(im_out, crop_fullres, H, w, h)
        return

    else:
        assert(subsampling_factor >= 1)

        # H becomes Z*H*Z^{-1}
        Z = np.eye(3)
        Z[0, 0] = Z[1, 1] = 1 / float(subsampling_factor)
        H = np.dot(Z, H)
        H = np.dot(H, np.linalg.inv(Z))

        # w, and h are updated accordingly
        w = int(w / subsampling_factor)
        h = int(h / subsampling_factor)

        # the DCT zoom is NOT SAFE when the input image size is not a multiple
        # of the zoom factor
        tmpw, tmph = common.image_size(crop_fullres)
        tmpw, tmph = int(tmpw / subsampling_factor), int(tmph / subsampling_factor)
        crop_fullres_safe = common.image_crop_tif(crop_fullres, 0, 0, tmpw *
                subsampling_factor, tmph * subsampling_factor)
        common.run('rm -f %s' % crop_fullres)

        # zoom out the input image (crop_fullres)
        crop_zoom_out = common.image_safe_zoom_fft(crop_fullres_safe,
                subsampling_factor)
        common.run('rm -f %s' % crop_fullres_safe)

        # apply the homography to the zoomed out crop
        common.image_apply_homography(im_out, crop_zoom_out, H, w, h)
        return
Example #37
def register_horizontally(matches, H1, H2, do_shear=True,
        do_scale_horizontally=False, flag='center'):
    """
    Adjust rectifying homographies to modify the disparity range.

    Args:
        matches: list of pairs of 2D points, stored as a Nx4 numpy array
        H1, H2: two homographies, stored as numpy 3x3 matrices
        do_shear: boolean flag indicating whether to minimize the shear on im2
            or not.
        do_scale_horizontally: boolean flag indicating whether to minimize
            with respect to the horizontal scaling on im2 or not.
        flag: option needed to control how to modify the disparity range:
            'center': move the barycenter of disparities of matches to zero
            'positive': make all the disparities positive
            'negative': make all the disparities negative. Required for
                Hirschmuller stereo (java)

    Returns:
        H2: corrected homography H2
        disp_min, disp_max: horizontal disparity range

    The matches are provided in the original images coordinate system. By
    transforming these coordinates with the provided homographies, we obtain
    matches whose disparity is only along the x-axis. The second homography H2
    is corrected with a horizontal translation to obtain the desired property
    on the disparity range.  The minimum and maximum disparities over the set
    of matches are extracted, with a security margin controlled by the
    cfg['disp_range_extra_margin'] parameter.
    """
    # transform the matches according to the homographies
    pt1 = common.points_apply_homography(H1, matches[:, 0:2])
    x1 = pt1[:, 0]
    y1 = pt1[:, 1]
    pt2 = common.points_apply_homography(H2, matches[:, 2:4])
    x2 = pt2[:, 0]
    y2 = pt2[:, 1]

    # shear correction
    # we search the (s, b) vector that minimises \sum (x1 - (x2+s*y2+b))^2
    # it is a least squares minimisation problem
    if do_shear:
        # horizontal scale correction
        if do_scale_horizontally:  # | x1 - (s*x2 + t*y2 + d) |^2
            A = np.vstack((x2, y2, y2*0+1)).T
            b = x1
            z = np.linalg.lstsq(A, b)[0]
            s = z[0]
            t = z[1]
            d = z[2]
            H2 = np.dot(np.array([[s, t, d], [0, 1, 0], [0, 0, 1]]), H2)
            x2 = s*x2 + t*y2 + d
        else:
            A = np.vstack((y2, y2*0+1)).T
            b = x1 - x2
            z = np.linalg.lstsq(A, b)[0]
            s = z[0]
            b = z[1]
            H2 = np.dot(np.array([[1, s, b], [0, 1, 0], [0, 0, 1]]), H2)
            x2 = x2 + s*y2 + b


    # compute the disparity offset according to selected option
    if flag == 'center':
        t = np.mean(x2 - x1)
    elif flag == 'positive':
        t = np.min(x2 - x1)
    elif flag == 'negative':
        t = np.max(x2 - x1)
    elif flag == 'none':
        t = 0


    # correct H2 with a translation
    H2 = np.dot(common.matrix_translation(-t, 0), H2)
    x2 = x2 - t

    # extract min and max disparities
    dispx_min = np.floor((np.min(x2 - x1)))
    dispx_max = np.ceil((np.max(x2 - x1)))

    # add a security margin to the disp range
    d = cfg['disp_range_extra_margin']
    if (dispx_min < 0):
        dispx_min = (1+d) * dispx_min
    else:
        dispx_min = (1-d) * dispx_min
    if (dispx_max > 0):
        dispx_max = (1+d) * dispx_max
    else:
        dispx_max = (1-d) * dispx_max

    # for debug, print the vertical disparities. Should be zero.
    print "Residual vertical disparities: min, max, mean. Should be zero ------"
    print np.min(y2 - y1), np.max(y2 - y1), np.mean(y2 - y1)
    return H2, dispx_min, dispx_max
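
The shear / horizontal-scale correction above is an ordinary linear
least-squares fit, so it can be exercised on synthetic data: generate
rectified matches with a known horizontal map and check that np.linalg.lstsq
recovers its parameters. This is an illustration with made-up numbers, not
part of the original module.

import numpy as np

rng = np.random.RandomState(0)
x2 = rng.uniform(0, 1000, 50)
y2 = rng.uniform(0, 1000, 50)
x1 = 1.02 * x2 + 0.003 * y2 - 5.0   # known map: s=1.02, t=0.003, d=-5

# same normal equations as the do_scale_horizontally branch
A = np.vstack((x2, y2, np.ones_like(x2))).T
s, t, d = np.linalg.lstsq(A, x1)[0]
print("%f %f %f" % (s, t, d))       # ~ 1.020000 0.003000 -5.000000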