Example #1
def compute_correction(img1, rpc1, img2, rpc2, x, y, w, h):
    """
    Computes pointing correction matrix for specific ROI

    Args:
        img1: path to the reference image.
        rpc1: path to the xml file containing the rpc coefficients of the
            reference image
        img2: path to the secondary image.
        rpc2: path to the xml file containing the rpc coefficients of the
            secondary image
        x, y, w, h: four integers defining the rectangular ROI in the reference
            image. (x, y) is the top-left corner, and (w, h) are the dimensions
            of the rectangle. The ROI may be as big as you want. If bigger than
            1 Mpix, only five crops will be used to compute sift matches.

    Returns:
        a 3x3 matrix representing the planar transformation to apply to img2 in
        order to correct the pointing error, and the list of sift matches used
        to compute this correction.
    """
    # read rpcs
    r1 = rpc_model.RPCModel(rpc1)
    r2 = rpc_model.RPCModel(rpc2)

    m = sift.matches_on_rpc_roi(img1, img2, r1, r2, x, y, w, h)

    if m is not None:
        A = local_translation(r1, r2, x, y, w, h, m)
    else:
        A = None

    return A, m
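
A minimal usage sketch, assuming the s2p-style modules used above (rpc_model, sift and the local_translation helper) are importable; the image and xml paths are placeholders:

A, m = compute_correction('img_ref.tif', 'rpc_ref.xml',
                          'img_sec.tif', 'rpc_sec.xml',
                          x=0, y=0, w=1000, h=1000)
if A is not None:
    print('3x3 pointing correction:\n', A)
    print('{} sift matches used'.format(len(m)))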
Example #2
def corresponding_roi(rpc1, rpc2, x, y, w, h):
    """
    Uses RPC functions to determine the region of im2 associated to the
    specified ROI of im1.

    Args:
        rpc1, rpc2: two instances of the rpc_model.RPCModel class, or paths to
            the xml files
        x, y, w, h: four integers defining a rectangular region of interest
            (ROI) in the first view. (x, y) is the top-left corner, and (w, h)
            are the dimensions of the rectangle.

    Returns:
        four integers defining a ROI in the second view. This ROI is supposed
        to contain the projections of the 3D points that are visible in the
        input ROI.
    """
    # read rpc files
    if not isinstance(rpc1, rpc_model.RPCModel):
        rpc1 = rpc_model.RPCModel(rpc1)
    if not isinstance(rpc2, rpc_model.RPCModel):
        rpc2 = rpc_model.RPCModel(rpc2)
    m, M = altitude_range(rpc1, x, y, w, h, 0, 0)

    # build an array with vertices of the 3D ROI, obtained as {2D ROI} x [m, M]
    a = np.array([x, x, x, x, x + w, x + w, x + w, x + w])
    b = np.array([y, y, y + h, y + h, y, y, y + h, y + h])
    c = np.array([m, M, m, M, m, M, m, M])

    # corresponding points in im2
    xx, yy = find_corresponding_point(rpc1, rpc2, a, b, c)[0:2]

    # return coordinates of the bounding box in im2
    out = common.bounding_box2D(np.vstack([xx, yy]).T)
    return np.round(out)
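
A usage sketch (the xml paths are placeholders; rpc_model, numpy and common must be importable):

x2, y2, w2, h2 = corresponding_roi('rpc_ref.xml', 'rpc_sec.xml',
                                   x=500, y=500, w=800, h=600)
print('ROI in the secondary image:', x2, y2, w2, h2)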
Example #3
def compute_correction(img1, rpc1, img2, rpc2, x, y, w, h, pec_method='analytic',
                       pec_degree=1, apply_rectification=True):
    """
    Computes pointing correction matrix for specific ROI

    Args:
        img1: path to the reference image.
        rpc1: path to the xml file containing the rpc coefficients of the
            reference image
        img2: path to the secondary image.
        rpc2: path to the xml file containing the rpc coefficients of the
            secondary image
        x, y, w, h: four integers defining the rectangular ROI in the reference
            image. (x, y) is the top-left corner, and (w, h) are the dimensions
            of the rectangle. The ROI may be as big as you want. If bigger than
            1 Mpix, only five crops will be used to compute sift matches.
        pec_method (optional, default 'analytic'): solver used to estimate the
            pointing error correction, either 'analytic' or 'numeric'
        pec_degree (optional, default 1): degree of the correction model (only
            1 and 2 are supported by the analytic solver)
        apply_rectification (optional, default True): if True, the analytic
            degree-1 correction is estimated in rectified coordinates

    Returns:
        a 3x3 matrix representing the planar transformation to apply to img2 in
        order to correct the pointing error, the list of sift matches used to
        compute this correction, and the fundamental matrix estimated along
        with it.
    """
    # read rpcs
    r1 = rpc_model.RPCModel(rpc1)
    r2 = rpc_model.RPCModel(rpc2)

    m = sift.matches_on_rpc_roi(img1, img2, r1, r2, x, y, w, h)

    print(pec_method)
    if m is not None:
        if (pec_method == 'analytic'):
            if (pec_degree == 1):
                if (apply_rectification):
                    A, F = local_translation_rectified(r1, r2, x, y, w, h, m)
                else:
                    A, F = local_translation(r1, r2, x, y, w, h, m)
            elif (pec_degree == 2):
                A, F = local_translation_rotation(r1, r2, x, y, w, h, m)
            else:
                raise ValueError("Analytic method for degree > 2 is not implemented")
        elif (pec_method == 'numeric'):
            A, F = local_transformation(r1, r2, x, y, w, h, m, pec_degree, apply_rectification)
        else:
            raise ValueError("Choose between analytic or numeric solver for correcting the pointing error")


    else:
        A = None
        F = None

    return A, m, F
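
A sketch of how the solver could be selected through the keyword arguments (same assumptions and placeholder paths as in Example #1):

# analytic degree-1 translation, estimated in rectified coordinates
A, m, F = compute_correction('img_ref.tif', 'rpc_ref.xml',
                             'img_sec.tif', 'rpc_sec.xml', 0, 0, 1000, 1000,
                             pec_method='analytic', pec_degree=1)

# numeric solver for a degree-2 correction
A, m, F = compute_correction('img_ref.tif', 'rpc_ref.xml',
                             'img_sec.tif', 'rpc_sec.xml', 0, 0, 1000, 1000,
                             pec_method='numeric', pec_degree=2)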
Example #4
def utm_zone(rpc, x, y, w, h):
    """
    Compute the UTM zone where the ROI probably falls (or close to its border).

    Args:
        rpc: instance of the rpc_model.RPCModel class, or path to the xml file
        x, y, w, h: four integers defining a rectangular region of interest
            (ROI) in the image. (x, y) is the top-left corner, and (w, h)
            are the dimensions of the rectangle.

    Returns:
        a string of the form '18N' or '18S' where 18 is the utm zone
        identifier.
    """
    # read rpc file
    if not isinstance(rpc, rpc_model.RPCModel):
        rpc = rpc_model.RPCModel(rpc)

    # determine lat lon of the center of the roi, assuming median altitude
    lon, lat = rpc.direct_estimate(x + .5 * w, y + .5 * h, rpc.altOff)[:2]

    # compute the utm zone number and add the hemisphere letter
    zone = utm.conversion.latlon_to_zone_number(lat, lon)
    if lat < 0:
        return '%dS' % zone
    else:
        return '%dN' % zone
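
Example call (the xml path is a placeholder; an RPCModel instance would work as well):

zone = utm_zone('rpc_ref.xml', x=0, y=0, w=2000, h=2000)
print(zone)  # e.g. '18N'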
Example #5
def rpc_from_geotiff(geotiff_path, outrpcfile='.rpc'):
    """
    Extracts the RPC model from a GeoTIFF file (remote files are read through
    GDAL's vsicurl driver).
    """
    import os
    import subprocess

    env = os.environ.copy()
    if geotiff_path.startswith(('http://', 'https://')):
        env['CPL_VSIL_CURL_ALLOWED_EXTENSIONS'] = geotiff_path[-3:]
        path = '/vsicurl/{}'.format(geotiff_path)
    else:
        path = geotiff_path

    # keep only the RPC-related lines of the gdalinfo output
    tags = (b'SAMP_', b'LINE_', b'HEIGHT_', b'LAT_', b'LONG_', b'MAX_', b'MIN_')
    gdalinfo = subprocess.Popen(['gdalinfo', path], stdout=subprocess.PIPE,
                                env=env).communicate()[0]

    with open(outrpcfile, 'wb') as f:
        for l in gdalinfo.splitlines():
            if not any(tag in l for tag in tags):
                continue
            y = l.strip().replace(b'=', b': ')
            if b'COEFF' in y:
                # write each coefficient of the list as a numbered entry
                z = y.split()
                for t, j in enumerate(z[1:], start=1):
                    f.write(b'%s_%d: %s\n' % (z[0][:-1], t, j))
            else:
                f.write(y + b'\n')

    return rpc_model.RPCModel(outrpcfile)
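
A usage sketch (the file name and URL are placeholders; gdalinfo must be installed and rpc_model importable):

# local GeoTIFF with embedded RPC tags
rpc = rpc_from_geotiff('image_with_rpc.tif', outrpcfile='image.rpc')

# remote GeoTIFF read through /vsicurl/
rpc = rpc_from_geotiff('https://example.com/tiles/image.tif')
print(rpc.altOff)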
Example #6
def kml_roi_process(rpc, kml):
    """
    """
    # extract lon lat from kml
    with open(kml, 'r') as f:
        a = bs4.BeautifulSoup(f, "lxml").find_all('coordinates')[0].text.split()
    ll_bbx = np.array([list(map(float, x.split(','))) for x in a])[:4, :2]

    # save lon lat bounding box to cfg dictionary
    lon_min = min(ll_bbx[:, 0])
    lon_max = max(ll_bbx[:, 0])
    lat_min = min(ll_bbx[:, 1])
    lat_max = max(ll_bbx[:, 1])
    cfg['ll_bbx'] = (lon_min, lon_max, lat_min, lat_max)

    # convert lon lat bbox to utm
    z = utm.conversion.latlon_to_zone_number((lat_min + lat_max) * .5,
                                             (lon_min + lon_max) * .5)
    utm_bbx = np.array([utm.from_latlon(p[1], p[0], force_zone_number=z)[:2] for
                        p in ll_bbx])
    east_min = min(utm_bbx[:, 0])
    east_max = max(utm_bbx[:, 0])
    nort_min = min(utm_bbx[:, 1])
    nort_max = max(utm_bbx[:, 1])
    cfg['utm_bbx'] = (east_min, east_max, nort_min, nort_max)

    # project lon lat vertices into the image
    if not isinstance(rpc, rpc_model.RPCModel):
        rpc = rpc_model.RPCModel(rpc)
    img_pts = [rpc.inverse_estimate(p[0], p[1], rpc.altOff)[:2] for p in ll_bbx]

    # return image roi
    x, y, w, h = common.bounding_box2D(img_pts)
    return {'x': x, 'y': y, 'w': w, 'h': h}
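
A usage sketch (placeholder paths; bs4, utm, numpy and the global cfg dictionary are assumed available):

roi = kml_roi_process('rpc_ref.xml', 'area_of_interest.kml')
print(roi)            # {'x': ..., 'y': ..., 'w': ..., 'h': ...}
print(cfg['utm_bbx'])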
Example #7
def list_srtm_tiles(rpcfile, x, y, w, h):
    """
    Tells which srtm tiles are needed to cover a given region of interest.

    Args:
        rpcfile: path to the xml file describing the rpc model.
        x, y, w, h: four integers defining a rectangular region of interest
            (ROI) in the image. (x, y) is the top-left corner, and (w, h) are
            the dimensions of the rectangle.

    Returns:
        the set of srtm tiles corresponding to the input ROI.
    """
    rpc = rpc_model.RPCModel(rpcfile)
    lon_min, lon_max, lat_min, lat_max = rpc_utils.geodesic_bounding_box(
        rpc, x, y, w, h)
    out = []
    for lon in [lon_min, lon_max]:
        for lat in [lat_min, lat_max]:
            p = subprocess.Popen(
                ['srtm4_which_tile', str(lon),
                 str(lat)],
                stdout=subprocess.PIPE)
            out.append(p.stdout.readline().split()[0].decode())
    out = set(out)
    print("Needed srtm tiles: ", out)
    return out
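
Example call (placeholder path; the srtm4_which_tile binary is assumed to be on the PATH):

tiles = list_srtm_tiles('rpc_ref.xml', x=0, y=0, w=5000, h=5000)
for t in sorted(tiles):
    print(t)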
Example #8
def get_coordinates_with_config(tile, m, M):
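    """
    Reads the ROI and RPC of a tile from its config.json, and returns the
    closed lon/lat polygon (5 vertices, zero altitude) bounding the projection
    of that ROI for altitudes between m and M.
    """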
    tile_cfg = s2p.read_config_file(os.path.join(tile, "config.json"))

    x = tile_cfg['roi']['x']
    y = tile_cfg['roi']['y']
    w = tile_cfg['roi']['w']
    h = tile_cfg['roi']['h']

    rpcfile = tile_cfg['images'][0]['rpc']
    rpc = rpc_model.RPCModel(rpcfile)

    a = np.array([x, x, x, x, x + w, x + w, x + w, x + w])
    b = np.array([y, y, y + h, y + h, y, y, y + h, y + h])
    c = np.array([m, M, m, M, m, M, m, M])

    lon, lat, __ = rpc.direct_estimate(a, b, c)

    out = list(common.bounding_box2D(np.vstack([lon, lat]).T))

    out[2] += out[0]
    out[3] += out[1]

    latlon = [[out[0], out[3], 0], [out[2], out[3], 0], [out[2], out[1], 0],
              [out[0], out[1], 0], [out[0], out[3], 0]]

    return latlon
Example #9
def plot_pointing_error_tile(im1, im2, rpc1, rpc2, x, y, w, h,
        matches_sift=None, f=100, out_files_pattern=None):
    """
    Args:
        im1, im2: paths to the full images
        rpc1, rpc2: paths to the associated rpc xml files
        x, y, w, h: four integers defining the rectangular tile in the reference
            image. (x, y) is the top-left corner, and (w, h) are the dimensions
            of the tile.
        matches_sift (optional, default is None): precomputed sift matches; if
            None, they are computed from the two images
        f (optional, default is 100): exaggeration factor for the error vectors
        out_files_pattern (optional, default is None): pattern used to name the
            two output files (plots of the pointing error)

    Returns:
        nothing, but opens a display
    """
    # read rpcs
    r1 = rpc_model.RPCModel(rpc1)
    r2 = rpc_model.RPCModel(rpc2)

    # compute sift matches
    if not matches_sift:
        matches_sift = sift.matches_on_rpc_roi(im1, im2, r1, r2, x, y, w, h)

    # compute rpc matches
    matches_rpc = rpc_utils.matches_from_rpc(r1, r2, x, y, w, h, 5)

    # estimate affine fundamental matrix
    F = estimation.affine_fundamental_matrix(matches_rpc)

    # compute error vectors
    e = s2plib.pointing_accuracy.error_vectors(matches_sift, F, 'ref')

    A = s2plib.pointing_accuracy.local_translation(r1, r2, x, y, w, h,
                                                   matches_sift)
    p = matches_sift[:, 0:2]
    q = matches_sift[:, 2:4]
    qq = common.points_apply_homography(A, q)
    ee = s2plib.pointing_accuracy.error_vectors(np.hstack((p, qq)), F, 'ref')
    print(s2plib.pointing_accuracy.evaluation_from_estimated_F(im1, im2,
        r1, r2, x, y, w, h, None, matches_sift))
    print(s2plib.pointing_accuracy.evaluation_from_estimated_F(im1, im2,
        r1, r2, x, y, w, h, A, matches_sift))

    # plot the vectors: they go from the point x to the line (F.T)x'
    plot_vectors(p, -e, x, y, w, h, f, out_file='%s_before.png' % out_files_pattern)
    plot_vectors(p, -ee, x, y, w, h, f, out_file='%s_after.png' % out_files_pattern)
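
A usage sketch (placeholder paths; s2plib and the plotting helpers used above are assumed importable):

plot_pointing_error_tile('img_ref.tif', 'img_sec.tif',
                         'rpc_ref.xml', 'rpc_sec.xml',
                         x=0, y=0, w=1000, h=1000,
                         f=100, out_files_pattern='pointing_error')
# writes pointing_error_before.png and pointing_error_after.png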
Example #10
    def _verify_rpc_model_crop(self, rpc_1, rpc_2):
        # sanity check of the cropped model

        rpc_model_before_cropping = rpc_model.RPCModel(rpc_1)
        rpc_model_after_cropping = rpc_model.RPCModel(rpc_2)

        offset_w, offset_h = [100, 100]

        lon_gt, lat_gt, alt_gt = rpc_model_before_cropping.direct_estimate(
            200, 300, 0)
        lon, lat, alt = rpc_model_after_cropping.direct_estimate(
            200 + offset_w, 300 + offset_h, 0)

        assert abs(lon_gt - lon) < 10**(-8)
        assert abs(lat_gt - lat) < 10**(-8)
        assert abs(alt_gt - alt) < 10**(-8)

        return 0
Example #11
    def scale_rpc(self, rpc_in, scale_x, scale_y):

        r = rpc_model.RPCModel(rpc_in)
        r.linScale /= scale_y
        r.linOff /= scale_y
        r.colScale /= scale_x
        r.colOff /= scale_x

        print("Done")
        return (r)
Example #12
    def crop_rpc(self, rpc_in, crop):
        # crop should be x_min, y_min, width, height

        x_min, y_min, width, height = crop

        # generate rpc file
        print("croping rpc {} ...".format(rpc_in))

        r = rpc_model.RPCModel(rpc_in)

        r.linOff += y_min
        r.colOff += x_min

        print("Done")
        return (r)
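
Both helpers build a fresh RPCModel from the input file and return the adjusted model; a sketch, where tools is a hypothetical stand-in for the instance exposing these methods and the paths are placeholders:

r_scaled = tools.scale_rpc('rpc_ref.xml', scale_x=2.0, scale_y=2.0)
r_cropped = tools.crop_rpc('rpc_ref.xml', crop=(500, 500, 1024, 1024))
r_cropped.write('rpc_cropped.xml')   # RPCModel.write is also used in Example #16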
Example #13
def utm_roi_to_img_roi(rpc, roi):
    """
    """
    # define utm rectangular box
    x, y, w, h = [roi[k] for k in ['x', 'y', 'w', 'h']]
    box = [(x, y), (x+w, y), (x+w, y+h), (x, y+h)]

    # convert utm to lon/lat
    utm_z = roi['utm_band']
    north = roi['hemisphere'] == 'N'
    box_latlon = [utm.to_latlon(p[0], p[1], utm_z, northern=north) for p in box]

    # project lon/lat vertices into the image
    if not isinstance(rpc, rpc_model.RPCModel):
        rpc = rpc_model.RPCModel(rpc)
    img_pts = [rpc.inverse_estimate(p[1], p[0], rpc.altOff)[:2] for p in
               box_latlon]

    # return image roi
    x, y, w, h = common.bounding_box2D(img_pts)
    return {'x': x, 'y': y, 'w': w, 'h': h}
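
A usage sketch (the UTM numbers and the xml path are made-up placeholders):

utm_roi = {'x': 354000, 'y': 4185000, 'w': 2000, 'h': 2000,
           'utm_band': 18, 'hemisphere': 'N'}
img_roi = utm_roi_to_img_roi('rpc_ref.xml', utm_roi)
print(img_roi)   # {'x': ..., 'y': ..., 'w': ..., 'h': ...}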
Example #14
def rectify_pair(im1,
                 im2,
                 rpc1,
                 rpc2,
                 x,
                 y,
                 w,
                 h,
                 out1,
                 out2,
                 A=None,
                 sift_matches=None,
                 method='rpc',
                 hmargin=0,
                 vmargin=0):
    """
    Rectify a ROI in a pair of images.

    Args:
        im1, im2: paths to two image files
        rpc1, rpc2: paths to the two xml files containing RPC data
        x, y, w, h: four integers defining the rectangular ROI in the first
            image.  (x, y) is the top-left corner, and (w, h) are the dimensions
            of the rectangle.
        out1, out2: paths to the output rectified crops
        A (optional): 3x3 numpy array containing the pointing error correction
            for im2. This matrix is usually estimated with the pointing_accuracy
            module.
        sift_matches (optional): Nx4 numpy array containing a list of sift
            matches, in the full image coordinates frame
        method (default: 'rpc'): option to decide whether to use rpc or sift
            matches for the fundamental matrix estimation.
        {h,v}margin (optional): horizontal and vertical margins added on the
            sides of the rectified images

        This function uses the parameter subsampling_factor from the
        config module. If the factor z > 1 then the output images will
        be subsampled by a factor z. The output matrices H1, H2, and the
        ranges are also updated accordingly:
        Hi = Z * Hi with Z = diag(1/z, 1/z, 1) and
        disp_min = disp_min / z  (resp _max)

    Returns:
        H1, H2: Two 3x3 matrices representing the rectifying homographies that
        have been applied to the two original (large) images.
        disp_min, disp_max: horizontal disparity range
    """
    # read RPC data
    rpc1 = rpc_model.RPCModel(rpc1)
    rpc2 = rpc_model.RPCModel(rpc2)

    # compute real or virtual matches
    if method == 'rpc':
        # find virtual matches from RPC camera models
        matches = rpc_utils.matches_from_rpc(rpc1, rpc2, x, y, w, h,
                                             cfg['n_gcp_per_axis'])

        # correct second image coordinates with the pointing correction matrix
        if A is not None:
            matches[:, 2:] = common.points_apply_homography(
                np.linalg.inv(A), matches[:, 2:])
    else:
        matches = sift_matches

    # compute rectifying homographies
    H1, H2, F = rectification_homographies(matches, x, y, w, h, hmargin,
                                           vmargin)

    if cfg['register_with_shear']:
        # compose H2 with a horizontal shear to reduce the disparity range
        a = np.mean(rpc_utils.altitude_range(rpc1, x, y, w, h))
        lon, lat, alt = rpc_utils.ground_control_points(
            rpc1, x, y, w, h, a, a, 4)
        x1, y1 = rpc1.inverse_estimate(lon, lat, alt)[:2]
        x2, y2 = rpc2.inverse_estimate(lon, lat, alt)[:2]
        m = np.vstack([x1, y1, x2, y2]).T
        m = np.vstack({tuple(row)
                       for row in m})  # remove duplicates due to no alt range
        H2 = register_horizontally_shear(m, H1, H2)

    # compose H2 with a horizontal translation to center disp range around 0
    if sift_matches is not None:
        sift_matches = filter_matches_epipolar_constraint(
            F, sift_matches, cfg['epipolar_thresh'])
        if len(sift_matches) < 10:
            print('WARNING: no registration with less than 10 matches')
        else:
            H2 = register_horizontally_translation(sift_matches, H1, H2)

    # compute disparity range
    if cfg['debug']:
        out_dir = os.path.dirname(out1)
        np.savetxt(os.path.join(out_dir, 'sift_matches_disp.txt'),
                   sift_matches,
                   fmt='%9.3f')
        visualisation.plot_matches(
            im1, im2, rpc1, rpc2, sift_matches, x, y, w, h,
            os.path.join(out_dir, 'sift_matches_disp.png'))
    disp_m, disp_M = disparity_range(rpc1, rpc2, x, y, w, h, H1, H2,
                                     sift_matches, A)

    # impose a minimal disparity range (TODO this is valid only with the
    # 'center' flag for register_horizontally_translation)
    disp_m = min(-3, disp_m)
    disp_M = max(3, disp_M)

    # if subsampling_factor != 1, the homographies are altered to reflect the zoom
    z = cfg['subsampling_factor']
    if z != 1:
        Z = np.diag((1 / z, 1 / z, 1))
        H1 = np.dot(Z, H1)
        H2 = np.dot(Z, H2)
        disp_m = np.floor(disp_m / z)
        disp_M = np.ceil(disp_M / z)
        hmargin = int(np.floor(hmargin / z))
        vmargin = int(np.floor(vmargin / z))

    # compute output images size
    roi = [[x, y], [x + w, y], [x + w, y + h], [x, y + h]]
    pts1 = common.points_apply_homography(H1, roi)
    x0, y0, w0, h0 = common.bounding_box2D(pts1)
    # check that the first homography maps the ROI in the positive quadrant
    np.testing.assert_allclose(np.round([x0, y0]), [hmargin, vmargin],
                               atol=.01)

    # apply homographies and do the crops
    common.image_apply_homography(out1, im1, H1, w0 + 2 * hmargin,
                                  h0 + 2 * vmargin)
    common.image_apply_homography(out2, im2, H2, w0 + 2 * hmargin,
                                  h0 + 2 * vmargin)

    return H1, H2, disp_m, disp_M
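
A sketch of a typical call, reusing compute_correction from Example #1; it assumes the cfg dictionary is already configured and all paths are placeholders:

A, m = compute_correction('img_ref.tif', 'rpc_ref.xml',
                          'img_sec.tif', 'rpc_sec.xml', 0, 0, 1000, 1000)
H1, H2, disp_min, disp_max = rectify_pair('img_ref.tif', 'img_sec.tif',
                                          'rpc_ref.xml', 'rpc_sec.xml',
                                          0, 0, 1000, 1000,
                                          'rect_ref.tif', 'rect_sec.tif',
                                          A=A, sift_matches=m)
print('disparity range:', disp_min, disp_max)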
Example #15
def rectify_pair(im1,
                 im2,
                 rpc1,
                 rpc2,
                 x,
                 y,
                 w,
                 h,
                 out1,
                 out2,
                 A=None,
                 sift_matches=None,
                 method='rpc',
                 hmargin=0,
                 vmargin=0):
    """
    Rectify a ROI in a pair of images.

    Args:
        im1, im2: paths to two image files
        rpc1, rpc2: paths to the two xml files containing RPC data
        x, y, w, h: four integers defining the rectangular ROI in the first
            image.  (x, y) is the top-left corner, and (w, h) are the dimensions
            of the rectangle.
        out1, out2: paths to the output rectified crops
        A (optional): 3x3 numpy array containing the pointing error correction
            for im2. This matrix is usually estimated with the pointing_accuracy
            module.
        sift_matches (optional): Nx4 numpy array containing a list of sift
            matches, in the full image coordinates frame
        method (default: 'rpc'): option to decide whether to use rpc or sift
            matches for the fundamental matrix estimation.
        {h,v}margin (optional): horizontal and vertical margins added on the
            sides of the rectified images

    Returns:
        H1, H2: Two 3x3 matrices representing the rectifying homographies that
        have been applied to the two original (large) images.
        disp_min, disp_max: horizontal disparity range
    """
    # read RPC data
    rpc1 = rpc_model.RPCModel(rpc1)
    rpc2 = rpc_model.RPCModel(rpc2)

    # compute real or virtual matches
    if method == 'rpc':
        # find virtual matches from RPC camera models
        matches = rpc_utils.matches_from_rpc(rpc1, rpc2, x, y, w, h,
                                             cfg['n_gcp_per_axis'])

        # correct second image coordinates with the pointing correction matrix
        if A is not None:
            matches[:, 2:] = common.points_apply_homography(
                np.linalg.inv(A), matches[:, 2:])
    else:
        matches = sift_matches

    # compute rectifying homographies
    H1, H2, F = rectification_homographies(matches, x, y, w, h, hmargin,
                                           vmargin)

    if cfg['register_with_shear']:
        # compose H2 with a horizontal shear to reduce the disparity range
        a = np.mean(rpc_utils.altitude_range(rpc1, x, y, w, h))
        lon, lat, alt = rpc_utils.ground_control_points(
            rpc1, x, y, w, h, a, a, 4)
        x1, y1 = rpc1.inverse_estimate(lon, lat, alt)[:2]
        x2, y2 = rpc2.inverse_estimate(lon, lat, alt)[:2]
        m = np.vstack([x1, y1, x2, y2]).T
        m = np.vstack({tuple(row)
                       for row in m})  # remove duplicates due to no alt range
        H2 = register_horizontally_shear(m, H1, H2)

    # compose H2 with a horizontal translation to center disp range around 0
    if sift_matches is not None:
        sift_matches = filter_matches_epipolar_constraint(
            F, sift_matches, cfg['epipolar_thresh'])
        if len(sift_matches) < 10:
            print('WARNING: no registration with less than 10 matches')
        else:
            H2 = register_horizontally_translation(sift_matches, H1, H2)

    # compute disparity range
    if cfg['debug']:
        out_dir = os.path.dirname(out1)
        np.savetxt(os.path.join(out_dir, 'sift_matches_disp.txt'),
                   sift_matches,
                   fmt='%9.3f')
        visualisation.plot_matches(
            im1, im2, rpc1, rpc2, sift_matches, x, y, w, h,
            os.path.join(out_dir, 'sift_matches_disp.png'))
    disp_m, disp_M = disparity_range(rpc1, rpc2, x, y, w, h, H1, H2,
                                     sift_matches, A)

    # compute rectifying homographies for non-epipolar mode (rectify the secondary tile only)
    if block_matching.rectify_secondary_tile_only(cfg['matching_algorithm']):
        H1_inv = np.linalg.inv(H1)
        H1 = np.eye(3)  # replace H1 with the identity matrix
        H2 = np.dot(H1_inv, H2)
        T = common.matrix_translation(-x + hmargin, -y + vmargin)
        H1 = np.dot(T, H1)
        H2 = np.dot(T, H2)

    # compute output images size
    roi = [[x, y], [x + w, y], [x + w, y + h], [x, y + h]]
    pts1 = common.points_apply_homography(H1, roi)
    x0, y0, w0, h0 = common.bounding_box2D(pts1)
    # check that the first homography maps the ROI in the positive quadrant
    np.testing.assert_allclose(np.round([x0, y0]), [hmargin, vmargin],
                               atol=.01)

    # apply homographies and do the crops
    common.image_apply_homography(out1, im1, H1, w0 + 2 * hmargin,
                                  h0 + 2 * vmargin)
    common.image_apply_homography(out2, im2, H2, w0 + 2 * hmargin,
                                  h0 + 2 * vmargin)

    if block_matching.rectify_secondary_tile_only(cfg['matching_algorithm']):
        pts_in = [[0, 0], [disp_m, 0], [disp_M, 0]]
        pts_out = common.points_apply_homography(H1_inv, pts_in)
        disp_m = pts_out[1, :] - pts_out[0, :]
        disp_M = pts_out[2, :] - pts_out[0, :]

    return H1, H2, disp_m, disp_M
Example #16
               (w, h, in_img_file, tmp_vrt))

    common.run((
        'gdalwarp -co RPB=NO -co PROFILE=GeoTIFF -r %s -co "BIGTIFF=IF_NEEDED" -co "TILED=YES" -ovr NONE -overwrite -to SRC_METHOD=NO_GEOTRANSFORM -to DST_METHOD=NO_GEOTRANSFORM -tr'
        ' %d %d %s %s') % (filt, scale_x, scale_y, tmp_vrt, out_img_file))

    try:
        # Remove aux files if any
        os.remove(out_img_file + ".aux.xml")
    except OSError:
        pass

    # Clean tmp vrt file
    os.remove(tmp_vrt)

    print("Done")

    # generate rpc file
    print("Generating {} ...".format(out_rcp_file))

    r = rpc_model.RPCModel(in_rpc_file)
    r.linScale /= scale_y
    r.linOff /= scale_y
    r.colScale /= scale_x
    r.colOff /= scale_x
    r.write(out_rcp_file)

    print("Done")

    sys.exit(0)
Example #17
def plot_matches(im1,
                 im2,
                 rpc1,
                 rpc2,
                 matches,
                 x=None,
                 y=None,
                 w=None,
                 h=None,
                 outfile=None):
    """
    Plot matches on Pleiades images

    Args:
        im1, im2: paths to full Pleiades images
        rpc1, rpc2: two  instances of the RPCModel class, or paths to xml files
            containing the rpc coefficients
        matches: 2D numpy array of size 4xN containing a list of matches (a
            list of pairs of points, each pair being represented by x1, y1, x2,
            y2). The coordinates are given in the frame of the full images.
        x, y, w, h (optional, default is None): ROI in the reference image
        outfile (optional, default is None): path to the output file. If None,
            the image is displayed using the pvflip viewer

    Returns:
        nothing; the composite image with the plotted matches is either
        displayed or copied to outfile
    """
    # if no matches, no plot
    if not matches.size:
        print("visualisation.plot_matches: nothing to plot")
        return

    # read rpcs
    if not isinstance(rpc1, rpc_model.RPCModel):
        rpc1 = rpc_model.RPCModel(rpc1)
    if not isinstance(rpc2, rpc_model.RPCModel):
        rpc2 = rpc_model.RPCModel(rpc2)

    # determine regions to crop in im1 and im2
    if x is not None:
        x1 = x
    else:
        x1 = np.min(matches[:, 0])

    if y is not None:
        y1 = y
    else:
        y1 = np.min(matches[:, 1])

    if w is not None:
        w1 = w
    else:
        w1 = np.max(matches[:, 0]) - x1

    if h is not None:
        h1 = h
    else:
        h1 = np.max(matches[:, 1]) - y1

    x2, y2, w2, h2 = rpc_utils.corresponding_roi(rpc1, rpc2, x1, y1, w1, h1)
    # x2 = np.min(matches[:, 2])
    # w2 = np.max(matches[:, 2]) - x2
    # y2 = np.min(matches[:, 3])
    # h2 = np.max(matches[:, 3]) - y2

    # # add 20 pixels offset and round. The image_crop_gdal function will round
    # # off the coordinates before it does the crops.
    # x1 -= 20; x1 = np.round(x1)
    # y1 -= 20; y1 = np.round(y1)
    # x2 -= 20; x2 = np.round(x2)
    # y2 -= 20; y2 = np.round(y2)
    # w1 += 40; w1 = np.round(w1)
    # h1 += 40; h1 = np.round(h1)
    # w2 += 40; w2 = np.round(w2)
    # h2 += 40; h2 = np.round(h2)

    # do the crops
    crop1 = common.image_qauto(common.image_crop_gdal(im1, x1, y1, w1, h1))
    crop2 = common.image_qauto(common.image_crop_gdal(im2, x2, y2, w2, h2))

    # compute matches coordinates in the cropped images
    pts1 = matches[:, 0:2] - [x1, y1]
    pts2 = matches[:, 2:4] - [x2, y2]

    # plot the matches on the two crops
    to_display = plot_matches_low_level(crop1, crop2, np.hstack((pts1, pts2)))
    if outfile is None:
        os.system('v %s &' % (to_display))
    else:
        common.run('cp %s %s' % (to_display, outfile))

    return
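
A usage sketch (placeholder paths; the text file is assumed to contain one x1 y1 x2 y2 match per row):

import numpy as np

matches = np.loadtxt('sift_matches.txt')   # Nx4 array in full-image coordinates
plot_matches('img_ref.tif', 'img_sec.tif', 'rpc_ref.xml', 'rpc_sec.xml',
             matches, x=0, y=0, w=1000, h=1000, outfile='matches.png')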