Example 1
def is_this_tile_useful(x, y, w, h, images_sizes):
    """
    Check if a tile contains valid pixels.

    Valid pixels must be found in the reference image plus at least one other image.

    Args:
        x, y, w, h (ints): 4 ints that define the coordinates of the top-left corner,
            the width and the height of a rectangular tile
        images_sizes (list): list of tuples with the height and width of the images

    Returns:
        useful (bool): whether the tile has to be processed
        mask (np.ndarray): tile validity mask, or None if the tile is discarded
    """
    # check if the tile is partly contained in at least one other image
    rpc = cfg['images'][0]['rpcm']
    for img, size in zip(cfg['images'][1:], images_sizes[1:]):
        coords = rpc_utils.corresponding_roi(rpc, img['rpcm'], x, y, w, h)
        if rectangles_intersect(coords, (0, 0, size[1], size[0])):
            break  # the tile is partly contained
    else:  # we've reached the end of the loop hence the tile is not contained
        return False, None

    roi_msk = cfg['images'][0]['roi']
    cld_msk = cfg['images'][0]['cld']
    wat_msk = cfg['images'][0]['wat']
    mask = masking.image_tile_mask(x, y, w, h, roi_msk, cld_msk, wat_msk,
                                   images_sizes[0], cfg['border_margin'])
    return True, mask
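
The for/else loop above relies on a rectangles_intersect helper that is not part of this excerpt. A minimal sketch of such an axis-aligned overlap test, assuming both rectangles are passed as (x, y, w, h) tuples as in the call above, could look like this:

def rectangles_intersect(r, s):
    """Return True if two axis-aligned (x, y, w, h) rectangles overlap."""
    rx, ry, rw, rh = r
    sx, sy, sw, sh = s
    # no overlap if one rectangle lies entirely to the left or right of the other
    if rx + rw <= sx or sx + sw <= rx:
        return False
    # no overlap if one rectangle lies entirely above or below the other
    if ry + rh <= sy or sy + sh <= ry:
        return False
    return True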
Example 2
File: sift.py Project: hnrck/s2p
def matches_on_rpc_roi(im1, im2, rpc1, rpc2, x, y, w, h):
    """
    Compute a list of SIFT matches between two images on a given roi.

    The corresponding roi in the second image is determined using the rpc
    functions.

    Args:
        im1, im2: paths to two large tif images
        rpc1, rpc2: two instances of the rpcm.RPCModel class
        x, y, w, h: four integers defining the rectangular ROI in the first
            image. (x, y) is the top-left corner, and (w, h) are the dimensions
            of the rectangle.

    Returns:
        matches: 2D numpy array containing a list of matches. Each line
            contains one pair of points, ordered as x1 y1 x2 y2.
            The coordinate system is that of the full images.
    """
    x2, y2, w2, h2 = rpc_utils.corresponding_roi(rpc1, rpc2, x, y, w, h)

    # estimate an approximate affine fundamental matrix from the rpcs
    rpc_matches = rpc_utils.matches_from_rpc(rpc1, rpc2, x, y, w, h, 5)
    F = estimation.affine_fundamental_matrix(rpc_matches)

    # sift matching method:
    method = 'relative' if cfg['relative_sift_match_thresh'] is True else 'absolute'

    # if fewer than 10 matches are found, lower thresh_dog and retry (an alternative would be ASIFT)
    thresh_dog = 0.0133
    for i in range(2):
        p1 = image_keypoints(im1, x, y, w, h, thresh_dog=thresh_dog)
        p2 = image_keypoints(im2, x2, y2, w2, h2, thresh_dog=thresh_dog)
        matches = keypoints_match(p1,
                                  p2,
                                  method,
                                  cfg['sift_match_thresh'],
                                  F,
                                  epipolar_threshold=cfg['max_pointing_error'],
                                  model='fundamental')
        if matches is not None and matches.ndim == 2 and matches.shape[0] > 10:
            break
        thresh_dog /= 2.0
    else:
        print("WARNING: sift.matches_on_rpc_roi: found no matches.")
        return None
    return matches
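
As a quick usage sketch for the function above (the image paths, ROI values, and the rpcm loading call are illustrative assumptions, not part of the original example):

import rpcm

# hypothetical input images with embedded RPC metadata
im1, im2 = 'img1.tif', 'img2.tif'
rpc1 = rpcm.rpc_from_geotiff(im1)
rpc2 = rpcm.rpc_from_geotiff(im2)

# match a 1000x1000 ROI whose top-left corner is at (2000, 3000) in im1
matches = matches_on_rpc_roi(im1, im2, rpc1, rpc2, x=2000, y=3000, w=1000, h=1000)
if matches is not None:
    print('found %d SIFT matches' % matches.shape[0])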
Example 3
def plot_matches(im1,
                 im2,
                 rpc1,
                 rpc2,
                 matches,
                 x=None,
                 y=None,
                 w=None,
                 h=None,
                 outfile=None):
    """
    Plot matches on Pleiades images

    Args:
        im1, im2: paths to full Pleiades images
        rpc1, rpc2: two instances of the rpcm.RPCModel class
        matches: 2D numpy array of size 4xN containing a list of matches (a
            list of pairs of points, each pair being represented by x1, y1, x2,
            y2). The coordinates are given in the frame of the full images.
        x, y, w, h (optional, default is None): ROI in the reference image
        outfile (optional, default is None): path to the output file. If None,
            the image is displayed with the pvflip viewer

    Returns:
        None. The visualisation is either displayed with the viewer or copied
        to outfile.
    """
    # if no matches, no plot
    if not matches.size:
        print("visualisation.plot_matches: nothing to plot")
        return

    # determine regions to crop in im1 and im2
    if x is not None:
        x1 = x
    else:
        x1 = np.min(matches[:, 0])

    if y is not None:
        y1 = y
    else:
        y1 = np.min(matches[:, 1])

    if w is not None:
        w1 = w
    else:
        w1 = np.max(matches[:, 0]) - x1

    if h is not None:
        h1 = h
    else:
        h1 = np.max(matches[:, 1]) - y1

    x2, y2, w2, h2 = rpc_utils.corresponding_roi(rpc1, rpc2, x1, y1, w1, h1)
    # x2 = np.min(matches[:, 2])
    # w2 = np.max(matches[:, 2]) - x2
    # y2 = np.min(matches[:, 3])
    # h2 = np.max(matches[:, 3]) - y2

    # # add 20 pixels offset and round. The image_crop_gdal function will round
    # # off the coordinates before it does the crops.
    # x1 -= 20; x1 = np.round(x1)
    # y1 -= 20; y1 = np.round(y1)
    # x2 -= 20; x2 = np.round(x2)
    # y2 -= 20; y2 = np.round(y2)
    # w1 += 40; w1 = np.round(w1)
    # h1 += 40; h1 = np.round(h1)
    # w2 += 40; w2 = np.round(w2)
    # h2 += 40; h2 = np.round(h2)

    # do the crops
    crop1 = common.image_qauto(common.image_crop_gdal(im1, x1, y1, w1, h1))
    crop2 = common.image_qauto(common.image_crop_gdal(im2, x2, y2, w2, h2))

    # compute matches coordinates in the cropped images
    pts1 = matches[:, 0:2] - [x1, y1]
    pts2 = matches[:, 2:4] - [x2, y2]

    # plot the matches on the two crops
    to_display = plot_matches_low_level(crop1, crop2, np.hstack((pts1, pts2)))
    if outfile is None:
        os.system('v %s &' % (to_display))
    else:
        common.run('cp %s %s' % (to_display, outfile))

    return
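
The two previous examples chain naturally: compute the matches on a ROI with matches_on_rpc_roi, then visualise them with plot_matches. A sketch reusing the hypothetical inputs from the previous snippet (the ROI values and output path are placeholders):

matches = matches_on_rpc_roi(im1, im2, rpc1, rpc2, x=2000, y=3000, w=1000, h=1000)
if matches is not None:
    plot_matches(im1, im2, rpc1, rpc2, matches,
                 x=2000, y=3000, w=1000, h=1000,
                 outfile='matches.png')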