Example #1
def transfer_map(in_map, H, x, y, w, h, zoom, out_map):
    """
    Transfer the heights computed on the rectified grid to the original
    Pleiades image grid.

    Args:
        in_map: path to the input map, usually a height map or a mask, sampled
            on the rectified grid
        H: path to txt file containing a numpy 3x3 array representing the
            rectifying homography
        x, y, w, h: four integers defining the rectangular ROI in the original
            image. (x, y) is the top-left corner, and (w, h) are the dimensions
            of the rectangle.
        zoom: zoom factor (usually 1, 2 or 4) used to produce the input height
            map
        out_map: path to the output map
    """
    # write the inverse of the resampling transform matrix. In brief it is:
    # homography * translation * zoom
    # This matrix transports the coordinates of the original cropped and
    # zoomed grid (the one desired for out_height) to the rectified cropped and
    # zoomed grid (the one we have for height)
    Z = np.diag([zoom, zoom, 1])
    A = common.matrix_translation(x, y)
    HH = np.dot(np.loadtxt(H), np.dot(A, Z))

    # apply the homography
    # write the 9 coefficients of the homography to a string, then call synflow
    # to produce the flow, then backflow to apply it
    # zero:256x256 is the iio way to create a 256x256 image filled with zeros
    hij = ' '.join(['%r' % num for num in HH.flatten()])
    common.run('synflow hom "%s" zero:%dx%d /dev/null - | BILINEAR=1 backflow - %s %s' % (
        hij, w/zoom, h/zoom, in_map, out_map))
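A minimal numpy sketch (function name hypothetical) of the matrix composition described in the comment above: the composed matrix sends a pixel of the cropped, zoomed output grid to its position on the rectified grid, which is what backflow needs in order to pull values from in_map.

import numpy as np

def compose_resampling_transform(H, x, y, zoom):
    """Compose the rectifying homography with the crop translation and zoom."""
    Z = np.diag([zoom, zoom, 1])                      # zoom factor
    T = np.array([[1, 0, x], [0, 1, y], [0, 0, 1]])   # crop translation
    return H @ T @ Z

# toy example: identity rectification, crop at (100, 200), zoom 2
HH = compose_resampling_transform(np.eye(3), 100, 200, 2)
p = np.array([10, 20, 1])          # pixel of the output grid, homogeneous
q = HH @ p
print(q[:2] / q[2])                # -> [120. 240.] on the rectified grid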
Example #2
def cloud_water_image_domain(x,
                             y,
                             w,
                             h,
                             rpc,
                             roi_gml=None,
                             cld_gml=None,
                             wat_msk=None):
    """
    Compute a mask for pixels masked by clouds, water, or out of image domain.

    Args:
        x, y, w, h: coordinates of the ROI
        rpc: path to the xml file containing the rpc coefficients of the image
        roi_gml (optional): path to a gml file containing a mask
            defining the area contained in the full image
        cld_gml (optional): path to a gml file containing a mask
            defining the areas covered by clouds
        wat_msk (optional): path to an image file containing a water mask

    Returns:
        2D array containing the output binary mask. 0 indicates masked pixels,
        1 visible pixels.
    """
    # coefficients of the transformation associated to the crop
    H = common.matrix_translation(-x, -y)
    hij = ' '.join([str(el) for el in H.flatten()])

    mask = np.ones((h, w), dtype=bool)

    if roi_gml is not None:  # image domain mask (polygons)
        tmp = common.tmpfile('.png')
        subprocess.check_call('cldmask %d %d -h "%s" %s %s' %
                              (w, h, hij, roi_gml, tmp),
                              shell=True)

        f = gdal.Open(tmp)
        mask = np.logical_and(mask, f.ReadAsArray())
        f = None  # this is the gdal way of closing files

    if not mask.any():
        return mask

    if cld_gml is not None:  # cloud mask (polygons)
        tmp = common.tmpfile('.png')
        subprocess.check_call('cldmask %d %d -h "%s" %s %s' %
                              (w, h, hij, cld_gml, tmp),
                              shell=True)
        f = gdal.Open(tmp)
        mask = np.logical_and(mask, ~f.ReadAsArray().astype(bool))
        f = None  # this is the gdal way of closing files

    if not mask.any():
        return mask

    if wat_msk is not None:  # water mask (raster)
        f = gdal.Open(wat_msk)
        mask = np.logical_and(mask, f.ReadAsArray(x, y, w, h))
        f = None  # this is the gdal way of closing files

    return mask
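A toy illustration of how the masks are combined (pure numpy, made-up arrays): the image-domain mask is ANDed directly, while the cloud mask is inverted before being ANDed, so cloudy pixels end up as 0.

import numpy as np

h, w = 4, 6
mask = np.ones((h, w), dtype=bool)
domain = np.zeros((h, w), dtype=np.uint8)
domain[:, 1:] = 1                      # first column falls outside the image domain
clouds = np.zeros((h, w), dtype=np.uint8)
clouds[0, :] = 1                       # first row is cloudy
mask = np.logical_and(mask, domain)
mask = np.logical_and(mask, ~clouds.astype(bool))
print(mask.astype(int))                # 0 = masked, 1 = visible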
Example #3
File: s2p.py Project: zousiyuan/s2p
def heights_to_ply(tile):
    """
    Generate a ply cloud.

    Args:
        tile: a dictionary that provides all you need to process a tile
    """
    # merge the n-1 height maps of the tile (n = nb of images)
    heights_fusion(tile)

    # compute a ply from the merged height map
    out_dir = tile['dir']
    x, y, w, h = tile['coordinates']
    z = cfg['subsampling_factor']
    plyfile = os.path.join(out_dir, 'cloud.ply')
    plyextrema = os.path.join(out_dir, 'plyextrema.txt')
    height_map = os.path.join(out_dir, 'height_map.tif')
    if cfg['skip_existing'] and os.path.isfile(plyfile):
        print('ply file already exists for tile {} {}'.format(x, y))
        return

    # H is the homography transforming the coordinates system of the original
    # full size image into the coordinates system of the crop
    H = np.dot(np.diag([1 / z, 1 / z, 1]), common.matrix_translation(-x, -y))
    colors = os.path.join(out_dir, 'ref.png')
    if cfg['images'][0]['clr']:
        common.image_crop_gdal(cfg['images'][0]['clr'], x, y, w, h, colors)
    else:
        common.image_qauto(
            common.image_crop_gdal(cfg['images'][0]['img'], x, y, w, h),
            colors)
    common.image_safe_zoom_fft(colors, z, colors)
    triangulation.height_map_to_point_cloud(plyfile,
                                            height_map,
                                            cfg['images'][0]['rpc'],
                                            H,
                                            colors,
                                            utm_zone=cfg['utm_zone'],
                                            llbbx=tuple(cfg['ll_bbx']))

    # compute the point cloud extrema (xmin, xmax, ymin, ymax)
    common.run("plyextrema %s %s" % (plyfile, plyextrema))

    if cfg['clean_intermediate']:
        common.remove(height_map)
        common.remove(colors)
        common.remove(
            os.path.join(out_dir, 'cloud_water_image_domain_mask.png'))
Example #4
def register_horizontally_translation(matches, H1, H2, flag='center'):
    """
    Adjust rectifying homographies with a translation to modify the disparity range.

    Args:
        matches: list of pairs of 2D points, stored as a Nx4 numpy array
        H1, H2: two homographies, stored as numpy 3x3 matrices
        flag: option needed to control how to modify the disparity range:
            'center': move the barycenter of disparities of matches to zero
            'positive': make all the disparities positive
            'negative': make all the disparities negative. Required for
                Hirschmuller stereo (java)

    Returns:
        H2: corrected homography H2

    The matches are provided in the original images coordinate system. By
    transforming these coordinates with the provided homographies, we obtain
    matches whose disparity is only along the x-axis. The second homography H2
    is corrected with a horizontal translation to obtain the desired property
    on the disparity range.
    """
    # transform the matches according to the homographies
    p1 = common.points_apply_homography(H1, matches[:, :2])
    x1 = p1[:, 0]
    y1 = p1[:, 1]
    p2 = common.points_apply_homography(H2, matches[:, 2:])
    x2 = p2[:, 0]
    y2 = p2[:, 1]

    # for debug, print the vertical disparities. Should be zero.
    if cfg['debug']:
        print("Residual vertical disparities: max, min, mean. Should be zero")
        print(np.max(y2 - y1), np.min(y2 - y1), np.mean(y2 - y1))

    # compute the disparity offset according to selected option
    t = 0
    if (flag == 'center'):
        t = np.mean(x2 - x1)
    if (flag == 'positive'):
        t = np.min(x2 - x1)
    if (flag == 'negative'):
        t = np.max(x2 - x1)

    # correct H2 with a translation
    return np.dot(common.matrix_translation(-t, 0), H2)
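A self-contained toy check of the 'center' option, redoing the translation composition with plain numpy rather than calling the function (which needs the common and cfg modules): after correcting H2, the mean disparity of the matches is zero.

import numpy as np

matches = np.array([[0., 0., 5., 0.],
                    [1., 0., 8., 0.],
                    [2., 0., 5., 0.]])                 # disparities x2 - x1 = 5, 7, 3
H1 = H2 = np.eye(3)
t = np.mean(matches[:, 2] - matches[:, 0])             # 5.0
H2_corr = np.array([[1., 0., -t], [0., 1., 0.], [0., 0., 1.]]) @ H2
x2_new = (H2_corr @ np.c_[matches[:, 2:], np.ones(3)].T)[0]
print(np.mean(x2_new - matches[:, 0]))                 # -> 0.0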
Example #5
File: s2p.py Project: mnhrdt/s2p
def heights_to_ply(tile):
    """
    Generate a ply cloud.

    Args:
        tile: a dictionary that provides all you need to process a tile
    """
    # merge the n-1 height maps of the tile (n = nb of images)
    heights_fusion(tile)

    # compute a ply from the merged height map
    out_dir = tile['dir']
    x, y, w, h = tile['coordinates']
    z = cfg['subsampling_factor']
    plyfile = os.path.join(out_dir, 'cloud.ply')
    plyextrema = os.path.join(out_dir, 'plyextrema.txt')
    height_map = os.path.join(out_dir, 'height_map.tif')
    if cfg['skip_existing'] and os.path.isfile(plyfile):
        print('ply file already exists for tile {} {}'.format(x, y))
        return

    # H is the homography transforming the coordinates system of the original
    # full size image into the coordinates system of the crop
    H = np.dot(np.diag([1 / z, 1 / z, 1]), common.matrix_translation(-x, -y))
    colors = os.path.join(out_dir, 'ref.png')
    if cfg['images'][0]['clr']:
        common.image_crop_gdal(cfg['images'][0]['clr'], x, y, w, h, colors)
    else:
        common.image_qauto(common.image_crop_gdal(cfg['images'][0]['img'], x, y,
                                                 w, h), colors)
    common.image_safe_zoom_fft(colors, z, colors)
    triangulation.height_map_to_point_cloud(plyfile, height_map,
                                            cfg['images'][0]['rpc'], H, colors,
                                            utm_zone=cfg['utm_zone'],
                                            llbbx=tuple(cfg['ll_bbx']))

    # compute the point cloud extrema (xmin, xmax, ymin, ymax)
    common.run("plyextrema %s %s" % (plyfile, plyextrema))

    if cfg['clean_intermediate']:
        common.remove(height_map)
        common.remove(colors)
        common.remove(os.path.join(out_dir,
                                   'cloud_water_image_domain_mask.png'))
Example #6
def rectification_homographies(matches, x, y, w, h, hmargin=0, vmargin=0):
    """
    Computes rectifying homographies from point matches for a given ROI.

    The affine fundamental matrix F is estimated with the gold-standard
    algorithm, then two rectifying similarities (rotation, zoom, translation)
    are computed directly from F.

    Args:
        matches: numpy array of shape (n, 4) containing a list of 2D point
            correspondences between the two images.
        x, y, w, h: four integers defining the rectangular ROI in the first
            image. (x, y) is the top-left corner, and (w, h) are the dimensions
            of the rectangle.
        {h,v}margin: translations added to the rectifying similarities to extend the
            horizontal and vertical footprint of the rectified images

    Returns:
        S1, S2, F: three numpy arrays of shape (3, 3) representing the
        two rectifying similarities to be applied to the two images and the
        corresponding affine fundamental matrix.
    """
    # estimate the affine fundamental matrix with the Gold standard algorithm
    F = estimation.affine_fundamental_matrix(matches)

    # compute rectifying similarities
    S1, S2 = estimation.rectifying_similarities_from_affine_fundamental_matrix(
        F, cfg['debug'])

    if cfg['debug']:
        y1 = common.points_apply_homography(S1, matches[:, :2])[:, 1]
        y2 = common.points_apply_homography(S2, matches[:, 2:])[:, 1]
        err = np.abs(y1 - y2)
        print("max, min, mean rectification error on point matches: ", end=' ')
        print(np.max(err), np.min(err), np.mean(err))

    # pull back top-left corner of the ROI to the origin (plus margin)
    pts = common.points_apply_homography(
        S1, [[x, y], [x + w, y], [x + w, y + h], [x, y + h]])
    x0, y0 = common.bounding_box2D(pts)[:2]
    T = common.matrix_translation(-x0 + hmargin, -y0 + vmargin)
    return np.dot(T, S1), np.dot(T, S2), F
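A toy illustration of the final translation step (S1 is a made-up similarity): after S1 maps the ROI corners, the translation T pulls the top-left corner of their bounding box to (hmargin, vmargin).

import numpy as np

S1 = np.array([[0.7, -0.7, 3.0],
               [0.7,  0.7, 1.0],
               [0.0,  0.0, 1.0]])
x, y, w, h = 10, 20, 100, 50
hmargin, vmargin = 8, 4
roi = np.array([[x, y], [x + w, y], [x + w, y + h], [x, y + h]], dtype=float)
pts = (S1 @ np.c_[roi, np.ones(4)].T).T[:, :2]
x0, y0 = pts.min(axis=0)
T = np.array([[1., 0., -x0 + hmargin], [0., 1., -y0 + vmargin], [0., 0., 1.]])
shifted = (T @ np.c_[pts, np.ones(4)].T).T[:, :2]
print(shifted.min(axis=0))             # -> [8. 4.]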
Example #7
def rectification_homographies(matches, x, y, w, h, hmargin=0, vmargin=0):
    """
    Computes rectifying homographies from point matches for a given ROI.

    The affine fundamental matrix F is estimated with the gold-standard
    algorithm, then two rectifying similarities (rotation, zoom, translation)
    are computed directly from F.

    Args:
        matches: numpy array of shape (n, 4) containing a list of 2D point
            correspondences between the two images.
        x, y, w, h: four integers defining the rectangular ROI in the first
            image. (x, y) is the top-left corner, and (w, h) are the dimensions
            of the rectangle.
        {h,v}margin: translations added to the rectifying similarities to extend the
            horizontal and vertical footprint of the rectified images

    Returns:
        S1, S2, F: three numpy arrays of shape (3, 3) representing the
        two rectifying similarities to be applied to the two images and the
        corresponding affine fundamental matrix.
    """
    # estimate the affine fundamental matrix with the Gold standard algorithm
    F = estimation.affine_fundamental_matrix(matches)

    # compute rectifying similarities
    S1, S2 = estimation.rectifying_similarities_from_affine_fundamental_matrix(F, cfg['debug'])

    if cfg['debug']:
        y1 = common.points_apply_homography(S1, matches[:, :2])[:, 1]
        y2 = common.points_apply_homography(S2, matches[:, 2:])[:, 1]
        err = np.abs(y1 - y2)
        print("max, min, mean rectification error on point matches: ", end=' ')
        print(np.max(err), np.min(err), np.mean(err))

    # pull back top-left corner of the ROI to the origin (plus margin)
    pts = common.points_apply_homography(S1, [[x, y], [x+w, y], [x+w, y+h], [x, y+h]])
    x0, y0 = common.bounding_box2D(pts)[:2]
    T = common.matrix_translation(-x0 + hmargin, -y0 + vmargin)
    return np.dot(T, S1), np.dot(T, S2), F
Example #8
def rectify_pair(im1,
                 im2,
                 rpc1,
                 rpc2,
                 x,
                 y,
                 w,
                 h,
                 out1,
                 out2,
                 A=None,
                 sift_matches=None,
                 method='rpc',
                 hmargin=0,
                 vmargin=0):
    """
    Rectify a ROI in a pair of images.

    Args:
        im1, im2: paths to two image files
        rpc1, rpc2: paths to the two xml files containing RPC data
        x, y, w, h: four integers defining the rectangular ROI in the first
            image.  (x, y) is the top-left corner, and (w, h) are the dimensions
            of the rectangle.
        out1, out2: paths to the output rectified crops
        A (optional): 3x3 numpy array containing the pointing error correction
            for im2. This matrix is usually estimated with the pointing_accuracy
            module.
        sift_matches (optional): Nx4 numpy array containing a list of sift
            matches, in the full image coordinates frame
        method (default: 'rpc'): option to decide whether to use rpc or sift
            matches for the fundamental matrix estimation.
        {h,v}margin (optional): horizontal and vertical margins added on the
            sides of the rectified images

    Returns:
        H1, H2: Two 3x3 matrices representing the rectifying homographies that
        have been applied to the two original (large) images.
        disp_min, disp_max: horizontal disparity range
    """
    # read RPC data
    rpc1 = rpc_model.RPCModel(rpc1)
    rpc2 = rpc_model.RPCModel(rpc2)

    # compute real or virtual matches
    if method == 'rpc':
        # find virtual matches from RPC camera models
        matches = rpc_utils.matches_from_rpc(rpc1, rpc2, x, y, w, h,
                                             cfg['n_gcp_per_axis'])

        # correct second image coordinates with the pointing correction matrix
        if A is not None:
            matches[:, 2:] = common.points_apply_homography(
                np.linalg.inv(A), matches[:, 2:])
    else:
        matches = sift_matches

    # compute rectifying homographies
    H1, H2, F = rectification_homographies(matches, x, y, w, h, hmargin,
                                           vmargin)

    if cfg['register_with_shear']:
        # compose H2 with a horizontal shear to reduce the disparity range
        a = np.mean(rpc_utils.altitude_range(rpc1, x, y, w, h))
        lon, lat, alt = rpc_utils.ground_control_points(
            rpc1, x, y, w, h, a, a, 4)
        x1, y1 = rpc1.inverse_estimate(lon, lat, alt)[:2]
        x2, y2 = rpc2.inverse_estimate(lon, lat, alt)[:2]
        m = np.vstack([x1, y1, x2, y2]).T
        m = np.vstack({tuple(row)
                       for row in m})  # remove duplicates due to no alt range
        H2 = register_horizontally_shear(m, H1, H2)

    # compose H2 with a horizontal translation to center disp range around 0
    if sift_matches is not None:
        sift_matches = filter_matches_epipolar_constraint(
            F, sift_matches, cfg['epipolar_thresh'])
        if len(sift_matches) < 10:
            print('WARNING: no registration with less than 10 matches')
        else:
            H2 = register_horizontally_translation(sift_matches, H1, H2)

    # compute disparity range
    if cfg['debug']:
        out_dir = os.path.dirname(out1)
        np.savetxt(os.path.join(out_dir, 'sift_matches_disp.txt'),
                   sift_matches,
                   fmt='%9.3f')
        visualisation.plot_matches(
            im1, im2, rpc1, rpc2, sift_matches, x, y, w, h,
            os.path.join(out_dir, 'sift_matches_disp.png'))
    disp_m, disp_M = disparity_range(rpc1, rpc2, x, y, w, h, H1, H2,
                                     sift_matches, A)

    # compute rectifying homographies for non-epipolar mode (rectify the secondary tile only)
    if block_matching.rectify_secondary_tile_only(cfg['matching_algorithm']):
        H1_inv = np.linalg.inv(H1)
        H1 = np.eye(3)  # replace H1 with the identity
        H2 = np.dot(H1_inv, H2)
        T = common.matrix_translation(-x + hmargin, -y + vmargin)
        H1 = np.dot(T, H1)
        H2 = np.dot(T, H2)

    # compute output images size
    roi = [[x, y], [x + w, y], [x + w, y + h], [x, y + h]]
    pts1 = common.points_apply_homography(H1, roi)
    x0, y0, w0, h0 = common.bounding_box2D(pts1)
    # check that the first homography maps the ROI in the positive quadrant
    np.testing.assert_allclose(np.round([x0, y0]), [hmargin, vmargin],
                               atol=.01)

    # apply homographies and do the crops
    common.image_apply_homography(out1, im1, H1, w0 + 2 * hmargin,
                                  h0 + 2 * vmargin)
    common.image_apply_homography(out2, im2, H2, w0 + 2 * hmargin,
                                  h0 + 2 * vmargin)

    if block_matching.rectify_secondary_tile_only(cfg['matching_algorithm']):
        pts_in = [[0, 0], [disp_m, 0], [disp_M, 0]]
        pts_out = common.points_apply_homography(H1_inv, pts_in)
        disp_m = pts_out[1, :] - pts_out[0, :]
        disp_M = pts_out[2, :] - pts_out[0, :]

    return H1, H2, disp_m, disp_M
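A hypothetical call, with all paths and ROI values made up for illustration; in practice the inputs come from the tile configuration:

H1, H2, disp_min, disp_max = rectify_pair(
    'img_ref.tif', 'img_sec.tif', 'rpc_ref.xml', 'rpc_sec.xml',
    x=5000, y=8000, w=1000, h=1000,
    out1='rectified_ref.tif', out2='rectified_sec.tif',
    method='rpc', hmargin=50, vmargin=10)
print(disp_min, disp_max)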
Example #9
File: s2p.py Project: mnhrdt/s2p
def multidisparities_to_ply(tile):
    """
    Compute a point cloud from the disparity maps of N-pairs of image tiles.

    Args:
        tile: dictionary containing the information needed to process a tile.

    # There is no guarantee that this function works with z!=1
    """
    out_dir = os.path.join(tile['dir'])
    ply_file = os.path.join(out_dir, 'cloud.ply')
    plyextrema = os.path.join(out_dir, 'plyextrema.txt')
    x, y, w, h = tile['coordinates']

    rpc_ref = cfg['images'][0]['rpc']
    disp_list = list()
    rpc_list = list()

    if cfg['skip_existing'] and os.path.isfile(ply_file):
        print('triangulation done on tile {} {}'.format(x, y))
        return

    mask_orig = os.path.join(out_dir, 'cloud_water_image_domain_mask.png')

    print('triangulating tile {} {}...'.format(x, y))
    n = len(cfg['images']) - 1
    for i in range(n):
        pair = 'pair_%d' % (i+1)
        H_ref = os.path.join(out_dir, pair, 'H_ref.txt')
        H_sec = os.path.join(out_dir, pair, 'H_sec.txt')
        disp = os.path.join(out_dir, pair, 'rectified_disp.tif')
        mask_rect = os.path.join(out_dir, pair, 'rectified_mask.png')
        disp2D = os.path.join(out_dir, pair, 'disp2D.tif')
        rpc_sec = cfg['images'][i+1]['rpc']

        if os.path.exists(disp):
            # homography for warp
            T = common.matrix_translation(x, y)
            hom_ref = np.loadtxt(H_ref)
            hom_ref_shift = np.dot(hom_ref, T)

            # homography for 1D to 2D conversion
            hom_sec = np.loadtxt(H_sec)
            if cfg["use_global_pointing_for_geometric_triangulation"] is True:
                pointing = os.path.join(cfg['out_dir'], 'global_pointing_%s.txt' % pair)
                hom_pointing = np.loadtxt(pointing)
                hom_sec = np.dot(hom_sec,np.linalg.inv(hom_pointing))
            hom_sec_shift_inv = np.linalg.inv(hom_sec)

            h1 = " ".join(str(x) for x in hom_ref_shift.flatten())
            h2 = " ".join(str(x) for x in hom_sec_shift_inv.flatten())

            # relative disparity map to absolute disparity map
            tmp_abs = common.tmpfile('.tif')
            os.environ["PLAMBDA_GETPIXEL"] = "0"
            common.run('plambda %s %s "y 0 = nan x[0] :i + x[1] :j + 1 3 njoin if" -o %s' % (disp, mask_rect, tmp_abs))

            # 1d to 2d conversion
            tmp_1d_to_2d = common.tmpfile('.tif')
            common.run('plambda %s "%s 9 njoin x mprod" -o %s' % (tmp_abs, h2, tmp_1d_to_2d))

            # warp
            tmp_warp = common.tmpfile('.tif')
            common.run('homwarp -o 2 "%s" %d %d %s %s' % (h1, w, h, tmp_1d_to_2d, tmp_warp))

            # set masked value to NaN
            exp = 'y 0 = nan x if'
            common.run('plambda %s %s "%s" -o %s' % (tmp_warp, mask_orig, exp, disp2D))
            # disp2D contains positions in the secondary image

            # added input data for triangulation module
            disp_list.append(disp2D)
            rpc_list.append(rpc_sec)

            if cfg['clean_intermediate']:
                common.remove(H_ref)
                common.remove(H_sec)
                common.remove(disp)
                common.remove(mask_rect)
                common.remove(mask_orig)

    colors = os.path.join(out_dir, 'ref.png')
    if cfg['images'][0]['clr']:
        common.image_crop_gdal(cfg['images'][0]['clr'], x, y, w, h, colors)
    else:
        common.image_qauto(common.image_crop_gdal(cfg['images'][0]['img'], x, y,
                                                 w, h), colors)

    # compute the point cloud
    triangulation.multidisp_map_to_point_cloud(ply_file, disp_list, rpc_ref, rpc_list,
                                               colors,
                                               utm_zone=cfg['utm_zone'],
                                               llbbx=tuple(cfg['ll_bbx']),
                                               xybbx=(x, x+w, y, y+h))

    # compute the point cloud extrema (xmin, xmax, ymin, ymax)
    common.run("plyextrema %s %s" % (ply_file, plyextrema))

    if cfg['clean_intermediate']:
        common.remove(colors)
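A plain-numpy sketch (the function name is hypothetical) of what the plambda steps above compute for one pair: the relative 1-D disparity is turned into an absolute position on the rectified secondary grid, then mapped back to the full secondary image frame with the inverse of the H_sec homography; masked pixels become NaN.

import numpy as np

def disp_to_sec_coords(disp, mask, H_sec_inv):
    """disp: (h, w, 2) relative disparities, mask: (h, w), H_sec_inv: 3x3."""
    h, w = mask.shape
    jj, ii = np.meshgrid(np.arange(h), np.arange(w), indexing='ij')
    # absolute homogeneous position on the rectified secondary grid
    pts = np.stack([disp[..., 0] + ii, disp[..., 1] + jj, np.ones((h, w))], axis=-1)
    out = pts @ H_sec_inv.T                 # back to the full secondary image frame
    out = out[..., :2] / out[..., 2:]
    out[mask == 0] = np.nan
    return out

# toy usage with a flat disparity of +3 pixels and identity homography
d = np.zeros((2, 4, 2)); d[..., 0] = 3
print(disp_to_sec_coords(d, np.ones((2, 4)), np.eye(3))[0, 0])   # -> [3. 0.]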
Example #10
def local_translation_rectified(r1, r2, x, y, w, h, m):
    """
    Estimates the optimal translation to minimise the relative pointing error
    on a given tile.

    Args:
        r1, r2: two instances of the rpc_model.RPCModel class
        x, y, w, h: region of interest in the reference image (r1)
        m: Nx4 numpy array containing a list of matches, one per line. Each
            match is given by (p1, p2, q1, q2) where (p1, p2) is a point of the
            reference view and (q1, q2) is the corresponding point in the
            secondary view.

    Returns:
        3x3 numpy array containing the homogeneous representation of the
        optimal planar translation, to be applied to the secondary image in
        order to correct the pointing error.
    """
    # estimate the affine fundamental matrix between the two views
    n = cfg['n_gcp_per_axis']
    rpc_matches = rpc_utils.matches_from_rpc(r1, r2, x, y, w, h, n)
    F = estimation.affine_fundamental_matrix(rpc_matches)

    # Compute rectification homographies (as in local_translation_rotation)
    hmargin = cfg['horizontal_margin']
    vmargin = cfg['vertical_margin']
    S1, S2, F_rect = rectification.rectification_homographies(rpc_matches, x, y, w, h, hmargin, vmargin)

    N = len(m)
    # Apply rectification on image 1
    S1p1 = common.points_apply_homography(S1, m[:, 0:2])
    S1p1 = np.column_stack((S1p1, np.ones((N, 1))))
    print("Points 1 rectified")
    print(S1p1[:10])
    # Apply rectification on image 2
    S2p2 = common.points_apply_homography(S2, m[:, 2:4])
    S2p2 = np.column_stack((S2p2, np.ones((N, 1))))
    print("Points 2 rectified")
    print(S2p2[:10])

    # Compute F in the rectified space
    rect_matches = np.column_stack((S1p1[:,0:2], S2p2[:, 0:2]))
    F_rect2 = estimation.affine_fundamental_matrix(rect_matches)

    # Compute epipolar lines
    FS1p1 = np.dot(F_rect2, S1p1.T).T
    print(FS1p1[:10])

    # Normalize epipolar lines
    c1 = -FS1p1[:, 1]
    FS1p1_norm = FS1p1/c1[:, np.newaxis]
    print(FS1p1_norm[:10])


    b_ = np.abs(S2p2[:, 1] - S1p1[:, 1])
    t_med = np.median(b_)
    print(t_med)

    t_med2 = np.sort(b_)[int(N/2)]
    print("t_med2", t_med2)

    # Compute epipolar lines without rectification
    p1 = np.column_stack((m[:,0:2],np.ones((N,1))))
    Fp1 = np.dot(F, p1.T).T

    # Compute normal vector to epipolar lines
    ab = np.array([Fp1[0][0], Fp1[0][1]])
    ab = ab/np.linalg.norm(ab)

    tx = t_med*ab[0]
    ty = t_med*ab[1]
    print(tx, ty)

    # Get translation in not rectified image
    T = common.matrix_translation(0, -t_med)
    T = np.linalg.inv(S2).dot(T).dot(S2)
    # T = np.dot(S2_inv, T)
    print(T)

    # the correction to be applied to the second view is the opposite
    A = np.array([[1, 0, -tx],
                  [0, 1, -ty],
                  [0, 0, 1]])
    return A, F
Example #11
def rectify_pair(im1, im2, rpc1, rpc2, x, y, w, h, out1, out2, A=None,
                 sift_matches=None, method='rpc', hmargin=0, vmargin=0):
    """
    Rectify a ROI in a pair of images.

    Args:
        im1, im2: paths to two image files
        rpc1, rpc2: paths to the two xml files containing RPC data
        x, y, w, h: four integers defining the rectangular ROI in the first
            image.  (x, y) is the top-left corner, and (w, h) are the dimensions
            of the rectangle.
        out1, out2: paths to the output rectified crops
        A (optional): 3x3 numpy array containing the pointing error correction
            for im2. This matrix is usually estimated with the pointing_accuracy
            module.
        sift_matches (optional): Nx4 numpy array containing a list of sift
            matches, in the full image coordinates frame
        method (default: 'rpc'): option to decide whether to use rpc or sift
            matches for the fundamental matrix estimation.
        {h,v}margin (optional): horizontal and vertical margins added on the
            sides of the rectified images

        This function uses the parameter subsampling_factor from the
        config module. If the factor z > 1 then the output images will
        be subsampled by a factor z. The output matrices H1, H2, and the
        ranges are also updated accordingly:
        Hi = Z * Hi with Z = diag(1/z, 1/z, 1) and
        disp_min = disp_min / z  (resp _max)

    Returns:
        H1, H2: Two 3x3 matrices representing the rectifying homographies that
        have been applied to the two original (large) images.
        disp_min, disp_max: horizontal disparity range
    """
    # read RPC data
    rpc1 = rpc_model.RPCModel(rpc1)
    rpc2 = rpc_model.RPCModel(rpc2)

    # compute real or virtual matches
    if method == 'rpc':
        # find virtual matches from RPC camera models
        matches = rpc_utils.matches_from_rpc(rpc1, rpc2, x, y, w, h,
                                             cfg['n_gcp_per_axis'])

        # correct second image coordinates with the pointing correction matrix
        if A is not None:
            matches[:, 2:] = common.points_apply_homography(np.linalg.inv(A),
                                                            matches[:, 2:])
    else:
        matches = sift_matches

    # compute rectifying homographies
    H1, H2, F = rectification_homographies(matches, x, y, w, h, hmargin, vmargin)

    if cfg['register_with_shear']:
        # compose H2 with a horizontal shear to reduce the disparity range
        a = np.mean(rpc_utils.altitude_range(rpc1, x, y, w, h))
        lon, lat, alt = rpc_utils.ground_control_points(rpc1, x, y, w, h, a, a, 4)
        x1, y1 = rpc1.inverse_estimate(lon, lat, alt)[:2]
        x2, y2 = rpc2.inverse_estimate(lon, lat, alt)[:2]
        m = np.vstack([x1, y1, x2, y2]).T
        m = np.vstack({tuple(row) for row in m})  # remove duplicates due to no alt range
        H2 = register_horizontally_shear(m, H1, H2)

    # compose H2 with a horizontal translation to center disp range around 0
    if sift_matches is not None:
        sift_matches = filter_matches_epipolar_constraint(F, sift_matches,
                                                          cfg['epipolar_thresh'])
        if len(sift_matches) < 10:
            print('WARNING: no registration with less than 10 matches')
        else:
            H2 = register_horizontally_translation(sift_matches, H1, H2)

    # compute disparity range
    if cfg['debug']:
        out_dir = os.path.dirname(out1)
        np.savetxt(os.path.join(out_dir, 'sift_matches_disp.txt'),
                   sift_matches, fmt='%9.3f')
        visualisation.plot_matches(im1, im2, rpc1, rpc2, sift_matches, x, y, w, h,
                                   os.path.join(out_dir, 'sift_matches_disp.png'))
    disp_m, disp_M = disparity_range(rpc1, rpc2, x, y, w, h, H1, H2,
                                     sift_matches, A)

    # impose a minimal disparity range (TODO this is valid only with the
    # 'center' flag for register_horizontally_translation)
    disp_m = min(-3, disp_m)
    disp_M = max(3, disp_M)

    # compute rectifying homographies for non-epipolar mode (rectify the secondary tile only)
    if block_matching.rectify_secondary_tile_only(cfg['matching_algorithm']):
        H1_inv = np.linalg.inv(H1)
        H1 = np.eye(3)  # replace H1 with the identity
        H2 = np.dot(H1_inv,H2)
        T = common.matrix_translation(-x + hmargin, -y + vmargin)
        H1 = np.dot(T, H1)
        H2 = np.dot(T, H2)

    # if the subsampling factor z is not 1, the homographies are altered to reflect the zoom
    z = cfg['subsampling_factor']
    if z != 1:
        Z = np.diag((1/z, 1/z, 1))
        H1 = np.dot(Z, H1)
        H2 = np.dot(Z, H2)
        disp_m = np.floor(disp_m / z)
        disp_M = np.ceil(disp_M / z)
        hmargin = int(np.floor(hmargin / z))
        vmargin = int(np.floor(vmargin / z))

    # compute output images size
    roi = [[x, y], [x+w, y], [x+w, y+h], [x, y+h]]
    pts1 = common.points_apply_homography(H1, roi)
    x0, y0, w0, h0 = common.bounding_box2D(pts1)
    # check that the first homography maps the ROI in the positive quadrant
    np.testing.assert_allclose(np.round([x0, y0]), [hmargin, vmargin], atol=.01)

    # apply homographies and do the crops
    common.image_apply_homography(out1, im1, H1, w0 + 2*hmargin, h0 + 2*vmargin)
    common.image_apply_homography(out2, im2, H2, w0 + 2*hmargin, h0 + 2*vmargin)

    if cfg['disp_min'] is not None: disp_m = cfg['disp_min']
    if cfg['disp_max'] is not None: disp_M = cfg['disp_max']

    if block_matching.rectify_secondary_tile_only(cfg['matching_algorithm']):
        pts_in = [[0, 0], [disp_m, 0], [disp_M, 0]]
        pts_out = common.points_apply_homography(H1_inv,
                                                 pts_in)
        disp_m = pts_out[1,:] - pts_out[0,:]
        disp_M = pts_out[2,:] - pts_out[0,:]

    return H1, H2, disp_m, disp_M
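A toy check of the subsampling adjustment above: composing Z = diag(1/z, 1/z, 1) with a homography divides the output coordinates by z, which is why the disparity range is divided by the same factor.

import numpy as np

z = 2
H1 = np.array([[1., 0., 40.], [0., 1., 10.], [0., 0., 1.]])
Z = np.diag((1 / z, 1 / z, 1))
p = np.array([100., 50., 1.])
print((H1 @ p)[:2])            # -> [140.  60.]
print((Z @ H1 @ p)[:2])        # -> [70. 30.]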
Example #12
def cloud_water_image_domain(x,
                             y,
                             w,
                             h,
                             rpc,
                             roi_gml=None,
                             cld_gml=None,
                             wat_msk=None,
                             use_srtm_for_water=False):
    """
    Compute a mask for pixels masked by clouds, water, or out of image domain.

    Args:
        x, y, w, h: coordinates of the ROI
        rpc: path to the xml file containing the rpc coefficients of the image
            RPC model is used with SRTM data to derive the water mask
        roi_gml (optional): path to a gml file containing a mask
            defining the area contained in the full image
        cld_gml (optional): path to a gml file containing a mask
            defining the areas covered by clouds
        wat_msk (optional): path to an image file containing a water mask

    Returns:
        2D array containing the output binary mask. 0 indicates masked pixels,
        1 visible pixels.
    """
    # coefficients of the transformation associated to the crop and zoom
    z = cfg['subsampling_factor']
    H = np.dot(np.diag((1 / z, 1 / z, 1)), common.matrix_translation(-x, -y))
    hij = ' '.join([str(el) for el in H.flatten()])

    w, h = int(w / z), int(h / z)
    mask = np.ones((h, w), dtype=bool)

    if roi_gml is not None:  # image domain mask (polygons)
        tmp = common.tmpfile('.png')
        subprocess.check_call('cldmask %d %d -h "%s" %s %s' %
                              (w, h, hij, roi_gml, tmp),
                              shell=True)

        f = gdal.Open(tmp)
        mask = np.logical_and(mask, f.ReadAsArray())
        f = None  # this is the gdal way of closing files

    if not mask.any():
        return mask

    if cld_gml is not None:  # cloud mask (polygons)
        tmp = common.tmpfile('.png')
        subprocess.check_call('cldmask %d %d -h "%s" %s %s' %
                              (w, h, hij, cld_gml, tmp),
                              shell=True)
        f = gdal.Open(tmp)
        mask = np.logical_and(mask, ~f.ReadAsArray().astype(bool))
        f = None  # this is the gdal way of closing files

    if not mask.any():
        return mask

    if wat_msk is not None:  # water mask (raster)
        f = gdal.Open(wat_msk)
        mask = np.logical_and(mask, f.ReadAsArray(x, y, w, h))
        f = None  # this is the gdal way of closing files

    elif use_srtm_for_water:  # water mask (srtm)
        tmp = common.tmpfile('.png')
        env = os.environ.copy()
        env['SRTM4_CACHE'] = cfg['srtm_dir']
        subprocess.check_call('watermask %d %d -h "%s" %s %s' %
                              (w, h, hij, rpc, tmp),
                              shell=True,
                              env=env)
        f = gdal.Open(tmp)
        mask = np.logical_and(mask, f.ReadAsArray())
        f = None  # this is the gdal way of closing files

    return mask
Example #13
def cloud_water_image_domain(x, y, w, h, rpc, roi_gml=None, cld_gml=None,
                             wat_msk=None, use_srtm_for_water=False):
    """
    Compute a mask for pixels masked by clouds, water, or out of image domain.

    Args:
        x, y, w, h: coordinates of the ROI
        rpc: path to the xml file containing the rpc coefficients of the image
            RPC model is used with SRTM data to derive the water mask
        roi_gml (optional): path to a gml file containing a mask
            defining the area contained in the full image
        cld_gml (optional): path to a gml file containing a mask
            defining the areas covered by clouds
        wat_msk (optional): path to an image file containing a water mask

    Returns:
        2D array containing the output binary mask. 0 indicates masked pixels,
        1 visible pixels.
    """
    # coefficients of the transformation associated to the crop and zoom
    z = cfg['subsampling_factor']
    H = np.dot(np.diag((1/z, 1/z, 1)), common.matrix_translation(-x, -y))
    hij = ' '.join([str(el) for el in H.flatten()])

    w, h = int(w/z), int(h/z)
    mask = np.ones((h, w), dtype=bool)

    if roi_gml is not None:  # image domain mask (polygons)
        tmp = common.tmpfile('.png')
        subprocess.check_call('cldmask %d %d -h "%s" %s %s' % (w, h, hij,
                                                               roi_gml, tmp),
                              shell=True)

        f = gdal.Open(tmp)
        mask = np.logical_and(mask, f.ReadAsArray())
        f = None  # this is the gdal way of closing files

    if not mask.any():
        return mask

    if cld_gml is not None:  # cloud mask (polygons)
        tmp = common.tmpfile('.png')
        subprocess.check_call('cldmask %d %d -h "%s" %s %s' % (w, h, hij,
                                                               cld_gml, tmp),
                              shell=True)
        f = gdal.Open(tmp)
        mask = np.logical_and(mask, ~f.ReadAsArray().astype(bool))
        f = None  # this is the gdal way of closing files

    if not mask.any():
        return mask

    if wat_msk is not None:  # water mask (raster)
        f = gdal.Open(wat_msk)
        mask = np.logical_and(mask, f.ReadAsArray(x, y, w, h))
        f = None  # this is the gdal way of closing files

    elif use_srtm_for_water:  # water mask (srtm)
        tmp = common.tmpfile('.png')
        env = os.environ.copy()
        env['SRTM4_CACHE'] = cfg['srtm_dir']
        subprocess.check_call('watermask %d %d -h "%s" %s %s' % (w, h, hij, rpc,
                                                                 tmp),
                              shell=True, env=env)
        f = gdal.Open(tmp)
        mask = np.logical_and(mask, f.ReadAsArray())
        f = None  # this is the gdal way of closing files

    return mask
Example #14
def multidisparities_to_ply(tile):
    """
    Compute a point cloud from the disparity maps of N-pairs of image tiles.

    Args:
        tile: dictionary containing the information needed to process a tile.

    # There is no guarantee that this function works with z!=1
    """
    out_dir = os.path.join(tile['dir'])
    ply_file = os.path.join(out_dir, 'cloud.ply')
    plyextrema = os.path.join(out_dir, 'plyextrema.txt')
    x, y, w, h = tile['coordinates']

    rpc_ref = cfg['images'][0]['rpc']
    disp_list = list()
    rpc_list = list()

    if cfg['skip_existing'] and os.path.isfile(ply_file):
        print('triangulation done on tile {} {}'.format(x, y))
        return

    mask_orig = os.path.join(out_dir, 'cloud_water_image_domain_mask.png')

    print('triangulating tile {} {}...'.format(x, y))
    n = len(cfg['images']) - 1
    for i in range(n):
        pair = 'pair_%d' % (i + 1)
        H_ref = os.path.join(out_dir, pair, 'H_ref.txt')
        H_sec = os.path.join(out_dir, pair, 'H_sec.txt')
        disp = os.path.join(out_dir, pair, 'rectified_disp.tif')
        mask_rect = os.path.join(out_dir, pair, 'rectified_mask.png')
        disp2D = os.path.join(out_dir, pair, 'disp2D.tif')
        rpc_sec = cfg['images'][i + 1]['rpc']

        if os.path.exists(disp):
            # homography for warp
            T = common.matrix_translation(x, y)
            hom_ref = np.loadtxt(H_ref)
            hom_ref_shift = np.dot(hom_ref, T)

            # homography for 1D to 2D conversion
            hom_sec = np.loadtxt(H_sec)
            if cfg["use_global_pointing_for_geometric_triangulation"] is True:
                pointing = os.path.join(cfg['out_dir'],
                                        'global_pointing_%s.txt' % pair)
                hom_pointing = np.loadtxt(pointing)
                hom_sec = np.dot(hom_sec, np.linalg.inv(hom_pointing))
            hom_sec_shift_inv = np.linalg.inv(hom_sec)

            h1 = " ".join(str(x) for x in hom_ref_shift.flatten())
            h2 = " ".join(str(x) for x in hom_sec_shift_inv.flatten())

            # relative disparity map to absolute disparity map
            tmp_abs = common.tmpfile('.tif')
            os.environ["PLAMBDA_GETPIXEL"] = "0"
            common.run(
                'plambda %s %s "y 0 = nan x[0] :i + x[1] :j + 1 3 njoin if" -o %s'
                % (disp, mask_rect, tmp_abs))

            # 1d to 2d conversion
            tmp_1d_to_2d = common.tmpfile('.tif')
            common.run('plambda %s "%s 9 njoin x mprod" -o %s' %
                       (tmp_abs, h2, tmp_1d_to_2d))

            # warp
            tmp_warp = common.tmpfile('.tif')
            common.run('homwarp -o 2 "%s" %d %d %s %s' %
                       (h1, w, h, tmp_1d_to_2d, tmp_warp))

            # set masked value to NaN
            exp = 'y 0 = nan x if'
            common.run('plambda %s %s "%s" -o %s' %
                       (tmp_warp, mask_orig, exp, disp2D))
            # disp2D contains positions in the secondary image

            # added input data for triangulation module
            disp_list.append(disp2D)
            rpc_list.append(rpc_sec)

            if cfg['clean_intermediate']:
                common.remove(H_ref)
                common.remove(H_sec)
                common.remove(disp)
                common.remove(mask_rect)
                common.remove(mask_orig)

    colors = os.path.join(out_dir, 'ref.png')
    if cfg['images'][0]['clr']:
        common.image_crop_gdal(cfg['images'][0]['clr'], x, y, w, h, colors)
    else:
        common.image_qauto(
            common.image_crop_gdal(cfg['images'][0]['img'], x, y, w, h),
            colors)

    # compute the point cloud
    triangulation.multidisp_map_to_point_cloud(ply_file,
                                               disp_list,
                                               rpc_ref,
                                               rpc_list,
                                               colors,
                                               utm_zone=cfg['utm_zone'],
                                               llbbx=tuple(cfg['ll_bbx']),
                                               xybbx=(x, x + w, y, y + h))

    # compute the point cloud extrema (xmin, xmax, ymin, ymax)
    common.run("plyextrema %s %s" % (ply_file, plyextrema))

    if cfg['clean_intermediate']:
        common.remove(colors)
Example #15
def local_translation_rotation(r1, r2, x, y, w, h, m):
    """
    Estimates the optimal translation to minimise the relative pointing error
    on a given tile.

    Args:
        r1, r2: two instances of the rpc_model.RPCModel class
        x, y, w, h: region of interest in the reference image (r1)
        m: Nx4 numpy array containing a list of matches, one per line. Each
            match is given by (p1, p2, q1, q2) where (p1, p2) is a point of the
            reference view and (q1, q2) is the corresponding point in the
            secondary view.

    Returns:
        3x3 numpy array containing the homogeneous representation of the
        optimal planar translation, to be applied to the secondary image in
        order to correct the pointing error.
    """
    # estimate the affine fundamental matrix between the two views
    n = cfg['n_gcp_per_axis']
    rpc_matches = rpc_utils.matches_from_rpc(r1, r2, x, y, w, h, n)
    F = estimation.affine_fundamental_matrix(rpc_matches)

    # Compute rectification homographies
    # S1, S2 = estimation.rectifying_similarities_from_affine_fundamental_matrix(F, cfg['debug'])
    hmargin = cfg['horizontal_margin']
    vmargin = cfg['vertical_margin']
    S1, S2, F_rect = rectification.rectification_homographies(rpc_matches, x, y, w, h, hmargin, vmargin)

    N = len(m)
    # Apply rectification on image 1
    S1p1 = common.points_apply_homography(S1, m[:,0:2])
    S1p1 = np.column_stack((S1p1,np.ones((N,1))))
    print("Points 1 rectied")
    print(S1p1[:10])
    # Apply rectification on image 2
    S2p2 = common.points_apply_homography(S2, m[:,2:4])
    S2p2 = np.column_stack((S2p2, np.ones((N,1))))
    print("Points 2 rectied")
    print(S2p2[:10])

    # Compute F in the rectified space
    rect_matches = np.column_stack((S1p1[:,0:2], S2p2[:, 0:2]))
    F_rect2 = estimation.affine_fundamental_matrix(rect_matches)

    # Compute epipolar lines
    FS1p1 = np.dot(F_rect2, S1p1.T).T
    print(FS1p1[:10])

    # Normalize epipolar lines
    c1 = -FS1p1[:, 1]
    FS1p1_norm = FS1p1/c1[:, np.newaxis]
    print(FS1p1_norm[:10])


    # Variable of optimization problem
    A_ = np.ones((N,1))
    # A_ = np.column_stack((S2p2[:,0].reshape(N, 1),np.ones((N,1))))
    # b_ = S2p2[:, 1] - FS1p1_norm[:, 2]
    b_ = S2p2[:, 1] - S1p1[:, 1]
    t_med = np.median(b_)
    print(t_med)

    t_med2 = np.sort(b_)[int(N/2)]
    print("t_med2", t_med2)

    # min ||Ax + b||^2 => x = - (A^T A )^-1 A^T b
    # X_ = - np.dot(np.linalg.inv(np.dot(A_.T, A_)), np.dot(A_.T, b_))
    # [theta, t] = X_
    # print(t, theta)
    # t = X_[0]

    # Compute epipolar lines without rectification
    p1 = np.column_stack((m[:,0:2],np.ones((N,1))))
    Fp1 = np.dot(F, p1.T).T

    # Compute normal vector to epipolar lines
    ab = np.array([Fp1[0][0], Fp1[0][1]])
    ab = ab/np.linalg.norm(ab)

    tx = t_med*ab[0]
    ty = t_med*ab[1]
    print(tx, ty)

    # Get translation in not rectified image
    T = common.matrix_translation(0, -t_med)
    T = np.linalg.inv(S2).dot(T).dot(S2)
    # T = np.dot(S2_inv, T)
    print(T)

    theta = 0
    cos_theta = np.cos(theta)
    sin_theta = np.sin(theta)
    A = np.array([[cos_theta, -sin_theta, tx],
                  [sin_theta,  cos_theta, ty],
                  [0, 0, 1]])
    return A, F
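A toy check of the conjugation used above (S2 is a made-up similarity): the vertical shift T, estimated in the rectified frame, is transported to the original secondary image frame as inv(S2) . T . S2, so applying it before rectification has the same effect as applying T after rectification.

import numpy as np

theta = np.deg2rad(30)
S2 = np.array([[np.cos(theta), -np.sin(theta), 5.0],
               [np.sin(theta),  np.cos(theta), 2.0],
               [0.0,            0.0,           1.0]])
t_med = 3.0
T = np.array([[1., 0., 0.], [0., 1., -t_med], [0., 0., 1.]])
A = np.linalg.inv(S2) @ T @ S2
p = np.array([10., 20., 1.])
print(np.allclose(S2 @ A @ p, T @ S2 @ p))   # -> True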