Example #1
def disparity_to_ply(tile):
    """
    Compute a point cloud from the disparity map of a pair of image tiles.

    Args:
        tile: dictionary containing the information needed to process a tile.
    """
    out_dir = os.path.join(tile['dir'])
    ply_file = os.path.join(out_dir, 'cloud.ply')
    plyextrema = os.path.join(out_dir, 'plyextrema.txt')
    x, y, w, h = tile['coordinates']
    rpc1 = cfg['images'][0]['rpc']
    rpc2 = cfg['images'][1]['rpc']

    if os.path.exists(os.path.join(out_dir, 'stderr.log')):
        print('triangulation: stderr.log exists')
        print('pair_1 not processed on tile {} {}'.format(x, y))
        return

    if cfg['skip_existing'] and os.path.isfile(ply_file):
        print('triangulation done on tile {} {}'.format(x, y))
        return

    print('triangulating tile {} {}...'.format(x, y))
    # This function is only called when there is a single pair (pair_1)
    H_ref = os.path.join(out_dir, 'pair_1', 'H_ref.txt')
    H_sec = os.path.join(out_dir, 'pair_1', 'H_sec.txt')
    pointing = os.path.join(cfg['out_dir'], 'global_pointing_pair_1.txt')
    disp = os.path.join(out_dir, 'pair_1', 'rectified_disp.tif')
    mask_rect = os.path.join(out_dir, 'pair_1', 'rectified_mask.png')
    mask_orig = os.path.join(out_dir, 'cloud_water_image_domain_mask.png')

    # prepare the image needed to colorize point cloud
    colors = os.path.join(out_dir, 'rectified_ref.png')
    if cfg['images'][0]['clr']:
        hom = np.loadtxt(H_ref)
        roi = [[x, y], [x + w, y], [x + w, y + h], [x, y + h]]
        ww, hh = common.bounding_box2D(common.points_apply_homography(
            hom, roi))[2:]
        tmp = common.tmpfile('.tif')
        common.image_apply_homography(tmp, cfg['images'][0]['clr'], hom,
                                      ww + 2 * cfg['horizontal_margin'],
                                      hh + 2 * cfg['vertical_margin'])
        common.image_qauto(tmp, colors)
    else:
        common.image_qauto(
            os.path.join(out_dir, 'pair_1', 'rectified_ref.tif'), colors)

    # compute the point cloud
    triangulation.disp_map_to_point_cloud(ply_file,
                                          disp,
                                          mask_rect,
                                          rpc1,
                                          rpc2,
                                          H_ref,
                                          H_sec,
                                          pointing,
                                          colors,
                                          utm_zone=cfg['utm_zone'],
                                          llbbx=tuple(cfg['ll_bbx']),
                                          xybbx=(x, x + w, y, y + h),
                                          xymsk=mask_orig)

    # compute the point cloud extrema (xmin, xmax, ymin, ymax)
    common.run("plyextrema %s %s" % (ply_file, plyextrema))

    if cfg['clean_intermediate']:
        common.remove(H_ref)
        common.remove(H_sec)
        common.remove(disp)
        common.remove(mask_rect)
        common.remove(mask_orig)
        common.remove(colors)
        common.remove(os.path.join(out_dir, 'pair_1', 'rectified_ref.tif'))
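A minimal calling sketch for the function above, assuming the s2p global configuration cfg has already been initialised by the pipeline (output directories, 'utm_zone', 'll_bbx', etc.) and that the pair_1 rectification outputs exist in the tile directory; the tile keys shown are the ones actually read by disparity_to_ply, while the values are hypothetical:

# hypothetical tile descriptor -- paths and sizes are placeholders
tile = {
    'dir': 'out/tiles/row_0000000_col_0000000',  # tile working directory
    'coordinates': (0, 0, 500, 500),             # x, y, w, h in the reference image
}
disparity_to_ply(tile)  # writes cloud.ply and plyextrema.txt in tile['dir']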
Example #2
def rectify_pair(im1,
                 im2,
                 rpc1,
                 rpc2,
                 x,
                 y,
                 w,
                 h,
                 out1,
                 out2,
                 A=None,
                 sift_matches=None,
                 method='rpc',
                 hmargin=0,
                 vmargin=0):
    """
    Rectify a ROI in a pair of images.

    Args:
        im1, im2: paths to two image files
        rpc1, rpc2: paths to the two xml files containing RPC data
        x, y, w, h: four integers defining the rectangular ROI in the first
            image.  (x, y) is the top-left corner, and (w, h) are the dimensions
            of the rectangle.
        out1, out2: paths to the output rectified crops
        A (optional): 3x3 numpy array containing the pointing error correction
            for im2. This matrix is usually estimated with the pointing_accuracy
            module.
        sift_matches (optional): Nx4 numpy array containing a list of sift
            matches, in the full image coordinates frame
        method (default: 'rpc'): option to decide whether to use rpc or sift
            matches for the fundamental matrix estimation.
        {h,v}margin (optional): horizontal and vertical margins added on the
            sides of the rectified images

    Returns:
        H1, H2: Two 3x3 matrices representing the rectifying homographies that
        have been applied to the two original (large) images.
        disp_min, disp_max: horizontal disparity range
    """
    # read RPC data
    rpc1 = rpc_model.RPCModel(rpc1)
    rpc2 = rpc_model.RPCModel(rpc2)

    # compute real or virtual matches
    if method == 'rpc':
        # find virtual matches from RPC camera models
        matches = rpc_utils.matches_from_rpc(rpc1, rpc2, x, y, w, h,
                                             cfg['n_gcp_per_axis'])

        # correct second image coordinates with the pointing correction matrix
        if A is not None:
            matches[:, 2:] = common.points_apply_homography(
                np.linalg.inv(A), matches[:, 2:])
    else:
        matches = sift_matches

    # compute rectifying homographies
    H1, H2, F = rectification_homographies(matches, x, y, w, h, hmargin,
                                           vmargin)

    if cfg['register_with_shear']:
        # compose H2 with a horizontal shear to reduce the disparity range
        a = np.mean(rpc_utils.altitude_range(rpc1, x, y, w, h))
        lon, lat, alt = rpc_utils.ground_control_points(
            rpc1, x, y, w, h, a, a, 4)
        x1, y1 = rpc1.inverse_estimate(lon, lat, alt)[:2]
        x2, y2 = rpc2.inverse_estimate(lon, lat, alt)[:2]
        m = np.vstack([x1, y1, x2, y2]).T
        m = np.vstack({tuple(row)
                       for row in m})  # remove duplicates due to no alt range
        H2 = register_horizontally_shear(m, H1, H2)

    # compose H2 with a horizontal translation to center disp range around 0
    if sift_matches is not None:
        sift_matches = filter_matches_epipolar_constraint(
            F, sift_matches, cfg['epipolar_thresh'])
        if len(sift_matches) < 10:
            print('WARNING: no registration with less than 10 matches')
        else:
            H2 = register_horizontally_translation(sift_matches, H1, H2)

    # compute disparity range
    if cfg['debug']:
        out_dir = os.path.dirname(out1)
        np.savetxt(os.path.join(out_dir, 'sift_matches_disp.txt'),
                   sift_matches,
                   fmt='%9.3f')
        visualisation.plot_matches(
            im1, im2, rpc1, rpc2, sift_matches, x, y, w, h,
            os.path.join(out_dir, 'sift_matches_disp.png'))
    disp_m, disp_M = disparity_range(rpc1, rpc2, x, y, w, h, H1, H2,
                                     sift_matches, A)

    # compute rectifying homographies for non-epipolar mode (rectify the secondary tile only)
    if block_matching.rectify_secondary_tile_only(cfg['matching_algorithm']):
        H1_inv = np.linalg.inv(H1)
        H1 = np.eye(3)  # replace H1 with the identity matrix
        H2 = np.dot(H1_inv, H2)
        T = common.matrix_translation(-x + hmargin, -y + vmargin)
        H1 = np.dot(T, H1)
        H2 = np.dot(T, H2)

    # compute output images size
    roi = [[x, y], [x + w, y], [x + w, y + h], [x, y + h]]
    pts1 = common.points_apply_homography(H1, roi)
    x0, y0, w0, h0 = common.bounding_box2D(pts1)
    # check that the first homography maps the ROI in the positive quadrant
    np.testing.assert_allclose(np.round([x0, y0]), [hmargin, vmargin],
                               atol=.01)

    # apply homographies and do the crops
    common.image_apply_homography(out1, im1, H1, w0 + 2 * hmargin,
                                  h0 + 2 * vmargin)
    common.image_apply_homography(out2, im2, H2, w0 + 2 * hmargin,
                                  h0 + 2 * vmargin)

    if block_matching.rectify_secondary_tile_only(cfg['matching_algorithm']):
        pts_in = [[0, 0], [disp_m, 0], [disp_M, 0]]
        pts_out = common.points_apply_homography(H1_inv, pts_in)
        disp_m = pts_out[1, :] - pts_out[0, :]
        disp_M = pts_out[2, :] - pts_out[0, :]

    return H1, H2, disp_m, disp_M
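A hedged usage sketch for rectify_pair: the input and output paths below are placeholders, and the returned homographies and disparity range would typically be saved to disk and forwarded to the block matching step:

# hypothetical usage sketch -- all file paths are placeholders
H1, H2, disp_min, disp_max = rectify_pair(
    'img_01.tif', 'img_02.tif',      # full input images
    'rpc_01.xml', 'rpc_02.xml',      # RPC camera models
    x=1000, y=2000, w=800, h=600,    # ROI in the first image
    out1='rectified_ref.tif',        # rectified reference crop
    out2='rectified_sec.tif',        # rectified secondary crop
    method='rpc',                    # virtual matches from the RPCs
    hmargin=50, vmargin=10)
np.savetxt('H_ref.txt', H1)          # homographies are commonly stored as text
np.savetxt('H_sec.txt', H2)
print('disparity range: [{}, {}]'.format(disp_min, disp_max))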
Example #3
def rectify_pair(im1, im2, rpc1, rpc2, x, y, w, h, out1, out2, A=None,
                 sift_matches=None, method='rpc', hmargin=0, vmargin=0):
    """
    Rectify a ROI in a pair of images.

    Args:
        im1, im2: paths to two image files
        rpc1, rpc2: paths to the two xml files containing RPC data
        x, y, w, h: four integers defining the rectangular ROI in the first
            image.  (x, y) is the top-left corner, and (w, h) are the dimensions
            of the rectangle.
        out1, out2: paths to the output rectified crops
        A (optional): 3x3 numpy array containing the pointing error correction
            for im2. This matrix is usually estimated with the pointing_accuracy
            module.
        sift_matches (optional): Nx4 numpy array containing a list of sift
            matches, in the full image coordinates frame
        method (default: 'rpc'): option to decide whether to use rpc or sift
            matches for the fundamental matrix estimation.
        {h,v}margin (optional): horizontal and vertical margins added on the
            sides of the rectified images

        This function uses the parameter subsampling_factor from the
        config module. If the factor z > 1 then the output images will
        be subsampled by a factor z. The output matrices H1, H2, and the
        ranges are also updated accordingly:
        Hi = Z * Hi with Z = diag(1/z, 1/z, 1) and
        disp_min = disp_min / z  (resp _max)

    Returns:
        H1, H2: Two 3x3 matrices representing the rectifying homographies that
        have been applied to the two original (large) images.
        disp_min, disp_max: horizontal disparity range
    """
    # read RPC data
    rpc1 = rpc_model.RPCModel(rpc1)
    rpc2 = rpc_model.RPCModel(rpc2)

    # compute real or virtual matches
    if method == 'rpc':
        # find virtual matches from RPC camera models
        matches = rpc_utils.matches_from_rpc(rpc1, rpc2, x, y, w, h,
                                             cfg['n_gcp_per_axis'])

        # correct second image coordinates with the pointing correction matrix
        if A is not None:
            matches[:, 2:] = common.points_apply_homography(np.linalg.inv(A),
                                                            matches[:, 2:])
    else:
        matches = sift_matches

    # compute rectifying homographies
    H1, H2, F = rectification_homographies(matches, x, y, w, h, hmargin, vmargin)

    if cfg['register_with_shear']:
        # compose H2 with a horizontal shear to reduce the disparity range
        a = np.mean(rpc_utils.altitude_range(rpc1, x, y, w, h))
        lon, lat, alt = rpc_utils.ground_control_points(rpc1, x, y, w, h, a, a, 4)
        x1, y1 = rpc1.inverse_estimate(lon, lat, alt)[:2]
        x2, y2 = rpc2.inverse_estimate(lon, lat, alt)[:2]
        m = np.vstack([x1, y1, x2, y2]).T
        m = np.vstack({tuple(row) for row in m})  # remove duplicates due to no alt range
        H2 = register_horizontally_shear(m, H1, H2)

    # compose H2 with a horizontal translation to center disp range around 0
    if sift_matches is not None:
        sift_matches = filter_matches_epipolar_constraint(F, sift_matches,
                                                          cfg['epipolar_thresh'])
        if len(sift_matches) < 10:
            print('WARNING: no registration with less than 10 matches')
        else:
            H2 = register_horizontally_translation(sift_matches, H1, H2)

    # compute disparity range
    if cfg['debug']:
        out_dir = os.path.dirname(out1)
        np.savetxt(os.path.join(out_dir, 'sift_matches_disp.txt'),
                   sift_matches, fmt='%9.3f')
        visualisation.plot_matches(im1, im2, rpc1, rpc2, sift_matches, x, y, w, h,
                                   os.path.join(out_dir, 'sift_matches_disp.png'))
    disp_m, disp_M = disparity_range(rpc1, rpc2, x, y, w, h, H1, H2,
                                     sift_matches, A)

    # impose a minimal disparity range (TODO this is valid only with the
    # 'center' flag for register_horizontally_translation)
    disp_m = min(-3, disp_m)
    disp_M = max(3, disp_M)

    # compute rectifying homographies for non-epipolar mode (rectify the secondary tile only)
    if block_matching.rectify_secondary_tile_only(cfg['matching_algorithm']):
        H1_inv = np.linalg.inv(H1)
        H1 = np.eye(3)  # replace H1 with the identity matrix
        H2 = np.dot(H1_inv, H2)
        T = common.matrix_translation(-x + hmargin, -y + vmargin)
        H1 = np.dot(T, H1)
        H2 = np.dot(T, H2)

    # if the subsampling factor is not 1, the homographies are altered to reflect the zoom
    z = cfg['subsampling_factor']
    if z != 1:
        Z = np.diag((1/z, 1/z, 1))
        H1 = np.dot(Z, H1)
        H2 = np.dot(Z, H2)
        disp_m = np.floor(disp_m / z)
        disp_M = np.ceil(disp_M / z)
        hmargin = int(np.floor(hmargin / z))
        vmargin = int(np.floor(vmargin / z))

    # compute output images size
    roi = [[x, y], [x+w, y], [x+w, y+h], [x, y+h]]
    pts1 = common.points_apply_homography(H1, roi)
    x0, y0, w0, h0 = common.bounding_box2D(pts1)
    # check that the first homography maps the ROI in the positive quadrant
    np.testing.assert_allclose(np.round([x0, y0]), [hmargin, vmargin], atol=.01)

    # apply homographies and do the crops
    common.image_apply_homography(out1, im1, H1, w0 + 2*hmargin, h0 + 2*vmargin)
    common.image_apply_homography(out2, im2, H2, w0 + 2*hmargin, h0 + 2*vmargin)

    if cfg['disp_min'] is not None: disp_m = cfg['disp_min']
    if cfg['disp_max'] is not None: disp_M = cfg['disp_max']

    if block_matching.rectify_secondary_tile_only(cfg['matching_algorithm']):
        pts_in = [[0, 0], [disp_m, 0], [disp_M, 0]]
        pts_out = common.points_apply_homography(H1_inv, pts_in)
        disp_m = pts_out[1, :] - pts_out[0, :]
        disp_M = pts_out[2, :] - pts_out[0, :]

    return H1, H2, disp_m, disp_M
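The docstring of this example describes how the homographies and the disparity range are rescaled when cfg['subsampling_factor'] is greater than 1. A self-contained numpy sketch of that update, with made-up values:

import numpy as np

z = 2                                  # hypothetical subsampling factor
H = np.array([[1.0, 0.0, -1000.0],     # some rectifying homography
              [0.0, 1.0, -2000.0],
              [0.0, 0.0, 1.0]])
Z = np.diag((1.0 / z, 1.0 / z, 1.0))   # Z = diag(1/z, 1/z, 1)
H_zoomed = np.dot(Z, H)                # Hi <- Z * Hi, as in the docstring

disp_min, disp_max = -14.3, 9.7
disp_min = np.floor(disp_min / z)      # -8.0: disparities shrink by the same factor
disp_max = np.ceil(disp_max / z)       #  5.0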
Example #4
File: s2p.py  Project: mnhrdt/s2p
def disparity_to_ply(tile):
    """
    Compute a point cloud from the disparity map of a pair of image tiles.

    Args:
        tile: dictionary containing the information needed to process a tile.
    """
    out_dir = os.path.join(tile['dir'])
    ply_file = os.path.join(out_dir, 'cloud.ply')
    plyextrema = os.path.join(out_dir, 'plyextrema.txt')
    x, y, w, h = tile['coordinates']
    rpc1 = cfg['images'][0]['rpc']
    rpc2 = cfg['images'][1]['rpc']

    if os.path.exists(os.path.join(out_dir, 'stderr.log')):
        print('triangulation: stderr.log exists')
        print('pair_1 not processed on tile {} {}'.format(x, y))
        return

    if cfg['skip_existing'] and os.path.isfile(ply_file):
        print('triangulation done on tile {} {}'.format(x, y))
        return

    print('triangulating tile {} {}...'.format(x, y))
    # This function is only called when there is a single pair (pair_1)
    H_ref = os.path.join(out_dir, 'pair_1', 'H_ref.txt')
    H_sec = os.path.join(out_dir, 'pair_1', 'H_sec.txt')
    pointing = os.path.join(cfg['out_dir'], 'global_pointing_pair_1.txt')
    disp = os.path.join(out_dir, 'pair_1', 'rectified_disp.tif')
    mask_rect = os.path.join(out_dir, 'pair_1', 'rectified_mask.png')
    mask_orig = os.path.join(out_dir, 'cloud_water_image_domain_mask.png')

    # prepare the image needed to colorize point cloud
    colors = os.path.join(out_dir, 'rectified_ref.png')
    if cfg['images'][0]['clr']:
        hom = np.loadtxt(H_ref)
        roi = [[x, y], [x+w, y], [x+w, y+h], [x, y+h]]
        ww, hh = common.bounding_box2D(common.points_apply_homography(hom, roi))[2:]
        tmp = common.tmpfile('.tif')
        common.image_apply_homography(tmp, cfg['images'][0]['clr'], hom,
                                      ww + 2*cfg['horizontal_margin'],
                                      hh + 2*cfg['vertical_margin'])
        common.image_qauto(tmp, colors)
    else:
        common.image_qauto(os.path.join(out_dir, 'pair_1', 'rectified_ref.tif'), colors)

    # compute the point cloud
    triangulation.disp_map_to_point_cloud(ply_file, disp, mask_rect, rpc1, rpc2,
                                          H_ref, H_sec, pointing, colors,
                                          utm_zone=cfg['utm_zone'],
                                          llbbx=tuple(cfg['ll_bbx']),
                                          xybbx=(x, x+w, y, y+h),
                                          xymsk=mask_orig)

    # compute the point cloud extrema (xmin, xmax, ymin, ymax)
    common.run("plyextrema %s %s" % (ply_file, plyextrema))

    if cfg['clean_intermediate']:
        common.remove(H_ref)
        common.remove(H_sec)
        common.remove(disp)
        common.remove(mask_rect)
        common.remove(mask_orig)
        common.remove(colors)
        common.remove(os.path.join(out_dir, 'pair_1', 'rectified_ref.tif'))
Example #5
def rectify_pair(im1,
                 im2,
                 rpc1,
                 rpc2,
                 x,
                 y,
                 w,
                 h,
                 out1,
                 out2,
                 A=None,
                 sift_matches=None,
                 method='rpc',
                 hmargin=0,
                 vmargin=0):
    """
    Rectify a ROI in a pair of images.

    Args:
        im1, im2: paths to two image files
        rpc1, rpc2: paths to the two xml files containing RPC data
        x, y, w, h: four integers defining the rectangular ROI in the first
            image.  (x, y) is the top-left corner, and (w, h) are the dimensions
            of the rectangle.
        out1, out2: paths to the output rectified crops
        A (optional): 3x3 numpy array containing the pointing error correction
            for im2. This matrix is usually estimated with the pointing_accuracy
            module.
        sift_matches (optional): Nx4 numpy array containing a list of sift
            matches, in the full image coordinates frame
        method (default: 'rpc'): option to decide whether to use rpc or sift
            matches for the fundamental matrix estimation.
        {h,v}margin (optional): horizontal and vertical margins added on the
            sides of the rectified images

        This function uses the parameter subsampling_factor from the
        config module. If the factor z > 1 then the output images will
        be subsampled by a factor z. The output matrices H1, H2, and the
        ranges are also updated accordingly:
        Hi = Z * Hi with Z = diag(1/z, 1/z, 1) and
        disp_min = disp_min / z  (resp _max)

    Returns:
        H1, H2: Two 3x3 matrices representing the rectifying homographies that
        have been applied to the two original (large) images.
        disp_min, disp_max: horizontal disparity range
    """
    # read RPC data
    rpc1 = rpc_model.RPCModel(rpc1)
    rpc2 = rpc_model.RPCModel(rpc2)

    # compute real or virtual matches
    if method == 'rpc':
        # find virtual matches from RPC camera models
        matches = rpc_utils.matches_from_rpc(rpc1, rpc2, x, y, w, h,
                                             cfg['n_gcp_per_axis'])

        # correct second image coordinates with the pointing correction matrix
        if A is not None:
            matches[:, 2:] = common.points_apply_homography(
                np.linalg.inv(A), matches[:, 2:])
    else:
        matches = sift_matches

    # compute rectifying homographies
    H1, H2, F = rectification_homographies(matches, x, y, w, h, hmargin,
                                           vmargin)

    if cfg['register_with_shear']:
        # compose H2 with a horizontal shear to reduce the disparity range
        a = np.mean(rpc_utils.altitude_range(rpc1, x, y, w, h))
        lon, lat, alt = rpc_utils.ground_control_points(
            rpc1, x, y, w, h, a, a, 4)
        x1, y1 = rpc1.inverse_estimate(lon, lat, alt)[:2]
        x2, y2 = rpc2.inverse_estimate(lon, lat, alt)[:2]
        m = np.vstack([x1, y1, x2, y2]).T
        m = np.vstack({tuple(row)
                       for row in m})  # remove duplicates due to no alt range
        H2 = register_horizontally_shear(m, H1, H2)

    # compose H2 with a horizontal translation to center disp range around 0
    if sift_matches is not None:
        sift_matches = filter_matches_epipolar_constraint(
            F, sift_matches, cfg['epipolar_thresh'])
        if len(sift_matches) < 10:
            print('WARNING: no registration with less than 10 matches')
        else:
            H2 = register_horizontally_translation(sift_matches, H1, H2)

    # compute disparity range
    if cfg['debug']:
        out_dir = os.path.dirname(out1)
        np.savetxt(os.path.join(out_dir, 'sift_matches_disp.txt'),
                   sift_matches,
                   fmt='%9.3f')
        visualisation.plot_matches(
            im1, im2, rpc1, rpc2, sift_matches, x, y, w, h,
            os.path.join(out_dir, 'sift_matches_disp.png'))
    disp_m, disp_M = disparity_range(rpc1, rpc2, x, y, w, h, H1, H2,
                                     sift_matches, A)

    # impose a minimal disparity range (TODO this is valid only with the
    # 'center' flag for register_horizontally_translation)
    disp_m = min(-3, disp_m)
    disp_M = max(3, disp_M)

    # if the subsampling factor is not 1, the homographies are altered to reflect the zoom
    z = cfg['subsampling_factor']
    if z != 1:
        Z = np.diag((1 / z, 1 / z, 1))
        H1 = np.dot(Z, H1)
        H2 = np.dot(Z, H2)
        disp_m = np.floor(disp_m / z)
        disp_M = np.ceil(disp_M / z)
        hmargin = int(np.floor(hmargin / z))
        vmargin = int(np.floor(vmargin / z))

    # compute output images size
    roi = [[x, y], [x + w, y], [x + w, y + h], [x, y + h]]
    pts1 = common.points_apply_homography(H1, roi)
    x0, y0, w0, h0 = common.bounding_box2D(pts1)
    # check that the first homography maps the ROI in the positive quadrant
    np.testing.assert_allclose(np.round([x0, y0]), [hmargin, vmargin],
                               atol=.01)

    # apply homographies and do the crops
    common.image_apply_homography(out1, im1, H1, w0 + 2 * hmargin,
                                  h0 + 2 * vmargin)
    common.image_apply_homography(out2, im2, H2, w0 + 2 * hmargin,
                                  h0 + 2 * vmargin)

    return H1, H2, disp_m, disp_M
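Near the end of the function, the output image size is obtained by mapping the ROI corners through H1 and taking their bounding box. A self-contained numpy sketch of that step; the helper below is a hypothetical stand-in for common.points_apply_homography and common.bounding_box2D, written only to illustrate the geometry:

import numpy as np

def apply_homography(H, pts):
    # map 2D points through a 3x3 homography (hypothetical helper)
    pts = np.hstack([np.asarray(pts, dtype=float), np.ones((len(pts), 1))])
    out = pts.dot(H.T)
    return out[:, :2] / out[:, 2:]

x, y, w, h = 1000, 2000, 800, 600
hmargin, vmargin = 50, 10
roi = [[x, y], [x + w, y], [x + w, y + h], [x, y + h]]

# a toy H1 that simply translates the ROI corner to (hmargin, vmargin)
H1 = np.array([[1.0, 0.0, hmargin - x],
               [0.0, 1.0, vmargin - y],
               [0.0, 0.0, 1.0]])

pts1 = apply_homography(H1, roi)
x0, y0 = pts1.min(axis=0)                       # bounding box origin
w0, h0 = pts1.max(axis=0) - pts1.min(axis=0)    # bounding box size
assert np.allclose([x0, y0], [hmargin, vmargin], atol=0.01)
print(w0 + 2 * hmargin, h0 + 2 * vmargin)       # rectified crop dimensions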