def corresponding_roi(rpc1, rpc2, x, y, w, h):
    """
    Uses RPC functions to determine the region of im2 associated to the
    specified ROI of im1.

    Args:
        rpc1, rpc2: two instances of the rpcm.RPCModel class, or paths to
            the xml files
        x, y, w, h: four integers defining a rectangular region of interest
            (ROI) in the first view. (x, y) is the top-left corner, and
            (w, h) are the dimensions of the rectangle.

    Returns:
        four integers defining a ROI in the second view. This ROI is
        supposed to contain the projections of the 3D points that are
        visible in the input ROI.
    """
    # accept either RPCModel instances or paths to the xml files
    if not isinstance(rpc1, rpcm.RPCModel):
        rpc1 = rpcm.RPCModel(rpc1)
    if not isinstance(rpc2, rpcm.RPCModel):
        rpc2 = rpcm.RPCModel(rpc2)

    m, M = altitude_range(rpc1, x, y, w, h, 0, 0)

    # vertices of the 3D ROI: the four 2D corners of the ROI, each taken at
    # both the minimum (m) and maximum (M) altitude
    cols = np.array([x, x, x, x, x + w, x + w, x + w, x + w])
    rows = np.array([y, y, y + h, y + h, y, y, y + h, y + h])
    alts = np.array([m, M, m, M, m, M, m, M])

    # map the eight 3D vertices into the second image
    xx, yy = find_corresponding_point(rpc1, rpc2, cols, rows, alts)[0:2]

    # bounding box of the projected points, rounded to integer coordinates
    out = common.bounding_box2D(np.vstack([xx, yy]).T)
    return np.round(out)
def roi_process(rpc, ll_poly, use_srtm=False):
    """
    Convert a (lon, lat) polygon into a rectangular bounding box in image space.

    Args:
        rpc (rpcm.RPCModel): camera model
        ll_poly (array): 2D array of shape (n, 2) containing the vertices
            (longitude, latitude) of the polygon
        use_srtm (bool): whether or not to use SRTM DEM to estimate the
            average ground altitude of the ROI.

    Returns:
        x, y, w, h: four integers defining a rectangular region of interest
            (ROI) in the image. (x, y) is the top-left corner, and (w, h)
            are the dimensions of the rectangle.
    """
    # reference altitude: SRTM value at the polygon centroid, or the RPC
    # altitude offset when no DEM lookup is requested
    if use_srtm:
        mean_lon, mean_lat = np.mean(ll_poly, axis=0)
        alt = srtm4.srtm4(mean_lon, mean_lat)
    else:
        alt = rpc.alt_offset

    # project the polygon vertices into the image at the reference altitude
    img_pts = rpc.projection(ll_poly[:, 0], ll_poly[:, 1], alt)

    # image-space bounding box of the projected vertices
    x, y, w, h = common.bounding_box2D(list(zip(*img_pts)))
    return {'x': x, 'y': y, 'w': w, 'h': h}
def get_coordinates_with_config(tile, m, M):
    """
    Compute a closed (lon, lat, 0) ring bounding the 3D extent of a tile.

    Args:
        tile (str): path to the tile directory, containing a config.json file
        m, M: minimum and maximum altitudes bounding the tile's 3D ROI

    Returns:
        list of five [lon, lat, 0] vertices describing the bounding
        rectangle (the first vertex is repeated to close the ring).
    """
    # read the tile ROI and its reference image from the tile configuration
    tile_cfg = s2p.read_config_file(os.path.join(tile, "config.json"))
    roi = tile_cfg['roi']
    x, y, w, h = roi['x'], roi['y'], roi['w'], roi['h']
    rpc = rpcm.rpc_from_geotiff(tile_cfg['images'][0]['img'])

    # eight 3D vertices: the four ROI corners at both altitudes m and M
    cols = np.array([x, x, x, x, x + w, x + w, x + w, x + w])
    rows = np.array([y, y, y + h, y + h, y, y, y + h, y + h])
    alts = np.array([m, M, m, M, m, M, m, M])

    # localize the vertices on the ground and take their lon/lat bounding box
    lon, lat, _ = rpc.direct_estimate(cols, rows, alts)
    out = list(common.bounding_box2D(np.vstack([lon, lat]).T))

    # convert (x, y, w, h) into (lon_min, lat_min, lon_max, lat_max)
    out[2] += out[0]
    out[3] += out[1]

    # closed ring of the bounding rectangle, with a zero altitude component
    latlon = [[out[0], out[3], 0],
              [out[2], out[3], 0],
              [out[2], out[1], 0],
              [out[0], out[1], 0],
              [out[0], out[3], 0]]
    return latlon
def roi_process(rpc, ll_poly, utm_zone=None, use_srtm=False):
    """
    Convert a longitude/latitude polygon into a rectangular bounding box in
    image coordinates

    Args:
        rpc (rpcm.RPCModel): camera model
        ll_poly (array): 2D array of shape (n, 2) containing the vertices
            (longitude, latitude) of the polygon
        utm_zone: force the zone number to be used when defining `utm_bbx`.
            If not specified, the default UTM zone for the given geography
            is used.
        use_srtm (bool): whether or not to use SRTM DEM to estimate the
            average ground altitude of the ROI.

    Returns:
        x, y, w, h: four integers defining a rectangular region of interest
            (ROI) in the image. (x, y) is the top-left corner, and (w, h)
            are the dimensions of the rectangle.
    """
    # pick a default UTM zone from the polygon centroid if none was forced
    if not utm_zone:
        utm_zone = geographiclib.compute_utm_zone(*ll_poly.mean(axis=0))
    cfg['utm_zone'] = utm_zone

    # express the polygon vertices in UTM and record their bounding box
    epsg = geographiclib.epsg_code_from_utm_zone(utm_zone)
    easting, northing = geographiclib.pyproj_transform(ll_poly[:, 0],
                                                       ll_poly[:, 1],
                                                       4326, epsg)
    cfg['utm_bbx'] = (min(easting), max(easting),
                      min(northing), max(northing))

    # reference altitude: SRTM value at the polygon centroid, or the RPC
    # altitude offset when no DEM lookup is requested
    if use_srtm:
        mean_lon, mean_lat = np.mean(ll_poly, axis=0)
        alt = srtm4.srtm4(mean_lon, mean_lat)
    else:
        alt = rpc.alt_offset

    # project the polygon vertices into the image at the reference altitude
    img_pts = rpc.projection(ll_poly[:, 0], ll_poly[:, 1], alt)

    # image-space bounding box of the projected vertices
    x, y, w, h = common.bounding_box2D(list(zip(*img_pts)))
    return {'x': x, 'y': y, 'w': w, 'h': h}
def roi_process(rpc, ll_poly, use_srtm=False, exogenous_dem=None,
                exogenous_dem_geoid_mode=True):
    """
    Convert a (lon, lat) polygon into a rectangular bounding box in image space.

    Args:
        rpc (rpcm.RPCModel): camera model
        ll_poly (array): 2D array of shape (n, 2) containing the vertices
            (longitude, latitude) of the polygon
        use_srtm (bool): whether or not to use SRTM DEM to estimate the
            average ground altitude of the ROI. Mutually exclusive with
            exogenous_dem.
        exogenous_dem (str, optional): path to an external DEM raster used to
            estimate the ground altitude at the polygon centroid
        exogenous_dem_geoid_mode (bool): if True, the exogenous DEM altitude
            is converted from geoid to ellipsoidal height before use
            (assumes the DEM altitudes are given w.r.t. the geoid — verify
            against the DEM's own documentation)

    Returns:
        x, y, w, h: four integers defining a rectangular region of interest
            (ROI) in the image. (x, y) is the top-left corner, and (w, h)
            are the dimensions of the rectangle.
    """
    if use_srtm and (exogenous_dem is not None):
        raise ValueError("use_srtm and exogenous_dem are mutually exclusive")

    # centroid of the polygon, used to sample a single reference altitude
    lon, lat = np.mean(ll_poly, axis=0)

    if exogenous_dem is not None:
        # sample the external DEM at the centroid, in the DEM's own CRS
        with rasterio.open(exogenous_dem) as src:
            px, py = geographiclib.pyproj_transform(lon, lat, 4326,
                                                    src.crs.to_epsg())
            z = list(src.sample([(px, py)]))[0][0]
            if exogenous_dem_geoid_mode is True:
                z = geographiclib.geoid_to_ellipsoid(lat, lon, z)
    elif use_srtm:
        z = srtm4.srtm4(lon, lat)
    else:
        z = rpc.alt_offset

    # project the polygon vertices into the image at the reference altitude
    img_pts = rpc.projection(ll_poly[:, 0], ll_poly[:, 1], z)

    # image-space bounding box of the projected vertices
    x, y, w, h = common.bounding_box2D(list(zip(*img_pts)))
    return {'x': x, 'y': y, 'w': w, 'h': h}
def rectification_homographies(matches, x, y, w, h):
    """
    Computes rectifying homographies from point matches for a given ROI.

    The affine fundamental matrix F is estimated with the gold-standard
    algorithm, then two rectifying similarities (rotation, zoom, translation)
    are computed directly from F.

    Args:
        matches: numpy array of shape (n, 4) containing a list of 2D point
            correspondences between the two images.
        x, y, w, h: four integers defining the rectangular ROI in the first
            image. (x, y) is the top-left corner, and (w, h) are the
            dimensions of the rectangle.

    Returns:
        S1, S2, F: three numpy arrays of shape (3, 3) representing the two
        rectifying similarities to be applied to the two images and the
        corresponding affine fundamental matrix.
    """
    # affine fundamental matrix, gold-standard estimation
    F = estimation.affine_fundamental_matrix(matches)

    # rectifying similarities derived directly from F
    S1, S2 = estimation.rectifying_similarities_from_affine_fundamental_matrix(
        F, cfg['debug'])

    if cfg['debug']:
        # residual vertical misalignment of the matches after rectification
        y1 = common.points_apply_homography(S1, matches[:, :2])[:, 1]
        y2 = common.points_apply_homography(S2, matches[:, 2:])[:, 1]
        err = np.abs(y1 - y2)
        print("max, min, mean rectification error on point matches: ", end=' ')
        print(np.max(err), np.min(err), np.mean(err))

    # pull back top-left corner of the ROI to the origin (plus margin)
    corners = [[x, y], [x + w, y], [x + w, y + h], [x, y + h]]
    mapped = common.points_apply_homography(S1, corners)
    x0, y0 = common.bounding_box2D(mapped)[:2]
    T = common.matrix_translation(-x0, -y0)
    return np.dot(T, S1), np.dot(T, S2), F
def utm_roi_to_img_roi(rpc, roi, use_srtm=False):
    """
    Convert a UTM ROI into a rectangular bounding box in image coordinates

    Args:
        rpc (rpcm.RPCModel): camera model
        roi (dict): dictionary with keys 'utm_band', 'hemisphere', 'x', 'y',
            'w', 'h'
        use_srtm (bool): whether or not to use SRTM DEM to estimate the
            average ground altitude of the ROI.

    Returns:
        x, y, w, h: four integers defining a rectangular region of interest
            (ROI) in the image. (x, y) is the top-left corner, and (w, h)
            are the dimensions of the rectangle.
    """
    # define utm rectangular box
    x, y, w, h = [roi[k] for k in ['x', 'y', 'w', 'h']]
    box = [(x, y), (x+w, y), (x+w, y+h), (x, y+h)]

    # convert utm to lon/lat
    utm_proj = geographiclib.utm_proj("{}{}".format(roi['utm_band'],
                                                    roi['hemisphere']))
    box_lon, box_lat = pyproj.transform(
        utm_proj, pyproj.Proj(init="epsg:4326"),
        [p[0] for p in box], [p[1] for p in box]
    )

    # reference altitude: SRTM value at the box center, or the RPC altitude
    # offset when no DEM lookup is requested
    if use_srtm:
        lon = np.mean(box_lon)
        lat = np.mean(box_lat)
        z = srtm4.srtm4(lon, lat)
    else:
        z = rpc.alt_offset

    # project lon/lat vertices into the image.
    # Bug fix: use the altitude z computed above; previously rpc.alt_offset
    # was always passed, silently ignoring the SRTM estimate.
    img_pts = rpc.projection(box_lon, box_lat, z)

    # return image roi.
    # Bug fix: rpc.projection returns a (cols, rows) pair of arrays, so zip
    # them into (col, row) points as the other roi_process variants do.
    x, y, w, h = common.bounding_box2D(list(zip(*img_pts)))
    return {'x': x, 'y': y, 'w': w, 'h': h}
def kml_roi_process(rpc, kml):
    """
    Compute an image ROI from the lon/lat polygon stored in a KML file.

    Also stores the lon/lat and UTM bounding boxes of the polygon in the
    global cfg dictionary (keys 'll_bbx' and 'utm_bbx').

    Args:
        rpc: instance of the rpc_model.RPCModel class, or path to a RPC file
        kml (str): path to a KML file containing a polygon

    Returns:
        dictionary with keys 'x', 'y', 'w', 'h' defining the image ROI
    """
    # parse the first <coordinates> element and keep 4 (lon, lat) vertices
    with open(kml, 'r') as f:
        a = bs4.BeautifulSoup(f, "lxml").find_all('coordinates')[0].text.split()
    ll_bbx = np.array([list(map(float, x.split(','))) for x in a])[:4, :2]

    # save lon lat bounding box to cfg dictionary
    lon_min, lat_min = ll_bbx.min(axis=0)
    lon_max, lat_max = ll_bbx.max(axis=0)
    cfg['ll_bbx'] = (lon_min, lon_max, lat_min, lat_max)

    # convert lon lat bbox to utm, forcing a single zone for all vertices
    z = utm.conversion.latlon_to_zone_number((lat_min + lat_max) * .5,
                                             (lon_min + lon_max) * .5)
    utm_bbx = np.array([utm.from_latlon(p[1], p[0], force_zone_number=z)[:2]
                        for p in ll_bbx])
    east_min, nort_min = utm_bbx.min(axis=0)
    east_max, nort_max = utm_bbx.max(axis=0)
    cfg['utm_bbx'] = (east_min, east_max, nort_min, nort_max)

    # project lon lat vertices into the image
    if not isinstance(rpc, rpc_model.RPCModel):
        rpc = rpc_model.RPCModel(rpc)
    img_pts = [rpc.inverse_estimate(p[0], p[1], rpc.altOff)[:2]
               for p in ll_bbx]

    # return image roi
    x, y, w, h = common.bounding_box2D(img_pts)
    return {'x': x, 'y': y, 'w': w, 'h': h}
def utm_roi_to_img_roi(rpc, roi):
    """
    Convert a UTM ROI into a rectangular bounding box in image coordinates.

    Args:
        rpc: instance of the rpc_model.RPCModel class, or path to a RPC file
        roi (dict): dictionary with keys 'utm_band', 'hemisphere', 'x', 'y',
            'w', 'h'

    Returns:
        dictionary with keys 'x', 'y', 'w', 'h' defining the image ROI
    """
    # corners of the utm rectangle
    x, y, w, h = (roi['x'], roi['y'], roi['w'], roi['h'])
    corners = [(x, y), (x + w, y), (x + w, y + h), (x, y + h)]

    # convert the utm corners to (lat, lon)
    zone = roi['utm_band']
    is_north = roi['hemisphere'] == 'N'
    corners_latlon = [utm.to_latlon(e, n, zone, northern=is_north)
                      for e, n in corners]

    # project the lon/lat corners into the image
    if not isinstance(rpc, rpc_model.RPCModel):
        rpc = rpc_model.RPCModel(rpc)
    img_pts = [rpc.inverse_estimate(lon, lat, rpc.altOff)[:2]
               for lat, lon in corners_latlon]

    # bounding box of the projected corners
    x, y, w, h = common.bounding_box2D(img_pts)
    return {'x': x, 'y': y, 'w': w, 'h': h}
def rectify_pair(im1, im2, rpc1, rpc2, x, y, w, h, out1, out2, A=None,
                 sift_matches=None, method='rpc', hmargin=0, vmargin=0):
    """
    Rectify a ROI in a pair of images.

    Args:
        im1, im2: paths to two GeoTIFF image files
        rpc1, rpc2: two instances of the rpcm.RPCModel class
        x, y, w, h: four integers defining the rectangular ROI in the first
            image. (x, y) is the top-left corner, and (w, h) are the
            dimensions of the rectangle.
        out1, out2: paths to the output rectified crops
        A (optional): 3x3 numpy array containing the pointing error correction
            for im2. This matrix is usually estimated with the pointing_accuracy
            module.
        sift_matches (optional): Nx4 numpy array containing a list of sift
            matches, in the full image coordinates frame
        method (default: 'rpc'): option to decide wether to use rpc of sift
            matches for the fundamental matrix estimation.
        {h,v}margin (optional): horizontal and vertical margins added on the
            sides of the rectified images

    Returns:
        H1, H2: Two 3x3 matrices representing the rectifying homographies that
        have been applied to the two original (large) images.
        disp_min, disp_max: horizontal disparity range

    Raises:
        NoRectificationMatchesError: when fewer than 4 matches are available.
    """
    # compute real or virtual matches
    if method == 'rpc':
        # find virtual matches from RPC camera models
        matches = rpc_utils.matches_from_rpc(rpc1, rpc2, x, y, w, h,
                                             cfg['n_gcp_per_axis'])

        # correct second image coordinates with the pointing correction matrix
        if A is not None:
            matches[:, 2:] = common.points_apply_homography(
                np.linalg.inv(A), matches[:, 2:])
    elif method == 'sift':
        matches = sift_matches
    else:
        raise Exception(
            "Unknown value {} for argument 'method'".format(method))

    # at least 4 matches are needed to estimate the fundamental matrix
    if matches is None or len(matches) < 4:
        raise NoRectificationMatchesError(
            "No or not enough matches found to rectify image pair")

    # compute rectifying homographies
    H1, H2, F = rectification_homographies(matches, x, y, w, h)

    if cfg['register_with_shear']:
        # compose H2 with a horizontal shear to reduce the disparity range
        a = np.mean(rpc_utils.altitude_range(rpc1, x, y, w, h))
        lon, lat, alt = rpc_utils.ground_control_points(
            rpc1, x, y, w, h, a, a, 4)
        x1, y1 = rpc1.projection(lon, lat, alt)[:2]
        x2, y2 = rpc2.projection(lon, lat, alt)[:2]
        m = np.vstack([x1, y1, x2, y2]).T
        m = np.vstack({tuple(row) for row in m})  # remove duplicates due to no alt range
        H2 = register_horizontally_shear(m, H1, H2)

    # compose H2 with a horizontal translation to center disp range around 0
    if sift_matches is not None:
        sift_matches = filter_matches_epipolar_constraint(
            F, sift_matches, cfg['epipolar_thresh'])
        if len(sift_matches) < 1:
            warnings.warn(
                "Need at least one sift match for the horizontal registration",
                category=NoHorizontalRegistrationWarning,
            )
        else:
            H2 = register_horizontally_translation(sift_matches, H1, H2)

    # compute disparity range
    if cfg['debug']:
        out_dir = os.path.dirname(out1)
        np.savetxt(os.path.join(out_dir, 'sift_matches_disp.txt'),
                   sift_matches, fmt='%9.3f')
        visualisation.plot_matches(
            im1, im2, rpc1, rpc2, sift_matches, x, y, w, h,
            os.path.join(out_dir, 'sift_matches_disp.png'))
    disp_m, disp_M = disparity_range(rpc1, rpc2, x, y, w, h, H1, H2,
                                     sift_matches, A)

    # recompute hmargin and homographies: the horizontal margin must be large
    # enough to hold the whole disparity range
    hmargin = int(np.ceil(max([hmargin, np.fabs(disp_m), np.fabs(disp_M)])))
    T = common.matrix_translation(hmargin, vmargin)
    H1, H2 = np.dot(T, H1), np.dot(T, H2)

    # compute output images size
    roi = [[x, y], [x + w, y], [x + w, y + h], [x, y + h]]
    pts1 = common.points_apply_homography(H1, roi)

    x0, y0, w0, h0 = common.bounding_box2D(pts1)
    # check that the first homography maps the ROI in the positive quadrant
    np.testing.assert_allclose(np.round([x0, y0]), [hmargin, vmargin],
                               atol=.01)

    # apply homographies and do the crops
    common.image_apply_homography(out1, im1, H1, w0 + 2 * hmargin,
                                  h0 + 2 * vmargin)
    common.image_apply_homography(out2, im2, H2, w0 + 2 * hmargin,
                                  h0 + 2 * vmargin)

    return H1, H2, disp_m, disp_M
def rectify_pair(im1, im2, rpc1, rpc2, x, y, w, h, out1, out2, A=None, sift_matches=None, method='rpc', hmargin=0, vmargin=0): """ Rectify a ROI in a pair of images. Args: im1, im2: paths to two image files rpc1, rpc2: paths to the two xml files containing RPC data x, y, w, h: four integers defining the rectangular ROI in the first image. (x, y) is the top-left corner, and (w, h) are the dimensions of the rectangle. out1, out2: paths to the output rectified crops A (optional): 3x3 numpy array containing the pointing error correction for im2. This matrix is usually estimated with the pointing_accuracy module. sift_matches (optional): Nx4 numpy array containing a list of sift matches, in the full image coordinates frame method (default: 'rpc'): option to decide wether to use rpc of sift matches for the fundamental matrix estimation. {h,v}margin (optional): horizontal and vertical margins added on the sides of the rectified images Returns: H1, H2: Two 3x3 matrices representing the rectifying homographies that have been applied to the two original (large) images. 
disp_min, disp_max: horizontal disparity range """ # read RPC data rpc1 = rpc_model.RPCModel(rpc1) rpc2 = rpc_model.RPCModel(rpc2) # compute real or virtual matches if method == 'rpc': # find virtual matches from RPC camera models matches = rpc_utils.matches_from_rpc(rpc1, rpc2, x, y, w, h, cfg['n_gcp_per_axis']) # correct second image coordinates with the pointing correction matrix if A is not None: matches[:, 2:] = common.points_apply_homography( np.linalg.inv(A), matches[:, 2:]) else: matches = sift_matches # compute rectifying homographies H1, H2, F = rectification_homographies(matches, x, y, w, h) if cfg['register_with_shear']: # compose H2 with a horizontal shear to reduce the disparity range a = np.mean(rpc_utils.altitude_range(rpc1, x, y, w, h)) lon, lat, alt = rpc_utils.ground_control_points( rpc1, x, y, w, h, a, a, 4) x1, y1 = rpc1.inverse_estimate(lon, lat, alt)[:2] x2, y2 = rpc2.inverse_estimate(lon, lat, alt)[:2] m = np.vstack([x1, y1, x2, y2]).T m = np.vstack({tuple(row) for row in m}) # remove duplicates due to no alt range H2 = register_horizontally_shear(m, H1, H2) # compose H2 with a horizontal translation to center disp range around 0 if sift_matches is not None: sift_matches = filter_matches_epipolar_constraint( F, sift_matches, cfg['epipolar_thresh']) if len(sift_matches) < 10: print('WARNING: no registration with less than 10 matches') else: H2 = register_horizontally_translation(sift_matches, H1, H2) # compute disparity range if cfg['debug']: out_dir = os.path.dirname(out1) np.savetxt(os.path.join(out_dir, 'sift_matches_disp.txt'), sift_matches, fmt='%9.3f') visualisation.plot_matches( im1, im2, rpc1, rpc2, sift_matches, x, y, w, h, os.path.join(out_dir, 'sift_matches_disp.png')) disp_m, disp_M = disparity_range(rpc1, rpc2, x, y, w, h, H1, H2, sift_matches, A) # recompute hmargin and homographies hmargin = int(np.ceil(max([hmargin, np.fabs(disp_m), np.fabs(disp_M)]))) T = common.matrix_translation(hmargin, vmargin) H1, H2 = np.dot(T, H1), 
np.dot(T, H2) # compute rectifying homographies for non-epipolar mode (rectify the secondary tile only) if block_matching.rectify_secondary_tile_only(cfg['matching_algorithm']): H1_inv = np.linalg.inv(H1) H1 = np.eye( 3 ) # H1 is replaced by 2-D array with ones on the diagonal and zeros elsewhere H2 = np.dot(H1_inv, H2) T = common.matrix_translation(-x + hmargin, -y + vmargin) H1 = np.dot(T, H1) H2 = np.dot(T, H2) # compute output images size roi = [[x, y], [x + w, y], [x + w, y + h], [x, y + h]] pts1 = common.points_apply_homography(H1, roi) x0, y0, w0, h0 = common.bounding_box2D(pts1) # check that the first homography maps the ROI in the positive quadrant np.testing.assert_allclose(np.round([x0, y0]), [hmargin, vmargin], atol=.01) # apply homographies and do the crops common.image_apply_homography(out1, im1, H1, w0 + 2 * hmargin, h0 + 2 * vmargin) common.image_apply_homography(out2, im2, H2, w0 + 2 * hmargin, h0 + 2 * vmargin) if block_matching.rectify_secondary_tile_only(cfg['matching_algorithm']): pts_in = [[0, 0], [disp_m, 0], [disp_M, 0]] pts_out = common.points_apply_homography(H1_inv, pts_in) disp_m = pts_out[1, :] - pts_out[0, :] disp_M = pts_out[2, :] - pts_out[0, :] return H1, H2, disp_m, disp_M
def disparity_to_ply(tile):
    """
    Compute a point cloud from the disparity map of a pair of image tiles.

    Args:
        tile: dictionary containing the information needed to process a tile.
    """
    out_dir = os.path.join(tile['dir'])
    ply_file = os.path.join(out_dir, 'cloud.ply')
    plyextrema = os.path.join(out_dir, 'plyextrema.txt')
    x, y, w, h = tile['coordinates']
    rpc1 = cfg['images'][0]['rpc']
    rpc2 = cfg['images'][1]['rpc']

    # a stderr.log left by a previous step means the pair failed on this tile
    if os.path.exists(os.path.join(out_dir, 'stderr.log')):
        print('triangulation: stderr.log exists')
        print('pair_1 not processed on tile {} {}'.format(x, y))
        return

    print('triangulating tile {} {}...'.format(x, y))
    # This function is only called when there is a single pair (pair_1)
    H_ref = os.path.join(out_dir, 'pair_1', 'H_ref.txt')
    H_sec = os.path.join(out_dir, 'pair_1', 'H_sec.txt')
    pointing = os.path.join(cfg['out_dir'], 'global_pointing_pair_1.txt')
    disp = os.path.join(out_dir, 'pair_1', 'rectified_disp.tif')
    extra = os.path.join(out_dir, 'pair_1', 'rectified_disp_confidence.tif')
    # the confidence map is optional; an empty string disables it
    if not os.path.exists(extra):
        extra = ''
    mask_rect = os.path.join(out_dir, 'pair_1', 'rectified_mask.png')
    mask_orig = os.path.join(out_dir, 'cloud_water_image_domain_mask.png')

    # prepare the image needed to colorize point cloud
    colors = os.path.join(out_dir, 'rectified_ref.png')
    if cfg['images'][0]['clr']:
        # rectify the color image with the reference homography before use
        hom = np.loadtxt(H_ref)
        roi = [[x, y], [x + w, y], [x + w, y + h], [x, y + h]]
        ww, hh = common.bounding_box2D(common.points_apply_homography(
            hom, roi))[2:]
        tmp = common.tmpfile('.tif')
        common.image_apply_homography(tmp, cfg['images'][0]['clr'], hom,
                                      ww + 2 * cfg['horizontal_margin'],
                                      hh + 2 * cfg['vertical_margin'])
        common.image_qauto(tmp, colors)
    else:
        # no color image: colorize with the rectified panchromatic reference
        common.image_qauto(
            os.path.join(out_dir, 'pair_1', 'rectified_ref.tif'), colors)

    # compute the point cloud
    triangulation.disp_map_to_point_cloud(ply_file, disp, mask_rect,
                                          rpc1, rpc2, H_ref, H_sec, pointing,
                                          colors, extra,
                                          utm_zone=cfg['utm_zone'],
                                          llbbx=tuple(cfg['ll_bbx']),
                                          xybbx=(x, x + w, y, y + h),
                                          xymsk=mask_orig)

    # compute the point cloud extrema (xmin, xmax, ymin, ymax)
    common.run("plyextrema %s %s" % (ply_file, plyextrema))

    if cfg['clean_intermediate']:
        common.remove(H_ref)
        common.remove(H_sec)
        common.remove(disp)
        common.remove(mask_rect)
        common.remove(mask_orig)
        common.remove(colors)
        common.remove(os.path.join(out_dir, 'pair_1', 'rectified_ref.tif'))