def plys_to_potree(input_plys, output, bin_dir='.', cloud_name="cloud"):
    """
    Build a multi-scale (Potree) representation of a large point cloud.

    The result can be browsed with a web browser, which is handy for huge
    clouds. If PotreeConverter is not installed this does not fail.

    Args:
        input_plys: list of paths to ply files
        output: path to the output folder
        bin_dir: directory containing the PotreeConverter checkout
        cloud_name: name given to the cloud in the generated page
    """
    converter = os.path.join(
        bin_dir, 'PotreeConverter/build/PotreeConverter/PotreeConverter')
    out_parent = os.path.dirname(output)

    # dump the ply paths, one per line, into a temporary list file
    ply_list = tmpfile('.txt', out_parent)
    with open(ply_list, 'w') as handle:
        handle.write(''.join("%s\n" % p for p in input_plys))

    # run PotreeConverter on the list file
    common.run("mkdir -p %s" % output)
    template = os.path.join(
        bin_dir, 'PotreeConverter/PotreeConverter/resources/page_template')
    common.run(
        "LC_ALL=C %s --list-of-files %s -o %s -p %s --edl-enabled --material RGB --overwrite --page-template %s"
        % (converter, ply_list, output, cloud_name, template))

    # discard the temporary list file
    os.remove(ply_list)
def transfer_map(in_map, H, x, y, w, h, out_map):
    """
    Transfer the heights computed on the rectified grid to the original
    Pleiades image grid.

    Args:
        in_map: path to the input map (height map or mask) sampled on the
            rectified grid
        H: path to txt file containing a numpy 3x3 array representing the
            rectifying homography
        x, y, w, h: four integers defining the rectangular ROI in the original
            image. (x, y) is the top-left corner, (w, h) the dimensions.
        out_map: path to the output map
    """
    # Inverse of the resampling transform: rectifying homography composed with
    # the crop translation. It maps coordinates of the original cropped grid
    # (the one desired for out_map) to the rectified grid (the one of in_map).
    hom = np.dot(np.loadtxt(H), common.matrix_translation(x, y))

    # Serialize the 9 homography coefficients, then let synflow produce the
    # flow field and backflow apply it. zero:WxH is the iio way of creating a
    # WxH image filled with zeros.
    coeffs = ' '.join('%r' % c for c in hom.flatten())
    common.run(
        'synflow hom "%s" zero:%dx%d /dev/null - | BILINEAR=1 backflow - %s %s'
        % (coeffs, w, h, in_map, out_map))
def height_map_rectified(rpc1, rpc2, H1, H2, disp, mask, height, rpc_err,
                         A=None):
    """
    Computes a height map from a disparity map, using rpc.

    Args:
        rpc1, rpc2: paths to the xml files
        H1, H2: path to txt files containing two 3x3 numpy arrays defining
            the rectifying homographies
        disp, mask: paths to the diparity and mask maps
        height: path to the output height map
        rpc_err: path to the output rpc_error of triangulation
        A (optional): path to txt file containing the pointing correction
            matrix for im2
    """
    if A is None:
        corrected_H2 = H2
    else:
        # fold the inverse pointing correction into the second homography
        corrected_H2 = common.tmpfile('.txt')
        np.savetxt(corrected_H2,
                   np.dot(np.loadtxt(H2), np.linalg.inv(np.loadtxt(A))))

    common.run("disp_to_h %s %s %s %s %s %s %s %s" % (rpc1, rpc2, H1,
                                                      corrected_H2, disp,
                                                      mask, height, rpc_err))
def produce_lidarviewer(s2poutdir, output, nthreads=4):
    """
    Produce a single multiscale point cloud for the whole processed region.

    Args:
        s2poutdir: path to the s2p output directory, containing 'tiles.txt'
        output: path prefix for the LidarViewer output files
        nthreads: number of threads passed to LidarPreprocessor (default 4,
            which preserves the previous hard-coded behavior)
    """
    tiles_file = os.path.join(s2poutdir, 'tiles.txt')

    # Read the tiles file
    tiles = s2p.read_tiles(tiles_file)
    print(str(len(tiles)) + ' tiles found')

    # collect all plys (one 'cloud.ply' next to each tile config file)
    plys = ' '.join(
        os.path.join(os.path.abspath(os.path.dirname(t)), 'cloud.ply')
        for t in tiles)

    common.run(
        "LidarPreprocessor -to %s.LidarO -tp %s.LidarP -nt %d %s -o %s"
        % (output, output, nthreads, plys, output))
def main(tiles_file, outfile, sub_img):
    """
    Mosaic one per-tile image into a single global vrt (or tif) image.

    Args:
        tiles_file: path to the text file listing the tile directories
        outfile: path to the output image; its extension must be 'vrt' or
            'tif' (any other extension aborts with an error message)
        sub_img: name of the per-tile image to mosaic, forwarded to
            write_row_vrts
    """
    outfile_basename = os.path.basename(outfile)
    outfile_dirname = os.path.dirname(outfile)

    # the format is deduced from the last 3 characters of the filename
    output_format = outfile_basename[-3:]
    print('Output format is ' + output_format)

    # If output format is tif, we need to generate a temporary vrt
    # with the same name
    vrt_basename = outfile_basename
    if output_format == 'tif':
        vrt_basename = vrt_basename[:-3] + 'vrt'
    elif output_format != 'vrt':
        print('Error: only vrt or tif extension is allowed for output image.')
        return

    vrt_name = os.path.join(outfile_dirname, vrt_basename)

    # Read the tiles file
    tiles = s2p.read_tiles(tiles_file)
    print(str(len(tiles)) + ' tiles found')

    # Compute the global extent of the output image
    (min_x, max_x, min_y, max_y) = global_extent(tiles)
    print('Global extent: [%i,%i]x[%i,%i]' % (min_x, max_x, min_y, max_y))

    # Now, write all row vrts
    print("Writing row vrt files " + vrt_basename)
    vrt_row = write_row_vrts(tiles, sub_img, vrt_basename, min_x, max_x)

    # Finally, write main vrt
    print('Writing ' + vrt_name)
    write_main_vrt(vrt_row, vrt_name, min_x, max_x, min_y, max_y)

    # If Output format is tif, convert vrt file to tif
    if output_format == 'tif':
        print('Converting vrt to tif ...')
        common.run(('gdal_translate -ot Float32 -co TILED=YES -co'
                    ' BIGTIFF=IF_NEEDED %s %s'
                    % (common.shellquote(vrt_name),
                       common.shellquote(outfile))))

        print('Removing temporary vrt files')
        # Do not use items()/iteritems() here because of python 2 and 3 compat
        for y in vrt_row:
            vrt_data = vrt_row[y]
            row_vrt_filename = os.path.join(vrt_data['vrt_dir'], vrt_basename)
            # best-effort cleanup: a missing file is not an error
            try:
                os.remove(row_vrt_filename)
            except OSError:
                pass
        try:
            os.remove(vrt_name)
        except OSError:
            pass
def plys_to_potree(input_plys, output, bin_dir='.'):
    """
    Compute a multi-scale representation of a large point cloud.

    The output file can be viewed with a web browser. This is useful for
    huge point clouds. The input is a list of ply files.

    If PotreeConverter is not available it doesn't fail.

    Args:
        input_plys: list of paths to ply files
        output: path to the output folder
        bin_dir: directory containing plytool, LAStools and PotreeConverter
    """
    import os.path
    ply2ascii = os.path.join(bin_dir, 'plytool/ply2ascii')
    txt2las = os.path.join(bin_dir, 'PotreeConverter/LAStools/bin/txt2las')
    PotreeConverter = os.path.join(
        bin_dir, 'PotreeConverter/build/PotreeConverter/PotreeConverter')
    #if (not os.path.exists(ply2ascii)) or (not os.path.exists(txt2las)) or (not os.path.exists(PotreeConverter)) :
    #    return

    outdir = os.path.dirname(output)
    las = []
    trash = []  # intermediate files to delete at the end
    for p in input_plys:
        # make ascii ply if needed
        ap = tmpfile('.ply', outdir)
        lp = tmpfile('.las', outdir)
        las.append(lp)
        trash.extend([ap, lp])
        common.run("%s < %s > %s" % (ply2ascii, p, ap))
        # convert ply to las because PotreeConverter is not able to read PLY
        common.run("%s -parse xyzRGB -verbose -i %s -o %s 2>/dev/null"
                   % (txt2las, ap, lp))

    # generate potree output
    listfile = tmpfile('.txt', outdir)
    trash.append(listfile)
    with open(listfile, 'w') as ff:
        for item in las:
            ff.write("%s\n" % item)
    common.run("mkdir -p %s" % output)
    resourcedir = os.path.join(
        bin_dir, 'PotreeConverter/PotreeConverter/resources/page_template')
    common.run(
        "LC_ALL=C %s --list-of-files %s -o %s -p cloud --edl-enabled --material ELEVATION --overwrite --page-template %s"
        % (PotreeConverter, listfile, output, resourcedir))

    # clean intermediate files (the original iterated an undefined name
    # 'garbage', which raised NameError; we now track them in 'trash')
    for p in trash:
        common.run("rm %s" % p)
def height_map_to_point_cloud(cloud, heights, rpc, H=None, crop_colorized='',
                              off_x=None, off_y=None, ascii_ply=False,
                              with_normals=False, utm_zone=None, llbbx=None):
    """
    Computes a color point cloud from a height map.

    Args:
        cloud: path to the output points cloud (ply format)
        heights: height map, sampled on the same grid as the crop_colorized
            image. In particular, its size is the same as crop_colorized.
        rpc: instances of the rpcm.RPCModel class
        H (optional, default None): numpy array of size 3x3 defining the
            homography transforming the coordinates system of the original
            full size image into the coordinates system of the crop we are
            dealing with.
        crop_colorized (optional, default ''): path to a colorized crop of a
            Pleiades image
        off_{x,y} (optional, default None): coordinates of the point we want
            to use as origin in the local coordinate system of the computed
            cloud
        ascii_ply (optional, default false): boolean flag to tell if the
            output ply file should be encoded in plain text (ascii).
        utm_zone (optional, default None):
    """
    # colormesh reads the rpc coefficients from a txt file
    rpcfile = common.tmpfile('.txt')
    rpc.write_to_file(rpcfile)

    # a missing colorization image disables colorization
    if not os.path.exists(crop_colorized):
        crop_colorized = ''

    if H is not None:
        hij = " ".join(str(coef) for coef in H.flatten())
    else:
        hij = ""

    command = ["colormesh", cloud, heights, rpcfile, crop_colorized, "-h", hij]
    if ascii_ply:
        command += ["--ascii"]
    if with_normals:
        command += ["--with-normals"]
    if utm_zone:
        command += ["--utm-zone", utm_zone]
    if llbbx:
        lonm, lonM, latm, latM = llbbx
        command += ["--lon-m", lonm, "--lon-M", lonM,
                    "--lat-m", latm, "--lat-M", latM]
    if off_x:
        command += ["--offset_x", "%d" % off_x]
    if off_y:
        command += ["--offset_y", "%d" % off_y]
    common.run(command)
def erosion(out, msk, radius):
    """
    Erodes the accepted regions (ie eliminates more pixels)

    Args:
        out: path to the ouput mask image file
        msk: path to the input mask image file
        radius (in pixels): size of the disk used for the erosion
    """
    # a disk of radius < 2 would not erode anything: skip the call entirely
    if radius < 2:
        return
    common.run('morsi disk%d erosion %s %s' % (int(radius), msk, out))
def keypoints_match(k1, k2, method='relative', sift_thresh=0.6, F=None, model=None, epipolar_threshold=10): """ Find matches among two lists of sift keypoints. Args: k1, k2: paths to text files containing the lists of sift descriptors method (optional, default is 'relative'): flag ('relative' or 'absolute') indicating wether to use absolute distance or relative distance sift_thresh (optional, default is 0.6): threshold for distance between SIFT descriptors. These descriptors are 128-vectors, whose coefficients range from 0 to 255, thus with absolute distance a reasonable value for this threshold is between 200 and 300. With relative distance (ie ratio between distance to nearest and distance to second nearest), the commonly used value for the threshold is 0.6. F (optional): affine fundamental matrix model (optional, default is None): model imposed by RANSAC when searching the set of inliers. If None all matches are considered as inliers. epipolar_threshold (optional, default is 10): maximum distance allowed for a point to the epipolar line of its match. Returns: if any, a numpy 2D array containing the list of inliers matches. 
""" # compute matches mfile = common.tmpfile('.txt') cmd = "matching %s %s -o %s --sift-threshold %f" % (k1, k2, mfile, sift_thresh) if method == 'absolute': cmd += " --absolute" if F is not None: fij = ' '.join(str(x) for x in [F[0, 2], F[1, 2], F[2, 0], F[2, 1], F[2, 2]]) cmd = "%s -f \"%s\"" % (cmd, fij) cmd += " --epipolar-threshold {}".format(epipolar_threshold) common.run(cmd) matches = np.loadtxt(mfile) if matches.ndim == 2: # filter outliers with ransac if model == 'fundamental' and len(matches) >= 7: common.run("ransac fmn 1000 .3 7 %s < %s" % (mfile, mfile)) elif model == 'homography' and len(matches) >= 4: common.run("ransac hom 1000 1 4 /dev/null /dev/null %s < %s" % (mfile, mfile)) elif model == 'hom_fund' and len(matches) >= 7: common.run("ransac hom 1000 2 4 /dev/null /dev/null %s < %s" % (mfile, mfile)) common.run("ransac fmn 1000 .2 7 %s < %s" % (mfile, mfile)) if os.stat(mfile).st_size > 0: # return numpy array of matches return np.loadtxt(mfile)
def global_dsm(tiles):
    """
    Merge the per-tile DSMs into a global DSM, and likewise for the per-tile
    confidence maps when they exist.

    A vrt mosaicking all existing per-tile 'dsm.tif' files is built first,
    then converted to a tiled tif. If cfg contains a 'utm_bbx' the output is
    cropped to that bounding box.

    Args:
        tiles: list of tile dictionaries, each with a 'dir' key
    """
    out_dsm_vrt = os.path.join(cfg['out_dir'], 'dsm.vrt')
    out_dsm_tif = os.path.join(cfg['out_dir'], 'dsm.tif')

    # only mosaic the tiles whose dsm was actually produced
    dsms_list = [os.path.join(t['dir'], 'dsm.tif') for t in tiles]
    dsms = '\n'.join(d for d in dsms_list if os.path.exists(d))

    input_file_list = os.path.join(cfg['out_dir'],
                                   'gdalbuildvrt_input_file_list.txt')

    with open(input_file_list, 'w') as f:
        f.write(dsms)

    common.run("gdalbuildvrt -vrtnodata nan -input_file_list %s %s" % (
        input_file_list, out_dsm_vrt))

    res = cfg['dsm_resolution']

    if 'utm_bbx' in cfg:
        # crop the global dsm to the requested utm bounding box
        bbx = cfg['utm_bbx']
        xoff = bbx[0]
        yoff = bbx[3]
        xsize = int(np.ceil((bbx[1] - bbx[0]) / res))
        ysize = int(np.ceil((bbx[3] - bbx[2]) / res))
        projwin = "-projwin %s %s %s %s" % (xoff, yoff, xoff + xsize * res,
                                            yoff - ysize * res)
    else:
        projwin = ""

    common.run(" ".join(["gdal_translate",
                         "-co TILED=YES -co BIGTIFF=IF_SAFER",
                         "%s %s %s" % (projwin, out_dsm_vrt, out_dsm_tif)]))

    # EXPORT CONFIDENCE (same procedure, but skipped entirely when no tile
    # produced a confidence map)
    out_conf_vrt = os.path.join(cfg['out_dir'], 'confidence.vrt')
    out_conf_tif = os.path.join(cfg['out_dir'], 'confidence.tif')

    dsms_list = [os.path.join(t['dir'], 'confidence.tif') for t in tiles]
    dems_list_ok = [d for d in dsms_list if os.path.exists(d)]
    dsms = '\n'.join(dems_list_ok)

    input_file_list = os.path.join(cfg['out_dir'],
                                   'gdalbuildvrt_input_file_list2.txt')

    if len(dems_list_ok) > 0:
        with open(input_file_list, 'w') as f:
            f.write(dsms)

        common.run("gdalbuildvrt -vrtnodata nan -input_file_list %s %s" % (
            input_file_list, out_conf_vrt))

        # reuses the same projwin crop as the dsm export above
        common.run(" ".join(["gdal_translate",
                             "-co TILED=YES -co BIGTIFF=IF_SAFER",
                             "%s %s %s" % (projwin, out_conf_vrt,
                                           out_conf_tif)]))
def test_run_timeout():
    """
    Check that s2p.common.run() enforces its timeout, using the Unix "sleep"
    utility, and that the launched process is killed when the timeout fires.
    """
    with pytest.raises(subprocess.TimeoutExpired):
        common.run("sleep 10", timeout=1)

    # Collect the names of all processes currently running
    running = [proc.info['name']
               for proc in psutil.process_iter(attrs=['pid', 'name'])]

    # Our "sleep" process must have been killed by run()
    assert "sleep" not in running
def height_map_to_point_cloud(cloud, heights, rpc, H=None, crop_colorized='',
                              off_x=None, off_y=None, ascii_ply=False,
                              with_normals=False, utm_zone=None, llbbx=None):
    """
    Computes a color point cloud from a height map.

    Args:
        cloud: path to the output points cloud (ply format)
        heights: height map, sampled on the same grid as the crop_colorized
            image. In particular, its size is the same as crop_colorized.
        rpc: path to xml file containing RPC data for the current Pleiade
            image
        H (optional, default None): numpy array of size 3x3 defining the
            homography transforming the coordinates system of the original
            full size image into the coordinates system of the crop we are
            dealing with.
        crop_colorized (optional, default ''): path to a colorized crop of a
            Pleiades image
        off_{x,y} (optional, default None): coordinates of the point we want
            to use as origin in the local coordinate system of the computed
            cloud
        ascii_ply (optional, default false): boolean flag to tell if the
            output ply file should be encoded in plain text (ascii).
        utm_zone (optional, default None):
        llbbx (optional, default None): NOTE(review) must be a 4-tuple
            (lon_min, lon_max, lat_min, lat_max); any other length makes the
            %-formatting below raise TypeError — confirm against callers
    """
    # an empty path disables colorization in colormesh
    if not os.path.exists(crop_colorized):
        crop_colorized = ''
    # each optional flag becomes either its CLI fragment or the empty string,
    # which disappears on shell word splitting
    hij = " ".join(str(x) for x in H.flatten()) if H is not None else ""
    asc = "--ascii" if ascii_ply else ""
    nrm = "--with-normals" if with_normals else ""
    utm = "--utm-zone %s" % utm_zone if utm_zone else ""
    lbb = "--lon-m %s --lon-M %s --lat-m %s --lat-M %s" % llbbx if llbbx else ""
    command = "colormesh %s %s %s %s -h \"%s\" %s %s %s %s" % (
        cloud, heights, rpc, crop_colorized, hij, asc, nrm, utm, lbb)
    if off_x:
        command += " --offset_x %d" % off_x
    if off_y:
        command += " --offset_y %d" % off_y
    common.run(command)
def heights_to_ply(tile): """ Generate a ply cloud. Args: tile: a dictionary that provides all you need to process a tile """ # merge the n-1 height maps of the tile (n = nb of images) heights_fusion(tile) # compute a ply from the merged height map out_dir = tile['dir'] x, y, w, h = tile['coordinates'] plyfile = os.path.join(out_dir, 'cloud.ply') plyextrema = os.path.join(out_dir, 'plyextrema.txt') height_map = os.path.join(out_dir, 'height_map.tif') # H is the homography transforming the coordinates system of the original # full size image into the coordinates system of the crop H = np.dot(np.diag([1, 1, 1]), common.matrix_translation(-x, -y)) colors = os.path.join(out_dir, 'ref.png') if cfg['images'][0]['clr']: common.image_crop_gdal(cfg['images'][0]['clr'], x, y, w, h, colors) else: common.image_qauto( common.image_crop_gdal(cfg['images'][0]['img'], x, y, w, h), colors) triangulation.height_map_to_point_cloud(plyfile, height_map, cfg['images'][0]['rpc'], H, colors, utm_zone=cfg['utm_zone'], llbbx=tuple(cfg['ll_bbx'])) # compute the point cloud extrema (xmin, xmax, xmin, ymax) common.run("plyextrema %s %s" % (plyfile, plyextrema)) if cfg['clean_intermediate']: common.remove(height_map) common.remove(colors) common.remove( os.path.join(out_dir, 'cloud_water_image_domain_mask.png'))
def disp_map_to_point_cloud(out, disp, mask, rpc1, rpc2, H1, H2, A, colors,
                            extra='', utm_zone=None, llbbx=None, xybbx=None,
                            xymsk=None):
    """
    Computes a 3D point cloud from a disparity map.

    Args:
        out: path to the output ply file
        disp, mask: paths to the diparity and mask maps
        rpc1, rpc2: paths to the xml files
        H1, H2: path to txt files containing two 3x3 numpy arrays defining
            the rectifying homographies
        A: path to txt file containing the pointing correction matrix
            for im2
        colors: path to the png image containing the colors
        extra (optional, default ''): path to an extra data channel to embed
            in the ply; the default empty string makes disp2ply ignore it
        utm_zone (optional, default None): forces the utm zone
        llbbx (optional, default None): 4-tuple (lon_min, lon_max, lat_min,
            lat_max); other lengths break the %-formatting below
        xybbx (optional, default None): 4-tuple (col_min, col_max, row_min,
            row_max) pixel bounding box in the original image
        xymsk (optional, default None): path to a mask in the original image
            geometry
    """
    # homography coefficients are passed inline on the command line
    href = " ".join(str(x) for x in np.loadtxt(H1).flatten())
    # fold the inverse pointing correction into the secondary homography
    hsec = " ".join(
        str(x)
        for x in np.dot(np.loadtxt(H2),
                        np.linalg.inv(np.loadtxt(A))).flatten())

    # each optional flag is either its CLI fragment or the empty string,
    # which disappears on shell word splitting
    utm = "--utm-zone %s" % utm_zone if utm_zone else ""
    lbb = "--lon-m %s --lon-M %s --lat-m %s --lat-M %s" % llbbx if llbbx else ""
    xbb = "--col-m %s --col-M %s --row-m %s --row-M %s" % xybbx if xybbx else ""
    msk = "--mask-orig %s" % xymsk if xymsk else ""
    command = 'disp2ply {} {} {} {} {}'.format(out, disp, mask, rpc1, rpc2)
    # extra: is an optinonal extra data channel in the ply its default value '' ignores it
    command += ' {} {} -href "{}" -hsec "{}"'.format(colors, extra, href,
                                                     hsec)
    command += ' {} {} {} {}'.format(utm, lbb, xbb, msk)
    common.run(command)
def height_map(out, x, y, w, h, rpc1, rpc2, H1, H2, disp, mask, rpc_err,
               out_filt, A=None):
    """
    Computes an altitude map, on the grid of the original reference image,
    from a disparity map given on the grid of the rectified reference image.

    Args:
        out: path to the output file
        x, y, w, h: four integers defining the rectangular ROI in the original
            image. (x, y) is the top-left corner, and (w, h) are the dimensions
            of the rectangle.
        rpc1, rpc2: paths to the xml files
        H1, H2: path to txt files containing two 3x3 numpy arrays defining
            the rectifying homographies
        disp, mask: paths to the diparity and mask maps
        rpc_err: path to the output rpc_error of triangulation
        out_filt: path to the filter mask applied to the output
        A (optional): path to txt file containing the pointing correction
            matrix for im2
    """
    # triangulate on the rectified grid, into a temporary height map
    rectified_heights = common.tmpfile('.tif')
    height_map_rectified(rpc1, rpc2, H1, H2, disp, mask, rectified_heights,
                         rpc_err, A)

    # resample the heights onto the original image grid
    transfer_map(rectified_heights, H1, x, y, w, h, out)

    # apply output filter: keep heights only where the filter mask is > 0
    common.run('plambda {0} {1} "x 0 > y nan if" -o {1}'.format(out_filt, out))
def heights_to_ply(tile): """ Generate a ply cloud. Args: tile: a dictionary that provides all you need to process a tile """ # merge the n-1 height maps of the tile (n = nb of images) heights_fusion(tile) # compute a ply from the merged height map out_dir = tile['dir'] x, y, w, h = tile['coordinates'] plyfile = os.path.join(out_dir, 'cloud.ply') plyextrema = os.path.join(out_dir, 'plyextrema.txt') height_map = os.path.join(out_dir, 'height_map.tif') colors = os.path.join(out_dir, 'ref.tif') if cfg['images'][0]['clr']: common.image_crop_gdal(cfg['images'][0]['clr'], x, y, w, h, colors) else: common.image_qauto( common.image_crop_gdal(cfg['images'][0]['img'], x, y, w, h), colors) triangulation.height_map_to_point_cloud(plyfile, height_map, cfg['images'][0]['rpcm'], x, y, colors) # compute the point cloud extrema (xmin, xmax, xmin, ymax) common.run("plyextrema %s %s" % (plyfile, plyextrema)) if cfg['clean_intermediate']: common.remove(height_map) common.remove(colors) common.remove(os.path.join(out_dir, 'mask.png'))
def multidisp_map_to_point_cloud(out, disp_list, rpc_ref, rpc_list, colors,
                                 utm_zone=None, llbbx=None, xybbx=None):
    """
    Computes a 3D point cloud from N disparity maps.

    Args:
        out: path to the output ply file
        disp_list: paths to the diparity maps
        rpc_ref, rpc_list: paths to the xml files
        colors: path to the png image containing the colors
    """
    # numbered --dispK / --rpc_secK options, one per secondary image
    disp_args = " ".join('--disp%d %s' % (k + 1, d)
                         for k, d in enumerate(disp_list))
    rpc_args = " ".join('--rpc_sec%d %s' % (k + 1, r)
                        for k, r in enumerate(rpc_list))

    # optional flags collapse to empty strings on shell word splitting
    utm = "--utm-zone %s" % utm_zone if utm_zone else ""
    lbb = "--lon-m %s --lon-M %s --lat-m %s --lat-M %s" % llbbx if llbbx else ""
    xbb = "--col-m %s --col-M %s --row-m %s --row-M %s" % xybbx if xybbx else ""

    command = 'multidisp2ply {} {} {} {} {}'.format(out, len(disp_list),
                                                    disp_args,
                                                    "--rpc_ref %s" % rpc_ref,
                                                    rpc_args)
    command += ' --color {}'.format(colors)
    command += ' {} {} {}'.format(utm, lbb, xbb)
    common.run(command)
def create_rejection_mask(disp, im1, im2, mask):
    """
    Create rejection mask (0 means rejected, 1 means accepted)
    Keep only the points that are matched and present in both input images

    Args:
        disp: path to the input disparity map
        im1, im2: rectified stereo pair
        mask: path to the output rejection mask
    """
    flow = common.tmpfile('.tif')
    warped = common.tmpfile('.tif')
    # turn the horizontal disparity into a 2-channel flow (vertical = 0)
    common.run(["plambda", disp, "x 0 join", "-o", flow])
    # warp im2 towards im1 with that flow
    common.run(["backflow", flow, im2, warped])
    # accept a pixel only where disparity, im1 and the warped im2 are finite
    common.run(["plambda", disp, im1, warped,
                "x isfinite y isfinite z isfinite and and", "-o", mask])
def disparity_to_ply(tile):
    """
    Compute a point cloud from the disparity map of a pair of image tiles.

    This function is called by s2p.main only if there are two input images (not
    three).

    Args:
        tile: dictionary containing the information needed to process a tile.
    """
    out_dir = tile['dir']
    ply_file = os.path.join(out_dir, 'cloud.ply')
    plyextrema = os.path.join(out_dir, 'plyextrema.txt')
    x, y, w, h = tile['coordinates']
    rpc1 = cfg['images'][0]['rpcm']
    rpc2 = cfg['images'][1]['rpcm']

    print('triangulating tile {} {}...'.format(x, y))
    H_ref = os.path.join(out_dir, 'pair_1', 'H_ref.txt')
    H_sec = os.path.join(out_dir, 'pair_1', 'H_sec.txt')
    pointing = os.path.join(cfg['out_dir'], 'global_pointing_pair_1.txt')
    disp = os.path.join(out_dir, 'pair_1', 'rectified_disp.tif')
    extra = os.path.join(out_dir, 'pair_1', 'rectified_disp_confidence.tif')
    if not os.path.exists(extra):  # confidence file not always generated
        extra = ''
    mask_rect = os.path.join(out_dir, 'pair_1', 'rectified_mask.png')
    mask_orig = os.path.join(out_dir, 'mask.png')

    # prepare the image needed to colorize point cloud
    colors = os.path.join(out_dir, 'rectified_ref.png')
    if cfg['images'][0]['clr']:
        hom = np.loadtxt(H_ref)
        # we want rectified_ref.png and rectified_ref.tif to have the same size
        with rasterio.open(os.path.join(out_dir, 'pair_1',
                                        'rectified_ref.tif')) as f:
            ww, hh = f.width, f.height
        common.image_apply_homography(colors, cfg['images'][0]['clr'], hom,
                                      ww, hh)
    else:
        # no color image: rescale the rectified panchro reference to 8 bits
        common.image_qauto(
            os.path.join(out_dir, 'pair_1', 'rectified_ref.tif'), colors)

    # compute the point cloud
    with rasterio.open(disp, 'r') as f:
        disp_img = f.read().squeeze()
    with rasterio.open(mask_rect, 'r') as f:
        mask_rect_img = f.read().squeeze()
    pyproj_out_crs = geographiclib.pyproj_crs(cfg['out_crs'])
    # ply header comment recording the CRS of the coordinates
    proj_com = "CRS {}".format(cfg['out_crs'])
    xyz_array, err = triangulation.disp_to_xyz(rpc1, rpc2,
                                               np.loadtxt(H_ref),
                                               np.loadtxt(H_sec),
                                               disp_img, mask_rect_img,
                                               pyproj_out_crs,
                                               img_bbx=(x, x + w, y, y + h),
                                               A=np.loadtxt(pointing))
    triangulation.filter_xyz_and_write_to_ply(ply_file, xyz_array,
                                              cfg['3d_filtering_r'],
                                              cfg['3d_filtering_n'],
                                              cfg['gsd'], colors, proj_com,
                                              confidence=extra)

    # compute the point cloud extrema (xmin, xmax, xmin, ymax)
    common.run("plyextrema %s %s" % (ply_file, plyextrema))

    if cfg['clean_intermediate']:
        common.remove(H_ref)
        common.remove(H_sec)
        common.remove(disp)
        common.remove(mask_rect)
        common.remove(mask_orig)
        common.remove(colors)
        common.remove(os.path.join(out_dir, 'pair_1', 'rectified_ref.tif'))
def compute_disparity_map(im1, im2, disp, mask, algo, disp_min=None,
                          disp_max=None, extra_params=''):
    """
    Runs a block-matching binary on a pair of stereo-rectified images.

    Args:
        im1, im2: rectified stereo pair
        disp: path to the output diparity map
        mask: path to the output rejection mask
        algo: string used to indicate the desired binary. Currently it can be
            one among 'hirschmuller02', 'hirschmuller08',
            'hirschmuller08_laplacian', 'hirschmuller08_cauchy', 'sgbm',
            'msmw', 'tvl1', 'mgm', 'mgm_multi' and 'micmac'
        disp_min: smallest disparity to consider
        disp_max: biggest disparity to consider
        extra_params: optional string with algorithm-dependent parameters
    """
    # algorithms rectifying the secondary tile themselves get per-dimension
    # bounds; for the others, wrap the scalar bounds in one-element lists so
    # the loop below handles both cases uniformly
    if rectify_secondary_tile_only(algo) is False:
        disp_min = [disp_min]
        disp_max = [disp_max]

    # limit disparity bounds
    # fixed: the original called np.alltrue(...) and discarded the result,
    # which checked nothing; make the invariant an actual assertion
    assert len(disp_min) == len(disp_max)
    for dim in range(len(disp_min)):
        if disp_min[dim] is not None and disp_max[dim] is not None:
            image_size = common.image_size_gdal(im1)
            if disp_max[dim] - disp_min[dim] > image_size[dim]:
                # clamp the disparity range around its center to the image size
                center = 0.5 * (disp_min[dim] + disp_max[dim])
                disp_min[dim] = int(center - 0.5 * image_size[dim])
                disp_max[dim] = int(center + 0.5 * image_size[dim])

        # round disparity bounds
        if disp_min[dim] is not None:
            disp_min[dim] = int(np.floor(disp_min[dim]))
        # fixed: the original tested `disp_max is not None` (the list, always
        # true), so a None element crashed in np.ceil
        if disp_max[dim] is not None:
            disp_max[dim] = int(np.ceil(disp_max[dim]))

    if rectify_secondary_tile_only(algo) is False:
        disp_min = disp_min[0]
        disp_max = disp_max[0]

    # define environment variables
    env = os.environ.copy()
    env['OMP_NUM_THREADS'] = str(cfg['omp_num_threads'])

    # call the block_matching binary
    if algo == 'hirschmuller02':
        bm_binary = 'subpix.sh'
        common.run('{0} {1} {2} {3} {4} {5} {6} {7}'.format(bm_binary, im1,
                                                            im2, disp, mask,
                                                            disp_min,
                                                            disp_max,
                                                            extra_params))
        # extra_params: LoG(0) regionRadius(3)
        # LoG: Laplacian of Gaussian preprocess 1:enabled 0:disabled
        # regionRadius: radius of the window

    if algo == 'hirschmuller08':
        bm_binary = 'callSGBM.sh'
        common.run('{0} {1} {2} {3} {4} {5} {6} {7}'.format(bm_binary, im1,
                                                            im2, disp, mask,
                                                            disp_min,
                                                            disp_max,
                                                            extra_params))
        # extra_params: regionRadius(3) P1(default) P2(default) LRdiff(1)
        # regionRadius: radius of the window
        # P1, P2 : regularization parameters
        # LRdiff: maximum difference between left and right disparity maps

    if algo == 'hirschmuller08_laplacian':
        bm_binary = 'callSGBM_lap.sh'
        common.run('{0} {1} {2} {3} {4} {5} {6} {7}'.format(bm_binary, im1,
                                                            im2, disp, mask,
                                                            disp_min,
                                                            disp_max,
                                                            extra_params))
    if algo == 'hirschmuller08_cauchy':
        bm_binary = 'callSGBM_cauchy.sh'
        common.run('{0} {1} {2} {3} {4} {5} {6} {7}'.format(bm_binary, im1,
                                                            im2, disp, mask,
                                                            disp_min,
                                                            disp_max,
                                                            extra_params))
    if algo == 'sgbm':
        # opencv sgbm function implements a modified version of Hirschmuller's
        # Semi-Global Matching (SGM) algorithm described in "Stereo Processing
        # by Semiglobal Matching and Mutual Information", PAMI, 2008

        p1 = 8  # penalizes disparity changes of 1 between neighbor pixels
        p2 = 32  # penalizes disparity changes of more than 1
        # it is required that p2 > p1. The larger p1, p2, the smoother the disparity

        win = 3  # matched block size. It must be a positive odd number
        lr = 1  # maximum difference allowed in the left-right disparity check
        cost = common.tmpfile('.tif')
        common.run('sgbm {} {} {} {} {} {} {} {} {} {}'.format(im1, im2,
                                                               disp, cost,
                                                               disp_min,
                                                               disp_max,
                                                               win, p1, p2,
                                                               lr))

        # create rejection mask (0 means rejected, 1 means accepted)
        # keep only the points that are matched and present in both input images
        common.run('plambda {0} "x 0 join" | backflow - {2} | plambda {0} {1} - "x isfinite y isfinite z isfinite and and" -o {3}'.format(disp, im1, im2, mask))

    if algo == 'tvl1':
        tvl1 = 'callTVL1.sh'
        common.run('{0} {1} {2} {3} {4}'.format(tvl1, im1, im2, disp, mask),
                   env)

    if algo == 'tvl1_2d':
        tvl1 = 'callTVL1.sh'
        common.run('{0} {1} {2} {3} {4} {5}'.format(tvl1, im1, im2, disp,
                                                    mask, 1),
                   env)

    if algo == 'msmw':
        bm_binary = 'iip_stereo_correlation_multi_win2'
        common.run('{0} -i 1 -n 4 -p 4 -W 5 -x 9 -y 9 -r 1 -d 1 -t -1 -s 0 -b 0 -o 0.25 -f 0 -P 32 -m {1} -M {2} {3} {4} {5} {6}'.format(bm_binary, disp_min, disp_max, im1, im2, disp, mask))

    if algo == 'msmw2':
        bm_binary = 'iip_stereo_correlation_multi_win2_newversion'
        common.run('{0} -i 1 -n 4 -p 4 -W 5 -x 9 -y 9 -r 1 -d 1 -t -1 -s 0 -b 0 -o -0.25 -f 0 -P 32 -D 0 -O 25 -c 0 -m {1} -M {2} {3} {4} {5} {6}'.format(
                bm_binary, disp_min, disp_max, im1, im2, disp, mask),
                   env)

    if algo == 'msmw3':
        bm_binary = 'msmw'
        common.run('{0} -m {1} -M {2} -il {3} -ir {4} -dl {5} -kl {6}'.format(
                bm_binary, disp_min, disp_max, im1, im2, disp, mask))

    if algo == 'mgm':
        env['MEDIAN'] = '1'
        env['CENSUS_NCC_WIN'] = str(cfg['census_ncc_win'])
        env['TSGM'] = '3'

        conf = '{}_confidence.tif'.format(os.path.splitext(disp)[0])

        common.run('{0} -r {1} -R {2} -s vfit -t census -O 8 {3} {4} {5} -confidence_consensusL {6}'.format('mgm',
                                                                                                            disp_min,
                                                                                                            disp_max,
                                                                                                            im1, im2,
                                                                                                            disp, conf),
                   env)

        # produce the mask: rejected pixels are marked with nan of inf in disp
        # map
        common.run('plambda {0} "isfinite" -o {1}'.format(disp, mask))

    if algo == 'mgm_multi_lsd':

        ref = im1
        sec = im2

        wref = common.tmpfile('.tif')
        wsec = common.tmpfile('.tif')
        # TODO TUNE LSD PARAMETERS TO HANDLE DIRECTLY 12 bits images?
        # image dependent weights based on lsd segments
        image_size = common.image_size_gdal(ref)
        common.run('qauto %s | \
                   lsd  -  - | \
                   cut -d\' \' -f1,2,3,4   | \
                   pview segments %d %d | \
                   plambda -  "255 x - 255 / 2 pow 0.1 fmax" -o %s'%(ref,image_size[0], image_size[1],wref))
        # image dependent weights based on lsd segments
        image_size = common.image_size_gdal(sec)
        common.run('qauto %s | \
                   lsd  -  - | \
                   cut -d\' \' -f1,2,3,4   | \
                   pview segments %d %d | \
                   plambda -  "255 x - 255 / 2 pow 0.1 fmax" -o %s'%(sec,image_size[0], image_size[1],wsec))

        env['REMOVESMALLCC'] = str(cfg['stereo_speckle_filter'])
        env['SUBPIX'] = '2'
        env['MEDIAN'] = '1'
        env['CENSUS_NCC_WIN'] = str(cfg['census_ncc_win'])
        # it is required that p2 > p1. The larger p1, p2, the smoother the disparity
        regularity_multiplier = cfg['stereo_regularity_multiplier']

        # increasing these numbers compensates the loss of regularity after
        # incorporating LSD weights
        P1 = 12*regularity_multiplier   # penalizes disparity changes of 1 between neighbor pixels
        P2 = 48*regularity_multiplier  # penalizes disparity changes of more than 1
        conf = disp+'.confidence.tif'
        common.run('{0} -r {1} -R {2} -S 6 -s vfit -t census -O 8 -P1 {7} -P2 {8} -wl {3} -wr {4} -confidence_consensusL {10} {5} {6} {9}'.format('mgm_multi',
                                                                                                                                                  disp_min,
                                                                                                                                                  disp_max,
                                                                                                                                                  wref,wsec,
                                                                                                                                                  im1, im2,
                                                                                                                                                  P1, P2,
                                                                                                                                                  disp, conf),
                   env)

        # produce the mask: rejected pixels are marked with nan of inf in disp
        # map
        common.run('plambda {0} "isfinite" -o {1}'.format(disp, mask))

    if algo == 'mgm_multi':
        env['REMOVESMALLCC'] = str(cfg['stereo_speckle_filter'])
        env['MINDIFF'] = '1'
        env['CENSUS_NCC_WIN'] = str(cfg['census_ncc_win'])
        env['SUBPIX'] = '2'
        # it is required that p2 > p1. The larger p1, p2, the smoother the disparity
        regularity_multiplier = cfg['stereo_regularity_multiplier']
        P1 = 8*regularity_multiplier   # penalizes disparity changes of 1 between neighbor pixels
        P2 = 32*regularity_multiplier  # penalizes disparity changes of more than 1

        conf = '{}_confidence.tif'.format(os.path.splitext(disp)[0])

        common.run('{0} -r {1} -R {2} -S 6 -s vfit -t census {3} {4} {5} -confidence_consensusL {6}'.format('mgm_multi',
                                                                                                            disp_min,
                                                                                                            disp_max,
                                                                                                            im1, im2,
                                                                                                            disp, conf),
                   env)

        # produce the mask: rejected pixels are marked with nan of inf in disp
        # map
        common.run('plambda {0} "isfinite" -o {1}'.format(disp, mask))

    if algo == 'micmac':
        # add micmac binaries to the PATH environment variable
        s2p_dir = os.path.dirname(os.path.dirname(os.path.realpath(os.path.abspath(__file__))))
        micmac_bin = os.path.join(s2p_dir, 'bin', 'micmac', 'bin')
        os.environ['PATH'] = os.environ['PATH'] + os.pathsep + micmac_bin

        # prepare micmac xml params file
        micmac_params = os.path.join(s2p_dir, '3rdparty', 'micmac_params.xml')
        work_dir = os.path.dirname(os.path.abspath(im1))
        common.run('cp {0} {1}'.format(micmac_params, work_dir))

        # run MICMAC
        common.run('MICMAC {0:s}'.format(os.path.join(work_dir, 'micmac_params.xml')))

        # copy output disp map
        micmac_disp = os.path.join(work_dir, 'MEC-EPI',
                                   'Px1_Num6_DeZoom1_LeChantier.tif')
        disp = os.path.join(work_dir, 'rectified_disp.tif')
        common.run('cp {0} {1}'.format(micmac_disp, disp))

        # compute mask by rejecting the 10% of pixels with lowest correlation score
        micmac_cost = os.path.join(work_dir, 'MEC-EPI',
                                   'Correl_LeChantier_Num_5.tif')
        mask = os.path.join(work_dir, 'rectified_mask.png')
        common.run('plambda {0} "x x%q10 < 0 255 if" -o {1}'.format(micmac_cost, mask))
def test_run_success():
    """Check that s2p.common.run() completes without raising on the Unix "true" command."""
    common.run("true")
def disparity_to_ply(tile):
    """
    Compute a point cloud from the disparity map of a pair of image tiles.

    Reads the rectified disparity map and masks produced for pair_1 of the
    given tile, triangulates them into a colorized ply point cloud, and
    records the cloud extrema with the plyextrema tool.

    Args:
        tile: dictionary containing the information needed to process a tile.
            Must provide tile['dir'] (tile output directory) and
            tile['coordinates'] (x, y, w, h ROI in the full image).
    """
    out_dir = os.path.join(tile['dir'])
    ply_file = os.path.join(out_dir, 'cloud.ply')
    plyextrema = os.path.join(out_dir, 'plyextrema.txt')
    x, y, w, h = tile['coordinates']
    rpc1 = cfg['images'][0]['rpc']
    rpc2 = cfg['images'][1]['rpc']

    # a stderr.log left behind by a previous step marks the tile as failed;
    # skip triangulation in that case
    if os.path.exists(os.path.join(out_dir, 'stderr.log')):
        print('triangulation: stderr.log exists')
        print('pair_1 not processed on tile {} {}'.format(x, y))
        return

    print('triangulating tile {} {}...'.format(x, y))
    # This function is only called when there is a single pair (pair_1)
    H_ref = os.path.join(out_dir, 'pair_1', 'H_ref.txt')
    H_sec = os.path.join(out_dir, 'pair_1', 'H_sec.txt')
    pointing = os.path.join(cfg['out_dir'], 'global_pointing_pair_1.txt')
    disp = os.path.join(out_dir, 'pair_1', 'rectified_disp.tif')
    extra = os.path.join(out_dir, 'pair_1', 'rectified_disp_confidence.tif')
    # confidence map is optional: pass an empty path when it was not produced
    if not os.path.exists(extra):
        extra = ''
    mask_rect = os.path.join(out_dir, 'pair_1', 'rectified_mask.png')
    mask_orig = os.path.join(out_dir, 'cloud_water_image_domain_mask.png')

    # prepare the image needed to colorize point cloud
    colors = os.path.join(out_dir, 'rectified_ref.png')
    if cfg['images'][0]['clr']:
        # warp the color image with the rectifying homography so that it is
        # aligned with the rectified disparity map
        hom = np.loadtxt(H_ref)
        roi = [[x, y], [x + w, y], [x + w, y + h], [x, y + h]]
        ww, hh = common.bounding_box2D(common.points_apply_homography(
            hom, roi))[2:]
        tmp = common.tmpfile('.tif')
        common.image_apply_homography(tmp, cfg['images'][0]['clr'], hom,
                                      ww + 2 * cfg['horizontal_margin'],
                                      hh + 2 * cfg['vertical_margin'])
        common.image_qauto(tmp, colors)
    else:
        # no color image: use a quantized version of the rectified reference
        common.image_qauto(
            os.path.join(out_dir, 'pair_1', 'rectified_ref.tif'), colors)

    # compute the point cloud
    triangulation.disp_map_to_point_cloud(ply_file, disp, mask_rect, rpc1,
                                          rpc2, H_ref, H_sec, pointing,
                                          colors, extra,
                                          utm_zone=cfg['utm_zone'],
                                          llbbx=tuple(cfg['ll_bbx']),
                                          xybbx=(x, x + w, y, y + h),
                                          xymsk=mask_orig)

    # compute the point cloud extrema (xmin, xmax, ymin, ymax)
    common.run("plyextrema %s %s" % (ply_file, plyextrema))

    # optionally remove intermediate files to save disk space
    if cfg['clean_intermediate']:
        common.remove(H_ref)
        common.remove(H_sec)
        common.remove(disp)
        common.remove(mask_rect)
        common.remove(mask_orig)
        common.remove(colors)
        common.remove(os.path.join(out_dir, 'pair_1', 'rectified_ref.tif'))
# generate image print("Generating {} ...".format(out_img_file)) # First get input image size sz = common.image_size_gdal(in_img_file) w = sz[0] h = sz[1] # Generate a temporary vrt file to have the proper geotransform fd, tmp_vrt = tempfile.mkstemp(suffix='.vrt', dir=os.path.dirname(out_img_file)) os.close(fd) common.run('gdal_translate -of VRT -a_ullr 0 0 %d %d %s %s' % (w, h, in_img_file, tmp_vrt)) common.run(( 'gdalwarp -co RPB=NO -co PROFILE=GeoTIFF -r %s -co "BIGTIFF=IF_NEEDED" -co "TILED=YES" -ovr NONE -overwrite -to SRC_METHOD=NO_GEOTRANSFORM -to DST_METHOD=NO_GEOTRANSFORM -tr' ' %d %d %s %s') % (filt, scale_x, scale_y, tmp_vrt, out_img_file)) try: # Remove aux files if any os.remove(out_img_file + ".aux.xml") except OSError: pass # Clean tmp vrt file os.remove(tmp_vrt) print("Done")
def multidisparities_to_ply(tile):
    """
    Compute a point cloud from the disparity maps of N-pairs of image tiles.

    For each secondary image with an existing rectified disparity map, the
    1D disparities are converted to absolute 2D positions in the secondary
    image, warped back to the reference image grid, and fed to the
    multi-disparity triangulation module together with the corresponding RPCs.

    Args:
        tile: dictionary containing the information needed to process a tile.
            Must provide tile['dir'] and tile['coordinates'] (x, y, w, h).

    # There is no guarantee that this function works with z!=1
    """
    out_dir = os.path.join(tile['dir'])
    ply_file = os.path.join(out_dir, 'cloud.ply')
    plyextrema = os.path.join(out_dir, 'plyextrema.txt')
    x, y, w, h = tile['coordinates']

    rpc_ref = cfg['images'][0]['rpc']
    disp_list = list()
    rpc_list = list()

    mask_orig = os.path.join(out_dir, 'cloud_water_image_domain_mask.png')

    print('triangulating tile {} {}...'.format(x, y))
    # one pair per secondary image
    n = len(cfg['images']) - 1
    for i in range(n):
        pair = 'pair_%d' % (i + 1)
        H_ref = os.path.join(out_dir, pair, 'H_ref.txt')
        H_sec = os.path.join(out_dir, pair, 'H_sec.txt')
        disp = os.path.join(out_dir, pair, 'rectified_disp.tif')
        mask_rect = os.path.join(out_dir, pair, 'rectified_mask.png')
        disp2D = os.path.join(out_dir, pair, 'disp2D.tif')
        rpc_sec = cfg['images'][i + 1]['rpc']

        if os.path.exists(disp):
            # homography for warp: compose the reference rectifying homography
            # with the tile translation
            T = common.matrix_translation(x, y)
            hom_ref = np.loadtxt(H_ref)
            hom_ref_shift = np.dot(hom_ref, T)

            # homography for 1D to 2D conversion
            hom_sec = np.loadtxt(H_sec)
            if cfg["use_global_pointing_for_geometric_triangulation"] is True:
                # undo the global pointing correction before inverting
                pointing = os.path.join(cfg['out_dir'],
                                        'global_pointing_%s.txt' % pair)
                hom_pointing = np.loadtxt(pointing)
                hom_sec = np.dot(hom_sec, np.linalg.inv(hom_pointing))
            hom_sec_shift_inv = np.linalg.inv(hom_sec)

            # serialize both 3x3 matrices as space-separated coefficient lists
            # for the plambda/homwarp command lines below
            h1 = " ".join(str(x) for x in hom_ref_shift.flatten())
            h2 = " ".join(str(x) for x in hom_sec_shift_inv.flatten())

            # relative disparity map to absolute disparity map
            tmp_abs = common.tmpfile('.tif')
            os.environ["PLAMBDA_GETPIXEL"] = "0"
            common.run('plambda %s %s "y 0 = nan x[0] :i + x[1] :j + 1 3 njoin if" -o %s' % (disp, mask_rect, tmp_abs))

            # 1d to 2d conversion
            tmp_1d_to_2d = common.tmpfile('.tif')
            common.run('plambda %s "%s 9 njoin x mprod" -o %s' % (tmp_abs, h2, tmp_1d_to_2d))

            # warp back to the original (unrectified) reference grid
            tmp_warp = common.tmpfile('.tif')
            common.run('homwarp -o 2 "%s" %d %d %s %s' % (h1, w, h, tmp_1d_to_2d, tmp_warp))

            # set masked value to NaN
            exp = 'y 0 = nan x if'
            common.run('plambda %s %s "%s" -o %s' % (tmp_warp, mask_orig, exp, disp2D))
            # disp2D contains positions in the secondary image

            # added input data for triangulation module
            disp_list.append(disp2D)
            rpc_list.append(rpc_sec)

            if cfg['clean_intermediate']:
                common.remove(H_ref)
                common.remove(H_sec)
                common.remove(disp)
                common.remove(mask_rect)
                common.remove(mask_orig)

    # crop the color (or panchromatic) reference image to colorize the cloud
    colors = os.path.join(out_dir, 'ref.png')
    if cfg['images'][0]['clr']:
        common.image_crop_gdal(cfg['images'][0]['clr'], x, y, w, h, colors)
    else:
        common.image_qauto(
            common.image_crop_gdal(cfg['images'][0]['img'], x, y, w, h),
            colors)

    # compute the point cloud
    triangulation.multidisp_map_to_point_cloud(ply_file, disp_list, rpc_ref,
                                               rpc_list, colors,
                                               utm_zone=cfg['utm_zone'],
                                               llbbx=tuple(cfg['ll_bbx']),
                                               xybbx=(x, x + w, y, y + h))

    # compute the point cloud extrema (xmin, xmax, ymin, ymax)
    common.run("plyextrema %s %s" % (ply_file, plyextrema))

    if cfg['clean_intermediate']:
        common.remove(colors)
def global_dsm(tiles):
    """
    Merge the per-tile DSMs (and confidence maps, if any) into global rasters.

    Builds a gdalbuildvrt mosaic of all existing per-tile 'dsm.tif' files and
    translates it to a single compressed GeoTIFF 'dsm.tif' in cfg['out_dir'],
    optionally cropped to the ROI given by cfg['roi_geojson']. The same is
    done for the per-tile 'confidence.tif' files when at least one exists.

    Args:
        tiles: list of tile dictionaries; each must provide t['dir'].
    """
    out_dsm_vrt = os.path.join(cfg['out_dir'], 'dsm.vrt')
    out_dsm_tif = os.path.join(cfg['out_dir'], 'dsm.tif')

    # keep only the tiles whose DSM was actually produced
    dsms_list = [os.path.join(t['dir'], 'dsm.tif') for t in tiles]
    dsms = '\n'.join(d for d in dsms_list if os.path.exists(d))

    input_file_list = os.path.join(cfg['out_dir'],
                                   'gdalbuildvrt_input_file_list.txt')

    with open(input_file_list, 'w') as f:
        f.write(dsms)

    common.run("gdalbuildvrt -vrtnodata nan -input_file_list %s %s" % (
        input_file_list, out_dsm_vrt))

    res = cfg['dsm_resolution']

    if 'roi_geojson' in cfg:
        # crop the output to the requested ROI, expressed as a lon/lat polygon
        # reprojected into the output CRS
        ll_poly = geographiclib.read_lon_lat_poly_from_geojson(
            cfg['roi_geojson'])
        pyproj_crs = geographiclib.pyproj_crs(cfg['out_crs'])
        bbx = geographiclib.crs_bbx(ll_poly, pyproj_crs)
        xoff = bbx[0]
        yoff = bbx[3]
        # round the window size up to a whole number of pixels at 'res'
        xsize = int(np.ceil((bbx[1] - bbx[0]) / res))
        ysize = int(np.ceil((bbx[3] - bbx[2]) / res))
        projwin = "-projwin {} {} {} {}".format(xoff, yoff,
                                                xoff + xsize * res,
                                                yoff - ysize * res)
    else:
        projwin = ""

    common.run(" ".join(["gdal_translate",
                         "-co", "TILED=YES",
                         "-co", "COMPRESS=DEFLATE",
                         "-co", "PREDICTOR=2",
                         "-co", "BIGTIFF=IF_SAFER",
                         projwin, out_dsm_vrt, out_dsm_tif]))

    # EXPORT CONFIDENCE
    out_conf_vrt = os.path.join(cfg['out_dir'], 'confidence.vrt')
    out_conf_tif = os.path.join(cfg['out_dir'], 'confidence.tif')

    dsms_list = [os.path.join(t['dir'], 'confidence.tif') for t in tiles]
    dems_list_ok = [d for d in dsms_list if os.path.exists(d)]
    dsms = '\n'.join(dems_list_ok)

    input_file_list = os.path.join(cfg['out_dir'],
                                   'gdalbuildvrt_input_file_list2.txt')

    # confidence maps are optional: export only when at least one tile has one
    if len(dems_list_ok) > 0:
        with open(input_file_list, 'w') as f:
            f.write(dsms)

        common.run("gdalbuildvrt -vrtnodata nan -input_file_list %s %s" % (
            input_file_list, out_conf_vrt))

        # NOTE: reuses the same 'projwin' crop window as the DSM export above
        common.run(" ".join(["gdal_translate",
                             "-co TILED=YES -co BIGTIFF=IF_SAFER",
                             "%s %s %s" % (projwin, out_conf_vrt,
                                           out_conf_tif)]))
def disparity_to_ply(tile):
    """
    Compute a point cloud from the disparity map of a pair of image tiles.

    Reads the rectified disparity map and mask of pair_1 with rasterio,
    triangulates them with triangulation.disp_to_xyz, optionally applies
    3D outlier filtering, and writes the colorized cloud to 'cloud.ply'.

    Args:
        tile: dictionary containing the information needed to process a tile.
            Must provide tile['dir'] and tile['coordinates'] (x, y, w, h).
    """
    out_dir = os.path.join(tile['dir'])
    ply_file = os.path.join(out_dir, 'cloud.ply')
    plyextrema = os.path.join(out_dir, 'plyextrema.txt')
    x, y, w, h = tile['coordinates']
    rpc1 = cfg['images'][0]['rpcm']
    rpc2 = cfg['images'][1]['rpcm']

    # a stderr.log left behind by a previous step marks the tile as failed;
    # skip triangulation in that case
    if os.path.exists(os.path.join(out_dir, 'stderr.log')):
        print('triangulation: stderr.log exists')
        print('pair_1 not processed on tile {} {}'.format(x, y))
        return

    print('triangulating tile {} {}...'.format(x, y))
    # This function is only called when there is a single pair (pair_1)
    H_ref = os.path.join(out_dir, 'pair_1', 'H_ref.txt')
    H_sec = os.path.join(out_dir, 'pair_1', 'H_sec.txt')
    pointing = os.path.join(cfg['out_dir'], 'global_pointing_pair_1.txt')
    disp = os.path.join(out_dir, 'pair_1', 'rectified_disp.tif')
    extra = os.path.join(out_dir, 'pair_1', 'rectified_disp_confidence.tif')
    # confidence map is optional: pass an empty path when it was not produced
    if not os.path.exists(extra):
        extra = ''
    mask_rect = os.path.join(out_dir, 'pair_1', 'rectified_mask.png')
    mask_orig = os.path.join(out_dir, 'mask.png')

    # prepare the image needed to colorize point cloud
    colors = os.path.join(out_dir, 'rectified_ref.png')
    if cfg['images'][0]['clr']:
        hom = np.loadtxt(H_ref)
        # We want rectified_ref.png and rectified_ref.tif to have the same size
        with rasterio.open(os.path.join(out_dir, 'pair_1',
                                        'rectified_ref.tif')) as f:
            ww, hh = f.width, f.height
        common.image_apply_homography(colors, cfg['images'][0]['clr'],
                                      hom, ww, hh)
    else:
        # no color image: use a quantized version of the rectified reference
        common.image_qauto(os.path.join(out_dir, 'pair_1',
                                        'rectified_ref.tif'), colors)

    # compute the point cloud
    with rasterio.open(disp, 'r') as f:
        disp_img = f.read().squeeze()
    with rasterio.open(mask_rect, 'r') as f:
        mask_rect_img = f.read().squeeze()
    # cfg['utm_zone'] is e.g. '31N': the trailing hemisphere letter is
    # stripped before converting the zone number to int
    xyz_array, err = triangulation.disp_to_xyz(rpc1, rpc2,
                                               np.loadtxt(H_ref),
                                               np.loadtxt(H_sec),
                                               disp_img, mask_rect_img,
                                               int(cfg['utm_zone'][:-1]),
                                               img_bbx=(x, x+w, y, y+h),
                                               A=np.loadtxt(pointing))

    # 3D filtering (in-place removal of statistical outliers)
    if cfg['3d_filtering_r'] and cfg['3d_filtering_n']:
        triangulation.filter_xyz(xyz_array, cfg['3d_filtering_r'],
                                 cfg['3d_filtering_n'], cfg['gsd'])

    # flatten the xyz array into a list and remove nan points
    xyz_list = xyz_array.reshape(-1, 3)
    valid = np.all(np.isfinite(xyz_list), axis=1)

    # write the point cloud to a ply file; colors are reshaped from
    # rasterio's (bands, rows, cols) layout to one row per pixel
    with rasterio.open(colors, 'r') as f:
        img = f.read()
    colors_list = img.transpose(1, 2, 0).reshape(-1, img.shape[0])
    ply.write_3d_point_cloud_to_ply(ply_file, xyz_list[valid],
                                    colors=colors_list[valid],
                                    extra_properties=None,
                                    extra_properties_names=None,
                                    comments=["created by S2P",
                                              "projection: UTM {}".format(cfg['utm_zone'])])

    # compute the point cloud extrema (xmin, xmax, ymin, ymax)
    common.run("plyextrema %s %s" % (ply_file, plyextrema))

    # optionally remove intermediate files to save disk space
    if cfg['clean_intermediate']:
        common.remove(H_ref)
        common.remove(H_sec)
        common.remove(disp)
        common.remove(mask_rect)
        common.remove(mask_orig)
        common.remove(colors)
        common.remove(os.path.join(out_dir, 'pair_1', 'rectified_ref.tif'))
def test_run_error():
    """Check that s2p.common.run() raises CalledProcessError when the Unix "false" command fails."""
    with pytest.raises(subprocess.CalledProcessError):
        common.run("false")
def plot_matches(im1, im2, rpc1, rpc2, matches, x=None, y=None, w=None, h=None,
                 outfile=None):
    """
    Plot matches on Pleiades images.

    Args:
        im1, im2: paths to full Pleiades images
        rpc1, rpc2: two instances of the rpcm.RPCModel class
        matches: 2D numpy array of size 4xN containing a list of matches (a
            list of pairs of points, each pair being represented by x1, y1,
            x2, y2). The coordinates are given in the frame of the full
            images.
        x, y, w, h (optional, default is None): ROI in the reference image
        outfile (optional, default is None): path to the output file. If
            None, the file image is displayed using the pvflip viewer

    Returns:
        path to the displayed output (the copied file when outfile is given,
        else the temporary plot image), or None when there is nothing to plot
    """
    # if no matches, no plot
    if not matches.size:
        print("visualisation.plot_matches: nothing to plot")
        return

    # determine the region to crop in im1: use the provided ROI coordinates
    # when given, fall back to the bounding box of the matches otherwise
    x1 = x if x is not None else np.min(matches[:, 0])
    y1 = y if y is not None else np.min(matches[:, 1])
    w1 = w if w is not None else np.max(matches[:, 0]) - x1
    h1 = h if h is not None else np.max(matches[:, 1]) - y1
    # the corresponding region in im2 is derived from the RPC models
    x2, y2, w2, h2 = rpc_utils.corresponding_roi(rpc1, rpc2, x1, y1, w1, h1)

    # do the crops
    crop1 = common.image_qauto(common.image_crop_gdal(im1, x1, y1, w1, h1))
    crop2 = common.image_qauto(common.image_crop_gdal(im2, x2, y2, w2, h2))

    # compute matches coordinates in the cropped images
    pts1 = matches[:, 0:2] - [x1, y1]
    pts2 = matches[:, 2:4] - [x2, y2]

    # plot the matches on the two crops
    to_display = plot_matches_low_level(crop1, crop2, np.hstack((pts1, pts2)))
    if outfile is None:
        # display with the pvflip viewer, in the background
        os.system('v %s &' % (to_display))
        return to_display
    else:
        common.run('cp %s %s' % (to_display, outfile))
        return outfile
def compute_disparity_map(im1, im2, disp, mask, algo, disp_min=None,
                          disp_max=None, timeout=600, max_disp_range=None,
                          extra_params=''):
    """
    Runs a block-matching binary on a pair of stereo-rectified images.

    Args:
        im1, im2: rectified stereo pair
        disp: path to the output diparity map
        mask: path to the output rejection mask
        algo: string used to indicate the desired binary. Currently it can
            be one among 'hirschmuller02', 'hirschmuller08',
            'hirschmuller08_laplacian', 'hirschmuller08_cauchy', 'sgbm',
            'msmw', 'tvl1', 'mgm', 'mgm_multi' and 'micmac'
        disp_min: smallest disparity to consider
        disp_max: biggest disparity to consider
        timeout: time in seconds after which the disparity command will
            raise an error if it hasn't returned. Only applies to `mgm*`
            algorithms.
        max_disp_range: maximum allowed width of the [disp_min, disp_max]
            interval; None disables the check
        extra_params: optional string with algorithm-dependent parameters

    Raises:
        MaxDisparityRangeError: if max_disp_range is defined, and if the
            [disp_min, disp_max] range is greater than max_disp_range, to
            avoid endless computation.
    """
    # limit disparity bounds: clamp the search range to the image width,
    # keeping it centered on the original interval
    if disp_min is not None and disp_max is not None:
        image_size = common.image_size_gdal(im1)
        if disp_max - disp_min > image_size[0]:
            center = 0.5 * (disp_min + disp_max)
            disp_min = int(center - 0.5 * image_size[0])
            disp_max = int(center + 0.5 * image_size[0])

    # round disparity bounds outward to integers
    if disp_min is not None:
        disp_min = int(np.floor(disp_min))
    if disp_max is not None:
        disp_max = int(np.ceil(disp_max))

    if (
        max_disp_range is not None
        and disp_max - disp_min > max_disp_range
    ):
        raise MaxDisparityRangeError(
            'Disparity range [{}, {}] greater than {}'.format(
                disp_min, disp_max, max_disp_range
            )
        )

    # define environment variables (inherited by the matcher subprocesses)
    env = os.environ.copy()
    env['OMP_NUM_THREADS'] = str(cfg['omp_num_threads'])

    # call the block_matching binary
    if algo == 'hirschmuller02':
        bm_binary = 'subpix.sh'
        common.run('{0} {1} {2} {3} {4} {5} {6} {7}'.format(bm_binary, im1,
                                                            im2, disp, mask,
                                                            disp_min,
                                                            disp_max,
                                                            extra_params))
        # extra_params: LoG(0) regionRadius(3)
        # LoG: Laplacian of Gaussian preprocess 1:enabled 0:disabled
        # regionRadius: radius of the window

    if algo == 'hirschmuller08':
        bm_binary = 'callSGBM.sh'
        common.run('{0} {1} {2} {3} {4} {5} {6} {7}'.format(bm_binary, im1,
                                                            im2, disp, mask,
                                                            disp_min,
                                                            disp_max,
                                                            extra_params))
        # extra_params: regionRadius(3) P1(default) P2(default) LRdiff(1)
        # regionRadius: radius of the window
        # P1, P2 : regularization parameters
        # LRdiff: maximum difference between left and right disparity maps

    if algo == 'hirschmuller08_laplacian':
        bm_binary = 'callSGBM_lap.sh'
        common.run('{0} {1} {2} {3} {4} {5} {6} {7}'.format(bm_binary, im1,
                                                            im2, disp, mask,
                                                            disp_min,
                                                            disp_max,
                                                            extra_params))

    if algo == 'hirschmuller08_cauchy':
        bm_binary = 'callSGBM_cauchy.sh'
        common.run('{0} {1} {2} {3} {4} {5} {6} {7}'.format(bm_binary, im1,
                                                            im2, disp, mask,
                                                            disp_min,
                                                            disp_max,
                                                            extra_params))

    if algo == 'sgbm':
        # opencv sgbm function implements a modified version of Hirschmuller's
        # Semi-Global Matching (SGM) algorithm described in "Stereo Processing
        # by Semiglobal Matching and Mutual Information", PAMI, 2008

        p1 = 8  # penalizes disparity changes of 1 between neighbor pixels
        p2 = 32  # penalizes disparity changes of more than 1
        # it is required that p2 > p1. The larger p1, p2, the smoother the disparity

        win = 3  # matched block size. It must be a positive odd number
        lr = 1  # maximum difference allowed in the left-right disparity check
        cost = common.tmpfile('.tif')
        common.run('sgbm {} {} {} {} {} {} {} {} {} {}'.format(im1, im2,
                                                               disp, cost,
                                                               disp_min,
                                                               disp_max,
                                                               win, p1, p2,
                                                               lr))
        create_rejection_mask(disp, im1, im2, mask)

    if algo == 'tvl1':
        tvl1 = 'callTVL1.sh'
        common.run('{0} {1} {2} {3} {4}'.format(tvl1, im1, im2, disp, mask),
                   env)

    if algo == 'msmw':
        bm_binary = 'iip_stereo_correlation_multi_win2'
        common.run('{0} -i 1 -n 4 -p 4 -W 5 -x 9 -y 9 -r 1 -d 1 -t -1 -s 0 -b 0 -o 0.25 -f 0 -P 32 -m {1} -M {2} {3} {4} {5} {6}'.format(bm_binary,
                                                                                                                                          disp_min,
                                                                                                                                          disp_max,
                                                                                                                                          im1, im2,
                                                                                                                                          disp,
                                                                                                                                          mask))

    if algo == 'msmw2':
        bm_binary = 'iip_stereo_correlation_multi_win2_newversion'
        common.run('{0} -i 1 -n 4 -p 4 -W 5 -x 9 -y 9 -r 1 -d 1 -t -1 -s 0 -b 0 -o -0.25 -f 0 -P 32 -D 0 -O 25 -c 0 -m {1} -M {2} {3} {4} {5} {6}'.format(
            bm_binary, disp_min, disp_max, im1, im2, disp, mask), env)

    if algo == 'msmw3':
        bm_binary = 'msmw'
        common.run('{0} -m {1} -M {2} -il {3} -ir {4} -dl {5} -kl {6}'.format(
            bm_binary, disp_min, disp_max, im1, im2, disp, mask))

    if algo == 'mgm':
        env['MEDIAN'] = '1'
        env['CENSUS_NCC_WIN'] = str(cfg['census_ncc_win'])
        env['TSGM'] = '3'

        nb_dir = cfg['mgm_nb_directions']

        conf = '{}_confidence.tif'.format(os.path.splitext(disp)[0])

        common.run(
            '{executable} '
            '-r {disp_min} -R {disp_max} '
            '-s vfit '
            '-t census '
            '-O {nb_dir} '
            '-confidence_consensusL {conf} '
            '{im1} {im2} {disp}'.format(
                executable='mgm',
                disp_min=disp_min,
                disp_max=disp_max,
                nb_dir=nb_dir,
                conf=conf,
                im1=im1,
                im2=im2,
                disp=disp,
            ),
            env=env,
            timeout=timeout,
        )
        create_rejection_mask(disp, im1, im2, mask)

    if algo == 'mgm_multi_lsd':
        ref = im1
        sec = im2
        wref = common.tmpfile('.tif')
        wsec = common.tmpfile('.tif')
        # TODO TUNE LSD PARAMETERS TO HANDLE DIRECTLY 12 bits images?
        # image dependent weights based on lsd segments
        image_size = common.image_size_gdal(ref)
        # TODO refactor this command to not use shell=True
        common.run('qauto %s | \
                   lsd - - | \
                   cut -d\' \' -f1,2,3,4   | \
                   pview segments %d %d | \
                   plambda -  "255 x - 255 / 2 pow 0.1 fmax" -o %s'%(ref,image_size[0], image_size[1],wref),
                   shell=True)
        # image dependent weights based on lsd segments
        image_size = common.image_size_gdal(sec)
        # TODO refactor this command to not use shell=True
        common.run('qauto %s | \
                   lsd - - | \
                   cut -d\' \' -f1,2,3,4   | \
                   pview segments %d %d | \
                   plambda -  "255 x - 255 / 2 pow 0.1 fmax" -o %s'%(sec,image_size[0], image_size[1],wsec),
                   shell=True)

        env['REMOVESMALLCC'] = str(cfg['stereo_speckle_filter'])
        env['SUBPIX'] = '2'
        env['MEDIAN'] = '1'
        env['CENSUS_NCC_WIN'] = str(cfg['census_ncc_win'])
        # it is required that p2 > p1. The larger p1, p2, the smoother the disparity
        regularity_multiplier = cfg['stereo_regularity_multiplier']

        nb_dir = cfg['mgm_nb_directions']

        # increasing these numbers compensates the loss of regularity after
        # incorporating LSD weights
        P1 = 12*regularity_multiplier  # penalizes disparity changes of 1 between neighbor pixels
        P2 = 48*regularity_multiplier  # penalizes disparity changes of more than 1
        conf = disp+'.confidence.tif'

        common.run(
            '{executable} '
            '-r {disp_min} -R {disp_max} '
            '-S 6 '
            '-s vfit '
            '-t census '
            '-O {nb_dir} '
            '-wl {wref} -wr {wsec} '
            '-P1 {P1} -P2 {P2} '
            '-confidence_consensusL {conf} '
            '{im1} {im2} {disp}'.format(
                executable='mgm_multi',
                disp_min=disp_min,
                disp_max=disp_max,
                nb_dir=nb_dir,
                wref=wref,
                wsec=wsec,
                P1=P1,
                P2=P2,
                conf=conf,
                im1=im1,
                im2=im2,
                disp=disp,
            ),
            env=env,
            timeout=timeout,
        )
        create_rejection_mask(disp, im1, im2, mask)

    if algo == 'mgm_multi':
        env['REMOVESMALLCC'] = str(cfg['stereo_speckle_filter'])
        env['MINDIFF'] = '1'
        env['CENSUS_NCC_WIN'] = str(cfg['census_ncc_win'])
        env['SUBPIX'] = '2'
        # it is required that p2 > p1. The larger p1, p2, the smoother the disparity
        regularity_multiplier = cfg['stereo_regularity_multiplier']

        nb_dir = cfg['mgm_nb_directions']

        P1 = 8*regularity_multiplier  # penalizes disparity changes of 1 between neighbor pixels
        P2 = 32*regularity_multiplier  # penalizes disparity changes of more than 1
        conf = '{}_confidence.tif'.format(os.path.splitext(disp)[0])

        common.run(
            '{executable} '
            '-r {disp_min} -R {disp_max} '
            '-S 6 '
            '-s vfit '
            '-t census '
            '-O {nb_dir} '
            '-P1 {P1} -P2 {P2} '
            '-confidence_consensusL {conf} '
            '{im1} {im2} {disp}'.format(
                executable='mgm_multi',
                disp_min=disp_min,
                disp_max=disp_max,
                nb_dir=nb_dir,
                P1=P1,
                P2=P2,
                conf=conf,
                im1=im1,
                im2=im2,
                disp=disp,
            ),
            env=env,
            timeout=timeout,
        )
        create_rejection_mask(disp, im1, im2, mask)

    if (algo == 'micmac'):
        # add micmac binaries to the PATH environment variable
        s2p_dir = os.path.dirname(os.path.dirname(os.path.realpath(os.path.abspath(__file__))))
        micmac_bin = os.path.join(s2p_dir, 'bin', 'micmac', 'bin')
        os.environ['PATH'] = os.environ['PATH'] + os.pathsep + micmac_bin

        # prepare micmac xml params file
        micmac_params = os.path.join(s2p_dir, '3rdparty', 'micmac_params.xml')
        work_dir = os.path.dirname(os.path.abspath(im1))
        common.run('cp {0} {1}'.format(micmac_params, work_dir))

        # run MICMAC
        common.run('MICMAC {0:s}'.format(os.path.join(work_dir, 'micmac_params.xml')))

        # copy output disp map
        micmac_disp = os.path.join(work_dir, 'MEC-EPI',
                                   'Px1_Num6_DeZoom1_LeChantier.tif')
        disp = os.path.join(work_dir, 'rectified_disp.tif')
        common.run('cp {0} {1}'.format(micmac_disp, disp))

        # compute mask by rejecting the 10% of pixels with lowest correlation score
        micmac_cost = os.path.join(work_dir, 'MEC-EPI',
                                   'Correl_LeChantier_Num_5.tif')
        mask = os.path.join(work_dir, 'rectified_mask.png')
        common.run(["plambda", micmac_cost, "x x%q10 < 0 255 if", "-o", mask])