def pointing_correction(tiles):
    """
    Compute the global pointing corrections for each pair of images.

    Args:
        tiles: list of tile_info dictionaries
    """
    nb_pairs = tiles[0]['number_of_pairs']
    for i in range(1, nb_pairs + 1):
        list_of_tiles = [os.path.join(t['directory'], 'pair_%d' % i)
                         for t in tiles]
        np.savetxt(os.path.join(cfg['out_dir'],
                                'global_pointing_pair_%d.txt' % i),
                   pointing_accuracy.global_from_local(list_of_tiles))
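# A minimal usage sketch (hypothetical directories, for illustration only):
# 'tiles' is the list of tile_info dictionaries produced by the tiling step,
# and each entry is assumed to carry at least the 'directory' and
# 'number_of_pairs' keys read above, e.g.
#
#   tiles = [{'directory': 'out/tile_000000_000000_0500_0500',
#             'number_of_pairs': 1},
#            {'directory': 'out/tile_000000_000400_0500_0500',
#             'number_of_pairs': 1}]
#   pointing_correction(tiles)
#
# which writes one global_pointing_pair_<i>.txt file per image pair in
# cfg['out_dir'].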
def process_pair(out_dir, img1, rpc1, img2, rpc2, x, y, w, h, tw=None,
                 th=None, ov=None, cld_msk=None, roi_msk=None):
    """
    Computes a height map from a pair of pushbroom images, using tiles.

    Args:
        out_dir: path to the output directory
        img1: path to the reference image.
        rpc1: path to the xml file containing the rpc coefficients of the
            reference image
        img2: path to the secondary image.
        rpc2: path to the xml file containing the rpc coefficients of the
            secondary image
        x, y, w, h: four integers defining the rectangular ROI in the
            reference image. (x, y) is the top-left corner, and (w, h) are the
            dimensions of the rectangle. The ROI may be as big as you want, as
            it will be cut into small tiles for processing.
        tw, th: dimensions of the tiles
        ov: width of overlapping bands between tiles
        cld_msk (optional): path to a gml file containing a cloud mask
        roi_msk (optional): path to a gml file containing a mask defining the
            area contained in the full image.

    Returns:
        path to the height map tif file
    """
    # create a directory for the experiment
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)

    # duplicate stdout and stderr to log file
    tee.Tee('%s/stdout.log' % out_dir, 'w')

    # ensure that the coordinates of the ROI are multiples of the zoom factor,
    # to avoid bad registration of tiles due to rounding problems.
    z = cfg['subsampling_factor']
    x, y, w, h = common.round_roi_to_nearest_multiple(z, x, y, w, h)

    # TODO: automatically compute optimal size for tiles
    if tw is None and th is None and ov is None:
        ov = z * 100
        if w <= z * cfg['tile_size']:
            tw = w
        else:
            tw = z * cfg['tile_size']
        if h <= z * cfg['tile_size']:
            th = h
        else:
            th = z * cfg['tile_size']
    ntx = np.ceil(float(w - ov) / (tw - ov))
    nty = np.ceil(float(h - ov) / (th - ov))
    nt = ntx * nty

    print 'tiles size: (%d, %d)' % (tw, th)
    print 'total number of tiles: %d (%d x %d)' % (nt, ntx, nty)

    # create pool with fewer workers than available cores
    nb_workers = multiprocessing.cpu_count()
    if cfg['max_nb_threads']:
        nb_workers = min(nb_workers, cfg['max_nb_threads'])
    pool = multiprocessing.Pool(nb_workers)

    # process the tiles
    # don't parallelize if in debug mode
    tiles = []
    results = []
    show_progress.counter = 0
    print 'Computing disparity maps tile by tile...'
    try:
        for row in np.arange(y, y + h - ov, th - ov):
            for col in np.arange(x, x + w - ov, tw - ov):
                tile_dir = '%s/tile_%06d_%06d_%04d_%04d' % (out_dir, col, row,
                                                            tw, th)

                # check if the tile is already done, or masked
                if os.path.isfile('%s/rectified_disp.tif' % tile_dir):
                    if cfg['skip_existing']:
                        print "stereo on tile %d %d already done, skip" % (col,
                                                                           row)
                        tiles.append(tile_dir)
                        continue
                if os.path.isfile('%s/this_tile_is_masked.txt' % tile_dir):
                    print "tile %d %d already masked, skip" % (col, row)
                    tiles.append(tile_dir)
                    continue

                # process the tile
                if cfg['debug']:
                    process_pair_single_tile(tile_dir, img1, rpc1, img2, rpc2,
                                             col, row, tw, th, None, cld_msk,
                                             roi_msk)
                else:
                    p = pool.apply_async(process_pair_single_tile,
                                         args=(tile_dir, img1, rpc1, img2,
                                               rpc2, col, row, tw, th, None,
                                               cld_msk, roi_msk),
                                         callback=show_progress)
                    results.append(p)
                tiles.append(tile_dir)

        for r in results:
            try:
                r.get(3600)  # wait at most one hour per tile
            except multiprocessing.TimeoutError:
                print "Timeout while computing tile " + str(r)

    except KeyboardInterrupt:
        pool.terminate()
        sys.exit(1)

    except common.RunFailure as e:
        print "FAILED call: ", e.args[0]["command"]
        print "output: ", e.args[0]["output"]

    # compute global pointing correction
    print 'Computing global pointing correction...'
    A_global = pointing_accuracy.global_from_local(tiles)
    np.savetxt('%s/pointing.txt' % out_dir, A_global)

    # Check if all tiles were computed.
    # The only cause of a tile failure is a lack of sift matches, which breaks
    # the pointing correction step. Thus it is enough to check if the pointing
    # correction matrix was computed.
    results = []
    for i, row in enumerate(np.arange(y, y + h - ov, th - ov)):
        for j, col in enumerate(np.arange(x, x + w - ov, tw - ov)):
            tile_dir = '%s/tile_%06d_%06d_%04d_%04d' % (out_dir, col, row, tw,
                                                        th)
            if not os.path.isfile('%s/this_tile_is_masked.txt' % tile_dir):
                if not os.path.isfile('%s/pointing.txt' % tile_dir):
                    print "%s retrying pointing corr..." % tile_dir
                    # estimate pointing correction matrix from neighbors, if
                    # it fails use A_global, then rerun the disparity map
                    # computation
                    A = pointing_accuracy.from_next_tiles(tiles, ntx, nty, j,
                                                          i)
                    if A is None:
                        A = A_global
                    if cfg['debug']:
                        process_pair_single_tile(tile_dir, img1, rpc1, img2,
                                                 rpc2, col, row, tw, th, None,
                                                 cld_msk, roi_msk, A)
                    else:
                        p = pool.apply_async(process_pair_single_tile,
                                             args=(tile_dir, img1, rpc1, img2,
                                                   rpc2, col, row, tw, th,
                                                   None, cld_msk, roi_msk, A),
                                             callback=show_progress)
                        results.append(p)

    try:
        for r in results:
            try:
                r.get(3600)  # wait at most one hour per tile
            except multiprocessing.TimeoutError:
                print "Timeout while computing tile " + str(r)
    except KeyboardInterrupt:
        pool.terminate()
        sys.exit(1)
    except common.RunFailure as e:
        print "FAILED call: ", e.args[0]["command"]
        print "output: ", e.args[0]["output"]

    # triangulation
    processes = []
    results = []
    show_progress.counter = 0
    print 'Computing height maps tile by tile...'
    try:
        for row in np.arange(y, y + h - ov, th - ov):
            for col in np.arange(x, x + w - ov, tw - ov):
                tile = '%s/tile_%06d_%06d_%04d_%04d' % (out_dir, col, row, tw,
                                                        th)
                H1 = '%s/H_ref.txt' % tile
                H2 = '%s/H_sec.txt' % tile
                disp = '%s/rectified_disp.tif' % tile
                mask = '%s/rectified_mask.png' % tile
                rpc_err = '%s/rpc_err.tif' % tile
                height_map = '%s/height_map.tif' % tile

                # check if the tile is already done, or masked
                if os.path.isfile(height_map):
                    if cfg['skip_existing']:
                        print "triangulation on tile %d %d is done, skip" % (col,
                                                                             row)
                        continue
                if os.path.isfile('%s/this_tile_is_masked.txt' % tile):
                    print "tile %d %d already masked, skip" % (col, row)
                    continue

                # process the tile
                if cfg['debug']:
                    triangulation.compute_dem(height_map, col, row, tw, th, z,
                                              rpc1, rpc2, H1, H2, disp, mask,
                                              rpc_err, A_global)
                else:
                    p = pool.apply_async(triangulation.compute_dem,
                                         args=(height_map, col, row, tw, th,
                                               z, rpc1, rpc2, H1, H2, disp,
                                               mask, rpc_err, A_global),
                                         callback=show_progress)
                    processes.append(p)

        for p in processes:
            try:
                results.append(p.get(3600))  # wait at most one hour per tile
            except multiprocessing.TimeoutError:
                print "Timeout while computing tile " + str(p)

    except KeyboardInterrupt:
        pool.terminate()
        sys.exit(1)

    # tiles composition
    out = '%s/height_map.tif' % out_dir
    tmp = ['%s/height_map.tif' % t for t in tiles]
    if not os.path.isfile(out) or not cfg['skip_existing']:
        print "Mosaicing tiles with %s..." % cfg['mosaic_method']
        if cfg['mosaic_method'] == 'gdal':
            tile_composer.mosaic_gdal(out, w/z, h/z, tmp, tw/z, th/z, ov/z)
        else:
            tile_composer.mosaic(out, w/z, h/z, tmp, tw/z, th/z, ov/z)

    common.garbage_cleanup()
    return out
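# A minimal usage sketch (hypothetical paths, ROI and cfg values, for
# illustration only): assuming cfg['tile_size'] = 1000 and
# cfg['subsampling_factor'] = 1, a call such as
#
#   process_pair('test/out', 'img_ref.tif', 'rpc_ref.xml',
#                'img_sec.tif', 'rpc_sec.xml', 10000, 15000, 2000, 2000)
#
# picks tw = th = 1000 and ov = 100, hence
# ntx = nty = ceil((2000 - 100) / (1000 - 100)) = 3, i.e. 9 tiles, and returns
# the path 'test/out/height_map.tif' of the mosaicked height map.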