Beispiel #1
0
def launch_calls(fun, list_of_args, nb_workers, *extra_args):
    """
    Run a function several times in parallel with different given inputs.

    Args:
        fun: function to be called several times in parallel.
        list_of_args: list of (first positional) arguments passed to fun, one
            per call. Each item is either a tile dictionary or a
            (tile_dictionary, pair_id) tuple.
        nb_workers: number of calls run simultaneously
        extra_args (optional): tuple containing extra arguments to be passed to
            fun (same value for all calls)

    Return:
        list of outputs, one per call (None for calls that failed or timed out)
    """
    results = []
    outputs = []
    show_progress.counter = 0
    show_progress.total = len(list_of_args)
    pool = multiprocessing.Pool(nb_workers)
    for x in list_of_args:
        if isinstance(x, tuple):  # we expect x = (tile_dictionary, pair_id)
            args = (fun, ) + x + extra_args
            log = os.path.join(x[0]['dir'], 'pair_%d' % x[1], 'stdout.log')
        else:  # we expect x = tile_dictionary
            args = (fun, x) + extra_args
            log = os.path.join(x['dir'], 'stdout.log')
        results.append(
            pool.apply_async(tilewise_wrapper,
                             args=args,
                             kwds={'stdout': log},
                             callback=show_progress))

    for r in results:
        try:
            outputs.append(r.get(600))  # wait at most 10 min per call
        except multiprocessing.TimeoutError:
            # NOTE: the worker process keeps running; we only stop waiting
            print("Timeout while running %s" % str(r))
            outputs.append(None)
        except common.RunFailure as e:
            print("FAILED call: ", e.args[0]["command"])
            print("\toutput: ", e.args[0]["output"])
            outputs.append(None)
        except KeyboardInterrupt:
            pool.terminate()
            sys.exit(1)
        except Exception as e:
            # report unexpected failures instead of silently swallowing them
            print("launch_calls: call failed with %r" % e)
            outputs.append(None)

    pool.close()
    pool.join()
    common.print_elapsed_time()
    return outputs
Beispiel #2
0
def launch_calls(fun, list_of_args, nb_workers, *extra_args):
    """
    Run a function several times in parallel with different given inputs.

    Args:
        fun: function to be called several times in parallel.
        list_of_args: list of (first positional) arguments passed to fun, one
            per call. Each item is either a tile dictionary or a
            (tile_dictionary, pair_id) tuple.
        nb_workers: number of calls run simultaneously
        extra_args (optional): tuple containing extra arguments to be passed to
            fun (same value for all calls)

    Return:
        list of outputs, one per call (None for calls that failed or timed out)
    """
    results = []
    outputs = []
    show_progress.counter = 0
    show_progress.total = len(list_of_args)
    pool = multiprocessing.Pool(nb_workers)
    for x in list_of_args:
        if isinstance(x, tuple):  # we expect x = (tile_dictionary, pair_id)
            args = (fun,) + x + extra_args
            log = os.path.join(x[0]['dir'], 'pair_%d' % x[1], 'stdout.log')
        else:  # we expect x = tile_dictionary
            args = (fun, x) + extra_args
            log = os.path.join(x['dir'], 'stdout.log')
        results.append(pool.apply_async(tilewise_wrapper, args=args,
                                        kwds={'stdout': log},
                                        callback=show_progress))

    for r in results:
        try:
            outputs.append(r.get(600))  # wait at most 10 min per call
        except multiprocessing.TimeoutError:
            # NOTE: the worker process keeps running; we only stop waiting
            print("Timeout while running %s" % str(r))
            outputs.append(None)
        except common.RunFailure as e:
            print("FAILED call: ", e.args[0]["command"])
            print("\toutput: ", e.args[0]["output"])
            outputs.append(None)
        except KeyboardInterrupt:
            pool.terminate()
            sys.exit(1)
        except Exception as e:
            # report unexpected failures instead of silently swallowing them
            print("launch_calls: call failed with %r" % e)
            outputs.append(None)

    pool.close()
    pool.join()
    common.print_elapsed_time()
    return outputs
Beispiel #3
0
def main(user_cfg, steps=ALL_STEPS):
    """
    Launch the s2p pipeline with the parameters given in a json file.

    Args:
        user_cfg: user config dictionary
        steps: either a string (single step) or a list of strings (several
            steps)
    """
    common.print_elapsed_time.t0 = datetime.datetime.now()
    initialization.build_cfg(user_cfg)
    if 'initialisation' in steps:
        initialization.make_dirs()

    # multiprocessing setup: cap the worker count with the user-provided limit
    nb_workers = multiprocessing.cpu_count()  # nb of available cores
    if cfg['max_processes']:
        nb_workers = min(nb_workers, cfg['max_processes'])
    cfg['max_processes'] = nb_workers

    tw, th = initialization.adjust_tile_size()
    tiles_txt = os.path.join(cfg['out_dir'], 'tiles.txt')
    create_masks = 'initialisation' in steps
    tiles = initialization.tiles_full_info(tw, th, tiles_txt, create_masks)

    if 'initialisation' in steps:
        # Write the list of json files to outdir/tiles.txt
        with open(tiles_txt, 'w') as f:
            for t in tiles:
                f.write(t['json'] + os.linesep)

    n = len(cfg['images'])
    tiles_pairs = [(t, i) for i in range(1, n) for t in tiles]

    # omp_num_threads should not exceed nb_workers when multiplied by the
    # number of (tile, pair) tasks; guard against an empty task list
    cfg['omp_num_threads'] = max(1, nb_workers // max(1, len(tiles_pairs)))

    if 'local-pointing' in steps:
        print('correcting pointing locally...')
        parallel.launch_calls(pointing_correction, tiles_pairs, nb_workers)

    if 'global-pointing' in steps:
        print('correcting pointing globally...')
        global_pointing_correction(tiles)
        common.print_elapsed_time()

    if 'rectification' in steps:
        print('rectifying tiles...')
        parallel.launch_calls(rectification_pair, tiles_pairs, nb_workers)

    if 'matching' in steps:
        print('running stereo matching...')
        parallel.launch_calls(stereo_matching, tiles_pairs, nb_workers)

    # with more than two images in pairwise mode, heights are computed per
    # pair then registered and merged; otherwise triangulate directly
    if n > 2 and cfg['triangulation_mode'] == 'pairwise':
        if 'disparity-to-height' in steps:
            print('computing height maps...')
            parallel.launch_calls(disparity_to_height, tiles_pairs, nb_workers)

            print('computing local pairwise height offsets...')
            parallel.launch_calls(mean_heights, tiles, nb_workers)

        if 'global-mean-heights' in steps:
            print('computing global pairwise height offsets...')
            global_mean_heights(tiles)

        if 'heights-to-ply' in steps:
            print('merging height maps and computing point clouds...')
            parallel.launch_calls(heights_to_ply, tiles, nb_workers)

    else:
        if 'triangulation' in steps:
            print('triangulating tiles...')
            if cfg['triangulation_mode'] == 'geometric':
                parallel.launch_calls(multidisparities_to_ply, tiles,
                                      nb_workers)
            elif cfg['triangulation_mode'] == 'pairwise':
                parallel.launch_calls(disparity_to_ply, tiles, nb_workers)
            else:
                raise ValueError(
                    "possible values for 'triangulation_mode' : 'pairwise' or 'geometric'"
                )

    if 'local-dsm-rasterization' in steps:
        print('computing DSM by tile...')
        parallel.launch_calls(plys_to_dsm, tiles, nb_workers)

    if 'global-dsm-rasterization' in steps:
        print('computing global DSM...')
        global_dsm(tiles)
        common.print_elapsed_time()

    # @kai
    if 'global-pointcloud' in steps:
        print('computing global point cloud...')
        global_pointcloud(tiles)
        common.print_elapsed_time()

    # cleanup
    common.garbage_cleanup()
    common.print_elapsed_time(since_first_call=True)
Beispiel #4
0
Datei: s2p.py Projekt: mnhrdt/s2p
def main(user_cfg, steps=ALL_STEPS):
    """
    Launch the s2p pipeline with the parameters given in a json file.

    Args:
        user_cfg: user config dictionary
        steps: either a string (single step) or a list of strings (several
            steps)
    """
    common.print_elapsed_time.t0 = datetime.datetime.now()
    initialization.build_cfg(user_cfg)
    if 'initialisation' in steps:
        initialization.make_dirs()

    # multiprocessing setup: cap the worker count with the user-provided limit
    nb_workers = multiprocessing.cpu_count()  # nb of available cores
    if cfg['max_processes']:
        nb_workers = min(nb_workers, cfg['max_processes'])
    cfg['max_processes'] = nb_workers

    tw, th = initialization.adjust_tile_size()
    print('\ndiscarding masked tiles...')
    tiles = initialization.tiles_full_info(tw, th)

    if 'initialisation' in steps:
        # Write the list of json files to outdir/tiles.txt
        with open(os.path.join(cfg['out_dir'], 'tiles.txt'), 'w') as f:
            for t in tiles:
                f.write(t['json'] + os.linesep)

    n = len(cfg['images'])
    tiles_pairs = [(t, i) for i in range(1, n) for t in tiles]

    # omp_num_threads should not exceed nb_workers when multiplied by the
    # number of (tile, pair) tasks; guard against an empty task list
    cfg['omp_num_threads'] = max(1, nb_workers // max(1, len(tiles_pairs)))

    if 'local-pointing' in steps:
        print('correcting pointing locally...')
        parallel.launch_calls(pointing_correction, tiles_pairs, nb_workers)

    if 'global-pointing' in steps:
        print('correcting pointing globally...')
        global_pointing_correction(tiles)
        common.print_elapsed_time()

    if 'rectification' in steps:
        print('rectifying tiles...')
        parallel.launch_calls(rectification_pair, tiles_pairs, nb_workers)

    if 'matching' in steps:
        print('running stereo matching...')
        parallel.launch_calls(stereo_matching, tiles_pairs, nb_workers)

    # with more than two images in pairwise mode, heights are computed per
    # pair then registered and merged; otherwise triangulate directly
    if n > 2 and cfg['triangulation_mode'] == 'pairwise':
        if 'disparity-to-height' in steps:
            print('computing height maps...')
            parallel.launch_calls(disparity_to_height, tiles_pairs, nb_workers)

            print('computing local pairwise height offsets...')
            parallel.launch_calls(mean_heights, tiles, nb_workers)

        if 'global-mean-heights' in steps:
            print('computing global pairwise height offsets...')
            global_mean_heights(tiles)

        if 'heights-to-ply' in steps:
            print('merging height maps and computing point clouds...')
            parallel.launch_calls(heights_to_ply, tiles, nb_workers)

    else:
        if 'triangulation' in steps:
            print('triangulating tiles...')
            if cfg['triangulation_mode'] == 'geometric':
                parallel.launch_calls(multidisparities_to_ply, tiles, nb_workers)
            elif cfg['triangulation_mode'] == 'pairwise':
                parallel.launch_calls(disparity_to_ply, tiles, nb_workers)
            else:
                raise ValueError("possible values for 'triangulation_mode' : 'pairwise' or 'geometric'")

    if 'global-srcwin' in steps:
        print('computing global source window (xoff, yoff, xsize, ysize)...')
        global_srcwin(tiles)
        common.print_elapsed_time()

    if 'local-dsm-rasterization' in steps:
        print('computing DSM by tile...')
        parallel.launch_calls(plys_to_dsm, tiles, nb_workers)

    if 'global-dsm-rasterization' in steps:
        print('computing global DSM...')
        global_dsm(tiles)
        common.print_elapsed_time()

    if 'lidar-preprocessor' in steps:
        if cfg['run_lidar_preprocessor']:
            print('lidar preprocessor...')
            lidar_preprocessor(tiles)
            common.print_elapsed_time()
        else:
            print("LidarPreprocessor explicitly disabled in config.json")

    # cleanup
    common.garbage_cleanup()
    common.print_elapsed_time(since_first_call=True)
Beispiel #5
0
def main(config_file, steps=ALL_STEPS):
    """
    Launch the s2p pipeline with the parameters given in a json file.

    Args:
        config_file: path to a json configuration file
        steps: either a string (single step) or a list of strings (several
            steps)
    """
    common.print_elapsed_time.t0 = datetime.datetime.now()
    initialization.build_cfg(config_file)
    if 'initialisation' in steps:
        initialization.make_dirs()

    # multiprocessing setup: cap the worker count with the user-provided limit
    nb_workers = multiprocessing.cpu_count()  # nb of available cores
    if cfg['max_processes']:
        nb_workers = min(nb_workers, cfg['max_processes'])
    cfg['max_processes'] = nb_workers

    tw, th = initialization.adjust_tile_size()
    print('\ndiscarding masked tiles...')
    tiles = initialization.tiles_full_info(tw, th)

    if 'initialisation' in steps:
        # Write the list of json files to outdir/tiles.txt
        with open(os.path.join(cfg['out_dir'], 'tiles.txt'), 'w') as f:
            for t in tiles:
                f.write(t['json'] + os.linesep)

    n = len(cfg['images'])
    tiles_pairs = [(t, i) for i in range(1, n) for t in tiles]

    # omp_num_threads should not exceed nb_workers when multiplied by the
    # number of (tile, pair) tasks; guard against an empty task list
    cfg['omp_num_threads'] = max(1, nb_workers // max(1, len(tiles_pairs)))

    if 'local-pointing' in steps:
        print('correcting pointing locally...')
        parallel.launch_calls(pointing_correction, tiles_pairs, nb_workers)

    if 'global-pointing' in steps:
        print('correcting pointing globally...')
        global_pointing_correction(tiles)
        common.print_elapsed_time()

    if 'rectification' in steps:
        print('rectifying tiles...')
        parallel.launch_calls(rectification_pair, tiles_pairs, nb_workers)

    if 'matching' in steps:
        print('running stereo matching...')
        parallel.launch_calls(stereo_matching, tiles_pairs, nb_workers)

    if 'triangulation' in steps:
        # with more than two images, heights are computed per pair then
        # registered with a global offset before merging
        if n > 2:
            print('computing height maps...')
            parallel.launch_calls(disparity_to_height, tiles_pairs, nb_workers)

            print('registering height maps...')
            mean_heights_local = parallel.launch_calls(mean_heights, tiles,
                                                       nb_workers)

            print('computing global pairwise height offsets...')
            mean_heights_global = np.nanmean(mean_heights_local, axis=0)

            print('merging height maps...')
            parallel.launch_calls(heights_fusion, tiles, nb_workers,
                                  mean_heights_global)

            print('computing point clouds...')
            parallel.launch_calls(heights_to_ply, tiles, nb_workers)

        else:
            print('triangulating tiles...')
            parallel.launch_calls(disparity_to_ply, tiles, nb_workers)

    if 'dsm-rasterization' in steps:
        print('computing DSM...')
        plys_to_dsm(tiles)
        common.print_elapsed_time()

    if 'lidar-preprocessor' in steps:
        print('lidar preprocessor...')
        lidar_preprocessor(tiles)
        common.print_elapsed_time()

    # cleanup
    common.garbage_cleanup()
    common.print_elapsed_time(since_first_call=True)
Beispiel #6
0
def main(user_cfg):
    """
    Launch the s2p pipeline with the parameters given in a json file.

    Args:
        user_cfg: user config dictionary
    """
    common.print_elapsed_time.t0 = datetime.datetime.now()
    initialization.build_cfg(user_cfg)
    initialization.make_dirs()

    # multiprocessing setup: honour the user-provided process limit if any
    nb_workers = multiprocessing.cpu_count()  # nb of available cores
    if cfg['max_processes'] is not None:
        nb_workers = cfg['max_processes']

    tile_w, tile_h = initialization.adjust_tile_size()
    tiles_txt = os.path.join(cfg['out_dir'], 'tiles.txt')
    tiles = initialization.tiles_full_info(tile_w, tile_h, tiles_txt,
                                           create_masks=True)

    # initialisation step:
    # Write the list of json files to outdir/tiles.txt
    with open(tiles_txt, 'w') as f:
        f.writelines(t['json'] + os.linesep for t in tiles)

    n = len(cfg['images'])
    tiles_pairs = [(t, i) for i in range(1, n) for t in tiles]

    # local-pointing step:
    print('correcting pointing locally...')
    parallel.launch_calls(pointing_correction, tiles_pairs, nb_workers)

    # global-pointing step:
    print('correcting pointing globally...')
    global_pointing_correction(tiles)
    common.print_elapsed_time()

    # rectification step:
    print('rectifying tiles...')
    parallel.launch_calls(rectification_pair, tiles_pairs, nb_workers)

    # matching step:
    print('running stereo matching...')
    parallel.launch_calls(stereo_matching, tiles_pairs, nb_workers)

    pairwise = cfg['triangulation_mode'] == 'pairwise'
    if n > 2 and pairwise:
        # disparity-to-height step:
        print('computing height maps...')
        parallel.launch_calls(disparity_to_height, tiles_pairs, nb_workers)

        print('computing local pairwise height offsets...')
        parallel.launch_calls(mean_heights, tiles, nb_workers)

        # global-mean-heights step:
        print('computing global pairwise height offsets...')
        global_mean_heights(tiles)

        # heights-to-ply step:
        print('merging height maps and computing point clouds...')
        parallel.launch_calls(heights_to_ply, tiles, nb_workers)
    else:
        # triangulation step:
        print('triangulating tiles...')
        if cfg['triangulation_mode'] == 'geometric':
            parallel.launch_calls(multidisparities_to_ply, tiles, nb_workers)
        elif pairwise:
            parallel.launch_calls(disparity_to_ply, tiles, nb_workers)
        else:
            raise ValueError(
                "possible values for 'triangulation_mode' : 'pairwise' or 'geometric'"
            )

    # local-dsm-rasterization step:
    print('computing DSM by tile...')
    parallel.launch_calls(plys_to_dsm, tiles, nb_workers)

    # global-dsm-rasterization step:
    print('computing global DSM...')
    global_dsm(tiles)
    common.print_elapsed_time()

    # cleanup
    common.garbage_cleanup()
    common.print_elapsed_time(since_first_call=True)