Example 1
def main(user_cfg, steps=ALL_STEPS):
    """
    Launch the s2p pipeline with the parameters given in a json file.

    Args:
        user_cfg: user config dictionary
        steps: either a string (single step) or a list of strings (several
            steps)
    """
    common.print_elapsed_time.t0 = datetime.datetime.now()
    initialization.build_cfg(user_cfg)
    if 'initialisation' in steps:
        initialization.make_dirs()

    # multiprocessing setup
    nb_workers = multiprocessing.cpu_count()  # nb of available cores
    if cfg['max_processes']:
        nb_workers = min(nb_workers, cfg['max_processes'])
    cfg['max_processes'] = nb_workers

    tw, th = initialization.adjust_tile_size()
    tiles_txt = os.path.join(cfg['out_dir'], 'tiles.txt')
    create_masks = 'initialisation' in steps
    tiles = initialization.tiles_full_info(tw, th, tiles_txt, create_masks)

    if 'initialisation' in steps:
        # Write the list of json files to outdir/tiles.txt
        with open(tiles_txt, 'w') as f:
            for t in tiles:
                f.write(t['json'] + os.linesep)

    n = len(cfg['images'])
    tiles_pairs = [(t, i) for i in range(1, n) for t in tiles]

    # omp_num_threads should not exceed nb_workers when multiplied by len(tiles_pairs)
    cfg['omp_num_threads'] = max(1, int(nb_workers / len(tiles_pairs)))

    if 'local-pointing' in steps:
        print('correcting pointing locally...')
        parallel.launch_calls(pointing_correction, tiles_pairs, nb_workers)

    if 'global-pointing' in steps:
        print('correcting pointing globally...')
        global_pointing_correction(tiles)
        common.print_elapsed_time()

    if 'rectification' in steps:
        print('rectifying tiles...')
        parallel.launch_calls(rectification_pair, tiles_pairs, nb_workers)

    if 'matching' in steps:
        print('running stereo matching...')
        parallel.launch_calls(stereo_matching, tiles_pairs, nb_workers)

    if n > 2 and cfg['triangulation_mode'] == 'pairwise':
        if 'disparity-to-height' in steps:
            print('computing height maps...')
            parallel.launch_calls(disparity_to_height, tiles_pairs, nb_workers)

            print('computing local pairwise height offsets...')
            parallel.launch_calls(mean_heights, tiles, nb_workers)

        if 'global-mean-heights' in steps:
            print('computing global pairwise height offsets...')
            global_mean_heights(tiles)

        if 'heights-to-ply' in steps:
            print('merging height maps and computing point clouds...')
            parallel.launch_calls(heights_to_ply, tiles, nb_workers)

    else:
        if 'triangulation' in steps:
            print('triangulating tiles...')
            if cfg['triangulation_mode'] == 'geometric':
                parallel.launch_calls(multidisparities_to_ply, tiles,
                                      nb_workers)
            elif cfg['triangulation_mode'] == 'pairwise':
                parallel.launch_calls(disparity_to_ply, tiles, nb_workers)
            else:
                raise ValueError(
                    "possible values for 'triangulation_mode' : 'pairwise' or 'geometric'"
                )

    if 'local-dsm-rasterization' in steps:
        print('computing DSM by tile...')
        parallel.launch_calls(plys_to_dsm, tiles, nb_workers)

    if 'global-dsm-rasterization' in steps:
        print('computing global DSM...')
        global_dsm(tiles)
        common.print_elapsed_time()

    # @kai
    if 'global-pointcloud' in steps:
        print('computing global point cloud...')
        global_pointcloud(tiles)
        common.print_elapsed_time()

    # cleanup
    common.garbage_cleanup()
    common.print_elapsed_time(since_first_call=True)
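A minimal usage sketch for this variant, which takes a user config dictionary and an optional subset of steps. The exact structure of the 'images' entries is an assumption; only the keys actually read in the snippet ('out_dir', 'images', 'max_processes', 'triangulation_mode') are taken from the code above.

# Hypothetical usage sketch: run only the first pipeline stages on a
# user-supplied config dictionary.
user_cfg = {
    'out_dir': '/tmp/s2p_test',
    'images': [{'img': 'img_01.tif', 'rpc': 'rpc_01.xml'},   # assumed layout
               {'img': 'img_02.tif', 'rpc': 'rpc_02.xml'}],
    'max_processes': 4,
    'triangulation_mode': 'pairwise',
}

# steps accepts either a single string or a list of strings
main(user_cfg, steps=['initialisation', 'local-pointing', 'global-pointing'])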
Example 2
def main(config_file, steps=ALL_STEPS):
    """
    Launch the s2p pipeline with the parameters given in a json file.

    Args:
        config_file: path to a json configuration file
        steps: either a string (single step) or a list of strings (several
            steps)
    """
    common.print_elapsed_time.t0 = datetime.datetime.now()
    initialization.build_cfg(config_file)
    if 'initialisation' in steps:
        initialization.make_dirs()

    # multiprocessing setup
    nb_workers = multiprocessing.cpu_count()  # nb of available cores
    if cfg['max_processes']:
        nb_workers = min(nb_workers, cfg['max_processes'])
    cfg['max_processes'] = nb_workers

    tw, th = initialization.adjust_tile_size()
    print('\ndiscarding masked tiles...')
    tiles = initialization.tiles_full_info(tw, th)

    if 'initialisation' in steps:
        # Write the list of json files to outdir/tiles.txt
        with open(os.path.join(cfg['out_dir'], 'tiles.txt'), 'w') as f:
            for t in tiles:
                f.write(t['json'] + os.linesep)

    n = len(cfg['images'])
    tiles_pairs = [(t, i) for i in range(1, n) for t in tiles]

    # omp_num_threads should not exceed nb_workers when multiplied by len(tiles_pairs)
    cfg['omp_num_threads'] = max(1, int(nb_workers / len(tiles_pairs)))

    if 'local-pointing' in steps:
        print('correcting pointing locally...')
        parallel.launch_calls(pointing_correction, tiles_pairs, nb_workers)

    if 'global-pointing' in steps:
        print('correcting pointing globally...')
        global_pointing_correction(tiles)
        common.print_elapsed_time()

    if 'rectification' in steps:
        print('rectifying tiles...')
        parallel.launch_calls(rectification_pair, tiles_pairs, nb_workers)

    if 'matching' in steps:
        print('running stereo matching...')
        parallel.launch_calls(stereo_matching, tiles_pairs, nb_workers)

    if 'triangulation' in steps:
        if n > 2:
            print('computing height maps...')
            parallel.launch_calls(disparity_to_height, tiles_pairs, nb_workers)

            print('registering height maps...')
            mean_heights_local = parallel.launch_calls(mean_heights, tiles,
                                                       nb_workers)

            print('computing global pairwise height offsets...')
            mean_heights_global = np.nanmean(mean_heights_local, axis=0)

            print('merging height maps...')
            parallel.launch_calls(heights_fusion, tiles, nb_workers,
                                  mean_heights_global)

            print('computing point clouds...')
            parallel.launch_calls(heights_to_ply, tiles, nb_workers)

        else:
            print('triangulating tiles...')
            parallel.launch_calls(disparity_to_ply, tiles, nb_workers)

    if 'dsm-rasterization' in steps:
        print('computing DSM...')
        plys_to_dsm(tiles)
        common.print_elapsed_time()

    if 'lidar-preprocessor' in steps:
        print('lidar preprocessor...')
        lidar_preprocessor(tiles)
        common.print_elapsed_time()

    # cleanup
    common.garbage_cleanup()
    common.print_elapsed_time(since_first_call=True)
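This variant takes the path to a json configuration file instead of a dictionary. The sketch below is illustrative: the config keys are inferred from the cfg[...] accesses above, and the 'images' entry layout is an assumption.

# Hypothetical usage sketch: write a config file, then run the pipeline.
import json

config = {
    'out_dir': 'output',
    'images': [{'img': 'img_01.tif', 'rpc': 'rpc_01.xml'},   # assumed layout
               {'img': 'img_02.tif', 'rpc': 'rpc_02.xml'}],
    'max_processes': 8,
}
with open('config.json', 'w') as f:
    json.dump(config, f, indent=2)

# run the full pipeline, or re-run a single step by passing its name
main('config.json')
main('config.json', steps='dsm-rasterization')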
Example 3
File: s2p.py  Project: mnhrdt/s2p
def main(user_cfg, steps=ALL_STEPS):
    """
    Launch the s2p pipeline with the parameters given in a json file.

    Args:
        user_cfg: user config dictionary
        steps: either a string (single step) or a list of strings (several
            steps)
    """
    common.print_elapsed_time.t0 = datetime.datetime.now()
    initialization.build_cfg(user_cfg)
    if 'initialisation' in steps:
        initialization.make_dirs()

    # multiprocessing setup
    nb_workers = multiprocessing.cpu_count()  # nb of available cores
    if cfg['max_processes']:
        nb_workers = min(nb_workers, cfg['max_processes'])
    cfg['max_processes'] = nb_workers

    tw, th = initialization.adjust_tile_size()
    print('\ndiscarding masked tiles...')
    tiles = initialization.tiles_full_info(tw, th)

    if 'initialisation' in steps:
        # Write the list of json files to outdir/tiles.txt
        with open(os.path.join(cfg['out_dir'], 'tiles.txt'), 'w') as f:
            for t in tiles:
                f.write(t['json'] + os.linesep)

    n = len(cfg['images'])
    tiles_pairs = [(t, i) for i in range(1, n) for t in tiles]

    # omp_num_threads should not exceed nb_workers when multiplied by len(tiles_pairs)
    cfg['omp_num_threads'] = max(1, int(nb_workers / len(tiles_pairs)))

    if 'local-pointing' in steps:
        print('correcting pointing locally...')
        parallel.launch_calls(pointing_correction, tiles_pairs, nb_workers)

    if 'global-pointing' in steps:
        print('correcting pointing globally...')
        global_pointing_correction(tiles)
        common.print_elapsed_time()

    if 'rectification' in steps:
        print('rectifying tiles...')
        parallel.launch_calls(rectification_pair, tiles_pairs, nb_workers)

    if 'matching' in steps:
        print('running stereo matching...')
        parallel.launch_calls(stereo_matching, tiles_pairs, nb_workers)

    if n > 2 and cfg['triangulation_mode'] == 'pairwise':
        if 'disparity-to-height' in steps:
            print('computing height maps...')
            parallel.launch_calls(disparity_to_height, tiles_pairs, nb_workers)

            print('computing local pairwise height offsets...')
            parallel.launch_calls(mean_heights, tiles, nb_workers)

        if 'global-mean-heights' in steps:
            print('computing global pairwise height offsets...')
            global_mean_heights(tiles)

        if 'heights-to-ply' in steps:
            print('merging height maps and computing point clouds...')
            parallel.launch_calls(heights_to_ply, tiles, nb_workers)

    else:
        if 'triangulation' in steps:
            print('triangulating tiles...')
            if cfg['triangulation_mode'] == 'geometric':
                parallel.launch_calls(multidisparities_to_ply, tiles, nb_workers)
            elif cfg['triangulation_mode'] == 'pairwise':
                parallel.launch_calls(disparity_to_ply, tiles, nb_workers)
            else:
                raise ValueError("possible values for 'triangulation_mode' : 'pairwise' or 'geometric'")

    if 'global-srcwin' in steps:
        print('computing global source window (xoff, yoff, xsize, ysize)...')
        global_srcwin(tiles)
        common.print_elapsed_time()

    if 'local-dsm-rasterization' in steps:
        print('computing DSM by tile...')
        parallel.launch_calls(plys_to_dsm, tiles, nb_workers)

    if 'global-dsm-rasterization' in steps:
        print('computing global DSM...')
        global_dsm(tiles)
        common.print_elapsed_time()

    if 'lidar-preprocessor' in steps:
        if cfg['run_lidar_preprocessor']:
            print('lidar preprocessor...')
            lidar_preprocessor(tiles)
            common.print_elapsed_time()
        else:
            print("LidarPreprocessor explicitly disabled in config.json")

    # cleanup
    common.garbage_cleanup()
    common.print_elapsed_time(since_first_call=True)
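This variant adds a 'global-srcwin' step and a 'run_lidar_preprocessor' switch. A sketch of resuming it from the rasterization stage, assuming the outputs of the earlier steps already exist under cfg['out_dir']; the step names come from the conditionals above, the config values are illustrative assumptions.

# Hypothetical: re-run only the final rasterization steps.
user_cfg = {
    'out_dir': 'output',
    'images': [{'img': 'img_01.tif', 'rpc': 'rpc_01.xml'},   # assumed layout
               {'img': 'img_02.tif', 'rpc': 'rpc_02.xml'}],
    'run_lidar_preprocessor': False,  # skip the LidarPreprocessor branch
}

main(user_cfg, steps=['global-srcwin',
                      'local-dsm-rasterization',
                      'global-dsm-rasterization'])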
Example 4
def main(user_cfg):
    """
    Launch the s2p pipeline with the parameters given in a json file.

    Args:
        user_cfg: user config dictionary
    """
    common.print_elapsed_time.t0 = datetime.datetime.now()
    initialization.build_cfg(user_cfg)
    initialization.make_dirs()

    # multiprocessing setup
    nb_workers = multiprocessing.cpu_count()  # nb of available cores
    if cfg['max_processes'] is not None:
        nb_workers = cfg['max_processes']

    tw, th = initialization.adjust_tile_size()
    tiles_txt = os.path.join(cfg['out_dir'], 'tiles.txt')
    tiles = initialization.tiles_full_info(tw,
                                           th,
                                           tiles_txt,
                                           create_masks=True)

    # initialisation step:
    # Write the list of json files to outdir/tiles.txt
    with open(tiles_txt, 'w') as f:
        for t in tiles:
            f.write(t['json'] + os.linesep)

    n = len(cfg['images'])
    tiles_pairs = [(t, i) for i in range(1, n) for t in tiles]

    # local-pointing step:
    print('correcting pointing locally...')
    parallel.launch_calls(pointing_correction, tiles_pairs, nb_workers)

    # global-pointing step:
    print('correcting pointing globally...')
    global_pointing_correction(tiles)
    common.print_elapsed_time()

    # rectification step:
    print('rectifying tiles...')
    parallel.launch_calls(rectification_pair, tiles_pairs, nb_workers)

    # matching step:
    print('running stereo matching...')
    parallel.launch_calls(stereo_matching, tiles_pairs, nb_workers)

    if n > 2 and cfg['triangulation_mode'] == 'pairwise':
        # disparity-to-height step:
        print('computing height maps...')
        parallel.launch_calls(disparity_to_height, tiles_pairs, nb_workers)

        print('computing local pairwise height offsets...')
        parallel.launch_calls(mean_heights, tiles, nb_workers)

        # global-mean-heights step:
        print('computing global pairwise height offsets...')
        global_mean_heights(tiles)

        # heights-to-ply step:
        print('merging height maps and computing point clouds...')
        parallel.launch_calls(heights_to_ply, tiles, nb_workers)
    else:
        # triangulation step:
        print('triangulating tiles...')
        if cfg['triangulation_mode'] == 'geometric':
            parallel.launch_calls(multidisparities_to_ply, tiles, nb_workers)
        elif cfg['triangulation_mode'] == 'pairwise':
            parallel.launch_calls(disparity_to_ply, tiles, nb_workers)
        else:
            raise ValueError(
                "possible values for 'triangulation_mode' : 'pairwise' or 'geometric'"
            )

    # local-dsm-rasterization step:
    print('computing DSM by tile...')
    parallel.launch_calls(plys_to_dsm, tiles, nb_workers)

    # global-dsm-rasterization step:
    print('computing global DSM...')
    global_dsm(tiles)
    common.print_elapsed_time()

    # cleanup
    common.garbage_cleanup()
    common.print_elapsed_time(since_first_call=True)
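This last variant always runs every step and only accepts a config dictionary, so a thin command-line wrapper is one plausible way to drive it. The wrapper below is an illustrative sketch, not part of the original snippet.

# Hypothetical CLI wrapper: load a json config given on the command line
# and run the whole pipeline with it.
import argparse
import json

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='run the s2p pipeline')
    parser.add_argument('config', help='path to a json configuration file')
    args = parser.parse_args()

    with open(args.config) as f:
        user_cfg = json.load(f)

    main(user_cfg)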