Example #1
File: s2p.py, Project: cpalmann/s2p
def list_jobs(config_file, step):
    """
    Write the list of jobs for the given pipeline step to a '<step>.jobs' file.
    """
    tiles_full_info = initialization.init_tiles_full_info(config_file)
    filename = str(step) + ".jobs"

    if not (os.path.exists(cfg['out_dir'])):
        os.mkdir(cfg['out_dir'])

    if step in [2, 4]:          # preprocessing, processing
        f = open(os.path.join(cfg['out_dir'], filename), 'w')
        for tile in tiles_full_info:
            tile_dir = tile['directory']
            f.write(tile_dir + ' ' + str(step) + '\n')
        f.close()
    elif step in [3, 5, 7]:     # global values, global extent, finalization
        f = open(os.path.join(cfg['out_dir'], filename), 'w')
        f.write('all_tiles ' + str(step) + '\n')
        f.close()
    elif step == 6:             # compute dsm
        f = open(os.path.join(cfg['out_dir'], filename), 'w')
        for i in range(cfg['dsm_nb_tiles']):
            f.write('dsm_' + str(i) + ' ' + str(step) + '\n')
        f.close()
    else:
        print "Unkown step required: %s" % str(step)
Example #2
File: s2p.py, Project: cpalmann/s2p
def execute_job(config_file, params):
    """
    Execute a job.

    Args:
        config_file: path to the json configuration file
        params: one split line of a .jobs file, i.e. [tile_dir, step, ...]
    """
    tile_dir = params[0]
    step = int(params[1])

    tiles_full_info = initialization.init_tiles_full_info(config_file)

    if not (tile_dir == 'all_tiles' or 'dsm' in tile_dir ):
        for tile in tiles_full_info:
            if tile_dir == tile['directory']:
                tile_to_process = tile
                break

    try:

        if step == 2:  # preprocess_tiles
            print 'preprocess_tiles on %s ...' % tile_to_process
            preprocess_tile(tile_to_process)

        if step == 3:  # global_values
            print 'global values ...'
            global_values(tiles_full_info)

        if step == 4:  # process_tiles
            print 'process_tiles on %s ...' % tile_to_process
            process_tile(tile_to_process)

        if step == 5:  # global_extent
            print 'global extent ...'
            global_extent(tiles_full_info)

        if step == 6:  # compute_dsm
            print 'compute_dsm ...'
            current_tile = int(tile_dir.split('_')[1])  # for instance, dsm_2 becomes 2
            compute_dsm([config_file, cfg['dsm_nb_tiles'], current_tile])

        if step == 7:  # global_finalization
            print 'global finalization...'
            global_finalization(tiles_full_info)

    except KeyboardInterrupt:
        pool.terminate()
        sys.exit(1)

    except common.RunFailure as e:
        print "FAILED call: ", e.args[0]["command"]
        print "\toutput: ", e.args[0]["output"]
Example #3
File: s2p.py, Project: tangwudu/s2p
def main(config_file):
    """
    Launch the entire s2p pipeline with the parameters given by a json file.

    It is a succession of five steps:
        initialization
        preprocessing
        global_values
        processing
        global_finalization

    Args:
        config_file: path to a json configuration file
    """
    t0 = time.time()

    # initialization
    initialization.init_dirs_srtm_roi(config_file)
    tiles_full_info = initialization.init_tiles_full_info(config_file)
    show_progress.total = len(tiles_full_info)

    # multiprocessing setup
    nb_workers = multiprocessing.cpu_count()  # nb of available cores
    if cfg['max_nb_threads']:
        nb_workers = min(nb_workers, cfg['max_nb_threads'])

    # omp_num_threads: should not exceed nb_workers when multiplied by the
    # number of tiles
    cfg['omp_num_threads'] = max(1, int(nb_workers / len(tiles_full_info)))

    # do the job
    print '\npreprocessing tiles...'
    launch_parallel_calls(preprocess_tile, tiles_full_info, nb_workers)
    print "Elapsed time:", datetime.timedelta(seconds=int(time.time() - t0))

    print '\ncomputing global values...'
    global_values(tiles_full_info)
    print "Elapsed time:", datetime.timedelta(seconds=int(time.time() - t0))

    print '\nprocessing tiles...'
    launch_parallel_calls(process_tile, tiles_full_info, nb_workers)
    print "Elapsed time:", datetime.timedelta(seconds=int(time.time() - t0))

    print '\nglobal finalization...'
    global_finalization(tiles_full_info)
    print "Total runtime:", datetime.timedelta(seconds=int(time.time() - t0))

    # cleanup
    common.garbage_cleanup()
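
A quick worked example of the thread budget computed above: nb_workers is the core count capped by cfg['max_nb_threads'], and omp_num_threads is the integer share of workers per tile, clamped to at least 1. The numbers below are illustrative only.

nb_cores = 16                  # what multiprocessing.cpu_count() might return
max_nb_threads = 8             # cfg['max_nb_threads']
nb_tiles = 20                  # len(tiles_full_info)

nb_workers = min(nb_cores, max_nb_threads)             # 8
omp_num_threads = max(1, int(nb_workers / nb_tiles))   # int(8 / 20) = 0, clamped to 1

# With fewer tiles than workers, each tile gets a larger OpenMP budget:
omp_num_threads = max(1, int(nb_workers / 4))          # 2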
Example #4
File: s2p.py, Project: jguinet/s2p
def list_jobs(config_file, step):
    """
    """
    tiles_full_info = initialization.init_tiles_full_info(config_file)
    filename = str(step) + ".jobs"

    if not (os.path.exists(cfg['out_dir'])):
        os.mkdir(cfg['out_dir'])

    if step in [2, 4]:
        f = open(os.path.join(cfg['out_dir'], filename), 'w')
        for tile in tiles_full_info:
            tile_dir = tile['directory']
            f.write(tile_dir + ' ' + str(step) + '\n')
        f.close()
    elif step in [3, 5]:
        f = open(os.path.join(cfg['out_dir'], filename), 'w')
        f.write('all_tiles ' + str(step) + '\n')
        f.close()
    else:
        print "Unkown step required: %s" % str(step)
Example #5
File: s2p.py, Project: jguinet/s2p
def execute_job(config_file, tile_dir, step):
    """
    Execute a job.

    Args:
         - json config file
         - tile_dir
         - step
    """
    tiles_full_info = initialization.init_tiles_full_info(config_file)

    if not tile_dir == 'all_tiles':
        for tile in tiles_full_info:
            if tile_dir == tile['directory']:
                tile_to_process = tile
                print tile_to_process
                break

    try:
        if step == 2:
            print 'preprocess_tiles on %s ...' % tile_to_process
            preprocess_tile(tile_to_process)

        if step == 3:
            print 'global values...'
            global_values(tiles_full_info)

        if step == 4:
            print 'process_tiles on %s ...' % tile_to_process
            process_tile(tile_to_process)

        if step == 5:
            print 'global finalization...'
            global_finalization(tiles_full_info)

    except common.RunFailure as e:
        print "FAILED call: ", e.args[0]["command"]
        print "\toutput: ", e.args[0]["output"]
Example #6
File: s2p.py, Project: cpalmann/s2p
def main(config_file, step=None, clusterMode=None, misc=None):
    """
    Launch the entire s2p pipeline with the parameters given in a json file.

    It is a succession of seven steps:
        initialization
        preprocessing
        global_values
        processing
        global_extent
        compute_dsm
        global_finalization

    Args:
        config_file: path to a json configuration file
        step: integer between 1 and 7 specifying which step to run. Default
        value is None. In that case all the steps are run.
    """
    print_elapsed_time.t0 = datetime.datetime.now()

    if clusterMode == 'list_jobs':
        list_jobs(config_file, step)
    elif clusterMode == 'job':
        cfg['omp_num_threads'] = 1
        execute_job(config_file, misc)
    else:
        # determine which steps to run
        steps = [step] if step else [1, 2, 3, 4, 5, 6, 7]

        # initialization (has to be done whatever the queried steps)
        initialization.init_dirs_srtm(config_file)
        tiles_full_info = initialization.init_tiles_full_info(config_file)

        # multiprocessing setup
        nb_workers = multiprocessing.cpu_count()  # nb of available cores
        if cfg['max_nb_threads']:
            nb_workers = min(nb_workers, cfg['max_nb_threads'])

        # omp_num_threads: should not exceed nb_workers when multiplied by the
        # number of tiles
        cfg['omp_num_threads'] = max(1, int(nb_workers / len(tiles_full_info)))

        # do the job
        if 2 in steps:
            print '\npreprocessing tiles...'
            show_progress.total = len(tiles_full_info)
            launch_parallel_calls(preprocess_tile, tiles_full_info, nb_workers)
            print_elapsed_time()

        if 3 in steps:
            print '\ncomputing global values...'
            global_values(tiles_full_info)
            print_elapsed_time()

        if 4 in steps:
            print '\nprocessing tiles...'
            show_progress.total = len(tiles_full_info)
            launch_parallel_calls(process_tile, tiles_full_info, nb_workers)
            print_elapsed_time()

        if 5 in steps:
            print '\ncomputing global extent...'
            global_extent(tiles_full_info)
            print_elapsed_time()

        if 6 in steps:
            print '\ncompute dsm...'
            args = []
            for i in range(cfg['dsm_nb_tiles']):
                args.append([config_file, cfg['dsm_nb_tiles'], i])
            show_progress.total = cfg['dsm_nb_tiles']
            launch_parallel_calls(compute_dsm, args, nb_workers)
            print_elapsed_time()

        if 7 in steps:
            print '\nglobal finalization...'
            global_finalization(tiles_full_info)
            print_elapsed_time()

    # cleanup
    print_elapsed_time(since_first_call=True)
    common.garbage_cleanup()
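
Putting the clusterMode branches together, a driver could run one pipeline step on a cluster in two phases: first write the job list, then execute each line as an independent job. Below is a minimal sequential sketch; the helper run_step_on_cluster is hypothetical, and on a real cluster each 'job' call would be a separate submission.

import os

def run_step_on_cluster(config_file, step):
    # Phase 1: write '<step>.jobs' into cfg['out_dir'].
    main(config_file, step=step, clusterMode='list_jobs')

    # Phase 2: execute every listed job; the 'job' branch of main forwards
    # the split line to execute_job(config_file, misc).
    jobs_path = os.path.join(cfg['out_dir'], str(step) + '.jobs')
    with open(jobs_path) as f:
        for line in f:
            params = line.split()
            if params:
                main(config_file, step=step, clusterMode='job', misc=params)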
Example #7
File: s2p.py, Project: jguinet/s2p
def main(config_file, step=None, clusterMode=None, misc=None):
    """
    Launch the entire s2p pipeline with the parameters given in a json file.

    It is a succession of five steps:
        initialization
        preprocessing
        global_values
        processing
        global_finalization

    Args:
        config_file: path to a json configuration file
        step: integer between 1 and 5 specifying which step to run. Default
        value is None. In that case all the steps are run.
    """
    if clusterMode == 'list_jobs':
        list_jobs(config_file, step)
    elif clusterMode == 'job':
        cfg['omp_num_threads'] = 1
        execute_job(config_file, misc[0], int(misc[1]))
    else:
        # determine which steps to run
        steps = [step] if step else [1, 2, 3, 4, 5]

        # initialization (has to be done whatever the queried steps)
        initialization.init_dirs_srtm(config_file)
        tiles_full_info = initialization.init_tiles_full_info(config_file)
        show_progress.total = len(tiles_full_info)
        print_elapsed_time.t0 = datetime.datetime.now()

        # multiprocessing setup
        nb_workers = multiprocessing.cpu_count()  # nb of available cores
        if cfg['max_nb_threads']:
            nb_workers = min(nb_workers, cfg['max_nb_threads'])

        # omp_num_threads: should not exceed nb_workers when multiplied by the
        # number of tiles
        cfg['omp_num_threads'] = max(1, int(nb_workers / len(tiles_full_info)))

        # do the job
        if 2 in steps:
            print '\npreprocessing tiles...'
            launch_parallel_calls(preprocess_tile, tiles_full_info, nb_workers)
            print_elapsed_time()

        if 3 in steps:
            print '\ncomputing global values...'
            global_values(tiles_full_info)
            print_elapsed_time()

        if 4 in steps:
            print '\nprocessing tiles...'
            launch_parallel_calls(process_tile, tiles_full_info, nb_workers)
            print_elapsed_time()

        if 5 in steps:
            print '\nglobal finalization...'
            global_finalization(tiles_full_info)
            print_elapsed_time()

    # cleanup
    common.garbage_cleanup()
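
Either variant of main can be driven from a small command-line entry point. The flag names below are hypothetical (the actual s2p launcher may use different ones); this only sketches how main(config_file, step, clusterMode, misc) maps onto argparse.

import argparse

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='s2p pipeline launcher (sketch)')
    parser.add_argument('config', help='path to the json configuration file')
    parser.add_argument('--step', type=int, default=None,
                        help='run a single pipeline step instead of all of them')
    parser.add_argument('--cluster-mode', dest='cluster_mode', default=None,
                        choices=['list_jobs', 'job'],
                        help='write the job list for a step, or execute one job')
    parser.add_argument('--misc', nargs='*', default=None,
                        help="job parameters, e.g. one split line of a '.jobs' file")
    args = parser.parse_args()
    main(args.config, step=args.step, clusterMode=args.cluster_mode, misc=args.misc)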