Example #1
    def energyMF_pool(self):
        start_time=time.time()        
        k_req_max=np.maximum(self.estimate[:self.N_Al[2]//2],20)
        # self.energyall=[[] for _ in self.k_req_max]
        # self.wfall=self.energyall  
        H_bdg_all=[]  
        self.make_system()
        self.k_req_max=k_req_max
        for kz_index, k_req in enumerate(k_req_max):
            # pad k_req by ~10%, round down to an even number, and require at least 40
            k_req = np.max([40, 2 * (1.1 * k_req // 2).astype(int)])
            H_bdg=self.system.hamiltonian_submatrix(params=dict(kz_index=kz_index),sparse=True)
            H_bdg=csc_matrix(np.real((H_bdg+H_bdg.T.conj())/2))
            H_bdg_all.append(H_bdg)
        
        self.H_bdg_all=H_bdg_all

        print('Elapsed time on constructing Hamiltonian is: {:.1f}s'.format(time.time()-start_time))

        executor=MPIPoolExecutor()
        pool_results=executor.map(diagonalization,[(i,self.E_D,j,k) for i,j,k in zip(H_bdg_all,k_req_max,range(len(k_req_max)))])
        executor.shutdown()
        self.energyall=[]
        self.wfall=[]
        for pool_result in pool_results:
            pool_val,pool_vec=pool_result
            self.energyall.append(pool_val)
            self.wfall.append(pool_vec)

        if self.store_history:
            self.energyall_history.append(self.energyall)
            self.wfall_history.append(self.wfall)
        print('Elapsed time is: {:.1f}s'.format(time.time()-start_time))
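The diagonalization worker that the map call above expects is not part of the snippet. A minimal sketch, assuming each task tuple is (H_bdg, E_D, k_req, kz_index) and that the k_req eigenpairs closest to E_D are wanted; the shift-invert settings are assumptions, not the original routine:

# Hypothetical worker for Example #1; not the original diagonalization routine.
import numpy as np
from scipy.sparse.linalg import eigsh

def diagonalization(task):
    H_bdg, E_D, k_req, kz_index = task
    # Shift-invert around E_D to obtain the k_req eigenvalues closest to it.
    val, vec = eigsh(H_bdg, k=int(k_req), sigma=E_D, which='LM')
    order = np.argsort(val)
    return val[order], vec[:, order]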
Example #2
    def __init__(self, procs_per_worker=1):
        """

        :param procs_per_worker: int
        """
        try:
            from mpi4py import MPI
            from mpi4py.futures import MPIPoolExecutor
        except ImportError:
            raise ImportError(
                'nested: MPIFuturesInterface: problem with importing from mpi4py.futures'
            )
        self.global_comm = MPI.COMM_WORLD
        if procs_per_worker > 1:
            print('nested: MPIFuturesInterface: procs_per_worker reduced to 1; collective operations not yet '
                  'implemented')
        self.procs_per_worker = 1
        self.executor = MPIPoolExecutor()
        self.rank = self.global_comm.rank
        self.global_size = self.global_comm.size
        self.num_workers = self.global_size - 1
        self.apply_counter = 0
        self.map = self.map_sync
        self.apply = self.apply_sync
        self.init_workers(disp=True)
Example #3
def run_function(function, folder_dataset, list_subj, list_args=[], nb_cpu=None, verbose=1, test_integrity=0):
    """
    Run a test function on the dataset using multiprocessing and save the results
    :return: results
    # results are organized as the following: tuple of (status, output, DataFrame with results)
    """

    # add full path to each subject
    list_subj_path = [os.path.join(folder_dataset, subject) for subject in list_subj]

    # All scripts that are using multithreading with ITK must not use it when using multiprocessing
    os.environ["ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS"] = "1"

    # create list that finds all the combinations for function + subject path + arguments. Example of one list element:
    # ('sct_propseg', os.path.join(path_sct, 'data', 'sct_test_function', '200_005_s2'), '-i ' + os.path.join("t2", "t2.nii.gz") + ' -c t2', 1)
    list_func_subj_args = list(itertools.product(*[[function], list_subj_path, list_args, [test_integrity]]))
        # data_and_params = itertools.izip(itertools.repeat(function), data_subjects, itertools.repeat(parameters))

    logger.debug("stating pool with {} thread(s)".format(nb_cpu))
    pool = PoolExecutor(nb_cpu)
    compute_time = None
    try:
        compute_time = time.time()
        count = 0
        all_results = []

        # logger.info('Waiting for results, be patient')
        future_dirs = {pool.submit(function_launcher, subject_arg): subject_arg
                         for subject_arg in list_func_subj_args}

        for future in concurrent.futures.as_completed(future_dirs):
            count += 1
            subject = os.path.basename(future_dirs[future][1])
            arguments = future_dirs[future][2]
            try:
                result = future.result()
                sct.no_new_line_log('Processing subjects... {}/{}'.format(count, len(list_func_subj_args)))
                all_results.append(result)
            except Exception as exc:
                logger.error('{} {} generated an exception: {}'.format(subject, arguments, exc))

        compute_time = time.time() - compute_time

        # concatenate all_results into single Panda structure
        results_dataframe = pd.concat(all_results)

    except KeyboardInterrupt:
        logger.warning("\nCaught KeyboardInterrupt, terminating workers")
        for job in future_dirs:
            job.cancel()
    except Exception as e:
        logger.error('Error on line {}'.format(sys.exc_info()[-1].tb_lineno))
        logger.exception(e)
        for job in future_dirs:
            job.cancel()
        raise
    finally:
        pool.shutdown()

    return {'results': results_dataframe, "compute_time": compute_time}
Example #4
class MPIExecutor(BaseExecutor):
    '''Executor for parallel execution using MPI
    
    Parameters
    ----------
    kwargs : all kwargs will be passed on to
             mpi4py.futures.MPIPoolExecutor
    
    Attributes
    ----------
    pool : mpi4py.futures.MPIPoolExecutor instance
    
    
    '''
    
    def __init__(self, **kwargs):
        super(MPIExecutor, self).__init__()
        self.pool = MPIPoolExecutor(**kwargs)

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.pool.shutdown(wait=True)
        return False
    
    def map(self, function, population):
        results = self.pool.map(function, population)
        population, objectives = list(zip(*results))
        
        objectives = np.asarray(objectives)
        
        return population, objectives
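A hypothetical usage sketch for the class above; the mapped callable is assumed to return a (solution, objective) pair per population member, which is what the zip(*results) unpacking relies on:

# Hypothetical usage of MPIExecutor; evaluate() is a stand-in objective function.
def evaluate(x):
    return x, (x - 0.5) ** 2

executor = MPIExecutor(max_workers=4)
population, objectives = executor.map(evaluate, [0.1, 0.4, 0.9])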
Example #5
def mutual_info_run_MPI(L=512, es=100, Bp=False):
    eta_pos_list = []
    MI_pos_list = []
    executor = MPIPoolExecutor()
    inputs = [(L, Bp) for _ in range(es)]
    executor_pool = executor.starmap(MI_pool, inputs)
    executor.shutdown()
    for result in executor_pool:
        eta, MI = result
        eta_pos_list.append(eta)
        MI_pos_list.append(MI)
    return eta_pos_list, MI_pos_list
Example #6
def run_extrinsic(args) -> None:

    comm = MPI.COMM_WORLD
    rank = comm.Get_rank()

    # The pool executor handles all other workers.
    if rank != 0:
        return

    if args.config_format == "yaml":
        cfg = yaml.safe_load(args.config)
    elif args.config_format == "json":
        cfg = json.load(args.config)
    else:
        raise ValueError("Currently only json and yaml formats are supported.")

    cfg_factory = extrinsic_metaparameters.parser(cfg)

    if args.continue_ is None:
        results: List[Result] = []
    else:
        results = parse_existing_trials(args.continue_)

    seen = {str(result.config) for result in results}

    configs = generate_configs(args.ntrials, seen, cfg_factory)

    run = partial(
        evaluate,
        genome=args.genome,
        gff=args.gff,
        hints=args.hints,
        species=args.species,
        tmpdir=args.tmpdir,
        softmasking=args.softmasking,
        singlestrand=args.singlestrand,
        utr=args.utr,
        alt_from_evidence=args.alt_from_evidence,
        min_intron_len=args.min_intron_len,
        allow_hinted_splicesites=args.allow_hinted_splicesites,
        augustus_config_path=args.augustus_config_path,
    )

    # max_workers=None means the pool size is determined from the MPI universe size.
    executor = MPIPoolExecutor(max_workers=None)

    for result in executor.map(run, configs, unordered=True):
        results.append(result)
        seen.add(str(result.config))
        print(json.dumps(result.to_obj()), file=args.outfile)

    return
Example #7
def main():
    num_workers = size - 1
    executor = MPIPoolExecutor()
    for i in range(3):
        start_time = time.time()
        tasks = range(num_workers)
        future_list = executor.map(do_work, tasks)
        returned_ranks = []
        for result in future_list:
            returned_ranks.append(result)
        used_workers = len(set(returned_ranks))
        print('Map: %i used %i/%i unique workers and took %.4f s' %
              (i, used_workers, num_workers, time.time() - start_time))
        if used_workers != num_workers:
            pprint.pprint(returned_ranks)
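The module-level pieces this snippet relies on are not shown. A minimal sketch, assuming size is the MPI world size and that do_work only has to report which worker ran it:

# Assumed setup for Example #7; launch with something like
#   mpiexec -n <N> python -m mpi4py.futures script.py
# so that rank 0 runs main() and the remaining N-1 ranks serve as workers.
import time
import pprint
from mpi4py import MPI
from mpi4py.futures import MPIPoolExecutor

size = MPI.COMM_WORLD.Get_size()

def do_work(task_id):
    # Report the rank of the worker that executed this task.
    return MPI.COMM_WORLD.Get_rank()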
Example #8
def main():
    # Handle CLI
    parser = argparse.ArgumentParser()
    parser.add_argument("--angle-start", type=float, default=0.0, help="the "
                        "inclusive lower bound of angles to optimize the "
                        "slice for (units in degrees, behaves like np.arange)")
    parser.add_argument("--angle-stop", type=float, default=1.0, help="the "
                        "exclusive upper bound of angles to optimize the "
                        "slice for (units in degrees, behaves like np.arange)")
    parser.add_argument("--angle-step", type=float, default=5.0, help="the step size "
                        "between angle values (units in degrees, behaves "
                        "like np.arange)")
    parser.add_argument("--slice-index", type=int, default=0, help="the "
                        "slice to perform QOC on (0-7)")
    args = vars(parser.parse_args())
    angle_start = args["angle_start"]
    angle_stop = args["angle_stop"]
    angle_step = args["angle_step"]
    slice_index = args["slice_index"]

    # Trim slices to only include start thru stop.
    uccsdslice = UCCSD_LIH_SLICES[slice_index]
    # Get the angles to optimize for.
    angle_deg_list = list(np.arange(angle_start, angle_stop, angle_step))
    
    # Construct a state for each job. I.e. each angle from the angle
    # list on the slice specified.
    state_iter = [ProcessState(uccsdslice, slice_index, angle_deg)
                  for angle_deg in angle_deg_list]
    
    # Run QOC for each process state.
    with MPIPoolExecutor(BROADWELL_CORE_COUNT) as executor:
        executor.map(process_init, state_iter)
Example #9
def main(basedir, glob, mask_path, pixel_size, no_mpi, processes, full):
    all_paths = list(basedir.glob(glob))
    pixel_size *= np.pi / 10800.
    with h5py.File(mask_path, 'r') as f2:
        tmask = f2['t'][:]
        pmask = f2['p'][:]

    _get_weight_fsky = partial(get_weight_fsky, tmask, pmask, pixel_size)
    if not no_mpi:
        from mpi4py.futures import MPIPoolExecutor
        with MPIPoolExecutor() as executor:
            data = executor.map(_get_weight_fsky, all_paths)
    else:
        from dautil.util import map_parallel
        data = map_parallel(_get_weight_fsky, all_paths, processes=processes)

    index = pd.Index(list(map(get_greg, all_paths)),
                     name='date') if full else pd.MultiIndex.from_tuples(
                         list(map(get_idx, all_paths)),
                         names=('nullsplit', 'subsplit', 'date'))

    df = pd.DataFrame(data=data,
                      index=index,
                      columns=('tweight', 'pweight', 't_fsky', 'p_fsky'))

    df.sort_index(inplace=True)
    return df
Example #10
def test_crawl():
    with MPIPoolExecutor(10) as executor:
        for url, content in executor.map(load_url,
                                         URLS,
                                         timeout=10,
                                         unordered=True):
            print('%-25s: %6.2f KiB' % (url, len(content) / (1 << 10)))
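load_url and URLS are defined elsewhere; a plausible sketch in which the worker returns the (url, content) pair the loop unpacks. Note that the timeout=10 passed to map above limits how long the result iterator waits for the next result; it is not forwarded to the worker:

# Hypothetical helper for Example #10; URLS is assumed to be a list of URL strings.
from urllib.request import urlopen

URLS = ['https://www.python.org/', 'https://mpi4py.readthedocs.io/']

def load_url(url):
    with urlopen(url, timeout=30) as conn:
        return url, conn.read()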
Example #11
def mpirun(n_workers, f, cfg):
    mpi_args = dict(max_workers=n_workers)  #, path = [])
    with MPIPoolExecutor(**mpi_args) as p:
        out = p.starmap(
            mpistart, zip([cloudpickle.dumps(f)] * n_workers,
                          range(n_workers)))
        return next(out)
Example #12
def launch_with_MPI_futures(population, config, tries=0):
    for individ in population:
        individ.failed = False

    args = pack_args(population, config)
    try:
        print(
            f"--> Starting MPI Pool executor. {len(args)} jobs running on {len(config.servers)} servers"
        )
        with MPIPoolExecutor() as executor:
            results = [result for result in executor.map(trainer.run, args)]
            executor.shutdown(wait=True)
    except TypeError as e:
        print("Caught the infamous _thread.RLock exception")
        print(e)
        if tries > 0:
            exit(0)
        return launch_with_MPI_futures(population, config, tries=tries + 1)

    # Exceptions may occur inside the async training loop.
    # The failed solutions will be discarded:
    original = len(results)
    results = [individ for individ in results if not individ.failed]
    filtered = len(results)
    print(
        f"--> Entire population trained. {original-filtered}/{original} failed."
    )
    for individ in results:
        del individ.failed

    return results
Example #13
def main(args):
    cap, out = get_video(args)
    # Grab the shape of the input
    width = int(cap.get(3))
    height = int(cap.get(4))
    color_state_image = np.zeros((width, height, 3))

    alpr = Alpr("eu", "./ALPR/alpr_config/runtime_data/gb.conf",
                "./ALPR/alpr_config/runtime_data")
    if not alpr.is_loaded():
        print("Error loading OpenALPR")
        sys.exit(1)

    lib_merges = []

    while True:
        ret, frame = cap.read()
        if ret:
            with MPIPoolExecutor(max_workers=3) as executor:
                results = []
                for result in executor.map(mpi_function,
                                           range(3), [args] * 3, [frame] * 3,
                                           [cap] * 3, [color_state_image] * 3):
                    results.append(result)
            # 'output' and 'identifier' are built elsewhere in the original script
            # and are not shown in this snippet.
            if args.save_video:
                out.write(output)
            if args.real_time:
                cv2.imshow(identifier, output)
        else:
            break

    if out is not None:
        out.release()
    if cap is not None:
        cap.release()
Example #14
def distribute_tasks(func_task, tasks, num_proc=1, is_distributed=False):
    """Distribute workload.

    This function distributes the workload using the ``multiprocessing`` or ``mpi4py`` library.
    It simply creates a pool of processes that allow to work on the tasks using shared or
    distributed memory.

    Notes
    -----

    We need to ensure that the number of processes is never larger as the number of tasks as
    otherwise the MPI implementation does not terminate properly.

    * MP Pool, see `here <https://docs.python.org/3/library/multiprocessing.html#multiprocessing.pool.Pool>`_ for details
    * MPI Pool, see `here <https://mpi4py.readthedocs.io/en/stable/mpi4py.futures.html#mpipoolexecutor>`__ for details

    """
    num_proc_intern = min(len(tasks), num_proc)

    if is_distributed:
        assert "PMI_SIZE" in os.environ.keys(
        ), "MPI environment not available."
        from mpi4py.futures import MPIPoolExecutor

        executor = MPIPoolExecutor(num_proc_intern)

    else:
        executor = mp.Pool(num_proc_intern)

    with executor as e:
        rslt = list(e.map(func_task, tasks))

    return rslt
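A hypothetical usage sketch (the snippet itself assumes import os and import multiprocessing as mp); square is a stand-in task and must be defined at module level so the multiprocessing backend can pickle it:

# Hypothetical usage of distribute_tasks with the shared-memory backend.
def square(x):
    return x * x

if __name__ == "__main__":
    print(distribute_tasks(square, list(range(16)), num_proc=4))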
Example #15
    def __init__(self, max_workers=0, use_threads=True, mpi=False):

        self.workers = max_workers
        self.use_threads = use_threads
        try:
            assert self.workers or self.use_threads
        except AssertionError:
            print(
                'WARNING: Specifying no workers and no threads is not allowed, forcing use_threads=True.'
            )
            self.use_threads = True
        #self.tex = ThreadPoolExecutor(max_workers=1) # MIXING threads and processes DOESN'T WORK

        self.processes = []
        self.processes_count = 0
        self.threads = []
        self.threads_count = 0
        self.results = {}

        self.pex = None
        if self.workers:
            if mpi:
                from mpi4py.futures import MPIPoolExecutor
                self.pex = MPIPoolExecutor(max_workers=self.workers)
            else:
                from concurrent.futures import ProcessPoolExecutor
                self.pex = ProcessPoolExecutor(max_workers=self.workers)
Example #16
    def _run_master(self, amount_input_nodes: int, amount_output_nodes,
                    activation_function, challenge: Challenge,
                    config: NeatConfig, seed: int):

        logger.info("Maser - Name: {}, Size: {}, Rank {}/{}", self.name,
                    self.size, self.rank, self.size - 1)
        logger.info("Waiting for workers to start...")
        with MPIPoolExecutor() as self.executor:
            self.executor.bootup(wait=True)
            logger.info("All Workers should have started")

            # Initialize Parameters
            innovation_number_generator = InnovationNumberGeneratorSingleCore()
            species_id_generator = SpeciesIDGeneratorSingleCore()
            agent_id_generator = AgentIDGeneratorSingleCore()

            # Notify callback about starting evaluation
            self._notify_reporters_callback(lambda r: r.on_initialization())

            initial_generation = gs.create_initial_generation(
                amount_input_nodes, amount_output_nodes, activation_function,
                innovation_number_generator, species_id_generator,
                agent_id_generator, config, seed)

            finished_generation = self._evaluation_loop(
                initial_generation, challenge, innovation_number_generator,
                species_id_generator, agent_id_generator, config)

            self._notify_reporters_callback(
                lambda r: r.on_finish(finished_generation, self.reporters))
Example #17
def test_julia_pool():
    with MPIPoolExecutor() as executor:
        print("Loaded Executor")
        tic = time.time()
        image = list(executor.map(julia_line, range(h), chunksize=10))
        toc = time.time()
        print("%s - %s Set %dx%d in %.2f seconds." %
              (gethostname(), 'Julia', w, h, toc - tic))
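The globals used above (w, h, julia_line, gethostname) are not shown. A sketch modelled on the well-known Julia-set demo shipped with mpi4py; the constants here are assumptions:

# Assumed setup for Example #17.
import time
from socket import gethostname
from mpi4py.futures import MPIPoolExecutor

x0, x1, y0, y1 = -2.0, 2.0, -1.5, 1.5
w, h = 750, 500
dx, dy = (x1 - x0) / w, (y1 - y0) / h
c = complex(0, 0.65)

def julia(x, y):
    # Escape-time iteration for the point (x, y).
    z = complex(x, y)
    n = 255
    while abs(z) < 3 and n > 1:
        z = z ** 2 + c
        n -= 1
    return n

def julia_line(k):
    # Compute one horizontal line (row k) of the image.
    line = bytearray(w)
    y = y1 - k * dy
    for j in range(w):
        x = x0 + j * dx
        line[j] = julia(x, y)
    return line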
Example #18
    def run(self, data):
        from mpi4py import MPI
        from mpi4py.futures import MPIPoolExecutor
        # Swap the MPI serializer to dill so a wider range of callables and data can be pickled.
        MPI.pickle.__init__(dill.dumps, dill.loads)

        with MPIPoolExecutor(max_workers=self.max_workers) as pool:
            result = pool.map(self.operations, data)
        return result
Example #19
def get_executor(workers=4, backend='thread'):
    if backend == 'mpi':
        executor = MPIPoolExecutor(max_workers=workers)
    elif backend == 'process':
        executor = ProcessPoolExecutor(max_workers=workers)
    else:
        executor = ThreadPoolExecutor(max_workers=workers)
    return executor
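The imports this factory needs, plus a hypothetical usage with a trivial task (square is a stand-in); the 'mpi' backend additionally requires the script to be launched under MPI:

# Imports assumed by get_executor, and a usage sketch with the thread backend.
from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor
from mpi4py.futures import MPIPoolExecutor

def square(x):
    return x * x

with get_executor(workers=8, backend='thread') as executor:
    print(list(executor.map(square, range(8))))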
Example #20
def main(args):
    with MPIPoolExecutor(max_workers=3) as executor:
        executor.map(mpi_function, range(3), [args]*3)

    if out is not None:
        out.release()
    if cap is not None:
        cap.release()
Example #21
def mutual_info_run_MPI(T, es, L):
    delta_list = np.linspace(-1, 1, 50)
    log_neg_dis_list = []
    ensemblesize = es

    for delta in delta_list:
        log_neg_ensemble_list = []
        mutual_info_ensemble_list_pool = []
        executor = MPIPoolExecutor()
        inputs = [(delta, T, L) for _ in range(ensemblesize)]
        mutual_info_ensemble_list_pool = executor.starmap(MI_pool, inputs)
        executor.shutdown()
        for result in mutual_info_ensemble_list_pool:
            LN = result
            log_neg_ensemble_list.append(LN)
        log_neg_dis_list.append(log_neg_ensemble_list)

    return delta_list, log_neg_dis_list
Example #22
    def run_MPIPool(self, fileformat, jobs):
        def runGaussian(job):
            return self.runGaussianInParallel(fileformat, job)

        with MPIPoolExecutor() as executor:
            results = executor.map(runGaussian, jobs)
            for _ in results:
                pass
        self._logging("finish jobs:", *jobs)
Example #23
    def run_MPIPool(self, fileformat, jobs):
        def runGaussian(job):
            return self.runGaussianInParallel(fileformat, job)

        with MPIPoolExecutor() as executor:
            results = executor.map(runGaussian, jobs)
            for _ in results:
                pass
        logging.info(f"finish {len(jobs)} jobs.")
Example #24
def mutual_info_run_MPI(s_prob, es=100):
    delta_list = np.linspace(-1, 1, 100)**3
    mutual_info_dis_list = []
    if s_prob == 0 or s_prob == 1:
        ensemblesize = 1
    else:
        ensemblesize = es

    for delta in delta_list:
        mutual_info_ensemble_list = []
        mutual_info_ensemble_list_pool = []
        executor = MPIPoolExecutor()
        inputs = [(delta, s_prob) for _ in range(ensemblesize)]
        mutual_info_ensemble_list_pool = executor.starmap(MI_pool, inputs)
        executor.shutdown()
        for result in mutual_info_ensemble_list_pool:
            mutual_info_ensemble_list.append(result)
        mutual_info_dis_list.append(mutual_info_ensemble_list)
    return delta_list, mutual_info_dis_list
Example #25
def mutual_info_run_MPI(s_prob,L):
    delta_list=np.linspace(-1,1,100)**3
    mutual_info_dis_list=[]
 
    params=Params(delta=0,L=L,bc=-1,basis='m')
    proj_range=np.arange(int(params.L/2),params.L,2)
    s_list_list=params.generate_position_list(np.arange(int(params.L/2),params.L,2),s_prob)
    for delta in delta_list:
        mutual_info_ensemble_list=[]
        mutual_info_ensemble_list_pool=[]        
        executor=MPIPoolExecutor()
        inputs=[(delta,proj_range,s_list,L) for s_list in (s_list_list)]
        mutual_info_ensemble_list_pool=executor.starmap(MI_pool,inputs)
        executor.shutdown()
        for result in mutual_info_ensemble_list_pool:
            mutual_info_ensemble_list.append(result)
        mutual_info_dis_list.append(mutual_info_ensemble_list)
    
    return delta_list,mutual_info_dis_list
Example #26
def _starmap_parallel_mpi(f: 'Callable',
                          args: 'Iterable[Iterable]',
                          return_results: 'bool' = True,
                          **kwargs) -> 'list':
    from mpi4py.futures import MPIPoolExecutor

    with MPIPoolExecutor() as mpi_pool_executor:
        res = mpi_pool_executor.starmap(f, args)
        if return_results:
            return list(res)
        else:
            return []
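A hypothetical usage sketch: each tuple in args is unpacked into the callable by starmap, and the results come back as a list when return_results is True:

# Hypothetical usage of _starmap_parallel_mpi; power() is a stand-in task.
def power(base, exponent):
    return base ** exponent

print(_starmap_parallel_mpi(power, [(2, 3), (3, 2), (5, 2)]))  # [8, 9, 25]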
Example #27
    def make_surrogate_L10(self, inflow, cases, seeds, turbine, x_cross,
                           y_down, outfile):
        grid = []
        with MPIPoolExecutor(max_workers=num_cores) as executor:
            for L10 in executor.map(self.calc_L10, cases, repeat(inflow),
                                    repeat(seeds), repeat(turbine),
                                    repeat(outfile)):
                grid.append(L10)
        grid = np.reshape(grid, (len(x_cross), len(y_down)))
        f_interp = interp2d(x_cross, y_down, grid[:, :])

        return f_interp
Example #28
    def _handler(self, request, response):
        if 'delay' in request.inputs:
            seconds = request.inputs['delay'][0].data
        else:
            seconds = 1
        response.update_status('PyWPS Process started. Waiting...', 50)
        # path=[MODULE_PATH] extends sys.path in the worker processes so they
        # can import the module that defines the sleep task.
        with MPIPoolExecutor(max_workers=None, path=[MODULE_PATH]) as executor:
            data = [seconds for i in range(4)]
            result = executor.map(sleep, data)
            print(result)
        response.outputs['sleep_output'].data = 'done sleeping'
        return response
Example #29
def mutual_info_run_MPI(T, es, L, mtype):
    delta_list = np.linspace(0, 1, 26)
    mutual_info_dis_list = []
    log_neg_dis_list = []
    ensemblesize = es

    for delta in delta_list:
        log_neg_ensemble_list = []
        mutual_info_ensemble_list = []
        mutual_info_ensemble_list_pool = []
        executor = MPIPoolExecutor()
        inputs = [(delta, T, L, mtype) for _ in range(ensemblesize)]
        mutual_info_ensemble_list_pool = executor.map(MI_pool, inputs)
        executor.shutdown()
        for result in mutual_info_ensemble_list_pool:
            MI, LN = result
            mutual_info_ensemble_list.append(MI)
            log_neg_ensemble_list.append(LN)
        mutual_info_dis_list.append(mutual_info_ensemble_list)
        log_neg_dis_list.append(log_neg_ensemble_list)

    return delta_list, log_neg_dis_list
Example #30
def main():

    # Define a data queue. This serves as the connection between the receiver (datastream from KSTAR)
    # to the executor that handles the analysis routines
    dq = queue.Queue()
    msg = AdiosMessage(0, None)
    # Dummy data of the same shape as the DFT of a time-chunk.
    data = np.zeros([192, 512, 38], dtype=np.complex128)

    # Define an executor that handles the analysis tasks
    executor = MPIPoolExecutor(max_workers=2)
    #executor = concurrent.futures.ThreadPoolExecutor(max_workers=4)

    # Start a worker thread that pops element from the queue and dispatches it to
    # the executor
    worker = threading.Thread(target=consume, args=(dq, executor))
    worker.start()

    # Start the receiver loop. Here we receive data chunks from remote
    for i in range(5):
        # Receive time chunk data
        logging.info(f"Received time chunk {i}")
        # Compile a message with the current data
        msg = AdiosMessage(tstep_idx=i, data=data)
        # Put the data in the queue
        dq.put(msg)

    logging.info("Finished the receiver loop")
    # Put the hang-up message in the queue
    dq.put(AdiosMessage(None, None))
    # Close the queue
    dq.join()
    # Stop the worker process
    worker.join()

    # Shutdown the MPIPoolExecutor
    # This has to be done after the queue has joined!
    executor.shutdown()
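The consume thread target is not shown. A minimal sketch under the assumption that the hang-up message is recognised by tstep_idx being None and that some analysis routine (here the placeholder analyze_chunk) is submitted per message:

# Hypothetical consumer for Example #30; analyze_chunk is a placeholder.
def consume(dq, executor):
    while True:
        msg = dq.get()
        if msg.tstep_idx is None:
            dq.task_done()
            break
        executor.submit(analyze_chunk, msg.data)
        dq.task_done()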
Example #31
def main(args):
    in_paths = chain(*(glob(glob_i, recursive=True) for glob_i in args.input))

    __h5assert_isfinite = partial(_h5assert_isfinite, verbose=args.verbose)
    if args.use_mpi:
        from mpi4py.futures import MPIPoolExecutor
        with MPIPoolExecutor() as executor:
            executor.map(__h5assert_isfinite, in_paths)
    elif args.p > 1:
        from dautil.util import get_map_parallel
        map_parallel = get_map_parallel(args.p)
        map_parallel(__h5assert_isfinite, in_paths)
    else:
        list(map(__h5assert_isfinite, in_paths))
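In the MPI branch the iterator returned by executor.map is never consumed, so any exception raised inside __h5assert_isfinite on a worker is never re-raised in the caller. A small variant of that branch which drains the iterator before the pool shuts down:

        # Consume the results so worker exceptions are re-raised here.
        with MPIPoolExecutor() as executor:
            list(executor.map(__h5assert_isfinite, in_paths))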