def run_multicore(fn, items, config, parallel=None):
    """Execute ``fn`` over ``items`` in parallel on local cores.

    When the caller has not supplied a fully-resolved ``parallel``
    specification (one containing ``num_jobs``), derive one from system
    information and the configured core count before fanning out via joblib.
    Returns the flattened, non-empty results from every job.
    """
    if len(items) == 0:
        return []
    if parallel is None or "num_jobs" not in parallel:
        if parallel is None:
            cores = config["algorithm"].get("num_cores", 1)
            parallel = {"type": "local", "cores": cores}
        sysinfo = system.get_info({}, parallel)
        multiplier = parallel.get("multiplier", 1)
        # cap per-job multicore usage at the machine's core count unless overridden
        core_cap = int(parallel.get("max_multicore", sysinfo["cores"]))
        parallel = resources.calculate(parallel, items, sysinfo, config,
                                       multiplier, max_multicore=core_cap)
    with_cores = [config_utils.add_cores_to_config(x, parallel["cores_per_job"])
                  for x in items]
    if joblib is None:
        raise ImportError("Need joblib for multiprocessing parallelization")
    runner = joblib.Parallel(parallel["num_jobs"], batch_size=1,
                             backend="multiprocessing")
    out = []
    for chunk in runner(joblib.delayed(fn)(x) for x in with_cores):
        if chunk:
            out.extend(chunk)
    return out
def run(fn_name, items):
    """Map a function over ``items`` through the ipython view, flattening results.

    ``fn_name`` may be either a callable or a string naming an
    ipython-exposed function; both forms are resolved to the function
    plus its printable name. Arguments are compressed with ``zip_args``
    before dispatch and expanded again with ``unzip_args`` on return.
    """
    setpath.prepend_bcbiopath()
    if callable(fn_name):
        fn, fn_name = fn_name, fn_name.__name__
    else:
        fn = _get_ipython_fn(fn_name, parallel)
    items = [x for x in items if x is not None]
    items = diagnostics.track_parallel(items, fn_name)
    logger.info("ipython: %s" % fn_name)
    out = []
    if items:
        items = [config_utils.add_cores_to_config(x, parallel["cores_per_job"], parallel)
                 for x in items]
        if "wrapper" in parallel:
            # pass only resource hints through to the wrapper invocation
            wrap_parallel = {k: v for k, v in parallel.items() if k in {"fresources"}}
            prefix = [fn_name] + parallel.get("wrapper_args", []) + [wrap_parallel]
            items = [prefix + list(x) for x in items]
        for data in view.map_sync(fn, zip_args(list(items)), track=False):
            if data:
                out.extend(unzip_args(data))
    return out
def run_multicore(fn, items, config, parallel=None):
    """Apply ``fn`` to every item using multiple local cores via joblib.

    If ``parallel`` lacks a computed ``num_jobs``, build a local parallel
    specification from the configured core count and the machine's
    resources first. Returns all non-empty job results flattened into one list.
    """
    if len(items) == 0:
        return []
    if parallel is None or "num_jobs" not in parallel:
        if parallel is None:
            parallel = {"type": "local",
                        "cores": config["algorithm"].get("num_cores", 1)}
        sysinfo = system.get_info({}, parallel)
        parallel = resources.calculate(
            parallel, items, sysinfo, config, parallel.get("multiplier", 1),
            max_multicore=int(parallel.get("max_multicore", sysinfo["cores"])))
    prepped = [config_utils.add_cores_to_config(x, parallel["cores_per_job"])
               for x in items]
    if joblib is None:
        raise ImportError("Need joblib for multiprocessing parallelization")
    parallel_runner = joblib.Parallel(parallel["num_jobs"])
    out = []
    for result in parallel_runner(joblib.delayed(fn)(x) for x in prepped):
        if result:
            out.extend(result)
    return out
def run(fn_name, items):
    """Map the named ipython function over non-None items, flattening results.

    Items are annotated with the per-job core count before dispatch; when a
    ``wrapper`` is configured, each item is rewritten into wrapper-call form.
    """
    todo = [x for x in items if x is not None]
    todo = diagnostics.track_parallel(todo, fn_name)
    fn = _get_ipython_fn(fn_name, parallel)
    logger.info("ipython: %s" % fn_name)
    out = []
    if len(todo) > 0:
        todo = [config_utils.add_cores_to_config(x, parallel["cores_per_job"], parallel)
                for x in todo]
        if "wrapper" in parallel:
            wrapper_prefix = [fn_name] + parallel.get("wrapper_args", [])
            todo = [wrapper_prefix + list(x) for x in todo]
        for result in view.map_sync(fn, todo, track=False):
            if result:
                out.extend(result)
    return out
def run(fn_name, items):
    """Run ``fn_name`` over items on the ipython view, compressing arguments.

    Non-None items are tracked for diagnostics, given the per-job core
    count, optionally rewritten for a configured wrapper, then dispatched
    with ``zip_args`` and the results expanded via ``unzip_args``.
    """
    usable = [item for item in items if item is not None]
    usable = diagnostics.track_parallel(usable, fn_name)
    fn = _get_ipython_fn(fn_name, parallel)
    logger.info("ipython: %s" % fn_name)
    results = []
    if len(usable) == 0:
        return results
    usable = [config_utils.add_cores_to_config(item, parallel["cores_per_job"], parallel)
              for item in usable]
    if "wrapper" in parallel:
        # forward only the resource-related settings to the wrapper
        wrap_parallel = dict((k, v) for k, v in parallel.items()
                             if k in set(["fresources"]))
        usable = [[fn_name] + parallel.get("wrapper_args", []) + [wrap_parallel] + list(item)
                  for item in usable]
    for data in view.map_sync(fn, zip_args([a for a in usable]), track=False):
        if data:
            results.extend(unzip_args(data))
    return results
def run(fn_name, items):
    """Distribute ``fn_name`` across the ipython view for each non-None item.

    Each item receives the per-job core count; when a ``wrapper`` is
    configured, items are rewritten into wrapper-call form carrying the
    resource hints. Non-empty results from every job are flattened.
    """
    work = [x for x in items if x is not None]
    work = diagnostics.track_parallel(work, fn_name)
    fn = _get_ipython_fn(fn_name, parallel)
    logger.info("ipython: %s" % fn_name)
    collected = []
    if len(work) > 0:
        work = [config_utils.add_cores_to_config(x, parallel["cores_per_job"], parallel)
                for x in work]
        if "wrapper" in parallel:
            # keep only the resource-hint entries for the wrapper call
            extras = {key: val for key, val in parallel.items()
                      if key == "fresources"}
            work = [[fn_name] + parallel.get("wrapper_args", []) + [extras] + list(x)
                    for x in work]
        for batch in view.map_sync(fn, work, track=False):
            if batch:
                collected.extend(batch)
    return collected