def main(config_file, fc_dir=None, run_info_yaml=None, numcores=None,
         paralleltype=None, queue=None, scheduler=None, upgrade=None,
         profile=None, workflow=None, inputs=None, resources="",
         timeout=15, retries=None):
    """Run a bcbio analysis, dispatching on the configured parallel type.

    Loads the system configuration, determines the parallelization approach
    and core count, then starts the pipeline either in-process (``local`` /
    ``messaging-main`` / ``ipython``) or by launching and monitoring
    distributed messaging workers (``messaging``).

    :param config_file: path to the system YAML configuration.
    :param fc_dir: optional flowcell/input directory.
    :param run_info_yaml: optional run description YAML.
    :param numcores: explicit core count; overrides the configuration.
    :param paralleltype: parallelization approach; auto-detected if None.
    :raises ValueError: on an unrecognized parallel type.
    """
    work_dir = os.getcwd()
    config, config_file = load_system_config(config_file)
    if config.get("log_dir", None) is None:
        # Default log location lives alongside the working directory.
        config["log_dir"] = os.path.join(work_dir, "log")
    paralleltype, numcores = _get_cores_and_type(config, fc_dir, run_info_yaml,
                                                 numcores, paralleltype)
    parallel = {"type": paralleltype, "cores": numcores,
                "scheduler": scheduler, "queue": queue,
                "profile": profile, "module": "bcbio.distributed",
                "resources": resources, "timeout": timeout,
                "retries": retries}
    if parallel["type"] in ["local", "messaging-main"]:
        # Bug fix: only override the configured core count when the caller
        # supplied one. The previous `is None` test clobbered the
        # configuration value with None.
        if numcores is not None:
            config["algorithm"]["num_cores"] = numcores
        run_main(config, config_file, work_dir, parallel, fc_dir, run_info_yaml)
    elif parallel["type"] == "messaging":
        parallel["task_module"] = "bcbio.distributed.tasks"
        args = [config_file, fc_dir]
        if run_info_yaml:
            args.append(run_info_yaml)
        messaging.run_and_monitor(config, config_file, args, parallel)
    elif parallel["type"] == "ipython":
        # IPython runs submit jobs to a scheduler, so both must be given.
        assert parallel["queue"] is not None, "IPython parallel requires a specified queue (-q)"
        assert parallel["scheduler"] is not None, "IPython parallel requires a specified scheduler (-s)"
        run_main(config, config_file, work_dir, parallel, fc_dir, run_info_yaml)
    else:
        raise ValueError("Unexpected type of parallel run: %s" % parallel["type"])
def main(config_file, fc_dir=None, run_info_yaml=None, numcores=None,
         paralleltype=None, profile="default"):
    """Run a bcbio analysis, dispatching on the configured parallel type.

    :param config_file: path to the system YAML configuration.
    :param fc_dir: optional flowcell/input directory.
    :param run_info_yaml: optional run description YAML.
    :param numcores: explicit core count; overrides the configuration.
    :param paralleltype: parallelization approach; auto-detected if None.
    :param profile: IPython/cluster profile name.
    :raises ValueError: on an unrecognized parallel type.
    """
    work_dir = os.getcwd()
    config = load_config(config_file)
    if config.get("log_dir", None) is None:
        # Default log location lives alongside the working directory.
        config["log_dir"] = os.path.join(work_dir, "log")
    paralleltype, numcores = _get_cores_and_type(config, fc_dir, run_info_yaml,
                                                 numcores, paralleltype)
    parallel = {"type": paralleltype,
                "cores": numcores,
                "profile": profile,
                "module": "bcbio.distributed"}
    if parallel["type"] in ["local", "messaging-main"]:
        # Bug fix: only override the configured core count when the caller
        # supplied one. The previous `is None` test clobbered the
        # configuration value with None.
        if numcores is not None:
            config["algorithm"]["num_cores"] = numcores
        run_main(config, config_file, work_dir, parallel, fc_dir, run_info_yaml)
    elif parallel["type"] == "messaging":
        parallel["task_module"] = "bcbio.distributed.tasks"
        args = [config_file, fc_dir]
        if run_info_yaml:
            args.append(run_info_yaml)
        messaging.run_and_monitor(config, config_file, args, parallel)
    elif parallel["type"] == "ipython":
        run_main(config, config_file, work_dir, parallel, fc_dir, run_info_yaml)
    else:
        raise ValueError("Unexpected type of parallel run: %s" % parallel["type"])
def main(config_file, fc_dir=None, run_info_yaml=None, numcores=None,
         paralleltype=None, profile="default"):
    """Run a bcbio analysis, dispatching on the configured parallel type.

    :param config_file: path to the system YAML configuration.
    :param fc_dir: optional flowcell/input directory.
    :param run_info_yaml: optional run description YAML.
    :param numcores: explicit core count; overrides the configuration.
    :param paralleltype: parallelization approach; auto-detected if None.
    :param profile: IPython/cluster profile name.
    :raises ValueError: on an unrecognized parallel type.
    """
    work_dir = os.getcwd()
    config = load_config(config_file)
    if config.get("log_dir", None) is None:
        # Default log location lives alongside the working directory.
        config["log_dir"] = os.path.join(work_dir, "log")
    paralleltype, numcores = _get_cores_and_type(config, fc_dir, run_info_yaml,
                                                 numcores, paralleltype)
    parallel = {"type": paralleltype, "cores": numcores,
                "profile": profile, "module": "bcbio.distributed"}
    if parallel["type"] in ["local", "messaging-main"]:
        # Bug fix: only override the configured core count when the caller
        # supplied one. The previous `is None` test clobbered the
        # configuration value with None.
        if numcores is not None:
            config["algorithm"]["num_cores"] = numcores
        run_main(config, config_file, work_dir, parallel, fc_dir, run_info_yaml)
    elif parallel["type"] == "messaging":
        parallel["task_module"] = "bcbio.distributed.tasks"
        args = [config_file, fc_dir]
        if run_info_yaml:
            args.append(run_info_yaml)
        messaging.run_and_monitor(config, config_file, args, parallel)
    elif parallel["type"] == "ipython":
        run_main(config, config_file, work_dir, parallel, fc_dir, run_info_yaml)
    else:
        raise ValueError("Unexpected type of parallel run: %s" % parallel["type"])
def main(system_config_file, cur_config_file):
    """Launch and monitor one trimming worker per configured experiment.

    Merges the system and run configurations, then starts as many
    ``hbc.trim`` queue workers as there are experiments defined.

    :param system_config_file: path to the shared system configuration.
    :param cur_config_file: path to the run-specific configuration.
    """
    config = utils.merge_config_files([system_config_file, cur_config_file])
    # One worker per experiment entry in the merged configuration.
    run_and_monitor(config, system_config_file,
                    [system_config_file, cur_config_file],
                    len(config["experiments"]),
                    "bcbio.hbc.linker.tasks",
                    queues="hbc.trim")
def main(config_file, fc_dir, run_info_yaml=None, num_workers=None):
    """Start distributed messaging workers for a run and monitor them.

    :param config_file: path to the YAML pipeline configuration; must have
        ``algorithm.num_cores`` set to ``"messaging"``.
    :param fc_dir: flowcell/input directory for the run.
    :param run_info_yaml: optional run description YAML.
    :param num_workers: worker count; derived from the run info if None.
    """
    with open(config_file) as in_handle:
        # safe_load avoids arbitrary Python object construction from the
        # config file; a Loader-less yaml.load is also an error in PyYAML 6+.
        config = yaml.safe_load(in_handle)
    assert config["algorithm"]["num_cores"] == "messaging", \
        "Use this script only with configured 'messaging' parallelization"
    if num_workers is None:
        num_workers = _needed_workers(get_run_info(fc_dir, config, run_info_yaml)[-1])
    task_module = "bcbio.distributed.tasks"
    args = [config_file, fc_dir]
    if run_info_yaml:
        args.append(run_info_yaml)
    run_and_monitor(config, config_file, args, num_workers, task_module)
def main(config_file, fc_dir=None, run_info_yaml=None, numcores=None,
         paralleltype=None, queue=None, scheduler=None, upgrade=None,
         profile=None, workflow=None, inputs=None, resources="",
         timeout=15, retries=None):
    """Run a bcbio analysis, dispatching on the configured parallel type.

    Loads the system configuration, determines the parallelization approach
    and core count, then starts the pipeline either in-process (``local`` /
    ``messaging-main`` / ``ipython``) or by launching and monitoring
    distributed messaging workers (``messaging``).

    :param config_file: path to the system YAML configuration.
    :param fc_dir: optional flowcell/input directory.
    :param run_info_yaml: optional run description YAML.
    :param numcores: explicit core count; overrides the configuration.
    :param paralleltype: parallelization approach; auto-detected if None.
    :raises ValueError: on an unrecognized parallel type.
    """
    work_dir = os.getcwd()
    config, config_file = load_system_config(config_file)
    if config.get("log_dir", None) is None:
        # Default log location lives alongside the working directory.
        config["log_dir"] = os.path.join(work_dir, "log")
    paralleltype, numcores = _get_cores_and_type(config, fc_dir, run_info_yaml,
                                                 numcores, paralleltype)
    parallel = {"type": paralleltype, "cores": numcores,
                "scheduler": scheduler, "queue": queue,
                "profile": profile, "module": "bcbio.distributed",
                "resources": resources, "timeout": timeout,
                "retries": retries}
    if parallel["type"] in ["local", "messaging-main"]:
        # Bug fix: only override the configured core count when the caller
        # supplied one. The previous `is None` test clobbered the
        # configuration value with None.
        if numcores is not None:
            config["algorithm"]["num_cores"] = numcores
        run_main(config, config_file, work_dir, parallel, fc_dir, run_info_yaml)
    elif parallel["type"] == "messaging":
        parallel["task_module"] = "bcbio.distributed.tasks"
        args = [config_file, fc_dir]
        if run_info_yaml:
            args.append(run_info_yaml)
        messaging.run_and_monitor(config, config_file, args, parallel)
    elif parallel["type"] == "ipython":
        # IPython runs submit jobs to a scheduler, so both must be given.
        assert parallel["queue"] is not None, "IPython parallel requires a specified queue (-q)"
        assert parallel["scheduler"] is not None, "IPython parallel requires a specified scheduler (-s)"
        run_main(config, config_file, work_dir, parallel, fc_dir, run_info_yaml)
    else:
        raise ValueError("Unexpected type of parallel run: %s" % parallel["type"])
def main(config_file, fc_dir, run_info_yaml=None, num_workers=None):
    """Start distributed messaging workers for a run and monitor them.

    The worker count comes from, in order: the explicit ``num_workers``
    argument, the cluster's available nodes when the configuration requests
    ``"all"``, or the needs of the run description.

    :param config_file: YAML pipeline configuration; must have
        ``algorithm.num_cores`` set to ``"messaging"``.
    :param fc_dir: flowcell/input directory for the run.
    :param run_info_yaml: optional run description YAML.
    :param num_workers: explicit worker count, or None to derive it.
    """
    config = load_config(config_file)
    assert config["algorithm"]["num_cores"] == "messaging", \
        "Use this script only with configured 'messaging' parallelization"
    if num_workers is None and config["distributed"].get("num_workers", "") == "all":
        cp = config["distributed"]["cluster_platform"]
        cluster = __import__("bcbio.distributed.{0}".format(cp), fromlist=[cp])
        # Reserve one node for the controller process.
        num_workers = cluster.available_nodes(config["distributed"]["platform_args"]) - 1
    if num_workers is None:
        num_workers = _needed_workers(get_run_info(fc_dir, config, run_info_yaml)[-1])
    args = [config_file, fc_dir]
    if run_info_yaml:
        args.append(run_info_yaml)
    run_and_monitor(config, config_file, args, num_workers, "bcbio.distributed.tasks")
def main(config_file, fc_dir, run_info_yaml=None, num_workers=None):
    """Launch messaging-based distributed workers and monitor the run.

    Resolves the worker count from the explicit argument, from the cluster
    size when the configuration requests ``"all"``, or from the run
    description, then hands off to ``run_and_monitor``.

    :param config_file: YAML pipeline configuration; must have
        ``algorithm.num_cores`` set to ``"messaging"``.
    :param fc_dir: flowcell/input directory for the run.
    :param run_info_yaml: optional run description YAML.
    :param num_workers: explicit worker count, or None to derive it.
    """
    config = load_config(config_file)
    assert config["algorithm"]["num_cores"] == "messaging", \
        "Use this script only with configured 'messaging' parallelization"
    if num_workers is None:
        dist = config["distributed"]
        if dist.get("num_workers", "") == "all":
            platform = dist["cluster_platform"]
            cluster_mod = __import__("bcbio.distributed.{0}".format(platform),
                                     fromlist=[platform])
            # One node is kept back for the controller process.
            num_workers = cluster_mod.available_nodes(dist["platform_args"]) - 1
        if num_workers is None:
            num_workers = _needed_workers(
                get_run_info(fc_dir, config, run_info_yaml)[-1])
    task_module = "bcbio.distributed.tasks"
    run_args = [config_file, fc_dir]
    if run_info_yaml:
        run_args.append(run_info_yaml)
    run_and_monitor(config, config_file, run_args, num_workers, task_module)