def do_query(args):
    status = None
    if args.status:
        status = TaskStatus(args.status.lower())
    name = args.name
    tasks = ConfigManager(db_path=None).query_tasks(status=status, name=name)
    prepared = [
        (
            wid,
            t.status,
            t.name,
            t.start,
            (", ".join(t.labels) if t.labels else ""),
            t.execution_dir,
        )
        for wid, t in tasks.items()
    ]
    prepared.sort(key=lambda p: p[3] if p[3] else DateUtil.max())
    print(
        tabulate.tabulate(
            prepared,
            headers=["TaskID", "status", "name", "start date", "labels", "path"],
        ),
        file=sys.stdout,
    )
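# Illustrative sketch only (not the project's actual CLI wiring): how a handler
# like do_query might be registered as an argparse subcommand. The subcommand
# name, help strings, and this helper's name are assumptions for demonstration;
# the flag names mirror the attributes do_query reads from `args`.
def _example_register_query_subcommand(subparsers):
    parser = subparsers.add_parser("query", help="List tracked tasks")
    parser.add_argument("--status", help="Filter by task status, eg 'running'")
    parser.add_argument("--name", help="Filter by workflow name")
    parser.set_defaults(func=do_query)
    return parser


# hypothetical usage:
#   parser = argparse.ArgumentParser("janis")
#   _example_register_query_subcommand(parser.add_subparsers())
#   args = parser.parse_args(["query", "--status", "running"])
#   args.func(args)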
def do_rm(args):
    wids = args.wid
    for wid in wids:
        try:
            ConfigManager.manager().remove_task(wid, keep_output=args.keep)
        except Exception as e:
            Logger.critical(f"Can't remove {wid}: " + str(e))
def abort_wids(sids: List[str], wait=True):
    cm = ConfigManager(db_path=None)
    for sid in sids:
        try:
            row = cm.get_row_for_submission_id_or_path(sid)
            WorkflowManager.mark_aborted(row.execution_dir, row.submission_id)
        except Exception as e:
            Logger.critical(f"Couldn't abort '{sid}': " + str(e))
            raise e

    if wait:
        Logger.info(
            "Waiting until completely aborted. This can take up to a few minutes to complete."
        )
        for sid in sids:
            try:
                wm = ConfigManager.get_from_path_or_submission_lazy(sid, readonly=True)
                check_attempts = 0
                while not wm.database.get_uncached_status().is_in_final_state():
                    time.sleep(1)
                    check_attempts += 1
                    if check_attempts % 5 == 0:
                        Logger.info(
                            f"Still waiting for '{sid}' to move to final state"
                        )
            except Exception as e:
                Logger.critical(f"Couldn't watch '{sid}' until aborted: {str(e)}")
        Logger.info(f"Jobs {' '.join(sids)} should be completely aborted now")
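# Hypothetical CLI handler (an assumption, not code from the project) showing how
# abort_wids(sids, wait=...) above might be driven from parsed arguments. `args.wid`
# is assumed to be a list of submission ids or paths, and `no_wait` an assumed flag
# that skips the polling loop.
def _example_do_abort(args):
    abort_wids(args.wid, wait=not getattr(args, "no_wait", False))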
def do_rm(args):
    wids = args.wid
    for wid in wids:
        try:
            ConfigManager.get_from_path_or_submission_lazy(
                wid, readonly=True
            ).remove_task(wid, keep_output=args.keep)
        except Exception as e:
            Logger.critical(f"Can't remove {wid}: " + str(e))
def do_metadata(args):
    wid = args.wid
    Logger.mute()
    if wid == "*":
        tasks = ConfigManager.manager().taskDB.get_all_tasks()
        for t in tasks:
            try:
                print("--- TASKID = " + t.wid + " ---")
                ConfigManager.manager().from_wid(t.wid, readonly=True).log_dbtaskinfo()
            except Exception as e:
                print("\tAn error occurred: " + str(e))
    else:
        tm = ConfigManager.manager().from_wid(wid)
        tm.log_dbtaskinfo()
    Logger.unmute()
def do_metadata(args):
    wid = args.wid
    Logger.mute()
    cm = ConfigManager(db_path=None)
    if wid == "*":
        tasks = cm._taskDB.get_all_tasks()
        for t in tasks:
            try:
                print("--- TASKID = " + t.wid + " ---")
                cm.get_from_path_or_submission(t.wid, readonly=True).log_dbtaskinfo()
            except Exception as e:
                print("\tAn error occurred: " + str(e))
    else:
        tm = cm.get_from_path_or_submission(wid, readonly=True)
        tm.log_dbtaskinfo()
    Logger.unmute()
def do_environment(args):
    method = args.method
    if method == "list":
        return print(", ".join(ConfigManager.manager().environmentDB.get_env_ids()))
    raise NotImplementedError(f"No implementation for '{method}' yet")
def do_watch(args):
    wid = args.wid
    refresh = args.refresh
    if args.once:
        # --once overrides --refresh
        refresh = -1
    brief = args.brief
    monochrome = args.monochrome
    tm = ConfigManager.manager().from_wid(wid, readonly=True)
    tm.watch(seconds=refresh, brief=brief, monochrome=monochrome)
def do_watch(args):
    wid = args.wid
    refresh = args.refresh
    if args.once:
        # --once overrides --refresh
        refresh = -1
    brief = args.brief
    monochrome = args.monochrome
    wm = ConfigManager.get_from_path_or_submission_lazy(wid, readonly=False)
    wm.watch(seconds=refresh, brief=brief, monochrome=monochrome)
def abort_wids(wids: List[str]):
    for wid in wids:
        try:
            row = ConfigManager.manager().get_lazy_db_connection().get_by_wid(wid)
            if row:
                WorkflowManager.mark_aborted(row.outputdir, row.wid)
            else:
                WorkflowManager.mark_aborted(wid, None)
        except Exception as e:
            Logger.critical(f"Couldn't abort '{wid}': " + str(e))
            raise e
def resume(wid, foreground: bool = False):
    wm = ConfigManager.manager().from_wid(wid, readonly=False)
    if not wm:
        raise Exception("Couldn't find workflow manager with wid = " + str(wid))

    run_in_background = False
    if foreground:
        run_in_background = False
    elif wm.database.workflowmetadata.configuration.run_in_background:
        run_in_background = True

    wm.start_or_submit(run_in_background=run_in_background)
def do_wait(args):
    wids = args.wid

    statuses = {}
    for wid in wids:
        wm = ConfigManager.get_from_path_or_submission_lazy(wid, readonly=True)
        Logger.info(f"Waiting for '{wid}' to finish")
        status = wm.database.get_uncached_status()
        # poll every 2 seconds until this workflow reaches a final state
        while not status.is_in_final_state():
            sleep(2)
            status = wm.database.get_uncached_status()
        statuses[wid] = (wm.submission_id, status)
        Logger.info(f"Workflow {wid} finished with status: {status.to_string()}")

    collapsed_status = TaskStatus.collapse_states([s[1] for s in statuses.values()])
    rc = collapsed_status.get_exit_code()
    Logger.info(
        f"All workflows finished with collapsed status {collapsed_status.to_string()}, exiting with rc={rc}"
    )
    sys.exit(rc)
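# Rough illustration (not the actual TaskStatus implementation) of the idea behind
# collapse_states / get_exit_code used above: if every workflow completed, the
# whole `wait` invocation exits 0, otherwise it exits non-zero. The status strings
# and the non-zero return code here are assumptions for demonstration.
def _example_collapse_exit_code(statuses):
    return 0 if all(s == "completed" for s in statuses) else 3


# _example_collapse_exit_code(["completed", "completed"]) -> 0
# _example_collapse_exit_code(["completed", "failed"])    -> 3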
def do_resume(args):
    # if args.job:
    #     from os import getcwd
    #
    #     # parse and load the job file
    #     Logger.info("Specified job file, ignoring all other parameters")
    #     d = parse_dict(get_file_from_searchname(args.job, getcwd()))
    #     job = PreparedJob(**d)
    #     wm = ConfigManager.get_from_path_or_submission_lazy(
    #         job.execution_dir, readonly=False
    #     )
    #
    # else:
    wm = ConfigManager.get_from_path_or_submission_lazy(args.wid, readonly=False)

    run_in_background = False
    if args.foreground:
        run_in_background = False
    elif wm.database.workflowmetadata.configuration.run_in_background:
        run_in_background = True

    wm.start_or_submit(run_in_background=run_in_background)
def fromjanis(
    workflow: Union[str, j.Tool, Type[j.Tool]],
    name: str = None,
    engine: Union[str, Engine] = None,
    filescheme: Union[str, FileScheme] = LocalFileScheme(),
    validation_reqs=None,
    batchrun_reqs=None,
    hints: Optional[Dict[str, str]] = None,
    output_dir: Optional[str] = None,
    dryrun: bool = False,
    inputs: Union[str, dict] = None,
    required_inputs: dict = None,
    watch=True,
    max_cores=None,
    max_memory=None,
    force=False,
    keep_intermediate_files=False,
    recipes=None,
    run_in_background=True,
    run_in_foreground=None,
    dbconfig=None,
    only_toolbox=False,
    no_store=False,
    allow_empty_container=False,
    check_files=True,
    container_override: dict = None,
    **kwargs,
):
    cm = ConfigManager.manager()
    jc = JanisConfiguration.manager()

    wf: Optional[Tool] = resolve_tool(
        tool=workflow,
        name=name,
        from_toolshed=True,
        only_toolbox=only_toolbox,
        force=force,
    )
    if not wf:
        raise Exception("Couldn't find workflow with name: " + str(workflow))

    # if isinstance(tool, j.CommandTool):
    #     tool = tool.wrapped_in_wf()
    # elif isinstance(tool, j.CodeTool):
    #     tool = tool.wrapped_in_wf()

    # organise inputs
    inputsdict = {}

    if recipes:
        valuesfromrecipe = jc.recipes.get_recipe_for_keys(recipes)
        inputsdict.update(valuesfromrecipe)

    inputsdict.update(
        cascade_inputs(
            wf=wf,
            inputs=inputs,
            required_inputs=required_inputs,
            batchrun_options=batchrun_reqs,
        )
    )

    row = cm.create_task_base(wf, outdir=output_dir, store_in_centraldb=not no_store)
    print(row.wid, file=sys.stdout)

    engine = engine or jc.engine

    eng = get_engine_from_eng(
        engine,
        wid=row.wid,
        execdir=WorkflowManager.get_path_for_component_and_dir(
            row.outputdir, WorkflowManager.WorkflowManagerPath.execution
        ),
        confdir=WorkflowManager.get_path_for_component_and_dir(
            row.outputdir, WorkflowManager.WorkflowManagerPath.configuration
        ),
        logfile=os.path.join(
            WorkflowManager.get_path_for_component_and_dir(
                row.outputdir, WorkflowManager.WorkflowManagerPath.logs
            ),
            "engine.log",
        ),
        watch=watch,
        **kwargs,
    )
    fs = get_filescheme_from_fs(filescheme, **kwargs)
    environment = Environment(f"custom_{wf.id()}", eng, fs)

    try:
        # Note: run_in_foreground can be None, so
        # (not (run_in_foreground is True)) != (run_in_foreground is False)
        should_run_in_background = (
            run_in_background is True or jc.run_in_background is True
        ) and not (run_in_foreground is True)

        tm = cm.start_task(
            wid=row.wid,
            tool=wf,
            environment=environment,
            validation_requirements=validation_reqs,
            batchrun_requirements=batchrun_reqs,
            task_path=row.outputdir,
            hints=hints,
            inputs_dict=inputsdict,
            dryrun=dryrun,
            watch=watch,
            max_cores=max_cores,
            max_memory=max_memory,
            keep_intermediate_files=keep_intermediate_files,
            run_in_background=should_run_in_background,
            dbconfig=dbconfig,
            allow_empty_container=allow_empty_container,
            container_override=container_override,
            check_files=check_files,
        )
        Logger.log("Finished starting task")
        return tm

    except KeyboardInterrupt:
        Logger.info("Exiting...")

    except Exception as e:
        # Have to make sure we stop the engine if something happens when creating
        # the task that causes janis to exit early
        environment.engine.stop_engine()
        raise e
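# Small standalone demonstration of the three-state flag logic noted in the comment
# inside fromjanis: because run_in_foreground may be None, True or False,
# `not (run_in_foreground is True)` is not the same test as
# `run_in_foreground is False`. The helper name is hypothetical; the expression
# mirrors the one used above.
def _example_should_run_in_background(run_in_background, config_background, run_in_foreground):
    return (run_in_background is True or config_background is True) and not (
        run_in_foreground is True
    )


# _example_should_run_in_background(True, False, None)  -> True   (no explicit foreground request)
# _example_should_run_in_background(True, False, False) -> True   (False and None behave the same here)
# _example_should_run_in_background(True, False, True)  -> False  (an explicit foreground request wins)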
def do_rawquery(args):
    wid = args.wid
    wm = ConfigManager.get_from_path_or_submission_lazy(wid, readonly=True)
    with wm.database.with_cursor() as cursor:
        result = cursor.execute(args.query).fetchall()
    return print(tabulate.tabulate(result))
def cleanup():
    ConfigManager.manager().cleanup_missing_tasks()
def do_pause(args):
    wm = ConfigManager.get_from_path_or_submission_lazy(args.wid, readonly=True)
    wm.mark_paused(wm.execution_dir)
def do_rawquery(args):
    wid = args.wid
    wm = ConfigManager.manager().from_wid(wid, readonly=True)
    with wm.database.with_cursor() as cursor:
        result = cursor.execute(args.query).fetchall()
    return print(tabulate.tabulate(result))
def run_from_jobfile(
    workflow: Union[str, j.Tool, Type[j.Tool]],
    jobfile: PreparedJob,
    engine: Union[str, Engine, None] = None,
    wait: bool = False,
    # specific engine args
    cromwell_jar: Optional[str] = None,
    cromwell_url: Optional[str] = None,
):
    cm = ConfigManager(db_path=jobfile.db_path)

    if not workflow:
        raise Exception("Couldn't find workflow with name: " + str(workflow))

    row = cm.create_task_base(
        wf=workflow,
        job=jobfile,
    )

    jobfile.execution_dir = row.execution_dir
    jobfile.output_dir = row.output_dir

    # set logger for submit
    Logger.set_write_level(Logger.CONSOLE_LEVEL)
    logpath = os.path.join(
        WorkflowManager.get_path_for_component_and_dir(
            row.execution_dir, WorkflowManager.WorkflowManagerPath.logs
        ),
        "janis-submit.log",
    )
    Logger.WRITE_LEVELS = {Logger.CONSOLE_LEVEL: (logpath, open(logpath, "a"))}
    Logger.debug(f"Set submission logging to '{logpath}'")

    print(row.submission_id, file=sys.stdout)

    eng = get_engine_from_eng(
        engine or jobfile.engine,
        wid=row.submission_id,
        execdir=WorkflowManager.get_path_for_component_and_dir(
            row.execution_dir, WorkflowManager.WorkflowManagerPath.execution
        ),
        confdir=WorkflowManager.get_path_for_component_and_dir(
            row.execution_dir, WorkflowManager.WorkflowManagerPath.configuration
        ),
        logfile=os.path.join(
            WorkflowManager.get_path_for_component_and_dir(
                row.execution_dir, WorkflowManager.WorkflowManagerPath.logs
            ),
            "engine.log",
        ),
        cromwell_jar=cromwell_jar,
        cromwell_url=cromwell_url,
    )

    # initialised before the try so the fall-through `return wm` below can't
    # raise NameError if the interrupt arrives before from_janis assigns it
    wm = None
    try:
        wm = WorkflowManager.from_janis(
            submission_id=row.submission_id,
            tool=workflow,
            engine=eng,
            prepared_submission=jobfile,
            wait=wait,
        )
        Logger.log("Finished starting task")
        return wm

    except KeyboardInterrupt:
        Logger.info("Exiting...")
        try:
            wm.abort()
        except:
            pass

    except Exception as e:
        # Have to make sure we stop the engine if something happens when creating
        # the task that causes janis to exit early
        eng.stop_engine()
        raise e

    return wm
def pause(wid):
    wm = ConfigManager.manager().from_wid(wid)
    if not wm:
        raise Exception("Couldn't find workflow manager with wid = " + str(wid))
    wm.mark_paused()
def cleanup():
    ConfigManager(db_path=None).cleanup_missing_tasks()
def test_reconnect(self):
    wid = "fbe6ad"  # 44395a
    ConfigManager.manager().from_tid(wid).resume()