def print_exception(ex, linemaps, print_traceback=True):
    """
    Print an error message for a given exception.

    Arguments
    ex        -- the exception
    linemaps  -- a dict mapping each snakefile to a dict that translates
                 compiled line numbers to source code line numbers in
                 that snakefile
    """
    # traceback.print_exception(type(ex), ex, ex.__traceback__)
    # Try to map the exception back to a source line in a snakefile.
    origin = get_exception_origin(ex, linemaps)
    if origin is not None:
        lineno, file = origin
        logger.critical(format_error(
            ex, lineno, linemaps=linemaps, snakefile=file,
            show_traceback=print_traceback))
        return

    if isinstance(ex, SyntaxError):
        logger.critical(format_error(
            ex, ex.lineno, linemaps=linemaps, snakefile=ex.filename,
            show_traceback=print_traceback))
    elif isinstance(ex, TokenError):
        logger.critical(format_error(ex, None, show_traceback=print_traceback))
    elif isinstance(ex, RuleException):
        # Report the exception itself along with every exception it includes.
        for e in ex._include + [ex]:
            if not e.omit:
                logger.critical(format_error(
                    e, e.lineno, linemaps=linemaps, snakefile=e.filename,
                    show_traceback=print_traceback))
    elif isinstance(ex, WorkflowError):
        logger.critical(format_error(
            ex, ex.lineno, linemaps=linemaps, snakefile=ex.snakefile,
            show_traceback=print_traceback))
    elif isinstance(ex, KeyboardInterrupt):
        logger.warning("Cancelling snakemake on user request.")
    else:
        # Unknown exception type: fall back to a plain Python traceback.
        traceback.print_exception(type(ex), ex, ex.__traceback__)
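
# A minimal usage sketch for print_exception (illustrative only). The
# snakefile path and the linemaps contents below are hypothetical; they
# are chosen to exercise the SyntaxError branch above.
def _demo_print_exception():
    # linemaps: snakefile path -> {compiled line number: source line number}
    linemaps = {"/path/to/Snakefile": {1: 1, 2: 3, 3: 4}}
    try:
        # Compiling invalid code raises a SyntaxError whose filename and
        # lineno attributes carry the (hypothetical) snakefile location.
        compile("rule all:\n    input 'x'", "/path/to/Snakefile", "exec")
    except SyntaxError as ex:
        print_exception(ex, linemaps)
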
def execute(self, targets=None, dryrun=False, touch=False, cores=1,
            forcetargets=False, forceall=False, forcerun=None,
            prioritytargets=None, quiet=False, keepgoing=False,
            printshellcmds=False, printreason=False, printdag=False,
            cluster=None, immediate_submit=False, ignore_ambiguity=False,
            workdir=None, printrulegraph=False, stats=None,
            force_incomplete=False, ignore_incomplete=False,
            list_version_changes=False, list_code_changes=False,
            list_input_changes=False, list_params_changes=False,
            summary=False, output_wait=3, nolock=False, unlock=False,
            resources=None, notemp=False, nodeps=False,
            cleanup_metadata=None):
    # Cluster runs manage resources on the nodes themselves, so only
    # honor user-provided resources for local execution.
    self.global_resources = (dict() if cluster or resources is None
                             else resources)
    self.global_resources["_cores"] = cores

    def rules(items):
        return map(self._rules.__getitem__, filter(self.is_rule, items))

    def files(items):
        return map(os.path.relpath, filterfalse(self.is_rule, items))

    if workdir is None:
        workdir = os.getcwd() if self._workdir is None else self._workdir
    os.chdir(workdir)

    if not targets:
        targets = ([self.first_rule]
                   if self.first_rule is not None else list())
    if prioritytargets is None:
        prioritytargets = list()
    if forcerun is None:
        forcerun = list()

    # Split the given targets into rule names and output files.
    priorityrules = set(rules(prioritytargets))
    priorityfiles = set(files(prioritytargets))
    forcerules = set(rules(forcerun))
    forcefiles = set(files(forcerun))
    targetrules = set(chain(
        rules(targets),
        filterfalse(Rule.has_wildcards, priorityrules),
        filterfalse(Rule.has_wildcards, forcerules)))
    targetfiles = set(chain(files(targets), priorityfiles, forcefiles))
    if forcetargets:
        forcefiles.update(targetfiles)
        forcerules.update(targetrules)

    dag = DAG(
        self, dryrun=dryrun, targetfiles=targetfiles,
        targetrules=targetrules, forceall=forceall, forcefiles=forcefiles,
        forcerules=forcerules, priorityfiles=priorityfiles,
        priorityrules=priorityrules, ignore_ambiguity=ignore_ambiguity,
        force_incomplete=force_incomplete,
        ignore_incomplete=ignore_incomplete, notemp=notemp)

    self.persistence = Persistence(nolock=nolock, dag=dag)

    if cleanup_metadata:
        for f in cleanup_metadata:
            self.persistence.cleanup_metadata(f)
        return True

    dag.init()
    dag.check_dynamic()

    if unlock:
        try:
            self.persistence.cleanup_locks()
            logger.warning("Unlocking working directory.")
            return True
        except IOError:
            logger.error("Error: Unlocking the directory {} failed. Maybe "
                         "you don't have the permissions?".format(workdir))
            return False
    try:
        self.persistence.lock()
    except IOError:
        logger.critical(
            "Error: Directory cannot be locked. Please make "
            "sure that no other Snakemake process is trying to create "
            "the same files in the following directory:\n{}\n"
            "If you are sure that no other "
            "instances of snakemake are running on this directory, "
            "the remaining lock was likely caused by a kill signal or "
            "a power loss. It can be removed with "
            "the --unlock argument.".format(os.getcwd()))
        return False

    dag.check_incomplete()
    dag.postprocess()

    if nodeps:
        missing_input = [f for job in dag.targetjobs for f in job.input
                         if dag.needrun(job) and not os.path.exists(f)]
        if missing_input:  # only abort if input files are actually missing
            logger.critical(
                "Dependency resolution disabled (--nodeps) but missing "
                "input files detected. If this happens on a cluster, "
                "please make sure that you handle the dependencies "
                "yourself or turn off --immediate-submit. "
                "Missing input files:\n{}".format("\n".join(missing_input)))
            return False

    # Informational modes: print the requested report and return without
    # executing any jobs.
    if printdag:
        print(dag)
        return True
    elif printrulegraph:
        print(dag.rule_dot())
        return True
    elif summary:
        print("\n".join(dag.summary()))
        return True
    elif list_version_changes:
        items = list(chain(*map(self.persistence.version_changed, dag.jobs)))
        if items:
            print(*items, sep="\n")
        return True
    elif list_code_changes:
        items = list(chain(*map(self.persistence.code_changed, dag.jobs)))
        if items:
            print(*items, sep="\n")
        return True
    elif list_input_changes:
        items = list(chain(*map(self.persistence.input_changed, dag.jobs)))
        if items:
            print(*items, sep="\n")
        return True
    elif list_params_changes:
        items = list(chain(*map(self.persistence.params_changed, dag.jobs)))
        if items:
            print(*items, sep="\n")
        return True

    scheduler = JobScheduler(
        self, dag, cores, dryrun=dryrun, touch=touch, cluster=cluster,
        immediate_submit=immediate_submit, quiet=quiet, keepgoing=keepgoing,
        printreason=printreason, printshellcmds=printshellcmds,
        output_wait=output_wait)

    if not dryrun and not quiet and len(dag):
        if cluster:
            logger.warning("Provided cluster nodes: {}".format(cores))
        else:
            logger.warning("Provided cores: {}".format(cores))
        logger.warning("\n".join(dag.stats()))

    success = scheduler.schedule()

    if success:
        if dryrun:
            if not quiet:
                logger.warning("\n".join(dag.stats()))
        elif stats:
            scheduler.stats.to_csv(stats)
    else:
        logger.critical("Exiting because a job execution failed. "
                        "Look above for error message.")
        return False
    return True
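
# A minimal usage sketch for execute() (illustrative only). It assumes a
# Workflow instance has already been constructed and its snakefile
# included; the target names below are hypothetical.
def _demo_execute(workflow):
    # Dry-run two targets with 4 cores, forcing them to be re-evaluated.
    # execute() returns True on success and False on failure.
    ok = workflow.execute(targets=["results/summary.txt", "all"],
                          cores=4, dryrun=True, forcetargets=True)
    if not ok:
        raise SystemExit(1)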