def prompt_param(self, param_id, param_obj, is_required, answers):
    """Prompt for a single parameter and merge the response into ``answers``.

    Args:
        param_id: Parameter ID (string)
        param_obj: JSON Schema object describing this parameter (dict)
        is_required: Whether the parameter is listed as required (bool)
        answers: Dict of previously collected answers (mutated in place)

    Returns:
        The updated ``answers`` dict.
    """
    # Print the question
    question = self.single_param_to_questionary(param_id, param_obj, answers)
    answer = questionary.unsafe_prompt([question], style=nf_core.utils.nfcore_question_style)

    # If required and got an empty response, ask again
    while type(answer[param_id]) is str and answer[param_id].strip() == "" and is_required:
        # Bug fix: the message previously rendered "'–-param'" with an
        # en-dash instead of the CLI flag prefix "--".
        log.error("'--{}' is required".format(param_id))
        answer = questionary.unsafe_prompt([question], style=nf_core.utils.nfcore_question_style)

    # Ignore if empty
    if answer[param_id] == "":
        answer = {}

    # Previously entered something but this time we deleted it
    if param_id not in answer and param_id in answers:
        answers.pop(param_id)
    # Everything else (first time answer no response or normal response)
    else:
        answers.update(answer)

    return answers
def answers(self) -> AnswersMap:
    """Container of all answers to the questionary.

    It asks the user the 1st time it is called, if running interactively.
    """
    answers_map = AnswersMap(
        default=self.template.default_answers,
        init=self.data,
        last=self.subproject.last_answers,
        metadata=self.template.metadata,
    )
    # One Question per template variable, skipping anything already supplied
    # via init data (those must never be asked again).
    pending: List[Question] = [
        Question(
            answers=answers_map,
            ask_user=not self.defaults,
            jinja_env=self.jinja_env,
            var_name=name,
            **spec,
        )
        for name, spec in self.template.questions_data.items()
        if name not in answers_map.init
    ]
    for q in pending:
        # Display TUI and ask user interactively only without --defaults
        if self.defaults:
            new_answer = q.get_default()
        else:
            new_answer = unsafe_prompt(
                q.get_questionary_structure(), answers=answers_map.combined
            )[q.var_name]
        if new_answer != answers_map.combined.get(q.var_name):
            answers_map.user[q.var_name] = new_answer
    return answers_map
def prompt_group(self, group_id, group_obj):
    """
    Prompt for edits to a group of parameters (subschema in 'definitions')

    Args:
        group_id: Paramater ID (string)
        group_obj: JSON Schema keys (dict)

    Returns:
        Dict of param_id:val answers
    """
    answers = {}
    while True:
        # Rebuild the menu every pass so freshly entered values show in titles
        choices = ["Continue >>", questionary.Separator()]
        for param_id, param in group_obj["properties"].items():
            if param.get("hidden", False) and not self.show_hidden:
                continue
            label = param_id
            if param_id in answers:
                label += " [{}]".format(answers[param_id])
            elif "default" in param:
                label += " [{}]".format(param["default"])
            choices.append(questionary.Choice(title=label, value=param_id))

        # Skip if all questions hidden
        if len(choices) == 2:
            return {}

        question = {
            "type": "list",
            "name": group_id,
            "message": group_obj.get("title", group_id),
            "choices": choices,
        }
        self.print_param_header(group_id, group_obj)
        answer = questionary.unsafe_prompt([question], style=nfcore_question_style)

        if answer[group_id] != "Continue >>":
            # A single parameter was picked - prompt for it and loop again
            selected = answer[group_id]
            required = selected in group_obj.get("required", [])
            answers.update(self.prompt_param(selected, group_obj["properties"][selected], required, answers))
            continue

        # User chose to continue: verify every required param has some value
        missing_required = False
        for p_required in group_obj.get("required", []):
            req_default = self.schema_obj.input_params.get(p_required, "")
            req_answer = answers.get(p_required, "")
            if req_default == "" and req_answer == "":
                log.error("'--{}' is required.".format(p_required))
                missing_required = True
        if not missing_required:
            return answers
def run_tests_workflow(self, command):
    """Given a test workflow and an entry point, run the test workflow.

    Runs the command twice (into separate output dirs) to check repeatability.

    Args:
        command: The base nextflow command to run (string)

    Returns:
        Tuple of (tmp_dir, tmp_dir_repeat) output directories of the two runs.

    Raises:
        AssertionError: If Nextflow appears not to be installed.
        UserWarning: If the test workflow exits with an error.
    """
    # The config expects $PROFILE and Nextflow fails if it's not set
    if os.environ.get("PROFILE") is None:
        os.environ["PROFILE"] = ""
        if self.no_prompts:
            log.info(
                "Setting env var '$PROFILE' to an empty string as not set.\n"
                "Tests will run with Docker by default. "
                "To use Singularity set 'export PROFILE=singularity' in your shell before running this command."
            )
        else:
            question = {
                "type": "list",
                "name": "profile",
                "message": "Choose software profile",
                "choices": ["Docker", "Singularity", "Conda"],
            }
            answer = questionary.unsafe_prompt([question], style=nf_core.utils.nfcore_question_style)
            profile = answer["profile"].lower()
            if profile in ["singularity", "conda"]:
                os.environ["PROFILE"] = profile
                log.info(f"Setting env var '$PROFILE' to '{profile}'")

    tmp_dir = tempfile.mkdtemp()
    tmp_dir_repeat = tempfile.mkdtemp()
    work_dir = tempfile.mkdtemp()
    command_repeat = command + f" --outdir {tmp_dir_repeat} -work-dir {work_dir}"
    command += f" --outdir {tmp_dir} -work-dir {work_dir}"

    log.info(f"Running '{self.module_name}' test with command:\n[violet]{command}")
    try:
        nfconfig_raw = subprocess.check_output(shlex.split(command))
        log.info("Repeating test ...")
        nfconfig_raw = subprocess.check_output(shlex.split(command_repeat))
    except OSError as e:
        if e.errno == errno.ENOENT and command.strip().startswith("nextflow "):
            raise AssertionError(
                "It looks like Nextflow is not installed. It is required for most nf-core functions."
            )
        # Bug fix: previously any other OSError was silently swallowed and the
        # function fell through, returning the tmp dirs as if the test had
        # succeeded. Re-raise it instead.
        raise
    except subprocess.CalledProcessError as e:
        raise UserWarning(f"Error running test workflow (exit code {e.returncode})\n[red]{e.output.decode()}")
    except Exception as e:
        raise UserWarning(f"Error running test workflow: {e}")
    else:
        log.info("Test workflow finished!")
        log.debug(nfconfig_raw)

    return tmp_dir, tmp_dir_repeat
def prompt_param(self, param_id, param_obj, is_required, answers):
    """Prompt for a single parameter.

    Args:
        param_id: Parameter ID (string)
        param_obj: JSON Schema object describing this parameter (dict)
        is_required: Whether the parameter is listed as required (bool)
        answers: Dict of previously collected answers, passed through to the
            question builder (not mutated here)

    Returns:
        Dict of {param_id: value}, or an empty dict if the response was empty.
    """
    # Print the question
    question = self.single_param_to_questionary(param_id, param_obj, answers)
    answer = questionary.unsafe_prompt([question], style=nfcore_question_style)

    # If required and got an empty response, ask again
    while type(answer[param_id]) is str and answer[param_id].strip() == "" and is_required:
        # Bug fix: the message previously rendered "'–-param'" with an
        # en-dash instead of the CLI flag prefix "--".
        log.error("'--{}' is required".format(param_id))
        answer = questionary.unsafe_prompt([question], style=nfcore_question_style)

    # Don't return empty answers
    if answer[param_id] == "":
        return {}
    return answer
def prompt_web_gui(self):
    """Ask whether to use the web-based or cli wizard to collect params"""
    log.info(
        "[magenta]Would you like to enter pipeline parameters using a web-based interface or a command-line wizard?"
    )
    launch_question = {
        "type": "list",
        "name": "use_web_gui",
        "message": "Choose launch method",
        "choices": ["Web based", "Command line"],
        "default": "Web based",
    }
    response = questionary.unsafe_prompt([launch_question], style=nf_core.utils.nfcore_question_style)
    # True when the user picked the web-based interface
    return response["use_web_gui"] == "Web based"
def get_answers(self) -> AnyByStrDict:
    """Obtain answers for all questions.

    It produces a TUI for querying the user if `ask_user` is true. Otherwise, it
    gets answers from other sources.
    """
    previous_answers = self.get_best_answers()
    if self.ask_user:
        self.answers_user = unsafe_prompt(
            (q.get_questionary_structure() for q in self.questions),
            answers=previous_answers,
        )
        return self.answers_user
    # Avoid prompting, so no TTY is required when --force is used
    for q in self.questions:
        default = q.get_default()
        if default != previous_answers.get(q.var_name):
            self.answers_user[q.var_name] = default
    return self.answers_user
def lint(self, module=None, all_modules=False, print_results=True, show_passed=False, local=False):
    """
    Lint all or one specific module

    First gets a list of all local modules (in modules/local/process) and all modules
    installed from nf-core (in modules/nf-core/software)

    For all nf-core modules, the correct file structure is assured and important
    file content is verified. If directory subject to linting is a clone of 'nf-core/modules',
    the files necessary for testing the modules are also inspected.

    For all local modules, the '.nf' file is checked for some important flags, and warnings
    are issued if untypical content is found.

    :param module:          A specific module to lint
    :param print_results:   Whether to print the linting results
    :param show_passed:     Whether passed tests should be shown as well

    :returns:               dict of {passed, warned, failed}
    """
    # Get list of all modules in a pipeline
    local_modules, nfcore_modules = self.get_installed_modules()

    # Prompt for module or all
    if module is None and not all_modules:
        question = {
            "type": "list",
            "name": "all_modules",
            "message": "Lint all modules or a single named module?",
            "choices": ["All modules", "Named module"],
        }
        answer = questionary.unsafe_prompt([question], style=nf_core.utils.nfcore_question_style)
        if answer["all_modules"] == "All modules":
            all_modules = True
        else:
            # Bug fix: use unsafe_ask() (consistent with bump_versions) so a
            # KeyboardInterrupt propagates; .ask() returns None on Ctrl-C,
            # which would silently fall through with module=None.
            module = questionary.autocomplete(
                "Tool name:",
                choices=[m.module_name for m in nfcore_modules],
                style=nf_core.utils.nfcore_question_style,
            ).unsafe_ask()

    # Only lint the given module
    if module:
        if all_modules:
            raise ModuleLintException("You cannot specify a tool and request all tools to be linted.")
        local_modules = []
        nfcore_modules = [m for m in nfcore_modules if m.module_name == module]
        if len(nfcore_modules) == 0:
            raise ModuleLintException(f"Could not find the specified module: '{module}'")

    if self.repo_type == "modules":
        log.info(f"Linting modules repo: [magenta]{self.dir}")
    else:
        log.info(f"Linting pipeline: [magenta]{self.dir}")
    if module:
        log.info(f"Linting module: [magenta]{module}")

    # Lint local modules
    if local and len(local_modules) > 0:
        self.lint_local_modules(local_modules)

    # Lint nf-core modules
    if len(nfcore_modules) > 0:
        self.lint_nfcore_modules(nfcore_modules)
        self.check_module_changes(nfcore_modules)

    if print_results:
        self._print_results(show_passed=show_passed)

    return {"passed": self.passed, "warned": self.warned, "failed": self.failed}
def bump_versions(self, module=None, all_modules=False, show_uptodate=False):
    """
    Bump the container and conda version of single module or all modules

    Looks for a bioconda tool version in the `main.nf` file of the module and checks whether
    are more recent version is available. If yes, then tries to get docker/singularity
    container links and replace the bioconda version and the container links in the main.nf file
    of the respective module.

    Args:
        module: a specific module to update
        all_modules: whether to bump versions for all modules
        show_uptodate: whether to also report modules that are already up to date
    """
    # Reset result accumulators for this run
    self.up_to_date = []
    self.updated = []
    self.failed = []
    self.show_up_to_date = show_uptodate

    # Verify that this is not a pipeline
    repo_type = nf_core.modules.module_utils.get_repo_type(self.dir)
    if not repo_type == "modules":
        raise nf_core.modules.module_utils.ModuleException(
            "This command only works on the nf-core/modules repository, not on pipelines!"
        )

    # Get list of all modules (local modules are ignored)
    _, nfcore_modules = nf_core.modules.module_utils.get_installed_modules(self.dir)

    # Load the .nf-core-tools.config
    self.tools_config = nf_core.utils.load_tools_config(self.dir)

    # Prompt for module or all when neither was given on the command line
    if module is None and not all_modules:
        question = {
            "type": "list",
            "name": "all_modules",
            "message": "Bump versions for all modules or a single named module?",
            "choices": ["All modules", "Named module"],
        }
        answer = questionary.unsafe_prompt([question], style=nf_core.utils.nfcore_question_style)
        if answer["all_modules"] == "All modules":
            all_modules = True
        else:
            module = questionary.autocomplete(
                "Tool name:",
                choices=[m.module_name for m in nfcore_modules],
                style=nf_core.utils.nfcore_question_style,
            ).unsafe_ask()

    if module:
        # For a single named module always report it even if already up to date
        self.show_up_to_date = True
        if all_modules:
            raise nf_core.modules.module_utils.ModuleException(
                "You cannot specify a tool and request all tools to be bumped."
            )
        nfcore_modules = [m for m in nfcore_modules if m.module_name == module]
        if len(nfcore_modules) == 0:
            raise nf_core.modules.module_utils.ModuleException(f"Could not find the specified module: '{module}'")

    # NOTE(review): assumes nfcore_modules is non-empty here (nfcore_modules[0]
    # below would raise IndexError on an empty modules repo) — TODO confirm
    progress_bar = rich.progress.Progress(
        "[bold blue]{task.description}",
        rich.progress.BarColumn(bar_width=None),
        "[magenta]{task.completed} of {task.total}[reset] » [bold yellow]{task.fields[test_name]}",
        transient=True,
    )
    with progress_bar:
        bump_progress = progress_bar.add_task(
            "Bumping nf-core modules versions", total=len(nfcore_modules), test_name=nfcore_modules[0].module_name
        )
        for mod in nfcore_modules:
            progress_bar.update(bump_progress, advance=1, test_name=mod.module_name)
            self.bump_module_version(mod)

    self._print_results()
def lint(self, module=None, key=(), all_modules=False, print_results=True, show_passed=False, local=False):
    """
    Lint all or one specific module

    First gets a list of all local modules (in modules/local/process) and all modules
    installed from nf-core (in modules/nf-core/modules)

    For all nf-core modules, the correct file structure is assured and important
    file content is verified. If directory subject to linting is a clone of 'nf-core/modules',
    the files necessary for testing the modules are also inspected.

    For all local modules, the '.nf' file is checked for some important flags, and warnings
    are issued if untypical content is found.

    :param module:          A specific module to lint
    :param print_results:   Whether to print the linting results
    :param show_passed:     Whether passed tests should be shown as well

    :returns:               A ModuleLint object containing information of
                            the passed, warned and failed tests
    """
    # Interactively decide what to lint when nothing was specified
    if module is None and not all_modules:
        prompts = [
            {
                "type": "list",
                "name": "all_modules",
                "message": "Lint all modules or a single named module?",
                "choices": ["All modules", "Named module"],
            },
            {
                "type": "autocomplete",
                "name": "tool_name",
                "message": "Tool name:",
                "when": lambda x: x["all_modules"] == "Named module",
                "choices": [m.module_name for m in self.all_nfcore_modules],
            },
        ]
        responses = questionary.unsafe_prompt(prompts, style=nf_core.utils.nfcore_question_style)
        all_modules = responses["all_modules"] == "All modules"
        module = responses.get("tool_name")

    # Only lint the given module
    if module:
        if all_modules:
            raise ModuleLintException("You cannot specify a tool and request all tools to be linted.")
        local_modules = []
        nfcore_modules = [m for m in self.all_nfcore_modules if m.module_name == module]
        if not nfcore_modules:
            raise ModuleLintException(f"Could not find the specified module: '{module}'")
    else:
        local_modules = self.all_local_modules
        nfcore_modules = self.all_nfcore_modules

    if self.repo_type == "modules":
        log.info(f"Linting modules repo: [magenta]'{self.dir}'")
    else:
        log.info(f"Linting pipeline: [magenta]'{self.dir}'")
    if module:
        log.info(f"Linting module: [magenta]'{module}'")

    # Filter the tests by the key if one is supplied
    if key:
        self.filter_tests_by_key(key)
        log.info("Only running tests: '{}'".format("', '".join(key)))

    # If it is a pipeline, load the lint config file and the modules.json file
    if self.repo_type == "pipeline":
        self.set_up_pipeline_files()

    # Lint local modules
    if local and len(local_modules) > 0:
        self.lint_modules(local_modules, local=True)

    # Lint nf-core modules
    if len(nfcore_modules) > 0:
        self.lint_modules(nfcore_modules, local=False)

    if print_results:
        self._print_results(show_passed=show_passed)
        self.print_summary()
def prompt_group(self, group_id, group_obj):
    """
    Prompt for edits to a group of parameters (subschema in 'definitions')

    Loops a list-style menu until the user picks "Continue >>" with all
    required parameters answered.

    Args:
        group_id: Parameter ID (string)
        group_obj: JSON Schema keys (dict)

    Returns:
        Dict of param_id:val answers
    """
    while_break = False
    answers = {}
    error_msgs = []
    while not while_break:
        # Only re-print the group header on a clean pass; when we are
        # re-prompting because of errors the errors are shown instead
        if len(error_msgs) == 0:
            self.print_param_header(group_id, group_obj, True)

        question = {
            "type": "list",
            "name": group_id,
            "qmark": "",
            "message": "",
            "instruction": " ",
            "choices": ["Continue >>", questionary.Separator()],
        }

        # Show error messages if we have any, as disabled (unselectable) choices
        for msg in error_msgs:
            question["choices"].append(
                questionary.Choice(
                    [("bg:ansiblack fg:ansired bold", " error "), ("fg:ansired", f" - {msg}")], disabled=True
                )
            )
        error_msgs = []

        # One menu entry per visible parameter, with its current state in the title
        for param_id, param in group_obj["properties"].items():
            if not param.get("hidden", False) or self.show_hidden:
                q_title = [("", "{} ".format(param_id))]
                # If already filled in, show value
                if param_id in answers and answers.get(param_id) != param.get("default"):
                    q_title.append(("class:choice-default-changed", "[{}]".format(answers[param_id])))
                # If the schema has a default, show default
                elif "default" in param:
                    q_title.append(("class:choice-default", "[{}]".format(param["default"])))
                # Show that it's required if not filled in and no default
                elif param_id in group_obj.get("required", []):
                    q_title.append(("class:choice-required", "(required)"))
                question["choices"].append(questionary.Choice(title=q_title, value=param_id))

        # Skip if all questions hidden ("Continue >>" + separator only)
        if len(question["choices"]) == 2:
            return {}

        answer = questionary.unsafe_prompt([question], style=nf_core.utils.nfcore_question_style)
        if answer[group_id] == "Continue >>":
            while_break = True
            # Check if there are any required parameters that don't have answers;
            # if so, stay in the loop and display them as errors next pass
            for p_required in group_obj.get("required", []):
                req_default = self.schema_obj.input_params.get(p_required, "")
                req_answer = answers.get(p_required, "")
                if req_default == "" and req_answer == "":
                    error_msgs.append(f"`{p_required}` is required")
                    while_break = False
        else:
            # A parameter was selected - prompt for its value and loop again
            param_id = answer[group_id]
            is_required = param_id in group_obj.get("required", [])
            answers = self.prompt_param(param_id, group_obj["properties"][param_id], is_required, answers)
    return answers