def delMed(self):
    """Interactively delete medicine records from 'medicine.dat'.

    Repeatedly prompts for a medicine I.D., removes the first matching
    record, rewrites the file, and asks whether to continue.
    """
    cont = True
    while cont:
        # Re-read the file each pass so a freshly deleted record is gone.
        # BUG FIX: removed the explicit close() calls inside the `with`
        # blocks — the context manager already closes the file.
        with open("medicine.dat", "rb") as file:
            data: List[List[Any]] = pickle.load(file)
        if len(data) == 0:
            print(self.emptyError)
            return
        IDS = [row[0] for row in data]
        delId = questionary.autocomplete(
            "Choose the I.D. for which you want to delete record: ",
            choices=IDS,
            validate=lambda val: val in IDS,
        ).ask()
        # Delete only the first record with the chosen I.D. by index —
        # safer than calling list.remove() on the element being iterated.
        for index, row in enumerate(data):
            if row[0] == delId:
                del data[index]
                break
        with open("medicine.dat", "wb") as fh:
            pickle.dump(data, fh)
        cont = questionary.confirm(
            "Do you wish to continue?:", default=False
        ).ask()
def check_inputs(self):
    """
    Do more complex checks about supplied flags.

    Resolves the module name (prompting if not given), derives the module
    and test-workflow paths, validates that both exist, forces test running
    when prompts are disabled, and resolves the output YAML path — refusing
    to clobber an existing file unless overwriting is confirmed or forced.

    Raises:
        UserWarning: if the module directory, the test workflow, or a
            non-overwritable output YAML file blocks the run.
    """
    # Get the tool name if not specified
    if self.module_name is None:
        modules_repo = nf_core.modules.pipeline_modules.ModulesRepo()
        modules_repo.get_modules_file_tree()
        self.module_name = questionary.autocomplete(
            "Tool name:",
            choices=modules_repo.modules_avail_module_names,
            style=nf_core.utils.nfcore_question_style,
        ).ask()
    # Paths follow the TOOL or TOOL/SUBTOOL layout of the modules repo.
    self.module_dir = os.path.join("software", *self.module_name.split("/"))
    self.module_test_main = os.path.join("tests", "software", *self.module_name.split("/"), "main.nf")

    # First, sanity check that the module directory exists
    if not os.path.isdir(self.module_dir):
        raise UserWarning(
            f"Cannot find directory '{self.module_dir}'. Should be TOOL/SUBTOOL or TOOL"
        )
    if not os.path.exists(self.module_test_main):
        raise UserWarning(
            f"Cannot find module test workflow '{self.module_test_main}'")

    # Check that we're running tests if no prompts
    if not self.run_tests and self.no_prompts:
        log.debug("Setting run_tests to True as running without prompts")
        self.run_tests = True

    # Get the output YAML file / check it does not already exist
    while self.test_yml_output_path is None:
        default_val = f"tests/software/{self.module_name}/test.yml"
        if self.no_prompts:
            self.test_yml_output_path = default_val
        else:
            self.test_yml_output_path = rich.prompt.Prompt.ask(
                "[violet]Test YAML output path[/] (- for stdout)", default=default_val).strip()
            # An empty answer resets the path so the loop prompts again.
            if self.test_yml_output_path == "":
                self.test_yml_output_path = None
        # Check that the output YAML file does not already exist
        # ("-" means stdout, so no file clash is possible there).
        if (self.test_yml_output_path is not None and self.test_yml_output_path != "-"
                and os.path.exists(self.test_yml_output_path) and not self.force_overwrite):
            if rich.prompt.Confirm.ask(
                    f"[red]File exists! [green]'{self.test_yml_output_path}' [violet]Overwrite?"
            ):
                self.force_overwrite = True
            else:
                # Declining the overwrite re-enters the prompt loop.
                self.test_yml_output_path = None
    # Final guard for the no-prompt path, where the loop never asks.
    if os.path.exists(self.test_yml_output_path) and not self.force_overwrite:
        raise UserWarning(
            f"Test YAML file already exists! '{self.test_yml_output_path}'. Use '--force' to overwrite."
        )
def plotting(self):
    """
    Method for plotting a graph based on interest in book over a period of time.

    Reads the book list CSV, asks which book and timeframe to analyse,
    pulls Google-Trends interest data via pytrends and renders a plotly
    line chart in the browser.
    """
    # BUG FIX: removed the explicit close() inside the `with` block (the
    # context manager already closes the file) and the redundant second
    # empty-row filter (data is already filtered of blank lines).
    with open(self.bookPath, "r") as plottingFile:
        data = [row for row in csv.reader(plottingFile) if len(row) != 0]
    currentBooks = [row[1] for row in data]

    book = questionary.autocomplete(
        "Enter the name of the book:",
        choices=currentBooks,
        validate=lambda x: x in currentBooks,
    )
    bookVal = book.ask()

    timeFrames = {
        "1 Month": "today 1-m",
        "3 Months": "today 3-m",
        "12 Months": "today 12-m",
    }
    tmf = questionary.select(
        "Choose the timeframe:", choices=list(timeFrames.keys())
    ).ask()
    tmfValue = timeFrames[tmf]

    pytrend = TrendReq()
    try:
        pytrend.build_payload(kw_list=[bookVal], timeframe=tmfValue)
    except Exception as e:
        print("Error in building data for {}".format(bookVal))
        # BUG FIX: the caught exception was silently discarded; surface it
        # like the figure-building handler below does.
        print(e)
        return
    else:
        print("Data successfully built!")

    df = pytrend.interest_over_time()
    # 'isPartial' is metadata from pytrends; drop it before plotting.
    df.drop(labels="isPartial", axis=1, inplace=True)

    try:
        data = go.Scatter(
            x=df.index, y=df[bookVal], name=bookVal, mode="lines+markers"
        )
    except Exception as e:
        print("Error in building figure!")
        print(e)
        return
    else:
        print("Figure built successfully!!")

    fig = go.Figure(data=data)
    fig.show()
def _prompt_app(default_app: Optional[str], apps: List[dict]) -> dict:
    """Resolve an app record by name, preferring *default_app* when valid.

    Falls back to an interactive autocomplete prompt over the known app
    names, then returns the first app dict matching the chosen name.
    """
    grouped = groupby("name", apps)
    use_default = bool(default_app) and default_app in grouped
    if use_default:
        name = default_app
    else:
        name = questionary.autocomplete(message="Choose app:", choices=grouped).ask()
    return first(grouped[name])
def remove(self, module):
    """
    Delete a module that was previously installed into this pipeline.

    Only modules installed from 'nf-core/modules' are supported. Prompts
    for the tool name when none is given; returns True on success and
    False when nothing could be removed.
    """
    # Sanity-check the pipeline and collect what is currently installed.
    self.has_valid_pipeline()
    self.get_pipeline_modules()

    if module is None:
        if not self.pipeline_module_names:
            log.error("No installed modules found in pipeline")
            return False
        module = questionary.autocomplete(
            "Tool name:", choices=self.pipeline_module_names, style=nf_core.utils.nfcore_question_style
        ).ask()

    # The source repository determines where modules live in the tree.
    install_folder = ["nf-core", "software"] if self.modules_repo.name == "nf-core/modules" else ["external"]

    module_dir = os.path.join(self.pipeline_dir, "modules", *install_folder, module)

    # Nothing to do if the module directory is absent.
    if not os.path.exists(module_dir):
        log.error(f"Module directory is not installed: {module_dir}")
        log.info("The module you want to remove does not seem to be installed")
        return False

    log.info(f"Removing {module}")

    try:
        shutil.rmtree(module_dir)
        # For TOOL/SUBTOOL layouts, clean up the TOOL dir if now empty.
        if module.count("/") > 0:
            parent_dir = os.path.dirname(module_dir)
            try:
                os.rmdir(parent_dir)
            except OSError:
                log.debug(f"Parent directory not empty: '{parent_dir}'")
            else:
                log.debug(f"Deleted orphan tool directory: '{parent_dir}'")
        log.info(f"Successfully removed {module} module")
        return True
    except OSError as err:
        log.error(f"Could not remove module: {err}")
        return False
def purchase(self):
    """Interactive purchase flow: pick a medicine, enter a quantity, print a bill.

    Loops until the user declines to continue; returns early if the
    medicine file holds no records.
    """
    cont = True
    while cont:
        # BUG FIX: removed the explicit close() inside the `with` block —
        # the context manager already closes the file.
        with open("medicine.dat", "rb") as fileObject:
            data: List[List[Any]] = pickle.load(fileObject)
        if len(data) == 0:
            print(self.emptyError)
            return
        IDS = [row[0] for row in data]
        billID = questionary.autocomplete(
            "Enter the ID of the Medicine that you want to buy: ",
            choices=IDS,
            validate=lambda val: val in IDS,
        ).ask()
        bill_row = []
        for row in data:
            if row[0] == billID:
                bill_row = row
                break
        # Keep asking until a positive whole number is entered.
        while True:
            billQty = input("Enter the quantity that you want: ")
            try:
                billQty = int(billQty)
            except ValueError:
                # BUG FIX: non-numeric input used to crash with an
                # uncaught ValueError; now it re-prompts instead.
                print("QTY must be a whole number!\nTry Again!")
                continue
            if billQty <= 0:
                print("QTY cannot be <= 0!\nTry Again!")
            else:
                break
        # Row layout: [id, name, description, unit price, ...].
        totalPrice = bill_row[3] * billQty
        billContent = [
            [billID, bill_row[1], bill_row[2], bill_row[3], billQty, totalPrice]
        ]
        print("-" * self.term.columns)
        print(f"Bill generated on {self.today}".center(self.term.columns))
        print("-" * self.term.columns)
        print(
            tabulate(
                tabular_data=billContent,
                headers=["ID", "NAME", "DESC", "PRICE", "QTY", "TOTAL PRICE"],
                tablefmt="fancy_grid",
            )
        )
        cont = questionary.confirm("Do you wish to continue?", default=False).ask()
def install(self, module=None):
    """Install a module from the modules repository into the pipeline.

    Prompts for the tool name when none is given, verifies it exists in
    the remote file tree, then downloads every module file into the
    pipeline's modules directory. Returns False on failure.
    """
    # Check whether pipelines is valid
    self.has_valid_pipeline()

    # Refresh the remote file tree so the availability check is current.
    self.modules_repo.get_modules_file_tree()

    if module is None:
        module = questionary.autocomplete(
            "Tool name:",
            choices=self.modules_repo.modules_avail_module_names,
            style=nf_core.utils.nfcore_question_style,
        ).ask()

    log.info(f"Installing {module}")

    # Check that the supplied name is an available module
    if module not in self.modules_repo.modules_avail_module_names:
        log.error(f"Module '{module}' not found in list of available modules.")
        log.info("Use the command 'nf-core modules list' to view available software")
        return False

    log.debug(f"Installing module '{module}' at modules hash {self.modules_repo.modules_current_hash}")

    # nf-core modules live under nf-core/software; anything else external.
    install_folder = ["nf-core", "software"] if self.modules_repo.name == "nf-core/modules" else ["external"]

    # Refuse to clobber an existing installation.
    module_dir = os.path.join(self.pipeline_dir, "modules", *install_folder, module)
    if os.path.exists(module_dir):
        log.error(f"Module directory already exists: {module_dir}")
        # TODO: uncomment next line once update is implemented
        # log.info("To update an existing module, use the commands 'nf-core update'")
        return False

    # Download module files
    files = self.modules_repo.get_module_file_urls(module)
    log.debug("Fetching module files:\n - {}".format("\n - ".join(files.keys())))
    for remote_name, api_url in files.items():
        # Drop the leading path segment so files land relative to module_dir.
        local_parts = remote_name.split("/")[1:]
        dl_filename = os.path.join(self.pipeline_dir, "modules", *install_folder, *local_parts)
        self.modules_repo.download_gh_file(dl_filename, api_url)
    log.info(f"Downloaded {len(files)} files to {module_dir}")
def ask_pystyle(**kwargs):
    """Prompt the user to pick an ant species via autocomplete.

    Extra keyword arguments are forwarded to questionary.autocomplete;
    returns the user's answer.
    """
    species = [
        "Camponotus pennsylvanicus",
        "Linepithema humile",
        "Eciton burchellii",
        "Atta colombica",
        "Polyergus lucidus",
        "Polyergus rufescens",
    ]
    # Build the question, then prompt and return the answer.
    question = questionary.autocomplete(
        "Choose ant specie",
        validate=PolyergusValidator,
        meta_information=meta_information,
        choices=species,
        ignore_case=False,
        style=custom_style_fancy,
        **kwargs,
    )
    return question.ask()
def prompt_remote_pipeline_name(wfs):
    """Prompt for the pipeline name with questionary

    Args:
        wfs: A nf_core.list.Workflows() object,
            where get_remote_workflows() has been called.

    Returns:
        pipeline (str): GitHub repo - username/repo

    Raises:
        AssertionError, if pipeline cannot be found
    """
    pipeline = questionary.autocomplete(
        "Pipeline name:",
        choices=[wf.name for wf in wfs.remote_workflows],
        style=nfcore_question_style,
    ).unsafe_ask()

    # Check nf-core repos
    for wf in wfs.remote_workflows:
        if wf.full_name == pipeline or wf.name == pipeline:
            return wf.full_name

    # Non nf-core repo on GitHub. (The original wrapped this in a
    # `for ... else` with no `break`, so the else-branch always ran;
    # plain post-loop code is equivalent and clearer.)
    if pipeline.count("/") == 1:
        gh_response = requests.get(f"https://api.github.com/repos/{pipeline}")
        # BUG FIX: the existence check used `assert`, which is stripped
        # under `python -O` and would then accept any repo name. Use an
        # explicit comparison instead.
        if gh_response.json().get("message") != "Not Found":
            return pipeline

    log.info("Available nf-core pipelines: '{}'".format("', '".join(
        [w.name for w in wfs.remote_workflows])))
    raise AssertionError(f"Not able to find pipeline '{pipeline}'")
def collect(filename: Optional[str]) -> Output:
    """Choose between local, existing, .env files.

    If *filename* carries a prefixed suffix, validate that the matching
    template file exists; otherwise list the local prefixed files and
    prompt the user to pick one. Returns the loaded variables wrapped in
    an Output.
    """
    file_suffix = (filename or "").replace(settings.PREFIX, "")
    if file_suffix:
        if not os.path.exists(fs.filename_from_template(file_suffix)):
            # BUG FIX: the f-string previously contained no placeholder,
            # so the fatal message never named the missing file.
            fatal(__name__, f"No file found for `{file_suffix}`", 2)
    else:
        # Discover candidate files by the configured prefix, stripped for display.
        env_files = sorted(
            file_option.replace(settings.PREFIX, "")
            for file_option in os.listdir(".")
            if file_option.startswith(settings.PREFIX))
        if not env_files:
            fatal(__name__, "No local options available.", 2)
        click.echo(
            f"""Local options: {", ".join(click.style(env_file, fg="magenta") for env_file in env_files)}""")
        file_suffix = questionary.autocomplete("Choose file:", choices=env_files).ask()
    variables = fs.load(file_suffix=file_suffix)
    return Output(file_suffix=file_suffix, variables=variables)
def searchByAuthor(self):
    """Prompt for an author name and print all of that author's books.

    Reads the library CSV at ``self.bookPath``, offers autocomplete over
    the distinct author names (column index 3), then renders the matching
    rows as a fancy-grid table.
    """
    with open(self.bookPath, "r") as fileObject:
        data = list(csv.reader(fileObject))
    # De-duplicate author names in first-seen order; dict.fromkeys is O(n)
    # versus the previous O(n^2) `name not in list` scan. Blank CSV rows
    # are skipped so a stray empty line cannot raise IndexError.
    authors = list(dict.fromkeys(row[3] for row in data if row))
    authorChoose = questionary.autocomplete(
        "Enter the author's name:",
        choices=authors,
        validate=lambda x: x in authors,
    ).ask()
    authorData = [row for row in data if row and row[3] == authorChoose]
    print(
        tabulate(
            authorData,
            headers=[
                "ISBN",
                "BOOK NAME",
                "PAGES",
                "AUTHOR NAME",
                "GENRE",
            ],
            tablefmt="fancy_grid",
        )
    )
def update(self, module):
    """
    Update one module or all modules installed in the pipeline.

    Decides the scope (single module vs --all), resolves the target version
    for each module (explicit --sha, '.nf-core.yml' config entry, interactive
    --prompt, or latest commit), then re-installs the module files. With
    --diff a dry run downloads into a temp dir, shows per-file diffs, and
    asks before applying.

    :param module: name of a specific module to update, or None
    :returns:      True if every selected module updated cleanly, else False
    """
    if self.repo_type == "modules":
        log.error("You cannot update a module in a clone of nf-core/modules")
        return False
    # Check whether pipelines is valid
    if not self.has_valid_directory():
        return False
    # Verify that 'modules.json' is consistent with the installed modules
    self.modules_json_up_to_date()
    tool_config = nf_core.utils.load_tools_config()
    update_config = tool_config.get("update", {})
    # No explicit scope given: ask whether to update everything or one module.
    if not self.update_all and module is None:
        choices = ["All modules", "Named module"]
        self.update_all = (
            questionary.select(
                "Update all modules or a single named module?",
                choices=choices,
                style=nf_core.utils.nfcore_question_style,
            ).unsafe_ask()
            == "All modules"
        )
    # --sha and --prompt are mutually exclusive version selectors.
    if self.prompt and self.sha is not None:
        log.error("Cannot use '--sha' and '--prompt' at the same time!")
        return False
    # Verify that the provided SHA exists in the repo
    if self.sha:
        try:
            nf_core.modules.module_utils.sha_exists(self.sha, self.modules_repo)
        except UserWarning:
            log.error(f"Commit SHA '{self.sha}' doesn't exist in '{self.modules_repo.name}'")
            return False
        except LookupError as e:
            log.error(e)
            return False
    if not self.update_all:
        # Get the available modules
        try:
            self.modules_repo.get_modules_file_tree()
        except LookupError as e:
            log.error(e)
            return False
        # Check if there are any modules installed from
        repo_name = self.modules_repo.name
        if repo_name not in self.module_names:
            log.error(f"No modules installed from '{repo_name}'")
            return False
        if module is None:
            self.get_pipeline_modules()
            module = questionary.autocomplete(
                "Tool name:",
                choices=self.module_names[repo_name],
                style=nf_core.utils.nfcore_question_style,
            ).unsafe_ask()
        # Check if module is installed before trying to update
        if module not in self.module_names[repo_name]:
            log.error(f"Module '{module}' is not installed in pipeline and could therefore not be updated")
            return False
        sha = self.sha
        # A per-module entry in '.nf-core.yml' may pin a SHA or disable updates.
        if module in update_config.get(self.modules_repo.name, {}):
            config_entry = update_config[self.modules_repo.name].get(module)
            if config_entry is not None and config_entry is not True:
                if config_entry is False:
                    log.info("Module's update entry in '.nf-core.yml' is set to False")
                    return False
                elif isinstance(config_entry, str):
                    sha = config_entry
                    if self.sha:
                        log.warning(
                            f"Found entry in '.nf-core.yml' for module '{module}' "
                            "which will override version specified with '--sha'"
                        )
                    else:
                        log.info(f"Found entry in '.nf-core.yml' for module '{module}'")
                    log.info(f"Updating module to ({sha})")
                else:
                    log.error("Module's update entry in '.nf-core.yml' is of wrong type")
                    return False
        # Check that the supplied name is an available module
        if module and module not in self.modules_repo.modules_avail_module_names:
            log.error("Module '{}' not found in list of available modules.".format(module))
            log.info("Use the command 'nf-core modules list remote' to view available software")
            return False
        repos_mods_shas = [(self.modules_repo, module, sha)]
    else:
        if module:
            raise UserWarning("You cannot specify a module and use the '--all' flag at the same time")
        self.get_pipeline_modules()
        # Filter out modules that should not be updated or assign versions if there are any
        skipped_repos = []
        skipped_modules = []
        repos_mods_shas = {}
        for repo_name, modules in self.module_names.items():
            if repo_name not in update_config or update_config[repo_name] is True:
                repos_mods_shas[repo_name] = []
                for module in modules:
                    repos_mods_shas[repo_name].append((module, self.sha))
            elif isinstance(update_config[repo_name], dict):
                # Per-module config within this repo: True / SHA string / False.
                repo_config = update_config[repo_name]
                repos_mods_shas[repo_name] = []
                for module in modules:
                    if module not in repo_config or repo_config[module] is True:
                        repos_mods_shas[repo_name].append((module, self.sha))
                    elif isinstance(repo_config[module], str):
                        # If a string is given it is the commit SHA to which we should update to
                        custom_sha = repo_config[module]
                        repos_mods_shas[repo_name].append((module, custom_sha))
                    else:
                        # Otherwise the entry must be 'False' and we should ignore the module
                        skipped_modules.append(f"{repo_name}/{module}")
            elif isinstance(update_config[repo_name], str):
                # If a string is given it is the commit SHA to which we should update to
                custom_sha = update_config[repo_name]
                repos_mods_shas[repo_name] = []
                for module in modules:
                    repos_mods_shas[repo_name].append((module, custom_sha))
            else:
                skipped_repos.append(repo_name)
        if skipped_repos:
            skipped_str = "', '".join(skipped_repos)
            log.info(f"Skipping modules in repositor{'y' if len(skipped_repos) == 1 else 'ies'}: '{skipped_str}'")
        if skipped_modules:
            skipped_str = "', '".join(skipped_modules)
            log.info(f"Skipping module{'' if len(skipped_modules) == 1 else 's'}: '{skipped_str}'")
        # Materialize ModulesRepo objects and fetch each repo's file tree.
        repos_mods_shas = [
            (ModulesRepo(repo=repo_name), mods_shas) for repo_name, mods_shas in repos_mods_shas.items()
        ]
        for repo, _ in repos_mods_shas:
            repo.get_modules_file_tree()
        # Flatten the list
        repos_mods_shas = [(repo, mod, sha) for repo, mods_shas in repos_mods_shas for mod, sha in mods_shas]
    # Load 'modules.json'
    modules_json = self.load_modules_json()
    if not modules_json:
        return False
    exit_value = True
    for modules_repo, module, sha in repos_mods_shas:
        # dry_run starts as the --diff flag and may flip to False after the
        # user confirms the shown diff below.
        dry_run = self.diff
        if not module_exist_in_repo(module, modules_repo):
            warn_msg = f"Module '{module}' not found in remote '{modules_repo.name}' ({modules_repo.branch})"
            if self.update_all:
                warn_msg += ". Skipping..."
            log.warning(warn_msg)
            exit_value = False
            continue
        if modules_repo.name in modules_json["repos"]:
            current_entry = modules_json["repos"][modules_repo.name].get(module)
        else:
            current_entry = None
        # Set the install folder based on the repository name
        install_folder = [self.dir, "modules", modules_repo.owner, modules_repo.repo]
        # Compute the module directory
        module_dir = os.path.join(*install_folder, module)
        # Resolve the version to install: pinned SHA > interactive prompt > latest.
        if sha:
            version = sha
        elif self.prompt:
            try:
                version = nf_core.modules.module_utils.prompt_module_version_sha(
                    module,
                    modules_repo=modules_repo,
                    installed_sha=current_entry["git_sha"] if not current_entry is None else None,
                )
            except SystemError as e:
                log.error(e)
                exit_value = False
                continue
        else:
            # Fetch the latest commit for the module
            try:
                git_log = get_module_git_log(module, modules_repo=modules_repo, per_page=1, page_nbr=1)
            except UserWarning:
                log.error(f"Was unable to fetch version of module '{module}'")
                exit_value = False
                continue
            version = git_log[0]["git_sha"]
        if current_entry is not None and not self.force:
            # Fetch the latest commit for the module
            current_version = current_entry["git_sha"]
            if current_version == version:
                if self.sha or self.prompt:
                    log.info(f"'{modules_repo.name}/{module}' is already installed at {version}")
                else:
                    log.info(f"'{modules_repo.name}/{module}' is already up to date")
                continue
        if not dry_run:
            log.info(f"Updating '{modules_repo.name}/{module}'")
            log.debug(f"Updating module '{module}' to {version} from {modules_repo.name}")
            log.debug(f"Removing old version of module '{module}'")
            self.clear_module_dir(module, module_dir)
        if dry_run:
            # Set the install folder to a temporary directory
            install_folder = ["/tmp", next(tempfile._get_candidate_names())]
        # Download module files
        if not self.download_module_file(module, version, modules_repo, install_folder, dry_run=dry_run):
            exit_value = False
            continue
        if dry_run:
            # Diff the freshly downloaded files against the installed copy.
            console = Console(force_terminal=nf_core.utils.rich_force_colors())
            files = os.listdir(os.path.join(*install_folder, module))
            temp_folder = os.path.join(*install_folder, module)
            log.info(
                f"Changes in module '{module}' between ({current_entry['git_sha'] if current_entry is not None else '?'}) and ({version if version is not None else 'latest'})"
            )
            for file in files:
                temp_path = os.path.join(temp_folder, file)
                curr_path = os.path.join(module_dir, file)
                if os.path.exists(temp_path) and os.path.exists(curr_path):
                    with open(temp_path, "r") as fh:
                        new_lines = fh.readlines()
                    with open(curr_path, "r") as fh:
                        old_lines = fh.readlines()
                    if new_lines == old_lines:
                        # The files are identical
                        log.info(f"'{os.path.join(module, file)}' is unchanged")
                    else:
                        log.info(f"Changes in '{os.path.join(module, file)}':")
                        # Compute the diff
                        diff = difflib.unified_diff(
                            old_lines,
                            new_lines,
                            fromfile=f"{os.path.join(module, file)} (installed)",
                            tofile=f"{os.path.join(module, file)} (new)",
                        )
                        # Pretty print the diff using the pygments diff lexer
                        console.print(Syntax("".join(diff), "diff", theme="ansi_light"))
                elif os.path.exists(temp_path):
                    # The file was created between the commits
                    log.info(f"Created file '{file}'")
                elif os.path.exists(curr_path):
                    # The file was removed between the commits
                    log.info(f"Removed file '{file}'")
            # Ask the user if they want to install the module
            dry_run = not questionary.confirm("Update module?", default=False).unsafe_ask()
            if not dry_run:
                # The new module files are already installed
                # we just need to clear the directory and move the
                # new files from the temporary directory
                self.clear_module_dir(module, module_dir)
                os.mkdir(module_dir)
                for file in files:
                    path = os.path.join(temp_folder, file)
                    if os.path.exists(path):
                        shutil.move(path, os.path.join(module_dir, file))
                log.info(f"Updating '{modules_repo.name}/{module}'")
                log.debug(f"Updating module '{module}' to {version} from {modules_repo.name}")
        if not dry_run:
            # Update module.json with newly installed module
            self.update_modules_json(modules_json, modules_repo.name, module, version)
    return exit_value
def install(self, module):
    """
    Install a module from the modules repository into the pipeline.

    Resolves which module to install (prompting when *module* is None),
    determines the version to use (explicit --sha, interactive --prompt,
    or latest commit), downloads the module files, and records the result
    in 'modules.json'.

    :param module: name of the module to install, or None to prompt
    :returns:      True on success, False on any handled failure
    """
    if self.repo_type == "modules":
        log.error("You cannot install a module in a clone of nf-core/modules")
        return False
    # Check whether pipelines is valid
    if not self.has_valid_directory():
        return False
    # Verify that 'modules.json' is consistent with the installed modules
    self.modules_json_up_to_date()
    # Get the available modules
    try:
        self.modules_repo.get_modules_file_tree()
    except LookupError as e:
        log.error(e)
        return False
    # --sha and --prompt are mutually exclusive version selectors.
    if self.prompt and self.sha is not None:
        log.error("Cannot use '--sha' and '--prompt' at the same time!")
        return False
    # Verify that the provided SHA exists in the repo
    if self.sha:
        try:
            nf_core.modules.module_utils.sha_exists(self.sha, self.modules_repo)
        except UserWarning:
            log.error(f"Commit SHA '{self.sha}' doesn't exist in '{self.modules_repo.name}'")
            return False
        except LookupError as e:
            log.error(e)
            return False
    if module is None:
        module = questionary.autocomplete(
            "Tool name:",
            choices=self.modules_repo.modules_avail_module_names,
            style=nf_core.utils.nfcore_question_style,
        ).unsafe_ask()
    # Check that the supplied name is an available module
    if module and module not in self.modules_repo.modules_avail_module_names:
        log.error("Module '{}' not found in list of available modules.".format(module))
        log.info("Use the command 'nf-core modules list' to view available software")
        return False
    # Load 'modules.json'
    modules_json = self.load_modules_json()
    if not modules_json:
        return False
    if not module_exist_in_repo(module, self.modules_repo):
        warn_msg = f"Module '{module}' not found in remote '{self.modules_repo.name}' ({self.modules_repo.branch})"
        log.warning(warn_msg)
        return False
    # current_entry holds the installed version info, if any.
    if self.modules_repo.name in modules_json["repos"]:
        current_entry = modules_json["repos"][self.modules_repo.name].get(module)
    else:
        current_entry = None
    # Set the install folder based on the repository name
    install_folder = [self.dir, "modules", self.modules_repo.owner, self.modules_repo.repo]
    # Compute the module directory
    module_dir = os.path.join(*install_folder, module)
    # Check that the module is not already installed
    if (current_entry is not None and os.path.exists(module_dir)) and not self.force:
        log.error(f"Module is already installed.")
        # Build the exact CLI hint the user needs for a non-default repo/branch.
        repo_flag = "" if self.modules_repo.name == "nf-core/modules" else f"-g {self.modules_repo.name} "
        branch_flag = "" if self.modules_repo.branch == "master" else f"-b {self.modules_repo.branch} "
        log.info(
            f"To update '{module}' run 'nf-core modules {repo_flag}{branch_flag}update {module}'. To force reinstallation use '--force'"
        )
        return False
    # Resolve the version to install: pinned SHA > interactive prompt > latest.
    if self.sha:
        version = self.sha
    elif self.prompt:
        try:
            version = nf_core.modules.module_utils.prompt_module_version_sha(
                module,
                installed_sha=current_entry["git_sha"] if not current_entry is None else None,
                modules_repo=self.modules_repo,
            )
        except SystemError as e:
            log.error(e)
            return False
    else:
        # Fetch the latest commit for the module
        try:
            git_log = get_module_git_log(module, modules_repo=self.modules_repo, per_page=1, page_nbr=1)
        except UserWarning:
            log.error(f"Was unable to fetch version of module '{module}'")
            return False
        version = git_log[0]["git_sha"]
    if self.force:
        log.info(f"Removing installed version of '{self.modules_repo.name}/{module}'")
        self.clear_module_dir(module, module_dir)
    log.info(f"{'Rei' if self.force else 'I'}nstalling '{module}'")
    log.debug(f"Installing module '{module}' at modules hash {version} from {self.modules_repo.name}")
    # Download module files
    if not self.download_module_file(module, version, self.modules_repo, install_folder):
        return False
    # Update module.json with newly installed module
    self.update_modules_json(modules_json, self.modules_repo.name, module, version)
    return True
def lint(self, module=None, all_modules=False, print_results=True, show_passed=False, local=False):
    """
    Lint every installed module, or one named module.

    Local modules (modules/local/process) get a lightweight '.nf' content
    check; nf-core modules (modules/nf-core/software) get full structure
    and content validation. In a clone of 'nf-core/modules' the module
    test files are inspected as well. When neither *module* nor
    *all_modules* is given, the scope is chosen interactively.

    :param module:          A specific module to lint
    :param print_results:   Whether to print the linting results
    :param show_passed:     Whether passed tests should be shown as well

    :returns:               dict of {passed, warned, failed}
    """
    # Discover every module visible to this pipeline / modules clone.
    local_modules, nfcore_modules = self.get_installed_modules()

    # No target given on the command line: ask interactively.
    if module is None and not all_modules:
        scope_question = {
            "type": "list",
            "name": "all_modules",
            "message": "Lint all modules or a single named module?",
            "choices": ["All modules", "Named module"],
        }
        scope_answer = questionary.unsafe_prompt(
            [scope_question], style=nf_core.utils.nfcore_question_style)
        if scope_answer["all_modules"] == "All modules":
            all_modules = True
        else:
            module = questionary.autocomplete(
                "Tool name:",
                choices=[m.module_name for m in nfcore_modules],
                style=nf_core.utils.nfcore_question_style,
            ).ask()

    # Narrow the lint targets down to the single requested module.
    if module:
        if all_modules:
            raise ModuleLintException(
                "You cannot specify a tool and request all tools to be linted."
            )
        local_modules = []
        nfcore_modules = [m for m in nfcore_modules if m.module_name == module]
        if not nfcore_modules:
            raise ModuleLintException(
                f"Could not find the specified module: '{module}'")

    repo_kind = "modules repo" if self.repo_type == "modules" else "pipeline"
    log.info(f"Linting {repo_kind}: [magenta]{self.dir}")
    if module:
        log.info(f"Linting module: [magenta]{module}")

    # Lint local modules
    if local and local_modules:
        self.lint_local_modules(local_modules)

    # Lint nf-core modules and compare them against the remote versions.
    if nfcore_modules:
        self.lint_nfcore_modules(nfcore_modules)
        self.check_module_changes(nfcore_modules)

    if print_results:
        self._print_results(show_passed=show_passed)

    return {
        "passed": self.passed,
        "warned": self.warned,
        "failed": self.failed
    }
def bump_versions(self, module=None, all_modules=False, show_uptodate=False):
    """
    Bump the container and conda version of single module or all modules

    Looks for a bioconda tool version in the `main.nf` file of the module and
    checks whether a more recent version is available. If yes, then tries to
    get docker/singularity container links and replace the bioconda version
    and the container links in the main.nf file of the respective module.

    Args:
        module: a specific module to update
        all_modules: whether to bump versions for all modules
        show_uptodate: whether to report modules that are already current
    """
    self.up_to_date = []
    self.updated = []
    self.failed = []
    self.show_up_to_date = show_uptodate

    # Verify that this is not a pipeline
    repo_type = nf_core.modules.module_utils.get_repo_type(self.dir)
    if not repo_type == "modules":
        raise nf_core.modules.module_utils.ModuleException(
            "This command only works on the nf-core/modules repository, not on pipelines!"
        )

    # Get list of all modules
    _, nfcore_modules = nf_core.modules.module_utils.get_installed_modules(
        self.dir)

    # Load the .nf-core-tools.config
    self.tools_config = nf_core.utils.load_tools_config(self.dir)

    # Prompt for module or all
    if module is None and not all_modules:
        question = {
            "type": "list",
            "name": "all_modules",
            "message": "Bump versions for all modules or a single named module?",
            "choices": ["All modules", "Named module"],
        }
        answer = questionary.unsafe_prompt(
            [question], style=nf_core.utils.nfcore_question_style)
        if answer["all_modules"] == "All modules":
            all_modules = True
        else:
            module = questionary.autocomplete(
                "Tool name:",
                choices=[m.module_name for m in nfcore_modules],
                style=nf_core.utils.nfcore_question_style,
            ).unsafe_ask()

    if module:
        self.show_up_to_date = True
        if all_modules:
            raise nf_core.modules.module_utils.ModuleException(
                "You cannot specify a tool and request all tools to be bumped."
            )
        nfcore_modules = [
            m for m in nfcore_modules if m.module_name == module
        ]
        if len(nfcore_modules) == 0:
            raise nf_core.modules.module_utils.ModuleException(
                f"Could not find the specified module: '{module}'")

    progress_bar = rich.progress.Progress(
        "[bold blue]{task.description}",
        rich.progress.BarColumn(bar_width=None),
        "[magenta]{task.completed} of {task.total}[reset] » [bold yellow]{task.fields[test_name]}",
        transient=True,
    )
    with progress_bar:
        # BUG FIX: guard the initial test_name — an empty module list used
        # to raise IndexError on nfcore_modules[0] before the loop started.
        bump_progress = progress_bar.add_task(
            "Bumping nf-core modules versions",
            total=len(nfcore_modules),
            test_name=nfcore_modules[0].module_name if nfcore_modules else "")
        for mod in nfcore_modules:
            progress_bar.update(bump_progress, advance=1, test_name=mod.module_name)
            self.bump_module_version(mod)

    self._print_results()
def issue_create(ctx, open_url):
    """Создать задачу"""
    # NOTE: the docstring above doubles as the click CLI help text
    # ("Create an issue"), so it is intentionally kept in Russian.
    rd = ctx.obj['redmine']
    # Candidate target versions for the new issue, keyed by display name.
    versions = get_last_versions(rd, cfg['project.id'])
    versions_map = {str(v): v for v in versions}
    current_user = rd.user.get('current')
    # Project members: display name -> numeric user id (used for assignment).
    memberships = {
        str(m.user): m.user.id
        for m in get_memberships(rd, cfg['project.id'])
    }
    trackers = get_trackers_project(rd, cfg['project.id'])
    trackers_map = {str(t): t for t in trackers}
    # Interactive prompts: tracker, subject, description, assignee, version.
    selected_tracker = questionary.select('Трекер', choices=list(
        trackers_map.keys())).ask()
    tracker_id = trackers_map.get(selected_tracker).id
    subject = questionary.text('Тема задачи').ask()
    description = questionary.text('Описание задачи').ask()
    assigned = questionary.autocomplete('Назначена',
                                        choices=list(memberships.keys()),
                                        default=str(current_user)).ask()
    fixed_version = questionary.select('Версия', choices=list(
        versions_map.keys())).ask()
    custom_fields = []
    # Walk the (filtered) project custom fields, prompting for each value.
    for cf_id, cf in get_custom_fields(
            rd, filtered=cfg['issue.filter_custom_fields']).items():
        default_select = None
        possible_values = [v.get('value') for v in get_cf_values(rd, cf_id)]
        if cf.field_format == 'user':
            # User-type fields choose among members, defaulting to myself.
            default_select = str(current_user)
            possible_values = memberships.keys()
        if not len(possible_values):
            continue
        # Long option lists get autocomplete; short ones a plain select menu.
        if len(possible_values) > 10:
            value = questionary.autocomplete(str(cf),
                                             choices=possible_values,
                                             default=default_select).ask()
        else:
            value = questionary.select(str(cf),
                                       choices=possible_values,
                                       default=default_select).ask()
        if cf.field_format == 'user':
            # Map the chosen display name back to the numeric user id.
            value = memberships.get(value)
        if not value:
            continue
        custom_fields.append({'id': cf_id, 'value': value})
    is_confirm = questionary.confirm('Создать задачу?').ask()
    if is_confirm:
        selected_fixed_version = versions_map[fixed_version]
        result = rd.issue.create(project_id=cfg['project.id'],
                                 tracker_id=tracker_id,
                                 subject=subject,
                                 fixed_version_id=selected_fixed_version.id,
                                 assigned_to_id=memberships.get(assigned),
                                 description=description,
                                 custom_fields=custom_fields)
        click.echo(click.style(f'Создана задача № {result.id}', bold=True))
        if open_url:
            # Open the freshly created issue in the browser.
            open_web_url(
                f"{str(cfg['redmine.host']).strip('/')}/issues/{result.id}",
                new=True)
def get_id(conn):
    """ Extracts unique id from database by narrowing down candidates based
    on user provided information. Some cars continue to have variants even
    after all questions have been asked. For those that do, the first id
    fetched by the final query result is the one we will use.

    Parameters:
        conn (obj): connection to sqlite database we will be querying

    Returns:
        int: identifier for user's current car, used as a means to access
        other information related to their vehicle for future functions
    """
    c = conn.cursor()
    # Step 1: prompt for the make, offering every distinct make as a completion.
    make_query = 'SELECT DISTINCT make FROM vehicles'
    make_results = c.execute(make_query).fetchall()
    make_results = sorted({tup[0] for tup in make_results})
    make_ans = q.autocomplete(
        "What is your car's make?\n ",
        choices=make_results,
        validate=(lambda text: autoc_validator(text, c, 'make')),
        style=Style(S_CONFIG),
        qmark='⯁ ').ask()
    # Step 2: prompt for "model year" as a single combined string,
    # sorted by model name then newest year first.
    m_y_query = ('SELECT DISTINCT model, year '
                 'FROM vehicles WHERE make = ?')
    m_y_results = set(c.execute(m_y_query, (make_ans, )).fetchall())
    m_y_results = sorted(m_y_results, key=lambda tup: (tup[0], -tup[1]))
    m_y_results = [tup[0] + ' ' + str(tup[1]) for tup in m_y_results]
    m_y_ans = q.autocomplete(
        'What about model and year?\n ',
        choices=m_y_results,
        validate=(lambda text: autoc_validator(text, c, 'm_y')),
        style=Style(S_CONFIG),
        qmark='\n⯁ ').ask()
    # rpartition splits on the LAST space, so multi-word models stay intact.
    model, _, year = m_y_ans.rpartition(' ')
    base_tup = (make_ans, model, int(year))
    # WHERE_CMD is a module-level WHERE clause matching (make, model, year).
    id_query = f'SELECT id FROM vehicles {WHERE_CMD}'
    uniq_results = c.execute(id_query, base_tup).fetchall()
    uniq_results = [tup[0] for tup in uniq_results]
    # Default answer: first candidate id; only refined if variants exist
    # and the user opts into the advanced questions below.
    id_ = uniq_results[0]
    uniq = len(uniq_results) == 1
    c_msg = ('Your particular car has some variants, would you like to be '
             'more specific?\n You may be prompted to choose transmission, '
             'number of cylinders, or drive type.\n Recommended only if '
             'you are comfortable with these more advanced options.\n '
             '(Skipping defaults to No.)\n ')
    # skip_if(uniq): the confirm prompt is skipped entirely when the id is
    # already unique (advanced then stays falsy).
    advanced = q.confirm(c_msg,
                         default=False,
                         style=Style(S_CONFIG +
                                     [('qmark', 'fg:#CF5050')]),
                         qmark='\n❗').skip_if(uniq).ask()
    if advanced:
        # Narrow further by transmission, cylinders, then drive type; each
        # unique_helper call returns (answer, accumulated WHERE fragment).
        t_ans, _ = unique_helper(c, 'trany', 'transmission', base_tup)
        c_ans, _ = unique_helper(c, 'cylinders', 'number of cylinders',
                                 base_tup, [('trany', t_ans)])
        d_ans, cond = unique_helper(c, 'drive', 'drive type', base_tup,
                                    [('trany', t_ans), ('cylinders', c_ans)])
        if d_ans:
            cond += ' AND drive = ?'
        # Only truthy answers contribute bind parameters, mirroring cond.
        add_tup = tuple(val for val in (t_ans, c_ans, d_ans) if val)
        id_query = 'SELECT id FROM vehicles ' + WHERE_CMD + cond
        id_ = c.execute(id_query, base_tup + add_tup).fetchone()[0]
    c.close()
    return id_
def update(self, module):
    """
    Update one installed module (or all modules when ``self.update_all``)
    to a given SHA or the latest remote version.

    Honours per-repo / per-module pins in the ``update`` section of
    '.nf-core.yml', can preview changes as terminal diffs or write them to
    a patch file instead of modifying files in place.

    Returns False on any fatal error; otherwise returns ``exit_value``
    (False if any individual module failed, True if all succeeded).
    """
    if self.repo_type == "modules":
        log.error("You cannot update a module in a clone of nf-core/modules")
        return False

    # Check whether pipelines is valid
    if not self.has_valid_directory():
        return False

    # Verify that 'modules.json' is consistent with the installed modules
    self.modules_json_up_to_date()

    tool_config = nf_core.utils.load_tools_config()
    update_config = tool_config.get("update", {})

    if not self.update_all and module is None:
        choices = ["All modules", "Named module"]
        self.update_all = (
            questionary.select(
                "Update all modules or a single named module?",
                choices=choices,
                style=nf_core.utils.nfcore_question_style,
            ).unsafe_ask()
            == "All modules"
        )

    if self.prompt and self.sha is not None:
        log.error("Cannot use '--sha' and '--prompt' at the same time!")
        return False

    # Verify that the provided SHA exists in the repo
    if self.sha:
        try:
            nf_core.modules.module_utils.sha_exists(self.sha, self.modules_repo)
        except UserWarning:
            log.error(f"Commit SHA '{self.sha}' doesn't exist in '{self.modules_repo.name}'")
            return False
        except LookupError as e:
            log.error(e)
            return False

    if not self.update_all:
        # Get the available modules
        try:
            self.modules_repo.get_modules_file_tree()
        except LookupError as e:
            log.error(e)
            return False

        # Check if there are any modules installed from
        repo_name = self.modules_repo.name
        if repo_name not in self.module_names:
            log.error(f"No modules installed from '{repo_name}'")
            return False

        if module is None:
            self.get_pipeline_modules()
            module = questionary.autocomplete(
                "Tool name:",
                choices=self.module_names[repo_name],
                style=nf_core.utils.nfcore_question_style,
            ).unsafe_ask()

        # Check if module is installed before trying to update
        if module not in self.module_names[repo_name]:
            log.error(f"Module '{module}' is not installed in pipeline and could therefore not be updated")
            return False

        sha = self.sha
        # A '.nf-core.yml' entry may pin (str), allow (True) or forbid (False) the update.
        if module in update_config.get(self.modules_repo.name, {}):
            config_entry = update_config[self.modules_repo.name].get(module)
            if config_entry is not None and config_entry is not True:
                if config_entry is False:
                    log.info("Module's update entry in '.nf-core.yml' is set to False")
                    return False
                elif isinstance(config_entry, str):
                    sha = config_entry
                    if self.sha:
                        log.warning(
                            f"Found entry in '.nf-core.yml' for module '{module}' "
                            "which will override version specified with '--sha'"
                        )
                    else:
                        log.info(f"Found entry in '.nf-core.yml' for module '{module}'")
                    log.info(f"Updating module to ({sha})")
                else:
                    log.error("Module's update entry in '.nf-core.yml' is of wrong type")
                    return False

        # Check that the supplied name is an available module
        if module and module not in self.modules_repo.modules_avail_module_names:
            log.error("Module '{}' not found in list of available modules.".format(module))
            log.info("Use the command 'nf-core modules list remote' to view available software")
            return False
        repos_mods_shas = [(self.modules_repo, module, sha)]
    else:
        if module:
            raise UserWarning("You cannot specify a module and use the '--all' flag at the same time")

        self.get_pipeline_modules()

        # Filter out modules that should not be updated or assign versions if there are any
        skipped_repos = []
        skipped_modules = []
        repos_mods_shas = {}
        for repo_name, modules in self.module_names.items():
            if repo_name not in update_config or update_config[repo_name] is True:
                repos_mods_shas[repo_name] = []
                for module in modules:
                    repos_mods_shas[repo_name].append((module, self.sha))
            elif isinstance(update_config[repo_name], dict):
                repo_config = update_config[repo_name]
                repos_mods_shas[repo_name] = []
                for module in modules:
                    if module not in repo_config or repo_config[module] is True:
                        repos_mods_shas[repo_name].append((module, self.sha))
                    elif isinstance(repo_config[module], str):
                        # If a string is given it is the commit SHA to which we should update to
                        custom_sha = repo_config[module]
                        repos_mods_shas[repo_name].append((module, custom_sha))
                    else:
                        # Otherwise the entry must be 'False' and we should ignore the module
                        skipped_modules.append(f"{repo_name}/{module}")
            elif isinstance(update_config[repo_name], str):
                # If a string is given it is the commit SHA to which we should update to
                custom_sha = update_config[repo_name]
                repos_mods_shas[repo_name] = []
                for module in modules:
                    repos_mods_shas[repo_name].append((module, custom_sha))
            else:
                skipped_repos.append(repo_name)

        if skipped_repos:
            skipped_str = "', '".join(skipped_repos)
            log.info(f"Skipping modules in repositor{'y' if len(skipped_repos) == 1 else 'ies'}: '{skipped_str}'")

        if skipped_modules:
            skipped_str = "', '".join(skipped_modules)
            log.info(f"Skipping module{'' if len(skipped_modules) == 1 else 's'}: '{skipped_str}'")

        repos_mods_shas = [
            (ModulesRepo(repo=repo_name), mods_shas) for repo_name, mods_shas in repos_mods_shas.items()
        ]

        for repo, _ in repos_mods_shas:
            repo.get_modules_file_tree()

        # Flatten the list
        repos_mods_shas = [(repo, mod, sha) for repo, mods_shas in repos_mods_shas for mod, sha in mods_shas]

    # Load 'modules.json'
    modules_json = self.load_modules_json()
    old_modules_json = copy.deepcopy(modules_json)  # Deep copy to avoid mutability
    if not modules_json:
        return False

    # If --preview is true, don't save to a patch file
    if self.show_diff:
        self.show_diff_fn = False

    # Ask if we should show the diffs (unless a filename was already given on the command line)
    if not self.save_diff_fn and self.show_diff is None:
        diff_type = questionary.select(
            "Do you want to view diffs of the proposed changes?",
            choices=[
                {"name": "No previews, just update everything", "value": 0},
                {"name": "Preview diff in terminal, choose whether to update files", "value": 1},
                {"name": "Just write diffs to a patch file", "value": 2},
            ],
            style=nf_core.utils.nfcore_question_style,
        ).unsafe_ask()
        self.show_diff = diff_type == 1
        self.save_diff_fn = diff_type == 2

    # Set up file to save diff
    if self.save_diff_fn:  # True or a string
        # From questionary - no filename yet
        if self.save_diff_fn is True:
            self.save_diff_fn = questionary.text(
                "Enter the filename: ", style=nf_core.utils.nfcore_question_style
            ).unsafe_ask()
        # Check if filename already exists (questionary or cli)
        while os.path.exists(self.save_diff_fn):
            if questionary.confirm(f"'{self.save_diff_fn}' exists. Remove file?").unsafe_ask():
                os.remove(self.save_diff_fn)
                break
            self.save_diff_fn = questionary.text(
                f"Enter a new filename: ",
                style=nf_core.utils.nfcore_question_style,
            ).unsafe_ask()

    exit_value = True
    for modules_repo, module, sha in repos_mods_shas:
        # Are we updating the files in place or not?
        dry_run = self.show_diff or self.save_diff_fn

        # Check if the module we've been asked to update actually exists
        if not module_exist_in_repo(module, modules_repo):
            warn_msg = f"Module '{module}' not found in remote '{modules_repo.name}' ({modules_repo.branch})"
            if self.update_all:
                warn_msg += ". Skipping..."
            log.warning(warn_msg)
            exit_value = False
            continue

        if modules_repo.name in modules_json["repos"]:
            current_entry = modules_json["repos"][modules_repo.name].get(module)
        else:
            current_entry = None

        # Set the install folder based on the repository name
        install_folder = [self.dir, "modules", modules_repo.owner, modules_repo.repo]

        # Compute the module directory
        module_dir = os.path.join(*install_folder, module)

        if sha:
            version = sha
        elif self.prompt:
            try:
                version = nf_core.modules.module_utils.prompt_module_version_sha(
                    module,
                    modules_repo=modules_repo,
                    installed_sha=current_entry["git_sha"] if not current_entry is None else None,
                )
            except SystemError as e:
                log.error(e)
                exit_value = False
                continue
        else:
            # Fetch the latest commit for the module
            try:
                git_log = get_module_git_log(module, modules_repo=modules_repo, per_page=1, page_nbr=1)
            except UserWarning:
                log.error(f"Was unable to fetch version of module '{module}'")
                exit_value = False
                continue
            version = git_log[0]["git_sha"]

        if current_entry is not None and not self.force:
            # Fetch the latest commit for the module
            current_version = current_entry["git_sha"]
            if current_version == version:
                if self.sha or self.prompt:
                    log.info(f"'{modules_repo.name}/{module}' is already installed at {version}")
                else:
                    log.info(f"'{modules_repo.name}/{module}' is already up to date")
                continue

        if not dry_run:
            log.info(f"Updating '{modules_repo.name}/{module}'")
            log.debug(f"Updating module '{module}' to {version} from {modules_repo.name}")

            log.debug(f"Removing old version of module '{module}'")
            self.clear_module_dir(module, module_dir)

        if dry_run:
            # Set the install folder to a temporary directory
            install_folder = ["/tmp", next(tempfile._get_candidate_names())]

        # Download module files
        if not self.download_module_file(module, version, modules_repo, install_folder, dry_run=dry_run):
            exit_value = False
            continue

        if dry_run:

            class DiffEnum(enum.Enum):
                """
                Enumeration for keeping track of
                the diff status of a pair of files
                """

                UNCHANGED = enum.auto()
                CHANGED = enum.auto()
                CREATED = enum.auto()
                REMOVED = enum.auto()

            diffs = {}

            # Get all unique filenames in the two folders.
            # `dict.fromkeys()` is used instead of `set()` to preserve order
            files = dict.fromkeys(os.listdir(os.path.join(*install_folder, module)))
            files.update(dict.fromkeys(os.listdir(module_dir)))
            files = list(files)

            temp_folder = os.path.join(*install_folder, module)

            # Loop through all the module files and compute their diffs if needed
            for file in files:
                temp_path = os.path.join(temp_folder, file)
                curr_path = os.path.join(module_dir, file)
                if os.path.exists(temp_path) and os.path.exists(curr_path) and os.path.isfile(temp_path):
                    with open(temp_path, "r") as fh:
                        new_lines = fh.readlines()
                    with open(curr_path, "r") as fh:
                        old_lines = fh.readlines()

                    if new_lines == old_lines:
                        # The files are identical
                        diffs[file] = (DiffEnum.UNCHANGED, ())
                    else:
                        # Compute the diff
                        # NOTE(review): fromfile and tofile are both the current
                        # module path — presumably intentional (diff presented as an
                        # in-place change to the same file); confirm.
                        diff = difflib.unified_diff(
                            old_lines,
                            new_lines,
                            fromfile=os.path.join(module_dir, file),
                            tofile=os.path.join(module_dir, file),
                        )
                        diffs[file] = (DiffEnum.CHANGED, diff)

                elif os.path.exists(temp_path):
                    # The file was created
                    diffs[file] = (DiffEnum.CREATED, ())

                elif os.path.exists(curr_path):
                    # The file was removed
                    diffs[file] = (DiffEnum.REMOVED, ())

            if self.save_diff_fn:
                log.info(f"Writing diff of '{module}' to '{self.save_diff_fn}'")
                with open(self.save_diff_fn, "a") as fh:
                    fh.write(
                        f"Changes in module '{module}' between ({current_entry['git_sha'] if current_entry is not None else '?'}) and ({version if version is not None else 'latest'})\n"
                    )

                    for file, d in diffs.items():
                        diff_status, diff = d
                        if diff_status == DiffEnum.UNCHANGED:
                            # The files are identical
                            fh.write(f"'{os.path.join(module_dir, file)}' is unchanged\n")
                        elif diff_status == DiffEnum.CREATED:
                            # The file was created between the commits
                            fh.write(f"'{os.path.join(module_dir, file)}' was created\n")
                        elif diff_status == DiffEnum.REMOVED:
                            # The file was removed between the commits
                            fh.write(f"'{os.path.join(module_dir, file)}' was removed\n")
                        else:
                            # The file has changed
                            fh.write(f"Changes in '{os.path.join(module_dir, file)}':\n")
                            # Write the diff lines to the file
                            for line in diff:
                                fh.write(line)
                            fh.write("\n")

                    fh.write("*" * 60 + "\n")
            elif self.show_diff:
                console = Console(force_terminal=nf_core.utils.rich_force_colors())
                log.info(
                    f"Changes in module '{module}' between ({current_entry['git_sha'] if current_entry is not None else '?'}) and ({version if version is not None else 'latest'})"
                )

                for file, d in diffs.items():
                    diff_status, diff = d
                    if diff_status == DiffEnum.UNCHANGED:
                        # The files are identical
                        log.info(f"'{os.path.join(module, file)}' is unchanged")
                    elif diff_status == DiffEnum.CREATED:
                        # The file was created between the commits
                        log.info(f"'{os.path.join(module, file)}' was created")
                    elif diff_status == DiffEnum.REMOVED:
                        # The file was removed between the commits
                        log.info(f"'{os.path.join(module, file)}' was removed")
                    else:
                        # The file has changed
                        log.info(f"Changes in '{os.path.join(module, file)}':")
                        # Pretty print the diff using the pygments diff lexer
                        console.print(Syntax("".join(diff), "diff", theme="ansi_light"))

                # Ask the user if they want to install the module
                dry_run = not questionary.confirm(
                    f"Update module '{module}'?", default=False, style=nf_core.utils.nfcore_question_style
                ).unsafe_ask()
                if not dry_run:
                    # The new module files are already installed.
                    # We just need to clear the directory and move the
                    # new files from the temporary directory
                    self.clear_module_dir(module, module_dir)
                    os.makedirs(module_dir)
                    for file in files:
                        path = os.path.join(temp_folder, file)
                        if os.path.exists(path):
                            shutil.move(path, os.path.join(module_dir, file))

                    log.info(f"Updating '{modules_repo.name}/{module}'")
                    log.debug(f"Updating module '{module}' to {version} from {modules_repo.name}")

        # Update modules.json with newly installed module
        if not dry_run:
            self.update_modules_json(modules_json, modules_repo.name, module, version)
        # Don't save to a file, just iteratively update the variable
        else:
            modules_json = self.update_modules_json(
                modules_json, modules_repo.name, module, version, write_file=False
            )

    if self.save_diff_fn:
        # Compare the new modules.json and build a diff
        modules_json_diff = difflib.unified_diff(
            json.dumps(old_modules_json, indent=4).splitlines(keepends=True),
            json.dumps(modules_json, indent=4).splitlines(keepends=True),
            fromfile=os.path.join(self.dir, "modules.json"),
            tofile=os.path.join(self.dir, "modules.json"),
        )

        # Save diff for modules.json to file
        with open(self.save_diff_fn, "a") as fh:
            fh.write(f"Changes in './modules.json'\n")
            for line in modules_json_diff:
                fh.write(line)
            fh.write("*" * 60 + "\n")

    log.info("Updates complete :sparkles:")

    if self.save_diff_fn:
        log.info(
            f"[bold magenta italic] TIP! [/] If you are happy with the changes in '{self.save_diff_fn}', you can apply them by running the command :point_right: [bold magenta italic]git apply {self.save_diff_fn}"
        )

    return exit_value
def remove(self, module): """ Remove an already installed module This command only works for modules that are installed from 'nf-core/modules' """ if self.repo_type == "modules": log.error( "You cannot remove a module in a clone of nf-core/modules") return False # Check whether pipelines is valid self.has_valid_directory() # Get the installed modules self.get_pipeline_modules() if sum(map(len, self.module_names)) == 0: log.error("No installed modules found in pipeline") return False # Decide from which repo the module was installed # TODO Configure the prompt for repository name in a nice way if True: repo_name = self.modules_repo.name elif len(self.module_names) == 1: repo_name = list(self.module_names.keys())[0] else: repo_name = questionary.autocomplete( "Repo name:", choices=self.module_names.keys(), style=nf_core.utils.nfcore_question_style).unsafe_ask() if module is None: module = questionary.autocomplete( "Tool name:", choices=self.module_names[repo_name], style=nf_core.utils.nfcore_question_style).unsafe_ask() # Set the remove folder based on the repository name remove_folder = os.path.split(repo_name) # Get the module directory module_dir = os.path.join(self.dir, "modules", *remove_folder, module) # Verify that the module is actually installed if not os.path.exists(module_dir): log.error(f"Module directory does not exist: '{module_dir}'") modules_json = self.load_modules_json() if self.modules_repo.name in modules_json[ "repos"] and module in modules_json["repos"][repo_name]: log.error( f"Found entry for '{module}' in 'modules.json'. Removing..." ) self.remove_modules_json_entry(module, repo_name, modules_json) return False log.info("Removing {}".format(module)) # Remove entry from modules.json modules_json = self.load_modules_json() self.remove_modules_json_entry(module, repo_name, modules_json) # Remove the module return self.clear_module_dir(module_name=module, module_dir=module_dir)
progress.update(task, visible=False) rprint("[red][strong]TürkAnime'ye ulaşılamıyor.[/strong][red]") kapat(1) sorgu = AnimeSorgula(driver) progress.update(task, visible=False) while True: islem = select("İşlemi seç", choices=['Anime izle', 'Anime indir', 'Ayarlar', 'Kapat'], style=prompt_tema, instruction=" ").ask() if "Anime" in islem: try: secilen_seri = autocomplete('Animeyi yazın', choices=sorgu.get_seriler(), style=prompt_tema).ask() secilen_bolumler = prompt( { 'type': "checkbox" if "indir" in islem else "select", 'message': 'Bölüm seç', 'name': 'anime_bolum', 'choices': sorgu.get_bolumler(secilen_seri) }, style=prompt_tema, kbi_msg="")['anime_bolum'] except KeyError: continue
def addBook(self): """Add Book Method. Only for Teacher Class""" with open(self.bookPath, "r") as fileObject: data = list(csv.reader(fileObject)) cur_isbn = [] cur_categ = [] for row in data: if len(row) == 5: cur_isbn.append(row[0]) if row[4] not in cur_categ: cur_categ.append(row[4]) fileObject.close() pattern_isbn = "^[0-9]{3}-[0-9]{4}-[0-9]{3}$" checker_isbn = re.compile(pattern=pattern_isbn) while True: isbn_new = questionary.text( "Enter the ISBN of your book\ (It should be of the format ###-####-###): " ).ask() if isbn_new in cur_isbn: print("ISBN is already in use!") print("Try Again!") elif not checker_isbn.match(isbn_new): print("Incorrect Format!\n\tTry Again!") else: break bookNew = questionary.text("Enter the Book Name:").ask() while True: pagesNew = int(input("Enter the No. of Pages: ")) if pagesNew <= 1: print("Incorrect Value!\n") else: break authorNew = questionary.text( "Enter the Author's name:", validate=lambda x: len(x.split(" ")) >= 2, ).ask() categoryNew = questionary.autocomplete( "Enter the Category: ", choices=cur_categ, validate=lambda cat: cat in cur_categ, ).ask() toBeIns = [isbn_new, bookNew, pagesNew, authorNew, categoryNew] data.append(toBeIns) try: with open(self.bookPath, "w", newline="") as fileObject: writer = csv.writer(fileObject) writer.writerows(data) fileObject.close() except Exception as e: print(e) print("Unable to add book") return else: print("Book added successfully!") return
def modifyData(self): with open("medicine.dat", "rb") as file: data: List[List[Any]] = pickle.load(file) file.close() if len(data) == 0: print(self.emptyError) return IDS = [] for row in data: if row[0] not in IDS: IDS.append(row[0]) modId = questionary.autocomplete( "Enter the I.D. for which you want to modify details: ", IDS, validate=lambda val: val in IDS, ).ask() modRow = [] modIndex = 0 for row in data: if row[0] == modId: modRow = row modIndex = data.index(row) data.remove(modRow) break while True: newName = str(input("Enter the new name(Enter for skip): ")) if newName.rstrip(" ").lstrip(" ") == "": break else: modRow[1] = newName.rstrip(" ").lstrip(" ") break while True: newDesc = str(input("Enter the new description(Enter for skip): ")) if newDesc.rstrip(" ").lstrip(" ") == "": break else: modRow[2] = newDesc.rstrip(" ").lstrip(" ")[:30] break # Price Loop while True: newPrice = str(input("Enter the new price(Entry for skip): ")) if newPrice.rstrip(" ").lstrip(" ") == "": break else: modRow[3] = int(newPrice) break # Reorder qty loop while True: newQty = str(input("Enter the new quantity(Enter for skip): ")) if newQty.rstrip(" ").lstrip(" ") == "": break else: modRow[4] = int(newQty) break # Reorder qty loop while True: newReQty = str(input("Enter the new Reorder Quantity(Enter for skip): ")) if newReQty.rstrip(" ").lstrip(" ") == "": break else: modRow[5] = int(newReQty) break data.insert(modIndex, modRow) with open("medicine.dat", "wb") as fileObject: pickle.dump(data, fileObject) fileObject.close() return
def borrowBook(self): """Borrow Method. Provides borrowing functionality for both Student & Teacher Class.""" with open(self.borrowPath, "r", newline="") as file_borrow_01: reader = csv.reader(file_borrow_01) data_borrow = list(reader) cur_ids = [] for row in data_borrow: if len(row) == 4: cur_ids.append(row[1]) file_borrow_01.close() rollNo = questionary.text( "Enter your Admission No:", validate=lambda x: len(x) == 4 and str(x).isdigit() == True, ).ask() if rollNo in cur_ids: print("You have already borrowed a book.") print("First return that book!") return name = questionary.text( "Enter your name: ", ).ask() print("Name & Admin No. accepted!") with open(self.bookPath, "r", newline="") as fileBooks: data_books = [] for row in csv.reader(fileBooks): if len(row) != 0: data_books.append(row) cur_books = [row[1] for row in data_books] today = str(date.today()) fileBooks.close() book = questionary.autocomplete( "Choose a book", choices=cur_books, validate=lambda b: b in cur_books, ).ask() toBeIns = [book, rollNo, name, today] data_borrow = [ row for row in csv.reader(open(self.borrowPath, "r", newline="")) if len(row) != 0 ] data_borrow.append(toBeIns) with open(self.borrowPath, "w", newline="") as file: writer = csv.writer(file) writer.writerows(data_borrow) print("Book borrowed successfully!") file.close()
def _prompt_team_apps(default_team: Optional[str], apps: List[dict]) -> List[dict]: apps_by_team = groupby(curry(get_in, ["team", "name"]), apps) if personal_space := apps_by_team.get(None): apps_by_team = dissoc(assoc(apps_by_team, "personal", personal_space), None) if default_team and default_team in apps_by_team: team = default_team else: click.echo( f"""Teams found in account: {", ".join(sorted(click.style(team, fg="magenta")for team in apps_by_team))}""" ) team = questionary.autocomplete(message="Choose team:", choices=apps_by_team).ask() return apps_by_team[team] def _prompt_app(default_app: Optional[str], apps: List[dict]) -> dict: apps_by_name = groupby("name", apps) if default_app and default_app in apps_by_name: name = default_app else: name = questionary.autocomplete(message="Choose app:", choices=apps_by_name).ask() return first(apps_by_name[name])
def create(self):
    """
    Create a new DSL2 module from the nf-core template.

    Tool should be named just <tool> or <tool/subtool>
    e.g fastqc or samtools/sort, respectively.

    If <directory> is a pipeline, this function creates a file called:
    '<directory>/modules/local/tool.nf'
      OR
    '<directory>/modules/local/tool_subtool.nf'

    If <directory> is a clone of nf-core/modules, it creates or modifies
    the following files:

    modules/software/tool/subtool/
        * main.nf
        * meta.yml
        * functions.nf
    modules/tests/software/tool/subtool/
        * main.nf
        * test.yml
    tests/config/pytest_software.yml

    The function will attempt to automatically find a Bioconda package
    called <tool> and matching Docker / Singularity images from
    BioContainers.
    """
    # Check whether the given directory is a nf-core pipeline or a clone of nf-core/modules
    self.repo_type = self.get_repo_type(self.directory)

    log.info(
        "[yellow]Press enter to use default values [cyan bold](shown in brackets)[/] [yellow]or type your own responses. "
        "ctrl+click [link=https://youtu.be/dQw4w9WgXcQ]underlined text[/link] to open links."
    )

    # Collect module info via prompt if empty or invalid
    if self.tool is None:
        self.tool = ""
    # Loop until the name is non-empty, lower-case/digits only, and has no '/'
    # left (a single '/' is consumed below by splitting into tool + subtool).
    while self.tool == "" or re.search(
            r"[^a-z\d/]", self.tool) or self.tool.count("/") > 0:

        # Check + auto-fix for invalid characters
        if re.search(r"[^a-z\d/]", self.tool):
            log.warning(
                "Tool/subtool name must be lower-case letters only, with no punctuation"
            )
            tool_clean = re.sub(r"[^a-z\d/]", "", self.tool.lower())
            if rich.prompt.Confirm.ask(
                    f"[violet]Change '{self.tool}' to '{tool_clean}'?"):
                self.tool = tool_clean
            else:
                self.tool = ""

        # Split into tool and subtool
        if self.tool.count("/") > 1:
            log.warning("Tool/subtool can have maximum one '/' character")
            self.tool = ""
        elif self.tool.count("/") == 1:
            self.tool, self.subtool = self.tool.split("/")
        else:
            self.subtool = None  # Reset edge case: entered '/subtool' as name and gone round loop again

        # Prompt for new entry if we reset
        if self.tool == "":
            self.tool = rich.prompt.Prompt.ask(
                "[violet]Name of tool/subtool").strip()

    # Determine the tool name
    self.tool_name = self.tool
    self.tool_dir = self.tool
    if self.subtool:
        self.tool_name = f"{self.tool}_{self.subtool}"
        self.tool_dir = os.path.join(self.tool, self.subtool)

    # Check existence of directories early for fast-fail
    self.file_paths = self.get_module_dirs()

    # Try to find a bioconda package for 'tool'
    while True:
        try:
            if self.tool_conda_name:
                anaconda_response = nf_core.utils.anaconda_package(
                    self.tool_conda_name, ["bioconda"])
            else:
                anaconda_response = nf_core.utils.anaconda_package(
                    self.tool, ["bioconda"])
            version = anaconda_response.get("latest_version")
            if not version:
                # Fall back to the highest version in the list when the API
                # does not report 'latest_version'.
                version = str(
                    max([
                        parse_version(v)
                        for v in anaconda_response["versions"]
                    ]))
            self.tool_licence = nf_core.utils.parse_anaconda_licence(
                anaconda_response, version)
            self.tool_description = anaconda_response.get("summary", "")
            self.tool_doc_url = anaconda_response.get("doc_url", "")
            self.tool_dev_url = anaconda_response.get("dev_url", "")
            if self.tool_conda_name:
                self.bioconda = "bioconda::" + self.tool_conda_name + "=" + version
            else:
                self.bioconda = "bioconda::" + self.tool + "=" + version
            log.info(f"Using Bioconda package: '{self.bioconda}'")
            break
        except (ValueError, LookupError) as e:
            log.warning(
                f"Could not find Conda dependency using the Anaconda API: '{self.tool}'"
            )
            if rich.prompt.Confirm.ask(
                    f"[violet]Do you want to enter a different Bioconda package name?"
            ):
                self.tool_conda_name = rich.prompt.Prompt.ask(
                    "[violet]Name of Bioconda package").strip()
                continue
            else:
                log.warning(
                    f"{e}\nBuilding module without tool software and meta, you will need to enter this information manually."
                )
                break

    # Try to get the container tag (only if bioconda package was found)
    if self.bioconda:
        try:
            if self.tool_conda_name:
                self.container_tag = nf_core.utils.get_biocontainer_tag(
                    self.tool_conda_name, version)
            else:
                self.container_tag = nf_core.utils.get_biocontainer_tag(
                    self.tool, version)
            log.info(
                f"Using Docker / Singularity container with tag: '{self.container_tag}'"
            )
        except (ValueError, LookupError) as e:
            log.info(f"Could not find a container tag ({e})")

    # Prompt for GitHub username
    # Try to guess the current user if `gh` is installed
    author_default = None
    try:
        with open(os.devnull, "w") as devnull:
            gh_auth_user = json.loads(
                subprocess.check_output(["gh", "api", "/user"],
                                        stderr=devnull))
        author_default = "@{}".format(gh_auth_user["login"])
    except Exception as e:
        log.debug(
            f"Could not find GitHub username using 'gh' cli command: [red]{e}"
        )

    # Regex to valid GitHub username: https://github.com/shinnn/github-username-regex
    github_username_regex = re.compile(
        r"^@[a-zA-Z\d](?:[a-zA-Z\d]|-(?=[a-zA-Z\d])){0,38}$")
    while self.author is None or not github_username_regex.match(
            self.author):
        # Only warn on a re-prompt (i.e. when a non-matching value was entered).
        if self.author is not None and not github_username_regex.match(
                self.author):
            log.warning(
                "Does not look like a valid GitHub username (must start with an '@')!"
            )
        self.author = rich.prompt.Prompt.ask(
            "[violet]GitHub Username:[/]{}".format(
                " (@author)" if author_default is None else ""),
            default=author_default,
        )

    process_label_defaults = [
        "process_low", "process_medium", "process_high", "process_long"
    ]
    if self.process_label is None:
        log.info(
            "Provide an appropriate resource label for the process, taken from the "
            "[link=https://github.com/nf-core/tools/blob/master/nf_core/pipeline-template/conf/base.config#L29]nf-core pipeline template[/link].\n"
            "For example: {}".format(", ".join(process_label_defaults)))
    while self.process_label is None:
        self.process_label = questionary.autocomplete(
            "Process resource label:",
            choices=process_label_defaults,
            style=nf_core.utils.nfcore_question_style,
            default="process_low",
        ).ask()

    if self.has_meta is None:
        log.info(
            "Where applicable all sample-specific information e.g. 'id', 'single_end', 'read_group' "
            "MUST be provided as an input via a Groovy Map called 'meta'. "
            "This information may [italic]not[/] be required in some instances, for example "
            "[link=https://github.com/nf-core/modules/blob/master/software/bwa/index/main.nf]indexing reference genome files[/link]."
        )
    while self.has_meta is None:
        self.has_meta = rich.prompt.Confirm.ask(
            "[violet]Will the module require a meta map of sample information? (yes/no)",
            default=True)

    # Create module template with cookiecutter
    self.render_template()

    if self.repo_type == "modules":
        # Add entry to pytest_software.yml
        try:
            with open(
                    os.path.join(self.directory, "tests", "config",
                                 "pytest_software.yml"), "r") as fh:
                pytest_software_yml = yaml.safe_load(fh)
            if self.subtool:
                pytest_software_yml[self.tool_name] = [
                    f"software/{self.tool}/{self.subtool}/**",
                    f"tests/software/{self.tool}/{self.subtool}/**",
                ]
            else:
                pytest_software_yml[self.tool_name] = [
                    f"software/{self.tool}/**",
                    f"tests/software/{self.tool}/**",
                ]
            # Keep the YAML file sorted so diffs stay minimal.
            pytest_software_yml = dict(sorted(pytest_software_yml.items()))
            with open(
                    os.path.join(self.directory, "tests", "config",
                                 "pytest_software.yml"), "w") as fh:
                yaml.dump(pytest_software_yml,
                          fh,
                          sort_keys=True,
                          Dumper=nf_core.utils.custom_yaml_dumper())
        except FileNotFoundError as e:
            raise UserWarning(
                f"Could not open 'tests/config/pytest_software.yml' file!")

    new_files = list(self.file_paths.values())
    if self.repo_type == "modules":
        new_files.append(
            os.path.join(self.directory, "tests", "config",
                         "pytest_software.yml"))
    log.info("Created / edited following files:\n " + "\n ".join(new_files))
def main(): generator = questionary.autocomplete( "What do you want to generate?", choices=[ "Apache Config", "Windows Install", ], style=style, ).ask() if generator == "Apache Config": gen = ApacheConfigGenerator() apache_path: str = questionary.path( "Where is apache2 located?", default="/etc/apache2", style=style, ).ask() if apache_path == None: print("Failed to generate: missing apache path!") exit(1) domains: str = questionary.text( "What domains to you want to serve? (Seperated by commas)", default="example.com", style=style, ).ask() if domains == None: print("Failed to generate: missing domain(s)!") exit(1) web_loc = questionary.path( "What is the path of your website files? (Optional)", default="", style=style, ).ask() if web_loc == None: print("Failed to generate: missing webroot!") exit(1) uri_to_forward = questionary.text( "What URI do you want to proxy to? (Optional)", default="", style=style, ).ask() if uri_to_forward == None: print("Failed to generate: missing proxy URI!") exit(1) if apache_path != "" and domains != "": gen.generate( apache_path=apache_path, domains=domains, uri_to_forward=uri_to_forward, web_loc=web_loc, ) else: print("Failed to generate: missing answers!") elif generator == "Windows Apache2 Modifier Install": gen = Windowsa2Installer() gen.generate() else: print("Nothing to generate, exiting...")