def cli(ctx, path):
    """Create files to use GitHub/TravisCI testing.

    Setup files in a github tool repository to enable continuous
    integration testing.::

        % planemo travis_init .
        % # setup Ubuntu 12.04 w/ dependencies in
        % vim .travis/setup_custom_dependencies.bash
        % git add .travis.yml .travis
        % git commit -m "Add Travis CI testing infrastructure for tools."
        % git push # and register repository @ http://travis-ci.org/

    These tests were inspired by work originally done and documented by
    Peter Cock here http://bit.ly/gxtravisci.
    """
    # TODO: Option --verbose_travis_yaml to unroll travis_test.sh line by line
    # and place all but last in 'install' section and last in 'script'. Would
    # require a yaml dependency though.
    shell("mkdir -p '%s/.travis'" % path)
    travis_yml = os.path.join(path, ".travis.yml")
    setup_sh = os.path.join(path, ".travis", "setup_custom_dependencies.bash")
    if not os.path.exists(travis_yml):
        open(travis_yml, "w").write(TRAVIS_YML)
    else:
        warn(".travis.yml file already exists, not overwriting.")
    if not os.path.exists(setup_sh):
        open(setup_sh, "w").write(TRAVIS_SETUP)
    else:
        warn(".travis/setup_custom_dependencies.bash already exists, not overwriting.")
    info(PREPARE_MESSAGE)
def update(realized_repository):
    upload_ret_code = 0
    upload_ok = True
    if not kwds["skip_upload"]:
        upload_ret_code = shed.upload_repository(
            ctx, realized_repository, **kwds
        )
        upload_ok = not upload_ret_code
    if upload_ret_code == 2:
        error("Failed to update repository; it does not exist "
              "in the target ToolShed.")
        return upload_ret_code
    repo_id = realized_repository.find_repository_id(ctx, tsi)
    metadata_ok = True
    if not kwds["skip_metadata"]:
        metadata_ok = realized_repository.update(ctx, tsi, repo_id)
    if metadata_ok:
        info("Repository metadata updated.")
    else:
        error("Failed to update repository metadata.")
    if metadata_ok and upload_ok:
        return 0
    else:
        error("Failed to update a repository.")
        return 1
def write_tool_description(ctx, tool_description, **kwds):
    """Write a tool description to the file system guided by supplied CLI kwds."""
    tool_id = kwds.get("id")
    output = kwds.get("tool")
    if not output:
        extension = "cwl" if kwds.get("cwl") else "xml"
        output = "%s.%s" % (tool_id, extension)
    if not io.can_write_to_path(output, **kwds):
        ctx.exit(1)
    io.write_file(output, tool_description.contents)
    io.info("Tool written to %s" % output)
    test_contents = tool_description.test_contents
    if test_contents:
        sep = "-" if "-" in tool_id else "_"
        tests_path = "%s%stests.yml" % (kwds.get("id"), sep)
        if not io.can_write_to_path(tests_path, **kwds):
            ctx.exit(1)
        io.write_file(tests_path, test_contents)
        io.info("Tool tests written to %s" % tests_path)
    macros = kwds["macros"]
    macros_file = "macros.xml"
    if macros and not os.path.exists(macros_file):
        io.write_file(macros_file, tool_description.macro_contents)
    elif macros:
        io.info(REUSING_MACROS_MESSAGE)
    if tool_description.test_files:
        if not os.path.exists("test-data"):
            io.info("No test-data directory, creating one.")
            io.shell("mkdir -p 'test-data'")
        for test_file in tool_description.test_files:
            io.info("Copying test-file %s" % test_file)
            io.shell("cp '%s' 'test-data'" % test_file)
def _handle_summary(structured_data, **kwds):
    summary_dict = get_dict_value("summary", structured_data)
    num_tests = get_dict_value("num_tests", summary_dict)
    num_failures = get_dict_value("num_failures", summary_dict)
    num_errors = get_dict_value("num_errors", summary_dict)
    num_problems = num_failures + num_errors
    summary_exit_code = EXIT_CODE_OK
    if num_problems > 0:
        summary_exit_code = EXIT_CODE_GENERIC_FAILURE
    elif num_tests == 0:
        summary_exit_code = EXIT_CODE_NO_SUCH_TARGET
    summary_style = kwds.get("summary")
    if summary_style != "none":
        if num_tests == 0:
            warn(NO_TESTS_MESSAGE)
        elif num_problems == 0:
            info(ALL_TESTS_PASSED_MESSAGE % num_tests)
        elif num_problems:
            html_report_file = kwds.get("test_output")
            message_args = (num_problems, num_tests, html_report_file)
            message = PROBLEM_COUNT_MESSAGE % message_args
            warn(message)
        _summarize_tests_full(structured_data, **kwds)
    return summary_exit_code
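# A minimal, self-contained sketch of the exit-code policy used by
# _handle_summary above. The constant values (0, 1, 2) are assumptions for
# illustration, not verified against planemo.exit_codes.
EXIT_CODE_OK_SKETCH = 0
EXIT_CODE_GENERIC_FAILURE_SKETCH = 1
EXIT_CODE_NO_SUCH_TARGET_SKETCH = 2


def _summary_exit_code_sketch(num_tests, num_failures, num_errors):
    # Any failure or error trumps the "no tests found" case.
    if num_failures + num_errors > 0:
        return EXIT_CODE_GENERIC_FAILURE_SKETCH
    if num_tests == 0:
        return EXIT_CODE_NO_SUCH_TARGET_SKETCH
    return EXIT_CODE_OK_SKETCH


assert _summary_exit_code_sketch(5, 0, 0) == EXIT_CODE_OK_SKETCH
assert _summary_exit_code_sketch(5, 1, 0) == EXIT_CODE_GENERIC_FAILURE_SKETCH
assert _summary_exit_code_sketch(0, 0, 0) == EXIT_CODE_NO_SUCH_TARGET_SKETCH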
def cli(ctx, **kwds):
    """Download and install conda.

    This will download conda for managing dependencies for your platform
    using the appropriate Miniconda installer.

    By running this command, you are agreeing to the terms of the Conda
    license, a 3-clause BSD license. Please review the full license at
    http://docs.continuum.io/anaconda/eula.

    Planemo will print a warning and terminate with an exit code of 7
    if Conda is already installed.
    """
    conda_context = build_conda_context(ctx, **kwds)
    if conda_context.is_conda_installed():
        warn(MESSAGE_ERROR_ALREADY_EXISTS % conda_context.conda_exec)
        exit = EXIT_CODE_ALREADY_EXISTS
    else:
        exit = conda_util.install_conda(conda_context=conda_context, force_conda_build=True)
        if exit:
            warn(MESSAGE_ERROR_FAILED % conda_context.conda_exec)
        else:
            info(MESSAGE_INSTALL_OKAY % conda_context.conda_exec)
    ctx.exit(exit)
def _is_tool_xml(ctx, tool_path, tool_xml):
    if os.path.basename(tool_path) in SHED_FILES:
        return False
    if tool_xml.getroot().tag != "tool":
        if ctx.verbose:
            info(SKIP_XML_MESSAGE % tool_path)
        return False
    return True
def cli(ctx, path, **kwds):
    """Upload file to GitHub as a sharable gist."""
    file_url = github_util.publish_as_gist_file(ctx, path)
    if kwds.get("link_type") == "raw":
        share_url = file_url
    else:
        share_url = "http://htmlpreview.github.io/?%s" % file_url
    info("File published to Github Gist - share with %s" % share_url)
def run_galaxy_command(ctx, command, env, action, daemon=False):
    message = "%s with command [%s]" % (action, command)
    info(message)
    ctx.vlog("With environment variables:")
    ctx.vlog("============================")
    for key, value in env.items():
        ctx.vlog('%s="%s"' % (key, value))
    ctx.vlog("============================")
    return shell(command, env=env)
def handle_tool_load_error(tool_path, tool_xml):
    """Return True if tool_xml is a tool load error (invalid XML), and
    print a helpful error message.
    """
    is_error = False
    if is_tool_load_error(tool_xml):
        info("Could not lint %s due to malformed xml." % tool_path)
        is_error = True
    return is_error
def cli(ctx, **kwds):
    """Generate a bioconductor tool outline from supplied arguments."""
    invalid = _validate_kwds(kwds)
    _check_r_version(kwds)
    if kwds.get("command"):
        command = kwds["command"]
        rscript = command.split()[1]  # Name of custom R file
    elif kwds.get("rscript") and kwds.get("input") and kwds.get("output"):
        rscript = kwds["rscript"]
        command = 'Rscript %s ' % rscript
        # Build command from --rscript, --input, --output
        for i in kwds["input"]:
            command += '--input %s ' % i
        for o in kwds["output"]:
            command += '--output %s ' % o
    else:
        # No --rscript/--input/--output and no --command given
        info("Need to supply EITHER a full command (--command) OR an R script "
             "(--rscript), input(s) (--input), and output(s) (--output).")
        ctx.exit(1)
    if invalid:
        ctx.exit(invalid)
    rscript_data = rscript_parse.parse_rscript(rscript, command)
    kwds['rscript_data'] = rscript_data
    kwds['rscript'] = rscript
    kwds['command'] = command
    kwds['name'] = kwds.get("name")
    # Default: name of R script w/o extension
    kwds['id'] = rscript.split("/")[-1].replace(".R", "")
    # Assign input/output to kwds if --input/--output not used
    if not kwds['input']:
        new_inputs = ()
        for i in kwds['rscript_data']['inputs']['input']:
            new_inputs += (i,)
        kwds['input'] = new_inputs
    if not kwds['output']:
        new_outputs = ()
        for i in kwds['rscript_data']['outputs']['output']:
            new_outputs += (i,)
        kwds['output'] = new_outputs
    input_dict = rscript_data.get('inputs')
    inputs = list(input_dict.values())
    kwds['inputs'] = inputs
    # Set example_input/output to input/output for now;
    # example_input/output can probably be removed in the future.
    kwds['example_input'] = kwds['input']
    kwds['example_output'] = kwds['output']
    # Build the tool definition file
    tool_description = bioc_tool_builder.build(**kwds)
    tool_builder.write_tool_description(ctx, tool_description, **kwds)
def cli(ctx, path, **kwds):
    """Upload a file to GitHub as a sharable gist and print the share URL."""
    file_url = github_util.publish_as_gist_file(ctx, path)
    if kwds.get("link_type") == "raw":
        share_url = file_url
    else:
        share_url = "http://htmlpreview.github.io/?%s" % file_url
    info("File published to Github Gist - share with %s" % share_url)
def get_galaxy_datatype(z_ext, datatype_fp):
    """Get the Galaxy datatype corresponding to a Zenodo file type."""
    g_datatype = ''
    datatypes = load_yaml(datatype_fp)
    if z_ext in datatypes:
        g_datatype = datatypes[z_ext]
    if g_datatype == '':
        g_datatype = '# Please add a Galaxy datatype or update the shared/datatypes.yaml file'
    info("Get Galaxy datatypes: %s --> %s" % (z_ext, g_datatype))
    return g_datatype
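# Hedged, self-contained sketch of the datatype lookup performed by
# get_galaxy_datatype above; the inline mapping is illustrative, not the
# real shared/datatypes.yaml, and load_yaml is assumed to be a thin wrapper
# around PyYAML.
import yaml

_EXAMPLE_DATATYPES = yaml.safe_load("""
fasta: fasta
tsv: tabular
""")


def _lookup_datatype_sketch(z_ext):
    # Unknown extensions fall back to the same placeholder comment used above.
    return _EXAMPLE_DATATYPES.get(
        z_ext,
        '# Please add a Galaxy datatype or update the shared/datatypes.yaml file')


assert _lookup_datatype_sketch("tsv") == "tabular"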
def append_macro_file(tool_files, kwds):
    macro_contents = None
    if kwds["macros"]:
        macro_contents = _render(kwds, MACROS_TEMPLATE)
        macros_file = "macros.xml"
        if not os.path.exists(macros_file):
            tool_files.append(ToolFile(macros_file, macro_contents, "macros"))
        else:
            io.info(REUSING_MACROS_MESSAGE)
def create(realized_repository):
    repo_id = realized_repository.find_repository_id(ctx, tsi)
    if repo_id is None:
        if realized_repository.create(ctx, tsi):
            info("Repository created")
            return 0
        else:
            return 2
    else:
        return 1
def _is_tool_source(ctx, tool_path, tool_source):
    if os.path.basename(tool_path) in SHED_FILES:
        return False
    root = getattr(tool_source, "root", None)
    if root is not None:
        if root.tag != "tool":
            if ctx.verbose:
                info(SKIP_XML_MESSAGE % tool_path)
            return False
    return True
def convert_tool_dep(dependencies_file):
    """Parse a tool_dependencies.xml into install.sh and env.sh commands.

    Returns two lists of strings, commands to add to install.sh and
    env.sh respectively.
    """
    install_cmds = []
    env_cmds = []
    root = ET.parse(dependencies_file).getroot()
    package_els = root.findall("package")
    packages = []
    dependencies = []
    for package_el in package_els:
        install_els = package_el.findall("install")
        assert len(install_els) in (0, 1)
        if len(install_els) == 0:
            repository_el = package_el.find("repository")
            assert repository_el is not None, "no repository in %s" % dependencies_file
            dependencies.append(Dependency(None, package_el, repository_el))
        else:
            install_el = install_els[0]
            packages.append(BasePackage(None, package_el, install_el, readme=None))
    if not packages:
        info("No packages in %s" % dependencies_file)
        return [], []
    assert len(packages) == 1, packages
    package = packages[0]
    name = package_el.attrib["name"]
    version = package_el.attrib["version"]
    # TODO - Set $INSTALL_DIR in the script
    # os.environ["INSTALL_DIR"] = os.path.abspath(os.curdir)
    for action in package.all_actions:
        inst, env = action.to_bash()
        install_cmds.extend(inst)
        env_cmds.extend(env)
    if install_cmds:
        install_cmds.insert(0, 'cd $dep_install_tmp')
        install_cmds.insert(0, 'specifc_action_done=0')
        install_cmds.insert(0, 'echo "%s"' % ('=' * 60))
        install_cmds.insert(0, 'echo "Installing %s version %s"' % (name, version))
        install_cmds.insert(0, 'echo "%s"' % ('=' * 60))
    if env_cmds:
        env_cmds.insert(0, 'specifc_action_done=0')
        env_cmds.insert(0, '#' + '=' * 60)
        env_cmds.insert(0, 'echo "Setting environment variables for %s version %s"' % (name, version))
        env_cmds.insert(0, '#' + '=' * 60)
        # TODO - define $INSTALL_DIR here?
    return install_cmds, env_cmds
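# For reference, a runnable sketch of the tool_dependencies.xml shape that
# convert_tool_dep expects: one <package> element carrying an <install>
# block. The XML below is illustrative only, not taken from a real tool.
import xml.etree.ElementTree as _ET

_EXAMPLE_TOOL_DEPENDENCIES = """\
<tool_dependency>
  <package name="samtools" version="1.2">
    <install version="1.0">
      <actions />
    </install>
  </package>
</tool_dependency>
"""

_example_root = _ET.fromstring(_EXAMPLE_TOOL_DEPENDENCIES)
for _package_el in _example_root.findall("package"):
    # Mirrors the attribute access performed in convert_tool_dep.
    print(_package_el.attrib["name"], _package_el.attrib["version"])  # samtools 1.2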
def generate_tuto_from_wf(self, ctx):
    """Generate the skeleton of a tutorial from a workflow."""
    self.check_topic_init_tuto()
    if self.tuto.has_workflow():
        info("Create tutorial skeleton from workflow")
        self.tuto.create_hands_on_tutorial(ctx)
        self.tuto.export_workflow_file()
    else:
        raise Exception(
            "A path to a local workflow or the id of a workflow on a "
            "running Galaxy instance should be provided")
def cli(ctx, path, template=None, **kwds):
    """Help initialize global configuration (in home directory) for Planemo."""
    # TODO: prompt for values someday.
    config_path = config.global_config_path()
    if os.path.exists(config_path):
        warn("File %s already exists, exiting." % config_path)
        sys.exit(1)
    with open(config_path, "w") as f:
        f.write(CONFIG_TEMPLATE)
    info(SUCCESS_MESSAGE % config_path)
def run_galaxy_command(ctx, command, env, action):
    """Run Galaxy command with informative verbose logging."""
    message = "%s with command [%s]" % (action, command)
    info(message)
    ctx.vlog("With environment variables:")
    ctx.vlog("============================")
    for key, value in env.items():
        ctx.vlog('%s="%s"' % (key, value))
    ctx.vlog("============================")
    exit_code = shell(command, env=env)
    ctx.vlog("run command exited with return code %s" % exit_code)
    return exit_code
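# Hypothetical usage sketch for run_galaxy_command; the command and
# environment values below are made up for illustration.
#
#   exit_code = run_galaxy_command(
#       ctx,
#       command="sh run_tests.sh",
#       env={"GALAXY_CONFIG_FILE": "/tmp/config/galaxy.ini"},
#       action="Testing tools",
#   )
#   assert exit_code == 0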
def lint_repository_tools(ctx, realized_repository, lint_ctx, lint_args):
    path = realized_repository.path
    for (tool_path, tool_source) in yield_tool_sources(ctx, path, recursive=True):
        original_path = tool_path.replace(path, realized_repository.real_path)
        info("+Linting tool %s" % original_path)
        if handle_tool_load_error(tool_path, tool_source):
            return True
        lint_tool_source_with(
            lint_ctx, tool_source, extra_modules=lint_args["extra_modules"]
        )
def shed_serve(ctx, install_args_list, **kwds):
    with serve_daemon(ctx, **kwds) as config:
        install_deps = not kwds.get("skip_dependencies", False)
        io.info("Installing repositories - this may take some time...")
        for install_args in install_args_list:
            install_args["install_tool_dependencies"] = install_deps
            install_args["install_repository_dependencies"] = True
            install_args["new_tool_panel_section_label"] = "Shed Installs"
            config.install_repo(**install_args)
        config.wait_for_all_installed()
        yield config
def cli(ctx, **kwds):
    """Generate a tool outline from supplied arguments."""
    invalid = _validate_kwds(kwds)
    if invalid:
        return invalid
    output = kwds.get("tool")
    if not output:
        output = "%s.xml" % kwds.get("id")
    if not io.can_write_to_path(output, **kwds):
        sys.exit(1)
    tool_description = tool_builder.build(**kwds)
    open(output, "w").write(tool_description.contents)
    io.info("Tool written to %s" % output)
    macros = kwds["macros"]
    macros_file = "macros.xml"
    if macros and not os.path.exists(macros_file):
        open(macros_file, "w").write(tool_description.macro_contents)
    elif macros:
        io.info(REUSING_MACROS_MESSAGE)
    if tool_description.test_files:
        if not os.path.exists("test-data"):
            io.info("No test-data directory, creating one.")
            io.shell("mkdir -p 'test-data'")
        for test_file in tool_description.test_files:
            io.info("Copying test-file %s" % test_file)
            io.shell("cp '%s' 'test-data'" % test_file)
def cli(ctx, paths, **kwds):
    """Launch Galaxy with Tool Shed dependencies.

    This command will start a Galaxy instance configured to target the
    specified shed, find published artifacts (tools and dependencies)
    corresponding to command-line arguments and ``.shed.yml`` file(s),
    install these artifacts, and serve a Galaxy instance that can be
    logged into and explored interactively.
    """
    kwds['galaxy_skip_client_build'] = False
    install_args_list = shed.install_arg_lists(ctx, paths, **kwds)
    with shed_serve(ctx, install_args_list, **kwds) as config:
        io.info("Galaxy running with tools installed at %s" % config.galaxy_url)
        sleep_for_serve()
def cli(ctx, paths, **kwds):
    """Serve a transient Galaxy with published repositories installed.

    This command will start a Galaxy instance configured to target the
    specified shed, find published artifacts (tools and dependencies)
    corresponding to command-line arguments and ``.shed.yml`` file(s),
    install these artifacts, and serve a Galaxy instance that can be
    logged into and explored interactively.
    """
    install_args_list = shed.install_arg_lists(ctx, paths, **kwds)
    with shed_serve(ctx, install_args_list, **kwds) as config:
        gx_url = "http://localhost:%d/" % config.port
        io.info("Galaxy running with tools installed at %s" % gx_url)
        time.sleep(1000000)
def create(realized_repository):
    repo_id = realized_repository.find_repository_id(ctx, shed_context)
    if repo_id is None:
        if realized_repository.create(ctx, shed_context):
            info("Repository created")
            if not kwds["skip_upload"]:
                return shed.upload_repository(ctx, realized_repository, **kwds)
            else:
                return 0
        else:
            return 2
    else:
        return 1
def lint_tools_on_path(ctx, paths, lint_args, **kwds):
    assert_tools = kwds.get("assert_tools", True)
    recursive = kwds.get("recursive", False)
    exit_codes = []
    for (tool_path, tool_xml) in yield_tool_sources_on_paths(ctx, paths, recursive):
        if handle_tool_load_error(tool_path, tool_xml):
            exit_codes.append(EXIT_CODE_GENERIC_FAILURE)
            continue
        info("Linting tool %s" % tool_path)
        if not lint_tool_source(tool_xml, **lint_args):
            error("Failed linting")
            exit_codes.append(EXIT_CODE_GENERIC_FAILURE)
        else:
            exit_codes.append(EXIT_CODE_OK)
    return coalesce_return_codes(exit_codes, assert_at_least_one=assert_tools)
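# Hedged sketch of the reduction coalesce_return_codes is assumed to perform
# above: collapse per-tool exit codes into a single one, treating "nothing
# linted" as a failure when assert_at_least_one is set. This is an assumption
# about its behavior, not the real implementation, and 0/2 are illustrative
# values.
def _coalesce_return_codes_sketch(exit_codes, assert_at_least_one=False):
    if not exit_codes:
        return 2 if assert_at_least_one else 0
    return max(exit_codes)  # most severe code wins


assert _coalesce_return_codes_sketch([0, 0, 1]) == 1
assert _coalesce_return_codes_sketch([], assert_at_least_one=True) == 2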
def update(realized_repository):
    collected_data['results']['total'] += 1
    upload_ret_code = 0
    upload_ok = True
    if not kwds["skip_upload"]:
        upload_ret_code = shed.upload_repository(
            ctx, realized_repository, **kwds
        )
        upload_ok = not upload_ret_code
    if upload_ret_code == 2:
        collected_data['results']['failures'] += 1
        collected_data['tests'].append({
            'classname': realized_repository.name,
            'result': 2,
        })
        error("Failed to update repository; it does not exist "
              "in the target ToolShed.")
        return upload_ret_code
    repo_id = realized_repository.find_repository_id(ctx, shed_context)
    metadata_ok = True
    if not kwds["skip_metadata"]:
        metadata_ok = realized_repository.update(ctx, shed_context, repo_id)
    if metadata_ok:
        info("Repository metadata updated.")
    else:
        error("Failed to update repository metadata.")
    if metadata_ok and upload_ok:
        collected_data['tests'].append({
            'classname': realized_repository.name,
            'result': 0,
        })
        return 0
    elif upload_ok:
        collected_data['results']['skips'] += 1
        collected_data['tests'].append({
            'classname': realized_repository.name,
            'result': 3,
        })
        error("Repo updated but metadata was not.")
        return 1
    else:
        collected_data['results']['failures'] += 1
        collected_data['tests'].append({
            'classname': realized_repository.name,
            'result': 1,
        })
        error("Failed to update a repository.")
        return 1
def lint_tools_on_path(ctx, paths, lint_args, **kwds):
    assert_tools = kwds.get("assert_tools", True)
    recursive = kwds.get("recursive", False)
    exit = 0
    valid_tools = 0
    for path in paths:
        for (tool_path, tool_xml) in yield_tool_xmls(ctx, path, recursive):
            info("Linting tool %s" % tool_path)
            if not lint_xml(tool_xml, **lint_args):
                error("Failed linting")
                exit = 1
            else:
                valid_tools += 1
    if exit == 0 and valid_tools == 0 and assert_tools:
        exit = 2
    return exit
def get_zenodo_record(zenodo_link):
    """Get the content of a Zenodo record."""
    # get the record in the Zenodo link
    if 'doi' in zenodo_link:
        z_record = zenodo_link.split('.')[-1]
    else:
        z_record = zenodo_link.split('/')[-1]
    # get JSON corresponding to the record from Zenodo API
    req = "https://zenodo.org/api/records/%s" % (z_record)
    r = requests.get(req)
    if r:
        req_res = r.json()
    else:
        info("The Zenodo link (%s) seems invalid" % (zenodo_link))
        req_res = {'files': []}
        z_record = None
    return z_record, req_res
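# Self-contained sketch of the record-id extraction in get_zenodo_record;
# the links are illustrative.
for _zenodo_link in (
    "https://doi.org/10.5281/zenodo.1234567",  # DOI form: split on '.'
    "https://zenodo.org/record/1234567",       # URL form: split on '/'
):
    _z_record = (_zenodo_link.split('.')[-1] if 'doi' in _zenodo_link
                 else _zenodo_link.split('/')[-1])
    assert _z_record == "1234567"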
def __handle_summary(test_results, **kwds):
    summary_style = kwds.get("summary")
    if summary_style == "none":
        return
    if test_results.has_details:
        __summarize_tests_full(test_results, **kwds)
    else:
        if test_results.exit_code:
            warn(GENERIC_PROBLEMS_MESSAGE % test_results.output_html_path)
        else:
            info(GENERIC_TESTS_PASSED_MESSAGE)
def cli(ctx, paths, **kwds):
    """Run the specified tool's tests within Galaxy.

    All referenced tools (by default all the tools in the current working
    directory) will be tested and the results quickly summarized.

    To run these tests planemo needs a Galaxy instance to utilize, planemo
    will search parent directories to see if any is a Galaxy instance
    - but one can pick the Galaxy instance to use with the --galaxy_root
    option or force planemo to download a disposable instance with the
    ``--install_galaxy`` flag.

    In addition to the quick summary printed to the console - various
    detailed output summaries can be configured. ``tool_test_output.html``
    (settable via ``--test_output``) will contain a human consumable HTML
    report describing the test run. A JSON file (settable via
    ``--test_output_json`` and defaulting to ``tool_test_output.json``)
    will also be created. These files can be disabled by passing in empty
    arguments or globally by setting the values ``default_test_output``
    and/or ``default_test_output_json`` in ``~/.planemo.yml`` to ``null``.
    For continuous integration testing an xUnit-style report can be
    configured using the ``--test_output_xunit``.

    planemo uses temporarily generated config files and environment
    variables to attempt to shield this execution of Galaxy from manually
    launched runs against that same Galaxy root - but this may not be
    bullet proof yet so please be careful and do not try this against
    production Galaxy instances.
    """
    runnables = for_paths(paths)
    enable_beta_test = any([r.type not in [RunnableType.galaxy_tool, RunnableType.directory] for r in runnables])
    enable_beta_test = enable_beta_test or kwds.get("engine", "galaxy") != "galaxy"
    if enable_beta_test:
        info("Enabling beta testing mode for testing.")
        with engine_context(ctx, **kwds) as engine:
            test_data = engine.test(runnables)
            return_value = handle_reports_and_summary(ctx, test_data.structured_data, kwds=kwds)
    else:
        kwds["for_tests"] = True
        with galaxy_config(ctx, runnables, **kwds) as config:
            return_value = run_in_config(ctx, config, **kwds)
    ctx.exit(return_value)
def upload_repository(ctx, realized_repository, **kwds):
    """Upload a tool directory as a tarball to a tool shed."""
    path = realized_repository.path
    tar_path = kwds.get("tar", None)
    if not tar_path:
        tar_path = build_tarball(path, **kwds)
    if kwds.get("tar_only", False):
        name = realized_repository.pattern_to_file_name("shed_upload.tar.gz")
        shutil.copy(tar_path, name)
        return 0
    shed_context = get_shed_context(ctx, **kwds)
    update_kwds = {}
    _update_commit_message(ctx, realized_repository, update_kwds, **kwds)
    repo_id = handle_force_create(realized_repository, ctx, shed_context, **kwds)
    # failing to create the repo, give up
    if repo_id is None:
        return report_non_existent_repository(realized_repository)
    if kwds.get("check_diff", False):
        is_diff = diff_repo(ctx, realized_repository, **kwds) != 0
        if not is_diff:
            name = realized_repository.name
            info("Repository [%s] not different, skipping upload." % name)
            return 0
    # TODO: support updating repo information if it changes in the config file
    try:
        shed_context.tsi.repositories.update_repository(
            str(repo_id), tar_path, **update_kwds
        )
    except Exception as e:
        if isinstance(e, bioblend.ConnectionError) and e.status_code == 400 and \
                '"No changes to repository."' in e.body:
            warn("Repository %s was not updated because there were no changes" % realized_repository.name)
            return 0
        message = api_exception_to_message(e)
        error("Could not update %s" % realized_repository.name)
        error(message)
        return -1
    info("Repository %s updated successfully." % realized_repository.name)
    return 0
def upload_repository(ctx, realized_repository, **kwds):
    """Upload a tool directory as a tarball to a tool shed."""
    path = realized_repository.path
    tar_path = kwds.get("tar", None)
    if not tar_path:
        tar_path = build_tarball(path, **kwds)
    if kwds.get("tar_only", False):
        name = realized_repository.pattern_to_file_name("shed_upload.tar.gz")
        shell("cp '%s' '%s'" % (tar_path, name))
        return 0
    tsi = tool_shed_client(ctx, **kwds)
    update_kwds = {}
    _update_commit_message(ctx, realized_repository, update_kwds, **kwds)
    repo_id = realized_repository.find_repository_id(ctx, tsi)
    if repo_id is None and kwds["force_repository_creation"]:
        repo_id = realized_repository.create(ctx, tsi)
    # failing to create the repo, give up
    if repo_id is None:
        name = realized_repository.name
        error("Repository [%s] does not exist in the targeted Tool Shed." % name)
        return -1
    if kwds.get("check_diff", False):
        is_diff = diff_repo(ctx, realized_repository, **kwds)
        if not is_diff:
            name = realized_repository.name
            info("Repository [%s] not different, skipping upload." % name)
            return 0
    # TODO: support updating repo information if it changes in the config file
    try:
        tsi.repositories.update_repository(repo_id, tar_path, **update_kwds)
    except Exception as e:
        message = api_exception_to_message(e)
        error("Could not update %s" % realized_repository.name)
        error(message)
        return -1
    info("Repository %s updated successfully." % realized_repository.name)
    return 0
def cli(ctx, profile, **kwds):
    """List aliases for a path or a workflow or dataset ID.

    Aliases are associated with a particular planemo profile.
    """
    info("Looking for profiles...")
    aliases = profiles.list_alias(ctx, profile)
    if tabulate:
        print(tabulate({
            "Alias": aliases.keys(),
            "Object": aliases.values()
        }, headers="keys"))
    else:
        print(json.dumps(aliases, indent=4, sort_keys=True))
    info("{} aliases were found for profile {}.".format(len(aliases), profile))
    ctx.exit(0)
    return
def __init__(self, tool_inp_desc, wf_param_values, wf_steps, level, should_be_there=False, force_default=False):
    """Init an instance of ToolInput."""
    self.name = tool_inp_desc['name']
    if 'type' not in tool_inp_desc:
        raise ValueError("No type for the parameter %s" % tool_inp_desc['name'])
    self.type = tool_inp_desc['type']
    self.tool_inp_desc = tool_inp_desc
    self.level = level
    self.wf_param_values = wf_param_values
    self.wf_steps = wf_steps
    self.formatted_desc = ''
    self.force_default = force_default
    if self.name not in self.wf_param_values:
        if not should_be_there:
            info("%s not in workflow" % self.name)
        else:
            raise ValueError("%s not in workflow" % self.name)
    else:
        self.wf_param_values = self.wf_param_values[self.name]
def __summarize_tests_full(test_results, **kwds):
    num_tests = test_results.num_tests
    num_problems = test_results.num_problems
    if num_tests == 0:
        warn(NO_TESTS_MESSAGE)
        return
    if num_problems == 0:
        info(ALL_TESTS_PASSED_MESSAGE % num_tests)
    if num_problems:
        html_report_file = test_results.output_html_path
        message_args = (num_problems, num_tests, html_report_file)
        message = PROBLEM_COUNT_MESSAGE % message_args
        warn(message)
    for testcase_el in test_results.xunit_testcase_elements:
        structured_data_tests = test_results.structured_data_tests
        __summarize_test_case(structured_data_tests, testcase_el, **kwds)
def shed_serve(ctx, install_args_list, **kwds):
    """Serve a daemon instance of Galaxy with specified repositories installed."""
    with serve_daemon(ctx, **kwds) as config:
        install_deps = not kwds.get("skip_dependencies", False)
        print(INSTALLING_MESSAGE)
        io.info(INSTALLING_MESSAGE)
        for install_args in install_args_list:
            install_args["install_tool_dependencies"] = install_deps
            install_args["install_repository_dependencies"] = True
            install_args["new_tool_panel_section_label"] = "Shed Installs"
            config.install_repo(**install_args)
        try:
            config.wait_for_all_installed()
        except Exception:
            if ctx.verbose:
                print("Failed to install tool repositories, Galaxy log:")
                print(config.log_contents)
                print("Galaxy root:")
                io.shell(['ls', config.galaxy_root])
            raise
        yield config
def fill_data_library(self, ctx):
    """Fill a data library for a tutorial."""
    self.check_topic_init_tuto()
    # get the zenodo link
    z_link = ''
    if self.tuto.zenodo_link != '':
        if self.kwds['zenodo_link']:
            info("The data library and the metadata will be updated with the new Zenodo link")
            z_link = self.kwds['zenodo_link']
            self.tuto.zenodo_link = z_link
        else:
            info("The data library will be extracted using the Zenodo link in the metadata of the tutorial")
            z_link = self.tuto.zenodo_link
    elif self.kwds['zenodo_link']:
        info("The data library will be created and the metadata will be filled with the new Zenodo link")
        z_link = self.kwds['zenodo_link']
        self.tuto.zenodo_link = z_link
    if z_link == '' or z_link is None:
        raise Exception("A Zenodo link should be provided either in the metadata file or as argument of the command")
    # extract the data library from Zenodo
    self.tuto.prepare_data_library_from_zenodo()
    # update the metadata
    self.tuto.write_hands_on_tutorial()
def _handle_summary(structured_data, **kwds):
    summary_dict = get_dict_value("summary", structured_data)
    num_tests = get_dict_value("num_tests", summary_dict)
    num_failures = get_dict_value("num_failures", summary_dict)
    num_errors = get_dict_value("num_errors", summary_dict)
    num_problems = num_failures + num_errors
    summary_exit_code = EXIT_CODE_OK
    if num_problems > 0:
        summary_exit_code = EXIT_CODE_GENERIC_FAILURE
    elif num_tests == 0:
        summary_exit_code = EXIT_CODE_NO_SUCH_TARGET
    summary_style = kwds.get("summary")
    if kwds.get('test_data_updated'):
        info(TEST_DATA_UPDATED_MESSAGE)
    if summary_style != "none":
        if num_tests == 0:
            warn(NO_TESTS_MESSAGE)
        elif num_problems == 0:
            if kwds.get('update_test_data') and not kwds.get('test_data_updated'):
                info(TEST_DATA_NOT_UPDATED_MESSAGE % num_tests)
            else:
                info(ALL_TESTS_PASSED_MESSAGE % num_tests)
        elif num_problems:
            html_report_file = kwds.get("test_output")
            message_args = (num_problems, num_tests, html_report_file)
            message = PROBLEM_COUNT_MESSAGE % message_args
            warn(message)
        _summarize_tests_full(structured_data, **kwds)
    return summary_exit_code
def write_bioconda_recipe(package_name, clone, update, bioconda_dir_path=None):
    """Make a bioconda recipe given the package name.

    clone: y/N, clone the whole bioconda repository and create the recipe
        inside the repository.
    update: The update feature differs from the one in bioconda, as it
        updates the specific package, as opposed to every package in the
        bioconda repository.
    """
    # set bioconda path
    if bioconda_dir_path is None:
        bioconda_recipe_path = os.path.join(os.path.expanduser("~"), "bioconda-recipes")
    else:
        bioconda_recipe_path = os.path.join(bioconda_dir_path, "bioconda-recipes")
    # Clone
    if clone and (not os.path.exists(bioconda_recipe_path)):
        clone_bioconda_repo(bioconda_recipe_path)
        info("bioconda-recipes cloned and writing to %s" % bioconda_dir_path)
    else:
        info("Bioconda repository not cloned or already exists")
    # Check if package_name is in recipes
    presence = any(package_name in r for r, d, f in os.walk(bioconda_recipe_path))
    if presence:
        info("Package already exists in bioconda")
        if update:
            info("Package will be updated")
            recipe_dir = os.path.join(bioconda_recipe_path, "recipes")
            bioconductor_skeleton.write_recipe(package_name, recipe_dir, True)
    elif not presence:
        info("Package not found in bioconda recipes; writing a new recipe")
        recipe_dir = os.path.join(bioconda_recipe_path, "recipes")
        bioconductor_skeleton.write_recipe(package_name, recipe_dir, True)
    return
def cli(ctx, path, **kwds):
    """Publish JSON test results to Github Gist and produce a sharable URL.

    The sharable URL can be used to share an HTML version of the report
    that can be easily embedded in pull requests or commit messages.
    Requires a ~/.planemo.yml with Github 'username' and 'password' defined
    in a 'github' section of that configuration file.
    """
    file_url = github_util.publish_as_gist_file(ctx, path)
    share_url = PLANEMO_TEST_VIEWER_URL_TEMPLATE % file_url
    info("File published to Github Gist.")
    info("Raw URL: %s" % file_url)
    info("Share results with URL: %s" % share_url)
    markdown = "[View Tool Test Results](%s)" % share_url
    info("Embed results with markdown: %s" % markdown)
def create_tutorial(self, ctx):
    """Create the skeleton of a new tutorial."""
    # create tuto folder and empty files
    os.makedirs(self.dir)
    os.makedirs(self.tour_dir)
    os.makedirs(self.wf_dir)
    # extract the data library from Zenodo and the links for the tutorial
    if self.zenodo_link != '':
        info("Create the data library from Zenodo")
        self.prepare_data_library_from_zenodo()
    # create tutorial skeleton from workflow and copy workflow file
    if self.hands_on:
        info("Create tutorial skeleton from workflow (if it is provided)")
        self.create_hands_on_tutorial(ctx)
        self.export_workflow_file()
    # create slide skeleton
    if self.slides:
        with open(self.slide_fp, 'w') as slide_f:
            slide_f.write(
                templates.render(TUTO_SLIDES_TEMPLATE, **{"metadata": self.get_tuto_metata()}))
def perform_required_update(ctx, xml_files, tool_path, requirements, tokens, xml_to_update, wrapper_version_token, **kwds):
    """Carry out the update, if requirements are out-of-date."""
    # check all requirements
    for k, v in requirements.items():
        for req in v:
            req_check = check_conda(req, ctx, **kwds)
            # print(req_check, v[req]['text'])
            if req_check != v[req]['text']:
                xml_to_update[k].append({
                    'type': 'requirement',
                    'tag': v[req]['tag'],
                    'value': req_check
                })
    # check all tokens, if wrapper_version_token exists
    if wrapper_version_token:
        for k, v in tokens.items():
            if wrapper_version_token in v:
                xml_to_update[k].append({
                    'type': 'token',
                    'tag': v[wrapper_version_token]['tag'],
                    'value': 0
                })
    # finally, update each file separately
    for k, v in xml_files.items():
        update_xml(k, v, xml_to_update[k], wrapper_version_token, is_macro=(k != tool_path))
    info("Tool {} updated.".format(tool_path))
    return set(xml_files)
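# Illustrative shape of one xml_to_update entry produced by
# perform_required_update above; the file path, tags, and values are made
# up for illustration.
_example_xml_to_update = {
    "/path/to/tool.xml": [
        {"type": "requirement", "tag": "requirement", "value": "1.18"},
        {"type": "token", "tag": "token", "value": 0},
    ],
}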
def cli(ctx, path):
    """Create files to use GitHub/TravisCI testing.

    Setup files in a github tool repository to enable continuous
    integration testing.

    \b
        % planemo travis_init .
        % # setup Ubuntu 12.04 w/ dependencies in
        % vim .travis/setup_custom_dependencies.bash
        % git add .travis.yml .travis
        % git commit -m "Add Travis CI testing infrastructure for tools."
        % git push # and register repository @ http://travis-ci.org/

    These tests were inspired by work originally done and documented by
    Peter Cock here http://bit.ly/gxtravisci.
    """
    # TODO: Option --verbose_travis_yaml to unroll travis_test.sh line by line
    # and place all but last in 'install' section and last in 'script'. Would
    # require a yaml dependency though.
    dot_travis_dir = os.path.join(path, '.travis')
    if not os.path.exists(dot_travis_dir):
        os.makedirs(dot_travis_dir)
    travis_yml = os.path.join(path, ".travis.yml")
    setup_sh = os.path.join(dot_travis_dir, "setup_custom_dependencies.bash")
    if not os.path.exists(travis_yml):
        with open(travis_yml, "w") as fh:
            fh.write(TRAVIS_YML)
    else:
        warn(".travis.yml file already exists, not overwriting.")
    if not os.path.exists(setup_sh):
        with open(setup_sh, "w") as fh:
            fh.write(TRAVIS_SETUP)
    else:
        warn("%s already exists, not overwriting." % setup_sh)
    info(PREPARE_MESSAGE)
def cli(ctx, runnable_identifier, job_path, **kwds):
    """Planemo command for running tools and jobs.

    \b
        % planemo run cat1-tool.cwl cat-job.json
    """
    runnable = for_runnable_identifier(ctx, runnable_identifier, kwds)
    is_cwl = runnable.type.is_cwl_artifact
    kwds["cwl"] = is_cwl
    kwds["execution_type"] = "Run"
    if kwds.get("engine", None) is None:
        if is_cwl:
            kwds["engine"] = "cwltool"
        elif kwds.get('galaxy_url', None):
            kwds["engine"] = "external_galaxy"
        else:
            kwds["engine"] = "galaxy"
    with engine_context(ctx, **kwds) as engine:
        run_result = engine.run(runnable, job_path)
    if not run_result.was_successful:
        warn("Run failed [%s]" % unicodify(run_result))
    elif kwds.get('no_wait'):
        info('Run successfully executed - exiting without waiting for results.')
    else:
        output_json = kwds.get("output_json", None)
        outputs_dict = run_result.outputs_dict
        if output_json:
            with open(output_json, "w") as f:
                json.dump(outputs_dict, f)
        info('Run completed successfully.')
    report_data = StructuredData(data={'tests': [run_result.structured_data()], 'version': '0.1'})
    report_data.calculate_summary_data()
    return_value = handle_reports_and_summary(ctx, report_data.structured_data, kwds=kwds)
    ctx.exit(return_value)
def cli(ctx, path, **kwds):
    """Publish JSON test results as a sharable Gist.

    This will upload the JSON test results to Github as a Gist and produce
    a sharable URL.

    The sharable URL can be used to share an HTML version of the report
    that can be easily embedded in pull requests or commit messages.

    Requires a ~/.planemo.yml with a Github access token defined in a
    'github' section of that configuration file. An access token can be
    generated by going to https://github.com/settings/tokens.
    """
    file_url = github_util.publish_as_gist_file(ctx, path)
    share_url = PLANEMO_TEST_VIEWER_URL_TEMPLATE % file_url
    info("File published to Github Gist.")
    info("Raw URL: %s" % file_url)
    info("Share results with URL: %s" % share_url)
    markdown = "[View Tool Test Results](%s)" % share_url
    info("Embed results with markdown: %s" % markdown)
def cli(ctx, paths, recursive=False, fail_fast=True, download_cache=None):
    """Compile tool_dependencies.xml to bash script.

    An experimental approach parsing tool_dependencies.xml files into
    bash shell scripts, intended initially for use within Continuous
    Integration testing setups like TravisCI.

    Parses the ``tool_dependencies.xml`` files from the specified projects,
    and converts them into an installation bash script (``dep_install.sh``),
    and a shell script (``env.sh``) defining any new/edited environment
    variables.

    These are intended to be used via ``bash dep_install.sh`` (once), and
    as ``source env.sh`` prior to running any of the dependencies to set
    the environment variables within the current shell session.

    Both ``dep_install.sh`` and ``env.sh`` require ``$INSTALL_DIR`` be
    defined before running them, set to an existing directory with write
    permissions. Beware that if run on multiple tools, they can overwrite
    each other (for example if you have packages for different versions of
    the same tool). In this case make separate calls to
    ``planemo dependency_script`` and call the scripts with different
    installation directories.

    This command will download (and cache) any URLs specified via Galaxy
    download actions. This is in order to decompress them and determine the
    relevant sub-folder to change into as per the Tool Shed install
    mechanism, so that this can be recorded as a ``cd`` command in the bash
    script. The download cache used by ``planemo dependency_script`` and
    the resulting output script ``dep_install.sh`` defaults to
    ``./download_cache`` (under the current working directory), and can be
    set with ``$DOWNLOAD_CACHE``.

    If the ``tool_dependencies.xml`` file includes SHA256 checksums for
    downloads, these will be verified after downloading to the cache (by
    either ``planemo dependency_script`` or ``bash dep_install.sh``).

    This is experimental, and is initially intended for use within
    continuous integration testing setups like TravisCI to both verify the
    dependency installation recipe works, and to use this to run functional
    tests.
    """
    # TODO: Command line API for bash output filenames & install dir, cache.
    if download_cache:
        assert os.path.isdir(download_cache), download_cache
        # Effectively using this as a global variable, refactor this
        # once using a visitor pattern instead of action.to_bash()
        os.environ["DOWNLOAD_CACHE"] = os.path.abspath(download_cache)
        print("Using $DOWNLOAD_CACHE=%r" % os.environ["DOWNLOAD_CACHE"])
    failed = False
    with open("env.sh", "w") as env_sh_handle:
        with open("dep_install.sh", "w") as install_handle:
            install_handle.write(preamble_dep_install)
            env_sh_handle.write(preamble_env_sh)
            for path in paths:
                # ctx.log("Checking: %r" % path)
                if failed and fail_fast:
                    break
                for tool_dep in find_tool_dependencis_xml(path, recursive):
                    passed = process_tool_dependencies_xml(
                        tool_dep, install_handle, env_sh_handle)
                    if passed:
                        info('Processed %s' % tool_dep)
                    else:
                        failed = True
                        if fail_fast:
                            for line in [
                                '#' + '*' * 60,
                                'echo "WARNING: Skipping %s"' % tool_dep,
                                '#' + '*' * 60
                            ]:
                                install_handle.write(line + "\n")
                            break
                        # error("%s failed" % tool_dep)
            install_handle.write(final_dep_install)
    ctx.log("The End")
    if failed:
        error('Error processing one or more tool_dependencies.xml files.')
        sys.exit(1)
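# Hedged usage sketch for the generated scripts; the tool directory and
# install path below are illustrative.
#
#   $ planemo dependency_script my_tool_dir/
#   $ export INSTALL_DIR=/tmp/deps && mkdir -p "$INSTALL_DIR"
#   $ bash dep_install.sh
#   $ source env.sh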
def update(realized_repository):
    collected_data['results']['total'] += 1
    skip_upload = kwds["skip_upload"]
    skip_metadata = kwds["skip_metadata"]
    upload_ret_code = 0
    upload_ok = True
    captured_io = {}
    if not skip_upload:
        with captured_io_for_xunit(kwds, captured_io):
            upload_ret_code = shed.upload_repository(
                ctx, realized_repository, **kwds)
        upload_ok = not upload_ret_code
    repo_result = {
        'classname': realized_repository.name,
        'time': captured_io.get("time", None),
        'name': 'shed-update',
        'stdout': captured_io.get("stdout", None),
        'stderr': captured_io.get("stderr", None),
    }
    # Now that we've uploaded (or skipped appropriately), collect results.
    if upload_ret_code == 2:
        collected_data['results']['failures'] += 1
        repo_result.update({
            'errorType': 'FailedUpdate',
            'errorMessage': 'Failed to update repository as it does not exist in target ToolShed',
        })
        collected_data['tests'].append(repo_result)
        error("Failed to update repository; it does not exist "
              "in the target ToolShed.")
        return upload_ret_code
    exit = 0
    metadata_ok = True
    if not skip_metadata:
        repo_id = shed.handle_force_create(realized_repository, ctx, shed_context, **kwds)
        # failing to create the repo, give up
        if repo_id is None:
            exit = shed.report_non_existent_repository(realized_repository)
            metadata_ok = False
        else:
            metadata_ok = realized_repository.update(ctx, shed_context, repo_id)
    else:
        info("Skipping repository metadata update.")
    if not metadata_ok:
        error("Failed to update repository metadata.")
    if metadata_ok and upload_ok:
        pass
    elif upload_ok:
        collected_data['results']['skips'] += 1
        repo_result.update({
            'errorType': 'FailedMetadata',
            'errorMessage': 'Failed to update repository metadata',
        })
        if not skip_upload:
            error("Repo updated but metadata was not.")
        exit = exit or 1
    else:
        collected_data['results']['failures'] += 1
        repo_result.update({
            'errorType': 'FailedUpdate',
            'errorMessage': 'Failed to update repository',
        })
        error("Failed to update a repository.")
        exit = exit or 1
    collected_data['tests'].append(repo_result)
    return exit
def cli(ctx, paths, **kwds):  # noqa C901
    """Auto-update tool requirements by checking against Conda and updating
    if newer versions are available."""
    assert_tools = kwds.get("assert_tools", True)
    recursive = kwds.get("recursive", False)
    exit_codes = []
    modified_files = set()
    tools_to_skip = [line.rstrip() for line in open(kwds['skiplist'])] if kwds['skiplist'] else []
    runnables = for_paths(paths)

    if any(r.type in {RunnableType.galaxy_tool, RunnableType.directory} for r in runnables):
        # update Galaxy tools
        for (tool_path, tool_xml) in yield_tool_sources_on_paths(ctx, paths, recursive):
            if tool_path.split('/')[-1] in tools_to_skip:
                info("Skipping tool %s" % tool_path)
                continue
            info("Auto-updating tool %s" % tool_path)
            try:
                updated = autoupdate.autoupdate_tool(ctx, tool_path, modified_files=modified_files, **kwds)
                if updated:
                    modified_files.update(updated)
            except Exception as e:
                error(f"{tool_path} could not be updated - the following error was raised: {e.__str__()}")
            if handle_tool_load_error(tool_path, tool_xml):
                exit_codes.append(EXIT_CODE_GENERIC_FAILURE)
                continue
            else:
                exit_codes.append(EXIT_CODE_OK)

    workflows = [r for r in runnables if r.type == RunnableType.galaxy_workflow]
    modified_workflows = []
    if workflows:
        assert is_galaxy_engine(**kwds)
        if kwds.get("engine") != "external_galaxy":
            kwds["install_most_recent_revision"] = True
            kwds["install_resolver_dependencies"] = False
            kwds["install_repository_dependencies"] = False
            kwds['shed_install'] = True

        with engine_context(ctx, **kwds) as galaxy_engine:
            with galaxy_engine.ensure_runnables_served(workflows) as config:
                for workflow in workflows:
                    if config.updated_repos.get(workflow.path) or kwds.get("engine") == "external_galaxy":
                        info("Auto-updating workflow %s" % workflow.path)
                        updated_workflow = autoupdate.autoupdate_wf(ctx, config, workflow)
                        if workflow.path.endswith(".ga"):
                            with open(workflow.path, 'w') as f:
                                json.dump(updated_workflow, f, indent=4, sort_keys=True)
                        else:
                            format2_wrapper = from_galaxy_native(updated_workflow, json_wrapper=True)
                            with open(workflow.path, "w") as f:
                                f.write(format2_wrapper["yaml_content"])
                        modified_workflows.append(workflow.path)
                    else:
                        info("No newer tool versions were found, so the workflow was not updated.")

    if kwds['test']:
        if not modified_files:
            info("No tools were updated, so no tests were run.")
        else:
            with temp_directory(dir=ctx.planemo_directory) as temp_path:
                # only test tools in updated directories
                modified_paths = [
                    path for path, tool_xml in yield_tool_sources_on_paths(ctx, paths, recursive)
                    if path in modified_files
                ]
                info(f"Running tests for the following auto-updated tools: {', '.join(modified_paths)}")
                runnables = for_paths(modified_paths + modified_workflows, temp_path=temp_path)
                kwds["engine"] = "galaxy"
                return_value = test_runnables(ctx, runnables, original_paths=paths, **kwds)
                exit_codes.append(return_value)

    return coalesce_return_codes(exit_codes, assert_at_least_one=assert_tools)
def _diff_in(ctx, working, realized_repository, **kwds):
    path = realized_repository.path
    shed_target_source = kwds.get("shed_target_source")
    label_a = "_%s_" % (shed_target_source if shed_target_source else "workingdir")
    shed_target = kwds.get("shed_target", "B")
    if "/" in shed_target:
        shed_target = "custom_shed"
    label_b = "_%s_" % shed_target
    mine = os.path.join(working, label_a)
    other = os.path.join(working, label_b)
    shed_context = get_shed_context(ctx, read_only=True, **kwds)
    # In order to download the tarball, require repository ID...
    repo_id = realized_repository.find_repository_id(ctx, shed_context)
    if repo_id is None:
        error("shed_diff: Repository [%s] does not exist in the targeted Tool Shed." % realized_repository.name)
        # $ diff README.rst not_a_file 2&>1 /dev/null; echo $?
        # 2
        return 2
    info("Diffing repository [%s]" % realized_repository.name)
    download_tarball(
        ctx,
        shed_context,
        realized_repository,
        destination=other,
        clean=True,
        destination_is_pattern=False,
        **kwds
    )
    if shed_target_source:
        new_kwds = kwds.copy()
        new_kwds["shed_target"] = shed_target_source
        shed_context = get_shed_context(ctx, read_only=True, **new_kwds)
        download_tarball(
            ctx,
            shed_context,
            realized_repository,
            destination=mine,
            clean=True,
            destination_is_pattern=False,
            **new_kwds
        )
    else:
        tar_path = build_tarball(path)
        os.mkdir(mine)
        shell(['tar', '-xzf', tar_path, '-C', mine])
        # build_tarball returns a file path; rmtree would silently no-op on
        # it with ignore_errors=True and leak the temporary file.
        os.remove(tar_path)
    output = kwds.get("output")
    raw = kwds.get("raw", False)
    xml_diff = 0
    if not raw:
        if output:
            with open(output, "w") as f:
                xml_diff = diff_and_remove(working, label_a, label_b, f)
        else:
            xml_diff = diff_and_remove(working, label_a, label_b, sys.stdout)
    cmd = ['diff', '-r', label_a, label_b]
    if output:
        with open(output, 'ab') as fh:
            raw_diff = shell(cmd, cwd=working, stdout=fh)
    else:
        raw_diff = shell(cmd, cwd=working)
    exit = raw_diff or xml_diff
    if not raw:
        if xml_diff:
            ctx.vlog("One or more shed XML file(s) different!")
        if raw_diff:
            ctx.vlog("One or more non-shed XML file(s) different.")
        if not xml_diff and not raw_diff:
            ctx.vlog("No differences.")
    return exit
def lint_repository(ctx, realized_repository, **kwds):
    """Lint a realized shed repository.

    See :mod:`planemo.shed` for details on constructing a realized
    repository data structure.
    """
    # TODO: this really needs to start working with realized path.
    failed = False
    path = realized_repository.real_path
    info("Linting repository %s" % path)
    lint_args, lint_ctx = setup_lint(ctx, **kwds)
    lint_ctx.lint("lint_expansion", lint_expansion, realized_repository)
    lint_ctx.lint("lint_expected_files", lint_expected_files, realized_repository)
    lint_ctx.lint("lint_tool_dependencies_xsd", lint_tool_dependencies_xsd, path)
    lint_ctx.lint("lint_tool_dependencies_sha256sum", lint_tool_dependencies_sha256sum, path)
    lint_ctx.lint("lint_tool_dependencies_actions", lint_tool_dependencies_actions, path)
    lint_ctx.lint("lint_repository_dependencies", lint_repository_dependencies, path)
    lint_ctx.lint("lint_shed_yaml", lint_shed_yaml, realized_repository)
    lint_ctx.lint("lint_readme", lint_readme, path)
    if kwds["urls"]:
        lint_ctx.lint("lint_urls", lint_tool_dependencies_urls, path)
    if kwds["tools"]:
        for (tool_path, tool_source) in yield_tool_sources(ctx, path, recursive=True):
            info("+Linting tool %s" % tool_path)
            if handle_tool_load_error(tool_path, tool_source):
                failed = True
                continue
            lint_tool_source_with(lint_ctx, tool_source, extra_modules=lint_args["extra_modules"])
    if kwds["ensure_metadata"]:
        lint_ctx.lint("lint_shed_metadata", lint_shed_metadata, realized_repository)
    return handle_lint_complete(lint_ctx, lint_args, failed=failed)
def run_in_config(ctx, config, run=run_galaxy_command, test_data_target_dir=None, **kwds):
    """Run Galaxy tests with the run_tests.sh command.

    The specified `config` object describes the context for tool execution.
    """
    config_directory = config.config_directory
    html_report_file = kwds["test_output"]
    job_output_files = kwds.get("job_output_files", None)
    if job_output_files is None:
        job_output_files = os.path.join(config_directory, "jobfiles")
    xunit_report_file = _xunit_state(kwds, config)
    xunit_report_file_tracker = _FileChangeTracker(xunit_report_file)
    structured_report_file = _structured_report_file(kwds, config)
    structured_report_file_tracker = _FileChangeTracker(structured_report_file)
    info("Testing using galaxy_root %s", config.galaxy_root)
    # TODO: Allow running dockerized Galaxy here instead.
    server_ini = os.path.join(config_directory, "galaxy.ini")
    config.env["GALAXY_CONFIG_FILE"] = server_ini
    config.env["GALAXY_TEST_VERBOSE_ERRORS"] = "true"
    config.env["GALAXY_TEST_SAVE"] = job_output_files
    cd_to_galaxy_command = ['cd', config.galaxy_root]
    test_cmd = test_structures.GalaxyTestCommand(
        html_report_file,
        xunit_report_file,
        structured_report_file,
        failed=kwds.get("failed", False),
        installed=kwds.get("installed", False),
    ).build()
    setup_common_startup_args = ""
    if kwds.get("skip_venv", False):
        setup_common_startup_args = shell_join(
            'COMMON_STARTUP_ARGS=--skip-venv',
            'export COMMON_STARTUP_ARGS',
            'echo "Set COMMON_STARTUP_ARGS to ${COMMON_STARTUP_ARGS}"',
        )
    setup_venv_command = setup_venv(ctx, kwds)
    cmd = shell_join(
        cd_to_galaxy_command,
        setup_common_startup_args,
        setup_venv_command,
        test_cmd,
    )
    action = "Testing tools"
    return_code = run(ctx, cmd, config.env, action)
    if kwds.get('update_test_data', False):
        copy_tree(job_output_files, test_data_target_dir or config.test_data_dir)
    _check_test_outputs(xunit_report_file_tracker, structured_report_file_tracker)
    test_results = test_structures.GalaxyTestResults(
        structured_report_file,
        xunit_report_file,
        html_report_file,
        return_code,
    )
    structured_data = test_results.structured_data
    return handle_reports_and_summary(ctx, structured_data, exit_code=test_results.exit_code, kwds=kwds)
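# Hedged sketch of how shell_join is assumed to behave above: drop empty
# fragments and join the rest into one shell line. This is an assumption
# for illustration; the exact quoting rules of the real shell_join are not
# reproduced here.
def _shell_join_sketch(*fragments):
    parts = []
    for fragment in fragments:
        if not fragment:
            continue  # e.g. setup_common_startup_args may be ""
        if not isinstance(fragment, str):
            fragment = " ".join(fragment)  # e.g. ['cd', galaxy_root]
        parts.append(fragment)
    return " && ".join(parts)


assert _shell_join_sketch(["cd", "/srv/galaxy"], "", "sh run_tests.sh") == \
    "cd /srv/galaxy && sh run_tests.sh"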
def lint_repository(ctx, realized_repository, **kwds):
    # TODO: this really needs to start working with realized path.
    failed = False
    path = realized_repository.real_path
    info("Linting repository %s" % path)
    lint_args = build_lint_args(ctx, **kwds)
    lint_ctx = LintContext(lint_args["level"])
    lint_ctx.lint("lint_expansion", lint_expansion, realized_repository)
    lint_ctx.lint("lint_expected_files", lint_expected_files, realized_repository)
    lint_ctx.lint("lint_tool_dependencies_xsd", lint_tool_dependencies_xsd, path)
    lint_ctx.lint("lint_tool_dependencies_actions", lint_tool_dependencies_actions, path)
    lint_ctx.lint("lint_repository_dependencies", lint_repository_dependencies, path)
    lint_ctx.lint("lint_shed_yaml", lint_shed_yaml, realized_repository)
    lint_ctx.lint("lint_readme", lint_readme, path)
    if kwds["tools"]:
        for (tool_path, tool_xml) in yield_tool_xmls(ctx, path, recursive=True):
            info("+Linting tool %s" % tool_path)
            if handle_tool_load_error(tool_path, tool_xml):
                failed = True
                continue
            lint_xml_with(lint_ctx, tool_xml, extra_modules=lint_args["extra_modules"])
    if kwds["ensure_metadata"]:
        lint_ctx.lint("lint_shed_metadata", lint_shed_metadata, realized_repository)
    if not failed:
        failed = lint_ctx.failed(lint_args["fail_level"])
    if failed:
        error("Failed linting")
    return 1 if failed else 0
def run_in_config(ctx, config, **kwds):
    config_directory = config.config_directory
    html_report_file = kwds["test_output"]
    job_output_files = kwds.get("job_output_files", None)
    if job_output_files is None:
        job_output_files = os.path.join(config_directory, "jobfiles")
    xunit_supported, xunit_report_file = __xunit_state(kwds, config)
    structured_report_file = __structured_report_file(kwds, config)
    info("Testing using galaxy_root %s", config.galaxy_root)
    # TODO: Allow running dockerized Galaxy here instead.
    server_ini = os.path.join(config_directory, "galaxy.ini")
    config.env["GALAXY_CONFIG_FILE"] = server_ini
    config.env["GALAXY_TEST_VERBOSE_ERRORS"] = "true"
    config.env["GALAXY_TEST_SAVE"] = job_output_files
    cd_to_galaxy_command = "cd %s" % config.galaxy_root
    test_cmd = test_structures.GalaxyTestCommand(
        html_report_file,
        xunit_report_file,
        structured_report_file,
        failed=kwds.get("failed", False),
        installed=kwds.get("installed", False),
    ).build()
    cmd = "; ".join([
        cd_to_galaxy_command,
        # TODO: this should be moved to run_tests.sh to match run.sh.
        galaxy_run.ACTIVATE_COMMAND,
        test_cmd,
    ])
    action = "Testing tools"
    return_code = galaxy_run.run_galaxy_command(ctx, cmd, config.env, action)
    if kwds.get('update_test_data', False):
        update_cp_args = (job_output_files, config.test_data_dir)
        shell('cp -r "%s"/* "%s"' % update_cp_args)
    if xunit_report_file and (not os.path.exists(xunit_report_file)):
        warn(NO_XUNIT_MESSAGE)
        xunit_report_file = None
    test_results = test_structures.GalaxyTestResults(
        structured_report_file,
        xunit_report_file,
        html_report_file,
        return_code,
    )
    try:
        test_data = test_results.structured_data
        if 'test_output' in kwds:
            output_path = kwds['test_output']
            if output_path is not None:
                with open(output_path, 'w') as handle:
                    handle.write(build_report.build_report(test_data))
        for kw_name in ('markdown', 'text'):
            if 'test_output_%s' % kw_name in kwds:
                output_path = kwds['test_output_%s' % kw_name]
                if output_path is None:
                    continue
                with open(output_path, 'w') as handle:
                    handle.write(build_report.build_report(test_data, report_type=kw_name))
    except Exception:
        ctx.vlog("Problem producing test output.", exception=True)
    __handle_summary(test_results, **kwds)
    return return_code
def run_in_config(ctx, config, **kwds):
    config_directory = config.config_directory
    html_report_file = kwds["test_output"]
    job_output_files = kwds.get("job_output_files", None)
    if job_output_files is None:
        job_output_files = os.path.join(config_directory, "jobfiles")
    xunit_supported, xunit_report_file = __xunit_state(kwds, config)
    structured_report_file = __structured_report_file(kwds, config)
    info("Testing using galaxy_root %s", config.galaxy_root)
    # TODO: Allow running dockerized Galaxy here instead.
    server_ini = os.path.join(config_directory, "galaxy.ini")
    config.env["GALAXY_CONFIG_FILE"] = server_ini
    config.env["GALAXY_TEST_VERBOSE_ERRORS"] = "true"
    config.env["GALAXY_TEST_SAVE"] = job_output_files
    cd_to_galaxy_command = "cd %s" % config.galaxy_root
    test_cmd = test_structures.GalaxyTestCommand(
        html_report_file,
        xunit_report_file,
        structured_report_file,
        failed=kwds.get("failed", False),
        installed=kwds.get("installed", False),
    ).build()
    setup_common_startup_args = ""
    if kwds.get("skip_venv", False):
        setup_common_startup_args = (
            'COMMON_STARTUP_ARGS=--skip-venv; '
            'export COMMON_STARTUP_ARGS; '
            'echo "Set COMMON_STARTUP_ARGS to ${COMMON_STARTUP_ARGS}"'
        )
    setup_venv_command = setup_venv(ctx, kwds)
    cmd = shell_join(
        cd_to_galaxy_command,
        setup_common_startup_args,
        setup_venv_command,
        test_cmd,
    )
    action = "Testing tools"
    return_code = run_galaxy_command(ctx, cmd, config.env, action)
    if kwds.get('update_test_data', False):
        update_cp_args = (job_output_files, config.test_data_dir)
        shell('cp -r "%s"/* "%s"' % update_cp_args)
    if xunit_report_file and (not os.path.exists(xunit_report_file)):
        warn(NO_XUNIT_MESSAGE)
        xunit_report_file = None
    test_results = test_structures.GalaxyTestResults(
        structured_report_file,
        xunit_report_file,
        html_report_file,
        return_code,
    )
    test_data = test_results.structured_data
    handle_reports(ctx, test_data, kwds)
    __handle_summary(test_results, **kwds)
    return return_code
def write_tool_description(ctx, tool_description, **kwds):
    """Write a tool description to the file system guided by supplied CLI kwds."""
    tool_id = kwds.get("id")
    output = kwds.get("tool")
    if not output:
        extension = "cwl" if kwds.get("cwl") else "xml"
        output = "%s.%s" % (tool_id, extension)
    if not io.can_write_to_path(output, **kwds):
        ctx.exit(1)
    io.write_file(output, tool_description.contents)
    io.info("Tool written to %s" % output)
    for tool_file in tool_description.tool_files:
        if tool_file.contents is None:
            continue
        path = tool_file.filename
        if not io.can_write_to_path(path, **kwds):
            ctx.exit(1)
        io.write_file(path, tool_file.contents)
        io.info("Tool %s written to %s" % (tool_file.description, path))
    macros = kwds["macros"]
    macros_file = "macros.xml"
    if macros and not os.path.exists(macros_file):
        io.write_file(macros_file, tool_description.macro_contents)
    elif macros:
        io.info(REUSING_MACROS_MESSAGE)
    if tool_description.test_files:
        if not os.path.exists("test-data"):
            io.info("No test-data directory, creating one.")
            os.makedirs('test-data')
        for test_file in tool_description.test_files:
            io.info("Copying test-file %s" % test_file)
            try:
                shutil.copy(test_file, 'test-data')
            except Exception as e:
                io.info("Copy of %s failed: %s" % (test_file, e))
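# Hypothetical usage sketch for write_tool_description; the ctx object and
# every kwd value below are made up for illustration and do not reflect the
# real CLI defaults.
#
#   tool_description = tool_builder.build(id="seqtk_seq", name="Convert to FASTA",
#                                         macros=True)
#   write_tool_description(ctx, tool_description, id="seqtk_seq",
#                          tool=None, macros=True, force=True)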
def cli(ctx, path, **kwds):
    """Run the specified tool's tests in a Galaxy instance.

    All referenced tools (by default all the tools in the current working
    directory) will be tested and the results quickly summarized.

    To run these tests planemo needs a Galaxy instance to utilize, planemo
    will search parent directories to see if any is a Galaxy instance
    - but one can pick the Galaxy instance to use with the --galaxy_root
    option or force planemo to download a disposable instance with the
    ``--install_galaxy`` flag.

    In addition to the quick summary printed to the console - various
    detailed output summaries can be configured. ``tool_test_output.html``
    (settable via ``--test_output``) will contain a human consumable HTML
    report describing the test run. A JSON file (settable via
    ``--test_output_json`` and defaulting to ``tool_test_output.json``)
    will also be created. These files can be disabled by passing in empty
    arguments or globally by setting the values ``default_test_output``
    and/or ``default_test_output_json`` in ``~/.planemo.yml`` to ``null``.
    For continuous integration testing an xUnit-style report can be
    configured using the ``--test_output_xunit``.

    planemo uses temporarily generated config files and environment
    variables to attempt to shield this execution of Galaxy from manually
    launched runs against that same Galaxy root - but this may not be
    bullet proof yet so please be careful and do not try this against
    production Galaxy instances.
    """
    for name, default in OUTPUT_DFEAULTS.items():
        __populate_default_output(ctx, name, kwds, default)
    kwds["for_tests"] = True
    with galaxy_config.galaxy_config(ctx, path, **kwds) as config:
        config_directory = config.config_directory
        html_report_file = kwds["test_output"]
        job_output_files = kwds.get("job_output_files", None)
        if job_output_files is None:
            job_output_files = os.path.join(config_directory, "jobfiles")
        xunit_supported, xunit_report_file = __xunit_state(kwds, config)
        structured_report_file = __structured_report_file(kwds, config)
        info("Testing using galaxy_root %s", config.galaxy_root)
        # TODO: Allow running dockerized Galaxy here instead.
        server_ini = os.path.join(config_directory, "galaxy.ini")
        config.env["GALAXY_CONFIG_FILE"] = server_ini
        config.env["GALAXY_TEST_VERBOSE_ERRORS"] = "true"
        config.env["GALAXY_TEST_SAVE"] = job_output_files
        cd_to_galaxy_command = "cd %s" % config.galaxy_root
        cmd = "; ".join([
            cd_to_galaxy_command,
            # TODO: this should be moved to run_tests.sh to match run.sh.
            galaxy_run.ACTIVATE_COMMAND,
            __run_tests_cmd(
                html_report_file,
                xunit_report_file,
                structured_report_file,
            ),
        ])
        action = "Testing tools"
        return_code = galaxy_run.run_galaxy_command(ctx, cmd, config.env, action)
        if kwds.get('update_test_data', False):
            update_cp_args = (job_output_files, config.test_data_dir)
            shell('cp -r "%s"/* "%s"' % update_cp_args)
        if xunit_report_file and (not os.path.exists(xunit_report_file)):
            warn(NO_XUNIT_MESSAGE)
            xunit_report_file = None
        test_results = galaxy_test.GalaxyTestResults(
            structured_report_file,
            xunit_report_file,
            html_report_file,
            return_code,
        )
        try:
            test_data = test_results.structured_data
            new_report = build_report.build_report(test_data)
            open(test_results.output_html_path, "w").write(new_report)
        except Exception:
            pass
        __handle_summary(test_results, **kwds)
        if return_code:
            sys.exit(1)
def _diff_in(ctx, working, realized_repository, **kwds):
    path = realized_repository.path
    shed_target_source = kwds.get("shed_target_source", None)
    label_a = "_%s_" % (shed_target_source if shed_target_source else "workingdir")
    shed_target = kwds.get("shed_target", "B")
    if "/" in shed_target:
        shed_target = "custom_shed"
    label_b = "_%s_" % shed_target
    mine = os.path.join(working, label_a)
    other = os.path.join(working, label_b)
    shed_context = get_shed_context(ctx, read_only=True, **kwds)
    # In order to download the tarball, require repository ID...
    repo_id = realized_repository.find_repository_id(ctx, shed_context)
    if repo_id is None:
        error("shed_diff: Repository [%s] does not exist in the targeted Tool Shed."
              % realized_repository.name)
        # $ diff README.rst not_a_file 2&>1 /dev/null; echo $?
        # 2
        return 2
    info("Diffing repository [%s]" % realized_repository.name)
    download_tarball(ctx, shed_context, realized_repository,
                     destination=other,
                     clean=True,
                     destination_is_pattern=False,
                     **kwds)
    if shed_target_source:
        new_kwds = kwds.copy()
        new_kwds["shed_target"] = shed_target_source
        shed_context = get_shed_context(ctx, read_only=True, **new_kwds)
        download_tarball(ctx, shed_context, realized_repository,
                         destination=mine,
                         clean=True,
                         destination_is_pattern=False,
                         **new_kwds)
    else:
        tar_path = build_tarball(path)
        cmd_template = 'mkdir "%s"; tar -xzf "%s" -C "%s"; rm -rf %s'
        shell(cmd_template % (mine, tar_path, mine, tar_path))
    output = kwds.get("output", None)
    raw = kwds.get("raw", False)
    xml_diff = 0
    if not raw:
        if output:
            with open(output, "w") as f:
                xml_diff = diff_and_remove(working, label_a, label_b, f)
        else:
            xml_diff = diff_and_remove(working, label_a, label_b, sys.stdout)
    cmd = 'cd "%s"; diff -r %s %s' % (working, label_a, label_b)
    if output:
        cmd += " >> '%s'" % output
    raw_diff = shell(cmd)
    exit = raw_diff or xml_diff
    if not raw:
        if xml_diff:
            ctx.vlog("One or more shed XML file(s) different!")
        if raw_diff:
            ctx.vlog("One or more non-shed XML file(s) different.")
        if not xml_diff and not raw_diff:
            ctx.vlog("No differences.")
    return exit
def lint_repository(ctx, realized_repository, **kwds):
    """Lint a realized shed repository.

    See :mod:`planemo.shed` for details on constructing a realized
    repository data structure.
    """
    failed = False
    path = realized_repository.real_path
    info("Linting repository %s" % path)
    lint_args, lint_ctx = setup_lint(ctx, **kwds)
    lint_ctx.lint("lint_expansion", lint_expansion, realized_repository)
    lint_ctx.lint("lint_expected_files", lint_expected_files, realized_repository)
    lint_ctx.lint("lint_tool_dependencies_xsd", lint_tool_dependencies_xsd, realized_repository)
    lint_ctx.lint("lint_tool_dependencies_sha256sum", lint_tool_dependencies_sha256sum, realized_repository)
    lint_ctx.lint("lint_tool_dependencies_actions", lint_tool_dependencies_actions, realized_repository)
    lint_ctx.lint("lint_repository_dependencies", lint_repository_dependencies, realized_repository)
    lint_ctx.lint("lint_shed_yaml", lint_shed_yaml, realized_repository)
    lint_ctx.lint("lint_readme", lint_readme, realized_repository)
    if kwds["urls"]:
        lint_ctx.lint("lint_urls", lint_tool_dependencies_urls, realized_repository)
    if kwds["tools"]:
        tools_failed = lint_repository_tools(ctx, realized_repository, lint_ctx, lint_args)
        failed = failed or tools_failed
    if kwds["ensure_metadata"]:
        lint_ctx.lint("lint_shed_metadata", lint_shed_metadata, realized_repository)
    return handle_lint_complete(lint_ctx, lint_args, failed=failed)