def cli(ctx, paths, **kwds):
    """Run specified tool's tests within Galaxy.

    All referenced tools (by default all the tools in the current working
    directory) will be tested and the results quickly summarized.

    To run these tests planemo needs a Galaxy instance to utilize, planemo
    will search parent directories to see if any is a Galaxy instance
    - but one can pick the Galaxy instance to use with the --galaxy_root
    option or force planemo to download a disposable instance with the
    ``--install_galaxy`` flag.

    In addition to the quick summary printed to the console - various
    detailed output summaries can be configured. ``tool_test_output.html``
    (settable via ``--test_output``) will contain a human consumable HTML
    report describing the test run. A JSON file (settable via
    ``--test_output_json`` and defaulting to ``tool_test_output.json``)
    will also be created. These files can be disabled by passing in empty
    arguments or globally by setting the values ``default_test_output``
    and/or ``default_test_output_json`` in ``~/.planemo.yml`` to ``null``.
    For continuous integration testing a xUnit-style report can be
    configured using the ``--test_output_xunit``.

    planemo uses temporarily generated config files and environment
    variables to attempt to shield this execution of Galaxy from manually
    launched runs against that same Galaxy root - but this may not be
    bullet proof yet so please be careful and do not try this against
    production Galaxy instances.
    """
    with temp_directory(dir=ctx.planemo_directory) as temp_path:
        # Create temp dir(s) outside of temp, docker can't mount $TEMPDIR on OSX
        runnables = for_paths(paths, temp_path=temp_path)
        # Default the engine from the artifact types when none was requested:
        # pure-CWL inputs run under cwltool, anything else under Galaxy.
        is_cwl = all(r.type in {RunnableType.cwl_tool, RunnableType.cwl_workflow} for r in runnables)
        if kwds.get("engine") is None:
            kwds["engine"] = "galaxy" if not is_cwl else "cwltool"
        engine_type = kwds["engine"]
        # Runnable types the traditional run_tests.sh path can handle; any
        # other type - or a non-default engine - forces the engine-based path.
        test_engine_testable = {RunnableType.galaxy_tool, RunnableType.galaxy_datamanager, RunnableType.directory}
        enable_test_engines = any(r.type not in test_engine_testable for r in runnables)
        enable_test_engines = enable_test_engines or engine_type != "galaxy"
        if enable_test_engines:
            ctx.vlog("Using test engine type %s" % engine_type)
            with engine_context(ctx, **kwds) as engine:
                test_data = engine.test(runnables)
                return_value = handle_reports_and_summary(ctx, test_data.structured_data, kwds=kwds)
        else:
            ctx.vlog("Running traditional Galaxy tool tests using run_tests.sh in Galaxy root %s" % engine_type)
            kwds["for_tests"] = True
            if kwds.get('update_test_data'):
                # --update_test_data writes back into the original tool dirs,
                # so resolve the target dir from the non-copied runnables.
                non_copied_runnables = for_paths(paths)
                kwds['test_data_target_dir'] = _find_test_data(non_copied_runnables, **kwds)
            with galaxy_config(ctx, runnables, **kwds) as config:
                return_value = run_in_config(ctx, config, **kwds)
    ctx.exit(return_value)
def test_galaxy_workflow_step_failed():
    """Check error reporting for a workflow whose first step's job fails."""
    ctx = t_context()
    workflow_path = os.path.join(TEST_DATA_DIR, "wf_failed_step.ga")
    runnables = for_paths([workflow_path])
    with NamedTemporaryFile(prefix="result_json") as json_out:
        kwds = {
            "engine": "galaxy",
            "no_dependency_resolution": True,
            "paste_test_data_paths": False,
            "extra_tools": ['$GALAXY_FUNCTIONAL_TEST_TOOLS'],
            "test_output_json": json_out.name,
            "galaxy_branch": target_galaxy_branch(),
        }
        assert t_runnables(ctx, runnables, **kwds) == 1
        report = json.load(json_out)
        test_data = report['tests'][0]['data']
        assert test_data['status'] == 'error'
        assert test_data['execution_problem']
        steps = test_data['invocation_details']['steps']
        assert len(steps) == 2
        failed_step, downstream_step = steps.values()
        # The step itself scheduled, but its job errored with exit code 127.
        assert failed_step['state'] == 'scheduled'
        failed_job = failed_step['jobs'][0]
        assert failed_job['exit_code'] == 127
        assert failed_job['state'] == 'error'
        # The dependent step's job is paused by the upstream failure.
        assert downstream_step['state'] == 'scheduled'
        assert downstream_step['jobs'][0]['state'] == 'paused'
def cli(ctx, uris, **kwds):
    """Launch Galaxy instance with specified tools.

    The Galaxy tool panel will include just the referenced tool or tools (by
    default all the tools in the current working directory) and the upload
    tool. planemo will search parent directories to see if any is a Galaxy
    instance - but one can pick the Galaxy instance to use with the
    ``--galaxy_root`` option or force planemo to download a disposable
    instance with the ``--install_galaxy`` flag.

    ``planemo`` will run the Galaxy instance in an existing virtualenv if one
    exists in a ``.venv`` directory in the specified ``--galaxy_root``.
    Otherwise, the Galaxy instance will run in a clean virtualenv created in
    ``/tmp``.

    ``planemo`` uses temporarily generated config files and environment
    variables to attempt to shield this execution of Galaxy from manually
    launched runs against that same Galaxy root - but this may not be bullet
    proof yet, so please be careful and do not try this against a production
    Galaxy instance.
    """
    # Resolve any remote URIs to local paths before building runnables.
    local_paths = uris_to_paths(ctx, uris)
    galaxy_serve(ctx, for_paths(local_paths), **kwds)
def _cli(ctx, paths, glx, user_key, **kwds) -> Dict:
    """
    Run specified tool's tests against an external Galaxy instance.

    Returns a dict of the status and history_name of the finished workflow.
    See https://github.com/galaxyproject/planemo/blob/master/planemo/commands/cmd_test.py
    """
    # Unique history name so parallel benchmark runs don't collide.
    history_name = "galaxy_benchmarker-" + str(time.time_ns()) + str(
        random.randrange(0, 99999))
    kwds.update({
        "engine": "external_galaxy",
        "shed_install": False,
        "galaxy_url": glx.url,
        "galaxy_admin_key": glx.user_key,
        "history_name": history_name,
    })
    if user_key is not None:
        kwds["galaxy_user_key"] = user_key
    runnables = for_paths(paths)
    try:
        with engine_context(ctx, **kwds) as engine:
            test_data = engine.test(runnables)
            exit_code = handle_reports_and_summary(
                ctx, test_data.structured_data, kwds=kwds)
            status = "success" if exit_code == 0 else "error"
    except Exception as e:
        log.error("There was an error: {e}".format(e=e))
        status = "error"
    return {"status": status, "history_name": history_name}
def test_toil_tests():
    """Run the int_tool CWL artifact's tests under the toil engine."""
    ctx = t_context()
    tool_path = os.path.join(TEST_DATA_DIR, "int_tool.cwl")
    runnables = for_paths([tool_path])
    assert t_runnables(ctx, runnables, engine="toil") == 0
def test_galaxy_workflow_non_data_inputs():
    """Run a Galaxy workflow that takes a non-data (integer) input."""
    ctx = t_context()
    workflow_path = os.path.join(TEST_DATA_DIR, "wf9-int-input.gxwf.yml")
    exit_code = t_runnables(
        ctx,
        for_paths([workflow_path]),
        engine="galaxy",
        no_dependency_resolution=True,
        paste_test_data_paths=False,
        galaxy_branch=target_galaxy_branch(),
    )
    assert exit_code == 0
def cli(ctx, **kwds):
    """(Experimental) Launch Galaxy with the Tool Factory 2 available.

    For more information about the Galaxy Tool Factory see the publication
    Creating reusable tools from scripts: the Galaxy Tool Factory by Lazarus
    et. al. (10.1093/bioinformatics/bts573). Available at
    http://www.ncbi.nlm.nih.gov/pubmed/23024011.
    """
    # The bundled tool factory lives in planemo_ext, relative to this module.
    tf_dir = os.path.join(
        os.path.dirname(__file__), '..', '..', 'planemo_ext', 'tool_factory_2')
    serve(ctx, for_paths([tf_dir]), **kwds)
def cli(ctx, **kwds):
    """(Experimental) Launch Galaxy with Tool Factory 2.

    For more information about the Galaxy Tool Factory see the publication
    Creating reusable tools from scripts: the Galaxy Tool Factory by Lazarus
    et. al. (10.1093/bioinformatics/bts573). Available at
    http://www.ncbi.nlm.nih.gov/pubmed/23024011.
    """
    here = os.path.dirname(__file__)
    # Serve the tool factory shipped alongside planemo in planemo_ext.
    tool_factory_dir = os.path.join(here, '..', '..', 'planemo_ext', 'tool_factory_2')
    runnables = for_paths([tool_factory_dir])
    serve(ctx, runnables, **kwds)
def cli(ctx, paths, **kwds):
    """Auto-update tool requirements by checking against Conda and updating
    if newer versions are available.

    Walks every tool source found under ``paths`` (recursively with
    ``--recursive``), skipping tools whose file name appears in the optional
    skiplist file, and attempts to auto-update each. With ``--test``, tests
    are then run for the tools that were actually modified. Returns a
    coalesced exit code over all processed tools.
    """
    assert_tools = kwds.get("assert_tools", True)
    recursive = kwds.get("recursive", False)
    exit_codes = []
    modified_files = set()
    # Fix: read the skiplist via a context manager - the previous inline
    # open() never closed the file handle.
    if kwds['skiplist']:
        with open(kwds['skiplist']) as skiplist_fh:
            tools_to_skip = [line.rstrip() for line in skiplist_fh]
    else:
        tools_to_skip = []
    for (tool_path, tool_xml) in yield_tool_sources_on_paths(ctx, paths, recursive):
        # Skiplist matches on the tool's file name only.
        if tool_path.split('/')[-1] in tools_to_skip:
            info("Skipping tool %s" % tool_path)
            continue
        info("Auto-updating tool %s" % tool_path)
        try:
            updated = autoupdate.autoupdate_tool(ctx, tool_path, modified_files=modified_files, **kwds)
            if updated:
                modified_files.update(updated)
        except Exception as e:
            # Best-effort: report the failure and keep processing other tools.
            error(
                "{} could not be updated - the following error was raised: {}".
                format(tool_path, e.__str__()))
        if handle_tool_load_error(tool_path, tool_xml):
            exit_codes.append(EXIT_CODE_GENERIC_FAILURE)
            continue
        else:
            exit_codes.append(EXIT_CODE_OK)
    if kwds['test']:
        if not modified_files:
            info("No tools were updated, so no tests were run.")
        else:
            with temp_directory(dir=ctx.planemo_directory) as temp_path:
                # only test tools in updated directories
                modified_paths = [
                    path for path, tool_xml in yield_tool_sources_on_paths(
                        ctx, paths, recursive) if path in modified_files
                ]
                info(
                    f"Running tests for the following auto-updated tools: {', '.join(modified_paths)}"
                )
                runnables = for_paths(modified_paths, temp_path=temp_path)
                kwds["engine"] = "galaxy"
                return_value = test_runnables(ctx, runnables, original_paths=paths, **kwds)
                exit_codes.append(return_value)
    return coalesce_return_codes(exit_codes, assert_at_least_one=assert_tools)
def cli(ctx, paths, **kwds):
    """Run specified tool's tests within Galaxy.

    All referenced tools (by default all the tools in the current working
    directory) will be tested and the results quickly summarized.

    To run these tests planemo needs a Galaxy instance to utilize, planemo
    will search parent directories to see if any is a Galaxy instance
    - but one can pick the Galaxy instance to use with the --galaxy_root
    option or force planemo to download a disposable instance with the
    ``--install_galaxy`` flag.

    In addition to the quick summary printed to the console - various
    detailed output summaries can be configured. ``tool_test_output.html``
    (settable via ``--test_output``) will contain a human consumable HTML
    report describing the test run. A JSON file (settable via
    ``--test_output_json`` and defaulting to ``tool_test_output.json``)
    will also be created. These files can be disabled by passing in empty
    arguments or globally by setting the values ``default_test_output``
    and/or ``default_test_output_json`` in ``~/.planemo.yml`` to ``null``.
    For continuous integration testing a xUnit-style report can be
    configured using the ``--test_output_xunit``.

    planemo uses temporarily generated config files and environment
    variables to attempt to shield this execution of Galaxy from manually
    launched runs against that same Galaxy root - but this may not be
    bullet proof yet so please be careful and do not try this against
    production Galaxy instances.
    """
    with temp_directory(dir=ctx.planemo_directory) as temp_path:
        # Create temp dir(s) outside of temp, docker can't mount $TEMPDIR on OSX
        runnables = for_paths(paths, temp_path=temp_path)
        # Pick a default engine when none was requested explicitly:
        # pure-CWL inputs run under cwltool, an explicit --galaxy_url targets
        # an external Galaxy, everything else uses a managed Galaxy instance.
        is_cwl = all(
            r.type in {RunnableType.cwl_tool, RunnableType.cwl_workflow}
            for r in runnables)
        if kwds.get("engine", None) is None:
            if is_cwl:
                kwds["engine"] = "cwltool"
            elif kwds.get('galaxy_url', None):
                kwds["engine"] = "external_galaxy"
            else:
                kwds["engine"] = "galaxy"
        # Delegate the actual test execution and report handling.
        return_value = test_runnables(ctx, runnables, original_paths=paths, **kwds)
    ctx.exit(return_value)
def cli(ctx, paths, **kwds):
    """Run specified tool's tests within Galaxy.

    All referenced tools (by default all the tools in the current working
    directory) will be tested and the results quickly summarized.

    To run these tests planemo needs a Galaxy instance to utilize, planemo
    will search parent directories to see if any is a Galaxy instance
    - but one can pick the Galaxy instance to use with the --galaxy_root
    option or force planemo to download a disposable instance with the
    ``--install_galaxy`` flag.

    In addition to the quick summary printed to the console - various
    detailed output summaries can be configured. ``tool_test_output.html``
    (settable via ``--test_output``) will contain a human consumable HTML
    report describing the test run. A JSON file (settable via
    ``--test_output_json`` and defaulting to ``tool_test_output.json``)
    will also be created. These files can be disabled by passing in empty
    arguments or globally by setting the values ``default_test_output``
    and/or ``default_test_output_json`` in ``~/.planemo.yml`` to ``null``.
    For continuous integration testing a xUnit-style report can be
    configured using the ``--test_output_xunit``.

    planemo uses temporarily generated config files and environment
    variables to attempt to shield this execution of Galaxy from manually
    launched runs against that same Galaxy root - but this may not be
    bullet proof yet so please be careful and do not try this against
    production Galaxy instances.
    """
    runnables = for_paths(paths)
    # Default to cwltool when every artifact is CWL, otherwise Galaxy.
    only_cwl = all(
        r.type in [RunnableType.cwl_tool, RunnableType.cwl_workflow]
        for r in runnables
    )
    if kwds.get("engine", None) is None:
        kwds["engine"] = "cwltool" if only_cwl else "galaxy"
    engine_type = kwds["engine"]
    # The traditional run_tests.sh path only supports plain Galaxy tools
    # (or directories of them) under the default "galaxy" engine; anything
    # else goes through the engine abstraction.
    legacy_testable = [RunnableType.galaxy_tool, RunnableType.directory]
    use_test_engines = engine_type != "galaxy" or any(
        r.type not in legacy_testable for r in runnables
    )
    if use_test_engines:
        ctx.vlog("Using test engine type %s" % engine_type)
        with engine_context(ctx, **kwds) as engine:
            test_data = engine.test(runnables)
            return_value = handle_reports_and_summary(
                ctx, test_data.structured_data, kwds=kwds)
    else:
        ctx.vlog("Running traditional Galaxy tool tests using run_tests.sh in Galaxy root %s" % engine_type)
        kwds["for_tests"] = True
        with galaxy_config(ctx, runnables, **kwds) as config:
            return_value = run_in_config(ctx, config, **kwds)
    ctx.exit(return_value)
def test_galaxy_workflow_collection_output_fail():
    """The collection-output workflow test is expected to fail (exit 1)."""
    ctx = t_context()
    workflow = os.path.join(TEST_DATA_DIR, "wf7-collection-output-fail.gxwf.yml")
    pair_tool = os.path.join(TEST_DATA_DIR, "collection_creates_pair_2.xml")
    exit_code = t_runnables(
        ctx,
        for_paths([workflow]),
        engine="galaxy",
        no_dependency_resolution=True,
        paste_test_data_paths=False,
        galaxy_branch=target_galaxy_branch(),
        extra_tools=[pair_tool],
    )
    assert exit_code == 1
def test_galaxy_wf_tests():
    """Run the demo workflow's tests with its two demo tools available."""
    ctx = t_context()
    demo_dir = os.path.join(PROJECT_TEMPLATES_DIR, "demo")
    demo_tools = [
        os.path.join(demo_dir, "randomlines.xml"),
        os.path.join(demo_dir, "cat.xml"),
    ]
    workflow = os.path.join(TEST_DATA_DIR, "wf1.gxwf.yml")
    exit_code = t_runnables(
        ctx,
        for_paths([workflow]),
        engine="galaxy",
        no_dependency_resolution=True,
        paste_test_data_paths=False,
        galaxy_branch=target_galaxy_branch(),
        extra_tools=demo_tools,
    )
    assert exit_code == 0
def test_galaxy_workflow_nested_collection_inputs():
    """Run a workflow that consumes a nested collection input."""
    ctx = t_context()
    workflow = os.path.join(TEST_DATA_DIR, "wf8-collection-nested-input.gxwf.yml")
    extra_tools = [
        os.path.join(TEST_DATA_DIR, "cat_pair.xml"),
        os.path.join(TEST_DATA_DIR, "cat_list.xml"),
    ]
    exit_code = t_runnables(
        ctx,
        for_paths([workflow]),
        engine="galaxy",
        no_dependency_resolution=True,
        paste_test_data_paths=False,
        galaxy_branch=target_galaxy_branch(),
        extra_tools=extra_tools,
    )
    assert exit_code == 0
def cli(ctx, paths, **kwds):
    """Run specified tool's tests within Galaxy.

    All referenced tools (by default all the tools in the current working
    directory) will be tested and the results quickly summarized.

    To run these tests planemo needs a Galaxy instance to utilize, planemo
    will search parent directories to see if any is a Galaxy instance
    - but one can pick the Galaxy instance to use with the --galaxy_root
    option or force planemo to download a disposable instance with the
    ``--install_galaxy`` flag.

    In addition to the quick summary printed to the console - various
    detailed output summaries can be configured. ``tool_test_output.html``
    (settable via ``--test_output``) will contain a human consumable HTML
    report describing the test run. A JSON file (settable via
    ``--test_output_json`` and defaulting to ``tool_test_output.json``)
    will also be created. These files can be disabled by passing in empty
    arguments or globally by setting the values ``default_test_output``
    and/or ``default_test_output_json`` in ``~/.planemo.yml`` to ``null``.
    For continuous integration testing a xUnit-style report can be
    configured using the ``--test_output_xunit``.

    planemo uses temporarily generated config files and environment
    variables to attempt to shield this execution of Galaxy from manually
    launched runs against that same Galaxy root - but this may not be
    bullet proof yet so please be careful and do not try this against
    production Galaxy instances.
    """
    runnables = for_paths(paths)
    # The classic run_tests.sh path only handles plain Galaxy tools run
    # under the default "galaxy" engine; anything else uses the (beta)
    # engine-based testing.
    classic_types = [RunnableType.galaxy_tool, RunnableType.directory]
    enable_beta_test = kwds.get("engine", "galaxy") != "galaxy" or any(
        r.type not in classic_types for r in runnables
    )
    if enable_beta_test:
        info("Enable beta testing mode for testing.")
        with engine_context(ctx, **kwds) as engine:
            test_data = engine.test(runnables)
            return_value = handle_reports_and_summary(
                ctx, test_data.structured_data, kwds=kwds)
    else:
        kwds["for_tests"] = True
        with galaxy_config(ctx, runnables, **kwds) as config:
            return_value = run_in_config(ctx, config, **kwds)
    ctx.exit(return_value)
def test_runnables(ctx, runnables, original_paths=None, **kwds):
    """Return exit code indicating test or failure.

    Dispatches either to an engine-based test run (the general path) or to
    the traditional ``run_tests.sh`` flow, which is only used when every
    runnable is of a type it supports and the engine is plain "galaxy".
    ``original_paths`` is only consulted for ``update_test_data`` so that
    updated test-data lands in the original (non-copied) directories.
    """
    engine_type = kwds["engine"]
    # Runnable types the traditional run_tests.sh path can handle; any other
    # type - or a non-default engine - forces the engine-based path.
    test_engine_testable = {RunnableType.galaxy_tool, RunnableType.galaxy_datamanager, RunnableType.directory}
    enable_test_engines = any(r.type not in test_engine_testable for r in runnables)
    enable_test_engines = enable_test_engines or engine_type != "galaxy"
    if enable_test_engines:
        ctx.vlog("Using test engine type %s" % engine_type)
        with engine_context(ctx, **kwds) as engine:
            test_data = engine.test(runnables)
            ctx.vlog("engine.test returning [%s]" % test_data)
            return_value = handle_reports_and_summary(ctx, test_data.structured_data, kwds=kwds)
    else:
        ctx.vlog("Running traditional Galaxy tool tests using run_tests.sh in Galaxy root %s" % engine_type)
        kwds["for_tests"] = True
        if kwds.get('update_test_data'):
            # Resolve the test-data target dir against the original paths so
            # updates are written back to the real tool directories rather
            # than temp copies.
            non_copied_runnables = for_paths(original_paths)
            kwds['test_data_target_dir'] = _find_test_data(non_copied_runnables, **kwds)
        with galaxy_config(ctx, runnables, **kwds) as config:
            return_value = run_in_config(ctx, config, **kwds)
    return return_value
def test_galaxy_workflow_tags():
    """Run the tags-and-rules workflow; dump the JSON report if it errors."""
    with NamedTemporaryFile(prefix="data_manager_test_json") as json_out:
        ctx = t_context()
        workflow = os.path.join(TEST_DATA_DIR, "wf10-tags-and-rules.gxwf.yml")
        cat_list_tool = os.path.join(TEST_DATA_DIR, "cat_list.xml")
        runnables = for_paths([workflow])
        try:
            exit_code = t_runnables(
                ctx,
                runnables,
                engine="galaxy",
                no_dependency_resolution=True,
                paste_test_data_paths=False,
                galaxy_branch="dev",
                extra_tools=[cat_list_tool],
                test_output_json=json_out.name,
            )
            assert exit_code == 0
        except Exception:
            # Surface the structured report to aid debugging, then re-raise.
            with open(json_out.name, "r") as f:
                print(f.read())
            raise
def install_workflow(workflow_path, glx_instance):
    """
    Installs the tools necessary to run a given workflow (given as a path
    to the workflow) into the supplied Galaxy instance.

    NOTE(review): elsewhere in this codebase ``for_paths`` is always called
    with a *list* of paths; here it receives the bare ``workflow_path`` -
    confirm that ``for_paths`` also accepts a single path string.
    """
    runnable = for_paths(workflow_path)[0]
    # NOTE(review): the meaning of the third (False) argument isn't visible
    # here - confirm against install_shed_repos' signature.
    install_shed_repos(runnable, glx_instance, False)
def cli(ctx, paths, **kwds):  # noqa C901
    """Auto-update tool requirements by checking against Conda and updating
    if newer versions are available.

    Handles both Galaxy tools (updated in place via ``autoupdate_tool``) and
    Galaxy workflows (updated through a served Galaxy engine and rewritten to
    disk). With ``--test``, tests are run for artifacts that were modified.
    Returns a coalesced exit code over all processed artifacts.
    """
    assert_tools = kwds.get("assert_tools", True)
    recursive = kwds.get("recursive", False)
    exit_codes = []
    modified_files = set()
    # Fix: read the skiplist via a context manager - the previous inline
    # open() never closed the file handle.
    if kwds['skiplist']:
        with open(kwds['skiplist']) as skiplist_fh:
            tools_to_skip = [line.rstrip() for line in skiplist_fh]
    else:
        tools_to_skip = []
    runnables = for_paths(paths)

    if any(r.type in {RunnableType.galaxy_tool, RunnableType.directory} for r in runnables):
        # update Galaxy tools
        for (tool_path, tool_xml) in yield_tool_sources_on_paths(ctx, paths, recursive):
            # Skiplist matches on the tool's file name only.
            if tool_path.split('/')[-1] in tools_to_skip:
                info("Skipping tool %s" % tool_path)
                continue
            info("Auto-updating tool %s" % tool_path)
            try:
                updated = autoupdate.autoupdate_tool(
                    ctx, tool_path, modified_files=modified_files, **kwds)
                if updated:
                    modified_files.update(updated)
            except Exception as e:
                # Best-effort: report the failure and keep processing.
                error(
                    f"{tool_path} could not be updated - the following error was raised: {e.__str__()}"
                )
            if handle_tool_load_error(tool_path, tool_xml):
                exit_codes.append(EXIT_CODE_GENERIC_FAILURE)
                continue
            else:
                exit_codes.append(EXIT_CODE_OK)

    workflows = [r for r in runnables if r.type == RunnableType.galaxy_workflow]
    modified_workflows = []
    if workflows:
        assert is_galaxy_engine(**kwds)
        if kwds.get("engine") != "external_galaxy":
            # For a managed Galaxy, install the newest revisions of the
            # workflow's repositories before attempting the update.
            kwds["install_most_recent_revision"] = True
            kwds["install_resolver_dependencies"] = False
            kwds["install_repository_dependencies"] = False
            kwds['shed_install'] = True

        with engine_context(ctx, **kwds) as galaxy_engine:
            with galaxy_engine.ensure_runnables_served(workflows) as config:
                for workflow in workflows:
                    if config.updated_repos.get(workflow.path) or kwds.get(
                            "engine") == "external_galaxy":
                        info("Auto-updating workflow %s" % workflow.path)
                        updated_workflow = autoupdate.autoupdate_wf(ctx, config, workflow)
                        if workflow.path.endswith(".ga"):
                            # Native Galaxy workflow - rewrite as JSON.
                            with open(workflow.path, 'w') as f:
                                json.dump(updated_workflow, f, indent=4, sort_keys=True)
                        else:
                            # Format 2 (gxformat2) workflow - convert back to YAML.
                            format2_wrapper = from_galaxy_native(
                                updated_workflow, json_wrapper=True)
                            with open(workflow.path, "w") as f:
                                f.write(format2_wrapper["yaml_content"])
                        modified_workflows.append(workflow.path)
                    else:
                        info(
                            "No newer tool versions were found, so the workflow was not updated."
                        )

    if kwds['test']:
        # NOTE(review): this gate only checks modified_files (tools); if only
        # workflows were updated no tests run - confirm that is intended.
        if not modified_files:
            info("No tools were updated, so no tests were run.")
        else:
            with temp_directory(dir=ctx.planemo_directory) as temp_path:
                # only test tools in updated directories
                modified_paths = [
                    path for path, tool_xml in yield_tool_sources_on_paths(
                        ctx, paths, recursive) if path in modified_files
                ]
                info(
                    f"Running tests for the following auto-updated tools: {', '.join(modified_paths)}"
                )
                runnables = for_paths(modified_paths + modified_workflows, temp_path=temp_path)
                kwds["engine"] = "galaxy"
                return_value = test_runnables(ctx, runnables, original_paths=paths, **kwds)
                exit_codes.append(return_value)
    return coalesce_return_codes(exit_codes, assert_at_least_one=assert_tools)