def cli(ctx, path, job_path, **kwds):
    """Planemo command for running tools and jobs.

    ::

        % planemo run cat1-tool.cwl cat-job.json
    """
    # Crude CWL detection: based on the file extension only.
    kwds["cwl"] = path.endswith(".cwl")
    conformance_test = kwds.get("conformance_test", False)
    # During CWL conformance testing, engine chatter is suppressed so only
    # the JSON result is written to stdout for the conformance harness.
    with conditionally_captured_io(conformance_test):
        with engine_context(ctx, **kwds) as engine:
            run_result = engine.run(path, job_path)
    if not run_result.was_successful:
        warn("Run failed [%s]" % str(run_result))
        ctx.exit(1)
    if conformance_test:
        # Only emit output if the engine produced CWL command state;
        # otherwise nothing is printed for the conformance run.
        if hasattr(run_result, "cwl_command_state"):
            command_state = run_result.cwl_command_state
            dumped_json = json.dumps(command_state)
            if hasattr(run_result, "galaxy_paths"):
                # Rewrite Galaxy-side paths back to local paths so the
                # conformance harness can compare against local files.
                for (local_path, galaxy_path) in run_result.galaxy_paths:
                    dumped_json = dumped_json.replace(galaxy_path, local_path)
            print(dumped_json)
    else:
        outputs_dict = run_result.outputs_dict
        print(outputs_dict)
        # Optionally persist outputs as JSON for downstream tooling.
        output_json = kwds.get("output_json", None)
        if output_json:
            with open(output_json, "w") as f:
                json.dump(outputs_dict, f)
    return 0
def cli(ctx, uri, job_path, **kwds):
    """Planemo command for running tools and jobs.

    \b
        % planemo run cat1-tool.cwl cat-job.json
    """
    path = uri_to_path(ctx, uri)
    # TODO: convert UI to runnable and do a better test of cwl.
    is_cwl = path.endswith(".cwl")
    kwds["cwl"] = is_cwl
    # Pick a default engine when the caller did not specify one.
    if kwds.get("engine") is None:
        kwds["engine"] = "cwltool" if is_cwl else "galaxy"
    with engine_context(ctx, **kwds) as engine:
        run_result = engine.run(path, job_path)
    if not run_result.was_successful:
        warn("Run failed [%s]" % unicodify(run_result))
        ctx.exit(1)
    outputs_dict = run_result.outputs_dict
    print(outputs_dict)
    json_target = kwds.get("output_json", None)
    if json_target:
        with open(json_target, "w") as handle:
            json.dump(outputs_dict, handle)
    return 0
def test_tutorial_create_hands_on_tutorial():
    """Test :func:`planemo.training.tutorial.tutorial.create_hands_on_tutorial`."""
    tuto = Tutorial(
        training=training,
        topic=topic)
    os.makedirs(tuto.wf_dir)
    # with init_wf_id and no Galaxy URL: must raise
    tuto.init_wf_id = 'ID'
    tuto.training.galaxy_url = None
    exp_exception = "No Galaxy URL given"
    with assert_raises_regexp(Exception, exp_exception):
        tuto.create_hands_on_tutorial(CTX)
    # with init_wf_id and no Galaxy API key: must raise
    tuto.init_wf_id = 'ID'
    tuto.training.galaxy_url = 'http://%s:%s' % (KWDS['host'], KWDS['port'])
    tuto.training.galaxy_api_key = None
    exp_exception = "No API key to access the given Galaxy instance"
    with assert_raises_regexp(Exception, exp_exception):
        tuto.create_hands_on_tutorial(CTX)
    # with init_wf_id against a served Galaxy instance: should succeed
    assert is_galaxy_engine(**KWDS)
    with engine_context(CTX, **KWDS) as galaxy_engine:
        with galaxy_engine.ensure_runnables_served([RUNNABLE]) as config:
            tuto.init_wf_id = config.workflow_id(WF_FP)
            tuto.training.galaxy_api_key = config.user_api_key
            tuto.create_hands_on_tutorial(CTX)
    assert os.path.exists(tuto.tuto_fp)
    os.remove(tuto.tuto_fp)
    # with init_wf_fp (local workflow file, no server needed)
    tuto.init_wf_id = None
    tuto.init_wf_fp = WF_FP
    tuto.create_hands_on_tutorial(CTX)
    assert os.path.exists(tuto.tuto_fp)
    # clean up files generated under topics/
    shutil.rmtree("topics")
def cli(ctx, runnable_identifier, job_path, **kwds):
    """Planemo command for running tools and jobs.

    \b
        % planemo run cat1-tool.cwl cat-job.json
    """
    runnable = for_runnable_identifier(ctx, runnable_identifier, kwds)
    is_cwl = runnable.type.is_cwl_artifact
    kwds["cwl"] = is_cwl
    # Choose a default engine when none was requested explicitly.
    if kwds.get("engine") is None:
        if is_cwl:
            selected_engine = "cwltool"
        elif kwds.get('galaxy_url'):
            selected_engine = "external_galaxy"
        else:
            selected_engine = "galaxy"
        kwds["engine"] = selected_engine
    with engine_context(ctx, **kwds) as engine:
        run_result = engine.run(runnable, job_path)
    if not run_result.was_successful:
        warn("Run failed [%s]" % unicodify(run_result))
        ctx.exit(1)
    outputs_dict = run_result.outputs_dict
    json_target = kwds.get("output_json", None)
    if json_target:
        with open(json_target, "w") as handle:
            json.dump(outputs_dict, handle)
    return 0
def cli(ctx, workflow_path, output=None, force=False, **kwds):
    """Convert Format 2 workflow to a native Galaxy workflow."""
    assert is_galaxy_engine(**kwds)
    kwds["no_dependency_resolution"] = True
    if workflow_path.endswith(".ga"):
        # Native (.ga) -> Format 2: a purely local conversion.
        if output is None:
            output = os.path.splitext(workflow_path)[0] + ".gxwf.yml"
        with open(workflow_path, "r") as handle:
            native_dict = json.load(handle)
        wrapper = from_galaxy_native(native_dict, json_wrapper=True)
        with open(output, "w") as handle:
            handle.write(wrapper["yaml_content"])
    else:
        # Format 2 -> native: requires a served Galaxy to do the export.
        if output is None:
            output = os.path.splitext(workflow_path)[0] + ".ga"
        runnable = for_path(workflow_path)
        with engine_context(ctx, **kwds) as galaxy_engine:
            with galaxy_engine.ensure_runnables_served([runnable]) as config:
                wf_id = config.workflow_id(workflow_path)
                exported = config.gi.workflows.export_workflow_dict(wf_id)
                write_file(output, json.dumps(exported), force=force)
def cli(ctx, rerunnable_ids, **kwds):
    """Planemo command for rerunning and remapping failed jobs on an external Galaxy server.

    Supply a list of history, invocation or job IDs, identifying the ID type using the
    --invocation, --history or --job flag, and all associated failed jobs will be rerun.

    Please note: attempting to rerun non-remappable jobs will result in an exit code of
    1. As jobs cannot be remapped more than once, running this command two or more times
    with the same history or job IDs will therefore return an exit code of 1. If avoiding
    this is important, you should specify the invocation ID instead if possible.

    \b
        % planemo rerun --invocation / --history / --job RERUNNABLE_IDS
    """
    # Possible TODO: allow collection IDs to be specified as well
    rerunnable_type = kwds.get('rerunnable_type')
    if not rerunnable_type:
        error(
            "Please specify the type (invocation, history or job) of the IDs which should be rerun."
        )
        ctx.exit(1)
    kwds["engine"] = "external_galaxy"
    with engine_context(ctx, **kwds) as engine:
        results = [
            engine.rerun(ctx, Rerunnable(rid, rerunnable_type, kwds["galaxy_url"]), **kwds)
            for rid in rerunnable_ids
        ]
    if all(result.was_successful for result in results):
        info('All requested jobs were rerun successfully.')
        ctx.exit(0)
    else:
        error('Some of the requested jobs could not be rerun.')
        ctx.exit(1)
def cli(ctx, uri, job_path, **kwds):
    """Planemo command for running tools and jobs.

    \b
        % planemo run cat1-tool.cwl cat-job.json
    """
    path = uri_to_path(ctx, uri)
    # TODO: convert UI to runnable and do a better test of cwl.
    is_cwl = path.endswith(".cwl")
    kwds["cwl"] = is_cwl
    # Fall back to a sensible engine when none was requested.
    if kwds.get("engine") is None:
        kwds["engine"] = "cwltool" if is_cwl else "galaxy"
    with engine_context(ctx, **kwds) as engine:
        run_result = engine.run(path, job_path)
    if not run_result.was_successful:
        warn("Run failed [%s]" % str(run_result))
        ctx.exit(1)
    outputs_dict = run_result.outputs_dict
    print(outputs_dict)
    json_target = kwds.get("output_json", None)
    if json_target:
        with open(json_target, "w") as handle:
            json.dump(outputs_dict, handle)
    return 0
def _cli(ctx, paths, glx, user_key, **kwds) -> Dict:
    """
    Run specified tool's tests within Galaxy.

    Returns a dict of the status and history_name of the finished workflow.

    See https://github.com/galaxyproject/planemo/blob/master/planemo/commands/cmd_test.py
    """
    kwds["engine"] = "external_galaxy"
    kwds["shed_install"] = False
    # `glx` provides the target Galaxy's URL and a key; presumably a
    # benchmarker wrapper object — TODO confirm its exact contract.
    kwds["galaxy_url"] = glx.url
    # NOTE(review): the admin key is populated from glx.user_key —
    # confirm this key actually has admin scope on the target instance.
    kwds["galaxy_admin_key"] = glx.user_key
    # Unique history name per run: nanosecond timestamp plus a random
    # suffix to avoid collisions between concurrent benchmark runs.
    kwds["history_name"] = "galaxy_benchmarker-" + str(time.time_ns()) + str(
        random.randrange(0, 99999))
    if user_key is not None:
        kwds["galaxy_user_key"] = user_key
    runnables = for_paths(paths)
    try:
        with engine_context(ctx, **kwds) as engine:
            test_data = engine.test(runnables)
            exit_code = handle_reports_and_summary(ctx, test_data.structured_data, kwds=kwds)
            status = "success" if exit_code == 0 else "error"
    except Exception as e:
        # Broad catch: any failure in engine setup or test execution is
        # reported as an "error" status rather than crashing the caller.
        log.error("There was an error: {e}".format(e=e))
        status = "error"
    return {"status": status, "history_name": kwds["history_name"]}
def cli(ctx, workflow_identifier, output=None, force=False, **kwds):
    """Convert Format 2 workflows to native Galaxy workflows, and vice-versa."""
    assert is_galaxy_engine(**kwds)
    kwds["no_dependency_resolution"] = True
    if workflow_identifier.endswith(".ga"):
        # Native (.ga) -> Format 2: converted locally via gxformat2.
        if output is None:
            output = os.path.splitext(workflow_identifier)[0] + ".gxwf.yml"
        with open(workflow_identifier, "r") as handle:
            native_dict = json.load(handle)
        wrapper = from_galaxy_native(native_dict, json_wrapper=True)
        with open(output, "w") as handle:
            handle.write(wrapper["yaml_content"])
    else:
        # Format 2 -> native: export through a served Galaxy instance.
        if output is None:
            output = os.path.splitext(workflow_identifier)[0] + ".ga"
        runnable = for_path(workflow_identifier)
        with engine_context(ctx, **kwds) as galaxy_engine:
            with galaxy_engine.ensure_runnables_served([runnable]) as config:
                wf_id = config.workflow_id(workflow_identifier)
                exported = config.gi.workflows.export_workflow_dict(wf_id)
                contents = json.dumps(exported, indent=4, sort_keys=True)
                write_file(output, contents, force=force)
def cli(ctx, paths, **kwds):
    """Run specified tool's tests within Galaxy.

    All referenced tools (by default all the tools in the current working
    directory) will be tested and the results quickly summarized.

    To run these tests planemo needs a Galaxy instance to utilize, planemo
    will search parent directories to see if any is a Galaxy instance
    - but one can pick the Galaxy instance to use with the --galaxy_root
    option or force planemo to download a disposable instance with the
    ``--install_galaxy`` flag.

    In addition to the quick summary printed to the console - various
    detailed output summaries can be configured. ``tool_test_output.html``
    (settable via ``--test_output``) will contain a human consumable HTML
    report describing the test run. A JSON file (settable via
    ``--test_output_json`` and defaulting to ``tool_test_output.json``) will
    also be created. These files can be disabled by passing in empty arguments
    or globally by setting the values ``default_test_output`` and/or
    ``default_test_output_json`` in ``~/.planemo.yml`` to ``null``. For
    continuous integration testing an xUnit-style report can be configured
    using the ``--test_output_xunit``.

    planemo uses temporarily generated config files and environment variables
    to attempt to shield this execution of Galaxy from manually launched runs
    against that same Galaxy root - but this may not be bullet proof yet so
    please be careful and do not try this against production Galaxy instances.
    """
    runnables = for_paths(paths)
    # Default to cwltool only when every runnable is a CWL artifact.
    is_cwl = all([
        r.type in [RunnableType.cwl_tool, RunnableType.cwl_workflow]
        for r in runnables
    ])
    if kwds.get("engine", None) is None:
        kwds["engine"] = "galaxy" if not is_cwl else "cwltool"
    engine_type = kwds["engine"]
    # Test engines are required for anything beyond plain Galaxy tools and
    # directories, or whenever a non-galaxy engine was selected.
    enable_test_engines = any([
        r.type not in [
            RunnableType.galaxy_tool, RunnableType.galaxy_datamanager,
            RunnableType.directory
        ] for r in runnables
    ])
    enable_test_engines = enable_test_engines or engine_type != "galaxy"
    if enable_test_engines:
        ctx.vlog("Using test engine type %s" % engine_type)
        with engine_context(ctx, **kwds) as engine:
            test_data = engine.test(runnables)
            return_value = handle_reports_and_summary(
                ctx, test_data.structured_data, kwds=kwds)
    else:
        # Legacy path: delegate to Galaxy's own run_tests.sh harness.
        ctx.vlog(
            "Running traditional Galaxy tool tests using run_tests.sh in Galaxy root %s"
            % engine_type)
        kwds["for_tests"] = True
        with galaxy_config(ctx, runnables, **kwds) as config:
            return_value = run_in_config(ctx, config, **kwds)
    ctx.exit(return_value)
def get_hands_on_boxes_from_local_galaxy(kwds, wf_filepath, ctx):
    """Serve a local Galaxy instance and get the workflow dictionary."""
    assert is_galaxy_engine(**kwds)
    runnable = for_path(wf_filepath)
    tuto_body = ''
    with engine_context(ctx, **kwds) as galaxy_engine:
        with galaxy_engine.ensure_runnables_served([runnable]) as config:
            wf_id = config.workflow_id(wf_filepath)
            wf_dict = config.gi.workflows.export_workflow_dict(wf_id)
            tuto_body = format_wf_steps(wf_dict, config.gi)
    return tuto_body
def test_format_wf_steps():
    """Test :func:`planemo.training.tutorial.format_wf_steps`."""
    assert is_galaxy_engine(**KWDS)
    with engine_context(CTX, **KWDS) as galaxy_engine:
        with galaxy_engine.ensure_runnables_served([RUNNABLE]) as config:
            wf_id = config.workflow_id(WF_FP)
            wf_dict = config.gi.workflows.export_workflow_dict(wf_id)
            body = format_wf_steps(wf_dict, config.gi)
            # Each served workflow step should appear as a hands-on sub-step.
            expected_snippets = (
                '## Sub-step with **FastQC**',
                '## Sub-step with **Query Tabular**',
                '## Sub-step with **Select first**',
            )
            for snippet in expected_snippets:
                assert snippet in body
def test_get_hands_on_boxes_from_running_galaxy():
    """Test :func:`planemo.training.tutorial.get_hands_on_boxes_from_running_galaxy`."""
    assert is_galaxy_engine(**KWDS)
    # Build the URL for the Galaxy instance served below from test KWDS.
    galaxy_url = 'http://%s:%s' % (KWDS['host'], KWDS['port'])
    with engine_context(CTX, **KWDS) as galaxy_engine:
        with galaxy_engine.ensure_runnables_served([RUNNABLE]) as config:
            wf_id = config.workflow_id(WF_FP)
            # Extract the hands-on boxes over the live Galaxy API.
            tuto_body = get_hands_on_boxes_from_running_galaxy(wf_id, galaxy_url, config.user_api_key)
            # Each workflow step should be rendered as a sub-step heading.
            assert '## Sub-step with **FastQC**' in tuto_body
            assert '## Sub-step with **Query Tabular**' in tuto_body
            assert '## Sub-step with **Select first**' in tuto_body
def cli(ctx, workflow_path, output=None, force=False, **kwds):
    """Open a synchronized Galaxy workflow editor."""
    assert is_galaxy_engine(**kwds)
    kwds["workflows_from_path"] = True
    runnable = for_path(workflow_path)
    with engine_context(ctx, **kwds) as galaxy_engine:
        with galaxy_engine.ensure_runnables_served([runnable]) as config:
            wf_id = config.workflow_id(workflow_path)
            editor_url = "%s/workflow/editor?id=%s" % (config.galaxy_url, wf_id)
            click.launch(editor_url)
            # Keep the served Galaxy alive while the editor is in use.
            sleep_for_serve()
def cli(ctx, workflow_identifier, output=None, force=False, **kwds):
    """Open a synchronized Galaxy workflow editor."""
    assert is_galaxy_engine(**kwds)
    runnable = for_runnable_identifier(ctx, workflow_identifier, kwds.get("profile"))
    kwds["workflows_from_path"] = True
    with engine_context(ctx, **kwds) as galaxy_engine:
        with galaxy_engine.ensure_runnables_served([runnable]) as config:
            wf_id = config.workflow_id_for_runnable(runnable)
            editor_url = "%s/workflow/editor?id=%s" % (config.galaxy_url, wf_id)
            click.launch(editor_url)
            # Only block to keep a locally-served Galaxy alive; an
            # external Galaxy keeps running on its own.
            is_external = kwds["engine"] == "external_galaxy"
            if not is_external:
                sleep_for_serve()
def cli(ctx, paths, **kwds):
    """Run specified tool's tests within Galaxy.

    All referenced tools (by default all the tools in the current working
    directory) will be tested and the results quickly summarized.

    To run these tests planemo needs a Galaxy instance to utilize, planemo
    will search parent directories to see if any is a Galaxy instance
    - but one can pick the Galaxy instance to use with the --galaxy_root
    option or force planemo to download a disposable instance with the
    ``--install_galaxy`` flag.

    In addition to the quick summary printed to the console - various
    detailed output summaries can be configured. ``tool_test_output.html``
    (settable via ``--test_output``) will contain a human consumable HTML
    report describing the test run. A JSON file (settable via
    ``--test_output_json`` and defaulting to ``tool_test_output.json``) will
    also be created. These files can be disabled by passing in empty arguments
    or globally by setting the values ``default_test_output`` and/or
    ``default_test_output_json`` in ``~/.planemo.yml`` to ``null``. For
    continuous integration testing an xUnit-style report can be configured
    using the ``--test_output_xunit``.

    planemo uses temporarily generated config files and environment variables
    to attempt to shield this execution of Galaxy from manually launched runs
    against that same Galaxy root - but this may not be bullet proof yet so
    please be careful and do not try this against production Galaxy instances.
    """
    runnables = for_paths(paths)
    # Default to cwltool only when every runnable is a CWL artifact.
    is_cwl = all([r.type in [RunnableType.cwl_tool, RunnableType.cwl_workflow] for r in runnables])
    if kwds.get("engine", None) is None:
        kwds["engine"] = "galaxy" if not is_cwl else "cwltool"
    engine_type = kwds["engine"]
    # Test engines are required for anything that is not a plain Galaxy
    # tool or directory, or whenever a non-galaxy engine was selected.
    enable_test_engines = any([r.type not in [RunnableType.galaxy_tool, RunnableType.directory] for r in runnables])
    enable_test_engines = enable_test_engines or engine_type != "galaxy"
    if enable_test_engines:
        ctx.vlog("Using test engine type %s" % engine_type)
        with engine_context(ctx, **kwds) as engine:
            test_data = engine.test(runnables)
            return_value = handle_reports_and_summary(ctx, test_data.structured_data, kwds=kwds)
    else:
        # Legacy path: delegate to Galaxy's own run_tests.sh harness.
        ctx.vlog("Running traditional Galaxy tool tests using run_tests.sh in Galaxy root %s" % engine_type)
        kwds["for_tests"] = True
        with galaxy_config(ctx, runnables, **kwds) as config:
            return_value = run_in_config(ctx, config, **kwds)
    ctx.exit(return_value)
def cli(ctx, workflow_path, output=None, force=False, **kwds):
    """Convert Format 2 workflow to a native Galaxy workflow."""
    assert is_galaxy_engine(**kwds)
    kwds["no_dependency_resolution"] = True
    if output is None:
        output = os.path.splitext(workflow_path)[0] + ".ga"
    runnable = for_path(workflow_path)
    # Serve the workflow on a Galaxy instance and export its native form.
    with engine_context(ctx, **kwds) as galaxy_engine:
        with galaxy_engine.serve_runnables([runnable]) as config:
            wf_id = config.workflow_id(workflow_path)
            exported = config.gi.workflows.export_workflow_dict(wf_id)
            write_file(output, json.dumps(exported), force=force)
def test_tutorial_export_workflow_file():
    """Test :func:`planemo.training.tutorial.tutorial.export_workflow_file`."""
    tuto = Tutorial(training=training, topic=topic)
    os.makedirs(tuto.wf_dir)
    # with workflow fp: export straight from the local file
    tuto.init_wf_fp = WF_FP
    tuto.export_workflow_file()
    assert os.path.exists(tuto.wf_fp)
    # with workflow id: export through a served Galaxy instance
    tuto.init_wf_fp = None
    os.remove(tuto.wf_fp)
    assert is_galaxy_engine(**KWDS)
    galaxy_url = 'http://%s:%s' % (KWDS['host'], KWDS['port'])
    with engine_context(CTX, **KWDS) as galaxy_engine:
        with galaxy_engine.ensure_runnables_served([RUNNABLE]) as config:
            tuto.init_wf_id = config.workflow_id(WF_FP)
            tuto.training.galaxy_url = galaxy_url
            tuto.training.galaxy_api_key = config.user_api_key
            tuto.export_workflow_file()
            assert os.path.exists(tuto.wf_fp)
    # clean up files generated under topics/
    shutil.rmtree("topics")
def cli(ctx, workflow_identifier, output=None, force=False, **kwds):
    """Open a synchronized Galaxy workflow editor."""
    assert is_galaxy_engine(**kwds)
    workflow_identifier = translate_alias(ctx, workflow_identifier, kwds.get('profile'))
    if os.path.exists(workflow_identifier):
        runnable = for_path(workflow_identifier)
    else:
        # assume galaxy workflow id
        runnable = for_id(workflow_identifier)
    kwds["workflows_from_path"] = True
    with engine_context(ctx, **kwds) as galaxy_engine:
        with galaxy_engine.ensure_runnables_served([runnable]) as config:
            wf_id = config.workflow_id_for_runnable(runnable)
            editor_url = "%s/workflow/editor?id=%s" % (config.galaxy_url, wf_id)
            click.launch(editor_url)
            # A locally-served Galaxy must be kept alive while editing;
            # an external Galaxy needs no such babysitting.
            if kwds["engine"] != "external_galaxy":
                sleep_for_serve()
def test_runnables(ctx, runnables, original_paths=None, **kwds):
    """Return exit code indicating test or failure."""
    engine_type = kwds["engine"]
    directly_testable = {RunnableType.galaxy_tool, RunnableType.galaxy_datamanager, RunnableType.directory}
    use_test_engines = any(r.type not in directly_testable for r in runnables) or engine_type != "galaxy"
    if use_test_engines:
        ctx.vlog("Using test engine type %s" % engine_type)
        with engine_context(ctx, **kwds) as engine:
            test_data = engine.test(runnables)
            ctx.vlog("engine.test returning [%s]" % test_data)
            return handle_reports_and_summary(ctx, test_data.structured_data, kwds=kwds)
    # Legacy path: run Galaxy's own run_tests.sh harness.
    ctx.vlog("Running traditional Galaxy tool tests using run_tests.sh in Galaxy root %s" % engine_type)
    kwds["for_tests"] = True
    if kwds.get('update_test_data'):
        # Tests run against copies; locate test-data dirs from the originals.
        non_copied_runnables = for_paths(original_paths)
        kwds['test_data_target_dir'] = _find_test_data(non_copied_runnables, **kwds)
    with galaxy_config(ctx, runnables, **kwds) as config:
        return run_in_config(ctx, config, **kwds)
def test_tutorial_export_workflow_file():
    """Test :func:`planemo.training.tutorial.tutorial.export_workflow_file`."""
    tuto = Tutorial(
        training=training,
        topic=topic)
    os.makedirs(tuto.wf_dir)
    # with workflow fp: export straight from the local file
    tuto.init_wf_fp = WF_FP
    tuto.export_workflow_file()
    assert os.path.exists(tuto.wf_fp)
    # with workflow id: export through a served Galaxy instance
    tuto.init_wf_fp = None
    os.remove(tuto.wf_fp)
    assert is_galaxy_engine(**KWDS)
    galaxy_url = 'http://%s:%s' % (KWDS['host'], KWDS['port'])
    with engine_context(CTX, **KWDS) as galaxy_engine:
        with galaxy_engine.ensure_runnables_served([RUNNABLE]) as config:
            tuto.init_wf_id = config.workflow_id(WF_FP)
            tuto.training.galaxy_url = galaxy_url
            tuto.training.galaxy_api_key = config.user_api_key
            tuto.export_workflow_file()
            assert os.path.exists(tuto.wf_fp)
    # clean up files generated under topics/
    shutil.rmtree("topics")
def cli(ctx, uri, job_path, **kwds):
    """Planemo command for running tools and jobs.

    \b
        % planemo run cat1-tool.cwl cat-job.json
    """
    path = uri_to_path(ctx, uri)
    # Extension-based CWL detection.
    kwds["cwl"] = path.endswith(".cwl")
    with engine_context(ctx, **kwds) as engine:
        run_result = engine.run(path, job_path)
    if not run_result.was_successful:
        warn("Run failed [%s]" % str(run_result))
        ctx.exit(1)
    outputs_dict = run_result.outputs_dict
    print(outputs_dict)
    json_target = kwds.get("output_json", None)
    if json_target:
        with open(json_target, "w") as handle:
            json.dump(outputs_dict, handle)
    return 0
def cli(ctx, runnable_identifier, job_path, **kwds):
    """Planemo command for running tools and jobs.

    \b
        % planemo run cat1-tool.cwl cat-job.json
    """
    runnable_identifier = translate_alias(ctx, runnable_identifier, kwds.get('profile'))
    path = uri_to_path(ctx, runnable_identifier)
    if os.path.exists(path):
        runnable = for_path(path)
    else:
        # assume galaxy workflow id
        runnable = for_id(runnable_identifier)
    # TODO: do a better test of cwl.
    is_cwl = path.endswith(".cwl")
    kwds["cwl"] = is_cwl
    # Choose a default engine when none was requested explicitly.
    if kwds.get("engine") is None:
        if is_cwl:
            selected_engine = "cwltool"
        elif kwds.get('galaxy_url'):
            selected_engine = "external_galaxy"
        else:
            selected_engine = "galaxy"
        kwds["engine"] = selected_engine
    with engine_context(ctx, **kwds) as engine:
        run_result = engine.run(runnable, job_path)
    if not run_result.was_successful:
        warn("Run failed [%s]" % unicodify(run_result))
        ctx.exit(1)
    outputs_dict = run_result.outputs_dict
    json_target = kwds.get("output_json", None)
    if json_target:
        with open(json_target, "w") as handle:
            json.dump(outputs_dict, handle)
    return 0
def cli(ctx, runnable_identifier, job_path, **kwds):
    """Planemo command for running tools and jobs.

    \b
        % planemo run cat1-tool.cwl cat-job.json
    """
    runnable = for_runnable_identifier(ctx, runnable_identifier, kwds)
    is_cwl = runnable.type.is_cwl_artifact
    kwds["cwl"] = is_cwl
    kwds["execution_type"] = "Run"
    # Choose a default engine when none was requested explicitly:
    # cwltool for CWL artifacts, external_galaxy when a URL is supplied,
    # otherwise a locally managed Galaxy.
    if kwds.get("engine", None) is None:
        if is_cwl:
            kwds["engine"] = "cwltool"
        elif kwds.get('galaxy_url', None):
            kwds["engine"] = "external_galaxy"
        else:
            kwds["engine"] = "galaxy"
    with engine_context(ctx, **kwds) as engine:
        run_result = engine.run(runnable, job_path)
    if not run_result.was_successful:
        warn("Run failed [%s]" % unicodify(run_result))
    elif kwds.get('no_wait'):
        # Fire-and-forget mode: execution was submitted but results are
        # not collected.
        info('Run successfully executed - exiting without waiting for results.')
    else:
        # Optionally persist the collected outputs as JSON.
        output_json = kwds.get("output_json", None)
        outputs_dict = run_result.outputs_dict
        if output_json:
            with open(output_json, "w") as f:
                json.dump(outputs_dict, f)
        info('Run completed successfully.')
    # Build a single-entry structured report so the standard report /
    # summary handling determines the exit code.
    report_data = StructuredData(data={'tests': [run_result.structured_data()], 'version': '0.1'})
    report_data.calculate_summary_data()
    return_value = handle_reports_and_summary(ctx, report_data.structured_data, kwds=kwds)
    ctx.exit(return_value)
def test_plain_init(self):
    """End-to-end exercise of profile, alias, run, rerun and test-init commands
    against a served Galaxy instance."""
    ctx = cli.PlanemoCliContext()
    ctx.planemo_directory = "/tmp/planemo-test-workspace"
    cat_tool = os.path.join(PROJECT_TEMPLATES_DIR, "demo", "cat.xml")
    test_workflow_path = os.path.join(TEST_DATA_DIR, 'wf2.ga')

    with engine_context(ctx, extra_tools=(cat_tool, )) as galaxy_engine:
        with galaxy_engine.ensure_runnables_served(
                [for_path(test_workflow_path)]) as config:
            wfid = config.workflow_id(test_workflow_path)

            # commands to test
            profile_list_cmd = ["profile_list"]
            profile_create_cmd = [
                "profile_create", "test_ext_profile", "--galaxy_url",
                config.galaxy_url, "--galaxy_user_key", config.user_api_key
            ]
            alias_create_cmd = [
                "create_alias", wfid, "--alias", "test_wf_alias",
                "--profile", "test_ext_profile"
            ]
            alias_list_cmd = ["list_alias", "--profile", "test_ext_profile"]
            alias_delete_cmd = [
                "delete_alias", "--alias", "test_wf_alias", "--profile",
                "test_ext_profile"
            ]
            profile_delete_cmd = ["profile_delete", "test_ext_profile"]
            run_cmd = [
                "run", "test_wf_alias",
                os.path.join(TEST_DATA_DIR, "wf2-job.yml"), "--profile",
                "test_ext_profile"
            ]
            list_invocs_cmd = [
                "list_invocations", "test_wf_alias", "--profile",
                "test_ext_profile"
            ]
            # placeholder invocation id is patched in below once known
            rerun_cmd = [
                "rerun", "--invocation", "invocation_id", "--profile",
                "test_ext_profile"
            ]
            upload_data_cmd = [
                "upload_data", "test_wf_alias",
                os.path.join(TEST_DATA_DIR, "wf2-job.yml"), "new-job.yml",
                "--profile", "test_ext_profile"
            ]
            workflow_test_init_cmd = [
                "workflow_test_init", "invocation_id", "--from_invocation",
                "--profile", "test_ext_profile"
            ]
            test_workflow_test_init_cmd = [
                "test", "TestWorkflow1.ga", "--profile", "test_ext_profile"
            ]

            # test alias and profile creation
            result = self._check_exit_code(profile_list_cmd)
            assert 'test_ext_profile' not in result.output
            result = self._check_exit_code(profile_create_cmd)
            assert 'Profile [test_ext_profile] created' in result.output
            result = self._check_exit_code(profile_list_cmd)
            assert 'test_ext_profile' in result.output
            result = self._check_exit_code(alias_create_cmd)
            assert 'Alias test_wf_alias created.' in result.output
            result = self._check_exit_code(alias_list_cmd)
            assert 'test_wf_alias' in result.output
            assert wfid in result.output
            assert '1 aliases were found for profile test_ext_profile.' in result.output

            # test upload_data command
            self._check_exit_code(upload_data_cmd)
            with open("new-job.yml") as f:
                new_job = yaml.safe_load(f)
            assert list(new_job['WorkflowInput1']) == list(
                new_job['WorkflowInput2']) == ["class", "galaxy_id"]

            # test WF execution (from wfid) using created profile and alias
            result = self._check_exit_code(run_cmd)
            assert 'Run failed' not in result.output
            result = self._check_exit_code(run_cmd + ["--no_wait"])
            assert 'Run successfully executed' in result.output
            result = self._check_exit_code(list_invocs_cmd)
            assert '2 invocations found.' in result.output
            assert '1 jobs ok' in result.output or '"ok": 1' in result.output  # so it passes regardless if tabulate is installed or not

            # test rerun
            invocation_id = config.user_gi.workflows.get_invocations(
                wfid)[0]['id']
            rerun_cmd[2] = invocation_id
            result = self._check_exit_code(rerun_cmd)
            assert 'No jobs matching the specified invocation' in result.output

            # test generating test case from invocation_id
            workflow_test_init_cmd[1] = invocation_id
            self._check_exit_code(workflow_test_init_cmd)
            assert os.path.exists('TestWorkflow1.ga')
            assert os.path.exists('TestWorkflow1-tests.yml')
            self._check_exit_code(test_workflow_test_init_cmd)

            # test alias and profile deletion
            result = self._check_exit_code(alias_delete_cmd)
            assert 'Alias test_wf_alias was successfully deleted from profile test_ext_profile' in result.output
            result = self._check_exit_code(alias_list_cmd)
            assert '0 aliases were found for profile test_ext_profile.' in result.output
            result = self._check_exit_code(profile_delete_cmd)
            assert 'Profile deleted.' in result.output
            result = self._check_exit_code(profile_list_cmd)
            assert 'test_ext_profile' not in result.output
def cli(ctx, paths, **kwds):  # noqa C901
    """Auto-update tool requirements by checking against Conda and updating if newer versions are available."""
    assert_tools = kwds.get("assert_tools", True)
    recursive = kwds.get("recursive", False)
    exit_codes = []
    modified_files = set()
    # FIX: read the skiplist with a context manager — the previous
    # inline open() never closed the file handle.
    if kwds['skiplist']:
        with open(kwds['skiplist']) as skiplist_file:
            tools_to_skip = [line.rstrip() for line in skiplist_file]
    else:
        tools_to_skip = []
    runnables = for_paths(paths)

    if any(r.type in {RunnableType.galaxy_tool, RunnableType.directory} for r in runnables):
        # update Galaxy tools
        for (tool_path, tool_xml) in yield_tool_sources_on_paths(ctx, paths, recursive):
            if tool_path.split('/')[-1] in tools_to_skip:
                info("Skipping tool %s" % tool_path)
                continue
            info("Auto-updating tool %s" % tool_path)
            try:
                updated = autoupdate.autoupdate_tool(ctx, tool_path, modified_files=modified_files, **kwds)
                if updated:
                    modified_files.update(updated)
            except Exception as e:
                # Best-effort: report the failure but keep updating the
                # remaining tools.
                error(f"{tool_path} could not be updated - the following error was raised: {e}")
            if handle_tool_load_error(tool_path, tool_xml):
                exit_codes.append(EXIT_CODE_GENERIC_FAILURE)
                continue
            else:
                exit_codes.append(EXIT_CODE_OK)

    workflows = [r for r in runnables if r.type == RunnableType.galaxy_workflow]
    modified_workflows = []
    if workflows:
        assert is_galaxy_engine(**kwds)
        if kwds.get("engine") != "external_galaxy":
            # A locally managed Galaxy needs repos installed at their most
            # recent revisions to discover newer tool versions.
            kwds["install_most_recent_revision"] = True
            kwds["install_resolver_dependencies"] = False
            kwds["install_repository_dependencies"] = False
            kwds['shed_install'] = True

        with engine_context(ctx, **kwds) as galaxy_engine:
            with galaxy_engine.ensure_runnables_served(workflows) as config:
                for workflow in workflows:
                    if config.updated_repos.get(workflow.path) or kwds.get("engine") == "external_galaxy":
                        info("Auto-updating workflow %s" % workflow.path)
                        updated_workflow = autoupdate.autoupdate_wf(ctx, config, workflow)
                        if workflow.path.endswith(".ga"):
                            # Native workflow: rewrite as pretty-printed JSON.
                            with open(workflow.path, 'w') as f:
                                json.dump(updated_workflow, f, indent=4, sort_keys=True)
                        else:
                            # Format 2 workflow: convert back from native.
                            format2_wrapper = from_galaxy_native(updated_workflow, json_wrapper=True)
                            with open(workflow.path, "w") as f:
                                f.write(format2_wrapper["yaml_content"])
                        modified_workflows.append(workflow.path)
                    else:
                        info("No newer tool versions were found, so the workflow was not updated.")

    if kwds['test']:
        if not modified_files:
            info("No tools were updated, so no tests were run.")
        else:
            with temp_directory(dir=ctx.planemo_directory) as temp_path:
                # only test tools in updated directories
                modified_paths = [
                    path for path, tool_xml in yield_tool_sources_on_paths(ctx, paths, recursive)
                    if path in modified_files
                ]
                info(f"Running tests for the following auto-updated tools: {', '.join(modified_paths)}")
                runnables = for_paths(modified_paths + modified_workflows, temp_path=temp_path)
                kwds["engine"] = "galaxy"
                return_value = test_runnables(ctx, runnables, original_paths=paths, **kwds)
                exit_codes.append(return_value)

    return coalesce_return_codes(exit_codes, assert_at_least_one=assert_tools)
def test_can_handle():
    """Each engine type must report the expected can_run verdicts."""
    ctx = test_context()
    for engine_type in ("galaxy", "cwltool"):
        with engine_context(ctx, engine=engine_type) as engine:
            expectations = CAN_HANDLE[engine_type]
            for artifact_path, expected in expectations.items():
                verdict = bool(engine.can_run(for_path(artifact_path)))
                assert verdict is expected
def test_plain_init(self):
    """Exercise profile, alias, run and list_invocations commands against a
    served Galaxy instance."""
    ctx = cli.PlanemoCliContext()
    ctx.planemo_directory = "/tmp/planemo-test-workspace"
    cat_tool = os.path.join(PROJECT_TEMPLATES_DIR, "demo", "cat.xml")
    test_workflow_path = os.path.join(TEST_DATA_DIR, 'wf2.ga')

    with engine_context(ctx, extra_tools=(cat_tool, )) as galaxy_engine:
        with galaxy_engine.ensure_runnables_served(
                [for_path(test_workflow_path)]) as config:
            wfid = config.workflow_id(test_workflow_path)

            # commands to test
            profile_list_cmd = ["profile_list"]
            profile_create_cmd = [
                "profile_create", "test_ext_profile", "--galaxy_url",
                config.galaxy_url, "--galaxy_user_key", config.user_api_key
            ]
            alias_create_cmd = [
                "create_alias", wfid, "--alias", "test_wf_alias",
                "--profile", "test_ext_profile"
            ]
            alias_list_cmd = ["list_alias", "--profile", "test_ext_profile"]
            alias_delete_cmd = [
                "delete_alias", "--alias", "test_wf_alias", "--profile",
                "test_ext_profile"
            ]
            profile_delete_cmd = ["profile_delete", "test_ext_profile"]
            run_cmd = [
                "run", "test_wf_alias",
                os.path.join(TEST_DATA_DIR, "wf2-job.yml"), "--profile",
                "test_ext_profile"
            ]
            list_invocs_cmd = [
                "list_invocations", "test_wf_alias", "--profile",
                "test_ext_profile"
            ]

            # test alias and profile creation
            result = self._check_exit_code(profile_list_cmd)
            assert 'test_ext_profile' not in result.output
            result = self._check_exit_code(profile_create_cmd)
            assert 'Profile [test_ext_profile] created' in result.output
            result = self._check_exit_code(profile_list_cmd)
            assert 'test_ext_profile' in result.output
            result = self._check_exit_code(alias_create_cmd)
            assert 'Alias test_wf_alias created.' in result.output
            result = self._check_exit_code(alias_list_cmd)
            assert 'test_wf_alias' in result.output
            assert wfid in result.output
            assert '1 aliases were found for profile test_ext_profile.' in result.output

            # test WF execution (from wfid) using created profile and alias
            result = self._check_exit_code(run_cmd)
            assert 'Run failed' not in result.output
            result = self._check_exit_code(list_invocs_cmd)
            assert '1 invocations found.' in result.output
            assert '1 jobs ok' in result.output or '"ok": 1' in result.output  # so it passes regardless if tabulate is installed or not

            # test alias and profile deletion
            result = self._check_exit_code(alias_delete_cmd)
            assert 'Alias test_wf_alias was successfully deleted from profile test_ext_profile' in result.output
            result = self._check_exit_code(alias_list_cmd)
            assert '0 aliases were found for profile test_ext_profile.' in result.output
            result = self._check_exit_code(profile_delete_cmd)
            assert 'Profile deleted.' in result.output
            result = self._check_exit_code(profile_list_cmd)
            assert 'test_ext_profile' not in result.output