def test_serve_daemon(self):
    """Check a single tool can be served by a daemonized Galaxy process."""
    free_port = network_util.get_free_port()
    tool_xml = os.path.join(TEST_REPOS_DIR, "single_tool", "cat.xml")
    served = galaxy_serve(
        self.test_context,
        [for_path(tool_xml)],
        install_galaxy=True,
        port=free_port,
        daemon=True,
        no_dependency_resolution=True,
    )
    # The daemonized server should be accepting connections on its port.
    reachable = network_util.wait_net_service(
        "localhost",
        served.port,
        timeout=.1,
    )
    assert reachable
    # Sanity-check the API is live by fetching the server configuration.
    assert "allow_user_dataset_purge" in served.gi.config.get_config()
    served.kill()
    # Once killed, the port must no longer accept connections.
    still_up = network_util.wait_net_service(
        "localhost",
        served.port,
        timeout=.1,
    )
    assert not still_up
def cli(ctx, workflow_identifier, output=None, force=False, **kwds):
    """Convert Format 2 workflows to native Galaxy workflows, and vice-versa.
    """
    assert is_galaxy_engine(**kwds)
    kwds["no_dependency_resolution"] = True
    base_name = os.path.splitext(workflow_identifier)[0]
    if workflow_identifier.endswith(".ga"):
        # Native -> Format 2: a purely client-side JSON translation.
        if output is None:
            output = base_name + ".gxwf.yml"
        with open(workflow_identifier, "r") as fh:
            native_dict = json.load(fh)
        wrapper = from_galaxy_native(native_dict, json_wrapper=True)
        with open(output, "w") as fh:
            fh.write(wrapper["yaml_content"])
    else:
        # Format 2 -> native: round-trip through a served Galaxy instance.
        if output is None:
            output = base_name + ".ga"
        runnable = for_path(workflow_identifier)
        with engine_context(ctx, **kwds) as galaxy_engine:
            with galaxy_engine.ensure_runnables_served([runnable]) as config:
                wf_id = config.workflow_id(workflow_identifier)
                exported = config.gi.workflows.export_workflow_dict(wf_id)
                serialized = json.dumps(exported, indent=4, sort_keys=True)
                write_file(output, serialized, force=force)
def cli(ctx, workflow_path, output=None, force=False, **kwds):
    """Convert Format 2 workflow to a native Galaxy workflow.
    """
    assert is_galaxy_engine(**kwds)
    # Dependency resolution is irrelevant for a format conversion; skip it.
    kwds["no_dependency_resolution"] = True
    if workflow_path.endswith(".ga"):
        # Native -> Format 2: a purely client-side JSON translation.
        if output is None:
            output = os.path.splitext(workflow_path)[0] + ".gxwf.yml"
        with open(workflow_path, "r") as f:
            workflow_dict = json.load(f)
        format2_wrapper = from_galaxy_native(workflow_dict, json_wrapper=True)
        with open(output, "w") as f:
            f.write(format2_wrapper["yaml_content"])
    else:
        # Format 2 -> native: round-trip through a served Galaxy instance.
        if output is None:
            output = os.path.splitext(workflow_path)[0] + ".ga"
        runnable = for_path(workflow_path)
        with engine_context(ctx, **kwds) as galaxy_engine:
            with galaxy_engine.ensure_runnables_served([runnable]) as config:
                workflow_id = config.workflow_id(workflow_path)
                output_dict = config.gi.workflows.export_workflow_dict(workflow_id)
                # indent/sort_keys make the emitted .ga deterministic and
                # diff-friendly, consistent with the sibling convert command.
                output_contents = json.dumps(output_dict, indent=4, sort_keys=True)
                write_file(output, output_contents, force=force)
def test_describe_outputs():
    """The wf1 workflow should expose exactly one labelled output."""
    workflow_file = os.path.join(TEST_DATA_DIR, "wf1.gxwf.yml")
    outputs = describe_outputs(for_path(workflow_file))
    assert len(outputs) == 1
    only_output = outputs[0]
    assert only_output.order_index == 1
    assert only_output.output_name == "out_file1"
    assert only_output.label == "wf_output_1"
def get_hands_on_boxes_from_local_galaxy(kwds, wf_filepath, ctx):
    """Serve the workflow on a local Galaxy and return its formatted steps.

    Spins up a Galaxy engine via ``engine_context``, serves the workflow at
    ``wf_filepath``, exports it back as a dictionary and renders the steps
    with ``format_wf_steps`` into the returned tutorial body string.
    """
    assert is_galaxy_engine(**kwds)
    runnable = for_path(wf_filepath)
    # Default before the engine has produced anything.
    tuto_body = ''
    with engine_context(ctx, **kwds) as galaxy_engine:
        with galaxy_engine.ensure_runnables_served([runnable]) as config:
            workflow_id = config.workflow_id(wf_filepath)
            wf = config.gi.workflows.export_workflow_dict(workflow_id)
            tuto_body = format_wf_steps(wf, config.gi)
    return tuto_body
def cli(ctx, workflow_path, output=None, force=False, **kwds):
    """Open a synchronized Galaxy workflow editor.
    """
    assert is_galaxy_engine(**kwds)
    kwds["workflows_from_path"] = True
    target = for_path(workflow_path)
    with engine_context(ctx, **kwds) as galaxy_engine:
        with galaxy_engine.ensure_runnables_served([target]) as config:
            wf_id = config.workflow_id(workflow_path)
            editor_url = "%s/workflow/editor?id=%s" % (config.galaxy_url, wf_id)
            # Open a browser tab on the editor, then keep Galaxy alive.
            click.launch(editor_url)
            sleep_for_serve()
def _lint_tsts(path, lint_context):
    """Lint the test cases attached to the runnable at ``path``."""
    found_cases = cases(for_path(path))
    if len(found_cases) == 0:
        lint_context.warn("Workflow missing test cases.")
        all_valid = False
    else:
        # Materialize the list so every case is linted (no short-circuit).
        results = [_lint_case(path, tc, lint_context) for tc in found_cases]
        all_valid = all(results)
    if all_valid:
        lint_context.valid("Tests appear structurally correct")
def test_serve_daemon(self):
    """Test serving a galaxy tool via a daemon Galaxy process."""
    tool_xml = os.path.join(TEST_REPOS_DIR, "single_tool", "cat.xml")
    served = galaxy_serve(
        self.test_context,
        [for_path(tool_xml)],
        install_galaxy=True,
        galaxy_branch=target_galaxy_branch(),
        port=network_util.get_free_port(),
        daemon=True,
        no_dependency_resolution=True,
    )
    # Service must be reachable while running and unreachable after kill().
    _assert_service_up(served)
    served.kill()
    _assert_service_down(served)
def test_non_file_case_checker():
    """Exercise :func:`planemo.runnable.TestCase.check` on non-file outputs."""
    tool_path = os.path.join(TEST_DATA_DIR, "int_tool.cwl")
    all_cases = cases(for_path(tool_path))
    assert len(all_cases) == 1
    case = all_cases[0]

    def status_for(outputs):
        # Run the structured-data check against a canned run response.
        response = MockRunResponse(outputs)
        return case.structured_test_data(response)["data"]["status"]

    # Matching integer output succeeds; any other value fails.
    assert status_for({"output": 4}) == "success"
    assert status_for({"output": 5}) == "failure"
def cli(ctx, workflow_path, output=None, force=False, **kwds):
    """Convert Format 2 workflow to a native Galaxy workflow.
    """
    assert is_galaxy_engine(**kwds)
    kwds["no_dependency_resolution"] = True
    if output is None:
        output = os.path.splitext(workflow_path)[0] + ".ga"
    target = for_path(workflow_path)
    with engine_context(ctx, **kwds) as galaxy_engine:
        with galaxy_engine.serve_runnables([target]) as config:
            wf_id = config.workflow_id(workflow_path)
            exported = config.gi.workflows.export_workflow_dict(wf_id)
            # Persist the exported native workflow as JSON.
            write_file(output, json.dumps(exported), force=force)
def _lint_tsts(path, lint_context):
    """Lint test cases for every runnable found at ``path``."""
    # ``return_all`` may yield a single runnable or a list; normalize to list.
    found = for_path(path, return_all=True)
    runnables = found if isinstance(found, list) else [found]
    for runnable in runnables:
        its_cases = cases(runnable)
        if len(its_cases) == 0:
            lint_context.warn("Workflow missing test cases.")
            continue
        # Materialize so every case is linted even after a failure.
        results = [_lint_case(path, tc, lint_context) for tc in its_cases]
        if all(results):
            lint_context.valid(
                f"Tests appear structurally correct for {runnable.path}")
def test_serve_workflow(self):
    """Test serving a galaxy workflow via a daemon Galaxy process."""
    extra_tools = [
        os.path.join(PROJECT_TEMPLATES_DIR, "demo", "randomlines.xml"),
        os.path.join(PROJECT_TEMPLATES_DIR, "demo", "cat.xml"),
    ]
    workflow_file = os.path.join(TEST_DATA_DIR, "wf1.gxwf.yml")
    served = galaxy_serve(
        self.test_context,
        [for_path(workflow_file)],
        install_galaxy=True,
        port=network_util.get_free_port(),
        daemon=True,
        extra_tools=extra_tools,
    )
    # Both the extra tools and the uploaded workflow should be available.
    gi = served.user_gi
    assert gi.tools.get_tools(tool_id="random_lines1")
    assert len(gi.workflows.get_workflows()) == 1
    served.kill()
def cli(ctx, workflow_identifier, output=None, force=False, **kwds):
    """Open a synchronized Galaxy workflow editor.
    """
    assert is_galaxy_engine(**kwds)
    workflow_identifier = translate_alias(ctx, workflow_identifier, kwds.get('profile'))
    # Existing paths are served from disk; anything else is assumed to be
    # a Galaxy workflow id.
    target = (
        for_path(workflow_identifier)
        if os.path.exists(workflow_identifier)
        else for_id(workflow_identifier)
    )
    kwds["workflows_from_path"] = True
    with engine_context(ctx, **kwds) as galaxy_engine:
        with galaxy_engine.ensure_runnables_served([target]) as config:
            wf_id = config.workflow_id_for_runnable(target)
            editor_url = "%s/workflow/editor?id=%s" % (config.galaxy_url, wf_id)
            click.launch(editor_url)
            # A locally managed engine must stay alive for the edit session.
            if kwds["engine"] != "external_galaxy":
                sleep_for_serve()
def test_file_case_checker():
    """File-based output checking succeeds only for matching file content."""
    tool_path = os.path.join(TEST_DATA_DIR, "cat_tool.cwl")
    all_cases = cases(for_path(tool_path))
    assert len(all_cases) == 1
    case = all_cases[0]

    def status_for(result_path):
        outputs = {
            "output_file": {
                "path": result_path,
            }
        }
        response = MockRunResponse(outputs)
        return case.structured_test_data(response)["data"]["status"]

    # hello.txt is the expected content; a different file must fail.
    assert status_for(os.path.join(TEST_DATA_DIR, "hello.txt")) == "success"
    assert status_for(os.path.join(TEST_DATA_DIR, "int_tool_job.json")) == "failure"
def cli(ctx, runnable_identifier, job_path, **kwds):
    """Planemo command for running tools and jobs.

    \b
    % planemo run cat1-tool.cwl cat-job.json
    """
    runnable_identifier = translate_alias(ctx, runnable_identifier, kwds.get('profile'))
    path = uri_to_path(ctx, runnable_identifier)
    # An existing path is run directly; otherwise assume a Galaxy workflow id.
    if os.path.exists(path):
        runnable = for_path(path)
    else:
        runnable = for_id(runnable_identifier)
    # TODO: do a better test of cwl.
    is_cwl = path.endswith(".cwl")
    kwds["cwl"] = is_cwl
    # Pick a default engine when the caller did not choose one explicitly.
    if kwds.get("engine", None) is None:
        if is_cwl:
            kwds["engine"] = "cwltool"
        elif kwds.get('galaxy_url', None):
            kwds["engine"] = "external_galaxy"
        else:
            kwds["engine"] = "galaxy"
    with engine_context(ctx, **kwds) as engine:
        run_result = engine.run(runnable, job_path)
    if not run_result.was_successful:
        warn("Run failed [%s]" % unicodify(run_result))
        ctx.exit(1)
    # Optionally persist the collected outputs as JSON.
    destination = kwds.get("output_json", None)
    if destination:
        with open(destination, "w") as fh:
            json.dump(run_result.outputs_dict, fh)
    return 0
def test_outputs():
    """The CWL tool should declare a single output named output_file."""
    tool_outputs = get_outputs(for_path(A_CWL_TOOL))
    assert len(tool_outputs) == 1
    assert tool_outputs[0].get_id() == "output_file"
def test_can_handle():
    """Each engine reports the expected can_run answer per artifact."""
    ctx = test_context()
    for engine_name in ("galaxy", "cwltool"):
        expectations = CAN_HANDLE[engine_name]
        with engine_context(ctx, engine=engine_name) as engine:
            for artifact_path, expected in expectations.items():
                answer = bool(engine.can_run(for_path(artifact_path)))
                assert answer is expected
def test_outputs():
    """The CWL workflow should declare a single output named count_output."""
    wf_outputs = get_outputs(for_path(A_CWL_WORKFLOW))
    assert len(wf_outputs) == 1
    assert wf_outputs[0].get_id() == "count_output"
# Paths to the fixture files shared by the training tests.
datatype_fp = os.path.join(TEST_DATA_DIR, "training_datatypes.yaml")
tuto_fp = os.path.join(TEST_DATA_DIR, "training_tutorial.md")
tuto_wo_zenodo_fp = os.path.join(TEST_DATA_DIR, "training_tutorial_wo_zenodo.md")
zenodo_link = 'https://zenodo.org/record/1321885'
# load a workflow generated from Galaxy
WF_FP = os.path.join(TEST_DATA_DIR, "training_workflow.ga")
with open(WF_FP, "r") as wf_f:
    wf = json.load(wf_f)
# load wf_param_values (output of tutorial.get_wf_param_values on wf['steps']['4'])
with open(os.path.join(TEST_DATA_DIR, "training_wf_param_values.json"), "r") as wf_param_values_f:
    wf_param_values = json.load(wf_param_values_f)
# configuration shared by the tests below
RUNNABLE = for_path(WF_FP)
CTX = cli.Context()
CTX.planemo_directory = "/tmp/planemo-test-workspace"
# NOTE(review): KWDS continues past this chunk of the file.
KWDS = {
    'topic_name': 'my_new_topic',
    'topic_title': "New topic",
    'topic_target': "use",
    'topic_summary': "Topic summary",
    'tutorial_name': "new_tuto",
    'tutorial_title': "Title of tuto",
    'hands_on': True,
    'slides': True,
    'workflow': None,
    'workflow_id': None,
    'zenodo_link': None,
    'datatypes': os.path.join(TEST_DATA_DIR, "training_datatypes.yaml"),
def test_plain_init(self):
    """End-to-end check of profile and alias CLI commands.

    Serves a workflow on a throwaway Galaxy, then exercises
    profile_create/list/delete, create_alias/list_alias/delete_alias,
    run and list_invocations through the CLI and asserts on their output.
    """
    ctx = cli.PlanemoCliContext()
    ctx.planemo_directory = "/tmp/planemo-test-workspace"
    cat_tool = os.path.join(PROJECT_TEMPLATES_DIR, "demo", "cat.xml")
    test_workflow_path = os.path.join(TEST_DATA_DIR, 'wf2.ga')
    with engine_context(ctx, extra_tools=(cat_tool, )) as galaxy_engine:
        with galaxy_engine.ensure_runnables_served(
                [for_path(test_workflow_path)]) as config:
            wfid = config.workflow_id(test_workflow_path)

            # commands to test
            profile_list_cmd = ["profile_list"]
            profile_create_cmd = [
                "profile_create", "test_ext_profile",
                "--galaxy_url", config.galaxy_url,
                "--galaxy_user_key", config.user_api_key
            ]
            alias_create_cmd = [
                "create_alias", wfid,
                "--alias", "test_wf_alias",
                "--profile", "test_ext_profile"
            ]
            alias_list_cmd = [
                "list_alias", "--profile", "test_ext_profile"
            ]
            alias_delete_cmd = [
                "delete_alias", "--alias", "test_wf_alias",
                "--profile", "test_ext_profile"
            ]
            profile_delete_cmd = ["profile_delete", "test_ext_profile"]
            run_cmd = [
                "run", "test_wf_alias",
                os.path.join(TEST_DATA_DIR, "wf2-job.yml"),
                "--profile", "test_ext_profile"
            ]
            list_invocs_cmd = [
                "list_invocations", "test_wf_alias",
                "--profile", "test_ext_profile"
            ]

            # test alias and profile creation
            result = self._check_exit_code(profile_list_cmd)
            assert 'test_ext_profile' not in result.output
            result = self._check_exit_code(profile_create_cmd)
            assert 'Profile [test_ext_profile] created' in result.output
            result = self._check_exit_code(profile_list_cmd)
            assert 'test_ext_profile' in result.output
            result = self._check_exit_code(alias_create_cmd)
            assert 'Alias test_wf_alias created.' in result.output
            result = self._check_exit_code(alias_list_cmd)
            assert 'test_wf_alias' in result.output
            assert wfid in result.output
            assert '1 aliases were found for profile test_ext_profile.' in result.output

            # test WF execution (from wfid) using created profile and alias
            result = self._check_exit_code(run_cmd)
            assert 'Run failed' not in result.output
            result = self._check_exit_code(list_invocs_cmd)
            assert '1 invocations found.' in result.output
            # so it passes regardless if tabulate is installed or not
            assert '1 jobs ok' in result.output or '"ok": 1' in result.output

            # test alias and profile deletion
            result = self._check_exit_code(alias_delete_cmd)
            assert 'Alias test_wf_alias was successfully deleted from profile test_ext_profile' in result.output
            result = self._check_exit_code(alias_list_cmd)
            assert '0 aliases were found for profile test_ext_profile.' in result.output
            result = self._check_exit_code(profile_delete_cmd)
            assert 'Profile deleted.' in result.output
            result = self._check_exit_code(profile_list_cmd)
            assert 'test_ext_profile' not in result.output
def run(self, path, job_path):
    """Run a job using a compatible artifact (workflow or tool)."""
    target = for_path(path)
    # Fail fast if this engine cannot handle the runnable at all.
    self._check_can_run(target)
    return self._run(target, job_path)
def test_plain_init(self):
    """End-to-end check of profile, alias, run, rerun and test-init commands.

    Serves a workflow on a throwaway Galaxy, then exercises the full CLI
    surface: profile and alias management, upload_data, run (with and
    without --no_wait), list_invocations, rerun, and generating/running a
    workflow test case from an invocation.
    """
    ctx = cli.PlanemoCliContext()
    ctx.planemo_directory = "/tmp/planemo-test-workspace"
    cat_tool = os.path.join(PROJECT_TEMPLATES_DIR, "demo", "cat.xml")
    test_workflow_path = os.path.join(TEST_DATA_DIR, 'wf2.ga')
    with engine_context(ctx, extra_tools=(cat_tool, )) as galaxy_engine:
        with galaxy_engine.ensure_runnables_served(
                [for_path(test_workflow_path)]) as config:
            wfid = config.workflow_id(test_workflow_path)

            # commands to test
            profile_list_cmd = ["profile_list"]
            profile_create_cmd = [
                "profile_create", "test_ext_profile",
                "--galaxy_url", config.galaxy_url,
                "--galaxy_user_key", config.user_api_key
            ]
            alias_create_cmd = [
                "create_alias", wfid,
                "--alias", "test_wf_alias",
                "--profile", "test_ext_profile"
            ]
            alias_list_cmd = [
                "list_alias", "--profile", "test_ext_profile"
            ]
            alias_delete_cmd = [
                "delete_alias", "--alias", "test_wf_alias",
                "--profile", "test_ext_profile"
            ]
            profile_delete_cmd = ["profile_delete", "test_ext_profile"]
            run_cmd = [
                "run", "test_wf_alias",
                os.path.join(TEST_DATA_DIR, "wf2-job.yml"),
                "--profile", "test_ext_profile"
            ]
            list_invocs_cmd = [
                "list_invocations", "test_wf_alias",
                "--profile", "test_ext_profile"
            ]
            # placeholder invocation id is patched in once a run exists
            rerun_cmd = [
                "rerun", "--invocation", "invocation_id",
                "--profile", "test_ext_profile"
            ]
            upload_data_cmd = [
                "upload_data", "test_wf_alias",
                os.path.join(TEST_DATA_DIR, "wf2-job.yml"),
                "new-job.yml", "--profile", "test_ext_profile"
            ]
            workflow_test_init_cmd = [
                "workflow_test_init", "invocation_id", "--from_invocation",
                "--profile", "test_ext_profile"
            ]
            test_workflow_test_init_cmd = [
                "test", "TestWorkflow1.ga", "--profile", "test_ext_profile"
            ]

            # test alias and profile creation
            result = self._check_exit_code(profile_list_cmd)
            assert 'test_ext_profile' not in result.output
            result = self._check_exit_code(profile_create_cmd)
            assert 'Profile [test_ext_profile] created' in result.output
            result = self._check_exit_code(profile_list_cmd)
            assert 'test_ext_profile' in result.output
            result = self._check_exit_code(alias_create_cmd)
            assert 'Alias test_wf_alias created.' in result.output
            result = self._check_exit_code(alias_list_cmd)
            assert 'test_wf_alias' in result.output
            assert wfid in result.output
            assert '1 aliases were found for profile test_ext_profile.' in result.output

            # test upload_data command
            self._check_exit_code(upload_data_cmd)
            with open("new-job.yml") as f:
                new_job = yaml.safe_load(f)
            assert list(new_job['WorkflowInput1']) == list(
                new_job['WorkflowInput2']) == ["class", "galaxy_id"]

            # test WF execution (from wfid) using created profile and alias
            result = self._check_exit_code(run_cmd)
            assert 'Run failed' not in result.output
            result = self._check_exit_code(run_cmd + ["--no_wait"])
            assert 'Run successfully executed' in result.output
            result = self._check_exit_code(list_invocs_cmd)
            assert '2 invocations found.' in result.output
            # so it passes regardless if tabulate is installed or not
            assert '1 jobs ok' in result.output or '"ok": 1' in result.output

            # test rerun
            invocation_id = config.user_gi.workflows.get_invocations(
                wfid)[0]['id']
            rerun_cmd[2] = invocation_id
            result = self._check_exit_code(rerun_cmd)
            assert 'No jobs matching the specified invocation' in result.output

            # test generating test case from invocation_id
            workflow_test_init_cmd[1] = invocation_id
            self._check_exit_code(workflow_test_init_cmd)
            assert os.path.exists('TestWorkflow1.ga')
            assert os.path.exists('TestWorkflow1-tests.yml')
            self._check_exit_code(test_workflow_test_init_cmd)

            # test alias and profile deletion
            result = self._check_exit_code(alias_delete_cmd)
            assert 'Alias test_wf_alias was successfully deleted from profile test_ext_profile' in result.output
            result = self._check_exit_code(alias_list_cmd)
            assert '0 aliases were found for profile test_ext_profile.' in result.output
            result = self._check_exit_code(profile_delete_cmd)
            assert 'Profile deleted.' in result.output
            result = self._check_exit_code(profile_list_cmd)
            assert 'test_ext_profile' not in result.output
)
# Paths to the fixture files shared by the training tests.
datatype_fp = os.path.join(TEST_DATA_DIR, "training_datatypes.yaml")
tuto_fp = os.path.join(TEST_DATA_DIR, "training_tutorial.md")
tuto_wo_zenodo_fp = os.path.join(TEST_DATA_DIR, "training_tutorial_wo_zenodo.md")
zenodo_link = 'https://zenodo.org/record/1321885'
# load a workflow generated from Galaxy
WF_FP = os.path.join(TEST_DATA_DIR, "training_workflow.ga")
with open(WF_FP, "r") as wf_f:
    wf = json.load(wf_f)
# load wf_param_values (output of tutorial.get_wf_param_values on wf['steps']['4'])
with open(os.path.join(TEST_DATA_DIR, "training_wf_param_values.json"), "r") as wf_param_values_f:
    wf_param_values = json.load(wf_param_values_f)
# configuration shared by the tests below
RUNNABLE = for_path(WF_FP)
CTX = cli.Context()
CTX.planemo_directory = "/tmp/planemo-test-workspace"
# NOTE(review): KWDS continues past this chunk of the file.
KWDS = {
    'topic_name': 'my_new_topic',
    'topic_title': "New topic",
    'topic_target': "use",
    'topic_summary': "Topic summary",
    'tutorial_name': "new_tuto",
    'tutorial_title': "Title of tuto",
    'hands_on': True,
    'slides': True,
    'workflow': None,
    'workflow_id': None,
    'zenodo_link': None,
    'datatypes': os.path.join(TEST_DATA_DIR, "training_datatypes.yaml"),