def __setup_tool(self):
    """Construct the tool from ``self.tool_file`` and return it.

    The built tool is stored on ``self.tool`` and asserted finalized.
    When a ``tool_action`` attribute exists (and is truthy), it replaces
    the tool's default action.
    """
    source = get_tool_source(self.tool_file)
    self.tool = create_tool_from_source(self.app, source, config_file=self.tool_file)
    self.tool.assert_finalized()
    action_override = getattr(self, "tool_action", None)
    if action_override:
        self.tool.tool_action = action_override
    return self.tool
def cases(runnable):
    """Build a `list` of :class:`TestCase` objects for specified runnable.

    When no external test file exists for the runnable, fall back to
    tests embedded in the tool source itself (Galaxy tools only).
    Otherwise the YAML test file is parsed; it must be a list of
    dictionaries, each of which must contain a ``job`` field.

    :raises Exception: if the test file is not a list, or a test entry
        is missing its ``job`` field.
    """
    tests_path = _tests_path(runnable)
    if tests_path is None:
        # No external test file - use tests embedded in the tool source.
        return _external_galaxy_tool_cases(runnable)

    tests_directory = os.path.abspath(os.path.dirname(tests_path))

    def normalize_to_tests_path(path):
        # Paths inside the test file are resolved relative to the test
        # file's own directory.
        if not os.path.isabs(path):
            absolute_path = os.path.join(tests_directory, path)
        else:
            absolute_path = path
        return os.path.normpath(absolute_path)

    with open(tests_path, "r") as f:
        tests_def = yaml.safe_load(f)

    if not isinstance(tests_def, list):
        message = TEST_FILE_NOT_LIST_MESSAGE % tests_path
        raise Exception(message)

    cases = []
    for i, test_def in enumerate(tests_def):
        if "job" not in test_def:
            message = TEST_FIELD_MISSING_MESSAGE % (i + 1, tests_path, "job")
            raise Exception(message)
        job_def = test_def["job"]
        if isinstance(job_def, dict):
            # Inline job definition embedded directly in the test file.
            job_path = None
            job = job_def
        else:
            # Reference to an external job file.
            job_path = normalize_to_tests_path(job_def)
            job = None
        cases.append(
            TestCase(
                runnable=runnable,
                tests_directory=tests_directory,
                output_expectations=test_def.get("outputs", {}),
                index=i,
                job_path=job_path,
                job=job,
                doc=test_def.get("doc", None),
            )
        )
    return cases


def _external_galaxy_tool_cases(runnable):
    """Build test cases from tests embedded in a Galaxy tool's source.

    Returns an empty list for non-Galaxy-tool runnables (matching the
    original inline behavior when no test file was found).
    """
    cases = []
    if runnable.type == RunnableType.galaxy_tool:
        tool_source = get_tool_source(runnable.path)
        test_dicts = tool_source.parse_tests_to_dict()
        tool_id = tool_source.parse_id()
        tool_version = tool_source.parse_version()
        for i, test_dict in enumerate(test_dicts.get("tests", [])):
            cases.append(
                ExternalGalaxyToolTestCase(runnable, tool_id, tool_version, i, test_dict))
    return cases
def _test_id(self):
    """Return an identifier for this test's runnable.

    Tool runnables (CWL or Galaxy) are identified by their parsed tool
    id; anything else falls back to the artifact's base file name.
    """
    tool_types = (RunnableType.cwl_tool, RunnableType.galaxy_tool)
    if self.runnable.type not in tool_types:
        return os.path.basename(self.runnable.path)
    return get_tool_source(self.runnable.path).parse_id()
def get_outputs(path):
    """Return output descriptions for the artifact at ``path``.

    Tools produce :class:`ToolOutput` entries (one per parsed output
    dataset); anything else is treated as a CWL workflow and yields
    :class:`CwlWorkflowOutput` entries, one per output label.
    """
    if guess_artifact_type(path) == "tool":
        datasets, _ = get_tool_source(path).parse_outputs(None)
        return [ToolOutput(dataset) for dataset in datasets.values()]
    proxy = workflow_proxy(path, strict_cwl_validation=False)
    return [CwlWorkflowOutput(label) for label in proxy.output_labels]
def __setup_tool(self):
    """Attempt to construct the tool from ``self.tool_file``.

    Construction failures are tolerated: ``self.tool`` is set to ``None``
    instead of propagating the error. When a truthy ``tool_action``
    attribute exists AND the tool was built, the tool's action is
    overridden. Returns the tool (or ``None`` on failure).
    """
    tool_source = get_tool_source(self.tool_file)
    try:
        self.tool = create_tool_from_source(self.app, tool_source, config_file=self.tool_file)
    except Exception:
        # Best-effort load: record failures as None rather than raising.
        self.tool = None
    # BUG FIX: previously the condition was written as
    # ``getattr(self, "tool_action", None and self.tool)`` - the
    # ``and self.tool`` ended up inside the getattr default (evaluating to
    # None), so the self.tool guard was lost and
    # ``self.tool.tool_action = ...`` could be attempted on None.
    if getattr(self, "tool_action", None) and self.tool:
        self.tool.tool_action = self.tool_action
    return self.tool
def value_decode(self, k, v):
    """Decode a serialized tool-cache entry.

    Returns ``NO_VALUE`` (forcing a cache miss) when the raw value is
    empty, when the recorded cache version differs from
    ``CURRENT_TOOL_CACHE_VERSION``, or when any recorded source file has
    changed on disk - including having been removed entirely.
    Otherwise reconstructs the tool source from the cached XML payload.
    """
    if not v or v is NO_VALUE:
        return NO_VALUE
    # v is returned as bytestring, so we need to `unicodify` on python < 3.6 before we can use json.loads
    v = json.loads(unicodify(v))
    if v.get('tool_cache_version', 0) != CURRENT_TOOL_CACHE_VERSION:
        return NO_VALUE
    for path, modtime in v['paths_and_modtimes'].items():
        try:
            current_modtime = os.path.getmtime(path)
        except OSError:
            # ROBUSTNESS FIX: a recorded source file that no longer
            # exists (or is unreadable) previously raised here, crashing
            # cache decode; a missing file simply means the entry is
            # stale, so invalidate instead.
            return NO_VALUE
        if current_modtime != modtime:
            return NO_VALUE
    payload = get_tool_source(config_file=k,
                              xml_tree=etree.ElementTree(etree.fromstring(v['payload'].encode('utf-8'))),
                              macro_paths=v['macro_paths'])
    return CachedValue(metadata=v['metadata'], payload=payload)
def main(argv=None):
    """Main entry-point for the CLI tool."""
    parser = arg_parser(argv, globals())
    add_build_arguments(parser)
    add_single_image_arguments(parser)
    parser.add_argument('command', metavar='COMMAND', help='Command (build-and-test, build, all)')
    parser.add_argument('tool', metavar="TOOL", default=None, help="Path to tool to build mulled image for.")
    args = parser.parse_args()
    # Derive mull targets from the tool's declared requirements.
    requirements, _ = get_tool_source(args.tool).parse_requirements_and_containers()
    targets = requirements_to_mulled_targets(requirements)
    mull_targets(targets, **args_to_mull_targets_kwds(args))
def get_outputs(runnable):
    """Return a list of :class:`RunnableOutput` objects for this runnable."""
    if not runnable.is_single_artifact:
        raise NotImplementedError("Cannot generate outputs for a directory.")
    runnable_type = runnable.type
    if runnable_type in [RunnableType.galaxy_tool, RunnableType.cwl_tool]:
        # TODO: do something with collections at some point
        output_datasets, _ = get_tool_source(runnable.path).parse_outputs(None)
        return [ToolOutput(o) for o in output_datasets.values()]
    if runnable_type == RunnableType.galaxy_workflow:
        return [GalaxyWorkflowOutput(o) for o in describe_outputs(runnable.path)]
    if runnable_type == RunnableType.cwl_workflow:
        proxy = workflow_proxy(runnable.path, strict_cwl_validation=False)
        return [CwlWorkflowOutput(label) for label in proxy.output_labels]
    raise NotImplementedError("Getting outputs for this artifact type is not yet supported.")
def lint_xml_with(lint_context, tool_xml, extra_modules=None):
    """Lint an in-memory tool XML tree with the given lint context.

    BUG FIX: ``extra_modules`` previously defaulted to a shared mutable
    list (``[]``), which is aliased across calls; it now defaults to
    ``None`` and is normalized inside the function, matching the
    annotated variant of this function elsewhere in the codebase.
    Backward-compatible: explicit callers pass the same values as before.
    """
    extra_modules = extra_modules or []
    tool_source = get_tool_source(xml_tree=tool_xml)
    return lint_tool_source_with(lint_context, tool_source, extra_modules=extra_modules)
def _tool_id(tool_path):
    """Return the parsed tool id for the tool file at ``tool_path``."""
    return get_tool_source(tool_path).parse_id()
def lint_xml_with(lint_context, tool_xml, extra_modules=None) -> LintContext:
    """Lint an in-memory tool XML tree with the given lint context.

    ``extra_modules`` may be ``None``, which is treated as an empty list.
    """
    modules = extra_modules or []
    source = get_tool_source(xml_tree=tool_xml)
    return lint_tool_source_with(lint_context, source, extra_modules=modules)
def test_deserialize_cwl_tool(tool_app):
    """Deserializing a raw CWL tool should yield a CwlToolSource."""
    # Can't verify much about cwl tools at this point
    source = get_tool_source(tool_app, tool_source_class='CwlToolSource', raw_tool_source=CWL_TOOL)
    assert isinstance(source, CwlToolSource)
def _deserialize(app, tool_source_class, raw_tool_source):
    """Rebuild a tool from a serialized tool source and return it."""
    source = get_tool_source(tool_source_class=tool_source_class, raw_tool_source=raw_tool_source)
    # Sanity check: the factory produced exactly the requested source class.
    assert type(source).__name__ == tool_source_class
    return create_tool_from_source(app, tool_source=source)