Example #1
def test_get_subgraph():
    loadingContext = LoadingContext({"construct_tool_object": default_make_tool})
    wf = norm(Path(get_data("tests/subgraph/count-lines1-wf.cwl")).as_uri())
    loadingContext.do_update = False
    tool = load_tool(wf, loadingContext)

    sg = norm(Path(get_data("tests/subgraph")).as_uri())

    def clean(val):
        if isinstance(val, str):
            if val.startswith(sg):
                return val[len(sg) + 1 :]
        if isinstance(val, dict):
            return {k: clean(v) for k, v in val.items()}
        if isinstance(val, list):
            return [clean(v) for v in val]
        return val

    for a in (
        "file1",
        "file2",
        "file3",
        "count_output",
        "output3",
        "output4",
        "output5",
        "step1",
        "step2",
        "step3",
        "step4",
        "step5",
    ):
        extracted = get_subgraph([wf + "#" + a], tool)
        with open(get_data("tests/subgraph/extract_" + a + ".json")) as f:
            assert json.load(f) == clean(convert_to_dict(extracted))
Example #2
def test_get_subgraph() -> None:
    """Compare known correct subgraphs to generated subgraphs."""
    loading_context = LoadingContext({"construct_tool_object": default_make_tool})
    wf = Path(get_data("tests/subgraph/count-lines1-wf.cwl")).as_uri()
    loading_context.do_update = False
    tool = load_tool(wf, loading_context)

    sg = Path(get_data("tests/subgraph")).as_uri()

    for a in (
        "file1",
        "file2",
        "file3",
        "count_output",
        "output3",
        "output4",
        "output5",
        "step1",
        "step2",
        "step3",
        "step4",
        "step5",
    ):
        assert isinstance(tool, Workflow)
        extracted = get_subgraph([wf + "#" + a], tool, loading_context)
        with open(get_data("tests/subgraph/extract_" + a + ".json")) as f:
            assert json.load(f) == clean(convert_to_dict(extracted), sg)
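
The typed tests above (and the later typed subgraph tests) call a module-level clean(val, sg) helper that is not part of the snippet. A minimal sketch, assuming it behaves like the nested clean() in Example #1 with the directory prefix passed explicitly:

from typing import Any


def clean(val: Any, sg: str) -> Any:
    """Strip the test-data directory prefix sg from any string values (hypothetical helper)."""
    if isinstance(val, str) and val.startswith(sg):
        return val[len(sg) + 1 :]
    if isinstance(val, dict):
        return {k: clean(v, sg) for k, v in val.items()}
    if isinstance(val, list):
        return [clean(v, sg) for v in val]
    return val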
Example #3
def _run_example(as_dict, out=None):
    if not out:
        out = _examples_path_for("test.cwl")
    abstract_as_dict = from_dict(as_dict)
    with open(out, "w") as f:
        ordered_dump(abstract_as_dict, f)

    check_abstract_def(abstract_as_dict)

    # validate format2 workflows
    enable_dev = "dev" in CWL_VERSION
    loadingContext = LoadingContext()
    loadingContext.enable_dev = enable_dev
    loadingContext.loader = default_loader(
        loadingContext.fetcher_constructor,
        enable_dev=enable_dev,
    )
    loadingContext.resolver = getdefault(loadingContext.resolver, tool_resolver)
    loadingContext, workflowobj, uri = fetch_document(out, loadingContext)
    loadingContext, uri = resolve_and_validate_document(
        loadingContext,
        workflowobj,
        uri,
    )
    return abstract_as_dict
Example #4
def test_packed_workflow_execution(wf_path, job_path, namespaced, tmpdir):
    loadingContext = LoadingContext()
    loadingContext.resolver = tool_resolver
    loadingContext, workflowobj, uri = fetch_document(
        get_data(wf_path), loadingContext)
    loadingContext.do_update = False
    loadingContext, uri = resolve_and_validate_document(
        loadingContext, workflowobj, uri)
    processobj = loadingContext.loader.resolve_ref(uri)[0]
    packed = json.loads(print_pack(loadingContext.loader, processobj, uri, loadingContext.metadata))

    assert not namespaced or "$namespaces" in packed

    wf_packed_handle, wf_packed_path = tempfile.mkstemp()
    with open(wf_packed_path, 'w') as temp_file:
        json.dump(packed, temp_file)

    normal_output = StringIO()
    packed_output = StringIO()

    normal_params = ['--outdir', str(tmpdir), get_data(wf_path), get_data(job_path)]
    packed_params = ['--outdir', str(tmpdir), '--debug', get_data(wf_packed_path), get_data(job_path)]

    assert main(normal_params, stdout=normal_output) == 0
    assert main(packed_params, stdout=packed_output) == 0

    assert json.loads(packed_output.getvalue()) == json.loads(normal_output.getvalue())

    os.close(wf_packed_handle)
    os.remove(wf_packed_path)
Example #5
def test_packed_workflow_execution(wf_path, job_path, namespaced, tmpdir):
    loadingContext = LoadingContext()
    loadingContext.resolver = tool_resolver
    loadingContext, workflowobj, uri = fetch_document(
        get_data(wf_path), loadingContext)
    loadingContext.do_update = False
    loadingContext, uri = resolve_and_validate_document(
        loadingContext, workflowobj, uri)
    processobj = loadingContext.loader.resolve_ref(uri)[0]
    packed = json.loads(print_pack(loadingContext.loader, processobj, uri, loadingContext.metadata))

    assert not namespaced or "$namespaces" in packed

    wf_packed_handle, wf_packed_path = tempfile.mkstemp()
    with open(wf_packed_path, 'w') as temp_file:
        json.dump(packed, temp_file)

    normal_output = StringIO()
    packed_output = StringIO()

    normal_params = ['--outdir', str(tmpdir), get_data(wf_path), get_data(job_path)]
    packed_params = ['--outdir', str(tmpdir), '--debug', wf_packed_path, get_data(job_path)]

    assert main(normal_params, stdout=normal_output) == 0
    assert main(packed_params, stdout=packed_output) == 0

    assert json.loads(packed_output.getvalue()) == json.loads(normal_output.getvalue())

    os.close(wf_packed_handle)
    os.remove(wf_packed_path)
Example #6
def test_get_step() -> None:
    loadingContext = LoadingContext(
        {"construct_tool_object": default_make_tool})
    wf = Path(get_data("tests/subgraph/count-lines1-wf.cwl")).as_uri()
    loadingContext.do_update = False
    tool = load_tool(wf, loadingContext)
    assert isinstance(tool, Workflow)

    sg = Path(get_data("tests/subgraph")).as_uri()

    def clean(val: Any) -> Any:
        if isinstance(val, str):
            if val.startswith(sg):
                return val[len(sg) + 1:]
        if isinstance(val, dict):
            return {k: clean(v) for k, v in val.items()}
        if isinstance(val, list):
            return [clean(v) for v in val]
        return val

    for a in (
            "step1",
            "step2",
            "step3",
            "step4",
            "step5",
    ):
        extracted = get_step(tool, wf + "#" + a)
        with open(get_data("tests/subgraph/single_" + a + ".json")) as f:
            assert json.load(f) == clean(convert_to_dict(extracted))
Example #7
def load_cwl(cwl_file, default_args):
    load.loaders = {}
    loading_context = LoadingContext(default_args)
    loading_context.construct_tool_object = default_make_tool
    loading_context.resolver = tool_resolver
    tool = load_tool(cwl_file, loading_context)
    it_is_workflow = tool.tool["class"] == "Workflow"
    return tool, it_is_workflow
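
A minimal usage sketch for the load_cwl helper above; the file name and the empty default_args dictionary are placeholders:

# Hypothetical usage of the load_cwl helper defined above.
tool, is_workflow = load_cwl("workflow.cwl", default_args={})
if is_workflow:
    print("Loaded a Workflow with %d steps" % len(tool.steps))
else:
    print("Loaded a %s" % tool.tool["class"])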
Example #8
def test_use_metadata():
    """Use the version from loadingContext.metadata if cwlVersion isn't present in the document."""
    loadingContext = LoadingContext({"do_update": False})
    tool = load_tool(get_data("tests/echo.cwl"), loadingContext)

    loadingContext = LoadingContext()
    loadingContext.metadata = tool.metadata
    tooldata = tool.tool.copy()
    del tooldata["cwlVersion"]
    tool2 = load_tool(tooldata, loadingContext)
Example #9
 def _load_downloaded_workflow(self):
     # Turn down cwltool and rdflib logging
     logging.getLogger("cwltool").setLevel(logging.ERROR)
     logging.getLogger("rdflib.term").setLevel(logging.ERROR)
     context = LoadingContext({"construct_tool_object": default_make_tool,
                               "resolver": tool_resolver,
                               "disable_js_validation": True})
     context.strict = False
     tool_path = self._get_tool_path()
     return load_tool(tool_path, context)
Example #10
def load_cwl(fname):
    """Load and validate CWL file using cwltool
    """
    logger.debug('Loading CWL file "{}"'.format(fname))
    # Fetching, preprocessing and validating cwl

    # Older versions of cwltool
    if legacy_cwltool:
        try:
            (document_loader, workflowobj, uri) = fetch_document(fname)
            (document_loader, _, processobj, metadata, uri) = \
                validate_document(document_loader, workflowobj, uri)
        except TypeError:
            from cwltool.context import LoadingContext, getdefault
            from cwltool import workflow
            from cwltool.resolver import tool_resolver
            from cwltool.load_tool import resolve_tool_uri

            loadingContext = LoadingContext()
            loadingContext.construct_tool_object = getdefault(
                loadingContext.construct_tool_object,
                workflow.default_make_tool)
            loadingContext.resolver = getdefault(loadingContext.resolver,
                                                 tool_resolver)

            uri, tool_file_uri = resolve_tool_uri(
                fname,
                resolver=loadingContext.resolver,
                fetcher_constructor=loadingContext.fetcher_constructor)

            document_loader, workflowobj, uri = fetch_document(
                uri,
                resolver=loadingContext.resolver,
                fetcher_constructor=loadingContext.fetcher_constructor)
            document_loader, avsc_names, processobj, metadata, uri = \
                validate_document(
                    document_loader, workflowobj, uri,
                    loadingContext.overrides_list, {},
                    enable_dev=loadingContext.enable_dev,
                    strict=loadingContext.strict,
                    preprocess_only=False,
                    fetcher_constructor=loadingContext.fetcher_constructor,
                    skip_schemas=False,
                    do_validate=loadingContext.do_validate)
    # Recent versions of cwltool
    else:
        (loading_context, workflowobj, uri) = fetch_document(fname)
        loading_context, uri = resolve_and_validate_document(
            loading_context, workflowobj, uri)
        document_loader = loading_context.loader
        processobj = workflowobj
        metadata = loading_context.metadata

    return document_loader, processobj, metadata, uri
Example #11
def test_checklink_outputSource():
    """Is outputSource resolved correctly independent of value of do_validate."""
    outsrc = norm(Path(get_data(
        "tests/wf/1st-workflow.cwl")).as_uri()) + "#argument/classfile"

    loadingContext = LoadingContext({"do_validate": True})
    tool = load_tool(get_data("tests/wf/1st-workflow.cwl"), loadingContext)
    assert norm(tool.tool["outputs"][0]["outputSource"]) == outsrc

    loadingContext = LoadingContext({"do_validate": False})
    tool = load_tool(get_data("tests/wf/1st-workflow.cwl"), loadingContext)
    assert norm(tool.tool["outputs"][0]["outputSource"]) == outsrc
Example #12
def test_get_subgraph_long_out_form() -> None:
    """Compare subgraphs generatation when 'out' is in the long form."""
    loading_context = LoadingContext({"construct_tool_object": default_make_tool})
    wf = Path(get_data("tests/subgraph/1432.cwl")).as_uri()
    loading_context.do_update = False
    tool = load_tool(wf, loading_context)

    sg = Path(get_data("tests/")).as_uri()

    assert isinstance(tool, Workflow)
    extracted = get_subgraph([wf + "#step2"], tool, loading_context)
    with open(get_data("tests/subgraph/extract_step2_1432.json")) as f:
        assert json.load(f) == clean(convert_to_dict(extracted), sg)
Example #13
def test_commandLineTool_job_tmpdir_prefix(tmp_path: Path) -> None:
    """Test that non-docker enabled CommandLineTool respects temp directory directives."""
    loading_context = LoadingContext({
        "metadata": {
            "cwlVersion": INTERNAL_VERSION,
            "http://commonwl.org/cwltool#original_cwlVersion":
            INTERNAL_VERSION,
        }
    })
    clt = CommandLineTool(
        cast(
            CommentedMap,
            cmap({
                "cwlVersion": INTERNAL_VERSION,
                "class": "CommandLineTool",
                "inputs": [],
                "outputs": [],
                "requirements": [],
            }),
        ),
        loading_context,
    )
    tmpdir_prefix = str(tmp_path / "1")
    tmp_outdir_prefix = str(tmp_path / "2")
    runtime_context = RuntimeContext({
        "tmpdir_prefix": tmpdir_prefix,
        "tmp_outdir_prefix": tmp_outdir_prefix,
    })
    job = next(clt.job({}, None, runtime_context))
    assert isinstance(job, JobBase)
    assert job.stagedir and job.stagedir.startswith(tmpdir_prefix)
    assert job.tmpdir and job.tmpdir.startswith(tmpdir_prefix)
    assert job.outdir and job.outdir.startswith(tmp_outdir_prefix)
Example #14
def test_load_graph_fragment_from_packed() -> None:
    """Loading a fragment from packed with update."""
    loadingContext = LoadingContext()
    uri = Path(
        get_data("tests/wf/packed-with-loadlisting.cwl")).as_uri() + "#main"
    try:
        with open(get_data("cwltool/extensions.yml")) as res:
            use_custom_schema("v1.0", "http://commonwl.org/cwltool",
                              res.read())

        # The updater transforms LoadListingRequirement from an
        # extension (in v1.0) to a core feature (in v1.1), but there
        # was a bug where loading a specific fragment from a packed
        # workflow would get the un-updated document. This recreates
        # that case and asserts that we are using the updated
        # document like we should.

        tool = load_tool(uri, loadingContext)

        assert tool.tool["requirements"] == [{
            "class": "LoadListingRequirement",
            "loadListing": "no_listing"
        }]
    finally:
        use_standard_schema("v1.0")
Example #15
def get_file_dependencies_obj(cwl_obj, basedir):
    """Return a dictionary which contains the CWL workflow file dependencies.

    :param cwl_obj: A CWL tool or job which might contain file dependencies.
    :param basedir: Workflow base dir.
    :returns: A dictionary composed of valid CWL file dependencies.
    """
    # Load the document
    loading_context = LoadingContext()
    document_loader, workflow_obj, uri = fetch_document(
        cwl_obj,
        resolver=loading_context.resolver,
        fetcher_constructor=loading_context.fetcher_constructor)
    in_memory_buffer = io.StringIO() if PY3 else io.BytesIO()
    # Get dependencies
    printdeps(workflow_obj,
              document_loader,
              in_memory_buffer,
              'primary',
              uri,
              basedir=basedir)
    file_dependencies_obj = yaml.load(in_memory_buffer.getvalue(),
                                      Loader=yaml.FullLoader)
    in_memory_buffer.close()
    return file_dependencies_obj
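
A minimal usage sketch for get_file_dependencies_obj; the workflow path is a placeholder, and the structure of the returned dictionary is whatever cwltool's printdeps reports for that document:

import os

# Hypothetical usage of the get_file_dependencies_obj helper defined above.
workflow_path = "workflow.cwl"  # placeholder
deps = get_file_dependencies_obj(workflow_path, os.path.dirname(os.path.abspath(workflow_path)))
print(deps)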
Example #16
def test_argparser_without_doc() -> None:
    """The `desription` field is None if `doc` field is not provided."""
    loadingContext = LoadingContext()
    tool = load_tool(get_data("tests/without_doc.cwl"), loadingContext)
    p = argparse.ArgumentParser()
    parser = generate_parser(p, tool, {}, [], False)
    assert parser.description is None
Example #17
def load_tool(
        argsworkflow,  # type: Union[Text, Dict[Text, Any]]
        makeTool,  # type: Callable[..., Process]
        kwargs=None,  # type: Dict
        enable_dev=False,  # type: bool
        strict=False,  # type: bool
        resolver=None,  # type: Callable[[Loader, Union[Text, Dict[Text, Any]]], Text]
        fetcher_constructor=None,  # type: FetcherConstructorType
        overrides=None):
    # type: (...) -> Process
    uri, tool_file_uri = resolve_tool_uri(
        argsworkflow,
        resolver=resolver,
        fetcher_constructor=fetcher_constructor)

    document_loader, workflowobj, uri = fetch_document(
        uri, resolver=resolver, fetcher_constructor=fetcher_constructor)

    document_loader, avsc_names, processobj, metadata, uri \
        = validate_document(document_loader, workflowobj, uri,
                            enable_dev=enable_dev,
                            strict=strict,
                            fetcher_constructor=fetcher_constructor,
                            overrides=overrides,
                            skip_schemas=kwargs.get('skip_schemas', True) if kwargs else True,
                            metadata=kwargs.get('metadata', None) if kwargs else None)
    return make_tool(document_loader, avsc_names, metadata, uri,
                     LoadingContext())
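
Example #17 shows the signature from before LoadingContext consolidated these keyword arguments. For comparison, a rough sketch of the equivalent load with the newer API used throughout the other examples; the workflow path is a placeholder:

# Rough modern equivalent of the legacy load_tool call above.
from cwltool.context import LoadingContext
from cwltool.load_tool import load_tool
from cwltool.resolver import tool_resolver
from cwltool.workflow import default_make_tool

loading_context = LoadingContext({"construct_tool_object": default_make_tool})
loading_context.resolver = tool_resolver
tool = load_tool("workflow.cwl", loading_context)  # placeholder path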
Example #18
def main():
    parser = arg_parser()
    parsed_args = parser.parse_args(sys.argv[1:])

    # Load the requested parsl configuration
    if parsed_args.parsl == 'cori':
        parsl.load(cori_regular_config)
    elif parsed_args.parsl == 'cori-debug':
        parsl.load(cori_debug_config)
    else:
        parsl.load(threads_config)

    # Trigger the argparse message if the cwl file is missing
    # Otherwise cwltool will use the default argparser
    if not parsed_args.workflow:
        if os.path.isfile("CWLFile"):
            setattr(parsed_args, "workflow", "CWLFile")
        else:
            _logger.error("")
            _logger.error("CWL document required, no input file was provided")
            parser.print_help()
            sys.exit(1)
    elif not parsed_args.basedir:
        _logger.error("")
        _logger.error("Basedir is required for storing itermediate results")
        parser.print_help()
        sys.exit(1)

    rc = RuntimeContext(vars(parsed_args))
    rc.shifter = False
    parsed_args.__dict__['parallel'] = True

    rc.tmpdir_prefix = rc.basedir + '/tmp/tmp'
    rc.tmp_outdir_prefix = rc.basedir + '/out/out'  # type: Text
    if parsed_args.shifter:
        rc.shifter = True
        rc.docker_outdir = '/spooldir'
        rc.docker_stagedir = rc.basedir + '/stage'
        rc.docker_tmpdir = '/tmpdir'

    lc = LoadingContext(vars(parsed_args))
    lc.construct_tool_object = customMakeTool

    sys.exit(
        cwltool.main.main(args=parsed_args,
                          loadingContext=lc,
                          runtimeContext=rc))
Example #19
def test_packed_workflow_execution(wf_path: str, job_path: str,
                                   namespaced: bool,
                                   tmpdir: py.path.local) -> None:
    loadingContext = LoadingContext()
    loadingContext.resolver = tool_resolver
    loadingContext, workflowobj, uri = fetch_document(get_data(wf_path),
                                                      loadingContext)
    loadingContext.do_update = False
    loadingContext, uri = resolve_and_validate_document(
        loadingContext, workflowobj, uri)
    loader = loadingContext.loader
    assert loader
    loader.resolve_ref(uri)[0]
    packed = json.loads(print_pack(loadingContext, uri))

    assert not namespaced or "$namespaces" in packed

    wf_packed_handle, wf_packed_path = tempfile.mkstemp()
    with open(wf_packed_path, "w") as temp_file:
        json.dump(packed, temp_file)

    normal_output = StringIO()
    packed_output = StringIO()

    normal_params = [
        "--outdir",
        str(tmpdir),
        get_data(wf_path),
        get_data(job_path)
    ]
    packed_params = [
        "--outdir",
        str(tmpdir),
        "--debug",
        wf_packed_path,
        get_data(job_path),
    ]

    assert main(normal_params, stdout=normal_output) == 0
    assert main(packed_params, stdout=packed_output) == 0

    assert json.loads(packed_output.getvalue()) == json.loads(
        normal_output.getvalue())

    os.close(wf_packed_handle)
    os.remove(wf_packed_path)
Example #20
def test_check_version():
    """
    It is permitted to load without updating, but not execute.

    Attempting to execute without updating to the internal version should raise an error.
    """
    joborder = {"inp": "abc"}
    loadingContext = LoadingContext({"do_update": True})
    tool = load_tool(get_data("tests/echo.cwl"), loadingContext)
    for j in tool.job(joborder, None, RuntimeContext()):
        pass

    loadingContext = LoadingContext({"do_update": False})
    tool = load_tool(get_data("tests/echo.cwl"), loadingContext)
    with pytest.raises(WorkflowException):
        for j in tool.job(joborder, None, RuntimeContext()):
            pass
Example #21
def test_get_step() -> None:
    loading_context = LoadingContext({"construct_tool_object": default_make_tool})
    wf = Path(get_data("tests/subgraph/count-lines1-wf.cwl")).as_uri()
    loading_context.do_update = False
    tool = load_tool(wf, loading_context)
    assert isinstance(tool, Workflow)

    sg = Path(get_data("tests/subgraph")).as_uri()

    for a in (
        "step1",
        "step2",
        "step3",
        "step4",
        "step5",
    ):
        extracted = get_step(tool, wf + "#" + a, loading_context)
        with open(get_data("tests/subgraph/single_" + a + ".json")) as f:
            assert json.load(f) == clean(convert_to_dict(extracted), sg)
Example #22
def cwl_graph_generate(cwl_path: str):
    if cwl_path[:5] != "file:":
        cwl_path = f"file://{path.abspath(cwl_path)}"

    document_loader, workflowobj, uri = fetch_document(cwl_path)
    document_loader, avsc_names, processobj, metadata, uri = validate_document(
        document_loader, workflowobj, uri, strict=False, preprocess_only=True)
    loadingContext = LoadingContext()
    tool = make_tool(document_loader, avsc_names, metadata, uri,
                     loadingContext)
    cwl_viewer_dot(tool)
Example #23
def test_argparse_append_with_default(
    job_order: List[str], expected_values: List[str]
) -> None:
    """The appended arguments must not include the default. But if no appended argument, then the default is used."""
    loadingContext = LoadingContext()
    tool = load_tool(get_data("tests/default_values_list.cwl"), loadingContext)
    toolparser = generate_parser(
        argparse.ArgumentParser(prog="test"), tool, {}, [], False
    )
    cmd_line = vars(toolparser.parse_args(job_order))
    file_paths = list(cmd_line["file_paths"])
    assert expected_values == file_paths
Example #24
def _has_relax_path_checks_flag():
    """Return True if cwltool uses a flag to control path checks.

    Old cwltool uses the module global below to control whether
    it's strict about path checks. New versions use an attribute
    of LoadingContext.

    Once the version of cwltool required is new enough, we can remove
    this function and simplify the conditionals where it's used.
    """

    lc = LoadingContext()
    return hasattr(lc, "relax_path_checks")
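
A sketch of how such a feature probe might be used by a caller; the fallback branch is left abstract because the legacy module-global differs between cwltool releases:

# Hypothetical caller; assumes the probe function above and
# "from cwltool.context import LoadingContext" are in scope.
loading_context = LoadingContext()
if _has_relax_path_checks_flag():
    loading_context.relax_path_checks = True
else:
    # Fall back to the legacy module-global toggle (not shown here).
    pass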
Example #25
def test_load_graph_fragment():
    """Reloading from a dictionary without a cwlVersion."""
    loadingContext = LoadingContext()
    uri = Path(get_data("tests/wf/scatter-wf4.cwl")).as_uri() + "#main"
    tool = load_tool(uri, loadingContext)

    rs, metadata = tool.doc_loader.resolve_ref(uri)
    # Reload from a dict (in 'rs'), not a URI.  The dict is a fragment
    # of original document and doesn't have cwlVersion set, so test
    # that it correctly looks up the root document to get the
    # cwlVersion.
    tool = load_tool(tool.tool, loadingContext)
    assert tool.metadata["cwlVersion"] == INTERNAL_VERSION
Example #26
 def parsed(self):
     """
     Lazy property to parse CWL on-demand
     :return: The CWL document, parsed into a dict
     """
     if self._parsed is None:
         context = LoadingContext({
             "construct_tool_object": default_make_tool,
             "resolver": tool_resolver,
             "disable_js_validation": True
         })
         self._parsed = load_tool(self.url + '#main', context)
     return self._parsed
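
The parsed property above is a method excerpt; a hypothetical enclosing class, showing only the attributes the snippet relies on (url and the _parsed cache), could look like:

from cwltool.context import LoadingContext
from cwltool.load_tool import load_tool
from cwltool.resolver import tool_resolver
from cwltool.workflow import default_make_tool


class PackedWorkflow:  # hypothetical wrapper class
    def __init__(self, url):
        self.url = url       # URI of the CWL document
        self._parsed = None  # lazily populated by the parsed property

    @property
    def parsed(self):
        """Lazy property to parse CWL on-demand."""
        if self._parsed is None:
            context = LoadingContext({
                "construct_tool_object": default_make_tool,
                "resolver": tool_resolver,
                "disable_js_validation": True,
            })
            self._parsed = load_tool(self.url + "#main", context)
        return self._parsed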
Example #27
    def test_fetcher(self):
        class TestFetcher(schema_salad.ref_resolver.Fetcher):
            def __init__(self, a, b):
                pass

            def fetch_text(self, url):  # type: (unicode) -> unicode
                if url == "baz:bar/foo.cwl":
                    return """
cwlVersion: v1.0
class: CommandLineTool
baseCommand: echo
inputs: []
outputs: []
"""
                else:
                    raise RuntimeError("Not foo.cwl, was %s" % url)

            def check_exists(self, url):  # type: (unicode) -> bool
                if url == "baz:bar/foo.cwl":
                    return True
                else:
                    return False

            def urljoin(self, base, url):
                urlsp = urllib.parse.urlsplit(url)
                if urlsp.scheme:
                    return url
                basesp = urllib.parse.urlsplit(base)

                if basesp.scheme == "keep":
                    return base + "/" + url
                return urllib.parse.urljoin(base, url)

        def test_resolver(d, a):
            if a.startswith("baz:bar/"):
                return a
            else:
                return "baz:bar/" + a

        loadingContext = LoadingContext({
            "construct_tool_object": default_make_tool,
            "resolver": test_resolver,
            "fetcher_constructor": TestFetcher
        })

        load_tool("foo.cwl", loadingContext)

        self.assertEquals(
            0,
            main(["--print-pre", "--debug", "foo.cwl"],
                 loadingContext=loadingContext))
Example #28
def load_job(workflow, job, cwl_args=None, cwd=None):
    """
    Tries to load a JSON object from "job". If that fails, assumes that
    "job" has already been parsed into an object. Initializes the loaded
    "job_data" based on the "workflow" (mostly for setting defaults from the
    workflow inputs; never fails). "cwl_args" can be used to update parameters
    for the loading and runtime contexts.

    If "job" was file, resolves relative paths based on the job file location.
    If "job" was already parsed into Object, resolves relative paths based on
    "cwd". If "cwd" was None uses "inputs_folder" value from "cwl_args" or
    its default value returned from "get_default_cwl_args" function.

    Checking links after relative paths are resolved is disabled (checklinks
    is set to False in both places). This prevents schema-salad from raising
    an exception in those cases when an input file will be created from the
    provided content during workflow execution.

    Always returns a CommentedMap.
    """

    cwl_args = {} if cwl_args is None else cwl_args

    default_cwl_args = get_default_cwl_args(cwl_args)
    cwd = default_cwl_args["inputs_folder"] if cwd is None else cwd

    loading_context = setup_loadingContext(
        LoadingContext(default_cwl_args), RuntimeContext(default_cwl_args),
        argparse.Namespace(**default_cwl_args))

    job_copy = deepcopy(job)

    try:
        job_data, _ = loading_context.loader.resolve_ref(job_copy,
                                                         checklinks=False)
    except (FileNotFoundError, SchemaSaladException) as err:
        job_data = load_yaml(json.dumps(job_copy))
        job_data["id"] = file_uri(cwd) + "/"
        job_data, metadata = loading_context.loader.resolve_all(
            job_data, job_data["id"], checklinks=False)

    initialized_job_data = init_job_order(
        job_order_object=job_data,
        args=argparse.Namespace(**default_cwl_args),
        process=slow_cwl_load(workflow=workflow, cwl_args=default_cwl_args),
        loader=loading_context.loader,
        stdout=os.devnull)

    return initialized_job_data
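
A hypothetical call to load_job; the paths are placeholders, and inputs_folder is the cwl_args key the docstring mentions for resolving relative paths when the job is already a parsed object:

# Hypothetical usage of the load_job helper defined above; paths are placeholders.
initialized_job = load_job(
    workflow="/data/workflows/analysis.cwl",
    job="/data/jobs/analysis_inputs.yaml",
    cwl_args={"inputs_folder": "/data/inputs"},
)
print(initialized_job)  # a CommentedMap with workflow input defaults applied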
Example #29
def test_get_subgraph():
    loadingContext = LoadingContext({"construct_tool_object": default_make_tool})
    wf = norm(Path(get_data("tests/subgraph/count-lines1-wf.cwl")).as_uri())
    loadingContext.do_update = False
    tool = load_tool(wf, loadingContext)

    sg = norm(Path(get_data("tests/subgraph")).as_uri())

    def clean(val):
        if isinstance(val, string_types):
            if val.startswith(sg):
                return val[len(sg)+1:]
        if isinstance(val, dict):
            return {k: clean(v) for k,v in val.items()}
        if isinstance(val, list):
            return [clean(v) for v in val]
        return val

    for a in ("file1", "file2", "file3", "count_output",
              "output3", "output4", "output5",
              "step1", "step2", "step3", "step4", "step5"):
        extracted = get_subgraph([wf+"#"+a], tool)
        with open(get_data("tests/subgraph/extract_"+a+".json")) as f:
            assert json.load(f) == clean(convert_to_dict(extracted))
Example #30
def test_cuda_eval_resource_max() -> None:
    with open(get_data("cwltool/extensions-v1.1.yml")) as res:
        use_custom_schema("v1.2", "http://commonwl.org/cwltool", res.read())

    joborder = {}  # type: CWLObjectType
    loadingContext = LoadingContext({"do_update": True})
    runtime_context = RuntimeContext({})

    tool = load_tool(get_data("tests/wf/nvidia-smi-max.cwl"), loadingContext)
    builder = _makebuilder(tool.requirements[0])
    builder.job = joborder

    resources = tool.evalResources(builder, runtime_context)

    assert resources["cudaDeviceCount"] == 4
Example #31
def test_fetcher() -> None:
    def test_resolver(d: Any, a: str) -> str:
        if a.startswith("baz:bar/"):
            return a
        return "baz:bar/" + a

    loadingContext = LoadingContext({
        "construct_tool_object": default_make_tool,
        "resolver": test_resolver,
        "fetcher_constructor": CWLTestFetcher,
    })

    load_tool("foo.cwl", loadingContext)

    assert (main(["--print-pre", "--debug", "foo.cwl"],
                 loadingContext=loadingContext) == 0)
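
Example #31 relies on a CWLTestFetcher class that is not shown in the snippet. A minimal sketch consistent with the TestFetcher defined in Example #27 (the base class and the baz: scheme handling are taken from that example; treat them as assumptions here):

import urllib.parse

import schema_salad.ref_resolver


class CWLTestFetcher(schema_salad.ref_resolver.Fetcher):
    """Hypothetical fetcher that serves a single in-memory tool for baz:bar/foo.cwl."""

    def __init__(self, a, b):
        pass

    def fetch_text(self, url):
        if url == "baz:bar/foo.cwl":
            return (
                "cwlVersion: v1.0\n"
                "class: CommandLineTool\n"
                "baseCommand: echo\n"
                "inputs: []\n"
                "outputs: []\n"
            )
        raise RuntimeError("Not foo.cwl, was %s" % url)

    def check_exists(self, url):
        return url == "baz:bar/foo.cwl"

    def urljoin(self, base, url):
        if urllib.parse.urlsplit(url).scheme:
            return url
        return urllib.parse.urljoin(base, url)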
Example #32
def test_default_docker_warning(mocker: Any) -> None:
    """Check warning when default docker Container is used on Windows."""
    mocker.patch("cwltool.command_line_tool._logger")

    tool = command_line_tool.CommandLineTool(
        cast(CommentedMap, cmap({"inputs": [], "outputs": []})), LoadingContext()
    )
    tool.make_job_runner(
        RuntimeContext({"find_default_container": lambda x: "frolvlad/alpine-bash"})
    )

    command_line_tool._logger.warning.assert_called_with(  # type: ignore
        command_line_tool.DEFAULT_CONTAINER_MSG,
        windows_default_container_id,
        windows_default_container_id,
    )