def test_packed_workflow_execution(wf_path, job_path, namespaced, tmpdir):
    loadingContext = LoadingContext()
    loadingContext.resolver = tool_resolver
    loadingContext, workflowobj, uri = fetch_document(
        get_data(wf_path), loadingContext)
    loadingContext.do_update = False
    loadingContext, uri = resolve_and_validate_document(
        loadingContext, workflowobj, uri)
    processobj = loadingContext.loader.resolve_ref(uri)[0]
    packed = json.loads(print_pack(loadingContext.loader, processobj, uri, loadingContext.metadata))

    assert not namespaced or "$namespaces" in packed

    wf_packed_handle, wf_packed_path = tempfile.mkstemp()
    with open(wf_packed_path, 'w') as temp_file:
        json.dump(packed, temp_file)

    normal_output = StringIO()
    packed_output = StringIO()

    normal_params = ['--outdir', str(tmpdir), get_data(wf_path), get_data(job_path)]
    packed_params = ['--outdir', str(tmpdir), '--debug', wf_packed_path, get_data(job_path)]

    assert main(normal_params, stdout=normal_output) == 0
    assert main(packed_params, stdout=packed_output) == 0

    assert json.loads(packed_output.getvalue()) == json.loads(normal_output.getvalue())

    os.close(wf_packed_handle)
    os.remove(wf_packed_path)
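The test above takes wf_path, job_path, and namespaced as parameters, so in the full test module it is presumably driven by pytest parametrization that this excerpt omits. A minimal sketch of what that decorator could look like follows; the workflow and job paths are illustrative assumptions, not the real entries from the upstream test suite.

import pytest

# Hypothetical parametrization: the paths below are placeholders, not actual test data.
@pytest.mark.parametrize(
    "wf_path,job_path,namespaced",
    [
        ("tests/wf/example-wf.cwl", "tests/wf/example-job.json", True),
        ("tests/wf/example-wf-nons.cwl", "tests/wf/example-job.json", False),
    ],
)
def test_packed_workflow_execution(wf_path, job_path, namespaced, tmpdir):
    ...  # body as defined above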
Example #2
def test_get_subgraph() -> None:
    """Compare known correct subgraphs to generated subgraphs."""
    loading_context = LoadingContext({"construct_tool_object": default_make_tool})
    wf = Path(get_data("tests/subgraph/count-lines1-wf.cwl")).as_uri()
    loading_context.do_update = False
    tool = load_tool(wf, loading_context)

    sg = Path(get_data("tests/subgraph")).as_uri()

    for a in (
        "file1",
        "file2",
        "file3",
        "count_output",
        "output3",
        "output4",
        "output5",
        "step1",
        "step2",
        "step3",
        "step4",
        "step5",
    ):
        assert isinstance(tool, Workflow)
        extracted = get_subgraph([wf + "#" + a], tool, loading_context)
        with open(get_data("tests/subgraph/extract_" + a + ".json")) as f:
            assert json.load(f) == clean(convert_to_dict(extracted), sg)
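The test above relies on a module-level clean() helper that is not part of this excerpt (the next example also calls a norm() helper). The later examples define clean() inline with a single argument, so the two-argument form used here can be reconstructed along the following lines; the signatures are inferred, not copied from the original module, and norm() is shown as a plain pass-through stand-in.

from typing import Any


def clean(val: Any, sg: str) -> Any:
    """Assumed helper: strip the test-directory URI prefix ``sg`` from every string in ``val``."""
    if isinstance(val, str) and val.startswith(sg):
        return val[len(sg) + 1 :]
    if isinstance(val, dict):
        return {k: clean(v, sg) for k, v in val.items()}
    if isinstance(val, list):
        return [clean(v, sg) for v in val]
    return val


def norm(uri: str) -> str:
    """Assumed helper: normalize a file URI before comparison (a no-op stand-in here)."""
    return uri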
Example #3
def test_get_subgraph():
    loadingContext = LoadingContext({"construct_tool_object": default_make_tool})
    wf = norm(Path(get_data("tests/subgraph/count-lines1-wf.cwl")).as_uri())
    loadingContext.do_update = False
    tool = load_tool(wf, loadingContext)

    sg = norm(Path(get_data("tests/subgraph")).as_uri())

    def clean(val):
        if isinstance(val, str):
            if val.startswith(sg):
                return val[len(sg) + 1 :]
        if isinstance(val, dict):
            return {k: clean(v) for k, v in val.items()}
        if isinstance(val, list):
            return [clean(v) for v in val]
        return val

    for a in (
        "file1",
        "file2",
        "file3",
        "count_output",
        "output3",
        "output4",
        "output5",
        "step1",
        "step2",
        "step3",
        "step4",
        "step5",
    ):
        extracted = get_subgraph([wf + "#" + a], tool)
        with open(get_data("tests/subgraph/extract_" + a + ".json")) as f:
            assert json.load(f) == clean(convert_to_dict(extracted))
Example #4
def test_get_step() -> None:
    loadingContext = LoadingContext(
        {"construct_tool_object": default_make_tool})
    wf = Path(get_data("tests/subgraph/count-lines1-wf.cwl")).as_uri()
    loadingContext.do_update = False
    tool = load_tool(wf, loadingContext)
    assert isinstance(tool, Workflow)

    sg = Path(get_data("tests/subgraph")).as_uri()

    def clean(val: Any) -> Any:
        if isinstance(val, str):
            if val.startswith(sg):
                return val[len(sg) + 1:]
        if isinstance(val, dict):
            return {k: clean(v) for k, v in val.items()}
        if isinstance(val, list):
            return [clean(v) for v in val]
        return val

    for a in (
            "step1",
            "step2",
            "step3",
            "step4",
            "step5",
    ):
        extracted = get_step(tool, wf + "#" + a)
        with open(get_data("tests/subgraph/single_" + a + ".json")) as f:
            assert json.load(f) == clean(convert_to_dict(extracted))
Example #5
def test_packed_workflow_execution(wf_path, job_path, namespaced, tmpdir):
    loadingContext = LoadingContext()
    loadingContext.resolver = tool_resolver
    loadingContext, workflowobj, uri = fetch_document(
        get_data(wf_path), loadingContext)
    loadingContext.do_update = False
    loadingContext, uri = resolve_and_validate_document(
        loadingContext, workflowobj, uri)
    processobj = loadingContext.loader.resolve_ref(uri)[0]
    packed = json.loads(print_pack(loadingContext.loader, processobj, uri, loadingContext.metadata))

    assert not namespaced or "$namespaces" in packed

    wf_packed_handle, wf_packed_path = tempfile.mkstemp()
    with open(wf_packed_path, 'w') as temp_file:
        json.dump(packed, temp_file)

    normal_output = StringIO()
    packed_output = StringIO()

    normal_params = ['--outdir', str(tmpdir), get_data(wf_path), get_data(job_path)]
    packed_params = ['--outdir', str(tmpdir), '--debug', wf_packed_path, get_data(job_path)]

    assert main(normal_params, stdout=normal_output) == 0
    assert main(packed_params, stdout=packed_output) == 0

    assert json.loads(packed_output.getvalue()) == json.loads(normal_output.getvalue())

    os.close(wf_packed_handle)
    os.remove(wf_packed_path)
Example #6
def test_get_subgraph_long_out_form() -> None:
    """Compare subgraphs generatation when 'out' is in the long form."""
    loading_context = LoadingContext({"construct_tool_object": default_make_tool})
    wf = Path(get_data("tests/subgraph/1432.cwl")).as_uri()
    loading_context.do_update = False
    tool = load_tool(wf, loading_context)

    sg = Path(get_data("tests/")).as_uri()

    assert isinstance(tool, Workflow)
    extracted = get_subgraph([wf + "#step2"], tool, loading_context)
    with open(get_data("tests/subgraph/extract_step2_1432.json")) as f:
        assert json.load(f) == clean(convert_to_dict(extracted), sg)
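For reference, the 1432.cwl fixture exercises the long spelling of a workflow step's out list; the two spellings CWL accepts differ roughly as sketched in the comments below (this is not the fixture's actual content).

# Short form of a step's outputs:
#     out: [output]
# Long form, as exercised by this test:
#     out:
#       - id: output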
Example #7
def test_packed_workflow_execution(wf_path: str, job_path: str,
                                   namespaced: bool,
                                   tmpdir: py.path.local) -> None:
    loadingContext = LoadingContext()
    loadingContext.resolver = tool_resolver
    loadingContext, workflowobj, uri = fetch_document(get_data(wf_path),
                                                      loadingContext)
    loadingContext.do_update = False
    loadingContext, uri = resolve_and_validate_document(
        loadingContext, workflowobj, uri)
    loader = loadingContext.loader
    assert loader
    loader.resolve_ref(uri)[0]
    packed = json.loads(print_pack(loadingContext, uri))

    assert not namespaced or "$namespaces" in packed

    wf_packed_handle, wf_packed_path = tempfile.mkstemp()
    with open(wf_packed_path, "w") as temp_file:
        json.dump(packed, temp_file)

    normal_output = StringIO()
    packed_output = StringIO()

    normal_params = [
        "--outdir",
        str(tmpdir),
        get_data(wf_path),
        get_data(job_path)
    ]
    packed_params = [
        "--outdir",
        str(tmpdir),
        "--debug",
        wf_packed_path,
        get_data(job_path),
    ]

    assert main(normal_params, stdout=normal_output) == 0
    assert main(packed_params, stdout=packed_output) == 0

    assert json.loads(packed_output.getvalue()) == json.loads(
        normal_output.getvalue())

    os.close(wf_packed_handle)
    os.remove(wf_packed_path)
Example #8
def test_get_step() -> None:
    loading_context = LoadingContext({"construct_tool_object": default_make_tool})
    wf = Path(get_data("tests/subgraph/count-lines1-wf.cwl")).as_uri()
    loading_context.do_update = False
    tool = load_tool(wf, loading_context)
    assert isinstance(tool, Workflow)

    sg = Path(get_data("tests/subgraph")).as_uri()

    for a in (
        "step1",
        "step2",
        "step3",
        "step4",
        "step5",
    ):
        extracted = get_step(tool, wf + "#" + a, loading_context)
        with open(get_data("tests/subgraph/single_" + a + ".json")) as f:
            assert json.load(f) == clean(convert_to_dict(extracted), sg)
Example #9
def test_get_subgraph():
    loadingContext = LoadingContext({"construct_tool_object": default_make_tool})
    wf = norm(Path(get_data("tests/subgraph/count-lines1-wf.cwl")).as_uri())
    loadingContext.do_update = False
    tool = load_tool(wf, loadingContext)

    sg = norm(Path(get_data("tests/subgraph")).as_uri())

    def clean(val):
        if isinstance(val, string_types):
            if val.startswith(sg):
                return val[len(sg)+1:]
        if isinstance(val, dict):
            return {k: clean(v) for k,v in val.items()}
        if isinstance(val, list):
            return [clean(v) for v in val]
        return val

    for a in ("file1", "file2", "file3", "count_output",
              "output3", "output4", "output5",
              "step1", "step2", "step3", "step4", "step5"):
        extracted = get_subgraph([wf+"#"+a], tool)
        with open(get_data("tests/subgraph/extract_"+a+".json")) as f:
            assert json.load(f) == clean(convert_to_dict(extracted))
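This older variant of test_get_subgraph is written to run on Python 2 and 3; beyond the imports shared with the other examples, it needs the six compatibility shim for its string check (an assumption based on the string_types reference above).

from six import string_types  # Python 2/3 compatible string check used by clean() above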
Example #10
def run(args: argparse.Namespace) -> int:
    """Primary processing loop."""
    imports: Set[str] = set()
    for document in args.inputs:
        _logger.info("Processing %s.", document)
        with open(document) as doc_handle:
            result = yaml.main.round_trip_load(doc_handle,
                                               preserve_quotes=True)
        add_lc_filename(result, document)
        version = result.get("cwlVersion", None)
        if version in ("draft-3", "cwl:draft-3", "v1.0", "v1.1"):
            result = cwlupgrader.upgrade_document(result, False, False,
                                                  args.dir, imports)
        else:
            _logger.error(
                "Sorry, %s in %s is not a CWL version supported by this tool.",
                version,
                document,
            )
            return -1
        uri = Path(document).resolve().as_uri()
        if not args.no_expression_refactoring:
            refactored, _ = cwl_v1_2_expression_refactor.traverse(
                load_document_by_yaml(result, uri),
                not args.etools,
                False,
                args.skip_some1,
                args.skip_some2,
            )
            if not isinstance(refactored, MutableSequence):
                result = save(
                    refactored,
                    base_url=refactored.loadingOptions.fileuri
                    if refactored.loadingOptions.fileuri else "",
                )
            #   ^^ Setting the base_url and keeping the default value
            #      for relative_uris=True means that the IDs in the generated
            #      JSON/YAML are kept clean of the path to the input document
            else:
                result = [
                    save(result_item,
                         base_url=result_item.loadingOptions.fileuri)
                    for result_item in refactored
                ]
        if "$graph" in result:
            packed = result
        else:
            with tempfile.TemporaryDirectory() as tmpdirname:
                path = Path(tmpdirname) / Path(document).name
                with open(path, "w") as handle:
                    yaml.main.round_trip_dump(result, handle)
                # TODO replace the cwltool based packing with a parser_v1_2 based packer
                runtimeContext = RuntimeContext()
                loadingContext = LoadingContext()
                use_standard_schema("v1.2")
                # loadingContext.construct_tool_object = workflow.default_make_tool
                # loadingContext.resolver = tool_resolver
                loadingContext.do_update = False
                uri, tool_file_uri = resolve_tool_uri(
                    str(path),
                    resolver=loadingContext.resolver,
                    fetcher_constructor=loadingContext.fetcher_constructor,
                )
                loadingContext, workflowobj, uri = fetch_document(
                    uri, loadingContext)
                loadingContext, uri = resolve_and_validate_document(
                    loadingContext,
                    workflowobj,
                    uri,
                    preprocess_only=True,
                    skip_schemas=True,
                )
                packed = print_pack(loadingContext, uri)
        output = Path(args.dir) / Path(document).name
        with open(output, "w", encoding="utf-8") as output_filehandle:
            output_filehandle.write(packed)
    return 0
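run() reads inputs, dir, etools, no_expression_refactoring, skip_some1, and skip_some2 from the argparse.Namespace it receives. A minimal, hypothetical entry point that builds such a namespace could look like the sketch below; the option names are inferred from those attribute accesses, and the defaults and help strings are assumptions, not the tool's actual command line.

import argparse
import sys


def main() -> int:
    """Hypothetical CLI wrapper around run(); options mirror the attributes run() reads."""
    parser = argparse.ArgumentParser(description="Upgrade, refactor, and pack CWL documents.")
    parser.add_argument("inputs", nargs="+", help="CWL document(s) to process")
    parser.add_argument("--dir", default=".", help="directory to write the packed documents into")
    parser.add_argument("--etools", action="store_true",
                        help="passed (negated) to cwl_v1_2_expression_refactor.traverse")
    parser.add_argument("--no-expression-refactoring", action="store_true",
                        help="skip the CWL v1.2 expression refactoring step")
    parser.add_argument("--skip-some1", action="store_true")
    parser.add_argument("--skip-some2", action="store_true")
    return run(parser.parse_args())


if __name__ == "__main__":
    sys.exit(main())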