Example #1
def _cwl_workflow_template(inputs, top_level=False):
    """Retrieve CWL inputs shared amongst different workflows.
    """
    ready_inputs = []
    for inp in inputs:
        cur_inp = copy.deepcopy(inp)
        for attr in ["source", "valueFrom"]:
            cur_inp.pop(attr, None)
        if top_level:
            cur_inp = workflow._flatten_nested_input(cur_inp)
        ready_inputs.append(cur_inp)
    return {"class": "Workflow",
            "cwlVersion": "v1.0",
            "hints": [{"class": "DockerRequirement",
                       "dockerPull": "bcbio/bcbio",
                       "dockerImageId": "bcbio/bcbio"}],
            "requirements": [{"class": "EnvVarRequirement",
                              "envDef": [{"envName": "MPLCONFIGDIR", "envValue": "."}]},
                             {"class": "ScatterFeatureRequirement"},
                             {"class": "StepInputExpressionRequirement"},
                             {"class": "SubworkflowFeatureRequirement"},
                             {"class": "InlineJavascriptRequirement"}],
            "inputs": ready_inputs,
            "outputs": [],
            "steps": []}
Example #2
def _cwl_workflow_template(inputs, top_level=False):
    """Retrieve CWL inputs shared amongst different workflows.
    """
    ready_inputs = []
    for inp in inputs:
        cur_inp = copy.deepcopy(inp)
        for attr in ["source", "valueFrom", "wf_duplicate"]:
            cur_inp.pop(attr, None)
        if top_level:
            cur_inp = workflow._flatten_nested_input(cur_inp)
        cur_inp = _clean_record(cur_inp)
        ready_inputs.append(cur_inp)
    return {"class": "Workflow",
            "cwlVersion": "v1.0",
            "hints": [],
            "requirements": [{"class": "EnvVarRequirement",
                              "envDef": [{"envName": "MPLCONFIGDIR", "envValue": "."}]},
                             {"class": "ScatterFeatureRequirement"},
                             {"class": "SubworkflowFeatureRequirement"}],
            "inputs": ready_inputs,
            "outputs": [],
            "steps": []}
Example #3
def _add_inputs_to_tool(inputs, tool, parallel, use_commandline_args=False):
    for i, inp in enumerate(inputs):
        base_id = workflow.get_base_id(inp["id"])
        inp_tool = copy.deepcopy(inp)
        inp_tool["id"] = base_id
        if inp.get("wf_duplicate"):
            inp_tool["id"] += "_toolinput"
        for attr in ["source", "valueFrom", "wf_duplicate"]:
            inp_tool.pop(attr, None)
        # Ensure records and workflow inputs get scattered
        if (_is_scatter_parallel(parallel) and _do_scatter_var(inp, parallel) and
                (workflow.is_cwl_record(inp) or inp.get("wf_duplicate"))):
            inp_tool = workflow._flatten_nested_input(inp_tool)
        if use_commandline_args:
            inp_binding = {"prefix": "%s=" % base_id,
                           "separate": False, "itemSeparator": ";;", "position": i}
            inp_tool = _place_input_binding(inp_tool, inp_binding, parallel)
        else:
            inp_binding = None
        inp_tool = _place_secondary_files(inp_tool, inp_binding)
        inp_tool = _clean_record(inp_tool)
        tool["inputs"].append(inp_tool)
    return tool
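An added note on the command-line path of _add_inputs_to_tool (only taken when use_commandline_args is True): under CWL binding semantics, separate=False glues the prefix onto the value and itemSeparator joins array items, so a binding like the hypothetical one below renders ["a.bam", "b.bam"] as the single argument "files=a.bam;;b.bam".

# Illustrative binding, mirroring what _add_inputs_to_tool builds per input.
example_binding = {"prefix": "files=", "separate": False,
                   "itemSeparator": ";;", "position": 0}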
Example #4
def _write_tool(step_dir, name, inputs, outputs, parallel, image, programs,
                file_estimates, disk, step_cores, samples):
    out_file = os.path.join(step_dir, "%s.cwl" % name)
    resource_cores, mem_gb_per_core = resources.cpu_and_memory(
        (programs or []) + ["default"], samples)
    cores = step_cores if step_cores else resource_cores
    mem_mb_total = int(mem_gb_per_core * cores * 1024)
    bcbio_docker_disk = 1 * 1024  # Minimum requirements for bcbio Docker image
    cwl_res = {
        "class": "ResourceRequirement",
        "coresMin": cores,
        "ramMin": mem_mb_total,
        "outdirMin": bcbio_docker_disk
    }
    docker_image = "bcbio/bcbio" if image == "bcbio" else "quay.io/bcbio/%s" % image
    docker = {
        "class": "DockerRequirement",
        "dockerPull": docker_image,
        "dockerImageId": docker_image
    }
    if file_estimates and disk:
        total_estimate = 0
        for key, multiplier in disk.items():
            if key in file_estimates:
                total_estimate += int(multiplier * file_estimates[key])
        if total_estimate:
            cwl_res["tmpdirMin"] = total_estimate
            cwl_res["outdirMin"] += total_estimate
    out = {
        "class": "CommandLineTool",
        "cwlVersion": "v1.0",
        "baseCommand": ["bcbio_nextgen.py", "runfn", name, "cwl"],
        "requirements": [],
        "hints": [docker, cwl_res],
        "arguments": [],
        "inputs": [],
        "outputs": []
    }
    if programs:

        def resolve_package(p):
            out = {}
            parts = p.split("=")
            if len(parts) == 2:
                out["package"] = parts[0]
                out["version"] = [parts[1]]
            else:
                out["package"] = p
            out["specs"] = [
                "https://anaconda.org/bioconda/%s" % out["package"]
            ]
            return out

        out["hints"].append({
            "class": "SoftwareRequirement",
            "packages": [resolve_package(p) for p in programs]
        })
    # Use JSON for inputs, rather than command line arguments
    # Correctly handles multiple values and batching across CWL runners
    use_commandline_args = False
    out["requirements"] += [{
        "class": "InlineJavascriptRequirement"
    }, {
        "class":
        "InitialWorkDirRequirement",
        "listing": [{
            "entryname": "cwl.inputs.json",
            "entry": "$(JSON.stringify(inputs))"
        }]
    }]
    out["arguments"] += [{
        "position":
        0,
        "valueFrom":
        "sentinel_runtime=cores,$(runtime['cores']),ram,$(runtime['ram'])"
    },
                         "sentinel_parallel=%s" % parallel,
                         "sentinel_outputs=%s" %
                         ",".join([_get_sentinel_val(v) for v in outputs]),
                         "sentinel_inputs=%s" % ",".join([
                             "%s:%s" %
                             (workflow.get_base_id(v["id"]),
                              "record" if workflow.is_cwl_record(v) else "var")
                             for v in inputs
                         ])]
    for i, inp in enumerate(inputs):
        base_id = workflow.get_base_id(inp["id"])
        inp_tool = copy.deepcopy(inp)
        inp_tool["id"] = base_id
        if inp.get("wf_duplicate"):
            inp_tool["id"] += "_toolinput"
        for attr in ["source", "valueFrom", "wf_duplicate"]:
            inp_tool.pop(attr, None)
        if _is_scatter_parallel(parallel) and _do_scatter_var(inp, parallel):
            inp_tool = workflow._flatten_nested_input(inp_tool)
        if use_commandline_args:
            inp_binding = {
                "prefix": "%s=" % base_id,
                "separate": False,
                "itemSeparator": ";;",
                "position": i
            }
            inp_tool = _place_input_binding(inp_tool, inp_binding, parallel)
        else:
            inp_binding = None
        inp_tool = _place_secondary_files(inp_tool, inp_binding)
        inp_tool = _clean_record(inp_tool)
        out["inputs"].append(inp_tool)
    for outp in outputs:
        outp_tool = copy.deepcopy(outp)
        outp_tool = _clean_record(outp_tool)
        outp_tool["id"] = workflow.get_base_id(outp["id"])
        out["outputs"].append(outp_tool)
    with open(out_file, "w") as out_handle:

        def str_presenter(dumper, data):
            if len(data.splitlines()) > 1:  # check for multiline string
                return dumper.represent_scalar('tag:yaml.org,2002:str',
                                               data,
                                               style='|')
            return dumper.represent_scalar('tag:yaml.org,2002:str', data)

        yaml.add_representer(str, str_presenter)
        yaml.dump(out,
                  out_handle,
                  default_flow_style=False,
                  allow_unicode=False)
    return os.path.join("steps", os.path.basename(out_file))
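A worked example of the resource arithmetic in _write_tool, with illustrative numbers: the per-core memory figure from resources.cpu_and_memory is in GB, so ramMin is scaled by the core count and converted to MB.

cores = 16
mem_gb_per_core = 3.5
mem_mb_total = int(mem_gb_per_core * cores * 1024)  # 57344 -> ramMin (MB)
bcbio_docker_disk = 1 * 1024                        # 1024 MB minimum outdirMin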
Example #5
def _write_tool(step_dir, name, inputs, outputs, parallel, image, programs,
                file_estimates, disk, step_cores, samples):
    out_file = os.path.join(step_dir, "%s.cwl" % name)
    resource_cores, mem_gb_per_core = resources.cpu_and_memory((programs or []) + ["default"], samples)
    cores = step_cores if step_cores else resource_cores
    mem_mb_total = int(mem_gb_per_core * cores * 1024)
    bcbio_docker_disk = 1 * 1024  # Minimum requirements for bcbio Docker image
    cwl_res = {"class": "ResourceRequirement",
               "coresMin": cores, "ramMin": mem_mb_total, "outdirMin": bcbio_docker_disk}
    docker_image = "bcbio/bcbio" if image == "bcbio" else "quay.io/bcbio/%s" % image
    docker = {"class": "DockerRequirement", "dockerPull": docker_image, "dockerImageId": docker_image}
    if file_estimates and disk:
        total_estimate = 0
        for key, multiplier in disk.items():
            if key in file_estimates:
                total_estimate += int(multiplier * file_estimates[key])
        if total_estimate:
            cwl_res["tmpdirMin"] = total_estimate
            cwl_res["outdirMin"] += total_estimate
    out = {"class": "CommandLineTool",
           "cwlVersion": "v1.0",
           "baseCommand": ["bcbio_nextgen.py", "runfn", name, "cwl"],
           "requirements": [],
           "hints": [docker, cwl_res],
           "arguments": [],
           "inputs": [],
           "outputs": []}
    if programs:
        def resolve_package(p):
            out = {}
            parts = p.split("=")
            if len(parts) == 2:
                out["package"] = parts[0]
                out["version"] = [parts[1]]
            else:
                out["package"] = p
            out["specs"] = ["https://anaconda.org/bioconda/%s" % out["package"]]
            return out
        out["hints"].append({"class": "SoftwareRequirement",
                             "packages": [resolve_package(p) for p in programs]})
    # Use JSON for inputs, rather than command line arguments
    # Correctly handles multiple values and batching across CWL runners
    use_commandline_args = False
    out["requirements"] += [{"class": "InlineJavascriptRequirement"},
                            {"class": "InitialWorkDirRequirement",
                                "listing": [{"entryname": "cwl.inputs.json",
                                            "entry": "$(JSON.stringify(inputs))"}]}]
    out["arguments"] += [{"position": 0, "valueFrom":
                          "sentinel_runtime=cores,$(runtime['cores']),ram,$(runtime['ram'])"},
                         "sentinel_parallel=%s" % parallel,
                         "sentinel_outputs=%s" % ",".join([_get_sentinel_val(v) for v in outputs]),
                         "sentinel_inputs=%s" % ",".join(["%s:%s" %
                                                          (workflow.get_base_id(v["id"]),
                                                           "record" if workflow.is_cwl_record(v) else "var")
                                                          for v in inputs])]
    for i, inp in enumerate(inputs):
        base_id = workflow.get_base_id(inp["id"])
        inp_tool = copy.deepcopy(inp)
        inp_tool["id"] = base_id
        if inp.get("wf_duplicate"):
            inp_tool["id"] += "_toolinput"
        for attr in ["source", "valueFrom", "wf_duplicate"]:
            inp_tool.pop(attr, None)
        if _is_scatter_parallel(parallel) and _do_scatter_var(inp, parallel):
            inp_tool = workflow._flatten_nested_input(inp_tool)
        if use_commandline_args:
            inp_binding = {"prefix": "%s=" % base_id,
                           "separate": False, "itemSeparator": ";;", "position": i}
            inp_tool = _place_input_binding(inp_tool, inp_binding, parallel)
        else:
            inp_binding = None
        inp_tool = _place_secondary_files(inp_tool, inp_binding)
        out["inputs"].append(inp_tool)
    for outp in outputs:
        outp_tool = copy.deepcopy(outp)
        outp_tool["id"] = workflow.get_base_id(outp["id"])
        out["outputs"].append(outp_tool)
    with open(out_file, "w") as out_handle:
        def str_presenter(dumper, data):
            if len(data.splitlines()) > 1:  # check for multiline string
                return dumper.represent_scalar('tag:yaml.org,2002:str', data, style='|')
            return dumper.represent_scalar('tag:yaml.org,2002:str', data)
        yaml.add_representer(str, str_presenter)
        yaml.dump(out, out_handle, default_flow_style=False, allow_unicode=False)
    return os.path.join("steps", os.path.basename(out_file))
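An added sketch of the disk-estimate logic above, using hypothetical numbers: each key of disk that also appears in file_estimates contributes multiplier * estimate, and the total is added to tmpdirMin and on top of the base outdirMin.

file_estimates = {"files": 8000, "align_bam": 12000}  # hypothetical sizes
disk = {"files": 2.0, "align_bam": 1.5}               # hypothetical multipliers
total_estimate = sum(int(mult * file_estimates[key])
                     for key, mult in disk.items() if key in file_estimates)
# total_estimate == 34000, added to cwl_res["tmpdirMin"] and cwl_res["outdirMin"]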
Example #6
def _write_tool(step_dir, name, inputs, outputs, parallel, image, programs,
                file_estimates, disk, step_cores, samples, cur_remotes):
    out_file = os.path.join(step_dir, "%s.cwl" % name)
    resource_cores, mem_gb_per_core = resources.cpu_and_memory(
        (programs or []) + ["default"], samples)
    cores = min([step_cores, resource_cores]) if step_cores else resource_cores
    mem_mb_total = int(mem_gb_per_core * cores * 1024)
    bcbio_docker_disk = 1 * 1024  # Minimum requirements for bcbio Docker image
    cwl_res = {
        "class": "ResourceRequirement",
        "coresMin": cores,
        "ramMin": mem_mb_total,
        "outdirMin": bcbio_docker_disk
    }
    cwl_res = _add_disk_estimates(cwl_res, inputs, file_estimates, disk)
    docker_image = "bcbio/bcbio" if image == "bcbio" else "quay.io/bcbio/%s" % image
    docker = {
        "class": "DockerRequirement",
        "dockerPull": docker_image,
        "dockerImageId": docker_image
    }
    out = {
        "class": "CommandLineTool",
        "cwlVersion": "v1.0",
        "baseCommand": ["bcbio_nextgen.py", "runfn", name, "cwl"],
        "requirements": [],
        "hints": [docker, cwl_res],
        "arguments": [],
        "inputs": [],
        "outputs": []
    }
    if programs:

        def resolve_package(p):
            out = {}
            parts = p.split("=")
            if len(parts) == 2:
                out["package"] = parts[0]
                out["version"] = [parts[1]]
            else:
                out["package"] = p
            out["specs"] = [
                "https://anaconda.org/bioconda/%s" % out["package"]
            ]
            return out

        out["hints"].append({
            "class": "SoftwareRequirement",
            "packages": [resolve_package(p) for p in programs]
        })
        # GATK requires networking for setting up log4j logging, use arvados extension
        if any(p.startswith(("gatk", "sentieon")) for p in programs):
            out["hints"] += [{"class": "arv:APIRequirement"}]
    # Multi-process methods that read heavily from BAM files need extra keep cache for Arvados
    if name in ["pipeline_summary", "variantcall_batch_region"]:
        out["hints"] += [{
            "class": "arv:RuntimeConstraints",
            "keep_cache": 4096
        }]
    if any(h.get("class", "").startswith("arv:") for h in out["hints"]):
        out["$namespaces"] = {"arv": "http://arvados.org/cwl#"}
    # Use JSON for inputs, rather than command line arguments
    # Correctly handles multiple values and batching across CWL runners
    use_commandline_args = False
    out["requirements"] += [{
        "class": "InlineJavascriptRequirement"
    }, {
        "class":
        "InitialWorkDirRequirement",
        "listing": [{
            "entryname": "cwl.inputs.json",
            "entry": "$(JSON.stringify(inputs))"
        }]
    }]
    out["arguments"] += [{
        "position":
        0,
        "valueFrom":
        "sentinel_runtime=cores,$(runtime['cores']),ram,$(runtime['ram'])"
    },
                         "sentinel_parallel=%s" % parallel,
                         "sentinel_outputs=%s" %
                         ",".join([_get_sentinel_val(v) for v in outputs]),
                         "sentinel_inputs=%s" % ",".join([
                             "%s:%s" %
                             (workflow.get_base_id(v["id"]),
                              "record" if workflow.is_cwl_record(v) else "var")
                             for v in inputs
                         ])]
    for i, inp in enumerate(inputs):
        base_id = workflow.get_base_id(inp["id"])
        inp_tool = copy.deepcopy(inp)
        inp_tool["id"] = base_id
        if inp.get("wf_duplicate"):
            inp_tool["id"] += "_toolinput"
        for attr in ["source", "valueFrom", "wf_duplicate"]:
            inp_tool.pop(attr, None)
        # Ensure records and workflow inputs get scattered
        if (_is_scatter_parallel(parallel) and _do_scatter_var(inp, parallel)
                and (workflow.is_cwl_record(inp) or inp.get("wf_duplicate"))):
            inp_tool = workflow._flatten_nested_input(inp_tool)
        if use_commandline_args:
            inp_binding = {
                "prefix": "%s=" % base_id,
                "separate": False,
                "itemSeparator": ";;",
                "position": i
            }
            inp_tool = _place_input_binding(inp_tool, inp_binding, parallel)
        else:
            inp_binding = None
        inp_tool = _place_secondary_files(inp_tool, inp_binding)
        inp_tool = _clean_record(inp_tool)
        out["inputs"].append(inp_tool)
    for outp in outputs:
        outp_tool = copy.deepcopy(outp)
        outp_tool = _clean_record(outp_tool)
        outp_tool["id"] = workflow.get_base_id(outp["id"])
        out["outputs"].append(outp_tool)
    with open(out_file, "w") as out_handle:

        def str_presenter(dumper, data):
            if len(data.splitlines()) > 1:  # check for multiline string
                return dumper.represent_scalar('tag:yaml.org,2002:str',
                                               data,
                                               style='|')
            return dumper.represent_scalar('tag:yaml.org,2002:str', data)

        yaml.add_representer(str, str_presenter)
        yaml.dump(out,
                  out_handle,
                  default_flow_style=False,
                  allow_unicode=False)
    return os.path.join("steps", os.path.basename(out_file))
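An added note on the Arvados-specific behaviour in this version, with an illustrative program list: any program starting with "gatk" or "sentieon", or a step named pipeline_summary / variantcall_batch_region, causes arv-prefixed hints to be emitted together with the matching namespace declaration.

# With e.g. programs = ["gatk4"], the generated tool additionally carries:
extra_hints = [{"class": "arv:APIRequirement"}]
namespaces = {"arv": "http://arvados.org/cwl#"}  # written out as "$namespaces"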