Example #1
def _add_disk_estimates(cwl_res, inputs, file_estimates, disk):
    """Add disk usage estimates to CWL ResourceRequirement.

    Based on inputs (which need to be staged) and disk
    specifications (which estimate outputs).
    """
    if not disk:
        disk = {}
    if file_estimates:
        total_estimate = 0
        for key, multiplier in disk.items():
            if key in file_estimates:
                total_estimate += int(multiplier * file_estimates[key])
        for inp in inputs:
            scale = 2.0 if inp.get("type") == "array" else 1.0
            if workflow.is_cwl_record(inp):
                for f in _get_record_fields(inp):
                    if f["name"] in file_estimates:
                        total_estimate += file_estimates[f["name"]] * scale
            elif inp["id"] in file_estimates:
                total_estimate += file_estimates[inp["id"]] * scale
        if total_estimate:
            # Round total estimates to integer, assign extra half to temp space
            # It's not entirely clear how different runners interpret this
            cwl_res["tmpdirMin"] = int(math.ceil(total_estimate * 0.5))
            cwl_res["outdirMin"] += int(math.ceil(total_estimate))
    return cwl_res
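
A minimal arithmetic sketch of the estimate above, assuming hypothetical file sizes and keys ("align_bam", "vrn_file" are illustrative, not bcbio defaults); the real function also expands records via _get_record_fields and needs bcbio's workflow module in scope.

import math

file_estimates = {"align_bam": 4000, "vrn_file": 500}   # sizes in Mb (hypothetical)
disk = {"align_bam": 2.0}                                # outputs estimated at 2x the BAM
staged_inputs = file_estimates["align_bam"] * 1.0        # one non-array input to stage
total_estimate = int(disk["align_bam"] * file_estimates["align_bam"]) + staged_inputs

cwl_res = {"outdirMin": 1024}                            # base bcbio Docker allowance
cwl_res["tmpdirMin"] = int(math.ceil(total_estimate * 0.5))  # extra half to temp space
cwl_res["outdirMin"] += int(math.ceil(total_estimate))
print(cwl_res)                                           # {'outdirMin': 13024, 'tmpdirMin': 6000}
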
Example #2
def _add_disk_estimates(cwl_res, inputs, file_estimates, disk):
    """Add disk usage estimates to CWL ResourceRequirement.

    Based on inputs (which need to be staged) and disk
    specifications (which estimate outputs).
    """
    if not disk:
        disk = {}
    if file_estimates:
        total_estimate = 0
        for key, multiplier in disk.items():
            if key in file_estimates:
                total_estimate += int(multiplier * file_estimates[key])
        for inp in inputs:
            scale = 2.0 if inp.get("type") == "array" else 1.0
            if workflow.is_cwl_record(inp):
                for f in _get_record_fields(inp):
                    if f["name"] in file_estimates:
                        total_estimate += file_estimates[f["name"]] * scale
            elif inp["id"] in file_estimates:
                total_estimate += file_estimates[inp["id"]] * scale
        if total_estimate:
            # scale total estimate to allow extra room, round to integer
            total_estimate = int(math.ceil(total_estimate * 1.5))
            cwl_res["tmpdirMin"] = total_estimate
            cwl_res["outdirMin"] += total_estimate
    return cwl_res
Example #3
def _clean_record(rec):
    """Remove secondary files from record fields, which are currently not supported.

    To be removed later when secondaryFiles added to records.
    """
    if workflow.is_cwl_record(rec):

        def _clean_fields(d):
            if isinstance(d, dict):
                if "fields" in d:
                    out = []
                    for f in d["fields"]:
                        f = utils.deepish_copy(f)
                        f.pop("secondaryFiles", None)
                        out.append(f)
                    d["fields"] = out
                    return d
                else:
                    out = {}
                    for k, v in d.items():
                        out[k] = _clean_fields(v)
                    return out
            else:
                return d

        return _clean_fields(rec)
    else:
        return rec
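
A self-contained sketch of the recursion above on a hypothetical record schema; copy.deepcopy stands in for utils.deepish_copy and the dict comprehension mirrors popping secondaryFiles from each field.

import copy

rec = {"id": "batch_rec",
       "type": {"type": "record",
                "fields": [{"name": "align_bam", "type": "File",
                            "secondaryFiles": [".bai"]},
                           {"name": "genome_build", "type": "string"}]}}

def clean_fields(d):
    # Recurse through dictionaries until a "fields" list is found, then strip
    # secondaryFiles from each field definition.
    if isinstance(d, dict):
        if "fields" in d:
            d["fields"] = [{k: v for k, v in copy.deepcopy(f).items()
                            if k != "secondaryFiles"} for f in d["fields"]]
            return d
        return {k: clean_fields(v) for k, v in d.items()}
    return d

cleaned = clean_fields(rec)
assert all("secondaryFiles" not in f for f in cleaned["type"]["fields"])
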
Example #4
def _add_inputs_to_tool(inputs, tool, parallel, use_commandline_args=False):
    """Add input definitions to a tool, handling scattering, bindings and secondary files."""
    for i, inp in enumerate(inputs):
        base_id = workflow.get_base_id(inp["id"])
        inp_tool = copy.deepcopy(inp)
        inp_tool["id"] = base_id
        if inp.get("wf_duplicate"):
            inp_tool["id"] += "_toolinput"
        for attr in ["source", "valueFrom", "wf_duplicate"]:
            inp_tool.pop(attr, None)
        # Ensure records and workflow inputs get scattered
        if (_is_scatter_parallel(parallel) and _do_scatter_var(inp, parallel)
                and (workflow.is_cwl_record(inp) or inp.get("wf_duplicate"))):
            inp_tool = workflow._flatten_nested_input(inp_tool)
        if use_commandline_args:
            inp_binding = {
                "prefix": "%s=" % base_id,
                "separate": False,
                "itemSeparator": ";;",
                "position": i
            }
            inp_tool = _place_input_binding(inp_tool, inp_binding, parallel)
        else:
            inp_binding = None
        inp_tool = _place_secondary_files(inp_tool, inp_binding)
        inp_tool = _clean_record(inp_tool)
        tool["inputs"].append(inp_tool)
    return tool
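
For reference, a sketch of the per-input binding built when use_commandline_args is True, with a hypothetical id and position; the default path skips this and passes inputs through the cwl.inputs.json file written by the InitialWorkDirRequirement.

base_id, position = "align_bam", 3        # hypothetical values
inp_binding = {"prefix": "%s=" % base_id,
               "separate": False,
               "itemSeparator": ";;",
               "position": position}
# For an array-valued input a CWL runner would render roughly:
#   align_bam=/path/one.bam;;/path/two.bam
print(inp_binding)
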
Example #5
def _get_sentinel_val(v):
    """Retrieve expected sentinel value for an output, expanding records.
    """
    out = workflow.get_base_id(v["id"])
    if workflow.is_cwl_record(v):
        out += ":%s" % ";".join([x["name"] for x in _get_record_fields(v)])
    return out
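
A sketch of the sentinel strings this produces for hypothetical outputs, assuming workflow.get_base_id reduces a prefixed CWL id to its final name; _write_tool later joins these with commas into the sentinel_outputs argument.

plain = "vrn_file"                                                # non-record output
record = "batch_rec" + ":" + ";".join(["align_bam", "vrn_file"])  # record output
print(",".join([plain, record]))   # vrn_file,batch_rec:align_bam;vrn_file
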
Example #6
def _clean_record(rec):
    """Remove secondary files from record fields, which are currently not supported.

    To be removed later when secondaryFiles added to records.
    """
    if workflow.is_cwl_record(rec):
        def _clean_fields(d):
            if isinstance(d, dict):
                if "fields" in d:
                    out = []
                    for f in d["fields"]:
                        f = utils.deepish_copy(f)
                        f.pop("secondaryFiles", None)
                        out.append(f)
                    d["fields"] = out
                    return d
                else:
                    out = {}
                    for k, v in d.items():
                        out[k] = _clean_fields(v)
                    return out
            else:
                return d
        return _clean_fields(rec)
    else:
        return rec
Example #7
def _get_sentinel_val(v):
    """Retrieve expected sentinel value for an output, expanding records.
    """
    out = workflow.get_base_id(v["id"])
    if workflow.is_cwl_record(v):
        out += ":%s" % ";".join([x["name"] for x in _get_record_fields(v)])
    return out
Example #8
def _add_disk_estimates(cwl_res, inputs, file_estimates, disk):
    """Add disk usage estimates to CWL ResourceRequirement.

    Based on inputs (which need to be staged) and disk
    specifications (which estimate outputs).
    """
    if not disk:
        disk = {}
    if file_estimates:
        total_estimate = 0
        for key, multiplier in disk.items():
            if key in file_estimates:
                total_estimate += int(multiplier * file_estimates[key])
        for inp in inputs:
            scale = 2.0 if inp.get("type") == "array" else 1.0
            if workflow.is_cwl_record(inp):
                for f in _get_record_fields(inp):
                    if f["name"] in file_estimates:
                        total_estimate += file_estimates[f["name"]] * scale
            elif inp["id"] in file_estimates:
                total_estimate += file_estimates[inp["id"]] * scale
        if total_estimate:
            # scale total estimate to allow extra room, round to integer
            total_estimate = int(math.ceil(total_estimate * 1.5))
            cwl_res["tmpdirMin"] = total_estimate
            cwl_res["outdirMin"] += total_estimate
    return cwl_res
Example #9
def _do_scatter_var(v, parallel):
    """Logic for scattering a variable.
    """
    # For batches, scatter records only at the top level (double nested)
    if parallel.startswith("batch") and workflow.is_cwl_record(v):
        return (tz.get_in(["type", "type"], v) == "array" and
                tz.get_in(["type", "type", "type"], v) == "array")
    # Otherwise, scatter arrays
    else:
        return (tz.get_in(["type", "type"], v) == "array")
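
A minimal sketch of the general (non-batch) branch with hypothetical variables: toolz.get_in probes the nested "type" keys and returns None rather than raising when the path is absent, so plain variables simply fail the comparison against "array".

import toolz as tz

array_var = {"id": "vrn_file", "type": {"type": "array", "items": "File"}}
plain_var = {"id": "genome_build", "type": "string"}

print(tz.get_in(["type", "type"], array_var) == "array")  # True  -> scatter
print(tz.get_in(["type", "type"], plain_var) == "array")  # False -> keep as-is
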
Example #10
def _do_scatter_var(v, parallel):
    """Logic for scattering a variable.
    """
    # For batches, scatter records only at the top level (double nested)
    if parallel.startswith("batch") and workflow.is_cwl_record(v):
        return (tz.get_in(["type", "type"], v) == "array"
                and tz.get_in(["type", "type", "type"], v) == "array")
    # Otherwise, scatter arrays
    else:
        return (tz.get_in(["type", "type"], v) == "array")
Example #11
def _get_disk_estimates(name, parallel, inputs, file_estimates, samples, disk,
                        cur_remotes, no_files):
    """Retrieve disk usage estimates as CWL ResourceRequirement and hint.

    Disk specification for temporary files and outputs.

    Also optionally includes input disk estimates as a custom hint for
    platforms that need to stage inputs and do not pre-estimate them when
    allocating machine sizes.
    """
    tmp_disk, out_disk, in_disk = 0, 0, 0
    if file_estimates:
        if disk:
            for key, multiplier in disk.items():
                if key in file_estimates:
                    out_disk += int(multiplier * file_estimates[key])
        for inp in inputs:
            scale = 2.0 if inp.get("type") == "array" else 1.0
            # Allocate for all samples; this could be skipped for `to_rec` steps once
            # we ensure they don't need staging. DNAnexus currently stages everything,
            # so it has to be accounted for.
            if parallel in ["multi-combined", "multi-batch"
                            ] and "dnanexus" in cur_remotes:
                scale *= (len(samples))
            if workflow.is_cwl_record(inp):
                for f in _get_record_fields(inp):
                    if f["name"] in file_estimates:
                        in_disk += file_estimates[f["name"]] * scale
            elif inp["id"] in file_estimates:
                in_disk += file_estimates[inp["id"]] * scale
        # Round total estimates to integer, assign extra half to temp space
        # It's not entirely clear how different runners interpret this
        tmp_disk = int(math.ceil(out_disk * 0.5))
        out_disk = int(math.ceil(out_disk))

    bcbio_docker_disk = (
        10 if cur_remotes else
        1) * 1024  # Minimum requirements for bcbio Docker image
    disk_hint = {
        "outdirMin": bcbio_docker_disk + out_disk,
        "tmpdirMin": tmp_disk
    }
    # Skip input disk for steps which require only transformation (and thus no staging)
    if no_files:
        in_disk = 0
    # Avoid accidentally flagging as no staging if we don't know sizes of expected inputs
    elif in_disk == 0:
        in_disk = 1
    input_hint = {
        "class": "dx:InputResourceRequirement",
        "indirMin": int(math.ceil(in_disk))
    }
    return disk_hint, input_hint
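
The two structures returned, sketched with hypothetical numbers: a ResourceRequirement-style disk estimate merged into the tool's resources, and a DNAnexus-specific staging hint (the dx: namespace it relies on is declared later, when the tool is written out).

out_disk, tmp_disk, in_disk = 8000, 4000, 12000           # Mb, hypothetical
bcbio_docker_disk = 10 * 1024                             # remote inputs present
disk_hint = {"outdirMin": bcbio_docker_disk + out_disk, "tmpdirMin": tmp_disk}
input_hint = {"class": "dx:InputResourceRequirement", "indirMin": in_disk}
print(disk_hint)
print(input_hint)
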
Example #12
def _get_sentinel_val(v):
    """Retrieve expected sentinel value for an output, expanding records.
    """
    out = workflow.get_base_id(v["id"])
    if workflow.is_cwl_record(v):
        def _get_fields(d):
            if isinstance(d, dict):
                if "fields" in d:
                    return d["fields"]
                else:
                    for v in d.values():
                        fields = _get_fields(v)
                        if fields:
                            return fields
        out += ":%s" % ";".join([x["name"] for x in _get_fields(v)])
    return out
Example #13
def _get_sentinel_val(v):
    """Retrieve expected sentinel value for an output, expanding records.
    """
    out = workflow.get_base_id(v["id"])
    if workflow.is_cwl_record(v):

        def _get_fields(d):
            if isinstance(d, dict):
                if "fields" in d:
                    return d["fields"]
                else:
                    for v in d.values():
                        fields = _get_fields(v)
                        if fields:
                            return fields

        out += ":%s" % ";".join([x["name"] for x in _get_fields(v)])
    return out
Example #14
def _get_disk_estimates(name, parallel, inputs, file_estimates, samples, disk,
                        cur_remotes, no_files):
    """Retrieve disk usage estimates as CWL ResourceRequirement and hint.

    Disk specification for temporary files and outputs.

    Also optionally includes input disk estimates as a custom hint for
    platforms that need to stage inputs and do not pre-estimate them when
    allocating machine sizes.
    """
    tmp_disk, out_disk, in_disk = 0, 0, 0
    if file_estimates:
        if disk:
            for key, multiplier in disk.items():
                if key in file_estimates:
                    out_disk += int(multiplier * file_estimates[key])
        for inp in inputs:
            scale = 2.0 if inp.get("type") == "array" else 1.0
            # Allocate for all samples; this could be skipped for `to_rec` steps once
            # we ensure they don't need staging. DNAnexus currently stages everything,
            # so it has to be accounted for.
            if parallel in ["multi-combined", "multi-batch"] and "dnanexus" in cur_remotes:
                scale *= (len(samples))
            if workflow.is_cwl_record(inp):
                for f in _get_record_fields(inp):
                    if f["name"] in file_estimates:
                        in_disk += file_estimates[f["name"]] * scale
            elif inp["id"] in file_estimates:
                in_disk += file_estimates[inp["id"]] * scale
        # Round total estimates to integer, assign extra half to temp space
        # It's not entirely clear how different runners interpret this
        tmp_disk = int(math.ceil(out_disk * 0.5))
        out_disk = int(math.ceil(out_disk))

    bcbio_docker_disk = (10 if cur_remotes else 1) * 1024  # Minimum requirements for bcbio Docker image
    disk_hint = {"outdirMin": bcbio_docker_disk + out_disk, "tmpdirMin": tmp_disk}
    # Skip input disk for steps which require only transformation (and thus no staging)
    if no_files:
        in_disk = 0
    # Avoid accidentally flagging as no staging if we don't know sizes of expected inputs
    elif in_disk == 0:
        in_disk = 1
    input_hint = {"class": "dx:InputResourceRequirement", "indirMin": int(math.ceil(in_disk))}
    return disk_hint, input_hint
Example #15
def _add_inputs_to_tool(inputs, tool, parallel, use_commandline_args=False):
    """Add input definitions to a tool, handling scattering, bindings and secondary files."""
    for i, inp in enumerate(inputs):
        base_id = workflow.get_base_id(inp["id"])
        inp_tool = copy.deepcopy(inp)
        inp_tool["id"] = base_id
        if inp.get("wf_duplicate"):
            inp_tool["id"] += "_toolinput"
        for attr in ["source", "valueFrom", "wf_duplicate"]:
            inp_tool.pop(attr, None)
        # Ensure records and workflow inputs get scattered
        if (_is_scatter_parallel(parallel) and _do_scatter_var(inp, parallel) and
              (workflow.is_cwl_record(inp) or inp.get("wf_duplicate"))):
            inp_tool = workflow._flatten_nested_input(inp_tool)
        if use_commandline_args:
            inp_binding = {"prefix": "%s=" % base_id,
                           "separate": False, "itemSeparator": ";;", "position": i}
            inp_tool = _place_input_binding(inp_tool, inp_binding, parallel)
        else:
            inp_binding = None
        inp_tool = _place_secondary_files(inp_tool, inp_binding)
        inp_tool = _clean_record(inp_tool)
        tool["inputs"].append(inp_tool)
    return tool
Example #16
def _write_tool(step_dir, name, inputs, outputs, parallel, image, programs,
                file_estimates, disk, step_cores, samples):
    out_file = os.path.join(step_dir, "%s.cwl" % name)
    resource_cores, mem_gb_per_core = resources.cpu_and_memory((programs or []) + ["default"], samples)
    cores = step_cores if step_cores else resource_cores
    mem_mb_total = int(mem_gb_per_core * cores * 1024)
    bcbio_docker_disk = 1 * 1024  # Minimum requirements for bcbio Docker image
    cwl_res = {"class": "ResourceRequirement",
               "coresMin": cores, "ramMin": mem_mb_total, "outdirMin": bcbio_docker_disk}
    docker_image = "bcbio/bcbio" if image == "bcbio" else "quay.io/bcbio/%s" % image
    docker = {"class": "DockerRequirement", "dockerPull": docker_image, "dockerImageId": docker_image}
    if file_estimates and disk:
        total_estimate = 0
        for key, multiplier in disk.items():
            if key in file_estimates:
                total_estimate += int(multiplier * file_estimates[key])
        if total_estimate:
            cwl_res["tmpdirMin"] = total_estimate
            cwl_res["outdirMin"] += total_estimate
    out = {"class": "CommandLineTool",
           "cwlVersion": "v1.0",
           "baseCommand": ["bcbio_nextgen.py", "runfn", name, "cwl"],
           "requirements": [],
           "hints": [docker, cwl_res],
           "arguments": [],
           "inputs": [],
           "outputs": []}
    if programs:
        def resolve_package(p):
            out = {}
            parts = p.split("=")
            if len(parts) == 2:
                out["package"] = parts[0]
                out["version"] = [parts[1]]
            else:
                out["package"] = p
            out["specs"] = ["https://anaconda.org/bioconda/%s" % out["package"]]
            return out
        out["hints"].append({"class": "SoftwareRequirement",
                             "packages": [resolve_package(p) for p in programs]})
    # Use JSON for inputs, rather than command line arguments
    # Correctly handles multiple values and batching across CWL runners
    use_commandline_args = False
    out["requirements"] += [{"class": "InlineJavascriptRequirement"},
                            {"class": "InitialWorkDirRequirement",
                                "listing": [{"entryname": "cwl.inputs.json",
                                            "entry": "$(JSON.stringify(inputs))"}]}]
    out["arguments"] += [{"position": 0, "valueFrom":
                          "sentinel_runtime=cores,$(runtime['cores']),ram,$(runtime['ram'])"},
                         "sentinel_parallel=%s" % parallel,
                         "sentinel_outputs=%s" % ",".join([_get_sentinel_val(v) for v in outputs]),
                         "sentinel_inputs=%s" % ",".join(["%s:%s" %
                                                          (workflow.get_base_id(v["id"]),
                                                           "record" if workflow.is_cwl_record(v) else "var")
                                                          for v in inputs])]
    for i, inp in enumerate(inputs):
        base_id = workflow.get_base_id(inp["id"])
        inp_tool = copy.deepcopy(inp)
        inp_tool["id"] = base_id
        if inp.get("wf_duplicate"):
            inp_tool["id"] += "_toolinput"
        for attr in ["source", "valueFrom", "wf_duplicate"]:
            inp_tool.pop(attr, None)
        if _is_scatter_parallel(parallel) and _do_scatter_var(inp, parallel):
            inp_tool = workflow._flatten_nested_input(inp_tool)
        if use_commandline_args:
            inp_binding = {"prefix": "%s=" % base_id,
                           "separate": False, "itemSeparator": ";;", "position": i}
            inp_tool = _place_input_binding(inp_tool, inp_binding, parallel)
        else:
            inp_binding = None
        inp_tool = _place_secondary_files(inp_tool, inp_binding)
        out["inputs"].append(inp_tool)
    for outp in outputs:
        outp_tool = copy.deepcopy(outp)
        outp_tool["id"] = workflow.get_base_id(outp["id"])
        out["outputs"].append(outp_tool)
    with open(out_file, "w") as out_handle:
        def str_presenter(dumper, data):
            if len(data.splitlines()) > 1:  # check for multiline string
                return dumper.represent_scalar('tag:yaml.org,2002:str', data, style='|')
            return dumper.represent_scalar('tag:yaml.org,2002:str', data)
        yaml.add_representer(str, str_presenter)
        yaml.dump(out, out_handle, default_flow_style=False, allow_unicode=False)
    return os.path.join("steps", os.path.basename(out_file))
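
A self-contained sketch of the str_presenter behavior above: multiline strings are emitted in YAML block style ('|') so embedded JavaScript entries stay readable in the generated .cwl file.

import yaml

def str_presenter(dumper, data):
    # Use literal block style for anything spanning more than one line.
    if len(data.splitlines()) > 1:
        return dumper.represent_scalar('tag:yaml.org,2002:str', data, style='|')
    return dumper.represent_scalar('tag:yaml.org,2002:str', data)

yaml.add_representer(str, str_presenter)
print(yaml.dump({"entry": "line one\nline two"}, default_flow_style=False))
# entry: |-
#   line one
#   line two
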
Example #17
def _write_tool(step_dir, name, inputs, outputs, parallel, image, programs,
                file_estimates, disk, step_cores, samples, cur_remotes):
    out_file = os.path.join(step_dir, "%s.cwl" % name)
    resource_cores, mem_gb_per_core = resources.cpu_and_memory(
        (programs or []) + ["default"], samples)
    cores = min([step_cores, resource_cores]) if step_cores else resource_cores
    mem_mb_total = int(mem_gb_per_core * cores * 1024)
    bcbio_docker_disk = 1 * 1024  # Minimum requirements for bcbio Docker image
    cwl_res = {
        "class": "ResourceRequirement",
        "coresMin": cores,
        "ramMin": mem_mb_total,
        "outdirMin": bcbio_docker_disk
    }
    cwl_res = _add_disk_estimates(cwl_res, inputs, file_estimates, disk)
    docker_image = "bcbio/bcbio" if image == "bcbio" else "quay.io/bcbio/%s" % image
    docker = {
        "class": "DockerRequirement",
        "dockerPull": docker_image,
        "dockerImageId": docker_image
    }
    out = {
        "class": "CommandLineTool",
        "cwlVersion": "v1.0",
        "baseCommand": ["bcbio_nextgen.py", "runfn", name, "cwl"],
        "requirements": [],
        "hints": [docker, cwl_res],
        "arguments": [],
        "inputs": [],
        "outputs": []
    }
    if programs:

        def resolve_package(p):
            out = {}
            parts = p.split("=")
            if len(parts) == 2:
                out["package"] = parts[0]
                out["version"] = [parts[1]]
            else:
                out["package"] = p
            out["specs"] = [
                "https://anaconda.org/bioconda/%s" % out["package"]
            ]
            return out

        out["hints"].append({
            "class": "SoftwareRequirement",
            "packages": [resolve_package(p) for p in programs]
        })
        # GATK requires networking for setting up log4j logging, use arvados extension
        if any(p.startswith(("gatk", "sentieon")) for p in programs):
            out["hints"] += [{"class": "arv:APIRequirement"}]
    # Multi-process methods that read heavily from BAM files need extra keep cache for Arvados
    if name in ["pipeline_summary", "variantcall_batch_region"]:
        out["hints"] += [{
            "class": "arv:RuntimeConstraints",
            "keep_cache": 4096
        }]
    if any(h.get("class", "").startswith("arv:") for h in out["hints"]):
        out["$namespaces"] = {"arv": "http://arvados.org/cwl#"}
    # Use JSON for inputs, rather than command line arguments
    # Correctly handles multiple values and batching across CWL runners
    use_commandline_args = False
    out["requirements"] += [{
        "class": "InlineJavascriptRequirement"
    }, {
        "class":
        "InitialWorkDirRequirement",
        "listing": [{
            "entryname": "cwl.inputs.json",
            "entry": "$(JSON.stringify(inputs))"
        }]
    }]
    out["arguments"] += [{
        "position":
        0,
        "valueFrom":
        "sentinel_runtime=cores,$(runtime['cores']),ram,$(runtime['ram'])"
    },
                         "sentinel_parallel=%s" % parallel,
                         "sentinel_outputs=%s" %
                         ",".join([_get_sentinel_val(v) for v in outputs]),
                         "sentinel_inputs=%s" % ",".join([
                             "%s:%s" %
                             (workflow.get_base_id(v["id"]),
                              "record" if workflow.is_cwl_record(v) else "var")
                             for v in inputs
                         ])]
    for i, inp in enumerate(inputs):
        base_id = workflow.get_base_id(inp["id"])
        inp_tool = copy.deepcopy(inp)
        inp_tool["id"] = base_id
        if inp.get("wf_duplicate"):
            inp_tool["id"] += "_toolinput"
        for attr in ["source", "valueFrom", "wf_duplicate"]:
            inp_tool.pop(attr, None)
        # Ensure records and workflow inputs get scattered
        if (_is_scatter_parallel(parallel) and _do_scatter_var(inp, parallel)
                and (workflow.is_cwl_record(inp) or inp.get("wf_duplicate"))):
            inp_tool = workflow._flatten_nested_input(inp_tool)
        if use_commandline_args:
            inp_binding = {
                "prefix": "%s=" % base_id,
                "separate": False,
                "itemSeparator": ";;",
                "position": i
            }
            inp_tool = _place_input_binding(inp_tool, inp_binding, parallel)
        else:
            inp_binding = None
        inp_tool = _place_secondary_files(inp_tool, inp_binding)
        inp_tool = _clean_record(inp_tool)
        out["inputs"].append(inp_tool)
    for outp in outputs:
        outp_tool = copy.deepcopy(outp)
        outp_tool = _clean_record(outp_tool)
        outp_tool["id"] = workflow.get_base_id(outp["id"])
        out["outputs"].append(outp_tool)
    with open(out_file, "w") as out_handle:

        def str_presenter(dumper, data):
            if len(data.splitlines()) > 1:  # check for multiline string
                return dumper.represent_scalar('tag:yaml.org,2002:str',
                                               data,
                                               style='|')
            return dumper.represent_scalar('tag:yaml.org,2002:str', data)

        yaml.add_representer(str, str_presenter)
        yaml.dump(out,
                  out_handle,
                  default_flow_style=False,
                  allow_unicode=False)
    return os.path.join("steps", os.path.basename(out_file))
Example #18
def _write_tool(step_dir,
                name,
                inputs,
                outputs,
                parallel,
                image,
                programs,
                file_estimates,
                disk,
                step_cores,
                samples,
                cur_remotes,
                no_files,
                container_tags=None):
    out_file = os.path.join(step_dir, "%s.cwl" % name)
    resource_cores, mem_gb_per_core = resources.cpu_and_memory(
        (programs or []) + ["default"], samples)
    cores = min([step_cores, resource_cores]) if step_cores else resource_cores
    mem_mb_total = int(mem_gb_per_core * cores * 1024)
    cwl_res = {
        "class": "ResourceRequirement",
        "coresMin": cores,
        "ramMin": mem_mb_total
    }
    disk_hint, input_hint = _get_disk_estimates(name, parallel, inputs,
                                                file_estimates, samples, disk,
                                                cur_remotes, no_files)
    cwl_res.update(disk_hint)
    docker_image = "bcbio/bcbio" if image == "bcbio" else "quay.io/bcbio/%s" % image
    if container_tags is not None:
        docker_image, container_tags = _add_current_quay_tag(
            docker_image, container_tags)
    docker = {
        "class": "DockerRequirement",
        "dockerPull": docker_image,
        "dockerImageId": docker_image
    }
    out = {
        "class": "CommandLineTool",
        "cwlVersion": "v1.0",
        "baseCommand": ["bcbio_nextgen.py", "runfn", name, "cwl"],
        "requirements": [],
        "hints": [docker, cwl_res, input_hint],
        "arguments": [],
        "inputs": [],
        "outputs": []
    }
    if programs:

        def resolve_package(p):
            out = {}
            parts = p.split("=")
            if len(parts) == 2:
                out["package"] = parts[0]
                out["version"] = [parts[1]]
            else:
                out["package"] = p
            out["specs"] = [
                "https://anaconda.org/bioconda/%s" % out["package"]
            ]
            return out

        out["hints"].append({
            "class": "SoftwareRequirement",
            "packages": [resolve_package(p) for p in programs]
        })
        # GATK requires networking for setting up log4j logging, use arvados extension
        if any(p.startswith(("gatk", "sentieon")) for p in programs):
            out["hints"] += [{"class": "arv:APIRequirement"}]
    # Multi-process methods that read heavily from BAM files need extra keep cache for Arvados
    if name in ["pipeline_summary", "variantcall_batch_region", "detect_sv"]:
        out["hints"] += [{
            "class": "arv:RuntimeConstraints",
            "keep_cache": 4096
        }]

    def add_to_namespaces(k, v, out):
        if "$namespaces" not in out:
            out["$namespaces"] = {}
        out["$namespaces"][k] = v
        return out

    if any(h.get("class", "").startswith("arv:") for h in out["hints"]):
        out = add_to_namespaces("arv", "http://arvados.org/cwl#", out)
    if any(h.get("class", "").startswith("dx") for h in out["hints"]):
        out = add_to_namespaces("dx", "https://www.dnanexus.com/cwl#", out)
    # Use JSON for inputs, rather than command line arguments
    # Correctly handles multiple values and batching across CWL runners
    use_commandline_args = False
    out["requirements"] += [{
        "class": "InlineJavascriptRequirement"
    }, {
        "class":
        "InitialWorkDirRequirement",
        "listing": [{
            "entryname": "cwl.inputs.json",
            "entry": "$(JSON.stringify(inputs))"
        }]
    }]
    out["arguments"] += [{
        "position":
        0,
        "valueFrom":
        "sentinel_runtime=cores,$(runtime['cores']),ram,$(runtime['ram'])"
    },
                         "sentinel_parallel=%s" % parallel,
                         "sentinel_outputs=%s" %
                         ",".join([_get_sentinel_val(v) for v in outputs]),
                         "sentinel_inputs=%s" % ",".join([
                             "%s:%s" %
                             (workflow.get_base_id(v["id"]),
                              "record" if workflow.is_cwl_record(v) else "var")
                             for v in inputs
                         ]), "run_number=0"]
    out = _add_inputs_to_tool(inputs, out, parallel, use_commandline_args)
    out = _add_outputs_to_tool(outputs, out)
    _tool_to_file(out, out_file)
    return os.path.join("steps", os.path.basename(out_file))
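
A sketch of the sentinel argument strings assembled above, with hypothetical ids and parallel mode; at runtime the $(runtime[...]) expressions are resolved by the CWL engine before bcbio_nextgen.py runfn parses them.

sentinel_outputs = ["vrn_file", "batch_rec:align_bam;vrn_file"]   # from _get_sentinel_val
sentinel_inputs = [("align_bam", "var"), ("batch_rec", "record")]
args = ["sentinel_runtime=cores,$(runtime['cores']),ram,$(runtime['ram'])",
        "sentinel_parallel=%s" % "batch-single",
        "sentinel_outputs=%s" % ",".join(sentinel_outputs),
        "sentinel_inputs=%s" % ",".join("%s:%s" % p for p in sentinel_inputs),
        "run_number=0"]
print(args)
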
Example #19
def _write_tool(step_dir, name, inputs, outputs, parallel, image, programs,
                file_estimates, disk, step_cores, samples):
    out_file = os.path.join(step_dir, "%s.cwl" % name)
    resource_cores, mem_gb_per_core = resources.cpu_and_memory(
        (programs or []) + ["default"], samples)
    cores = step_cores if step_cores else resource_cores
    mem_mb_total = int(mem_gb_per_core * cores * 1024)
    bcbio_docker_disk = 1 * 1024  # Minimum requirements for bcbio Docker image
    cwl_res = {
        "class": "ResourceRequirement",
        "coresMin": cores,
        "ramMin": mem_mb_total,
        "outdirMin": bcbio_docker_disk
    }
    docker_image = "bcbio/bcbio" if image == "bcbio" else "quay.io/bcbio/%s" % image
    docker = {
        "class": "DockerRequirement",
        "dockerPull": docker_image,
        "dockerImageId": docker_image
    }
    if file_estimates and disk:
        total_estimate = 0
        for key, multiplier in disk.items():
            if key in file_estimates:
                total_estimate += int(multiplier * file_estimates[key])
        if total_estimate:
            cwl_res["tmpdirMin"] = total_estimate
            cwl_res["outdirMin"] += total_estimate
    out = {
        "class": "CommandLineTool",
        "cwlVersion": "v1.0",
        "baseCommand": ["bcbio_nextgen.py", "runfn", name, "cwl"],
        "requirements": [],
        "hints": [docker, cwl_res],
        "arguments": [],
        "inputs": [],
        "outputs": []
    }
    if programs:

        def resolve_package(p):
            out = {}
            parts = p.split("=")
            if len(parts) == 2:
                out["package"] = parts[0]
                out["version"] = [parts[1]]
            else:
                out["package"] = p
            out["specs"] = [
                "https://anaconda.org/bioconda/%s" % out["package"]
            ]
            return out

        out["hints"].append({
            "class": "SoftwareRequirement",
            "packages": [resolve_package(p) for p in programs]
        })
    # Use JSON for inputs, rather than command line arguments
    # Correctly handles multiple values and batching across CWL runners
    use_commandline_args = False
    out["requirements"] += [{
        "class": "InlineJavascriptRequirement"
    }, {
        "class":
        "InitialWorkDirRequirement",
        "listing": [{
            "entryname": "cwl.inputs.json",
            "entry": "$(JSON.stringify(inputs))"
        }]
    }]
    out["arguments"] += [{
        "position":
        0,
        "valueFrom":
        "sentinel_runtime=cores,$(runtime['cores']),ram,$(runtime['ram'])"
    },
                         "sentinel_parallel=%s" % parallel,
                         "sentinel_outputs=%s" %
                         ",".join([_get_sentinel_val(v) for v in outputs]),
                         "sentinel_inputs=%s" % ",".join([
                             "%s:%s" %
                             (workflow.get_base_id(v["id"]),
                              "record" if workflow.is_cwl_record(v) else "var")
                             for v in inputs
                         ])]
    for i, inp in enumerate(inputs):
        base_id = workflow.get_base_id(inp["id"])
        inp_tool = copy.deepcopy(inp)
        inp_tool["id"] = base_id
        if inp.get("wf_duplicate"):
            inp_tool["id"] += "_toolinput"
        for attr in ["source", "valueFrom", "wf_duplicate"]:
            inp_tool.pop(attr, None)
        if _is_scatter_parallel(parallel) and _do_scatter_var(inp, parallel):
            inp_tool = workflow._flatten_nested_input(inp_tool)
        if use_commandline_args:
            inp_binding = {
                "prefix": "%s=" % base_id,
                "separate": False,
                "itemSeparator": ";;",
                "position": i
            }
            inp_tool = _place_input_binding(inp_tool, inp_binding, parallel)
        else:
            inp_binding = None
        inp_tool = _place_secondary_files(inp_tool, inp_binding)
        inp_tool = _clean_record(inp_tool)
        out["inputs"].append(inp_tool)
    for outp in outputs:
        outp_tool = copy.deepcopy(outp)
        outp_tool = _clean_record(outp_tool)
        outp_tool["id"] = workflow.get_base_id(outp["id"])
        out["outputs"].append(outp_tool)
    with open(out_file, "w") as out_handle:

        def str_presenter(dumper, data):
            if len(data.splitlines()) > 1:  # check for multiline string
                return dumper.represent_scalar('tag:yaml.org,2002:str',
                                               data,
                                               style='|')
            return dumper.represent_scalar('tag:yaml.org,2002:str', data)

        yaml.add_representer(str, str_presenter)
        yaml.dump(out,
                  out_handle,
                  default_flow_style=False,
                  allow_unicode=False)
    return os.path.join("steps", os.path.basename(out_file))
Example #20
def _write_tool(step_dir, name, inputs, outputs, parallel, image, programs,
                file_estimates, disk, step_cores, samples, cur_remotes, no_files,
                container_tags=None):
    out_file = os.path.join(step_dir, "%s.cwl" % name)
    resource_cores, mem_gb_per_core = resources.cpu_and_memory((programs or []) + ["default"], samples)
    cores = min([step_cores, resource_cores]) if step_cores else resource_cores
    mem_mb_total = int(mem_gb_per_core * cores * 1024)
    cwl_res = {"class": "ResourceRequirement", "coresMin": cores, "ramMin": mem_mb_total}
    disk_hint, input_hint = _get_disk_estimates(name, parallel, inputs, file_estimates, samples, disk,
                                                cur_remotes, no_files)
    cwl_res.update(disk_hint)
    docker_image = "bcbio/bcbio" if image == "bcbio" else "quay.io/bcbio/%s" % image
    if container_tags is not None:
        docker_image, container_tags = _add_current_quay_tag(docker_image, container_tags)
    docker = {"class": "DockerRequirement", "dockerPull": docker_image, "dockerImageId": docker_image}
    out = {"class": "CommandLineTool",
           "cwlVersion": "v1.0",
           "baseCommand": ["bcbio_nextgen.py", "runfn", name, "cwl"],
           "requirements": [],
           "hints": [docker, cwl_res, input_hint],
           "arguments": [],
           "inputs": [],
           "outputs": []}
    if programs:
        def resolve_package(p):
            out = {}
            parts = p.split("=")
            if len(parts) == 2:
                out["package"] = parts[0]
                out["version"] = [parts[1]]
            else:
                out["package"] = p
            out["specs"] = ["https://anaconda.org/bioconda/%s" % out["package"]]
            return out
        out["hints"].append({"class": "SoftwareRequirement",
                             "packages": [resolve_package(p) for p in programs]})
        # GATK requires networking for setting up log4j logging, use arvados extension
        if any(p.startswith(("gatk", "sentieon")) for p in programs):
            out["hints"] += [{"class": "arv:APIRequirement"}]
    # Multi-process methods that read heavily from BAM files need extra keep cache for Arvados
    if name in ["pipeline_summary", "variantcall_batch_region", "detect_sv"]:
        out["hints"] += [{"class": "arv:RuntimeConstraints", "keep_cache": 4096}]
    def add_to_namespaces(k, v, out):
        if "$namespaces" not in out:
            out["$namespaces"] = {}
        out["$namespaces"][k] = v
        return out
    if any(h.get("class", "").startswith("arv:") for h in out["hints"]):
        out = add_to_namespaces("arv", "http://arvados.org/cwl#", out)
    if any(h.get("class", "").startswith("dx") for h in out["hints"]):
        out = add_to_namespaces("dx", "https://www.dnanexus.com/cwl#", out)
    # Use JSON for inputs, rather than command line arguments
    # Correctly handles multiple values and batching across CWL runners
    use_commandline_args = False
    out["requirements"] += [{"class": "InlineJavascriptRequirement"},
                            {"class": "InitialWorkDirRequirement",
                                "listing": [{"entryname": "cwl.inputs.json",
                                            "entry": "$(JSON.stringify(inputs))"}]}]
    out["arguments"] += [{"position": 0, "valueFrom":
                          "sentinel_runtime=cores,$(runtime['cores']),ram,$(runtime['ram'])"},
                         "sentinel_parallel=%s" % parallel,
                         "sentinel_outputs=%s" % ",".join([_get_sentinel_val(v) for v in outputs]),
                         "sentinel_inputs=%s" % ",".join(["%s:%s" %
                                                          (workflow.get_base_id(v["id"]),
                                                           "record" if workflow.is_cwl_record(v) else "var")
                                                          for v in inputs]),
                         "run_number=0"]
    out = _add_inputs_to_tool(inputs, out, parallel, use_commandline_args)
    out = _add_outputs_to_tool(outputs, out)
    _tool_to_file(out, out_file)
    return os.path.join("steps", os.path.basename(out_file))
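
A self-contained sketch of the namespace wiring: hints whose class carries an "arv:" or "dx" prefix pull in the matching $namespaces entry so the generated CWL validates; the hypothetical hints stand in for those built above.

out = {"hints": [{"class": "arv:RuntimeConstraints", "keep_cache": 4096},
                 {"class": "dx:InputResourceRequirement", "indirMin": 1}]}

def add_to_namespaces(k, v, out):
    if "$namespaces" not in out:
        out["$namespaces"] = {}
    out["$namespaces"][k] = v
    return out

if any(h.get("class", "").startswith("arv:") for h in out["hints"]):
    out = add_to_namespaces("arv", "http://arvados.org/cwl#", out)
if any(h.get("class", "").startswith("dx") for h in out["hints"]):
    out = add_to_namespaces("dx", "https://www.dnanexus.com/cwl#", out)
print(out["$namespaces"])
# {'arv': 'http://arvados.org/cwl#', 'dx': 'https://www.dnanexus.com/cwl#'}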