Code Example #1
File: test_pb_io.py  Project: wenmm/pbsmrtpipe
 def _parse_multiple(self, file_name):
     return IO.parse_pipeline_preset_jsons([file_name])
Code Example #2
File: test_pb_io.py  Project: pb-cdunn/pbsmrtpipe
 def _parse_multiple(self, file_name):
     return IO.parse_pipeline_preset_jsons([file_name])
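The two test helpers above simply forward a single file path to IO.parse_pipeline_preset_jsons. As the larger examples below show, the returned preset record exposes workflow_options and task_options, which callers typically convert to plain dicts and merge. A minimal sketch of calling the parser directly, assuming IO is pbsmrtpipe's pb_io module (as the test module name suggests) and using a hypothetical preset file name:

# Hedged sketch: IO is assumed to be pbsmrtpipe.pb_io; "preset.json" is a
# hypothetical path used only for illustration.
from pbsmrtpipe import pb_io as IO

def load_preset_options(preset_json_paths):
    """Parse preset JSON files and return plain (workflow, task) option dicts."""
    record = IO.parse_pipeline_preset_jsons(preset_json_paths)
    return dict(record.workflow_options), dict(record.task_options)

# wopts, topts = load_preset_options(["preset.json"])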
Code Example #3
def _load_io_for_task(registered_tasks, entry_points_d, preset_jsons, preset_xmls, rc_preset_or_none, force_distribute=None, force_chunk_mode=None, debug_mode=None):
    """Grungy loading of the IO and resolving values

    Returns a tuple of (WorkflowLevelOptions, TaskOptions, ClusterRender)
    """
    slog.info("validating entry points. {e}".format(e=entry_points_d))
    _validate_entry_points_or_raise(entry_points_d)
    slog.info("successfully validated {n} entry points".format(n=len(entry_points_d)))

    wopts = {}
    topts = {}

    if rc_preset_or_none is None:
        rc_preset = IO.load_preset_from_env()
    else:
        rc_preset = IO.parse_pipeline_preset_xml(rc_preset_or_none)

    if rc_preset:
        topts.update(dict(rc_preset.task_options))
        wopts.update(dict(rc_preset.workflow_options))

    if preset_xmls:
        preset_record = IO.parse_pipeline_preset_xmls(preset_xmls)
        wopts.update(dict(preset_record.workflow_options))
        topts.update(dict(preset_record.task_options))

    if preset_jsons:
        preset_record = IO.parse_pipeline_preset_jsons(preset_jsons)
        wopts.update(dict(preset_record.workflow_options))
        topts.update(dict(preset_record.task_options))

    workflow_level_opts = IO.WorkflowLevelOptions.from_id_dict(wopts)

    workflow_level_opts = IO.validate_or_modify_workflow_level_options(workflow_level_opts)

    if isinstance(force_chunk_mode, bool):
        workflow_level_opts.chunk_mode = force_chunk_mode

    # Validate
    topts = IO.validate_raw_task_options(registered_tasks, topts)

    log.debug("Resolved task options to {d}".format(d=workflow_level_opts))
    log.debug(pprint.pprint(workflow_level_opts.to_dict(), indent=4))

    if isinstance(workflow_level_opts.cluster_manager_path, str):
        cluster_render = C.load_cluster_templates(workflow_level_opts.cluster_manager_path)
        # override distributed mode
        if isinstance(force_distribute, bool):
            workflow_level_opts.distributed_mode = force_distribute
    else:
        cluster_render = None

    workflow_level_opts.max_nchunks = min(workflow_level_opts.max_nchunks, GlobalConstants.MAX_NCHUNKS)

    if workflow_level_opts.distributed_mode is False:
        slog.info("local-only mode detected setting total NPROC to {x}".format(x=multiprocessing.cpu_count()))
        workflow_level_opts.total_max_nproc = multiprocessing.cpu_count()

    if debug_mode is True:
        slog.info("overriding debug-mode to True")
        workflow_level_opts.debug_mode = debug_mode

    return workflow_level_opts, topts, cluster_render
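The successive dict.update() calls in _load_io_for_task establish the precedence of option sources: the rc preset (or the preset loaded from the environment) is applied first, then any preset XMLs, then any preset JSONs, so later sources win on conflicting keys. A self-contained illustration of that merge order with hypothetical option ids (this is only a sketch of the pattern, not the pbsmrtpipe API):

rc_opts = {"pbsmrtpipe.options.max_nproc": 8, "pbsmrtpipe.options.tmp_dir": "/tmp"}
xml_opts = {"pbsmrtpipe.options.max_nproc": 16}
json_opts = {"pbsmrtpipe.options.tmp_dir": "/scratch"}

wopts = {}
wopts.update(rc_opts)    # lowest precedence: rc preset / environment preset
wopts.update(xml_opts)   # preset XMLs override the rc preset
wopts.update(json_opts)  # preset JSONs override everything loaded before them

assert wopts == {"pbsmrtpipe.options.max_nproc": 16,
                 "pbsmrtpipe.options.tmp_dir": "/scratch"}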
Code Example #4
def _load_io_for_workflow(registered_tasks, registered_pipelines, workflow_template_xml_or_pipeline,
                          entry_points_d, preset_jsons, preset_xmls, rc_preset_or_none, force_distribute=None, force_chunk_mode=None, debug_mode=None):
    """
    Load and resolve input IO layer

    # Load Presets and Workflow Options. Resolve and Merge
    # The Order of loading is
    # - rc, workflow.xml, then preset.xml
    # force_distribute will attempt to override ALL settings (if cluster_manager is defined)

    :returns: A tuple of (Workflow Bindings, Workflow Level Options, Task Opts, ClusterRenderer)
    :rtype: (List[(str, str)], WorkflowLevelOpts, {TaskId:value}, ClusterRenderer)
    """

    # Load Presets and Workflow Options. Resolve and Merge
    # The Order of loading is
    # - rc, workflow.xml, then preset.xml

    # A little sanity check
    # Validate that entry points exist

    slog.info("validating entry points.")
    _validate_entry_points_or_raise(entry_points_d)
    slog.info("successfully validated {n} entry points".format(n=len(entry_points_d)))

    wopts = {}
    topts = {}

    if rc_preset_or_none is None:
        rc_preset = IO.load_preset_from_env()
    else:
        rc_preset = IO.parse_pipeline_preset_xml(rc_preset_or_none)

    if isinstance(workflow_template_xml_or_pipeline, Pipeline):
        # Use default values defined in the Pipeline
        builder_record = IO.BuilderRecord(workflow_template_xml_or_pipeline.all_bindings, workflow_template_xml_or_pipeline.task_options, {})
        slog.info("Loaded pipeline Id {p}".format(p=workflow_template_xml_or_pipeline.pipeline_id))
    else:
        slog.info("Loading workflow template.")
        builder_record = IO.parse_pipeline_template_xml(workflow_template_xml_or_pipeline, registered_pipelines)
        slog.info("successfully loaded workflow template.")

    preset_xml_record = preset_json_record = None
    if preset_jsons:
        slog.info("Loading preset(s) {p}".format(p=preset_jsons))
        preset_json_record = IO.parse_pipeline_preset_jsons(preset_jsons)
        slog.info("successfully loaded preset.")
    else:
        slog.info("No JSON preset provided. Skipping preset json loading.")
    if preset_xmls:
        slog.info("Loading preset(s) {p}".format(p=preset_xmls))
        preset_xml_record = IO.parse_pipeline_preset_xmls(preset_xmls)
        slog.info("successfully loaded preset.")
    else:
        slog.info("No XML preset provided. Skipping preset XML loading.")

    if rc_preset is not None:
        topts.update(dict(rc_preset.task_options))
        wopts.update(dict(rc_preset.workflow_options))

    wopts.update(dict(builder_record.workflow_options))
    topts.update(builder_record.task_options)

    if preset_xml_record is not None:
        wopts.update(dict(preset_xml_record.workflow_options))
        topts.update(dict(preset_xml_record.task_options))
    if preset_json_record is not None:
        wopts.update(dict(preset_json_record.workflow_options))
        topts.update(dict(preset_json_record.task_options))

    workflow_level_opts = IO.WorkflowLevelOptions.from_id_dict(wopts)
    if len(sys.argv) > 0:
        # XXX evil, but this gets sys.argv into pbsmrtpipe.log
        workflow_level_opts.system_message = " ".join(sys.argv)

    # override distributed mode only if provided.
    if isinstance(force_distribute, bool):
        workflow_level_opts.distributed_mode = force_distribute
    workflow_level_opts = IO.validate_or_modify_workflow_level_options(workflow_level_opts)

    slog.info("Successfully validated workflow options.")

    slog.info("validating supplied task options.")
    topts = IO.validate_raw_task_options(registered_tasks, topts)
    slog.info("successfully loaded and validated task options.")

    workflow_bindings = builder_record.bindings

    if isinstance(workflow_level_opts.cluster_manager_path, str):
        cluster_render = C.load_cluster_templates(workflow_level_opts.cluster_manager_path)
    else:
        cluster_render = None

    if isinstance(force_chunk_mode, bool):
        workflow_level_opts.chunk_mode = force_chunk_mode

    workflow_level_opts.max_nchunks = min(workflow_level_opts.max_nchunks, GlobalConstants.MAX_NCHUNKS)

    if workflow_level_opts.distributed_mode is False:
        total_max_nproc = multiprocessing.cpu_count() if workflow_level_opts.total_max_nproc is None else workflow_level_opts.total_max_nproc
        workflow_level_opts.total_max_nproc = min(total_max_nproc, multiprocessing.cpu_count())
        workflow_level_opts.max_nproc = min(workflow_level_opts.max_nproc, workflow_level_opts.total_max_nproc)
        slog.info("local-only mode updating       MAX NPROC to {x}".format(x=workflow_level_opts.max_nproc))
        slog.info("local-only mode updating TOTAL MAX NPROC to {x}".format(x=workflow_level_opts.total_max_nproc))

    if debug_mode is True:
        slog.info("overriding debug-mode to True")
        workflow_level_opts.debug_mode = debug_mode

    log.debug("Resolved workflow level options to {d}".format(d=workflow_level_opts))
    log.debug("\n" + pprint.pformat(workflow_level_opts.to_dict(), indent=4))
    log.debug("Initial resolving of loaded preset.xml and pipeline.xml task options:")
    log.debug("\n" + pprint.pformat(topts))

    return workflow_bindings, workflow_level_opts, topts, cluster_render
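_load_io_for_workflow treats force_distribute, force_chunk_mode and debug_mode as tri-state flags: None leaves the preset-derived setting untouched, while an explicit True or False (checked via isinstance(..., bool)) overrides it. In local-only mode the nproc settings are then clamped to the machine's CPU count. A simplified, self-contained sketch of that pattern (the dict keys are stand-ins, not the real WorkflowLevelOptions attributes):

import multiprocessing

def apply_overrides(opts, force_distribute=None, force_chunk_mode=None):
    # None means "keep the value from the presets"; only an explicit bool wins.
    if isinstance(force_distribute, bool):
        opts["distributed_mode"] = force_distribute
    if isinstance(force_chunk_mode, bool):
        opts["chunk_mode"] = force_chunk_mode
    # In local-only mode, clamp nproc settings to the CPU count, mirroring the
    # min() logic in the function above.
    if opts.get("distributed_mode") is False:
        ncpu = multiprocessing.cpu_count()
        opts["total_max_nproc"] = min(opts.get("total_max_nproc") or ncpu, ncpu)
        opts["max_nproc"] = min(opts.get("max_nproc", ncpu), opts["total_max_nproc"])
    return opts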
Code Example #5
def _load_io_for_workflow(registered_tasks, registered_pipelines, workflow_template_xml_or_pipeline,
                          entry_points_d, preset_jsons, preset_xmls, rc_preset_or_none, force_distribute=None, force_chunk_mode=None, debug_mode=None):
    """
    Load and resolve input IO layer

    # Load Presets and Workflow Options. Resolve and Merge
    # The Order of loading is
    # - rc, workflow.xml, then preset.xml
    # force_distribute will attempt to override ALL settings (if cluster_manager is defined)

    :returns: A tuple of (Workflow Bindings, Workflow Level Options, Task Opts, ClusterRenderer)
    :rtype: (List[(str, str)], WorkflowLevelOpts, {TaskId:value}, ClusterRenderer)
    """

    # Load Presets and Workflow Options. Resolve and Merge
    # The Order of loading is
    # - rc, workflow.xml, then preset.xml

    # A little sanity check
    # Validate that entry points exist

    slog.info("validating entry points.")
    _validate_entry_points_or_raise(entry_points_d)
    slog.info("successfully validated {n} entry points".format(n=len(entry_points_d)))

    wopts = {}
    topts = {}

    if rc_preset_or_none is None:
        rc_preset = IO.load_preset_from_env()
    else:
        rc_preset = IO.parse_pipeline_preset_xml(rc_preset_or_none)

    if isinstance(workflow_template_xml_or_pipeline, Pipeline):
        # Use default values defined in the Pipeline
        builder_record = IO.BuilderRecord(workflow_template_xml_or_pipeline.all_bindings, workflow_template_xml_or_pipeline.task_options, {})
        slog.info("Loaded pipeline Id {p}".format(p=workflow_template_xml_or_pipeline.pipeline_id))
    else:
        slog.info("Loading workflow template.")
        builder_record = IO.parse_pipeline_template_xml(workflow_template_xml_or_pipeline, registered_pipelines)
        slog.info("successfully loaded workflow template.")

    preset_xml_record = preset_json_record = None
    if preset_jsons:
        slog.info("Loading preset(s) {p}".format(p=preset_jsons))
        preset_json_record = IO.parse_pipeline_preset_jsons(preset_jsons)
        slog.info("successfully loaded preset.")
    else:
        slog.info("No JSON preset provided. Skipping preset json loading.")
    if preset_xmls:
        slog.info("Loading preset(s) {p}".format(p=preset_xmls))
        preset_xml_record = IO.parse_pipeline_preset_xmls(preset_xmls)
        slog.info("successfully loaded preset.")
    else:
        slog.info("No XML preset provided. Skipping preset XML loading.")

    if rc_preset is not None:
        topts.update(dict(rc_preset.task_options))
        wopts.update(dict(rc_preset.workflow_options))

    wopts.update(dict(builder_record.workflow_options))
    topts.update(builder_record.task_options)

    if preset_xml_record is not None:
        wopts.update(dict(preset_xml_record.workflow_options))
        topts.update(dict(preset_xml_record.task_options))
    if preset_json_record is not None:
        wopts.update(dict(preset_json_record.workflow_options))
        topts.update(dict(preset_json_record.task_options))

    workflow_level_opts = IO.WorkflowLevelOptions.from_id_dict(wopts)
    if len(sys.argv) > 0:
        # XXX evil, but this gets sys.argv into pbsmrtpipe.log
        workflow_level_opts.system_message = " ".join(sys.argv)

    # override distributed mode only if provided.
    if isinstance(force_distribute, bool):
        workflow_level_opts.distributed_mode = force_distribute
    workflow_level_opts = IO.validate_or_modify_workflow_level_options(workflow_level_opts)

    slog.info("Successfully validated workflow options.")

    slog.info("validating supplied task options.")
    topts = IO.validate_raw_task_options(registered_tasks, topts)
    slog.info("successfully validated (pre DI) task options.")

    workflow_bindings = builder_record.bindings

    if isinstance(workflow_level_opts.cluster_manager_path, str):
        cluster_render = C.load_cluster_templates(workflow_level_opts.cluster_manager_path)
    else:
        cluster_render = None

    if isinstance(force_chunk_mode, bool):
        workflow_level_opts.chunk_mode = force_chunk_mode

    workflow_level_opts.max_nchunks = min(workflow_level_opts.max_nchunks, GlobalConstants.MAX_NCHUNKS)

    if workflow_level_opts.distributed_mode is False:
        total_max_nproc = multiprocessing.cpu_count() if workflow_level_opts.total_max_nproc is None else workflow_level_opts.total_max_nproc
        workflow_level_opts.total_max_nproc = min(total_max_nproc, multiprocessing.cpu_count())
        workflow_level_opts.max_nproc = min(workflow_level_opts.max_nproc, workflow_level_opts.total_max_nproc)
        slog.info("local-only mode updating       MAX NPROC to {x}".format(x=workflow_level_opts.max_nproc))
        slog.info("local-only mode updating TOTAL MAX NPROC to {x}".format(x=workflow_level_opts.total_max_nproc))

    if debug_mode is True:
        slog.info("overriding debug-mode to True")
        workflow_level_opts.debug_mode = debug_mode

    log.debug("Resolved workflow level options to {d}".format(d=workflow_level_opts))
    log.debug("\n" + pprint.pformat(workflow_level_opts.to_dict(), indent=4))
    log.debug("Initial resolving of loaded preset.xml and pipeline.xml task options:")
    log.debug("\n" + pprint.pformat(topts))

    return workflow_bindings, workflow_level_opts, topts, cluster_render