Code example #1
0
 def _run_compiled_code(self, script_path: str):
     """Compile, upload and run the pipeline defined by *script_path*."""
     config = self.pipeline.config
     pipeline_name = config.pipeline_name
     # Compile the DSL script into a KFP pipeline package, then publish it.
     package_path = kfputils.compile_pipeline(script_path, pipeline_name)
     kfputils.upload_pipeline(package_path, pipeline_name)
     # Submit a run of the freshly uploaded package.
     kfputils.run_pipeline(
         run_name=kfputils.generate_run_name(pipeline_name),
         experiment_name=config.experiment_name,
         pipeline_package_path=package_path)
Code example #2
0
File: kfp.py  Project: srinivasav22/kale
def run_pipeline(request, pipeline_metadata, pipeline_id, version_id):
    """Run a pipeline.

    Starts a run of the given pipeline version inside the experiment named
    in *pipeline_metadata* and returns the new run's identifying fields.
    """
    experiment = pipeline_metadata["experiment_name"]
    run = kfputils.run_pipeline(experiment_name=experiment,
                                pipeline_id=pipeline_id,
                                version_id=version_id)
    return {
        "id": run.id,
        "name": run.name,
        "status": run.status,
    }
Code example #3
0
File: cli.py  Project: gbrlins/kale-1
def main():
    """Entry-point of CLI command.

    Parses CLI arguments, processes the source notebook into a pipeline,
    compiles it into a KFP package and, depending on the provided flags,
    uploads the package and/or submits a run of it.
    """
    parser = argparse.ArgumentParser(description=ARGS_DESC,
                                     formatter_class=RawTextHelpFormatter)
    general_group = parser.add_argument_group('General')
    general_group.add_argument('--nb',
                               type=str,
                               help='Path to source JupyterNotebook',
                               required=True)
    # use store_const instead of store_true because we want None instead of
    # False in case the flag is missing
    general_group.add_argument('--upload_pipeline',
                               action='store_const',
                               const=True)
    general_group.add_argument('--run_pipeline',
                               action='store_const',
                               const=True)
    general_group.add_argument('--debug', action='store_true')

    # Arguments in this group override the corresponding values stored in
    # the notebook's Kale metadata.
    metadata_group = parser.add_argument_group('Notebook Metadata Overrides',
                                               METADATA_GROUP_DESC)
    metadata_group.add_argument('--experiment_name',
                                type=str,
                                help='Name of the created experiment')
    metadata_group.add_argument('--pipeline_name',
                                type=str,
                                help='Name of the deployed pipeline')
    metadata_group.add_argument('--pipeline_description',
                                type=str,
                                help='Description of the deployed pipeline')
    metadata_group.add_argument('--docker_image',
                                type=str,
                                help='Docker base image used to build the '
                                'pipeline steps')
    metadata_group.add_argument('--kfp_host',
                                type=str,
                                help='KFP endpoint. Provide address as '
                                '<host>:<port>.')
    metadata_group.add_argument('--storage-class-name',
                                type=str,
                                help='The storage class name for the created'
                                ' volumes')
    metadata_group.add_argument('--volume-access-mode',
                                type=str,
                                help='The access mode for the created volumes')

    args = parser.parse_args()

    # get the notebook metadata args group
    # NOTE(review): relies on argparse's private `_action_groups` and
    # `_group_actions` attributes; confirm behavior on argparse upgrades.
    mt_overrides_group = next(
        filter(lambda x: x.title == 'Notebook Metadata Overrides',
               parser._action_groups))
    # get the single args of that group, keeping only the ones the user set
    mt_overrides_group_dict = {
        a.dest: getattr(args, a.dest, None)
        for a in mt_overrides_group._group_actions
        if getattr(args, a.dest, None) is not None
    }

    # FIXME: We are removing the `debug` arg. This shouldn't be an issue
    processor = NotebookProcessor(args.nb, mt_overrides_group_dict)
    pipeline = processor.run()
    dsl_script_path = Compiler(pipeline).compile()
    pipeline_name = pipeline.config.pipeline_name
    pipeline_package_path = kfputils.compile_pipeline(dsl_script_path,
                                                      pipeline_name)

    if args.upload_pipeline:
        kfputils.upload_pipeline(pipeline_package_path=pipeline_package_path,
                                 pipeline_name=pipeline_name,
                                 host=pipeline.config.kfp_host)

    if args.run_pipeline:
        run_name = kfputils.generate_run_name(pipeline_name)
        kfputils.run_pipeline(run_name=run_name,
                              experiment_name=pipeline.config.experiment_name,
                              pipeline_package_path=pipeline_package_path,
                              host=pipeline.config.kfp_host)
Code example #4
0
def main():
    """Entry-point of CLI command.

    Parses CLI arguments, converts the source notebook into a KFP
    executable script, compiles it into a pipeline package and, depending
    on the provided flags, uploads the package and/or submits a run.
    """
    parser = argparse.ArgumentParser(description=ARGS_DESC,
                                     formatter_class=RawTextHelpFormatter)
    general_group = parser.add_argument_group('General')
    general_group.add_argument('--nb',
                               type=str,
                               help='Path to source JupyterNotebook',
                               required=True)
    # use store_const instead of store_true because we want None instead of
    # False in case the flag is missing
    general_group.add_argument('--upload_pipeline',
                               action='store_const',
                               const=True)
    general_group.add_argument('--run_pipeline',
                               action='store_const',
                               const=True)
    general_group.add_argument('--debug', action='store_true')

    # Arguments in this group override the corresponding values stored in
    # the notebook's Kale metadata.
    metadata_group = parser.add_argument_group('Notebook Metadata Overrides',
                                               METADATA_GROUP_DESC)
    metadata_group.add_argument('--experiment_name',
                                type=str,
                                help='Name of the created experiment')
    metadata_group.add_argument('--pipeline_name',
                                type=str,
                                help='Name of the deployed pipeline')
    metadata_group.add_argument('--pipeline_description',
                                type=str,
                                help='Description of the deployed pipeline')
    metadata_group.add_argument('--docker_image',
                                type=str,
                                help='Docker base image used to build the '
                                'pipeline steps')
    metadata_group.add_argument('--kfp_host',
                                type=str,
                                help='KFP endpoint. Provide address as '
                                '<host>:<port>.')

    args = parser.parse_args()

    # get the notebook metadata args group
    # NOTE(review): relies on argparse's private `_action_groups` and
    # `_group_actions` attributes; confirm behavior on argparse upgrades.
    mt_overrides_group = next(
        filter(lambda x: x.title == 'Notebook Metadata Overrides',
               parser._action_groups))
    # get the single args of that group, keeping only the ones the user set
    mt_overrides_group_dict = {
        a.dest: getattr(args, a.dest, None)
        for a in mt_overrides_group._group_actions
        if getattr(args, a.dest, None) is not None
    }

    kale = Kale(source_notebook_path=args.nb,
                notebook_metadata_overrides=mt_overrides_group_dict,
                debug=args.debug)
    pipeline_graph, pipeline_parameters = kale.notebook_to_graph()
    script_path = kale.generate_kfp_executable(pipeline_graph,
                                               pipeline_parameters)
    # compile the pipeline to kfp tar package
    pipeline_name = kale.pipeline_metadata['pipeline_name']
    pipeline_package_path = kfputils.compile_pipeline(script_path,
                                                      pipeline_name)

    if args.upload_pipeline:
        kfputils.upload_pipeline(
            pipeline_package_path=pipeline_package_path,
            pipeline_name=kale.pipeline_metadata['pipeline_name'],
            host=kale.pipeline_metadata.get('kfp_host', None))

    if args.run_pipeline:
        run_name = kfputils.generate_run_name(
            kale.pipeline_metadata['pipeline_name'])
        kfputils.run_pipeline(
            run_name=run_name,
            experiment_name=kale.pipeline_metadata['experiment_name'],
            pipeline_package_path=pipeline_package_path,
            host=kale.pipeline_metadata.get('kfp_host', None))
Code example #5
0
    _kale_sum_matrix_task.add_pod_label(
        "pipelines.kubeflow.org/metadata_written", "true")
    _kale_dep_names = (_kale_sum_matrix_task.dependent_names +
                       _kale_volume_step_names)
    _kale_sum_matrix_task.add_pod_annotation(
        "kubeflow-kale.org/dependent-templates", json.dumps(_kale_dep_names))
    if _kale_volume_name_parameters:
        _kale_sum_matrix_task.add_pod_annotation(
            "kubeflow-kale.org/volume-name-parameters",
            json.dumps(_kale_volume_name_parameters))


if __name__ == "__main__":
    # Compile the auto-generated pipeline function into a tarball package
    # named after the function itself.
    func = auto_generated_pipeline
    package_file = func.__name__ + '.pipeline.tar.gz'
    import kfp.compiler as compiler
    compiler.Compiler().compile(func, package_file)

    # Get or create the target experiment on the KFP instance.
    import kfp
    kfp_client = kfp.Client()
    experiment = kfp_client.create_experiment('hp-tuning')

    # Upload the package and submit a run of the uploaded version.
    from kale.common import kfputils
    pipeline_id, version_id = kfputils.upload_pipeline(package_file,
                                                       "hp-test")
    run_result = kfputils.run_pipeline(experiment_name=experiment.name,
                                       pipeline_id=pipeline_id,
                                       version_id=version_id)
Code example #6
0
def create_and_wait_kfp_run(pipeline_id: str,
                            version_id: str,
                            run_name: str,
                            experiment_name: str = "Default",
                            api_version: str = KATIB_API_VERSION_V1BETA1,
                            **kwargs):
    """Create a KFP run, wait for it to complete and retrieve its metrics.

    Create a KFP run from a KFP pipeline with custom arguments and wait for
    it to finish. If it succeeds, return its metrics, logging them in a format
    that can be parsed by Katib's metrics collector.

    Also, annotate the parent trial with the run UUID of the KFP run and
    annotate the KFP workflow with the Katib experiment and trial names and
    ids.

    Args:
        pipeline_id: KFP pipeline
        version_id: KFP pipeline's version
        run_name: The name of the new run
        experiment_name: KFP experiment to create run in. (default: "Default")
        api_version: The version of the Katib CRD (`v1alpha3` or `v1beta1`)
        kwargs: All the parameters the pipeline will be fed with

    Returns:
        metrics: Dict of metrics along with their values
    """
    pod_namespace = podutils.get_namespace()
    # Submit the run; extra kwargs are forwarded as pipeline parameters.
    run = kfputils.run_pipeline(experiment_name=experiment_name,
                                pipeline_id=pipeline_id,
                                version_id=version_id,
                                run_name=run_name,
                                **kwargs)
    run_id = run.id

    log.info("Annotating Trial '%s' with the KFP Run UUID '%s'...",
             run_name, run_id)
    try:
        # Katib Trial name == KFP Run name by design (see rpc.katib)
        annotate_trial(run_name, pod_namespace,
                       {KALE_KATIB_KFP_ANNOTATION_KEY: run_id}, api_version)
    except Exception:
        # Best-effort: failing to annotate must not abort the run itself.
        log.exception("Failed to annotate Trial '%s' with the KFP Run UUID"
                      " '%s'", run_name, run_id)

    log.info("Getting Workflow name for run '%s'...", run_id)
    workflow_name = kfputils.get_workflow_from_run(
        kfputils.get_run(run_id))["metadata"]["name"]
    log.info("Workflow name: %s", workflow_name)
    log.info("Getting the Katib trial...")
    trial = get_trial(run_name, pod_namespace, api_version)
    log.info("Trial name: %s, UID: %s", trial["metadata"]["name"],
             trial["metadata"]["uid"])
    log.info("Getting owner Katib experiment of trial...")
    exp_name, exp_id = get_owner_experiment_from_trial(trial)
    log.info("Experiment name: %s, UID: %s", exp_name, exp_id)
    # Cross-link the workflow back to the owning Katib experiment and trial.
    wf_annotations = {
        EXPERIMENT_NAME_ANNOTATION_KEY: exp_name,
        EXPERIMENT_ID_ANNOTATION_KEY: exp_id,
        TRIAL_NAME_ANNOTATION_KEY: trial["metadata"]["name"],
        TRIAL_ID_ANNOTATION_KEY: trial["metadata"]["uid"],
    }
    try:
        workflowutils.annotate_workflow(workflow_name, pod_namespace,
                                        wf_annotations)
    except Exception:
        # Best-effort as above: log the failure and continue.
        log.exception("Failed to annotate Workflow '%s' with the Katib"
                      " details", workflow_name)

    # Block until the KFP run reaches a terminal state.
    status = kfputils.wait_kfp_run(run_id)

    # If run has not succeeded, return no metrics
    if status != "Succeeded":
        log.warning("KFP run did not run successfully. No metrics to"
                    " return.")
        # exit gracefully with error
        sys.exit(-1)

    # Retrieve metrics and log them so Katib's collector can parse them.
    run_metrics = kfputils.get_kfp_run_metrics(run_id)
    for name, value in run_metrics.items():
        log.info("%s=%s", name, value)

    return run_metrics