Code example #1
 def _set_docker_image(self):
     if not self.docker_image:
         try:
             self.docker_image = podutils.get_docker_base_image()
         except (ConfigException, FileNotFoundError, ApiException):
             # no K8s config found; use kfp default image
             self.docker_image = ""
Code example #2
File: pipeline.py Project: noushi/kale
 def _set_docker_image(self):
     if not self.docker_image:
         try:
             # will fail if the in-cluster config is not found
             self.docker_image = podutils.get_docker_base_image()
         except ConfigException:
             # no K8s config found; use kfp default image
             self.docker_image = ""
Code example #3
File: pipeline.py Project: srinivasav22/kale
 def _set_docker_image(self):
     if not self.docker_image:
         try:
             self.docker_image = podutils.get_docker_base_image()
         except (ConfigException, RuntimeError, FileNotFoundError,
                 ApiException):
             # * ConfigException: no K8s config found
             # * RuntimeError, FileNotFoundError: this is not running in a
             #   pod
             # * ApiException: K8s call to read pod raised exception;
             # Use kfp default image
             self.docker_image = ""
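The exceptions handled in these three variants correspond to the ways a pod lookup can fail: ConfigException when no in-cluster K8s config is available, RuntimeError/FileNotFoundError when the code is not running inside a pod, and ApiException when the API call to read the pod fails. As a rough illustration of why those errors can surface, here is a minimal sketch of what a helper like podutils.get_docker_base_image() could look like when written directly against the official kubernetes Python client; the details are an assumption for illustration, not kale's actual implementation.

import os

from kubernetes import client, config

# Namespace file mounted into every pod via the service account
NAMESPACE_FILE = "/var/run/secrets/kubernetes.io/serviceaccount/namespace"


def get_docker_base_image():
    """Return the image of the current pod's first container (illustrative sketch)."""
    # Raises kubernetes.config.ConfigException when not running in a cluster
    config.load_incluster_config()
    # Raises FileNotFoundError when the service account file is not mounted,
    # i.e. the code is not running inside a pod
    with open(NAMESPACE_FILE) as f:
        namespace = f.read().strip()
    pod_name = os.environ["HOSTNAME"]  # in a pod, HOSTNAME is the pod name
    # Raises kubernetes.client.rest.ApiException if the read call fails
    pod = client.CoreV1Api().read_namespaced_pod(pod_name, namespace)
    return pod.spec.containers[0].image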
Code example #4
 def detect_environment(self):
     """Detect local confs to preserve reproducibility in pipeline steps."""
     # When running inside a Kubeflow Notebook Server we can detect the
     # running docker image and use it as default in the pipeline steps.
     if not self.pipeline_metadata['docker_image']:
         docker_image = ""
         try:
             # will fail if the in-cluster config is not found
             docker_image = get_docker_base_image()
         except ConfigException:
             # no K8s config found
             # use kfp default image
             pass
         except Exception:
             # some other exception
             raise
         self.pipeline_metadata["docker_image"] = docker_image
Code example #5
File: nb.py Project: ydataai/kale
def get_base_image(request):
    """Get the current pod's docker base image."""
    return podutils.get_docker_base_image()
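Since the body shown just forwards to podutils, calling the function directly only needs a placeholder for the unused request argument (nb.py appears to be part of kale's RPC layer used by the JupyterLab extension). A hedged usage sketch; the import path is an assumption:

from kale.rpc import nb  # module path is an assumption

image = nb.get_base_image(request=None)  # request is not used by the body above
print(image)  # prints the notebook pod's container image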
Code example #6
File: serveutils.py Project: srinivasav22/kale
def create_inference_service(name: str,
                             predictor: str,
                             pvc_name: str,
                             model_path: str,
                             image: str = None,
                             port: int = None,
                             transformer: bool = False,
                             submit: bool = True) -> KFServer:
    """Create and submit an InferenceService.

    Args:
        name (str): Name of the InferenceService CR
        predictor (str): One of serveutils.PREDICTORS
        pvc_name (str): Name of the PVC which contains the model
        model_path (str): Absolute path to the dump of the model
        image (optional): Image to run the InferenceService
        port (optional): To be used in conjunction with `image`. The port where
            the custom endpoint is exposed.
        transformer (bool): True if the InferenceService is to be deployed with
            a transformer.
        submit (bool): Set to False to just create the YAML and not submit the
            CR to K8s.

    Returns (KFServer): Object wrapping the InferenceService name and its YAML spec.
    """
    if predictor not in PREDICTORS:
        raise ValueError("Invalid predictor: %s. Choose one of %s" %
                         (predictor, PREDICTORS))

    if predictor == "custom":
        if not image:
            raise ValueError("You must specify an image when using a custom"
                             " predictor.")
        if not port:
            raise ValueError("You must specify a port when using a custom"
                             " predictor.")
        predictor_spec = CUSTOM_PREDICTOR_TEMPLATE.format(
            image=image, port=port, pvc_name=pvc_name, model_path=model_path)
    else:
        if image is not None:
            log.info(
                "Creating an InferenceService with predictor '%s'."
                " Ignoring image...", predictor)
        if port is not None:
            log.info(
                "Creating an InferenceService with predictor '%s'."
                " Ignoring port...", predictor)
        predictor_spec = PVC_PREDICTOR_TEMPLATE.format(predictor=predictor,
                                                       pvc_name=pvc_name,
                                                       model_path=model_path)

    infs_spec = yaml.safe_load(RAW_TEMPLATE.format(name=name))
    predictor_spec = yaml.safe_load(predictor_spec)
    if predictor == "tensorflow":
        # XXX: TF Server is the only predictor being pulled from an external
        # repository. TFServer containers are tagged using the library's version
        # number. All the other predictors are built by the KFServing community
        # and are tagged following KFServing's version number. Default values
        # for these can be set in the `inferenceservice-config` ConfigMap.
        _version = _get_runtime_version(predictor)
        predictor_spec["tensorflow"]["runtimeVersion"] = _version
    infs_spec["spec"]["default"]["predictor"] = predictor_spec

    if transformer:
        transformer_spec = yaml.safe_load(
            TRANSFORMER_CUSTOM_TEMPLATE.format(
                image=podutils.get_docker_base_image(),
                pvc_name=pvc_name,
                pvc_mount_point=PVC_ROOT))
        infs_spec["spec"]["default"]["transformer"] = transformer_spec

    yaml_filename = "%s.kfserving.yaml" % name
    yaml_contents = yaml.dump(infs_spec)
    log.info("Saving InferenceService definition at '%s'", yaml_filename)
    with open(yaml_filename, "w") as yaml_file:
        yaml_file.write(yaml_contents)

    if submit:
        _submit_inference_service(infs_spec, podutils.get_namespace())
        _add_owner_references(name, pvc_name)
    return KFServer(name=name, spec=yaml_contents)
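A possible way to call create_inference_service from a notebook, shown only as a usage sketch: the module path, the "sklearn" predictor, and the PVC/model values are assumptions, and submit=False limits the call to generating the YAML file.

# Hypothetical usage; all values below are placeholders.
from kale.common import serveutils  # module path is an assumption

kfserver = serveutils.create_inference_service(
    name="my-model",
    predictor="sklearn",               # assumed to be one of serveutils.PREDICTORS
    pvc_name="workspace-my-notebook",  # PVC holding the serialized model
    model_path="/home/jovyan/model.joblib",
    submit=False)                      # only write my-model.kfserving.yaml, do not apply it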