Example #1
def save(obj, obj_name):
    """Serialise (save) an object using Kale's marshalling backends.

    Args:
        obj: The Python object to be saved
        obj_name: The variable name of 'obj'
    """
    try:
        _save(obj, os.path.join(KALE_DATA_DIRECTORY, obj_name))
    except KaleMarshalException as e:
        log.error(e)
        log.debug("Original Traceback", exc_info=e.__traceback__)
        utils.graceful_exit(1)
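A minimal usage sketch, assuming this helper is called from a generated pipeline step; train_model, dataset and the variable names are illustrative assumptions, not part of Kale's API:

# Hypothetical call at the end of a pipeline step: each output variable is
# persisted under KALE_DATA_DIRECTORY so that downstream steps can load it.
model = train_model(dataset)
save(model, "model")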
Example #2
def load(file_name):
    """Load a file using Kale's marshalling backends.

    Args:
        file_name: The name of the serialized object to be loaded

    Returns: loaded object
    """
    try:
        return _load(file_name)
    except KaleMarshalException as e:
        log.error(e)
        log.debug("Original Traceback", exc_info=e.__traceback__)
        utils.graceful_exit(1)
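The counterpart in a downstream step; a hedged sketch assuming the upstream step saved an object under the name "model":

# Hypothetical restore at the start of the next pipeline step.
model = load("model")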
Example #3
    def load(self, basename: str):
        """Restore a file to memory.

        Args:
            basename: The name of the serialized object to be loaded

        Returns: restored object
        """
        try:
            entry_name = self._unique_ls(basename)
            return self._dispatch_file_type(entry_name).wrapped_load(basename)
        except Exception as e:
            error_msg = ("During data passing, Kale could not load the"
                         " following file:\n\n\n  - name: '%s'" % basename)
            log.error(error_msg + self.END_USER_EXC_MSG % e)
            log.debug("Original Traceback", exc_info=e.__traceback__)
            utils.graceful_exit(1)
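This method belongs to a dispatcher that picks a marshalling backend based on the file found on disk for a given basename. A hedged sketch of calling it, assuming an already constructed instance named marshaller (the instance name and the basename are assumptions):

# Hypothetical restore via the dispatcher: the backend is chosen from the
# file type discovered on disk for the "metrics" basename.
restored = marshaller.load("metrics")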
Example #4
    def save(self, obj: Any, obj_name: str):
        """Save an object to file.

        Args:
            obj: Object to be marshalled
            obj_name: Name of the object to be saved
        """
        try:
            return self._dispatch_obj_type(obj).wrapped_save(obj, obj_name)
        except Exception as e:
            error_msg = (
                "During data passing, Kale could not marshal the"
                " following object:\n\n  - path: '%s'\n  - type: '%s'" %
                (obj_name, type(obj)))
            log.error(error_msg + self.END_USER_EXC_MSG % e)
            log.debug("Original Traceback", exc_info=e.__traceback__)
            utils.graceful_exit(1)
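The corresponding save path dispatches on the object's type instead. A hedged counterpart sketch, with the same assumed marshaller instance:

# Hypothetical save via the dispatcher: the backend is chosen from type(data).
data = {"accuracy": 0.93}
marshaller.save(data, "metrics")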
Example #5
    def _ttl_signal_handler(_signal, _frame):
        log.error("Timeout expired. This step was configured to run with a"
                  " TTL of %s seconds. Stopping execution..." % timeout)
        utils.graceful_exit(-1)
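The handler closes over the timeout variable of an enclosing function. A hedged sketch of how such a handler is typically armed with Python's standard signal module; the surrounding code is an assumption, not Kale's verbatim implementation:

import signal

# Hypothetical wiring around the step body (step_body is a placeholder).
signal.signal(signal.SIGALRM, _ttl_signal_handler)
signal.alarm(timeout)   # deliver SIGALRM after `timeout` seconds
try:
    step_body()
finally:
    signal.alarm(0)     # disarm the alarm if the step finishes in time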
Example #6
def serve(model: Any,
          name: str = None,
          wait: bool = True,
          predictor: str = None,
          preprocessing_fn: Callable = None,
          preprocessing_assets: Dict = None) -> KFServer:
    """Main API used to serve models from a notebook or a pipeline step.

    This function procedurally deploys a KFServing InferenceService, starting
    from a model object. A summary list of actions follows:

    * Autogenerate an InferenceService name, if not provided
    * Process transformer function (and related assets)
    * Dump the model, to a path under a mounted PVC
    * Snapshot the PVC
    * Hydrate a new PVC from the new snapshot
    * Submit an InferenceService CR
    * Monitor the CR until it becomes ready

    FIXME: Improve documentation. Provide some examples in the docstring and
      explain how the preprocessing function parsing works.

    Args:
        model: Model object to be used as a predictor
        name (optional): Name of the predictor. Will be autogenerated if not
            provided
        wait (optional): Wait for the InferenceService to become ready.
            Default: True
        predictor (optional): Predictor type to be used for the
            InferenceService. If not provided it will be inferred using
            the matching marshalling backend.
        preprocessing_fn (optional): A processing function that will be
            deployed as a KFServing Transformer
        preprocessing_assets (optional): A dictionary with objects required by
            the preprocessing function. This is needed in case the
            preprocessing function references global objects.

    Returns: A KFServer instance
    """
    log.info("Starting serve procedure for model '%s'", model)
    if not name:
        name = "%s-%s" % (podutils.get_pod_name(), utils.random_string(5))

    # Validate and process transformer
    if preprocessing_fn:
        _prepare_transformer_assets(preprocessing_fn, preprocessing_assets)

    # Detect predictor type
    predictor_type = marshal.get_backend(model).predictor_type
    if predictor and predictor != predictor_type:
        raise RuntimeError("Trying to create an InferenceService with"
                           " predictor of type '%s' but the model is of type"
                           " '%s'" % (predictor, predictor_type))
    if not predictor_type:
        log.error(
            "Kale does not yet support serving objects with '%s'"
            " backend.\n\nPlease help us improve Kale by opening a new"
            " issue at:\n"
            "https://github.com/kubeflow-kale/kale/issues",
            marshal.get_backend(model).display_name)
        utils.graceful_exit(-1)
    predictor = predictor_type  # in case `predictor` is None

    volume = podutils.get_volume_containing_path(PVC_ROOT)
    volume_name = volume[1].persistent_volume_claim.claim_name
    log.info("Model is contained in volume '%s'", volume_name)

    # Dump the model
    marshal.set_data_dir(PREDICTOR_MODEL_DIR)
    model_filepath = marshal.save(model, "model")
    log.info("Model saved successfully at '%s'", model_filepath)

    # Take snapshot
    task_info = rokutils.snapshot_pvc(volume_name,
                                      bucket=rokutils.SERVING_BUCKET,
                                      wait=True)
    task = rokutils.get_task(task_info["task"]["id"],
                             bucket=rokutils.SERVING_BUCKET)
    new_pvc_name = "%s-pvc-%s" % (name, utils.random_string(5))
    rokutils.hydrate_pvc_from_snapshot(task["result"]["event"]["object"],
                                       task["result"]["event"]["version"],
                                       new_pvc_name,
                                       bucket=rokutils.SERVING_BUCKET)

    # Cleanup: remove dumped model and transformer assets from the current PVC
    utils.rm_r(
        os.path.join(PREDICTOR_MODEL_DIR, os.path.basename(model_filepath)))
    utils.rm_r(TRANSFORMER_ASSETS_DIR, silent=True)

    # Need an absolute path from the *root* of the PVC. Add '/' if not exists.
    pvc_model_path = "/" + PREDICTOR_MODEL_DIR.lstrip(PVC_ROOT)
    # Tensorflow saves the model's files into a directory by itself
    if predictor == "tensorflow":
        pvc_model_path += "/" + os.path.basename(model_filepath).lstrip("/")

    kfserver = create_inference_service(
        name=name,
        predictor=predictor,
        pvc_name=new_pvc_name,
        model_path=pvc_model_path,
        transformer=preprocessing_fn is not None)

    if wait:
        monitor_inference_service(kfserver.name)
    return kfserver
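A hedged notebook-side usage sketch, assuming a scikit-learn model and that X_train / y_train were defined in earlier cells; the preprocessing function is illustrative only:

from sklearn.linear_model import LogisticRegression

# Hypothetical notebook cell: deploy the trained model as an
# InferenceService and block until it becomes ready.
model = LogisticRegression().fit(X_train, y_train)

def preprocess(inputs):
    # e.g. normalise or reshape the request payload before prediction
    return inputs

kfserver = serve(model, wait=True, preprocessing_fn=preprocess)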