Example #1
def create_outlier_cls():
    @model(
        name="outlier",
        platform=ModelFramework.Custom,
        protocol=V2Protocol(),
        uri="s3://tempo/outlier/cifar10/outlier",
        local_folder=os.path.join(ARTIFACTS_FOLDER, OUTLIER_FOLDER),
    )
    class OutlierModel(object):
        def __init__(self):
            from alibi_detect.utils.saving import load_detector

            model = self.get_tempo()
            models_folder = model.details.local_folder
            print(f"Loading from {models_folder}")
            self.od = load_detector(os.path.join(models_folder, "cifar10"))

        @predictmethod
        def outlier(self, payload: np.ndarray) -> dict:
            od_preds = self.od.predict(
                payload,
                outlier_type="instance",  # use 'feature' or 'instance' level
                return_feature_score=True,
                # scores used to determine outliers
                return_instance_score=True,
            )

            return json.loads(json.dumps(od_preds, cls=NumpyEncoder))

    return OutlierModel
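A minimal local-usage sketch of the factory above; the batch shape (one CIFAR-10 image) is an assumption, and the alibi-detect artifacts must already exist under ARTIFACTS_FOLDER/OUTLIER_FOLDER:

# Sketch only - assumes the detector artifacts have been downloaded first.
OutlierModel = create_outlier_cls()
outlier = OutlierModel()
batch = np.zeros((1, 32, 32, 3), dtype=np.float32)  # one CIFAR-10-shaped image
preds = outlier.outlier(payload=batch)
print(preds["data"]["is_outlier"])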
Example #2
def test_v2_to_protocol_request_other():
    v2 = V2Protocol()
    data = 1
    request = v2.to_protocol_request(data)
    # The request should not carry "parameters", chiefly so that
    # content_type="np" is absent for non-numpy payloads. This is a bit
    # convoluted; a better way of handling inference types in tempo may be
    # needed.
    assert "parameters" not in request
Example #3
    def __init__(
        self,
        name: str,
        protocol: Protocol = V2Protocol(),
        local_folder: str = None,
        uri: str = None,
        platform: ModelFramework = None,
        inputs: ModelDataType = None,
        outputs: ModelDataType = None,
        model_func: Callable[..., Any] = None,
        conda_env: str = None,
        runtime_options: BaseRuntimeOptionsType = DockerOptions(),
        description: str = "",
    ):
        """

        Parameters
        ----------
        name
         Name of the model. Needs to be Kubernetes compliant.
        protocol
         :class:`tempo.serve.protocol.Protocol`. Defaults to KFserving V2.
        local_folder
         Location of local artifacts.
        uri
         Location of remote artifacts.
        platform
         The :class:`tempo.serve.metadata.ModelFramework`
        inputs
         The input types.
        outputs
         The output types.
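        model_func
         The prediction function to wrap when the model is defined from a
         function (the ``@model`` decorator passes it through here).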
        conda_env
         The conda environment name to use. If not specified will look for conda.yaml in
         local_folder or generate from current running environment.
        runtime_options
         The runtime options. Can be left empty and set when creating a runtime.
        description
         The description of the model

        """
        super().__init__(
            name,
            # TODO: Should we unify names?
            user_func=model_func,
            local_folder=local_folder,
            uri=uri,
            platform=platform,
            inputs=inputs,
            outputs=outputs,
            conda_env=conda_env,
            protocol=protocol,
            runtime_options=runtime_options,
            description=description,
        )
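For reference, a direct-construction sketch using the parameters documented above; the name and artifact locations are illustrative placeholders:

# Sketch only - placeholder locations, no real artifacts behind them.
m = Model(
    name="my-xgboost",
    platform=ModelFramework.XGBoost,
    local_folder="./artifacts/my-xgboost",
    uri="s3://my-bucket/my-xgboost",
)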
Example #4
def test_v2_from_protocol_response():
    res = {
        "outputs": [{
            "name": "a",
            "data": [97, 98, 99],
            "datatype": "BYTES"
        }]
    }
    modelTyArgs = ModelDataArgs(args=[ModelDataArg(ty=str, name=None)])
    v2 = V2Protocol()
    res = v2.from_protocol_response(res, modelTyArgs)
    # [97, 98, 99] are the ASCII codes for "abc"; with ty=str the BYTES
    # payload decodes back to that string.
    assert res == "abc"
Example #5
def test_custom_model(v2_input, expected):
    @model(
        name="custom",
        protocol=V2Protocol(),
        platform=ModelFramework.Custom,
    )
    def custom_model(a: np.ndarray) -> np.ndarray:
        return a

    response = custom_model.request(v2_input)
    assert response == expected
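The (v2_input, expected) pair comes from a @pytest.mark.parametrize decorator that is not part of this excerpt; an illustrative request consistent with the V2 shape in Example #7 below might look like:

# Illustrative only - the real fixture values are not shown in this excerpt.
v2_input = {
    "inputs": [{
        "name": "a",
        "datatype": "FP64",
        "shape": [1, 3],
        "data": [1.0, 2.0, 3.0],
    }],
}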
Example #6
def custom_model() -> Model:
    @model(
        name="custom-model",
        protocol=V2Protocol(),
        platform=ModelFramework.Custom,
    )
    def _custom_model(payload: np.ndarray) -> np.ndarray:
        return _custom_model.context.model(payload)

    @_custom_model.loadmethod
    def _load():
        # The load hook runs when the model is loaded (e.g. at serving
        # startup), before any predictions; here it stashes a toy "model"
        # on the shared context.
        _custom_model.context.model = lambda a: a.sum(keepdims=True)

    return _custom_model
Example #7
def test_v2_to_protocol_request_numpy():
    v2 = V2Protocol()
    data = np.random.randn(1, 28 * 28)
    request = v2.to_protocol_request(data)
    expected_request = {
        "parameters": _REQUEST_NUMPY_CONTENT_TYPE,
        "inputs": [{
            "name": "input-0",
            "datatype": "FP64",
            "data": data.flatten().tolist(),
            "shape": list(data.shape),
        }],
    }

    assert expected_request == request
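_REQUEST_NUMPY_CONTENT_TYPE is not defined in this excerpt; judging from the comment in Example #2, it presumably pins the request's content type to numpy, along the lines of:

# Assumption inferred from Example #2's comment, not confirmed by this excerpt.
_REQUEST_NUMPY_CONTENT_TYPE = {"content_type": "np"}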
Example #8
    def from_protocol_request(self, res: Dict, tys: ModelDataArgs) -> Any:
        # Decode a V2 inference request: each listed input is converted back
        # to a numpy array, keyed by its name.
        inp = {}
        for idx, input in enumerate(res["inputs"]):
            ty = TensorflowProtocol.get_ty(input["name"], idx, tys)

            if ty == np.ndarray:
                arr = V2Protocol.create_np_from_v2(input["data"],
                                                   input["datatype"],
                                                   input["shape"])
                inp[input["name"]] = arr
            else:
                raise ValueError(f"Unknown ty {ty} in conversion")

        # A single input is returned bare; multiple inputs come back as a
        # name-keyed dict.
        if len(inp) == 1:
            return list(inp.values())[0]
        else:
            return inp
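The numpy reconstruction helper can also be exercised on its own, with the same (data, datatype, shape) signature used above; the values here are illustrative:

# Sketch: rebuild a (1, 2) float64 array from its V2 wire representation.
arr = V2Protocol.create_np_from_v2([0.5, 1.5], "FP64", [1, 2])
assert arr.shape == (1, 2)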
Example #9
def create_svc_cls(outlier, model):
    @pipeline(
        name="cifar10-service",
        protocol=V2Protocol(),
        uri="s3://tempo/outlier/cifar10/svc",
        local_folder=os.path.join(ARTIFACTS_FOLDER, "svc"),
        models=PipelineModels(outlier=outlier, cifar10=model),
    )
    class Cifar10Svc(object):
        @predictmethod
        def predict(self, payload: np.ndarray) -> np.ndarray:
            r = self.models.outlier(payload=payload)
            if r["data"]["is_outlier"][0]:
                return np.array([])
            else:
                return self.models.cifar10(payload)

    return Cifar10Svc
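A usage sketch combining this factory with Example #1; cifar10_model is a stand-in classifier, and the outlier detector's artifacts must be available locally:

# Sketch only - a dummy classifier stands in for the real CIFAR-10 model.
@model(name="cifar10", platform=ModelFramework.Custom)
def cifar10_model(payload: np.ndarray) -> np.ndarray:
    return np.zeros((payload.shape[0], 10))

OutlierModel = create_outlier_cls()
Cifar10Svc = create_svc_cls(OutlierModel(), cifar10_model)
svc = Cifar10Svc()
print(svc.predict(payload=np.zeros((1, 32, 32, 3), dtype=np.float32)))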
Example #10
def test_model_spec():
    ms = ModelSpec(
        model_details=ModelDetails(
            name="test",
            local_folder="",
            uri="",
            platform=ModelFramework.XGBoost,
            inputs=ModelDataArgs(args=[ModelDataArg(ty=str)]),
            outputs=ModelDataArgs(args=[]),
        ),
        protocol=V2Protocol(),
        runtime_options=KFServingOptions().local_options,
    )
    s = ms.json()
    j = json.loads(s)
    ms2 = ModelSpec(**j)
    assert isinstance(ms2.protocol, V2Protocol)
    assert ms2.model_details.inputs.args[0].ty == str
Example #11
def test_convert_from_bytes(data, ty, expected):
    output = {"data": data}
    res = V2Protocol.convert_from_bytes(output, ty)
    assert res == expected
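The parametrized values are not shown; an illustrative case, consistent with the BYTES payload in Example #4, would be:

# Illustrative only - decodes the BYTES payload back to a string.
data, ty, expected = [97, 98, 99], str, "abc"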
Example #12
def test_v2_from_any(data, expected):
    d = V2Protocol.create_v2_from_any(data, "a")
    assert d["name"] == "a"
    assert d["data"] == expected
    assert d["datatype"] == "BYTES"
Example #13
def model(
    name: str,
    local_folder: str = None,
    uri: str = None,
    platform: ModelFramework = ModelFramework.Custom,
    inputs: ModelDataType = None,
    outputs: ModelDataType = None,
    conda_env: str = None,
    protocol: Protocol = V2Protocol(),
    runtime_options: BaseRuntimeOptionsType = DockerOptions(),
    description: str = "",
):
    """

    Parameters
    ----------
    name
     Name of the model. Needs to be Kubernetes compliant.
    protocol
     :class:`tempo.serve.protocol.Protocol`. Defaults to KFserving V2.
    local_folder
     Location of local artifacts.
    uri
     Location of remote artifacts.
    inputs
     The input types.
    outputs
     The output types.
    conda_env
     The conda environment name to use. If not specified will look for conda.yaml in local_folder
     or generate from current running environment.
    runtime_options
     The runtime options. Can be left empty and set when creating a runtime.
    platform
     The :class:`tempo.serve.metadata.ModelFramework`
    description
     Description of the model

    Returns
    -------
    A decorated function or class as a Tempo Model.

    """
    def _model(f):
        predict_method = f
        if isclass(f):
            predict_method = _get_predict_method(f)

        model = Model(
            name,
            protocol=protocol,
            local_folder=local_folder,
            uri=uri,
            platform=platform,
            inputs=inputs,
            outputs=outputs,
            model_func=predict_method,
            conda_env=conda_env,
            runtime_options=runtime_options,
            description=description,
        )

        if isclass(f):
            return _wrap_class(f, model)

        return model

    return _model
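A minimal sketch of the decorator applied to a plain function (compare Example #5); the name is illustrative, and the input/output types are inferred from the annotations:

# Sketch only - a trivial custom model.
@model(name="square", platform=ModelFramework.Custom)
def square(a: np.ndarray) -> np.ndarray:
    return a * a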
Example #14
def pipeline(
    name: str,
    protocol: Protocol = V2Protocol(),
    local_folder: str = None,
    uri: str = None,
    models: PipelineModels = None,
    inputs: ModelDataType = None,
    outputs: ModelDataType = None,
    conda_env: str = None,
    runtime_options: BaseRuntimeOptionsType = DockerOptions(),
    description: str = "",
):
    """
    A decorator for a class or function to make it a Tempo Pipeline.

    Parameters
    ----------
    name
     Name of the pipeline. Needs to be Kubernetes compliant.
    protocol
     :class:`tempo.serve.protocol.Protocol`. Defaults to KFserving V2.
    local_folder
     Location of local artifacts.
    uri
     Location of remote artifacts.
    models
     A list of models defined as PipelineModels.
    inputs
     The input types.
    outputs
     The output types.
    conda_env
     The conda environment name to use. If not specified will look for conda.yaml in local_folder
     or generate from current running environment.
    runtime_options
     The runtime options. Can be left empty and set when creating a runtime.
    description
     Description of the pipeline

    Returns
    -------
    A decorated class or function.

    """
    def _pipeline(f):
        predict_method = f
        if isclass(f):
            predict_method = _get_predict_method(f)

        pipeline = Pipeline(
            name,
            local_folder=local_folder,
            uri=uri,
            models=models,
            inputs=inputs,
            outputs=outputs,
            pipeline_func=predict_method,
            conda_env=conda_env,
            protocol=protocol,
            runtime_options=runtime_options,
            description=description,
        )

        if isclass(f):
            K = _wrap_class(f, pipeline, field_name="pipeline")

            @property
            def models_property(self):
                # Avoid storing a reference to `.models` on the K class itself;
                # needed when saving a limited copy of the models for remote use.
                return K.pipeline.models

            K.models = models_property

            return K

        return pipeline

    return _pipeline
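A minimal class-based sketch mirroring Example #9; my_model is a stand-in Tempo Model:

# Sketch only - an identity model stands in for a real one.
@model(name="identity", platform=ModelFramework.Custom)
def my_model(payload: np.ndarray) -> np.ndarray:
    return payload

@pipeline(name="demo-svc", models=PipelineModels(m=my_model))
class DemoSvc(object):
    @predictmethod
    def predict(self, payload: np.ndarray) -> np.ndarray:
        return self.models.m(payload)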