Example 1
    def __init__(
        self,
        name: str,
        user_func: Callable[[Any], Any] = None,
        local_folder: str = None,
        uri: str = None,
        platform: ModelFramework = None,
        inputs: ModelDataType = None,
        outputs: ModelDataType = None,
        conda_env: str = None,
        runtime: Runtime = None,
    ):
        self._name = name
        self._user_func = user_func
        self.conda_env = conda_env
        # Normalize optional arguments before building the details object.
        if uri is None:
            uri = ""

        local_folder = self._get_local_folder(local_folder)
        inputs, outputs = self._get_args(inputs, outputs)

        self.details = ModelDetails(
            name=name,
            local_folder=local_folder,
            uri=uri,
            platform=platform,
            inputs=inputs,
            outputs=outputs,
        )

        self.cls = None
        self.runtime = runtime
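The constructor above normalizes its optional arguments (an unset uri becomes an empty string, and local_folder and the input/output args are resolved through helper methods) before they are frozen into a ModelDetails object. A minimal self-contained sketch of that normalization pattern, using illustrative names rather than tempo's actual classes:

from typing import Any, Callable, Optional


class ExampleModel:
    """Illustrative stand-in for the class above; not part of tempo."""

    def __init__(
        self,
        name: str,
        user_func: Optional[Callable[[Any], Any]] = None,
        uri: Optional[str] = None,
    ):
        self._name = name
        self._user_func = user_func
        # Replace an unset optional argument with a safe default so the
        # rest of the code never has to handle None.
        self.uri = uri if uri is not None else ""


model = ExampleModel(name="my-model")
assert model.uri == ""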
Example 2
def test_kubernetes_spec_pipeline():
    details = ModelDetails(
        name="inference-pipeline",
        platform=ModelFramework.TempoPipeline,
        uri="gs://seldon/tempo",
        local_folder="",
        inputs=ModelDataArgs(args=[]),
        outputs=ModelDataArgs(args=[]),
    )
    options = KubernetesOptions(namespace="production", replicas=1)
    k8s_object = KubernetesSpec(details, KFServingV2Protocol(), options)

    container_spec = _V2ContainerFactory.get_container_spec(details)
    container_env = [
        {"name": name, "value": value}
        for name, value in container_spec["environment"].items()
    ]

    expected = {
        "apiVersion": "machinelearning.seldon.io/v1",
        "kind": "SeldonDeployment",
        "metadata": {
            "name": details.name,
            "namespace": options.namespace
        },
        "spec": {
            "protocol":
            "kfserving",
            "predictors": [{
                "componentSpecs": [{
                    "spec": {
                        "containers": [{
                            "name": "classifier",
                            "image": container_spec["image"],
                            "env": container_env,
                            "args": [],
                        }]
                    }
                }],
                "graph": {
                    "modelUri": details.uri,
                    "name": "classifier",
                    "type": "MODEL",
                    "implementation": "TRITON_SERVER",
                    "serviceAccountName": "tempo-pipeline",
                },
                "name":
                "default",
                "replicas":
                options.replicas,
            }],
        },
    }

    assert k8s_object.spec == expected
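The container_env comprehension above converts the container's environment mapping into the list of name/value entries that a Kubernetes container spec expects. A self-contained illustration of that conversion (the variable names and values here are made up for the example):

environment = {"MLSERVER_MODEL_NAME": "classifier", "MLSERVER_HTTP_PORT": "9000"}
container_env = [{"name": k, "value": v} for k, v in environment.items()]
assert {"name": "MLSERVER_MODEL_NAME", "value": "classifier"} in container_env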
Example 3
def test_tensorflow_spec():
    md = ModelDetails(
        name="test",
        local_folder="",
        uri="",
        platform=ModelFramework.Tensorflow,
        inputs=ModelDataArgs(args=[]),
        outputs=ModelDataArgs(args=[]),
    )
    protocol = SeldonProtocol()
    options = KubernetesOptions(namespace="production", replicas=1)
    runtime_options = RuntimeOptions(k8s_options=options)
    model_spec = ModelSpec(model_details=md,
                           protocol=protocol,
                           runtime_options=runtime_options)
    spec = get_container_spec(model_spec)
    assert "image" in spec
    assert "command" in spec
Example 4
def test_model_spec():
    ms = ModelSpec(
        model_details=ModelDetails(
            name="test",
            local_folder="",
            uri="",
            platform=ModelFramework.XGBoost,
            inputs=ModelDataArgs(args=[ModelDataArg(ty=str)]),
            outputs=ModelDataArgs(args=[]),
        ),
        protocol=V2Protocol(),
        runtime_options=KFServingOptions().local_options,
    )
    s = ms.json()
    j = json.loads(s)
    ms2 = ModelSpec(**j)
    assert isinstance(ms2.protocol, V2Protocol)
    assert ms2.model_details.inputs.args[0].ty == str
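The test serializes a ModelSpec to JSON and rebuilds it from the parsed dictionary, checking that the protocol class and the Python type stored in the input args survive the round trip. The .json()/constructor pattern looks like a pydantic-style model; a self-contained sketch of the same round-trip idea with a plain pydantic model (SimpleSpec is illustrative, not part of tempo):

import json

from pydantic import BaseModel


class SimpleSpec(BaseModel):
    name: str
    replicas: int = 1


original = SimpleSpec(name="test")
restored = SimpleSpec(**json.loads(original.json()))
assert restored == original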
Example 5
    def __init__(
        self,
        name: str,
        runtime: Runtime = None,
        local_folder: str = None,
        uri: str = None,
        platform: ModelFramework = None,
        inputs: ModelDataType = None,
        outputs: ModelDataType = None,
        model_func: Callable[[Any], Any] = None,
    ):
        # A runtime is expected in practice: get_protocol() would fail on the
        # default value of None.
        super().__init__(name, model_func, runtime.get_protocol(), inputs,
                         outputs)
        self._model_func = model_func
        self._runtime = runtime
        self._details = ModelDetails(
            name=name,
            local_folder=local_folder,
            uri=uri,
            platform=platform,
            inputs=inputs,
            outputs=outputs,
        )
Example 6
def test_kubernetes_spec_pipeline():
    details = ModelDetails(
        name="inference-pipeline",
        platform=ModelFramework.TempoPipeline,
        uri="gs://seldon/tempo",
        local_folder="",
        inputs=ModelDataArgs(args=[]),
        outputs=ModelDataArgs(args=[]),
    )
    options = KubernetesOptions(namespace="production", replicas=1)
    protocol = KFServingV2Protocol()
    runtime_options = RuntimeOptions(k8s_options=options)
    model_spec = ModelSpec(model_details=details,
                           protocol=protocol,
                           runtime_options=runtime_options)
    k8s_object = KubernetesSpec(model_spec)

    expected = {
        "apiVersion": "machinelearning.seldon.io/v1",
        "kind": "SeldonDeployment",
        "metadata": {
            "annotations": {
                "seldon.io/tempo-description":
                "",
                "seldon.io/tempo-model":
                '{"model_details": '
                '{"name": '
                '"inference-pipeline", '
                '"local_folder": "", '
                '"uri": '
                '"gs://seldon/tempo", '
                '"platform": "tempo", '
                '"inputs": {"args": '
                '[]}, "outputs": '
                '{"args": []}, '
                '"description": ""}, '
                '"protocol": '
                '"tempo.kfserving.protocol.KFServingV2Protocol", '
                '"runtime_options": '
                '{"runtime": null, '
                '"docker_options": '
                '{"defaultRuntime": '
                '"tempo.seldon.SeldonDockerRuntime"}, '
                '"k8s_options": '
                '{"replicas": 1, '
                '"minReplicas": null, '
                '"maxReplicas": null, '
                '"authSecretName": '
                "null, "
                '"serviceAccountName": '
                "null, "
                '"defaultRuntime": '
                '"tempo.seldon.SeldonKubernetesRuntime", '
                '"namespace": '
                '"production"}, '
                '"ingress_options": '
                '{"ingress": '
                '"tempo.ingress.istio.IstioIngress", '
                '"ssl": false, '
                '"verify_ssl": true}}}',
            },
            "labels": {
                "seldon.io/tempo": "true"
            },
            "name": "inference-pipeline",
            "namespace": "production",
        },
        "spec": {
            "protocol":
            "kfserving",
            "predictors": [{
                "graph": {
                    "modelUri": details.uri,
                    "name": "inference-pipeline",
                    "type": "MODEL",
                    "implementation": "TEMPO_SERVER",
                    "serviceAccountName": "tempo-pipeline",
                },
                "name": "default",
                "replicas": options.replicas,
            }],
        },
    }

    assert k8s_object.spec == expected
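The "seldon.io/tempo-model" annotation in the expected metadata is just the JSON-serialized model spec, so it can be parsed back into a plain dictionary. A small follow-up sketch that could sit at the end of the test above; it reuses the expected dict and only asserts fields that appear in it:

    annotation = expected["metadata"]["annotations"]["seldon.io/tempo-model"]
    spec_dict = json.loads(annotation)  # assumes `import json` at module level
    assert spec_dict["model_details"]["name"] == "inference-pipeline"
    assert spec_dict["runtime_options"]["k8s_options"]["namespace"] == "production"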