Example #1
def test_deploy_yaml():
    rt = SeldonDeployRuntime(
        host="http://34.78.44.92/seldon-deploy/api/v1alpha1",
        user="******",
        oidc_server="https://34.78.44.92/auth/realms/deploy-realm",
        password="******",
        oidc_client_id="sd-api",
        verify_ssl=False,
    )

    options = RuntimeOptions(
        runtime="tempo.seldon.SeldonKubernetesRuntime",
        k8s_options=KubernetesOptions(namespace="seldon"),
        ingress_options=IngressOptions(ssl=True, verify_ssl=False),
    )

    sklearn_model = Model(
        name="test-iris-sklearn",
        platform=ModelFramework.SKLearn,
        uri="gs://seldon-models/sklearn/iris",
        protocol=SeldonProtocol(),
        runtime_options=options,
    )

    spec = rt.to_k8s_yaml(sklearn_model)
    rtk = SeldonKubernetesRuntime()
    expected = rtk.to_k8s_yaml(sklearn_model)
    assert spec == expected
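
The snippets on this page omit their import lines. Below is a minimal sketch of the preamble they appear to rely on; the module paths are assumptions inferred from the class paths embedded in the generated manifests (for example "tempo.seldon.SeldonKubernetesRuntime" and "tempo.kfserving.protocol.KFServingV2Protocol") and may differ between tempo versions, since some of the later examples use an older API. Deploy-related names such as SeldonDeployRuntime, SeldonDeployConfig, IngressOptions, KubernetesSpec and ModelSpec would come from the same package under analogous paths.

# Assumed import preamble for the examples on this page (paths are a best guess
# and may need adjusting to your tempo version).
import os

import numpy as np

from tempo.serve.model import Model
from tempo.serve.metadata import (
    KubernetesOptions,
    ModelDataArgs,
    ModelDetails,
    ModelFramework,
    RuntimeOptions,
)
from tempo.seldon import SeldonKubernetesRuntime        # path taken from the manifest annotation
from tempo.seldon.protocol import SeldonProtocol        # assumed by analogy with the KFServing path
from tempo.kfserving.protocol import KFServingV2Protocol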
Example #2
def test_kubernetes_spec(sklearn_model: Model):
    options = KubernetesOptions(namespace="production", replicas=1)
    k8s_object = KubernetesSpec(sklearn_model.details, SeldonProtocol(), options)

    expected = {
        "apiVersion": "machinelearning.seldon.io/v1",
        "kind": "SeldonDeployment",
        "metadata": {
            "name": sklearn_model.details.name,
            "namespace": options.namespace,
        },
        "spec": {
            "protocol":
            "seldon",
            "predictors": [{
                "graph": {
                    "modelUri":
                    sklearn_model.details.uri,
                    "name":
                    "classifier",
                    "type":
                    "MODEL",
                    "implementation":
                    KubernetesSpec.Implementations[
                        sklearn_model.details.platform],
                },
                "name": "default",
                "replicas": options.replicas,
            }],
        },
    }

    assert k8s_object.spec == expected
Example #3
def test_deploy():
    rt = SeldonDeployRuntime()

    config = SeldonDeployConfig(
        host="https://34.78.44.92/seldon-deploy/api/v1alpha1",
        user="******",
        password="******",
        oidc_server="https://34.78.44.92/auth/realms/deploy-realm",
        oidc_client_id="sd-api",
        verify_ssl=False,
        auth_type=SeldonDeployAuthType.oidc,
    )

    rt.authenticate(settings=config)

    options = RuntimeOptions(
        runtime="tempo.seldon.SeldonKubernetesRuntime",
        k8s_options=KubernetesOptions(namespace="seldon"),
        ingress_options=IngressOptions(ssl=True, verify_ssl=False),
    )

    sklearn_model = Model(
        name="test-iris-sklearn",
        platform=ModelFramework.SKLearn,
        uri="gs://seldon-models/sklearn/iris",
        protocol=SeldonProtocol(),
        runtime_options=options,
    )

    rt.deploy(sklearn_model)
    rt.wait_ready(sklearn_model)
    print(sklearn_model(np.array([[4.9, 3.1, 1.5, 0.2]])))
    rt.undeploy(sklearn_model)
Example #4
File: k8s.py  Project: M46F/tempo
    def __init__(self, k8s_options: KubernetesOptions = None, protocol=None):
        if k8s_options is None:
            k8s_options = KubernetesOptions()
        self.k8s_options = k8s_options
        self.create_k8s_client()
        if protocol is None:
            self.protocol = SeldonProtocol()
        else:
            self.protocol = protocol
Example #5
def sklearn_model() -> Model:
    model_path = os.path.join(TESTDATA_PATH, "sklearn", "iris")
    return Model(
        name="test-iris-sklearn",
        platform=ModelFramework.SKLearn,
        uri="gs://seldon-models/sklearn/iris",
        local_folder=model_path,
        protocol=SeldonProtocol(),
        runtime_options=RuntimeOptions(k8s_options=KubernetesOptions(namespace="production", replicas=1)),
    )
Example #6
def test_kubernetes_spec_pipeline():
    details = ModelDetails(
        name="inference-pipeline",
        platform=ModelFramework.TempoPipeline,
        uri="gs://seldon/tempo",
        local_folder="",
        inputs=ModelDataArgs(args=[]),
        outputs=ModelDataArgs(args=[]),
    )
    options = KubernetesOptions(namespace="production", replicas=1)
    k8s_object = KubernetesSpec(details, KFServingV2Protocol(), options)

    container_spec = _V2ContainerFactory.get_container_spec(details)
    container_env = [
        {"name": name, "value": value}
        for name, value in container_spec["environment"].items()
    ]

    expected = {
        "apiVersion": "machinelearning.seldon.io/v1",
        "kind": "SeldonDeployment",
        "metadata": {
            "name": details.name,
            "namespace": options.namespace
        },
        "spec": {
            "protocol":
            "kfserving",
            "predictors": [{
                "componentSpecs": [{
                    "spec": {
                        "containers": [{
                            "name": "classifier",
                            "image": container_spec["image"],
                            "env": container_env,
                            "args": [],
                        }]
                    }
                }],
                "graph": {
                    "modelUri": details.uri,
                    "name": "classifier",
                    "type": "MODEL",
                    "implementation": "TRITON_SERVER",
                    "serviceAccountName": "tempo-pipeline",
                },
                "name":
                "default",
                "replicas":
                options.replicas,
            }],
        },
    }

    assert k8s_object.spec == expected
Example #7
def test_deploy_yaml():
    rt = SeldonDeployRuntime(
        host="http://34.105.136.157/seldon-deploy/api/v1alpha1",
        user="******",
        password="******",
        k8s_options=KubernetesOptions(namespace="seldon"),
    )

    sklearn_model = Model(
        name="test-iris-sklearn",
        runtime=rt,
        platform=ModelFramework.SKLearn,
        uri="gs://seldon-models/sklearn/iris",
        local_folder=os.getcwd() + "/sklearn",
    )

    srt = SeldonKubernetesRuntime(k8s_options=KubernetesOptions(namespace="seldon"))
    sklearn_model.set_runtime(srt)
    expected = sklearn_model.to_k8s_yaml()
    sklearn_model.set_runtime(rt)
    assert sklearn_model.to_k8s_yaml() == expected
Example #8
def test_seldon_model_yaml_auth():
    m = Model(
        name="test-iris-xgboost",
        protocol=SeldonProtocol(),
        platform=ModelFramework.XGBoost,
        uri="gs://seldon-models/xgboost/iris",
        local_folder="/tmp/model",
    )
    runtime = SeldonKubernetesRuntime(
        runtime_options=RuntimeOptions(k8s_options=KubernetesOptions(authSecretName="auth"))
    )
    print(runtime.to_k8s_yaml(m))
Example #9
def test_seldon_model_yaml_auth(expected):
    m = Model(
        name="test-iris-xgboost",
        protocol=SeldonProtocol(),
        platform=ModelFramework.XGBoost,
        uri="gs://seldon-models/xgboost/iris",
        local_folder="/tmp/model",
    )
    runtime = SeldonKubernetesRuntime(runtime_options=SeldonCoreOptions(
        k8s_options=KubernetesOptions(authSecretName="auth")))
    yaml_str = runtime.manifest(m)
    yaml_obj = yaml.safe_load(yaml_str)
    yaml_obj_expected = yaml.safe_load(expected)
    del yaml_obj["metadata"]["annotations"]["seldon.io/tempo-model"]
    assert yaml_obj == yaml_obj_expected
Example #10
def test_tensorflow_spec():
    md = ModelDetails(
        name="test",
        local_folder="",
        uri="",
        platform=ModelFramework.Tensorflow,
        inputs=ModelDataArgs(args=[]),
        outputs=ModelDataArgs(args=[]),
    )
    protocol = SeldonProtocol()
    options = KubernetesOptions(namespace="production", replicas=1)
    runtime_options = RuntimeOptions(k8s_options=options)
    model_spec = ModelSpec(model_details=md,
                           protocol=protocol,
                           runtime_options=runtime_options)
    spec = get_container_spec(model_spec)
    assert "image" in spec
    assert "command" in spec
Example #11
def test_deploy():
    rt = SeldonDeployRuntime(
        host="http://34.105.136.157/seldon-deploy/api/v1alpha1",
        user="******",
        password="******",
        k8s_options=KubernetesOptions(namespace="seldon"),
    )

    sklearn_model = Model(
        name="test-iris-sklearn",
        runtime=rt,
        platform=ModelFramework.SKLearn,
        uri="gs://seldon-models/sklearn/iris",
        local_folder=os.getcwd() + "/sklearn",
    )

    # sklearn_model.deploy()
    sklearn_model(np.array([[4.9, 3.1, 1.5, 0.2]]))
Example #12
    def __init__(
        self,
        host: str,
        user: str,
        password: str,
        auth_type: SeldonDeployAuthType = SeldonDeployAuthType.session_cookie,
        k8s_options: KubernetesOptions = None,
        _protocol=None,
    ):
        if k8s_options is None:
            k8s_options = KubernetesOptions()
        self._k8s_options = k8s_options
        self._host = host
        self._user = user
        self._password = password
        self._auth_type = auth_type
        if _protocol is None:
            self.protocol = SeldonProtocol()
        else:
            self.protocol = _protocol
Example #13
def k8s_runtime_v2(k8s_namespace: str) -> SeldonKubernetesRuntime:
    return SeldonKubernetesRuntime(
        k8s_options=KubernetesOptions(namespace=k8s_namespace),
        protocol=KFServingV2Protocol(),
    )
Example #14
def k8s_runtime(k8s_namespace: str) -> SeldonKubernetesRuntime:
    return SeldonKubernetesRuntime(k8s_options=KubernetesOptions(
        namespace=k8s_namespace))
Example #15
def runtime(namespace: str) -> SeldonKubernetesRuntime:
    return SeldonKubernetesRuntime(runtime_options=RuntimeOptions(
        k8s_options=KubernetesOptions(namespace=namespace)))
Example #16
def test_kubernetes_spec_pipeline():
    details = ModelDetails(
        name="inference-pipeline",
        platform=ModelFramework.TempoPipeline,
        uri="gs://seldon/tempo",
        local_folder="",
        inputs=ModelDataArgs(args=[]),
        outputs=ModelDataArgs(args=[]),
    )
    options = KubernetesOptions(namespace="production", replicas=1)
    protocol = KFServingV2Protocol()
    runtime_options = RuntimeOptions(k8s_options=options)
    model_spec = ModelSpec(model_details=details,
                           protocol=protocol,
                           runtime_options=runtime_options)
    k8s_object = KubernetesSpec(model_spec)

    expected = {
        "apiVersion": "machinelearning.seldon.io/v1",
        "kind": "SeldonDeployment",
        "metadata": {
            "annotations": {
                "seldon.io/tempo-description":
                "",
                "seldon.io/tempo-model":
                '{"model_details": '
                '{"name": '
                '"inference-pipeline", '
                '"local_folder": "", '
                '"uri": '
                '"gs://seldon/tempo", '
                '"platform": "tempo", '
                '"inputs": {"args": '
                '[]}, "outputs": '
                '{"args": []}, '
                '"description": ""}, '
                '"protocol": '
                '"tempo.kfserving.protocol.KFServingV2Protocol", '
                '"runtime_options": '
                '{"runtime": null, '
                '"docker_options": '
                '{"defaultRuntime": '
                '"tempo.seldon.SeldonDockerRuntime"}, '
                '"k8s_options": '
                '{"replicas": 1, '
                '"minReplicas": null, '
                '"maxReplicas": null, '
                '"authSecretName": '
                "null, "
                '"serviceAccountName": '
                "null, "
                '"defaultRuntime": '
                '"tempo.seldon.SeldonKubernetesRuntime", '
                '"namespace": '
                '"production"}, '
                '"ingress_options": '
                '{"ingress": '
                '"tempo.ingress.istio.IstioIngress", '
                '"ssl": false, '
                '"verify_ssl": true}}}',
            },
            "labels": {
                "seldon.io/tempo": "true"
            },
            "name": "inference-pipeline",
            "namespace": "production",
        },
        "spec": {
            "protocol":
            "kfserving",
            "predictors": [{
                "graph": {
                    "modelUri": details.uri,
                    "name": "inference-pipeline",
                    "type": "MODEL",
                    "implementation": "TEMPO_SERVER",
                    "serviceAccountName": "tempo-pipeline",
                },
                "name": "default",
                "replicas": options.replicas,
            }],
        },
    }

    assert k8s_object.spec == expected