Example #1
 def test_model_rest_non200(self, namespace, s2i_python_version):
     create_push_s2i_image(s2i_python_version, "model", "rest_non200")
     retry_run(
         f"kubectl apply -f ../resources/s2i_python_model_non200.json -n {namespace}"
     )
     wait_for_status("mymodel", namespace)
     wait_for_rollout("mymodel", namespace)
     r = initial_rest_request("mymodel", namespace)
     arr = np.array([[1, 2, 3]])
     r = rest_request_ambassador("mymodel",
                                 namespace,
                                 API_AMBASSADOR,
                                 data=arr)
     res = r.json()
     logging.warning(res)
     assert r.status_code == 500
     assert r.json()["status"]["code"] == 500
     assert (
         r.json()["status"]["info"] ==
         "Internal service call failed calling http://localhost:9000/predict status code 400"
     )
     run(
         f"kubectl delete -f ../resources/s2i_python_model_non200.json -n {namespace}",
         shell=True,
     )
Example #2
 def test_model_rest_non200(self, s2i_python_version):
     namespace = "s2i-test-model-rest-non200"
     retry_run(f"kubectl create namespace {namespace}")
     create_push_s2i_image(s2i_python_version, "model", "rest_non200")
     retry_run(
         f"kubectl apply -f ../resources/s2i_python_model_non200.json -n {namespace}"
     )
     wait_for_rollout("mymodel-mymodel-4e3d66d", namespace)
     r = initial_rest_request("mymodel", namespace)
     arr = np.array([[1, 2, 3]])
     r = rest_request_ambassador("mymodel",
                                 namespace,
                                 API_AMBASSADOR,
                                 data=arr)
     res = r.json()
     logging.warning(res)
     assert r.status_code == 200
     assert r.json()["status"]["code"] == 400
     assert r.json()["status"]["reason"] == "exception message"
     assert r.json()["status"]["info"] == "exception caught"
     assert r.json()["status"]["status"] == "FAILURE"
     run(
         f"kubectl delete -f ../resources/s2i_python_model_non200.json -n {namespace}",
         shell=True,
     )
     run(f"kubectl delete namespace {namespace}", shell=True)
Example #3
def test_namespace_update(namespace, seldon_version):
    # Deploy test model
    retry_run(f"kubectl apply -f ../resources/graph1.json -n {namespace}")
    wait_for_status("mymodel", namespace)
    wait_for_rollout("mymodel", namespace)
    assert_model("mymodel", namespace, initial=True)

    # Label the namespace so that it is handled by a single dedicated operator
    retry_run(
        f"kubectl label namespace {namespace} seldon.io/controller-id={namespace}"
    )

    def _install_namespace_scoped():
        # Install on the current namespace
        retry_run(
            "helm install seldon "
            "../../helm-charts/seldon-core-operator "
            f"--namespace {namespace} "
            "--set crd.create=false "
            "--set singleNamespace=true "
            "--wait",
            attempts=2,
        )

        # Assert that the model is still working under the new namespace-scoped operator
        wait_for_status("mymodel", namespace)
        wait_for_rollout("mymodel", namespace)

    assert_model_during_op(_install_namespace_scoped, "mymodel", namespace)
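The `assert_model_during_op` helper above comes from the e2e test utilities. A minimal, hypothetical sketch of what a helper along those lines could do (the thread-pool approach and the signature are assumptions, not the real implementation): run the operation in the background and keep asserting that the model still serves while it runs.

from concurrent.futures import ThreadPoolExecutor

def assert_model_during_op_sketch(op, sdep_name, namespace, **kwargs):
    with ThreadPoolExecutor(max_workers=1) as executor:
        future = executor.submit(op)
        # Keep hitting the model while the operation is still in flight.
        while not future.done():
            assert_model(sdep_name, namespace, **kwargs)
        future.result()  # re-raise anything the operation itself threw
    # One final check after the operation has completed.
    assert_model(sdep_name, namespace, **kwargs)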
Example #4
def test_rolling_deployment(namespace, api_gateway, from_deployment,
                            to_deployment, change):
    if api_gateway == API_ISTIO_GATEWAY:
        retry_run(
            f"kubectl create -f ../resources/seldon-gateway.yaml -n {namespace}"
        )

    from_file_path = to_resources_path(from_deployment)
    retry_run(f"kubectl apply -f {from_file_path} -n {namespace}")
    wait_for_status("mymodel", namespace)
    wait_for_rollout("mymodel", namespace)
    assert_model("mymodel", namespace, initial=True, endpoint=api_gateway)

    old_pod_name = get_pod_name_for_sdep("mymodel", namespace)[0]
    to_file_path = to_resources_path(to_deployment)

    def _update_model():
        retry_run(f"kubectl apply -f {to_file_path} -n {namespace}")
        if change:
            wait_for_pod_shutdown(old_pod_name, namespace)
        wait_for_status("mymodel", namespace)
        time.sleep(2)  # Wait a little after the deployment is marked Available

    assert_model_during_op(_update_model,
                           "mymodel",
                           namespace,
                           endpoint=api_gateway)

    delete_cmd = f"kubectl delete --ignore-not-found -n {namespace}"
    run(f"{delete_cmd} -f {from_file_path}", shell=True)
    run(f"{delete_cmd} -f {to_file_path}", shell=True)
Example #5
    def test_alibi_explain_anchor_image_tensorflow_protocol(self, namespace):
        spec = "../resources/tf_cifar_anchor_image_explainer.yaml"
        name = "cifar10-default-explainer"
        vs_prefix = (f"seldon/{namespace}/cifar10-explainer/default/v1/models/"
                     f"cifar10-classifier:explain")
        retry_run(f"kubectl apply -f {spec} -n {namespace}")

        wait_for_deployment(name, namespace)

        # note: we add a batch dimension, but it should really be a single image
        test_data = np.random.randn(1, 32, 32, 3)
        inference_request = {"instances": test_data.tolist()}

        for attempt in Retrying(
                wait=wait_fixed(TENACITY_WAIT),
                stop=stop_after_attempt(TENACITY_STOP_AFTER_ATTEMPT),
        ):
            with attempt:
                r = requests.post(
                    f"http://localhost:8004/{vs_prefix}",
                    json=inference_request,
                )
                explanation = r.json()

        assert explanation["meta"]["name"] == "AnchorImage"
        assert "anchor" in explanation["data"]
        assert "precision" in explanation["data"]
        assert "coverage" in explanation["data"]

        run(f"kubectl delete -f {spec} -n {namespace}", shell=True)
Example #6
def test_tracing_rest(namespace):
    # Deploy the model and check that it is running
    retry_run(
        f"kubectl apply -f ../resources/graph-tracing.json -n {namespace}")
    wait_for_status("mymodel", namespace)
    wait_for_rollout("mymodel", namespace)
    initial_rest_request("mymodel", namespace)

    # We need the current pod name to find the right traces
    deployment_names = get_deployment_names("mymodel", namespace)
    deployment_name = deployment_names[0]
    pod_names = get_pod_names(deployment_name, namespace)
    pod_name = pod_names[0]

    # Get traces and assert their content
    traces = get_traces(pod_name,
                        "executor",
                        "predictions",
                        _should_retry=_is_jaeger_syncing)
    assert len(traces) == 1

    trace = traces[0]
    processes = trace["processes"]
    assert len(processes) == 2
    assert_trace(trace,
                 expected_operations=["predictions", "/predict", "Predict"])
Example #7
    def test_modelmetadata_grpc(self, namespace, s2i_python_version):
        create_push_s2i_image(s2i_python_version, "modelmetadata", "grpc")
        retry_run(
            f"kubectl apply -f ../resources/metadata_modelmetadata_grpc.yaml -n {namespace}"
        )
        wait_for_status("mymodel-modelmetadata", namespace)
        wait_for_rollout("mymodel-modelmetadata", namespace)
        r = initial_grpc_request("mymodel-modelmetadata", namespace)

        r = grpc_request_ambassador_metadata(
            "mymodel-modelmetadata", namespace, model_name="my-model"
        )

        res = json.loads(json_format.MessageToJson(r))
        logging.info(res)

        # Cast the reference model metadata to proto and back so that it goes
        # through the infamous int -> float casting in google.protobuf.Value
        # (see the short round-trip sketch after this example).
        metadata_proto = prediction_pb2.SeldonModelMetadata()
        json_format.ParseDict(
            model_metadata, metadata_proto, ignore_unknown_fields=True
        )
        assert res == json.loads(json_format.MessageToJson(metadata_proto))

        r = grpc_request_ambassador_metadata("mymodel-modelmetadata", namespace)

        res = json.loads(json_format.MessageToJson(r))
        logging.info(res)

        graph_metadata_proto = prediction_pb2.SeldonGraphMetadata()
        json_format.ParseDict(
            graph_metadata_grpc, graph_metadata_proto, ignore_unknown_fields=True
        )
        assert res == json.loads(json_format.MessageToJson(graph_metadata_proto))
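A short, standalone sketch of the int -> float behaviour the comment above refers to: google.protobuf Struct/Value only have a double number field, so integers come back as floats after a ParseDict/MessageToDict round trip, which is why both sides of the comparison are cast through the proto.

from google.protobuf import json_format, struct_pb2

reference = {"common": 2}
as_proto = struct_pb2.Struct()
json_format.ParseDict(reference, as_proto)
roundtripped = json_format.MessageToDict(as_proto)

# The int 2 comes back as the float 2.0 after the round trip.
assert roundtripped["common"] == 2.0
assert isinstance(roundtripped["common"], float)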
Example #8
    def test_alibi_explain_anchor_tabular(self, namespace):
        spec = "../resources/iris_anchor_tabular_explainer.yaml"
        name = "iris-default-explainer"
        vs_prefix = f"seldon/{namespace}/iris-explainer/default/api/v1.0/explain"

        inference_request = {
            "data": {
                "names": ["text"],
                "ndarray": [[5.964, 4.006, 2.081, 1.031]],
            }
        }

        retry_run(f"kubectl apply -f {spec} -n {namespace}")

        wait_for_deployment(name, namespace)

        time.sleep(AFTER_WAIT_SLEEP)

        for attempt in Retrying(
                wait=wait_fixed(TENACITY_WAIT),
                stop=stop_after_attempt(TENACITY_STOP_AFTER_ATTEMPT),
        ):
            with attempt:
                r = requests.post(
                    f"http://localhost:8004/{vs_prefix}",
                    json=inference_request,
                )
                explanation = r.json()

        assert explanation["meta"]["name"] == "AnchorTabular"
        assert "anchor" in explanation["data"]
        assert "precision" in explanation["data"]
        assert "coverage" in explanation["data"]

        run(f"kubectl delete -f {spec} -n {namespace}", shell=True)
Example #9
 def test_rolling_update7(self, namespace, api_gateway):
     retry_run(f"kubectl apply -f ../resources/graph1svc.json -n {namespace}")
     wait_for_status("mymodel", namespace)
     wait_for_rollout("mymodel", namespace, expected_deployments=2)
     logging.warning("Initial request")
     r = initial_rest_request("mymodel", namespace, endpoint=api_gateway)
     assert r.status_code == 200
     assert r.json()["data"]["tensor"]["values"] == [1.0, 2.0, 3.0, 4.0]
     retry_run(f"kubectl apply -f ../resources/graph3svc.json -n {namespace}")
     r = initial_rest_request("mymodel", namespace, endpoint=api_gateway)
     assert r.status_code == 200
     assert r.json()["data"]["tensor"]["values"] == [1.0, 2.0, 3.0, 4.0]
     seen_new_values = False
     for _ in range(100):
         r = rest_request_ambassador("mymodel", namespace, api_gateway)
         assert r.status_code == 200
         res = r.json()
         assert (res["data"]["tensor"]["values"] == [1.0, 2.0, 3.0, 4.0]) or (
             res["data"]["tensor"]["values"] == [5.0, 6.0, 7.0, 8.0]
         )
         if res["data"]["tensor"]["values"] == [5.0, 6.0, 7.0, 8.0]:
             seen_new_values = True
             break
         time.sleep(1)
     # The rolling update must eventually serve the new graph's values.
     assert seen_new_values
     logging.warning("Success for test_rolling_update7")
     run(f"kubectl delete -f ../resources/graph1svc.json -n {namespace}", shell=True)
     run(f"kubectl delete -f ../resources/graph3svc.json -n {namespace}", shell=True)
Example #10
def test_tracing_rest(namespace):
    # Deploy the model and check that it is running
    retry_run(
        f"kubectl apply -f ../resources/graph-tracing.json -n {namespace}")
    wait_for_status("mymodel", namespace)
    wait_for_rollout("mymodel", namespace)
    initial_rest_request("mymodel", namespace)

    # We need the current pod name to find the right traces
    deployment_names = get_deployment_names("mymodel", namespace)
    deployment_name = deployment_names[0]
    pod_names = get_pod_names(deployment_name, namespace)
    pod_name = pod_names[0]

    print("deployment name", deployment_name, "pod name", pod_name)

    # The engine and the executor identify themselves to Jaeger as different
    # services with different operation names; here we query the executor
    # (a hedged trace-query sketch follows this example).
    service = "executor"
    operation = "predictions"
    request_operation = "/predict"

    # Get traces and assert their content
    traces = get_traces(pod_name,
                        service,
                        operation,
                        _should_retry=_is_jaeger_syncing)
    assert len(traces) == 1

    trace = traces[0]
    processes = trace["processes"]
    assert len(processes) == 2
    assert_trace(trace,
                 expected_operations=[operation, request_operation, "Predict"])
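The get_traces helper lives in the tracing test utilities. A hedged sketch of what a query along those lines could look like against Jaeger's HTTP query API (the URL, port and tag layout are assumptions, not the real helper): fetch recent traces for the service/operation and keep only those whose process tags mention the pod.

import requests

def get_traces_sketch(pod_name, service, operation,
                      jaeger_url="http://localhost:16686"):
    r = requests.get(
        f"{jaeger_url}/api/traces",
        params={"service": service, "operation": operation},
    )
    traces = r.json().get("data", [])
    return [
        trace for trace in traces
        if any(
            tag.get("value") == pod_name
            for process in trace["processes"].values()
            for tag in process.get("tags", [])
        )
    ]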
Example #11
    def test_mlflow(self, namespace):
        spec = "../../servers/mlflowserver/samples/elasticnet_wine.yaml"
        retry_run(f"kubectl apply -f {spec} -n {namespace}")
        wait_for_status("mlflow", namespace)
        wait_for_rollout("mlflow", namespace)
        time.sleep(1)

        r = initial_rest_request(
            "mlflow",
            namespace,
            data=[[6.3, 0.3, 0.34, 1.6, 0.049, 14, 132, 0.994, 3.3, 0.49,
                   9.5]],
            dtype="ndarray",
            names=[
                "fixed acidity",
                "volatile acidity",
                "citric acid",
                "residual sugar",
                "chlorides",
                "free sulfur dioxide",
                "total sulfur dioxide",
                "density",
                "pH",
                "sulphates",
                "alcohol",
            ],
        )
        assert r.status_code == 200

        run(f"kubectl delete -f {spec} -n {namespace}", shell=True)
Example #12
    def test_sklearn(self, namespace):
        spec = "../../servers/sklearnserver/samples/iris.yaml"
        retry_run(f"kubectl apply -f {spec} -n {namespace}")
        wait_for_status("sklearn", namespace)
        wait_for_rollout("sklearn", namespace)
        time.sleep(1)
        logging.warning("Initial request")
        r = initial_rest_request("sklearn",
                                 namespace,
                                 data=[[0.1, 0.2, 0.3, 0.4]],
                                 dtype="ndarray")
        assert r.status_code == 200

        r = rest_request_ambassador("sklearn", namespace, method="metadata")
        assert r.status_code == 200

        res = r.json()
        logging.warning(res)
        assert res["name"] == "iris"
        assert res["versions"] == ["iris/v1"]

        r = grpc_request_ambassador("sklearn",
                                    namespace,
                                    data=np.array([[0.1, 0.2, 0.3, 0.4]]))
        res = json.loads(json_format.MessageToJson(r))
        logging.info(res)

        logging.warning("Success for test_prepack_sklearn")
        run(f"kubectl delete -f {spec} -n {namespace}", shell=True)
Example #13
def test_rolling_update_deployment(namespace, from_deployment, to_deployment):
    from_file_path = to_resources_path(from_deployment)
    retry_run(f"kubectl apply -f {from_file_path} -n {namespace}")
    # Note that this is not yet parametrised!
    wait_for_status("mymodel", namespace)
    wait_for_rollout("mymodel", namespace)
    logging.warning("Initial request")
    r = initial_rest_request("mymodel", namespace)
    assert r.status_code == 200
    assert r.json()["data"]["tensor"]["values"] == [1.0, 2.0, 3.0, 4.0]

    to_file_path = to_resources_path(to_deployment)
    retry_run(f"kubectl apply -f {to_file_path} -n {namespace}")
    r = initial_rest_request("mymodel", namespace)
    assert r.status_code == 200
    assert r.json()["data"]["tensor"]["values"] == [1.0, 2.0, 3.0, 4.0]

    seen_new_values = False
    for _ in range(100):
        r = rest_request_ambassador("mymodel", namespace, API_AMBASSADOR)
        assert r.status_code == 200
        res = r.json()
        assert (res["data"]["tensor"]["values"] == [1.0, 2.0, 3.0, 4.0]) or (
            res["data"]["tensor"]["values"] == [5.0, 6.0, 7.0, 8.0]
        )
        if res["data"]["tensor"]["values"] == [5.0, 6.0, 7.0, 8.0]:
            seen_new_values = True
            break
        time.sleep(1)

    # The rolling update must eventually serve the new deployment's values.
    assert seen_new_values

    run(f"kubectl delete -f {from_file_path} -n {namespace}", shell=True)
    run(f"kubectl delete -f {to_file_path} -n {namespace}", shell=True)
Example #14
 def test_model_combiner_grpc(self, namespace, s2i_python_version):
     create_push_s2i_image(s2i_python_version, "one", "grpc")
     create_push_s2i_image(s2i_python_version, "two", "grpc")
     create_push_s2i_image(s2i_python_version, "combiner", "grpc")
     retry_run(
         f"kubectl apply -f ../resources/tags_combiner_grpc.json -n {namespace}"
     )
     wait_for_status("mymodel-tags-combiner", namespace)
     wait_for_rollout("mymodel-tags-combiner", namespace)
     r = initial_grpc_request("mymodel-tags-combiner", namespace)
     arr = np.array([[1, 2, 3]])
     r = grpc_request_ambassador("mymodel-tags-combiner",
                                 namespace,
                                 API_AMBASSADOR,
                                 data=arr)
     res = json.loads(json_format.MessageToJson(r))
     logging.info(res)
     # assert r.status_code == 200
     assert res["data"]["ndarray"] == [["model-1"], ["model-2"]]
     assert res["meta"]["tags"] == {
         "combiner": "yes",
         "common": 2,
         "model-1": "yes",
         "model-2": "yes",
     }
     run(
         f"kubectl delete -f ../resources/tags_combiner_grpc.json -n {namespace}",
         shell=True,
     )
Example #15
 def test_model_graph_rest(self, namespace, s2i_python_version):
     create_push_s2i_image(s2i_python_version, "one", "rest")
     create_push_s2i_image(s2i_python_version, "two", "rest")
     retry_run(
         f"kubectl apply -f ../resources/tags_graph_rest.json -n {namespace}"
     )
     wait_for_status("mymodel-tags-graph", namespace)
     wait_for_rollout("mymodel-tags-graph", namespace)
     r = initial_rest_request("mymodel-tags-graph", namespace)
     arr = np.array([[1, 2, 3]])
     r = rest_request_ambassador("mymodel-tags-graph",
                                 namespace,
                                 API_AMBASSADOR,
                                 data=arr)
     res = r.json()
     logging.info(res)
     assert r.status_code == 200
     assert res["data"]["ndarray"] == ["model-2"]
     assert res["meta"]["tags"] == {
         "common": 2,
         "model-1": "yes",
         "model-2": "yes"
     }
     run(
         f"kubectl delete -f ../resources/tags_graph_rest.json -n {namespace}",
         shell=True,
     )
Example #16
 def test_combiner_rest(self, s2i_python_version):
     namespace = "s2i-test-combiner-rest"
     retry_run(f"kubectl create namespace {namespace}")
     create_push_s2i_image(s2i_python_version, "model", "rest")
     create_push_s2i_image(s2i_python_version, "combiner", "rest")
     retry_run(
         f"kubectl apply -f ../resources/s2i_python_combiner.json -n {namespace}"
     )
     wait_for_rollout("mycombiner-mycombiner-acc7c4d", namespace)
     r = initial_rest_request("mycombiner", namespace)
     arr = np.array([[1, 2, 3]])
     r = rest_request_ambassador("mycombiner",
                                 namespace,
                                 API_AMBASSADOR,
                                 data=arr)
     res = r.json()
     logging.warning(res)
     assert r.status_code == 200
     assert r.json()["data"]["tensor"]["shape"] == [1, 3]
     assert r.json()["data"]["tensor"]["values"] == [3, 4, 5]
     run(
         f"kubectl delete -f ../resources/s2i_python_combiner.json -n {namespace}",
         shell=True,
     )
     run(f"kubectl delete namespace {namespace}", shell=True)
Example #17
 def _upgrade_seldon():
     retry_run(
         "helm upgrade seldon "
         "../../helm-charts/seldon-core-operator "
         "--namespace seldon-system "
         "--wait",
         attempts=2,
     )
Example #18
def delete_seldon(name="seldon", namespace="seldon-system"):
    retry_run(f"helm delete {name} -n {namespace}", attempts=3)

    # Helm 3.0.3 doesn't delete CRDs
    retry_run(
        "kubectl delete crd --ignore-not-found "
        "seldondeployments.machinelearning.seldon.io ",
        attempts=3,
    )
Example #19
def test_xss_header(namespace):
    sdep_name = "mymodel"
    sdep_path = to_resources_path("graph-echo.json")
    retry_run(f"kubectl apply -f {sdep_path} -n {namespace}")
    wait_for_status(sdep_name, namespace)
    wait_for_rollout(sdep_name, namespace)

    res = initial_rest_request(sdep_name, namespace)

    assert "X-Content-Type-Options" in res.headers
    assert res.headers["X-Content-Type-Options"] == "nosniff"
Example #20
    def test_alibi_detect_cifar10_rclone(self, namespace):
        spec = "../resources/adserver-cifar10-od-rclone.yaml"
        name = "cifar10-od-server-rclone"
        vs_prefix = name

        retry_run(f"kubectl apply -f {spec} -n {namespace}")

        wait_for_deployment(name, namespace)

        time.sleep(AFTER_WAIT_SLEEP)

        with open(self.truck_json) as f:
            data = json.load(f)

        for attempt in Retrying(
                wait=wait_fixed(TENACITY_WAIT),
                stop=stop_after_attempt(TENACITY_STOP_AFTER_ATTEMPT),
        ):
            with attempt:
                r = requests.post(
                    f"http://localhost:8004/{vs_prefix}/",
                    json=data,
                    headers=self.HEADERS,
                )
                j = r.json()

        assert j["data"]["is_outlier"][0] == 0
        assert j["meta"]["name"] == "OutlierVAE"
        assert j["meta"]["detector_type"] == "offline"
        assert j["meta"]["data_type"] == "image"

        with open(self.truck_json_outlier) as f:
            data = json.load(f)

        for attempt in Retrying(
                wait=wait_fixed(TENACITY_WAIT),
                stop=stop_after_attempt(TENACITY_STOP_AFTER_ATTEMPT),
        ):
            with attempt:
                r = requests.post(
                    f"http://localhost:8004/{vs_prefix}/",
                    json=data,
                    headers=self.HEADERS,
                )
                j = r.json()

        assert j["data"]["is_outlier"][0] == 1
        assert j["meta"]["name"] == "OutlierVAE"
        assert j["meta"]["detector_type"] == "offline"
        assert j["meta"]["data_type"] == "image"

        run(f"kubectl delete -f {spec} -n {namespace}", shell=True)
Example #21
 def test_xgboost(self, namespace):
     spec = "../../servers/xgboostserver/samples/iris.yaml"
     retry_run(f"kubectl apply -f {spec}  -n {namespace}")
     wait_for_status("xgboost", namespace)
     wait_for_rollout("xgboost", namespace)
     time.sleep(1)
     logging.warning("Initial request")
     r = initial_rest_request(
         "xgboost", namespace, data=[[0.1, 0.2, 0.3, 0.4]], dtype="ndarray"
     )
     assert r.status_code == 200
     logging.warning("Success for test_prepack_xgboost")
     run(f"kubectl delete -f {spec} -n {namespace}", shell=True)
Example #22
def assert_model(sdep_name, namespace, initial=False):
    _request = initial_rest_request if initial else rest_request
    r = _request(sdep_name, namespace)

    assert r is not None
    assert r.status_code == 200
    assert r.json()["data"]["tensor"]["values"] == [1.0, 2.0, 3.0, 4.0]

    # NOTE: The following tests whether the `SeldonDeployment` can be fetched
    # as a Kubernetes resource. This covers cases where some resources (e.g.
    # CRD versions or webhooks) may get inadvertently removed between versions.
    # `retry_run()` implicitly asserts on the return code of the command.
    retry_run(f"kubectl get -n {namespace} sdep {sdep_name}")
Example #23
def test_label_update(namespace, seldon_version):
    # Deploy test model
    retry_run(f"kubectl apply -f ../resources/graph1.json -n {namespace}")
    wait_for_status("mymodel", namespace)
    wait_for_rollout("mymodel", namespace)
    assert_model("mymodel", namespace, initial=True)

    # Install id-scoped operator
    controller_id = f"seldon-{namespace}"
    # TODO: We install the new controller in the same namespace, but that is
    # not necessary, since the model is targeted via the controller-id label
    retry_run(
        f"helm install {controller_id} "
        "../../helm-charts/seldon-core-operator "
        f"--namespace {namespace} "
        "--set crd.create=false "
        f"--set controllerId={controller_id} "
        "--wait",
        attempts=2,
    )

    # Label model to be served by new controller
    retry_run("kubectl label sdep mymodel "
              f"seldon.io/controller-id={controller_id} "
              f"--namespace {namespace}")

    # Assert that model is still working under new id-scoped operator
    wait_for_status("mymodel", namespace)
    wait_for_rollout("mymodel", namespace)
    assert_model("mymodel", namespace, initial=True)

    # Delete all resources (webhooks, etc.) before deleting namespace
    retry_run(f"helm delete {controller_id} --namespace {namespace}")
Example #24
 def test_namespace_operator(self, namespace):
     retry_run(
         f"helm install seldon ../../helm-charts/seldon-core-operator --namespace {namespace} --set executor.enabled=true --set istio.enabled=true --set istio.gateway=seldon-gateway --set certManager.enabled=false --set crd.create=false --set singleNamespace=true"
     )
     retry_run(f"kubectl apply -f ../resources/graph1.json -n {namespace}")
     wait_for_status("mymodel", namespace)
     wait_for_rollout("mymodel", namespace)
     logging.warning("Initial request")
     r = initial_rest_request("mymodel", namespace, endpoint=API_AMBASSADOR)
     assert r.status_code == 200
     assert r.json()["data"]["tensor"]["values"] == [1.0, 2.0, 3.0, 4.0]
     logging.warning("Success for test_namespace_operator")
     run(f"kubectl delete -f ../resources/graph1.json -n {namespace}", shell=True)
     run(f"helm uninstall seldon -n {namespace}", shell=True)
Example #25
    def _install_namespace_scoped():
        # Install on the current namespace
        retry_run(
            "helm install seldon "
            "../../helm-charts/seldon-core-operator "
            f"--namespace {namespace} "
            "--set crd.create=false "
            "--set singleNamespace=true "
            "--wait",
            attempts=2,
        )

        # Assert that the model is still working under the new namespace-scoped operator
        wait_for_status("mymodel", namespace)
        wait_for_rollout("mymodel", namespace)
Example #26
def test_xss_escaping(namespace):
    sdep_name = "mymodel"
    sdep_path = to_resources_path("graph-echo.json")
    retry_run(f"kubectl apply -f {sdep_path} -n {namespace}")
    wait_for_status(sdep_name, namespace)
    wait_for_rollout(sdep_name, namespace)

    payload = '<div class="div-class"></div>'
    expected = '\\u003cdiv class=\\"div-class\\"\\u003e\\u003c/div\\u003e'

    res = rest_request(sdep_name, namespace, data=payload, dtype="strData")

    # We need to compare raw text: parsing the JSON body would undo the
    # escaping that we want to verify.
    assert res.text == f'{{"meta":{{}},"strData":"{expected}"}}\n'
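A one-line illustration of why the comparison is done on the raw body: parsing the JSON would decode the \u003c escapes back into <, hiding exactly what the test wants to verify.

import json

# json.loads undoes the escaping, so the parsed value looks unescaped again.
assert json.loads('"\\u003cdiv\\u003e"') == "<div>"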
Example #27
    def test_alibi_explain_anchor_tabular(self, namespace):
        spec = "../resources/iris_anchor_tabular_explainer_v2.yaml"
        name = "iris-default-explainer"
        vs_prefix = (f"seldon/{namespace}/iris-explainer/default/v2/models/"
                     f"iris-default-explainer/infer")

        test_data = np.array([[5.964, 4.006, 2.081, 1.031]])
        inference_request = {
            "parameters": {
                "content_type": "np"
            },
            "inputs": [
                {
                    "name": "explain",
                    "shape": test_data.shape,
                    "datatype": "FP32",
                    "data": test_data.tolist(),
                    "parameters": {
                        "content_type": "np"
                    },
                },
            ],
        }

        retry_run(f"kubectl apply -f {spec} -n {namespace}")

        wait_for_deployment(name, namespace)

        time.sleep(AFTER_WAIT_SLEEP)

        for attempt in Retrying(
                wait=wait_fixed(TENACITY_WAIT),
                stop=stop_after_attempt(TENACITY_STOP_AFTER_ATTEMPT),
        ):
            with attempt:
                r = requests.post(
                    f"http://localhost:8004/{vs_prefix}",
                    json=inference_request,
                )
                # note: explanation will come back in v2 as a nested json dictionary
                explanation = json.loads(r.json()["outputs"][0]["data"])

        assert explanation["meta"]["name"] == "AnchorTabular"
        assert "anchor" in explanation["data"]
        assert "precision" in explanation["data"]
        assert "coverage" in explanation["data"]

        run(f"kubectl delete -f {spec} -n {namespace}", shell=True)
Example #28
 def test_tfserving(self, namespace):
     spec = "../../servers/tfserving/samples/mnist_rest.yaml"
     retry_run(f"kubectl apply -f {spec}  -n {namespace}")
     wait_for_status("tfserving", namespace)
     wait_for_rollout("tfserving", namespace)
     time.sleep(1)
     logging.warning("Initial request")
     r = initial_rest_request(
         "tfserving",
         namespace,
         data=[create_random_data(784)[1].tolist()],
         dtype="ndarray",
     )
     assert r.status_code == 200
     logging.warning("Success for test_prepack_tfserving")
     run(f"kubectl delete -f {spec} -n {namespace}", shell=True)
Example #29
 def test_sklearn(self):
     namespace = "test-sklearn"
     spec = "../../servers/sklearnserver/samples/iris.yaml"
     retry_run(f"kubectl create namespace {namespace}")
     retry_run(f"kubectl apply -f {spec} -n {namespace}")
     wait_for_rollout("iris-default-4903e3c", namespace)
     wait_for_status("sklearn", namespace)
     time.sleep(1)
     logging.warning("Initial request")
     r = initial_rest_request(
         "sklearn", namespace, data=[[0.1, 0.2, 0.3, 0.4]], dtype="ndarray"
     )
     assert r.status_code == 200
     logging.warning("Success for test_prepack_sklearn")
     run(f"kubectl delete -f {spec} -n {namespace}", shell=True)
     run(f"kubectl delete namespace {namespace}", shell=True)
Example #30
@pytest.fixture
def namespace(request):
    """
    Creates an individual Kubernetes namespace for this particular test and
    removes it at the end. The value injected into the test function is the
    namespace name.
    """

    test_name = request.node.name
    namespace = clean_string(test_name)

    # Create namespace
    retry_run(f"kubectl create namespace {namespace}")
    yield namespace

    # Tear down namespace
    run(f"kubectl delete namespace {namespace}", shell=True)