Example #1
def test_undeploy_pipeline_docker(inference_pipeline: Pipeline,
                                  docker_runtime: SeldonDockerRuntime):
    inference_pipeline.undeploy()

    for model in inference_pipeline._models:
        with pytest.raises(docker.errors.NotFound):
            docker_runtime._get_container(model._details)
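This test assumes the pipeline containers are already up, presumably deployed by a fixture or an earlier test. A minimal sketch of that assumed prior step, following the deploy-then-wait pattern of Examples #4 and #7 (the Pipeline.deploy() call is an assumption mirroring the undeploy() above):

inference_pipeline.deploy()
time.sleep(2)  # give the Docker containers a moment to start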
Example #2
def test_undeploy_docker(sklearn_model: Model, runtime: SeldonDockerRuntime):
    time.sleep(2)  # brief pause so the fixture's deploy has settled before undeploying

    runtime.undeploy(sklearn_model)

    with pytest.raises(docker.errors.NotFound):
        runtime._get_container(sklearn_model.model_spec)
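The sklearn_model fixture itself is not part of the excerpt. A plausible sketch, reusing the Model constructor arguments shown in Example #8 (the URI and local folder are illustrative placeholders, and the default-protocol runtime is an assumption):

@pytest.fixture
def sklearn_model() -> Model:
    # Hypothetical fixture; constructor arguments mirror Example #8.
    return Model(
        name="test-iris-sklearn",
        runtime=SeldonDockerRuntime(),
        platform=ModelFramework.SKLearn,
        uri="gs://seldon-models/sklearn",
        local_folder="sklearn/model",
    )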
Example #3
def test_undeploy_pipeline_docker(inference_pipeline: Pipeline,
                                  runtime: SeldonDockerRuntime):
    runtime.undeploy(inference_pipeline)

    for model in inference_pipeline.models.values():
        with pytest.raises(docker.errors.NotFound):
            runtime._get_container(model.model_spec)
Example #4
def test_undeploy_docker(sklearn_model: Model, docker_runtime: SeldonDockerRuntime):
    sklearn_model.deploy()
    time.sleep(2)

    sklearn_model.undeploy()

    with pytest.raises(docker.errors.NotFound):
        docker_runtime._get_container(sklearn_model.details)
Example #5
def test_deploy_pipeline_docker(
    inference_pipeline: Pipeline,
    runtime: SeldonDockerRuntime,
):
    for model in inference_pipeline.models.values():
        container = runtime._get_container(model.model_spec)
        assert container.status == "running"

    pipeline_container = runtime._get_container(inference_pipeline.model_spec)
    assert pipeline_container.status == "running"
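Nothing in this test deploys the pipeline, so the inference_pipeline fixture presumably does. A hedged sketch of such a deploy-and-clean-up fixture, combining the pipeline definition from Example #12 with the runtime.undeploy pattern of Example #3 (runtime.deploy and the build step are assumptions):

@pytest.fixture
def inference_pipeline(runtime: SeldonDockerRuntime) -> Generator[Pipeline, None, None]:
    pipeline = build_pipeline()  # placeholder for the @pipeline definition of Example #12
    runtime.deploy(pipeline)     # assumed counterpart of runtime.undeploy
    time.sleep(2)
    yield pipeline
    runtime.undeploy(pipeline)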
Example #6
def test_deploy_pipeline_docker(
    inference_pipeline: Pipeline,
    docker_runtime: SeldonDockerRuntime,
    docker_runtime_v2: SeldonDockerRuntime,
):
    # Individual model containers run under the default runtime; the
    # pipeline orchestrator container runs under the V2-protocol runtime.
    for model in inference_pipeline._models:
        container = docker_runtime._get_container(model.details)
        assert container.status == "running"

    pipeline_container = docker_runtime_v2._get_container(inference_pipeline.details)
    assert pipeline_container.status == "running"
Example #7
def test_deploy_docker(sklearn_model: Model, docker_runtime: SeldonDockerRuntime):
    sklearn_model.deploy()
    time.sleep(2)

    container = docker_runtime._get_container(sklearn_model.details)
    assert container.status == "running"

    sklearn_model.undeploy()
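Between the status check and the undeploy, a smoke-test prediction could be added in the style of Example #8. Whether the call runs the model locally or against the container depends on the library version, so treat this as a sketch (the iris-shaped input is illustrative):

response = sklearn_model(np.array([[5.1, 3.5, 1.4, 0.2]]))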
Example #8
def test_lambda(input, expected):
    model = Model(
        name="test-iris-sklearn",
        runtime=SeldonDockerRuntime(protocol=KFServingV2Protocol()),
        platform=ModelFramework.SKLearn,
        uri="gs://seldon-models/sklearn",
        local_folder="sklearn/model",
        model_func=lambda x: np.array([[0, 0, 1]]),
    )

    response = model(input)
    np.testing.assert_allclose(response, expected, atol=1e-2)
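The input and expected arguments come from a parametrization the excerpt omits. Since model_func ignores its input and always returns [[0, 0, 1]], one plausible set of cases is (the iris-shaped input is illustrative):

@pytest.mark.parametrize(
    "input, expected",
    [
        (np.array([[5.1, 3.5, 1.4, 0.2]]), np.array([[0, 0, 1]])),
    ],
)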
Example #9
def test_custom_model(v2_input, expected):
    @model(
        name="custom",
        runtime=SeldonDockerRuntime(protocol=KFServingV2Protocol()),
        platform=ModelFramework.Custom,
        uri="gs://seldon-models/custom",
        local_folder="custom_iris_path",
    )
    def custom_model(a: np.ndarray) -> np.ndarray:
        return a

    response = custom_model.request(v2_input)
    assert response == expected
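v2_input is a request body in the KFServing V2 inference protocol. A minimal payload for a single tensor input might look like this (the tensor name, shape, and data are illustrative):

v2_input = {
    "inputs": [
        {
            "name": "a",
            "datatype": "FP64",
            "shape": [1, 4],
            "data": [0.1, 0.2, 0.3, 0.4],
        }
    ]
}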
Example #10
def test_custom_multiheaded_model_list(v2_input, expected):
    @model(
        name="multi-headed",
        runtime=SeldonDockerRuntime(protocol=KFServingV2Protocol()),
        platform=ModelFramework.Custom,
        uri="gs://seldon-models/custom",
        local_folder="custom_iris_path",
    )
    def custom_multiheaded_model_list(a: np.ndarray,
                                      b: np.ndarray) -> List[np.ndarray]:
        return [a, b]

    response = custom_multiheaded_model_list.request(v2_input)
    assert response == expected
Example #11
def test_custom_multiheaded_model_tuple(v2_input, expected):
    @model(
        name="test-iris-sklearn",
        runtime=SeldonDockerRuntime(protocol=KFServingV2Protocol(
            model_name="multi-headed")),
        platform=ModelFramework.Custom,
        uri="gs://seldon-models/custom",
        local_folder="custom_iris_path",
    )
    def custom_multiheaded_model_tuple(
            a: np.ndarray, b: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
        return a, b

    response = custom_multiheaded_model_tuple.request(v2_input)
    assert response == expected
Example #12
def inference_pipeline() -> Pipeline:
    # NOTE: We define the class inside the scope of the fixture to make sure
    # that the serialisation works correctly.
    # This way, we simulate a remote host, without access to the actual class
    # definition.
    runtime = SeldonDockerRuntime(KFServingV2Protocol())

    @pipeline(
        name="inference-pipeline",
        runtime=runtime,
    )
    def _pipeline(payload: np.ndarray) -> np.ndarray:
        return payload.sum(keepdims=True)

    return _pipeline
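Because the decorated pipeline wraps a plain function, the fixture's return value can be exercised locally before any deployment, assuming the decorated object is callable like the Model in Example #8. payload.sum(keepdims=True) over a 1-D array yields a single-element array:

pipeline = inference_pipeline()
np.testing.assert_allclose(pipeline(np.array([1.0, 2.0])), np.array([3.0]))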
Example #13
def test_custom_model_decorator_types(v2_input, expected):
    @model(
        name="test-iris-sklearn",
        runtime=SeldonDockerRuntime(protocol=KFServingV2Protocol(
            model_name="custom")),
        platform=ModelFramework.Custom,
        uri="gs://seldon-models/custom",
        local_folder="custom_iris_path",
        inputs=np.ndarray,
        outputs=np.ndarray,
    )
    def custom_model_decorator_types(a):
        return a

    response = custom_model_decorator_types.request(v2_input)
    assert response == expected
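The inputs=/outputs= keyword arguments declare the tensor types that Examples #9 through #11 express via annotations; without them the unannotated function would give the protocol no type information. The annotated equivalent, in the style of Example #9, would be:

def custom_model_decorator_types(a: np.ndarray) -> np.ndarray:
    return a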
Example #14
def test_launch_tensorflow(cifar10_model: Model, runtime: SeldonDockerRuntime):
    container = runtime._get_container(cifar10_model.model_spec)
    assert container.status == "running"
Example #15
def docker_runtime_v2() -> Generator[SeldonDockerRuntime, None, None]:
    runtime = SeldonDockerRuntime(protocol=KFServingV2Protocol())

    yield runtime
Example #16
def docker_runtime() -> Generator[SeldonDockerRuntime, None, None]:
    runtime = SeldonDockerRuntime()

    yield runtime
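Examples #15 and #16 are generator functions typed Generator[SeldonDockerRuntime, None, None], which strongly suggests they are pytest fixtures whose @pytest.fixture decorator was stripped by the snippet extractor. The presumed full form of Example #16:

@pytest.fixture
def docker_runtime() -> Generator[SeldonDockerRuntime, None, None]:
    runtime = SeldonDockerRuntime()
    yield runtime  # no teardown here: the tests undeploy models themselves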
Example #17
def test_deploy_docker(sklearn_model_deployed, runtime: SeldonDockerRuntime):
    time.sleep(2)

    container = runtime._get_container(sklearn_model_deployed.model_spec)
    assert container.status == "running"
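sklearn_model_deployed is presumably a fixture that deploys the model before the test and tears it down afterwards. A plausible sketch, combining the hypothetical sklearn_model fixture from the Example #2 note with the runtime.undeploy pattern of Example #3 (runtime.deploy is an assumption mirroring undeploy):

@pytest.fixture
def sklearn_model_deployed(
    sklearn_model: Model, runtime: SeldonDockerRuntime
) -> Generator[Model, None, None]:
    runtime.deploy(sklearn_model)  # assumed counterpart of runtime.undeploy
    yield sklearn_model
    runtime.undeploy(sklearn_model)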