Example #1
def test_undeploy_pipeline_docker(inference_pipeline: Pipeline,
                                  docker_runtime: SeldonDockerRuntime):
    inference_pipeline.undeploy()

    for model in inference_pipeline._models:
        with pytest.raises(docker.errors.NotFound):
            docker_runtime._get_container(model._details)
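
These listings omit their import headers. A minimal block that the snippets on this page appear to assume, reconstructed from the call sites (the tempo module paths follow the upstream SeldonIO/tempo layout and are an assumption for this fork), would be:

import time
from typing import Generator, List, Tuple

import docker
import numpy as np
import pytest

# Assumed module paths, based on the upstream tempo project layout:
from tempo.kfserving.protocol import KFServingV2Protocol
from tempo.seldon.docker import SeldonDockerRuntime
from tempo.serve.metadata import ModelFramework
from tempo.serve.model import Model
from tempo.serve.pipeline import Pipeline
from tempo.serve.utils import model, pipeline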
Example #2
def test_undeploy_docker(sklearn_model: Model, runtime: SeldonDockerRuntime):
    time.sleep(2)

    runtime.undeploy(sklearn_model)

    with pytest.raises(docker.errors.NotFound):
        runtime._get_container(sklearn_model.model_spec)
Example #3
def test_undeploy_pipeline_docker(inference_pipeline: Pipeline,
                                  runtime: SeldonDockerRuntime):
    runtime.undeploy(inference_pipeline)

    for model in inference_pipeline.models.values():
        with pytest.raises(docker.errors.NotFound):
            runtime._get_container(model.model_spec)
Example #4
File: test_docker.py  Project: M46F/tempo
def test_undeploy_docker(sklearn_model: Model, docker_runtime: SeldonDockerRuntime):
    sklearn_model.deploy()
    time.sleep(2)

    sklearn_model.undeploy()

    with pytest.raises(docker.errors.NotFound):
        docker_runtime._get_container(sklearn_model.details)
Example #5
def test_deploy_pipeline_docker(
    inference_pipeline: Pipeline,
    runtime: SeldonDockerRuntime,
):
    for model in inference_pipeline.models.values():
        container = runtime._get_container(model.model_spec)
        assert container.status == "running"

    pipeline_container = runtime._get_container(inference_pipeline.model_spec)
    assert pipeline_container.status == "running"
Example #6
def test_deploy_pipeline_docker(
    inference_pipeline: Pipeline,
    docker_runtime: SeldonDockerRuntime,
    docker_runtime_v2: SeldonDockerRuntime,
):
    for model in inference_pipeline._models:
        container = docker_runtime._get_container(model.details)
        assert container.status == "running"

    pipeline_container = docker_runtime_v2._get_container(inference_pipeline.details)
    assert pipeline_container.status == "running"
Example #7
File: test_docker.py  Project: M46F/tempo
def test_deploy_docker(sklearn_model: Model, docker_runtime: SeldonDockerRuntime):
    sklearn_model.deploy()
    time.sleep(2)

    container = docker_runtime._get_container(sklearn_model.details)
    assert container.status == "running"

    sklearn_model.undeploy()
Example #8
def test_lambda(input, expected):
    model = Model(
        name="test-iris-sklearn",
        runtime=SeldonDockerRuntime(protocol=KFServingV2Protocol()),
        platform=ModelFramework.SKLearn,
        uri="gs://seldon-models/sklearn",
        local_folder="sklearn/model",
        model_func=lambda x: np.array([[0, 0, 1]]),
    )

    response = model(input)
    np.testing.assert_allclose(response, expected, atol=1e-2)
Example #9
def test_custom_model(v2_input, expected):
    @model(
        name="custom",
        runtime=SeldonDockerRuntime(protocol=KFServingV2Protocol()),
        platform=ModelFramework.Custom,
        uri="gs://seldon-models/custom",
        local_folder="custom_iris_path",
    )
    def custom_model(a: np.ndarray) -> np.ndarray:
        return a

    response = custom_model.request(v2_input)
    assert response == expected
Example #10
def test_custom_multiheaded_model_list(v2_input, expected):
    @model(
        name="multi-headed",
        runtime=SeldonDockerRuntime(protocol=KFServingV2Protocol()),
        platform=ModelFramework.Custom,
        uri="gs://seldon-models/custom",
        local_folder="custom_iris_path",
    )
    def custom_multiheaded_model_list(a: np.ndarray,
                                      b: np.ndarray) -> List[np.ndarray]:
        return [a, b]

    response = custom_multiheaded_model_list.request(v2_input)
    assert response == expected
Example #11
def test_custom_multiheaded_model_tuple(v2_input, expected):
    @model(
        name="test-iris-sklearn",
        runtime=SeldonDockerRuntime(protocol=KFServingV2Protocol(
            model_name="multi-headed")),
        platform=ModelFramework.Custom,
        uri="gs://seldon-models/custom",
        local_folder="custom_iris_path",
    )
    def custom_multiheaded_model_tuple(
            a: np.ndarray, b: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
        return a, b

    response = custom_multiheaded_model_tuple.request(v2_input)
    assert response == expected
Example #12
def inference_pipeline() -> Pipeline:
    # NOTE: We define the class inside the scope of the fixture to make sure
    # that the serialisation works correctly.
    # This way, we simulate a remote host, without access to the actual class
    # definition.
    runtime = SeldonDockerRuntime(KFServingV2Protocol())

    @pipeline(
        name="inference-pipeline",
        runtime=runtime,
    )
    def _pipeline(payload: np.ndarray) -> np.ndarray:
        return payload.sum(keepdims=True)

    return _pipeline
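
As a rough illustration of how this fixture would be consumed (not taken from the project), a test can exercise the pipeline in-process, since calling a tempo pipeline runs the decorated function locally:

def test_pipeline_local(inference_pipeline: Pipeline):
    # Runs _pipeline's body directly, without any Docker deployment.
    result = inference_pipeline(np.array([1.0, 2.0, 3.0]))
    np.testing.assert_allclose(result, np.array([6.0]))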
Example #13
def test_custom_model_decorator_types(v2_input, expected):
    @model(
        name="test-iris-sklearn",
        runtime=SeldonDockerRuntime(protocol=KFServingV2Protocol(
            model_name="custom")),
        platform=ModelFramework.Custom,
        uri="gs://seldon-models/custom",
        local_folder="custom_iris_path",
        inputs=np.ndarray,
        outputs=np.ndarray,
    )
    def custom_model_decorator_types(a):
        return a

    response = custom_model_decorator_types.request(v2_input)
    assert response == expected
Example #14
def test_launch_tensorflow(cifar10_model: Model, runtime: SeldonDockerRuntime):
    container = runtime._get_container(cifar10_model.model_spec)
    assert container.status == "running"
Example #15
def docker_runtime_v2() -> Generator[SeldonDockerRuntime, None, None]:
    runtime = SeldonDockerRuntime(protocol=KFServingV2Protocol())

    yield runtime
Example #16
def docker_runtime() -> Generator[SeldonDockerRuntime, None, None]:
    runtime = SeldonDockerRuntime()

    yield runtime
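
Examples #15 and #16 are bare generator functions; in the source project they are presumably registered as pytest fixtures, with the decorator dropped by the extractor. A sketch of the assumed registration:

@pytest.fixture  # assumed: not visible in the extracted snippet
def docker_runtime() -> Generator[SeldonDockerRuntime, None, None]:
    runtime = SeldonDockerRuntime()
    yield runtime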
Example #17
def test_deploy_docker(sklearn_model_deployed, runtime: SeldonDockerRuntime):
    time.sleep(2)

    container = runtime._get_container(sklearn_model_deployed.model_spec)
    assert container.status == "running"
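
Example #17 depends on a sklearn_model_deployed fixture that this listing does not include. A plausible shape for it, mirroring the deploy/undeploy calls seen in Examples #2 and #3 (hypothetical, an assumption only):

@pytest.fixture
def sklearn_model_deployed(
    sklearn_model: Model, runtime: SeldonDockerRuntime
) -> Generator[Model, None, None]:
    # Deploy before the test and clean up the container afterwards.
    runtime.deploy(sklearn_model)
    yield sklearn_model
    runtime.undeploy(sklearn_model)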