Code example #1
# Imports assumed from mlserver and Tempo; exact module paths may vary by version.
from mlserver.settings import ModelParameters, ModelSettings
from tempo.serve.loader import save
from tempo.serve.model import Model


def case_custom_model(custom_model: Model) -> ModelSettings:
    # Save the model artefacts locally, without packing a conda environment
    save(custom_model, save_env=False)
    model_uri = custom_model.details.local_folder

    return ModelSettings(
        name="custom-model",
        parameters=ModelParameters(uri=model_uri),
    )
Code example #2
# Imports as in code example #1.
def case_wrapped_class_instance(inference_pipeline_class) -> ModelSettings:
    # Here the fixture is a class instance whose `pipeline` attribute holds the Tempo pipeline
    save(inference_pipeline_class, save_env=False)
    model_uri = inference_pipeline_class.pipeline.details.local_folder

    return ModelSettings(
        name="wrapped-class-instance",
        parameters=ModelParameters(uri=model_uri),
    )
Code example #3
# Imports as in code example #1.
def case_wrapped_class(inference_pipeline_class) -> ModelSettings:
    # Save the wrapping class itself, rather than an instance of it
    MyClass = inference_pipeline_class.__class__

    save(MyClass, save_env=False)
    model_uri = MyClass.pipeline.details.local_folder

    return ModelSettings(
        name="wrapped-class",
        parameters=ModelParameters(uri=model_uri),
    )
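
The three `case_*` functions above follow the pytest-cases naming convention: each saves a Tempo artefact and wraps its local folder in an mlserver `ModelSettings`. Assuming pytest-cases is used to collect them, a consuming test might look like this sketch (the test body is illustrative, not from the original suite):

from pytest_cases import parametrize_with_cases


@parametrize_with_cases(
    "model_settings",
    cases=[case_custom_model, case_wrapped_class_instance, case_wrapped_class],
)
def test_model_settings_point_at_artefact(model_settings: ModelSettings):
    # Every case should return settings whose URI points at a saved artefact
    assert model_settings.parameters.uri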
Code example #4
# Imports as in code example #1, plus (assumed path):
from tempo.serve.loader import load


async def test_save(inference_pipeline):
    save(inference_pipeline, save_env=False)

    loaded_pipeline = load(inference_pipeline.details.local_folder)

    # Ensure every model in the pipeline was exported and loads back as a Model
    assert len(inference_pipeline.models.__dict__) == len(
        loaded_pipeline.models.__dict__)
    for model in loaded_pipeline.models.values():
        assert isinstance(model, Model)
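
Note that `test_save` is declared `async def`, so a plain pytest run would not await it; presumably the original suite runs it under an async plugin. A minimal sketch, assuming pytest-asyncio:

import pytest


# Hypothetical: the original suite may configure async support globally instead.
@pytest.mark.asyncio
async def test_save(inference_pipeline):
    ...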
Code example #5
# Imports assumed (module paths may vary by Tempo version):
import time
from typing import Generator

import docker

from tempo.seldon.docker import SeldonDockerRuntime
from tempo.serve.deploy import RemoteModel, deploy
from tempo.serve.loader import save
from tempo.serve.pipeline import Pipeline


# Presumably registered as a (generator) pytest fixture in the original suite.
# `runtime` is likely requested only so the Docker runtime fixture is set up first.
def inference_pipeline_deployed(
    inference_pipeline: Pipeline,
    runtime: SeldonDockerRuntime,
) -> Generator[RemoteModel, None, None]:

    # NOTE: Need to re-save the pipeline so that it knows about the runtime
    save(inference_pipeline, save_env=True)
    rm = deploy(inference_pipeline)
    # TODO: Fix wait_ready for pipelines
    time.sleep(8)

    yield rm

    try:
        rm.undeploy()
    except docker.errors.NotFound:
        # TODO: Should undeploy be idempotent as well?
        # Ignore if the model has already been undeployed
        pass
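
Assuming Tempo's deploy API, where the returned `RemoteModel` exposes `predict` and `undeploy`, a test consuming this fixture could be sketched as follows; the payload and assertion are illustrative only:

import numpy as np


def test_deployed_pipeline(inference_pipeline_deployed: RemoteModel):
    # Hypothetical payload; real inputs depend on the pipeline's signature
    payload = np.array([[0.0, 1.0, 2.0, 3.0]])

    prediction = inference_pipeline_deployed.predict(payload)

    assert prediction is not None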