async def test_name_fallback(model_folder: str, model_repository: ModelRepository):
    """Settings loaded from a file without a "name" key fall back to the folder name."""
    # Build a settings dict with the "name" key stripped out, then write it
    # to the folder's model-settings.json.
    raw = ModelSettings().dict()
    del raw["name"]
    raw["implementation"] = get_import_path(raw["implementation"])

    settings_path = os.path.join(model_folder, DEFAULT_MODEL_SETTINGS_FILENAME)
    with open(settings_path, "w") as settings_file:
        json.dump(raw, settings_file)

    loaded = model_repository._load_model_settings(settings_path)
    assert loaded.name == os.path.basename(model_folder)
def test_model_settings_from_env(monkeypatch):
    """ModelSettings picks up its fields from mlserver_model_* env vars."""
    env = {
        "mlserver_model_name": "foo-model",
        "mlserver_model_version": "v0.1.0",
        "mlserver_model_uri": "/mnt/models/my-model",
    }
    for var_name, var_value in env.items():
        monkeypatch.setenv(var_name, var_value)

    settings = ModelSettings()
    settings.parameters = ModelParameters()

    assert settings.name == env["mlserver_model_name"]
    assert settings.version == env["mlserver_model_version"]
    assert settings.parameters.uri == env["mlserver_model_uri"]
def case_custom_model(custom_model: Model) -> ModelSettings:
    """Persist the custom model and return settings pointing at its local folder."""
    save(custom_model, save_env=False)
    uri = custom_model.details.local_folder
    return ModelSettings(name="custom-model", parameters=ModelParameters(uri=uri))
def case_wrapped_class_instance(inference_pipeline_class) -> ModelSettings:
    """Persist a wrapped class instance and return settings for its pipeline folder."""
    save(inference_pipeline_class, save_env=False)
    uri = inference_pipeline_class.pipeline.details.local_folder
    return ModelSettings(
        name="wrapped-class-instance", parameters=ModelParameters(uri=uri)
    )
async def mlserver_runtime(model_settings: ModelSettings) -> InferenceRuntime:
    """Build an InferenceRuntime for the given settings and await its load."""
    # pytest-cases may hand us a LazyValue instead of real settings;
    # resolve it before constructing the runtime.
    if is_lazy(model_settings):
        model_settings = model_settings.get(request_or_item=mlserver_runtime)

    runtime = InferenceRuntime(model_settings)
    await runtime.load()
    return runtime
def case_wrapped_class(inference_pipeline_class) -> ModelSettings:
    """Persist the wrapping class itself and return settings for its pipeline folder."""
    wrapper_cls = inference_pipeline_class.__class__
    save(wrapper_cls, save_env=False)
    uri = wrapper_cls.pipeline.details.local_folder
    return ModelSettings(name="wrapped-class", parameters=ModelParameters(uri=uri))
def xgboost_model(xgboost_model_uri: str) -> XGBoostModel:
    """Return a loaded XGBoostModel for the serialized model at the given URI."""
    settings = ModelSettings(
        name="xgboost-model",
        version="v1.2.3",
        parameters=ModelParameters(uri=xgboost_model_uri),
    )
    runtime = XGBoostModel(settings)
    runtime.load()
    return runtime
def sklearn_model(sklearn_model_uri: str) -> SKLearnModel:
    """Return a loaded SKLearnModel for the serialized model at the given URI."""
    settings = ModelSettings(
        name="sklearn-model",
        version="v1.2.3",
        parameters=ModelParameters(uri=sklearn_model_uri),
    )
    runtime = SKLearnModel(settings)
    runtime.load()
    return runtime
async def test_ready(data_plane, model_registry, ready):
    """Server-level readiness mirrors the readiness of every registered model."""
    settings = ModelSettings(
        name="sum-model-2", parameters=ModelParameters(version="v1.2.3")
    )
    extra_model = SumModel(settings)
    await model_registry.load(extra_model)
    # Flip the new model's readiness and check it propagates to the data plane.
    extra_model.ready = ready

    assert await data_plane.ready() == ready
def case_async_custom_model() -> ModelSettings:
    """Persist an async custom model function and return settings for its folder."""

    @aio.model(name="async-custom-model", platform=ModelFramework.Custom)
    async def _custom_model(payload: np.ndarray) -> np.ndarray:
        return payload.sum(keepdims=True)

    save(_custom_model, save_env=False)
    uri = _custom_model.details.local_folder
    return ModelSettings(
        name="async-custom-model", parameters=ModelParameters(uri=uri)
    )
def model_settings_pytorch_fixed(pytorch_model_uri) -> ModelSettings:
    """Settings for the MLflow model backed by the PyTorch artifact URI."""
    params = ModelParameters(uri=pytorch_model_uri)
    return ModelSettings(name="mlflow-model", parameters=params)
def model_settings(model_uri: str) -> ModelSettings:
    """Settings for the MLflow model at the given URI."""
    params = ModelParameters(uri=model_uri)
    return ModelSettings(name="mlflow-model", parameters=params)
def model_settings(model_uri: str) -> ModelSettings:
    """Settings for the XGBoost model at the given URI, pinned to v1.2.3."""
    params = ModelParameters(uri=model_uri, version="v1.2.3")
    return ModelSettings(name="xgboost-model", parameters=params)
def model_settings(pipeline_uri: str) -> ModelSettings:
    """Settings for the sum pipeline at the given URI."""
    params = ModelParameters(uri=pipeline_uri)
    return ModelSettings(name="sum-pipeline", parameters=params)
def xgboost_model_settings(xgboost_model_uri: str) -> ModelSettings:
    """Settings for the XGBoost model at the given URI, pinned to v1.2.3."""
    params = ModelParameters(uri=xgboost_model_uri)
    return ModelSettings(name="xgboost-model", version="v1.2.3", parameters=params)
def sklearn_model_settings(sklearn_model_uri: str) -> ModelSettings:
    """Settings for the Scikit-Learn model at the given URI, pinned to v1.2.3."""
    params = ModelParameters(uri=sklearn_model_uri)
    return ModelSettings(name="sklearn-model", version="v1.2.3", parameters=params)