def test_log(self, model_version, docker_image):
    """Logging a Docker image alongside a model API stores the API as an artifact."""
    api = ModelAPI([[1, 2, 3]], [1])
    model_version.log_docker(docker_image, model_api=api)

    # the stored artifact is retrieved as a file-like object and parsed back
    stored_api = model_version.get_artifact(_artifact_utils.MODEL_API_KEY)
    assert json.load(stored_api) == api.to_dict()
def test_update_with_custom_module(self, client, model_version, created_entities):
    """Deploy a model logged with custom modules and verify served predictions.

    Logs a PyTorch model together with its ``models/`` custom-module
    directory, logs a Python environment, deploys the version to a fresh
    endpoint, and checks that deployed predictions match the in-memory
    model's outputs.
    """
    torch = pytest.importorskip("torch")

    with sys_path_manager() as sys_path:
        sys_path.append(".")

        from models.nets import FullyConnected  # pylint: disable=import-error

        train_data = torch.rand((2, 4))
        classifier = FullyConnected(num_features=4, hidden_size=32, dropout=0.2)

        model_api = ModelAPI(train_data.tolist(), classifier(train_data).tolist())
        model_version.log_model(classifier, custom_modules=["models/"], model_api=model_api)

        # "==" is the valid PEP 508 exact-pin operator; a bare "=" is not a
        # valid version specifier (and the sibling CLI test pins with "==").
        env = Python(requirements=["torch=={}".format(torch.__version__)])
        model_version.log_environment(env)

        path = verta._internal_utils._utils.generate_default_name()
        endpoint = client.set_endpoint(path)
        created_entities.append(endpoint)
        endpoint.update(model_version, DirectUpdateStrategy(), wait=True)

        test_data = torch.rand((4, 4))
        prediction = torch.tensor(endpoint.get_deployed_model().predict(test_data.tolist()))
        assert torch.all(classifier(test_data).eq(prediction))
def test_dataframe_modelapi_and_values(dataframe_api_and_values):
    """API generated from dataframe values matches the expected API spec."""
    expected_api, values = dataframe_api_and_values
    generated_api = ModelAPI._data_to_api(values)

    def canonical(obj):
        # serialize with stable key order so structurally-equal dicts compare equal
        return json.dumps(obj, sort_keys=True, indent=2)

    assert canonical(expected_api) == canonical(generated_api)
def test_modelapi_and_values(api_and_values):
    """API generated from raw values matches the expected API spec."""
    expected_api, values = api_and_values
    assert len(values) > 0  # guard against a degenerate (empty) fixture

    generated_api = ModelAPI._data_to_api(values)

    def canonical(obj):
        # serialize with stable key order so structurally-equal dicts compare equal
        return json.dumps(obj, sort_keys=True, indent=2)

    assert canonical(expected_api) == canonical(generated_api)
def test_create_version_with_custom_modules(self, client, registered_model, created_entities):
    """End-to-end: create a model version via the CLI, then deploy and query it.

    Saves a PyTorch model and a requirements file to disk, registers them
    through the ``registry create registeredmodelversion`` CLI command,
    verifies the retrieved model matches, then logs a model API, deploys the
    version to an endpoint, and checks served predictions against the
    in-memory model.
    """
    torch = pytest.importorskip("torch")
    pytest.importorskip("numpy")  # torch model (de)serialization needs numpy available

    model_name = registered_model.name
    version_name = "my version"

    with sys_path_manager() as sys_path:
        sys_path.append(".")

        from models.nets import FullyConnected

        train_data = torch.rand((2, 4))

        classifier = FullyConnected(num_features=4, hidden_size=32, dropout=0.2)
        model_path = "classifier.pt"
        torch.save(classifier, model_path)

        requirements_path = "requirements.txt"
        with open(requirements_path, "w") as f:
            f.write("torch=={}".format(torch.__version__))

        try:
            runner = CliRunner()
            result = runner.invoke(
                cli,
                ['registry', 'create', 'registeredmodelversion', model_name, version_name,
                 "--model", model_path, "--custom-module", "models/",
                 "--requirements", requirements_path],
            )
            assert not result.exception

            retrieved_model = registered_model.get_version(name=version_name).get_model()
            assert torch.allclose(classifier(train_data), retrieved_model(train_data))
        finally:
            # remove the temp files even if the CLI call or assertions fail,
            # so a failing run doesn't leave artifacts in the working directory
            os.remove(model_path)
            os.remove(requirements_path)

        # TODO: consolidate these in the command above
        model_version = registered_model.get_version(name=version_name)

        # Log model api:
        model_api = ModelAPI(train_data.tolist(), classifier(train_data).tolist())
        model_api["model_packaging"] = {
            "deserialization": "cloudpickle",
            "type": "torch",
            # NOTE(review): hard-coded interpreter version — presumably matches
            # the deployment image; confirm against the serving environment
            "python_version": "2.7.17",
        }
        model_version.log_artifact(_artifact_utils.MODEL_API_KEY, model_api, True, "json")

        path = _utils.generate_default_name()
        endpoint = client.set_endpoint(path)
        created_entities.append(endpoint)
        endpoint.update(model_version, DirectUpdateStrategy(), wait=True)

        test_data = torch.rand((4, 4))
        prediction = torch.tensor(endpoint.get_deployed_model().predict(test_data.tolist()))
        assert torch.all(classifier(test_data).eq(prediction))