def test_update_with_parameters(self, client, created_entities,
                                experiment_run, model_for_deployment):
    """A canary update carrying resources and env vars triggers a new build."""
    experiment_run.log_model(model_for_deployment['model'], custom_modules=[])
    experiment_run.log_environment(Python(['scikit-learn']))

    path = verta._internal_utils._utils.generate_default_name()
    endpoint = client.set_endpoint(path)
    created_entities.append(endpoint)

    original_build_ids = get_build_ids(endpoint.get_status())

    strategy = CanaryUpdateStrategy(interval=1, step=0.5)
    strategy.add_rule(MaximumAverageLatencyThresholdRule(0.8))
    updated_status = endpoint.update(
        experiment_run,
        strategy,
        resources=Resources(cpu=.25, memory="512Mi"),
        env_vars={
            'CUDA_VISIBLE_DEVICES': "1,2",
            "VERTA_HOST": "app.verta.ai",
        },
    )

    # Check that a new build is added:
    new_build_ids = get_build_ids(updated_status)
    assert len(new_build_ids - original_build_ids) > 0
def test_update_autoscaling(self, client, created_entities, experiment_run,
                            model_for_deployment):
    """Autoscaling quantities and metric targets survive the update round trip."""
    experiment_run.log_model(model_for_deployment['model'], custom_modules=[])
    experiment_run.log_environment(Python(['scikit-learn']))

    path = verta._internal_utils._utils.generate_default_name()
    endpoint = client.set_endpoint(path)
    created_entities.append(endpoint)

    autoscaling = Autoscaling(min_replicas=1, max_replicas=2,
                              min_scale=0.5, max_scale=2.0)
    autoscaling.add_metric(CpuUtilizationTarget(0.5))
    autoscaling.add_metric(MemoryUtilizationTarget(0.7))
    autoscaling.add_metric(RequestsPerWorkerTarget(100))

    endpoint.update(experiment_run, DirectUpdateStrategy(),
                    autoscaling=autoscaling)

    update_status = endpoint.get_update_status()
    autoscaling_metrics = update_status["update_request"]["autoscaling"]["metrics"]
    assert len(autoscaling_metrics) == 3

    # Wire metric IDs — presumably 1001 = CPU, 1002 = requests-per-worker,
    # 1003 = memory, matching the targets configured above (inferred from the
    # asserted values; TODO confirm against the backend's metric registry).
    expected_values = {1001: "0.5", 1002: "100", 1003: "0.7"}
    for metric in autoscaling_metrics:
        assert metric["metric_id"] in expected_values
        assert metric["parameters"][0]["value"] == expected_values[metric["metric_id"]]
def test_canary_update(self, client, created_entities, experiment_run,
                       model_for_deployment):
    """A canary strategy needs at least one rule; with one, the update builds."""
    experiment_run.log_model(model_for_deployment['model'], custom_modules=[])
    experiment_run.log_environment(Python(['scikit-learn']))

    path = verta._internal_utils._utils.generate_default_name()
    endpoint = client.set_endpoint(path)
    created_entities.append(endpoint)

    original_build_ids = get_build_ids(endpoint.get_status())

    strategy = CanaryUpdateStrategy(interval=1, step=0.5)

    # A rule-less canary strategy must be rejected before any update happens.
    with pytest.raises(RuntimeError) as excinfo:
        endpoint.update(experiment_run, strategy)
    assert "canary update strategy must have at least one rule" in str(excinfo.value)

    strategy.add_rule(MaximumAverageLatencyThresholdRule(0.8))
    updated_status = endpoint.update(experiment_run, strategy)

    # Check that a new build is added:
    new_build_ids = get_build_ids(updated_status)
    assert len(new_build_ids - original_build_ids) > 0
def test_configure_endpoint(self, client, model_version, strs):
    """Kafka settings can be set at creation, replaced on update, and cleared."""
    LogisticRegression = pytest.importorskip(
        "sklearn.linear_model").LogisticRegression
    strs = iter(strs)

    model_version.log_model(LogisticRegression, custom_modules=[])
    model_version.log_environment(Python(["scikit-learn"]))

    # create
    kafka_settings = KafkaSettings(next(strs), next(strs), next(strs))
    endpoint = client.create_endpoint(_utils.generate_default_name(),
                                      kafka_settings=kafka_settings)
    assert endpoint.kafka_settings == kafka_settings

    # update
    kafka_settings = KafkaSettings(next(strs), next(strs), next(strs))
    endpoint.update(model_version, kafka_settings=kafka_settings)
    assert endpoint.kafka_settings == kafka_settings

    # clear — passing False removes the settings entirely
    endpoint.update(model_version, kafka_settings=False)
    assert endpoint.kafka_settings is None
def test_from_run_download_docker_context(self, experiment_run,
                                          model_for_deployment, in_tempdir,
                                          registered_model):
    """deployable_entity/test_deployment.py::TestDeployability::test_download_docker_context

    But through create_version_from_run().
    """
    download_to_path = "context.tgz"

    experiment_run.log_model(model_for_deployment["model"], custom_modules=[])
    experiment_run.log_environment(Python(["scikit-learn"]))
    model_version = registered_model.create_version_from_run(
        run_id=experiment_run.id,
        name="From Run {}".format(experiment_run.id),
    )

    filepath = model_version.download_docker_context(download_to_path)
    assert filepath == os.path.abspath(download_to_path)

    # can be loaded as tgz, and the context contains a Dockerfile
    with tarfile.open(filepath, "r:gz") as f:
        assert "Dockerfile" in set(f.getnames())
def test_get(self, client, created_entities, experiment_run,
             model_for_deployment):
    """`deployment get endpoint` shows metadata, and the curl once deployed."""
    experiment_run.log_model(model_for_deployment['model'], custom_modules=[])
    experiment_run.log_environment(Python(['scikit-learn']))

    path = _utils.generate_default_name()
    endpoint = client.set_endpoint(path)
    created_entities.append(endpoint)

    runner = CliRunner()
    result = runner.invoke(cli, ['deployment', 'get', 'endpoint', path])
    assert not result.exception

    # before deployment: metadata is present but there is no usable curl
    for fragment in (
        "path: {}".format(endpoint.path),
        "id: {}".format(endpoint.id),
        "curl: <endpoint not deployed>",
        "status",
        "date created",
        "date updated",
        "stage's date created",
        "stage's date updated",
        "components",
    ):
        assert fragment in result.output

    # after a direct (waited) update, the real curl command is printed
    endpoint.update(experiment_run, DirectUpdateStrategy(), True)
    result = runner.invoke(cli, ['deployment', 'get', 'endpoint', path])
    assert "curl: {}".format(endpoint.get_deployed_model().get_curl()) in result.output
def test_no_api_deploy_error(self, experiment_run, model_for_deployment):
    """Deploying a run whose model API artifact was deleted raises RuntimeError."""
    experiment_run.log_model(model_for_deployment['model'], custom_modules=[])
    experiment_run.log_environment(Python(['scikit-learn']))

    conn = experiment_run._conn

    # delete model API
    response = _utils.make_request(
        "DELETE",
        "{}://{}/api/v1/modeldb/experiment-run/deleteArtifact".format(
            conn.scheme, conn.socket),
        conn,
        json={'id': experiment_run.id, 'key': _artifact_utils.MODEL_API_KEY},
    )
    _utils.raise_for_http_error(response)

    # without the model API, deploy() must fail and name the missing artifact
    with pytest.raises(RuntimeError) as excinfo:
        experiment_run.deploy()
    assert _artifact_utils.MODEL_API_KEY in str(excinfo.value)

    # teardown: undeploy (best-effort; response intentionally unchecked)
    requests.delete(
        "{}://{}/api/v1/deployment/models/{}".format(
            conn.scheme, conn.socket, experiment_run.id),
        headers=conn.auth,
    )
def test_update_with_resources(self, client, created_entities, experiment_run,
                               model_for_deployment):
    """`deployment update` accepts a JSON `--resources` string and the backend
    echoes it back in wire format.

    Fix: removed `original_status`/`original_build_ids`, which were computed
    (an extra network round trip) but never used by any assertion.
    """
    endpoint_name = _utils.generate_default_name()
    endpoint = client.set_endpoint(endpoint_name)
    created_entities.append(endpoint)

    experiment_run.log_model(model_for_deployment['model'], custom_modules=[])
    experiment_run.log_environment(Python(['scikit-learn']))

    resources = '{"cpu": 0.25, "memory": "100M"}'
    runner = CliRunner()
    result = runner.invoke(
        cli,
        ['deployment', 'update', 'endpoint', endpoint_name,
         '--run-id', experiment_run.id, "-s", "direct",
         '--resources', resources],
    )
    assert not result.exception

    resources_dict = Resources._from_dict(
        json.loads(resources))._as_dict()  # config is `cpu`, wire is `cpu_millis`
    assert endpoint.get_update_status()['update_request']['resources'] == resources_dict
def test_predict(self, client, experiment_run, created_entities):
    """`deployment predict endpoint --data` prints the model's prediction."""
    np = pytest.importorskip("numpy")
    pytest.importorskip("sklearn")
    from sklearn.linear_model import LogisticRegression

    classifier = LogisticRegression()
    classifier.fit(np.random.random((36, 12)), np.random.random(36).round())
    test_data = np.random.random((4, 12))
    test_data_str = json.dumps(test_data.tolist())

    experiment_run.log_model(classifier, custom_modules=[])
    experiment_run.log_environment(Python(['scikit-learn']))

    path = _utils.generate_default_name()
    endpoint = client.set_endpoint(path)
    created_entities.append(endpoint)
    endpoint.update(experiment_run, DirectUpdateStrategy(), wait=True)

    result = CliRunner().invoke(
        cli,
        ['deployment', 'predict', 'endpoint', path, '--data', test_data_str],
    )
    assert not result.exception

    # CLI output must contain the same prediction the local model produces
    assert json.dumps(classifier.predict(test_data).tolist()) in result.output
def test_direct_update_endpoint(self, client, created_entities, experiment_run,
                                model_for_deployment):
    """`deployment update --strategy direct` kicks off a brand-new build."""
    endpoint_name = _utils.generate_default_name()
    endpoint = client.set_endpoint(endpoint_name)
    created_entities.append(endpoint)
    original_build_ids = get_build_ids(endpoint.get_status())

    experiment_run.log_model(model_for_deployment['model'], custom_modules=[])
    experiment_run.log_environment(Python(['scikit-learn']))

    result = CliRunner().invoke(
        cli,
        ['deployment', 'update', 'endpoint', endpoint_name,
         '--run-id', experiment_run.id, "--strategy", "direct"],
    )
    assert not result.exception

    # at least one build ID must be new relative to the pre-update snapshot
    updated_build_ids = get_build_ids(endpoint.get_status())
    assert len(updated_build_ids - original_build_ids) > 0
def test_profiler_crud(self, client):
    """Upload, get, list, rename, and delete a profiler reference."""
    requirements = ["numpy", "scipy", "pandas"]
    for req in requirements:
        pytest.importorskip(req)

    profilers = client.operations.profilers
    profiler_name = "age_column_profiler_{}".format(generate_default_name())

    # create
    created_profiler = profilers.upload(
        profiler_name,
        ContinuousHistogramProfiler(columns=["age"]),
        environment=Python(requirements=requirements),
    )
    assert isinstance(created_profiler, ProfilerReference)

    # read
    retrieved_profiler = profilers.get(created_profiler.id)
    assert isinstance(retrieved_profiler, ProfilerReference)
    assert created_profiler.id == retrieved_profiler.id

    # list
    listed_profilers = profilers.list()
    assert len(listed_profilers) > 1
    assert created_profiler.id in map(lambda p: p.id, listed_profilers)

    # update: rename in place; the version reference must not change
    old_name = created_profiler.name
    old_profiler_version = created_profiler.reference
    new_name = "profiler2_{}".format(generate_default_name())
    created_profiler.update(new_name)
    assert created_profiler.name == new_name
    assert created_profiler.name != old_name
    assert old_profiler_version == created_profiler.reference

    # delete
    assert profilers.delete(created_profiler)
def test_vcs_installed_verta(self, requirements):
    """A VCS-style verta requirement is replaced by a pin to the installed release."""
    vcs_verta_req = requirements[0]
    pinned_verta_req = "verta=={}".format(verta.__version__)

    env = Python(requirements=requirements)

    # the VCS spec must be gone, replaced by the exact installed version
    assert vcs_verta_req not in env.requirements
    assert pinned_verta_req in env.requirements
def test_get(self, experiment_run, model_for_deployment):
    """A deployed run's model predicts, and its curl embeds the url and token."""
    model = model_for_deployment['model'].fit(
        model_for_deployment['train_features'],
        model_for_deployment['train_targets'],
    )
    experiment_run.log_model(model, custom_modules=[])
    experiment_run.log_environment(Python(['scikit-learn']))
    experiment_run.deploy(wait=True)

    deployed_model = experiment_run.get_deployed_model()
    x = model_for_deployment['train_features'].iloc[1].values
    deployed_model.predict([x])  # smoke check: a prediction round-trips

    deployed_model_curl = deployed_model.get_curl()
    deployed_status = experiment_run.get_deployment_status()
    assert deployed_status["url"] in deployed_model_curl
    assert deployed_status["token"] in deployed_model_curl

    # teardown: undeploy (best-effort; response intentionally unchecked)
    conn = experiment_run._conn
    requests.delete(
        "{}://{}/api/v1/deployment/models/{}".format(
            conn.scheme, conn.socket, experiment_run.id),
        headers=conn.auth,
    )
def test_repr(self, client, created_entities, experiment_run,
              model_for_deployment):
    """repr(endpoint) shows metadata, and the real curl once deployed."""
    experiment_run.log_model(model_for_deployment['model'], custom_modules=[])
    experiment_run.log_environment(Python(['scikit-learn']))

    path = verta._internal_utils._utils.generate_default_name()
    endpoint = client.set_endpoint(path)
    created_entities.append(endpoint)

    str_repr = repr(endpoint)
    assert "path: {}".format(endpoint.path) in str_repr
    assert "url" in str_repr
    assert "id: {}".format(endpoint.id) in str_repr
    assert "curl: <endpoint not deployed>" in str_repr
    # these fields might have changed, so only check that they appear:
    for field in ("status", "date created", "date updated",
                  "stage's date created", "stage's date updated", "components"):
        assert field in str_repr

    endpoint.update(experiment_run, DirectUpdateStrategy(), True)
    str_repr = repr(endpoint)
    assert "curl: {}".format(endpoint.get_deployed_model().get_curl()) in str_repr
def test_update_from_version(self, client, model_version, created_entities):
    """`deployment update --model-version-id` deploys the logged model."""
    np = pytest.importorskip("numpy")
    pytest.importorskip("sklearn")
    from sklearn.linear_model import LogisticRegression

    classifier = LogisticRegression()
    classifier.fit(np.random.random((36, 12)), np.random.random(36).round())
    model_version.log_model(classifier)
    model_version.log_environment(Python(requirements=["scikit-learn"]))

    path = _utils.generate_default_name()
    endpoint = client.set_endpoint(path)
    created_entities.append(endpoint)

    result = CliRunner().invoke(
        cli,
        ['deployment', 'update', 'endpoint', path,
         '--model-version-id', model_version.id, "--strategy", "direct"],
    )
    assert not result.exception

    # poll until the endpoint finishes deploying
    while endpoint.get_status()['status'] != "active":
        time.sleep(3)

    test_data = np.random.random((4, 12))
    assert np.array_equal(endpoint.get_deployed_model().predict(test_data),
                          classifier.predict(test_data))
def test_update_wait(self, client, created_endpoints, experiment_run,
                     model_version, model_for_deployment):
    """This tests endpoint.update(..., wait=True), including the case of build error"""
    experiment_run.log_model(model_for_deployment['model'], custom_modules=[])
    experiment_run.log_requirements(['scikit-learn'])

    path = verta._internal_utils._utils.generate_default_name()
    endpoint = client.set_endpoint(path)
    created_endpoints.append(endpoint)

    # happy path: wait=True blocks until the endpoint is active
    status = endpoint.update(experiment_run, DirectUpdateStrategy(), True)
    assert status["status"] == "active"

    # sad path: an unresolvable requirement must fail the build promptly
    model_version.log_model(model_for_deployment['model'], custom_modules=[])
    model_version.log_environment(Python(requirements=['blahblahblah==3.6.0']))
    with pytest.raises(RuntimeError) as excinfo:
        # this should fail, and not take forever!
        endpoint.update(model_version, DirectUpdateStrategy(), True)
    excinfo_value = str(excinfo.value).strip()
    assert "Could not find a version that satisfies the requirement blahblahblah==3.6.0" in excinfo_value
def test_update_with_custom_module(self, client, model_version,
                                   created_endpoints):
    """A model logged with custom modules deploys and predicts consistently."""
    torch = pytest.importorskip("torch")

    with sys_path_manager() as sys_path:
        sys_path.append(".")
        from models.nets import FullyConnected  # pylint: disable=import-error

        train_data = torch.rand((2, 4))
        classifier = FullyConnected(num_features=4, hidden_size=32, dropout=0.2)
        model_api = ModelAPI(train_data.tolist(),
                             classifier(train_data).tolist())
        model_version.log_model(classifier, custom_modules=["models/"],
                                model_api=model_api)
        # NOTE(review): "torch={}" yields e.g. "torch=1.13" (single '='), not a
        # standard pip "==" pin — presumably normalized downstream; confirm.
        model_version.log_environment(
            Python(requirements=["torch={}".format(torch.__version__)]))

        path = verta._internal_utils._utils.generate_default_name()
        endpoint = client.set_endpoint(path)
        created_endpoints.append(endpoint)
        endpoint.update(model_version, DirectUpdateStrategy(), wait=True)

        # deployed predictions must match the in-process model
        test_data = torch.rand((4, 4))
        prediction = torch.tensor(
            endpoint.get_deployed_model().predict(test_data.tolist()))
        assert torch.all(classifier(test_data).eq(prediction))
def test_update_twice(self, client, registered_model, created_entities):
    """Updating an endpoint a second time swaps in the new model version."""
    class ModelA(object):
        def predict(self, _):
            return "A"

    class ModelB(object):
        def predict(self, _):
            return "B"

    env = Python(requirements=[])

    model_version = registered_model.create_version("first-version")
    model_version.log_model(ModelA())
    model_version.log_environment(env)

    new_model_version = registered_model.create_version("second-version")
    new_model_version.log_model(ModelB())
    new_model_version.log_environment(env)

    path = verta._internal_utils._utils.generate_default_name()
    endpoint = client.set_endpoint(path)
    created_entities.append(endpoint)

    endpoint.update(model_version, wait=True)
    assert endpoint.get_deployed_model().predict("foo") == "A"

    # updating endpoint
    endpoint.update(new_model_version, DirectUpdateStrategy(), wait=True)
    assert endpoint.get_deployed_model().predict("foo") == "B"
def test_model_artifacts(self, model_version, endpoint, in_tempdir):
    """Artifacts re-logged with `overwrite` are what the deployed model receives."""
    key = "foo"
    val = {'a': 1}

    class ModelWithDependency(object):
        def __init__(self, artifacts):
            with open(artifacts[key], 'rb') as f:  # should not KeyError
                if cloudpickle.load(f) != val:
                    raise ValueError  # should not ValueError

        def predict(self, x):
            return x

    # first log junk artifact, to test `overwrite`
    bad_key = "bar"
    bad_val = {'b': 2}
    model_version.log_artifact(bad_key, bad_val)
    model_version.log_model(ModelWithDependency, custom_modules=[],
                            artifacts=[bad_key])

    # log real artifact using `overwrite`
    model_version.log_artifact(key, val)
    model_version.log_model(ModelWithDependency, custom_modules=[],
                            artifacts=[key], overwrite=True)
    model_version.log_environment(Python([]))

    endpoint.update(model_version, DirectUpdateStrategy(), wait=True)
    assert val == endpoint.get_deployed_model().predict(val)
def test_create_version(self, registered_model, in_tempdir, requirements_file):
    """`registry create registeredmodelversion` logs artifact, model, labels, env."""
    LogisticRegression = pytest.importorskip(
        'sklearn.linear_model').LogisticRegression
    model_name = registered_model.name
    version_name = "my version"

    # an arbitrary binary artifact to attach
    filename = "tiny1.bin"
    FILE_CONTENTS = os.urandom(2**16)
    with open(filename, 'wb') as f:
        f.write(FILE_CONTENTS)

    # a pickled (untrained) model to attach
    classifier_name = "tiny2.pth"
    classifier = LogisticRegression()
    with open(classifier_name, 'wb') as f:
        pickle.dump(classifier, f)

    result = CliRunner().invoke(
        cli,
        ['registry', 'create', 'registeredmodelversion', model_name, version_name,
         '-l', 'label1', '-l', 'label2',
         "--artifact", "file={}".format(filename),
         "--model", classifier_name,
         "--requirements", requirements_file.name],
    )
    assert not result.exception

    model_version = registered_model.get_version(name=version_name)
    assert model_version.name in result.output
    assert model_version.get_artifact("file").read() == FILE_CONTENTS
    assert model_version.get_labels() == ["label1", "label2"]
    assert model_version.get_model().get_params() == classifier.get_params()

    # Check environment:
    reqs = Python.read_pip_file(requirements_file.name)
    env = Python(requirements=reqs)
    assert repr(env) == str(model_version.get_environment())
def test_create_version_from_run(self, experiment_run, model_for_deployment,
                                 registered_model):
    """`registry create ... --from-run` copies model, environment, and artifacts."""
    np = pytest.importorskip("numpy")
    model_name = registered_model.name
    version_name = "from_run"

    experiment_run.log_model(model_for_deployment['model'], custom_modules=[])
    experiment_run.log_environment(Python(['scikit-learn']))
    artifact = np.random.random((36, 12))
    experiment_run.log_artifact("some-artifact", artifact)

    result = CliRunner().invoke(
        cli,
        ['registry', 'create', 'registeredmodelversion', model_name,
         version_name, "--from-run", experiment_run.id],
    )
    assert not result.exception

    model_version = registered_model.get_version(name=version_name)
    assert model_version.name in result.output

    # environment carried over from the run
    env_str = str(model_version.get_environment())
    assert 'scikit-learn' in env_str
    assert 'Python' in env_str

    # model and artifacts carried over from the run
    assert model_for_deployment['model'].get_params() == model_version.get_model().get_params()
    assert np.array_equal(model_version.get_artifact("some-artifact"), artifact)
def test_from_file(self, requirements_file):
    """Constraints read from a pip file are parsed rather than stored raw."""
    reqs = Python.read_pip_file(requirements_file.name)

    env = Python(requirements=[], constraints=reqs)
    assert env._msg.python.constraints
    assert not env._msg.python.raw_constraints
    assert_parsed_reqs_match(env.constraints, reqs)
def test_update_twice(self, client, registered_model, created_endpoints):
    """Updating an endpoint a second time serves the newer model version.

    Fix: removed `json = pytest.importorskip("json")` — `json` is standard
    library (importorskip can never skip on it) and was never used here anyway.
    The unused `sklearn` binding is dropped; the skip itself is kept.
    """
    np = pytest.importorskip("numpy")
    pytest.importorskip("sklearn")
    from sklearn.linear_model import LogisticRegression

    env = Python(requirements=["scikit-learn"])

    classifier = LogisticRegression()
    classifier.fit(np.random.random((36, 12)), np.random.random(36).round())
    model_version = registered_model.create_version("first-version")
    model_version.log_model(classifier)
    model_version.log_environment(env)

    new_classifier = LogisticRegression()
    new_classifier.fit(np.random.random((36, 12)), np.random.random(36).round())
    new_model_version = registered_model.create_version("second-version")
    new_model_version.log_model(new_classifier)
    new_model_version.log_environment(env)

    path = verta._internal_utils._utils.generate_default_name()
    endpoint = client.set_endpoint(path)
    created_endpoints.append(endpoint)
    endpoint.update(model_version, DirectUpdateStrategy(), wait=True)

    # updating endpoint
    endpoint.update(new_model_version, DirectUpdateStrategy(), wait=True)

    test_data = np.random.random((4, 12))
    assert np.array_equal(endpoint.get_deployed_model().predict(test_data),
                          new_classifier.predict(test_data))
def test_update_from_json_config_model_version(self, client, in_tempdir,
                                               created_endpoints, model_version):
    """update_from_config() with a canary-strategy JSON config deploys the model.

    Fixes: `json` is standard library, so `pytest.importorskip("json")` could
    never skip — replaced with a plain import; removed the dead
    `original_status`/`original_build_ids` locals (computed, never asserted).
    """
    import json

    np = pytest.importorskip("numpy")
    pytest.importorskip("sklearn")
    from sklearn.linear_model import LogisticRegression

    classifier = LogisticRegression()
    classifier.fit(np.random.random((36, 12)), np.random.random(36).round())
    model_version.log_model(classifier, custom_modules=[])
    model_version.log_environment(Python(requirements=["scikit-learn"]))

    path = verta._internal_utils._utils.generate_default_name()
    endpoint = client.set_endpoint(path)
    created_endpoints.append(endpoint)

    # Creating config dict:
    strategy_dict = {
        "model_version_id": model_version.id,
        "strategy": "canary",
        "canary_strategy": {
            "progress_step": 0.5,
            "progress_interval_seconds": 1,
            "rules": [
                {"rule": "latency_avg_max",
                 "rule_parameters": [{"name": "threshold", "value": "0.1"}]},
                {"rule": "error_4xx_rate",
                 "rule_parameters": [{"name": "threshold", "value": "1"}]},
            ],
        },
    }

    filepath = "config.json"
    with open(filepath, "w") as f:
        json.dump(strategy_dict, f)

    endpoint.update_from_config(filepath, wait=True)

    # deployed predictions must match the in-process model
    test_data = np.random.random((4, 12))
    prediction = endpoint.get_deployed_model().predict(test_data)
    assert np.array_equal(prediction, classifier.predict(test_data))
def test_update_from_json_config_with_params(self, client, in_tempdir,
                                             created_entities, experiment_run,
                                             model_for_deployment):
    """update_from_config() applies autoscaling, env vars, and resources.

    Fixes: the body was split mid-comment across two source lines — rejoined;
    `yaml = pytest.importorskip("yaml")` imported a module this test never
    uses (it writes JSON), spuriously skipping when PyYAML is absent — removed;
    dead `original_status`/`original_build_ids` locals removed.
    """
    import json

    experiment_run.log_model(model_for_deployment['model'], custom_modules=[])
    experiment_run.log_environment(Python(['scikit-learn']))

    path = verta._internal_utils._utils.generate_default_name()
    endpoint = client.set_endpoint(path)
    created_entities.append(endpoint)

    # Creating config dict:
    config_dict = {
        "run_id": experiment_run.id,
        "strategy": "direct",
        "autoscaling": {
            "quantities": {"min_replicas": 1, "max_replicas": 4,
                           "min_scale": 0.5, "max_scale": 2.0},
            "metrics": [
                {"metric": "cpu_utilization",
                 "parameters": [{"name": "target", "value": "0.5"}]},
                {"metric": "memory_utilization",
                 "parameters": [{"name": "target", "value": "0.7"}]},
            ],
        },
        "env_vars": {"VERTA_HOST": "app.verta.ai"},
        "resources": {"cpu": 0.25, "memory": "100M"},
    }

    filepath = "config.json"
    with open(filepath, 'w') as f:
        json.dump(config_dict, f)

    endpoint.update_from_config(filepath)
    update_status = endpoint.get_update_status()

    # Check autoscaling:
    autoscaling_parameters = update_status["update_request"]["autoscaling"]
    assert autoscaling_parameters["quantities"] == config_dict["autoscaling"]["quantities"]

    autoscaling_metrics = autoscaling_parameters["metrics"]
    assert len(autoscaling_metrics) == 2
    for metric in autoscaling_metrics:
        assert metric["metric_id"] in [1001, 1002, 1003]
        if metric["metric_id"] == 1001:
            assert metric["parameters"][0]["value"] == "0.5"
        else:
            assert metric["parameters"][0]["value"] == "0.7"

    # Check env_vars:
    assert update_status["update_request"]["env"][0]["name"] == "VERTA_HOST"
    assert update_status["update_request"]["env"][0]["value"] == "app.verta.ai"

    # Check resources:
    resources_dict = Resources._from_dict(
        config_dict["resources"])._as_dict()  # config is `cpu`, wire is `cpu_millis`
    assert endpoint.get_update_status()['update_request']['resources'] == resources_dict
def test_no_autocapture(self):
    """With _autocapture=False, nothing is read from the live environment."""
    env_ver = Python(requirements=[], _autocapture=False)

    # protobuf message is empty
    assert not json_format.MessageToDict(
        env_ver._msg,
        including_default_value_fields=False,
    )
def test_from_env(self):
    """Requirements captured from the live environment are parsed and pinned."""
    reqs = Python.read_pip_environment(skip_options=True)

    env = Python(requirements=reqs)
    assert env._msg.python.requirements
    assert not env._msg.python.raw_requirements

    # verta and cloudpickle are pinned automatically on capture
    reqs = _pip_requirements_utils.pin_verta_and_cloudpickle(reqs)
    assert_parsed_reqs_match(env.requirements, reqs)
def test_from_files(self, requirements_file):
    """Requirements read from a pip file are parsed, with verta/cloudpickle pinned."""
    reqs = Python.read_pip_file(requirements_file.name)

    env = Python(requirements=reqs)
    assert env._msg.python.requirements
    assert not env._msg.python.raw_requirements

    reqs = pin_verta_and_cloudpickle(reqs)
    assert_parsed_reqs_match(env.requirements, reqs)
def test_from_file_no_versions(self, requirements_file_without_versions):
    """Unpinned requirements still parse; verta and cloudpickle are added."""
    reqs = Python.read_pip_file(requirements_file_without_versions.name)

    env = Python(requirements=reqs)
    assert env._msg.python.requirements
    assert not env._msg.python.raw_requirements

    # compare library names only, since the parsed entries may carry pins
    parsed_libraries = {req.split("==")[0] for req in env.requirements}
    assert parsed_libraries == set(reqs) | {"verta", "cloudpickle"}
def test_update_from_json_config(self, client, in_tempdir, created_entities,
                                 experiment_run, model_for_deployment):
    """`deployment update -f config.json` (canary strategy) triggers a new build.

    Fixes: `json` is standard library, so `pytest.importorskip("json")` could
    never skip — replaced with a plain import; removed a leftover debug
    `print(endpoint.get_update_status())`.
    """
    import json

    experiment_run.log_model(model_for_deployment['model'], custom_modules=[])
    experiment_run.log_environment(Python(['scikit-learn']))

    path = _utils.generate_default_name()
    endpoint = client.set_endpoint(path)
    created_entities.append(endpoint)

    original_build_ids = get_build_ids(endpoint.get_status())

    # Creating config dict:
    strategy_dict = {
        "run_id": experiment_run.id,
        "strategy": "canary",
        "canary_strategy": {
            "progress_step": 0.05,
            "progress_interval_seconds": 30,
            "rules": [
                {"rule": "latency_avg_max",
                 "rule_parameters": [{"name": "threshold", "value": "0.1"}]},
                {"rule": "error_4xx_rate",
                 "rule_parameters": [{"name": "threshold", "value": "1"}]},
            ],
        },
    }

    filepath = "config.json"
    with open(filepath, 'w') as f:
        json.dump(strategy_dict, f)

    result = CliRunner().invoke(
        cli,
        ['deployment', 'update', 'endpoint', path, "-f", filepath],
    )
    assert not result.exception

    # at least one build ID must be new relative to the pre-update snapshot
    updated_build_ids = get_build_ids(endpoint.get_status())
    assert len(updated_build_ids - original_build_ids) > 0