def test_v2_set_weights_config():
    example = copy.deepcopy(er_example_step3)
    example['status']['currentWeightDistribution'] = [{
        "name": "default",
        "value": 50
    }, {
        "name": "canary",
        "value": 50
    }]
    example['spec']['strategy']['weights'] = {
        "maxCandidateWeight": 53,
        "maxCandidateWeightIncrement": 2,
        "algorithm": 'Progressive'
    }
    # Starting from a 50/50 split, an increment cap of 2 limits the canary
    # to 52 (the maxCandidateWeight of 53 is not the binding constraint).
    expected_resp = [{
        "name": "default",
        "value": 48
    }, {
        "name": "canary",
        "value": 52
    }]
    expr = ExperimentResource(**example)
    resp = get_weights(expr.convert_to_float())
    assert resp.data == expected_resp
def test_v2_canary_failing_lowerlimit_criteria():
    example = copy.deepcopy(er_example_step1)
    example['spec']['criteria']['objectives'][0].pop('upperLimit')
    example['spec']['criteria']['objectives'][0]['lowerLimit'] = 500
    expr = ExperimentResource(**example)
    resp = get_version_assessments(expr.convert_to_float())
    assert resp.data == {'default': [False], 'canary': [False]}
def test_v2_abn_general():
    example = copy.deepcopy(abn_er_example_step2)
    expr = ExperimentResource(**example)
    resp = get_winner_assessment(expr.convert_to_float())
    assert resp.data.winnerFound is True
    assert resp.data.winner == 'canary1'
def test_v2_weights_with_no_winner():
    example = copy.deepcopy(er_example_step3)
    example['status']['analysis']['winnerAssessment']['data'] = {
        "winnerFound": False
    }
    expr = ExperimentResource(**example)
    resp = get_weights(expr.convert_to_float())
    assert resp.data == w_response['data']
def test_v2_va_without_mean_latency_metric():
    example = copy.deepcopy(er_example_step1)
    example['status']['analysis']['aggregatedMetrics']["data"].pop(
        'mean-latency', None)
    expr = ExperimentResource(**example)
    resp = get_version_assessments(expr.convert_to_float())
    assert resp.message == \
        "Error: ; Warning: Aggregated metric object for mean-latency metric is unavailable.; Info: "
def test_v2_weights_with_winner():
    expr = ExperimentResource(**er_example_step3)
    resp = get_weights(expr.convert_to_float())
    expected_resp = [
        VersionWeight(name="default", value=5),
        VersionWeight(name="canary", value=95)
    ]
    assert resp.data == expected_resp
def test_v2_ab_without_reward_for_feasible_version():
    example = copy.deepcopy(ab_er_example_step2)
    del example['status']['analysis']['aggregatedMetrics']['data'][
        'business-revenue']['data']['canary']
    expr = ExperimentResource(**example)
    resp = get_winner_assessment(expr.convert_to_float())
    assert "reward value for feasible version canary is not available" in \
        resp.message
def test_v2_abn_with_better_reward_but_not_feasible():
    example = copy.deepcopy(abn_er_example_step2)
    # canary1 has the better reward but is marked infeasible here, so the
    # winner falls to the best feasible version, canary2.
    example['status']['analysis']['versionAssessments']['data']['canary1'] = [
        False
    ]
    expr = ExperimentResource(**example)
    resp = get_winner_assessment(expr.convert_to_float())
    assert resp.data.winnerFound is True
    assert resp.data.winner == 'canary2'
def test_v2_analytics_assessment_endpoint():
    with requests_mock.mock(real_http=True) as mock:
        file_path = os.path.join(os.path.dirname(__file__),
                                 'data/prom_responses',
                                 'prometheus_sample_response.json')
        mock.get(er_example["status"]["metrics"][0]["metricObj"]["spec"]
                 ["urlTemplate"],
                 json=json.load(open(file_path)))
        expr = ExperimentResource(**er_example)
        get_analytics_results(expr.convert_to_float()).convert_to_quantity()
def test_mock_metrics():
    ercopy = copy.deepcopy(er_example)
    ercopy["status"]["metrics"] = mocked_mr_example
    expr = ExperimentResource(**ercopy)
    agm = get_aggregated_metrics(expr.convert_to_float())
    logger.info(agm)
    assert agm.data['request-count'].data['default'].value > 100.0
    assert agm.data['request-count'].data['canary'].value > 100.0
    assert agm.data['mean-latency'].data['default'].value > 15.0
    assert agm.data['mean-latency'].data['canary'].value > 9.0
def test_mock_metrics_with_negative_elapsed():
    ercopy = copy.deepcopy(er_example)
    ercopy["status"]["metrics"] = mocked_mr_example
    expr = ExperimentResource(**ercopy)
    # A start time in the future yields a negative elapsed duration;
    # mocked metrics should still produce positive values.
    expr.status.startTime = datetime.now(timezone.utc) + timedelta(hours=10)
    agm = get_aggregated_metrics(expr.convert_to_float())
    logger.info(agm)
    assert agm.data['request-count'].data['default'].value > 0
    assert agm.data['request-count'].data['canary'].value > 0
    assert agm.data['mean-latency'].data['default'].value > 0
    assert agm.data['mean-latency'].data['canary'].value > 0
def test_v2_abn_am_without_candidates():
    with requests_mock.mock(real_http=True) as mock:
        file_path = os.path.join(os.path.dirname(__file__),
                                 'data/prom_responses',
                                 'prometheus_sample_response.json')
        mock.get(abn_er_example["status"]["metrics"][0]["metricObj"]["spec"]
                 ["urlTemplate"],
                 json=json.load(open(file_path)))
        example = copy.deepcopy(abn_er_example)
        del example['spec']['versionInfo']['candidates']
        expr = ExperimentResource(**example)
        get_aggregated_metrics(expr.convert_to_float()).convert_to_quantity()
def test_ab_reward_only_analytics_assessment_endpoint():
    with requests_mock.mock(real_http=True) as mock:
        file_path = os.path.join(os.path.dirname(__file__),
                                 'data/prom_responses',
                                 'prometheus_sample_response.json')
        mock.get(ab_er_example["status"]["metrics"][0]["metricObj"]["spec"]
                 ["urlTemplate"],
                 json=json.load(open(file_path)))
        ab_expr = copy.deepcopy(ab_er_example)
        del ab_expr["spec"]["criteria"]["objectives"]
        expr = ExperimentResource(**ab_expr)
        get_analytics_results(expr.convert_to_float()).convert_to_quantity()
def test_v2_no_prometheus_response():
    with requests_mock.mock(real_http=True) as mock:
        file_path = os.path.join(os.path.dirname(__file__),
                                 'data/prom_responses',
                                 'prometheus_sample_no_response.json')
        mock.get(er_example["status"]["metrics"][0]["metricObj"]["spec"]
                 ["urlTemplate"],
                 json=json.load(open(file_path)))
        expr = ExperimentResource(**er_example)
        resp = get_aggregated_metrics(
            expr.convert_to_float()).convert_to_quantity()
        expected_response = {
            "request-count": {
                "max": None,
                "min": None,
                "data": {
                    "default": {
                        "max": None,
                        "min": None,
                        "sample_size": None,
                        "value": None
                    },
                    "canary": {
                        "max": None,
                        "min": None,
                        "sample_size": None,
                        "value": None
                    }
                }
            },
            "mean-latency": {
                "max": None,
                "min": None,
                "data": {
                    "default": {
                        "max": None,
                        "min": None,
                        "sample_size": None,
                        "value": None
                    },
                    "canary": {
                        "max": None,
                        "min": None,
                        "sample_size": None,
                        "value": None
                    }
                }
            }
        }
        assert resp.data == expected_response
def test_v2_no_winner():
    example = copy.deepcopy(er_example_step2)
    example['status']['analysis']['versionAssessments'] = {
        "data": {
            "default": [False],
            "canary": [False]
        },
        "message": "All ok"
    }
    expr = ExperimentResource(**example)
    resp = get_winner_assessment(expr.convert_to_float())
    assert resp.data.winnerFound is False
def test_v2_abn_analytics_assessment_conformance():
    with requests_mock.mock(real_http=True) as mock:
        file_path = os.path.join(os.path.dirname(__file__),
                                 'data/prom_responses',
                                 'prometheus_sample_response.json')
        mock.get(abn_er_example["status"]["metrics"][0]["metricObj"]["spec"]
                 ["urlTemplate"],
                 json=json.load(open(file_path)))
        example = copy.deepcopy(abn_er_example)
        del example['spec']['versionInfo']['candidates']
        example['spec']['strategy']['testingPattern'] = 'Conformance'
        expr = ExperimentResource(**example)
        get_analytics_results(expr.convert_to_float()).convert_to_quantity()
def test_canary_is_winner():
    example = copy.deepcopy(er_example_step2)
    example['status']['analysis']['versionAssessments'] = {
        "data": {
            "default": [True],
            "canary": [True]
        },
        "message": "All ok"
    }
    expr = ExperimentResource(**example)
    resp = get_winner_assessment(expr.convert_to_float())
    assert resp.data.winnerFound == wa_response['data']['winnerFound']
    assert resp.data.winner == wa_response['data']['winner']
def test_v2_abn_aggregated_metrics_endpoint():
    with requests_mock.mock(real_http=True) as mock:
        file_path = os.path.join(os.path.dirname(__file__),
                                 'data/prom_responses',
                                 'prometheus_sample_response.json')
        response_json = json.load(open(file_path))
        mock.get(abn_er_example["status"]["metrics"][0]["metricObj"]["spec"]
                 ["urlTemplate"],
                 json=response_json)
        expr = ExperimentResource(**abn_er_example)
        agm = get_aggregated_metrics(
            expr.convert_to_float()).convert_to_quantity()
        assert agm.data['request-count'].data['default'].value == \
            response_json['data']['result'][0]['value'][1]
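# For reference, the sample file follows the standard Prometheus HTTP API
# response shape (the exact contents of prometheus_sample_response.json are
# not shown in this excerpt; the numbers below are illustrative only):
#
#     {
#       "status": "success",
#       "data": {
#         "resultType": "vector",
#         "result": [
#           {"metric": {}, "value": [1556823494.744, "327.26"]}
#         ]
#       }
#     }
#
# which is why the aggregated value is read from
# response_json['data']['result'][0]['value'][1].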
def test_v2_ab_without_reward():
    with requests_mock.mock(real_http=True) as mock:
        file_path = os.path.join(os.path.dirname(__file__),
                                 'data/prom_responses',
                                 'prometheus_sample_response.json')
        mock.get(ab_er_example["status"]["metrics"][0]["metricObj"]["spec"]
                 ["urlTemplate"],
                 json=json.load(open(file_path)))
        example = copy.deepcopy(ab_er_example)
        del example['spec']['criteria']['rewards']
        expr = ExperimentResource(**example)
        resp = get_analytics_results(
            expr.convert_to_float()).convert_to_quantity()
        assert "No reward metric in experiment" in resp.winner_assessment.message
        assert resp.winner_assessment.data.winnerFound is False
def test_v2_analytics_assessment_conformance_winner():
    example = copy.deepcopy(er_example_step2)
    example['status']['analysis']['versionAssessments'] = {
        "data": {
            "default": [True]
        },
        "message": "All ok"
    }
    del example['spec']['versionInfo']['candidates']
    example['spec']['strategy']['testingPattern'] = 'Conformance'
    expr = ExperimentResource(**example)
    resp = get_winner_assessment(expr.convert_to_float())
    # In a Conformance test the baseline is the only version; satisfying
    # all objectives makes it the winner.
    assert resp.data.winnerFound is True
    assert resp.data.winner == 'default'
def test_v2_abn_using_previous_metric_status_none():
    with requests_mock.mock(real_http=True) as mock:
        file_path = os.path.join(os.path.dirname(__file__),
                                 'data/prom_responses',
                                 'prometheus_sample_no_response.json')
        mock.get(abn_er_example["status"]["metrics"][0]["metricObj"]["spec"]
                 ["urlTemplate"],
                 json=json.load(open(file_path)))
        example = copy.deepcopy(abn_er_example)
        example['status']['metrics'] = abn_mr_example[:2]
        expr = ExperimentResource(**example)
        resp = get_aggregated_metrics(
            expr.convert_to_float()).convert_to_quantity()
        assert resp.data['mean-latency'].data['default'].value is None
def test_no_and_partialsecret_data(self, mock_secret):
    """When secret is valid, interpolate urlTemplate"""
    expr = ExperimentResource(**er_example)
    metric_resource: MetricResource = expr.status.metrics[0].metricObj
    metric_resource.spec.urlTemplate = "https://prometheus.com:${port}/$endpoint"
    metric_resource.spec.secret = "valid"

    # all placeholders present in the secret: full interpolation
    mock_secret.return_value = ({
        "port": 8080,
        "endpoint": "nothingtosee"
    }, None)
    (url, err) = get_url(metric_resource)
    mock_secret.assert_called_with(metric_resource)
    assert url == "https://prometheus.com:8080/nothingtosee"
    assert err is None

    # partial secret: unresolved placeholders are left intact
    mock_secret.return_value = ({"port": 8080}, None)
    (url, err) = get_url(metric_resource)
    mock_secret.assert_called_with(metric_resource)
    assert url == "https://prometheus.com:8080/$endpoint"
    assert err is None

    # empty secret data: nothing interpolated
    mock_secret.return_value = {}, None
    (url, err) = get_url(metric_resource)
    mock_secret.assert_called_with(metric_resource)
    assert url == "https://prometheus.com:${port}/$endpoint"
    assert err is None

    # no secret data at all: nothing interpolated
    mock_secret.return_value = None, None
    (url, err) = get_url(metric_resource)
    mock_secret.assert_called_with(metric_resource)
    assert url == "https://prometheus.com:${port}/$endpoint"
    assert err is None
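# The partial-interpolation behavior asserted above matches Python's
# string.Template.safe_substitute, which leaves unresolved placeholders
# intact instead of raising KeyError. Whether get_url uses Template
# internally is an assumption; this minimal sketch only illustrates the
# expected semantics:
#
#     from string import Template
#     template = Template("https://prometheus.com:${port}/$endpoint")
#     template.safe_substitute({"port": 8080, "endpoint": "nothingtosee"})
#     # -> "https://prometheus.com:8080/nothingtosee"
#     template.safe_substitute({"port": 8080})
#     # -> "https://prometheus.com:8080/$endpoint"
#     template.safe_substitute({})
#     # -> "https://prometheus.com:${port}/$endpoint"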
def test_sysdig_embedded_token(self):
    """Test Sysdig with an embedded token"""
    with requests_mock.mock(real_http=True) as req_mock:
        sde = MetricResource(**sysdig_embedded)
        url = sde.spec.urlTemplate
        response_json = {
            "data": [{
                "t": 1582756200,
                "d": [6.481]
            }],
            "start": 1582755600,
            "end": 1582756200
        }
        req_mock.register_uri(
            'POST', url, json=response_json, status_code=200,
            request_headers={
                'Authorization': 'Bearer 87654321-1234-1234-1234-123456789012'
            })

        expr = ExperimentResource(**er_example)
        version = expr.spec.versionInfo.baseline
        version.variables = [
            NamedValue(name="userfilter", value='usergroup!~"wakanda"'),
            NamedValue(name="revision", value='sample-app-v1')
        ]
        start_time = expr.status.startTime

        # verify body
        body, err = get_body(sde, version, start_time)
        logger.info(body)
        assert err is None
        groups = re.search("'sample-app-v1'", body["filter"])
        assert groups is not None

        # verify jq expression
        value, err = get_metric_value(sde, version, start_time)
        assert err is None
        assert value == 6.481
def test_new_relic_secret(self, mock_secret):
    """Test New Relic with a secret API Key"""
    with requests_mock.mock(real_http=True) as req_mock:
        nre = MetricResource(**new_relic_secret)
        url = nre.spec.urlTemplate
        file_path = os.path.join(os.path.dirname(__file__),
                                 'data/newrelic_responses',
                                 'newrelic_sample_response.json')
        response_json = json.load(open(file_path))
        req_mock.register_uri(
            'GET', url, json=response_json, status_code=200,
            request_headers={'X-Query-Key': 't0p-secret-api-key'})
        mock_secret.return_value = ({"mykey": "t0p-secret-api-key"}, None)

        expr = ExperimentResource(**er_example)
        version = expr.spec.versionInfo.baseline
        version.variables = [
            NamedValue(name="userfilter", value='usergroup!~"wakanda"'),
            NamedValue(name="revision", value='sample-app-v1')
        ]
        start_time = expr.status.startTime

        # verify params
        params = get_params(nre, version, start_time)
        logger.info(params)
        groups = re.search("'sample-app-v1'", params[0]["nrql"])
        assert groups is not None

        # verify jq expression
        value, err = get_metric_value(nre, version, start_time)
        assert err is None
        assert value == 80275388
def test_builtin_metrics_creation(self):
    """Test built-in metrics creation"""
    file_path = os.path.join(os.path.dirname(__file__), 'data/experiments',
                             'metricscollected.json')
    expr = ExperimentResource.parse_file(file_path)
    iam = get_builtin_metrics(expr)
    assert iam is not None
def test_basic_auth_invalid(self, mock_secret):
    """When authType is Basic, and secret is invalid, get error"""
    expr = ExperimentResource(**er_example)
    metric_resource: MetricResource = expr.status.metrics[0].metricObj
    metric_resource.spec.authType = AuthType.BASIC

    # no secret referenced: error without attempting a secret lookup
    auth, err = get_basic_auth(metric_resource)
    self.assertFalse(
        mock_secret.called,
        "attempt to fetch secret when no secret is referenced in metric resource")
    assert auth is None
    assert err is not None

    # secret lookup fails
    metric_resource.spec.secret = "invalid"
    mock_secret.return_value = (
        {}, KeyError("cannot find secret invalid in namespace iter8-system"))
    auth, err = get_basic_auth(metric_resource)
    mock_secret.assert_called_with(metric_resource)
    assert auth is None
    assert err is not None

    # secret lacks a password field
    mock_secret.return_value = ({"username": "me"}, None)
    auth, err = get_basic_auth(metric_resource)
    mock_secret.assert_called_with(metric_resource)
    assert auth is None
    assert err is not None
def test_basic_auth(self, mock_secret):
    """When authType is Basic, and secret is valid, get basic auth information"""
    expr = ExperimentResource(**er_example)
    metric_resource: MetricResource = expr.status.metrics[0].metricObj
    metric_resource.spec.authType = AuthType.BASIC
    metric_resource.spec.secret = "valid"
    # credentials must match the HTTPBasicAuth assertion below
    mock_secret.return_value = ({
        "username": "me",
        "password": "t0p-secret"
    }, None)
    auth, err = get_basic_auth(metric_resource)
    mock_secret.assert_called_with(metric_resource)
    assert auth == HTTPBasicAuth("me", "t0p-secret")
    assert err is None

    with requests_mock.mock(real_http=True) as req_mock:
        file_path = os.path.join(os.path.dirname(__file__),
                                 'data/prom_responses',
                                 'prometheus_sample_response.json')
        response_json = json.load(open(file_path))
        req_mock.get(metric_resource.spec.urlTemplate, json=response_json)
        version = expr.spec.versionInfo.baseline
        start_time = expr.status.startTime
        value, err = get_metric_value(metric_resource, version, start_time)
        assert err is None
        assert value == float(
            response_json["data"]["result"][0]["value"][1])
def provide_aggregated_metrics(ere: ExperimentResource = Body(
        ..., example=er_example)):
    """
    POST iter8 2.0 experiment resource and metric resources and obtain aggregated metrics.
    \f
    :body ere: ExperimentResource
    """
    return get_aggregated_metrics(ere.convert_to_float()).convert_to_quantity()
def test_no_secret_ref(self, mock_secret):
    """When secret is None, url equals urlTemplate"""
    expr = ExperimentResource(**er_example)
    metric_resource: MetricResource = expr.status.metrics[0].metricObj
    url, _ = get_url(metric_resource)
    self.assertFalse(
        mock_secret.called,
        "attempt to fetch secret when no secret is referenced in metric resource")
    assert url == metric_resource.spec.urlTemplate
def provide_analytics_results(expr: ExperimentResource = Body(
        ..., example=er_example)):
    """
    POST iter8 2.0 experiment resource and metric resources and get analytics results.
    \f
    :body expr: ExperimentResource
    """
    return get_analytics_results(expr.convert_to_float()).convert_to_quantity()
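# A hedged usage sketch: provide_aggregated_metrics and
# provide_analytics_results are FastAPI request handlers (their @app.post
# decorators and route paths are not shown in this excerpt; the path and the
# `app` instance name below are assumptions), so they can be exercised
# end-to-end with fastapi.testclient:
#
#     from fastapi.testclient import TestClient
#     client = TestClient(app)
#     resp = client.post("/v2/analytics_results", json=er_example)
#     assert resp.status_code == 200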