def test_multiple_ratio_metrics_as_reward(self):
    """Experiment with more than one ratio metric marked as reward.

    Supplying two reward criteria is expected to be rejected by the
    experiment pipeline, so an HTTPException here is the anticipated
    outcome rather than a failure.
    """
    with requests_mock.mock(real_http=True) as m:
        file_path = os.path.join(os.path.dirname(__file__),
                                 '../../../data/prom_responses',
                                 'prometheus_sample_response.json')
        # Use a context manager so the fixture file handle is closed
        # (the original `json.load(open(...))` leaked the handle).
        with open(file_path) as response_file:
            m.get(metrics_endpoint, json=json.load(response_file))
        try:
            eg = copy.deepcopy(eip_example)
            eg["criteria"].append({
                "id": "1",
                "metric_id": "iter8_error_rate",
                "is_reward": True
            })
            eg["criteria"].append({
                "id": "2",
                "metric_id": "conversion_rate",
                "is_reward": True
            })
            eip = ExperimentIterationParameters(**eg)
            exp = Experiment(eip)
            exp.run()
        except HTTPException:
            # Expected: multiple reward metrics are not allowed.
            pass
def test_start_time_with_current_time(self):
    """Run an experiment whose start_time is set to "now" (UTC).

    NOTE(review): a later method in this file reuses this exact name,
    so when both live in the same class this definition is shadowed
    and never collected — consider renaming one of them.
    """
    with requests_mock.mock(real_http=True) as m:
        file_path = os.path.join(os.path.dirname(__file__),
                                 '../../../data/prom_responses',
                                 'prometheus_sample_response.json')
        # Context manager closes the fixture file (original leaked it).
        with open(file_path) as response_file:
            m.get(metrics_endpoint, json=json.load(response_file))
        eip = ExperimentIterationParameters(**eip_with_percentile)
        eip.start_time = datetime.now(timezone.utc)
        exp = Experiment(eip)
        exp.run()
def test_get_ratio_max_min(self):
    """Exercise ratio max/min handling across several last-state shapes.

    Runs the experiment with no last state, full last state, partial
    last state, and explicit ratio_max_mins inputs in sequence.

    NOTE(review): a later method in this file reuses this exact name,
    so when both live in the same class this definition is shadowed
    and never collected — consider renaming one of them.
    """
    with requests_mock.mock(real_http=True) as m:
        file_path = os.path.join(os.path.dirname(__file__),
                                 '../../../data/prom_responses',
                                 'prometheus_sample_response.json')
        # Context manager closes the fixture file (original leaked it).
        with open(file_path) as response_file:
            m.get(metrics_endpoint, json=json.load(response_file))
        # Each example represents a different last-state configuration;
        # every one must run without raising.
        for example in (eip_example,
                        reviews_example_with_last_state,
                        reviews_example_with_partial_last_state,
                        reviews_example_with_ratio_max_mins):
            eip = ExperimentIterationParameters(**example)
            exp = Experiment(eip)
            exp.run()
def test_relative_threshold(self):
    """Assessment with a relative threshold criterion.

    With the no-data Prometheus response, productpage-v3 is expected
    to win with probability 1.0, and the returned last_state must be
    fully populated.
    """
    with requests_mock.mock(real_http=True) as m:
        # Context manager closes the fixture file (original leaked it).
        with open("tests/data/prometheus_no_data_response.json") as response_file:
            m.get(metrics_endpoint, json=json.load(response_file))
        eip_with_relative = copy.deepcopy(eip_with_assessment)
        eip_with_relative["criteria"][0] = {
            "id": "iter8_mean_latency",
            "metric_id": "iter8_mean_latency",
            "is_reward": False,
            "threshold": {
                "threshold_type": "relative",
                "value": 1.6
            }
        }
        eip = ExperimentIterationParameters(**eip_with_relative)
        exp = Experiment(eip)
        res = exp.run()
        for c in res.candidate_assessments:
            if c.id == 'productpage-v3':
                assert c.win_probability == 1.0
        # last_state must carry everything needed for the next iteration.
        assert res.last_state
        assert res.last_state["traffic_split_recommendation"]
        assert res.last_state["aggregated_counter_metrics"]
        assert res.last_state["aggregated_ratio_metrics"]
        assert res.last_state["ratio_max_mins"]
def test_counter_metric_as_reward(self):
    """Experiment using a counter metric as the reward.

    A counter metric flagged as reward is expected to be rejected by
    the experiment pipeline, so an HTTPException is the anticipated
    outcome rather than a failure.
    """
    with requests_mock.mock(real_http=True) as m:
        # Context manager closes the fixture file (original leaked it).
        with open("tests/data/prometheus_sample_response.json") as response_file:
            m.get(metrics_endpoint, json=json.load(response_file))
        try:
            eg = copy.deepcopy(eip_example)
            eg["criteria"].append({
                "id": "1",
                "metric_id": "conversion_count",
                "is_reward": True
            })
            eip = ExperimentIterationParameters(**eg)
            exp = Experiment(eip)
            exp.run()
        except HTTPException:
            # Expected: counter metrics cannot be used as reward.
            pass
def test_start_time_with_current_time(self):
    """Timing run: execute 10 iterations and record the elapsed seconds.

    Appends the wall-clock duration of 10 `exp.run()` calls to
    demofile2.txt. Scale the version count via eip_with_percentile and
    the sample response to benchmark larger experiments.

    NOTE(review): this method shares its name with an earlier one in
    this file; when both live in the same class, this definition
    shadows the earlier one — consider renaming.
    """
    with requests_mock.mock(real_http=True) as m:
        # Context manager closes the fixture file (original leaked it).
        with open("tests/data/prometheus_sample_response.json") as response_file:
            m.get(metrics_endpoint, json=json.load(response_file))
        eip = ExperimentIterationParameters(**eip_with_percentile)
        eip.start_time = datetime.now(timezone.utc)
        exp = Experiment(eip)
        started = time.time()
        for _ in range(10):
            exp.run()
        finished = time.time()
        # `with` guarantees the results file is flushed and closed even
        # if the write raises (original used open/write/close).
        with open("demofile2.txt", "a") as results_file:
            results_file.write(f"{finished - started}")
def test_aa_experiment(self):
    """A/A experiment: baseline and candidate are the same version.

    Since both arms point at identical data, their request counts must
    match exactly.
    """
    with requests_mock.mock(real_http=True) as m:
        file_path = os.path.join(os.path.dirname(__file__),
                                 '../../../data/prom_responses',
                                 'prometheus_sample_response.json')
        # Context manager closes the fixture file (original leaked it).
        with open(file_path) as response_file:
            m.get(metrics_endpoint, json=json.load(response_file))
        eip_aa = ExperimentIterationParameters(**reviews_example_aa)
        exp_aa = Experiment(eip_aa)
        res = exp_aa.run()
        logger.info("AA Result")
        logger.info(pformat(res.dict(), indent=2))
        assert (res.baseline_assessment.request_count ==
                res.candidate_assessments[0].request_count)
def test_missing_iter8_request_count(self):
    """Experiment spec without the iter8_request_count metric.

    When the request-count metric is absent from the criteria, every
    assessment's request_count must come back as None rather than a
    stale or zero value.
    """
    with requests_mock.mock(real_http=True) as m:
        # Context manager closes the fixture file (original leaked it).
        with open("tests/data/prometheus_sample_response.json") as response_file:
            m.get(metrics_endpoint, json=json.load(response_file))
        eip = ExperimentIterationParameters(
            **reviews_example_without_request_count)
        exp = Experiment(eip)
        resp = exp.run()
        assert resp.baseline_assessment.request_count is None
        for assessment in resp.candidate_assessments:
            assert assessment.request_count is None
def test_assessment(self):
    """Basic assessment run against a no-data Prometheus response.

    Even with no metric data, the run must produce a fully populated
    last_state for the next iteration.
    """
    with requests_mock.mock(real_http=True) as m:
        # Context manager closes the fixture file (original leaked it).
        with open("tests/data/prometheus_no_data_response.json") as response_file:
            m.get(metrics_endpoint, json=json.load(response_file))
        eip = ExperimentIterationParameters(**eip_with_assessment)
        exp = Experiment(eip)
        res = exp.run()
        assert res.last_state
        assert res.last_state["traffic_split_recommendation"]
        assert res.last_state["aggregated_counter_metrics"]
        assert res.last_state["aggregated_ratio_metrics"]
        assert res.last_state["ratio_max_mins"]
def test_relative_win_probability_and_threshold_assessment(self):
    """Relative win-probability and threshold assessments together.

    With the no-data response, productpage-v3 is expected to win with
    probability 1.0, and last_state must be fully populated.
    """
    with requests_mock.mock(real_http=True) as m:
        # Context manager closes the fixture file (original leaked it).
        with open("tests/data/prometheus_no_data_response.json") as response_file:
            m.get(metrics_endpoint, json=json.load(response_file))
        # deepcopy guards the shared fixture dict against mutation by
        # the parameter model; keep spec and model in separate names.
        spec = copy.deepcopy(eip_with_relative_assessments)
        eip = ExperimentIterationParameters(**spec)
        exp = Experiment(eip)
        res = exp.run()
        for c in res.candidate_assessments:
            if c.id == 'productpage-v3':
                assert c.win_probability == 1.0
        assert res.last_state
        assert res.last_state["traffic_split_recommendation"]
        assert res.last_state["aggregated_counter_metrics"]
        assert res.last_state["aggregated_ratio_metrics"]
        assert res.last_state["ratio_max_mins"]
def test_get_ratio_max_min(self):
    """Exercise ratio max/min handling across several last-state shapes.

    Runs the experiment with no last state, full last state, partial
    last state, and explicit ratio_max_mins inputs in sequence.

    NOTE(review): this method shares its name with an earlier one in
    this file; when both live in the same class, this definition
    shadows the earlier one — consider renaming.
    """
    with requests_mock.mock(real_http=True) as m:
        # Context manager closes the fixture file (original leaked it).
        with open("tests/data/prometheus_sample_response.json") as response_file:
            m.get(metrics_endpoint, json=json.load(response_file))
        # Each example represents a different last-state configuration;
        # every one must run without raising.
        for example in (eip_example,
                        reviews_example_with_last_state,
                        reviews_example_with_partial_last_state,
                        reviews_example_with_ratio_max_mins):
            eip = ExperimentIterationParameters(**example)
            exp = Experiment(eip)
            exp.run()