def _get_task_runtime_history(evg_api, project, task, variant):
    """
    Fetch historical average runtimes for all tests in a task from the Evergreen API.

    :param evg_api: Evergreen API client.
    :param project: Project name.
    :param task: Task name.
    :param variant: Variant name.
    :return: Historical test runtimes, parsed into teststats objects.
    """
    try:
        end_date = datetime.datetime.utcnow().replace(microsecond=0)
        start_date = end_date - datetime.timedelta(days=AVG_TEST_RUNTIME_ANALYSIS_DAYS)
        data = evg_api.test_stats_by_project(
            project, after_date=start_date.strftime("%Y-%m-%d"),
            before_date=end_date.strftime("%Y-%m-%d"), tasks=[task], variants=[variant],
            group_by="test", group_num_days=AVG_TEST_RUNTIME_ANALYSIS_DAYS)
        test_runtimes = teststats.TestStats(data).get_tests_runtimes()
        LOGGER.debug("Test runtime data parsed from Evergreen history: %s", test_runtimes)
        return test_runtimes
    except requests.HTTPError as err:
        if err.response.status_code == requests.codes.SERVICE_UNAVAILABLE:
            # Evergreen may return a 503 when the service is degraded.
            # Fall back to returning no test history.
            return []
        raise
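For context, a minimal usage sketch follows. It assumes the evergreen.py client's RetryingEvergreenApi; the project, task, and variant values are placeholders, not taken from the original code.

# Hypothetical usage sketch; the names passed in below are illustrative only.
from evergreen.api import RetryingEvergreenApi

evg_api = RetryingEvergreenApi.get_api(use_config_file=True)
runtimes = _get_task_runtime_history(evg_api, project="mongodb-mongo-master",
                                     task="aggregation", variant="enterprise-rhel-62-64-bit")
for test in runtimes:
    print(test.test_name, test.runtime)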
def calculate_suites_from_evg_stats(self, data, execution_time_secs):
    """Divide tests into suites that can be run in less than the specified execution time."""
    test_stats = teststats.TestStats(data)
    tests_runtimes = self.filter_existing_tests(test_stats.get_tests_runtimes())
    if not tests_runtimes:
        return self.calculate_fallback_suites()
    self.test_list = [info.test_name for info in tests_runtimes]
    return divide_tests_into_suites(tests_runtimes, execution_time_secs,
                                    self.config_options.max_sub_suites)
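The unit tests below build fake Evergreen results with a `_make_evg_result` helper that is not shown in this section. A minimal sketch of what it might look like, assuming the tests live in a unittest.TestCase and that TestStats only reads the `test_file`, `num_pass`, and `avg_duration_pass` fields:

from unittest.mock import Mock

@staticmethod
def _make_evg_result(test_file, num_pass, avg_duration):
    """Build a fake Evergreen test-stats result with only the fields TestStats is assumed to read."""
    return Mock(test_file=test_file, num_pass=num_pass, avg_duration_pass=avg_duration)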
def test_zero_runs(self):
    evg_results = [
        self._make_evg_result("dir/test1.js", 0, 0),
        self._make_evg_result("dir/test1.js", 0, 0),
    ]
    test_stats = teststats_utils.TestStats(evg_results)
    expected_runtimes = [
        teststats_utils.TestRuntime(test_name="dir/test1.js", runtime=0),
    ]
    self.assertEqual(expected_runtimes, test_stats.get_tests_runtimes())
def test_no_hooks(self):
    evg_results = [
        self._make_evg_result("dir/test1.js", 1, 10),
        self._make_evg_result("dir/test2.js", 1, 30),
        self._make_evg_result("dir/test1.js", 2, 25),
    ]
    test_stats = teststats_utils.TestStats(evg_results)
    expected_runtimes = [
        teststats_utils.TestRuntime(test_name="dir/test2.js", runtime=30),
        teststats_utils.TestRuntime(test_name="dir/test1.js", runtime=20),
    ]
    self.assertEqual(expected_runtimes, test_stats.get_tests_runtimes())
def test_hook_first(self):
    evg_results = [
        self._make_evg_result("test3:CleanEveryN", 10, 35),
        self._make_evg_result("dir/test1.js", 1, 10),
        self._make_evg_result("dir/test2.js", 1, 30),
        self._make_evg_result("dir/test1.js", 2, 25),
        self._make_evg_result("dir/test3.js", 5, 10),
        self._make_evg_result("test3:CheckReplDBHash", 10, 35),
    ]
    test_stats = teststats_utils.TestStats(evg_results)
    expected_runtimes = [
        teststats_utils.TestRuntime(test_name="dir/test3.js", runtime=80),
        teststats_utils.TestRuntime(test_name="dir/test2.js", runtime=30),
        teststats_utils.TestRuntime(test_name="dir/test1.js", runtime=20),
    ]
    self.assertEqual(expected_runtimes, test_stats.get_tests_runtimes())
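Taken together, these tests pin down the aggregation semantics: a test's runtime is the weighted average over its runs (guarded against zero runs), each hook entry (named "<test>:<HookName>") has its average attributed to the owning test, and results are sorted by descending runtime. The following sketch is derived from the tests above, not copied from the real teststats module, and satisfies all three:

import os
from collections import defaultdict
from typing import List, NamedTuple


class TestRuntime(NamedTuple):
    test_name: str
    runtime: float


class TestStats:
    """Sketch of the aggregation behavior implied by the tests above."""

    def __init__(self, evg_results):
        # test_file -> [total_seconds, num_runs], for weighted-average runtimes.
        self._test_totals = defaultdict(lambda: [0.0, 0])
        # short test name -> {hook_name: avg_seconds}; repeated hook entries would
        # overwrite here, where the real module presumably combines them.
        self._hook_avgs = defaultdict(dict)
        for result in evg_results:
            if ":" in result.test_file:  # hooks are named "<test>:<HookName>"
                test_name, hook_name = result.test_file.split(":", 1)
                self._hook_avgs[test_name][hook_name] = result.avg_duration_pass
            else:
                totals = self._test_totals[result.test_file]
                totals[0] += result.avg_duration_pass * result.num_pass
                totals[1] += result.num_pass

    def get_tests_runtimes(self) -> List[TestRuntime]:
        runtimes = []
        for test_file, (total_secs, num_runs) in self._test_totals.items():
            avg = total_secs / num_runs if num_runs else 0  # guard the zero-run case
            short_name = os.path.splitext(os.path.basename(test_file))[0]
            avg += sum(self._hook_avgs[short_name].values())  # add each hook's average once
            runtimes.append(TestRuntime(test_name=test_file, runtime=avg))
        return sorted(runtimes, key=lambda t: t.runtime, reverse=True)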
def calculate_suites_from_evg_stats(self, data: List[TestStats],
                                    execution_time_secs: int) -> List[Suite]:
    """
    Divide tests into suites that can be run in less than the specified execution time.

    :param data: Historical test results for the task being split.
    :param execution_time_secs: Target execution time of each suite (in seconds).
    :return: List of sub-suites calculated.
    """
    test_stats = teststats.TestStats(data)
    tests_runtimes = self.filter_tests(test_stats.get_tests_runtimes())
    if not tests_runtimes:
        LOGGER.debug("No test runtimes after filter, using fallback")
        return self.calculate_fallback_suites()
    self.test_list = [info.test_name for info in tests_runtimes]
    return divide_tests_into_suites(self.config_options.suite, tests_runtimes,
                                    execution_time_secs, self.config_options.max_sub_suites,
                                    self.config_options.max_tests_per_suite)
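`divide_tests_into_suites` itself is not shown in this section. A plausible greedy sketch under the signature used above: walk the tests in order and pack them into the current sub-suite until the target runtime or the per-suite test cap is hit, never opening more than the allowed number of sub-suites. The Suite stand-in and the packing policy are assumptions, not the repo's actual implementation.

from typing import List


class Suite:
    """Minimal stand-in for the generator's Suite type (assumed shape)."""

    def __init__(self, source_name: str):
        self.source_name = source_name
        self.tests: List[str] = []
        self.total_runtime = 0.0

    def add_test(self, test_name: str, runtime: float) -> None:
        self.tests.append(test_name)
        self.total_runtime += runtime

    def get_test_count(self) -> int:
        return len(self.tests)


def divide_tests_into_suites(suite_name, tests_runtimes, max_time_seconds, max_suites=None,
                             max_tests_per_suite=None) -> List[Suite]:
    """Greedily pack tests into sub-suites capped at max_time_seconds of expected runtime."""
    suites = [Suite(suite_name)]
    for test in tests_runtimes:
        current = suites[-1]
        over_time = current.total_runtime + test.runtime > max_time_seconds
        over_count = (max_tests_per_suite is not None
                      and current.get_test_count() >= max_tests_per_suite)
        at_suite_cap = max_suites is not None and len(suites) >= max_suites
        # Open a new sub-suite when a cap is hit, unless the suite-count cap forbids it.
        if current.tests and (over_time or over_count) and not at_suite_cap:
            suites.append(Suite(suite_name))
            current = suites[-1]
        current.add_test(test.test_name, test.runtime)
    return suites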