Example 1
    def calculate_suites(self, start_date: datetime,
                         end_date: datetime) -> List[Suite]:
        """
        Divide tests into suites based on statistics for the provided period.

        :param start_date: Time to start historical analysis.
        :param end_date: Time to end historical analysis.
        :return: List of sub-suites to be generated.
        """
        try:
            evg_stats = HistoricTaskData.from_evg(self.evergreen_api,
                                                  self.config_options.project,
                                                  start_date, end_date,
                                                  self.config_options.task,
                                                  self.config_options.variant)
            if not evg_stats:
                LOGGER.debug("No test history, using fallback suites")
                # This is probably a new suite since there is no test history; just use
                # the fallback values.
                return self.calculate_fallback_suites()
            target_execution_time_secs = self.config_options.target_resmoke_time * 60
            return self.calculate_suites_from_evg_stats(
                evg_stats, target_execution_time_secs)
        except requests.HTTPError as err:
            if err.response.status_code == requests.codes.SERVICE_UNAVAILABLE:
                # Evergreen may return a 503 when the service is degraded.
                # We fall back to splitting the tests into a fixed number of suites.
                LOGGER.warning("Received 503 from Evergreen, "
                               "dividing the tests evenly among suites")
                return self.calculate_fallback_suites()
            else:
                raise
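
The error handling above is a recurring pattern in these examples: treat a 503 from Evergreen as a soft failure and fall back, and re-raise everything else. A minimal standalone sketch of that pattern, assuming only the requests library (the fetch_stats and fallback callables are placeholders, not part of the original code):

import requests

def fetch_with_503_fallback(fetch_stats, fallback):
    """Return historic stats, falling back when Evergreen is degraded."""
    try:
        return fetch_stats()
    except requests.HTTPError as err:
        if err.response.status_code == requests.codes.SERVICE_UNAVAILABLE:
            # A 503 means the service is degraded, not that the request was
            # invalid, so degrade gracefully instead of failing the task.
            return fallback()
        raise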
Example 2
def _get_task_runtime_history(evg_api: Optional[EvergreenApi], project: str,
                              task: str, variant: str) -> List[TestRuntime]:
    """
    Fetch historical average runtime for all tests in a task from the Evergreen API.

    :param evg_api: Evergreen API.
    :param project: Project name.
    :param task: Task name.
    :param variant: Variant name.
    :return: Historical test runtimes, parsed into TestRuntime objects.
    """
    if not evg_api:
        return []

    try:
        end_date = datetime.datetime.utcnow().replace(microsecond=0)
        start_date = end_date - datetime.timedelta(
            days=AVG_TEST_RUNTIME_ANALYSIS_DAYS)
        test_stats = HistoricTaskData.from_evg(evg_api,
                                               project,
                                               start_date=start_date,
                                               end_date=end_date,
                                               task=task,
                                               variant=variant)
        return test_stats.get_tests_runtimes()
    except requests.HTTPError as err:
        if err.response.status_code == requests.codes.SERVICE_UNAVAILABLE:
            # Evergreen may return a 503 when the service is degraded.
            # We fall back to returning no test history.
            return []
        else:
            raise
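
The analysis window here is computed relative to the current UTC time, truncated to whole seconds. A small sketch of the same date arithmetic; the value of AVG_TEST_RUNTIME_ANALYSIS_DAYS is assumed for illustration, since the source does not show it:

import datetime

AVG_TEST_RUNTIME_ANALYSIS_DAYS = 14  # assumed value, not shown in the source

end_date = datetime.datetime.utcnow().replace(microsecond=0)
start_date = end_date - datetime.timedelta(days=AVG_TEST_RUNTIME_ANALYSIS_DAYS)
print(start_date, end_date)  # a two-week window ending now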
Example 3
    def split_suite(self, params: SuiteSplitParameters) -> GeneratedSuite:
        """
        Split the given resmoke suite into multiple sub-suites.

        :param params: Description of suite to split.
        :return: Generated suite containing the calculated sub-suites.
        """
        if self.config.default_to_fallback:
            return self.calculate_fallback_suites(params)

        try:
            evg_stats = HistoricTaskData.from_evg(
                self.evg_api, self.config.evg_project, self.config.start_date,
                self.config.end_date, params.task_name, params.build_variant)
            if not evg_stats:
                LOGGER.debug("No test history, using fallback suites")
                # This is probably a new suite since there is no test history; just use
                # the fallback values.
                return self.calculate_fallback_suites(params)
            return self.calculate_suites_from_evg_stats(evg_stats, params)
        except requests.HTTPError as err:
            if err.response.status_code == requests.codes.SERVICE_UNAVAILABLE:
                # Evergreen may return a 503 when the service is degraded.
                # We fall back to splitting the tests into a fixed number of suites.
                LOGGER.warning("Received 503 from Evergreen, "
                               "dividing the tests evenly among suites")
                return self.calculate_fallback_suites(params)
            else:
                raise
Example 4
    def get_task_runtime_history(self, task: str) -> List[TestRuntime]:
        """
        Query the runtime history of the specified task.

        :param task: Task to query.
        :return: List of runtime histories for all tests in the specified task.
        """
        try:
            project = self.generate_config.project
            variant = self.generate_config.build_variant
            end_date = self.history_end_date
            start_date = end_date - timedelta(
                days=AVG_TEST_RUNTIME_ANALYSIS_DAYS)
            test_stats = HistoricTaskData.from_evg(self.evg_api,
                                                   project,
                                                   start_date=start_date,
                                                   end_date=end_date,
                                                   task=task,
                                                   variant=variant)
            return test_stats.get_tests_runtimes()
        except requests.HTTPError as err:
            if err.response.status_code == requests.codes.SERVICE_UNAVAILABLE:
                # Evergreen may return a 503 when the service is degraded.
                # We fall back to returning no test history.
                return []
            else:
                raise
Example 5
    def add_task_hook_overhead(self, suites: List[Suite],
                               historic_stats: HistoricTaskData) -> None:
        """
        Add the task-level hook overhead that each suite should account for.

        Certain test hooks need to be accounted for on the task level instead of the test level
        in order to calculate accurate timeouts. So we will add details about those hooks to
        each suite here.

        :param suites: List of suites that were created.
        :param historic_stats: Historic runtime data of the suite.
        """
        # The CleanEveryN hook is run every 'N' tests. The runtime of the
        # hook will be associated with whichever test happens to be running, which could be
        # different every run. So we need to take its runtime into account at the task level.
        clean_every_n_cadence = self._get_clean_every_n_cadence()
        avg_clean_every_n_runtime = historic_stats.get_avg_hook_runtime(
            CLEAN_EVERY_N_HOOK)
        LOGGER.info("task hook overhead",
                    cadence=clean_every_n_cadence,
                    runtime=avg_clean_every_n_runtime)
        if avg_clean_every_n_runtime != 0:
            for suite in suites:
                n_expected_runs = suite.get_test_count() / clean_every_n_cadence
                suite.task_overhead += n_expected_runs * avg_clean_every_n_runtime
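
The overhead arithmetic is simple: if CleanEveryN runs once every N tests, a suite with t tests triggers roughly t / N hook runs, each costing the hook's average historic runtime. A worked sketch with illustrative numbers (not taken from the source):

clean_every_n_cadence = 24        # assumed: hook runs every 24 tests
avg_clean_every_n_runtime = 30.0  # assumed: 30 seconds per hook run

test_count = 120  # tests in one sub-suite
n_expected_runs = test_count / clean_every_n_cadence         # 5.0 expected runs
task_overhead = n_expected_runs * avg_clean_every_n_runtime  # 150.0 seconds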
Example 6
    def calculate_suites_from_evg_stats(
            self, test_stats: HistoricTaskData,
            execution_time_secs: int) -> List[Suite]:
        """
        Divide tests into suites that can be run in less than the specified execution time.

        :param test_stats: Historical test results for task being split.
        :param execution_time_secs: Target execution time of each suite (in seconds).
        :return: List of sub-suites calculated.
        """
        tests_runtimes = self.filter_tests(test_stats.get_tests_runtimes())
        if not tests_runtimes:
            LOGGER.debug("No test runtimes after filter, using fallback")
            return self.calculate_fallback_suites()

        self.test_list = [info.test_name for info in tests_runtimes]

        suites = divide_tests_into_suites(
            self.config_options.suite, tests_runtimes, execution_time_secs,
            self.config_options.max_sub_suites,
            self.config_options.max_tests_per_suite)

        self.add_task_hook_overhead(suites, test_stats)

        return suites
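
divide_tests_into_suites itself does not appear in these examples. A plausible greedy sketch under the same constraints (target runtime per suite, maximum number of sub-suites, maximum tests per suite); the TestRuntime shape is assumed from how info.test_name is used above:

from typing import List, NamedTuple, Optional

class TestRuntime(NamedTuple):
    test_name: str
    runtime: float  # average historic runtime, in seconds

def greedy_divide(tests: List[TestRuntime], max_time: float,
                  max_suites: Optional[int] = None,
                  max_tests: Optional[int] = None) -> List[List[str]]:
    """Greedily pack tests into sub-suites without exceeding the time budget."""
    suites: List[List[str]] = [[]]
    elapsed = 0.0
    for test in tests:
        full = (suites[-1] and elapsed + test.runtime > max_time) or \
               (max_tests is not None and len(suites[-1]) >= max_tests)
        if full and (max_suites is None or len(suites) < max_suites):
            # Start a new sub-suite; once max_suites is hit, keep filling the last one.
            suites.append([])
            elapsed = 0.0
        suites[-1].append(test.test_name)
        elapsed += test.runtime
    return suites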
Example 7
    def test_stats_with_no_clean_every_n_should_return_zero(self):
        timeout_service = build_mock_service()
        test_stats = HistoricTaskData.from_stats_list(
            [tst_stat_mock(f"test_{i}.js", 60, 1) for i in range(30)])

        overhead = timeout_service.get_task_hook_overhead(
            "suite", is_asan=False, test_count=30, historic_stats=test_stats)

        self.assertEqual(0.0, overhead)
Example 8
    def test_stats_with_clean_every_n_should_return_overhead(self):
        test_count = 30
        runtime = 25
        timeout_service = build_mock_service()
        test_stat_list = [tst_stat_mock(f"test_{i}.js", 60, 1) for i in range(test_count)]
        test_stat_list.extend([
            tst_stat_mock(f"test_{i}:{under_test.CLEAN_EVERY_N_HOOK}", runtime, 1)
            for i in range(10)
        ])
        random.shuffle(test_stat_list)
        test_stats = HistoricTaskData.from_stats_list(test_stat_list)

        overhead = timeout_service.get_task_hook_overhead(
            "suite", is_asan=True, test_count=test_count, historic_stats=test_stats)

        self.assertEqual(runtime * test_count, overhead)
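
The expected value follows from the ASAN path: with is_asan=True the hook's cadence presumably drops to one, so each of the 30 tests is expected to trigger one CleanEveryN run of 25 seconds, for 750 seconds of overhead. In Example 7 the stats contain no CleanEveryN entries at all, so the average hook runtime, and therefore the overhead, is zero.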
Example 9
    def calculate_suites_from_evg_stats(self, test_stats: HistoricTaskData,
                                        params: SuiteSplitParameters) -> GeneratedSuite:
        """
        Divide tests into suites that can be run in less than the specified execution time.

        :param test_stats: Historical test results for task being split.
        :param params: Description of how to split the suite.
        :return: Generated suite containing the calculated sub-suites.
        """
        execution_time_secs = self.config.target_resmoke_time * 60
        tests_runtimes = self.filter_tests(test_stats.get_tests_runtimes(), params)
        if not tests_runtimes:
            LOGGER.debug("No test runtimes after filter, using fallback")
            return self.calculate_fallback_suites(params)

        test_lists = self.split_strategy(tests_runtimes, execution_time_secs,
                                         self.config.max_sub_suites,
                                         self.config.max_tests_per_suite)

        return self.test_lists_to_suite(test_lists, params, tests_runtimes, test_stats)
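
Here the packing policy is injected as self.split_strategy: a callable taking the runtimes, the time budget, and the two limits, and returning lists of test names. A hedged sketch of one compatible strategy, the even division that the 503 fallback comments describe (signature inferred from the call site; not the project's actual implementation):

from typing import List, Optional

def even_split_strategy(tests_runtimes, execution_time_secs: int,
                        max_sub_suites: Optional[int],
                        max_tests_per_suite: Optional[int]) -> List[List[str]]:
    """Deal tests round-robin across a fixed number of sub-suites, ignoring runtimes."""
    n_suites = max_sub_suites or 1
    test_lists: List[List[str]] = [[] for _ in range(n_suites)]
    for i, test in enumerate(tests_runtimes):
        test_lists[i % n_suites].append(test.test_name)
    return [tests for tests in test_lists if tests]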
Example 10
    def lookup_historic_stats(self, timeout_params: TimeoutParams) -> Optional[HistoricTaskData]:
        """
        Look up historic test result stats for the given task.

        :param timeout_params: Details about the task to look up.
        :return: Historic test results if they exist.
        """
        try:
            evg_stats = HistoricTaskData.from_evg(
                self.evg_api, timeout_params.evg_project, self.timeout_settings.start_date,
                self.timeout_settings.end_date, timeout_params.task_name,
                timeout_params.build_variant)
            if not evg_stats:
                LOGGER.warning("No historic runtime information available")
                return None
            return evg_stats
        except Exception:  # pylint: disable=broad-except
            # If we have any trouble getting the historic runtime information, log the
            # issue and fall back to default timeouts instead of failing.
            LOGGER.warning("Error querying historic runtime information from evergreen",
                           exc_info=True)
            return None
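
Because lookup_historic_stats returns Optional[HistoricTaskData], its callers must supply a default when no history is available. A minimal usage sketch; every name below except lookup_historic_stats is hypothetical:

def get_timeouts(timeout_service, timeout_params, compute_from_history):
    """Use historic stats when available, otherwise conservative defaults."""
    historic_stats = timeout_service.lookup_historic_stats(timeout_params)
    if historic_stats is None:
        # No usable history (or Evergreen errored): fall back to defaults.
        return {"exec_timeout_secs": 3600, "timeout_secs": 1800}  # illustrative values
    return compute_from_history(historic_stats)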