Code Example #1
    def test_filter_blacklist_files_for_windows(self):
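        """
        Tests absent from the suite config should be filtered out, even when the
        config lists its tests with Windows path separators.
        """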
        tests_runtimes = [
            TestRuntime(test_name="dir1/file1.js", runtime=20.32),
            TestRuntime(test_name="dir2/file2.js", runtime=24.32),
            TestRuntime(test_name="dir1/dir3/file3.js", runtime=36.32),
        ]

        blacklisted_test = tests_runtimes[1][0]

        with patch("os.path.exists") as exists_mock, patch(ns("suitesconfig")) as suitesconfig_mock:
            exists_mock.return_value = True
            evg = MagicMock()
            suitesconfig_mock.get_suite.return_value.tests = [
                runtime[0].replace("/", "\\") for runtime in tests_runtimes
                if runtime[0] != blacklisted_test
            ]
            config_options = MagicMock(suite="suite")

            gen_sub_suites = under_test.GenerateSubSuites(evg, config_options)
            filtered_list = gen_sub_suites.filter_existing_tests(tests_runtimes)

            self.assertNotIn(blacklisted_test, filtered_list)
            self.assertIn(tests_runtimes[2], filtered_list)
            self.assertIn(tests_runtimes[0], filtered_list)
            self.assertEqual(2, len(filtered_list))
Code Example #2
    def test_filter_missing_files(self):
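        """Tests whose files do not exist on disk should be filtered out."""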
        tests_runtimes = [
            teststats_utils.TestRuntime(test_name="dir1/file1.js",
                                        runtime=20.32),
            teststats_utils.TestRuntime(test_name="dir2/file2.js",
                                        runtime=24.32),
            teststats_utils.TestRuntime(test_name="dir1/file3.js",
                                        runtime=36.32),
        ]

        with patch("os.path.exists") as exists_mock, patch(
                ns("suitesconfig")) as suitesconfig_mock:
            exists_mock.side_effect = [False, True, True]
            evg = Mock()
            suitesconfig_mock.get_suite.return_value.tests = \
                [runtime[0] for runtime in tests_runtimes]
            config_options = MagicMock(suite="suite")

            gen_sub_suites = under_test.GenerateSubSuites(evg, config_options)
            filtered_list = gen_sub_suites.filter_existing_tests(
                tests_runtimes)

            self.assertEqual(2, len(filtered_list))
            self.assertNotIn(tests_runtimes[0], filtered_list)
            self.assertIn(tests_runtimes[2], filtered_list)
            self.assertIn(tests_runtimes[1], filtered_list)
Code Example #3
    def test_is_asan_build_with_no_san_options(self):
        evg = MagicMock()
        config_options = MagicMock(suite="suite", san_options=None)

        gen_sub_suites = under_test.GenerateSubSuites(evg, config_options)

        self.assertFalse(gen_sub_suites._is_asan_build())
Code Example #4
    def generate_evg_tasks(self, burn_in_test=None, burn_in_idx=0):
        # pylint: disable=too-many-locals
        """
        Generate evergreen tasks for multiversion tests.

        The number of tasks generated equals
        (the number of version configs) * (the number of generated suites).

        :param burn_in_test: The test to be run as part of the burn in multiversion suite.
        :param burn_in_idx: The index of the burn_in_test among the burn-in tests.
        """
        is_sharded = is_suite_sharded(TEST_SUITE_DIR, self.options.suite)
        if is_sharded:
            version_configs = SHARDED_MIXED_VERSION_CONFIGS
        else:
            version_configs = REPL_MIXED_VERSION_CONFIGS

        if self.options.is_jstestfuzz:
            return self._generate_fuzzer_tasks(version_configs, is_sharded)

        # Divide tests into suites based on run-time statistics for the last
        # LOOKBACK_DURATION_DAYS. Tests without enough run-time statistics will be placed
        # in the misc suite.
        gen_suites = generate_resmoke.GenerateSubSuites(
            self.evg_api, self.options)
        end_date = datetime.datetime.utcnow().replace(microsecond=0)
        start_date = end_date - datetime.timedelta(
            days=generate_resmoke.LOOKBACK_DURATION_DAYS)
        suites = gen_suites.calculate_suites(start_date, end_date)
        # Render the given suites into yml files that can be used by resmoke.py.
        config_file_dict = generate_resmoke.render_suite_files(
            suites, self.options.suite, gen_suites.test_list, TEST_SUITE_DIR,
            self.options.create_misc_suite)
        generate_resmoke.write_file_dict(CONFIG_DIR, config_file_dict)

        if burn_in_test is not None:
            # Generate the subtasks to run burn_in_test against the appropriate mixed version
            # configurations. The display task is defined later as part of generating the burn
            # in tests.
            self._generate_burn_in_execution_tasks(version_configs, suites,
                                                   burn_in_test, burn_in_idx,
                                                   is_sharded)
            return self.evg_config

        for version_config in version_configs:
            idx = 0
            for suite in suites:
                # Generate the newly divided test suites
                source_suite = os.path.join(CONFIG_DIR, suite.name + ".yml")
                self._generate_sub_task(version_config, self.task, idx,
                                        source_suite, len(suites), is_sharded)
                idx += 1

            # Also generate the misc task.
            misc_suite_name = "{0}_misc".format(self.options.suite)
            misc_suite = os.path.join(CONFIG_DIR, misc_suite_name + ".yml")
            self._generate_sub_task(version_config, self.task, idx, misc_suite,
                                    1, is_sharded)
            idx += 1
        self.create_display_task(self.task, self.task_specs, self.task_names)
        return self.evg_config
Code Example #5
    def test_is_asan_build_with_san_options_non_asan(self):
        evg = MagicMock()
        config_options = MagicMock(suite="suite",
                                   san_options="SAN_OPTIONS=\"check_initialization_order=true\"")

        gen_sub_suites = under_test.GenerateSubSuites(evg, config_options)

        self.assertFalse(gen_sub_suites._is_asan_build())
Code Example #6
    def test_is_asan_build_on_asan_builds(self):
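        """A build with ASAN_OPTIONS in san_options should be detected as an ASAN build."""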
        evg = MagicMock()
        config_options = MagicMock(
            suite="suite",
            san_options="ASAN_OPTIONS=\"detect_leaks=1:check_initialization_order=true\"")

        gen_sub_suites = under_test.GenerateSubSuites(evg, config_options)

        self.assertTrue(gen_sub_suites._is_asan_build())
Code Example #7
    def test_clean_every_n_cadence_on_asan(self):
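        """ASAN builds should use a CleanEveryN cadence of 1."""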
        evg = MagicMock()
        config_options = MagicMock(
            suite="suite",
            san_options="ASAN_OPTIONS=\"detect_leaks=1:check_initialization_order=true\"")

        gen_sub_suites = under_test.GenerateSubSuites(evg, config_options)

        cadence = gen_sub_suites._get_clean_every_n_cadence()

        self.assertEqual(1, cadence)
Code Example #8
    def test_calculate_suites_error(self):
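        """An unexpected HTTP error from the test stats endpoint should be re-raised."""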
        response = MagicMock()
        response.status_code = requests.codes.INTERNAL_SERVER_ERROR
        evg = MagicMock()
        evg.test_stats_by_project.side_effect = requests.HTTPError(response=response)
        config_options = self.get_mock_options()

        gen_sub_suites = under_test.GenerateSubSuites(evg, config_options)
        gen_sub_suites.list_tests = MagicMock(return_value=self.get_test_list(100))

        with self.assertRaises(requests.HTTPError):
            gen_sub_suites.calculate_suites(_DATE, _DATE)
Code Example #9
    def test_clean_every_n_cadence_no_hook_config(self, mock_read_suite_config):
        evg = MagicMock()
        config_options = MagicMock(
            suite="suite",
            san_options=None,
        )
        mock_read_suite_config.return_value = {"executor": {"hooks": [{
            "class": "hook1",
        }, ]}}

        gen_sub_suites = under_test.GenerateSubSuites(evg, config_options)
        cadence = gen_sub_suites._get_clean_every_n_cadence()

        self.assertEqual(1, cadence)
Code Example #10
    def test_calculate_suites_uses_fallback_for_no_results(self):
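        """An empty test stats response should fall back to dividing tests into fallback_num_sub_suites suites."""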
        n_tests = 100
        evg = MagicMock()
        evg.test_stats_by_project.return_value = []
        config_options = self.get_mock_options()

        gen_sub_suites = under_test.GenerateSubSuites(evg, config_options)
        gen_sub_suites.list_tests = MagicMock(return_value=self.get_test_list(n_tests))
        suites = gen_sub_suites.calculate_suites(_DATE, _DATE)

        self.assertEqual(gen_sub_suites.config_options.fallback_num_sub_suites, len(suites))
        for suite in suites:
            self.assertEqual(50, len(suite.tests))

        self.assertEqual(n_tests, len(gen_sub_suites.test_list))
Code Example #11
    def test_calculate_suites_fallback(self):
        n_tests = 100
        evg = mock_test_stats_unavailable(MagicMock())
        config_options = self.get_mock_options()

        gen_sub_suites = under_test.GenerateSubSuites(evg, config_options)
        gen_sub_suites.list_tests = MagicMock(return_value=self.get_test_list(n_tests))

        suites = gen_sub_suites.calculate_suites(_DATE, _DATE)

        self.assertEqual(gen_sub_suites.config_options.fallback_num_sub_suites, len(suites))
        for suite in suites:
            self.assertEqual(50, len(suite.tests))

        self.assertEqual(n_tests, len(gen_sub_suites.test_list))
Code Example #12
    def test_when_task_has_already_run_successfully(self):
        """
        Given evergreen_generate_resmoke_tasks has already been run successfully by this task,
        When it attempts to run again,
        It does not generate any files.
        """
        evg_api_mock = self._mock_evg_api(successful_task=True)

        mock_config = self._mock_config()
        config = self._config_options(mock_config)

        with TemporaryDirectory() as tmpdir:
            mock_config["generated_config_dir"] = tmpdir
            under_test.GenerateSubSuites(evg_api_mock, config).run()

            self.assertEqual(0, len(os.listdir(tmpdir)))
Code Example #13
    def generate_resmoke_suites(self) -> List[Suite]:
        """Generate the resmoke configuration files for this generator."""
        # Divide tests into suites based on run-time statistics for the last
        # LOOKBACK_DURATION_DAYS. Tests without enough run-time statistics will be placed
        # in the misc suite.
        gen_suites = generate_resmoke.GenerateSubSuites(self.evg_api, self.options)
        end_date = datetime.datetime.utcnow().replace(microsecond=0)
        start_date = end_date - datetime.timedelta(days=generate_resmoke.LOOKBACK_DURATION_DAYS)
        suites = gen_suites.calculate_suites(start_date, end_date)
        # Render the given suites into yml files that can be used by resmoke.py.
        config_file_dict = generate_resmoke.render_suite_files(suites, self.options.suite,
                                                               gen_suites.test_list, TEST_SUITE_DIR,
                                                               self.options.create_misc_suite)
        generate_resmoke.write_file_dict(CONFIG_DIR, config_file_dict)

        return suites
Code Example #14
    def test_with_each_test_in_own_task(self, suites_config_mock):
        """
        Given a task with all tests having a historic runtime over the target,
        When evergreen_generate_resmoke_tasks attempts to generate suites,
        It generates a suite for each test.
        """
        evg_api_mock = self._mock_evg_api()

        mock_config = self._mock_config()
        config = self._config_options(mock_config)
        task = mock_config["task_name"][:-4]

        mock_config['target_resmoke_time'] = 10  # 10 minute max test time.
        n_tests = 4

        with TemporaryDirectory() as tmpdir:
            target_directory, source_directory = self._prep_dirs(
                tmpdir, mock_config)
            suite_path = os.path.join(source_directory, task)
            mock_config["suite"] = suite_path
            test_list = self._mock_test_files(source_directory, n_tests,
                                              15 * 60, evg_api_mock,
                                              suites_config_mock)
            mock_resmoke_config_file(test_list, suite_path + ".yml")

            under_test.enable_logging(True)

            under_test.GenerateSubSuites(evg_api_mock, config).run()

            # Were all the config files created? There should be one for each suite as well as
            # the evergreen json config.
            generated_files = os.listdir(target_directory)
            # The expected suite count is the number of tests + the _misc suite.
            expected_suite_count = n_tests + 1
            # We expect files for all the suites + the evergreen json config.
            self.assertEqual(expected_suite_count + 1, len(generated_files))

            # Taking a closer look at the evergreen json config.
            expected_shrub_file = f"{config.task}.json"
            self.assertIn(expected_shrub_file, generated_files)
            with open(os.path.join(target_directory,
                                   expected_shrub_file)) as fileh:
                shrub_config = json.load(fileh)

                # Is there a task in the config for all the suites we created?
                self.assertEqual(expected_suite_count,
                                 len(shrub_config["tasks"]))
Code Example #15
    def test_calculate_suites_fallback(self):
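        """
        A "service unavailable" response from the test stats endpoint should
        fall back to fallback_num_sub_suites.
        """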
        n_tests = 100
        response = Mock()
        response.status_code = requests.codes.SERVICE_UNAVAILABLE
        evg = Mock()
        evg.test_stats_by_project.side_effect = requests.HTTPError(response=response)
        config_options = self.get_mock_options()

        gen_sub_suites = under_test.GenerateSubSuites(evg, config_options)
        gen_sub_suites.list_tests = Mock(return_value=self.get_test_list(n_tests))

        suites = gen_sub_suites.calculate_suites(_DATE, _DATE)

        self.assertEqual(gen_sub_suites.config_options.fallback_num_sub_suites, len(suites))
        for suite in suites:
            self.assertEqual(50, len(suite.tests))

        self.assertEqual(n_tests, len(gen_sub_suites.test_list))
Code Example #16
    def test_when_evg_test_stats_is_down(self, suites_config_mock):
        """
        Given Evergreen historic test stats endpoint is disabled,
        When evergreen_generate_resmoke_tasks attempts to generate suites,
        It generates suites based on "fallback_num_sub_suites".
        """
        evg_api_mock = mock_test_stats_unavailable(self._mock_evg_api())

        mock_config = self._mock_config()
        config = self._config_options(mock_config)
        task = mock_config["task_name"][:-4]

        n_tests = 100

        with TemporaryDirectory() as tmpdir:
            target_directory, source_directory = self._prep_dirs(
                tmpdir, mock_config)
            suite_path = os.path.join(source_directory, task)
            mock_config["suite"] = suite_path
            test_list = self._mock_test_files(source_directory, n_tests, 5,
                                              evg_api_mock, suites_config_mock)
            mock_resmoke_config_file(test_list, suite_path + ".yml")

            under_test.GenerateSubSuites(evg_api_mock, config).run()

            # Were all the config files created? There should be one for each suite as well as
            # the evergreen json config.
            generated_files = os.listdir(target_directory)
            # The expected suite count is the number of fallback suites + the _misc suite.
            expected_suite_count = mock_config["fallback_num_sub_suites"] + 1
            # We expect files for all the suites + the evergreen json config.
            self.assertEqual(expected_suite_count + 1, len(generated_files))

            # Taking a closer look at the evergreen json config.
            expected_shrub_file = f"{config.task}.json"
            self.assertIn(expected_shrub_file, generated_files)
            with open(os.path.join(target_directory,
                                   expected_shrub_file)) as fileh:
                shrub_config = json.load(fileh)

                # Is there a task in the config for all the suites we created?
                self.assertEqual(expected_suite_count,
                                 len(shrub_config["tasks"]))
Code Example #17
    def generate_evg_tasks(self):
        """
        Generate evergreen tasks for multiversion tests.

        The number of tasks generated equals
        (the number of configs in MIXED_VERSION_CONFIGS) * (the number of generated suites).
        """
        idx = 0
        # Divide tests into suites based on run-time statistics for the last
        # LOOKBACK_DURATION_DAYS. Tests without enough run-time statistics will be placed
        # in the misc suite.
        gen_suites = generate_resmoke.GenerateSubSuites(self.evg_api, self.options)
        end_date = datetime.datetime.utcnow().replace(microsecond=0)
        start_date = end_date - datetime.timedelta(days=generate_resmoke.LOOKBACK_DURATION_DAYS)
        suites = gen_suites.calculate_suites(start_date, end_date)
        # Render the given suites into yml files that can be used by resmoke.py.
        if self.options.is_sharded:
            config = MultiversionConfig(update_suite_config_for_multiversion_sharded,
                                        SHARDED_MIXED_VERSION_CONFIGS)
        else:
            config = MultiversionConfig(update_suite_config_for_multiversion_replset,
                                        MIXED_VERSION_CONFIGS)
        config_file_dict = generate_resmoke.render_suite_files(
            suites, self.options.suite, gen_suites.test_list, TEST_SUITE_DIR, config.update_yaml)
        for version_config in config.version_configs:
            for suite in suites:
                # Generate the newly divided test suites
                source_suite = os.path.join(CONFIG_DIR, suite.name + ".yml")
                self._generate_sub_task(version_config, idx, source_suite, len(suites))
                idx += 1

            # Also generate the misc task.
            misc_suite_name = "{0}_misc".format(self.options.suite)
            source_suite = os.path.join(CONFIG_DIR, misc_suite_name + ".yml")
            self._generate_sub_task(version_config, idx, source_suite, 1)
            idx += 1

        generate_resmoke.write_file_dict(CONFIG_DIR, config_file_dict)
        dt = DisplayTaskDefinition(self.task).execution_tasks(self.task_names)\
            .execution_task("{0}_gen".format(self.task))
        self.evg_config.variant(self.options.variant).tasks(self.task_specs).display_task(dt)

        return self.evg_config
Code Example #18
    def test_calculate_suites_uses_fallback_if_only_results_are_filtered(self):
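        """
        If every test returned by the test stats endpoint is filtered out (e.g. its
        file no longer exists), the fallback suite count should be used.
        """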
        n_tests = 100
        evg = MagicMock()
        evg.test_stats_by_project.return_value = [
            tst_stat_mock(f"test{i}.js", 60, 1) for i in range(100)
        ]
        config_options = self.get_mock_options()

        gen_sub_suites = under_test.GenerateSubSuites(evg, config_options)
        gen_sub_suites.list_tests = MagicMock(return_value=self.get_test_list(n_tests))
        with patch("os.path.exists") as exists_mock:
            exists_mock.return_value = False
            suites = gen_sub_suites.calculate_suites(_DATE, _DATE)

            self.assertEqual(gen_sub_suites.config_options.fallback_num_sub_suites, len(suites))
            for suite in suites:
                self.assertEqual(50, len(suite.tests))

            self.assertEqual(n_tests, len(gen_sub_suites.test_list))
Code Example #19
    def test_calculate_suites(self):
        evg = MagicMock()
        evg.test_stats_by_project.return_value = [
            tst_stat_mock(f"test{i}.js", 60, 1) for i in range(100)
        ]
        config_options = self.get_mock_options()
        config_options.max_sub_suites = 1000

        gen_sub_suites = under_test.GenerateSubSuites(evg, config_options)

        with patch("os.path.exists") as exists_mock, patch(ns("suitesconfig")) as suitesconfig_mock:
            exists_mock.return_value = True
            suitesconfig_mock.get_suite.return_value.tests = \
                [stat.test_file for stat in evg.test_stats_by_project.return_value]
            suites = gen_sub_suites.calculate_suites(_DATE, _DATE)

            # There are 100 tests taking 1 minute each; with a 10-minute target we expect 10 suites.
            self.assertEqual(10, len(suites))
            for suite in suites:
                self.assertEqual(10, len(suite.tests))
Code Example #20
    def test_clean_every_n_cadence_from_hook_config(self, mock_read_suite_config):
        evg = MagicMock()
        config_options = MagicMock(
            suite="suite",
            san_options=None,
        )
        expected_n = 42
        mock_read_suite_config.return_value = {
            "executor": {
                "hooks": [{
                    "class": "hook1",
                }, {
                    "class": under_test.CLEAN_EVERY_N_HOOK,
                    "n": expected_n,
                }]
            }
        }

        gen_sub_suites = under_test.GenerateSubSuites(evg, config_options)
        cadence = gen_sub_suites._get_clean_every_n_cadence()

        self.assertEqual(expected_n, cadence)
Code Example #21
    def test_calculate_suites_with_selected_tests_to_run(self):
        evg = MagicMock()
        evg.test_stats_by_project.return_value = [
            tst_stat_mock(f"test{i}.js", 60, 1) for i in range(100)
        ]
        config_options = self.get_mock_options()
        config_options.max_sub_suites = 1000
        config_options.selected_tests_to_run = ["test1.js", "test2.js"]

        gen_sub_suites = under_test.GenerateSubSuites(evg, config_options)

        with patch("os.path.exists") as exists_mock, patch(ns("suitesconfig")) as suitesconfig_mock:
            exists_mock.return_value = True
            suitesconfig_mock.get_suite.return_value.tests = \
                [stat.test_file for stat in evg.test_stats_by_project.return_value]
            suites = gen_sub_suites.calculate_suites(_DATE, _DATE)

            # There are 100 tests taking 1 minute each; with a 10-minute target we would expect
            # 10 suites. However, since only test1.js and test2.js were selected to run, only
            # 1 suite should be created.
            self.assertEqual(1, len(suites))
            for suite in suites:
                self.assertEqual(2, len(suite.tests))