def test_item_with_format_function_works(self):
    """A format function registered for a key should be applied on attribute lookup."""
    raw_config = {"number": "1"}
    config_options = under_test.ConfigOptions(raw_config, formats={"number": int})

    value = config_options.number

    self.assertEqual(1, value)
    self.assertIsInstance(value, int)
def test_lookup_missing_required_key_throws_exception(self):
    """Looking up a required key that is absent from the config should raise KeyError."""
    config_options = under_test.ConfigOptions({}, required_keys={"key1"})

    with self.assertRaises(KeyError):
        config_options.key1  # pylint: disable=pointless-statement
def _get_fuzzer_options(self, version_config, is_sharded):
    """
    Build the fuzzer config options for the given mixed binary version configuration.

    :param version_config: Mixed binary version configuration to run against.
    :param is_sharded: Whether a sharded suite is being generated.
    :return: ConfigOptions for generating the fuzzer tasks.
    """
    options = self.options
    fuzzer_config = generate_resmoke.ConfigOptions(options.config)
    fuzzer_config.name = f"{options.suite}_multiversion"
    fuzzer_config.num_files = int(options.num_files)
    fuzzer_config.num_tasks = int(options.num_tasks)
    extra_args = get_multiversion_resmoke_args(is_sharded)
    fuzzer_config.resmoke_args = (
        f"{options.resmoke_args} --mixedBinVersions={version_config} {extra_args}")
    return fuzzer_config
def _get_fuzzer_options(self, version_config, suite_file):
    """
    Build the fuzzer config options targeting the given generated suite file.

    :param version_config: Mixed binary version configuration to run against.
    :param suite_file: Name of the suite file (under CONFIG_DIR) to run.
    :return: ConfigOptions for generating the fuzzer tasks.
    """
    options = self.options
    fuzzer_config = generate_resmoke.ConfigOptions(options.config)
    fuzzer_config.name = f"{options.suite}_multiversion"
    fuzzer_config.num_files = int(options.num_files)
    fuzzer_config.num_tasks = int(options.num_tasks)
    fuzzer_config.resmoke_args = (
        f"{options.resmoke_args} --mixedBinVersions={version_config}"
        f" --excludeWithAnyTags={EXCLUDE_TAGS} --suites={CONFIG_DIR}/{suite_file}")
    return fuzzer_config
def test_generate_display_task(self):
    """The display task should include the _gen task alongside all execution tasks."""
    config_options = under_test.ConfigOptions({"task_name": "my_task"})

    display_task = config_options.generate_display_task(["task_1", "task_2"])

    self.assertEqual("my_task", display_task._name)
    execution_tasks = display_task.to_map()["execution_tasks"]
    self.assertIn(config_options.task + "_gen", execution_tasks)
    self.assertIn("task_1", execution_tasks)
    self.assertIn("task_2", execution_tasks)
def create_multiversion_generate_tasks_config(
        tests_by_task: Dict[str, TaskInfo], evg_api: EvergreenApi,
        generate_config: GenerateConfig) -> BuildVariant:
    """
    Create the multiversion config for the Evergreen generate.tasks file.

    :param tests_by_task: Dictionary of tests to generate tasks for.
    :param evg_api: Evergreen API.
    :param generate_config: Configuration of what to generate.
    :return: Shrub configuration with added tasks.
    """
    build_variant = BuildVariant(generate_config.build_variant)
    tasks = set()
    if tests_by_task:
        # Get the multiversion suites that will run as part of burn_in_multiversion.
        multiversion_suites = get_named_suites_with_root_level_key(MULTIVERSION_CONFIG_KEY)
        for suite in multiversion_suites:
            origin_suite = suite["origin"]
            if origin_suite not in tests_by_task:
                # Only generate burn in multiversion tasks for suites that would run the
                # detected changed tests.
                continue
            LOGGER.debug("Generating multiversion suite", suite=suite["multiversion_name"])

            # The number of fallback sub suites and the target resmoke time are hardcoded
            # here since burn_in_tests cares about individual tests and not entire suites.
            # The config options are purely used to generate the proper multiversion
            # suites to run tests against. Note that DEFAULT_CONFIG_VALUES is merged last,
            # matching the original update() ordering.
            config_options = {
                "suite": origin_suite,
                "fallback_num_sub_suites": 1,
                "project": generate_config.project,
                "build_variant": generate_config.build_variant,
                "task_id": generate_config.task_id,
                "task_name": suite["multiversion_name"],
                "target_resmoke_time": 60,
                **gen_resmoke.DEFAULT_CONFIG_VALUES,
            }

            config_generator = gen_multiversion.EvergreenMultiversionConfigGenerator(
                evg_api, gen_resmoke.ConfigOptions(config_options))
            # Generate the multiversion tasks for each detected test.
            for idx, test in enumerate(tests_by_task[origin_suite].tests):
                tasks = tasks.union(config_generator.get_burn_in_tasks(test, idx))

    existing_tasks = {ExistingTask(f"{BURN_IN_MULTIVERSION_TASK}_gen")}
    build_variant.display_task(BURN_IN_MULTIVERSION_TASK, tasks,
                               execution_existing_tasks=existing_tasks)
    return build_variant
def create_multiversion_generate_tasks_config(
        evg_config: Configuration, tests_by_task: Dict, evg_api: EvergreenApi,
        generate_config: GenerateConfig) -> Configuration:
    """
    Create the multiversion config for the Evergreen generate.tasks file.

    :param evg_config: Shrub configuration to add to.
    :param tests_by_task: Dictionary of tests to generate tasks for.
    :param evg_api: Evergreen API.
    :param generate_config: Configuration of what to generate.
    :return: Shrub configuration with added tasks.
    """
    dt = DisplayTaskDefinition(BURN_IN_MULTIVERSION_TASK)
    if tests_by_task:
        multiversion_suites = get_named_suites_with_root_level_key_and_value(
            MULTIVERSION_CONFIG_KEY, True)
        for suite in multiversion_suites:
            if suite not in tests_by_task:
                # Only generate burn in multiversion tasks for suites that would run the
                # detected changed tests.
                continue
            LOGGER.debug("Generating multiversion suite", suite=suite)

            # The number of fallback sub suites and the target resmoke time are hardcoded
            # here since burn_in_tests cares about individual tests and not entire suites.
            # The config options are purely used to generate the proper multiversion
            # suites to run tests against. Note that DEFAULT_CONFIG_VALUES is merged last,
            # matching the original update() ordering.
            config_options = {
                "suite": suite,
                "fallback_num_sub_suites": 1,
                "project": generate_config.project,
                "build_variant": generate_config.build_variant,
                "task_id": generate_config.task_id,
                "task_name": suite,
                "target_resmoke_time": 60,
                **gen_resmoke.DEFAULT_CONFIG_VALUES,
            }

            config_generator = gen_multiversion.EvergreenConfigGenerator(
                evg_api, evg_config, gen_resmoke.ConfigOptions(config_options))
            # Generate the multiversion tasks for each detected test.
            for idx, test in enumerate(tests_by_task[suite]["tests"]):
                config_generator.generate_evg_tasks(test, idx)

            dt.execution_tasks(config_generator.task_names)
            evg_config.variant(generate_config.build_variant).tasks(
                config_generator.task_specs)

    dt.execution_task(f"{BURN_IN_MULTIVERSION_TASK}_gen")
    evg_config.variant(generate_config.build_variant).display_task(dt)
    return evg_config
def _generate_fuzzer_tasks(self, version_configs, is_sharded):
    """
    Generate fuzzer tasks for each mixed binary version configuration.

    :param version_configs: Mixed binary version configurations to generate tasks for.
    :param is_sharded: Whether a sharded suite is being generated.
    :return: Evergreen configuration with the generated fuzzer tasks added.
    """
    dt = DisplayTaskDefinition(self.task)
    for version_config in version_configs:
        # _get_fuzzer_options already builds a fresh ConfigOptions from
        # self.options.config; the previous extra ConfigOptions construction here
        # was dead code (immediately overwritten) and has been removed.
        fuzzer_config = self._get_fuzzer_options(version_config, is_sharded)
        gen_fuzzer.generate_evg_tasks(fuzzer_config, self.evg_config,
                                      task_name_suffix=version_config, display_task=dt)
        dt.execution_task(f"{fuzzer_config.name}_gen")
    self.evg_config.variant(self.options.variant).display_task(dt)
    return self.evg_config
def test_calculate_suites_error(self):
    """A non-fallback HTTP error from test stats should propagate to the caller."""
    response = Mock()
    response.status_code = requests.codes.INTERNAL_SERVER_ERROR
    evg = Mock()
    evg.test_stats.side_effect = requests.HTTPError(response=response)

    main = grt.Main(evg)
    main.options = Mock()
    main.options.execution_time_minutes = 10
    main.config_options = grt.ConfigOptions(2, 15, "project", "", 1, 30, True, "task",
                                            "suite", "variant", False, "")
    main.list_tests = Mock(return_value=["test{}.js".format(i) for i in range(100)])

    with self.assertRaises(requests.HTTPError):
        main.calculate_suites(_DATE, _DATE)
def test_calculate_suites_fallback(self):
    """A SERVICE_UNAVAILABLE from test stats should fall back to evenly split suites."""
    response = Mock()
    response.status_code = requests.codes.SERVICE_UNAVAILABLE
    evg = Mock()
    evg.test_stats.side_effect = requests.HTTPError(response=response)

    main = grt.Main(evg)
    main.options = Mock()
    main.options.execution_time_minutes = 10
    main.config_options = grt.ConfigOptions(2, 15, "project", "", 1, 30, True, "task",
                                            "suite", "variant", False, "")
    main.list_tests = Mock(return_value=["test{}.js".format(i) for i in range(100)])

    suites = main.calculate_suites(_DATE, _DATE)

    # 100 tests split across the fallback number of sub-suites (2) -> 50 tests each.
    self.assertEqual(main.config_options.fallback_num_sub_suites, len(suites))
    for suite in suites:
        self.assertEqual(50, len(suite.tests))
def test_calculate_suites(self):
    """Suites should be split so that each one stays under the target runtime."""
    evg = Mock()
    evg.test_stats.return_value = [{
        "test_file": "test{}.js".format(i),
        "avg_duration_pass": 60,
        "num_pass": 1,
    } for i in range(100)]

    main = grt.Main(evg)
    main.options = Mock()
    main.config_options = grt.ConfigOptions(2, 15, "project", "", 1, 10, True, "task",
                                            "suite", "variant", False, "")

    with patch('os.path.exists') as exists_mock:
        exists_mock.return_value = True
        suites = main.calculate_suites(_DATE, _DATE)

        # There are 100 tests taking 1 minute each; with a target of 10 min we
        # expect 10 suites of 10 tests each.
        self.assertEqual(10, len(suites))
        for suite in suites:
            self.assertEqual(10, len(suite.tests))
def _config_options(config_values):
    """Build a ConfigOptions from the given values with the module's standard required keys, defaults, and format functions."""
    return under_test.ConfigOptions(
        config_values,
        under_test.REQUIRED_CONFIG_KEYS,
        under_test.DEFAULT_CONFIG_VALUES,
        under_test.CONFIG_FORMAT_FN,
    )
def test_create_misc_suite(self):
    """create_misc_suite should be truthy when nothing is configured."""
    config_options = under_test.ConfigOptions({})
    self.assertTrue(config_options.create_misc_suite)
def test_suite_uses_suite_if_provided(self):
    """When both task and suite are configured, the suite value takes precedence."""
    config_options = under_test.ConfigOptions({"task": "task_value", "suite": "suite_value"})
    self.assertEqual("suite_value", config_options.suite)
def test_task_uses_task_name(self):
    """The task property should be read from the task_name key."""
    config_options = under_test.ConfigOptions({"task_name": "task_value"})
    self.assertEqual("task_value", config_options.task)
def test_suite_uses_task_value_if_no_suite(self):
    """When no suite is configured, the suite property falls back to the task value."""
    config_options = under_test.ConfigOptions({"task": "task_value"})
    self.assertEqual("task_value", config_options.suite)
def test_depends_on_splits_values(self):
    """A comma-separated depends_on value should be split into multiple entries."""
    config_options = under_test.ConfigOptions({"depends_on": "value1,value2,value3"})
    self.assertEqual(3, len(config_options.depends_on))
def test_unknown_values_return_none(self):
    """Looking up a key that was never configured should yield None."""
    config_options = under_test.ConfigOptions({})
    self.assertIsNone(config_options.key1)
def test_run_tests_build_variant_uses_build_variant(self):
    """run_tests_build_variant should be read from the build_variant key."""
    config_options = under_test.ConfigOptions({"build_variant": "my-build-variant"})
    self.assertEqual("my-build-variant", config_options.run_tests_build_variant)
def test_run_tests_build_id_uses_build_id(self):
    """run_tests_build_id should be read from the build_id key."""
    config_options = under_test.ConfigOptions({"build_id": "my_build_id"})
    self.assertEqual("my_build_id", config_options.run_tests_build_id)