def construct_variant_json(workloads, variants):
    """
    Build the buildvariant portion of a generate.tasks JSON document.

    :param list workloads: workload filenames (subdirectory/Task.yml format)
        to schedule tasks for.
    :param list variants: buildvariant names (strings) the tasks should run on.
    :return: JSON representation of the variants, suitable for evergreen's
        generate.tasks command.

    Note: only variants are emitted here, no tasks. The tasks are assumed to
    have been generated already (e.g. by passing the result of
    construct_all_tasks_json() to generate.tasks).
    """
    config = Configuration()
    specs = []
    for workload_path in workloads:
        stem, extension = os.path.splitext(os.path.basename(workload_path))
        if extension != '.yml':
            # Skip anything that is not a .yml workload file.
            continue
        snake_name = to_snake_case(stem)
        for env in get_prepare_environment_vars(snake_name, workload_path):
            specs.append(TaskSpec(env['test']))
    for variant_name in variants:
        config.variant(variant_name).tasks(specs)
    return config.to_json()
def run(self):
    """Generate resmoke suites that run within a target execution time and write to disk."""
    options = self.config_options
    LOGGER.debug("config options", config_options=options)

    # Skip regeneration entirely if a previous run already produced config.
    if not should_tasks_be_generated(self.evergreen_api, options.task_id):
        LOGGER.info("Not generating configuration due to previous successful generation.")
        return

    suite_list = self.get_suites()
    LOGGER.debug("Creating suites", num_suites=len(suite_list), task=options.task,
                 dir=options.generated_config_dir)

    file_dict = self.generate_suites_config(suite_list)

    evg_config = Configuration()
    self.generate_task_config(evg_config, suite_list)
    file_dict[options.task + ".json"] = evg_config.to_json()

    write_file_dict(options.generated_config_dir, file_dict)
def construct_all_tasks_json():
    """
    Build the task portion of a generate.tasks JSON document.

    :return: json representation of tasks for all workloads in the
        /src/workloads directory relative to the genny root.
    """
    c = Configuration()

    workload_dir = '{}/src/workloads'.format(get_project_root())
    all_workloads = glob.glob('{}/**/*.yml'.format(workload_dir), recursive=True)
    # Reduce absolute paths to paths relative to the workloads directory.
    all_workloads = [s.split('/src/workloads/')[1] for s in all_workloads]

    for fname in all_workloads:
        basename = os.path.basename(fname)
        base_parts = os.path.splitext(basename)
        if base_parts[1] != '.yml':
            # Not a .yml workload file, ignore it.
            continue
        task_name = to_snake_case(base_parts[0])
        t = c.task(task_name)
        t.priority(5)  # The default priority in system_perf.yml

        # Defaults used whenever the workload has no (parseable) AutoRun spec.
        prepare_environment_vars = {
            'test': task_name,
            'auto_workload_path': fname
        }
        full_filename = '{}/src/workloads/{}'.format(get_project_root(), fname)
        with open(full_filename, 'r') as handle:
            # Best-effort: if the workload YAML cannot be parsed, or its
            # AutoRun section is malformed, fall back to the default
            # prepare-environment vars above rather than failing the whole
            # generation. The broad catch is deliberate for that reason.
            try:
                workload_dict = yaml.safe_load(handle)
                autorun_spec = AutoRunSpec.create_from_workload_yaml(workload_dict)
                if autorun_spec is not None and autorun_spec.prepare_environment_with is not None:
                    prepare_environment_vars.update(autorun_spec.prepare_environment_with)
            except Exception:
                pass

        t.commands([
            CommandDefinition().function('prepare environment').vars(
                prepare_environment_vars),
            CommandDefinition().function('deploy cluster'),
            CommandDefinition().function('run test'),
            CommandDefinition().function('analyze'),
        ])
    return c.to_json()
def test_no_suites_or_tasks_are_generated(self, generate_subsuites_mock,
                                          selected_tests_config_options_mock):
    """An empty suites config and a no-op task generator must produce no output."""
    subsuites = generate_subsuites_mock.return_value
    subsuites.generate_suites_config.return_value = {}
    # Task-config generation adds nothing to the shrub configuration.
    subsuites.generate_task_config.side_effect = lambda shrub_config, suites: None

    evg_config = Configuration()
    generated_files = {}
    under_test._update_config_with_task(
        evg_api=MagicMock(), shrub_config=evg_config, config_options=MagicMock(),
        config_dict_of_suites_and_tasks=generated_files)

    self.assertEqual(generated_files, {})
    self.assertEqual(evg_config.to_json(), "{}")
def test_suites_and_tasks_are_generated(self, generate_subsuites_mock,
                                        selected_tests_config_options_mock):
    """A non-empty suites config and a task generator must both appear in the output."""
    expected_suites = {"my_suite_0.yml": "suite file contents"}
    subsuites = generate_subsuites_mock.return_value
    subsuites.generate_suites_config.return_value = expected_suites
    # Task-config generation registers a single task on the shrub configuration.
    subsuites.generate_task_config.side_effect = (
        lambda shrub_config, suites: shrub_config.task("my_fake_task"))

    evg_config = Configuration()
    generated_files = {}
    under_test._update_config_with_task(
        evg_api=MagicMock(), shrub_config=evg_config, config_options=MagicMock(),
        config_dict_of_suites_and_tasks=generated_files)

    self.assertEqual(generated_files, expected_suites)
    self.assertIn("my_fake_task", evg_config.to_json())
def run(evg_api: EvergreenApi, evg_conf: EvergreenProjectConfig,
        selected_tests_service: SelectedTestsService,
        selected_tests_variant_expansions: Dict[str, str], repos: List[Repo],
        origin_build_variants: List[str]) -> Dict[str, dict]:
    """
    Select tasks to run based on test and task mappings for each build variant.

    :param evg_api: Evergreen API object.
    :param evg_conf: Evergreen configuration.
    :param selected_tests_service: Selected-tests service.
    :param selected_tests_variant_expansions: Expansions of the selected-tests variant.
    :param repos: List of repos containing changed files.
    :param origin_build_variants: Build variants to collect task info from.
    :return: Dict of files and file contents for generated tasks.
    """
    shrub_config = Configuration()
    generated_files: Dict[str, dict] = {}

    changed_files = {
        _remove_repo_path_prefix(path)
        for path in find_changed_files_in_repos(repos)
    }
    LOGGER.debug("Found changed files", files=changed_files)

    for variant_name in origin_build_variants:
        variant_config = evg_conf.get_variant(variant_name)
        variant_expansions = variant_config.expansions
        task_configs = _get_task_configs(evg_conf, selected_tests_service,
                                         selected_tests_variant_expansions,
                                         variant_config, changed_files)
        for task_config in task_configs.values():
            config_options = SelectedTestsConfigOptions.from_file(
                variant_expansions,
                selected_tests_variant_expansions,
                task_config,
                REQUIRED_CONFIG_KEYS,
                DEFAULT_CONFIG_VALUES,
                CONFIG_FORMAT_FN,
            )
            _update_config_with_task(evg_api, shrub_config, config_options,
                                     generated_files)

    generated_files["selected_tests_config.json"] = shrub_config.to_json()
    return generated_files
def construct_all_tasks_json():
    """
    Build the task portion of a generate.tasks JSON document.

    :return: json representation of tasks for all workloads in the
        /src/workloads directory relative to the genny root.
    """
    config = Configuration()
    config.exec_timeout(64800)  # 18 hours

    workload_dir = '{}/src/workloads'.format(get_project_root())
    discovered = glob.glob('{}/**/*.yml'.format(workload_dir), recursive=True)
    # Reduce absolute paths to paths relative to the workloads directory.
    relative_paths = [p.split('/src/workloads/')[1] for p in discovered]

    for workload in relative_paths:
        stem, extension = os.path.splitext(os.path.basename(workload))
        if extension != '.yml':
            # Not a .yml workload file, ignore it.
            continue
        snake_name = to_snake_case(stem)
        for env in get_prepare_environment_vars(snake_name, workload):
            task = config.task(env['test'])
            task.priority(5)  # The default priority in system_perf.yml
            task.commands([
                CommandDefinition().function('prepare environment').vars(env),
                CommandDefinition().function('deploy cluster'),
                CommandDefinition().function('run test'),
                CommandDefinition().function('analyze'),
            ])
    return config.to_json()
def main():
    """Generate fuzzer tests to run in evergreen."""
    # Argument order is preserved: it determines the --help listing.
    parser = argparse.ArgumentParser(description=main.__doc__)
    parser.add_argument("--expansion-file", dest="expansion_file", type=str,
                        help="Location of expansions file generated by evergreen.")
    parser.add_argument("--num-files", dest="num_files", type=int,
                        help="Number of files to generate per task.")
    parser.add_argument("--num-tasks", dest="num_tasks", type=int,
                        help="Number of tasks to generate.")
    parser.add_argument("--resmoke-args", dest="resmoke_args",
                        help="Arguments to pass to resmoke.")
    parser.add_argument("--npm-command", dest="npm_command",
                        help="npm command to run for fuzzer.")
    parser.add_argument("--jstestfuzz-vars", dest="jstestfuzz_vars",
                        help="options to pass to jstestfuzz.")
    parser.add_argument("--name", dest="name",
                        help="name of task to generate.")
    parser.add_argument("--variant", dest="build_variant",
                        help="build variant to generate.")
    parser.add_argument("--use-multiversion", dest="task_path_suffix",
                        help="Task path suffix for multiversion generated tasks.")
    parser.add_argument("--continue-on-failure", dest="continue_on_failure",
                        help="continue_on_failure value for generated tasks.")
    parser.add_argument("--resmoke-jobs-max", dest="resmoke_jobs_max",
                        help="resmoke_jobs_max value for generated tasks.")
    parser.add_argument("--should-shuffle", dest="should_shuffle",
                        help="should_shuffle value for generated tasks.")
    parser.add_argument("--timeout-secs", dest="timeout_secs",
                        help="timeout_secs value for generated tasks.")
    parser.add_argument("--suite", dest="suite",
                        help="Suite to run using resmoke.")
    args = parser.parse_args()

    config_options = _get_config_options(args, args.expansion_file)
    evg_config = Configuration()
    _write_fuzzer_yaml(config_options)
    generate_evg_tasks(config_options, evg_config)

    os.makedirs(CONFIG_DIRECTORY, exist_ok=True)
    target_path = os.path.join(CONFIG_DIRECTORY, config_options.name + ".json")
    with open(target_path, "w") as file_handle:
        file_handle.write(evg_config.to_json())
def run(evg_api: EvergreenApi, evg_conf: EvergreenProjectConfig, expansion_file: str,
        selected_tests_service: SelectedTestsService, changed_files: Set[str],
        build_variant: str) -> Dict[str, dict]:
    """
    Select tasks to run based on test mappings and task mappings.

    :param evg_api: Evergreen API object.
    :param evg_conf: Evergreen configuration.
    :param expansion_file: Configuration file.
    :param selected_tests_service: Service used to query the selected-tests API.
    :param changed_files: Set of changed files.
    :param build_variant: Build variant to collect task info from.
    :return: Dict of files and file contents for generated tasks.
    """
    shrub_config = Configuration()
    generated_files: Dict[str, dict] = {}
    task_configs: dict = {}

    variant_config = evg_conf.get_variant(build_variant)

    related_test_files = _find_selected_test_files(selected_tests_service, changed_files)
    LOGGER.debug("related test files found", related_test_files=related_test_files)
    if related_test_files:
        tests_by_task = create_task_list_for_tests(related_test_files, build_variant,
                                                   evg_conf)
        LOGGER.debug("tests and tasks found", tests_by_task=tests_by_task)
        task_configs.update(
            _get_task_configs_for_test_mappings(expansion_file, tests_by_task,
                                                variant_config))

    related_tasks = _find_selected_tasks(selected_tests_service, changed_files,
                                         variant_config)
    LOGGER.debug("related tasks found", related_tasks=related_tasks)
    if related_tasks:
        # Task-mapping configs deliberately overwrite test-mapping configs:
        # they run all tests rather than a subset, and we err on the side of
        # running all tests.
        task_configs.update(
            _get_task_configs_for_task_mappings(expansion_file, related_tasks,
                                                variant_config))

    origin_variant_expansions = variant_config.expansions
    for task_config in task_configs.values():
        config_options = SelectedTestsConfigOptions.from_file(
            origin_variant_expansions,
            expansion_file,
            task_config,
            REQUIRED_CONFIG_KEYS,
            DEFAULT_CONFIG_VALUES,
            CONFIG_FORMAT_FN,
        )
        _update_config_with_task(evg_api, shrub_config, config_options, generated_files)

    generated_files["selected_tests_config.json"] = shrub_config.to_json()
    return generated_files