Example 1
    def test_run_tests_no_tests(self, check_call_mock):
        tests_by_task = {}
        resmoke_cmd = ["python", "buildscripts/resmoke.py", "run", "--continueOnFailure"]

        under_test.run_tests(tests_by_task, resmoke_cmd)

        check_call_mock.assert_not_called()
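
The tests in Examples 1 and 2 (and Example 6 below) receive check_call_mock as an argument, which implies an @patch decorator supplying it. A minimal sketch of that wiring, assuming the module under test is imported as under_test (the actual import path and decorator placement are not shown in this excerpt):

import unittest
from unittest.mock import patch

# Assumed import path; the examples only show the alias `under_test`.
import buildscripts.burn_in_tests as under_test


class TestRunTests(unittest.TestCase):
    # Each stacked @patch injects one mock argument, bottom decorator first.
    @patch("subprocess.check_call")
    def test_run_tests_no_tests(self, check_call_mock):
        under_test.run_tests({}, ["python", "buildscripts/resmoke.py", "run"])
        check_call_mock.assert_not_called()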
Example 2
    def test_run_tests_some_test(self, check_call_mock):
        n_tasks = 3
        tests_by_task = create_tests_by_task_mock(n_tasks, 5)
        resmoke_cmd = ["python", "buildscripts/resmoke.py", "run", "--continueOnFailure"]

        under_test.run_tests(tests_by_task, resmoke_cmd)

        self.assertEqual(n_tasks, check_call_mock.call_count)
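
Examples 2 and 6 rely on a create_tests_by_task_mock helper that is not shown. A hypothetical sketch of the mapping it might build, based only on how the examples consume it (key names and test paths are illustrative assumptions):

def create_tests_by_task_mock(n_tasks, n_tests):
    # One entry per task; each task carries its resmoke args and a list of tests.
    return {
        f"task_{i}": {
            "resmoke_args": f"--suites=suite_{i}",
            "tests": [f"jstests/test_{i}_{j}.js" for j in range(n_tests)],
        }
        for i in range(n_tasks)
    }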
Example 3
    def test_run_tests_noexec(self):
        no_exec = True
        resmoke_cmd = None
        with patch("subprocess.check_call", return_value=None) as mock_subproc,\
             patch(BURN_IN + "._write_json_file", return_value=None) as mock_write_json:
            burn_in.run_tests(no_exec, self.TESTS_BY_TASK, resmoke_cmd, None)
            self.assertEqual(mock_subproc.call_count, 0)
            self.assertEqual(mock_write_json.call_count, 0)
Example 4
    def _test_run_tests(self, no_exec, tests_by_task, resmoke_cmd):
        with patch("subprocess.check_call", return_value=None) as mock_subproc,\
             patch(BURN_IN + "._update_report_data", return_value=None),\
             patch(BURN_IN + "._write_json_file", return_value=None):
            burn_in.run_tests(no_exec, tests_by_task, resmoke_cmd, None)
            self.assertEqual(mock_subproc.call_count, len(tests_by_task.keys()))
            for idx, task in enumerate(sorted(tests_by_task)):
                for task_test in tests_by_task[task].get("tests", []):
                    self.assertIn(task_test, mock_subproc.call_args_list[idx][0][0])
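
The helper above is a parameterized check rather than a test in its own right; a hypothetical caller on the same test class might look like this (argument values are purely illustrative, and self.TESTS_BY_TASK is the fixture already referenced in Examples 3 and 5):

    def test_run_tests_exec(self):
        resmoke_cmd = ["python", "buildscripts/resmoke.py", "--continueOnFailure"]
        self._test_run_tests(False, self.TESTS_BY_TASK, resmoke_cmd)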
Example 5
    def test_run_tests_tests_resmoke_failure(self):
        no_exec = False
        resmoke_cmd = ["python", "buildscripts/resmoke.py", "--continueOnFailure"]
        error_code = -1
        with patch("subprocess.check_call", return_value=None) as mock_subproc,\
             patch("sys.exit", return_value=error_code) as mock_exit,\
             patch(BURN_IN + "._update_report_data", return_value=None),\
             patch(BURN_IN + "._write_json_file", return_value=None):
            mock_subproc.side_effect = subprocess.CalledProcessError(error_code, "err1")
            mock_exit.side_effect = self.SysExit(error_code)
            with self.assertRaises(self.SysExit):
                burn_in.run_tests(no_exec, self.TESTS_BY_TASK, resmoke_cmd, None)
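
Example 5 raises and catches self.SysExit, which suggests a small exception type defined on the test class so the patched sys.exit becomes a catchable error. A plausible sketch (the real definition is not shown in this excerpt):

    class SysExit(Exception):
        """Raised by the patched sys.exit so tests can assert on the exit path."""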
Example 6
    def test_run_tests_tests_resmoke_failure(self, check_call_mock, exit_mock):
        error_code = 42
        n_tasks = 3
        tests_by_task = create_tests_by_task_mock(n_tasks, 5)
        resmoke_cmd = ["python", "buildscripts/resmoke.py", "run", "--continueOnFailure"]
        check_call_mock.side_effect = subprocess.CalledProcessError(error_code, "err1")
        exit_mock.side_effect = ValueError('exiting')

        with self.assertRaises(ValueError):
            under_test.run_tests(tests_by_task, resmoke_cmd)

        self.assertEqual(1, check_call_mock.call_count)
        exit_mock.assert_called_with(error_code)
Example 7
def main(build_variant, run_build_variant, distro, project,
         generate_tasks_file, no_exec, resmoke_args, evg_api_config, verbose,
         task_id):
    """
    Run new or changed tests in repeated mode to validate their stability.

    Running burn_in_tests_multiversion will run new or changed tests against the appropriate generated multiversion
    suites. The purpose of these tests is to signal bugs in the generated multiversion suites, as these tasks are
    excluded from the required build variants and are only run in certain daily build variants. As such, we only expect
    the burn-in multiversion tests to be run once for each binary version configuration, and `--repeat-*` arguments
    should be None when executing this script.

    There are two modes that burn_in_tests_multiversion can run in:

    (1) Normal mode: by default burn_in_tests will attempt to run all detected tests the
    configured number of times. This is useful if you have a test or tests you would like to
    check before submitting a patch to evergreen.

    (2) By specifying the `--generate-tasks-file` option, burn_in_tests will generate a configuration
    file that can then be sent to the Evergreen 'generate.tasks' command to create evergreen tasks
    to do all the test executions. This is the mode used to run tests in patch builds.

    NOTE: There is currently a limit on the number of tasks burn_in_tests will attempt to generate
    in Evergreen. The limit is 1000. If you change enough tests that more than 1000 tasks would
    be generated, burn_in_tests will fail. This is to avoid generating more tasks than Evergreen
    can handle.
    \f

    :param build_variant: Build variant to query tasks from.
    :param run_build_variant: Build variant to actually run against.
    :param distro: Distro to run tests on.
    :param project: Project to run tests on.
    :param generate_tasks_file: Create a generate tasks configuration in this file.
    :param no_exec: Only perform test discovery; do not execute the tests.
    :param resmoke_args: Arguments to pass through to resmoke.
    :param evg_api_config: Location of configuration file to connect to evergreen.
    :param verbose: Log extra debug information.
    :param task_id: Id of the Evergreen task being run.
    """
    _configure_logging(verbose)

    evg_conf = parse_evergreen_file(EVERGREEN_FILE)
    repeat_config = RepeatConfig()  # yapf: disable
    generate_config = GenerateConfig(build_variant=build_variant,
                                     run_build_variant=run_build_variant,
                                     distro=distro,
                                     project=project,
                                     task_id=task_id)  # yapf: disable
    if generate_tasks_file:
        generate_config.validate(evg_conf)

    evg_api = _get_evg_api(evg_api_config, False)

    repos = [Repo(x) for x in DEFAULT_REPO_LOCATIONS if os.path.isdir(x)]

    resmoke_cmd = _set_resmoke_cmd(repeat_config, list(resmoke_args))

    tests_by_task = create_tests_by_task(generate_config.build_variant, repos,
                                         evg_conf)
    LOGGER.debug("tests and tasks found", tests_by_task=tests_by_task)

    if generate_tasks_file:
        multiversion_tasks = evg_conf.get_task_names_by_tag(
            MULTIVERSION_PASSTHROUGH_TAG)
        LOGGER.debug("Multiversion tasks by tag",
                     tasks=multiversion_tasks,
                     tag=MULTIVERSION_PASSTHROUGH_TAG)
        # We expect the number of suites with MULTIVERSION_PASSTHROUGH_TAG to be the same as in
        # multiversion_suites. Multiversion passthrough suites must include
        # MULTIVERSION_CONFIG_KEY as a root level key and must be set to true.
        multiversion_suites = get_named_suites_with_root_level_key(
            MULTIVERSION_CONFIG_KEY)
        assert len(multiversion_tasks) == len(multiversion_suites)

        build_variant = create_multiversion_generate_tasks_config(
            tests_by_task, evg_api, generate_config)
        shrub_project = ShrubProject.empty()
        shrub_project.add_build_variant(build_variant)

        if not validate_task_generation_limit(shrub_project):
            sys.exit(1)

        write_file(generate_tasks_file, shrub_project.json())
    elif not no_exec:
        run_tests(tests_by_task, resmoke_cmd)
    else:
        LOGGER.info("Not running tests due to 'no_exec' option.")