Example #1
    def _run_hooks_after_tests(self, test):
        """Run the after_test method on each of the hooks.

        Swallows any TestFailure exceptions if set to continue on
        failure, and reraises any other exceptions.
        """
        try:
            for hook in self.hooks:
                self._run_hook(hook, hook.after_test, test)

        except errors.StopExecution:
            raise

        except errors.ServerFailure:
            self.logger.exception(
                "%s marked as a failure by a hook's after_test.",
                test.short_description())
            self.report.setFailure(test, return_code=2)
            raise errors.StopExecution("A hook's after_test failed")

        except errors.TestFailure:
            self.logger.exception(
                "%s marked as a failure by a hook's after_test.",
                test.short_description())
            self.report.setFailure(test, return_code=1)
            if self.suite_options.fail_fast:
                raise errors.StopExecution("A hook's after_test failed")

        except:
            self.report.setError(test)
            raise
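The except clauses above lean on a small exception hierarchy from resmoke's errors module. A minimal sketch of the names they assume is below; the bodies are illustrative, and the inheritance shown simply matches the ordering of the except clauses (ServerFailure caught before TestFailure), not necessarily the real definitions.

# Illustrative sketch only; the real classes live in resmoke's errors module
# and may carry extra attributes or a different hierarchy.
class ResmokeError(Exception):
    """Base class for errors raised by the test runner (assumed name)."""


class StopExecution(ResmokeError):
    """Stops the entire suite immediately; always re-raised by the hooks."""


class TestFailure(ResmokeError):
    """A test or hook failed; fatal only when fail_fast is set."""


class ServerFailure(TestFailure):
    """The server/fixture itself failed; always treated as fatal above."""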
Example #2
    def _run_hooks_before_tests(self, test):
        """Run the before_test method on each of the hooks.

        Swallows any TestFailure exceptions if set to continue on
        failure, and reraises any other exceptions.
        """
        try:
            for hook in self.hooks:
                self._run_hook(hook, hook.before_test, test)

        except errors.StopExecution:
            raise

        except errors.ServerFailure:
            self.logger.exception(
                "%s marked as a failure by a hook's before_test.",
                test.short_description())
            self._fail_test(test, sys.exc_info(), return_code=2)
            raise errors.StopExecution("A hook's before_test failed")

        except errors.TestFailure:
            self.logger.exception(
                "%s marked as a failure by a hook's before_test.",
                test.short_description())
            self._fail_test(test, sys.exc_info(), return_code=1)
            if self.suite_options.fail_fast:
                raise errors.StopExecution("A hook's before_test failed")

        except:
            # Record the before_test() error in 'self.report'.
            self.report.startTest(test)
            self.report.addError(test, sys.exc_info())
            self.report.stopTest(test)
            raise
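Unlike the after_test variant, the ServerFailure and TestFailure branches here go through a _fail_test helper, since before_test failed and the test was never started in the report. A plausible sketch of that helper, mirroring the start/record/stop pattern of the bare except branch above (the real helper may differ):

    def _fail_test(self, test, exc_info, return_code=1):
        """Record 'test' as a failure with the given return code (illustrative sketch).

        Hypothetical reconstruction: start the test in the report, attach the
        failure and return code, then stop it again.
        """
        self.report.startTest(test)
        test.return_code = return_code
        self.report.addFailure(test, exc_info)
        self.report.stopTest(test)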
Example #3
    def _execute_test(self, test, hook_failure_flag):
        """Call the before/after test hooks and execute 'test'."""

        test.configure(self.fixture, config.NUM_CLIENTS_PER_FIXTURE)
        self._run_hooks_before_tests(test, hook_failure_flag)
        self.report.logging_prefix = create_fixture_table(self.fixture)

        test(self.report)
        try:
            if test.propagate_error is not None:
                raise test.propagate_error

            # We are intentionally only checking the individual 'test' status and not calling
            # report.wasSuccessful() here. It is possible that a thread running in the background as
            # part of a hook has added a failed test case to 'self.report'. Checking the individual
            # 'test' status ensures self._run_hooks_after_tests() is called if it is a hook's test
            # case that has failed and not 'test' that has failed.
            if self.suite_options.fail_fast and self.report.find_test_info(test).status != "pass":
                self.logger.info("%s failed, so stopping...", test.short_description())
                raise errors.StopExecution("%s failed" % test.short_description())

            if self._check_if_fixture_running and not self.fixture.is_running():
                self.logger.error(
                    "%s marked as a failure because the fixture crashed during the test.",
                    test.short_description())
                self.report.setFailure(test, return_code=2)
                # Always fail fast if the fixture fails.
                raise errors.StopExecution("%s not running after %s" %
                                           (self.fixture, test.short_description()))
        finally:
            success = self.report.find_test_info(test).status == "pass"

            # Stop background hooks first since they can interfere with fixture startup and teardown
            # done as part of archival.
            self._run_hooks_after_tests(test,
                                        hook_failure_flag,
                                        background=True)

            if self.archival:
                result = TestResult(test=test, hook=None, success=success)
                self.archival.archive(self.logger, result, self.manager)

            self._run_hooks_after_tests(test,
                                        hook_failure_flag,
                                        background=False)
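A hypothetical driver loop showing how _execute_test is typically invoked. The real Job class pulls tests from a shared queue and does more bookkeeping, so this is only a sketch:

    def _run_tests(self, tests, hook_failure_flag):
        """Run each test in order; StopExecution aborts the remainder (sketch only)."""
        for test in tests:
            try:
                self._execute_test(test, hook_failure_flag)
            except errors.StopExecution as err:
                # Raised on fail_fast, a fixture crash, or a fatal hook error.
                self.logger.info("Stopping the remaining tests: %s", err)
                break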
Example #4
    def _find_executable(self):  # pylint: disable=no-self-use
        binary = os.path.join(config.INSTALL_DIR, "sdam_json_test")
        if os.name == "nt":
            binary += ".exe"

        if not os.path.isfile(binary):
            raise errors.StopExecution(
                f"Failed to locate sdam_json_test binary at {binary}")
        return binary
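For illustration, with a hypothetical config.INSTALL_DIR of /data/install/bin this resolves to /data/install/bin/sdam_json_test on POSIX hosts and /data/install/bin/sdam_json_test.exe on Windows (os.name == "nt"); if the file is missing, the whole suite is aborted via StopExecution rather than failing a single test.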
Example #5
    def _run_hooks_after_tests(self,
                               test,
                               hook_failure_flag,
                               background=False):
        """Run the after_test method on each of the hooks.

        Swallows any TestFailure exceptions if set to continue on
        failure, and reraises any other exceptions.

        @param test: the test after which we run the hooks.
        @param background: whether to run background hooks.
        """
        try:
            for hook in self.hooks:
                if hook.IS_BACKGROUND == background:
                    self._run_hook(hook, hook.after_test, test,
                                   hook_failure_flag)

        except errors.StopExecution:
            raise

        except errors.ServerFailure:
            self.logger.exception(
                "%s marked as a failure by a hook's after_test.",
                test.short_description())
            self.report.setFailure(test, return_code=2)
            raise errors.StopExecution("A hook's after_test failed")

        except errors.TestFailure:
            self.logger.exception(
                "%s marked as a failure by a hook's after_test.",
                test.short_description())
            self.report.setFailure(test, return_code=1)
            if self.suite_options.fail_fast:
                raise errors.StopExecution("A hook's after_test failed")

        except:
            self.report.setError(test)
            raise
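The background= split assumes every hook class advertises an IS_BACKGROUND attribute. A minimal sketch of that contract is below; the class and method signatures are illustrative assumptions, not the real hook interface.

# Illustrative contract assumed by the background= parameter above.
class HookSketch(object):
    IS_BACKGROUND = False  # True for hooks that run in a background thread

    def before_test(self, test, test_report):
        pass

    def after_test(self, test, test_report):
        pass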
Example #6
    def _find_executable(self):
        if config.INSTALL_DIR is not None:
            binary = os.path.join(config.INSTALL_DIR, "server_selection_json_test")
            if os.name == "nt":
                binary += ".exe"

            if os.path.isfile(binary):
                return binary

        execs = globstar.glob(self.EXECUTABLE_BUILD_PATH + '.exe')
        if not execs:
            execs = globstar.glob(self.EXECUTABLE_BUILD_PATH)
        if len(execs) != 1:
            raise errors.StopExecution(
                "There must be a single server_selection_json_test binary in {}".format(execs))
        return execs[0]
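EXECUTABLE_BUILD_PATH is consumed as a class-level glob pattern pointing at the compiled binary in the build tree. A hypothetical value, for illustration only:

    # Hypothetical example of the class attribute used above; the real pattern
    # may differ.
    EXECUTABLE_BUILD_PATH = "build/**/server_selection_json_test"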
Example #7
    def _archive_hook_or_test(self, logger, test_name, test, manager):
        """Trigger archive of data files for a test or hook."""

        # We can still attempt archiving even if the teardown fails.
        if not manager.teardown_fixture(logger, abort=True):
            logger.warning(
                "Error while aborting test fixtures; data files may be invalid."
            )
        with self._lock:
            # Test repeat number is how many times the particular test has been archived.
            if test_name not in self._tests_repeat:
                self._tests_repeat[test_name] = 0
            else:
                self._tests_repeat[test_name] += 1
        # Normalize test path from a test or hook name.
        test_path = \
            test_name.replace("/", "_").replace("\\", "_").replace(".", "_").replace(":", "_")
        file_name = "mongo-data-{}-{}-{}-{}.tgz".format(
            config.EVERGREEN_TASK_ID, test_path, config.EVERGREEN_EXECUTION,
            self._tests_repeat[test_name])
        # Retrieve root directory for all dbPaths from fixture.
        input_files = test.fixture.get_dbpath_prefix()
        s3_bucket = config.ARCHIVE_BUCKET
        s3_path = "{}/{}/{}/datafiles/{}".format(config.EVERGREEN_PROJECT_NAME,
                                                 config.EVERGREEN_VARIANT_NAME,
                                                 config.EVERGREEN_REVISION,
                                                 file_name)
        display_name = "Data files {} - Execution {} Repetition {}".format(
            test_name, config.EVERGREEN_EXECUTION,
            self._tests_repeat[test_name])
        logger.info("Archiving data files for test %s from %s", test_name,
                    input_files)
        status, message = self.archive_instance.archive_files_to_s3(
            display_name, input_files, s3_bucket, s3_path)
        if status:
            logger.warning("Archive failed for %s: %s", test_name, message)
        else:
            logger.info("Archive succeeded for %s: %s", test_name, message)

        if not manager.setup_fixture(logger):
            raise errors.StopExecution(
                "Error while restarting test fixtures after archiving.")
    def __init__(self, logger, json_filename, mql_executable=None):
        """Initialize the MqlModelHaskellTestCase with the executable to run."""

        interface.ProcessTestCase.__init__(self, logger,
                                           "MQL Haskell Model test",
                                           json_filename)

        self.json_test_file = json_filename

        # Determine the top level directory where we start a search for a mql binary
        self.top_level_dirname = os.path.join(
            os.path.normpath(json_filename).split(os.sep)[0], "")

        # Our haskell cabal build produces binaries in a unique directory
        # .../dist-sandbox-<some hex hash>/...
        # so we use a glob pattern to fish out the binary.
        mql_executable = utils.default_if_none(
            mql_executable, "mql-model/dist/dist*/build/mql/mql")
        execs = globstar.glob(mql_executable)
        if len(execs) != 1:
            raise errors.StopExecution(
                "There must be a single mql binary in {}".format(execs))

        self.program_executable = execs[0]
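For completeness, a hypothetical sketch of how the located binary might be launched against the JSON test file; the real test case uses resmoke's process helpers, whose names and signatures may differ:

    def _make_process(self):
        # Illustrative sketch only: run the mql binary on the JSON test file.
        return process.Process(self.logger,
                               [self.program_executable, self.json_test_file])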