Example #1
    def run_all(self):
        """For each file in the test suite, run client program
        assuming each file represents an individual test."""

        if not self.tests:
            # nothing to test, exit
            return []
        self.server.deploy(silent=False)

        if self.args.start_and_exit:
            color_stdout("    Start and exit requested, exiting...\n",
                         schema='info')
            exit(0)

        longsep = '=' * 70
        shortsep = '-' * 60
        color_stdout(longsep, "\n", schema='separator')
        color_stdout("TEST".ljust(48), schema='t_name')
        color_stdout("RESULT\n", schema='test_pass')
        color_stdout(shortsep, "\n", schema='separator')
        failed_tests = []
        try:
            for test in self.tests:
                color_stdout(test.name.ljust(48), schema='t_name')
                # for better diagnostics in case of a long-running test

                test_name = os.path.basename(test.name)

                if (test_name in self.ini["disabled"] or not self.server.debug
                        and test_name in self.ini["release_disabled"]
                        or self.args.valgrind
                        and test_name in self.ini["valgrind_disabled"]):
                    color_stdout("[ disabled ]\n", schema='t_name')
                else:
                    test.run(self.server)
                    if not test.passed():
                        failed_tests.append(test.name)
            color_stdout(shortsep, "\n", schema='separator')
            self.server.stop(silent=False)
        except KeyboardInterrupt:
            color_stdout("\n%s\n" % shortsep, schema='separator')
            self.server.stop(silent=False)
            raise
        finally:
            self.server.cleanup()

        if failed_tests:
            color_stdout("Failed {0} tests: {1}.\n".format(
                len(failed_tests), ", ".join(failed_tests)),
                         schema='error')

        if self.args.valgrind and check_valgrind_log(self.server.valgrind_log):
            color_stdout(shortsep, "\n", schema='separator')
            color_stdout(
                "  Error! There were warnings/errors in valgrind log file:\n",
                schema='error')
            print_tail_n(self.server.valgrind_log, 20)
            color_stdout(shortsep, "\n", schema='separator')
            return ['valgrind error in ' + self.suite_path]
        return failed_tests
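
The disabled-test check inside run_all() above packs several and/or clauses into one condition. Below is a minimal standalone sketch of the same decision factored into a helper; it is not part of the test suite, and the ini keys and flag names simply mirror the fields seen in these examples.

import os


def is_disabled(test_path, ini, debug_build, with_valgrind, run_long=True):
    """Return True if the runner should skip this test."""
    name = os.path.basename(test_path)
    return (name in ini.get("disabled", [])
            or not debug_build and name in ini.get("release_disabled", [])
            or with_valgrind and name in ini.get("valgrind_disabled", [])
            or not run_long and name in ini.get("long_run", []))


# For instance, a test listed under "release_disabled" is skipped on
# non-debug builds only:
ini = {"disabled": [], "release_disabled": ["slow.test.py"],
       "valgrind_disabled": [], "long_run": []}
print(is_disabled("suite/slow.test.py", ini, False, False))  # True
print(is_disabled("suite/slow.test.py", ini, True, False))   # False
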
Example #2
    def run_all(self):
        """For each file in the test suite, run client program
        assuming each file represents an individual test."""

        if not self.tests:
            # nothing to test, exit
            return []
        self.server.deploy(silent=False)

        if self.args.start_and_exit:
            color_stdout("    Start and exit requested, exiting...\n", schema='info')
            exit(0)

        longsep = '='*70
        shortsep = '-'*60
        color_stdout(longsep, "\n", schema='separator')
        color_stdout("TEST".ljust(48), schema='t_name')
        color_stdout("RESULT\n", schema='test_pass')
        color_stdout(shortsep, "\n", schema='separator')
        failed_tests = []
        try:
            for test in self.tests:
                color_stdout(test.name.ljust(48), schema='t_name')
                # for better diagnostics in case of a long-running test

                test_name = os.path.basename(test.name)

                if (test_name in self.ini["disabled"]
                    or not self.server.debug and test_name in self.ini["release_disabled"]
                    or self.args.valgrind and test_name in self.ini["valgrind_disabled"]):
                    color_stdout("[ disabled ]\n", schema='t_name')
                else:
                    test.run(self.server)
                    if not test.passed():
                        failed_tests.append(test.name)
            color_stdout(shortsep, "\n", schema='separator')
            self.server.stop(silent=False)
            self.server.cleanup()
        except KeyboardInterrupt:
            color_stdout('\n')
            color_stdout(shortsep, "\n", schema='separator')
            self.server.stop(silent=False)
            self.server.cleanup()
            raise

        if failed_tests:
            color_stdout("Failed {0} tests: {1}.\n".format(len(failed_tests),
                                                ", ".join(failed_tests)),
                                                schema='error')

        if self.args.valgrind and check_valgrind_log(self.server.valgrind_log):
            color_stdout(shortsep, "\n", schema='separator')
            color_stdout("  Error! There were warnings/errors in valgrind log file:\n", schema='error')
            print_tail_n(self.server.valgrind_log, 20)
            color_stdout(shortsep, "\n", schema='separator')
            return ['valgrind error in ' + self.suite_path]
        return failed_tests
Example #3
    def run(self, server):
        """Execute the test assuming it's a python program.
        If the test aborts, print its output to stdout, and raise
        an exception. Else, comprare result and reject files.
        If there is a difference, print it to stdout and raise an
        exception. The exception is raised only if is_force flag is
        not set."""
        diagnostics = "unknown"
        save_stdout = sys.stdout
        try:
            self.skip = False
            if os.path.exists(self.skip_cond):
                sys.stdout = FilteredStream(self.tmp_result)
                stdout_fileno = sys.stdout.stream.fileno()
                execfile(self.skip_cond, dict(locals(), **server.__dict__))
                sys.stdout.close()
                sys.stdout = save_stdout
            if not self.skip:
                sys.stdout = FilteredStream(self.tmp_result)
                stdout_fileno = sys.stdout.stream.fileno()
                self.execute(server)
                sys.stdout.flush()
            self.is_executed_ok = True
        except Exception as e:
            traceback.print_exc()
            diagnostics = str(e)
        finally:
            if sys.stdout and sys.stdout != save_stdout:
                sys.stdout.close()
            sys.stdout = save_stdout
        self.is_executed = True
        sys.stdout.flush()

        if not self.skip:
            if self.is_executed_ok and os.path.isfile(self.result):
                self.is_equal_result = filecmp.cmp(self.result, self.tmp_result)
        else:
            self.is_equal_result = True

        if self.args.valgrind:
            self.is_valgrind_clean = not check_valgrind_log(server.valgrind_log)

        if self.skip:
            color_stdout("[ skip ]\n", schema='test_skip')
            if os.path.exists(self.tmp_result):
                os.remove(self.tmp_result)
        elif self.is_executed_ok and self.is_equal_result and self.is_valgrind_clean:
            color_stdout("[ pass ]\n", schema='test_pass')
            if os.path.exists(self.tmp_result):
                os.remove(self.tmp_result)
        elif (self.is_executed_ok and not self.is_equal_result
              and not os.path.isfile(self.result)):
            os.rename(self.tmp_result, self.result)
            color_stdout("[ new ]\n", schema='test_new')
        else:
            os.rename(self.tmp_result, self.reject)
            color_stdout("[ fail ]\n", schema='test_fail')

            where = ""
            if not self.is_executed_ok:
                self.print_diagnostics(
                    self.reject,
                    "Test failed! Last 10 lines of the result file:\n")
                server.print_log(15)
                where = ": test execution aborted, reason '{0}'".format(diagnostics)
            elif not self.is_equal_result:
                self.print_unidiff()
                server.print_log(15)
                where = ": wrong test output"
            elif not self.is_valgrind_clean:
                os.remove(self.reject)
                self.print_diagnostics(
                    server.valgrind_log,
                    "Test failed! Last 10 lines of valgrind.log:\n")
                where = ": there were warnings in valgrind.log"

            if not self.args.is_force:
                # gh-1026
                # stop and cleanup tarantool instance for incorrect tests
                server.stop()
                server.cleanup()
                raise RuntimeError("Failed to run test " + self.name + where)
Example #4
    def run(self, server):
        """Execute the test assuming it's a python program.
        If the test aborts, print its output to stdout, and raise
        an exception. Else, comprare result and reject files.
        If there is a difference, print it to stdout and raise an
        exception. The exception is raised only if is_force flag is
        not set."""
        diagnostics = "unknown"
        save_stdout = sys.stdout
        try:
            self.skip = False
            if os.path.exists(self.skip_cond):
                sys.stdout = FilteredStream(self.tmp_result)
                stdout_fileno = sys.stdout.stream.fileno()
                execfile(self.skip_cond, dict(locals(), **server.__dict__))
                sys.stdout.close()
                sys.stdout = save_stdout
            if not self.skip:
                sys.stdout = FilteredStream(self.tmp_result)
                stdout_fileno = sys.stdout.stream.fileno()
                self.execute(server)
                sys.stdout.flush()
            self.is_executed_ok = True
        except Exception as e:
            traceback.print_exc()
            diagnostics = str(e)
        finally:
            if sys.stdout and sys.stdout != save_stdout:
                sys.stdout.close()
            sys.stdout = save_stdout
        self.is_executed = True
        sys.stdout.flush()

        if not self.skip:
            if self.is_executed_ok and os.path.isfile(self.result):
                self.is_equal_result = filecmp.cmp(self.result, self.tmp_result)
        else:
            self.is_equal_result = True

        if self.args.valgrind:
            self.is_valgrind_clean = not check_valgrind_log(server.valgrind_log)

        if self.skip:
            color_stdout("[ skip ]\n", schema='test_skip')
            if os.path.exists(self.tmp_result):
                os.remove(self.tmp_result)
        elif self.is_executed_ok and self.is_equal_result and self.is_valgrind_clean:
            color_stdout("[ pass ]\n", schema='test_pass')
            if os.path.exists(self.tmp_result):
                os.remove(self.tmp_result)
        elif (self.is_executed_ok and not self.is_equal_result
              and not os.path.isfile(self.result)):
            os.rename(self.tmp_result, self.result)
            color_stdout("[ new ]\n", schema='test_new')
        else:
            os.rename(self.tmp_result, self.reject)
            color_stdout("[ fail ]\n", schema='test_fail')

            where = ""
            if not self.is_executed_ok:
                self.print_diagnostics(
                    self.reject,
                    "Test failed! Last 10 lines of the result file:\n")
                server.print_log(15)
                where = ": test execution aborted, reason '{0}'".format(diagnostics)
            elif not self.is_equal_result:
                self.print_unidiff()
                server.print_log(15)
                where = ": wrong test output"
            elif not self.is_valgrind_clean:
                os.remove(self.reject)
                self.print_diagnostics(
                    server.valgrind_log,
                    "Test failed! Last 10 lines of valgrind.log:\n")
                where = ": there were warnings in valgrind.log"

            if not self.args.is_force:
                raise RuntimeError("Failed to run test " + self.name + where)
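
Both run() variants capture test output the same way: sys.stdout is temporarily pointed at a stream that writes into a temporary result file and is restored in a finally block even if the test raises. Here is a simplified, self-contained sketch of that pattern, using a plain file object instead of the framework's FilteredStream and a hypothetical file name.

import sys


def capture_to(tmp_result_path, body):
    """Run body() with everything it prints redirected into tmp_result_path."""
    save_stdout = sys.stdout
    sys.stdout = open(tmp_result_path, "w")
    try:
        body()
        sys.stdout.flush()
    finally:
        if sys.stdout is not save_stdout:
            sys.stdout.close()
        sys.stdout = save_stdout


def body():
    sys.stdout.write("this line goes to the temporary result file\n")

capture_to("example.tmp_result", body)
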
Example #5
    def run_all(self):
        """For each file in the test suite, run client program
        assuming each file represents an individual test."""
        if not self.tests:
            # nothing to test, exit
            return []
        # fixme: remove this line once all legacy tests are fixed
        self.server.cls = self.tests[0].__class__
        # create inspector daemon for cluster tests
        inspector = TarantoolInspector('localhost', self.server.inspector_port)
        inspector.start()
        self.server.deploy(silent=False)

        longsep = '=' * 80
        shortsep = '-' * 75
        color_stdout(longsep, "\n", schema='separator')
        color_stdout("TEST".ljust(48), schema='t_name')
        color_stdout("PARAMS\t\t", schema='test_var')
        color_stdout("RESULT\n", schema='test_pass')
        color_stdout(shortsep, "\n", schema='separator')
        failed_tests = []
        try:
            for test in self.tests:
                test.inspector = inspector
                color_stdout(os.path.join(self.ini['suite'],
                                          os.path.basename(
                                              test.name)).ljust(48),
                             schema='t_name')
                # for better diagnostics in case of a long-running test

                conf = ''
                if test.run_params:
                    conf = test.conf_name
                color_stdout("%s" % conf.ljust(16), schema='test_var')
                test_name = os.path.basename(test.name)
                if (test_name in self.ini["disabled"] or not self.server.debug
                        and test_name in self.ini["release_disabled"]
                        or self.args.valgrind
                        and test_name in self.ini["valgrind_disabled"]
                        or not self.args.long
                        and test_name in self.ini.get("long_run", [])):
                    color_stdout("[ disabled ]\n", schema='t_name')
                else:
                    test.run(self.server)
                    if not test.passed():
                        failed_tests.append(test.name)
            color_stdout(shortsep, "\n", schema='separator')
            self.server.stop(silent=False)
            # don't delete core files or state of the data dir
            # in case of exception, which is raised when the
            # server crashes
            inspector.stop()
            self.server.cleanup()
        except KeyboardInterrupt:
            color_stdout("\n%s\n" % shortsep, schema='separator')
            self.server.stop(silent=False)
            raise

        if failed_tests:
            color_stdout("Failed {0} tests: {1}.\n".format(
                len(failed_tests), ", ".join(failed_tests)),
                         schema='error')

        if self.args.valgrind and check_valgrind_log(self.server.valgrind_log):
            color_stdout(shortsep, "\n", schema='separator')
            color_stdout(
                "  Error! There were warnings/errors in valgrind log file:\n",
                schema='error')
            print_tail_n(self.server.valgrind_log, 20)
            color_stdout(shortsep, "\n", schema='separator')
            return ['valgrind error in ' + self.suite_path]
        return failed_tests
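
When the valgrind log contains warnings, the runners above print its tail with print_tail_n(self.server.valgrind_log, 20). The helper below is only a rough stand-in for that call, not the framework's own implementation.

import sys


def tail_n(path, n=20):
    """Print the last n lines of a file, roughly what print_tail_n() is used for."""
    with open(path) as f:
        for line in f.readlines()[-n:]:
            sys.stdout.write(line)


# Usage (hypothetical path):
# tail_n("var/valgrind.log", 20)
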