Code Example #1
File: test_runner.py Project: trentlapinski/zulip
def run_test(test: TestCase, result: TestResult) -> bool:
    failed = False
    test_method = get_test_method(test)

    if fast_tests_only() and is_known_slow_test(test_method):
        return failed

    test_name = full_test_name(test)

    bounce_key_prefix_for_testing(test_name)
    bounce_redis_key_prefix_for_testing(test_name)

    try:
        test._pre_setup()
    except Exception:
        result.addError(test, sys.exc_info())
        return True

    start_time = time.time()

    test(result)  # unittest will handle skipping, error, failure and success.

    delay = time.time() - start_time
    enforce_timely_test_completion(test_method, test_name, delay, result)
    slowness_reason = getattr(test_method, 'slowness_reason', '')
    TEST_TIMINGS.append((delay, test_name, slowness_reason))

    test._post_teardown()
    return failed
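A note on the addError() calls in these snippets: unittest.TestResult.addError expects the test plus an exc_info triple (type, value, traceback), which is why the code passes sys.exc_info() or hand-builds a (Exception, Exception(msg), None) tuple. A minimal self-contained sketch of that contract (the Demo class is hypothetical):

import sys
import unittest

class Demo(unittest.TestCase):
    def test_noop(self) -> None:
        pass

result = unittest.TestResult()
try:
    raise RuntimeError("simulated _pre_setup failure")
except RuntimeError:
    # addError takes (test, exc_info) where exc_info is (type, value, traceback)
    result.addError(Demo("test_noop"), sys.exc_info())
print(len(result.errors))  # 1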
Code Example #2
def run_test(test: TestCase, result: TestResult) -> bool:
    failed = False
    test_method = get_test_method(test)

    if fast_tests_only() and is_known_slow_test(test_method):
        return failed

    test_name = full_test_name(test)

    bounce_key_prefix_for_testing(test_name)
    bounce_redis_key_prefix_for_testing(test_name)

    flush_caches_for_testing()

    if not hasattr(test, "_pre_setup"):
        msg = "Test doesn't have _pre_setup; something is wrong."
        error_pre_setup = (Exception, Exception(msg), None)  # type: Tuple[Any, Any, Any]
        result.addError(test, error_pre_setup)
        return True
    test._pre_setup()

    start_time = time.time()

    test(result)  # unittest will handle skipping, error, failure and success.

    delay = time.time() - start_time
    enforce_timely_test_completion(test_method, test_name, delay, result)
    slowness_reason = getattr(test_method, 'slowness_reason', '')
    TEST_TIMINGS.append((delay, test_name, slowness_reason))

    test._post_teardown()
    return failed
Code Example #3
File: test_runner.py Project: joydeep1701/zulip
def run_test(test: TestCase, result: TestResult) -> bool:
    failed = False
    test_method = get_test_method(test)

    if fast_tests_only() and is_known_slow_test(test_method):
        return failed

    test_name = full_test_name(test)

    bounce_key_prefix_for_testing(test_name)
    bounce_redis_key_prefix_for_testing(test_name)

    flush_caches_for_testing()

    if not hasattr(test, "_pre_setup"):
        msg = "Test doesn't have _pre_setup; something is wrong."
        error_pre_setup = (Exception, Exception(msg), None)  # type: Tuple[Any, Any, Any]
        result.addError(test, error_pre_setup)
        return True
    test._pre_setup()

    start_time = time.time()

    test(result)  # unittest will handle skipping, error, failure and success.

    delay = time.time() - start_time
    enforce_timely_test_completion(test_method, test_name, delay, result)
    slowness_reason = getattr(test_method, 'slowness_reason', '')
    TEST_TIMINGS.append((delay, test_name, slowness_reason))

    test._post_teardown()
    return failed
Code Example #4
    def _execute_test(cls, test: FirmwareTestClass, router: Router, routers: List[Router]) -> TestResult:
        if not isinstance(router, Router):
            raise ValueError("Chosen Router is not a real Router...")
        # proofed: this method runs in a different process than the server
        setproctitle(str(router.id) + " - " + str(test))
        logging.debug("%sExecute test " + str(test) + " on Router(" + str(router.id) + ")", LoggerSetup.get_log_deep(2))

        test_suite = defaultTestLoader.loadTestsFromTestCase(test)

        # prepare all test cases
        for test_case in test_suite:
            logging.debug("%sTestCase " + str(test_case), LoggerSetup.get_log_deep(4))
            test_case.prepare(router, routers)

        result = TestResult()

        cls.__setns(router)
        try:
            result = test_suite.run(result)
        except Exception as e:
            logging.error("%sTestCase execution raised an exception", LoggerSetup.get_log_deep(3))
            logging.error("%s" + str(e), LoggerSetup.get_log_deep(3))

            test_obj = test()
            result.addError(test_obj, sys.exc_info())  # add the reason of the exception
        finally:
            # I'm sorry for this dirty hack, but if you don't do this you get a
            # "TypeError: cannot serialize '_io.TextIOWrapper' object" because sys.stdout is not serializable...
            result._original_stdout = None
            result._original_stderr = None

            logging.debug("%sResult from test " + str(result), LoggerSetup.get_log_deep(3))
            return result
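The "dirty hack" in the finally block above reflects a real constraint: unittest.TestResult.__init__ stores sys.stdout and sys.stderr in _original_stdout/_original_stderr, and those TextIOWrapper objects cannot be pickled, so the result cannot cross a multiprocessing boundary until the references are cleared. A minimal sketch of the failure and the fix:

import pickle
from unittest import TestResult

result = TestResult()
try:
    pickle.dumps(result)  # fails: _original_stdout still holds sys.stdout
except TypeError as e:
    print(e)  # e.g. cannot pickle '_io.TextIOWrapper' object

result._original_stdout = None
result._original_stderr = None
pickle.dumps(result)  # succeeds once the stream references are gone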
Code Example #5
File: test.py Project: pombredanne/genutility
    def addSubTest(self, test, subtest, outcome):
        """ Called when a subtest finishes. """

        TestResult.addSubTest(self, test, subtest, outcome)
        self.testsRun += 1
        self.current_subtest += 1
        self.progress()
Code Example #6
File: test_runner.py Project: xaoei/zulip
 def startTest(self, test: TestCase) -> None:
     TestResult.startTest(self, test)
     self.stream.writeln("Running {}".format(full_test_name(test)))  # type: ignore # https://github.com/python/typeshed/issues/3139
     self.stream.flush()  # type: ignore # https://github.com/python/typeshed/issues/3139
Code Example #7
 def addSkip(self, test: TestCase, reason: str) -> None:
     TestResult.addSkip(self, test, reason)
     self.stream.writeln("** Skipping {}: {}".format(test.id(), reason))  # type: ignore[attr-defined] # https://github.com/python/typeshed/issues/3139
     self.stream.flush()
Code Example #8
File: test_runner.py Project: zackw/zulip
 def startTest(self, test: TestCase) -> None:
     TestResult.startTest(self, test)
     self.stream.writeln(f"Running {full_test_name(test)}")  # type: ignore[attr-defined] # https://github.com/python/typeshed/issues/3139
     self.stream.flush()  # type: ignore[attr-defined] # https://github.com/python/typeshed/issues/3139
Code Example #9
File: result.py Project: Ozahata/pddoctest
 def addSuccess(self, test):
     self.result["success_count"] += 1
     self.result["class_list"][self.class_name]["success_count"] += 1
     self.result["class_list"][self.class_name]["methods"][self.method_name]\
         ["status"] = SUCCESS
     TestResult.addSuccess(self, test)
     self.result["class_list"][self.class_name]["methods"][self.method_name]\
         ["message"] = self.silence_output()
Code Example #10
File: test_runner.py Project: xaoei/zulip
 def addSkip(self, test: TestCase, reason: str) -> None:
     TestResult.addSkip(self, test, reason)
     self.stream.writeln("** Skipping {}: {}".format(full_test_name(test), reason))  # type: ignore # https://github.com/python/typeshed/issues/3139
     self.stream.flush()  # type: ignore # https://github.com/python/typeshed/issues/3139
Code Example #11
File: result.py Project: Ozahata/pddoctest
 def addUnexpectedSuccess(self, test):
     self.result["unexpected_success_count"] += 1
     self.result["class_list"][self.class_name]["unexpected_success_count"] \
         += 1
     self.result["class_list"][self.class_name]["methods"][self.method_name]\
         ["status"] = UNEXPECTED_SUCCESS
     TestResult.addUnexpectedSuccess(self, test)
     self.result["class_list"][self.class_name]["methods"][self.method_name]\
         ["message"] = self.silence_output()
Code Example #12
 def addSuccess(self, test: TestCase):
     TestResult.addSuccess(self, test)
     self.stream.write("PASSED")
     self.stream.writeln()
     execution_time = self.__get_test_execution_time()
     self.__passed_test.append(
         TestResultInfo(test=test,
                        execution_time=execution_time,
                        status_id=TestStatus.PASS))
Code Example #13
 def addUnexpectedSuccess(self, test):
     TestResult.addUnexpectedSuccess(self, test)
     self.stream.write("UNEXPECTED SUCCESS")
     self.stream.writeln()
     execution_time = self.__get_test_execution_time()
     self.__unexpected_successes_test.append(
         TestResultInfo(test=test,
                        execution_time=execution_time,
                        status_id=TestStatus.UNEXPECTED_SUCCESS))
Code Example #14
def enforce_timely_test_completion(test_method: Any, test_name: str,
                                   delay: float, result: TestResult) -> None:
    if hasattr(test_method, 'slowness_reason'):
        max_delay = 2.0  # seconds
    else:
        max_delay = 0.4  # seconds

    if delay > max_delay:
        msg = '** Test is TOO slow: %s (%.3f s)\n' % (test_name, delay)
        result.addInfo(test_method, msg)
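Note that addInfo() is not part of the standard unittest.TestResult API; in the Zulip-derived snippets here it comes from the project's custom result class. A hypothetical sketch of such an extension, assuming it only writes an informational message to the runner's stream:

from unittest.runner import TextTestResult

class InfoTestResult(TextTestResult):
    def addInfo(self, test, msg: str) -> None:
        # report progress without recording a pass/fail outcome
        self.stream.write(msg)
        self.stream.flush()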
Code Example #15
File: result.py Project: Ozahata/pddoctest
 def addError(self, test, err):
     self.result["error_count"] += 1
     self.result["class_list"][self.class_name]["error_count"] += 1
     self.result["class_list"][self.class_name]["methods"][self.method_name]\
         ["status"] = ERROR
     TestResult.addError(self, test, err)
     self.result["class_list"][self.class_name]["methods"][self.method_name]\
         ["message"] = self.silence_output()
     self.result["class_list"][self.class_name]["methods"][self.method_name]\
         ["error"] = self.errors[-1][1]
Code Example #16
File: result.py Project: Ozahata/pddoctest
 def addFailure(self, test, err):
     self.result["failure_count"] += 1
     self.result["class_list"][self.class_name]["failure_count"] += 1
     self.result["class_list"][self.class_name]["methods"][self.method_name]\
         ["status"] = FAILURE
     TestResult.addFailure(self, test, err)
     self.result["class_list"][self.class_name]["methods"][self.method_name]\
         ["message"] = self.silence_output()
     self.result["class_list"][self.class_name]["methods"][self.method_name]\
         ["error"] = self.failures[-1][1]
Code Example #17
File: test_runner.py Project: joydeep1701/zulip
def enforce_timely_test_completion(test_method: Any, test_name: str,
                                   delay: float, result: TestResult) -> None:
    if hasattr(test_method, 'slowness_reason'):
        max_delay = 2.0  # seconds
    else:
        max_delay = 0.4  # seconds

    if delay > max_delay:
        msg = '** Test is TOO slow: %s (%.3f s)\n' % (test_name, delay)
        result.addInfo(test_method, msg)
Code Example #18
File: result.py Project: Ozahata/pddoctest
 def addSkip(self, test, reason):
     self.result["skip_count"] += 1
     self.result["class_list"][self.class_name]["skip_count"] += 1
     self.result["class_list"][self.class_name]["methods"][self.method_name]\
         ["status"] = SKIP
     TestResult.addSkip(self, test, reason)
     self.result["class_list"][self.class_name]["methods"][self.method_name]\
         ["message"] = self.silence_output()
     self.result["class_list"][self.class_name]["methods"][self.method_name]\
         ["error"] = reason
Code Example #19
 def addSkip(self, test: TestCase, reason):
     TestResult.addSkip(self, test, reason)
     self.stream.write("SKIPPED")
     self.stream.write(" ... reason: " + str(reason))
     self.stream.writeln()
     execution_time = self.__get_test_execution_time()
     self.__skipped_test.append(
         TestResultInfo(test=test,
                        execution_time=execution_time,
                        status_id=TestStatus.SKIP,
                        error_msg=reason))
Code Example #20
 def addFailure(self, test: TestCase, err):
     TestResult.addFailure(self, test, err)
     self.stream.write("FAILED")
     self.stream.write(" ... msg: " + str(err[1]))
     self.stream.writeln()
     execution_time = self.__get_test_execution_time()
     self.__failed_test.append(
         TestResultInfo(
             test=test,
             execution_time=execution_time,
             status_id=TestStatus.FAIL,
             error_msg=err,
             custom_msg=self.__get_api_payload_gui_screenshot(test)))
Code Example #21
    def _wait_for_test_done(cls, test: FirmwareTestClass, router: Router, done_event: DoneEvent) -> None:
        """
        Wait up to 2 minutes for the test to finish.
        Handles the result from the tests.
        Triggers the next job/test.

        :param test: test to execute
        :param router: the Router
        :param done_event: event which will be triggered when the task is finished
        """
        logging.debug("%sWait for test" + str(test), LoggerSetup.get_log_deep(2))
        try:
            async_result = cls._task_pool.apply_async(func=cls._execute_test, args=(test, router, cls._routers))
            result = async_result.get(120)  # wait 2 minutes or raise a TimeoutError
            logging.debug("%sTest done " + str(test), LoggerSetup.get_log_deep(1))
            logging.debug("%sFrom Router(" + str(router.id) + ")", LoggerSetup.get_log_deep(2))

            cls._test_results.append((router.id, str(test), result))

            try:
                length = len(cls._test_results)
                t = cls._test_results[(length - 1)]
                cls.write_in_db(str(length), t)
            except Exception as e:
                logging.error("Error at write test results into DB: {0}".format(e))

        except Exception as e:
            logging.error("%sTest raised an Exception: " + str(e), LoggerSetup.get_log_deep(1))

            result = TestResult()
            result._original_stdout = None
            result._original_stderr = None

            cls._test_results.append((router.id, str(test), result))
            cls._task_errors.append((router.id, sys.exc_info()))

            try:
                length = len(cls._test_results)
                t = cls._test_results[(length - 1)]
                cls.write_in_db(str(length), t)
            except Exception as e:
                logging.error("Error at write test results into DB: {0}".format(e))

        finally:
            cls.set_running_task(router, None)
            # logging.debug(str(cls._test_results))
            # start next test in the queue
            done_event.set()
            cls.__start_task(router, None)
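The timeout pattern above is plain multiprocessing: AsyncResult.get(timeout) raises multiprocessing.TimeoutError when the worker does not finish in time, which the except clause then converts into an empty TestResult. A minimal sketch of just that mechanism (slow_test is a stand-in):

import time
from multiprocessing import Pool, TimeoutError

def slow_test() -> str:
    time.sleep(10)
    return "done"

if __name__ == "__main__":
    with Pool(processes=1) as pool:
        async_result = pool.apply_async(slow_test)
        try:
            async_result.get(1)  # wait 1 second or raise a TimeoutError
        except TimeoutError:
            print("worker did not finish in time")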
Code Example #22
    def __init__(self, stream=None, descriptions=None,
                 outputs=DEFAULT_OUTPUTS, main_test=None):

        TestResult.__init__(self, stream, descriptions)

        self.main_test = main_test

        all_result_handlers = get_result_handlers()

        self.result_handlers = [
            all_result_handlers[result_handler_name](
                stream=stream,
                main_test=main_test,
                descriptions=descriptions)
            for result_handler_name in outputs
        ]
Code Example #23
File: test_suite.py Project: seungha-yang/fluster
    def _run_worker(self, test: Test) -> TestVector:
        '''Run one unit test returning the TestVector'''
        # Save the original module and qualname to restore it before returning
        # the TestVector. Otherwise, Pickle will complain if the classes can't
        # be found in global scope. The trick here is that we change the names
        # momentarily just to obtain the error traces in str format
        test_cls = type(test)
        module_orig = test_cls.__module__
        qualname_orig = test_cls.__qualname__
        self._rename_test(test, test.decoder.name, test.test_suite.name)

        test_result = TestResult()
        test(test_result)

        line = '.'
        if test_result.failures:
            line = 'F'
        elif test_result.errors:
            line = 'E'
        print(line, end='', flush=True)

        self._collect_results(test_result)
        self._rename_test(test, module_orig, qualname_orig)

        return test.test_vector
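The test(test_result) line works because a TestCase instance is callable: calling it is equivalent to test.run(test_result), and the outcomes land on the passed-in result. A minimal sketch (the Failing class is hypothetical):

import unittest

class Failing(unittest.TestCase):
    def test_fails(self) -> None:
        self.assertEqual(1, 2)

result = unittest.TestResult()
Failing("test_fails")(result)  # same as .run(result)
print(len(result.failures))  # 1; each entry is (test, traceback string)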
Code Example #24
File: result.py Project: Ozahata/pddoctest
 def startTest(self, test):
     """
     When the test starts, we redirect the output so it is not shown on the screen
     """
     TestResult.startTest(self, test)
     # Start a new output text result
     self.output = StringIO()
     self._stdout.fp = self.output
     self._stderr.fp = self.output
     # Conserve the original output
     self.org_stderr = sys.stderr
     self.org_stdout = sys.stdout
     sys.stdout = self._stdout
     sys.stderr = self._stderr
     # Define the structure
     self.class_name = test.__class__.__name__
     self.method_name = test._testMethodName
     if self.class_name not in self.result["class_list"].keys():
         class_doc = [] if test.__doc__ is None else \
                     [ item.strip() for item in test.__doc__.splitlines() \
                       if item.strip() != "" ]
         self.result["class_list"][self.class_name] = {
                                    "module": test.__module__,
                                    "description": class_doc,
                                    "success_count": 0,
                                    "error_count": 0,
                                    "failure_count": 0,
                                    "skip_count": 0,
                                    "expected_failure_count": 0,
                                    "unexpected_success_count": 0,
                                    "methods": {}
                                    }
     if self.method_name not in \
         self.result["class_list"][self.class_name]["methods"].keys():
         method_doc = [""] if test._testMethodDoc is None else \
                     [ item.strip() for item in \
                      test._testMethodDoc.splitlines() if item.strip() != ""
                      ]
         self.result["class_list"][self.class_name]["methods"][self.method_name] = \
                 {
                  "started": datetime.now(),
                  "status": None,
                  "stopped": None,
                  "message": "",
                  "error": None,
                  "description": method_doc
                  }
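For comparison, the standard library has a built-in version of this redirection: setting TestResult.buffer = True makes startTest/stopTest swap sys.stdout and sys.stderr for StringIO buffers, attaching the captured output only to failure and error reports. A minimal sketch:

import unittest

class Noisy(unittest.TestCase):
    def test_prints(self) -> None:
        print("captured, not shown on screen")

result = unittest.TestResult()
result.buffer = True  # stdlib counterpart of the manual redirection above
Noisy("test_prints")(result)
print(result.testsRun)  # 1; the print output was buffered and discarded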
Code Example #25
File: test_runner.py Project: zerojuls/zulip
def run_test(test: TestCase, result: TestResult) -> bool:
    failed = False
    test_method = get_test_method(test)

    if fast_tests_only() and is_known_slow_test(test_method):
        return failed

    test_name = full_test_name(test)

    bounce_key_prefix_for_testing(test_name)
    bounce_redis_key_prefix_for_testing(test_name)

    flush_caches_for_testing()

    if not hasattr(test, "_pre_setup"):
        # We are supposed to get here only when running a single test suite
        # on Python 3.5 or higher (the old import failure prefix is being
        # checked just in case). When running several test suites at once,
        # all import failures should be caught in deserialize_suite.
        import_failure_prefix_old = 'unittest.loader.ModuleImportFailure.'
        import_failure_prefix_new = 'unittest.loader._FailedTest.'
        if test_name.startswith(import_failure_prefix_old):
            actual_test_name = test_name[len(import_failure_prefix_old):]
            raise TestSuiteImportError(test_name=actual_test_name)

        elif test_name.startswith(import_failure_prefix_new):
            actual_test_name = test_name[len(import_failure_prefix_new):]
            raise TestSuiteImportError(test_name=actual_test_name)
        else:
            msg = "Test doesn't have _pre_setup; something is wrong."
            error_pre_setup = (Exception, Exception(msg), None)  # type: Tuple[Any, Any, Any]
            result.addError(test, error_pre_setup)
            return True
    test._pre_setup()

    start_time = time.time()

    test(result)  # unittest will handle skipping, error, failure and success.

    delay = time.time() - start_time
    enforce_timely_test_completion(test_method, test_name, delay, result)
    slowness_reason = getattr(test_method, 'slowness_reason', '')
    TEST_TIMINGS.append((delay, test_name, slowness_reason))

    test._post_teardown()
    return failed
Code Example #26
File: result.py Project: Ozahata/pddoctest
 def __init__(self, stream=None, descriptions=None, verbosity=None):
     self.result = {
                    "success_count": 0,
                    "error_count": 0,
                    "failure_count": 0,
                    "skip_count": 0,
                    "expected_failure_count": 0,
                    "unexpected_success_count": 0,
                    "class_list": {},
                    }
     TestResult.__init__(self, stream=stream, descriptions=descriptions,
                         verbosity=verbosity)
     self._stderr = StandardDevice(sys.stderr)
     self._stdout = StandardDevice(sys.stdout)
     self.org_stderr = None
     self.org_stdout = None
     self.stream = stream
     self.verbosity = verbosity
Code Example #27
    def run(self, result: TestResult, debug: Optional[bool] = False) -> TestResult:
        """
        This function mostly contains the code from
        unittest.TestSuite.run. The need to override this function
        occurred because we use run_test to run the testcase.
        """
        topLevel = False
        if getattr(result, '_testRunEntered', False) is False:
            result._testRunEntered = topLevel = True  # type: ignore[attr-defined]

        for test in self:
            # but this is correct. Taken from unittest.
            if result.shouldStop:
                break

            if isinstance(test, TestSuite):
                test.run(result, debug=debug)
            else:
                self._tearDownPreviousClass(test, result)  # type: ignore[attr-defined]
                self._handleModuleFixture(test, result)  # type: ignore[attr-defined]
                self._handleClassSetUp(test, result)  # type: ignore[attr-defined]
                result._previousTestClass = test.__class__  # type: ignore[attr-defined]
                if (getattr(test.__class__, '_classSetupFailed', False)
                        or getattr(result, '_moduleSetUpFailed', False)):
                    continue

                failed = run_test(test, result)
                if failed or result.shouldStop:
                    result.shouldStop = True
                    break

        if topLevel:
            self._tearDownPreviousClass(None, result)  # type: ignore[attr-defined]
            self._handleModuleTearDown(result)  # type: ignore[attr-defined]
            result._testRunEntered = False  # type: ignore[attr-defined]
        return result
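For reference, the stock behavior this override re-implements around run_test() looks like the following: a suite loaded from a TestCase is run against a TestResult, which collects the outcomes (the Demo class is hypothetical):

import unittest

class Demo(unittest.TestCase):
    def test_passes(self) -> None:
        self.assertTrue(True)

suite = unittest.TestLoader().loadTestsFromTestCase(Demo)
result = unittest.TestResult()
suite.run(result)  # TestSuite.run drives startTest/addSuccess/... on the result
print(result.testsRun, result.wasSuccessful())  # 1 True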
Code Example #28
File: server.py Project: haggi/TestFramework
    def _wait_for_test_done(cls, test: FirmwareTestClass, router: Router, done_event: DoneEvent) -> None:
        """
        Wait up to 5 minutes for the test to finish.
        Handles the result from the tests.
        Triggers the next job/test.

        :param test: test to execute
        :param router: the Router
        :param done_event: event which will be triggered when the task is finished
        """
        logging.debug("%sWait for test" + str(test),
                      LoggerSetup.get_log_deep(2))
        try:
            async_result = cls._task_pool.apply_async(func=cls._execute_test,
                                                      args=(test, router))
            result = async_result.get(
                300)  # wait 5 minutes or raise an TimeoutError
            logging.debug("%sTest done " + str(test),
                          LoggerSetup.get_log_deep(1))
            logging.debug("%sFrom " + str(router), LoggerSetup.get_log_deep(2))

            cls._test_results.append((router.id, str(test), result))
        except Exception as e:
            # TODO #105
            logging.error("%sTest raised an Exception: " + str(e),
                          LoggerSetup.get_log_deep(1))

            result = TestResult()
            result._original_stdout = None
            result._original_stderr = None
            # result.addError(None, (type(exception), exception, None))
            # TODO exception handling for failed Tests

            cls._test_results.append((router.id, str(test), result))

        finally:
            cls.set_running_task(router, None)
            # logging.debug(str(cls._test_results))
            # start next test in the queue
            done_event.set()
            cls.__start_task(router, None)
Code Example #29
File: server.py Project: haggi/TestFramework
    def _execute_test(cls, test: FirmwareTestClass, router: Router) -> TestResult:
        if not isinstance(router, Router):
            raise ValueError("Chosen Router is not a real Router...")
        # proofed: this method runs in a different process than the server
        logging.debug("%sExecute test " + str(test) + " on " + str(router), LoggerSetup.get_log_deep(2))

        test_suite = defaultTestLoader.loadTestsFromTestCase(test)

        # prepare all test cases
        for test_case in test_suite:
            logging.debug("%sTestCase " + str(test_case), LoggerSetup.get_log_deep(4))
            test_case.prepare(router)

        result = TestResult()

        cls.__setns(router)
        try:
            result = test_suite.run(result)  # TODO if debug set, run as debug()
        except Exception as e:
            logging.error("%sTestCase raised an exception", LoggerSetup.get_log_deep(3))
            logging.error("%s" + str(e), LoggerSetup.get_log_deep(3))
        finally:
            # I'm sorry for this dirty hack, but if you don't do this you get a
            # "TypeError: cannot serialize '_io.TextIOWrapper' object" because sys.stdout is not serializable...
            result._original_stdout = None
            result._original_stderr = None

            logging.debug("%sResult from test " + str(result), LoggerSetup.get_log_deep(3))
            return result
Code Example #30
File: test_runner.py Project: joydeep1701/zulip
    def run(self, result: TestResult, debug: Optional[bool] = False) -> TestResult:
        """
        This function mostly contains the code from
        unittest.TestSuite.run. The need to override this function
        occurred because we use run_test to run the testcase.
        """
        topLevel = False
        if getattr(result, '_testRunEntered', False) is False:
            result._testRunEntered = topLevel = True

        for test in self:
            # but this is correct. Taken from unittest.
            if result.shouldStop:
                break

            if isinstance(test, TestSuite):
                test.run(result, debug=debug)
            else:
                self._tearDownPreviousClass(test, result)  # type: ignore
                self._handleModuleFixture(test, result)  # type: ignore
                self._handleClassSetUp(test, result)  # type: ignore
                result._previousTestClass = test.__class__
                if (getattr(test.__class__, '_classSetupFailed', False) or
                        getattr(result, '_moduleSetUpFailed', False)):
                    continue

                failed = run_test(test, result)
                if failed or result.shouldStop:
                    result.shouldStop = True
                    break

        if topLevel:
            self._tearDownPreviousClass(None, result)  # type: ignore
            self._handleModuleTearDown(result)  # type: ignore
            result._testRunEntered = False
        return result
Code Example #31
 def addError(self, *args: Any, **kwargs: Any) -> None:
     TestResult.addError(self, *args, **kwargs)
     test_name = full_test_name(args[0])
     self.failed_tests.append(test_name)
Code Example #32
    def start(cls, config_path: str = CONFIG_PATH) -> None:
        """
        Starts the runtime server with all components.

        :param config_path: Path to an alternative config directory
        """

        # server has to be run with root rights - except on travis CI
        if not os.geteuid() == 0 and not os.environ.get('TRAVIS'):
            sys.exit('Script must be run as root')

        cls._stopped = Lock()
        signal.signal(signal.SIGTERM, cls._signal_term_handler)

        cls.CONFIG_PATH = config_path
        # set the config_path at the manager
        ConfigManager.set_config_path(config_path)

        # read from config the Vlan mode
        vlan_activate = ConfigManager.get_server_property("Vlan_On")
        cls.VLAN = vlan_activate

        # read from config if debug mode is on
        log_level = int(ConfigManager.get_server_property("Log_Level"))
        debug_mode = False
        if log_level == 10:
            debug_mode = True
        cls.DEBUG = debug_mode

        setproctitle("fftserver")

        cls._server_stop_event = Event()

        cls._pid = os.getpid()

        # create instance and give params to the logger object
        LoggerSetup.setup(log_level)

        # load Router configs
        cls.__load_configuration()

        for router in cls.get_routers():
            cls._running_task.append(None)
            cls._waiting_tasks.append(deque())

        # start process/thread pool for job and test handling
        cls._max_subprocesses = (len(cls._routers) + 1)  # plus one for the power strip
        cls._task_pool = Pool(processes=cls._max_subprocesses, initializer=init_process,
                              initargs=(cls._server_stop_event,), maxtasksperchild=1)
        cls._task_wait_executor = ThreadPoolExecutor(max_workers=(cls._max_subprocesses * 2))

        # start thread for multiprocess stop wait
        t = threading.Thread(target=cls._close_wait)
        t.start()

        # add Namespace and Vlan for each Router
        if cls.VLAN:
            cls._nv_assistent = NVAssistent("eth0")

            for router in cls.get_routers():
                logging.debug("Add Namespace and Vlan for Router(" + str(router.id) + ")")
                cls._nv_assistent.create_namespace_vlan(router)

            # add Namespace and Vlan for 1 Powerstrip (expand to more if necessary)
            logging.debug("Add Namespace and Vlan for Powerstrip")
            cls._nv_assistent.create_namespace_vlan(cls.get_power_strip())

            # update Router
            cls.router_online(None, update_all=True, blocked=True)
            cls.update_router_info(None, update_all=True)

        # open database and read old test results
        try:
            with shelve.open('test_results', 'c') as db:
                # read test values
                key_list = db.keys()
                for k in key_list:
                    t = TestResult()
                    dbt = db[str(k)]
                    t.failures = dbt.failures
                    t.errors = dbt.errors
                    t.testsRun = dbt.testsRun
                    t._original_stdout = None
                    t._original_stderr = None
                    cls._test_results.append((dbt.router_id, dbt.test_name, t))
        except Exception as e:
            logging.error("Error at read test results from DB: {0}".format(e))

        logging.info("Runtime Server started")

        try:
            cls._ipc_server.start_ipc_server(cls, True)  # serves forever - works like a while(true)
        except (KeyboardInterrupt, SystemExit):
            logging.info("Received an interrupt signal")
            cls.stop()
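The database block above relies on shelve pickling its values, which is why the stored results must have had their stream references cleared first. A minimal sketch of that round trip, assuming a sanitized TestResult (the 'test_results' path mirrors the snippet):

import shelve
from unittest import TestResult

result = TestResult()
result._original_stdout = None  # shelve pickles values, so the streams must go
result._original_stderr = None

with shelve.open('test_results', 'c') as db:
    db['1'] = result

with shelve.open('test_results', 'r') as db:
    restored = db['1']
    print(restored.testsRun)  # 0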
Code Example #33
File: test_runner.py Project: aakash-cr7/zulip
 def addError(self, *args, **kwargs):
     # type: (*Any, **Any) -> None
     TestResult.addError(self, *args, **kwargs)
Code Example #34
File: test_runner.py Project: christi3k/zulip
 def addSuccess(self, *args, **kwargs):
     # type: (*Any, **Any) -> None
     TestResult.addSuccess(self, *args, **kwargs)
Code Example #35
 def addFailure(self, *args, **kwargs):
     # type: (*Any, **Any) -> None
     TestResult.addFailure(self, *args, **kwargs)
     test_name = full_test_name(args[0])
     self.failed_tests.append(test_name)
Code Example #36
 def startTest(self, test):
     # type: (TestCase) -> None
     TestResult.startTest(self, test)
     self.stream.writeln("Running {}".format(full_test_name(test)))
     self.stream.flush()
Code Example #37
 def addSuccess(self, *args: Any, **kwargs: Any) -> None:
     TestResult.addSuccess(self, *args, **kwargs)
Code Example #38
File: test_runner.py Project: joydeep1701/zulip
 def addSkip(self, test: TestCase, reason: Text) -> None:
     TestResult.addSkip(self, test, reason)
     self.stream.writeln("** Skipping {}: {}".format(full_test_name(test),
                                                     reason))
     self.stream.flush()
Code Example #39
File: test_runner.py Project: joydeep1701/zulip
 def addSuccess(self, *args: Any, **kwargs: Any) -> None:
     TestResult.addSuccess(self, *args, **kwargs)
Code Example #40
File: test_runner.py Project: joydeep1701/zulip
 def addFailure(self, *args: Any, **kwargs: Any) -> None:
     TestResult.addFailure(self, *args, **kwargs)
     test_name = full_test_name(args[0])
     self.failed_tests.append(test_name)
Code Example #41
File: test_runner.py Project: joydeep1701/zulip
 def startTest(self, test: TestCase) -> None:
     TestResult.startTest(self, test)
     self.stream.writeln("Running {}".format(full_test_name(test)))
     self.stream.flush()
Code Example #42
File: test.py Project: gioman/qgis-tester-plugin
 def __init__(self):
     TestResult.__init__(self)
     self.err = None
Code Example #43
File: test.py Project: NaturalGIS/qgis-tester-plugin
 def __init__(self):
     TestResult.__init__(self)
     self.err = None
Code Example #44
 def addSkip(self, test: TestCase, reason: str) -> None:
     TestResult.addSkip(self, test, reason)
     self.stream.writeln("** Skipping {}: {}".format(
         full_test_name(test), reason))
     self.stream.flush()
Code Example #45
File: test_runner.py Project: aakash-cr7/zulip
 def addFailure(self, *args, **kwargs):
     # type: (*Any, **Any) -> None
     TestResult.addFailure(self, *args, **kwargs)
Code Example #46
File: result.py Project: Ozahata/pddoctest
 def stopTest(self, test):
     self.silence_output()
     self.result["class_list"][self.class_name]["methods"][self.method_name]\
         ["stopped"] = datetime.now()
     TestResult.stopTest(self, test)
Code Example #47
File: test_runner.py Project: HydraulicSheep/zulip
def run_test(test: TestCase, result: TestResult) -> bool:
    failed = False
    test_method = get_test_method(test)

    if fast_tests_only() and is_known_slow_test(test_method):
        return failed

    test_name = full_test_name(test)

    bounce_key_prefix_for_testing(test_name)
    bounce_redis_key_prefix_for_testing(test_name)

    flush_caches_for_testing()

    if not hasattr(test, "_pre_setup"):
        # test_name is likely of the form unittest.loader.ModuleImportFailure.zerver.tests.test_upload
        import_failure_prefix = 'unittest.loader.ModuleImportFailure.'
        if test_name.startswith(import_failure_prefix):
            actual_test_name = test_name[len(import_failure_prefix):]
            error_msg = ("\nActual test to be run is %s, but import failed.\n"
                         "Importing test module directly to generate clearer "
                         "traceback:\n") % (actual_test_name, )
            result.addInfo(test, error_msg)

            try:
                command = [
                    sys.executable, "-c",
                    "import %s" % (actual_test_name, )
                ]
                msg = "Import test command: `%s`" % (' '.join(command), )
                result.addInfo(test, msg)
                subprocess.check_call(command)
            except subprocess.CalledProcessError:
                msg = ("If that traceback is confusing, try doing the "
                       "import inside `./manage.py shell`")
                result.addInfo(test, msg)
                result.addError(test, sys.exc_info())
                return True

            msg = ("Import unexpectedly succeeded! Something is wrong. Try "
                   "running `import %s` inside `./manage.py shell`.\n"
                   "If that works, you may have introduced an import "
                   "cycle.") % (actual_test_name, )
            import_error = (Exception, Exception(msg), None)  # type: Tuple[Any, Any, Any]
            result.addError(test, import_error)
            return True
        else:
            msg = "Test doesn't have _pre_setup; something is wrong."
            error_pre_setup = (Exception, Exception(msg), None)  # type: Tuple[Any, Any, Any]
            result.addError(test, error_pre_setup)
            return True
    test._pre_setup()

    start_time = time.time()

    test(result)  # unittest will handle skipping, error, failure and success.

    delay = time.time() - start_time
    enforce_timely_test_completion(test_method, test_name, delay, result)
    slowness_reason = getattr(test_method, 'slowness_reason', '')
    TEST_TIMINGS.append((delay, test_name, slowness_reason))

    test._post_teardown()
    return failed
Code Example #48
 def addFailure(self, *args: Any, **kwargs: Any) -> None:
     TestResult.addFailure(self, *args, **kwargs)
     test_name = args[0].id()
     self.failed_tests.append(test_name)
Code Example #49
File: test_runner.py Project: kagonlineteam/zulip
 def startTest(self, test: TestCase) -> None:
     TestResult.startTest(self, test)
     self.stream.write(f"Running {test.id()}\n")
     self.stream.flush()
Code Example #50
 def addSuccess(self, *args, **kwargs):
     # type: (*Any, **Any) -> None
     TestResult.addSuccess(self, *args, **kwargs)
Code Example #51
File: test_runner.py Project: kagonlineteam/zulip
 def addSkip(self, test: TestCase, reason: str) -> None:
     TestResult.addSkip(self, test, reason)
     self.stream.write(f"** Skipping {test.id()}: {reason}\n")
     self.stream.flush()
Code Example #52
 def addSkip(self, test, reason):
     # type: (TestCase, Text) -> None
     TestResult.addSkip(self, test, reason)
     self.stream.writeln("** Skipping {}: {}".format(
         full_test_name(test), reason))
     self.stream.flush()
Code Example #53
File: test_runner.py Project: christi3k/zulip
 def addError(self, *args, **kwargs):
     # type: (*Any, **Any) -> None
     TestResult.addError(self, *args, **kwargs)
     test_name = full_test_name(args[0])
     self.failed_tests.append(test_name)