Example #1
0
    def run(self, test):
        """Run the given test case or suite and write a JSON report.

        Mirrors unittest.TextTestRunner.run for setup/teardown and timing,
        then records execution time and the total score in self.json_data
        and dumps it to self.stream.
        """
        result = self._makeResult()
        registerResult(result)
        result.failfast = self.failfast
        result.buffer = self.buffer
        startTime = time.time()
        startTestRun = getattr(result, 'startTestRun', None)
        if startTestRun is not None:
            startTestRun()
        try:
            test(result)
        finally:
            # Always notify the result that the run ended, even on error.
            stopTestRun = getattr(result, 'stopTestRun', None)
            if stopTestRun is not None:
                stopTestRun()
        stopTime = time.time()
        timeTaken = stopTime - startTime

        self.json_data["execution_time"] = format(timeTaken, "0.2f")

        # Fix: the original accumulation loop reused the name `test`,
        # shadowing the parameter; sum() over a distinct name avoids that.
        self.json_data["score"] = sum(
            entry["score"] for entry in self.json_data["tests"])

        json.dump(self.json_data, self.stream, indent=4)
        self.stream.write('\n')
        return result
Example #2
0
  def run(self, test):
    """Run the given test case or test suite.

    Copied and modified from unittest.TextTestRunner.run: same result
    setup and timing, but the summary line is rendered with ANSI colour
    escape codes.
    """

    result = self._makeResult()
    registerResult(result)
    result.failfast = self.failfast
    result.buffer = self.buffer
    start_time = time.time()
    # startTestRun/stopTestRun are optional on the result object.
    startTestRun = getattr(result, 'startTestRun', None)
    if startTestRun is not None:
      startTestRun()
    try:
      test(result)
    finally:
      # Fire the end-of-run hook even if the suite raised.
      stopTestRun = getattr(result, 'stopTestRun', None)
      if stopTestRun is not None:
        stopTestRun()
    stop_time = time.time()
    timeTaken = stop_time - start_time
    result.printErrors()
    # NOTE(review): the guard tests for separator2 but writes separator1;
    # looks intentional (heavier rule under the colour output) -- confirm
    # against the result class.
    if hasattr(result, 'separator2'):
      self.stream.writeln(result.separator1)
    run = result.testsRun
    self.stream.writeln(' \033[1mRan %d test%s in %.3fs' %
        (run, run != 1 and 's' or '', timeTaken))
    self.stream.writeln()

    # Older result classes lack these attributes; on AttributeError the
    # three counters simply stay at zero.
    expectedFails = unexpectedSuccesses = skipped = 0
    try:
      results = map(len, (result.expectedFailures, # pylint: disable=bad-builtin
        result.unexpectedSuccesses,
        result.skipped))
    except AttributeError:
      pass
    else:
      expectedFails, unexpectedSuccesses, skipped = results

    infos = []
    if not result.wasSuccessful():
      # Bold red FAILED banner.
      self.stream.write(' \033[1;31mFAILED\033[00m')
      failed = len(result.failures)
      errored = len(result.errors)
      if failed:
        infos.append('failures = \033[00;31m%d\033[00m' % failed)
      if errored:
        infos.append('errors = \033[00;31m%d\033[00m' % errored)
    else:
      # Bold green OK banner.
      self.stream.write(' \033[1;32mOK\033[00m')
    if skipped:
      infos.append('skipped = \033[00;33m%d\033[00m' % skipped)
    if expectedFails:
      infos.append('expected failures=%d' % expectedFails)
    if unexpectedSuccesses:
      infos.append('unexpected successes=%d' % unexpectedSuccesses)
    if infos:
      self.stream.writeln(' (%s)' % (', '.join(infos),))
    self.stream.write('\n')
    return result
    def run(self, test):
        """Run the given test case or suite and write a JSON report.

        Mirrors unittest.TextTestRunner.run for setup/teardown and timing,
        then records execution time and the total score in self.json_data
        and dumps it to self.stream.
        """
        result = self._makeResult()
        registerResult(result)
        result.failfast = self.failfast
        result.buffer = self.buffer
        startTime = time.time()
        startTestRun = getattr(result, 'startTestRun', None)
        if startTestRun is not None:
            startTestRun()
        try:
            test(result)
        finally:
            # Always notify the result that the run ended, even on error.
            stopTestRun = getattr(result, 'stopTestRun', None)
            if stopTestRun is not None:
                stopTestRun()
        stopTime = time.time()
        timeTaken = stopTime - startTime

        self.json_data["execution_time"] = format(timeTaken, "0.2f")

        # Fix: the original accumulation loop reused the name `test`,
        # shadowing the parameter; sum() over a distinct name avoids that.
        self.json_data["score"] = sum(
            entry["score"] for entry in self.json_data["tests"])

        json.dump(self.json_data, self.stream, indent=4)
        self.stream.write('\n')
        return result
Example #4
0
    def run(self, test):
        """Run *test*, print errors and the elapsed-time summary line."""
        outcome = self._makeResult()
        registerResult(outcome)
        outcome.failfast = self.failfast
        outcome.buffer = self.buffer
        started = time.time()
        begin_hook = getattr(outcome, 'startTestRun', None)
        if begin_hook is not None:
            begin_hook()
        try:
            test(outcome)
        finally:
            # Close out the run even if the suite raised.
            end_hook = getattr(outcome, 'stopTestRun', None)
            if end_hook is not None:
                end_hook()
        elapsed = time.time() - started
        outcome.printErrors()
        if hasattr(outcome, 'separator2'):
            self.stream.writeln(outcome.separator2)
        count = outcome.testsRun
        plural = "s" if count != 1 else ""
        self.stream.writeln("Ran %d test%s in %.3fs" %
                            (count, plural, elapsed))
        self.stream.writeln()


        return outcome
Example #5
0
    def run(self, test):
        """Run the given test case or test suite.

        Applies the configured warning filter (if any) for the duration of
        the run and passes the total test count to the result before
        startTestRun() so it can display progress.
        """
        result = self._makeResult()
        registerResult(result)
        result.failfast = self.failfast
        result.buffer = self.buffer

        with warnings.catch_warnings():
            if getattr(self, "warnings", None):
                # if self.warnings is set, use it to filter all the warnings
                warnings.simplefilter(self.warnings)
                # if the filter is 'default' or 'always', special-case the
                # warnings from the deprecated unittest methods to show them
                # no more than once per module, because they can be fairly
                # noisy.  The -Wd and -Wa flags can be used to bypass this
                # only when self.warnings is None.
                if self.warnings in ['default', 'always']:
                    warnings.filterwarnings(
                        'module',
                        category=DeprecationWarning,
                        # Raw string: \w is a regex escape, not a string one.
                        message=r'Please use assert\w+ instead.')
            startTestRun = getattr(result, 'startTestRun', None)
            if startTestRun is not None:
                # Tell the result the total up front (progress display).
                result.total_tests = test.countTestCases()
                startTestRun()
            try:
                test(result)
            finally:
                stopTestRun = getattr(result, 'stopTestRun', None)
                if stopTestRun is not None:
                    stopTestRun()

        return result
    def run(self, test):
        """Run the given test case or test suite.

        Copied from unittest.TextTestRunner.run, with two changed callback
        contracts: startTestRun receives the total test count, and
        stopTestRun receives the elapsed wall-clock seconds. When the
        result has no stopTestRun, errors are printed here instead.
        """
        result = self._makeResult()
        result.failfast = self.failfast
        result.buffer = self.buffer
        registerResult(result)
        startTime = time.time()
        startTestRun = getattr(result, 'startTestRun', None)
        if startTestRun is not None:
            # Custom signature: pass the total number of tests.
            startTestRun(test.countTestCases())
        try:
            # Inject the device under test into the suite before running.
            self._injectDevice(test)
            test(result)
        finally:
            stopTestRun = getattr(result, 'stopTestRun', None)
            if stopTestRun is not None:
                # Custom signature: pass the elapsed time so far.
                stopTestRun(time.time() - startTime)
            else:
                result.printErrors()
        stopTime = time.time()
        timeTaken = stopTime - startTime
        if hasattr(result, 'separator2'):
            self.stream.writeln(result.separator2)
        run = result.testsRun
        self.stream.writeln('Ran %d test%s in %.3fs' %
                            (run, run != 1 and 's' or '', timeTaken))
        self.stream.writeln()

        # Older result classes lack these attributes; the unpack happens
        # inside the try so the counters stay zero on AttributeError.
        expectedFails = unexpectedSuccesses = skipped = 0
        try:
            results = map(len, (result.expectedFailures,
                                result.unexpectedSuccesses, result.skipped))
            expectedFails, unexpectedSuccesses, skipped = results
        except AttributeError:
            pass
        infos = []
        if not result.wasSuccessful():
            self.stream.write('FAILED')
            failed, errored = map(len, (result.failures, result.errors))
            if failed:
                infos.append('failures=%d' % failed)
            if errored:
                infos.append('errors=%d' % errored)
        else:
            self.stream.write('OK')
        if skipped:
            infos.append('skipped=%d' % skipped)
        if expectedFails:
            infos.append('expected failures=%d' % expectedFails)
        if unexpectedSuccesses:
            infos.append('unexpected successes=%d' % unexpectedSuccesses)
        if infos:
            self.stream.writeln(' (%s)' % (', '.join(infos), ))
        else:
            # writeln above already terminated the line when infos existed.
            self.stream.write('\n')
        return result
Example #7
0
    def run(self, test):
        """Run the given test case or test suite and print a summary."""
        result = self._makeResult()
        registerResult(result)
        result.failfast = self.failfast
        result.buffer = self.buffer
        t0 = time.time()
        begin = getattr(result, 'startTestRun', None)
        if begin is not None:
            begin()
        try:
            test(result)
        finally:
            # Fire the end-of-run hook regardless of how the run ended.
            finish = getattr(result, 'stopTestRun', None)
            if finish is not None:
                finish()
        timeTaken = time.time() - t0
        result.printErrors()
        if hasattr(result, 'separator2'):
            self.stream.writeln(result.separator2)
        n_run = result.testsRun
        plural = "s" if n_run != 1 else ""
        self.stream.writeln("Ran %d test%s in %.3fs" %
                            (n_run, plural, timeTaken))
        self.stream.writeln()

        # Older result classes lack these attributes; default all to zero.
        try:
            counts = (len(result.expectedFailures),
                      len(result.unexpectedSuccesses),
                      len(result.skipped))
        except AttributeError:
            counts = (0, 0, 0)
        expectedFails, unexpectedSuccesses, skipped = counts

        infos = []
        if result.wasSuccessful():
            self.stream.write("OK")
        else:
            self.stream.write("FAILED")
            failed = len(result.failures)
            errored = len(result.errors)
            if failed:
                infos.append("failures=%d" % failed)
            if errored:
                infos.append("errors=%d" % errored)
        if skipped:
            infos.append("skipped=%d" % skipped)
        if expectedFails:
            infos.append("expected failures=%d" % expectedFails)
        if unexpectedSuccesses:
            infos.append("unexpected successes=%d" % unexpectedSuccesses)
        if infos:
            self.stream.writeln(" (%s)" % (", ".join(infos),))
        else:
            self.stream.write("\n")
        return result
 def __init_test_result_config(self, test: TestSuite):
     """Copy runner flags onto the pre-built result object.

     Registers the result for Ctrl-C handling and mirrors the runner's
     failfast/buffer/tb_locals settings onto it.
     """
     registerResult(self.result)
     self.result.failfast = self.failfast
     self.result.buffer = self.buffer
     self.result.tb_locals = self.tb_locals
     # NOTE(review): both `failfast` and `fail_fast` are set -- presumably
     # two differently-named consumers read them; confirm neither is dead.
     self.result.fail_fast = self.failfast
     if hasattr(test, 'properties'):
         # junit test suite properties
         self.result.properties = test.properties
    def run(self, test):
        """Run the given test case or test suite, logging the summary."""
        result = self._makeResult()
        registerResult(result)
        result.failfast = self.failfast
        result.buffer = self.buffer
        begin = time.time()
        on_start = getattr(result, 'startTestRun', None)
        if on_start is not None:
            on_start()
        try:
            test(result)
        finally:
            # End-of-run hook fires even when the suite raised.
            on_stop = getattr(result, 'stopTestRun', None)
            if on_stop is not None:
                on_stop()
        timeTaken = time.time() - begin
        total = result.testsRun
        self.log.info("Ran %d test%s in %.3fs",
                      total, "s" if total != 1 else "", timeTaken)

        # All three counters come from newer result classes; on any
        # AttributeError they all stay zero.
        try:
            counters = (len(result.expectedFailures),
                        len(result.unexpectedSuccesses),
                        len(result.skipped))
        except AttributeError:
            counters = (0, 0, 0)
        expectedFails, unexpectedSuccesses, skipped = counters

        infos = []
        summary = StringIO()
        lvl = logging.INFO
        if result.wasSuccessful():
            summary.write("OK")
        else:
            summary.write("FAILED")
            lvl = logging.ERROR
            failed = len(result.failures)
            errored = len(result.errors)
            if failed:
                infos.append("failures=%d" % failed)
            if errored:
                infos.append("errors=%d" % errored)
        if skipped:
            infos.append("skipped=%d" % skipped)
        if expectedFails:
            infos.append("expected failures=%d" % expectedFails)
        if unexpectedSuccesses:
            infos.append("unexpected successes=%d" % unexpectedSuccesses)
        if infos:
            summary.write(" (%s)" % (", ".join(infos),))
        self.log.log(lvl, summary.getvalue())
        return result
Example #10
0
    def run(self, test):
        """Run *test*, then emit a one-line summary through self.log."""
        result = self._makeResult()
        registerResult(result)
        result.failfast = self.failfast
        result.buffer = self.buffer
        started_at = time.time()
        hook = getattr(result, 'startTestRun', None)
        if hook is not None:
            hook()
        try:
            test(result)
        finally:
            # Always signal the end of the run.
            hook = getattr(result, 'stopTestRun', None)
            if hook is not None:
                hook()
        elapsed = time.time() - started_at
        n = result.testsRun
        self.log.info("Ran %d test%s in %.3fs", n, "" if n == 1 else "s",
                      elapsed)

        # These attributes exist only on newer result classes; keep all
        # three at zero when any is missing.
        expectedFails = unexpectedSuccesses = skipped = 0
        try:
            tallies = (len(result.expectedFailures),
                       len(result.unexpectedSuccesses),
                       len(result.skipped))
        except AttributeError:
            pass
        else:
            expectedFails, unexpectedSuccesses, skipped = tallies

        tags = []
        buf = StringIO()
        level = logging.INFO
        if not result.wasSuccessful():
            buf.write("FAILED")
            level = logging.ERROR
            n_fail = len(result.failures)
            n_err = len(result.errors)
            if n_fail:
                tags.append("failures=%d" % n_fail)
            if n_err:
                tags.append("errors=%d" % n_err)
        else:
            buf.write("OK")
        if skipped:
            tags.append("skipped=%d" % skipped)
        if expectedFails:
            tags.append("expected failures=%d" % expectedFails)
        if unexpectedSuccesses:
            tags.append("unexpected successes=%d" % unexpectedSuccesses)
        if tags:
            buf.write(" (%s)" % (", ".join(tags), ))
        self.log.log(level, buf.getvalue())
        return result
Example #11
0
    def run(self, suite):
        """Run the given test case or test suite.

        Runs in-process when subprocesses == 1; otherwise fans the tests
        out to a worker pool and feeds the proto results back into a
        GreenTestResult in submission order.
        """
        result = GreenTestResult(
                self.stream, self.descriptions, self.verbosity, html=self.html,
                termcolor=self.termcolor)
        registerResult(result)
        with warnings.catch_warnings():
            if self.warnings:
                # if self.warnings is set, use it to filter all the warnings
                warnings.simplefilter(self.warnings)
                # if the filter is 'default' or 'always', special-case the
                # warnings from the deprecated unittest methods to show them
                # no more than once per module, because they can be fairly
                # noisy.  The -Wd and -Wa flags can be used to bypass this
                # only when self.warnings is None.
                if self.warnings in ['default', 'always']:
                    warnings.filterwarnings('module',
                            category=DeprecationWarning,
                            # Raw string: \w is a regex escape.
                            message=r'Please use assert\w+ instead.')

            result.startTestRun()

            if self.subprocesses == 1:
                suite.run(result)
            else:
                tests = toProtoTestList(suite)
                pool = LoggingDaemonlessPool(processes=self.subprocesses)
                if tests:
                    async_responses = []
                    for index, test in enumerate(tests):
                        if self.run_coverage:
                            # Coverage files are numbered per worker, 1-based.
                            coverage_number = index + 1
                        else:
                            coverage_number = None
                        async_responses.append(pool.apply_async(
                            poolRunner,
                            (test.dotted_name, coverage_number, self.omit)))
                    pool.close()
                    for test, async_response in zip(tests, async_responses):
                        # Prints out the white 'processing...' version of the output
                        result.startTest(test)
                        # This blocks until the worker who is processing this
                        # particular test actually finishes
                        result.addProtoTestResult(async_response.get())
                pool.terminate()
                pool.join()

            result.stopTestRun()

        return result
Example #12
0
 def _before_run(self):
     """Create, register and configure the result object.

     Mirrors the setup portion of unittest.TextTestRunner.run: result
     creation, failfast/buffer flags, and the warning filter setup.
     """
     # Keep the same as lines 145-162 in unittest.TextTestRunner.run
     result = self.resultclass(self.stream, self.descriptions, self.verbosity)
     registerResult(result)
     result.failfast = self.failfast
     result.buffer = self.buffer
     with warnings.catch_warnings():
         warn = getattr(self, 'warnings', None)
         if warn:
             warnings.simplefilter(warn)
             if warn in ['default', 'always']:
                 # NOTE(review): catch_warnings restores the filter state
                 # when this method returns, so this filter does not outlive
                 # _before_run -- confirm the caller re-applies it.
                 warnings.filterwarnings('module',
                                         category=DeprecationWarning,
                                         # Raw string: \w is a regex escape.
                                         message=r'Please use assert\w+ instead.')
     return result
Example #13
0
    def run(self, test):
        """Execute *test*, report the outcome, and return the result."""
        outcome = self.makeResult()
        registerResult(outcome)
        outcome.failfast = self.failfast

        outcome.startTestRun()
        try:
            test(outcome)
        finally:
            # Close out the run even if the suite raised.
            outcome.stopTestRun()

        outcome.report()
        return outcome
Example #14
0
 def _before_run(self):
     """Create, register and configure the result object.

     Mirrors the setup portion of unittest.TextTestRunner.run: result
     creation, failfast/buffer flags, and the warning filter setup.
     """
     # Keep the same as lines 145-162 in unittest.TextTestRunner.run
     result = self.make_result()
     registerResult(result)
     result.failfast = self.failfast
     result.buffer = self.buffer
     with warnings.catch_warnings():
         warn = getattr(self, 'warnings', None)
         if warn:
             warnings.simplefilter(warn)
             if warn in ['default', 'always']:
                 # NOTE(review): catch_warnings restores the filter state
                 # when this method returns, so this filter does not outlive
                 # _before_run -- confirm the caller re-applies it.
                 warnings.filterwarnings(
                     'module',
                     category=DeprecationWarning,
                     # Raw string: \w is a regex escape.
                     message=r'Please use assert\w+ instead.')
     return result
Example #15
0
def run(self, test):
    """Run the given test case or test suite and return its result."""
    outcome = self._makeResult()
    registerResult(outcome)
    outcome.failfast = self.failfast
    outcome.buffer = self.buffer
    begin_hook = getattr(outcome, 'startTestRun', None)
    if begin_hook is not None:
        begin_hook()
    try:
        test(outcome)
    finally:
        # End-of-run hook fires even when the suite raised.
        end_hook = getattr(outcome, 'stopTestRun', None)
        if end_hook is not None:
            end_hook()

    return outcome
Example #16
0
def run(self, test):
    """Run *test* with a freshly-created result and return that result."""
    res = self._makeResult()
    registerResult(res)
    res.failfast = self.failfast
    res.buffer = self.buffer
    starter = getattr(res, 'startTestRun', None)
    if starter is not None:
        starter()
    try:
        test(res)
    finally:
        # Always signal the end of the run, even on error.
        stopper = getattr(res, 'stopTestRun', None)
        if stopper is not None:
            stopper()

    return res
Example #17
0
 def __call__(self, test):
     """Run *test* in this child process and return a picklable result."""
     kwargs = self.runner_args
     # Capture output in a buffer; the parent process prints it later.
     kwargs['stream'] = StringIO()
     runner = self.runner_cls(**kwargs)
     child_result = runner._makeResult()
     # Avoid child tracebacks when Ctrl-C is pressed.
     signals.installHandler()
     signals.registerResult(child_result)
     child_result.failfast = runner.failfast
     child_result.buffer = runner.buffer
     with self.cleanup_object(test):
         test(child_result)
     # HACK as cStringIO.StringIO isn't picklable in 2.x
     child_result.stream = _FakeStringIO(child_result.stream.getvalue())
     return _MinimalResult(child_result, test.id())
Example #18
0
 def __call__(self, test):
     """Execute *test* in the child process; return a minimal result."""
     runner_kwargs = self.runner_args
     # Record output in a buffer so the parent can print it.
     runner_kwargs["stream"] = StringIO()
     worker = self.runner_cls(**runner_kwargs)
     outcome = worker._makeResult()
     # Avoid child tracebacks when Ctrl-C is pressed.
     signals.installHandler()
     signals.registerResult(outcome)
     outcome.failfast = worker.failfast
     outcome.buffer = worker.buffer
     with self.cleanup_object(test):
         test(outcome)
     # HACK as cStringIO.StringIO isn't picklable in 2.x
     outcome.stream = _FakeStringIO(outcome.stream.getvalue())
     return _MinimalResult(outcome)
Example #19
0
    def run(self, test):
        """Run the given test case or test suite using the Slick result."""

        # Result object that reports into Slick.
        outcome = self._makeSlickResult()

        # These are set to keep compatibility with the unittest framework.
        registerResult(outcome)
        outcome.failfast = self.failfast

        begin = getattr(outcome, 'startTestRun', None)
        if begin is not None:
            begin()
        try:
            # `test` may be falsy; only invoke it when present.
            if test:
                test(outcome)
        finally:
            finish = getattr(outcome, 'stopTestRun', None)
            if finish is not None:
                finish()

        return outcome
Example #20
0
    def run(self, suite, testName=None):
        """Run the given suite, or a single named test from it.

        Appends each per-test result to self.testResults and returns the
        combined count of errors and failures.
        """

        errorsOrFailures = 0
        for test in suite:
            # PEP 8: identity comparison with None, not equality.
            if testName is None or test._testMethodName == testName:
                result = ObjectTestResult()
                registerResult(result)
                result.failfast = self.failfast
                startTestRun = getattr(result, 'startTestRun', None)
                if startTestRun is not None:
                    startTestRun()
                try:
                    test(result)
                finally:
                    stopTestRun = getattr(result, 'stopTestRun', None)
                    if stopTestRun is not None:
                        stopTestRun()
                    errorsOrFailures += len(result.errors) + len(
                        result.failures)
                    self.testResults.append(result.getTestResult())
        if testName is not None:
            # NOTE(review): collapses the list to the single matching result;
            # assumes exactly one match -- confirm callers rely on this.
            self.testResults = self.testResults[0]
        return errorsOrFailures
Example #21
0
def run(suite, stream, args, testing=False):
    """
    Run the given test case or test suite with the specified arguments.

    Any args.stream passed in will be wrapped in a GreenStream
    """
    if not issubclass(GreenStream, type(stream)):
        stream = GreenStream(stream, disable_windows=args.disable_windows)
    result = GreenTestResult(args, stream)

    # Note: Catching SIGINT isn't supported by Python on windows (python
    # "WONTFIX" issue 18040)
    installHandler()
    registerResult(result)

    with warnings.catch_warnings():
        if args.warnings:  # pragma: no cover
            # if args.warnings is set, use it to filter all the warnings
            warnings.simplefilter(args.warnings)
            # if the filter is 'default' or 'always', special-case the
            # warnings from the deprecated unittest methods to show them
            # no more than once per module, because they can be fairly
            # noisy.  The -Wd and -Wa flags can be used to bypass this
            # only when args.warnings is None.
            if args.warnings in ['default', 'always']:
                warnings.filterwarnings(
                    'module',
                    category=DeprecationWarning,
                    # Raw string: \w is a regex escape, not a string one.
                    message=r'Please use assert\w+ instead.')

        result.startTestRun()

        pool = LoggingDaemonlessPool(
            processes=args.processes or None,
            initializer=InitializerOrFinalizer(args.initializer),
            finalizer=InitializerOrFinalizer(args.finalizer))
        manager = multiprocessing.Manager()
        # One queue per target so each worker streams results back privately.
        targets = [(target, manager.Queue())
                   for target in toParallelTargets(suite, args.targets)]
        if targets:
            for index, (target, queue) in enumerate(targets):
                if args.run_coverage:
                    # Coverage files are numbered per worker, 1-based.
                    coverage_number = index + 1
                else:
                    coverage_number = None
                debug("Sending {} to runner {}".format(target, poolRunner))
                pool.apply_async(
                    poolRunner,
                    (target, queue, coverage_number, args.omit_patterns))
            pool.close()
            for target, queue in targets:
                abort = False

                while True:
                    msg = queue.get()

                    # Sentinel value, we're done
                    if not msg:
                        break
                    else:
                        # Result guaranteed after this message, we're
                        # currently waiting on this test, so print out
                        # the white 'processing...' version of the output
                        result.startTest(msg)
                        proto_test_result = queue.get()
                        result.addProtoTestResult(proto_test_result)

                    if result.shouldStop:
                        abort = True
                        break

                if abort:
                    break

        pool.close()
        pool.join()

        result.stopTestRun()

    removeResult(result)

    return result
Example #22
0
def run(suite, stream, args, testing=False):
    """
    Run the given test case or test suite with the specified arguments.

    Any args.stream passed in will be wrapped in a GreenStream
    """
    if not issubclass(GreenStream, type(stream)):
        stream = GreenStream(stream)
    result = GreenTestResult(args, stream)

    # Note: Catching SIGINT isn't supported by Python on windows (python
    # "WONTFIX" issue 18040)
    installHandler()
    registerResult(result)

    with warnings.catch_warnings():
        if args.warnings: # pragma: no cover
            # if args.warnings is set, use it to filter all the warnings
            warnings.simplefilter(args.warnings)
            # if the filter is 'default' or 'always', special-case the
            # warnings from the deprecated unittest methods to show them
            # no more than once per module, because they can be fairly
            # noisy.  The -Wd and -Wa flags can be used to bypass this
            # only when args.warnings is None.
            if args.warnings in ['default', 'always']:
                warnings.filterwarnings('module',
                        category=DeprecationWarning,
                        # Raw string: \w is a regex escape, not a string one.
                        message=r'Please use assert\w+ instead.')

        result.startTestRun()

        pool = LoggingDaemonlessPool(processes=args.processes or None,
                initializer=InitializerOrFinalizer(args.initializer),
                finalizer=InitializerOrFinalizer(args.finalizer))
        manager = multiprocessing.Manager()
        # One queue per target so each worker streams results back privately.
        targets = [(target, manager.Queue()) for target in toParallelTargets(suite, args.targets)]
        if targets:
            for index, (target, queue) in enumerate(targets):
                if args.run_coverage:
                    # Coverage files are numbered per worker, 1-based.
                    coverage_number = index + 1
                else:
                    coverage_number = None
                pool.apply_async(
                    poolRunner,
                    (target, queue, coverage_number, args.omit_patterns))
            pool.close()
            for target, queue in targets:
                abort = False

                while True:
                    msg = queue.get()

                    # Sentinel value, we're done
                    if not msg:
                        break
                    else:
                        # Result guaranteed after this message, we're
                        # currently waiting on this test, so print out
                        # the white 'processing...' version of the output
                        result.startTest(msg)
                        proto_test_result = queue.get()
                        result.addProtoTestResult(proto_test_result)

                    if result.shouldStop:
                        abort = True
                        break

                if abort:
                    break

        pool.close()
        pool.join()

        result.stopTestRun()

    removeResult(result)

    return result
Example #23
0
    def run(self, test):
        """Run the given test case or test suite.

        Records start/stop wall-clock timestamps and the elapsed time on
        the result object; unlike unittest.TextTestRunner, it prints no
        text summary -- reporting is handled by the result object.
        """
        result = self._makeResult()
        registerResult(result)
        result.failfast = self.failfast
        result.buffer = self.buffer
        result.tb_locals = self.tb_locals
        with warnings.catch_warnings():
            if self.warnings:
                # if self.warnings is set, use it to filter all the warnings
                warnings.simplefilter(self.warnings)
                # if the filter is 'default' or 'always', special-case the
                # warnings from the deprecated unittest methods to show them
                # no more than once per module, because they can be fairly
                # noisy.  The -Wd and -Wa flags can be used to bypass this
                # only when self.warnings is None.
                if self.warnings in ['default', 'always']:
                    warnings.filterwarnings(
                        'module',
                        category=DeprecationWarning,
                        message=r'Please use assert\w+ instead.')
            startTime = datetime.datetime.now()
            startTestRun = getattr(result, 'startTestRun', None)
            if startTestRun is not None:
                startTestRun()
            try:
                test(result)
            finally:
                stopTestRun = getattr(result, 'stopTestRun', None)
                if stopTestRun is not None:
                    stopTestRun()
            stopTime = datetime.datetime.now()
        result.timeTaken = result.getTimeTaken(stopTime, startTime)
        # Human-readable timestamps with microseconds stripped ([:-7]).
        result.startTime = str(startTime)[:-7]
        result.stopTime = str(stopTime)[:-7]

        # The large commented-out TextTestRunner summary block that used to
        # follow here has been removed as dead code.
        return result
Example #24
0
def run(suite, stream, args, testing=False):
    """
    Run the given test case or test suite with the specified arguments.

    Any args.stream passed in will be wrapped in a GreenStream

    Work is fanned out to a daemonless process pool; each parallel target
    streams messages back through its own queue and this function folds
    them into a single GreenTestResult, which is returned.
    """

    # check if the kubefwd is running, then stop the run
    if check_kubefwd_running():
        return GreenTestResult(args, stream)

    if not issubclass(GreenStream, type(stream)):
        stream = GreenStream(
            stream,
            disable_windows=args.disable_windows,
            disable_unidecode=args.disable_unidecode,
        )
    result = GreenTestResult(args, stream)

    # Note: Catching SIGINT isn't supported by Python on windows (python
    # "WONTFIX" issue 18040)
    installHandler()
    registerResult(result)

    with warnings.catch_warnings():
        if args.warnings:  # pragma: no cover
            # if args.warnings is set, use it to filter all the warnings
            warnings.simplefilter(args.warnings)
            # if the filter is 'default' or 'always', special-case the
            # warnings from the deprecated unittest methods to show them
            # no more than once per module, because they can be fairly
            # noisy.  The -Wd and -Wa flags can be used to bypass this
            # only when args.warnings is None.
            if args.warnings in ["default", "always"]:
                # BUG FIX: raw string -- '\w' in a plain literal is an
                # invalid escape sequence (SyntaxWarning on modern Python).
                warnings.filterwarnings(
                    "module",
                    category=DeprecationWarning,
                    message=r"Please use assert\w+ instead.",
                )

        result.startTestRun()

        # The call to toParallelTargets needs to happen before pool stuff so we can crash if there
        # are, for example, syntax errors in the code to be loaded.
        parallel_targets = toParallelTargets(suite, args.targets)
        pool = LoggingDaemonlessPool(
            processes=args.processes or None,
            initializer=InitializerOrFinalizer(args.initializer),
            finalizer=InitializerOrFinalizer(args.finalizer),
        )
        manager = multiprocessing.Manager()
        # One queue per target: the worker streams message pairs
        # (test announcement, then its proto result) terminated by a
        # falsy sentinel.
        targets = [(target, manager.Queue()) for target in parallel_targets]
        if targets:
            for index, (target, queue) in enumerate(targets):
                # Each coverage-enabled worker gets a distinct number so
                # their coverage data files do not collide.
                if args.run_coverage:
                    coverage_number = index + 1
                else:
                    coverage_number = None
                debug("Sending {} to poolRunner {}".format(target, poolRunner))
                pool.apply_async(
                    poolRunner,
                    (
                        target,
                        queue,
                        coverage_number,
                        args.omit_patterns,
                        args.cov_config_file,
                    ),
                )
            pool.close()
            for target, queue in targets:
                abort = False

                while True:
                    msg = queue.get()

                    # Sentinel value, we're done
                    if not msg:
                        debug("runner.run(): received sentinal, breaking.", 3)
                        break
                    else:
                        debug("runner.run(): start test: {}".format(msg))
                        # Result guaranteed after this message, we're
                        # currently waiting on this test, so print out
                        # the white 'processing...' version of the output
                        result.startTest(msg)
                        proto_test_result = queue.get()
                        debug(
                            "runner.run(): received proto test result: {}".
                            format(str(proto_test_result)),
                            3,
                        )
                        result.addProtoTestResult(proto_test_result)

                    if result.shouldStop:
                        debug("runner.run(): shouldStop encountered, breaking",
                              3)
                        abort = True
                        break

                if abort:
                    break

        pool.close()
        pool.join()

        result.stopTestRun()

    removeResult(result)

    return result
Example #25
0
    def run(self, test):
        """Run the given test case or test suite.

        Prints a "Testing" banner, validates the suites, executes the
        tests, then hands a totals dict to result.printSummary().
        Returns the populated result object.
        """

        # Let the result class render progress as "x of N".
        self.resultclass.number_of_tests = test.countTestCases()

        if self.verbosity >= 2:
            self.stream.writeln(colored('\nTesting:\n', attrs=['bold']))
        else:
            self.stream.writeln('Testing...')

        self._check_suites(test)

        result = self._makeResult()
        registerResult(result)
        result.failfast = self.failfast
        result.buffer = self.buffer

        with warnings.catch_warnings():
            if self.warnings:
                # if self.warnings is set, use it to filter all the warnings
                warnings.simplefilter(self.warnings)
                # if the filter is 'default' or 'always', special-case the
                # warnings from the deprecated unittest methods to show them
                # no more than once per module, because they can be fairly
                # noisy.  The -Wd and -Wa flags can be used to bypass this
                # only when self.warnings is None.
                if self.warnings in ['default', 'always']:
                    # BUG FIX: raw string -- '\w' in a plain literal is an
                    # invalid escape sequence (SyntaxWarning on modern Python).
                    warnings.filterwarnings(
                        'module',
                        category=DeprecationWarning,
                        message=r'Please use assert\w+ instead.')

            startTime = time.time()
            startTestRun = getattr(result, 'startTestRun', None)
            if startTestRun is not None:
                startTestRun()
            try:
                test(result)
            finally:
                stopTestRun = getattr(result, 'stopTestRun', None)
                if stopTestRun is not None:
                    stopTestRun()
            stopTime = time.time()

        timeTaken = stopTime - startTime
        result.printErrors()

        run = result.testsRun

        # NOTE(review): unexpected successes are counted as failures here --
        # presumably intentional; confirm against the result class.
        details = {
            'success': result.wasSuccessful(),
            'failures': len(result.failures) + len(result.unexpectedSuccesses),
            'errors': len(result.errors),
            'skipped': len(result.skipped),
            'time': round(timeTaken, 3),
            'sum': run,

        }
        # NOTE(review): skipped tests are not subtracted, so they count as
        # "successes" in this figure -- confirm that is the intended metric.
        details['successes'] = run - (details['failures'] + details['errors'])
        result.printSummary(details)

        return result
Example #26
0
    def run(self, test: object) -> TestResult:
        """ @abstract Run the test suite or case and display the report
            @params test [TestSuite|TestCase] Test suite or case object to run
            @returns [TestResult] Outputs formatted text report and returns
                a TestResult object on success.

            In "full-report" mode a per-class summary table plus one table
            per test class is drawn; otherwise a one-line summary is printed.
        """
        result = self._makeResult()
        registerResult(result)
        result.timing = self.timing
        result.failfast = self.failfast
        result.buffer = self.buffer
        result.tb_locals = self.tb_locals

        # (Removed dead `timetaken = 0`: the variable was never read --
        # `timeTaken` below is always assigned before any use.)
        with warnings.catch_warnings():
            if self.warnings:
                # if self.warnings is set, use it to filter all the warnings
                warnings.simplefilter(self.warnings)
                # if the filter is 'default' or 'always', special-case the
                # warnings from the deprecated unittest methods to show them
                # no more than once per module, because they can be fairly
                # noisy.  The -Wd and -Wa flags can be used to bypass this
                # only when self.warnings is None.
                if self.warnings in ['default', 'always']:
                    warnings.filterwarnings(
                        'module',
                        category=DeprecationWarning,
                        message=r'Please use assert\w+ instead.')

            #overall testruntime
            startTime = time.time()

            startTestRun = getattr(result, 'startTestRun', None)
            if startTestRun is not None:
                startTestRun()

            try:
                test(result)
            finally:
                stopTestRun = getattr(result, 'stopTestRun', None)
                if stopTestRun is not None:
                    stopTestRun()

            stopTime = time.time()
            timeTaken = stopTime - startTime

        # We need to start building the report to display
        _reportOrder = result.tally.keys()
        if self.mode == "full-report":
            _summary_table = TextTable(self._summary_props)
            _total_count = _total_success = _total_fail = _total_err = 0
            _total_skip = _total_unexpS = _total_expectF = 0

            # One summary row per test class: name, count, then the
            # per-status tallies supplied by the result object.
            for _clsName in _reportOrder:
                # "Count", "Pass", "Fail", "Error", "Skip", "UnexpS", "ExpectF"],
                _test_count = sum(result.tally[_clsName].values())
                _summary_table.appendRow([
                    _clsName,
                    str(_test_count), *result.getClassTally(_clsName)
                ])

        _resultTotals = result.getResultTally()
        _stats = ""

        if self.mode == "full-report":
            _summary_table.appendRow(["Totals", *_resultTotals])

            _reports = [("Summary", _summary_table)]

        # Per-class detail: either a table row per test (full-report) or a
        # compact status-character string (simple mode).
        for _cls in _reportOrder:
            _clsSet = result.results[_cls]
            if self.mode == "full-report":
                _report = TextTable(self._test_props)

            _single = len(_clsSet) == 1
            for _name, _test, _status, _output in _clsSet:
                if self.mode == "full-report":
                    if self.timing:
                        # Per-test wall time recorded by the result object.
                        _execTimes = result.execTimes[_test.id()]
                        _runTime = round(
                            _execTimes["stop"] - _execTimes["start"], 3)
                        _statStr = "({}s) {}".format(_runTime,
                                                     result.STATUS[_status])
                    else:
                        _statStr = "{}".format(result.STATUS[_status])

                    _report.appendRow(cells=[_name, _output, _statStr],
                                      single=_single)
                else:
                    _stats += _status

            if self.mode == "full-report":
                _reports.append((_cls, _report))

        if self.mode == "full-report":
            for _heading, _report in _reports:
                print(_heading)
                _report.draw()
            if self.timing:
                print("runtime: {}s".format(round(timeTaken, 3)))
        else:
            _stats += " " if len(_stats) > 0 else ""

            # Simple Summary
            simplereport = "{}pass: {}, failed: {}, errors: {}, runtime: {}s"
            print(
                simplereport.format(_stats, _resultTotals[1], _resultTotals[3],
                                    _resultTotals[2], round(timeTaken, 3)))
        return result
Example #27
0
 def run(self, test, logdir=None):
     """Run the given test case or test suite and write an HTML/plain-text
     summary.

     Args:
         test: TestSuite (or iterable of cases) to execute.
         logdir: Directory receiving sum.html / sum.log and the per-case
             debug logs; when None it is taken from the utils_lib config.
     """
     result = self._makeResult()
     registerResult(result)
     result.failfast = self.failfast
     result.buffer = self.buffer
     result.tb_locals = self.tb_locals
     with warnings.catch_warnings():
         if self.warnings:
             # if self.warnings is set, use it to filter all the warnings
             warnings.simplefilter(self.warnings)
             # if the filter is 'default' or 'always', special-case the
             # warnings from the deprecated unittest methods to show them
             # no more than once per module, because they can be fairly
             # noisy.  The -Wd and -Wa flags can be used to bypass this
             # only when self.warnings is None.
             if self.warnings in ['default', 'always']:
                 warnings.filterwarnings(
                     'module',
                     category=DeprecationWarning,
                     message=r'Please use assert\w+ instead.')
         startTime = time.perf_counter()
         startTestRun = getattr(result, 'startTestRun', None)
         if startTestRun is not None:
             startTestRun()
         # Capture the case ids up front; the summary loop below needs the
         # full list regardless of how the run itself consumes the suite.
         all_case_name = [ts.id() for ts in test]
         try:
             test(result)
         finally:
             stopTestRun = getattr(result, 'stopTestRun', None)
             if stopTestRun is not None:
                 stopTestRun()
         stopTime = time.perf_counter()
     timeTaken = stopTime - startTime
     # NOTE(review): assumes utils_lib.get_cfg() returns a (status, dict)
     # pair whose dict has a 'results_dir' key -- confirm against utils_lib.
     _, cfg_data = utils_lib.get_cfg()
     if logdir is None:
         logdir = cfg_data['results_dir']
     sum_html = logdir + '/sum.html'
     sum_txt = logdir + '/sum.log'
     # Start each run with a fresh plain-text summary file.
     if os.path.exists(sum_txt):
         os.unlink(sum_txt)
     html_sum = HTMLTemp(sum_html)
     html_sum.run_time = timeTaken
     all_case_name.sort()
     id = 0
     for case in all_case_name:
         id += 1
         is_pass = True
         os.chdir(logdir)
         # Per-case debug log path, relative to logdir.
         debug_log = "debug/" + case + '.debug'
         # Match this case against recorded failures first...
         for ts, err in result.failures:
             if case == ts.id():
                 is_pass = False
                 html_sum.result.case_fail += 1
                 html_sum.result.table_rows.append(
                     (id, case, 'FAIL', err, debug_log))
                 with open(debug_log, 'a+') as fh:
                     fh.write(err)
                     fh.write('{} - FAIL'.format(case))
                 with open(sum_txt, 'a+') as fh:
                     fh.write('case: {} - FAIL\n'.format(case))
                     fh.write('info: {}\n'.format(err))
                 break
         if not is_pass:
             continue
         # ...then against errors; a case is only counted under one status.
         for ts, err in result.errors:
             if case == ts.id():
                 is_pass = False
                 html_sum.result.case_error += 1
                 html_sum.result.table_rows.append(
                     (id, case, 'ERROR', err, debug_log))
                 with open(debug_log, 'a+') as fh:
                     fh.write(err)
                     fh.write('{} - ERROR'.format(case))
                 with open(sum_txt, 'a+') as fh:
                     fh.write('case: {} - ERROR\n'.format(case))
                     fh.write('info: {}\n'.format(err))
                 break
Example #28
0
    def run(self, test, test_result=None):
        """Run the given test case or test suite.

        Monkey patched onto unittest so the summary also reports the
        "not implemented" outcomes gathered by the custom result class.

        Args:
            test: test case or suite to execute.
            test_result: optional pre-built result object; when None one is
                created via self._makeResult().

        Returns:
            The populated result object.
        """
        # Monkey patch on unittest class to add not implemented errors into run
        if test_result is None:
            test_result = self._makeResult()
        registerResult(test_result)
        test_result.failfast = self.failfast
        test_result.buffer = self.buffer
        startTime = time.time()
        startTestRun = getattr(test_result, 'startTestRun', None)
        if startTestRun is not None:
            startTestRun()
        try:
            test(test_result)
        finally:
            stopTestRun = getattr(test_result, 'stopTestRun', None)
            if stopTestRun is not None:
                stopTestRun()
        stopTime = time.time()
        timeTaken = stopTime - startTime
        test_result.printErrors()
        if hasattr(test_result, 'separator2'):
            self.stream.writeln(test_result.separator2)
        run = test_result.testsRun
        self.stream.writeln("Ran %d test%s in %.3fs" %
                            (run, run != 1 and "s" or "", timeTaken))
        self.stream.writeln()

        # BUG FIX: not_implemented must be pre-initialised with the other
        # counters; previously the AttributeError fallback left it unbound
        # and the `if not_implemented:` check below raised NameError.
        expectedFails = unexpectedSuccesses = skipped = not_implemented = 0
        try:
            results = map(
                len,
                (test_result.expectedFailures, test_result.unexpectedSuccesses,
                 test_result.skipped, test_result.not_implemented))
        except AttributeError:
            # Result class lacks the extended attributes; keep zero counts.
            pass
        else:
            expectedFails, unexpectedSuccesses, skipped, not_implemented = results

        infos = []
        if not test_result.wasSuccessful():
            self.stream.write("FAILED")
            failed, errored = map(len,
                                  (test_result.failures, test_result.errors))
            if failed:
                infos.append("failures=%d" % failed)
            if errored:
                infos.append("errors=%d" % errored)
        else:
            self.stream.write("OK")
        if skipped:
            infos.append("skipped=%d" % skipped)
        if expectedFails:
            infos.append("expected failures=%d" % expectedFails)
        if unexpectedSuccesses:
            infos.append("unexpected successes=%d" % unexpectedSuccesses)
        if not_implemented:
            infos.append("not implemented=%d" % not_implemented)

        if infos:
            self.stream.writeln(" (%s)" % (", ".join(infos), ))
        else:
            self.stream.write("\n")
        return test_result
Example #29
0
    def run(self, test):
        """Run the given test case or test suite under MPI.

        Every rank runs the suite into its own MPITestResult.  Summary
        output is serialised: rank 0 prints the shared banner, then each
        rank prints its own OK/FAILED line in rank order, separated by
        barriers so the output is not interleaved.
        """
        result = MPITestResult(
            self.comm, self.stream, self.descriptions, self.verbosity
        )
        registerResult(result)
        with warnings.catch_warnings():
            if self.warnings:
                # if self.warnings is set, use it to filter all the warnings
                warnings.simplefilter(self.warnings)
                # if the filter is "default" or "always", special-case the
                # warnings from the deprecated unittest methods to show them
                # no more than once per module, because they can be fairly
                # noisy.  The -Wd and -Wa flags can be used to bypass this
                # only when self.warnings is None.
                if self.warnings in ["default", "always"]:
                    warnings.filterwarnings(
                        "module",
                        category=DeprecationWarning,
                        message=r"Please use assert\w+ instead.",
                    )
            startTime = time.time()
            startTestRun = getattr(result, "startTestRun", None)
            if startTestRun is not None:
                startTestRun()
            try:
                test(result=result)
            finally:
                stopTestRun = getattr(result, "stopTestRun", None)
                if stopTestRun is not None:
                    stopTestRun()
            stopTime = time.time()
        self.stream.flush()
        # Newline after the per-test progress output.  With a communicator,
        # wait at a barrier first and let only rank 0 write it.
        if self.comm is None:
            self.stream.write("\n")
            self.stream.flush()
        else:
            self.comm.barrier()
            if self.comm.rank == 0:
                self.stream.write("\n")
                self.stream.flush()
        timeTaken = stopTime - startTime

        result.printErrors()

        # Banner and "Ran N tests" line only once, from rank 0 (or the sole
        # process when running without a communicator).
        if (self.comm is None) or (self.comm.rank == 0):
            if hasattr(result, "separator2"):
                self.stream.writeln(result.separator2)

        run = result.testsRun
        if (self.comm is None) or (self.comm.rank == 0):
            self.stream.writeln(
                "Ran %d test%s in %.3fs" % (run, run != 1 and "s" or "", timeTaken)
            )
            self.stream.writeln()

        expectedFails = unexpectedSuccesses = skipped = 0
        try:
            results = map(
                len,
                (result.expectedFailures, result.unexpectedSuccesses, result.skipped),
            )
        except AttributeError:
            # Result class lacks the extended attributes; keep zero counts.
            pass
        else:
            expectedFails, unexpectedSuccesses, skipped = results

        psize = 1
        prank = 0
        if self.comm is not None:
            psize = self.comm.size
            prank = self.comm.rank

        # Serialise the per-rank summaries: each rank writes only on its
        # turn, with a barrier after every turn to keep output ordered.
        for p in range(psize):
            if p == prank:
                infos = []
                if not result.wasSuccessful():
                    if self.comm is None:
                        self.stream.write("FAILED")
                    else:
                        self.stream.write("[{}] FAILED".format(self.comm.rank))
                    failed, errored = len(result.failures), len(result.errors)
                    if failed:
                        infos.append("failures=%d" % failed)
                    if errored:
                        infos.append("errors=%d" % errored)
                else:
                    if self.comm is None:
                        self.stream.write("OK")
                    else:
                        self.stream.write("[{}] OK".format(self.comm.rank))
                if skipped:
                    infos.append("skipped=%d" % skipped)
                if expectedFails:
                    infos.append("expected failures=%d" % expectedFails)
                if unexpectedSuccesses:
                    infos.append("unexpected successes=%d" % unexpectedSuccesses)
                if infos:
                    self.stream.writeln(" ({})".format(", ".join(infos)))
                else:
                    self.stream.write("\n")
                self.stream.flush()
            if self.comm is not None:
                self.comm.barrier()

        # if not result.allSuccessful():
        #     self.comm.Abort(1)

        return result
Example #30
0
    def run(self, test, logdir=None):
        """Run the given test case or test suite and generate reports.

        Each case is executed individually so a per-case status, duration
        and debug log can be recorded; afterwards sum.html and sum.xml are
        generated under <logdir>/results.

        Args:
            test: iterable suite of cases; each case must expose
                params['results_dir'].
            logdir: kept for signature compatibility; it is overwritten by
                each case's results_dir during the run.

        Returns:
            The unittest result object (the aggregate `Result` is only used
            for report generation).
        """
        result = self._makeResult()
        test_result = Result()
        registerResult(result)
        result.failfast = self.failfast
        result.buffer = self.buffer
        result.tb_locals = self.tb_locals
        # BUG FIX: sum_txt was unbound when the suite was empty; initialise
        # it so the final "generated" message can be skipped safely.
        sum_txt = None
        with warnings.catch_warnings():
            if self.warnings:
                # if self.warnings is set, use it to filter all the warnings
                warnings.simplefilter(self.warnings)
                # if the filter is 'default' or 'always', special-case the
                # warnings from the deprecated unittest methods to show them
                # no more than once per module, because they can be fairly
                # noisy.  The -Wd and -Wa flags can be used to bypass this
                # only when self.warnings is None.
                if self.warnings in ['default', 'always']:
                    warnings.filterwarnings(
                        'module',
                        category=DeprecationWarning,
                        message=r'Please use assert\w+ instead.')
            startTime = time.perf_counter()
            id = 0
            for ts in test:
                logdir = ts.params['results_dir']
                if not os.path.exists(logdir):
                    os.mkdir(logdir)
                results_dir = logdir + '/results'
                if not os.path.exists(results_dir):
                    os.mkdir(results_dir)
                sum_txt = results_dir + '/sum.log'
                case_status = None
                case_reason = None
                id += 1
                case_startTime = time.perf_counter()
                startTestRun = getattr(result, 'startTestRun', None)
                if startTestRun is not None:
                    startTestRun()
                try:
                    ts(result)
                finally:
                    stopTestRun = getattr(result, 'stopTestRun', None)
                    if stopTestRun is not None:
                        stopTestRun()
                    # Per-case wall time, recorded even when the case raises.
                    ts.duration = timeTaken = round(
                        time.perf_counter() - case_startTime, 3)
                test_class_name = ts.__class__.__name__
                case_dir = '.'.join([test_class_name, ts.id()])
                debug_dir = logdir + "/attachments/" + case_dir
                if not os.path.exists(debug_dir):
                    os.mkdir(debug_dir)
                debug_log = "../attachments/" + case_dir + '/' + ts.id(
                ) + '.debug'
                with pushd(results_dir):
                    # Determine this case's outcome by matching it against
                    # the buckets the unittest result populated.
                    mapped_result = {
                        'FAIL': result.failures,
                        'ERROR': result.errors,
                        'SKIP': result.skipped
                    }
                    for status in mapped_result.keys():
                        for ts_finished, reason in mapped_result[status]:
                            if ts_finished == ts:
                                if status == 'FAIL':
                                    test_result.case_fail += 1
                                if status == 'ERROR':
                                    test_result.case_error += 1
                                if status == 'SKIP':
                                    test_result.case_skip += 1
                                case_status = status
                                case_reason = reason
                                with open(debug_log, 'a+') as fh:
                                    fh.write(reason)
                                    fh.write('{} - {}'.format(ts.id(), status))
                                break
                    # Not in any failure bucket -> the case passed.
                    if not case_status:
                        test_result.case_pass += 1
                        with open(debug_log, 'a+') as fh:
                            fh.write('{} - PASS'.format(ts.id()))
                        case_status = 'PASS'
                        case_reason = ''
                test_result.table_rows.append([
                    id,
                    ts.id(), case_status, case_reason, ts.duration, debug_log,
                    test_class_name
                ])
                with open(sum_txt, 'a+') as fh:
                    fh.write('case: {} - {}\n'.format(ts.id(), case_status))
                    if case_reason:
                        fh.write('info: {}\n'.format(case_reason))
            stopTime = time.perf_counter()
        timeTaken = round(stopTime - startTime, 3)
        test_result.run_time = timeTaken
        # (Removed a leftover dead loop that re-sorted the case names
        # without doing anything, plus a duplicated separator banner.)
        test_result.compute_totals()
        # NOTE(review): logdir below is whatever the *last* case set; with
        # an empty suite it is still None and the report paths would break
        # -- confirm empty suites never reach this runner.
        node_info_file = "{}/attachments/node_info".format(logdir)
        if os.path.exists(node_info_file):
            with open(node_info_file) as fh:
                test_result.node_info = fh.read()
        results_dir = logdir + '/results'
        if not os.path.exists(results_dir):
            os.mkdir(results_dir)
        sum_html = os.path.join(results_dir, "sum.html")
        generated_report(sum_html, "sum.html", test_result)
        sum_junit = os.path.join(results_dir, "sum.xml")
        generated_report(sum_junit, "sum.xml", test_result)
        if sum_txt is not None:
            self.stream.writeln("{} generated".format(
                os.path.realpath(sum_txt)))
        #result.printErrors()
        if hasattr(result, 'separator2'):
            self.stream.writeln(result.separator2)
        run = result.testsRun
        self.stream.writeln("Ran %d test%s in %.3fs" %
                            (run, run != 1 and "s" or "", timeTaken))
        self.stream.writeln()

        expectedFails = unexpectedSuccesses = skipped = 0
        try:
            results = map(len, (result.expectedFailures,
                                result.unexpectedSuccesses, result.skipped))
        except AttributeError:
            pass
        else:
            expectedFails, unexpectedSuccesses, skipped = results

        infos = []
        if not result.wasSuccessful():
            self.stream.write("FAILED")
            failed, errored = len(result.failures), len(result.errors)
            if failed:
                infos.append("failures=%d" % failed)
            if errored:
                infos.append("errors=%d" % errored)
        else:
            self.stream.write("OK")
        if skipped:
            infos.append("skipped=%d" % skipped)
        if expectedFails:
            infos.append("expected failures=%d" % expectedFails)
        if unexpectedSuccesses:
            infos.append("unexpected successes=%d" % unexpectedSuccesses)
        if infos:
            self.stream.writeln(" (%s)" % (", ".join(infos), ))
        else:
            self.stream.write("\n")
        return result
Example #31
0
    def run(self, test):
        """Execute *test* and print a colourised summary.

        Behaves like the stock TextTestRunner.run, but renders the banner
        and the pass/fail counters with ANSI bold/colour escapes.
        """
        outcome = self._makeResult()
        registerResult(outcome)
        outcome.failfast = self.failfast
        outcome.buffer = self.buffer
        began = time.time()
        hook = getattr(outcome, 'startTestRun', None)
        if hook is not None:
            hook()
        try:
            test(outcome)
        finally:
            hook = getattr(outcome, 'stopTestRun', None)
            if hook is not None:
                hook()
        elapsed = time.time() - began
        outcome.printErrors()
        if hasattr(outcome, 'separator2'):
            self.stream.writeln(outcome.separator1)
        count = outcome.testsRun
        self.stream.writeln(' \033[1mRan %d test%s in %.3fs' %
                            (count, count != 1 and 's' or '', elapsed))
        self.stream.writeln()

        skipped = expectedFails = unexpectedSuccesses = 0
        try:
            # Eager lookups inside the try so a missing attribute on the
            # result object is still swallowed, as before.
            tallies = [len(outcome.expectedFailures),
                       len(outcome.unexpectedSuccesses),
                       len(outcome.skipped)]
        except AttributeError:
            pass
        else:
            expectedFails, unexpectedSuccesses, skipped = tallies

        notes = []
        if outcome.wasSuccessful():
            self.stream.write(' \033[1;32mOK\033[00m')
        else:
            self.stream.write(' \033[1;31mFAILED\033[00m')
            n_fail = len(outcome.failures)
            n_err = len(outcome.errors)
            if n_fail:
                notes.append('failures = \033[00;31m%d\033[00m' % n_fail)
            if n_err:
                notes.append('errors = \033[00;31m%d\033[00m' % n_err)
        if skipped:
            notes.append('skipped = \033[00;33m%d\033[00m' % skipped)
        if expectedFails:
            notes.append('expected failures=%d' % expectedFails)
        if unexpectedSuccesses:
            notes.append('unexpected successes=%d' % unexpectedSuccesses)
        if notes:
            self.stream.writeln(' (%s)' % (', '.join(notes), ))
        self.stream.write('\n')
        return outcome
Example #32
0
    def run(self, test):
        """Run the given test case or test suite.

        Executes the suite, prints the standard unittest summary, records
        the start time/duration on the result, and optionally dumps the
        results as JSON to self.report_path.
        """
        results = self._make_result()
        registerResult(results)
        results.failfast = self.failfast
        results.buffer = self.buffer
        results.tb_locals = self.tb_locals
        with warnings.catch_warnings():
            if self.warnings:
                # if self.warnings is set, use it to filter all the warnings
                warnings.simplefilter(self.warnings)
                # if the filter is 'default' or 'always', special-case the
                # warnings from the deprecated unittest methods to show them
                # no more than once per module, because they can be fairly
                # noisy.  The -Wd and -Wa flags can be used to bypass this
                # only when self.warnings is None.
                if self.warnings in ['default', 'always']:
                    # BUG FIX: raw string -- '\w' in a plain literal is an
                    # invalid escape sequence (SyntaxWarning on modern Python).
                    warnings.filterwarnings(
                        'module',
                        category=DeprecationWarning,
                        message=r'Please use assert\w+ instead.')
            start_time = time.time()
            # BUG FIX: the unittest hook is named 'startTestRun' (camelCase);
            # looking up 'start_test_run' always returned None, so the start
            # hook never fired (the stop hook below was already correct).
            start_test_run = getattr(results, 'startTestRun', None)
            if start_test_run is not None:
                start_test_run()
            try:
                test(results)
            finally:
                stop_test_run = getattr(results, 'stopTestRun', None)
                if stop_test_run is not None:
                    stop_test_run()
            stop_time = time.time()
        time_taken = stop_time - start_time
        results.printErrors()
        if hasattr(results, 'separator2'):
            self.stream.writeln(results.separator2)
        run = results.testsRun
        self.stream.writeln("Ran %d test%s in %.3fs" %
                            (run, run != 1 and "s" or "", time_taken))
        self.stream.writeln()

        expected_fails = unexpected_successes = skipped = 0
        try:
            results_text = map(len,
                               (results.expectedFailures,
                                results.unexpectedSuccesses, results.skipped))
        except AttributeError:
            # Result class lacks the extended attributes; keep zero counts.
            pass
        else:
            expected_fails, unexpected_successes, skipped = results_text

        # Record wall-clock start and duration on the result for the report.
        results.set_test_time(
            time.strftime("%b %d %Y %H:%M:%S", time.gmtime(start_time)),
            format(time_taken, ".2f"))

        if self.report_path is not None:
            results.dump_results_into_json(self.report_path)

        info_results = []
        if not results.wasSuccessful():
            self.stream.write("FAILED")
            failed, error = len(results.failures), len(results.errors)
            if failed:
                info_results.append("failures=%d" % failed)
            if error:
                info_results.append("errors=%d" % error)
        else:
            self.stream.write("OK")
        if skipped:
            info_results.append("skipped=%d" % skipped)
        if expected_fails:
            info_results.append("expected failures=%d" % expected_fails)
        if unexpected_successes:
            info_results.append("unexpected successes=%d" %
                                unexpected_successes)
        if info_results:
            self.stream.writeln(" (%s)" % (", ".join(info_results), ))
        else:
            self.stream.write("\n")

        return results
Example #33
0
    def run(self, test):
        """Run *test* after coercing it into Tango-aware test classes.

        Suites are re-classed to TangoTestSuite and individual cases get
        TangoTestCase injected as their base class, then the run proceeds
        like unittest.TextTestRunner.run().
        """

        def _tango_ify(item):
            # A suite is iterable; a bare test case raises TypeError.
            try:
                iter(item)
            except TypeError:
                item.__class__.__bases__ = (TangoTestCase, )
            else:
                item.__class__ = TangoTestSuite
                for child in item:
                    _tango_ify(child)

        _tango_ify(test)

        outcome = self._makeResult()
        registerResult(outcome)
        outcome.failfast = self.failfast
        outcome.buffer = self.buffer

        began = time.time()
        on_start = getattr(outcome, 'startTestRun', None)
        if on_start is not None:
            on_start()
        try:
            test(outcome)
        finally:
            on_stop = getattr(outcome, 'stopTestRun', None)
            if on_stop is not None:
                on_stop()
        elapsed = time.time() - began

        outcome.printErrors()
        if hasattr(outcome, 'separator2'):
            self.stream.writeln(outcome.separator2)
        count = outcome.testsRun
        self.stream.writeln("Ran %d test%s in %.3fs" %
                            (count, count != 1 and "s" or "", elapsed))
        self.stream.writeln()

        # Counters default to zero when the result class does not track
        # skips / expected failures.
        expectedFails = unexpectedSuccesses = skipped = 0
        try:
            tallies = (len(outcome.expectedFailures),
                       len(outcome.unexpectedSuccesses),
                       len(outcome.skipped))
        except AttributeError:
            pass
        else:
            expectedFails, unexpectedSuccesses, skipped = tallies

        summary = []
        if outcome.wasSuccessful():
            self.stream.write("OK")
        else:
            self.stream.write("FAILED")
            if outcome.failures:
                summary.append("failures=%d" % len(outcome.failures))
            if outcome.errors:
                summary.append("errors=%d" % len(outcome.errors))
        if skipped:
            summary.append("skipped=%d" % skipped)
        if expectedFails:
            summary.append("expected failures=%d" % expectedFails)
        if unexpectedSuccesses:
            summary.append("unexpected successes=%d" % unexpectedSuccesses)
        if summary:
            self.stream.writeln(" (%s)" % (", ".join(summary), ))
        else:
            self.stream.write("\n")
        return outcome
Example #34
0
    def run(self, test):
        """Run the given test case or test suite.

        Results are reported through the easyb logger instead of the
        stream-based summary used by unittest.TextTestRunner.

        :param test: test suite.
        :type test: unittest.TestSuite

        :return: test result.
        :rtype: TextTestResult
        """
        test_result = self._make_result()
        registerResult(test_result)
        test_result.failfast = self.failfast
        test_result.buffer = self.buffer
        test_result.tb_locals = self.tb_locals
        with warnings.catch_warnings():
            if self.warnings:
                # if self.warnings is set, use it to filter all the warnings
                warnings.simplefilter(self.warnings)
                # if the filter is 'default' or 'always', special-case the
                # warnings from the deprecated unittest methods to show them
                # no more than once per module, because they can be fairly
                # noisy.  The -Wd and -Wa flags can be used to bypass this
                # only when self.warnings is None.
                if self.warnings in ['default', 'always']:
                    warnings.filterwarnings(
                        'module',
                        category=DeprecationWarning,
                        # raw string: '\w' is an invalid escape sequence in a
                        # plain literal (SyntaxWarning on recent Python)
                        message=r'Please use assert\w+ instead.')
            start_time = time.time()
            start_test_run = getattr(test_result, 'startTestRun', None)
            if start_test_run is not None:
                start_test_run()
            try:
                test(test_result)
            finally:
                stop_test_run = getattr(test_result, 'stopTestRun', None)
                if stop_test_run is not None:
                    stop_test_run()
            stop_time = time.time()
        time_taken = stop_time - start_time
        test_result.printErrors()
        run = test_result.testsRun

        logline = "Ran %d test%s in %.3fs" % (run, run != 1 and "s"
                                              or "", time_taken)
        easyb.log.inform("RUNNER", logline)

        # Counters default to zero when the result class does not track
        # skips / expected failures.
        expected_fails = unexpected_successes = skipped = 0
        try:
            results = map(
                len, (test_result.expectedFailures,
                      test_result.unexpectedSuccesses, test_result.skipped))
        except AttributeError:
            pass
        else:
            expected_fails, unexpected_successes, skipped = results

        infos = []
        if not test_result.wasSuccessful():
            failed, errored = len(test_result.failures), len(
                test_result.errors)
            if failed:
                easyb.log.warn("FAILURES", str(failed))
            if errored:
                easyb.log.warn("ERRORS", str(errored))
        else:
            easyb.log.inform("RUNNER", "OK")

        if skipped:
            infos.append("skipped=%d" % skipped)

        if expected_fails:
            infos.append("expected failures=%d" % expected_fails)

        if unexpected_successes:
            infos.append("unexpected successes=%d" % unexpected_successes)

        if infos:
            logline = " (%s)" % (", ".join(infos), )
            easyb.log.raw(logline)
        return test_result
Example #35
0
    def run(self, test):
        """Execute *test*, log the summary through the module logger, and
        record the total run time in the global results dict."""
        outcome = self._makeResult()
        registerResult(outcome)
        outcome.failfast = self.failfast
        outcome.buffer = self.buffer
        outcome.tb_locals = self.tb_locals
        with warnings.catch_warnings():
            if self.warnings:
                # Apply the caller-supplied warning filter for this run.
                warnings.simplefilter(self.warnings)
                # Show the deprecated-assert warnings at most once per
                # module so the output stays readable; the -Wd and -Wa
                # flags bypass this only when self.warnings is None.
                if self.warnings in ['default', 'always']:
                    warnings.filterwarnings(
                        'module',
                        category=DeprecationWarning,
                        message=r'Please use assert\w+ instead.')
            began = time.time()
            on_start = getattr(outcome, 'startTestRun', None)
            if on_start is not None:
                on_start()
            try:
                test(outcome)
            finally:
                on_stop = getattr(outcome, 'stopTestRun', None)
                if on_stop is not None:
                    on_stop()
            finished = time.time()
        elapsed = finished - began
        outcome.printErrors()
        if hasattr(outcome, 'separator2'):
            logger.info(outcome.separator2)
        count = outcome.testsRun
        logger.info("Ran %d test%s in %.3fs\r\n" %
                    (count, count != 1 and "s" or "", elapsed))
        # Record the total test run time.
        g.results['totalTime'] = str(round(elapsed, 2)) + 's'

        # Counters default to zero when the result class does not track
        # skips / expected failures.
        expectedFails = unexpectedSuccesses = skipped = 0
        try:
            tallies = (len(outcome.expectedFailures),
                       len(outcome.unexpectedSuccesses),
                       len(outcome.skipped))
        except AttributeError:
            pass
        else:
            expectedFails, unexpectedSuccesses, skipped = tallies

        notes = []
        if outcome.wasSuccessful():
            logger.info("SUCCESS!" + "\r\n")
        else:
            logger.error("FAILED!" + "\r\n")
            if outcome.failures:
                notes.append("failures=%d" % len(outcome.failures))
            if outcome.errors:
                notes.append("errors=%d" % len(outcome.errors))
        if skipped:
            notes.append("skipped=%d" % skipped)
        if expectedFails:
            notes.append("expected failures=%d" % expectedFails)
        if unexpectedSuccesses:
            notes.append("unexpected successes=%d" % unexpectedSuccesses)
        if notes:
            logger.info(" (%s)" % (", ".join(notes), ))
        else:
            logger.info("\n")
        return outcome
Example #36
0
def run(suite, stream, args):
    """
    Run the given test case or test suite with the specified arguments.

    Any args.stream passed in will be wrapped in a GreenStream

    :param suite: test suite to execute.
    :param stream: output stream; wrapped in a GreenStream if not one already.
    :param args: parsed arguments controlling warnings, subprocess count,
        coverage, and omit patterns.
    :return: the populated GreenTestResult.
    """
    # Wrap the stream unless it is already a GreenStream (or subclass).
    if not issubclass(GreenStream, type(stream)):
        stream = GreenStream(stream)
    result = GreenTestResult(args, stream)

    # Note: Catching SIGINT isn't supported by Python on windows (python
    # "WONTFIX" issue 18040)
    installHandler()
    registerResult(result)

    with warnings.catch_warnings():
        if args.warnings:
            # if args.warnings is set, use it to filter all the warnings
            warnings.simplefilter(args.warnings)
            # if the filter is 'default' or 'always', special-case the
            # warnings from the deprecated unittest methods to show them
            # no more than once per module, because they can be fairly
            # noisy.  The -Wd and -Wa flags can be used to bypass this
            # only when args.warnings is None.
            if args.warnings in ['default', 'always']:
                warnings.filterwarnings('module',
                        category=DeprecationWarning,
                        # raw string: '\w' is an invalid escape sequence in
                        # a plain literal (SyntaxWarning on recent Python)
                        message=r'Please use assert\w+ instead.')

        result.startTestRun()

        tests = toProtoTestList(suite)
        pool = LoggingDaemonlessPool(processes=args.subprocesses or None)
        if tests:
            async_responses = []
            for index, test in enumerate(tests):
                # Coverage data files are numbered per test, 1-based.
                if args.run_coverage:
                    coverage_number = index + 1
                else:
                    coverage_number = None
                async_responses.append(pool.apply_async(
                    poolRunner,
                    (test.dotted_name, coverage_number, args.omit_patterns)))
            pool.close()
            for test, async_response in zip(tests, async_responses):
                # Prints out the white 'processing...' version of the output
                result.startTest(test)
                # This blocks until the worker who is processing this
                # particular test actually finishes
                try:
                    result.addProtoTestResult(async_response.get())
                except KeyboardInterrupt: # pragma: no cover
                    result.shouldStop = True
                if result.shouldStop:
                    break
        pool.terminate()
        pool.join()

        result.stopTestRun()

    removeResult(result)

    return result
Example #37
0
    def run(self, test):
        '''
        Run the given test case or test suite.

        Timing and the success ("OK") line are intentionally omitted from
        the output; only failure/skip information is written to the stream.

        :param test: test case or suite to execute.
        :return: the populated result object.
        '''
        result = self._makeResult()
        registerResult(result)
        result.failfast = self.failfast
        result.buffer = self.buffer
        with warnings.catch_warnings():
            if hasattr(self, 'warnings') and self.warnings:
                # if self.warnings is set, use it to filter all the warnings
                warnings.simplefilter(self.warnings)
                # if the filter is 'default' or 'always', special-case the
                # warnings from the deprecated unittest methods to show them
                # no more than once per module, because they can be fairly
                # noisy.  The -Wd and -Wa flags can be used to bypass this
                # only when self.warnings is None.
                if self.warnings in ('default', 'always'):
                    warnings.filterwarnings(
                        'module',
                        category=DeprecationWarning,
                        message=r'Please use assert\w+ instead.')
            startTestRun = getattr(result, 'startTestRun', None)
            if startTestRun is not None:
                startTestRun()
            try:
                test(result)
            finally:
                stopTestRun = getattr(result, 'stopTestRun', None)
                if stopTestRun is not None:
                    stopTestRun()
        result.printErrors()

        # Counters default to zero when the result class does not track
        # skips / expected failures.
        expectedFails = unexpectedSuccesses = skipped = 0
        try:
            results = map(len, (result.expectedFailures,
                                result.unexpectedSuccesses, result.skipped))
        except AttributeError:
            pass
        else:
            expectedFails, unexpectedSuccesses, skipped = results

        infos = []
        if not result.wasSuccessful():
            self.stream.write('FAILED')
            failed, errored = len(result.failures), len(result.errors)
            if failed:
                infos.append(f'failures={failed}')
            if errored:
                infos.append(f'errors={errored}')
        if skipped:
            infos.append(f'skipped={skipped}')
        if expectedFails:
            infos.append(f'expected failures={expectedFails}')
        if unexpectedSuccesses:
            infos.append(f'unexpected successes={unexpectedSuccesses}')
        if infos:
            joined = ', '.join(infos)
            self.stream.writeln(f' ({joined})')
        return result
Example #38
0
    def run(self, test):
        """Run the given test case or test suite.

        Timing and the success ("OK") line are intentionally omitted from
        the output; only failure/skip information is written to the stream.

        :param test: test case or suite to execute.
        :return: the populated result object.
        """
        result = self._makeResult()
        registerResult(result)
        result.failfast = self.failfast
        result.buffer = self.buffer
        with warnings.catch_warnings():
            if hasattr(self, 'warnings') and self.warnings:
                # if self.warnings is set, use it to filter all the warnings
                warnings.simplefilter(self.warnings)
                # if the filter is 'default' or 'always', special-case the
                # warnings from the deprecated unittest methods to show them
                # no more than once per module, because they can be fairly
                # noisy.  The -Wd and -Wa flags can be used to bypass this
                # only when self.warnings is None.
                if self.warnings in ['default', 'always']:
                    warnings.filterwarnings('module',
                            category=DeprecationWarning,
                            # raw string: '\w' is an invalid escape sequence
                            # in a plain literal (SyntaxWarning on recent
                            # Python)
                            message=r'Please use assert\w+ instead.')
            startTestRun = getattr(result, 'startTestRun', None)
            if startTestRun is not None:
                startTestRun()
            try:
                test(result)
            finally:
                stopTestRun = getattr(result, 'stopTestRun', None)
                if stopTestRun is not None:
                    stopTestRun()
        result.printErrors()

        # Counters default to zero when the result class does not track
        # skips / expected failures.
        expectedFails = unexpectedSuccesses = skipped = 0
        try:
            results = map(len, (result.expectedFailures,
                                result.unexpectedSuccesses,
                                result.skipped))
        except AttributeError:
            pass
        else:
            expectedFails, unexpectedSuccesses, skipped = results

        infos = []
        if not result.wasSuccessful():
            self.stream.write("FAILED")
            failed, errored = len(result.failures), len(result.errors)
            if failed:
                infos.append("failures=%d" % failed)
            if errored:
                infos.append("errors=%d" % errored)
        if skipped:
            infos.append("skipped=%d" % skipped)
        if expectedFails:
            infos.append("expected failures=%d" % expectedFails)
        if unexpectedSuccesses:
            infos.append("unexpected successes=%d" % unexpectedSuccesses)
        if infos:
            self.stream.writeln(" (%s)" % (", ".join(infos),))
        return result
Example #39
0
    def run(self, test):
        """Run the given test case or test suite.

        Writes a colored header, executes the suite, then prints a summary
        dict (successes, failures, errors, skips, time) via the result's
        printSummary hook instead of the standard unittest summary line.

        :param test: test case or suite to execute.
        :return: the populated result object.
        """

        self.resultclass.number_of_tests = test.countTestCases()

        if self.verbosity >= 2:
            self.stream.writeln(colored('\nTesting:\n', attrs=['bold']))
        else:
            self.stream.writeln('Testing...')

        self._check_suites(test)

        result = self._makeResult()
        registerResult(result)
        result.failfast = self.failfast
        result.buffer = self.buffer

        with warnings.catch_warnings():
            if self.warnings:
                # if self.warnings is set, use it to filter all the warnings
                warnings.simplefilter(self.warnings)
                # if the filter is 'default' or 'always', special-case the
                # warnings from the deprecated unittest methods to show them
                # no more than once per module, because they can be fairly
                # noisy.  The -Wd and -Wa flags can be used to bypass this
                # only when self.warnings is None.
                if self.warnings in ['default', 'always']:
                    warnings.filterwarnings(
                        'module',
                        category=DeprecationWarning,
                        # raw string: '\w' is an invalid escape sequence in a
                        # plain literal (SyntaxWarning on recent Python)
                        message=r'Please use assert\w+ instead.')

            startTime = time.time()
            startTestRun = getattr(result, 'startTestRun', None)
            if startTestRun is not None:
                startTestRun()
            try:
                test(result)
            finally:
                stopTestRun = getattr(result, 'stopTestRun', None)
                if stopTestRun is not None:
                    stopTestRun()
            stopTime = time.time()

        timeTaken = stopTime - startTime
        result.printErrors()

        run = result.testsRun

        details = {
            'success': result.wasSuccessful(),
            # NOTE: unexpected successes are counted as failures here —
            # presumably deliberate, since wasSuccessful() treats them the
            # same way; confirm against the result class if changing.
            'failures': len(result.failures) + len(result.unexpectedSuccesses),
            'errors': len(result.errors),
            'skipped': len(result.skipped),
            'time': round(timeTaken, 3),
            'sum': run,
        }
        details['successes'] = run - (details['failures'] + details['errors'])
        result.printSummary(details)

        return result
Example #40
0
File: mpi.py Project: hpc4cmb/toast
    def run(self, test):
        """Run the given test case or test suite on every MPI rank.

        Each rank executes the suite and prints its own "[rank] OK/FAILED"
        line; a barrier loop serializes that output so ranks write one at a
        time.  Rank 0 alone prints the shared header and summary lines.
        Returns this rank's MPITestResult.
        """
        result = MPITestResult(self.comm, self.stream, self.descriptions,
            self.verbosity)
        registerResult(result)
        with warnings.catch_warnings():
            if self.warnings:
                # if self.warnings is set, use it to filter all the warnings
                warnings.simplefilter(self.warnings)
                # if the filter is "default" or "always", special-case the
                # warnings from the deprecated unittest methods to show them
                # no more than once per module, because they can be fairly
                # noisy.  The -Wd and -Wa flags can be used to bypass this
                # only when self.warnings is None.
                if self.warnings in ["default", "always"]:
                    warnings.filterwarnings("module",
                            category=DeprecationWarning,
                            message=r"Please use assert\w+ instead.")
            startTime = time.time()
            startTestRun = getattr(result, "startTestRun", None)
            if startTestRun is not None:
                startTestRun()
            try:
                test(result=result)
            finally:
                stopTestRun = getattr(result, "stopTestRun", None)
                if stopTestRun is not None:
                    stopTestRun()
            stopTime = time.time()
        # Flush this rank's output, then wait for all ranks before rank 0
        # writes the blank separator line.
        self.stream.flush()
        self.comm.barrier()
        if self.comm.rank == 0:
            self.stream.write("\n")
            self.stream.flush()
        timeTaken = stopTime - startTime

        result.printErrors()

        # Only rank 0 prints the separator and "Ran N tests" header, so the
        # shared part of the report appears exactly once.
        if self.comm.rank == 0:
            if hasattr(result, "separator2"):
                self.stream.writeln(result.separator2)

        run = result.testsRun
        if self.comm.rank == 0:
            self.stream.writeln("Ran %d test%s in %.3fs" %
                                (run, run != 1 and "s" or "", timeTaken))
            self.stream.writeln()

        # Counters default to zero when the result class does not track
        # skips / expected failures (AttributeError path below).
        expectedFails = unexpectedSuccesses = skipped = 0
        try:
            results = map(len, (result.expectedFailures,
                                result.unexpectedSuccesses,
                                result.skipped))
        except AttributeError:
            pass
        else:
            expectedFails, unexpectedSuccesses, skipped = results

        # Serialize per-rank status output: each rank writes only on its
        # turn (p == rank), and every rank reaches the barrier once per
        # iteration so no rank races ahead.
        for p in range(self.comm.size):
            if p == self.comm.rank:
                infos = []
                if not result.wasSuccessful():
                    self.stream.write("[{}] FAILED".format(self.comm.rank))
                    failed, errored = len(result.failures), len(result.errors)
                    if failed:
                        infos.append("failures=%d" % failed)
                    if errored:
                        infos.append("errors=%d" % errored)
                else:
                    self.stream.write("[{}] OK".format(self.comm.rank))

                if skipped:
                    infos.append("skipped=%d" % skipped)
                if expectedFails:
                    infos.append("expected failures=%d" % expectedFails)
                if unexpectedSuccesses:
                    infos.append("unexpected successes=%d" % unexpectedSuccesses)
                if infos:
                    self.stream.writeln(" ({})".format(", ".join(infos)))
                else:
                    self.stream.write("\n")
                self.stream.flush()
            self.comm.barrier()

        # if not result.allSuccessful():
        #     self.comm.Abort(1)

        return result