Example #1
0
def make_human_readable_result(stream):
    """Build a test result that writes human-oriented output to *stream*.

    Combines testtools' standard text report with a per-test one-line
    status message of the form ``STATUS: test-id (N.NNs)``.
    """
    def print_result(test, status, start_time, stop_time, tags, details):
        # A missing test object is reported with a placeholder id.
        if test is None:
            testid = "<none>"
        else:
            testid = test.id()
        # abs() guards against a negative delta (e.g. clock adjustment).
        elapsed = abs((stop_time - start_time).total_seconds())
        line = "%s: %s (%0.2fs)" % (status.upper(), testid, elapsed)
        print(line, file=stream, flush=True)

    return testtools.MultiTestResult(
        testtools.TextTestResult(stream, failfast=False, tb_locals=False),
        testtools.TestByTestResult(print_result))
Example #2
0
 def test_report_extensively_enabled(self):
     """Extended report includes the url, original exception and page source."""
     test = self.get_handle_exceptions_test(with_extended_report=True)
     result = testtools.TextTestResult(cStringIO.StringIO())
     result.startTestRun()
     test.run(result)
     result.stopTestRun()
     # Read the report once and check every expected fragment against it.
     report = result.stream.getvalue()
     for fragment in (
             'Current url: {{{unavailable}}}',
             'Original exception: {{{AssertionError : False is not true}}}',
             'Page source: {{{unavailable}}}'):
         self.assertIn(fragment, report)
else:
    tests = []

# Collect summary of all the individual test runs
summary = testtools.StreamSummary()

# Output information to stdout
if not args.subunit:
    # Human readable test output
    pertest = testtools.StreamToExtendedDecorator(
        testtools.MultiTestResult(
            # Individual test progress
            unittest.TextTestResult(
                unittest.runner._WritelnDecorator(sys.stdout), False, 2),
            # End of run, summary of failures.
            testtools.TextTestResult(sys.stdout),
        ))
else:
    from subunit.v2 import StreamResultToBytes
    pertest = StreamResultToBytes(sys.stdout)

    if args.list:
        output = subunit.CopyStreamResult([summary, pertest])
        output.startTestRun()
        # Extract the single-quoted test names from the test-case manifest.
        # Use open() in a context manager instead of the Python 2-only
        # file() builtin (removed in Python 3), so the handle is also
        # closed deterministically instead of leaking.
        with open("test/testcases.js") as manifest:
            cases = re.compile(r"(?<=').+(?=')").findall(manifest.read())
        for test in cases:
            # test[:-5] presumably strips a trailing '.html' — confirm
            output.status(test_status='exists', test_id=test[:-5])

        output.stopTestRun()
        sys.exit(-1)
if args.load_list:
    # One '.html' page per unique prefix (text before ':') in the load list.
    tests = list({
        line.split(':')[0].strip() + '.html'
        for line in args.load_list.readlines()})
else:
    tests = []

# Collect summary of all the individual test runs
summary = testtools.StreamSummary()

# Output information to stdout
if not args.subunit:
    result_streams = []
    if args.verbose:
        import unittest
        # Individual per-test progress goes first when verbose.
        result_streams.append(
            unittest.TextTestResult(
                unittest.runner._WritelnDecorator(sys.stdout), False, 2))
    # Test-failure output always comes last.
    result_streams.append(testtools.TextTestResult(sys.stdout))
    # Human readable test output
    pertest = testtools.StreamToExtendedDecorator(
        testtools.MultiTestResult(*result_streams))
else:
    # Machine-readable subunit v2 byte stream instead of human-readable text.
    from subunit.v2 import StreamResultToBytes
    pertest = StreamResultToBytes(sys.stdout)

    if args.list: