def main():
    """Convert a subunit log into an HTML report.

    Command line: prog <subunit_file> [<html_file>]. The report path
    defaults to 'results.html' when no second argument is given.

    Fixes over the previous revision: uses sys.exit() instead of the
    site-only exit() builtin, and closes the log file when done
    (it was previously leaked).
    """
    if len(sys.argv) < 2:
        print("Need at least one argument: path to subunit log.")
        sys.exit(1)
    subunit_file = sys.argv[1]
    html_file = sys.argv[2] if len(sys.argv) > 2 else 'results.html'

    html_result = HtmlOutput(html_file)
    with open(subunit_file, 'rb') as stream:
        # Feed the subunit stream through both a V1 and V2 parser.
        # Depends on having the v2 capable libraries installed.
        # First V2.
        # Non-v2 content and captured non-test output will be presented
        # as file segments called stdout.
        suite = subunit.ByteStreamToStreamResult(
            stream, non_subunit_name='stdout')
        # The HTML output code is in legacy mode.
        result = testtools.StreamToExtendedDecorator(html_result)
        # Divert non-test output
        accumulator = FileAccumulator()
        result = testtools.StreamResultRouter(result)
        result.add_rule(accumulator, 'test_id', test_id=None)
        result.startTestRun()
        suite.run(result)
        # Now reprocess any found stdout content as V1 subunit
        for bytes_io in accumulator.route_codes.values():
            bytes_io.seek(0)
            suite = subunit.ProtocolTestCase(bytes_io)
            suite.run(html_result)
        result.stopTestRun()
def main(subunit_log_file):
    """Convert a subunit log into JSON results and return them as bytes.

    :param subunit_log_file: path to a subunit log file.
    :return: the JSON report contents (bytes); the temporary results
        file is removed before returning.

    Fixes over the previous revision: the descriptor returned by
    tempfile.mkstemp() is now closed (it was leaked — JsonOutput opens
    the path by name, so the raw fd was never used), and the log stream
    is closed via a context manager.
    """
    fd, results_file = tempfile.mkstemp()
    # mkstemp hands back an already-open descriptor; close it right away
    # since we only need the path.
    os.close(fd)
    result = JsonOutput(results_file)
    with open(subunit_log_file, 'rb') as stream:
        # Feed the subunit stream through both a V1 and V2 parser.
        # Depends on having the v2 capable libraries installed.
        # First V2.
        # Non-v2 content and captured non-test output will be presented
        # as file segments called stdout.
        suite = subunit.ByteStreamToStreamResult(
            stream, non_subunit_name='stdout')
        # The JSON output code is in legacy mode.
        raw_result = testtools.StreamToExtendedDecorator(result)
        # Divert non-test output
        accumulator = FileAccumulator()
        result = testtools.StreamResultRouter(raw_result)
        result.add_rule(accumulator, 'test_id', test_id=None)
        result.startTestRun()
        suite.run(result)
        # Now reprocess any found stdout content as V1 subunit
        for bytes_io in accumulator.route_codes.values():
            bytes_io.seek(0)
            suite = subunit.ProtocolTestCase(bytes_io)
            suite.run(result)
        result.stopTestRun()
    with open(results_file, 'rb') as temp_results_file:
        data = temp_results_file.read()
    try:
        os.unlink(results_file)
    except OSError as e:
        # Already gone is fine; anything else is a real error.
        if e.errno != errno.ENOENT:
            raise
    return data
def main():
    """Run the subunit filter pipeline, printing result statistics.

    Accumulates test outcomes into a TestResultStats on stdout and
    formats the stats once the whole stream has been consumed.
    """
    stats = pysubunit.TestResultStats(sys.stdout)

    def print_summary(wrapped):
        # run_filter_script hands back the decorator; the stats object
        # sits underneath it.
        wrapped.decorated.formatStats()

    filters.run_filter_script(
        lambda output: testtools.StreamToExtendedDecorator(stats),
        __doc__,
        print_summary,
        protocol_version=2,
        passthrough_subunit=False)
def main():
    """Echo a subunit stream to stdout, diverting file content to a cat.

    Parses command-line options, routes test events to a protocol client
    writing on stdout, and sends non-test file attachments through
    CatFiles. Exits with status 0 when the stream is exhausted.
    """
    parser = make_options(__doc__)
    (options, args) = parser.parse_args()
    source = filters.find_stream(sys.stdin, args)
    case = pysubunit.ByteStreamToStreamResult(
        source, non_subunit_name='stdout')
    client = pysubunit.decoratedTestProtocolClient(sys.stdout)
    router = testtools.StreamResultRouter(
        testtools.StreamToExtendedDecorator(client))
    # Non-test content (no test_id) goes straight to stdout via CatFiles.
    router.add_rule(
        test_results.CatFiles(sys.stdout), 'test_id', test_id=None)
    router.startTestRun()
    case.run(router)
    router.stopTestRun()
    sys.exit(0)
def _make_result(output, options, predicate):
    """Make the result that we'll send the test outcomes to.

    Builds a three-layer pipeline: a v2 byte serializer on *output*,
    wrapped by a TestResultFilter configured from *options* and
    *predicate*, wrapped by a legacy-API decorator.
    """
    fixups = set()
    for path in options.fixup_expected_failures or ():
        fixups.update(pysubunit.read_test_list(path))
    sink = testtools.ExtendedToStreamDecorator(
        v2.StreamResultToBytes(output))
    filtered = test_results.TestResultFilter(
        sink,
        filter_error=options.error,
        filter_failure=options.failure,
        filter_success=options.success,
        filter_skip=options.skip,
        filter_xfail=options.xfail,
        filter_predicate=predicate,
        fixup_expected_failures=fixups)
    return testtools.StreamToExtendedDecorator(filtered)
def __init__(self, repository, partial=False):
    """Create an inserter writing a subunit stream into *repository*.

    :param repository: the repository to insert into; a temp file is
        created inside its base directory.
    :param partial: whether this run is a partial test run.
    """
    # XXX: Perhaps should factor into a decorator and use an unaltered
    # TestProtocolClient.
    self._repository = repository
    handle, path = tempfile.mkstemp(dir=self._repository.base)
    self.fname = path
    out = os.fdopen(handle, 'wb')
    self.partial = partial
    # The time take by each test, flushed at the end.
    self._times = {}
    self._test_start = None
    self._time = None
    wrapped_client = testtools.StreamToExtendedDecorator(
        TestProtocolClient(out))
    self.hook = testtools.CopyStreamResult([
        wrapped_client,
        testtools.StreamToDict(self._handle_test)])
    self._stream = out
def parse(subunit_file, non_subunit_name="pythonlogging"):
    """Parse a subunit log file through V2 then V1 processing.

    :param subunit_file: path to the subunit log.
    :param non_subunit_name: route code under which non-subunit content
        is accumulated.
    :return: the populated SubunitParser.

    Fix over the previous revision: the log file handle is now closed
    (it was previously leaked).
    """
    subunit_parser = SubunitParser()
    with open(subunit_file, 'rb') as stream:
        suite = subunit.ByteStreamToStreamResult(
            stream, non_subunit_name=non_subunit_name)
        result = testtools.StreamToExtendedDecorator(subunit_parser)
        # Divert non-test content for a second, V1 pass below.
        accumulator = FileAccumulator(non_subunit_name)
        result = testtools.StreamResultRouter(result)
        result.add_rule(accumulator, 'test_id', test_id=None)
        result.startTestRun()
        suite.run(result)
        for bytes_io in accumulator.route_codes.values():
            # v1 processing
            bytes_io.seek(0)
            suite = subunit.ProtocolTestCase(bytes_io)
            suite.run(subunit_parser)
        result.stopTestRun()
    return subunit_parser
def parse(stream, non_subunit_name, ports):
    """Parse a subunit stream, extracting URLs, through V2 then V1.

    :param stream: binary stream of subunit data.
    :param non_subunit_name: route code for non-subunit content.
    :param ports: optional path to a JSON file of port mappings; loaded
        when the file exists, otherwise passed through as-is.
    :return: the populated UrlParser.

    Fix over the previous revision: the ports file handle is now closed
    (``open(ports).read()`` leaked it).
    """
    if ports is not None and os.path.exists(ports):
        with open(ports) as ports_file:
            ports = json.load(ports_file)

    url_parser = UrlParser(ports)
    suite = subunit.ByteStreamToStreamResult(
        stream, non_subunit_name=non_subunit_name)
    result = testtools.StreamToExtendedDecorator(url_parser)
    # Divert non-test content for a second, V1 pass below.
    accumulator = FileAccumulator(non_subunit_name)
    result = testtools.StreamResultRouter(result)
    result.add_rule(accumulator, 'test_id', test_id=None)
    result.startTestRun()
    suite.run(result)
    for bytes_io in accumulator.route_codes.values():
        # v1 processing
        bytes_io.seek(0)
        suite = subunit.ProtocolTestCase(bytes_io)
        suite.run(url_parser)
    result.stopTestRun()
    return url_parser
set( x.split(':')[0].strip() + '.html' for x in args.load_list.readlines())) else: tests = [] # Collect summary of all the individual test runs summary = testtools.StreamSummary() # Output information to stdout if not args.subunit: # Human readable test output pertest = testtools.StreamToExtendedDecorator( testtools.MultiTestResult( # Individual test progress unittest.TextTestResult( unittest.runner._WritelnDecorator(sys.stdout), False, 2), # End of run, summary of failures. testtools.TextTestResult(sys.stdout), )) else: from subunit.v2 import StreamResultToBytes pertest = StreamResultToBytes(sys.stdout) if args.list: output = subunit.CopyStreamResult([summary, pertest]) output.startTestRun() for test in re.compile("(?<=').+(?=')").findall( file("test/testcases.js").read()): output.status(test_status='exists', test_id=test[:-5]) output.stopTestRun()
# Collect summary of all the individual test runs
summary = testtools.StreamSummary()

# Output information to stdout
if not args.subunit:
    # Output test failures
    result_streams = [testtools.TextTestResult(sys.stdout)]
    if args.verbose:
        import unittest
        # Output individual test progress
        result_streams.insert(
            0,
            unittest.TextTestResult(
                unittest.runner._WritelnDecorator(sys.stdout), False, 2))
    # Human readable test output
    pertest = testtools.StreamToExtendedDecorator(
        testtools.MultiTestResult(*result_streams))
else:
    from subunit.v2 import StreamResultToBytes
    pertest = StreamResultToBytes(sys.stdout)

if args.list:
    # Listing mode: emit an 'exists' event per test case found in the
    # testcases file, then exit without running anything.
    output = subunit.CopyStreamResult([summary, pertest])
    output.startTestRun()
    # open() instead of the Python-2-only file() builtin, and close the
    # handle when done (it was previously leaked).
    with open("test/testcases.js") as cases:
        source = cases.read()
    for test in re.compile("(?<=').+(?=')").findall(source):
        # Strip the trailing '.html' suffix from the matched name.
        output.status(test_status='exists', test_id=test[:-5])
    output.stopTestRun()
    sys.exit(-1)

output = subunit.CopyStreamResult([summary, pertest])
def main():
    """Run the filter pipeline, serializing outcomes as CSV rows."""
    def build_result(output):
        # Legacy-API decorator around the CSV writer on *output*.
        return testtools.StreamToExtendedDecorator(
            test_results.CsvResult(output))

    filters.run_filter_script(build_result, __doc__, protocol_version=2)
parser.add_option('-s', '--suite', help='Test suite name to use in the output.') (options, _args) = parser.parse_args() return options options = parse_options() if options.input_from: input_stream = open(options.input_from, 'r') else: input_stream = sys.stdin passthrough, forward = False, False result = subunit.filters.filter_by_result( lambda output: testtools.StreamToExtendedDecorator( JenkinsXmlResult(options.suite, output)), options.output_to, passthrough, forward, protocol_version=2, input_stream=input_stream) if options.input_from: input_stream.close() if not extras.safe_hasattr(result, 'wasSuccessful'): result = result.decorated if result.wasSuccessful(): sys.exit(0) else: sys.exit(1)