class AsyncByteStreamToStreamResultTest(TestCase):

    def setUp(self):
        super(AsyncByteStreamToStreamResultTest, self).setUp()
        self.result = StreamResult()
        self.stream = AsyncBytesIO()
        self.encoder = StreamResultToBytes(self.stream.buffer)
        self.decoder = AsyncByteStreamToStreamResult(self.stream)
        self.decoder.run(self.result)

    def test_do_read_one(self):
        """
        If there's data for exactly one packet, the relevant event is fired.
        """
        self.encoder.status(test_id="foo", test_status="exists")
        self.stream.seek(0)
        self.decoder.do_read()
        [event] = self.result._events
        self.assertEqual(("status", "foo", "exists"), event[:3])

    def test_do_read_many(self):
        """
        If there's data for several packets, all the relevant events are
        fired.
        """
        self.encoder.status(test_id="foo", test_status="exists")
        self.encoder.status(test_id="bar", test_status="exists")
        self.stream.seek(0)
        self.assertFalse(self.decoder.do_read())
        [event1, event2] = self.result._events
        self.assertEqual(("status", "foo", "exists"), event1[:3])
        self.assertEqual(("status", "bar", "exists"), event2[:3])

    def test_do_read_eof(self):
        """
        If the end of file is reached, the method returns True.
        """
        self.stream.eof = True
        self.assertTrue(self.decoder.do_read())

    def test_do_read_unexpected_os_error(self):
        """
        Unexpected errors are propagated.
        """
        def read(length):
            raise OSError(errno.ENOMEM, os.strerror(errno.ENOMEM))

        self.stream.read = read
        error = self.assertRaises(OSError, self.decoder.do_read)
        self.assertEqual(errno.ENOMEM, error.errno)

    def test_do_read_garbage(self):
        """
        If unexpected data is found, an error is raised.
        """
        self.stream.write(b("boom"))
        self.stream.seek(0)
        error = self.assertRaises(ParseError, self.decoder.do_read)
        self.assertEqual("Invalid packet signature", str(error))
class SubunitContext:
    """Context manager for writing subunit results."""

    def __init__(self, output_path):
        self.output_path = output_path

    def __enter__(self):
        self.output_stream = open(self.output_path, "wb+")
        self.output = StreamResultToBytes(self.output_stream)
        self.output.startTestRun()
        return self.output

    def __exit__(self, *args, **kwargs):
        self.output.stopTestRun()
        self.output_stream.close()
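# A minimal usage sketch for the SubunitContext manager above (not from the
# original source): the output path and test id are hypothetical examples.
with SubunitContext("results.subunit") as output:
    output.status(test_id="example.test", test_status="inprogress")
    output.status(test_id="example.test", test_status="success")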
def tag_stream(original, filtered, tags):
    """Alter tags on a stream.

    :param original: The input stream.
    :param filtered: The output stream.
    :param tags: The tags to apply. As in a normal stream - a list of 'TAG' or
        '-TAG' commands.

        A 'TAG' command will add the tag to the output stream, and override
        any existing '-TAG' command in that stream. Specifically:
         * A global 'tags: TAG' will be added to the start of the stream.
         * Any tags commands with -TAG will have the -TAG removed.

        A '-TAG' command will remove the TAG command from the stream.
        Specifically:
         * A 'tags: -TAG' command will be added to the start of the stream.
         * Any 'tags: TAG' command will have 'TAG' removed from it.
        Additionally, any redundant tagging commands (adding a tag globally
        present, or removing a tag globally removed) are stripped as a
        by-product of the filtering.
    :return: 0
    """
    new_tags, gone_tags = tags_to_new_gone(tags)
    source = ByteStreamToStreamResult(original, non_subunit_name='stdout')

    class Tagger(CopyStreamResult):

        def status(self, **kwargs):
            tags = kwargs.get('test_tags')
            if not tags:
                tags = set()
            tags.update(new_tags)
            tags.difference_update(gone_tags)
            if tags:
                kwargs['test_tags'] = tags
            else:
                kwargs['test_tags'] = None
            super(Tagger, self).status(**kwargs)

    output = Tagger([StreamResultToBytes(filtered)])
    source.run(output)
    return 0
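# A minimal sketch of driving tag_stream above, roughly what a command-line
# filter would do (an assumption, not from the original source): read a
# subunit v2 stream from stdin, add tag 'quick', drop tag 'slow', and write
# the rewritten stream to stdout. Binary stdin/stdout buffers are assumed.
import sys

exit_code = tag_stream(sys.stdin.buffer, sys.stdout.buffer, ["quick", "-slow"])
sys.exit(exit_code)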
def test_smoke(self):
    output = os.path.join(self.useFixture(TempDir()).path, 'output')
    stdin = io.BytesIO()
    stdout = io.StringIO()
    writer = StreamResultToBytes(stdin)
    writer.startTestRun()
    writer.status(
        'foo', 'success', set(['tag']), file_name='fred',
        file_bytes=b'abcdefg', eof=True, mime_type='text/plain')
    writer.stopTestRun()
    stdin.seek(0)
    _to_disk.to_disk(['-d', output], stdin=stdin, stdout=stdout)
    self.expectThat(
        os.path.join(output, 'foo/test.json'),
        FileContains(
            '{"details": ["fred"], "id": "foo", "start": null, '
            '"status": "success", "stop": null, "tags": ["tag"]}'))
    self.expectThat(
        os.path.join(output, 'foo/fred'), FileContains('abcdefg'))
def TAP2SubUnit(tap, output_stream):
    """Filter a TAP pipe into a subunit pipe.

    This should be invoked once per TAP script, as TAP scripts get mapped
    to a single runnable case with multiple components.

    :param tap: A tap pipe/stream/file object - should emit unicode strings.
    :param output_stream: A pipe/stream/file object to write subunit
        results to.
    :return: The exit code to exit with.
    """
    output = StreamResultToBytes(output_stream)
    UTF8_TEXT = 'text/plain; charset=UTF8'
    BEFORE_PLAN = 0
    AFTER_PLAN = 1
    SKIP_STREAM = 2
    state = BEFORE_PLAN
    plan_start = 1
    plan_stop = 0
    # Test data for the next test to emit
    test_name = None
    log = []
    result = None

    def missing_test(plan_start):
        output.status(test_id='test %d' % plan_start, test_status='fail',
            runnable=False, mime_type=UTF8_TEXT, eof=True,
            file_name="tap meta",
            file_bytes=b"test missing from TAP output")

    def _emit_test():
        "write out a test"
        if test_name is None:
            return
        if log:
            log_bytes = b'\n'.join(log_line.encode('utf8') for log_line in log)
            mime_type = UTF8_TEXT
            file_name = 'tap comment'
            eof = True
        else:
            log_bytes = None
            mime_type = None
            file_name = None
            eof = True
        del log[:]
        output.status(test_id=test_name, test_status=result,
            file_bytes=log_bytes, mime_type=mime_type, eof=eof,
            file_name=file_name, runnable=False)

    for line in tap:
        if state == BEFORE_PLAN:
            match = re.match(r"(\d+)\.\.(\d+)\s*(?:#\s+(.*))?\n", line)
            if match:
                state = AFTER_PLAN
                _, plan_stop, comment = match.groups()
                plan_stop = int(plan_stop)
                if plan_start > plan_stop and plan_stop == 0:
                    # skipped file
                    state = SKIP_STREAM
                    output.status(test_id='file skip', test_status='skip',
                        file_bytes=comment.encode('utf8'), eof=True,
                        file_name='tap comment')
                continue
        # not a plan line, or have seen one before
        match = re.match(
            r"(ok|not ok)(?:\s+(\d+)?)?(?:\s+([^#]*[^#\s]+)\s*)?"
            r"(?:\s+#\s+(TODO|SKIP|skip|todo)(?:\s+(.*))?)?\n", line)
        if match:
            # new test, emit current one.
            _emit_test()
            (status, number, description,
             directive, directive_comment) = match.groups()
            if status == 'ok':
                result = 'success'
            else:
                result = "fail"
            if description is None:
                description = ''
            else:
                description = ' ' + description
            if directive is not None:
                if directive.upper() == 'TODO':
                    result = 'xfail'
                elif directive.upper() == 'SKIP':
                    result = 'skip'
                if directive_comment is not None:
                    log.append(directive_comment)
            if number is not None:
                number = int(number)
                while plan_start < number:
                    missing_test(plan_start)
                    plan_start += 1
            test_name = "test %d%s" % (plan_start, description)
            plan_start += 1
            continue
        match = re.match(r"Bail out!(?:\s*(.*))?\n", line)
        if match:
            reason, = match.groups()
            if reason is None:
                extra = ''
            else:
                extra = ' %s' % reason
            _emit_test()
            test_name = "Bail out!%s" % extra
            result = "fail"
            state = SKIP_STREAM
            continue
        match = re.match(r"#.*\n", line)
        if match:
            log.append(line[:-1])
            continue
        # Should look at buffering status and binding this to the prior
        # result.
        output.status(file_bytes=line.encode('utf8'), file_name='stdout',
            mime_type=UTF8_TEXT)
    _emit_test()
    while plan_start <= plan_stop:
        # record missed tests
        missing_test(plan_start)
        plan_start += 1
    return 0
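# A minimal sketch of invoking TAP2SubUnit above (not from the original
# source): the file name is a hypothetical example, and writing to the binary
# stdout buffer (Python 3) is an assumption.
import sys

with open("report.tap") as tap:
    sys.exit(TAP2SubUnit(tap, sys.stdout.buffer))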
def test_trace_with_stuck_inprogress(self):
    output = io.BytesIO()
    stream = StreamResultToBytes(output)
    stream.startTestRun()
    stream.status(test_id='test_passes', test_status='inprogress',
                  timestamp=dt.now(UTC))
    stream.status(test_id='test_segfault', test_status='inprogress',
                  timestamp=dt.now(UTC))
    stream.status(test_id='test_passes', test_status='success',
                  timestamp=dt.now(UTC))
    stream.stopTestRun()
    output.seek(0)
    # capture stderr for test
    stderr = io.StringIO()
    sys_err = sys.stderr
    sys.stderr = stderr

    def restore_stderr():
        sys.stderr = sys_err

    self.addCleanup(restore_stderr)
    stdin = io.TextIOWrapper(io.BufferedReader(output))
    returncode = subunit_trace.trace(stdin, sys.stdout)
    self.assertEqual(1, returncode)
    stderr.seek(0)
    expected = """
The following tests exited without returning a status
and likely segfaulted or crashed Python:
\t* test_segfault
"""
    self.assertEqual(stderr.read(), expected)
def take_action(self, args):
    # Setup where the output stream must go
    if args.output_file == '-':
        output_stream = sys.stdout
    else:
        output_stream = open(args.output_file, 'wb')

    # Create the output stream
    output = StreamResultToBytes(output_stream)

    # Create the test run
    output.startTestRun()

    if args.playbook is not None:
        playbooks = args.playbook
        results = (models.TaskResult().query
                   .join(models.Task)
                   .filter(models.TaskResult.task_id == models.Task.id)
                   .filter(models.Task.playbook_id.in_(playbooks)))
    else:
        results = models.TaskResult().query.all()

    for result in results:
        # Generate a fixed length identifier for the task
        test_id = utils.generate_identifier(result)

        # Assign the test_status value
        if result.status in ('failed', 'unreachable'):
            if result.ignore_errors is False:
                test_status = 'xfail'
            else:
                test_status = 'fail'
        elif result.status == 'skipped':
            test_status = 'skip'
        else:
            test_status = 'success'

        # Determine the play file path
        if result.task.playbook and result.task.playbook.path:
            playbook_path = result.task.playbook.path
        else:
            playbook_path = ''

        # Determine the task file path
        if result.task.file and result.task.file.path:
            task_path = result.task.file.path
        else:
            task_path = ''

        # Assign the file_bytes value
        test_data = {
            'host': result.host.name,
            'playbook_id': result.task.playbook.id,
            'playbook_path': playbook_path,
            'play_name': result.task.play.name,
            'task_action': result.task.action,
            'task_action_lineno': result.task.lineno,
            'task_id': result.task.id,
            'task_name': result.task.name,
            'task_path': task_path
        }
        file_bytes = encodeutils.safe_encode(jsonutils.dumps(test_data))

        # Assign the start_time and stop_time value
        # The timestamp needs to be an epoch, so we need
        # to convert it.
        start_time = datetime.datetime.fromtimestamp(
            float(result.time_start.strftime('%s'))
        ).replace(tzinfo=iso8601.UTC)
        end_time = datetime.datetime.fromtimestamp(
            float(result.time_end.strftime('%s'))
        ).replace(tzinfo=iso8601.UTC)

        # Output the start of the event
        output.status(
            test_id=test_id,
            timestamp=start_time
        )

        # Output the end of the event
        output.status(
            test_id=test_id,
            test_status=test_status,
            test_tags=None,
            runnable=False,
            file_name=test_id,
            file_bytes=file_bytes,
            timestamp=end_time,
            eof=True,
            mime_type='text/plain; charset=UTF8'
        )

    output.stopTestRun()
def _make_subunit(reporter):
    return AutoTimingTestResultDecorator(
        ExtendedToStreamDecorator(
            StreamResultToBytes(reporter.stream)))
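# A minimal sketch of driving the decorated result built above (an
# assumption, not from the original source): the decorator stack accepts the
# classic unittest TestResult API, stamps events with timestamps, and encodes
# them as subunit v2 bytes. The _Reporter and _Example classes below are
# hypothetical stand-ins; only a writable binary `.stream` attribute is
# assumed.
import io
import unittest


class _Reporter(object):
    def __init__(self):
        self.stream = io.BytesIO()


class _Example(unittest.TestCase):
    def test_ok(self):
        pass


result = _make_subunit(_Reporter())
result.startTestRun()
_Example("test_ok").run(result)   # emitted as timestamped subunit v2 events
result.stopTestRun()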
def __enter__(self): self.output_stream = open(self.output_path, "wb+") self.output = StreamResultToBytes(self.output_stream) self.output.startTestRun() return self.output
summary = testtools.StreamSummary()

# Output information to stdout
if not args.subunit:
    # Human readable test output
    pertest = testtools.StreamToExtendedDecorator(
        testtools.MultiTestResult(
            # Individual test progress
            unittest.TextTestResult(
                unittest.runner._WritelnDecorator(sys.stdout), False, 2),
            # End of run, summary of failures.
            testtools.TextTestResult(sys.stdout),
        ))
else:
    from subunit.v2 import StreamResultToBytes
    pertest = StreamResultToBytes(sys.stdout)

if args.list:
    output = subunit.CopyStreamResult([summary, pertest])
    output.startTestRun()
    for test in re.compile("(?<=').+(?=')").findall(
            file("test/testcases.js").read()):
        output.status(test_status='exists', test_id=test[:-5])
    output.stopTestRun()
    sys.exit(-1)

output = subunit.CopyStreamResult([summary, pertest])
output.startTestRun()

# Start up a local HTTP server which serves the files to the browser and
def enable(filename=None):
    file_ = open_file(filename)
    streamresult = StreamResultToBytes(file_)
    streamresult.startTestRun()

    real_stdout = sys.stdout
    real_stderr = sys.stderr

    @before.each_scenario
    def before_scenario(scenario):
        # create redirects for stdout and stderr
        scenario.stdout = StringIO()
        scenario.stderr = StringIO()
        try:
            test_tags = scenario.tags
        except AttributeError:
            test_tags = ()
        streamresult.status(test_id=get_test_id(scenario),
                            test_status='inprogress',
                            test_tags=test_tags,
                            timestamp=now())

    @before.step_output
    def capture_output(step):
        # only consider steps for background
        if not step.scenario:
            return
        sys.stdout = step.scenario.stdout
        sys.stderr = step.scenario.stderr

    @after.step_output
    def uncapture_output(step):
        sys.stdout = real_stdout
        sys.stderr = real_stderr

    @after.each_scenario
    def after_scenario(scenario):
        streamresult.status(
            test_id=get_test_id(scenario),
            file_name='stdout',
            file_bytes=scenario.stdout.getvalue().encode('utf-8'),
            mime_type='text/plain; charset=utf8',
            eof=True)
        streamresult.status(
            test_id=get_test_id(scenario),
            file_name='stderr',
            file_bytes=scenario.stderr.getvalue().encode('utf-8'),
            mime_type='text/plain; charset=utf8',
            eof=True)
        if scenario.passed:
            streamresult.status(test_id=get_test_id(scenario),
                                test_status='success',
                                timestamp=now())
        else:
            streamresult.status(test_id=get_test_id(scenario),
                                test_status='fail',
                                timestamp=now())

    @after.each_step
    def after_step(step):
        # only consider steps for background
        if not step.scenario:
            return
        test_id = get_test_id(step.scenario)
        if step.passed:
            marker = u'✔'
        elif not step.defined_at:
            marker = u'?'
        elif step.failed:
            marker = u'❌'
            try:
                streamresult.status(
                    test_id=test_id,
                    file_name='traceback',
                    file_bytes=step.why.traceback.encode('utf-8'),
                    mime_type='text/plain; charset=utf8')
            except AttributeError:
                pass
        elif not step.ran:
            marker = u' '
        else:
            raise AssertionError("Internal error")
        steps = u'{marker} {sentence}\n'.format(marker=marker,
                                                sentence=step.sentence)
        streamresult.status(test_id=test_id,
                            file_name='steps',
                            file_bytes=steps.encode('utf-8'),
                            mime_type='text/plain; charset=utf8')

    @after.all
    def after_all(total):
        streamresult.stopTestRun()
        close_file(file_)
class ControllerTest(ViewTest):

    def setUp(self):
        super(ControllerTest, self).setUp()
        stream = BytesIO()
        self.encoder = StreamResultToBytes(stream)
        self.loop = MainLoop(Console(self.repository))
        watcher = MemoryWatcher(stream)
        self.controller = Controller(watcher, self.repository)

    def test_attach(self):
        """
        When data is available for reading, the protocol gets notified.
        """
        self.encoder.status(test_id="foo", test_status=EXISTS)
        self.controller.attach(self.loop)
        record = self.repository.get_record("foo")
        self.assertEqual(1, self.repository.count_records())
        self.assertEqual("foo", record.id)
        self.assertEqual(EXISTS, record.status)

    def test_exists(self):
        """
        If a packet with an existing test ID is received, the associated
        record gets updated.
        """
        self.encoder.status(test_id="foo", test_status=EXISTS)
        self.encoder.status(test_id="foo", test_status=INPROGRESS)
        self.controller.attach(self.loop)
        record = self.repository.get_record("foo")
        self.assertEqual(1, self.repository.count_records())
        self.assertEqual("foo", record.id)
        self.assertEqual(INPROGRESS, record.status)

    def test_exists_no_test_id(self):
        """
        If a packet is received with test status ``exists`` but no test ID,
        it's simply discarded.
        """
        self.encoder.status(test_status="exists")
        self.controller.attach(self.loop)
        self.assertEqual(0, self.repository.count_records())

    def test_on_record_progress(self):
        """
        When all the packets for a details entry are received, an event is
        triggered by the repository.
        """
        records = []

        def on_record_progress(repository, record):
            records.append(record)

        self.repository.on_record_progress += on_record_progress
        self.encoder.status(test_id="foo", test_status=EXISTS)
        self.encoder.status(test_id="foo", test_status=INPROGRESS)
        self.encoder.status(test_id="foo", file_name="log",
                            file_bytes=b("hello"), mime_type="text/x-log",
                            timestamp=datetime.now(utc))
        self.controller.attach(self.loop)
        [record] = records
        self.assertEqual("hello", record.details["log"].as_text())
if args.load_list:
    tests = args.load_list.readlines()
else:
    tests = []

if not args.subunit:
    # Human readable test output
    import testtools, unittest
    #output = testtools.StreamToExtendedDecorator(
    #    unittest.TextTestResult(
    #        unittest.runner._WritelnDecorator(sys.stdout), True, 2))
    output = testtools.StreamToExtendedDecorator(
        testtools.TextTestResult(sys.stdout))
else:
    from subunit.v2 import StreamResultToBytes
    output = StreamResultToBytes(sys.stdout)
output.startTestRun()

# Start up a local HTTP server which serves the files to the browser and
# collects the test results.
# -----------------------------------------------------------------------------
import SimpleHTTPServer
import SocketServer
import threading
import cgi
import simplejson


class ServerHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):

    STATUS = {0: 'success', 1: 'fail', 2: 'fail', 3: 'skip'}
def main():
    parser = argparse.ArgumentParser(
        description="Test runner that supports both Python 2 and Python 3.")
    parser.add_argument("--load-list", metavar="PATH",
                        help="Path to read list of tests to run from.",
                        type=str)
    parser.add_argument("--list", help="List available tests.",
                        action="store_true")

    args = parser.parse_args()
    if args.list:
        testids = []
        output = subprocess.check_output(
            ['python2', './brz', 'selftest', '--subunit2', '--list'])
        for n in parse_enumeration(output):
            testids.append('python2.' + n)
        output = subprocess.check_output(
            ['python3', './brz', 'selftest', '--subunit2', '--list'])
        for n in parse_enumeration(output):
            testids.append('python3.' + n)
        stream = StreamResultToBytes(sys.stdout)
        for testid in testids:
            stream.status(test_id=testid, test_status='exists')
    else:
        if args.load_list:
            py2_tests = []
            py3_tests = []
            with open(args.load_list, 'r') as f:
                all_tests = parse_list(f.read())
            for testname in all_tests:
                if testname.startswith("python2."):
                    py2_tests.append(testname[len('python2.'):].strip())
                elif testname.startswith("python3."):
                    py3_tests.append(testname[len('python3.'):].strip())
                else:
                    sys.stderr.write("unknown prefix %s\n" % testname)
            if py2_tests:
                with tempfile.NamedTemporaryFile() as py2f:
                    write_list(py2f, py2_tests)
                    py2f.flush()
                    subprocess.call(
                        'python2 ./brz selftest --subunit2 --load-list=%s | '
                        'subunit-filter -s --passthrough --rename "^" "python2."'
                        % py2f.name, shell=True)
            if py3_tests:
                with tempfile.NamedTemporaryFile() as py3f:
                    write_list(py3f, py3_tests)
                    py3f.flush()
                    subprocess.call(
                        'python3 ./brz selftest --subunit2 --load-list=%s | '
                        'subunit-filter -s --passthrough --rename "^" "python3."'
                        % py3f.name, shell=True)
        else:
            subprocess.call(
                'python2 ./brz selftest --subunit2 | '
                'subunit-filter -s --passthrough --rename "^" "python2."',
                shell=True)
            subprocess.call(
                'python3 ./brz selftest --subunit2 | '
                'subunit-filter -s --passthrough --rename "^" "python3."',
                shell=True)