def main():
    """CLI entry point: pipe a subunit v2 stream from stdin through the
    outcome printer, then exit 0 on success or 1 on failure/empty run.

    Side effects: prints per-test outcomes, optional failure details and a
    summary to stdout; terminates the process via ``sys.exit``.
    """
    args = parse_args()
    stream = subunit.ByteStreamToStreamResult(sys.stdin,
                                              non_subunit_name='stdout')
    outcomes = testtools.StreamToDict(
        functools.partial(show_outcome, sys.stdout,
                          print_failures=args.print_failures,
                          failonly=args.failonly))
    summary = testtools.StreamSummary()
    result = testtools.CopyStreamResult([outcomes, summary])
    result = testtools.StreamResultRouter(result)
    # Route non-test (attachment) events straight to stdout.
    cat = subunit.test_results.CatFiles(sys.stdout)
    result.add_rule(cat, 'test_id', test_id=None)
    start_time = datetime.datetime.utcnow()
    result.startTestRun()
    try:
        stream.run(result)
    finally:
        result.stopTestRun()
    stop_time = datetime.datetime.utcnow()
    elapsed_time = stop_time - start_time
    if count_tests('status', '.*') == 0:
        print("The test run didn't actually run any tests")
        # FIX: use sys.exit() rather than the interactive-only exit()
        # builtin, which is injected by the site module and is not
        # guaranteed to exist (e.g. under ``python -S``).
        sys.exit(1)
    if args.post_fails:
        print_fails(sys.stdout)
    print_summary(sys.stdout, elapsed_time)
    sys.exit(0 if summary.wasSuccessful() else 1)
def startTestRun(self):
    """Begin a test run: buffer a byte-for-byte subunit v2 copy of every
    event while also collecting per-test dicts via ``_handle_test``."""
    self._subunit = BytesIO()
    serialiser = subunit.v2.StreamResultToBytes(self._subunit)
    sinks = [testtools.StreamToDict(self._handle_test), serialiser]
    self._hook = testtools.CopyStreamResult(sinks)
    self._hook.startTestRun()
def _get_run_details(stream_file, stdout):
    """Parse a subunit stream and return a dict with the run's pass/fail
    status, runtime, and start time.

    NOTE: resets the module-global ``start_times``/``stop_times`` lists as
    a side effect; returns '---' placeholders when no timestamps were seen.
    """
    global start_times
    global stop_times
    start_times = []
    stop_times = []

    def collect_data(out, test):
        # ``out`` is the bound stdout argument (unused here); only the
        # test's start/stop timestamps are harvested.
        global start_times
        global stop_times
        start_times.append(test['timestamps'][0])
        stop_times.append(test['timestamps'][1])

    stream = subunit.ByteStreamToStreamResult(stream_file,
                                              non_subunit_name='stdout')
    summary = testtools.StreamSummary()
    outcomes = testtools.StreamToDict(functools.partial(collect_data, stdout))
    result = testtools.StreamResultRouter(
        testtools.CopyStreamResult([outcomes, summary]))
    result.add_rule(subunit.test_results.CatFiles(stdout),
                    'test_id', test_id=None)
    result.startTestRun()
    try:
        stream.run(result)
    finally:
        result.stopTestRun()

    successful = results.wasSuccessful(summary)
    if start_times and stop_times:
        start_time = min(start_times)
        stop_time = max(stop_times)
        run_time = subunit_trace.get_duration([start_time, stop_time])
    else:
        run_time = '---'
        successful = '---'
        start_time = '---'
    return {'passed': successful, 'runtime': run_time, 'start': start_time}
def __init__(self, stream_file, attachments=False, attr_regex=None,
             targets=None, use_wall_time=False, non_subunit_name=None):
    """Set up a subunit stream parser over ``stream_file``.

    :param stream_file: file-like object containing a subunit v2 stream.
    :param attachments: whether to retain test attachments.
    :param attr_regex: regex used to extract attributes from test ids;
        defaults to the historical bracketed pattern when ``None``.
    :param targets: optional extra StreamResult sinks; the list is copied
        so the caller's list is never mutated.
    :param use_wall_time: stored for use by outcome parsing elsewhere.
    :param non_subunit_name: route name for non-subunit bytes in the
        stream.
    """
    if targets is None:
        targets = []
    else:
        # Copy so extending below never mutates the caller's list.
        targets = targets[:]
    self.use_wall_time = use_wall_time
    self.stream_file = stream_file
    self.stream = subunit.ByteStreamToStreamResult(
        self.stream_file, non_subunit_name=non_subunit_name)
    starts = testtools.StreamResult()
    summary = testtools.StreamSummary()
    outcomes = testtools.StreamToDict(functools.partial(
        self.parse_outcome))
    targets.extend([starts, outcomes, summary])
    self.result = testtools.CopyStreamResult(targets)
    self.results = {}
    self.attachments = attachments
    if attr_regex:
        self.attr_regex = re.compile(attr_regex)
    # NOTE(mtreinish): Default to the previous implicit regex if None is
    # specified for backwards compat
    else:
        # FIX: raw string avoids the invalid-escape-sequence
        # DeprecationWarning/SyntaxWarning for '\[' in a plain literal;
        # the compiled pattern is byte-identical.
        self.attr_regex = re.compile(r'\[(.*)\]')
def trace(stdin, stdout, print_failures=False, failonly=False,
          enable_diff=False, abbreviate=False, color=False, post_fails=False,
          no_summary=False, suppress_attachments=False, all_attachments=False,
          show_binary_attachments=False):
    """Pretty-print a subunit stream read from ``stdin``.

    Returns 0 when the run succeeded, 1 when it failed, ran no tests, or
    had no successful tests.
    """
    stream = subunit.ByteStreamToStreamResult(stdin, non_subunit_name='stdout')
    outcomes = testtools.StreamToDict(
        functools.partial(show_outcome, stdout,
                          print_failures=print_failures,
                          failonly=failonly,
                          enable_diff=enable_diff,
                          abbreviate=abbreviate,
                          enable_color=color,
                          suppress_attachments=suppress_attachments,
                          all_attachments=all_attachments,
                          show_binary_attachments=show_binary_attachments))
    summary = testtools.StreamSummary()
    result = testtools.CopyStreamResult([outcomes, summary])
    result = testtools.StreamResultRouter(result)
    # Route non-test (attachment) events straight to stdout.
    cat = subunit.test_results.CatFiles(stdout)
    result.add_rule(cat, 'test_id', test_id=None)
    result.startTestRun()
    try:
        stream.run(result)
    finally:
        result.stopTestRun()
    # FIX: bail out on an empty run *before* computing timing —
    # min()/max() below would raise ValueError on the empty lists an
    # empty run produces, masking the friendly error message.
    if count_tests('status', '.*') == 0:
        print("The test run didn't actually run any tests", file=sys.stderr)
        return 1
    start_times = []
    stop_times = []
    for worker in RESULTS:
        start_times += [x['timestamps'][0] for x in RESULTS[worker]]
        stop_times += [x['timestamps'][1] for x in RESULTS[worker]]
    start_time = min(start_times)
    stop_time = max(stop_times)
    elapsed_time = stop_time - start_time
    if post_fails:
        print_fails(stdout)
    if not no_summary:
        print_summary(stdout, elapsed_time)
    # NOTE(mtreinish): Ideally this should live in testtools streamSummary
    # this is just in place until the behavior lands there (if it ever does)
    if count_tests('status', '^success$') == 0:
        print("\nNo tests were successful during the run", file=sys.stderr)
        return 1
    return 0 if results.wasSuccessful(summary) else 1
def main():
    """Read a subunit stream from stdin, print each outcome and a summary,
    and return a shell-style exit code (0 pass, 1 fail)."""
    stream = subunit.ByteStreamToStreamResult(sys.stdin,
                                              non_subunit_name='stdout')
    starts = Starts(sys.stdout)
    outcomes = testtools.StreamToDict(
        functools.partial(show_outcome, sys.stdout))
    summary = testtools.StreamSummary()
    sinks = [starts, outcomes, summary]
    result = testtools.CopyStreamResult(sinks)
    result.startTestRun()
    try:
        stream.run(result)
    finally:
        result.stopTestRun()
    print_summary(sys.stdout)
    if summary.wasSuccessful():
        return 0
    return 1
def startTestRun(self):
    """Begin a run: set up the in-memory subunit copy, record the start
    time, and create or look up the run row in the database."""
    self._subunit = io.BytesIO()
    self.subunit_stream = subunit.v2.StreamResultToBytes(self._subunit)
    self.hook = testtools.CopyStreamResult(
        [testtools.StreamToDict(self._handle_test), self.subunit_stream])
    self.hook.startTestRun()
    self.start_time = datetime.datetime.utcnow()
    session = self.session_factory()
    if self._run_id:
        # A run id was supplied up front: resolve it to the existing row.
        int_id = db_api.get_run_id_from_uuid(self._run_id, session=session)
        self.run = db_api.get_run_by_id(int_id, session=session)
    else:
        # No id yet: create a fresh run and remember its uuid.
        self.run = db_api.create_run(session=session)
        self._run_id = self.run.uuid
    session.close()
    self.totals = {}
def trace(stdin, stdout, print_failures=False, failonly=False,
          enable_diff=False, abbreviate=False, color=False, post_fails=False,
          no_summary=False):
    """Pretty-print a subunit stream from ``stdin``; return 0 on success,
    1 when the run failed, ran nothing, or had no passing tests."""
    outcome_handler = functools.partial(show_outcome, stdout,
                                        print_failures=print_failures,
                                        failonly=failonly,
                                        enable_diff=enable_diff,
                                        abbreviate=abbreviate,
                                        enable_color=color)
    outcomes = testtools.StreamToDict(outcome_handler)
    summary = testtools.StreamSummary()
    result = testtools.StreamResultRouter(
        testtools.CopyStreamResult([outcomes, summary]))
    # Non-test (attachment) events are dumped straight to stdout.
    result.add_rule(subunit.test_results.CatFiles(stdout),
                    'test_id', test_id=None)
    stream = subunit.ByteStreamToStreamResult(stdin, non_subunit_name='stdout')
    start_time = datetime.datetime.utcnow()
    result.startTestRun()
    try:
        stream.run(result)
    finally:
        result.stopTestRun()
    elapsed_time = datetime.datetime.utcnow() - start_time
    if count_tests('status', '.*') == 0:
        print("The test run didn't actually run any tests")
        return 1
    if post_fails:
        print_fails(stdout)
    if not no_summary:
        print_summary(stdout, elapsed_time)
    # NOTE(mtreinish): Ideally this should live in testtools streamSummary
    # this is just in place until the behavior lands there (if it ever does)
    if count_tests('status', '^success$') == 0:
        print("\nNo tests were successful during the run")
        return 1
    if summary.wasSuccessful():
        return 0
    return 1
def _check_subunit(self, output_stream):
    """Assert that ``output_stream`` is a parsable subunit v2 stream
    containing at least one test event."""
    parsed = []

    def _remember(test):
        parsed.append(test)

    stream = subunit_lib.ByteStreamToStreamResult(output_stream)
    starts = testtools.StreamResult()
    summary = testtools.StreamSummary()
    outcomes = testtools.StreamToDict(functools.partial(_remember))
    result = testtools.CopyStreamResult([starts, outcomes, summary])
    result.startTestRun()
    try:
        stream.run(result)
    finally:
        result.stopTestRun()
    self.assertThat(len(parsed), testtools.matchers.GreaterThan(0))
def __init__(self, repository, partial=False):
    """Open a temp file inside ``repository`` and wire up a hook that both
    serialises events to it and collects per-test dicts."""
    # XXX: Perhaps should factor into a decorator and use an unaltered
    # TestProtocolClient.
    self._repository = repository
    self.partial = partial
    fd, name = tempfile.mkstemp(dir=self._repository.base)
    self.fname = name
    stream = os.fdopen(fd, 'wb')
    # The time take by each test, flushed at the end.
    self._times = {}
    self._test_start = None
    self._time = None
    subunit_client = testtools.StreamToExtendedDecorator(
        TestProtocolClient(stream))
    self.hook = testtools.CopyStreamResult(
        [subunit_client, testtools.StreamToDict(self._handle_test)])
    self._stream = stream
def assertRunExit(self, cmd, expected, subunit=False, stdin=None):
    """Run ``cmd`` in a shell, assert its exit code, return (out, err).

    When ``subunit`` is true, stdout is additionally parsed as a subunit
    v2 stream which must contain at least one test event.
    """
    if stdin:
        proc = subprocess.Popen("%s" % cmd, shell=True,
                                stdin=subprocess.PIPE,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        out, err = proc.communicate(stdin)
    else:
        proc = subprocess.Popen("%s" % cmd, shell=True,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        out, err = proc.communicate()
    if not subunit:
        self.assertEqual(proc.returncode, expected,
                         "Stdout: %s; Stderr: %s" % (out, err))
        return (out, err)
    self.assertEqual(
        proc.returncode, expected,
        "Expected return code: %s doesn't match actual "
        "return code of: %s" % (expected, proc.returncode))
    stream = subunit_lib.ByteStreamToStreamResult(io.BytesIO(out))
    tests = []

    def _add_dict(test):
        tests.append(test)

    starts = testtools.StreamResult()
    summary = testtools.StreamSummary()
    outcomes = testtools.StreamToDict(functools.partial(_add_dict))
    result = testtools.CopyStreamResult([starts, outcomes, summary])
    result.startTestRun()
    try:
        stream.run(result)
    finally:
        result.stopTestRun()
    self.assertThat(len(tests), testtools.matchers.GreaterThan(0))
    return (out, err)
def main():
    """Print outcomes and a summary for the subunit stream on stdin;
    return 0 when the run passed, 1 otherwise (including empty runs)."""
    args = parse_args()
    handler = functools.partial(show_outcome, sys.stdout,
                                print_failures=args.print_failures)
    outcomes = testtools.StreamToDict(handler)
    summary = testtools.StreamSummary()
    result = testtools.CopyStreamResult([outcomes, summary])
    stream = subunit.ByteStreamToStreamResult(sys.stdin,
                                              non_subunit_name='stdout')
    result.startTestRun()
    try:
        stream.run(result)
    finally:
        result.stopTestRun()
    if count_tests('status', '.*') == 0:
        print("The test run didn't actually run any tests")
        return 1
    if args.post_fails:
        print_fails(sys.stdout)
    print_summary(sys.stdout)
    if summary.wasSuccessful():
        return 0
    return 1
def main():
    """CLI entry point: print the test ids seen on a subunit stream.

    Exits 0 when the stream reports success, 1 otherwise.
    """
    parser = optparse.OptionParser(description=__doc__)
    parser.add_option(
        "--times", action="store_true",
        help="list the time each test took (requires a timestamped stream)",
        default=False)
    parser.add_option(
        "--exists", action="store_true",
        help="list tests that are reported as existing (as well as ran)",
        default=False)
    parser.add_option(
        "--no-passthrough", action="store_true",
        help="Hide all non subunit input.", default=False,
        dest="no_passthrough")
    (options, args) = parser.parse_args()
    test = pysubunit.ByteStreamToStreamResult(
        filters.find_stream(sys.stdin, args), non_subunit_name="stdout")
    result = test_results.TestIdPrintingResult(sys.stdout, options.times,
                                               options.exists)
    if not options.no_passthrough:
        result = testtools.StreamResultRouter(result)
        cat = test_results.CatFiles(sys.stdout)
        result.add_rule(cat, 'test_id', test_id=None)
    summary = testtools.StreamSummary()
    result = testtools.CopyStreamResult([result, summary])
    result.startTestRun()
    # FIX: guarantee stopTestRun() runs even if the stream raises,
    # matching the other stream-consuming entry points in this codebase.
    try:
        test.run(result)
    finally:
        result.stopTestRun()
    sys.exit(0 if summary.wasSuccessful() else 1)
# NOTE(review): incomplete fragment — the trailing ``try: case.run(result)``
# has no matching except/finally in view; the function continues beyond this
# chunk. Purpose: pick one of three output sinks (raw subunit via
# output.make_result, pretty subunit-trace output, or the CLI result with the
# previous run for diffing) and stream ``case`` into both the repository
# inserter and that sink via CopyStreamResult.
def _load_case(inserter, repo, case, subunit_out, pretty_out, color, stdout, abbreviate, suppress_attachments, all_attachments): if subunit_out: output_result, summary_result = output.make_result(inserter.get_id, output=stdout) elif pretty_out: outcomes = testtools.StreamToDict( functools.partial(subunit_trace.show_outcome, stdout, enable_color=color, abbreviate=abbreviate, suppress_attachments=suppress_attachments, all_attachments=all_attachments)) summary_result = testtools.StreamSummary() output_result = testtools.CopyStreamResult([outcomes, summary_result]) output_result = testtools.StreamResultRouter(output_result) cat = subunit.test_results.CatFiles(stdout) output_result.add_rule(cat, 'test_id', test_id=None) else: try: previous_run = repo.get_latest_run() except KeyError: previous_run = None output_result = results.CLITestResult(inserter.get_id, stdout, previous_run) summary_result = output_result.get_summary() result = testtools.CopyStreamResult([inserter, output_result]) result.startTestRun() try: case.run(result)
# NOTE(review): incomplete fragment — the ``load`` command class and the start
# of its ``run`` method; the final ``case.run(result)`` sits inside a ``try``
# whose finally-clause lies beyond this chunk. Flow visible here: open (or
# force-init) the repository, build per-worker tagged test cases from the
# input streams, then copy the combined stream into both the repository
# inserter and the UI result, optionally feeding interactive stdin events
# from a background thread.
class load(Command): """Load a subunit stream into a repository. Failing tests are shown on the console and a summary of the stream is printed at the end. Unless the stream is a partial stream, any existing failures are discarded. """ input_streams = ['subunit+', 'interactive?'] args = [ExistingPathArgument('streams', min=0, max=None)] options = [ optparse.Option("--partial", action="store_true", default=False, help="The stream being loaded was a partial run."), optparse.Option( "--force-init", action="store_true", default=False, help="Initialise the repository if it does not exist already"), optparse.Option("--subunit", action="store_true", default=False, help="Display results in subunit format."), optparse.Option( "--full-results", action="store_true", default=False, help="No-op - deprecated and kept only for backwards compat."), ] # Can be assigned to to inject a custom command factory. command_factory = TestCommand def run(self): path = self.ui.here try: repo = self.repository_factory.open(path) except RepositoryNotFound: if self.ui.options.force_init: repo = self.repository_factory.initialise(path) else: raise testcommand = self.command_factory(self.ui, repo) # Not a full implementation of TestCase, but we only need to iterate # back to it. Needs to be a callable - its a head fake for # testsuite.add. # XXX: Be nice if we could declare that the argument, which is a path, # is to be an input stream - and thus push this conditional down into # the UI object. if self.ui.arguments.get('streams'): opener = partial(open, mode='rb') streams = map(opener, self.ui.arguments['streams']) else: streams = self.ui.iter_streams('subunit') mktagger = lambda pos, result: testtools.StreamTagger( [result], add=['worker-%d' % pos]) def make_tests(): for pos, stream in enumerate(streams): # Calls StreamResult API. 
case = subunit.ByteStreamToStreamResult( stream, non_subunit_name='stdout') decorate = partial(mktagger, pos) case = testtools.DecorateTestCaseResult(case, decorate) yield (case, str(pos)) case = testtools.ConcurrentStreamTestSuite(make_tests) # One unmodified copy of the stream to repository storage inserter = repo.get_inserter(partial=self.ui.options.partial) # One copy of the stream to the UI layer after performing global # filters. try: previous_run = repo.get_latest_run() except KeyError: previous_run = None output_result, summary_result = self.ui.make_result( inserter.get_id, testcommand, previous_run=previous_run) result = testtools.CopyStreamResult([inserter, output_result]) runner_thread = None result.startTestRun() try: # Convert user input into a stdin event stream interactive_streams = list(self.ui.iter_streams('interactive')) if interactive_streams: case = InputToStreamResult(interactive_streams[0]) runner_thread = threading.Thread(target=case.run, args=(result, )) runner_thread.daemon = True runner_thread.start() case.run(result)
# NOTE(review): incomplete fragment — begins mid-function (the tail of an
# outcome handler classifying tests into FAILS / ADDPROP_FAIL / SUCCESS /
# SKIPS buckets), then falls into module-level script code that drives the
# stdin subunit stream through that handler and prints bucket counts.
# The leading ``for line in res:`` loop's enclosing scope is not in view.
for line in res: line = line.encode('utf8') stream.write("%s\n" % line) stream.write('\n\n') ADDPROP_FAIL.append(test) break else: FAILS.append(test) elif status == 'success' or status == 'xfail': SUCCESS.append(test) elif status == 'skip': SKIPS.append(test) stream = subunit.ByteStreamToStreamResult(sys.stdin, non_subunit_name='stdout') outcome = testtools.StreamToDict(functools.partial(show_outcome, sys.stdout)) summary = testtools.StreamSummary() result = testtools.CopyStreamResult([outcome, summary]) result.startTestRun() try: stream.run(result) finally: result.stopTestRun() print("\n\n------------------------------------------------------------------") print("%s Tests Failed" % len(FAILS)) print("%s Tests Failed with AdditionalProperties" % len(ADDPROP_FAIL)) print("%s Tests Skipped" % len(SKIPS)) print("%s Tests Passed" % len(SUCCESS)) print("To see the full details run this subunit stream through subunit-trace")
def run_tests_from_stream(input_stream, result, passthrough_stream=None,
                          forward_stream=None, protocol_version=1,
                          passthrough_subunit=True):
    """Run tests from a subunit input stream through 'result'.

    Non-test events - top level file attachments - are expected to be
    dropped by v2 StreamResults at the present time (as all the analysis
    code is in ExtendedTestResult API's), so to implement
    passthrough_stream they are diverted and copied directly when that
    is set.

    :param input_stream: A stream containing subunit input.
    :param result: A TestResult that will receive the test events.
        NB: This should be an ExtendedTestResult for v1 and a
        StreamResult for v2.
    :param passthrough_stream: All non-subunit input received will be
        sent to this stream.  If not provided, uses the
        ``TestProtocolServer`` default, which is ``sys.stdout``.
    :param forward_stream: All subunit input received will be forwarded
        to this stream.  If not provided, uses the
        ``TestProtocolServer`` default, which is to not forward any
        input.  Do not set this when transforming the stream - items
        would be double-reported.
    :param protocol_version: What version of the subunit protocol to
        expect.
    :param passthrough_subunit: If True, passthrough should be as
        subunit otherwise unwrap it.  Only has effect when
        forward_stream is None.  (when forwarding as subunit non-subunit
        input is always turned into subunit)
    """
    if protocol_version == 1:
        test = pysubunit.ProtocolTestCase(
            input_stream, passthrough=passthrough_stream,
            forward=forward_stream)
    elif protocol_version == 2:
        # In all cases we encapsulate unknown inputs.
        if forward_stream is not None:
            # Send events to forward_stream as subunit.
            forward_result = v2.StreamResultToBytes(forward_stream)
            if passthrough_stream is None:
                # Not passing non-test events - split them off to nothing.
                router = testtools.StreamResultRouter(forward_result)
                router.add_rule(testtools.StreamResult(), 'test_id',
                                test_id=None)
                result = testtools.CopyStreamResult([router, result])
            else:
                # otherwise, copy all events to forward_result
                result = testtools.CopyStreamResult([forward_result, result])
        elif passthrough_stream is not None:
            # Route non-test events to passthrough_stream, unwrapping them
            # for display.
            if passthrough_subunit:
                passthrough_result = v2.StreamResultToBytes(
                    passthrough_stream)
            else:
                passthrough_result = test_results.CatFiles(
                    passthrough_stream)
            result = testtools.StreamResultRouter(result)
            result.add_rule(passthrough_result, 'test_id', test_id=None)
        test = v2.ByteStreamToStreamResult(input_stream,
                                           non_subunit_name='stdout')
    else:
        raise Exception("Unknown protocol version.")
    result.startTestRun()
    test.run(result)
    result.stopTestRun()
# NOTE(review): incomplete fragment truncated at both ends — begins mid-way
# through a generator body (``decorate``/``yield`` belong to an enclosing
# make_tests-style function not in view) and ends on a dangling ``if``.
# Visible flow: wrap each worker stream in a tagging decorator, combine them
# in a ConcurrentStreamTestSuite, copy events to both the repository inserter
# and the UI result, and optionally run interactive stdin input on a daemon
# thread; ``stopTestRun`` is correctly guarded by try/finally.
decorate = partial(mktagger, pos) case = testtools.DecorateTestCaseResult(case, decorate) yield (case, str(pos)) case = testtools.ConcurrentStreamTestSuite(make_tests) # One unmodified copy of the stream to repository storage inserter = repo.get_inserter(partial=self.ui.options.partial) # One copy of the stream to the UI layer after performing global # filters. try: previous_run = repo.get_latest_run() except KeyError: previous_run = None output_result, summary_result = self.ui.make_result( inserter.get_id, testcommand, previous_run=previous_run) result = testtools.CopyStreamResult([inserter, output_result]) runner_thread = None result.startTestRun() try: # Convert user input into a stdin event stream interactive_streams = list(self.ui.iter_streams('interactive')) if interactive_streams: case = InputToStreamResult(interactive_streams[0]) runner_thread = threading.Thread(target=case.run, args=(result, )) runner_thread.daemon = True runner_thread.start() case.run(result) finally: result.stopTestRun() if interactive_streams and runner_thread: