class ResultsSink:
    """Collect per-test outputs, tally PASS/FAIL/TIMEOUT/SKIP counts, drive
    the progress bar, and report results either in developer format or in
    automation format (``TEST-...`` lines plus structured logging and an
    optional wptreport).
    """

    def __init__(self, testsuite, options, testcount):
        # `options` is assumed to be the parsed command-line namespace
        # (output_fp, format, wptreport, hide_progress, ...) -- TODO confirm
        # against the caller that builds it.
        self.options = options
        self.fp = options.output_fp
        if self.options.format == 'automation':
            # Structured logger exists only in automation mode;
            # print_automation_result() is only reached in that mode.
            self.slog = TestLogger(testsuite)
            self.slog.suite_start()

        # Optional wptreport output; silently disabled when the handler
        # module is unavailable.
        self.wptreport = None
        if self.options.wptreport:
            try:
                from .wptreport import WptreportHandler
                self.wptreport = WptreportHandler(self.options.wptreport)
                self.wptreport.suite_start()
            except ImportError:
                pass

        self.groups = {}        # dev category label -> [TestResult, ...]
        self.output_dict = {}   # test path -> first output seen (check_output mode)
        self.counts = {'PASS': 0, 'FAIL': 0, 'TIMEOUT': 0, 'SKIP': 0}
        self.slow_tests = []    # TestDuration records above the slow threshold
        self.n = 0              # number of results pushed so far

        if options.hide_progress:
            self.pb = NullProgressBar()
        else:
            fmt = [
                {'value': 'PASS', 'color': 'green'},
                {'value': 'FAIL', 'color': 'red'},
                {'value': 'TIMEOUT', 'color': 'blue'},
                {'value': 'SKIP', 'color': 'brightgray'},
            ]
            self.pb = ProgressBar(testcount, fmt)

    def push(self, output):
        """Record one finished test's output: update counts, group it under a
        dev category, print output/cmd as configured, and in automation mode
        emit the TEST-* result lines.
        """
        if self.options.show_slow and output.dt >= self.options.slow_test_threshold:
            self.slow_tests.append(TestDuration(output.test, output.dt))
        if output.timed_out:
            self.counts['TIMEOUT'] += 1
        if isinstance(output, NullTestOutput):
            # Skipped test: no process was run for it.
            if self.options.format == 'automation':
                self.print_automation_result('TEST-KNOWN-FAIL', output.test,
                                             time=output.dt, skip=True)
            self.counts['SKIP'] += 1
            self.n += 1
        else:
            result = TestResult.from_output(output)

            if self.wptreport is not None and result.wpt_results:
                self.wptreport.test(result.wpt_results, output.dt)

            tup = (result.result, result.test.expect, result.test.random)
            dev_label = self.LABELS[tup][1]

            if self.options.check_output:
                # Fixed: membership test directly on the dict instead of
                # the redundant `.keys()` view.
                if output.test.path in self.output_dict:
                    if self.output_dict[output.test.path] != output:
                        self.counts['FAIL'] += 1
                        self.print_automation_result(
                            "TEST-UNEXPECTED-FAIL",
                            result.test,
                            time=output.dt,
                            message="Same test with different flag producing different output")
                else:
                    self.output_dict[output.test.path] = output

            if output.timed_out:
                dev_label = 'TIMEOUTS'
            self.groups.setdefault(dev_label, []).append(result)

            # Regressions are shown unless explicitly suppressed; other
            # categories honor show_output / failed_only.
            if dev_label == 'REGRESSIONS':
                show_output = self.options.show_output \
                    or not self.options.no_show_failed
            elif dev_label == 'TIMEOUTS':
                show_output = self.options.show_output
            else:
                show_output = self.options.show_output \
                    and not self.options.failed_only

            if dev_label in ('REGRESSIONS', 'TIMEOUTS'):
                show_cmd = self.options.show_cmd
            else:
                show_cmd = self.options.show_cmd \
                    and not self.options.failed_only

            if show_output or show_cmd:
                self.pb.beginline()

                if show_output:
                    print('## {}: rc = {:d}, run time = {}'.format(
                        output.test.path, output.rc, output.dt), file=self.fp)

                if show_cmd:
                    print(escape_cmdline(output.cmd), file=self.fp)

                if show_output:
                    self.fp.write(output.out)
                    self.fp.write(output.err)

            self.n += 1

            if result.result == TestResult.PASS and not result.test.random:
                self.counts['PASS'] += 1
            elif result.test.expect and not result.test.random:
                self.counts['FAIL'] += 1
            else:
                self.counts['SKIP'] += 1

            if self.options.format == 'automation':
                # Emit each failing subtest line, then the overall result,
                # and return early (no dev-format progress message).
                if result.result != TestResult.PASS and len(result.results) > 1:
                    for sub_ok, msg in result.results:
                        tup = (sub_ok, result.test.expect, result.test.random)
                        label = self.LABELS[tup][0]
                        if label == 'TEST-UNEXPECTED-PASS':
                            label = 'TEST-PASS (EXPECTED RANDOM)'
                        self.print_automation_result(label, result.test,
                                                     time=output.dt,
                                                     message=msg)
                tup = (result.result, result.test.expect, result.test.random)
                self.print_automation_result(self.LABELS[tup][0],
                                             result.test,
                                             time=output.dt,
                                             extra=getattr(output, 'extra', None))
                return

            if dev_label:
                def singular(label):
                    return "FIXED" if label == "FIXES" else label[:-1]
                self.pb.message("{} - {}".format(singular(dev_label),
                                                 output.test.path))

        self.pb.update(self.n, self.counts)

    def finish(self, completed):
        """Close out the run: end the progress bar, then either end the
        structured log (automation) or print the dev-format summary."""
        self.pb.finish(completed)
        if self.options.format == 'automation':
            self.slog.suite_end()
        else:
            self.list(completed)
        if self.wptreport is not None:
            self.wptreport.suite_end()

    # Conceptually, this maps (test result x test expectation) to text labels.
    # key is (result, expect, random)
    # value is (automation label, dev test category)
    LABELS = {
        (TestResult.CRASH, False, False): ('TEST-UNEXPECTED-FAIL', 'REGRESSIONS'),
        (TestResult.CRASH, False, True): ('TEST-UNEXPECTED-FAIL', 'REGRESSIONS'),
        (TestResult.CRASH, True, False): ('TEST-UNEXPECTED-FAIL', 'REGRESSIONS'),
        (TestResult.CRASH, True, True): ('TEST-UNEXPECTED-FAIL', 'REGRESSIONS'),

        (TestResult.FAIL, False, False): ('TEST-KNOWN-FAIL', ''),
        (TestResult.FAIL, False, True): ('TEST-KNOWN-FAIL (EXPECTED RANDOM)', ''),
        (TestResult.FAIL, True, False): ('TEST-UNEXPECTED-FAIL', 'REGRESSIONS'),
        (TestResult.FAIL, True, True): ('TEST-KNOWN-FAIL (EXPECTED RANDOM)', ''),

        (TestResult.PASS, False, False): ('TEST-UNEXPECTED-PASS', 'FIXES'),
        (TestResult.PASS, False, True): ('TEST-PASS (EXPECTED RANDOM)', ''),
        (TestResult.PASS, True, False): ('TEST-PASS', ''),
        (TestResult.PASS, True, True): ('TEST-PASS (EXPECTED RANDOM)', ''),
    }

    def list(self, completed):
        """Print the dev-format summary: grouped results, optional failure
        file, PASS/FAIL verdict, and (optionally) the slow-test table."""
        for label, results in sorted(self.groups.items()):
            if label == '':
                continue

            print(label)
            for result in results:
                print('    {}'.format(' '.join(result.test.jitflags +
                                               result.test.options +
                                               [result.test.path])))

        if self.options.failure_file:
            # Fixed: `with` guarantees the file is closed even if a print
            # raises (original left it open on exception).
            with open(self.options.failure_file, 'w') as failure_file:
                if not self.all_passed():
                    if 'REGRESSIONS' in self.groups:
                        for result in self.groups['REGRESSIONS']:
                            print(result.test.path, file=failure_file)
                    if 'TIMEOUTS' in self.groups:
                        for result in self.groups['TIMEOUTS']:
                            print(result.test.path, file=failure_file)

        suffix = '' if completed else ' (partial run -- interrupted by user)'
        if self.all_passed():
            print('PASS' + suffix)
        else:
            print('FAIL' + suffix)

        if self.options.show_slow:
            min_duration = self.options.slow_test_threshold
            print('Slow tests (duration > {}s)'.format(min_duration))
            slow_tests = sorted(self.slow_tests, key=lambda x: x.duration,
                                reverse=True)
            # Fixed: renamed from `any`, which shadowed the builtin.
            printed_any = False
            for test in slow_tests:
                print('{:>5} {}'.format(round(test.duration, 2), test.test))
                printed_any = True
            if not printed_any:
                print('None')

    def all_passed(self):
        """True when no regressions and no timeouts were recorded."""
        return 'REGRESSIONS' not in self.groups and 'TIMEOUTS' not in self.groups

    def print_automation_result(self, label, test, message=None, skip=False,
                                time=None, extra=None):
        """Print one automation-format result line and forward it to the
        structured logger (requires automation mode, where self.slog exists)."""
        # Fixed latent bug: with the default time=None, both the comparison
        # against the timeout and the '{:.1f}' format below raised TypeError
        # on Python 3. All current callers pass time=output.dt, so this
        # normalization does not change observed behavior.
        if time is None:
            time = 0
        result = label
        result += " | " + test.path
        args = []
        if self.options.shell_args:
            args.append(self.options.shell_args)
        args += test.jitflags
        result += ' | (args: "{}")'.format(' '.join(args))
        if message:
            result += " | " + message
        if skip:
            result += ' | (SKIP)'
        if time > self.options.timeout:
            result += ' | (TIMEOUT)'
        result += ' [{:.1f} s]'.format(time)
        print(result)

        details = {'extra': extra.copy() if extra else {}}
        if self.options.shell_args:
            details['extra']['shell_args'] = self.options.shell_args
        details['extra']['jitflags'] = test.jitflags
        if message:
            details['message'] = message
        status = 'FAIL' if 'TEST-UNEXPECTED' in label else 'PASS'
        self.slog.test(test.path, status, time or 0, **details)
class ResultsSink:
    """Python 2 results collector: tallies counts, drives the progress bar,
    and prints results in developer or tinderbox format.
    """

    def __init__(self, options, testcount):
        # `options` is assumed to be the parsed command-line namespace
        # (output_fp, tinderbox, hide_progress, ...) -- TODO confirm.
        self.options = options
        self.fp = options.output_fp

        self.groups = {}   # dev category label -> [test path, ...]
        self.counts = {'PASS': 0, 'FAIL': 0, 'TIMEOUT': 0, 'SKIP': 0}
        self.n = 0         # number of results pushed so far

        if options.hide_progress:
            self.pb = NullProgressBar()
        else:
            fmt = [
                {'value': 'PASS', 'color': 'green'},
                {'value': 'FAIL', 'color': 'red'},
                {'value': 'TIMEOUT', 'color': 'blue'},
                {'value': 'SKIP', 'color': 'brightgray'},
            ]
            self.pb = ProgressBar(testcount, fmt)

    def push(self, output):
        # Record one finished test: update counts, group under a dev
        # category, optionally echo output/cmd, emit tinderbox lines.
        if output.timed_out:
            self.counts['TIMEOUT'] += 1
        if isinstance(output, NullTestOutput):
            # Skipped test: no process was run for it.
            if self.options.tinderbox:
                self.print_tinderbox_result('TEST-KNOWN-FAIL',
                                            output.test.path,
                                            time=output.dt, skip=True)
            self.counts['SKIP'] += 1
            self.n += 1
        else:
            result = TestResult.from_output(output)
            tup = (result.result, result.test.expect, result.test.random)
            dev_label = self.LABELS[tup][1]
            if output.timed_out:
                dev_label = 'TIMEOUTS'
            self.groups.setdefault(dev_label, []).append(result.test.path)

            show = self.options.show
            # failed_only suppresses echo for everything but regressions
            # and timeouts.
            if self.options.failed_only and dev_label not in ('REGRESSIONS',
                                                              'TIMEOUTS'):
                show = False
            if show:
                self.pb.beginline()

            if show:
                if self.options.show_output:
                    print >> self.fp, '## %s: rc = %d, run time = %f' % (
                        output.test.path, output.rc, output.dt)
                if self.options.show_cmd:
                    print >> self.fp, escape_cmdline(output.cmd)
                if self.options.show_output:
                    self.fp.write(output.out)
                    self.fp.write(output.err)

            self.n += 1

            if result.result == TestResult.PASS and not result.test.random:
                self.counts['PASS'] += 1
            elif result.test.expect and not result.test.random:
                self.counts['FAIL'] += 1
            else:
                self.counts['SKIP'] += 1

            if self.options.tinderbox:
                # Print each subtest line, then the overall result, and
                # return early (no dev-format progress message).
                if len(result.results) > 1:
                    for sub_ok, msg in result.results:
                        label = self.LABELS[(sub_ok, result.test.expect,
                                             result.test.random)][0]
                        if label == 'TEST-UNEXPECTED-PASS':
                            label = 'TEST-PASS (EXPECTED RANDOM)'
                        self.print_tinderbox_result(label, result.test.path,
                                                    time=output.dt,
                                                    message=msg)
                self.print_tinderbox_result(
                    self.LABELS[(result.result, result.test.expect,
                                 result.test.random)][0],
                    result.test.path, time=output.dt)
                return
            if dev_label:
                def singular(label):
                    # 'FIXES' -> 'FIXED'; others drop the trailing 'S'.
                    return "FIXED" if label == "FIXES" else label[:-1]
                self.pb.message("%s - %s" % (singular(dev_label),
                                             output.test.path))

        self.pb.update(self.n, self.counts)

    def finish(self, completed):
        # End the progress bar; print the dev summary unless tinderbox
        # lines were already emitted per test.
        self.pb.finish(completed)
        if not self.options.tinderbox:
            self.list(completed)

    # Conceptually, this maps (test result x test expection) to text labels.
    # key is (result, expect, random)
    # value is (tinderbox label, dev test category)
    LABELS = {
        (TestResult.CRASH, False, False): ('TEST-UNEXPECTED-FAIL', 'REGRESSIONS'),
        (TestResult.CRASH, False, True): ('TEST-UNEXPECTED-FAIL', 'REGRESSIONS'),
        (TestResult.CRASH, True, False): ('TEST-UNEXPECTED-FAIL', 'REGRESSIONS'),
        (TestResult.CRASH, True, True): ('TEST-UNEXPECTED-FAIL', 'REGRESSIONS'),

        (TestResult.FAIL, False, False): ('TEST-KNOWN-FAIL', ''),
        (TestResult.FAIL, False, True): ('TEST-KNOWN-FAIL (EXPECTED RANDOM)', ''),
        (TestResult.FAIL, True, False): ('TEST-UNEXPECTED-FAIL', 'REGRESSIONS'),
        (TestResult.FAIL, True, True): ('TEST-KNOWN-FAIL (EXPECTED RANDOM)', ''),

        (TestResult.PASS, False, False): ('TEST-UNEXPECTED-PASS', 'FIXES'),
        (TestResult.PASS, False, True): ('TEST-PASS (EXPECTED RANDOM)', ''),
        (TestResult.PASS, True, False): ('TEST-PASS', ''),
        (TestResult.PASS, True, True): ('TEST-PASS (EXPECTED RANDOM)', ''),
    }

    def list(self, completed):
        # Dev-format summary: grouped paths, optional failure file, verdict.
        for label, paths in sorted(self.groups.items()):
            if label == '':
                continue

            print label
            for path in paths:
                print '    %s' % path

        if self.options.failure_file:
            # NOTE(review): file is not closed if a print raises; a
            # `with` block would be safer.
            failure_file = open(self.options.failure_file, 'w')
            if not self.all_passed():
                if 'REGRESSIONS' in self.groups:
                    for path in self.groups['REGRESSIONS']:
                        print >> failure_file, path
                if 'TIMEOUTS' in self.groups:
                    for path in self.groups['TIMEOUTS']:
                        print >> failure_file, path
            failure_file.close()

        suffix = '' if completed else ' (partial run -- interrupted by user)'
        if self.all_passed():
            print 'PASS' + suffix
        else:
            print 'FAIL' + suffix

    def all_passed(self):
        # True when no regressions and no timeouts were recorded.
        return 'REGRESSIONS' not in self.groups and 'TIMEOUTS' not in self.groups

    def print_tinderbox_result(self, label, path, message=None, skip=False,
                               time=None):
        # Build and print one "LABEL | path | ..." tinderbox line.
        # NOTE(review): `" |" + self.options.shell_args` assumes shell_args
        # is a string (possibly empty) -- confirm against option parsing.
        result = label
        result += " | " + path
        result += " |" + self.options.shell_args
        if message:
            result += " | " + message
        if skip:
            result += ' | (SKIP)'
        if time > self.options.timeout:
            result += ' | (TIMEOUT)'
        print result
class ResultsSink:
    """Collect per-test outputs, tally PASS/FAIL/TIMEOUT/SKIP counts, drive
    the progress bar, and report results either in developer format or in
    automation format (``TEST-...`` lines plus structured logging and an
    optional wptreport).
    """

    def __init__(self, testsuite, options, testcount):
        # `options` is assumed to be the parsed command-line namespace
        # (output_fp, format, wptreport, hide_progress, ...) -- TODO confirm.
        self.options = options
        self.fp = options.output_fp
        if self.options.format == 'automation':
            # Structured logger exists only in automation mode;
            # print_automation_result() is only reached in that mode.
            self.slog = TestLogger(testsuite)
            self.slog.suite_start()

        # Optional wptreport output; silently disabled when the handler
        # module is unavailable.
        self.wptreport = None
        if self.options.wptreport:
            try:
                from .wptreport import WptreportHandler
                self.wptreport = WptreportHandler(self.options.wptreport)
                self.wptreport.suite_start()
            except ImportError:
                pass

        self.groups = {}        # dev category label -> [TestResult, ...]
        self.output_dict = {}   # test path -> first output seen (check_output mode)
        self.counts = {'PASS': 0, 'FAIL': 0, 'TIMEOUT': 0, 'SKIP': 0}
        self.slow_tests = []    # TestDuration records above the slow threshold
        self.n = 0              # number of results pushed so far

        if options.hide_progress:
            self.pb = NullProgressBar()
        else:
            fmt = [
                {'value': 'PASS', 'color': 'green'},
                {'value': 'FAIL', 'color': 'red'},
                {'value': 'TIMEOUT', 'color': 'blue'},
                {'value': 'SKIP', 'color': 'brightgray'},
            ]
            self.pb = ProgressBar(testcount, fmt)

    def push(self, output):
        # Record one finished test's output: update counts, group it under a
        # dev category, print output/cmd as configured, and in automation
        # mode emit the TEST-* result lines.
        if self.options.show_slow and output.dt >= self.options.slow_test_threshold:
            self.slow_tests.append(TestDuration(output.test, output.dt))
        if output.timed_out:
            self.counts['TIMEOUT'] += 1
        if isinstance(output, NullTestOutput):
            # Skipped test: no process was run for it.
            if self.options.format == 'automation':
                self.print_automation_result(
                    'TEST-KNOWN-FAIL', output.test, time=output.dt,
                    skip=True)
            self.counts['SKIP'] += 1
            self.n += 1
        else:
            result = TestResult.from_output(output)

            if self.wptreport is not None and result.wpt_results:
                self.wptreport.test(result.wpt_results, output.dt)

            tup = (result.result, result.test.expect, result.test.random)
            dev_label = self.LABELS[tup][1]

            if self.options.check_output:
                # NOTE(review): `in self.output_dict.keys()` could be
                # simply `in self.output_dict`.
                if output.test.path in self.output_dict.keys():
                    if self.output_dict[output.test.path] != output:
                        self.counts['FAIL'] += 1
                        self.print_automation_result(
                            "TEST-UNEXPECTED-FAIL",
                            result.test,
                            time=output.dt,
                            message="Same test with different flag producing different output")
                else:
                    self.output_dict[output.test.path] = output

            if output.timed_out:
                dev_label = 'TIMEOUTS'
            self.groups.setdefault(dev_label, []).append(result)

            # Regressions are shown unless explicitly suppressed; other
            # categories honor show_output / failed_only.
            if dev_label == 'REGRESSIONS':
                show_output = self.options.show_output \
                    or not self.options.no_show_failed
            elif dev_label == 'TIMEOUTS':
                show_output = self.options.show_output
            else:
                show_output = self.options.show_output \
                    and not self.options.failed_only

            if dev_label in ('REGRESSIONS', 'TIMEOUTS'):
                show_cmd = self.options.show_cmd
            else:
                show_cmd = self.options.show_cmd \
                    and not self.options.failed_only

            if show_output or show_cmd:
                self.pb.beginline()

                if show_output:
                    print('## {}: rc = {:d}, run time = {}'.format(
                        output.test.path, output.rc, output.dt), file=self.fp)

                if show_cmd:
                    print(escape_cmdline(output.cmd), file=self.fp)

                if show_output:
                    self.fp.write(output.out)
                    self.fp.write(output.err)

            self.n += 1

            if result.result == TestResult.PASS and not result.test.random:
                self.counts['PASS'] += 1
            elif result.test.expect and not result.test.random:
                self.counts['FAIL'] += 1
            else:
                self.counts['SKIP'] += 1

            if self.options.format == 'automation':
                # Emit each failing subtest line, then the overall result,
                # and return early (no dev-format progress message).
                if result.result != TestResult.PASS and len(result.results) > 1:
                    for sub_ok, msg in result.results:
                        tup = (sub_ok, result.test.expect, result.test.random)
                        label = self.LABELS[tup][0]
                        if label == 'TEST-UNEXPECTED-PASS':
                            label = 'TEST-PASS (EXPECTED RANDOM)'
                        self.print_automation_result(
                            label, result.test, time=output.dt,
                            message=msg)
                tup = (result.result, result.test.expect, result.test.random)
                self.print_automation_result(self.LABELS[tup][0],
                                             result.test,
                                             time=output.dt,
                                             extra=getattr(output, 'extra', None))
                return

            if dev_label:
                def singular(label):
                    # 'FIXES' -> 'FIXED'; others drop the trailing 'S'.
                    return "FIXED" if label == "FIXES" else label[:-1]
                self.pb.message("{} - {}".format(singular(dev_label),
                                                 output.test.path))

        self.pb.update(self.n, self.counts)

    def finish(self, completed):
        # Close out the run: end the progress bar, then either end the
        # structured log (automation) or print the dev-format summary.
        self.pb.finish(completed)
        if self.options.format == 'automation':
            self.slog.suite_end()
        else:
            self.list(completed)
        if self.wptreport is not None:
            self.wptreport.suite_end()

    # Conceptually, this maps (test result x test expection) to text labels.
    # key is (result, expect, random)
    # value is (automation label, dev test category)
    LABELS = {
        (TestResult.CRASH, False, False): ('TEST-UNEXPECTED-FAIL', 'REGRESSIONS'),
        (TestResult.CRASH, False, True): ('TEST-UNEXPECTED-FAIL', 'REGRESSIONS'),
        (TestResult.CRASH, True, False): ('TEST-UNEXPECTED-FAIL', 'REGRESSIONS'),
        (TestResult.CRASH, True, True): ('TEST-UNEXPECTED-FAIL', 'REGRESSIONS'),

        (TestResult.FAIL, False, False): ('TEST-KNOWN-FAIL', ''),
        (TestResult.FAIL, False, True): ('TEST-KNOWN-FAIL (EXPECTED RANDOM)', ''),
        (TestResult.FAIL, True, False): ('TEST-UNEXPECTED-FAIL', 'REGRESSIONS'),
        (TestResult.FAIL, True, True): ('TEST-KNOWN-FAIL (EXPECTED RANDOM)', ''),

        (TestResult.PASS, False, False): ('TEST-UNEXPECTED-PASS', 'FIXES'),
        (TestResult.PASS, False, True): ('TEST-PASS (EXPECTED RANDOM)', ''),
        (TestResult.PASS, True, False): ('TEST-PASS', ''),
        (TestResult.PASS, True, True): ('TEST-PASS (EXPECTED RANDOM)', ''),
    }

    def list(self, completed):
        # Dev-format summary: grouped results, optional failure file,
        # PASS/FAIL verdict, and (optionally) the slow-test table.
        for label, results in sorted(self.groups.items()):
            if label == '':
                continue

            print(label)
            for result in results:
                print('    {}'.format(' '.join(result.test.jitflags +
                                               [result.test.path])))

        if self.options.failure_file:
            # NOTE(review): file is not closed if a print raises; a
            # `with` block would be safer.
            failure_file = open(self.options.failure_file, 'w')
            if not self.all_passed():
                if 'REGRESSIONS' in self.groups:
                    for result in self.groups['REGRESSIONS']:
                        print(result.test.path, file=failure_file)
                if 'TIMEOUTS' in self.groups:
                    for result in self.groups['TIMEOUTS']:
                        print(result.test.path, file=failure_file)
            failure_file.close()

        suffix = '' if completed else ' (partial run -- interrupted by user)'
        if self.all_passed():
            print('PASS' + suffix)
        else:
            print('FAIL' + suffix)

        if self.options.show_slow:
            min_duration = self.options.slow_test_threshold
            print('Slow tests (duration > {}s)'.format(min_duration))
            slow_tests = sorted(self.slow_tests, key=lambda x: x.duration,
                                reverse=True)
            # NOTE(review): `any` shadows the builtin here.
            any = False
            for test in slow_tests:
                print('{:>5} {}'.format(round(test.duration, 2), test.test))
                any = True
            if not any:
                print('None')

    def all_passed(self):
        # True when no regressions and no timeouts were recorded.
        return 'REGRESSIONS' not in self.groups and 'TIMEOUTS' not in self.groups

    def print_automation_result(self, label, test, message=None, skip=False,
                                time=None, extra=None):
        # Print one automation-format result line and forward it to the
        # structured logger (requires automation mode, where self.slog
        # exists).
        # NOTE(review): with the default time=None, the comparison and the
        # '{:.1f}' format below would raise TypeError on Python 3; all
        # current callers pass time=output.dt.
        result = label
        result += " | " + test.path
        args = []
        if self.options.shell_args:
            args.append(self.options.shell_args)
        args += test.jitflags
        result += ' | (args: "{}")'.format(' '.join(args))
        if message:
            result += " | " + message
        if skip:
            result += ' | (SKIP)'
        if time > self.options.timeout:
            result += ' | (TIMEOUT)'
        result += ' [{:.1f} s]'.format(time)
        print(result)

        details = {'extra': extra.copy() if extra else {}}
        if self.options.shell_args:
            details['extra']['shell_args'] = self.options.shell_args
        details['extra']['jitflags'] = test.jitflags
        if message:
            details['message'] = message
        status = 'FAIL' if 'TEST-UNEXPECTED' in label else 'PASS'
        self.slog.test(test.path, status, time or 0, **details)
class ResultsSink:
    """Collect per-test outputs, tally PASS/FAIL/TIMEOUT/SKIP counts, drive
    the progress bar, and report results in developer or automation
    (``TEST-...`` line) format.
    """

    def __init__(self, options, testcount):
        # `options` is assumed to be the parsed command-line namespace
        # (output_fp, format, check_output, hide_progress, ...) -- TODO
        # confirm against the caller that builds it.
        self.options = options
        self.fp = options.output_fp

        self.groups = {}        # dev category label -> [TestResult, ...]
        self.output_dict = {}   # test path -> first output seen (check_output mode)
        self.counts = {'PASS': 0, 'FAIL': 0, 'TIMEOUT': 0, 'SKIP': 0}
        self.n = 0              # number of results pushed so far

        if options.hide_progress:
            self.pb = NullProgressBar()
        else:
            fmt = [
                {'value': 'PASS', 'color': 'green'},
                {'value': 'FAIL', 'color': 'red'},
                {'value': 'TIMEOUT', 'color': 'blue'},
                {'value': 'SKIP', 'color': 'brightgray'},
            ]
            self.pb = ProgressBar(testcount, fmt)

    def push(self, output):
        """Record one finished test's output: update counts, group it under a
        dev category, print output/cmd as configured, and in automation mode
        emit the TEST-* result lines."""
        if output.timed_out:
            self.counts['TIMEOUT'] += 1
        if isinstance(output, NullTestOutput):
            # Skipped test: no process was run for it.
            if self.options.format == 'automation':
                self.print_automation_result(
                    'TEST-KNOWN-FAIL', output.test, time=output.dt,
                    skip=True)
            self.counts['SKIP'] += 1
            self.n += 1
        else:
            result = TestResult.from_output(output)
            tup = (result.result, result.test.expect, result.test.random)
            dev_label = self.LABELS[tup][1]

            if self.options.check_output:
                # Fixed: membership test directly on the dict instead of
                # the redundant `.keys()` view.
                if output.test.path in self.output_dict:
                    if self.output_dict[output.test.path] != output:
                        self.counts['FAIL'] += 1
                        self.print_automation_result(
                            "TEST-UNEXPECTED-FAIL",
                            result.test,
                            time=output.dt,
                            message="Same test with different flag producing different output")
                else:
                    self.output_dict[output.test.path] = output

            if output.timed_out:
                dev_label = 'TIMEOUTS'
            self.groups.setdefault(dev_label, []).append(result)

            # Regressions are shown unless explicitly suppressed; other
            # categories honor show_output / failed_only.
            if dev_label == 'REGRESSIONS':
                show_output = self.options.show_output \
                    or not self.options.no_show_failed
            elif dev_label == 'TIMEOUTS':
                show_output = self.options.show_output
            else:
                show_output = self.options.show_output \
                    and not self.options.failed_only

            if dev_label in ('REGRESSIONS', 'TIMEOUTS'):
                show_cmd = self.options.show_cmd
            else:
                show_cmd = self.options.show_cmd \
                    and not self.options.failed_only

            if show_output or show_cmd:
                self.pb.beginline()

                if show_output:
                    print('## {}: rc = {:d}, run time = {}'.format(
                        output.test.path, output.rc, output.dt), file=self.fp)

                if show_cmd:
                    print(escape_cmdline(output.cmd), file=self.fp)

                if show_output:
                    self.fp.write(output.out)
                    self.fp.write(output.err)

            self.n += 1

            if result.result == TestResult.PASS and not result.test.random:
                self.counts['PASS'] += 1
            elif result.test.expect and not result.test.random:
                self.counts['FAIL'] += 1
            else:
                self.counts['SKIP'] += 1

            if self.options.format == 'automation':
                # Emit each failing subtest line, then the overall result,
                # and return early (no dev-format progress message).
                if result.result != TestResult.PASS and len(result.results) > 1:
                    for sub_ok, msg in result.results:
                        tup = (sub_ok, result.test.expect, result.test.random)
                        label = self.LABELS[tup][0]
                        if label == 'TEST-UNEXPECTED-PASS':
                            label = 'TEST-PASS (EXPECTED RANDOM)'
                        self.print_automation_result(
                            label, result.test, time=output.dt,
                            message=msg)
                tup = (result.result, result.test.expect, result.test.random)
                self.print_automation_result(
                    self.LABELS[tup][0], result.test, time=output.dt)
                return

            if dev_label:
                def singular(label):
                    # 'FIXES' -> 'FIXED'; others drop the trailing 'S'.
                    return "FIXED" if label == "FIXES" else label[:-1]
                self.pb.message("{} - {}".format(singular(dev_label),
                                                 output.test.path))

        self.pb.update(self.n, self.counts)

    def finish(self, completed):
        """Close out the run: end the progress bar; print the dev-format
        summary unless automation lines were already emitted per test."""
        self.pb.finish(completed)
        # Fixed: `!=` instead of the confusing `not ... == ...`.
        if self.options.format != 'automation':
            self.list(completed)

    # Conceptually, this maps (test result x test expectation) to text labels.
    # key is (result, expect, random)
    # value is (automation label, dev test category)
    LABELS = {
        (TestResult.CRASH, False, False): ('TEST-UNEXPECTED-FAIL', 'REGRESSIONS'),
        (TestResult.CRASH, False, True): ('TEST-UNEXPECTED-FAIL', 'REGRESSIONS'),
        (TestResult.CRASH, True, False): ('TEST-UNEXPECTED-FAIL', 'REGRESSIONS'),
        (TestResult.CRASH, True, True): ('TEST-UNEXPECTED-FAIL', 'REGRESSIONS'),

        (TestResult.FAIL, False, False): ('TEST-KNOWN-FAIL', ''),
        (TestResult.FAIL, False, True): ('TEST-KNOWN-FAIL (EXPECTED RANDOM)', ''),
        (TestResult.FAIL, True, False): ('TEST-UNEXPECTED-FAIL', 'REGRESSIONS'),
        (TestResult.FAIL, True, True): ('TEST-KNOWN-FAIL (EXPECTED RANDOM)', ''),

        (TestResult.PASS, False, False): ('TEST-UNEXPECTED-PASS', 'FIXES'),
        (TestResult.PASS, False, True): ('TEST-PASS (EXPECTED RANDOM)', ''),
        (TestResult.PASS, True, False): ('TEST-PASS', ''),
        (TestResult.PASS, True, True): ('TEST-PASS (EXPECTED RANDOM)', ''),
    }

    def list(self, completed):
        """Print the dev-format summary: grouped results, optional failure
        file, and the PASS/FAIL verdict."""
        for label, results in sorted(self.groups.items()):
            if label == '':
                continue

            print(label)
            for result in results:
                print('    {}'.format(' '.join(result.test.jitflags +
                                               [result.test.path])))

        if self.options.failure_file:
            # Fixed: `with` guarantees the file is closed even if a print
            # raises (original left it open on exception).
            with open(self.options.failure_file, 'w') as failure_file:
                if not self.all_passed():
                    if 'REGRESSIONS' in self.groups:
                        for result in self.groups['REGRESSIONS']:
                            print(result.test.path, file=failure_file)
                    if 'TIMEOUTS' in self.groups:
                        for result in self.groups['TIMEOUTS']:
                            print(result.test.path, file=failure_file)

        suffix = '' if completed else ' (partial run -- interrupted by user)'
        if self.all_passed():
            print('PASS' + suffix)
        else:
            print('FAIL' + suffix)

    def all_passed(self):
        """True when no regressions and no timeouts were recorded."""
        return 'REGRESSIONS' not in self.groups and 'TIMEOUTS' not in self.groups

    def print_automation_result(self, label, test, message=None, skip=False,
                                time=None):
        """Build and print one "LABEL | path | (args...) | ..." line."""
        # Fixed latent bug: with the default time=None, `None > timeout`
        # raises TypeError on Python 3. All current callers pass
        # time=output.dt, so this normalization does not change behavior.
        if time is None:
            time = 0
        result = label
        result += " | " + test.path
        args = []
        if self.options.shell_args:
            args.append(self.options.shell_args)
        args += test.jitflags
        result += ' | (args: "{}")'.format(' '.join(args))
        if message:
            result += " | " + message
        if skip:
            result += ' | (SKIP)'
        if time > self.options.timeout:
            result += ' | (TIMEOUT)'
        print(result)
class ResultsSink:
    """Collect per-test outputs, tally PASS/FAIL/TIMEOUT/SKIP counts, drive
    the progress bar, and report results in developer or tinderbox format.
    """

    def __init__(self, options, testcount):
        # `options` is assumed to be the parsed command-line namespace
        # (output_fp, tinderbox, hide_progress, ...) -- TODO confirm.
        self.options = options
        self.fp = options.output_fp

        self.groups = {}   # dev category label -> [test path, ...]
        self.counts = {'PASS': 0, 'FAIL': 0, 'TIMEOUT': 0, 'SKIP': 0}
        self.n = 0         # number of results pushed so far

        if options.hide_progress:
            self.pb = NullProgressBar()
        else:
            fmt = [
                {'value': 'PASS', 'color': 'green'},
                {'value': 'FAIL', 'color': 'red'},
                {'value': 'TIMEOUT', 'color': 'blue'},
                {'value': 'SKIP', 'color': 'brightgray'},
            ]
            self.pb = ProgressBar(testcount, fmt)

    def push(self, output):
        """Record one finished test: update counts, group it under a dev
        category, optionally echo output/cmd, emit tinderbox lines."""
        if output.timed_out:
            self.counts['TIMEOUT'] += 1
        if isinstance(output, NullTestOutput):
            # Skipped test: no process was run for it.
            if self.options.tinderbox:
                self.print_tinderbox_result('TEST-KNOWN-FAIL',
                                            output.test.path,
                                            time=output.dt, skip=True)
            self.counts['SKIP'] += 1
            self.n += 1
        else:
            result = TestResult.from_output(output)
            tup = (result.result, result.test.expect, result.test.random)
            dev_label = self.LABELS[tup][1]
            if output.timed_out:
                dev_label = 'TIMEOUTS'
            self.groups.setdefault(dev_label, []).append(result.test.path)

            show = self.options.show
            # failed_only suppresses echo for everything but regressions
            # and timeouts.
            if self.options.failed_only and dev_label not in ('REGRESSIONS',
                                                              'TIMEOUTS'):
                show = False
            if show:
                self.pb.beginline()

            if show:
                if self.options.show_output:
                    print('## %s: rc = %d, run time = %f' % (
                        output.test.path, output.rc, output.dt), file=self.fp)
                if self.options.show_cmd:
                    print(escape_cmdline(output.cmd), file=self.fp)
                if self.options.show_output:
                    self.fp.write(output.out)
                    self.fp.write(output.err)

            self.n += 1

            if result.result == TestResult.PASS and not result.test.random:
                self.counts['PASS'] += 1
            elif result.test.expect and not result.test.random:
                self.counts['FAIL'] += 1
            else:
                self.counts['SKIP'] += 1

            if self.options.tinderbox:
                # Print each subtest line, then the overall result, and
                # return early (no dev-format progress message).
                if len(result.results) > 1:
                    for sub_ok, msg in result.results:
                        label = self.LABELS[(sub_ok, result.test.expect,
                                             result.test.random)][0]
                        if label == 'TEST-UNEXPECTED-PASS':
                            label = 'TEST-PASS (EXPECTED RANDOM)'
                        self.print_tinderbox_result(label, result.test.path,
                                                    time=output.dt,
                                                    message=msg)
                self.print_tinderbox_result(self.LABELS[
                    (result.result, result.test.expect,
                     result.test.random)][0],
                    result.test.path, time=output.dt)
                return
            if dev_label:
                def singular(label):
                    # 'FIXES' -> 'FIXED'; others drop the trailing 'S'.
                    return "FIXED" if label == "FIXES" else label[:-1]
                self.pb.message("%s - %s" % (singular(dev_label),
                                             output.test.path))

        self.pb.update(self.n, self.counts)

    def finish(self, completed):
        """End the progress bar; print the dev summary unless tinderbox
        lines were already emitted per test."""
        self.pb.finish(completed)
        if not self.options.tinderbox:
            self.list(completed)

    # Conceptually, this maps (test result x test expectation) to text labels.
    # key is (result, expect, random)
    # value is (tinderbox label, dev test category)
    LABELS = {
        (TestResult.CRASH, False, False): ('TEST-UNEXPECTED-FAIL', 'REGRESSIONS'),
        (TestResult.CRASH, False, True): ('TEST-UNEXPECTED-FAIL', 'REGRESSIONS'),
        (TestResult.CRASH, True, False): ('TEST-UNEXPECTED-FAIL', 'REGRESSIONS'),
        (TestResult.CRASH, True, True): ('TEST-UNEXPECTED-FAIL', 'REGRESSIONS'),

        (TestResult.FAIL, False, False): ('TEST-KNOWN-FAIL', ''),
        (TestResult.FAIL, False, True): ('TEST-KNOWN-FAIL (EXPECTED RANDOM)', ''),
        (TestResult.FAIL, True, False): ('TEST-UNEXPECTED-FAIL', 'REGRESSIONS'),
        (TestResult.FAIL, True, True): ('TEST-KNOWN-FAIL (EXPECTED RANDOM)', ''),

        (TestResult.PASS, False, False): ('TEST-UNEXPECTED-PASS', 'FIXES'),
        (TestResult.PASS, False, True): ('TEST-PASS (EXPECTED RANDOM)', ''),
        (TestResult.PASS, True, False): ('TEST-PASS', ''),
        (TestResult.PASS, True, True): ('TEST-PASS (EXPECTED RANDOM)', ''),
    }

    def list(self, completed):
        """Print the dev-format summary: grouped paths, optional failure
        file, and the PASS/FAIL verdict."""
        for label, paths in sorted(self.groups.items()):
            if label == '':
                continue

            print(label)
            for path in paths:
                print('    %s' % path)

        if self.options.failure_file:
            # Fixed: `with` guarantees the file is closed even if a print
            # raises (original left it open on exception).
            with open(self.options.failure_file, 'w') as failure_file:
                if not self.all_passed():
                    if 'REGRESSIONS' in self.groups:
                        for path in self.groups['REGRESSIONS']:
                            print(path, file=failure_file)
                    if 'TIMEOUTS' in self.groups:
                        for path in self.groups['TIMEOUTS']:
                            print(path, file=failure_file)

        suffix = '' if completed else ' (partial run -- interrupted by user)'
        if self.all_passed():
            print('PASS' + suffix)
        else:
            print('FAIL' + suffix)

    def all_passed(self):
        """True when no regressions and no timeouts were recorded."""
        return 'REGRESSIONS' not in self.groups and 'TIMEOUTS' not in self.groups

    def print_tinderbox_result(self, label, path, message=None, skip=False,
                               time=None):
        """Build and print one "LABEL | path | ..." tinderbox line."""
        # NOTE(review): `" |" + self.options.shell_args` assumes shell_args
        # is a string (possibly empty) -- confirm against option parsing.
        result = label
        result += " | " + path
        result += " |" + self.options.shell_args
        if message:
            result += " | " + message
        if skip:
            result += ' | (SKIP)'
        # Fixed Python 3 port bug: with the default time=None, the bare
        # comparison `time > self.options.timeout` raised TypeError
        # (None was silently "less than" any number on Python 2).
        if time is not None and time > self.options.timeout:
            result += ' | (TIMEOUT)'
        print(result)