class ConsoleLogFormatter(object):
    '''
    Formats output to be sent to an interactive terminal.

    Colors may be added based on logging level.
    '''
    # Terminal capability object used to look up color escape sequences.
    color = terminal.get_termcap()
    # Escape sequence restoring the terminal's default attributes.
    reset = color.Normal
    # For now, just change color based on the logging level.
    level_colormap = {
        BOLD: color.White + color.Bold,
        FATAL: color.Red,
        WARN: color.Yellow,
    }

    def __init__(self):
        pass

    def format(self, record):
        '''Return the record's message wrapped in its level's color codes.'''
        prefix = self.level_colormap.get(record.levelno, self.color.Normal)
        return prefix + record.msg + self.reset
class TerminalHandler(log.Handler):
    '''
    Log handler which renders test records on an interactive terminal,
    optionally streaming captured test stdout/stderr through as it arrives.
    '''
    color = terminal.get_termcap()
    # Colors used for messages at the given log levels.
    verbosity_mapping = {
        log.LogLevel.Warn: color.Yellow,
        log.LogLevel.Error: color.Red,
    }
    # Escape sequence restoring the terminal's default attributes.
    default = color.Normal

    def __init__(self, stream=False, verbosity=log.LogLevel.Info):
        self.stream = stream
        self.verbosity = verbosity
        # Dispatch table mapping a record's type id to its handler method.
        self.mapping = {
            log.TestResult.type_id: self.handle_testresult,
            log.SuiteStatus.type_id: self.handle_suitestatus,
            log.TestStatus.type_id: self.handle_teststatus,
            log.TestStderr.type_id: self.handle_stderr,
            log.TestStdout.type_id: self.handle_stdout,
            log.TestMessage.type_id: self.handle_testmessage,
            log.LibraryMessage.type_id: self.handle_librarymessage,
        }

    def _display_outcome(self, name, outcome, reason=None):
        '''Print a bold, colorized outcome line, then any failure reason.'''
        line = (self.color.Bold
                + SummaryHandler.colormap[outcome]
                + name
                + ' '
                + state.Result.enums[outcome]
                + SummaryHandler.reset)
        print(line)

        if reason is None:
            return
        log.test_log.info('')
        log.test_log.info('Reason:')
        log.test_log.info(reason)
        log.test_log.info(terminal.separator('-'))

    def handle_teststatus(self, record):
        if record['status'] == state.Status.Running:
            log.test_log.debug('Starting Test Case: %s' %
                               record['metadata'].name)

    def handle_testresult(self, record):
        self._display_outcome(
            'Test: %s' % record['metadata'].name,
            record['result'].value)

    def handle_suitestatus(self, record):
        if record['status'] == state.Status.Running:
            log.test_log.debug('Starting Test Suite: %s ' %
                               record['metadata'].name)

    def handle_stderr(self, record):
        if self.stream:
            print(record.data['buffer'], file=sys.stderr, end='')

    def handle_stdout(self, record):
        if self.stream:
            print(record.data['buffer'], file=sys.stdout, end='')

    def handle_testmessage(self, record):
        if self.stream:
            print(self._colorize(record['message'], record['level']))

    def handle_librarymessage(self, record):
        print(self._colorize(record['message'], record['level'],
                             record['bold']))

    def _colorize(self, message, level, bold=False):
        '''Wrap `message` in escape codes for `level` (optionally bold).'''
        emphasis = self.color.Bold if bold else ''
        hue = self.verbosity_mapping.get(level, '')
        return '%s%s%s%s' % (emphasis, hue, message, self.default)

    def handle(self, record):
        '''Dispatch `record`, dropping it if above the verbosity limit.'''
        level = record.data.get('level', self.verbosity)
        if level > self.verbosity:
            return
        handler = self.mapping.get(record.type_id, None)
        if handler is not None:
            handler(record)

    def set_verbosity(self, verbosity):
        self.verbosity = verbosity
class SummaryHandler(log.Handler):
    '''
    Log handler which collects test results and prints a single
    colorized summary line when testing completes.
    '''
    color = terminal.get_termcap()
    reset = color.Normal
    # Color used to render each possible test result.
    colormap = {
        state.Result.Errored: color.Red,
        state.Result.Failed: color.Red,
        state.Result.Passed: color.Green,
        state.Result.Skipped: color.Cyan,
    }
    sep_fmtkey = 'separator'
    sep_fmtstr = '{%s}' % sep_fmtkey

    def __init__(self):
        # Dispatch table mapping a record's type id to its handler method.
        self.mapping = {
            log.TestResult.type_id: self.handle_testresult,
            log.LibraryStatus.type_id: self.handle_library_status,
        }
        # Restarted when the library begins building so the summary can
        # report how long testing took.
        self._timer = Timer()
        # Accumulated outcome of every finished test.
        self.results = []

    def handle_library_status(self, record):
        if record['status'] == state.Status.Building:
            self._timer.restart()

    def handle_testresult(self, record):
        result = record['result'].value
        if result in (state.Result.Skipped, state.Result.Failed,
                      state.Result.Passed, state.Result.Errored):
            self.results.append(result)

    def handle(self, record):
        self.mapping.get(record.type_id, lambda _: None)(record)

    def close(self):
        print(self._display_summary())

    def _display_summary(self):
        '''Build the one-line summary, colored by the worst outcome seen.'''
        most_severe_outcome = None
        outcome_fmt = ' {count} {outcome}'
        strings = []

        outcome_count = [0] * len(state.Result.enums)
        for result in self.results:
            outcome_count[result] += 1

        # Iterate over enums so they are in order of severity
        for outcome in state.Result.enums:
            outcome = getattr(state.Result, outcome)
            count = outcome_count[outcome]
            if count:
                strings.append(outcome_fmt.format(
                    count=count,
                    outcome=state.Result.enums[outcome]))
                most_severe_outcome = outcome
        string = ','.join(strings)

        if most_severe_outcome is None:
            string = ' No testing done'
            most_severe_outcome = state.Result.Passed
        else:
            # Use fixed-point ('.2f'); the previous '{:.2}' spec meant two
            # *significant digits*, printing e.g. '1.2e+02' for runs longer
            # than 100 seconds.
            string = ' Results:' + string + ' in {:.2f} seconds '.format(
                self._timer.active_time())
        string += ' '
        return terminal.insert_separator(
            string,
            color=self.colormap[most_severe_outcome] + self.color.Bold)
class ConsoleLogger(ResultLogger):
    '''
    A logger implementing the streaming ResultLogger interface. This logger
    is used to stream testing result output to a user terminal.
    '''
    color = terminal.get_termcap()
    reset = color.Normal
    # Color used to render each possible outcome.
    colormap = {
        FAIL: color.Red,
        ERROR: color.Red,
        PASS: color.Green,
        XFAIL: color.Cyan,
        SKIP: color.Cyan,
    }
    sep_fmtkey = 'separator'
    sep_fmtstr = '{%s}' % sep_fmtkey
    bad_item = ('Result formatter can only handle test cases'
                ' and test suites')

    def __init__(self):
        # Tally of how many items finished with each outcome.
        self.outcome_count = {outcome: 0 for outcome in Outcome.enums}
        # Stack of enclosing items; _current_item is the innermost one.
        self._item_list = []
        self._current_item = None
        self.timer = Timer()
        self._started = False

    def begin_testing(self):
        self.timer.start()
        self._started = True

    def begin(self, item):
        '''Push `item` (a TestSuite or TestCase) as the current item.'''
        if isinstance(item, TestSuite):
            self._begin_testsuite(item)
        elif isinstance(item, TestCase):
            self._begin_testcase(item)
        elif __debug__:
            raise AssertionError(self.bad_item)
        self._item_list.append(self._current_item)
        self._current_item = item

    def _begin_testsuite(self, test_suite):
        log.info('Starting TestSuite: %s' % test_suite.name)

    def _begin_testcase(self, test_case):
        log.info('Starting TestCase: %s' % test_case.name)

    def set_current_outcome(self, outcome, **kwargs):
        '''Set the outcome of the current item.'''
        if isinstance(self._current_item, TestSuite):
            pass # TODO, for now we dont' do anything with this.
        elif isinstance(self._current_item, TestCase):
            self._set_testcase_outcome(self._current_item, outcome, **kwargs)
        elif __debug__:
            raise AssertionError(self.bad_item)

    def _set_testcase_outcome(self, test_case, outcome, reason=None,
                              **kwargs):
        # Display the outcome line and, for failures, the reason.
        log.bold(self.colormap[outcome] + test_case.name + self.reset)
        self.outcome_count[outcome] += 1

        if reason is not None:
            log.info('')
            log.info('Reason:')
            log.info(reason)
            log.info(terminal.separator('-'))

    def _set_testsuite_outcome(self, test_suite, outcome, **kwargs):
        pass # Do nothing for test suites.

    def skip(self, item, reason):
        '''Report that `item` was skipped, giving the reason.'''
        if isinstance(item, TestSuite):
            pass # TODO, for now we dont' do anything with this.
        elif isinstance(item, TestCase):
            self._skip_testcase(item, reason)
        elif __debug__:
            raise AssertionError(self.bad_item)

    def _skip_testcase(self, test_case, reason):
        log.display('{color}Skipping: {name}{reset}'.format(
            color=self.colormap[Outcome.SKIP],
            name=test_case.name,
            reset=self.reset))
        self.outcome_count[Outcome.SKIP] += 1

    def end_current(self):
        '''Pop the current item, restoring its enclosing item.'''
        if isinstance(self._current_item, TestSuite):
            self._end_testsuite(self._current_item)
        elif isinstance(self._current_item, TestCase):
            self._end_testcase(self._current_item)
        elif __debug__:
            raise AssertionError(self.bad_item)
        self._current_item = self._item_list.pop()

    def _end_testcase(self, test_case):
        pass

    def _end_testsuite(self, test_suite):
        pass

    def end_testing(self):
        if self._started:
            self.timer.stop()
            log.display(self._display_summary())
            self._started = False

    def _display_summary(self):
        '''Build the one-line summary, colored by the worst outcome seen.'''
        most_severe_outcome = None
        outcome_fmt = ' {count} {outcome}'
        strings = []

        # Iterate over enums so they are in order of severity
        for outcome in Outcome.enums:
            count = self.outcome_count[outcome]
            if count:
                strings.append(
                    outcome_fmt.format(count=count, outcome=outcome.name))
                most_severe_outcome = outcome
        string = ','.join(strings)
        if most_severe_outcome is None:
            string = ' No testing done'
            most_severe_outcome = Outcome.PASS
        # Use fixed-point ('.2f'); the previous '{time:.2}' spec meant two
        # *significant digits*, printing e.g. '1.2e+02' for runs longer than
        # 100 seconds.
        string += ' in {time:.2f} seconds '.format(time=self.timer.runtime())
        return terminal.insert_separator(
            string,
            color=self.colormap[most_severe_outcome] + self.color.Bold)