def test_stale_stream(tmpdir):
    # Verify that StreamEmitter transparently recovers when its underlying
    # stream goes stale (ESTALE, e.g. NFS): the emitter should detect the
    # error, reopen the path, and keep emitting without losing entries.
    # NOTE(review): Python 2 only -- subclasses the removed `file` builtin.
    # NOTE(review): an identical `test_stale_stream` is defined again later
    # in this file; that later binding is the one pytest actually runs.

    # make mock filestream with write/flush that goes stale after 100 writes
    # create logger with stream emitter to mocked file stream
    class StalewardFile(file):
        def __init__(self, *a, **kw):
            file.__init__(self, *a, **kw)
            self._write_count = 0  # writes performed so far

        def write(self, *a, **kw):
            self._write_count += 1
            if self._write_count > 100:
                # simulate a stale NFS handle: close and raise ESTALE
                exc = IOError('stale file handle')
                exc.errno = errno.ESTALE
                self.close()
                raise exc
            return file.write(self, *a, **kw)

    file_path = '%s/not_always_fresh.log' % (tmpdir,)
    stale_file_obj = StalewardFile(file_path, 'wb')
    emitter = StreamEmitter(stale_file_obj)
    sink = SensibleSink(SF('{status_char} - {iso_end}'),
                        emitter,
                        filters=[SensibleFilter(success=True)])
    logger = Logger('excelsilog', [sink])

    assert emitter.stream is stale_file_obj

    # 200 writes: the mock goes stale after 100, forcing a mid-run reopen
    for i in range(200):
        logger.info('yay').success()

    lines = open(file_path).read().splitlines()
    # every entry must have been persisted despite the stale handle
    assert len(lines) == 200
    # entries are fixed-width, so first and last should match in length
    assert len(lines[0]) == len(lines[-1])
    # the original handle was closed and replaced by the emitter
    assert stale_file_obj.closed
    assert emitter.stream is not stale_file_obj
def test_dup_sink():
    """Registering the same sink object twice must keep only one copy."""
    logger = Logger('test_exc_logger')
    duplicate = AggregateSink()
    # add the identical sink instance twice in a row
    logger.add_sink(duplicate)
    logger.add_sink(duplicate)
    assert len(logger.sinks) == 1
def test_sensible_basic():
    """End-to-end smoke test of SensibleSink status characters.

    Drives a logger through success, warn-then-success, failure, and
    exception outcomes, checking the status char recorded by the shared
    aggregate emitter after each one.
    """
    logger = Logger('test_ss', [strm_sink, fake_sink])
    print()

    # begin event of a debug action emits a lowercase 's'
    logger.debug('greet').success('hey')
    assert aggr_emtr.get_entry(-1).startswith('s')

    with logger.debug('greet') as act:
        # comments are emitted immediately, prefixed with '#'
        logger.comment('a_{}_quick', 'comment')
        assert aggr_emtr.get_entry(-1).startswith('#')
        assert 'a_comment_quick' in aggr_emtr.get_entry(-1)
        act.success('hello')
        act.warn("everything ok?")
    # completed success -> uppercase 'S'
    assert aggr_emtr.get_entry(-1).startswith('S')

    with logger.debug('greet') as act:
        act.failure('bye')
    # explicit failure -> 'F'
    assert aggr_emtr.get_entry(-1).startswith('F')

    try:
        with logger.debug('greet') as act:
            raise ZeroDivisionError('narwhalbaconderp')
    except Exception:
        pass
    # unhandled exception -> 'E'
    assert aggr_emtr.get_entry(-1).startswith('E')

    assert 'limit=' in repr(aggr_emtr)
    assert aggr_emtr.get_entries()
    aggr_emtr.clear()
    assert not aggr_emtr.get_entries()
def _test_exception():
    # Manual (underscore-prefixed, not pytest-collected) check that an
    # exception raised inside an action renders through the exception
    # fields of the sensible format template.  Python 2 only (`print tr`).
    # NOTE(review): a near-identical `_test_exception` is defined again
    # below (differing only in `exception=0` vs `exception=False`) and
    # shadows this one at import time.
    _tmpl = ('{iso_end} - {exc_type}: {exc_message}'
             ' - {func_name}:{line_number} - {exc_tb_list}')
    sink = SensibleSink(SF(_tmpl),
                        StreamEmitter('stderr'),
                        filters=[SensibleFilter(exception=False)])
    logger = Logger('excelsilog', [sink])
    # reraise=False keeps the ValueError from propagating out of the with
    with logger.info('A for Effort', reraise=False) as tr:
        print tr
        raise ValueError('E for Exception')
    return
def _test_exception():
    # Manual (underscore-prefixed, not pytest-collected) check that an
    # exception raised inside an action renders through the exception
    # fields of the sensible format template.  Python 2 only (`print tr`).
    # NOTE(review): this re-definition shadows the earlier copy above; the
    # only difference is `exception=0` here vs `exception=False` there.
    _tmpl = ('{iso_end} - {exc_type}: {exc_message}'
             ' - {func_name}:{line_number} - {exc_tb_list}')
    sink = SensibleSink(SF(_tmpl),
                        StreamEmitter('stderr'),
                        filters=[SensibleFilter(exception=0)])
    logger = Logger('excelsilog', [sink])
    # reraise=False keeps the ValueError from propagating out of the with
    with logger.info('A for Effort', reraise=False) as tr:
        print tr
        raise ValueError('E for Exception')
    return
def test_sensible_basic():
    # End-to-end smoke test of SensibleSink status characters (Python 2
    # variant -- bare `print` statement).
    # NOTE(review): duplicate of the `test_sensible_basic` defined earlier
    # in this file; this later binding is the one pytest actually runs.
    log = Logger('test_ss', [strm_sink, fake_sink])
    print
    # begin event of a debug action emits a lowercase 's'
    log.debug('greet').success('hey')
    assert aggr_emtr.get_entry(-1).startswith('s')
    with log.debug('greet') as t:
        # comments are emitted immediately, prefixed with '#'
        log.comment('a_{}_quick', 'comment')
        assert aggr_emtr.get_entry(-1).startswith('#')
        assert 'a_comment_quick' in aggr_emtr.get_entry(-1)
        t.success('hello')
        t.warn("everything ok?")
    # completed success -> uppercase 'S'
    assert aggr_emtr.get_entry(-1).startswith('S')
    with log.debug('greet') as t:
        t.failure('bye')
    # explicit failure -> 'F'
    assert aggr_emtr.get_entry(-1).startswith('F')
    try:
        with log.debug('greet') as t:
            raise ZeroDivisionError('narwhalbaconderp')
    except Exception:
        pass
    # unhandled exception -> 'E'
    assert aggr_emtr.get_entry(-1).startswith('E')
    assert 'limit=' in repr(aggr_emtr)
    assert aggr_emtr.get_entries()
    aggr_emtr.clear()
    assert not aggr_emtr.get_entries()
def test_rate_sink():
    # Exercise RateSink rate/count aggregation over 10 actions, half of
    # which raise.  10 actions at ~0.02s each -> roughly 50/sec overall
    # and ~25/sec for the exception bucket.
    # NOTE(review): two more versions of this test with looser, less flaky
    # bounds appear later in this file; the last definition wins at import.
    sink = RateSink()
    logger = Logger('testlog', sinks=[sink])
    for i in range(10):
        with logger.info('sleeping', reraise=False):
            time.sleep(0.02)
            if i % 2:
                raise ValueError()
    test_rates = sink.get_rates()['testlog']['sleeping']
    assert 48 <= round(test_rates['__all__']) <= 51
    assert 24 <= round(test_rates['exception']) <= 26
    counts = sink.get_total_counts()
    assert counts['__all__'] == 10
    assert repr(sink)
def test_callpoint_info():
    """A transaction must record accurate callpoint metadata."""
    logger = Logger('test_logger', [])
    trans = do_debug_trans(logger)
    cp = trans.callpoint
    # the callpoint should pinpoint do_debug_trans inside this module
    assert cp.module_name == __name__
    assert cp.module_path.endswith(__file__)
    assert cp.func_name == 'do_debug_trans'
    assert cp.lineno > 0
    assert cp.lasti > 0
    assert repr(trans)
def test_rate_sink():
    # NOTE(review): second of three `test_rate_sink` definitions in this
    # file; itself shadowed by the third one further down.
    sink = RateSink()
    logger = Logger('testlog', sinks=[sink])
    # 10 actions of ~0.02s each, every other one raising
    for i in range(10):
        with logger.info('sleeping', reraise=False):
            time.sleep(0.02)
            if i % 2:
                raise ValueError()
    test_rates = sink.get_rates()['testlog']['sleeping']
    # get_rates() returns per-second rates for logger invocations. These
    # results are hardware dependent for the lower bound, but the upper bound
    # is well defined because of the hard coded sleep() call above.
    assert 40 <= round(test_rates['__all__']) <= 51
    assert 20 <= round(test_rates['exception']) <= 26
    counts = sink.get_total_counts()
    assert counts['__all__'] == 10
    assert repr(sink)
def test_exception():
    """An exception inside a critical action must reach on_exception sinks."""
    logger = Logger('test_exc_logger')
    seen = []

    class ExcSink(object):
        # minimal sink implementing only the exception hook
        def on_exception(self, event, etype, eobj, etb):
            seen.append(event)

    logger.add_sink(ExcSink())

    try:
        with logger.critical('raise_act'):
            raise RuntimeError('whooops')
    except RuntimeError:
        pass

    # exactly one exception event, carrying the exception type name
    assert len(seen) == 1
    assert seen[0].exc_info.exc_type == 'RuntimeError'
def test_guid():
    """GUIDs must be truthy, lowercase hex, unique, and increasing."""
    import string
    log = Logger('test_logger', [])
    act = do_debug_act(log)
    assert act.guid
    # guid is normalized to lowercase
    assert act.guid.lower() == act.guid
    # generator expression instead of building a throwaway list for all()
    assert all(c in string.hexdigits for c in act.guid)
    act2 = do_debug_act(log)
    assert act.guid != act2.guid
    assert act.guid < act2.guid  # when int2hexguid_seq is in use
def test_rate_sink():
    """Exercise RateSink rate and count aggregation across mixed outcomes."""
    rate_sink = RateSink()
    logger = Logger('testlog', sinks=[rate_sink])

    # 10 actions of ~0.02s each; every other one raises inside the action
    for idx in range(10):
        with logger.info('sleeping', reraise=False):
            time.sleep(0.02)
            if idx % 2:
                raise ValueError()

    rates = rate_sink.get_rates()['testlog']['sleeping']
    # TODO: these are a little flaky, esp when moving between
    # environments, runtimes, and with/without coverage, hence the
    # range
    all_lower_limit = 40 if IS_PYPY else 48
    assert all_lower_limit <= round(rates['__all__']) <= 51
    assert 22 <= round(rates['exception']) <= 26

    totals = rate_sink.get_total_counts()
    assert totals['__all__'] == 10
    assert repr(rate_sink)
def test_comment():
    """Plain comments must be delivered to on_comment sinks."""
    # NOTE: a second `test_comment` defined later in this file shadows
    # this one at import time.
    logger = Logger('test_comment')
    received = []

    class ComSink(object):
        # minimal sink implementing only the comment hook
        def on_comment(self, event):
            received.append(event)

    logger.add_sink(ComSink())
    logger.comment('the first')
    logger.comment('the second')
    assert len(received) == 2
def test_stale_stream(tmpdir):
    # Verify that StreamEmitter transparently recovers when its underlying
    # stream goes stale (ESTALE, e.g. NFS): the emitter should detect the
    # error, reopen the path, and keep emitting without losing entries.
    # NOTE(review): Python 2 only -- subclasses the removed `file` builtin.
    # NOTE(review): duplicate of the `test_stale_stream` defined earlier in
    # this file; this later binding is the one pytest actually runs.

    # make mock filestream with write/flush that goes stale after 100 writes
    # create logger with stream emitter to mocked file stream
    class StalewardFile(file):
        def __init__(self, *a, **kw):
            file.__init__(self, *a, **kw)
            self._write_count = 0  # writes performed so far

        def write(self, *a, **kw):
            self._write_count += 1
            if self._write_count > 100:
                # simulate a stale NFS handle: close and raise ESTALE
                exc = IOError('stale file handle')
                exc.errno = errno.ESTALE
                self.close()
                raise exc
            return file.write(self, *a, **kw)

    file_path = '%s/not_always_fresh.log' % (tmpdir, )
    stale_file_obj = StalewardFile(file_path, 'wb')
    emitter = StreamEmitter(stale_file_obj)
    sink = SensibleSink(SF('{status_char} - {iso_end}'),
                        emitter,
                        filters=[SensibleFilter(success=True)])
    logger = Logger('excelsilog', [sink])

    assert emitter.stream is stale_file_obj

    # 200 writes: the mock goes stale after 100, forcing a mid-run reopen
    for i in range(200):
        logger.info('yay').success()

    lines = open(file_path).read().splitlines()
    # every entry must have been persisted despite the stale handle
    assert len(lines) == 200
    # entries are fixed-width, so first and last should match in length
    assert len(lines[0]) == len(lines[-1])
    # the original handle was closed and replaced by the emitter
    assert stale_file_obj.closed
    assert emitter.stream is not stale_file_obj
def test_async_basic():
    """Smoke-test async mode: comment, warn, and a swallowed exception."""
    ctx = get_context()
    ctx.enable_async()

    logger = Logger('async_basic')
    with logger.critical('test', reraise=False) as act:
        logger.comment('i got a bad feeling about this')
        act.warn('here it comes')
        # deliberate ZeroDivisionError; reraise=False keeps it contained
        1 / 0

    logger.flush()
    ctx.disable_async()
    return
def test_comment():
    """Comments, including templated ones, must reach on_comment sinks."""
    logger = Logger('test_comment')
    received = []

    class ComSink(object):
        # minimal sink implementing only the comment hook
        def on_comment(self, event):
            received.append(event)

    logger.add_sink(ComSink())
    logger.comment('the first')
    # second comment uses positional template formatting
    logger.comment('the second {}', 'commenting')
    assert len(received) == 2
def test_preflush_catching_and_noting():
    # A preflush hook that always fails: the async flusher must catch the
    # RuntimeError and report it through the context's note handlers
    # instead of crashing the flush thread.
    ctx = LithoxylContext()

    def raiser(log):
        raise RuntimeError('never gonna catch me')

    log = Logger('test_logger', context=ctx)
    log.preflush_hooks.append(raiser)

    # collect (name, message) notes emitted by the context
    notes = []

    def add_note(name, message):
        notes.append((name, message))

    ctx.note_handlers.append(add_note)
    ctx.enable_async()
    # give the async flusher time to run the failing hook at least once
    time.sleep(0.3)
    assert notes  # should have at least one note in 300ms
# -*- coding: utf-8 -*- from lithoxyl import DeferredValue from lithoxyl.logger import Logger, Action from lithoxyl.sensible import SensibleFormatter as SF template = ('{status_char}{action_warn_char}{begin_timestamp}' ' - {iso_begin_local} - {iso_begin}' ' - {logger_name} - {status_str} - {action_name}') t_log = Logger('1off') t_riker = Action(t_log, 'DEBUG', 'Riker').success('Hello, Thomas.') ACTS = [t_riker] TCS = [[('{logger_name}', '"1off"'), ('{status_str}', 'success'), ('{level_number}', '20'), ('{action_name}', '"Riker"'), ('{end_message}', '"Hello, Thomas."')]] def test_formatter_basic(): forming = SF(template) output = forming.on_end(t_riker.end_event) expected = '"1off" - success - "Riker"' print(output) assert output[-len(expected):] == expected act = Action(t_log, 'DEBUG', 'Wharf') robf = SF(template) act.success('Mr. Wolf') ret = robf.on_end(act.end_event)
def test_structured(trans_count=5):
    """Run several debug transactions through a structured file sink.

    Args:
        trans_count: number of transactions to perform (default 5).
    """
    acc = SimpleStructuredFileSink()
    log = Logger('test_logger', [acc])
    # loop index was unused; `_` makes that explicit
    for _ in range(trans_count):
        do_debug_trans(log)
def _get_logger():
    # helper: a fresh Logger wired to a single AggregateSink
    return Logger('test_logger', [AggregateSink()])