def test_logbook_handler_emit_error(capsys, elasticapm_client):
    """An exception raised inside the handler's own ``_emit`` must be caught
    and reported on stderr rather than propagate to the caller."""
    handler = LogbookHandler(elasticapm_client)
    # Replace the internal emit with one that always raises ZeroDivisionError.
    handler._emit = lambda: 1 / 0
    handler.emit(LogRecord("x", 1, "Oops"))
    captured = capsys.readouterr()
    assert "Top level ElasticAPM exception caught" in captured.err
    assert "Oops" in captured.err
def test_formatter_with_file_handler(monkeypatch):
    """LogstashFormatter must serialise a LogRecord/FileHandler pair into the
    expected logstash JSON envelope (@fields / @handler / @timestamp / ...)."""
    record_kwargs = {
        'channel': 'formatter.test',
        'level': 2,
        'msg': 'My test log message',
        'args': None,
        'kwargs': None,
        'exc_info': None,
        'extra': None,
        'frame': None,
        'dispatcher': None,
    }
    handler_kwargs = {
        'filename': '/tmp/bogus.log',
        'mode': 'a',
        'encoding': 'utf-8',
        'level': 0,
        'format_string': None,
        'delay': False,
        'filter': None,
        'bubble': False,
    }
    # This date fixture translates to '2014-12-05T14:12:49.303830Z'
    expected_timestamp = FROZEN_DATETIME.strftime('%Y-%m-%dT%H:%M:%S.%fZ')
    # Freeze datetime.datetime so the formatter produces the fixture timestamp.
    monkeypatch.setattr(datetime, 'datetime', MockedDate)

    formatter = LogstashFormatter()
    serialised = formatter(
        record=LogRecord(**record_kwargs),
        handler=FileHandler(**handler_kwargs),
    )

    expected = {
        '@fields': {
            'extra': {},
            'level': record_kwargs['level'],
            'process': None,
            'frame': None,
            'args': [],
            'kwargs': {},
            '_dispatcher': record_kwargs['dispatcher'],
            'channel': record_kwargs['channel'],
        },
        '@handler': {
            'level': handler_kwargs['level'],
            '_filename': handler_kwargs['filename'],
            '_mode': handler_kwargs['mode'],
            'filter': handler_kwargs['filter'],
            'bubble': handler_kwargs['bubble'],
            'encoding': handler_kwargs['encoding'],
        },
        '@timestamp': expected_timestamp,
        '@source_host': 'localhost',
        '@message': record_kwargs['msg'],
    }
    assert json.loads(serialised) == expected
async def run(self):
    """
    Run the subscribing task to continuously receive and emit log messages.

    This can be run in the background of the event loop using
    [`create_task`](https://docs.python.org/3/library/asyncio-eventloop.html#asyncio.AbstractEventLoop.create_task).
    """
    while True:
        try:
            # Messages are newline-delimited on the stream.
            msg = await self._rx.readuntil(b"\n")
        except IncompleteReadError as e:
            # `expected is None` means the stream reached EOF while waiting
            # for the delimiter — the peer closed cleanly, so stop the loop.
            if e.expected is None:
                break
            raise
        # Each line carries a JSON-serialised LogRecord dict; rebuild the
        # record and hand it to the wrapped logger.
        self._logger.handle(LogRecord.from_dict(json.loads(msg.decode())))
def emit(self, record: logbook.LogRecord):
    """Snapshot the record's deferred information and enqueue it for
    asynchronous handling via the internal queue."""
    # Trigger the cached properties here — pull_information() resolves the
    # record's lazily-computed fields before it crosses the queue boundary.
    record.pull_information()
    self.queue.put_nowait(QueueLogMessage.from_record(record))
def test_logbook_handler_dont_emit_elasticapm(capsys, elasticapm_client):
    """Records on the ``elasticapm.errors`` channel must not be re-captured
    by the handler; they still end up on stderr."""
    handler = LogbookHandler(elasticapm_client)
    record = LogRecord("elasticapm.errors", 1, "Oops")
    handler.emit(record)
    captured = capsys.readouterr()
    assert "Oops" in captured.err
def test_logbook_handler_dont_emit_zuqa(capsys, zuqa_client):
    """Records on the ``zuqa.errors`` channel must not be re-captured by the
    handler; they still end up on stderr."""
    handler = LogbookHandler(zuqa_client)
    record = LogRecord("zuqa.errors", 1, "Oops")
    handler.emit(record)
    captured = capsys.readouterr()
    assert "Oops" in captured.err
def test_logbook_handler_dont_emit_elasticapm(capsys, elasticapm_client):
    """Verify records on the ``elasticapm.errors`` channel bypass APM capture.

    NOTE(review): this redefines the identically-named test earlier in the
    file, so only this later definition is collected by pytest — consider
    removing one of the two duplicates.
    """
    handler = LogbookHandler(elasticapm_client)
    handler.emit(LogRecord('elasticapm.errors', 1, 'Oops'))
    captured = capsys.readouterr()
    assert 'Oops' in captured.err