def test_ignore_succeeded_none(self):
    """A 'succeeded ... None' task log line is filtered out; other lines pass."""
    name, level, pathname, lineno = "name", 1, "/", 2
    ignore_check_url = IgnoreSucceededNone(name)
    task_msg = (
        "Task safe_transaction_service.history.tasks.index_internal_"
        "txs_task[89ad3c46-aeb3-48a1-bd6f-2f3684323ca8] succeeded in "
        "1.0970600529108196s: None"
    )
    task_log = LogRecord(name, level, pathname, lineno, task_msg,
                         args=(), exc_info=())
    other_log = LogRecord(name, level, pathname, lineno, "Not a task log",
                          args=(), exc_info=())
    # The matching "succeeded ... None" record is dropped by the filter...
    self.assertFalse(ignore_check_url.filter(task_log))
    # ...while any other record is kept.
    self.assertTrue(ignore_check_url.filter(other_log))
def test_render_max_lines(self):
    """Only the most recent `max_records` records survive in the rendering."""
    self.logger.max_records = 1
    for msg in ("hi", "yo"):
        self.logger.emit(LogRecord("root", INFO, None, None, msg, None, None))
    # With a cap of 1, only the newest record ("yo") is rendered.
    self.assertEqual("2016-11-05 21:57:03,220 [root:INFO] yo", self.logger.text)
def test_render_many(self):
    """Multiple records render as newline-separated lines."""
    for msg in ("hi", "yo"):
        self.logger.emit(LogRecord("root", INFO, None, None, msg, None, None))
    expected = [
        "2016-11-05 21:57:03,220 [root:INFO] hi",
        "2016-11-05 21:57:03,220 [root:INFO] yo",
    ]
    self.assertEqual(expected, self.logger.text.strip().split("\n"))
def test_render_timestamp(self):
    """Each record is rendered with its own emission timestamp."""
    record_args = ("root", INFO, None, None, "hi", None, None)
    self.logger.emit(LogRecord(*record_args))
    # One-second pause so the second record carries a later timestamp
    # (presumably a frozen/stepped clock in the fixture — see expected values).
    time.sleep(1)
    self.logger.emit(LogRecord(*record_args))
    self.assertEqual(
        [
            "2016-11-05 21:57:03,220 [root:INFO] hi",
            "2016-11-05 21:57:04,220 [root:INFO] hi",
        ],
        self.logger.text.strip().split("\n"),
    )
def close(self):
    """Flush withheld records, read the log file back, and mail it as a summary.

    Safe to call more than once; only the first call does any work.
    """
    if self.closed:
        return
    self.closed = True
    if self.message_count > self.flood_level:
        # More records arrived than the flood level allowed through; `tail`
        # holds the retained trailing records, `hidden` the count in between.
        hidden = self.message_count - self.flood_level - len(self.tail)
        if hidden:
            # send critical error
            FileHandler.emit(
                self,
                LogRecord(name='flood',
                          level=CRITICAL,
                          pathname='',
                          lineno=0,
                          msg=flood_template % (self.message_count -
                                                self.flood_level -
                                                len(self.tail),
                                                self.flood_level),
                          args=(),
                          exc_info=None))
        # Replay the retained tail after the flood notice.
        for record in self.tail:
            FileHandler.emit(self, record)
    FileHandler.close(self)
    if PY2:
        # Python 2: file reads return bytes; decode with the configured charset.
        f = os.fdopen(self.fd)
        summary = f.read().decode(self.charset)
    else:
        # Python 3: open the descriptor as text with the right encoding.
        # NOTE(review): self.fd is presumably a dup of the handler's
        # descriptor, rewound elsewhere — confirm against the constructor.
        f = open(self.fd, encoding=self.charset)
        summary = f.read()
    f.close()
    try:
        # Prefer plain ASCII mail when the summary allows it.
        encoded_summary = summary.encode('ascii')
        self.mailer.charset = 'ascii'
    except UnicodeEncodeError:
        pass
    else:
        if PY2:
            # On PY2 the mailer expects the encoded bytes in this case.
            summary = encoded_summary
    if os.path.exists(self.filename):
        os.remove(self.filename)
    # Only mail if something at or above send_level was logged (or no threshold).
    if self.send_level is None or self.maxlevelno >= self.send_level:
        self.mailer.handle(
            LogRecord(name='Summary',
                      level=self.maxlevelno,
                      pathname='',
                      lineno=0,
                      msg=summary,
                      args=(),
                      exc_info=None))
def test_log_formatter_scrapy_2():
    """The log formatter substitutes the original request into every log kind."""
    middleware = get_test_middleware()
    logformatter = CrawleraFetchLogFormatter()
    formatter = Formatter()
    spider = Spider("foo")

    def render(result, lineno):
        # Build a stdlib record from the logformatter result and format it.
        record = LogRecord(name="logger", pathname="n/a", lineno=lineno,
                           exc_info=None, **result)
        return formatter.format(record)

    for case in deepcopy(test_requests):
        original = case["original"]
        response = Response(original.url)
        processed = middleware.process_request(original, spider)
        crawlera_meta = original.meta.get("crawlera_fetch") or {}
        if crawlera_meta.get("skip"):
            assert processed is None
            continue

        # crawled
        result = logformatter.crawled(processed, response, spider)
        assert result["args"]["request"] == str(original)
        assert render(result, 1) == "Crawled (200) %s (referer: None)" % str(original)

        # spider_error
        result = logformatter.spider_error(
            Failure(Exception("exc")), processed, response, spider)
        assert result["args"]["request"] == str(original)
        assert render(result, 1) == "Spider error processing %s (referer: None)" % str(
            original)

        # download_error
        result = logformatter.download_error(
            Failure(Exception("exc")), processed, spider, "foo")
        assert result["args"]["request"] == str(original)
        assert render(result, 2) == "Error downloading %s: foo" % str(original)
def test_level(self, mocker):
    """Records below the handler's threshold must not trigger an HTTP call."""
    from logging import LogRecord, ERROR

    requests = mocker.patch("opsgenie_logger.requests")

    # Level-0 record against a numeric threshold of 1: filtered, no POST.
    handler = OpsGenieHandler("", "", 1)
    handler.emit(LogRecord("", 0, "", 0, "", [], None))
    requests.post.assert_not_called()

    # ERROR record against a named "WARNING" threshold: passes, one POST.
    handler = OpsGenieHandler("", "", "WARNING")
    handler.emit(LogRecord("", ERROR, "", 0, "", [], None))
    requests.post.assert_called_once()
def close(self):
    """Replay withheld records, then mail the accumulated log as one summary.

    NOTE(review): `self.closed` is only set at the very end, so a re-entrant
    call during the mail step could run twice — confirm single-threaded use.
    """
    if self.closed:
        return
    if self.message_count > self.flood_level:
        # `tail` holds the retained trailing records; `hidden` counts the
        # records suppressed between the flood level and the tail.
        hidden = self.message_count - self.flood_level - len(self.tail)
        if hidden:
            # send critical error
            FileHandler.emit(self, LogRecord(
                name='flood',
                level=CRITICAL,
                pathname='',
                lineno=0,
                msg=flood_template % (
                    self.message_count - self.flood_level - len(self.tail),
                    self.flood_level
                ),
                args=(),
                exc_info=None
            ))
        # Replay the retained tail after the flood notice.
        for record in self.tail:
            FileHandler.emit(self, record)
    FileHandler.close(self)
    # Read the log file back via its descriptor (Python-2-style bytes read).
    f = os.fdopen(self.fd)
    summary = f.read().decode(self.charset)
    f.close()
    # try and encode in ascii, to keep emails simpler:
    try:
        summary = summary.encode('ascii')
    except UnicodeEncodeError:
        # unicode it is then
        pass
    if os.path.exists(self.filename):
        os.remove(self.filename)
    # Only mail if something at or above send_level was logged (or no threshold).
    if self.send_level is None or self.maxlevelno >= self.send_level:
        self.mailer.handle(
            LogRecord(
                name='Summary',
                level=self.maxlevelno,
                pathname='',
                lineno=0,
                msg=summary,
                args=(),
                exc_info=None
            )
        )
    self.closed = True
def test_warning() -> None:
    """WARNING renders with a yellow level prefix and keeps the message text."""
    formatted = formatter.format(
        LogRecord("n", WARNING, "p", 1, "warn-message", (), None)
    )
    # yellow WARNING
    assert formatted.startswith("\x1b[33mWARNING\x1b[0m")
    # message inside
    assert "warn-message" in formatted
def test_error() -> None:
    """ERROR renders with a red level prefix and keeps the message text."""
    formatted = formatter.format(
        LogRecord("n", ERROR, "p", 1, "error-message", (), None)
    )
    # red ERROR
    assert formatted.startswith("\x1b[31mERROR\x1b[0m")
    # message inside
    assert "error-message" in formatted
def test_csh_format_no_highlighter_no_color_support(self):
    """A stream that is not a tty gets plain (uncolored) output even for marks."""
    colorizer = GenericColorizer(color_map={'bracket': ('[', ']')})
    color_stream = MagicMock()
    color_stream.isatty = lambda: False  # pretend the stream can't do color
    handler = ColorizingStreamHandler(stream=color_stream, colorizer=colorizer)
    handler.setFormatter(ColorizingFormatter(fmt='%(message)s'))

    record = LogRecord(
        name='my_record',
        level=DEBUG,
        pathname='my_path',
        lineno=42,
        msg='%s + %s gives %s',
        args=(4, 5, Mark(4 + 5, color_tag='bracket')),
        exc_info=None,
    )
    # No tty => the Mark renders as plain text, without bracket decorations.
    self.assertEqual('4 + 5 gives 9', handler.format(record))
    # Make sure that the colorizer attribute was removed after processing.
    self.assertFalse(hasattr(record, 'colorizer'))
def safe_print(text):
    """
    Sometimes printing large strings when running in a Docker container
    triggers exceptions. This function just wraps a print in a try/except
    block to not crash ViperMonkey when this happens.
    """
    text = str(text)
    try:
        print(text)
    except Exception as print_err:
        err_msg = ("ERROR: Printing text failed (len text = " +
                   str(len(text)) + ". " + str(print_err))
        if (len(err_msg) > 100):
            err_msg = err_msg[:100]
        # Best effort only: even printing the error report may fail.
        try:
            print(err_msg)
        except:
            pass
    # if our logger has a FileHandler, we need to tee this print to a file as well
    for handler in log.handlers:
        if type(handler) in (FileHandler, CappedFileHandler):
            # set the format to be like a print, not a log, then set it back
            handler.setFormatter(logging.Formatter("%(message)s"))
            handler.emit(LogRecord(log.name, logging.INFO, "", None, text,
                                   None, None, "safe_print"))
            handler.setFormatter(
                logging.Formatter("%(levelname)-8s %(message)s"))
def test_emit_shorter_than_limit(self):
    """A message under the byte limit goes out whole as one syslog datagram."""
    handler = SyslogHandler()
    handler.maximum_length = 500
    handler.overflow = SyslogHandler.OVERFLOW_BEHAVIOR_FRAGMENT
    handler.formatter = Formatter('foo_file: %(name)s %(levelname)s %(message)s')

    record = LogRecord(
        name='bar_service',
        level=WARNING,
        pathname='/path/to/file.py',
        lineno=122,
        msg='This is a fairly short message',
        args=(),
        exc_info=None,
    )

    with mock.patch.object(handler, '_send') as mock_send:
        handler.emit(record)

    # Expected datagram: <PRI> prefix + formatted message + NUL terminator.
    expected_priority = '<{:d}>'.format(
        handler.encodePriority(
            handler.facility, handler.mapPriority(record.levelname)
        ),
    ).encode('utf-8')
    mock_send.assert_called_once_with([
        expected_priority +
        b'foo_file: bar_service WARNING This is a fairly short message\000',
    ])
def test_logging_logstash_formatter_version_1():
    """The v1 logstash formatter serialises a record into the expected JSON."""
    log_record = LogRecord(
        name="test_logging_ColoredFormatter_format",
        level=1,
        pathname="test/logging",
        lineno=1,
        msg="hello world",
        args=["args1", "args2"],
        exc_info=None,
    )
    log_record.created = 1_400_000_000  # pin the timestamp for a stable result

    serialized = LogstashFormatterVersion1().format(log_record).decode()
    expected = {
        "@timestamp": "2014-05-13T16:53:20.000Z",
        "@version": "1",
        "message": "hello world",
        "host": f"{socket.gethostname()}",
        "path": "test/logging",
        "tags": [],
        "type": "Logstash",
        "level": "Level 1",
        "logger_name": "test_logging_ColoredFormatter_format",
        "stack_info": None,
    }
    assert ujson.loads(serialized) == expected
def test_logging_async_wrapper_handler():
    """emit() enqueues records in order and the queue drains them FIFO."""
    async_wrapper = AsyncWrapperHandler(handler=HandlerCheck)
    assert async_wrapper.queue.maxsize == MAX_QUEUE_SIZE
    assert not HandlerCheck.check_data

    # Stop the listener so emitted records stay queued for inspection.
    async_wrapper.listener.stop()
    time.sleep(2)

    emitted = []
    for _ in range(80):
        record = LogRecord(
            name="test_logging_ColoredFormatter_format",
            level=1,
            pathname="test/logging",
            lineno=1,
            msg="hello world",
            args=["args1", "args2"],
            exc_info=None,
        )
        record.created = 1_400_000_000
        emitted.append(record)
        async_wrapper.emit(record)

    drained = [async_wrapper.listener.dequeue(False) for _ in range(80)]

    assert async_wrapper.queue.empty()
    assert emitted == drained
def close(self):
    """Read the temporary log file back and hand it to the mailer as a summary.

    Safe against repeated calls via the `closed` flag (set on completion).
    """
    if self.closed:
        return
    FileHandler.close(self)
    # Reopen the written log file via its descriptor.
    # NOTE(review): bytes-read + .decode() implies a Python-2-era file object;
    # confirm the surrounding code targets that runtime.
    f = os.fdopen(self.fd)
    summary = f.read().decode(self.charset)
    f.close()
    # try and encode in ascii, to keep emails simpler:
    try:
        summary = summary.encode('ascii')
    except UnicodeEncodeError:
        # unicode it is then
        pass
    if os.path.exists(self.filename):
        os.remove(self.filename)
    # Only mail if something at or above send_level was logged (or no threshold).
    if self.send_level is None or self.maxlevelno >= self.send_level:
        self.mailer.handle(
            LogRecord(name='Summary',
                      level=self.maxlevelno,
                      pathname='',
                      lineno=0,
                      msg=summary,
                      args=(),
                      exc_info=None))
    self.closed = True
async def test_emit_does_rollover_if_should_rollover(self):
    """Concurrent emits trigger exactly one rollover while one is pending."""
    handler = BaseAsyncRotatingFileHandler(filename=self.temp_file.name)
    handler.should_rollover = Mock(return_value=True)

    async def rollover_is_done():
        """
        sleep is needed so that the loop can schedule the other
        `do_rollover` coroutines, simulating a real `do_rollover` behaviour
        """
        await asyncio.sleep(0.1)
        handler.should_rollover.return_value = False

    handler.do_rollover = CoroutineMock(side_effect=rollover_is_done)

    def make_record(index):
        # One distinct record per concurrent emit.
        return LogRecord(
            name=str(index),
            level=20,
            pathname="/aiologger/tests/test_logger.py",
            lineno=17,
            msg="Xablau!",
            exc_info=None,
            args=None,
        )

    await asyncio.gather(*(handler.emit(make_record(i)) for i in range(42)))

    handler.do_rollover.assert_awaited_once()
def test_colorizing_formatter_with_a_colorizer(self):
    """When a colorizer rides on the record, every format arg gets colorized."""
    record = LogRecord(
        name='my_record',
        level=DEBUG,
        pathname='my_path',
        lineno=42,
        msg='%s + %s gives %s',
        args=(4, 5, 4 + 5),
        exc_info=None,
    )
    setattr(
        record,
        ColorizingStreamHandler._RECORD_ATTRIBUTE_NAME,
        self.create_colorizer(format='[%s]'),
    )

    formatter = ColorizingFormatter(fmt='%(message)s')
    self.assertEqual('[4] + [5] gives [9]', formatter.format(record))

    # The colorizer must have been consulted for each of the three args.
    colorizer = getattr(
        record,
        ColorizingStreamHandler._RECORD_ATTRIBUTE_NAME,
    )
    for value in (4, 5, 9):
        colorizer.colorize.assert_any_call(value, context_color_tag=None)
def on_data(self, data):
    """
    Log stream data.

    Strips surrounding whitespace from the payload and drops
    whitespace-only payloads (keep-alive newlines) entirely.
    """
    # Skip keep-alive newlines: the guard must test the *stripped* payload.
    # The previous check (`len(data) > 0`) passed for a bare "\n" heartbeat,
    # so keep-alive newlines were emitted despite the stated intent.
    data_stripped = data.strip()
    if len(data_stripped) > 0:
        record = LogRecord(None, None, None, None, data_stripped, (), None)
        self._logger.emit(record)
def test_render_one(self):
    """A single record renders as one line with no separator inserted."""
    record = LogRecord("root", INFO, None, None, "hi", None, None)
    self.logger.emit(record)
    self.assertEqual("2016-11-05 21:57:03,220 [root:INFO] hi",
                     self.logger.text)
def patched_refresh(
    tqdm_self: "tqdm[Any]",
    nolock: bool = False,
    lock_args: Optional[Any] = None,
) -> bool:
    """Refresh the bar as usual, then mirror its rendered state into the log."""
    result = orig_refresh(tqdm_self, nolock, lock_args)
    # Render the meter at a fixed 80-column width so log lines are stable.
    fixed_width = cast(
        Mapping[str, object], dict(tqdm_self.format_dict, ncols=80)
    )
    rendered = tqdm_self.format_meter(**fixed_width)
    self.emit(
        LogRecord(
            name="tqdm.std",
            level=DEBUG,
            pathname=getfile(tqdm),
            lineno=-1,
            msg=rendered,
            args=(),
            exc_info=None,
        )
    )
    return result
def test_gcdt_formatter_warning(capsys):
    """WARNING records are rendered yellow with a 'WARNING: ' prefix."""
    record = LogRecord('gcdt.kumo_main', logging.WARNING,
                       './test_gcdt_logging.py', 26, 'warning message',
                       None, None)
    expected = Fore.YELLOW + 'WARNING: warning message' + Fore.RESET
    assert GcdtFormatter().format(record) == expected
def emit(self, record):
    """Send the record by email, with hourly flood protection.

    The counter `self.sent` resets each wall-clock hour; once it reaches
    `self.flood_level` the record is replaced with a critical flood notice,
    and further records that hour are silently discarded.
    """
    msg = record.getMessage()
    if not self.send_empty_entries and not msg.strip():
        # Skip whitespace-only messages unless explicitly configured otherwise.
        return
    current_time = self.now()
    current_hour = current_time.hour
    if current_hour != self.hour:
        # New hour: reset the flood counter.
        self.hour = current_hour
        self.sent = 0
    if self.sent == self.flood_level:
        # send critical error,
        record = LogRecord(name='flood',
                           level=CRITICAL,
                           pathname='',
                           lineno=0,
                           msg=flood_template % (self.sent,
                                                 current_time.strftime('%H:%M:%S'),
                                                 current_hour + 1),
                           args=(),
                           exc_info=None)
    elif self.flood_level and self.sent > self.flood_level:
        # do nothing, we've sent too many emails already
        return
    self.sent += 1
    # actually send the mail,
    try:
        msg = self.format(record)
        if self.template is not None:
            msg = self.template % msg
        subtype = self.content_type.split('/')[-1]
        # `unicode` here implies a Python-2 runtime: attach a charset only
        # for unicode bodies.
        if isinstance(msg, unicode):
            email = MIMEText(msg, subtype, self.charset)
        else:
            email = MIMEText(msg, subtype)
        for header, value in self.headers.items():
            email[header] = value
        email['Subject'] = self.getSubject(record)
        email['From'] = self.fromaddr
        email['To'] = ', '.join(self.toaddrs)
        email['X-Mailer'] = x_mailer
        email['X-Log-Level'] = record.levelname
        email['Date'] = formatdate()
        email['Message-ID'] = make_msgid('MailingLogger')
        smtp = smtplib.SMTP(self.mailhost, self.mailport)
        # Start TLS when either the configured port or a port embedded in
        # the host string is a known submission port.
        secureports = [587]
        try:
            port = int(self.mailhost.split(':')[1])
        except:
            # NOTE(review): bare except — any parse failure means "no port
            # in host string"; deliberate best-effort, kept as-is.
            port = None
        if (self.mailport in secureports) or (port in secureports):
            smtp.starttls()
        if self.username and self.password:
            smtp.login(self.username, self.password)
        smtp.sendmail(self.fromaddr, self.toaddrs, email.as_string())
        smtp.quit()
    except:
        # Delegate any failure (format or SMTP) to logging's error handler.
        self.handleError(record)
def test_stacktrace(self, mock_session):
    """A record with exc_info posts a payload whose message has the traceback."""
    handler = handlers.LogglyHTTPSHandler('abcd-123', 'python')
    try:
        raise ValueError()
    except:
        exc_info = sys.exc_info()

    record = LogRecord('twyla.logging', ERROR, '/path/to/logging.py', 11,
                       'The text message', args=[], exc_info=exc_info,
                       func='dostuff')
    handler.emit(record)

    assert mock_session.post.call_count == 1
    _, _, kwargs = mock_session.post.mock_calls[0]
    data = json.loads(kwargs['data'])

    # Pop the volatile timing fields, checking them against the record.
    assert data.pop('timestamp') == record.asctime
    assert data.pop('time') == record.msecs
    assert data.pop('logRecordCreationTime') == record.created

    # First line is the message itself; the formatted traceback follows.
    message_lines = data.pop('message').split('\n')
    assert message_lines[0] == 'The text message'
    assert message_lines[1] == 'Traceback (most recent call last):'

    assert data == {'loggerName': 'twyla.logging',
                    'fileName': 'logging.py',
                    'functionName': 'dostuff',
                    'levelNo': ERROR,
                    'lineNo': 11,
                    'levelName': 'ERROR'}
def test_logging_handler_emit_error(capsys, elasticapm_client):
    """A failure inside _emit is caught at top level and reported on stderr."""
    handler = LoggingHandler(elasticapm_client)
    handler._emit = lambda: 1 / 0  # force an exception during emit
    handler.emit(LogRecord("x", 1, "/ab/c/", 10, "Oops", [], None))
    captured = capsys.readouterr()
    assert "Top level ElasticAPM exception caught" in captured.err
    assert "Oops" in captured.err
def makeRecord(self, name, level, fn, lno, msg, args, exc_info,
               func=None, extra=None, sinfo=None):
    """
    A factory method which can be overridden in subclasses to create
    specialized LogRecords.

    When ``extra`` is supplied, its keys are copied onto the record —
    refusing to overwrite reserved ("message"/"asctime") or existing
    attributes — and the record's ``type`` attribute is set to ``"run"``.
    """
    rv = LogRecord(name, level, fn, lno, msg, args, exc_info, func)
    if extra is not None:
        for key in extra:
            if (key in ["message", "asctime"]) or (key in rv.__dict__):
                raise KeyError("Attempt to overwrite %r in LogRecord" % key)
            rv.__dict__[key] = extra[key]
        # The original `if "type" not in extra: ... else: ...` assigned the
        # same value on both branches — dead code.  Behavior is preserved:
        # "type" is always "run" here, even when extra supplies its own
        # "type" (which this assignment overwrites, as before).
        rv.__dict__["type"] = "run"
    return rv
def test_logging_handler_emit_error(capsys, elasticapm_client):
    """Errors raised while emitting are swallowed and surfaced via stderr."""
    handler = LoggingHandler(elasticapm_client)
    handler._emit = lambda: 1 / 0  # any call raises ZeroDivisionError
    record = LogRecord('x', 1, '/ab/c/', 10, 'Oops', [], None)
    handler.emit(record)
    out, err = capsys.readouterr()
    for expected in ('Top level ElasticAPM exception caught', 'Oops'):
        assert expected in err
def emit(self, record):
    """
    Emit the specified log record.

    Provides the following:
      - Replace newlines with spaces per syslog RFCs.
      - Emit stack traces in following log records.

    :param record: A log record.
    :type record: LogRecord
    """
    # Clean the message in place and drop the (already-merged) args.
    message = record.getMessage()
    record.msg = LogHandler.clean(message)
    record.args = tuple()

    # The cleaned record goes out first; if exception info is attached,
    # each traceback line follows as its own record.
    to_emit = [record]
    if record.exc_info:
        traceback_text = self.formatter.formatException(record.exc_info)
        for line in traceback_text.split('\n'):
            to_emit.append(
                LogRecord(
                    name=record.name,
                    level=record.levelno,
                    pathname=record.pathname,
                    lineno=record.lineno,
                    msg=line,
                    args=tuple(),
                    exc_info=None))
        record.exc_info = None

    for outgoing in to_emit:
        SysLogHandler.emit(self, outgoing)
def test_log_formatter_scrapy_1():
    """Crawled-log output includes the original URL annotation."""
    middleware = get_test_middleware()
    logformatter = CrawleraFetchLogFormatter()
    formatter = Formatter()

    for case in get_test_requests():
        original = case["original"]
        response = Response(original.url)
        processed = middleware.process_request(original, foo_spider)

        crawlera_meta = original.meta.get("crawlera_fetch") or {}
        if crawlera_meta.get("skip"):
            # Skipped requests never produce a processed request.
            assert processed is None
            continue

        # crawled
        result = logformatter.crawled(processed, response, foo_spider)
        assert result["args"]["request"] == str(original)
        record = LogRecord(name="logger", pathname="n/a", lineno=1,
                           exc_info=None, **result)
        formatted = formatter.format(record)
        expected = "Crawled (200) {request} ['original url: {url}'] (referer: None)".format(
            request=original, url=original.url)
        assert formatted == expected
def test_ident(self):
    """The handler forwards the record with the process basename as ident."""
    record = LogRecord(
        'foo.bar', logging.DEBUG, '/foo/bar.py', 42,
        'Something Happened!', [], None, 'quux'
    )

    with mock.patch('logging.handlers.SysLogHandler.emit') as emit:
        SysLogHandler().emit(record)

    self.assertEquals(emit.call_count, 1)
    call_args = emit.call_args_list[0][0]
    self.assertEquals(len(call_args), 1)
    self.assertEquals(call_args[0], record)
    # The ident should be the running script's basename.
    self.assertEquals(call_args[0].ident, os.path.basename(sys.argv[0]))