def test_json_formatter_options():
    """Exercise JsonFormatter's debugging_fields, datefmt and extra_fields options."""
    # debugging_fields=False must drop the file/line/function/pid/thread fields.
    formatter = JsonFormatter(debugging_fields=False)
    rec = makeLogRecord({'name': 'my.package.logger', 'special_field': 10})
    parsed = json.loads(formatter.format(rec))
    assert isinstance(parsed, dict)
    for field in ["file", "line", "_function", "_pid", "_thread_name"]:
        assert field not in parsed
    assert parsed["_special_field"] == 10
    # A custom datefmt controls the timestamp layout (%Y-%m-%d is 10 chars).
    formatter = JsonFormatter(datefmt='%Y-%m-%d',)
    rec = makeLogRecord({'name': 'my.package.logger', 'special_field': 10})
    parsed = json.loads(formatter.format(rec))
    assert len(parsed["timestamp"]) == 10
    # extra_fields=False must not propagate record extras into the payload.
    formatter = JsonFormatter(extra_fields=False)
    rec = makeLogRecord({'name': 'my.package.logger', 'special_field': 10})
    parsed = json.loads(formatter.format(rec))
    assert "_special_field" not in parsed
def test_repeated_event():
    # Python 2 test: consecutive records sharing the same format string are
    # coalesced by Centraloger into one event with a 'repeats' count; a
    # different format string starts a new event.
    logger = Centraloger(_conn)
    logger.logEvent(logging.makeLogRecord({
        'msg': 'first form (%s)',
        'args': ('first',),
        'levelno': logging.DEBUG,
    }))
    logger.logEvent(logging.makeLogRecord({
        'msg': 'second form (%s)',
        'args': ('second',),
        'levelno': logging.DEBUG,
    }))
    # Same format string as the first record: counts as a repeat even though
    # it is not adjacent to the original - TODO confirm against Centraloger.
    logger.logEvent(logging.makeLogRecord({
        'msg': 'first form (%s)',
        'args': ('third, but first',),
        'levelno': logging.DEBUG,
    }))
    evt = logger.getEvent()
    assert evt['repeats'] == 2.0
    assert evt['msg'] == 'first form (%s)'
    evt = logger.getEvent()
    assert evt['repeats'] == 1.0
    assert evt['msg'] == 'second form (%s)'
    # Queue exhausted.
    evt = logger.getEvent()
    assert evt == None
    print sys._getframe(0).f_code.co_name, 'ok.'
def testBadLogMessages(self):
    """Tests log messages with both 8-bit byte strings and unicode."""
    handler = _BasicLogHandler(max_buffer_bytes=100)
    # Byte-string message containing a non-ASCII byte.
    handler.emit(logging.makeLogRecord({'level': logging.INFO, 'msg': '\x80abc'}))
    # Unicode message with the same code point.
    handler.emit(logging.makeLogRecord({'level': logging.INFO, 'msg': u'\x80abc'}))
    handler.flush()
def setUp(self):
    """Create the handler under test plus two canned records at a fixed time."""
    self.handler = cloud_logging.CloudLoggingHandler()
    # Pin time.time so both records carry the expected creation timestamp.
    with mock.patch('time.time', return_value=self.TEST_TIME):
        self.record = logging.makeLogRecord(
            {'msg': self.EXPECTED_MESSAGE, 'levelname': 'INFO'})
        self.record_with_extra = logging.makeLogRecord({
            'msg': self.EXPECTED_MESSAGE,
            'levelname': 'INFO',
            'trace_id': self.EXPECTED_OVERRIDDEN_TRACE_ID,
        })
def test_BaseHandler():
    """serialize() round-trips a record and prefixes the record name with the host."""
    handler = BaseHandler()
    original = logging.makeLogRecord(dict(name='test', lno=20, msg='hello'))
    payload = handler.serialize(original)
    assert isinstance(payload, bytes)
    restored = logging.makeLogRecord(unpack(payload))
    assert restored.name == handler.host + '.' + original.name
    assert restored.msg == original.msg
    # send_bytes is abstract on the base class.
    with pytest.raises(NotImplementedError):
        handler.send_bytes(b'')
def test_emit(self, mock_api, mock_handler):
    """Emitting records registers their titles (first message line) as known issues."""
    mock_api.return_value.status_code = 201
    first = logging.makeLogRecord(
        {'msg': "TestTitle1\nTestBody", 'gh_labels': ["TestLabel"]})
    self.handler.emit(first)
    self.assertIn("TestTitle1", self.handler.known_issues)
    second = logging.makeLogRecord({'msg': "TestTitle2"})
    self.handler.emit(second)
    self.assertIn("TestTitle2", self.handler.known_issues)
def test_sensitive_data_filter():
    """Test the logging sensitive data filter."""
    log_filter = logging_util.HideSensitiveDataFilter('mock_sensitive')
    # A message without the sensitive token passes through untouched.
    record = logging.makeLogRecord({'msg': "clean log data"})
    log_filter.filter(record)
    assert record.msg == "clean log data"
    # The sensitive token is masked with asterisks.
    record = logging.makeLogRecord({'msg': "mock_sensitive log"})
    log_filter.filter(record)
    assert record.msg == "******* log"
def test_memory_handler(self):
    """MemoryHandler stores formatted messages newest-first, filterable by level and count."""
    handler = quorum.MemoryHandler()
    handler.setFormatter(logging.Formatter("%(message)s"))

    # Starts empty.
    latest = handler.get_latest()
    self.assertEqual(len(latest), 0)
    self.assertEqual(latest, [])

    handler.emit(logging.makeLogRecord(dict(
        msg="hello world",
        levelname=logging.getLevelName(logging.INFO),
    )))
    latest = handler.get_latest()
    self.assertEqual(len(latest), 1)
    self.assertEqual(latest, ["hello world"])

    handler.emit(logging.makeLogRecord(dict(
        msg="hello world 2",
        levelname=logging.getLevelName(logging.ERROR),
    )))
    latest = handler.get_latest()
    self.assertEqual(len(latest), 2)
    self.assertEqual(latest, ["hello world 2", "hello world"])

    # Level filtering keeps only records at or above the requested level.
    latest = handler.get_latest(level=logging.ERROR)
    self.assertEqual(len(latest), 1)
    self.assertEqual(latest, ["hello world 2"])

    latest = handler.get_latest(level=logging.CRITICAL)
    self.assertEqual(len(latest), 0)
    self.assertEqual(latest, [])

    latest = handler.get_latest(level=logging.INFO)
    self.assertEqual(len(latest), 2)
    self.assertEqual(latest, ["hello world 2", "hello world"])

    # count caps the number of returned entries.
    latest = handler.get_latest(count=1, level=logging.INFO)
    self.assertEqual(len(latest), 1)
    self.assertEqual(latest, ["hello world 2"])
def handle_read(self):
    # asyncore-style read callback (Python 2): accumulates bytes until a
    # complete frame has arrived.  Frames alternate between a 4-byte
    # big-endian length (when self.rlen == 0) and a pickled LogRecord body
    # of that length.  self.dlen tracks bytes still needed for the current
    # frame; initial state is dlen=4, rlen=0.
    try:
        data = self.recv(self.dlen)
        if len(data) == 0:
            return
    except socket.error as e:
        # Non-blocking socket with nothing to read yet.
        if e[0] in (errno.EWOULDBLOCK, errno.EAGAIN):
            return
    self.data += data
    self.dlen -= len(data)
    if self.dlen > 0:
        # don't have complete record yet. wait for more data to read
        return
    if self.rlen == 0:
        self.dlen = self.rlen = struct.unpack('>L', self.data)[0]
        self.data = ''
        # got record length. now read record
        return
    # got complete record
    # NOTE(review): pickle.loads on network data trusts the peer entirely.
    obj = pickle.loads(self.data)
    record = logging.makeLogRecord(obj)
    # Note: EVERY record gets logged. This is because Logger.handle
    # is normally called AFTER logger-level filtering.
    # Filter (e.g., only WARNING or higher)
    # at the sender to save network bandwidth.
    globalLogger.handle(record)
    # reset for next record
    self.data = ''
    self.rlen = 0
    self.dlen = 4
def _make_log_record(self):
    """Build a minimal LogRecord carrying only an error message."""
    return logging.makeLogRecord({'msg': 'my error'})
def handle(self): """ Handle multiple requests - each expected to be a 4-byte length, followed by the LogRecord in pickle format. Logs the record according to whatever policy is configured locally. """ while True: try: chunk = self.connection.recv(4) if len(chunk) < 4: break slen = struct.unpack(">L", chunk)[0] chunk = self.connection.recv(slen) while len(chunk) < slen: chunk = chunk + self.connection.recv(slen - len(chunk)) obj = pickle.loads(chunk) record = logging.makeLogRecord(obj) self.server.handle_record(record) except socket.error as e: if type(e.args) != tuple: raise else: errcode = e.args[0] if errcode != RESET_ERROR: raise break
def test_oovs_can_nest(self):
    """
    Verify that nesting OrderedObjectValues produces nested dicts in the
    render() return value.
    """
    inner_value = values.OrderedObjectValue([
        ("nested_path", values.RecordValue("pathname")),
    ])
    oov = values.OrderedObjectValue([
        ("test_msg", values.RecordValue("msg")),
        ("nested", inner_value),
    ])
    record = logging.makeLogRecord({
        "msg": "Hi",
        "pathname": "/some/path.py",
    })
    rendered = oov.render(record)
    self.assertEqual(2, len(rendered))
    self.assertEqual(["test_msg", "nested"], rendered.keys())
    inner = rendered["nested"]
    self.assertEqual(1, len(inner))
    self.assertEqual(["nested_path"], inner.keys())
def _copyLogRecord(self, record):
    """Return a shallow copy of *record* stripped of exception and stack info."""
    duplicate = logging.makeLogRecord(record.__dict__)
    duplicate.exc_info = None
    duplicate.exc_text = None
    # stack_info only exists on Python 3 LogRecords.
    if _PY3:
        duplicate.stack_info = None
    return duplicate
def testBatching(self):
    """Tests that the server log writes to object store."""
    handler = _BasicLogHandler(max_buffer_bytes=100)
    handler.emit(logging.makeLogRecord({'level': logging.INFO, 'msg': 'test'}))
    handler.flush()
    self._RunAsync(self._VerifyLog, ['test'])
def testMaxBytesFlush(self):
    """Tests that the server log flushes based on maximum bytes written."""
    handler = _BasicLogHandler(max_buffer_bytes=100)
    # A 400-byte message exceeds the 100-byte buffer, forcing a flush on emit.
    message = 'test' * 100
    handler.emit(logging.makeLogRecord({'level': logging.INFO, 'msg': message}))
    self._RunAsync(self._VerifyLog, [message])
def test_oov_maintains_initial_order(self):
    """render() keeps insertion order and drops entries whose value is None."""
    msg = "This is a msg. foo: %s"
    args = ("bar",)
    now_ts = 1372245551.300383  # time.time() return val
    now_dt = datetime.datetime.fromtimestamp(now_ts)
    path = "/some/path.py",  # trailing comma: this is a 1-tuple, as in the original
    record = logging.makeLogRecord({
        "msg": msg,
        "args": args,
        "created": now_ts,
        "pathname": path,
        "lineno": None,
    })
    oov = values.OrderedObjectValue([
        ("test_msg", values.RecordValue("msg")),
        ("test_lineno", values.RecordValue("lineno")),
        ("test_path", values.RecordValue("pathname")),
        ("test_args", values.RecordValue("args")),
        ("test_formatted_msg", values.FormattedMessageRecordValue()),
    ])
    rendered = oov.render(record)
    self.assertEqual(
        # Note missing test_lineno as it's None
        ["test_msg", "test_path", "test_args", "test_formatted_msg"],
        rendered.keys(),
    )
def _process_msg(self, msg):
    # Dispatch one control-connection message by its "type" field:
    #   "command"   -> invoke a local method, reply with result/exception
    #   "log"       -> replay a remote LogRecord through the local root logger
    #   "exception" -> report and tear down the originating command
    #   "result"    -> forward or buffer a background command's result
    if msg["type"] == "command":
        method = getattr(self._methods, msg["method_name"], None)
        if method != None:
            try:
                result = method(*msg["args"])
            except:
                # Relay the full traceback text to the controller.
                log_exc_traceback()
                type, value, tb = sys.exc_info()
                exc_trace = ''.join(traceback.format_exception(type, value, tb))
                response = {"type": "exception", "Exception": exc_trace}
                self._server_handler.send_data_to_ctl(response)
                return
            if result != None:
                response = {"type": "result", "result": result}
                self._server_handler.send_data_to_ctl(response)
        else:
            err = "Method '%s' not supported." % msg["method_name"]
            response = {"type": "error", "err": err}
            self._server_handler.send_data_to_ctl(response)
    elif msg["type"] == "log":
        # Rebuild the serialized LogRecord and run it through the root logger.
        logger = logging.getLogger()
        record = logging.makeLogRecord(msg["record"])
        logger.handle(record)
    elif msg["type"] == "exception":
        if msg["cmd_id"] != None:
            logging.debug("Recieved an exception from command with id: %s" % msg["cmd_id"])
        else:
            logging.debug("Recieved an exception from foreground command")
        logging.error(msg["Exception"])
        cmd = self._cmd_context.get_cmd(msg["cmd_id"])
        cmd.join()
        self._cmd_context.del_cmd(cmd)
        self._server_handler.send_data_to_ctl(msg)
    elif msg["type"] == "result":
        if msg["cmd_id"] == None:
            # Foreground command: forward the result immediately.
            del msg["cmd_id"]
            self._server_handler.send_data_to_ctl(msg)
            cmd = self._cmd_context.get_cmd(None)
            cmd.join()
            self._cmd_context.del_cmd(cmd)
        else:
            cmd = self._cmd_context.get_cmd(msg["cmd_id"])
            cmd.join()
            del msg["cmd_id"]
            if cmd.finished():
                self._server_handler.send_data_to_ctl(msg)
                self._cmd_context.del_cmd(cmd)
            else:
                # Command still running: stash the result for later.
                cmd.set_result(msg["result"])
    else:
        raise Exception("Recieved unknown command")
    # Refresh the poll set with the read pipes of still-running commands.
    pipes = self._cmd_context.get_read_pipes()
    self._server_handler.update_connections(pipes)
def test_id_field_not_supported():
    """Neither `id` nor `_id` record attributes may leak into the formatted output."""
    record = makeLogRecord({'name': 'my.package.logger'})
    record.id = "custom value"
    record._id = "some other value value"
    handler = CeeSysLogHandler()
    assert '"_id"' not in handler.format(record)
def data_received(self, data):
    # Incremental parser for a framed stream.  Each frame is:
    #   b"RECORD" marker (6 bytes) -> big-endian length (4 bytes) ->
    #   pickled LogRecord attribute dict (length bytes), repeated.
    # self._state tracks which piece is expected next; self._buf is a
    # bytearray accumulator fed by each call.
    self._buf.extend(data)
    while True:
        if self._state == "header":
            if len(self._buf) < 6:
                break
            header = self._buf[:6]
            del self._buf[:6]
            assert header == b"RECORD"
            self._state = "length"
        elif self._state == "length":
            if len(self._buf) < 4:
                break
            packed_length = self._buf[:4]
            del self._buf[:4]
            self._length, = struct.unpack(">L", packed_length)
            self._state = "record"
        elif self._state == "record":
            if len(self._buf) < self._length:
                break
            serialized_record = self._buf[: self._length]
            del self._buf[: self._length]
            # NOTE(review): pickle.loads on network data trusts the peer.
            record_attr = pickle.loads(serialized_record)
            record = logging.makeLogRecord(record_attr)
            self._logger.handle(record)
            self._state = "header"
def test_hs_loghandler_emit_handle_interrupt(hsref):
    """A KeyboardInterrupt raised while logging must propagate out of emit()."""
    hsref.job.logs._writer.closed = False
    hsref.job.logs.log.side_effect = KeyboardInterrupt
    handler = HubstorageLogHandler()
    record = logging.makeLogRecord({'msg': 'test-record'})
    with pytest.raises(KeyboardInterrupt):
        handler.emit(record)
def test_hs_loghandler_emit_ok(hsref):
    """emit() forwards the record message to the hubstorage log writer."""
    hsref.job.logs._writer.closed = False
    handler = HubstorageLogHandler()
    handler.emit(logging.makeLogRecord({'msg': 'test-record'}))
    assert hsref.job.logs.log.called
    assert hsref.job.logs.log.call_args[0] == ('test-record',)
def test_tenant_context_filter_blank_domain_url(self, mock_connection):
    """The filter annotates records with the schema name and an empty domain URL."""
    tenant_filter = log.TenantContextFilter()
    record = logging.makeLogRecord({})
    outcome = tenant_filter.filter(record)
    self.assertEqual(outcome, True)
    self.assertEqual(record.schema_name, 'context')
    self.assertEqual(record.domain_url, '')
def udpreader (dummy1 = "", dummy2=""):
    # Python 2 only (unicode/cPickle/'string_escape').  Drain one datagram
    # from the control UDP socket and dispatch it: known ACK strings are
    # swallowed, NUL-prefixed pickled LogRecords are re-logged, '#'-prefixed
    # pickled stats dicts update module globals, anything else is stored in
    # udpinmsgs and warned about.  Returns False when the non-blocking
    # socket has nothing to read.
    global udpin, udpout, UDP_IP, UDP_PORT, udpinmsgs, stats, statsdtime, laststatstime
    try:
        data, addr = udpin.recvfrom(8192)#buffer size
        # log.debug(u"DATA IS:{} const is:{}".format(data[0:3].encode('string_escape'),("\x00\x00\x01\xd1".decode('string_escape')).encode('string_escape')))
        if data[0:20] == "command done:msg was" :
            return True
        if data[0:14] == "UART<OK TRACKS" :
            return True
        if data[0:13] == "UART<OK SERVO" :
            return True
        if data[0:2] == "\x00\x00".decode('string_escape') :
            # Pickled LogRecord: skip the 4-byte prefix, then unpickle.
            rec = logging.makeLogRecord(cPickle.loads(data[4:]))
            rec.msg = unicode(rec.msg.encode('string_escape'), 'utf-8')
            log.handle(rec)
            return True
        elif data[0] == "#" :
            # found stats in flow
            stats = cPickle.loads (data[1:])
            statsdtime = time.time() - laststatstime
            laststatstime = time.time()
            stats['size'] = len (data)
            #udpinmsgs.append(str(cPickle.loads (data[1:])))
            return True
        # Unknown payload: remember it and warn.
        udpinmsgs.append(str(data))
        log.warn (u"Unhandled packet from {}:{}".format(addr,data.encode('string_escape')))
        return str(data)
    except socket.error:
        # Non-blocking socket had nothing to read.
        return False
def sender(self):
    # Background worker: pulls serialized record dicts off self.queue,
    # formats them and POSTs each one to Loggly, retrying the same message
    # forever on failure (5 s gevent back-off between attempts).
    while True:
        obj = self.queue.get()
        qsize = self.queue.qsize()
        # Warn at every 100-message step once the queue backs up past 100.
        if qsize > 100 and qsize % 100 == 0:
            logger.error("Queue has over %d messages", qsize)
        record = logging.makeLogRecord(obj)
        data = self.formatter.format(record, serialize=False)
        tags = data.pop('tags', [])
        # urlopen wants str on Python 2 and bytes on Python 3.
        if sys.version_info < (3, 0):
            payload = json.dumps(data)
        else:
            payload = bytes(json.dumps(data), 'utf-8')
        log_data = "PLAINTEXT=" + quote(payload)
        url = "http://logs-01.loggly.com/inputs/%s/tag/%s/" % (self.loggly_token, ','.join(tags))
        while True:
            try:
                urlopen(url, log_data)
                break
            except Exception as exc:
                logging.error('Can\'t send message to %s: %s', url, exc)
                gevent.sleep(5)
                continue
def test_writes_to_database(self):
    """emit() persists the record message as a ProcessingError for the dataset."""
    message = "Foo message"
    self.assertFalse(
        ProcessingError.objects.filter(message=message, dataset=self.d1).exists())
    handler = DatabaseLogHandler(dataset=self.d1)
    handler.emit(logging.makeLogRecord({"msg": message}))
    self.assertTrue(
        ProcessingError.objects.filter(message=message, dataset=self.d1).exists())
def post(self):
    # Python 2 (iteritems).  Rebuild a LogRecord attribute dict from the
    # POSTed form arguments: each value list is joined, then run through
    # self._extract (presumably decoding/unescaping - confirm in its def).
    args = dict(
        [(k, self._extract(''.join(v)))
         for (k, v) in self.request.arguments.iteritems()]
    )
    # Tag the record with the client's IP address.
    args['remoteIP'] = self.request.remote_ip
    record = logging.makeLogRecord(args)
    logger.handle(record)
def test_valid(self):
    """A JSON-encoded log record is persisted as a LogEntry with all fields intact."""
    formatter = logging.Formatter('%(asctime)s: %(levelname)s %(message)s')
    message = "There is a problem."
    uri = "/some/random/uri"
    status_code = 400
    username = '******'
    body = json.dumps({
        'message': message,
        'uri': uri,
        'status_code': status_code,
        'username': username,
    })
    log_record = logging.makeLogRecord({
        'name': 'huxley.server',
        'level': 10,
        'fn': '',
        'lno': '',
        'msg': body,
        'args': (),
        'exc_info': None,
    })
    handler = DatabaseHandler()
    handler.formatter = formatter
    handler.emit(log_record)
    log_entry = LogEntry.objects.get(id=1)
    self.assertEqual(log_entry.level, log_record.levelname)
    self.assertEqual(log_entry.message, message)
    self.assertEqual(
        log_entry.timestamp,
        datetime.datetime.strptime(log_record.asctime, "%Y-%m-%d %H:%M:%S,%f"))
    self.assertEqual(log_entry.uri, uri)
    self.assertEqual(log_entry.status_code, status_code)
    self.assertEqual(log_entry.username, username)
def test_queries_cli2(client):
    # Python 2 (xrange).  End-to-end check: emit records through
    # DatabaseLogHandler, then run the dblogger.query CLI against the same
    # backing store and assert it exits cleanly with some output.
    dbhandler = DatabaseLogHandler(client)
    created_list = []
    for i in xrange(2, 10):
        # Spread creation times into the future so the --begin filter matches.
        created = time.time() + (2 ** i)
        created_list.append(created)
        xdict = dict(created=created, msg="test %d" % created)
        record = logging.makeLogRecord(xdict)
        dbhandler.emit(record)
    child = subprocess.Popen(
        [sys.executable, '-m', 'dblogger.query',
         '--app-name', 'dbltest',
         '--namespace', client._config['namespace'],
         '--storage-type', client._config['storage_type'],
         '--storage-address', client._config['storage_addresses'][0],
         '--begin', '1998-01-03T08',
         ],
        stderr=subprocess.PIPE,
        stdout=subprocess.PIPE,
    )
    child.wait()
    out = child.stdout.read()
    err = child.stderr.read()
    # Surface stderr in the assertion message on failure.
    assert child.returncode == 0, err
    assert out
def testTimeoutFlush(self):
    """Tests that the server log flushes after maximum flush interval."""
    handler = _BasicLogHandler(flush_interval_secs=0.100)
    handler.emit(logging.makeLogRecord({'level': logging.INFO, 'msg': 'test'}))
    # Wait a little past the flush interval before verifying.
    self._RunAsync(self.io_loop.add_timeout, time.time() + 0.150)
    self._RunAsync(self._VerifyLog, ['test'])
def log_entries(name, agent, pid, level, stream):
    # Generator translating a subprocess output stream into log output.
    # JSON-object lines are rebuilt into LogRecords and handled directly
    # (yielding nothing); lines with a syslog-style "<N>" prefix yield
    # (mapped level, text); everything else yields (default level, text).
    log = logging.getLogger(name)
    extra = {'processName': agent, 'process': pid}
    for line in (l.rstrip('\r\n') for l in stream):
        if line[0:1] == '{' and line[-1:] == '}':
            try:
                obj = jsonapi.loads(line)
                try:
                    # JSON arrays arrive as lists; LogRecord formatting
                    # expects an args tuple.
                    obj['args'] = tuple(obj['args'])
                except (KeyError, TypeError, ValueError):
                    pass
                record = logging.makeLogRecord(obj)
            except Exception:
                # Not valid JSON after all - fall through to plain handling.
                pass
            else:
                # Respect the effective level of the record's own logger when
                # it already exists, otherwise of the destination logger.
                if record.name in log.manager.loggerDict:
                    if not logging.getLogger(
                            record.name).isEnabledFor(record.levelno):
                        continue
                elif not log.isEnabledFor(record.levelno):
                    continue
                # Remember the remote origin, then re-home the record.
                record.remote_name, record.name = record.name, name
                record.__dict__.update(extra)
                log.handle(record)
                continue
        if line[0:1] == '<' and line[2:3] == '>' and line[1:2].isdigit():
            yield _level_map.get(int(line[1]), level), line[3:]
        else:
            yield level, line
def _handle_serialized_writes(self, obj):
    """Handle records that must be serialized to the main process

    This is currently records that are written to a file on disk and
    those sent to Sentry.
    """
    # exc_info and args were dill-pickled by the sending process; restore them.
    if obj["exc_info"]:
        obj["exc_info"] = dill.loads(obj["exc_info"])
    if obj["args"]:
        obj["args"] = dill.loads(obj["args"])
    record = logging.makeLogRecord(obj)
    self._file_handler.emit(record)
    if self._sentry_dsn:
        # Route to the Sentry breadcrumb/event handlers by record severity.
        if record.levelno >= self._breadcrumb_handler.level:
            self._breadcrumb_handler.handle(record)
        if record.levelno >= self._event_handler.level:
            self._event_handler.handle(record)
def read_logs(self):
    # Python 2 (print statement, cPickle).  Receive pickled LogRecords over
    # UDP and re-log those at or above self.level.  Datagram layout: 4-byte
    # big-endian length prefix (unpacked but unused - the datagram boundary
    # already delimits the payload) followed by the pickled attribute dict.
    try:
        while True:
            datagram = self.socket.recv(8192)
            chunk = datagram[0:4]
            struct.unpack(">L", chunk)[0]
            chunk = datagram[4:]
            obj = cPickle.loads(chunk)
            record = logging.makeLogRecord(obj)
            if (record.levelno >= self.level):
                logger = logging.getLogger(record.name)
                logger.handle(record)
    except Exception as e:
        print "ERROR: " + str(e)
    finally:
        self.socket.close()
def handle(self): """ Handle multiple requests - each expected to be a 4-byte length, followed by the LogRecord in pickle format. Logs the record according to whatever policy is configured locally. """ while True: chunk = self.connection.recv(CHUNK_LENGTH) if len(chunk) < EXPECTED_LENGTH: break slen, *_ = struct.unpack(STRUCT_FORMAT_STRING, chunk) chunk = self.connection.recv(slen) while len(chunk) < slen: chunk = chunk + self.connection.recv(slen - len(chunk)) obj = self.un_pickle(chunk) record = logging.makeLogRecord(obj) self.handle_log_record(record)
def handle(self):
    # Serve one connection: repeatedly read a 4-byte big-endian length,
    # then that many bytes of pickled LogRecord attributes, and log them.
    # Any error is printed and the loop continues with the next frame.
    while True:
        try:
            chunk = self.connection.recv(4)
            if len(chunk) < 4:
                break
            slen = struct.unpack(">L", chunk)[0]
            chunk = self.connection.recv(slen)
            while len(chunk) < slen:
                chunk = chunk + self.connection.recv(slen - len(chunk))
            obj = self.unPickle(chunk)
            # Packets produced by SocketHandler must be unpacked back into a
            # LogRecord - see the SocketHandler documentation.
            record = logging.makeLogRecord(obj)
            self.handleLogRecord(record)
        except Exception as e:
            print(repr(e))
def handle(self): """ Handle multiple requests - each expected to be a 4-byte length, followed by the LogRecord in pickle format. """ while True: chunk = self.connection.recv(4) if len(chunk) < 4: break slen = struct.unpack(">L", chunk)[0] chunk = self.connection.recv(slen) while len(chunk) < slen: chunk = chunk + self.connection.recv(slen - len(chunk)) record = logging.makeLogRecord(pickle.loads(chunk)) logging.getLogger(record.name).handle(record)
def handle(self): """ Handle multiple requests - each expected to be a 4-byte length, followed by the LogRecord in pickle format. Logs the record according to whatever policy is configured locally. """ while 1: chunk = self.connection.recv(4) if len(chunk) < 4: break slen = struct.unpack(">L", chunk)[0] chunk = self.connection.recv(slen) while len(chunk) < slen: chunk = chunk + self.connection.recv(slen - len(chunk)) obj = self.un_pickle(chunk) record = logging.makeLogRecord(obj) self.handle_log_record(record)
def test_format_with_extra(self):
    """Extra record attributes land in 'payload'; standard fields at top level."""
    formatter = LogstashFormatter(environment='develop')
    log_message = 'Log entry message'
    record = logging.makeLogRecord({
        'msg': log_message,
        'extra_field': 'Extra Field',
    })
    serialized = formatter.format(record)
    message = json.loads(serialized)
    payload = message['payload']
    self.assertTrue('payload' in message)
    self.assertEqual(payload['extra_field'], 'Extra Field')
    self.assertEqual(message['message'], log_message)
    self.assertEqual(message['environment'], 'develop')
def format(self, record):
    """Format *record* with the class's colour/style escape codes applied."""
    # Work on a copy so the original record stays intact for other handlers.
    record = logging.makeLogRecord(record.__dict__)
    text = record.msg
    for token_re, repl in self.TOKEN_FORMATTING:
        text = token_re.sub(repl, text)
    record.msg = text
    # Expose the formatting codes as record attributes for the format string.
    record.reset = self.RESET
    record.bold = self.BOLD
    record.boldOff = self.BOLDOFF
    record.italic = self.ITALIC
    record.italicOff = self.ITALICOFF
    record.underline = self.UNDERLINE
    record.underlineOff = self.UNDERLINEOFF
    # Per-level colour, empty when the level has no mapping.
    record.levelCol = ""
    if record.levelname in self.LEVELCOL:
        record.levelCol = self.LEVELCOL[record.levelname]
    return super(ColorFormatter, self).format(record)
def handle_msg(self, json_event):
    """Decode a JSON event and route it to the logger named by its 'who' field."""
    try:
        event = json.loads(json_event)
        who = event.get('who', None)
        if not who:
            raise Exception(
                "No LogRecord.who field, raw: {}".format(event))
        if who not in self.loggers:
            raise Exception(
                "Unknown LogRecord.who field: {}, raw event: {}".format(
                    who, event))
        self.loggers[who].handle(logging.makeLogRecord(event))
    except Exception as err:
        # Any failure (bad JSON, missing/unknown source) is reported, not raised.
        self.main_logger.exception(err)
def handle(self): """ Handle multiple requests - each expected to be a 4-byte length, followed by the LogRecord in pickle format. Logs the record according to whatever policy is configured locally. """ while True: try: chunk = self.connection.recv(4) if len(chunk) < 4: break slen = struct.unpack(">L", chunk)[0] chunk = self.connection.recv(slen) while len(chunk) < slen: chunk = chunk + self.connection.recv(slen - len(chunk)) recordDict = cPickle.loads(chunk) record = logging.makeLogRecord(recordDict) # the msg in the record is always a string now, see # http://bugs.python.org/issue14436 # this will attempt to force it to a dictionary try: msg = ast.literal_eval(record.msg) self.statsThread.addRecord(msg) timeDict = msg['time'] if timeDict['total'] > LOG_THRESHOLD: logMsg = 'Processed ' + msg['request'] + ' on ' + msg['file'] + '. Timing entries in seconds: ' addComma=False for SECTION in self.SECTION_KEYS: timeKey=SECTION.strip() if timeDict.has_key(timeKey): if addComma: logMsg += ',' else: addComma = True logMsg += ' ' + timeKey + ' ' + ('%.3f' % timeDict[timeKey]) record.msg = logMsg self.handleLogRecord(record) except SyntaxError: # probably was just a string, we have a record, let's log it self.handleLogRecord(record) except Exception, e: print "Unhandled exception in logProcess" import sys, traceback, string t, v, tb = sys.exc_info() print string.join(traceback.format_exception(t, v, tb))
def handle(self):
    """Read framed, pickled LogRecords from the open socket and log each one."""
    while True:
        # '>L' format stands for big-endian (>) unsigned long (L)
        prefix = self.connection.recv(4)
        if len(prefix) < 4:
            break
        slen = struct.unpack('>L', prefix)[0]
        payload = self.connection.recv(slen)
        while len(payload) < slen:
            payload = payload + self.connection.recv(slen - len(payload))
        record = logging.makeLogRecord(pickle.loads(payload))
        self.handleRecord(record)
def handle(self):
    """Receive length-prefixed pickled LogRecords and dispatch them to their logger."""
    try:
        data = ''
        while True:
            data = self.connection.recv(4)
            if len(data) < 4:
                break
            pickle_len = struct.unpack('>L', data)[0]
            data = self.connection.recv(pickle_len)
            while len(data) < pickle_len:
                data = data + self.connection.recv(pickle_len - len(data))
            record = logging.makeLogRecord(pickle.loads(data))
            utils.getLogger(record.name).handle(record)
    except:
        # Report whatever partial data we had when the failure occurred.
        logger = utils.getLogger()
        logger.exception("Error receiving log record: data: %s", data)
def parse_record(line: str):
    """Parse one ``"<time> - <LEVEL> - <name>: <message>"`` line into a LogRecord.

    Returns a LogRecord with the standard ``levelno``/``levelname``/``name``/
    ``msg`` attributes populated (so ``record.getMessage()`` and level-based
    filtering work), or ``None`` when the line does not match the layout.
    """
    parts = line.split(' - ', 2)
    if len(parts) == 3:
        time_, levelname, name_msg = parts
        sub_parts = name_msg.split(': ', 1)
        if len(sub_parts) == 2:
            name, message = sub_parts
            level = logging._nameToLevel.get(levelname)
            if level is not None:
                message = message.strip()
                # BUG FIX: the previous keys 'level'/'message' are not
                # LogRecord attribute names, so levelno stayed unset and
                # getMessage() returned "".  Use the standard names, keeping
                # the old ad-hoc keys for backward compatibility.
                attrdict = {
                    'levelno': level,
                    'levelname': levelname,
                    'name': name,
                    'msg': message,
                    'level': level,
                    'message': message,
                }
                return logging.makeLogRecord(attrdict)
    return None
def handle(self):
    '''
    Handle multiple requests - each expected to be a 4 byte length
    followed by the LogRecord in pickle format. Logs the record
    according to whatever policy is configured locally
    '''
    while True:
        chunk = self.connection.recv(4)
        # BUG FIX: recv(4) returns at most 4 bytes, so the previous test
        # "len(chunk) < 5" was always true and broke out of the loop
        # immediately, silently dropping every record.  A short read
        # (fewer than 4 bytes) is the real end-of-stream condition.
        if len(chunk) < 4:
            break
        slen = struct.unpack('>L', chunk)[0]
        chunk = self.connection.recv(slen)
        while len(chunk) < slen:
            chunk = chunk + self.connection.recv(slen - len(chunk))
        obj = self.unPickle(chunk)
        record = logging.makeLogRecord(obj)
        self.handleLogRecord(record)
def handle(self):
    """Read 4-byte-length-prefixed pickled records from the socket and log them."""
    while True:
        header = self.connection.recv(4)
        if len(header) < 4:
            break
        body_len = struct.unpack('>L', header)[0]
        body = self.connection.recv(body_len)
        while len(body) < body_len:
            body = body + self.connection.recv(body_len - len(body))
        record = logging.makeLogRecord(self.unPickle(body))
        self.handleLogRecord(record)
def test_color_handler():
    import logging
    from ldap2pg.config import ColoredStreamHandler

    # A formatted debug record must contain an ANSI escape sequence.
    handler = ColoredStreamHandler()
    record = logging.makeLogRecord(dict(
        name='pouet',
        level=logging.DEBUG,
        fn="(unknown file)",
        msg="Message",
        lno=0,
        args=(),
        exc_info=None,
    ))
    assert "\033[0" in handler.format(record)
def handle(self) -> None:
    """
    Handle multiple requests - each expected to be a 4-byte length,
    followed by the LogRecord in pickle format. Logs the record
    according to whatever policy is configured locally.
    """
    while True:
        header = self.connection.recv(4)  # type: ignore[attr-defined]
        if len(header) < 4:
            break
        payload_len = struct.unpack('>L', header)[0]
        payload = self.connection.recv(payload_len)  # type: ignore[attr-defined]
        while len(payload) < payload_len:
            payload = payload + self.connection.recv(payload_len - len(payload))  # type: ignore[attr-defined]  # noqa: E501
        record = logging.makeLogRecord(self.unPickle(payload))
        self.handleLogRecord(record)
def test_handler_respects_proxy():
    """The proxies passed to the handler are forwarded to requests.post."""
    proxies = {
        'http': 'http_proxy_sample',
        'https': 'https_proxy_sample',
    }
    handler = telegram_handler.handlers.TelegramHandler(
        'foo', 'bar', level=logging.INFO, proxies=proxies)
    record = logging.makeLogRecord({'msg': 'hello'})
    with mock.patch('requests.post') as patch:
        handler.emit(record)
    assert patch.call_args[1]['proxies'] == proxies
def daemon(log_queue):
    """Consume serialized record dicts from *log_queue* until a None sentinel.

    Each dict is rebuilt into a LogRecord and handled by its named logger
    when that logger's level permits.  Interpreter-exit signals propagate,
    EOF ends the loop, and any other error is logged and the loop continues.
    """
    while True:
        try:
            payload = log_queue.get()
            if payload is None:
                break
            record = logging.makeLogRecord(payload)
            target = logging.getLogger(record.name)
            if target.isEnabledFor(record.levelno):
                target.handle(record)
        except (KeyboardInterrupt, SystemExit):
            raise
        except EOFError:
            break
        except:
            logging.exception('Error in log handler.')
def format(self, record): """ Extract ``structlog``'s `event_dict` from ``record.msg`` and format it. """ # Make a shallow copy of the record to let other handlers/formatters # process the original one record = logging.makeLogRecord(record.__dict__) try: # Both attached by wrap_for_formatter logger = record._logger meth_name = record._name # We need to copy because it's possible that the same record gets # processed by multiple logging formatters. LogRecord.getMessage # would transform our dict into a str. ed = record.msg.copy() except AttributeError: logger = None meth_name = record.levelname.lower() ed = {"event": record.getMessage(), "_record": record} record.args = () # Add stack-related attributes to event_dict and unset them # on the record copy so that the base implementation wouldn't # append stacktraces to the output. if record.exc_info: ed["exc_info"] = record.exc_info if PY3 and record.stack_info: ed["stack_info"] = record.stack_info if not self.keep_exc_info: record.exc_text = None record.exc_info = None if not self.keep_stack_info: record.stack_info = None # Non-structlog allows to run through a chain to prepare it for the # final processor (e.g. adding timestamps and log levels). for proc in self.foreign_pre_chain or (): ed = proc(None, meth_name, ed) del ed["_record"] record.msg = self.processor(logger, meth_name, ed) return super(ProcessorFormatter, self).format(record)
def read(self, connection):
    """Callback for read events"""
    # Reads one 4-byte-length-prefixed message from a slave connection and
    # dispatches it: action messages run on a worker thread, anything else
    # is treated as a serialized LogRecord.  An empty read or socket error
    # tears the connection down and (on clean loss) notifies listeners.
    try:
        raw_msglen = connection.recv(4)
        if raw_msglen:
            # A readable client socket has data
            msglen = struct.unpack('>I', raw_msglen)[0]
            data = recvall(connection, msglen)
            action, args = actionSerializer.deserialize(data)
            if action:
                # Control message: run the handler on its own thread so the
                # read loop is not blocked.
                worker = threading.Thread(
                    target=self._interpret_message,
                    args=(action, args, connection))
                worker.start()
            else:
                # Not an action message - trying to decode as log message
                record = logging.makeLogRecord(args)
                try:
                    self.slave_log_handlers[connection.getpeername()[0]].handle(record)
                except KeyError:
                    self.logger.debug(
                        "Got log message from yet unhandled slave socket logger"
                    )
                    pass
        else:
            # Handle uncontrolled connection loss
            hostname = self.port_mapping.get(connection)
            self.send_queues.pop(connection)
            self.sel.unregister(connection)
            self.logger.error("Connection to client %s was lost!" % hostname)
            self.notify_queue.put(
                events.SlaveDisconnectEvent(hostname, connection.getpeername()[1]))
            connection.close()
    except socket.error as e:
        self.logger.error(
            "Something went wrong while receiving a message. Check debug for more information"
        )
        self.logger.debug("Socket excpetion: %s" % e)
        # Drop all bookkeeping for the broken connection.
        self.send_queues.pop(connection)
        self.sel.unregister(connection)
        connection.close()
def handle(self): """ Handle messages coming in over self.connection. Messages are 4-byte-length-prefixed JSON-encoded logging module records. """ while True: # Loop until we run out of messages # Parse the length length_data = self.rfile.read(4) if len(length_data) < 4: # The connection was closed, or we didn't get enough data # TODO: complain? break # Actually parse the length length = struct.unpack(">L", length_data)[0] # This is where we'll put the received message message_parts = [] length_received = 0 while length_received < length: # Keep trying to get enough data part = self.rfile.read(length - length_received) length_received += len(part) message_parts.append(part) # Stitch it all together message = "".join(message_parts) try: # Parse it as JSON message_attrs = json.loads(message) # Fluff it up into a proper logging record record = logging.makeLogRecord(message_attrs) except: logging.error("Malformed record") # TODO: do log level filtering logging.getLogger("remote").handle(record)
def pytest_sessionstart(session):
    # Pytest hook: set up per-session instrumentation logging.  Generates a
    # unique session id, builds session/node-id filters, and - depending on
    # the --instrument option - installs JSON and/or plain-text file handlers
    # on the "instr.log" logger.  All state is stashed on
    # session.config.instrument for later hooks.
    session_id = str(uuid.uuid4())
    session_id_filter = SessionIdFilter(session_id)
    node_id_filter = NodeIdFilter(node_id=None)
    log_handlers = []
    if session.config.getoption("instrument") is not None:
        current_timestamp = datetime.now().strftime("%Y%m%dT%H%M%S")
        if "json" in session.config.getoption("instrument"):
            # Timestamped file name, disambiguated by the session id prefix.
            base_filename = f"{current_timestamp}_{session_id[:8]}"
            log_handler_json = setup_log_file_handler(base_filename, "json")
            log_handler_json.addFilter(session_id_filter)
            log_handler_json.addFilter(node_id_filter)
            log_handlers.append(log_handler_json)
        if "log" in session.config.getoption("instrument"):
            base_filename = f"{current_timestamp}_{session_id[:8]}"
            log_handler_plain = setup_log_file_handler(base_filename, "log")
            log_handlers.append(log_handler_plain)
            # add record with session id to plain log file
            record = {
                "name": "instr.report",
                "node_id": "",
                "levelname": logging.getLevelName(logging.INFO),
                "levelno": logging.INFO,
                "msg": f"session id: {session_id}",
            }
            log_record = logging.makeLogRecord(record)
            log_handler_plain.emit(log_record)
    else:
        # Instrumentation disabled: swallow records silently.
        log_handlers.append(logging.NullHandler())
    logger = logging.getLogger("instr.log")
    logger.setLevel("DEBUG")
    for handler in log_handlers:
        logger.addHandler(handler)
    session.config.instrument = {
        "session_id": session_id,
        "logger": logger,
        "logfile_handler": log_handlers,
        "node_id_filter": node_id_filter,
    }
def test_log_args_error(log_queue, log_proto, log_handler):
    """A record whose %-args don't match its format string still round-trips."""
    record = logging.makeLogRecord({
        'levelno': logging.INFO,
        'levelname': 'INFO',
        'name': 'tests.test_event.test_logging',
        'msg': u'asdf %d %d',
        # 'foo' cannot satisfy %d, so formatting must degrade, not crash.
        'args': ('foo', 573),
    })
    log_handler.emit(record)
    assert len(log_queue.q) == 1
    restored = log_proto.deserialize(log_queue.get())
    msg = restored.getMessage()
    print(repr(msg))
    assert 'asdf' in msg
    assert 'foo' in msg
    assert '573' in msg
def _handleLogRecord(obj):
    """ Handle log, logs everything sent. Should filter client-side """
    if len(obj) == 2 and obj[0] == 'EXT':
        # Log message came from browser extension: requires special handling
        attrs = json.loads(obj[1])
        record = logging.LogRecord(
            name=__name__,
            level=attrs['level'],
            pathname=attrs['pathname'],
            lineno=attrs['lineno'],
            msg=attrs['msg'],
            args=attrs['args'],
            exc_info=attrs['exc_info'],
            func=attrs['func'])
    else:
        record = logging.makeLogRecord(obj)
    logging.getLogger(record.name).handle(record)
def test_redacting_filter():
    """IPv4 addresses matching the filter regex are replaced in emitted messages."""
    regex = r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}"
    replace_string = "-#sensitive#-"
    string_to_be_redacted = "172.24.41.42"
    record = makeLogRecord({
        "name": "my.package.logger",
        "msg": "Connect by IP 172.24.41.42",
    })
    handler = CollectingNamedCeeLogger(_DUMMY_HOST, _DUMMY_PROTOCOL, "myname")
    handler.addFilter(
        RegexRedactFilter(filter_regex=regex, replace_string=replace_string))
    handler.handle(record)
    assert len(handler.emitted_records) == 1
    emitted = handler.emitted_records[0].getMessage()
    assert string_to_be_redacted not in emitted
    assert replace_string in emitted
def test_logs_formatted_message_as_text(self, mock_dd):
    """The Datadog event text is the message plus the formatted traceback."""
    handler = DatadogLogHandler()
    exc_info = make_exc_info()
    record = logging.makeLogRecord({
        "msg": "Some message",
        "exc_info": exc_info,
    })
    traceback_text = "".join(traceback.format_exception(*exc_info))
    expected_text = "\n".join(["Some message", traceback_text]).rstrip("\n")
    handler.emit(record)
    mock_dd.api.Event.create.assert_called_with(
        title="Some message", text=expected_text)
def dataReceived(self, data):
    # Streaming parser: alternates between reading a length prefix
    # (self.__blen bytes, big-endian '>L') and a pickled LogRecord body of
    # that length, handing each record to the wrapped handler
    # (self.__actual) and flushing it.  self.__len is None while a prefix
    # is expected, otherwise the pending body length.
    self.__buf += data
    # Bytes needed for the next piece.
    length = self.__blen if self.__len is None else self.__len
    while length <= len(self.__buf):
        if self.__len is None:
            self.__len = struct.unpack('>L', self.__buf[:length])[0]
            self.__buf = self.__buf[length:]
        else:
            # NOTE(review): pickle.loads on network data trusts the peer.
            record = pickle.loads(self.__buf[:length])
            self.__actual.handle(logging.makeLogRecord(record))
            self.__actual.flush()
            self.__buf = self.__buf[length:]
            self.__len = None
            pass
        length = self.__blen if self.__len is None else self.__len
        pass
    return
def test_format_with_app_name_and_extra(self):
    """app_name, environment and extra fields all appear in the formatted dict."""
    formater = StackDriverFormatter(
        app_name='stackdriver-app', environment='develop')
    log_message = 'Log entry message'
    record = logging.makeLogRecord({
        'msg': log_message,
        'extra_field': 'Extra Field',
    })
    message = formater.format(record)
    payload = message['payload']
    self.assertTrue('payload' in message)
    self.assertEqual(payload['extra_field'], 'Extra Field')
    self.assertEqual(message['message'], log_message)
    self.assertEqual(message['environment'], 'develop')
    self.assertEqual(message['app_name'], 'stackdriver-app')
def format(self, record):
    """Serialize *record* as JSON, merging task context and non-standard attributes."""
    action = record.__dict__.get("action")
    status = record.__dict__.get("status")
    task = _tasks.get(task_key())
    # Prefer the task id from the active task context over the record's own.
    task_id = task[0] if task else record.__dict__.get("task_id")
    payload = {
        "action": action,
        "task_id": task_id,
        "status": status,
        "time": record.created,
        "level": record.levelname.lower(),
        "message": record.getMessage(),
    }
    # Copy every attribute that a default LogRecord does not already carry.
    baseline = logging.makeLogRecord({})
    for key, value in record.__dict__.items():
        if key not in baseline.__dict__:
            payload[key] = value
    return json.dumps(payload)