def test_run_after_stopped(self):
    """Enqueueing after _stop() must be a no-op: nothing reaches the batch."""
    logger = _Logger('python_logger')
    worker = self._make_one(logger)
    log_record = logging.LogRecord(
        'mylogger', logging.INFO, None, None, 'hello world', None, None)

    worker._start()
    # Wait until the background thread reports it is running.
    while not worker.started:
        time.sleep(1)  # pragma: NO COVER
    worker._stop_timeout = None
    worker._stop()

    worker.enqueue(log_record, 'hello world')
    self.assertFalse(worker.batch.commit_called)
    # Stopping an already-stopped worker must also be harmless.
    worker._stop()
def main():
    """
    Obtain a database connection using get_db, retrieve all rows in the
    users table, and display each row with its PII fields redacted.
    """
    db = get_db()
    cursor = db.cursor()
    cursor.execute("SELECT * FROM users;")
    result = cursor.fetchall()
    # One formatter for every row; it redacts the fields named in PII_FIELDS.
    formatter = RedactingFormatter(PII_FIELDS)
    for row in result:
        message = f"name={row[0]}; " + \
            f"email={row[1]}; " + \
            f"phone={row[2]}; " + \
            f"ssn={row[3]}; " + \
            f"password={row[4]};"
        log_record = logging.LogRecord(
            "my_logger", logging.INFO, None, None, message, None, None)
        # BUG FIX: the original printed the raw message — leaking ssn and
        # password in clear text — and then discarded the formatter's
        # redacted output.  Print the redacted line instead, matching the
        # docstring's "filtered format" contract.
        print(formatter.format(log_record))
    cursor.close()
    db.close()
def test_record_none_exc_info(self):
    """A record whose exc_info is (None, None, None) must still be captured.

    sys.exc_info can return (None, None, None) if no exception is being
    handled anywhere on the stack. See:
    http://docs.python.org/library/sys.html#sys.exc_info
    """
    record = logging.LogRecord(
        'foo',
        logging.INFO,
        pathname=None,
        lineno=None,
        msg='test',
        args=(),
        exc_info=(None, None, None),
    )
    handler = SentryHandler()
    handler.emit(record)

    # FIX: assertEquals is a deprecated alias (removed in Python 3.12);
    # use assertEqual.
    self.assertEqual(len(self.raven.events), 1)
    event = self.raven.events.pop(0)
    self.assertEqual(event['message'], 'test')
def test_emit_prepends_pid_if_too_long(self, super_emit):
    """Test emit() with a message that is longer than the maximum
    allowable length: it must be split and each part prefixed with the
    PID/TID tag."""
    with mock.patch('pulp.server.logs.CompliantSysLogHandler._log_id') as _log_id:
        _log_id.return_value = "PID-TID-"
        handler = logs.CompliantSysLogHandler(
            '/dev/log', facility=logs.CompliantSysLogHandler.LOG_DAEMON)
        handler.setFormatter(
            logging.Formatter('pulp: %(name)s:%(levelname)s: %(message)s'))
        record = logging.LogRecord(
            name='pulp.test.module',
            level=logging.INFO,
            pathname='/some/path',
            lineno=527,
            msg='This %(message)s is too long for a single line.',
            args=({'message': 'message'},),
            exc_info=None,
            func='some_function')

        handler.emit(record)

        emitted = [call[1][1].msg for call in super_emit.mock_calls]
        self.assertEqual(
            ['PID-TID-This message is too long ', 'PID-TID-for a single line.'],
            emitted)
def dataProvider_testFilter(self):
    """Yield (theme, levels, leaks, record, msg, expected) cases for testFilter.

    All cases share one INFO-level record; only the level selectors and
    the bracketed message tag vary.
    """
    shared_record = logging.LogRecord(
        name='noname',
        pathname=__file__,
        args=None,
        exc_info=None,
        level=logging.INFO,
        lineno=10,
        msg='',
    )
    cases = (
        ('', False, None, shared_record, '[INFO]', False),
        ('', False, None, shared_record, '[_INFO]', False),
        ('', 'ONLY', None, shared_record, '[DEBUG2]', False),
        ('', 'ONLY', None, shared_record, '[ONLY]', True),
        ('', 'ONLY', None, shared_record, '[PROCESS]', True),
        ('', [], ['ONLY'], shared_record, '[PROCESS]', True),
        ('', None, ['ONLY'], shared_record, '[PROCESS]', False),
        ('', None, ['ONLY'], shared_record, '[_a]', True),
        ('', 'all', None, shared_record, '[debug]', True),
        ('', 'nodebug', None, shared_record, '[debug]', False),
    )
    for case in cases:
        yield case
def test_format(self):
    """format() must surface record metadata as key=value fragments."""
    # NOTE: "fisrt" typo is intentional test data, kept verbatim.
    text = "This is the fisrt test's test"
    record = logging.LogRecord(
        name='foo',
        level=logging.ERROR,
        pathname='/x/y/z.py',
        lineno=321,
        msg=text,
        args=(),
        exc_info=None
    )

    result = HumioKVFormatter().format(record)

    for fragment in (
        'name=foo',
        'asctime=',
        'args=None',
        'levelno={}'.format(logging.ERROR),
        'msg={!r}'.format(text),
        'formattedMessage={!r}'.format(text),
    ):
        self.assertIn(fragment, result)
def _emit_queue(self):
    """Flush the buffered debug records, marking truncation when full."""
    self._keep_if_debug = self.PRINT
    # When the ring buffer filled to capacity, the oldest record is
    # replaced by a marker so the reader knows output was dropped.
    if len(self._debug) == self._debug.maxlen:
        oldest = self._debug.popleft()
        marker = logging.LogRecord(
            oldest.name,
            oldest.levelno,
            oldest.pathname,
            oldest.lineno,
            "<truncated - see config.log for full output>",
            (),
            None,
        )
        self.emit(marker)
    # Drain the remaining records in FIFO order.
    while True:
        try:
            queued = self._debug.popleft()
        except IndexError:
            break
        self.emit(queued)
    self._keep_if_debug = self.KEEP
def test_format_minimal(self):
    """A record built from all-None fields must format to empty defaults."""
    import json
    import logging

    handler = self._make_one()
    record = logging.LogRecord(None, logging.INFO, None, None, None, None, None)
    record.created = None
    handler.filter(record)
    result = json.loads(handler.format(record))

    defaults = {
        "message": "",
        "logging.googleapis.com/trace": "",
        "logging.googleapis.com/sourceLocation": {},
        "httpRequest": {},
        "logging.googleapis.com/labels": {},
    }
    for key, value in defaults.items():
        self.assertEqual(
            value, result[key], f"expected_payload[{key}] != result[{key}]"
        )
def wrapped(*args, **kwargs):
    # Walks the call stack on every invocation of the wrapped function and
    # logs the trace (at DEBUG) to the wrapped function's module logger,
    # then calls through to the original function.
    name = f.__module__
    logger = logging.getLogger(name)
    level = logging.DEBUG

    frame = inspect.currentframe()
    if frame is None:
        raise Exception("Can't get current frame!")

    # Start from the caller's frame, not this wrapper's.
    s = frame.f_back

    # NOTE(review): ``pathname``, ``linenum`` and ``func_name`` are free
    # variables captured from the enclosing decorator scope, which is not
    # visible in this chunk.
    to_print = [
        "\t%s:%s %s. Args: args=%s, kwargs=%s"
        % (pathname, linenum, func_name, args, kwargs)
    ]
    while s:
        # NOTE(review): ``True or`` short-circuits the module-name filter so
        # EVERY frame is included — looks like leftover debugging; confirm
        # whether the "synapse" restriction was meant to apply.
        if True or s.f_globals["__name__"].startswith("synapse"):
            filename, lineno, function, _, _ = inspect.getframeinfo(s)
            args_string = inspect.formatargvalues(*inspect.getargvalues(s))

            to_print.append(
                "\t%s:%d %s. Args: %s" % (filename, lineno, function, args_string)
            )

        s = s.f_back

    msg = "\nTraceback for %s:\n" % (func_name,) + "\n".join(to_print)

    # NOTE(review): ``lineno`` below is the loop variable from the last
    # visited frame; if the stack walk visited no frames it would be
    # unbound here — verify that cannot happen in practice.
    record = logging.LogRecord(
        name=name,
        level=level,
        pathname=pathname,
        lineno=lineno,
        msg=msg,
        args=tuple(),
        exc_info=None,
    )
    logger.handle(record)

    return f(*args, **kwargs)
def test_format(self):
    """Formatted output must carry message, severity, location and labels,
    and nothing beyond the expected payload."""
    import json
    import logging

    labels = {"default_key": "default-value"}
    handler = self._make_one(labels=labels)

    message = "hello world,嗨 世界"
    pathname = "testpath"
    lineno = 1
    func = "test-function"
    record = logging.LogRecord(
        "loggername", logging.INFO, pathname, lineno, message, None, None,
        func=func)

    handler.filter(record)
    result = json.loads(handler.format(record))

    expected_payload = {
        "message": message,
        "severity": record.levelname,
        "logging.googleapis.com/trace": "",
        "logging.googleapis.com/spanId": "",
        "logging.googleapis.com/sourceLocation": {
            "file": pathname,
            "line": lineno,
            "function": func,
        },
        "httpRequest": {},
        "logging.googleapis.com/labels": labels,
    }
    for key, value in expected_payload.items():
        self.assertEqual(value, result[key])
    # No extra keys may be present in the formatted result.
    self.assertEqual(
        len(expected_payload.keys()),
        len(result.keys()),
        f"result dictionary has unexpected keys: {result.keys()}",
    )
def test_formatter_does_something():
    """The JSON formatter must serialize every documented field, in order."""
    record = logging.LogRecord(name='my.logger', level=logging.INFO,
                               pathname='/opt/thing/file.py', lineno=1337,
                               msg='%s says %r', args=('Alice', 'hi!'),
                               exc_info=None)

    formatted = yajl.JsonFormatter().format(record)

    # Key order matters: the comparison is against json.dumps output.
    level_info = {'name': record.levelname, 'number': record.levelno}
    file_info = {
        'path': record.pathname,
        'filename': record.filename,
        'line': record.lineno,
        'func': record.funcName,
    }
    timestamps = {'abs': record.created, 'rel': record.relativeCreated}
    proc_info = {'id': record.process, 'name': record.processName}
    thread_info = {'id': record.thread, 'name': record.threadName}
    expected = {
        'hostname': socket.getfqdn(),
        'pwd': os.getcwd(),
        'user': getpass.getuser(),
        'name': record.name,
        'module': record.module,
        'level': level_info,
        'file': file_info,
        'timestamp': timestamps,
        'proc': proc_info,
        'thread': thread_info,
        'message': "Alice says 'hi!'"
    }
    assert formatted == json.dumps(expected)
def test_filter_record(self):
    """test adding fields to a standard record"""
    import logging

    filter_obj = self._make_one()
    message = "hello world,嗨 世界"
    expected_location = {
        "line": 1,
        "file": "testpath",
        "function": "test-function",
    }
    record = logging.LogRecord(
        "loggername",
        logging.INFO,
        expected_location["file"],
        expected_location["line"],
        message,
        None,
        None,
        func=expected_location["function"],
    )

    self.assertTrue(filter_obj.filter(record))

    self.assertEqual(record.msg, message)
    self.assertEqual(record._msg_str, message)
    self.assertEqual(record._source_location, expected_location)
    self.assertEqual(record._source_location_str, json.dumps(expected_location))
    # Every optional field defaults to None, with an empty serialization.
    self.assertIsNone(record._resource)
    self.assertIsNone(record._trace)
    self.assertEqual(record._trace_str, "")
    self.assertIsNone(record._span_id)
    self.assertEqual(record._span_id_str, "")
    self.assertIsNone(record._http_request)
    self.assertEqual(record._http_request_str, "{}")
    self.assertIsNone(record._labels)
    self.assertEqual(record._labels_str, "{}")
def test_patroni_logger(self):
    # End-to-end exercise of PatroniLogger: config reload, pre-start
    # queueing, formatter failures, handler-close failures and shutdown
    # behavior under a full queue.
    config = {
        'log': {
            'traceback_level': 'DEBUG',
            'max_queue_size': 5,
            'dir': 'foo',
            'file_size': 4096,
            'file_num': 5,
            'loggers': {
                'foo.bar': 'INFO'
            }
        },
        'restapi': {},
        'postgresql': {'data_dir': 'foo'}
    }
    sys.argv = ['patroni.py']
    # Feed the config to Config() through the environment variable it reads.
    os.environ[Config.PATRONI_CONFIG_VARIABLE] = yaml.dump(config, default_flow_style=False)
    logger = PatroniLogger()
    patroni_config = Config(None)
    logger.reload_config(patroni_config['log'])
    # Logged before start(): the record is queued until the thread runs.
    _LOG.exception('test')
    logger.start()
    # A failing Handler.format must not kill the logging thread.
    with patch.object(logging.Handler, 'format', Mock(side_effect=Exception)):
        logging.error('test')
    self.assertEqual(logger.log_handler.maxBytes, config['log']['file_size'])
    self.assertEqual(logger.log_handler.backupCount, config['log']['file_num'])
    config['log']['level'] = 'DEBUG'
    config['log'].pop('dir')
    # Closing the replaced file handler fails; reload must survive it.
    with patch('logging.Handler.close', Mock(side_effect=Exception)):
        logger.reload_config(config['log'])
        # First makeRecord succeeds, second raises — exercises the
        # error path inside the logging machinery itself.
        with patch.object(logging.Logger, 'makeRecord',
                          Mock(side_effect=[logging.LogRecord('', logging.INFO, '', 0, '', (), None),
                                            Exception])):
            logging.exception('test')
            logging.error('test')
        # A full queue during shutdown must exit rather than hang.
        with patch.object(Queue, 'put_nowait', Mock(side_effect=Full)):
            self.assertRaises(SystemExit, logger.shutdown)
        self.assertRaises(Exception, logger.shutdown)
    self.assertLessEqual(logger.queue_size, 2)  # "Failed to close the old log handler" could be still in the queue
    self.assertEqual(logger.records_lost, 0)
def emit(self, record):
    """
    Emit message to PagerDuty

    :param record: record that needs to be passed to PagerDuty
    :type record: LogRecord
    """
    assert isinstance(record, logging.LogRecord)
    # Attributes of a pristine LogRecord; anything on ``record`` beyond
    # these was attached by the caller (e.g. via ``extra=``).
    default_attr_names = [
        name for name in
        dir(logging.LogRecord(None, None, "", 0, "", (), None, None))
    ]
    extra_details = {
        attr_name: getattr(record, attr_name)
        for attr_name in dir(record)
        if attr_name not in default_attr_names
        and attr_name not in ('incident_key',)
    }
    if record.exc_info:
        # This is an error case - need to add more information about it
        exc_class, exc_args, trace = record.exc_info
        if six.PY3:
            extra_details['error'] = repr(exc_args)
        else:
            # NOTE(review): exc_info's second element is the exception
            # *instance*; ``exc_class(*exc_args)`` relies on unpacking that
            # instance as constructor args — verify this on Python 2.
            extra_details['error'] = repr(exc_class(*exc_args))
    incident_key = self.incident_key
    if incident_key is NOT_PROVIDED:
        incident_key = getattr(
            record,
            'incident_key',
            record.msg,  # Message template is pretty good incident key
        )
    pagerdutyapi.create_trigger(
        service_key=self.service_id,
        incident_key=incident_key,
        message=record.getMessage(),
        details=self.stringify_details(extra_details),
    )
def main():
    """Forward supervisor events to the handler selected by environment.

    SUPERVISOR_LOG_TYPE selects one of three handler flavours:
    syslog, syslog_json or tcp_json.
    """
    log_type = os.environ.get('SUPERVISOR_LOG_TYPE', 'syslog')
    handler = new_handler(log_type)
    if not handler:
        return

    docker_client = docker.Client(
        base_url="tcp://{0}:2375".format(DockerJsonFormatter.HOSTNAME),
        timeout=10)
    docker_cid = os.environ.get('DOCKER_CID', '')
    if docker_cid:
        # Resolve container metadata and the image tag for the formatter.
        DockerJsonFormatter.CONTAINER_ID = docker_cid
        DockerJsonFormatter.CONTAINER_JSON = docker_client.inspect_container(
            DockerJsonFormatter.CONTAINER_ID)
        DockerJsonFormatter.IMAGE_ID = DockerJsonFormatter.CONTAINER_JSON['Image']
        for image in docker_client.images():
            if image['Id'] == DockerJsonFormatter.IMAGE_ID:
                image_repo_tags = image['RepoTags']
                if len(image_repo_tags) > 0:
                    DockerJsonFormatter.IMAGE_TAG = image_repo_tags[0]

    # Translate each supervisor event into a LogRecord and hand it off.
    for event_headers, event_data in supervisor_events(sys.stdin, sys.stdout):
        event = logging.LogRecord(
            name=event_headers['processname'],
            level=logging.INFO,
            pathname=None,
            lineno=0,
            msg=event_data,
            args=(),
            exc_info=None,
        )
        event.process = int(event_headers['pid'])
        event.processName = (event_headers['processname']
                             or os.getenv('SUPERVISOR_PROCESS_NAME', 'unknown'))
        handler.handle(event)
def test_RawDataFormatter_uses_encoder_of_log_record():
    """RawDataFormatter must re-encode decoded text via the record's encoder.

    The raw logger (and its formatter) may receive already-decoded data but
    must produce bytes, so the record carries an encoder alongside the data.
    """
    from moler.config.loggers import RAW_DATA, RawDataFormatter

    binary_msg = b"1 0.000000000 127.0.0.1 \xe2\x86\x92 127.0.0.1 ICMP 98 Echo (ping) request id=0x693b, seq=48/12288, ttl=64"
    decoded_msg = binary_msg.decode(encoding='utf-8')

    record = logging.LogRecord(
        name=None,
        level=RAW_DATA,
        pathname="",
        lineno=0,
        msg=decoded_msg,  # the formatter consumes text, not bytes
        args=(),
        exc_info=None)
    record.encoder = lambda data: data.encode('utf-8')

    raw_msg = RawDataFormatter().format(record=record)

    assert raw_msg == binary_msg
class TestJSONFormatter(object):
    """Tests for JSONFormatter's JSON log-line output."""

    def test_format(self):
        formatter = JSONFormatter("some app")
        eq_("some app", formatter.app_name)

        # Cause an exception so we can capture its exc_info()
        # FIX: ``except ValueError, e`` is Python-2-only syntax (a syntax
        # error on Python 3); use the portable ``as`` form.  The bound name
        # was unused.  Also capture sys.exc_info() INSIDE the except block:
        # on Python 3 the exception is cleared when the block exits, so the
        # original capture-after-the-block would yield (None, None, None).
        try:
            raise ValueError("fake exception")
        except ValueError:
            exception = sys.exc_info()

        record = logging.LogRecord("some logger", logging.DEBUG,
                                   "pathname", 104,
                                   "A message", {}, exception, None)
        data = json.loads(formatter.format(record))
        eq_("some logger", data['name'])
        eq_("some app", data['app'])
        eq_("DEBUG", data['level'])
        eq_("A message", data['message'])
        eq_("pathname", data['filename'])
        assert 'ValueError: fake exception' in data['traceback']
def test_send(self):
    """send() must forward record, message and resource to the worker queue."""
    from google.cloud.logging_v2.logger import _GLOBAL_RESOURCE

    client = _Client(self.PROJECT)
    transport, _ = self._make_one(client, "python_logger")
    message = "hello world"
    record = logging.LogRecord(
        "mylogger", logging.INFO, None, None, message, None, None)

    transport.send(record, message, resource=_GLOBAL_RESOURCE)

    transport.worker.enqueue.assert_called_once_with(
        record, message, resource=_GLOBAL_RESOURCE,
    )
def test_RawFileHandler_appends_binary_message_into_logfile():
    """RawFileHandler must write the record's bytes payload verbatim."""
    import os.path
    from moler.config.loggers import RAW_DATA, RawFileHandler

    logfile_full_path = os.path.join(os.getcwd(), "tmp.raw.log")
    binary_msg = b"1 0.000000000 127.0.0.1 \xe2\x86\x92 127.0.0.1 ICMP 98 Echo (ping) request id=0x693b, seq=48/12288, ttl=64"

    raw_handler = RawFileHandler(filename=logfile_full_path, mode='wb')
    raw_handler.emit(record=logging.LogRecord(
        name=None,
        level=RAW_DATA,
        pathname="",
        lineno=0,
        msg=binary_msg,  # only the payload is used by this handler
        args=(),
        exc_info=None))
    raw_handler.close()

    with open(logfile_full_path, mode='rb') as logfh:
        content = logfh.read()
    assert content == binary_msg
    os.remove(logfile_full_path)
def test_ignore_exceptions(self):
    """Exceptions listed in sentry_ignore_exceptions must not be captured."""
    config = {
        'sentry_enabled': True,
        'sentry_dsn': 'http://*****:*****@example.com/1',
        'sentry_ignore_exceptions': 'openerp.exceptions.ValidationError',
    }
    level = logging.WARNING
    msg = 'Test ValidationError'
    client = initialize_raven(config, client_cls=InMemoryClient)

    handlers = list(
        log_handler_by_class(logging.getLogger(), OdooSentryHandler))
    self.assertTrue(handlers)
    handler = handlers[0]

    # Raise and catch the ignored exception to obtain a real exc_info.
    try:
        raise exceptions.ValidationError(msg)
    except exceptions.ValidationError:
        exc_info = sys.exc_info()
    handler.emit(
        logging.LogRecord(__name__, level, __file__, 42, msg, (), exc_info))

    self.assertEventNotCaptured(client, level, msg)
def _log_debug_as_f(f, msg, msg_args):
    """Log ``msg`` at DEBUG as if it originated from function ``f``.

    The record is pointed at f's definition site (file and first line)
    rather than at this helper, so log output is attributed correctly.
    """
    name = f.__module__
    logger = logging.getLogger(name)
    if not logger.isEnabledFor(logging.DEBUG):
        return

    # ``__code__`` vs ``func_code`` is the py3/py2 spelling difference.
    code = f.__code__ if PY3 else f.func_code
    record = logging.LogRecord(
        name=name,
        level=logging.DEBUG,
        pathname=code.co_filename,
        lineno=code.co_firstlineno,
        msg=msg,
        args=msg_args,
        exc_info=None,
    )
    logger.handle(record)
def makeRecord(self, name, level, fn, lno, msg, args, exc_info,
               func=None, extra=None, sinfo=None):
    """
    A factory method which can be overridden in subclasses to create
    specialized LogRecords.

    Caller-supplied ``extra`` keys are copied under
    ``rv.__dict__["extra"]``.  An attempt to overwrite ``message``,
    ``asctime`` or any existing record attribute aborts the copy
    (already-copied keys are kept; the rest are dropped).
    """
    rv = logging.LogRecord(name, level, fn, lno, msg, args, exc_info,
                           func, sinfo)
    try:
        if extra is not None:
            rv.__dict__["extra"] = {}
            for key in extra:
                if (key in ["message", "asctime"]) or (key in rv.__dict__):
                    raise KeyError(
                        "Attempt to overwrite %r in LogRecord" % key)
                rv.__dict__["extra"][key] = extra[key]
    except KeyError:
        # BUG FIX: was a bare ``except: pass``, which also swallowed
        # KeyboardInterrupt/SystemExit and hid unrelated errors.  Keep the
        # best-effort behaviour, but only for the deliberate KeyError above.
        pass
    return rv
def error2(self, depth, msg=None, *args, **kw):
    """Emit an ERROR record to the access, error and stream handlers.

    ``depth`` adjusts the frame offset passed to format_msg; any failure
    is reported to stdout rather than raised.
    """
    try:
        if self.log_level > logging.ERROR:
            return
        msg = self.format_msg(depth + 1, msg, *args, **kw)
        record = logging.LogRecord(self.module, logging.ERROR, __file__, 1,
                                   msg, None, None)
        # Fan the same record out to all three handlers, flushing each.
        for handler in (Logger.access_log_handler,
                        Logger.error_log_handler,
                        Logger.stream_log_handler):
            handler.emit(record)
            handler.flush()
    except Exception as ex:
        print(
            "ERROR2 got an exception: {}, depth={}, msg={}, args={}, kw={}, traceback={}"
            .format(ex, depth, msg, args, kw, get_traceback()))
def test_send(self):
    """send() must log a struct carrying the message and python logger name."""
    from google.cloud.logging.logger import _GLOBAL_RESOURCE

    client = _Client(self.PROJECT)
    transport = self._make_one(client, 'python')
    message = 'hello world'
    record = logging.LogRecord('mylogger', logging.INFO, None, None,
                               message, None, None)

    transport.send(record, message, _GLOBAL_RESOURCE)

    expected_struct = {
        'message': message,
        'python_logger': 'mylogger',
    }
    self.assertEqual(
        transport.logger.log_struct_called_with,
        (expected_struct, 'INFO', _GLOBAL_RESOURCE, None, None, None))
def test_formatter_format():
    """format() must emit the canonical logfmt fields in order."""
    record = logging.LogRecord(
        "name", logging.INFO, "module", 1, "Some message", (), None
    )
    time = datetime.datetime.fromtimestamp(record.created).strftime(
        "%Y-%m-%d %H:%M:%S.%f"
    )

    expected_parts = [
        "id=unknown",
        "at=INFO",
        f'time="{time}"',
        "tag=external",
        "module=module",
        'msg="Some message"',
    ]
    assert LogfmtFormatter().format(record) == " ".join(expected_parts)
def test_log_healthnmon_audit_formatter_format_with_optional_values(self):
    """Formatting a record carrying every optional audit attribute succeeds."""
    formatter = HealthnmonAuditFormatter()
    logrecord = logging.LogRecord(
        'healthnmon', 10, '/root/git/healthnmon/healthnmon/log.py', 117,
        'foo', None, None)
    # Populate the optional audit attributes the formatter consumes.
    logrecord.componentId = "Healthnmon"
    logrecord.orgId = "TestOrgId"
    logrecord.domain = "TestDomain"
    logrecord.userId = "TestUserId"
    logrecord.loggingId = "L123"
    logrecord.taskId = "T123"
    logrecord.sourceIp = "localhost"
    logrecord.result = "SUCCESS"
    logrecord.action = "NOOP"
    logrecord.severity = "INFO"
    logrecord.object = "TestObject"
    logrecord.objectDescription = "TestDescription"
    logrecord.asctime = time.time()
    result = formatter.format(logrecord)
    # FIX: ``self.assert_`` is a deprecated alias removed in Python 3.12,
    # and asserting a constant True ignored ``result`` entirely; check the
    # formatted output instead.
    self.assertTrue(result is not None)
def makeRecord(self, name, level, fn, lno, msg, args, exc_info,
               func=None, extra=None):
    """
    Create a LogRecord, merge ``extra`` into its attribute dict and stamp
    it with the current process name.

    :raises KeyError: if ``extra`` attempts to overwrite ``message``,
        ``asctime`` or any existing record attribute.
    """
    rv = logging.LogRecord(name, level, fn, lno, msg, args, exc_info, func)
    if extra is not None:
        for key, value in extra.items():
            if key in ("message", "asctime") or key in rv.__dict__:
                raise KeyError("Attempt to override %r in LogRecord" % key)
            rv.__dict__[key] = value
    if multiprocessing is not None:
        # BUG FIX: use the public ``name`` property instead of reaching
        # into the private ``_name`` attribute of the Process object.
        rv.processName = multiprocessing.current_process().name
    else:
        rv.processName = ""
    return rv
def make_record(name=__name__, level=logging.INFO, pathname=__file__,
                lineno=0, msg="", args=tuple(), exc_info=None, func=None):
    """Create a log record the same way loggers do.

    Parameters mirror logging.LogRecord's constructor and default to
    innocuous values so callers only override what they need.
    """
    # pylint: disable=too-many-arguments
    return logging.LogRecord(name=name, level=level, pathname=pathname,
                             lineno=lineno, msg=msg, args=args,
                             exc_info=exc_info, func=func)
def critical(self, msg=None, *args, **kw):
    """Emit a CRITICAL record to the access, error and stream handlers.

    Any failure is reported to stdout rather than raised.
    """
    try:
        if self.log_level > logging.CRITICAL:
            return
        msg = self.format_msg(1, msg, *args, **kw)
        record = logging.LogRecord(self.module, logging.CRITICAL, __file__, 1,
                                   msg, None, None)
        # Fan the same record out to all three handlers, flushing each.
        for handler in (Logger.access_log_handler,
                        Logger.error_log_handler,
                        Logger.stream_log_handler):
            handler.emit(record)
            handler.flush()
    except Exception as ex:
        print(
            "CRITICAL got an exception: {}, msg={}, args={}, kw={}, traceback={}"
            .format(ex, msg, args, kw, get_traceback()))
def emit(self, record):
    """Emit a record, splitting messages longer than MTU into chunks.

    Short messages are delegated to SysLogHandler.emit unchanged.  Long
    messages are re-emitted as a sequence of LogRecords that share the
    original record's metadata, each carrying at most MTU characters.
    """
    message = record.getMessage()
    msg_len = len(message)
    if msg_len <= self.MTU:
        SysLogHandler.emit(self, record)
        return
    # BUG FIX: the original chunking had two off-by-one errors — each
    # slice ended at start + MTU - 1 (exclusive), silently dropping one
    # character per chunk, and the final slice ended at msg_len - 1,
    # dropping the last character.  Stepping by MTU with an exclusive end
    # preserves every character.
    for start in range(0, msg_len, self.MTU):
        chunk_record = logging.LogRecord(
            record.name,
            record.levelno,
            record.pathname,
            record.lineno,
            message[start:start + self.MTU],  # args=None: already formatted
            None,
            record.exc_info,
            record.funcName,
        )
        SysLogHandler.emit(self, chunk_record)