def __init__(self, recipients=None):
    Handler.__init__(self)
    self.bot = TwitterDownloaderBot()
    self.bot.setState('available', 'Twitter Downloader')
    self.bot.recipients = recipients if recipients is not None else administrator_jid[:]
    threading.Thread(target=self.bot.start,
                     args=[google_user_name, google_password]).start()

def __init__(self, log, level=NOTSET):
    """
    :param log: Your ready-to-use log object
    :param level: One of the ordered standard logging constants (INFO, ERROR, etc.)
    """
    Handler.__init__(self, level)
    self._log = log

def __init__(self, level=NOTSET, view_id=None):
    Handler.__init__(self, level)
    self.state_deferral = None
    self.view = None
    self.buffer_id = None
    self.follow_bottom = True
    self.cache = []

def __init__(self, level=logging.NOTSET, error_level=logging.WARNING):
    """
    Records with a level strictly less than error_level are written to
    standard output; standard error is used otherwise.
    """
    Handler.__init__(self, level)
    self.error_level = error_level

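A minimal usage sketch for the constructor above, assuming it belongs to a class named LevelSplitHandler (the name and the emit() that does the routing are not shown here):

import logging

# Hypothetical class name; only the constructor is shown above.
handler = LevelSplitHandler(level=logging.DEBUG, error_level=logging.WARNING)

logger = logging.getLogger('app')
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)

logger.info('goes to stdout')    # level < error_level
logger.error('goes to stderr')   # level >= error_level
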
def acquire(self):
    """ Acquire thread and file locks. Re-opening log for 'degraded' mode. """
    # handle thread lock
    if Handler:
        # under some tests Handler ends up being null due to instantiation
        # order
        Handler.acquire(self)
    # Issue a file lock. (This is inefficient for multiple active threads
    # within a single process. But if you're worried about high-performance,
    # you probably aren't using this log handler.)
    if self.stream_lock:
        # If stream_lock=None, then assume close() was called or something
        # else weird and ignore all file-level locks.
        if self.stream_lock.closed:
            # Daemonization can close all open file descriptors, see
            # https://bugzilla.redhat.com/show_bug.cgi?id=952929
            # Try opening the lock file again. Should we warn() here?!?
            try:
                self._open_lockfile()
            except Exception:
                self.handleError(NullLogRecord())
                # Don't try to open the stream lock again
                self.stream_lock = None
                return
        lock(self.stream_lock, LOCK_EX)

def __init__(self, q, level=NOTSET):
    if sys.version_info[:2] > (2, 6):
        super(QueueHandler, self).__init__(level)
    else:
        Handler.__init__(self, level)
    self.q = q

def __init__(self, toaddrs, subject, sendmail='sendmail'):
    Handler.__init__(self)
    self.sendmail = '%s -t' % sendmail
    self.msg = """To: %s
Subject: %s
""" % (', '.join(toaddrs), subject)

def __init__(self, text, yscroll, size):
    Handler.__init__(self)
    self.setLevel(DEBUG)
    self.display_debug = False
    self.display_filter = ''
    self.records = []
    self.text = text
    self.yscroll = yscroll
    self.size = size
    self.deleted = 0
    boldfont = tkinter.font.Font(font=text['font'])
    boldfont['weight'] = 'bold'
    text.tag_config('time', foreground='#888')
    text.tag_config('DEBUG_levelname', foreground='#888')
    text.tag_config('INFO_levelname', foreground='#080')
    text.tag_config('WARNING_levelname', background='red', foreground='white')
    text.tag_config('ERROR_levelname', background='red', foreground='white')
    text.tag_config('WARNING_message', font=boldfont)
    text.tag_config('ERROR_message', font=boldfont)
    text['state'] = 'disabled'

def __init__(self, stdout=None, stderr=None, formatter=None):
    """
    Initialize the handler.

    Parameters
    ----------
    stdout : file-like object, optional
        Stream to which DEBUG and INFO messages should be written.
        If `None`, `sys.stdout` will be used.
    stderr : file-like object, optional
        Stream to which WARNING, ERROR, and CRITICAL messages will be
        written. If `None`, `sys.stderr` will be used.
    formatter : `logging.Formatter` object, optional
        Assigned to `self.formatter`, used to format outgoing log
        messages.

    Notes
    -----
    N.B. it is **not** recommended to pass `sys.stdout` or `sys.stderr`
    as constructor arguments explicitly, as certain things (like
    nosetests) can reassign these during code execution! Instead,
    simply pass `None`.
    """
    Handler.__init__(self)
    self._stdout = stdout
    self._stderr = stderr
    self.formatter = formatter

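A hedged sketch of how such a split-stream handler might be wired up, assuming the class is called SplitStreamHandler (only the constructor is shown; the emit() that routes by level lives elsewhere):

import io
import logging

out, err = io.StringIO(), io.StringIO()

# Hypothetical class name for the constructor above; StringIO streams
# let a test capture what would go to each side.
handler = SplitStreamHandler(
    stdout=out, stderr=err,
    formatter=logging.Formatter('%(levelname)s: %(message)s'))

log = logging.getLogger('demo')
log.addHandler(handler)
log.setLevel(logging.DEBUG)
log.debug('captured in out')
log.warning('captured in err')
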
def acquire(self):
    """ Acquire thread and file locks. Also re-opening log file when running
    in 'degraded' mode. """
    # handle thread lock
    Handler.acquire(self)
    lock(self.stream_lock, LOCK_EX)
    if self.stream.closed:
        self._openFile(self.mode)

def close(self):
    """
    Closes the stream.
    """
    if not self.stream.closed:
        self.stream.flush()
        self.stream.close()
    Handler.close(self)

def __init__(self, text_edit):
    """ Initialize the handler. """
    Handler.__init__(self)
    self.text_edit = text_edit

def __init__(self, host='127.0.0.1', port=5555, transport=TCPTransport):
    Handler.__init__(self)
    self.client = Client(host, port, transport)
    try:
        conn = transport(host, port)
        conn.close()
    except TransportError:
        raise ConnectionError("Could not connect to Riemann server.")

def __init__(self, base_url=None):
    if base_url is None:
        self.base_url = ('https://logs-01.loggly.com/inputs/'
                         'b121e4df-f910-4d6a-b6c1-b19ca2776233/tag/python/')
    else:
        self.base_url = base_url
    self.localip = socket.gethostbyname(socket.gethostname())
    self.publicip = urllib2.urlopen('http://ip.42.pl/raw').read()
    Handler.__init__(self)

def __init__(self, host='localhost', port=6379, db=0, key='logstash',
             message_type='logstash', version=0, fqdn=False):
    # message_type, version, and fqdn were referenced but not defined in the
    # original signature; they are added here so the body is runnable.
    Handler.__init__(self)
    self._key = key
    # Pass port and db through so they are not silently ignored.
    self.r_server = redis.Redis(host=host, port=port, db=db)
    if version == 1:
        self.formatter = formatter.LogstashFormatterVersion1(message_type, [], fqdn)
    else:
        self.formatter = formatter.LogstashFormatterVersion0(message_type, [], fqdn)

def close(self):
    """Flushes any remaining messages in the queue."""
    if self.messages_pending:
        try:
            self.scribe_write(self._log_buffer)
        except self.ScribeHandlerException:
            pass
    Handler.close(self)

def __init__(self, flow_api_token, source="PyFlowdock Logging Helper",
             from_address='*****@*****.**', from_name='Logger'):
    Handler.__init__(self)
    self.setLevel(DEBUG)
    self.api = TeamInbox(flow_api_token)
    self.source = source
    self.from_address = from_address
    self.from_name = from_name

def __init__(self, splash_screen):
    """ Creates a new handler for a splash screen. """
    # Base class constructor:
    Handler.__init__(self)
    # The splash screen that we will display log messages on:
    self._splash_screen = splash_screen

def close(self):
    self.flush()
    self.acquire()
    try:
        self.target = NullHandler()
        Handler.close(self)
    finally:
        self.release()

def __init__(self, **kwargs):
    level = kwargs.get('level')
    if not level:
        level = logging.ERROR
    Handler.__init__(self, level=level)
    self.__whoami = kwargs.get('from')
    if not self.__whoami:
        self.__whoami = 'Djanbber Flask Logger'
    self.sender = JabberSender.create(**kwargs)

def release(self):
    try:
        self.stream.flush()
    finally:
        try:
            unlock(self.stream_lock)
        finally:
            # release thread lock
            Handler.release(self)

def __init__(self):
    Handler.__init__(self)
    self.__consoleFuncForLevel = {
        'DEBUG': self.__debug,
        'INFO': self.__info,
        'WARNING': self.__warn,
        'ERROR': self.__error,
        'CRITICAL': self.__error,
    }

def get_logger(name: str, log_level: int = logging.INFO,
               handler: logging.Handler = static_handler,
               formatter: logging.Formatter = colored_formatter):
    logger = logging.getLogger(name)
    logger.setLevel(log_level)
    # `is 0` compared identity, not value; use == for the emptiness check.
    if len(logger.handlers) == 0:
        logger.addHandler(handler)
        if formatter is not None and handler is not static_handler:
            handler.setFormatter(formatter)
    return logger

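A usage sketch (static_handler and colored_formatter are module-level objects defined elsewhere in this source):

import logging

log = get_logger(__name__)        # default handler and formatter
log.info('hello')

# With an explicit handler, which then receives colored_formatter:
log2 = get_logger('worker', log_level=logging.DEBUG,
                  handler=logging.StreamHandler())
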
def close(self):
    """ Close log stream and stream_lock. """
    try:
        self._close()
        if not self.stream_lock.closed:
            self.stream_lock.close()
    finally:
        self.stream_lock = None
        Handler.close(self)

def close(self):
    self.acquire()
    try:
        if self.__conn and self.__conn.is_open:
            self.__conn.close()
            self.__conn = None
    finally:
        self.release()
    Handler.close(self)

def close(self):
    """ Closes the stream. """
    if self.stream and not self.stream.closed:
        self.stream.flush()
        self.stream.close()
    if self.stream_lock and not self.stream_lock.closed:
        self.stream_lock.close()
    Handler.close(self)

def __init__(self, tissue, options, noseconfig):
    Handler.__init__(self)
    self.tissue = tissue
    self.message_buffer_size = options.pocket_batch_size
    self.buffered_messsage_count = 0
    self.session = None
    self.session_objects = {}
    self.last_error = None
    getLogger().addHandler(self)

def __init__(self, ident=None, facility=syslog.LOG_USER, log_pid=False):
    Handler.__init__(self)
    self.facility = facility
    if isinstance(facility, basestring):
        self.facility = self.facility_names[facility]
    options = 0
    if log_pid:
        options |= syslog.LOG_PID
    syslog.openlog(ident, options, self.facility)
    self.formatter = None

def __init__(self, stdout=None, stderr=None, formatter=None):
    """
    Initialize the handler. If a stream is not specified, `sys.stdout`
    and `sys.stderr` are used, respectively.
    """
    Handler.__init__(self)
    self._stdout = stdout
    self._stderr = stderr
    self.formatter = formatter

def release(self):
    """ Release file and thread locks. Flush stream and take care of closing
    stream in 'degraded' mode. """
    try:
        self.stream.flush()
        if self._rotateFailed:
            self.stream.close()
    finally:
        # release thread lock
        Handler.release(self)

def __init__(self, facility='user', hostname=None, appname=None, procid=None,
             structured_data={}, socket_path='/dev/log',
             socket_types=(SOCK_DGRAM, SOCK_STREAM),
             message_format=SYSLOG_FORMAT_RFC5424,
             message_framing=SYSLOG_FRAMING_NON_TRANSPARENT,
             utf8_bom=True, utc_timestamp=False):
    # first things first: try connecting
    if not S_ISSOCK(stat(socket_path).st_mode):
        raise Exception(f"Not a unix domain socket: '{socket_path}'")
    sock = self._try_connect(socket_path, socket_types)

    # prepare settings
    self._is_5424 = message_format == SYSLOG_FORMAT_RFC5424
    self._facility = SysLogHandler.facility_names[facility]
    self._structured_data = structured_data
    self._get_hostname = mk_get_from_record((hostname, gethostname()),
                                            ('hostname',), 255)
    self._get_appname = mk_get_from_record((appname, '-'), ('appname',), 48)
    self._get_msgid = mk_get_from_record((procid, '-'), ('msgid',), 32)
    self._get_procid = mk_get_from_record(
        (procid, '-' if self._is_5424 else None),
        ('process', 'procid'), 128)

    # prepare message assembly methods
    if sock.type == SOCK_STREAM:
        self._message_framing = message_framing
    else:
        self._message_framing = SYSLOG_FRAMING_UNFRAMED
    self._msg_encoding = 'utf-8-sig' if self._is_5424 and utf8_bom else 'utf8'
    self._sec_to_struct = gmtime if self._is_5424 and utc_timestamp else localtime

    Handler.__init__(self)
    AsyncEmitMixin.__init__(self, sock.fileno(), sock.send)

def __init__(self, api_key, channel, stack_trace=True, username='******',
             icon_url=None, icon_emoji=None, fail_silent=False,
             ping_users=None, ping_level=None):
    Handler.__init__(self)
    self.formatter = NoStacktraceFormatter()
    self.stack_trace = stack_trace
    self.fail_silent = fail_silent
    self.slacker = slacker.Slacker(api_key)
    self.username = username
    self.icon_url = icon_url
    self.icon_emoji = icon_emoji if (icon_emoji or icon_url) else DEFAULT_EMOJI
    self.channel = channel
    if not self.channel.startswith('#') and not self.channel.startswith('@'):
        self.channel = '#' + self.channel
    self.ping_level = ping_level
    self.ping_users = []
    if ping_users:
        user_list = self.slacker.users.list().body['members']
        for ping_user in ping_users:
            ping_user = ping_user.lstrip('@')
            for user in user_list:
                if user['name'] == ping_user:
                    self.ping_users.append(user['id'])
                    break
            else:
                raise RuntimeError(
                    'User not found in Slack users list: %s' % ping_user)

def release(self):
    """
    Release the file lock and the handler's thread lock. If acquiring the
    file lock failed, close the file to avoid conflicts.

    :return: None
    """
    try:
        if self._rotateFailed:
            self._close()
    except Exception:
        self.handleError(NullLogRecord())
    finally:
        try:
            if self.stream_lock and not self.stream_lock.closed:
                self._unlock(self.stream_lock)
        except Exception:
            self.handleError(NullLogRecord())
        finally:
            Handler.release(self)

def __init__(self, path, filename, mode='a', encoding='utf-8', delay=False):
    filename = os.fspath(filename)
    if not os.path.exists(path):
        os.mkdir(path)
    self.baseFilename = os.path.join(path, filename)
    self.mode = mode
    self.encoding = encoding
    self.delay = delay
    if delay:
        Handler.__init__(self)
        self.stream = None
    else:
        StreamHandler.__init__(self, self._open())

def acquire(self):
    """
    Acquire the file lock and the thread lock; close the file if rollover
    failed.

    :return: None
    :raises: NullLogRecord, the log output exception
    """
    # acquire the thread lock
    Handler.acquire(self)
    # handle the file lock; do nothing if the stream lock is already closed
    if self.stream_lock:
        if self.stream_lock.closed:
            try:
                self._open_lockfile()
            except Exception:
                self.handleError(NullLogRecord())
                self.stream_lock = None
                return
        self._lock(self.stream_lock, fcntl.LOCK_EX)

def __init__(self, filename, mode='a', encoding=None, delay=False):
    """
    Open the specified file and use it as the stream for logging.
    """
    # Issue #27493: add support for Path objects to be passed in
    # keep the absolute path, otherwise derived classes which use this
    # may come a cropper when the current directory changes
    self.baseFilename = os.path.abspath(filename)
    self.mode = mode
    self.encoding = encoding
    self.delay = delay
    if delay:
        # We don't open the stream, but we still need to call the
        # Handler constructor to set level, formatter, lock etc.
        Handler.__init__(self)
        self.stream = None
    else:
        StreamHandler.__init__(self, self._open())
    with tf.io.gfile.GFile(self.baseFilename, 'w') as f:
        f.write('Logging ........\n')

def __init__(self, api_key, channel, stack_trace=False, username='******',
             icon_url=None, icon_emoji=None, fail_silent=False):
    Handler.__init__(self)
    self.slacker = slacker.Slacker(api_key)
    self.channel = channel
    self.stack_trace = stack_trace
    self.username = username
    self.icon_url = icon_url
    self.icon_emoji = icon_emoji if (icon_emoji or icon_url) else DEFAULT_EMOJI
    self.fail_silent = fail_silent
    if not self.channel.startswith('#'):
        self.channel = '#' + self.channel

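A hedged usage sketch for the Slack handlers above, assuming a class name of SlackerLogHandler (only the constructors are shown; the emit() that posts to Slack lives elsewhere, and a real Slack API key is needed):

import logging

# Hypothetical class name wrapping the constructor above.
slack_handler = SlackerLogHandler('xoxb-your-api-key', 'alerts',
                                  fail_silent=True)
slack_handler.setLevel(logging.ERROR)  # only ship errors to Slack

logging.getLogger('app').addHandler(slack_handler)
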
def __init__(self, *args, **kwargs):
    """logging.Handler interface for Scribe.

    Params:
        buffer: If True, buffer messages when scribe is unavailable.
            If False, drop on floor.
        category: Scribe category for logging events.
        host: Scribe host.
        port: Scribe port.
    """
    if not _SCRIBE_PRESENT:
        raise self.ScribeHandlerException(
            "Could not initialize ScribeHandler: Scribe modules not present.")
    self._buffer_enabled = kwargs.pop("buffer")
    self._category = kwargs.pop("category")
    self._client = None
    self._host = kwargs.pop("host")
    self._log_buffer = []
    self._port = kwargs.pop("port")
    self._transport = None
    Handler.__init__(self, *args, **kwargs)

def get_handler_report(h: logging.Handler) -> Dict[str, Any]:
    """
    Returns information on a log handler, as a dictionary. For debugging.
    """
    # noinspection PyUnresolvedReferences
    return {
        'get_name()': h.get_name(),
        'level': h.level,
        'formatter': get_formatter_report(h.formatter),
        'filters': h.filters,
    }

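A quick usage sketch (get_formatter_report is a sibling helper defined elsewhere in this source):

import logging
from pprint import pprint

h = logging.StreamHandler()
h.setLevel(logging.WARNING)
pprint(get_handler_report(h))
# e.g. {'filters': [], 'formatter': ..., 'get_name()': None, 'level': 30}
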
def _safewrap_handler(self, handler: logging.Handler) -> None:
    # Make the logger handlers dump internal errors to
    # :data:`sys.__stderr__` instead of :data:`sys.stderr` to circumvent
    # infinite loops.

    class WithSafeHandleError(logging.Handler):
        def handleError(self, record: logging.LogRecord) -> None:
            try:
                traceback.print_exc(None, sys.__stderr__)
            except IOError:
                pass  # see python issue 5971

    handler.handleError = WithSafeHandleError().handleError  # type: ignore

def add_handler(handler: logging.Handler,
                logger_name: Optional[str] = None,
                level: int = logging.DEBUG,
                log_format: Optional[str] = None):
    """
    Adds a new handler to an existing logger, following the formatter
    convention established in ``init_logging``. If a format is specified
    (i.e. it is not None), it is used for this handler; otherwise
    LOG_DEFAULT_FORMAT is used.

    :param handler: the new handler to be added.
    :param logger_name: the name of the logger to which the handler is added.
    :param level: the logging level for this handler.
    :param log_format: the log format to use if no custom format was given.
    """
    logger = logging.getLogger(logger_name)
    fmt = (logging.Formatter(log_format) if log_format
           else logging.Formatter(LOG_DEFAULT_FORMAT))
    handler.setFormatter(fmt)
    handler.setLevel(level)
    logger.addHandler(handler)

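A usage sketch (LOG_DEFAULT_FORMAT is a module-level constant defined elsewhere in this source):

import logging
import sys

# Send INFO+ records from the root logger to stdout with the default format.
add_handler(logging.StreamHandler(sys.stdout), level=logging.INFO)

# Attach a file handler with a custom format to a named logger.
add_handler(logging.FileHandler('app.log'),
            logger_name='app',
            log_format='%(asctime)s %(levelname)s %(message)s')
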
def configure_logging(
    stderr_handler: logging.Handler,
    quiet: bool,
    debug: bool,
    enable_color: bool,
    timestamps: bool,
    base_logger: logging.Logger = _logger,
) -> None:
    """Configure logging."""
    rdflib_logger = logging.getLogger("rdflib.term")
    rdflib_logger.addHandler(stderr_handler)
    rdflib_logger.setLevel(logging.ERROR)
    if quiet:
        # Silence STDERR, not an eventual provenance log file
        stderr_handler.setLevel(logging.WARN)
    if debug:
        # Increase to debug for both stderr and provenance log file
        base_logger.setLevel(logging.DEBUG)
        stderr_handler.setLevel(logging.DEBUG)
        rdflib_logger.setLevel(logging.DEBUG)
    fmtclass = coloredlogs.ColoredFormatter if enable_color else logging.Formatter
    formatter = fmtclass("%(levelname)s %(message)s")
    if timestamps:
        formatter = fmtclass(
            "[%(asctime)s] %(levelname)s %(message)s", "%Y-%m-%d %H:%M:%S"
        )
    stderr_handler.setFormatter(formatter)

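A usage sketch (the coloredlogs package must be installed when enable_color=True; _logger is the module's own logger):

import logging
import sys

handler = logging.StreamHandler(sys.stderr)
configure_logging(handler, quiet=False, debug=True,
                  enable_color=False, timestamps=True)
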
def __init__(self, api_key, channel, stack_trace=True, username='******',
             icon_url=None, icon_emoji=None, fail_silent=False,
             ping_user=None, ping_level=None, message_in_attachments=True):
    Handler.__init__(self)
    self.formatter = NoStacktraceFormatter()
    self.stack_trace = stack_trace
    self.fail_silent = fail_silent
    self.slacker = slacker.Slacker(api_key)
    self.ping_level = ping_level
    # Initialize before the lookup loop; otherwise the not-found check
    # below would hit an unset attribute when no user matches.
    self.ping_user = None
    if ping_user:
        for user in self.slacker.users.list().body['members']:
            if user['name'] == ping_user:
                self.ping_user = user['id']
                break
        if not self.ping_user:
            raise RuntimeError('User not found in Slack users list: %s'
                               % ping_user)
    self.username = username
    self.icon_url = icon_url
    self.icon_emoji = icon_emoji if (icon_emoji or icon_url) else DEFAULT_EMOJI
    self.channel = channel
    self.message_in_attachments = message_in_attachments
    if not self.channel.startswith('#') and not self.channel.startswith('@'):
        self.channel = '#' + self.channel

def __init__(self, name: str, strFormat: str,
             handler: Optional[logging.Handler] = None):
    """
    Parameters
    ----------
    name: str
        The name to give this logger.
    strFormat: str
        The formatting string to use.
    handler: logging.Handler, optional
        The handler to use. Defaults to a new StreamHandler. (A default
        argument of StreamHandler() would be evaluated once at function
        definition and shared between all instances.)
    """
    self.log: logging.Logger = logging.getLogger(name)
    self.log.setLevel(logging.INFO)
    if handler is None:
        handler = logging.StreamHandler()
    handler.setLevel(logging.INFO)
    handler.setFormatter(logging.Formatter(strFormat))
    self.log.addHandler(handler)

def __init__(
    self,
    broker_url,
    broker_port,
    broker_vhost,
    broker_username,
    broker_password,
    exchange,
    routing_key,
    level=logging.NOTSET,
    message_headers=None,
):
    Handler.__init__(self, level)
    # will be useful to specify the system publishing the log,
    # e.g. System1, System2
    self.broker_url = broker_url
    self.broker_port = broker_port
    self.broker_vhost = broker_vhost
    self.broker_username = broker_username
    self.broker_password = broker_password
    self.message_headers = message_headers
    self.exchange = exchange
    self.routing_key = routing_key

def release(self):
    """ Release file and thread locks. Flush stream and take care of closing
    stream in 'degraded' mode. """
    try:
        try:
            if self.stream:
                self.stream.flush()
        except ValueError:
            # PiCloud: flush sometimes fails if port disconnected - we'll
            # try to open again
            self._degrade(True, 'flush failed')
        if self._rotateFailed and self.stream:
            self.stream.close()
    finally:
        try:
            unlock(self.stream_lock)
        except ValueError:
            if self._rotateFailed:
                # something is broken (exiting?)
                self._disable()
            else:
                raise
        finally:
            # release thread lock
            Handler.release(self)

def __init__(self, listbox, button):
    self.button = button
    self.listbox = listbox
    Handler.__init__(self)
    self.image0 = QPixmap()
    self.image0.loadFromData(image0_data, "PNG")
    self.image1 = QPixmap()
    self.image1.loadFromData(image1_data, "PNG")
    self.image2 = QPixmap()
    self.image2.loadFromData(image2_data, "PNG")
    self.image3 = QPixmap()
    self.image3.loadFromData(image3_data, "PNG")
    self.image4 = QPixmap()
    self.image4.loadFromData(image4_data, "PNG")
    self.lvlPix = {
        "CRITICAL": self.image0,
        "WARNING": self.image1,
        "DEBUG": self.image2,
        "INFO": self.image3,
        "ERROR": self.image4,
    }

def __init__(self, queue=None, logging_url="", channel="", username="",
             icon_emoji=""):
    """
    logging_url, channel, username, and icon_emoji can all be overridden
    by the `extra` dictionary parameter of a logging record. For example:

        logging.info('Test message',
                     extra={'channel': '@someone',
                            'username': '******',
                            'icon_emoji': ':penguin:'})
    """
    QueueListener.__init__(self, queue)
    Handler.__init__(self)
    self.logging_url = logging_url
    self.payload = {
        "channel": channel,
        "username": username,
        "icon_emoji": icon_emoji,
    }

def __init__(self, filename, mode='w', encoding=None, delay=True):
    """
    Open the specified file each time a record is logged.

    This Handler is meant to support writing out file contents with the
    latest information. By default this will overwrite the contents on
    each log event.
    """
    # Issue #27493: add support for Path objects to be passed in
    try:
        filename = os.fspath(filename)
    except AttributeError:  # Python < 3.6
        pass
    # keep the absolute path, otherwise derived classes which use this
    # may come a cropper when the current directory changes
    self.baseFilename = os.path.abspath(filename)
    self.mode = mode
    self.encoding = encoding
    self.delay = delay
    # We don't open the stream, but we still need to call the
    # Handler constructor to set level, formatter, lock etc.
    Handler.__init__(self)
    self.stream = None

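A hedged usage sketch, assuming the constructor above belongs to a class named LatestFileHandler (only the constructor is shown; its emit() re-opens the file per record):

import logging

# Hypothetical class name; mode='w' rewrites status.txt on every record.
status_handler = LatestFileHandler('status.txt')
status_logger = logging.getLogger('status')
status_logger.setLevel(logging.INFO)
status_logger.addHandler(status_handler)

status_logger.info('5/100 items processed')   # status.txt holds one line
status_logger.info('6/100 items processed')   # previous contents replaced
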
def __init__(self, hosts_list, topic, key=None, partition=None, **kargs):
    """
    Kafka logging handler init function

    :param hosts_list: 'host[:port]' string (or list of 'host[:port]' strings)
    :param topic: kafka topic
    :param key: the key used when the Kafka producer sends a message
    :param partition: the partition the Kafka producer sends messages to
    :param kargs: keyword arguments for KafkaProducer, except bootstrap_servers
    """
    Handler.__init__(self)
    self.hosts_list = hosts_list
    self.topic = topic
    self.key = key
    self.partition = partition
    self.producter = KafkaProducer(bootstrap_servers=self.hosts_list, **kargs)
    # Grab the KafkaProducer's background sender thread (_sender) so we can
    # tell whether the producer has been closed.
    # Why not check the producer's _closed attribute? Because close() only
    # sets the _closed flag at the very end, after the connection has
    # already been torn down, which would let exceptions slip through.
    self._pro_sender = getattr(self.producter, "_sender")

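A hedged usage sketch, assuming the class is named KafkaLoggingHandler and that its emit() forwards records via the producer (only __init__ is shown above); requires kafka-python and a reachable broker:

import logging

# Hypothetical class name wrapping the constructor above.
kafka_handler = KafkaLoggingHandler('localhost:9092', 'app-logs',
                                    key=b'service-a')
log = logging.getLogger('app')
log.addHandler(kafka_handler)
log.warning('shipped to the app-logs topic')
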
def acquire(self):
    """ Acquire thread and file locks. Re-opening log for 'degraded' mode. """
    # handle thread lock
    Handler.acquire(self)
    # Issue a file lock. (This is inefficient for multiple active threads
    # within a single process. But if you're worried about high-performance,
    # you probably aren't using this log handler.)
    if self.stream_lock:
        # If stream_lock=None, then assume close() was called or something
        # else weird and ignore all file-level locks.
        if self.stream_lock.closed:
            # Daemonization can close all open file descriptors, see
            # https://bugzilla.redhat.com/show_bug.cgi?id=952929
            # Try opening the lock file again. Should we warn() here?!?
            try:
                self._open_lockfile()
            except Exception:
                self.handleError(NullLogRecord())
                # Don't try to open the stream lock again
                self.stream_lock = None
                return
        lock(self.stream_lock, LOCK_EX)

def acquire(self):
    """ Acquire thread and file locks. Re-opening log for 'degraded' mode. """
    self._console_log("In acquire", stack=True)
    # handle thread lock
    Handler.acquire(self)
    # noinspection PyBroadException
    try:
        self._open_lockfile()
    except Exception:
        self.handleError(NullLogRecord())
    self._stream_lock_count += 1
    self._console_log(">> stream_lock_count = %s" % (self._stream_lock_count,))
    if self._stream_lock_count == 1:
        self._console_log(">Getting lock for %s" % (self.stream_lock,),
                          stack=True)
        lock(self.stream_lock, LOCK_EX)
        self.stream = self._open()

def __init__(self, host, port, compress=True, path='/gelf', timeout=5, **kwargs):
    """
    Logging handler that transforms each record into GELF (graylog extended
    log format) and sends it over HTTP.

    :param host: GELF HTTP input host
    :param port: GELF HTTP input port
    :param compress: compress message before sending it to the server or not
    :param path: path of the HTTP input
        (http://docs.graylog.org/en/latest/pages/sending_data.html#gelf-via-http)
    :param timeout: amount of seconds that HTTP client should wait before it
        discards the request if the server doesn't respond
    """
    LoggingHandler.__init__(self)
    BaseHandler.__init__(self, compress=compress, **kwargs)
    self.host = host
    self.port = port
    self.path = path
    self.timeout = timeout
    self.headers = {}
    if compress:
        self.headers['Content-Encoding'] = 'gzip,deflate'

def configure_loggers(app: Flask, default_handler: logging.Handler) -> None:
    """
    Adds logging handlers to the app instance and configures log formatting.

    :param app: The flask app instance.
    :param default_handler: The app's default log handler, which receives
        the shared formatter.
    """
    mail_handler = SMTPHandler(
        mailhost=(app.config["SMTP_SERVER"], app.config["SMTP_PORT"]),
        fromaddr=app.config["SMTP_FROM_ADDRESS"],
        toaddrs=[app.config["ADMIN_EMAIL"]],
        subject='[Survey Tool] Application Error',
        credentials=(app.config["SMTP_FROM_ADDRESS"],
                     app.config["SMTP_PASSWORD"]),
        secure=())
    log_formatter = LogFormatter(
        '[%(asctime)s] %(levelname)s during %(method)s %(url)s as '
        '%(current_user)s@%(remote_addr)s in %(module)s: %(message)s')
    mail_handler.setLevel(logging.ERROR)
    mail_handler.setFormatter(log_formatter)
    default_handler.setFormatter(log_formatter)
    if not app.config['DEBUG']:
        # only use logging via email for production instances
        app.logger.addHandler(mail_handler)

def __init__(self, prefix='', ext='log', folder='logs', year=True, month=True,
             encoding='utf-8'):
    """
    A class for writing logs to a file, with extended functionality. It has
    no public methods of its own and overrides the parent close and emit
    methods.

    :param prefix: string, prefix for the file name, separated from the date
        by "_"; empty by default
    :param ext: string, extension for log files, "log" by default
    :param folder: string, top-level directory for storing log files,
        "logs" by default
    :param year: bool, add the year to the log file path, True by default
    :param month: bool, add the month to the log file path, True by default
    :param encoding: string, log file encoding, "utf-8" by default
    """
    self.filename = ''
    self.prefix = prefix
    self.ext = ext
    self.folder = folder
    self.year = year
    self.month = month
    self.encoding = encoding
    self.stream = None
    # The ancestor (Handler) constructor is called directly so the log file
    # is not opened in advance, only right before a log line is written.
    Handler.__init__(self)

def get_logger(name: str = None, handler: logging.Handler = None,
               level=logging.INFO, formatting: str = DEFAULT_FORMAT_STR,
               propagate: bool = False, print_trace_id: bool = True):
    logger = logging.getLogger(name)
    logger.setLevel(level)
    logger.propagate = propagate
    if not handler:
        handler = logging.StreamHandler(sys.stdout)
    handler.setLevel(level)
    # If APP_ENV is `test` or unset we use a colorful formatter 🌈.
    # Else we use a plain formatter to avoid passing ANSI color characters
    # into staging/prod env logs
    app_env = os.getenv('APP_ENV', TEST_ENV)
    if app_env == TEST_ENV:
        formatter = ColorfulFormatter(formatting)
    else:
        formatter = logging.Formatter(formatting)
    handler.setFormatter(formatter)
    if logger.hasHandlers():
        # To prevent the same stream handler from being added multiple times
        # to the same logger. If the same handler (stdout in this case) is
        # added multiple times to the same logger then each log will show up
        # more and more times in that stream.
        logger.handlers.clear()
    logger.addHandler(handler)
    if print_trace_id:
        logger = KubricLogAdapter(logger, {})
    return logger

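A usage sketch (DEFAULT_FORMAT_STR, ColorfulFormatter, TEST_ENV, and KubricLogAdapter are defined elsewhere in this source):

import logging

log = get_logger('worker', level=logging.DEBUG)
log.info('colored in test, plain in staging/prod')

# Disable the trace-id adapter to get the raw Logger back:
raw = get_logger('worker', print_trace_id=False)
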
def configure_handler(
    handler: logging.Handler,
    level: int,
    formatter: logging.Formatter,
    filter_: logging.Filter = None,
):
    handler.setLevel(level)
    handler.setFormatter(formatter)
    if filter_:
        handler.addFilter(filter_)
    return handler

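For example:

import logging

handler = configure_handler(
    logging.StreamHandler(),
    logging.INFO,
    logging.Formatter('%(asctime)s %(levelname)s %(message)s'),
)
logging.getLogger().addHandler(handler)
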
def register_handler(handler: logging.Handler,
                     logger: logging.Logger,
                     level: int,
                     format_string: str,
                     record_filter: logging.Filter,
                     verbose: bool = True) -> None:
    handler.setLevel(level)
    formatter = logging.Formatter(format_string)
    handler.setFormatter(formatter)
    if record_filter:
        handler.addFilter(record_filter)
    logger.addHandler(handler)
    if verbose:
        print("------------- New Logging Handler ---------------")
        print("Added handler: {}".format(str(handler)))
        print("To Logger: {}".format(str(logger)))
        if record_filter:
            print("With Filter: {}".format(str(record_filter)))

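A usage sketch showing the filter hook:

import logging

class WarningsOnly(logging.Filter):
    def filter(self, record: logging.LogRecord) -> bool:
        return record.levelno == logging.WARNING

register_handler(logging.StreamHandler(),
                 logging.getLogger('app'),
                 logging.DEBUG,
                 '%(levelname)s %(message)s',
                 WarningsOnly())
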
def patch_async_emit(handler: Handler):
    base_emit = handler.emit
    queue = Queue()

    def loop():
        while True:
            record = queue.get()
            try:
                base_emit(record)
            except Exception:
                # bare except would also swallow KeyboardInterrupt/SystemExit
                print(sys.exc_info())

    def async_emit(record):
        queue.put(record)

    thread = Thread(target=loop)
    thread.daemon = True
    thread.start()
    handler.emit = async_emit
    return handler

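A usage sketch; emit() calls now return immediately while a daemon thread drains the queue:

import logging
import sys

slow_handler = logging.StreamHandler(sys.stderr)
logging.getLogger().addHandler(patch_async_emit(slow_handler))
logging.getLogger().warning('enqueued, written by the background thread')
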
def _add_handler(handler: logging.Handler,
                 logger: logging.Logger = logging.getLogger(),
                 name: str = 'log_handler',
                 level: int = logging.INFO,
                 fmt: str = default_fmt,
                 datefmt: str = default_date_fmt) -> logging.Handler:
    """
    >>> result = set_stream_handler()
    >>> result2 = set_stream_handler()
    """
    handler.addFilter(HostnameFilter())
    fmt = format_fmt(fmt)
    formatter = logging.Formatter(fmt, datefmt)
    handler.setFormatter(formatter)
    handler.setLevel(level)
    handler.name = name
    logger.addHandler(handler)
    return handler