def setup_logging(logfile=None, debug=False):
    """Generator-style context manager that configures root logging.

    Quiets the chatty ``discord`` loggers, sets the root level from
    *debug*, and (optionally) attaches a midnight-rotating UTF-8 file
    handler. On exit, all root handlers are closed and detached.

    :param logfile: path for the rotating log file; file logging is
        disabled when falsy.
    :param debug: when True the root level is DEBUG instead of INFO.

    Fixes: the original referenced an undefined name ``logger`` in the
    no-logfile branch (NameError), and bound ``log`` inside the ``try``
    so the ``finally`` block could hit an unbound local.
    """
    # Bind the root logger BEFORE the try so the finally clause is safe.
    log = logging.getLogger()
    try:
        # discord.py is very verbose at INFO; keep it at WARNING.
        logging.getLogger('discord').setLevel(logging.WARNING)
        logging.getLogger('discord.http').setLevel(logging.WARNING)
        log.setLevel(logging.DEBUG if debug else logging.INFO)
        if logfile:
            handler = TimedRotatingFileHandler(filename=logfile,
                                               when='midnight',
                                               utc=True,
                                               encoding='utf-8',
                                               backupCount=5)
            # _log_fmt/_log_dt_fmt are module-level format strings
            # (defined elsewhere in this file), '{'-style.
            fmt = logging.Formatter(_log_fmt, _log_dt_fmt, style='{')
            handler.setFormatter(fmt)
            log.addHandler(handler)
        else:
            # fix: was `logger.warning(...)` — undefined name
            log.warning('Logging to file is disabled')
        yield
    finally:
        # Close and detach every handler so files are flushed/released.
        for handler in log.handlers[:]:
            handler.close()
            log.removeHandler(handler)
def close(self):
    '''
    Close file and dismount (must be in this order).
    Called when 'removeHandler' is invoked.

    The explicit base-class call (rather than super()) pins the close to
    TimedRotatingFileHandler regardless of this class's MRO; _unmount()
    presumably releases the mounted filesystem backing the log file —
    confirm against the enclosing class's definition.
    '''
    TimedRotatingFileHandler.close(self)
    self._unmount()
class MultiProcessingLog(logging.Handler):
    """Multiprocess-safe logging handler.

    Records are pushed onto a ``multiprocessing.Queue``; a daemon thread
    in the owning process drains the queue and writes each record through
    a private ``TimedRotatingFileHandler``, so only one process ever
    touches the file.

    Fixes: bare ``except:`` clauses narrowed to ``except Exception:``
    (BaseException paths are handled explicitly above them), and the
    %-formatting failure diagnostics now go to stderr instead of stdout.
    """

    def __init__(self, filename, when='d', interval=1, backup_count=0,
                 encoding=None, delay=False, utc=False):
        logging.Handler.__init__(self)
        # The real file handler; see TimedRotatingFileHandler for arg docs.
        self._handler = TimedRotatingFileHandler(
            filename, when=when, interval=interval, backupCount=backup_count,
            encoding=encoding, delay=delay, utc=utc)
        self.queue = multiprocessing.Queue(-1)
        # Daemon drain thread: dies with the process, never blocks exit.
        t = threading.Thread(target=self.receive)
        t.daemon = True
        t.start()

    def setFormatter(self, fmt):
        # Keep our own formatter and the delegate's in sync.
        logging.Handler.setFormatter(self, fmt)
        self._handler.setFormatter(fmt)

    def receive(self):
        """Drain loop: emit queued records until the queue's pipe closes."""
        while True:
            try:
                record = self.queue.get()
                self._handler.emit(record)
            except (KeyboardInterrupt, SystemExit):
                raise
            except EOFError:
                # Queue pipe closed — nothing more will arrive.
                break
            except Exception:
                traceback.print_exc(file=sys.stderr)

    def send(self, s):
        self.queue.put_nowait(s)

    def _format_record(self, record):
        """Stringify args/exc_info in place so the record pickles cleanly.

        Removes any chance of unpickleable objects crossing the pipe and
        usually shrinks the message.
        """
        if record.args:
            try:
                record.msg = record.msg % record.args
            except TypeError:
                # msg/args mismatch: report it and keep both pieces of data.
                traceback.print_exc(file=sys.stderr)
                record.msg = record.msg + ", args: %s" % str(record.args)
            record.args = None
        if record.exc_info:
            # format() caches the traceback text on the record.
            self.format(record)
            record.exc_info = None
        return record

    def emit(self, record):
        try:
            s = self._format_record(record)
            self.send(s)
        except (KeyboardInterrupt, SystemExit):
            raise
        except Exception:
            self.handleError(record)

    def close(self):
        self._handler.close()
        logging.Handler.close(self)
class RotatingFileLogger(JSONLogger):
    """RotatingFileLogger

    The :class:`RotatingFileLogger` JSON logs SBP messages to a rotating
    file handler that's turned over at a specified time interval
    (defaults to 30 minutes). Intended to be a very low-overhead,
    cross-platform rolling logger for field testing.

    Parameters
    ----------
    filename : string
      Path to file to write SBP messages to.
    when : str
      Specifies a type of interval
    interval : int
      Specifies length of interval
    backupCount : int
      Number of backups to keep around
    tags : dict, optional
      Tags to add to SBP message envelope (defaults to an empty dict)
    dispatcher: dict
      SBP dispatch table
    """

    def __init__(self, filename, when='M', interval=30, backupCount=3,
                 tags=None, dispatcher=dispatch):
        self.handler = TimedRotatingFileHandler(filename, when, interval,
                                                backupCount)
        self.logger = logging.getLogger("Rotating Log")
        self.logger.setLevel(logging.INFO)
        self.logger.addHandler(_mk_async_emit(self.handler))
        self.dispatcher = dispatcher
        self.base_time = time.time()
        # fix: `tags={}` was a shared mutable default — every instance
        # created without tags aliased the SAME dict.
        self.tags = {} if tags is None else tags

    def __call__(self, msg):
        self.call(msg)

    def flush(self):
        self.handler.flush()

    def close(self):
        self.handler.close()

    def call(self, msg):
        # Serialize the SBP message (JSONLogger.dump) and log it as one line.
        self.logger.info(self.dump(msg))
def __init__(self, logger=None):
    """Attach a midnight-rotating file handler (7 backups) for the
    medical-service log to the named logger.

    :param logger: logger name passed to ``logging.getLogger``; ``None``
        selects the root logger.

    Fixes: the handler is no longer closed right after being attached
    (closing it de-registers it from logging's shutdown bookkeeping),
    and the ``extMatch`` dot is escaped so rotation cleanup only matches
    real ``YYYY-MM-DD.log`` suffixes.
    """
    self.logger = logging.getLogger(logger)
    self.logger.setLevel(logging.INFO)
    # NOTE(review): hard-coded absolute path — assumes /log exists and is
    # writable; confirm for the deployment environment.
    log_filename = '/log/medical_Service.log'
    fileTimeHandler = TimedRotatingFileHandler(log_filename, "midnight", 1, 7)
    # Rotated files are named <base>.<suffix>; extMatch must agree with
    # suffix or expired backups are never deleted.
    fileTimeHandler.suffix = "%Y-%m-%d.log"
    fileTimeHandler.extMatch = re.compile(r"^\d{4}-\d{2}-\d{2}\.log$")
    formatter = logging.Formatter(
        '%(asctime)s %(name)-12s %(levelname)-8s %(message)s')
    fileTimeHandler.setFormatter(formatter)
    self.logger.addHandler(fileTimeHandler)
class RotatingFileLogger(JSONLogger):
    """JSON-log SBP messages to a time-rotated file.

    A thin :class:`JSONLogger` that writes each dumped SBP message through
    a ``TimedRotatingFileHandler``, rolling the file over on a fixed time
    interval (default: every 30 minutes). Meant as a very low-overhead,
    cross-platform rolling logger for field testing.

    Parameters
    ----------
    filename : string
      Path of the file SBP messages are written to.
    when : str
      Type of rotation interval.
    interval : int
      Length of the rotation interval.
    backupCount : int
      How many rotated backups to keep around.
    """

    def __init__(self, filename, when='M', interval=30, backupCount=3,
                 **kwargs):
        super(RotatingFileLogger, self).__init__(None, **kwargs)
        rotating_handler = TimedRotatingFileHandler(filename, when, interval,
                                                    backupCount)
        rotating_logger = logging.getLogger("Rotating Log")
        rotating_logger.setLevel(logging.INFO)
        rotating_logger.addHandler(rotating_handler)
        self.handler = rotating_handler
        self.logger = rotating_logger

    def __call__(self, msg, **metadata):
        self.call(msg, **metadata)

    def call(self, msg, **metadata):
        # One JSON document per log line.
        self.logger.info(self.dump(msg, **metadata))

    def flush(self):
        self.handler.flush()

    def close(self):
        self.handler.close()
class DefaultLogger(AbstractLogger):
    """Default logger: mirrors records to the console and to a
    12-hourly-rotated file at ``<package>/logs/mirai_bot.log`` (5 backups,
    UTF-8).

    Fix: the ``logs`` directory is now created on demand —
    ``TimedRotatingFileHandler`` raises ``FileNotFoundError`` if the
    parent directory is missing.
    """

    def __init__(self, level=logging.INFO, fmt_str=None):
        """
        :param level: level applied to the logger and both handlers.
        :param fmt_str: optional format string; a timestamped default is
            used when omitted.
        """
        self.logger = logging.getLogger()
        self.logger.setLevel(level)
        self.console_handler = logging.StreamHandler()
        self.console_handler.setLevel(level)
        log_path = Path(__file__).parent.parent.joinpath(
            'logs', 'mirai_bot.log')
        # fix: ensure the log directory exists before opening the file
        log_path.parent.mkdir(parents=True, exist_ok=True)
        self.file_handler = TimedRotatingFileHandler(log_path,
                                                     when='h',
                                                     interval=12,
                                                     backupCount=5,
                                                     encoding='utf-8')
        self.file_handler.setLevel(level)
        formatter = logging.Formatter(
            fmt_str or "%(asctime)s - %(levelname)s: %(message)s",
            datefmt='%Y-%m-%d %H:%M:%S')
        self.file_handler.setFormatter(formatter)
        self.logger.addHandler(self.file_handler)
        self.console_handler.setFormatter(formatter)
        self.logger.addHandler(self.console_handler)

    def info(self, msg):
        # Collapse newlines so each entry stays on a single log line.
        msg = msg.replace("\n", r"\n")
        self.logger.info(msg)

    def error(self, msg):
        self.logger.error(msg, exc_info=True)

    def debug(self, msg):
        self.logger.debug(msg)

    def warn(self, msg):
        self.logger.warning(msg)

    def exception(self, msg):
        # logger.exception() already implies exc_info=True.
        self.logger.exception(msg)

    def close(self):
        # Detach before closing so no record races a closed stream.
        self.logger.removeHandler(self.console_handler)
        self.logger.removeHandler(self.file_handler)
        self.file_handler.close()
        self.console_handler.close()
        logging.shutdown()
def get_logger(self, path=None):
    """Return the shared "threading_eg" logger with a daily-rotating file
    handler attached (7 backups).

    :param path: log file path; defaults to ``./log/app.log``.
    :return: the configured ``logging.Logger``.

    Fixes: ``os.mkdir`` replaced with ``os.makedirs`` (nested paths) and
    guarded against an empty dirname; the handler is no longer closed
    immediately after being attached.
    """
    logger = logging.getLogger("threading_eg")
    logger.setLevel(logging.DEBUG)
    path = path or './log/app.log'
    log_dir = os.path.dirname(path)
    # dirname is '' for a bare filename — nothing to create then.
    if log_dir and not os.path.exists(log_dir):
        os.makedirs(log_dir)
    fh = TimedRotatingFileHandler(path, when='d', interval=1, backupCount=7)
    fmt = ('%(asctime)s - %(name)s - %(processName)s - %(threadName)s'
           ' - %(levelname)s - %(message)s')
    fh.setFormatter(logging.Formatter(fmt))
    logger.addHandler(fh)
    return logger
class LogfileMetricSender(with_metaclass(Singleton, MetricSender)):
    """Singleton MetricSender that appends metrics, one JSON document per
    line, to a daily-rotated logfile ("ctaMetric.log" in the temp path,
    7 backups).

    Fix: ``pushMetrics`` no longer closes the file handler after every
    call — closing it de-registers the handler from logging's shutdown
    bookkeeping and defeats the rotating handler's lifecycle.
    """

    def __init__(self):
        super(LogfileMetricSender, self).__init__()
        logger = logging.getLogger(__name__)
        filename = "ctaMetric.log"
        filepath = getTempPath(filename)
        # NOTE: attribute name keeps the original's typo ('hander') so any
        # external code reading it keeps working.
        self.hander = TimedRotatingFileHandler(filepath, when="d",
                                               backupCount=7)
        formatter = logging.Formatter(fmt="%(asctime)s|%(message)s")
        self.hander.setFormatter(formatter)
        logger.addHandler(self.hander)
        logger.setLevel(logging.INFO)
        # Keep metric lines out of ancestor/root handlers.
        logger.propagate = False
        self.logger = logger

    def pushMetrics(self, metrics):
        """Write each metric's JSON representation as one log line."""
        for metric in metrics:
            self.logger.info(metric.to_json())
def __console(self, level, message):
    """Log *message* at *level* to the console and — when LOG_ON — to a
    file rotated at midnight (30 backups, suffix ``YYYY-MM-DD.log``).

    Handlers are created per call and torn down afterwards so the file
    descriptor is released between calls.

    Fixes: the original removed and closed the file handler BEFORE the
    message was logged (so nothing ever reached the file), and added a
    fresh StreamHandler on every call without removing it (duplicate
    console lines, handler leak).
    """
    fh = None
    if LOG_ON:
        # when="MIDNIGHT", interval=1: roll over at 00:00, one file per day.
        # backupCount: number of rotated files to keep.
        fh = TimedRotatingFileHandler(
            filename=self.logname, when="MIDNIGHT", interval=1, backupCount=30
        )
        # suffix and extMatch MUST agree, otherwise expired backups are
        # never deleted.
        fh.suffix = "%Y-%m-%d.log"
        fh.extMatch = re.compile(r"^\d{4}-\d{2}-\d{2}\.log$")
        fh.setLevel(logging.INFO)
        fh.setFormatter(self.formatter)
        self.logger.addHandler(fh)
    # Console handler for this one call.
    ch = logging.StreamHandler()
    ch.setLevel(logging.INFO)
    ch.setFormatter(self.formatter)
    self.logger.addHandler(ch)
    try:
        if level == 'info':
            self.logger.info(message)
        elif level == 'debug':
            self.logger.debug(message)
        elif level == 'warning':
            self.logger.warning(message)
        elif level == 'error':
            self.logger.error(message, exc_info=True)
        elif level == 'critical':
            self.logger.critical(message)
    finally:
        # Tear down per-call handlers AFTER the record has been emitted.
        if fh is not None:
            self.logger.removeHandler(fh)
            fh.close()
        self.logger.removeHandler(ch)
        ch.close()
def get_common_logger(name, log_level="DEBUG", log_file_path=None,
                      std_out=True, when="D", interval=1, backup_count=180):
    """Create a shared logger, optionally with a time-rotating file
    handler and/or a stdout handler.

    :param name: logger name passed to ``getLogger``.
    :param log_level: "WARN", "INFO", or anything else for DEBUG.
    :param log_file_path: when set, attach a rotating UTF-8 file handler.
    :param std_out: when True, attach a stdout StreamHandler.
    :param when: rotation interval type (see TimedRotatingFileHandler).
    :param interval: rotation interval length.
    :param backup_count: number of rotated backups to keep.
    :return: the configured logger.

    Fix: the original called ``handler.close()`` immediately after
    ``addHandler`` for both handlers, closing the streams the logger was
    about to write through and de-registering them from logging's
    shutdown bookkeeping.
    """
    logger = getLogger(name)
    if log_level == "WARN":
        log_level = WARN
    elif log_level == "INFO":
        log_level = INFO
    else:
        log_level = DEBUG
    formatter = Formatter(
        "%(asctime)s %(levelname)s %(module)s %(lineno)s :%(message)s")
    if log_file_path:
        handler = TimedRotatingFileHandler(filename=log_file_path,
                                           when=when,
                                           interval=interval,
                                           backupCount=backup_count,
                                           encoding="utf-8")
        handler.setLevel(log_level)
        handler.setFormatter(formatter)
        logger.addHandler(handler)
    if std_out:
        handler = StreamHandler(sys.stdout)
        handler.setLevel(log_level)
        handler.setFormatter(formatter)
        logger.addHandler(handler)
    logger.setLevel(log_level)
    # Records are fully handled here; don't duplicate them via ancestors.
    logger.propagate = False
    return logger
def close(self):
    # Close via the real TimedRotatingFileHandler implementation first,
    # then drop this handler's weakref from the module-level registry of
    # reopenable handlers (presumably so it is no longer re-opened after
    # rotation/fork — confirm against _remove_from_reopenable's
    # definition elsewhere in this module).
    _TimedRotatingFileHandler.close(self)
    _remove_from_reopenable(self._wr)
def close(self):
    """Finish the HTML log: announce shutdown, write the closing footer
    markup, then close the underlying rotated file.

    Fix: use the print() function instead of the Python-2-only print
    statement so the module also parses under Python 3 (the call form is
    valid in both).
    """
    print('Html log: stop')
    # Footer must be written while the stream is still open.
    self.footer()
    TimedRotatingFileHandler.close(self)
class MultiProcessingLog(logging.Handler):
    """Multiprocess-safe logging handler.

    Records are pushed onto a ``multiprocessing.Queue``; a daemon thread
    in the owning process drains the queue and writes each record through
    a private ``TimedRotatingFileHandler``, so only one process ever
    writes the file.

    Fix: bare ``except:`` clauses narrowed to ``except Exception:`` —
    the BaseException cases (KeyboardInterrupt/SystemExit) are already
    re-raised explicitly above them.
    """

    def __init__(self, filename, when='h', interval=1, backupCount=0,
                 encoding=None, delay=False, utc=False):
        # Kept for introspection by callers.
        self.filename = filename
        logging.Handler.__init__(self)
        self._handler = TimedRotatingFileHandler(filename, when, interval,
                                                 backupCount, encoding,
                                                 delay, utc)
        self.queue = multiprocessing.Queue(-1)
        # Daemon drain thread: dies with the process.
        t = threading.Thread(target=self.receive)
        t.daemon = True
        t.start()

    def setFormatter(self, fmt):
        # Keep our formatter and the delegate's in sync.
        logging.Handler.setFormatter(self, fmt)
        self._handler.setFormatter(fmt)

    def receive(self):
        """Drain loop: emit queued records until the queue's pipe closes."""
        while True:
            try:
                record = self.queue.get()
                self._handler.emit(record)
            except (KeyboardInterrupt, SystemExit):
                raise
            except EOFError:
                break
            except Exception:
                traceback.print_exc(file=sys.stderr)

    def send(self, s):
        self.queue.put_nowait(s)

    def _format_record(self, record):
        # ensure that exc_info and args have been stringified. Removes any
        # chance of unpickleable things inside and possibly reduces
        # message size sent over the pipe.
        if record.args:
            record.msg = record.msg % record.args
            record.args = None
        if record.exc_info:
            # format() caches the traceback text on the record.
            dummy = self.format(record)
            record.exc_info = None
        return record

    def emit(self, record):
        try:
            s = self._format_record(record)
            self.send(s)
        except (KeyboardInterrupt, SystemExit):
            raise
        except Exception:
            self.handleError(record)

    def close(self):
        self._handler.close()
        logging.Handler.close(self)
class MPFileHandler(logging.Handler):
    '''
    Multiprocess-safe Rotating File Handler

    Copied from:
    http://stackoverflow.com/questions/641420/how-should-i-log-while-using-multiprocessing-in-python

    Records are queued across processes; a daemon thread in the owning
    process drains the queue into a private TimedRotatingFileHandler so
    only one process writes the file.

    Fix: bare ``except:`` clauses narrowed to ``except Exception:`` —
    KeyboardInterrupt/SystemExit are already re-raised explicitly.
    '''

    def __init__(self, filename, when='h', interval=1, backupCount=0,
                 encoding=None, delay=0, utc=0):
        '''
        See TimedRotatingFileHandler for arg docs
        '''
        logging.Handler.__init__(self)
        self._handler = TimedRotatingFileHandler(
            filename, when=when, interval=interval, backupCount=backupCount,
            encoding=encoding, delay=delay, utc=utc)
        self.queue = multiprocessing.Queue()
        # Daemon drain thread: dies with the process.
        t = threading.Thread(target=self.receive)
        t.daemon = True
        t.start()

    def setFormatter(self, fmt):
        # Keep our formatter and the delegate's in sync.
        logging.Handler.setFormatter(self, fmt)
        self._handler.setFormatter(fmt)

    def receive(self):
        '''Drain loop: emit queued records until the queue's pipe closes.'''
        while True:
            try:
                record = self.queue.get()
                self._handler.emit(record)
            except (KeyboardInterrupt, SystemExit):
                raise
            except EOFError:
                break
            except Exception:
                traceback.print_exc(file=sys.stderr)

    def send(self, s):
        self.queue.put_nowait(s)

    def _format_record(self, record):
        # ensure that exc_info and args have been stringified. Removes any
        # chance of unpickleable things inside and possibly reduces
        # message size sent over the pipe.
        if record.args:
            record.msg = record.msg % record.args
            record.args = None
        if record.exc_info:
            # format() caches the traceback text on the record.
            dummy = self.format(record)
            record.exc_info = None
        return record

    def emit(self, record):
        try:
            s = self._format_record(record)
            self.send(s)
        except (KeyboardInterrupt, SystemExit):
            raise
        except Exception:
            self.handleError(record)

    def close(self):
        self._handler.close()
        logging.Handler.close(self)
# Module-level logger for statement logging: a midnight-rotating file
# under LOG_PATH, 30 backups, exposed through thin debug/info/warning
# wrappers.
#
# Fixes: the handler is no longer closed right after being attached
# (closing it de-registers it from logging's shutdown bookkeeping), and
# the logger level is set explicitly — with no level, the effective
# level is the root's WARNING, so the exposed debug()/info() never
# emitted anything.

# Create log path folder if it does not exist
if not os.path.exists(LOG_PATH):
    os.makedirs(LOG_PATH)

# Log file name
logFileName = LOG_PATH + "StatementLog.log"

# initialize logger (named after the file it writes)
logger = logging.getLogger(logFileName)
logger.setLevel(logging.DEBUG)

# create logger policy: roll over at midnight, keep 30 days
handler = TimedRotatingFileHandler(logFileName, when='midnight', interval=1,
                                   backupCount=30, encoding=None,
                                   delay=False, utc=False)

# set log formatting
handler.setFormatter(
    logging.Formatter('%(asctime)s - %(levelname)s - %(message)s'))
logger.addHandler(handler)


# 'application' code
def debug(s):
    logger.debug(s)


def info(s):
    logger.info(s)


def warning(s):
    logger.warning(s)
class Py_Logger(object):
    """File + console logger that re-creates its handlers on every log
    call and tears them down in ``log()``.

    Writes to ``<cur_path>/../log/<log_file>`` via a midnight-rotating
    handler (3 backups); ERROR-level messages are additionally appended
    to a separate error file.

    Fixes: ``removeHandler`` was called with the handler *list* (a
    silent no-op — it expects a single handler), and stale handlers were
    discarded without being closed, leaking a file descriptor per call.
    """

    log_level = global_log_level
    log_file = global_log_file
    error_log_file = global_error_log_file

    def __init__(self):
        self.logger = logging.getLogger(__name__)
        log_dir = os.path.join(cur_path, '../log')
        log_path = os.path.join(log_dir, self.log_file)
        self.error_log_path = os.path.join(log_dir, self.error_log_file)
        if not os.path.exists(log_dir):
            os.mkdir(log_dir)
        # Reset any logger previously registered under this module's name.
        logging.Logger.manager.loggerDict.pop(__name__, None)
        # fix: close stale handlers before discarding them (the original
        # cleared the list and then passed the list itself to
        # removeHandler, which is a no-op and leaked file descriptors).
        for stale in self.logger.handlers[:]:
            self.logger.removeHandler(stale)
            stale.close()
        if not self.logger.handlers:
            self.logger.setLevel(global_log_level)
            # file handler: roll over at midnight, keep 3 backups
            self.fh = TimedRotatingFileHandler(log_path, when='MIDNIGHT',
                                               interval=1, backupCount=3)
            self.fh.setLevel(global_log_level)
            # console handler
            self.ch = logging.StreamHandler()
            self.ch.setLevel(global_log_level)
            # shared format for both handlers
            f_format = "%(asctime)s [%(levelname)s] %(message)s"
            formatter = logging.Formatter(f_format)
            self.fh.setFormatter(formatter)
            self.ch.setFormatter(formatter)
            self.logger.addHandler(self.fh)
            if self.get_console_flag():
                self.logger.addHandler(self.ch)

    def get_console_flag(self):
        """Console output is on unless console_flag.txt contains '0'."""
        console_flag = True
        console_flag_file = os.path.join(cur_path, 'console_flag.txt')
        if os.path.isfile(console_flag_file):
            with open(console_flag_file, 'r') as f:
                console_flag = False if f.read().strip() == '0' else True
        return console_flag

    # Each level method re-runs __init__ so the handlers are fresh even
    # after log() tore them down.
    def info(self, message=None):
        self.__init__()
        self.logger.info(message)

    def debug(self, message=None):
        self.__init__()
        self.logger.debug(message)

    def warning(self, message=None):
        self.__init__()
        self.logger.warning(message)

    def error(self, message=None):
        self.__init__()
        self.logger.error(message)

    def critical(self, message=None):
        self.__init__()
        self.logger.critical(message)

    def log(self, level, msg):
        if level == 'DEBUG':
            self.debug(msg)
        elif level == 'INFO':
            self.info(msg)
        elif level == 'WARNING' or level == 'WARN':
            self.warning(msg)
        elif level == 'ERROR':
            self.error(msg)
            # ERROR messages also go to a dedicated plain-text error file.
            current_time = time.strftime("%Y-%m-%d %H:%M:%S",
                                         time.localtime())
            with open(self.error_log_path, 'a') as f:
                f.write(str(current_time) + ' - ' + msg + '\n')
        elif level == 'CRITICAL':
            self.critical(msg)
        else:
            print('level should be DEBUG, INFO, WARNING, ERROR, CRITICAL.')
            self.info(msg)
        # fix: remove each handler individually (removeHandler(list) was a
        # no-op), then close them.
        for handler in (self.fh, self.ch):
            self.logger.removeHandler(handler)
        self.fh.close()
        self.ch.close()
def __init__(self, classname, log_path, when='midnight', interval=1,
             backupCount=0, level=10):
    """Configure a logger named *classname* that writes to *log_path*
    (rotating file handler) and to the console.

    Under uwsgi, the stored path is suffixed with the worker id so each
    worker reports its own file name.

    :param classname: logger name (usually the calling class).
    :param log_path: file path for the rotating log.
    :param when: rotation interval type.
    :param interval: rotation interval length.
    :param backupCount: number of rotated backups to keep.
    :param level: logger level, default DEBUG=10.

    Fixes: handlers are no longer closed right after being attached
    (closing the file handler de-registers it from logging's shutdown
    bookkeeping); the old-handler cleanup now iterates a COPY of
    ``logger.handlers`` — the original mutated the list while iterating
    it, which skips every other handler.
    """
    if uwsgi_mode:
        # Per-worker file name so concurrent workers don't fight over one
        # rotation target.
        wk_id = uwsgi.worker_id()
        self.log_path = log_path + '.{}'.format(wk_id)
    else:
        self.log_path = log_path
    # Make sure the log file's parent directory exists.
    __makesuredirexist__(os.path.dirname(log_path))
    # Named logger; propagate disabled so records stay local.
    self.logger = logging.getLogger(classname)
    self.logger.setLevel(level)
    self.logger.propagate = 0
    # File handler: everything down to DEBUG goes to the file.
    fh = TimedRotatingFileHandler(log_path, when=when, interval=interval,
                                  backupCount=backupCount, encoding='utf-8')
    fh.setLevel(logging.DEBUG)
    # Console handler: INFO and above only.
    ch = logging.StreamHandler()
    ch.setLevel(logging.INFO)
    formatter = logging.Formatter(
        '[%(asctime)s - %(name)s - %(levelname)s - %(process)d] %(message)s'
    )
    fh.setFormatter(formatter)
    ch.setFormatter(formatter)
    # fix: iterate over a copy while removing stale handlers.
    for hdlr in self.logger.handlers[:]:
        self.logger.removeHandler(hdlr)
    self.logger.addHandler(fh)
    self.logger.addHandler(ch)