def get_logger(name="main", log_file=None, log_level=logging.INFO, maxBytes=10 * 1024 * 1024, backupCount=5): logger = logging.getLogger(name) if name in logger_initialized: return logger for logger_name in logger_initialized: if name.startswith(logger_name): return logger formatter = logging.Formatter( '[%(asctime)s] %(name)s %(levelname)s: %(message)s', datefmt="%Y/%m/%d %H:%M:%S") #stream_handler = logging.StreamHandler(stream=sys.stdout) #stream_handler.setFormatter(formatter) #logger.addHandler(stream_handler) if log_file is not None: log_file_folder = os.path.split(log_file) os.makedirs(log_file_folder[0], exist_ok=True) #file_handler = logging.FileHandler(log_file, 'a') #file_handler=TimedRotatingFileHandler(filename=log_file,when=when,backupCount=3,interval=interval) file_handler = ConcurrentRotatingFileHandler(log_file, mode='a', maxBytes=maxBytes, backupCount=backupCount) file_handler.setFormatter(formatter) logger.addHandler(file_handler) logger.setLevel(log_level) logger_initialized[name] = True return logger
def __init__(self, logame, logfile):
    """
    Logger
    :param logame: name of the log object
    :param logfile: path of the generated log file
    """
    self.logger = logging.getLogger(logame)
    # Create a handler that writes to the log file and rotates it once per day.
    # backupCount is the number of rotated files to keep. The default 0 never deletes old
    # logs; with 10, the library checks on rollover whether more than 10 files exist and,
    # if so, deletes the oldest ones first.
    # file_handler = TimedRotatingFileHandler(logfile, when='D', interval=1, backupCount=30)
    # Rotate by size instead: rotated files are written next to logfile; keep only 5 backups.
    # file_handler = RotatingFileHandler(logfile, maxBytes=1024 * 10, backupCount=5)
    file_handler = ConcurrentRotatingFileHandler(logfile, maxBytes=1024 * 1024 * 10, backupCount=30)
    # file_handler = logging.FileHandler(logfile, mode='a')
    # Create another handler for console output.
    console_handler = logging.StreamHandler()
    # Define the output format of the handlers.
    formatter = logging.Formatter(
        '%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',
        datefmt='%Y-%m-%d %H:%M:%S')
    file_handler.setFormatter(formatter)
    console_handler.setFormatter(formatter)
    # Attach the handlers.
    self.logger.addHandler(file_handler)
    # self.logger.addHandler(console_handler)
    # Set the log level.
    self.logger.setLevel(logging.INFO)
def __init__(self):
    if not os.path.exists("./logs"):  # Create the logs folder automatically if it does not exist.
        os.makedirs("./logs")
    self.__path = './logs/purequant.log'
    self.__logger = logging.getLogger("purequant")
    formatter = logging.Formatter(
        fmt='[%(asctime)s] -> [%(levelname)s] : %(message)s')
    # File output rotated by time.
    self.time_rotating_file_handler = handlers.TimedRotatingFileHandler(
        filename=self.__path, when='MIDNIGHT', interval=1, backupCount=10)
    self.time_rotating_file_handler.setFormatter(formatter)
    self.time_rotating_file_handler.suffix = "%Y%m%d-%H%M%S.log"
    # Console output.
    console_formatter = colorlog.ColoredFormatter(
        fmt='%(log_color)s[%(asctime)s] -> [%(levelname)s] : %(message)s',
        datefmt='%Y-%m-%d %H:%M:%S',
        log_colors=log_colors_config)
    self.stream_handler = logging.StreamHandler()
    self.stream_handler.setFormatter(console_formatter)
    # File output rotated by size.
    self.rotatingHandler = ConcurrentRotatingFileHandler(
        self.__path, "a", 1024 * 1024, 10)  # 'a' for append mode; rotate at 1 MB; keep the 10 most recent files.
    self.rotatingHandler.setFormatter(formatter)
def setup_logging(name_, level=None, proj_home=None, attach_stdout=False):
    """
    Sets up generic logging to file with rotating files on disk

    :param: name_: the name of the logfile (not the destination!)
    :param: level: the level of the logging DEBUG, INFO, WARN
    :param: proj_home: optional, starting dir in which we'll check for
        (and create) 'logs' folder and set the logger there
    :return: logging instance
    """
    if level is None:
        config = load_config(extra_frames=1, proj_home=proj_home, app_name=name_)
        level = config.get('LOGGING_LEVEL', 'INFO')

    level = getattr(logging, level)

    # formatter = logging.Formatter(fmt=logfmt, datefmt=datefmt)
    # formatter = MultilineMessagesFormatter(fmt=logfmt, datefmt=datefmt)
    formatter = get_json_formatter()
    formatter.multiline_marker = ''
    formatter.multiline_fmt = ' %(message)s'
    formatter.converter = time.gmtime
    logging_instance = logging.getLogger(name_)

    if proj_home:
        proj_home = os.path.abspath(proj_home)
        fn_path = os.path.join(proj_home, 'logs')
    else:
        fn_path = os.path.join(_get_proj_home(), 'logs')

    if not os.path.exists(fn_path):
        os.makedirs(fn_path)

    fn = os.path.join(fn_path, '{0}.log'.format(name_.split('.log')[0]))
    rfh = ConcurrentRotatingFileHandler(filename=fn, maxBytes=10485760,
                                        backupCount=10, mode='a',
                                        encoding='UTF-8')  # 10MB file
    rfh.setFormatter(formatter)
    logging_instance.handlers = []
    logging_instance.addHandler(rfh)
    logging_instance.setLevel(level)

    if attach_stdout:
        stdout = logging.StreamHandler(sys.stdout)
        stdout.formatter = get_json_formatter()
        logging_instance.addHandler(stdout)

    # Do not propagate to the parent logger to avoid double logging with different formatters
    logging_instance.propagate = False

    return logging_instance
def init_log():
    global logger
    log_handler = ConcurrentRotatingFileHandler('attack.log', maxBytes=10000, backupCount=3)
    log_format = logging.Formatter('%(asctime)s %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
    log_handler.setFormatter(log_format)
    logger = logging.getLogger("Attack_log")
    logger.setLevel(logging.INFO)
    logger.addHandler(log_handler)
def __init_handler(self):
    self.__current_rotating_file_handler = ConcurrentRotatingFileHandler(filename=self.__path,
                                                                          mode='a',
                                                                          maxBytes=self.__max_bytes,
                                                                          backupCount=self.__backup_count,
                                                                          encoding="utf-8",
                                                                          use_gzip=True)
    self.__set_formatter()
    self.__set_handler()
def __init__(self):
    # Initialize the logging module.
    self.logger = logging.getLogger('main')
    self.logfile = "ll-{0}-{1}_{2}_arbitrage.log".format(
        self.base_cur, self.mid_cur, self.quote_cur)
    self.rotateHandler = ConcurrentRotatingFileHandler(
        self.logfile, "a", 2 * 1024 * 1024, 100)
    self.formatter = logging.Formatter(
        "%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s"
    )
    self.rotateHandler.setFormatter(self.formatter)
    self.logger.addHandler(self.rotateHandler)
    # Log level, configurable.
    self.logger.setLevel(logging.DEBUG)

    self.tax = 0.001  # 0.3%
    # self.tax = 0  # 0.3%

    # Account balances of the three currencies.
    self.bal_mid_cur = 0
    self.bal_quote_cur = 0
    self.bal_base_cur = 0

    # Order-book depth of the three trading pairs.
    self.quote_mid_market_sell = dict()
    self.quote_mid_market_buy = dict()
    self.base_quote_market_sell = dict()
    self.base_quote_market_buy = dict()
    self.base_mid_market_sell = dict()
    self.base_mid_market_buy = dict()

    # Top-of-book data from the depth of the three trading pairs:
    # a1/aq1 is the quote--mid depth
    # b1/bq1 is the base--quote depth
    # c1/cq1 is the base--mid depth
    self.a1 = 0
    self.aq1 = 0
    self.a2 = 0
    self.aq2 = 0
    self.b1 = 0
    self.bq1 = 0
    self.b2 = 0
    self.bq2 = 0
    self.c1 = 0
    self.cq1 = 0
    self.c2 = 0
    self.cq2 = 0

    # BigONE API client.
    self.client = Client(self.big_one_api_key)
    # Fetch all of the user's account balances on startup.
    self.get_user_balance()
def init_logging(logFilePath="test.log", level=logging.DEBUG):
    #################################################################################################
    logging.basicConfig(level=level)
    #################################################################################################

    #################################################################################################
    # Create a rotating file handler at the given level and attach it to the root logger.
    Rthandler = LogHandler(logFilePath, maxBytes=10 * 1024 * 1024, backupCount=5)
    Rthandler.setLevel(level)
    formatter = logging.Formatter('%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s')
    Rthandler.setFormatter(formatter)
    logging.root.addHandler(Rthandler)
def initialize_logging(service_name: str, logging_config: Dict, root_path: Path):
    log_path = path_from_root(
        root_path, logging_config.get("log_filename", "log/debug.log"))
    log_date_format = "%Y-%m-%dT%H:%M:%S"
    mkdir(str(log_path.parent))
    file_name_length = 33 - len(service_name)
    if logging_config["log_stdout"]:
        handler = colorlog.StreamHandler()
        handler.setFormatter(
            colorlog.ColoredFormatter(
                f"%(asctime)s.%(msecs)03d {service_name} %(name)-{file_name_length}s: "
                f"%(log_color)s%(levelname)-8s%(reset)s %(message)s",
                datefmt=log_date_format,
                reset=True,
            ))
        logger = colorlog.getLogger()
        logger.addHandler(handler)
    else:
        logger = logging.getLogger()
        maxrotation = logging_config.get("log_maxfilesrotation", 7)
        handler = ConcurrentRotatingFileHandler(log_path,
                                                "a",
                                                maxBytes=20 * 1024 * 1024,
                                                backupCount=maxrotation)
        handler.setFormatter(
            logging.Formatter(
                fmt=f"%(asctime)s.%(msecs)03d {service_name} %(name)-{file_name_length}s: %(levelname)-8s %(message)s",
                datefmt=log_date_format,
            ))
        logger.addHandler(handler)

    if "log_level" in logging_config:
        if logging_config["log_level"] == "CRITICAL":
            logger.setLevel(logging.CRITICAL)
        elif logging_config["log_level"] == "ERROR":
            logger.setLevel(logging.ERROR)
        elif logging_config["log_level"] == "WARNING":
            logger.setLevel(logging.WARNING)
        elif logging_config["log_level"] == "INFO":
            logger.setLevel(logging.INFO)
        elif logging_config["log_level"] == "DEBUG":
            logger.setLevel(logging.DEBUG)
            logging.getLogger("aiosqlite").setLevel(logging.INFO)  # Too much logging on debug level
            logging.getLogger("websockets").setLevel(logging.INFO)  # Too much logging on debug level
        else:
            logger.setLevel(logging.INFO)
    else:
        logger.setLevel(logging.INFO)
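# A hedged example of the logging_config dict consumed by initialize_logging above; the
# keys are taken from the function body, the values and the service name are illustrative.
example_logging_config = {
    "log_filename": "log/debug.log",
    "log_stdout": False,
    "log_maxfilesrotation": 7,
    "log_level": "INFO",
}
# initialize_logging("wallet", example_logging_config, root_path=Path.home() / ".myapp")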
def initLogging(file):
    logger = logging.getLogger(file)
    logger.setLevel(logging.DEBUG)
    rht = ConcurrentRotatingFileHandler(file, 'a', encoding='utf-8')
    fmt = logging.Formatter(
        "%(asctime)s - %(pathname)s - %(funcName)s - %(lineno)s - %(levelname)s : %(message)s",
        "%Y-%m-%d %H:%M:%S")
    rht.setFormatter(fmt)
    logger.addHandler(rht)
    return logger
def setup_logger(log_path, is_debug, logger):
    """
    Sets up a rotating file handler on the passed-in logger; falls back to the
    standard RotatingFileHandler if concurrent_log_handler is not installed.
    """
    try:
        from concurrent_log_handler import ConcurrentRotatingFileHandler as HandlerClass
    except ImportError:
        from logging.handlers import RotatingFileHandler as HandlerClass
    handler = HandlerClass(log_path.format(pid=os.getpid()), maxBytes=8000000, backupCount=10)
    handler.setFormatter(logging.Formatter(
        fmt='%(asctime)s [%(name)s] %(levelname)s: %(message)s'))
    logger.addHandler(handler)
    logger.setLevel(logging.DEBUG if is_debug else logging.INFO)
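# A minimal usage sketch for setup_logger above, assuming logging and os are imported;
# the "{pid}" placeholder in the path template is expanded via str.format inside the
# function, and the path and logger name here are illustrative only.
import logging
import os

app_logger = logging.getLogger("app")
setup_logger("/var/log/app/app.{pid}.log", is_debug=False, logger=app_logger)
app_logger.info("logger configured for pid %s", os.getpid())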
def get_logger(name: str = "general") -> logging.Logger:
    # ensure log directory exists. if not, create it
    if not os.path.exists("./data/logs"):
        os.mkdir("./data/logs")
    log = logging.getLogger(name)
    log.setLevel(logging.INFO)
    h = ConcurrentRotatingFileHandler(os.path.abspath("data/logs/nparse.log"),
                                      "a", 512 * 1000, 3)
    f = logging.Formatter(
        "%(asctime)s - %(name)s - %(levelname)s - %(message)s")
    h.setFormatter(f)
    log.addHandler(h)
    return log
def __init__(self):
    # Create the log collector (logger) named after the config entry.
    self.logger = logging.getLogger(do_config("log", "name"))
    # Set the logger's level.
    self.logger.setLevel(do_config("log", "content_level"))
    # Guard against attaching handlers twice for the same test case.
    if not self.logger.handlers:
        # Console output handler.
        consle_handle = logging.StreamHandler()
        file_handle = ConcurrentRotatingFileHandler(
            filename=os.path.join(logDir, do_config("log", "log_name")),
            mode="a",
            maxBytes=do_config("log", "Maxbytes"),
            backupCount=do_config("log", "count"),
            encoding=do_config("log", "encoding"))
        # Set the output level of each handler.
        consle_handle.setLevel(do_config("log", "content_level"))
        file_handle.setLevel(do_config("log", "content_level"))
        file_handle.setLevel('ERROR')
        # Define the log output format.
        consle_format = logging.Formatter(do_config("log", "clear"))
        file_format = logging.Formatter(do_config("log", "clear"))
        consle_handle.setFormatter(consle_format)
        file_handle.setFormatter(file_format)
        self.logger.addHandler(consle_handle)
        self.logger.addHandler(file_handle)
def _open_lockfile(self):
    ConcurrentRotatingFileHandler._open_lockfile(self)
    if self.baseFilename.endswith(".log"):
        lock_file = self.baseFilename[:-4]
    else:
        lock_file = self.baseFilename
    lock_file += ".lock"
    lock_path, lock_name = os.path.split(lock_file)
    # hide the file on Unix and generally from file completion
    lock_name = ".__" + lock_name
    lock_file = os.path.join(lock_path, lock_name)
    self._do_chown_and_chmod(lock_file)
def logging_setup(conf):
    logger = logging.getLogger()
    logger.propagate = False

    default = '%(asctime)s - %(process)d - %(levelname)s - %(session)s - %(module)s - %(message)s'
    log_format = conf.get('format', default)
    formatter = logging.Formatter(log_format)

    log_level = conf.get('level', 'INFO')
    logger.setLevel(log_level)

    logfile = conf.get('logfile')
    if logfile:
        logsize = conf.get('logsize', 512 * 1024)
        retain = conf.get('logretain', 5)
        handler = ConcurrentRotatingFileHandler(logfile, 'a', logsize, retain)
    else:
        handler = logging.StreamHandler()

    handler.setFormatter(formatter)
    handler.setLevel(log_level)
    logger.addHandler(handler)

    logger.addFilter(SessionFilter())
    handler.addFilter(SessionFilter())

    logging.getLogger('requests').setLevel('WARN')
    logging.getLogger('urllib3').setLevel('WARN')
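# A hedged example of the conf dict read by logging_setup above; the keys come from the
# function body, the values are illustrative. Omitting 'logfile' falls back to a
# StreamHandler instead of the rotating file handler.
example_conf = {
    'format': '%(asctime)s - %(process)d - %(levelname)s - %(session)s - %(module)s - %(message)s',
    'level': 'INFO',
    'logfile': '/var/log/service.log',
    'logsize': 512 * 1024,
    'logretain': 5,
}
# logging_setup(example_conf)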
def init_config(self, name=None):
    """
    Initialize the config of each log level.

    :param name: prefix name of the log files.
        For example, if name = 'web', the log files will look like 'z_web_xxx.log',
        where xxx is the name of each level.
        Note that a "z" prefix is used when no name is passed into Log, so the log
        files are listed after all the code files in the project.
    """
    logging.Formatter.converter = self.opti_time
    base_format = logging.Formatter(
        '【%(levelname)s】 %(asctime)s [%(process)d] \n%(message)s',
        datefmt='%Y-%m-%d %H:%M:%S')
    # logging.Formatter.converter = customTime
    if name not in self.logs:
        # create logger
        logger = logging.getLogger(str(self.log_mapping[name]))
        logger.setLevel(self.log_mapping[name])
        # create handler
        log_path = self.log_root + '/' + self.public_name + '_' + name + '.log'
        base_handler = RotatingFileHandler(
            log_path,
            maxBytes=self.log_config[name]['maxBytes'] * 1024 * 1024,
            backupCount=self.log_config[name]['backupCount'])
        # define output format
        base_handler.setFormatter(base_format)
        base_handler.setLevel(self.log_mapping[name])
        # add handler
        logger.addHandler(base_handler)
        # critical level also gets a console handler
        if name == 'critical':
            console_handler = logging.StreamHandler()
            console_handler.setLevel(self.log_mapping[name])
            console_format = logging.Formatter(
                '【%(levelname)s】 %(asctime)s [%(process)d] \n%(message)s',
                datefmt='%Y-%m-%d %H:%M:%S')
            console_handler.setFormatter(console_format)
            logger.addHandler(console_handler)
        self.logs.update({name: logger})
def __add_file_handler(self):
    """
    Write log records to a log file.
    """
    if not os.path.exists(self._log_path):
        os.makedirs(self._log_path)
    log_file = os.path.join(self._log_path, self._log_filename)
    rotate_file_handler = None
    if os_name == 'nt':
        # On Windows, use ConcurrentRotatingFileHandler.
        rotate_file_handler = ConcurrentRotatingFileHandler(
            log_file,
            maxBytes=self._log_file_size * 1024 * 1024,
            backupCount=3,
            encoding="utf-8")
    if os_name == 'posix':
        # On Linux, ConcurrentRotatingFileHandler can also be used; it is process-safe.
        rotate_file_handler = ConcurrentRotatingFileHandler(
            log_file,
            maxBytes=self._log_file_size * 1024 * 1024,
            backupCount=3,
            encoding="utf-8")
    rotate_file_handler.setLevel(self._logger_level)
    rotate_file_handler.setFormatter(self._formatter)
    self.logger.addHandler(rotate_file_handler)
def setup_log(environment):
    """Configure logging according to the environment."""
    # Set the logging level (e.g. DEBUG for development).
    logging.basicConfig(level=config[environment].LOG_LEVEL)
    # Create the log handler: the path where logs are saved, the maximum size of each
    # log file, and the maximum number of log files to keep.
    file_log_handler = ConcurrentRotatingFileHandler("logs/log", maxBytes=1024 * 1024, backupCount=10)
    # Record format: time, file name, line number, level, message.
    formatter = logging.Formatter(
        '%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s'
    )
    # Set the record format on the handler.
    file_log_handler.setFormatter(formatter)
    # Attach the handler to the global (root) logger.
    logging.getLogger().addHandler(file_log_handler)
def init_log_system():
    root = logging.getLogger()

    handler = logging.StreamHandler(sys.stdout)
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    handler.setFormatter(formatter)
    root.addHandler(handler)

    path = Path('log/camera.log').absolute()
    path.parent.mkdir(exist_ok=True)
    # Rotate log after reaching 512K, keep 5 old copies.
    rotate_handler = ConcurrentRotatingFileHandler(str(path), "a", 512 * 1024, 5)
    rotate_handler.setFormatter(formatter)
    root.addHandler(rotate_handler)

    root.setLevel(logging.INFO)
    root.info("Logging system initialized, kept in file {}...".format(str(path)))
def __initialize(self):
    if config.level == "debug":
        level = logging.DEBUG
    elif config.level == "info":
        level = logging.INFO
    elif config.level == "warning":
        level = logging.WARNING
    elif config.level == "error":
        level = logging.ERROR
    elif config.level == "critical":
        level = logging.CRITICAL
    else:
        level = logging.DEBUG
    self.__logger.setLevel(level=level)
    formatter = logging.Formatter(
        fmt='[%(asctime)s] -> [%(levelname)s] : %(message)s')
    # File output rotated by time.
    time_rotating_file_handler = handlers.TimedRotatingFileHandler(
        filename=self.__path, when='MIDNIGHT', interval=1, backupCount=1000)
    time_rotating_file_handler.setFormatter(formatter)
    time_rotating_file_handler.suffix = "%Y%m%d-%H%M%S.log"
    # Console output.
    console_formatter = colorlog.ColoredFormatter(
        fmt='%(log_color)s[%(asctime)s] -> [%(levelname)s] : %(message)s',
        datefmt='%Y-%m-%d %H:%M:%S',
        log_colors=log_colors_config)
    stream_handler = logging.StreamHandler()
    stream_handler.setFormatter(console_formatter)
    # File output rotated by size.
    rotatingHandler = ConcurrentRotatingFileHandler(
        self.__path, "a", 1024 * 1024, 1000)  # 'a' for append mode; rotate at 1 MB; keep the 1000 most recent files.
    rotatingHandler.setFormatter(formatter)
    if config.handler == "time":
        if not self.__logger.handlers:
            self.__logger.addHandler(time_rotating_file_handler)
    elif config.handler == "file":
        if not self.__logger.handlers:
            self.__logger.addHandler(rotatingHandler)
    else:
        if not self.__logger.handlers:
            self.__logger.addHandler(stream_handler)
def getLogHandler(self, fn):
    """ Override this method if you want to test a different logging handler class. """
    return ConcurrentRotatingFileHandler(
        fn, 'a', self.rotateSize, self.rotateCount,
        delay=self.logger_delay, encoding='utf-8',
        debug=self.debug, use_gzip=self.use_gzip)
def __init__(self, filename, mode='a', maxBytes=0, backupCount=0,
             encoding=None, debug=False, delay=0, use_gzip=False,
             owner=None, chmod=None):
    self.owner = owner
    self.chmod = chmod
    # super(ConcurrentRotatingFileHandler, self).__init__(filename, mode, maxBytes, backupCount, encoding, debug, delay, use_gzip)
    ConcurrentRotatingFileHandler.__init__(self, filename, mode, maxBytes,
                                           backupCount, encoding, debug,
                                           delay, use_gzip)
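# A hedged usage sketch: assuming the __init__ above and the _open_lockfile override earlier
# belong to one subclass of ConcurrentRotatingFileHandler (the class name
# OwnedConcurrentRotatingFileHandler, the owner pair, and the chmod value below are
# illustrative assumptions, not a published API), it attaches to a logger like any handler.
import logging

handler = OwnedConcurrentRotatingFileHandler(
    "app.log", mode="a", maxBytes=5 * 1024 * 1024, backupCount=5,
    owner=("appuser", "appgroup"), chmod=0o640)
logging.getLogger("app").addHandler(handler)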
def configure_timeline(config: dict = None):
    """Initialize timeline event logger.

    Sets up pipeline event logger once to be reused by pipelines in the
    current runtime. Should be called before any pipeline starts. A good
    place to initialize it is around the time when the root logger is
    initialized.

    :Parameters:
    ------------
    config : dict
        A dictionary of configuration parameters.
    """
    if config is None:
        config = {}

    log_filename = config.get('event_log', None)
    if not log_filename:
        log_filename = 'timeline-event-log.yaml'

    log_directory = os.path.dirname(log_filename)
    with pathlib.Path(log_directory) as log_dir:
        log_dir.mkdir(parents=True, exist_ok=True)

    log.debug(
        "Timeline event log messages directed to {}".format(log_filename))

    event_log = logging.getLogger(TIMELINE_EVENT_LOGGER_NAME)
    event_log.setLevel(logging.INFO)

    # Use rotating files as log message handler
    handler = ConcurrentRotatingFileHandler(
        log_filename,
        # each event file will keep up to 100K data
        maxBytes=100 * 1024,
        # 100 backup files will be kept. Older will be erased.
        backupCount=100)

    fmt = PipelineEventFormatter()
    handler.setFormatter(fmt)

    # remove any other handlers that may be assigned previously
    # and could cause unexpected log collisions
    event_log.handlers = []
    # add custom event handler
    event_log.addHandler(handler)
def __init__(self):
    try:
        logs_dir = get_config_values("python-log", "dir")
    except:
        logs_dir = os.path.join(get_project_path(), 'logs')
    try:
        f_levl = get_config_values("python-log", "f_level")
    except:
        f_levl = 'DEBUG'
    try:
        c_levl = get_config_values("python-log", "ch_level")
    except:
        c_levl = 'DEBUG'
    if os.path.exists(logs_dir) and os.path.isdir(logs_dir):
        pass
    else:
        os.mkdir(logs_dir)
    # Once a single log file reaches the size limit, start a new one and rename the old file.
    maxFileSize = int(get_config_values("python-log", "maxBytes"))
    # Once the total number of log files reaches the limit, delete the oldest one.
    backUp = int(get_config_values("python-log", "backupCount"))
    # The console output level is configured separately from the file output level.
    formatter = logging.Formatter(
        '%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s'
    )
    self.file_logger = logging.getLogger("file_logger")
    self.file_logger.setLevel(eval('logging.' + f_levl))
    console = logging.StreamHandler()
    console.setLevel(eval('logging.' + c_levl))
    console.setFormatter(formatter)
    # The console will also show logging output from outside this project.
    logging.getLogger("").addHandler(console)
    # Write debug and info messages to the debug log file.
    debug_file_name = os.path.join(logs_dir, 'ai_writer.log')
    debug_rotatingFileHandler = ConcurrentRotatingFileHandler(
        debug_file_name, 'a', maxFileSize, backUp, 'utf-8')
    debug_rotatingFileHandler.setFormatter(formatter)
    self.file_logger.addHandler(debug_rotatingFileHandler)
    print("Logger initialization complete")
def setup_logger(conf):
    """
    Sets up file-based rotating logger. All the parameters are extracted
    from conf argument:
    path: /kontext/global/log_path
    maximum file size (optional, default is 8MB): /kontext/global/log_file_size
    number of backed-up files (optional, default is 10): /kontext/global/log_num_files
    """
    try:
        from concurrent_log_handler import ConcurrentRotatingFileHandler as HandlerClass
    except ImportError:
        from logging.handlers import RotatingFileHandler as HandlerClass
    handler = HandlerClass(conf.get('logging', 'path').format(pid=os.getpid()),
                           maxBytes=conf.get_int('logging', 'file_size', 8000000),
                           backupCount=conf.get_int('logging', 'num_files', 10))
    handler.setFormatter(logging.Formatter(
        fmt='%(asctime)s [%(name)s] %(levelname)s: %(message)s'))
    logger.addHandler(handler)
    logger.setLevel(logging.INFO if not settings.is_debug_mode() else logging.DEBUG)
def get_logger(self):
    log = logging.getLogger(self.name)
    formatter = logging.Formatter(self.log_fmt)
    # Custom handler; maxBytes=1024 * 1024 * 100 = 100M
    if not self.filename:
        # If no filename was given, skip rotation
        # and do not write to a file.
        pass
    else:
        # Check whether the file exists.
        if not self.exists():
            self.touch()
        rotate_handler = ConcurrentRotatingFileHandler(
            filename=self.filename,
            backupCount=self.backup_count,
            maxBytes=self.max_bytes)
        rotate_handler.setFormatter(formatter)
        log.addHandler(rotate_handler)
    log.addFilter(levelFilter())
    return log
def __init__(self, isconsole=True):
    self.logger = logging.getLogger(do_config('log', 'logger_name'))  # 1. Name the logger.
    self.logger.setLevel(logging.DEBUG)  # 2. Set the logger's level.

    file_log_dir = os.path.join(LOG_DIR, do_config("log", "log_file_name"))  # Log file path.
    # RotatingFileHandler has a problem here: PermissionError: [WinError 32]
    # another process is using the log file.
    # Workaround 1: instantiate a separate logger object in every module.
    # Workaround 2: install and import the third-party module: pip install concurrent-log-handler
    # file_handle = RotatingFileHandler(file_log_dir,
    #                                   maxBytes=do_config('log', 'maxBytes'),
    #                                   backupCount=do_config('log', 'backupCount'),
    #                                   encoding='utf8')  # 3. File handler with log rollover.
    file_handle = ConcurrentRotatingFileHandler(
        file_log_dir,
        maxBytes=do_config('log', 'maxBytes'),
        backupCount=do_config('log', 'backupCount'),
        encoding='utf8')  # 3. File handler with log rollover.
    file_handle.setLevel(do_config(
        'log', 'file_handle_level'))  # 4. Set the file handler's level.
    formatter = logging.Formatter(do_config('log', 'formatter'))  # 5. Define the log format.
    file_handle.setFormatter(formatter)  # 6. Apply the format to the file handler.
    self.logger.addHandler(file_handle)  # 7. Attach the handler to the logger.

    if isinstance(isconsole, bool):
        if isconsole:
            console_handle = logging.StreamHandler()  # Console handler.
            console_handle.setLevel(
                do_config('log', 'console_handle_level'))  # Set the console handler's level.
            console_handle.setFormatter(formatter)  # Apply the format to the console handler.
            self.logger.addHandler(console_handle)  # Attach the console handler to the logger.
    else:
        raise ValueError("isconsole must be a bool")
def _add_handler(self):
    """
    Add output streams for the log, including console output and file output
    :return: None
    """
    # add console handler
    console_handler = logging.StreamHandler()
    console_handler.setLevel(CONFIG.LOGGER_LEVEL)
    console_handler.setFormatter(self._log_format)
    self._logger.addHandler(console_handler)

    # create log path
    if not os.path.exists(CONFIG.LOGGER_PATH):
        os.mkdir(CONFIG.LOGGER_PATH, mode=0o644)
    log_file = os.path.join(CONFIG.LOGGER_PATH, CONFIG.LOGGER_FILE_NAME)

    # add file handler
    file_handler = ConcurrentRotatingFileHandler(
        filename=log_file,
        mode='a',
        maxBytes=CONFIG.LOGGER_BUFFER,
        backupCount=CONFIG.LOGGER_FILE_COUNT,
        encoding="utf-8",
        use_gzip=True)
    file_handler.setLevel(CONFIG.LOGGER_LEVEL)
    file_handler.setFormatter(self._log_format)
    self._logger.addHandler(file_handler)
def __init__(self):
    self.case_logger = logging.getLogger(do_config('log', 'logger_name'))
    self.case_logger.setLevel(do_config('log', 'logger_level'))
    console_output = logging.StreamHandler()
    # file_output = RotatingFileHandler(filename=os.path.join(LOG_DIR, do_config('log', 'logger_name')),
    #                                   maxBytes=do_config('log', 'maxBytes'),
    #                                   backupCount=do_config('log', 'backupCount'),
    #                                   encoding='utf8')
    file_output = ConcurrentRotatingFileHandler(
        filename=os.path.join(LOG_DIR, do_config('log', 'logger_name')),
        maxBytes=do_config('log', 'maxBytes'),
        backupCount=do_config('log', 'backupCount'),
        encoding='utf8')
    console_output.setLevel(do_config('log', 'console_level'))
    file_output.setLevel(do_config('log', 'file_level'))
    simple_formatter = logging.Formatter(
        do_config('log', 'simple_formatter'))
    verbose_formatter = logging.Formatter(
        do_config('log', 'verbose_formatter'))
    console_output.setFormatter(simple_formatter)
    file_output.setFormatter(verbose_formatter)
    self.case_logger.addHandler(console_output)
    self.case_logger.addHandler(file_output)
def init_logger(app):
    """
    Initialize logging (size-based rotation).
    :param app: Flask
    :return: None
    """
    log_maxsize = app.config.get('LOG_MAXSIZE', 100)
    log_backup = app.config.get('LOG_BACKUP', 20)
    log_level = app.config.get('LOG_LEVEL', logging.INFO)
    app_log = app.config.get('LOG_FILE')
    if not app_log:
        app_log = os.path.join(os.path.dirname(app.root_path), 'logs', 'app.log')
    fh = ConcurrentRotatingFileHandler(app_log,
                                       maxBytes=log_maxsize * 1024 * 1024,
                                       backupCount=log_backup,
                                       encoding='utf-8',
                                       use_gzip=True)
    fh.setLevel(log_level)
    fh.setFormatter(
        logging.Formatter(
            app.config.get(
                'LOG_FORMAT',
                '%(asctime)s %(levelname)s %(module)s.%(funcName)s: %(message)s'
            )))
    app.logger.addHandler(fh)
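# A minimal usage sketch for the Flask helper above, assuming it sits in a module that
# already imports logging, os, and ConcurrentRotatingFileHandler; the app and the config
# values below are illustrative only.
from flask import Flask

app = Flask(__name__)
app.config['LOG_MAXSIZE'] = 50   # MB per log file
app.config['LOG_BACKUP'] = 10    # number of rotated files to keep
init_logger(app)
app.logger.info("application logging configured")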