def get_logger(ENV, BASE_DIR):
    # temporary
    is_email_script = pathlib.Path(sys.argv[0]).name == "email_db_report.py"
    # use Airbrake in production
    if ENV == "production" and not is_email_script:
        log = airbrake.getLogger()
        log.setLevel(logging.INFO)
    else:
        log = logging.getLogger(__name__)
        log.setLevel(logging.DEBUG)
    # print all debug and higher to STDOUT
    # if the environment is development
    if ENV == "development":
        stdoutHandler = logging.StreamHandler(sys.stdout)
        stdoutHandler.setLevel(logging.DEBUG)
        log.addHandler(stdoutHandler)
    logfile = os.path.abspath(BASE_DIR + "/logs/CivilServant_" + ENV + ".log")
    print("Logging to " + BASE_DIR + "/logs/CivilServant_" + ENV + ".log")
    formatter = logging.Formatter('%(asctime)s - %(name)s({env}) - %(levelname)s - %(message)s'.format(env=ENV))
    rotateHandler = ConcurrentRotatingFileHandler(logfile, "a", 32 * 1000 * 1024, 5)
    rotateHandler.setLevel(logging.DEBUG)
    rotateHandler.setFormatter(formatter)
    log.addHandler(rotateHandler)
    return log

def build_logger_env(worker_name, log_level=logging.NOTSET):
    """Normalize log output."""
    logdir_path = os.path.join(os.path.dirname(__file__), os.pardir, 'log')
    if not os.path.exists(logdir_path):
        os.makedirs(logdir_path)
    logger = __loggers.get(worker_name)
    if logger:
        return logger
    logger = logging.getLogger(worker_name)
    logger.propagate = 0  # keep parent loggers from also emitting these records
    logger.setLevel(log_level)
    ch = ConcurrentRotatingFileHandler(os.path.join(logdir_path, '%s.log' % worker_name), 'a', 50 * 1024 * 1024, 5)
    ch.setLevel(log_level)
    formatter = logging.Formatter('%(asctime)s %(levelname)s %(module)s.%(funcName)s[%(lineno)d] MSG:%(message)s')
    ch.setFormatter(formatter)
    logger.addHandler(ch)
    __loggers.setdefault(worker_name, logger)
    return logger

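A minimal usage sketch for build_logger_env, assuming the module-level __loggers cache dict it reads and writes (not shown in the snippet above):

# Hypothetical module-level cache assumed by build_logger_env.
__loggers = {}

log_a = build_logger_env("worker", logging.INFO)
log_b = build_logger_env("worker")  # second call returns the cached logger
assert log_a is log_b               # so repeated calls do not stack duplicate handlers
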
def __init__(self, module=''):
    today_datetime = dt.now()
    today_date = dt.date(today_datetime)
    string_date = str(today_date)
    if module == '':
        file_name = LOGGER_FILE + string_date
    else:
        file_name = LOGGER_FILE + module + '-' + string_date
    logger = logging.getLogger(file_name)  # log_namespace can be replaced with your namespace
    logger.setLevel(logging.DEBUG)
    if not logger.handlers:
        # usually I keep the LOGGING_DIR defined in some global settings file
        file_name = os.path.join(LOGGING_DIR, '%s.log' % file_name)
        handler = ConcurrentRotatingFileHandler(file_name)
        formatter = logging.Formatter('%(asctime)s %(levelname)s:%(module)s:%(message)s')
        handler.setFormatter(formatter)
        handler.setLevel(logging.DEBUG)
        logger.addHandler(handler)
    self._logger = logger

def get_logger(ENV, BASE_DIR):
    # use Airbrake in production
    if ENV == "production":
        log = airbrake.getLogger()
        log.setLevel(logging.INFO)
    else:
        log = logging.getLogger(__name__)
        log.setLevel(logging.DEBUG)
    # print all debug and higher to STDOUT
    # if the environment is development
    if ENV == "development":
        stdoutHandler = logging.StreamHandler(sys.stdout)
        stdoutHandler.setLevel(logging.DEBUG)
        log.addHandler(stdoutHandler)
    logfile = os.path.abspath(BASE_DIR + "/logs/CivilServant_" + ENV + ".log")
    print("Logging to " + BASE_DIR + "/logs/CivilServant_" + ENV + ".log")
    formatter = logging.Formatter('%(asctime)s - %(name)s({env}) - %(levelname)s - %(message)s'.format(env=ENV))
    rotateHandler = ConcurrentRotatingFileHandler(logfile, "a", 32 * 1000 * 1024, 5)
    rotateHandler.setLevel(logging.DEBUG)
    rotateHandler.setFormatter(formatter)
    log.addHandler(rotateHandler)
    return log

def init_spider_log(self, crawler):
    log_file = crawler.settings.get('LOG_FILE')
    if not log_file:
        self.logger.info(f"{self.name} can't find LOG_FILE in settings!")
        return
    import logging
    from cloghandler import ConcurrentRotatingFileHandler
    from scrapy.utils.log import configure_logging

    # Disable default Scrapy log settings.
    configure_logging(install_root_handler=False)

    # Define your logging settings.
    log_format = "[%(asctime)s %(filename)s %(funcName)s line:%(lineno)d %(levelname)s]: %(message)s"
    logging.basicConfig(format=log_format)
    rotate_handler = ConcurrentRotatingFileHandler(log_file, mode="a", maxBytes=1 * 1024 * 1024 * 1024, backupCount=2)
    rotate_handler.setFormatter(logging.Formatter(log_format))
    rotate_handler.setLevel(crawler.settings.get('LOG_LEVEL'))
    root_logger = logging.getLogger()
    root_logger.addHandler(rotate_handler)

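A sketch of how a method like init_spider_log could be wired into a spider; the from_crawler hook shown here is an assumption about the call site, not part of the snippet above:

import scrapy

class ExampleSpider(scrapy.Spider):
    name = "example"

    @classmethod
    def from_crawler(cls, crawler, *args, **kwargs):
        spider = super().from_crawler(crawler, *args, **kwargs)
        # attach the rotating file handler before the spider starts crawling
        spider.init_spider_log(crawler)
        return spider
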
def get_logger(ENV, BASE_DIR):
    # use Airbrake in production
    is_email_script = pathlib.Path(sys.argv[0]).name == "email_db_report.py"
    if ENV == "production" and not is_email_script:
        log = airbrake.getLogger()
        log.setLevel(logging.INFO)
    else:
        log = logging.getLogger(__name__)
        log.setLevel(logging.DEBUG)
    # Return the logger as-is if it has already been initialized
    handlers = [h for h in log.handlers if type(h) != airbrake.AirbrakeHandler]
    if len(handlers) > 0:
        return log
    # print all debug and higher to STDOUT
    # if the environment is development
    if ENV == "development":
        stdoutHandler = logging.StreamHandler(sys.stdout)
        stdoutHandler.setLevel(logging.DEBUG)
        log.addHandler(stdoutHandler)
    logfile = os.path.abspath(BASE_DIR + "/logs/CivilServant_" + ENV + ".log")
    print("Logging to " + BASE_DIR + "/logs/CivilServant_" + ENV + ".log")
    formatter = logging.Formatter('%(asctime)s - %(name)s({env}) - %(levelname)s - %(message)s'.format(env=ENV))
    rotateHandler = ConcurrentRotatingFileHandler(logfile, "a", 32 * 1000 * 1024, 5)
    rotateHandler.setLevel(logging.DEBUG)
    rotateHandler.setFormatter(formatter)
    log.addHandler(rotateHandler)
    return log

def get_json_logger(logger_name, log_size=512 * 1024 * 1024, backupCount=2):
    '''
    :param logger_name:
    :param log_size: default 512M
    :param backupCount:
    :return:
    '''
    formatter = CustomJsonFormatter("%(filename)s %(lineno)d %(funcName)s %(message)s")
    logger = logging.getLogger(logger_name)
    logger.setLevel(logging.INFO)
    log_name = os.path.join(LOG_PATH, "{}.log".format(logger_name))
    # rotate_handler = handlers.TimedRotatingFileHandler(
    #     filename=log_name,
    #     when=when,
    #     backupCount=backupCount,
    #     encoding='utf-8'
    # )
    # each file holds at most 512M
    rotate_handler = ConcurrentRotatingFileHandler(log_name, mode="a", maxBytes=log_size, backupCount=backupCount)
    rotate_handler.setLevel(logging.INFO)
    rotate_handler.setFormatter(formatter)
    logger.addHandler(rotate_handler)
    return logger

def startlogging(log, logfile, loglevel=logging.INFO, consolelevel=None):
    """Start the logging system to store rotational file based log."""
    try:
        from cloghandler import ConcurrentRotatingFileHandler as RFHandler
    except ImportError:
        # Next 2 lines are optional: issue a warning to the user
        from warnings import warn
        warn("ConcurrentLogHandler package not installed. Using builtin log handler")
        from logging.handlers import RotatingFileHandler as RFHandler
    if not consolelevel:
        consolelevel = loglevel
    log.setLevel(loglevel)
    # create file handler and set level to debug
    fh = RFHandler(filename=logfile, maxBytes=2**20, backupCount=50)
    fh.setLevel(loglevel)
    # create console handler and set level to error
    ch = logging.StreamHandler()
    ch.setLevel(consolelevel)
    # create formatter and add it to both handlers
    formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
    fh.setFormatter(formatter)
    ch.setFormatter(formatter)
    # add fh and ch to logger
    log.addHandler(fh)
    log.addHandler(ch)
    log.debug("Logging started at level %d", loglevel)
    return log

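A minimal usage sketch for startlogging; the logger name and file path are illustrative:

import logging

log = logging.getLogger("myapp")
startlogging(log, "/tmp/myapp.log",
             loglevel=logging.DEBUG,
             consolelevel=logging.WARNING)
log.info("written to the file, but below the console threshold")
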
def get_instance(tag="test"):
    logging.config.dictConfig(LOGGING)
    logger = logging.getLogger("clogger")
    if not os.path.exists(LOG_PATH):
        os.makedirs(LOG_PATH)
    # join with the directory, rather than concatenating, so the separator is always present
    logfile = os.path.join(LOG_PATH, '%s.log' % tag)
    fh = RFHandler(logfile, maxBytes=1024 * 1024 * 100, backupCount=10, delay=0.05)
    formatter = logging.Formatter('[%(asctime)s - %(levelno)s] - %(message)s')
    fh.setFormatter(formatter)
    fh.setLevel(logging.DEBUG)
    logger.addHandler(fh)
    # error_logfile = os.path.join(LOG_PATH, '%s_error.log' % tag)
    # efh = RFHandler(error_logfile, maxBytes=1024 * 1024 * 100, backupCount=10, delay=0.05)
    # efh.setFormatter(formatter)
    # efh.setLevel(logging.ERROR)
    # logger.addHandler(efh)
    return logger

def initLoggerBySize(name, filename, log_level="INFO", size=1024 * 1024, backup_count=20,
                     format='%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]'):
    '''
    Size-based rotation. (The time-based codes "S" second, "M" minute, "H" hour,
    "D" day, "W" weekday with 0 = Monday do not apply here.)
    :param name:
    :param filename:
    :param log_level:
    :param size:
    :param backup_count:
    :param format:
    :return: the rotating handler
    '''
    # The default level is INFO; each module's level is defined in params
    # (DEBUG/INFO/WARNING/ERROR/CRITICAL).
    re_filename = f'{filename}.log'
    logging.basicConfig(level=_logger_level[log_level], format=format)
    rotateHandler = ConcurrentRotatingFileHandler(re_filename, "a", size, backup_count)
    rotateHandler.setLevel(_logger_level[log_level])
    formatter = logging.Formatter(format)
    rotateHandler.setFormatter(formatter)
    # console = logging.StreamHandler()
    # console.setLevel(_logger_level[log_level])
    logger = logging.getLogger(name)
    logger.addHandler(rotateHandler)
    # logger.addHandler(console)
    return rotateHandler

def add_rotating_file_logger(logger, logfile, log_level=None, format=None, mode="a",
                             maxBytes=10 * (1024 ** 2), backupCount=5):
    """Add a rotating file logger to the logger."""
    log_level = log_level or logging.DEBUG
    format = format or BRIEF_LOG_FORMAT
    # touch the logfile
    if not os.path.exists(logfile):
        try:
            fo = open(logfile, "w")
            fo.close()
        except (ValueError, IOError):
            return
    # is the logfile really a file?
    if not os.path.isfile(logfile):
        return
    # check if the logfile is writable
    if not os.access(logfile, os.W_OK):
        return
    handler = RFHandler(logfile, maxBytes=maxBytes, backupCount=backupCount, mode=mode)
    handler.setFormatter(logging.Formatter(format, datefmt="%Y-%m-%d %H:%M:%S"))
    handler.setLevel(log_level)
    logger.addHandler(handler)

def log_config(f_level=logging.INFO, c_level=logging.CRITICAL, out_path='', filename='info', fix=False):
    logfile = os.path.join(out_path, filename) + '-' + time.strftime('%Y_%m%d_%H%M%S', time.localtime()) + '.log' \
        if not fix else os.path.join(out_path, filename) + '.log'
    print("2:", logfile)
    logger = logging.getLogger(logfile)
    # removeHandler() takes a single handler, so drop any existing ones one by one
    for handler in list(logger.handlers):
        logger.removeHandler(handler)
    logger.setLevel(f_level)
    fh = LogHandler(logfile, maxBytes=100 * 1024 * 1024, backupCount=50)
    fh.setLevel(f_level)
    ch = logging.StreamHandler()
    ch.setLevel(c_level)
    formatter = logging.Formatter('[%(levelname)s]--%(asctime)s--[%(filename)s %(funcName)s %(lineno)d]: %(message)s')
    fh.setFormatter(formatter)
    ch.setFormatter(formatter)
    logger.addHandler(fh)
    logger.addHandler(ch)
    return logger, logfile

def getLogger(logname='root'):
    logger = logging.getLogger(logname)
    logger.setLevel(logging.DEBUG)
    DIR = '../log/'
    if DIR:  # not none
        subprocess.check_call(['mkdir', '-p', DIR])

    # ================================
    # File Handler
    # ================================
    LOG_FILENAME = os.path.abspath(DIR + logname + '.err')
    handler = ConcurrentRotatingFileHandler(LOG_FILENAME, "a", 200 * 1024 * 1024, 5)
    handler.setLevel(logging.WARN)
    formatter = logging.Formatter("%(asctime)s\t%(name)s-%(process)s-%(threadName)s\t%(levelname)s\t%(message)s")
    handler.setFormatter(formatter)
    logger.addHandler(handler)

    # ================================
    # Standard Output Handler: INFO ONLY
    # ================================
    # handler = logging.StreamHandler(sys.stdout)
    LOG_FILENAME = os.path.abspath(DIR + logname + '.info')
    handler = ConcurrentRotatingFileHandler(LOG_FILENAME, "a", 200 * 1024 * 1024, 5)
    handler.setLevel(logging.DEBUG)
    formatter = logging.Formatter("%(asctime)s\t%(name)s-%(process)s-%(threadName)s\t%(message)s")
    handler.setFormatter(formatter)
    handler.addFilter(LevelFilter(logging.INFO))
    logger.addHandler(handler)
    return logger

def __init__(self, json=False, stdout=True, name='scrapy-cluster', dir='logs', file='main.log',
             bytes=25000000, backups=5, level='INFO',
             format='%(asctime)s [%(name)s] %(levelname)s: %(message)s', propagate=False):
    '''
    @param stdout: Flag to write logs to stdout or file
    @param json: Flag to write json logs with objects or just the messages
    @param name: The logger name
    @param dir: The directory to write logs into
    @param file: The file name
    @param bytes: The max file size in bytes
    @param backups: The number of backups to keep of the file
    @param level: The logging level string
    @param format: The log format
    @param propagate: Allow the log to propagate to other ancestor loggers
    '''
    # set up logger
    self.logger = logging.getLogger(name)
    self.logger.setLevel(logging.DEBUG)
    self.logger.propagate = propagate
    self.json = json
    self.log_level = level
    self.format_string = format
    if stdout:
        # set up to std out
        stream_handler = logging.StreamHandler(sys.stdout)
        stream_handler.setLevel(logging.DEBUG)
        formatter = self._get_formatter(json)
        stream_handler.setFormatter(formatter)
        self.logger.addHandler(stream_handler)
        self._check_log_level(level)
        self.debug("Logging to stdout")
    else:
        # set up to file
        try:
            # try to make dir
            os.makedirs(dir)
        except OSError as exception:
            if exception.errno != errno.EEXIST:
                raise
        file_handler = ConcurrentRotatingFileHandler(dir + '/' + file, maxBytes=bytes, backupCount=backups)
        file_handler.setLevel(logging.DEBUG)
        formatter = self._get_formatter(json)
        file_handler.setFormatter(formatter)
        self.logger.addHandler(file_handler)
        self._check_log_level(level)
        self.debug("Logging to file: {file}".format(file=dir + '/' + file))

def logger_init(log_path=logging_path('http_server_log')):
    formatter = Formatter('%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s')
    debug = ConcurrentRotatingFileHandler(os.path.join(log_path, 'debug.log'),
                                          maxBytes=LOGGER_FILE_MAXBYTE, backupCount=DEBUG_BACK_COUNT)
    debug.setLevel(DEBUG)
    debug.setFormatter(formatter)
    info = ConcurrentRotatingFileHandler(os.path.join(log_path, 'info.log'),
                                         maxBytes=LOGGER_FILE_MAXBYTE, backupCount=INFO_BACK_COUNT)
    info.setLevel(INFO)
    info.setFormatter(formatter)
    warning = ConcurrentRotatingFileHandler(os.path.join(log_path, 'warning.log'),
                                            maxBytes=LOGGER_FILE_MAXBYTE, backupCount=WARNING_BACK_COUNT)
    warning.setLevel(WARNING)
    warning.setFormatter(formatter)
    error = ConcurrentRotatingFileHandler(os.path.join(log_path, 'error.log'),
                                          maxBytes=LOGGER_FILE_MAXBYTE, backupCount=ERROR_BACK_COUNT)
    error.setLevel(ERROR)
    error.setFormatter(formatter)
    critical = ConcurrentRotatingFileHandler(os.path.join(log_path, 'critical.log'),
                                             maxBytes=LOGGER_FILE_MAXBYTE, backupCount=CRITICAL_BACK_COUNT)
    critical.setLevel(CRITICAL)
    crit_format = Formatter('%(asctime)s %(message)s')
    critical.setFormatter(crit_format)
    logger = getLogger('')
    logger.addHandler(debug)
    logger.addHandler(info)
    logger.addHandler(warning)
    logger.addHandler(error)
    logger.addHandler(critical)
    if LOGGER_SET_LEVEL == 2:
        LEVEL = INFO
    elif LOGGER_SET_LEVEL == 3:
        LEVEL = WARNING
    elif LOGGER_SET_LEVEL == 4:
        LEVEL = ERROR
    elif LOGGER_SET_LEVEL == 5:
        LEVEL = CRITICAL
    else:
        LEVEL = DEBUG
    logger.setLevel(LEVEL)

def setlog():
    rotateHandler = ConcurrentRotatingFileHandler(log_config['file_path'], "a", 20 * 1024 * 1024, 100)
    rotateHandler.setLevel(logging.INFO)
    formatter = logging.Formatter('[%(asctime)s] [process:%(process)s] [%(filename)s:%(lineno)d] %(levelname)s %(message)s')
    rotateHandler.setFormatter(formatter)
    log = logging.getLogger()
    log.addHandler(rotateHandler)
    log.setLevel(logging.INFO)

def get_handler(filename, level=None):
    ext = logging.getLevelName(level).lower() if level is not None else 'log'
    handler = ConcurrentRotatingFileHandler(''.join([LOG_PATH, filename, '.', ext]), mode='a',
                                            maxBytes=MAX_FILE_SIZE, backupCount=5, encoding='utf-8')
    handler.setFormatter(formatter)
    if level is not None:
        handler.setLevel(level)
    return handler

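A sketch of how this factory might be combined on one logger, assuming the module-level LOG_PATH, MAX_FILE_SIZE, and formatter names the snippet relies on:

logger = logging.getLogger("app")
logger.setLevel(logging.DEBUG)
logger.addHandler(get_handler("app"))                  # app.log: every record
logger.addHandler(get_handler("app", logging.ERROR))   # app.error: ERROR and above
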
def log(message, log_name="app", level="INFO"):
    logfile = os.path.join(ROOT_PATH, "%s.log" % log_name)
    logger = logging.getLogger(logfile)
    filehandler = LogHandler(logfile, maxBytes=100 * 1024 * 1024, backupCount=50)
    streamhandler = logging.StreamHandler()
    formatter = logging.Formatter('[%(levelname)s]--%(asctime)s--[%(filename)s %(funcName)s %(lineno)d]: %(message)s')
    streamhandler.setFormatter(formatter)
    filehandler.setFormatter(formatter)
    logger.addHandler(streamhandler)
    logger.addHandler(filehandler)
    if level.lower() == "info":
        streamhandler.setLevel(logging.INFO)
        filehandler.setLevel(logging.INFO)
        logger.info(message)
    elif level.lower() == "error":
        streamhandler.setLevel(logging.ERROR)
        filehandler.setLevel(logging.ERROR)
        logger.error(message, exc_info=True)
    elif level.lower() == "critical":
        streamhandler.setLevel(logging.CRITICAL)
        filehandler.setLevel(logging.CRITICAL)
        logger.critical(message, exc_info=True)
    elif level.lower() == "warning":
        streamhandler.setLevel(logging.WARNING)
        filehandler.setLevel(logging.WARNING)
        logger.warning(message)
    logger.removeHandler(streamhandler)
    logger.removeHandler(filehandler)

def init_logging(stdout_enabled=True):
    root_logger = logging.getLogger()
    root_logger.setLevel(logging.getLevelName(config.get('log_level', 'INFO')))
    # root_logger.setLevel(logging.WARN)
    logging.getLogger('requests.packages.urllib3.connectionpool').setLevel(logging.ERROR)
    logging.getLogger('boto').setLevel(logging.ERROR)
    logging.getLogger('urllib3.connectionpool').setLevel(logging.WARN)
    log_formatter = logging.Formatter(
        fmt='%(asctime)s | ' + ECID + ' | %(name)s | %(processName)s | %(levelname)s | %(message)s',
        datefmt='%m/%d/%Y %I:%M:%S %p')
    stdout_logger = logging.StreamHandler(sys.stdout)
    stdout_logger.setFormatter(log_formatter)
    root_logger.addHandler(stdout_logger)
    if stdout_enabled:
        stdout_logger.setLevel(logging.getLevelName(config.get('log_level', 'INFO')))

    # base log file
    log_file_name = '%s/migrator.log' % config.get('log_dir')
    # ConcurrentRotatingFileHandler
    rotating_file = ConcurrentRotatingFileHandler(filename=log_file_name, mode='a',
                                                  maxBytes=404857600, backupCount=0)
    rotating_file.setFormatter(log_formatter)
    rotating_file.setLevel(logging.INFO)
    root_logger.addHandler(rotating_file)

    error_log_file_name = '%s/migrator_errors.log' % config.get('log_dir')
    error_rotating_file = ConcurrentRotatingFileHandler(filename=error_log_file_name, mode='a',
                                                        maxBytes=404857600, backupCount=0)
    error_rotating_file.setFormatter(log_formatter)
    error_rotating_file.setLevel(logging.ERROR)
    root_logger.addHandler(error_rotating_file)

def set_app_log(app):
    """
    Configure Flask's built-in logger.
    :param app:
    :return:
    """
    l_format = logging.Formatter(
        "%(asctime)s [%(levelname)s] [%(filename)s %(funcName)s %(lineno)d]: %(message)s ")
    r_handler = LogHandler("app.log", maxBytes=20480000, backupCount=10, encoding='UTF-8')
    r_handler.setLevel(logging.INFO)
    r_handler.setFormatter(l_format)
    app.logger.addHandler(r_handler)

def __init__(self, log_level='error', log_name='debug'):
    path = os.path.join(LOG_DIR, '%s_%s.csv' % (log_name, log_level))
    from conf.setting import develop_e_name
    if BASE_TYPE == develop_e_name:
        if log_level == 'error':
            log_level = 'debug'
    if log_level == 'debug':
        formatter = LOG_TEM_DEBUG
        clevel = logging.DEBUG
        flevel = logging.INFO
    if log_level == 'error':
        formatter = LOG_TEM_ERROR
        clevel = logging.INFO
        flevel = logging.WARNING
    if log_level == 'info':
        formatter = LOG_TEM_INFO
        clevel = logging.INFO
        flevel = logging.INFO
    if log_level == 'DB':
        formatter = LOG_TEM_DB
        clevel = logging.DEBUG
        flevel = logging.INFO
    # initialize the logger
    self.logger = logging.getLogger(path)
    self.logger.setLevel(logging.DEBUG)
    fmt = logging.Formatter(*formatter)
    # console (shell) handler
    sh = logging.StreamHandler()
    sh.setFormatter(fmt)
    sh.setLevel(clevel)
    maxBytes = 50 * 1024 * 1024  # 50M
    # file handler
    fh = ConcurrentRotatingFileHandler(path, mode='a', maxBytes=maxBytes, backupCount=10, encoding='utf-8')
    fh.setFormatter(fmt)
    fh.setLevel(flevel)
    # register the handlers
    if not self.logger.handlers:
        self.logger.addHandler(sh)
        self.logger.addHandler(fh)

def get_logger(logfile, name, level=INFO, maxSzie_M=1024, backupCount=15, console=True):
    '''
    Get a multiprocess-safe logger.
    :param logfile:
    :param name:
    :param level:
    :param maxSzie_M: max file size, in MB
    :param backupCount: number of backup files to keep
    :return:
    '''
    _log = getLogger(name)
    _log.setLevel(level)
    # logfile = os.path.abspath('logs/upload.log')
    rotateHandler = ConcurrentRotatingFileHandler(logfile, 'a', 1024 * 1024 * maxSzie_M, backupCount, encoding='utf-8')
    # formatter = logging.Formatter(
    #     fmt="%(asctime)s %(levelname)s: %(name)s %(filename)s:%(lineno)d %(message)s",
    #     datefmt="%Y-%m-%d %X"
    # )
    datefmt_str = '%Y-%m-%d %H:%M:%S'
    format_str = "%(asctime)s %(levelname)s: %(name)s %(filename)s:%(lineno)d %(message)s"
    formatter = Formatter(format_str, datefmt_str)
    rotateHandler.setFormatter(formatter)
    rotateHandler.setLevel(level)
    if console:
        _console = StreamHandler()
        _console.setFormatter(formatter)
        _console.setLevel(INFO)
        _log.addHandler(_console)
    _log.addHandler(rotateHandler)
    return _log

def log_config(f_level=logging.INFO, c_level=logging.INFO, out_path='', filename='info', fix=False):
    logfile = os.path.join(out_path, filename) + '-' + time.strftime('%Y_%m%d_%H%M%S', time.localtime()) + '.log' \
        if not fix else os.path.join(out_path, filename) + '.log'
    logger = logging.getLogger(logfile)
    if f_level is None:
        logger.setLevel(c_level)
    else:
        logger.setLevel(f_level)
    formatter = logging.Formatter(
        '[%(levelname)s][%(process)d][%(thread)d]--%(asctime)s--[%(filename)s %(funcName)s %(lineno)d]: %(message)s')
    if platform.system() == "Windows":
        FORMAT = '%(levelname)s[%(process)d][%(thread)d]--%(asctime)s--[%(filename)s %(funcName)s %(lineno)d]: %(message)s$RESET'
        COLOR_FORMAT = formatter_message(FORMAT, True)
        color_formatter = ColoredFormatter(COLOR_FORMAT)
        ch = logging.StreamHandler()
        ch.setFormatter(color_formatter)
        ch.setLevel(c_level)
        logger.addHandler(ch)
    else:
        ch = logging.StreamHandler()
        ch.setLevel(c_level)
        ch.setFormatter(formatter)
        logger.addHandler(ch)
    if f_level is not None:
        fh = LogHandler(logfile, maxBytes=100 * 1024 * 1024, backupCount=50, encoding='utf-8')
        fh.setLevel(f_level)
        fh.setFormatter(formatter)
        logger.addHandler(fh)
    # if the logger already had handlers, drop the ones just added
    if len(logger.handlers) > 2:
        logger.removeHandler(ch)
        f_level and logger.removeHandler(fh)
    return logger, logfile

def __add_RotateHandler(self, logfilename):
    if platform.system() == 'Windows':
        log_path = config.LogConfig.log_path_windows
    else:
        log_path = config.LogConfig.log_path_linux
    if not os.path.exists(log_path):
        os.makedirs(log_path)
    # Process-safe logging: multiple processes can write to the same file without errors.
    filename = log_path + logfilename
    rotate_handler = ConcurrentRotatingFileHandler(filename, mode="a", maxBytes=200 * 1024 * 1024,
                                                   backupCount=5, encoding="utf-8")
    rotate_handler.setLevel(logging.DEBUG)
    rotate_handler.setFormatter(self.formatter)
    self.logger.addHandler(rotate_handler)

def get_logger(ENV=None, BASE_DIR=None):
    if ENV is None:
        ENV = os.getenv('HUMANIKI_ENV', 'development')
    search_from = BASE_DIR if BASE_DIR else __file__
    BASE_DIR = get_ancestor_directory_that_has_xdir_as_child(xdir='logs', caller__file__=search_from)
    # use Airbrake in production
    if ENV == "production":
        log = airbrake.getLogger()
        log.setLevel(logging.INFO)
    else:
        log = logging.getLogger(__name__)
        log.setLevel(logging.DEBUG)
    # Return the logger as-is if it has already been initialized
    handlers = [h for h in log.handlers if type(h) != airbrake.AirbrakeHandler]
    if len(handlers) > 0:
        return log
    # print all debug and higher to STDOUT
    # if the environment is development
    if ENV == "development":
        stdoutHandler = logging.StreamHandler(sys.stdout)
        stdoutHandler.setLevel(logging.DEBUG)
        log.addHandler(stdoutHandler)
    logfile = os.path.abspath(os.path.join(BASE_DIR, "logs", f"humaniki_{ENV}.log"))
    print(f"Logging to {logfile}")
    formatter = logging.Formatter('%(asctime)s - %(name)s({env}) - %(levelname)s - %(message)s'.format(env=ENV))
    rotateHandler = ConcurrentRotatingFileHandler(logfile, "a", 32 * 1000 * 1024, 5)
    rotateHandler.setLevel(logging.DEBUG)
    rotateHandler.setFormatter(formatter)
    log.addHandler(rotateHandler)
    return log

def get_influx_logger(logger_name, log_size=100 * 1024 * 1024, backupCount=2):
    '''
    :param logger_name:
    :param log_size:
    :param backupCount:
    :return:
    '''
    logger_name = "influx_{}".format(logger_name.lower())
    logger = logging.getLogger(logger_name)
    logger.setLevel(logging.INFO)
    log_name = os.path.join(LOG_DIR, "{}.log".format(logger_name))
    rotate_handler = ConcurrentRotatingFileHandler(log_name, mode="a", maxBytes=log_size, backupCount=backupCount)
    rotate_handler.setLevel(logging.INFO)
    logger.addHandler(rotate_handler)
    return logger

def __init__(self, log_name, logger_name):
    # Build the log file name: time.time() gives the current time, time.localtime()
    # converts it to local time, and time.strftime() formats the date.
    time_str = time.strftime("%Y-%m-%d_%H_%M_%S", time.localtime(time.time()))
    logname = time_str + '_' + log_name + '.log'
    self.logger = logging.getLogger(logger_name)
    self.logger.setLevel(level=logging.INFO)
    rHandler = ConcurrentRotatingFileHandler(logname, mode="a", maxBytes=512 * 1024, backupCount=3)
    rHandler.setLevel(logging.INFO)
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    rHandler.setFormatter(formatter)
    console = logging.StreamHandler()
    console.setLevel(logging.INFO)
    console.setFormatter(formatter)
    self.logger.addHandler(rHandler)
    # the console handler was configured but never attached in the original; add it too
    self.logger.addHandler(console)

def setup_logging_handler(level='DEBUG'):
    """
    Sets up generic logging to file with rotating files on disk

    :param level: the level of the logging DEBUG, INFO, WARN
    :return: logging instance
    """
    # Get the log level based on the user input
    level = getattr(logging, level)

    # Define the format of the output
    logfmt = '%(levelname)s\t%(process)d [%(asctime)s]:\t%(message)s'
    datefmt = '%m/%d/%Y %H:%M:%S'
    formatter = logging.Formatter(fmt=logfmt, datefmt=datefmt)

    file_name_path = os.path.join(os.path.dirname(__file__), PROJECT_HOME, 'logs')
    # Make the logging directory if it does not exist
    if not os.path.exists(file_name_path):
        os.makedirs(file_name_path)

    # Construct the output path for the logs
    file_name = os.path.join(file_name_path, 'app.log')

    # Instantiate the file handler for logging; rotate every 2MB
    rotating_file_handler = ConcurrentRotatingFileHandler(filename=file_name, maxBytes=2097152,
                                                          backupCount=5, mode='a', encoding='UTF-8')
    # Add the format and log level
    rotating_file_handler.setFormatter(formatter)
    rotating_file_handler.setLevel(level)
    return rotating_file_handler

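Since setup_logging_handler returns the handler rather than a logger, the caller attaches it explicitly; this usage is a sketch with an illustrative logger name:

handler = setup_logging_handler(level="INFO")
logger = logging.getLogger("app")
logger.setLevel(logging.INFO)
logger.addHandler(handler)
logger.info("rotates at 2 MB with 5 backups kept")
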
def set_logging(logger_path, debug=True, force=False, log_name_level=2):
    """
    Create multiprocessing-safe logger

    :param logger_path: path of the logger
    :param debug: (Optional) include debug messages in the output
    :param force: (Optional) force logger re-creation
    :param log_name_level: (Optional) number of path components to use as logger's name (1 stands for filename)
    :return: logging.logger object
    """
    logger_path = os.path.normpath(logger_path)
    logger_name = '_'.join(logger_path.split(os.sep)[-log_name_level:])
    logger = logging.getLogger(logger_name)
    if logger.hasHandlers() and not force:
        return logger
    else:
        logger.handlers = []
    logfile = ConcurrentRotatingFileHandler(logger_path, maxBytes=100 * 1024 * 1024, backupCount=10)
    logfile.setLevel(logging.DEBUG if debug else logging.INFO)
    handlers = [logfile]
    stream = logging.StreamHandler()
    stream.setLevel(logging.DEBUG)
    handlers.append(stream)
    logger.setLevel(logging.DEBUG)
    formatter = logging.Formatter(fmt='%(asctime)s %(levelname)s: %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
    for handler in handlers:
        handler.setFormatter(formatter)
        logger.addHandler(handler)
    return logger

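A sketch of the multiprocessing case set_logging is built for; the log file name is illustrative. Each worker re-creates the logger by name, and ConcurrentRotatingFileHandler serializes the writes across processes:

import multiprocessing

def worker(n):
    log = set_logging("workers.log")
    log.info("hello from worker %d", n)

if __name__ == "__main__":
    procs = [multiprocessing.Process(target=worker, args=(i,)) for i in range(4)]
    for p in procs:
        p.start()
    for p in procs:
        p.join()
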
def create_concurrent_logger(logger_name, log_level=logging.WARNING, print_level=logging.WARNING):
    """ Create a logger that is safe to use from multiple processes. """
    base_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    log_path = os.path.join(base_dir, "logs")
    if not os.path.exists(log_path):
        os.makedirs(log_path)
    logfile = os.path.join(log_path, logger_name + ".log")
    filesize = 800 * 1024 * 1024
    log = logging.getLogger(logger_name)
    # write records to the log file
    rotate_handler = ConcurrentRotatingFileHandler(logfile, "a", filesize, 5, encoding="utf-8")
    rotate_handler.setLevel(log_level)
    fmt = "[%(asctime)-15s %(levelname)-8s %(filename)s:%(lineno)3d] [%(process)s] %(message)s"
    datefmt = "%a, %d %b %Y %H:%M:%S"
    formatter = logging.Formatter(fmt, datefmt)
    rotate_handler.setFormatter(formatter)
    log.addHandler(rotate_handler)
    log.setLevel(log_level)
    # define a StreamHandler that prints WARNING-or-higher records to stderr,
    # and attach it to the root logger
    console = logging.StreamHandler()
    console.setLevel(print_level)
    formatter = logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s')
    console.setFormatter(formatter)
    logging.getLogger('').addHandler(console)
    return log

def init():
    logger['root_logger'] = logging.getLogger()

    logfile = os.path.abspath('debug.log')
    debug_handler = ConcurrentRotatingFileHandler(logfile, 'a', 10485760, 5)
    debug_handler.setLevel(logging.DEBUG)
    debug_handler.setFormatter(default_formatter)
    logger['root_logger'].addHandler(debug_handler)

    logfile = os.path.abspath('info.log')
    info_handler = ConcurrentRotatingFileHandler(logfile, 'a', 10485760, 5)
    info_handler.setLevel(logging.INFO)
    info_handler.setFormatter(default_formatter)
    logger['root_logger'].addHandler(info_handler)

    logfile = os.path.abspath('error.log')
    error_handler = ConcurrentRotatingFileHandler(logfile, 'a', 10485760, 5)
    error_handler.setLevel(logging.ERROR)
    error_handler.setFormatter(default_formatter)
    logger['root_logger'].addHandler(error_handler)

def initLogging(dir_path=logging_path('compare-proxy'), logger_level=INFO):
    formatter = Formatter('%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s')
    error = ConcurrentRotatingFileHandler('%s/%s' % (dir_path, 'error.log'), 'a', 100 * 1024 * 1024, backupCount=10)
    error.setLevel(ERROR)
    error.setFormatter(formatter)
    info = ConcurrentRotatingFileHandler('%s/%s' % (dir_path, 'info.log'), 'a', 100 * 1024 * 1024, backupCount=10)
    info.setLevel(INFO)
    info.setFormatter(formatter)
    debug = ConcurrentRotatingFileHandler('%s/%s' % (dir_path, 'debug.log'), 'a', 100 * 1024 * 1024, backupCount=10)
    debug.setLevel(DEBUG)
    debug.setFormatter(formatter)
    from logging import StreamHandler
    console = StreamHandler()
    console.setLevel(INFO)
    console.setFormatter(formatter)
    logger = getLogger('')
    logger.addHandler(debug)
    logger.addHandler(info)
    logger.addHandler(error)
    logger.addHandler(console)
    logger.setLevel(logger_level)
    getLogger('tornado').level = ERROR

def init(cls, log_path):
    """
    :type log_path: str
    :return: bool
    """
    try:
        # create the log directory if it does not exist
        if not os.path.exists(log_path):
            os.mkdir(log_path)
        print('log_path: ', log_path)
        # log format
        formatter = logging.Formatter('[%(asctime)s]--[%(process)d]--[%(levelname)s]--%(message)s')
        # general-purpose log
        log_file = os.path.join(log_path, 'logger.log')
        logger_fh = ConcurrentRotatingFileHandler(log_file, "a", 10 * 1024 * 1024, 300)
        logger_fh.setLevel(logging.DEBUG)
        logger_fh.setFormatter(formatter)
        logger = logging.getLogger()
        logger.setLevel(logging.DEBUG)
        logger.addHandler(logger_fh)
        logger.info('%s init successful!' % log_file)
        cls.logger = logger
        # DEBUG log
        # debug_file = os.path.join(log_path, 'debug.log')
        # debug_fh = ConcurrentRotatingFileHandler(debug_file, "a", 10 * 1024 * 1024, 100)
        # debug_fh.setLevel(logging.DEBUG)
        # debug_fh.setFormatter(formatter)
        # debug_logger = logging.getLogger()
        # debug_logger.setLevel(logging.DEBUG)
        # debug_logger.addHandler(debug_fh)
        # debug_logger.info('%s init successful!' % log_file)
        cls.debug = logger.debug
        # INFO log
        # info_file = os.path.join(log_path, 'info.log')
        # info_fh = ConcurrentRotatingFileHandler(info_file, "a", 10 * 1024 * 1024, 100)
        # info_fh.setLevel(logging.DEBUG)
        # info_fh.setFormatter(formatter)
        # info_logger = logging.getLogger()
        # info_logger.setLevel(logging.DEBUG)
        # info_logger.addHandler(info_fh)
        # info_logger.info('%s init successful!' % log_file)
        cls.info = logger.info
        # ERROR log
        # error_file = os.path.join(log_path, 'error.log')
        # error_fh = ConcurrentRotatingFileHandler(error_file, "a", 10 * 1024 * 1024, 100)
        # error_fh.setLevel(logging.DEBUG)
        # error_fh.setFormatter(formatter)
        # error_logger = logging.getLogger()
        # error_logger.setLevel(logging.DEBUG)
        # error_logger.addHandler(error_fh)
        # error_logger.error('%s init successful!' % log_file)
        cls.error = logger.error
        # the three log callables are now set on the class
        return True
    except Exception:
        print(traceback.format_exc())
        return False

# error log
logfile_error = log_path + "transfer_error.log"
error_filesize = 20 * 1024 * 1024
log_error = logging.getLogger()
error_handler = ConcurrentRotatingFileHandler(logfile_error, "a", error_filesize, encoding="utf-8", backupCount=30)
datefmt_str2 = '%Y-%m-%d %H:%M:%S'
format_str2 = '%(asctime)s-%(levelname)s-no.:%(lineno)d-%(message)s '
formatter2 = logging.Formatter(format_str2, datefmt_str2)
error_handler.setFormatter(formatter2)
log_error.addHandler(error_handler)
error_handler.setLevel(logging.ERROR)

monkey.patch_all()

# create the Flask app
app = Flask(__name__)


@app.route('/selectinfo/', methods=['POST', 'GET'])  # query the database and hand the data to the agent
def selectinfo():
    ips = request.form.get('ip')
    ip_list = ips.split(',')[0:-1]
    try:
        db = cx_Oracle.connect(db_user, db_pwd, '127.0.0.1:1521/' + db_name)
        cur = db.cursor()

# Set up logger.
_log = logging.getLogger("alpenhornd")
_log.setLevel(logging.DEBUG)

log_stream = logging.StreamHandler(stream=sys.stdout)
log_fmt = logging.Formatter("%(asctime)s %(levelname)s >> %(message)s", "%b %d %H:%M:%S")
log_stream.setLevel(logging.INFO)
log_stream.setFormatter(log_fmt)
_log.addHandler(log_stream)

# Find path to use for logging output (get from environment if possible)
log_path = "/var/log/alpenhorn/alpenhornd.log"  # default path
if "ALPENHORN_LOG_FILE" in os.environ:
    log_path = os.environ["ALPENHORN_LOG_FILE"]

# If log_path is set, set up as log handler
if log_path != "":
    log_file = RFHandler(log_path, maxBytes=(2**22), backupCount=100)
    log_file.setLevel(logging.INFO)
    log_file.setFormatter(log_fmt)
    _log.addHandler(log_file)


def get_log():
    """Get a logging instance."""
    return _log