def init_logger(cls, port):
    """Attach rotating file handlers for the access and server logs.

    access_log gets its own access.log; gen_log and app_log share a
    rotating server.log. Levels come from ServerConfig['log_level'].
    """
    log_dir = ServerConfig['log_dir']
    level = getattr(logging, ServerConfig['log_level'].upper())
    fmt = LogFormatter(fmt=cls.__fmt.format(port=port), datefmt="", color=False)

    # Access log: dedicated file (no rotation limits configured here).
    access_handler = ConcurrentRotatingFileHandler(
        filename=os.path.join(log_dir, "access.log"))
    access_handler.setFormatter(fmt)
    access_log.addHandler(access_handler)
    access_log.setLevel(logging.INFO)

    # gen_log / app_log share one server.log: 128 MB per file, 5 backups.
    server_handler = ConcurrentRotatingFileHandler(
        filename=os.path.join(log_dir, 'server.log'),
        maxBytes=128 * 1024 * 1024,
        backupCount=5,
        encoding='utf8')
    server_handler.setFormatter(fmt)
    for lg in (gen_log, app_log):
        lg.addHandler(server_handler)
        lg.setLevel(level)

    # Keep records from bubbling up to ancestor loggers.
    access_log.propagate = app_log.propagate = gen_log.propagate = False
def logger_init(log_path=logging_path('http_server_log')):
    """Attach one rotating file handler per severity to the root logger.

    Each level writes to its own file (debug.log .. critical.log), capped
    at LOGGER_FILE_MAXBYTE bytes with a per-level backup count.  The root
    logger level is chosen by LOGGER_SET_LEVEL (2=INFO, 3=WARNING,
    4=ERROR, 5=CRITICAL, anything else=DEBUG).

    NOTE: the default log_path is evaluated once at import time, which is
    the original behavior and is preserved here.
    """
    default_fmt = Formatter(
        '%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s')
    # critical.log intentionally uses a terser format.
    critical_fmt = Formatter('%(asctime)s %(message)s')

    # (file name, handler level, backup count, formatter) — replaces five
    # near-identical copy-pasted handler blocks.
    specs = [
        ('debug.log', DEBUG, DEBUG_BACK_COUNT, default_fmt),
        ('info.log', INFO, INFO_BACK_COUNT, default_fmt),
        ('warning.log', WARNING, WARNING_BACK_COUNT, default_fmt),
        ('error.log', ERROR, ERROR_BACK_COUNT, default_fmt),
        ('critical.log', CRITICAL, CRITICAL_BACK_COUNT, critical_fmt),
    ]

    logger = getLogger('')
    for fname, handler_level, backups, fmt in specs:
        handler = ConcurrentRotatingFileHandler(
            os.path.join(log_path, fname),
            maxBytes=LOGGER_FILE_MAXBYTE,
            backupCount=backups)
        handler.setLevel(handler_level)
        handler.setFormatter(fmt)
        logger.addHandler(handler)

    # Map the numeric config knob to a level; default is DEBUG.
    logger.setLevel(
        {2: INFO, 3: WARNING, 4: ERROR, 5: CRITICAL}.get(LOGGER_SET_LEVEL, DEBUG))
def initialize_logger(self):
    """Create the debug and scan loggers backed by concurrent handlers.

    A concurrent (multi-process safe) handler is used because many of
    these processes will likely be writing to scan.log at the same time.

    NOTE(review): no maxBytes is passed, so with the handler's defaults
    these files never actually rotate — confirm whether size limits were
    intended.
    """
    self.dbg_h = logging.getLogger('dbg_log')
    # FIX: os.path.join instead of manual '%s/%s' concatenation.
    dbglog = os.path.join(self.log_path, 'dbg.log')
    self.dbg_h.addHandler(ConcurrentRotatingFileHandler(dbglog, "a"))
    self.dbg_h.setLevel(logging.ERROR)

    self.scan_h = logging.getLogger('scan_log')
    scanlog = os.path.join(self.log_path, 'scan.log')
    self.scan_h.addHandler(ConcurrentRotatingFileHandler(scanlog, "a"))
    self.scan_h.setLevel(logging.INFO)
def init_logging(stdout_enabled=True):
    """Configure the root logger: optional stdout plus two rotating files.

    :param stdout_enabled: when False, no stdout handler is attached at all.
    """
    root_logger = logging.getLogger()
    root_logger.setLevel(logging.getLevelName(config.get('log_level', 'INFO')))

    # Quiet down chatty third-party loggers.
    logging.getLogger('requests.packages.urllib3.connectionpool').setLevel(
        logging.ERROR)
    logging.getLogger('boto').setLevel(logging.ERROR)
    logging.getLogger('urllib3.connectionpool').setLevel(logging.WARN)

    log_formatter = logging.Formatter(
        fmt='%(asctime)s | ' + ECID +
        ' | %(name)s | %(processName)s | %(levelname)s | %(message)s',
        datefmt='%m/%d/%Y %I:%M:%S %p')

    # BUG FIX: the stdout handler used to be added unconditionally with only
    # its level gated on stdout_enabled, so stdout_enabled=False still wrote
    # every record to stdout (the handler level defaulted to NOTSET).
    if stdout_enabled:
        stdout_logger = logging.StreamHandler(sys.stdout)
        stdout_logger.setFormatter(log_formatter)
        stdout_logger.setLevel(
            logging.getLevelName(config.get('log_level', 'INFO')))
        root_logger.addHandler(stdout_logger)

    # Base log file. NOTE(review): 404857600 bytes is an odd limit
    # (~386 MB) — possibly a typo for 400 MB; preserved as-is.
    log_file_name = '%s/migrator.log' % config.get('log_dir')
    rotating_file = ConcurrentRotatingFileHandler(filename=log_file_name,
                                                  mode='a',
                                                  maxBytes=404857600,
                                                  backupCount=0)
    rotating_file.setFormatter(log_formatter)
    rotating_file.setLevel(logging.INFO)
    root_logger.addHandler(rotating_file)

    # Separate errors-only file.
    error_log_file_name = '%s/migrator_errors.log' % config.get('log_dir')
    error_rotating_file = ConcurrentRotatingFileHandler(
        filename=error_log_file_name,
        mode='a',
        maxBytes=404857600,
        backupCount=0)
    error_rotating_file.setFormatter(log_formatter)
    error_rotating_file.setLevel(logging.ERROR)
    root_logger.addHandler(error_rotating_file)
def log(message, path=None, level=None, filename=None, log_type=None):
    """Write one record to a per-filename rotating log file.

    :param message: text to log
    :param path: log directory (created if missing); default './logs/'
    :param level: 'debug' lowers the logger level; anything else keeps INFO
    :param filename: logger / file base name; default 'python_logs'
    :param log_type: None/'error'/'warning'/'debug' (None logs at info)
    """
    if not filename:
        filename = 'python_logs'
    if not path:
        path = './logs/'
    if not os.path.exists(path):
        os.makedirs(path)
    logger = logging.getLogger(filename)
    if not level:
        logger.setLevel(logging.INFO)
    elif level == 'debug':
        logger.setLevel(logging.DEBUG)
    # Attach the handler only once per logger to avoid duplicate records.
    if not logger.handlers:
        # BUG FIX: was `path + filename + '.log'`, which silently produced
        # e.g. 'logsname.log' when path lacked a trailing slash.
        filehandler = ConcurrentRotatingFileHandler(
            os.path.join(path, filename + '.log'))
        formatter = logging.Formatter(
            '%(asctime)s|%(levelname)s|%(name)s|%(message)s')
        filehandler.setFormatter(formatter)
        logger.addHandler(filehandler)
    if not log_type:
        logger.info(message)
    elif log_type == 'error':
        logger.error(message)
    elif log_type == 'warning':
        logger.warning(message)
    elif log_type == 'debug':
        logger.debug(message)
def __cmd__(self, lock, command):
    """Run `command` in a shell, log its output, and record it.

    Output is appended to self.__return_content__ under `lock`, since
    several runner threads may share that buffer.
    """
    log = getLogger()
    # Build a per-request log path.
    # BUG FIX: was `sefl.__log_id__`, a NameError at runtime.
    self.__log_id__ = self.__runner_requestor__ + '_' + time.strftime(
        "%Y_%H_%M_%S") + '.log'
    logfile = os.path.join(log_path, self.__log_id__)
    # Rotate log after reaching 10M, keep 5 old copies.
    rotateHandler = ConcurrentRotatingFileHandler(logfile, "a",
                                                  1024 * 1024 * 10, 5)
    log.addHandler(rotateHandler)
    # NOTE(review): shell=True with an externally supplied command string is
    # a shell-injection risk if `command` can come from untrusted input.
    p = subprocess.Popen(command,
                         shell=True,
                         stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE)
    (stdout, errout) = p.communicate()
    if stdout:
        lock.acquire()
        try:
            # BUG FIX: was `= +command + ...` — unary plus on a str raises
            # TypeError; the intent was clearly to append.
            self.__return_content__ += command + ':' + stdout
        finally:
            # finally ensures the lock is released even if append fails.
            lock.release()
        log.setLevel(INFO)
        log.info(
            time.strftime("%b %d %Y %H:%M:%S: ") + command + ':' + stdout)
    if errout:
        lock.acquire()
        try:
            self.__return_content__ += command + ':' + errout
        finally:
            lock.release()
        log.setLevel(ERROR)
        # BUG FIX: log.info() is filtered out once the level is ERROR, so
        # stderr output was silently dropped; use error().
        log.error(
            time.strftime("%b %d %Y %H:%M:%S: ") + command + ':' + errout)
def init_http_logger(config):
    """(Re)build the HTTP logger with a single 100 KB rotating handler."""
    logger = logging.getLogger(config.LOGGER_HTTP_NAME)
    # Drop any previously attached handlers so reconfiguration is idempotent.
    del logger.handlers[:]
    handler = ConcurrentRotatingFileHandler('/tmp/snapperhttp.log',
                                            maxBytes=(100 * 1024),
                                            backupCount=1)
    handler.setFormatter(Formatter(config.LOG_FORMAT))
    logger.addHandler(handler)
    logger.setLevel(logging.DEBUG)
def __init__(self, logfile, log_size_limit, log_rotate_num, log_level):
    """Create a root-logger wrapper with a rotating file handler.

    :param logfile: path of the log file
    :param log_size_limit: maxBytes before rotation
    :param log_rotate_num: number of rotated backups to keep
    :param log_level: kept for interface compatibility (not applied here)
    """
    self.logger = logging.getLogger()
    try:
        self.rotateHandler = ConcurrentRotatingFileHandler(
            logfile, "a", log_size_limit, log_rotate_num)
    except Exception:
        # BUG FIX: was Python 2 `except Exception, e:` / `print '...'`,
        # which is a SyntaxError under Python 3 (this file already uses
        # f-strings elsewhere). Best-effort behavior is preserved.
        print('INTERNAL_ERR')
def get_logger(ENV, BASE_DIR):
    """Return the app logger; Airbrake in production, stdlib otherwise.

    :param ENV: environment name ('production', 'development', ...)
    :param BASE_DIR: project root containing the logs/ directory
    """
    # use Airbrake in production
    if (ENV == "production"):
        log = airbrake.getLogger()
        log.setLevel(logging.INFO)
    else:
        log = logging.getLogger(__name__)
        log.setLevel(logging.DEBUG)

    # FIX (consistency with the sibling get_logger variant in this file):
    # repeated calls used to attach duplicate handlers and emit every line
    # multiple times; return early if already initialized. Airbrake's own
    # handler is pre-attached by airbrake.getLogger(), so it is excluded.
    if [h for h in log.handlers if not isinstance(h, airbrake.AirbrakeHandler)]:
        return log

    # print all debug and higher to STDOUT
    # if the environment is development
    if (ENV == "development"):
        stdoutHandler = logging.StreamHandler(sys.stdout)
        stdoutHandler.setLevel(logging.DEBUG)
        log.addHandler(stdoutHandler)

    logfile = os.path.abspath(BASE_DIR + "/logs/CivilServant_" + ENV + ".log")
    print("Logging to " + BASE_DIR + "/logs/CivilServant_" + ENV + ".log")
    formatter = logging.Formatter(
        '%(asctime)s - %(name)s({env}) - %(levelname)s - %(message)s'.format(
            env=ENV))
    rotateHandler = ConcurrentRotatingFileHandler(logfile, "a",
                                                  32 * 1000 * 1024, 5)
    rotateHandler.setLevel(logging.DEBUG)
    rotateHandler.setFormatter(formatter)
    log.addHandler(rotateHandler)
    return log
def init_spider_log(self, crawler):
    """Route this spider's log records into a per-spider rotating file."""
    log_file = crawler.settings.get('LOG_FILE')
    if not log_file:
        self.logger.info(f'{self.name} cant find LOG_FILE in settings !')
        return

    import logging
    from cloghandler import ConcurrentRotatingFileHandler
    from scrapy.utils.log import configure_logging

    # Disable default Scrapy log settings.
    configure_logging(install_root_handler=False)

    # Define your logging settings.
    log_format = "[%(asctime)s %(filename)s %(funcName)s line:%(lineno)d %(levelname)s]: %(message)s"
    logging.basicConfig(format=log_format)

    # Rotating file: 1 GB per file, 2 backups.
    handler = ConcurrentRotatingFileHandler(log_file,
                                            mode="a",
                                            maxBytes=1 * 1024 * 1024 * 1024,
                                            backupCount=2)
    handler.setFormatter(logging.Formatter(log_format))
    handler.setLevel(crawler.settings.get('LOG_LEVEL'))
    logging.getLogger().addHandler(handler)
def get_json_logger(logger_name, log_size=512 * 1024 * 1024, backupCount=2):
    '''
    Build (or fetch) a JSON-formatted logger writing to LOG_PATH/<name>.log.

    :param logger_name: logger name and log-file base name
    :param log_size: max bytes per file, default 512M
    :param backupCount: rotated backups to keep
    :return: the configured logging.Logger
    '''
    logger = logging.getLogger(logger_name)
    logger.setLevel(logging.INFO)
    # BUG FIX: previously every call attached a fresh handler to the same
    # named logger, so repeated lookups duplicated every log record.
    if not logger.handlers:
        formatter = CustomJsonFormatter(
            "%(filename)s %(lineno)d %(funcName)s %(message)s")
        log_name = os.path.join(LOG_PATH, "{}.log".format(logger_name))
        # Size-based rotation (a TimedRotatingFileHandler was used here
        # before; kept as a concurrent, process-safe handler).
        rotate_handler = ConcurrentRotatingFileHandler(
            log_name, mode="a", maxBytes=log_size,
            backupCount=backupCount)  # each file holds at most log_size bytes
        rotate_handler.setLevel(logging.INFO)
        rotate_handler.setFormatter(formatter)
        logger.addHandler(rotate_handler)
    return logger
def initLoggerBySize(
        name,
        filename,
        log_level="INFO",
        size=1024 * 1024,
        backup_count=20,
        format='%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]'
):
    '''
    Attach a size-based rotating file handler to logger `name`.

    :param name: logger name
    :param filename: path of the log file to write
    :param log_level: DEBUG/INFO/WARNING/ERROR/CRITICAL (default INFO)
    :param size: max bytes per file before rotation
    :param backup_count: rotated backups to keep
    :param format: record format string
    :return: the ConcurrentRotatingFileHandler that was attached
    '''
    # BUG FIX: the handler previously wrote to a hard-coded placeholder
    # file name and silently ignored the `filename` argument entirely.
    logging.basicConfig(level=_logger_level[log_level], format=format)
    rotateHandler = ConcurrentRotatingFileHandler(filename, "a", size,
                                                  backup_count)
    rotateHandler.setLevel(_logger_level[log_level])
    rotateHandler.setFormatter(logging.Formatter(format))
    logger = logging.getLogger(name)
    logger.addHandler(rotateHandler)
    return rotateHandler
def set_logger():
    """Build the 'threat_intelligence' logger from the module config.

    The level comes from __conf["log"]["level"]; records go to a 4 MB,
    5-backup rotating file at ../logs/threat-detection (relative to this
    module).

    :raises ValueError: if the configured level is not DEBUG/INFO/WARNING/ERROR.
    """
    log_level = __conf["log"]["level"].upper()
    log_path = os.path.abspath(
        os.path.join(os.path.split(__file__)[0], "../logs/threat-detection"))
    log_size = 4 * 1024 * 1024
    log = logging.getLogger("threat_intelligence")
    # Configure only once; repeated calls must not stack handlers.
    if len(log.handlers) == 0:
        rotate_handler = ConcurrentRotatingFileHandler(log_path, "a",
                                                       log_size, 5)
        if log_level == "DEBUG":
            log.setLevel(logging.DEBUG)
        elif log_level == "INFO":
            log.setLevel(logging.INFO)
        elif log_level == "WARNING":
            log.setLevel(logging.WARNING)
        elif log_level == "ERROR":
            log.setLevel(logging.ERROR)
        else:
            # BUG FIX: was Python 2 `raise ValueError, "..."`, which is a
            # SyntaxError under Python 3.
            raise ValueError("logLevel should be DEBUG/INFO/WARNING/ERROR.")
        # set logs formatter
        formatter = logging.Formatter(
            '%(asctime)s - %(levelname)s - %(message)s')
        rotate_handler.setFormatter(formatter)
        # add handler to logger
        log.addHandler(rotate_handler)
    return log
def f_log_concurrent(log_file):
    """Attach a 1 GB / 60-backup rotating handler to the root logger."""
    level_map = {
        'debug': logging.DEBUG,
        'info': logging.INFO,
        'error': logging.ERROR
    }
    root = logging.getLogger()
    # Use an absolute path to prevent file rotation trouble.
    target = os.path.abspath(log_file)
    # Rotate log after reaching 1G, keep 60 old copies.
    handler = ConcurrentRotatingFileHandler(target,
                                            "a",
                                            1024 * 1024 * 1024,
                                            60,
                                            encoding="utf-8")
    handler.setFormatter(
        logging.Formatter(
            "%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s",
            "%Y-%m-%d %H:%M:%S",
        ))
    root.addHandler(handler)
    # Unknown LOGLEVEL strings fall back to NOTSET.
    root.setLevel(level_map.get(LOGLEVEL, logging.NOTSET))
    return root
def get_logger(ENV, BASE_DIR):
    """Return the app logger; Airbrake in production (except email script).

    Re-entrant: if a non-Airbrake handler is already attached, the logger
    is returned untouched so repeated calls don't duplicate output.

    :param ENV: environment name ('production', 'development', ...)
    :param BASE_DIR: project root containing the logs/ directory
    """
    # use Airbrake in production
    is_email_script = pathlib.Path(sys.argv[0]).name == "email_db_report.py"
    if ENV == "production" and not is_email_script:
        log = airbrake.getLogger()
        log.setLevel(logging.INFO)
    else:
        log = logging.getLogger(__name__)
        log.setLevel(logging.DEBUG)

    # Return the logger as-is if it has already been initialized.
    # FIX: isinstance() instead of `type(h) != ...` — the idiomatic type
    # check, and it also excludes AirbrakeHandler subclasses.
    handlers = [
        h for h in log.handlers if not isinstance(h, airbrake.AirbrakeHandler)
    ]
    if len(handlers) > 0:
        return log

    # print all debug and higher to STDOUT
    # if the environment is development
    if (ENV == "development"):
        stdoutHandler = logging.StreamHandler(sys.stdout)
        stdoutHandler.setLevel(logging.DEBUG)
        log.addHandler(stdoutHandler)

    logfile = os.path.abspath(BASE_DIR + "/logs/CivilServant_" + ENV + ".log")
    print("Logging to " + BASE_DIR + "/logs/CivilServant_" + ENV + ".log")
    formatter = logging.Formatter(
        '%(asctime)s - %(name)s({env}) - %(levelname)s - %(message)s'.format(
            env=ENV))
    rotateHandler = ConcurrentRotatingFileHandler(logfile, "a",
                                                  32 * 1000 * 1024, 5)
    rotateHandler.setLevel(logging.DEBUG)
    rotateHandler.setFormatter(formatter)
    log.addHandler(rotateHandler)
    return log
def __init__(self,
             path,
             clevel=logging.DEBUG,
             Flevel=logging.DEBUG,
             when='M',
             backCount=5,
             fmt='%(asctime)s - %(pathname)s[line:%(lineno)d] - %(levelname)s: %(message)s'):
    """Logger with console + concurrent rotating file output.

    :param path: log file path (also used as the logger name)
    :param clevel: console handler level
    :param Flevel: kept for interface compatibility (was the level of a
        file handler that is no longer created, see below)
    :param when: kept for interface compatibility (timed-rotation unit)
    :param backCount: number of rotated backups to keep
    :param fmt: kept for interface compatibility; NOTE(review): this value
        is overridden by the hard-coded format below, as it always was —
        confirm whether callers expect it to be honored.
    """
    self.logger = logging.getLogger(path)
    self.logger.setLevel(logging.DEBUG)
    fmt = logging.Formatter('%(asctime)s | %(message)s', '%Y-%m-%d %H:%M:%S')
    # Use an absolute path to prevent file rotation trouble.
    logfile = os.path.abspath(path)
    # Rotate log after reaching 10 GB, keep `backCount` old copies.
    # (BUG FIX: the old comment claimed 512K; the size is 10 * 1024**3.)
    rh = ConcurrentRotatingFileHandler(logfile, "a",
                                       10 * 1024 * 1024 * 1024, backCount)
    rh.setFormatter(fmt)
    # Console handler.
    sh = logging.StreamHandler()
    sh.setFormatter(fmt)
    sh.setLevel(clevel)
    # BUG FIX: a logging.FileHandler used to be constructed here but never
    # attached (its addHandler call was commented out), leaking one open
    # file descriptor per instance; it has been removed.
    self.logger.addHandler(sh)
    self.logger.addHandler(rh)
def setup_logging(file_, name_, level=config['LOGGING_LEVEL']):
    """
    Sets up generic logging to file with rotating files on disk

    :param file_: the __file__ doc of python module that called the logging
    :param name_: the name of the file that called the logging
    :param level: the level of the logging DEBUG, INFO, WARN
    :return: logging instance
    """
    resolved_level = getattr(logging, level)
    formatter = logging.Formatter(
        fmt='%(levelname)s\t%(process)d [%(asctime)s]:\t%(message)s',
        datefmt='%m/%d/%Y %H:%M:%S')

    # Make sure the logs directory exists before opening the handler.
    log_dir = os.path.join(os.path.dirname(file_), PROJ_HOME, 'logs')
    if not os.path.exists(log_dir):
        os.makedirs(log_dir)

    handler = ConcurrentRotatingFileHandler(
        filename=os.path.join(log_dir, '{0}.log'.format(name_)),
        maxBytes=2097152,  # 2MB file
        backupCount=5,
        mode='a',
        encoding='UTF-8')
    handler.setFormatter(formatter)

    instance = logging.getLogger(name_)
    # Replace — never stack — handlers, so repeated setup stays idempotent.
    instance.handlers = []
    instance.addHandler(handler)
    instance.setLevel(resolved_level)
    return instance
def log_add():
    """Attach a 1 MB / 5-backup rotating handler to the root logger, then log."""
    target = os.path.abspath("mylogfile.log")
    root = getLogger()
    root.addHandler(ConcurrentRotatingFileHandler(target, "a", 1024 * 1024, 5))
    root.setLevel(INFO)
    root.info("Here is a very exciting log message for you.")
def __init__(self, filename):
    """Create a logger writing to <module dir>/<self.folder>/<filename>.

    Output goes both to a 100 MB concurrent rotating file (5 backups,
    utf-8) and to the console.
    """
    pwd = os.path.abspath(os.path.dirname(__file__))
    directory = os.path.join(pwd, self.folder)
    if not os.path.exists(directory):
        # FIX: makedirs instead of mkdir, so missing parents don't crash.
        os.makedirs(directory)
    # FIX: os.path.join instead of manual '/' concatenation.
    self.file_path = os.path.join(directory, filename)
    self.log = logging.getLogger(filename)
    self.log.setLevel(self.level)
    handler = ConcurrentRotatingFileHandler(self.file_path,
                                            'a',
                                            1024 * 1024 * 100,
                                            backupCount=5,
                                            encoding='utf-8')
    formatter = logging.Formatter(
        '%(asctime)s [%(processName)s %(threadName)s %(levelname)s %(module)s:%(funcName)s:%(lineno)d] %(message)s'
    )
    handler.setFormatter(formatter)
    self.log.addHandler(handler)
    # Console output.
    stream = logging.StreamHandler()
    stream.setFormatter(formatter)
    self.log.addHandler(stream)
def create_app():
    """Flask application factory: logging, config, db and cache wiring."""
    app = Flask(__name__)

    # Rotating application log: 800 MB per file, 10 backups, utf-8.
    handler = ConcurrentRotatingFileHandler(
        '%s/logs/service.log' % PROJECT_PATH,
        'a',
        800 * 1024 * 1024,
        backupCount=10,
        encoding='utf-8')
    handler.setFormatter(
        logging.Formatter(
            '%(asctime)s %(levelname)s %(module)s.%(funcName)s Line:%(lineno)d %(message)s',
            '%Y-%m-%d %H:%M:%S'))
    app.logger.addHandler(handler)
    app.logger.setLevel(logging.DEBUG)

    # Configuration.
    app.config.from_object(config)
    config.init_app(app)
    # Database.
    db.init_app(app)
    # Cache.
    cache.init_app(app)
    return app
def configure_logging(logger, log_level_file, log_level_stderr, filename):
    """Configures logging for given logger using the given filename.

    :return None.
    """
    # Create an IdentityFilter.
    identity_filter = IdentityFilter(identity=get_identifier())
    formatter = logging.Formatter(LOG_FORMAT)

    if log_level_file != "NONE":
        # If the logging directory doesn't exist, create it.
        if not os.path.exists(LOG_DIR):
            os.makedirs(LOG_DIR)
        # Rotating file handler: 1 MB per file, 5 backups.
        file_handler = ConcurrentRotatingFileHandler(
            filename=os.path.join(LOG_DIR, filename),
            maxBytes=1000000,
            backupCount=5)
        file_handler.addFilter(identity_filter)
        file_handler.setFormatter(formatter)
        logger.addHandler(file_handler)
        logger.setLevel(log_level_file)

    if log_level_stderr != "NONE":
        # Attach a stderr handler to the log.
        stderr_handler = logging.StreamHandler(sys.stderr)
        stderr_handler.setLevel(log_level_stderr)
        stderr_handler.addFilter(identity_filter)
        stderr_handler.setFormatter(formatter)
        logger.addHandler(stderr_handler)
def __init__(self, module=''):
    """Create (or reuse) a per-day, per-module debug logger."""
    string_date = str(dt.date(dt.now()))
    if module == '':
        file_name = LOGGER_FILE + string_date
    else:
        file_name = LOGGER_FILE + module + '-' + string_date
    # log_namespace can be replaced with your namespace
    logger = logging.getLogger(file_name)
    logger.setLevel(logging.DEBUG)
    # Only configure once per named logger to avoid duplicate handlers.
    if not logger.handlers:
        # usually I keep the LOGGING_DIR defined in some global settings file
        target = os.path.join(LOGGING_DIR, '%s.log' % file_name)
        handler = ConcurrentRotatingFileHandler(target)
        handler.setFormatter(
            logging.Formatter('%(asctime)s %(levelname)s:%(module)s:%(message)s'))
        handler.setLevel(logging.DEBUG)
        logger.addHandler(handler)
    self._logger = logger
def __init__(self, json=False, stdout=True, name='scrapy-cluster',
             dir='logs', file='main.log', bytes=25000000, backups=5,
             level='INFO',
             format='%(asctime)s [%(name)s] %(levelname)s: %(message)s',
             propagate=False):
    '''
    @param stdout: Flag to write logs to stdout or file
    @param json: Flag to write json logs with objects or just the messages
    @param name: The logger name
    @param dir: The directory to write logs into
    @param file: The file name
    @param bytes: The max file size in bytes
    @param backups: The number of backups to keep of the file
    @param level: The logging level string
    @param format: The log format
    @param propagate: Allow the log to propagate to other ancestor loggers

    NOTE: several parameter names shadow builtins (json, dir, file, bytes,
    format); they are part of the public interface and kept as-is.
    '''
    # set up logger
    self.logger = logging.getLogger(name)
    self.logger.setLevel(logging.DEBUG)
    self.logger.propagate = propagate
    self.json = json
    self.log_level = level
    self.format_string = format
    if stdout:
        # set up to std out
        stream_handler = logging.StreamHandler(sys.stdout)
        stream_handler.setLevel(logging.DEBUG)
        formatter = self._get_formatter(json)
        stream_handler.setFormatter(formatter)
        self.logger.addHandler(stream_handler)
        self._check_log_level(level)
        self.debug("Logging to stdout")
    else:
        # set up to file
        # FIX: exist_ok=True replaces the manual try/except errno.EEXIST
        # dance (the Python 3 idiom; this file already requires py3).
        os.makedirs(dir, exist_ok=True)
        file_handler = ConcurrentRotatingFileHandler(dir + '/' + file,
                                                     maxBytes=bytes,
                                                     backupCount=backups)
        file_handler.setLevel(logging.DEBUG)
        formatter = self._get_formatter(json)
        file_handler.setFormatter(formatter)
        self.logger.addHandler(file_handler)
        self._check_log_level(level)
        self.debug("Logging to file: {file}".format(file=dir + '/' + file))
def getLogHandler(self, fn):
    """ Override this method if you want to test a different logging
    handler class. """
    # Keyword arguments make the positional size/count pair explicit.
    return ConcurrentRotatingFileHandler(fn,
                                         mode='a',
                                         maxBytes=self.rotateSize,
                                         backupCount=self.rotateCount,
                                         debug=self.debug)
def get_json_log_handler(path, app_name, json_fields):
    """Build a 2 GB rotating handler emitting Logstash-formatted JSON."""
    formatter = LogstashFormatter()
    formatter.defaults['@tags'] = ['collector', app_name]
    formatter.defaults['@fields'] = json_fields
    handler = ConcurrentRotatingFileHandler(path, "a",
                                            2 * 1024 * 1024 * 1024, 1)
    handler.setFormatter(formatter)
    return handler
def setup_logging(name_, level=None, proj_home=None, attach_stdout=False):
    """
    Sets up generic logging to file with rotating files on disk

    :param: name_: the name of the logfile (not the destination!)
    :param: level: the level of the logging DEBUG, INFO, WARN
    :param: proj_home: optional, starting dir in which we'll check for
        (and create) 'logs' folder and set the logger there
    :param: attach_stdout: also mirror records to stdout (JSON-formatted)
    :return: logging instance
    """
    if level is None:
        # Level not given: pull LOGGING_LEVEL from the project config.
        config = load_config(extra_frames=1, proj_home=proj_home, app_name=name_)
        level = config.get('LOGGING_LEVEL', 'INFO')
    # Translate the level name (e.g. 'INFO') to its numeric value.
    level = getattr(logging, level)
    logfmt = u'%(asctime)s %(msecs)03d %(levelname)-8s [%(process)d:%(threadName)s:%(filename)s:%(lineno)d] %(message)s'
    datefmt = TIMESTAMP_FMT
    # formatter = logging.Formatter(fmt=logfmt, datefmt=datefmt)
    formatter = MultilineMessagesFormatter(fmt=logfmt, datefmt=datefmt)
    formatter.multiline_marker = u''
    formatter.multiline_fmt = u' %(message)s'
    # Timestamps are rendered in UTC.
    formatter.converter = time.gmtime
    logging_instance = logging.getLogger(name_)
    logging_instance.propagate = False  # logging messages are not passed to the handlers of ancestor loggers (i.e., gunicorn)
    # Resolve the logs directory, creating it if necessary.
    if proj_home:
        proj_home = os.path.abspath(proj_home)
        fn_path = os.path.join(proj_home, u'logs')
    else:
        fn_path = os.path.join(_get_proj_home(), u'logs')
    if not os.path.exists(fn_path):
        os.makedirs(fn_path)
    # Strip any '.log' suffix from name_ so we never produce 'x.log.log'.
    fn = os.path.join(fn_path, u'{0}.log'.format(name_.split(u'.log')[0]))
    rfh = ConcurrentRotatingFileHandler(filename=fn,
                                        maxBytes=10485760,
                                        backupCount=10,
                                        mode=u'a',
                                        encoding=u'UTF-8')  # 10MB file
    rfh.setFormatter(formatter)
    # Replace — don't stack — handlers, so repeated setup is idempotent.
    logging_instance.handlers = []
    logging_instance.addHandler(rfh)
    logging_instance.setLevel(level)
    if attach_stdout:
        stdout = logging.StreamHandler(sys.stdout)
        stdout.formatter = get_json_formatter()
        logging_instance.addHandler(stdout)
    return logging_instance
def get_logger(file_path):
    """Logger for distributed training: rotating file plus console output."""
    logger = logging.getLogger('Distribute training logs.')
    logger.setLevel(logging.INFO)
    # File first, console second — order preserved from the original.
    logger.addHandler(ConcurrentRotatingFileHandler(file_path))
    logger.addHandler(logging.StreamHandler())
    return logger
def get_logger():
    """Return the 'job' logger backed by a 2 MB / 7-backup rotating file."""
    logger = logging.getLogger('job')
    fmt = logging.Formatter(
        '%(asctime)s %(filename)s %(lineno)d %(levelname)s %(message)s')
    handler = ConcurrentRotatingFileHandler(
        os.path.join(collector_agent_path, 'log/job.log'), "a", 2000000, 7)
    handler.setFormatter(fmt)
    logger.addHandler(handler)
    logger.setLevel(logging.DEBUG)
    return logger
def getcLoggers(loggerName, loggerLevel, loggerLocation):
    '''Build a process-safe logger (1 GB rotating file, 5 backups).'''
    logger = logging.getLogger(loggerName)
    logger.setLevel(loggerLevel)
    logger.addHandler(
        ConcurrentRotatingFileHandler(loggerLocation, 'a', 1073741824, 5))
    return logger
def setlog():
    """Attach a 20 MB / 100-backup rotating INFO handler to the root logger."""
    handler = ConcurrentRotatingFileHandler(log_config['file_path'], "a",
                                            20 * 1024 * 1024, 100)
    handler.setLevel(logging.INFO)
    handler.setFormatter(
        logging.Formatter(
            '[%(asctime)s] [process:%(process)s] [%(filename)s:%(lineno)d] %(levelname)s %(message)s'))
    root = logging.getLogger()
    root.addHandler(handler)
    root.setLevel(logging.INFO)