def getSharedLogger(self): """Create an asynchronous shared logger to be used by the WSGI threads.""" ## Open defined configurations logSettingsFileName = self.globalSettings[ 'fileContainingServiceLogSettings'] ## Create requested shared log handler for the threads logSettings = loadSettings( os.path.join(env.configPath, logSettingsFileName)) logSettings = logSettings.get(self.multiProcessingLogContext) logFile = os.path.join(env.logPath, 'service', logSettings.get('fileName')) sharedLogger = logging.getLogger(self.multiProcessingLogContext) sharedLogger.setLevel(logSettings.get('logLevel')) mainHandler = RFHandler( logFile, maxBytes=int(logSettings.get('maxSizeInBytes')), backupCount=int(logSettings.get('maxRollovers'))) fmt = logging.Formatter(logSettings.get('lineFormat'), datefmt=logSettings.get('dateFormat')) mainHandler.setFormatter(fmt) sharedLogger.addHandler(mainHandler) ## Setup a queue for all the threads/processes to send messages through, ## so we are only writing to the log from the main thread multiprocessing_logging.install_mp_handler() ## Initialize the log sharedLogger.info('Initializing log from apiService') self.logger.info('Initialized shared log') ## end getSharedLogger return
def get_logger(name, rawData = False, timeRotate=False, dirs="log", config_file_path=None):
    """Build and return the 'migbq-log-<name>' logger with a rotating file handler.

    Args:
        name: suffix for the logger name and the log file name ('<name>.log').
        rawData: if True, log bare messages with no timestamp prefix.
        timeRotate: if True, rotate hourly (TimedRotatingFileHandler);
            otherwise rotate by size, preferring concurrent_log_handler when
            it is installed and falling back to the stdlib handler.
        dirs: name of the log directory, created if missing.
        config_file_path: anchor the log directory next to this file;
            otherwise it is placed next to this module.

    Returns:
        The configured logging.Logger (DEBUG level).  On setup failure the
        error is printed and the bare logger is returned.
    """
    log = logging.getLogger('migbq-log-' + name)
    try:
        if rawData:
            logfmt = logging.Formatter("%(message)s")
        else:
            logfmt = logging.Formatter("# %(asctime)-15s # %(message)s")
        logging.basicConfig(format="# %(message)s")
        # Resolve the folder that will hold '<name>.log'.
        if config_file_path:
            log_folder = os.path.join(
                os.path.dirname(os.path.abspath(config_file_path)), dirs)
        else:
            log_folder = os.path.join(
                os.path.dirname(os.path.realpath(__file__)), dirs)
        log_filename = name + ".log"
        if not os.path.exists(log_folder):
            os.makedirs(log_folder)
        if timeRotate:
            from logging.handlers import TimedRotatingFileHandler as RFHandler
            concurrentHandler = RFHandler(os.path.join(log_folder, log_filename),
                                          when='H', interval=1, backupCount=100)
            # Per-pid suffix keeps rotated files from colliding across processes.
            concurrentHandler.suffix = "%Y-%m-%d_%H_" + str(os.getpid())
        else:
            try:
                from concurrent_log_handler import ConcurrentRotatingFileHandler as RFHandler
            except ImportError:
                # Optional dependency: warn and degrade to the stdlib handler.
                from warnings import warn
                warn("ConcurrentLogHandler package not installed. Using builtin log handler")
                from logging.handlers import RotatingFileHandler as RFHandler
            concurrentHandler = RFHandler(os.path.join(log_folder, log_filename),
                                          maxBytes=1024000, backupCount=100)
        concurrentHandler.setFormatter(logfmt)
        log.addHandler(concurrentHandler)
        log.setLevel(logging.DEBUG)
    # BUG FIX: was a bare `except:` (swallowed SystemExit/KeyboardInterrupt)
    # and used the Python-2-only `print` statement; print(...) works on 2 and 3.
    except Exception:
        print("logger cannot initialized %s, %s, %s" % sys.exc_info())
    return log
def get_worker_log(analytics_name='Unknown'):
    """Return the 'r5d4.worker.<analytics_name>' logger.

    Logs to the rotating file named by settings.WORKER_LOG when set,
    otherwise to a plain stream handler; level comes from
    settings.WORKER_LOG_LEVEL and formatting from worker_log_formatter.
    """
    logger = logging.getLogger("r5d4.worker.%s" % analytics_name)
    handler = (RFHandler(settings.WORKER_LOG, "a+", 1048576, 15)
               if settings.WORKER_LOG
               else logging.StreamHandler())
    logger.addHandler(handler)
    logger.setLevel(settings.WORKER_LOG_LEVEL)
    handler.setFormatter(worker_log_formatter)
    return logger
def get_activity_log():
    """Return the 'r5d4.activity' logger at INFO level.

    Writes to the rotating file named by settings.ACTIVITY_LOG when set,
    otherwise to a plain stream handler, formatted with
    activity_log_formatter.
    """
    logger = logging.getLogger('r5d4.activity')
    handler = (RFHandler(settings.ACTIVITY_LOG, "a+", 1048576, 15)
               if settings.ACTIVITY_LOG
               else logging.StreamHandler())
    logger.addHandler(handler)
    logger.setLevel(logging.INFO)
    handler.setFormatter(activity_log_formatter)
    return logger
def enable_file_logging(self):
    """Attach one shared rotating file handler ('pypeline.log') to the
    main, FM, and interface loggers.

    Directory, size limit, and backup count come from the [logging]
    section of self._config; the handler is stashed on self._hdlr so it
    can be detached later.
    """
    cfg = self._config
    log_path = os.path.join(cfg.get('logging', 'log_directory'),
                            'pypeline.log')
    handler = RFHandler(log_path,
                        maxBytes=int(cfg.get('logging', 'log_size')),
                        backupCount=int(cfg.get('logging', 'log_rotate')))
    handler.setFormatter(logging.Formatter(fmt=self.fmt, datefmt=self.datefmt))
    for target in (self._logger, self._fmlogger, self._iflogger):
        target.addHandler(handler)
    self._hdlr = handler
def setup_logging_to_file(logfile):
    """Configure root logging to append to *logfile*, then add a rotating
    handler on the same file.

    Uses the module-level `logfilesize` and `numberofbackups` for the
    rotation parameters.  NOTE(review): basicConfig already installs a
    FileHandler on the root logger, so each record is written twice
    (once unrotated) — presumably intentional, verify with callers.

    Returns:
        None on success; on failure, logs the exception via log_exception
        and returns the error as a string.
    """
    try:
        logging.basicConfig(filename=logfile, filemode='a',
                            level=logging.DEBUG,
                            format='%(asctime)s - %(levelname)s - %(message)s',
                            datefmt='%m/%d/%Y %I:%M:%S %p')
        log = getLogger()
        rotateHandler = RFHandler(logfile, "a", logfilesize, numberofbackups)
        log.addHandler(rotateHandler)
    # BUG FIX: `except Exception, e` is Python-2-only syntax (a SyntaxError
    # on Python 3); `as e` is valid on Python 2.6+ and 3.x alike.
    except Exception as e:
        log_exception(e)
        return str(e)
def get_logger(tag="test"):
    """Return the 'clogger' logger with a size-rotating file handler under
    LOG_PATH, writing to '<tag>.log'.

    NOTE(review): each call adds another handler to the same 'clogger'
    logger, so repeated calls duplicate output — confirm callers invoke
    this only once per tag.
    """
    logger = logging.getLogger("clogger")
    if not os.path.exists(LOG_PATH):
        os.makedirs(LOG_PATH)
    # BUG FIX: the original built the path as
    #   os.path.join(LOG_PATH) + '%s.log' % tag
    # which glues the directory and file name together with no separator
    # unless LOG_PATH happens to end in a slash.
    logfile = os.path.join(LOG_PATH, '%s.log' % tag)
    # delay expects a bool; the original passed 0.05, which is merely
    # truthy — True states the intent (defer opening until first write).
    fh = RFHandler(logfile, maxBytes=1024 * 1024 * 100, backupCount=10,
                   delay=True)
    formatter = logging.Formatter('[%(asctime)s %(levelno)s] %(message)s')
    fh.setFormatter(formatter)
    fh.setLevel(logging.DEBUG)
    logger.addHandler(fh)
    return logger
def __init__(self, name, level=logging.NOTSET):
    """Logger subclass constructor: wires a stdout handler and a rotating
    file handler (path from config['LOG_FILE']) onto this logger, both
    sharing one verbose formatter, then calls self.close_handlers().
    """
    logging.Logger.__init__(self, name, level=level)
    fmt = logging.Formatter(
        fmt="%(asctime)-19s: %(name)s[%(module)s->%(funcName)s]: %(levelname)s: %(message)s"
    )
    console = logging.StreamHandler(stream=sys.stdout)
    console.setFormatter(fmt)
    self.addHandler(console)
    log_path = config['LOG_FILE']
    utils.makedirs(os.path.dirname(log_path))
    file_handler = RFHandler(filename=log_path,
                             maxBytes=1024 * 1024,
                             backupCount=2)
    file_handler.setFormatter(fmt)
    self.addHandler(file_handler)
    self.close_handlers()
def configureLogger(logger, log_file, log_level, console_ouput=True):
    """Configure *logger* (intended to be the root logger) with an optional
    rotating/timed file handler and an optional console handler, record it
    in the module-level root_logger, and install my_excepthook to catch
    otherwise-unhandled exceptions.

    Args:
        logger: the logger to configure (saved into the global root_logger).
        log_file: file path for the file handler; None disables file logging.
        log_level: level applied to the logger.
        console_ouput: when True, also log to stderr via a StreamHandler.
            (Parameter name typo kept for caller compatibility.)
    """
    global root_logger
    root_logger = logger
    # create RotatingFileHandler and set level to debug
    if log_file is not None:
        print("log_file=%s" % (log_file))
        # Module flag chooses size-based vs midnight rotation.
        if useRotatingFileHandler:
            file_handler = RFHandler(filename=log_file, mode='a',
                                     maxBytes=10 * 1024 * 1024,
                                     backupCount=10, encoding='utf-8')
        else:
            file_handler = TimedRotatingFileHandler(filename=log_file,
                                                    when='midnight',
                                                    interval=1,
                                                    backupCount=10,
                                                    encoding='utf-8')
        file_handler.setFormatter(logging.Formatter(
            fmt='%(asctime)s,pid=%(process)d,tid=%(thread)d,%(name)s,%(levelname)s:%(message)s'
        ))
        logger.addHandler(file_handler)
        logger.setLevel(log_level)
    # create consoleHanlder
    if console_ouput:
        console = logging.StreamHandler()
        console.setFormatter(logging.Formatter(
            fmt='%(asctime)s,pid=%(process)d,tid=%(thread)d,%(name)s,%(levelname)s:%(message)s',
            datefmt="%H:%M:%S"))
        logger.addHandler(console)
        logger.setLevel(log_level)
    # catch the uncatched exception
    sys.excepthook = my_excepthook
def enable_file_logging(self, filename):
    """
    Hack to define a filename for the log file!

    It overloads the 'enable_file_logging' method in
    'nipype/utils/logger.py' so the caller chooses the log file name
    instead of the hard-coded default.  Attaches one shared rotating
    handler to the main, FM, and interface loggers and stores it on
    self._hdlr.
    """
    import logging
    from logging.handlers import RotatingFileHandler as RFHandler
    cfg = self._config
    log_path = os.path.join(cfg.get('logging', 'log_directory'), filename)
    handler = RFHandler(log_path,
                        maxBytes=int(cfg.get('logging', 'log_size')),
                        backupCount=int(cfg.get('logging', 'log_rotate')))
    handler.setFormatter(logging.Formatter(fmt=self.fmt, datefmt=self.datefmt))
    for target in (self._logger, self._fmlogger, self._iflogger):
        target.addHandler(handler)
    self._hdlr = handler
def setup_log():
    """Configure logging: the LOG_NAME logger gets a MultiProcessLogHandler
    at ERROR level, while the root logger gets a DEBUG stdout handler.

    A rotating file handler on LOG_PATH is constructed but deliberately
    not attached (the attachment is disabled, matching the original).
    Returns the LOG_NAME logger.
    """
    app_logger = logging.getLogger(LOG_NAME)
    formatter = logging.Formatter(
        '[%(asctime)s - %(levelname)s - %(name)s] %(message)s')
    # stdout handler for the root logger
    console = logging.StreamHandler(sys.stdout)
    console.setLevel(logging.DEBUG)
    console.setFormatter(formatter)
    # rolling file handler (built but currently unused, see below)
    file_handler = RFHandler(LOG_PATH, maxBytes=1024 * 1024 * 100,
                             backupCount=3, delay=0.05)
    file_handler.setLevel(logging.DEBUG)
    file_handler.setFormatter(formatter)
    # wire everything up
    app_logger.setLevel(logging.ERROR)
    app_logger.addHandler(MultiProcessLogHandler())
    root = logging.getLogger()
    root.setLevel(logging.DEBUG)
    root.addHandler(console)
    # root.addHandler(file_handler)
    return app_logger