Example #1
def setup_logging() -> Generator[None, None, None]:
    logging.getLogger("discord").setLevel(logging.INFO)
    logging.getLogger("discord.http").setLevel(logging.WARNING)

    log = logging.getLogger()

    try:
        log.setLevel(logging.INFO)
        handler = RotatingFileHandler(
            filename="fizzbotz.log",
            mode="w",
            maxBytes=5 * 1024 * 1024,
            encoding="utf-8",
        )
        dt_fmt = "%Y-%m-%d %H:%M:%S"
        fmt = logging.Formatter(
            "[{asctime}] [{levelname:<7}] {name}: {message}",
            dt_fmt,
            style="{")
        handler.setFormatter(fmt)
        log.addHandler(handler)

        yield
    finally:
        handlers = log.handlers[:]
        for handler in handlers:
            handler.close()
            log.removeHandler(handler)
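The try/finally split around the yield marks this generator as context-manager material: setup before the yield, teardown after. A minimal usage sketch, assuming it is wrapped with contextlib.contextmanager outside the excerpt (logging_context and main are illustrative names, not part of the source):

import contextlib
import logging

@contextlib.contextmanager
def logging_context():
    # Delegate to the setup_logging generator above; its single yield
    # separates handler installation from the finally-block cleanup.
    yield from setup_logging()

def main():
    with logging_context():  # RotatingFileHandler installed here
        logging.getLogger(__name__).info("bot starting")
    # on exit, every root handler is closed and removed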
Example #2
	def dbl(self,logmsg):
		try:
			#locate path for debug log in prefs file
			logfilePath=os.path.join(self.rootFolderPath,'errors')
			fileName = 'debugLog'
			logSize = 5000000
			logCount = 5
			#if path does not exist, create it
			if not os.path.exists(logfilePath):
				os.makedirs(logfilePath)
			if os.path.exists(logfilePath):
				logHandler = RotatingFileHandler(logfilePath + "/" + fileName,"a", logSize, logCount)
				logFormatter = logging.Formatter("%(asctime)s:%(message)s")
				logHandler.setFormatter(logFormatter)
				logger = logging.getLogger(__name__)
				logger.disabled = False
				logger.addHandler(logHandler)
				logger.setLevel(logging.DEBUG)
				logger.debug(logmsg)
				logHandler.flush()
				logHandler.close()
				logger.removeHandler(logHandler)
		except Exception:
			#if we can't write to the log for any reason, eat the error and continue.
			pass
Example #3
	def dbl(self,logmsg):
		try:
			#locate path for debug log in prefs file
			logfilePath=os.path.join(self.rootpath,'debugLogs')
			fileName = 'debugLog'
			logSize = 5000000
			logCount = 5
			#if path does not exist, create it
			if not os.path.exists(logfilePath):
				os.makedirs(logfilePath)
			if os.path.exists(logfilePath):
				env = envUtils.getEnvironment()
				self.backupReport += "<tr><td>%s - %s</td></tr>" % (env.localizeUTCDate(env.formatUTCDate()), logmsg)
				logHandler = RotatingFileHandler(logfilePath + "/" + fileName,"a", logSize, logCount)
				logFormatter = logging.Formatter("%(asctime)s:%(message)s")
				logHandler.setFormatter(logFormatter)
				logger = logging.getLogger(__name__)
				logger.disabled = False
				logger.addHandler(logHandler)
				logger.setLevel(logging.DEBUG)
				logger.debug(logmsg)
				logHandler.flush()
				logHandler.close()
				logger.removeHandler(logHandler)
		except Exception:
			#if we can't write to the log for any reason, eat the error and continue.
			pass
Example #4
    def instance(cls):
        if cls.logger is None:
            #TODO: config to limit logs by days
            path = "/temporary/log"

            if not os.path.exists(path):
                os.makedirs(path)
            filename = '%s/dubhe.log' % path
            formatter = logging.Formatter(
                '%(asctime)s | %(levelname)-8s | %(filename)s:%(lineno)s: %(message)s'
            )
            # Console output
            streamHandler = logging.StreamHandler()
            streamHandler.setFormatter(formatter)
            # Rotating file output
            log_file_handler = RotatingFileHandler(filename=filename,
                                                   mode='a',
                                                   maxBytes=10 * 1024 * 1024,
                                                   backupCount=14,
                                                   encoding=None,
                                                   delay=0)
            log_file_handler.setFormatter(formatter)
            cls.logger = logging.getLogger()
            cls.logger.setLevel(logging.INFO)
            cls.logger.addHandler(streamHandler)
            cls.logger.addHandler(log_file_handler)

            # Filter out kafka INFO-level logs
            kafka_logger = logging.getLogger('kafka')
            kafka_logger.setLevel(logging.WARNING)

        return cls.logger
Example #5
def setup_logging():

    try:
        # __enter__
        if os.getenv('LOGGING', '').lower() == 'true':
            if os.getenv('DEBUG', '').lower() == 'true':
                logging.getLogger('discord').setLevel(logging.DEBUG)
                logging.getLogger('discord.http').setLevel(logging.DEBUG)
            else:
                logging.getLogger('discord').setLevel(logging.INFO)
                logging.getLogger('discord.http').setLevel(logging.WARNING)

            log = logging.getLogger()
            log.setLevel(logging.INFO)
            handler = RotatingFileHandler(
                filename='logs/log.log', encoding='utf-8', mode='w', maxBytes=32 * 1024 * 1024, backupCount=5)
            formatter = logging.Formatter(
                '[{asctime}] [{levelname:<7}] {name}: {message}', '%Y-%m-%d %H:%M:%S', style='{')
            handler.setFormatter(formatter)
            log.addHandler(handler)

            if os.getenv('DEBUG', '').lower() == 'true':
                log.info('Logger successfully set up in debug mode')
            else:
                log.info('Logger successfully set up in normal mode')

        yield
    finally:
        # __exit__
        if os.getenv('LOGGING', '').lower() == 'true':
            handlers = log.handlers[:]
            for handler in handlers:
                handler.close()
                log.removeHandler(handler)
Example #6
def set_logging():
    try:
        logging.getLogger('discord').setLevel(logging.INFO)
        logging.getLogger('discord.http').setLevel(logging.WARNING)
        logging.getLogger('discord.state').setLevel(logging.DEBUG)

        log = logging.getLogger()
        log.setLevel(logging.DEBUG)

        date_format = '%Y-%m-%d %H:%M:%S'
        handler = RotatingFileHandler(filename=config.LOG_FILENAME,
                                      encoding='utf-8',
                                      mode='a')
        fmt = logging.Formatter('[{asctime}] [{levelname}] {name}: {message}',
                                date_format,
                                style='{')

        handler.setFormatter(fmt)
        log.addHandler(handler)

        yield
    finally:
        for handler in log.handlers[:]:
            handler.close()
            log.removeHandler(handler)
Example #7
    def __init__(self):
        """
        Initialization.
        """
        self.logger = logging.getLogger()
        self.logger.setLevel(logging.INFO)

        today = time.strftime("%Y-%m-%d", time.localtime(time.time()))
        if not self.logger.handlers or self.logger.handlers[
                0].baseFilename.find(today) < 0:
            self.logger.handlers = []

            path = os.path.abspath(
                os.path.join(os.path.dirname((os.path.abspath(__file__))),
                             "logfile"))
            if not os.path.exists(path):
                os.makedirs(path)

            log_filename = os.path.join(path, '{time}.log'.format(time=today))

            file_handler = RotatingFileHandler(log_filename,
                                               maxBytes=10240,
                                               encoding='utf-8')
            file_handler.setFormatter(
                logging.Formatter(
                    '%(asctime)s %(filename)s %(name)s line:%(lineno)d %(levelname)s %(message)s'
                ))
            self.logger.addHandler(file_handler)
Example #8
 def __printconsole(self, level, message):
     # Create a logger
     logger = logging.getLogger()
     logger.setLevel(logging.DEBUG)
     # Create a handler that writes to the console
     ch = logging.StreamHandler()
     ch.setLevel(logging.DEBUG)
     # Create a handler that writes to the log file, with a size limit and backup count
     # fh = logging.FileHandler(self.logname, mode='a', encoding='utf-8')
     fh = RotatingFileHandler(self.logname, mode='a', encoding='utf-8',
                              maxBytes=1024 * 1024, backupCount=50)
     fh.setLevel(logging.DEBUG)
     # Define the output format for the handlers
     formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
     fh.setFormatter(formatter)
     ch.setFormatter(formatter)
     # Attach the handlers to the logger
     logger.addHandler(fh)
     logger.addHandler(ch)
     # Log one message
     if level == 'info':
         logger.info(message)
     elif level == 'debug':
         logger.debug(message)
     elif level == 'warning':
         logger.warning(message)
     elif level == 'error':
         logger.error(message)
     # Remove the handlers and close the open file
     logger.removeHandler(ch)
     logger.removeHandler(fh)
     fh.close()
Example #9
class MultiProcessingLog(logging.Handler):
    def __init__(self, filename, mode="a", maxBytes=0, backupCount=0, encoding=None, delay=0):
        logging.Handler.__init__(self)

        # In case our call to RotatingFileHandler blows up we first set
        # the _handler to None
        self._handler = None
        self._handler = RotatingFileHandler(filename, mode, maxBytes, backupCount, encoding, delay)
        self.queue = multiprocessing.Queue(-1)

        t = threading.Thread(target=self.receive)
        t.daemon = True
        t.start()

    def setFormatter(self, fmt):
        logging.Handler.setFormatter(self, fmt)
        self._handler.setFormatter(fmt)

    def receive(self):
        while True:
            try:
                record = self.queue.get()
                self._handler.emit(record)
            except (KeyboardInterrupt, SystemExit):
                raise
            except EOFError:
                break
            except:
                traceback.print_exc(file=sys.stderr)

    def send(self, s):
        self.queue.put_nowait(s)

    def _format_record(self, record):
        # ensure that exc_info and args
        # have been stringified.  Removes any chance of
        # unpickleable things inside and possibly reduces
        # message size sent over the pipe
        if record.args:
            record.msg = record.msg % record.args
            record.args = None
        if record.exc_info:
            dummy = self.format(record)
            record.exc_info = None

        return record

    def emit(self, record):
        try:
            s = self._format_record(record)
            self.send(s)
        except (KeyboardInterrupt, SystemExit):
            raise
        except:
            self.handleError(record)

    def close(self):
        if self._handler is not None:
            self._handler.close()
        logging.Handler.close(self)
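A hedged usage sketch for this handler: install it on the root logger before forking workers, so each child's emit() pushes records onto the shared queue while the parent's receive thread does the actual file writing (worker, the file name and the sizes are illustrative; the pattern assumes fork-style process creation):

import logging
import multiprocessing

def worker(n):
    # the inherited handler forwards this record through the queue
    logging.getLogger(__name__).info("hello from worker %d", n)

if __name__ == "__main__":
    handler = MultiProcessingLog("app.log", mode="a",
                                 maxBytes=5 * 1024 * 1024, backupCount=3)
    handler.setFormatter(
        logging.Formatter("%(asctime)s %(processName)s %(message)s"))
    root = logging.getLogger()
    root.setLevel(logging.INFO)
    root.addHandler(handler)

    workers = [multiprocessing.Process(target=worker, args=(i,))
               for i in range(4)]
    for p in workers:
        p.start()
    for p in workers:
        p.join()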
Example #10
    def __init__(self, logger=None, level='info'):
        if not os.path.exists(LOG_DIR):
            os.mkdir(LOG_DIR)

        # Get the logger first:
        self.logger = logging.getLogger(logger)

        # Set the log level
        self.logger.setLevel(self.levels[level])

        # Set the log output format
        formatter = logging.Formatter('[%(asctime)s] [%(pathname)s:%(lineno)d] - %(levelname)s: %(message)s')

        # Configure the log file and rotation
        frHandler = RotatingFileHandler(LOG_FILE, maxBytes=20 * 1024 * 1024, backupCount=30, encoding='utf-8')
        frHandler.setLevel(self.levels[level])
        frHandler.setFormatter(formatter)

        # Configure console output
        console = logging.StreamHandler()
        console.setLevel(self.levels[level])
        console.setFormatter(formatter)

        # Add the two handlers above
        self.logger.addHandler(frHandler)
        self.logger.addHandler(console)

Example #11
File: logger.py Project: LeGaulois/soc
class log(object):
    def __init__(self,fichier,instance):
        self.logger = logging.getLogger(instance)
        
        self.logger.setLevel(logging.DEBUG)
        formatter = logging.Formatter('%(asctime)s :: %(levelname)s :: %(message)s')
        self.file_handler = RotatingFileHandler(fichier, 'a', 5000000, 1)
        self.file_handler.setLevel(logging.DEBUG)   
        self.file_handler.setFormatter(formatter)
        self.file_handler.createLock()
        self.logger.addHandler(self.file_handler)
        
    def ecrire(self,message,niveau):
        if niveau=='critical':
            self.logger.critical(message)

        elif niveau=='error':
            self.logger.error(message)

        elif niveau=='warning':
            self.logger.warning(message)

        elif niveau=='info':
            self.logger.info(message)
        else:        
            self.logger.debug(message)

    def fermer(self):
        self.file_handler.close()
Example #12
class MultiProcessingLog(logging.Handler):
    def __init__(self, filename, mode="timed", max_bytes=0, backup_count=0):
        logging.Handler.__init__(self)
        self.queue = multiprocessing.Queue(-1)

        if mode == "timed":
            self._handler = TimedRotatingFileHandler(filename, "midnight", 1, backup_count)
        else:
            self._handler = RotatingFileHandler(filename, "a", max_bytes, backup_count)

        t = threading.Thread(target=self.receive)
        t.daemon = True
        t.start()

    def setFormatter(self, fmt):
        logging.Handler.setFormatter(self, fmt)
        self._handler.setFormatter(fmt)

    def receive(self):
        while True:
            try:
                record = self.queue.get()
                self._handler.emit(record)
            except (KeyboardInterrupt, SystemExit):
                raise
            except EOFError:
                break
            except:
                traceback.print_exc(file=sys.stderr)

    def send(self, s):
        self.queue.put_nowait(s)

    def _format_record(self, record):
        # ensure that exc_info and args
        # have been stringified.  Removes any chance of
        # unpickleable things inside and possibly reduces
        # message size sent over the pipe
        if record.args:
            record.msg = record.msg % record.args
            record.args = None
        if record.exc_info:
            dummy = self.format(record)
            record.exc_info = None

        return record

    def emit(self, record):
        try:
            s = self._format_record(record)
            self.send(s)
        except (KeyboardInterrupt, SystemExit):
            raise
        except:
            self.handleError(record)

    def close(self):
        self._handler.close()
        logging.Handler.close(self)
Example #13
class MultiProcessingLog(logging.Handler):
	def __init__(self, name, mode = 'a', maxsize = 0, rotate = 0):
		'''
		By default the RotatingFileHandler is set up as an ordinary Handler (no rotation, no maxsize)
		'''
		logging.Handler.__init__(self)

		self._handler = RotatingFileHandler(name, mode, maxsize, rotate)
		self.queue = multiprocessing.Queue(-1)

		t = threading.Thread(target=self.receive)
		t.daemon = True
		t.start()

	def setFormatter(self, fmt):
		logging.Handler.setFormatter(self, fmt)
		self._handler.setFormatter(fmt)

	def receive(self):
		while True:
			try:
				record = self.queue.get()
				self._handler.emit(record)
			except (KeyboardInterrupt, SystemExit):
				raise
			except EOFError:
				break
			except:
				traceback.print_exc(file=sys.stderr)

	def send(self, s):
		self.queue.put_nowait(s)

	def _format_record(self, record):
		# ensure that exc_info and args
		# have been stringified.  Removes any chance of
		# unpickleable things inside and possibly reduces
		# message size sent over the pipe
		if record.args:
			record.msg = record.msg % record.args
			record.args = None
		if record.exc_info:
			dummy = self.format(record)
			record.exc_info = None

		return record

	def emit(self, record):
		try:
			s = self._format_record(record)
			self.send(s)
		except (KeyboardInterrupt, SystemExit):
			raise
		except:
			self.handleError(record)

	def close(self):
		self._handler.close()
		logging.Handler.close(self)
Example #14
class LoggerSetup:
    '''
    This class sets up the logger for the project. Ideally, no print statements should appear in the code, so that
    automated scripts can run in the background and logs can be collected in a file. A logging channel for stdout is
    also created for runtime printing of information/debug messages.
    '''
    def __init__(self, TAG, MAX_FILE_SIZE, BACKUP_COUNT, FILE_LOG_LEVEL,
                 CONSOLE_LOG_LEVEL):
        '''
        Initialization of the logging class.
        :param TAG: TAG to appear in every logged message line. Any custom string
        :param MAX_FILE_SIZE: After this file size (in bytes) is reached, the log file is rotated and the previous file is backed up
        :param BACKUP_COUNT: Number of previous log files to keep once the size limit is reached
        :param FILE_LOG_LEVEL: The log level to be logged in the file (like logging.NOTSET, logging.DEBUG, logging.INFO,
                logging.WARNING, logging.ERROR, logging.CRITICAL)
        :param CONSOLE_LOG_LEVEL: The log level to be logged on stdout (like logging.NOTSET, logging.DEBUG,
                logging.INFO, logging.WARNING, logging.ERROR, logging.CRITICAL)
        '''
        self.TAG = TAG
        self.MAX_FILE_SIZE = MAX_FILE_SIZE
        self.BACKUP_COUNT = BACKUP_COUNT
        self.FILE_LOG_LEVEL = FILE_LOG_LEVEL
        self.CONSOLE_LOG_LEVEL = CONSOLE_LOG_LEVEL

    def run(self):
        '''
        Sets up the logger and returns the logger instance configured by the init function.

        :return: logger instance
        '''
        LOG_TAG = 'HUW_XML_PARSER'
        self.myLogger = logging.getLogger(self.TAG)
        self.myLogger.setLevel(logging.DEBUG)
        self.fh = RotatingFileHandler(self.TAG + ".log",
                                      'a',
                                      maxBytes=self.MAX_FILE_SIZE,
                                      backupCount=self.BACKUP_COUNT)
        self.fh.setLevel(self.FILE_LOG_LEVEL)
        self.ch = logging.StreamHandler()
        self.ch.setLevel(self.CONSOLE_LOG_LEVEL)
        formatter = logging.Formatter(
            '[ %(asctime)s ] [ %(name)s ][ %(levelname)s ] %(message)s')
        self.ch.setFormatter(formatter)
        self.fh.setFormatter(formatter)
        self.myLogger.addHandler(self.ch)
        self.myLogger.addHandler(self.fh)
        return self.myLogger

    def close(self):
        try:
            self.fh.close()
            self.ch.close()
        except Exception as e:
            self.myLogger.error(
                "Error while trying to close logger. Maybe this will not be reported :)"
            )
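A short sketch of how this class might be driven; the tag and size values are illustrative:

import logging

setup = LoggerSetup(TAG="xml_parser",
                    MAX_FILE_SIZE=5 * 1024 * 1024,
                    BACKUP_COUNT=3,
                    FILE_LOG_LEVEL=logging.DEBUG,
                    CONSOLE_LOG_LEVEL=logging.INFO)
log = setup.run()            # logs to xml_parser.log and to stdout
log.info("parser started")   # shown on console and written to the file
setup.close()                # closes both handlers when finished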
Example #15
class MultiprocessingRotatingFileHandler(logging.Handler):
    def __init__(self, *args, **kwargs):
        logging.Handler.__init__(self)

        self._handler = RotatingFileHandler(*args, **kwargs)
        self.queue = multiprocessing.Queue()

        t = threading.Thread(target=self.receive)
        t.daemon = True
        t.start()

    def setFormatter(self, fmt):
        logging.Handler.setFormatter(self, fmt)
        self._handler.setFormatter(fmt)

    def receive(self):
        while True:
            try:
                record = self.queue.get()
                self._handler.emit(record)
            except (KeyboardInterrupt, SystemExit):
                raise
            except EOFError:
                break
            except:
                traceback.print_exc(file=sys.stderr)

    def send(self, s):
        self.queue.put_nowait(s)

    def _format_record(self, record):
        if record.args:
            record.msg = record.msg % record.args
            record.args = None
        if record.exc_info:
            self.format(record)
            record.exc_info = None

        return record

    def emit(self, record):
        try:
            s = self._format_record(record)
            self.send(s)
        except (KeyboardInterrupt, SystemExit):
            raise
        except:
            self.handleError(record)

    def close(self):
        self._handler.close()
        logging.Handler.close(self)
Example #16
def monitor_combine_parameter():
    online_combine_log = LOGS_PATH + "/online_combine.log"
    logger = logging.getLogger()
    handler = RotatingFileHandler(online_combine_log,
                                  maxBytes=1024 * 1024 * 1024,
                                  backupCount=3)
    logger.addHandler(handler)
    pool = redis.ConnectionPool(host=REDIS_HOST, port=REDIS_PORT, db=0)
    r = redis.StrictRedis(connection_pool=pool)
    p = r.pubsub()
    p.subscribe(TOPIC_HEARTBEAT)
    while True:
        for i_item in p.listen():
            if 'data' in i_item:
                if type(i_item['data']) is str:
                    a = eval(i_item['data'])
                    province_id = str(a['province_id'])
                    isp_id = str(a['isp_id'])
                    timestamp = long(a['timestamp'])
                    peer_id = str(a['peer_id'])
                    user_id = peer_id[0:8]
                    nat = int(a['nat_type'])
                    sdk_version = str(a['sdk_version'])
                    combine = {
                        "timestamp": timestamp,
                        "peer_id": peer_id,
                        "user": user_id,
                        "nat_type": nat,
                        "sdk_version": sdk_version,
                        "province_id": province_id,
                        "isp_id": isp_id,
                        "province&isp": "{0}&{1}".format(province_id, isp_id),
                        "province&user": "******".format(province_id, user_id),
                        "province&version": "{0}&{1}".format(province_id, sdk_version),
                        "user&sdk": "{0}&{1}".format(user_id, sdk_version),
                        "sdk&nat": "{0}&{1}".format(sdk_version, nat)
                    }
                    combine_json = json.dumps(combine)
                    logger.warning(combine_json)
                handler.close()
Example #17
def register_logging(app):
    app.logger.setLevel(logging.INFO)

    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    file_handler = RotatingFileHandler(os.path.join(basedir, "logs/yutou.log"), maxBytes=10 * 1024 * 1024,
                                       backupCount=10)
    file_handler.setFormatter(formatter)
    file_handler.setLevel(logging.INFO)

    if not app.debug:
        app.logger.addHandler(file_handler)
    else:
        file_handler.close()
Example #18
    def __console(self, level, message):
        self.logger = logging.getLogger(self.fileName)
        self.logger.setLevel(logging.DEBUG)

        if self.handler is not None:
            self.logger.addHandler(self.handler)

        formatter = logging.Formatter(
            '[%(asctime)s] [%(filename)s:%(funcName)s:%(lineno)d] [%(levelname)s]- %(message)s'
        )  # log output format
        # Create a FileHandler for writing to a local file
        fh = RotatingFileHandler(
            filename=self.logName,
            mode='a',
            maxBytes=1024 * 1024 * 5,
            backupCount=5,
            encoding='utf-8')  # RotatingFileHandler rotates and backs up the log
        fh.setLevel(logging.INFO)
        fh.setFormatter(formatter)
        self.logger.addHandler(fh)

        # Create a StreamHandler for console output
        formatter = colorlog.ColoredFormatter(
            '%(log_color)s[%(asctime)s] [%(filename)s:%(funcName)s:%(lineno)d] [%(levelname)s]- %(message)s',
            log_colors=log_colors_config)  # log output format
        ch = colorlog.StreamHandler()
        ch.setLevel(logging.DEBUG)
        ch.setFormatter(formatter)
        self.logger.addHandler(ch)

        if level == 'info':
            with up_stacked_logger(self.logger, n=2) as logger:
                logger.info(message)
        elif level == 'debug':
            with up_stacked_logger(self.logger, n=2) as logger:
                logger.debug(message)
        elif level == 'warning':
            with up_stacked_logger(self.logger, n=2) as logger:
                logger.warning(message)
        elif level == 'error':
            with up_stacked_logger(self.logger, n=2) as logger:
                logger.error(message)

        # Removing the handlers avoids duplicated log output
        self.logger.removeHandler(ch)
        self.logger.removeHandler(fh)
        if self.handler is not None:
            self.logger.removeHandler(self.handler)
        fh.close()  # close the open file
Example #19
def logger(logname, logmessage, logfile = 'log.log', loglevel = 'INFO'):
    try:
        info = {'CRITICAL': 50, 'ERROR': 40, 'WARNING': 30, 'INFO': 20, 'DEBUG': 10}
        loglevel = info[loglevel.upper()]
        handler = RotatingFileHandler(logfile, maxBytes = 1024*1024*100, backupCount = 3)
        formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
        handler.setFormatter(formatter)
        logger = logging.getLogger(logname)
        logger.addHandler(handler)
        logger.setLevel(logging.DEBUG)
        logger.log(loglevel, logmessage)
    finally:
        logger.removeHandler(handler)
        handler.close()
        del logger
Example #20
    def __console(self, level, message):
        # if self.parent.splitter.splitterState == SplitterState.expanded:
        #     self.handler.new_record.connect(self.textEdit.appendPlainText)
        if self.handler is not None:
            self.logger.addHandler(self.handler)

        formatter = logging.Formatter(
            '[%(asctime)s] [%(filename)s:%(lineno)d] [%(module)s:%(funcName)s] [%(levelname)s]- %(message)s'
        )  # log output format
        # Create a FileHandler for writing to a local file
        fh = RotatingFileHandler(
            filename=self.logName,
            mode='a',
            maxBytes=1024 * 1024 * 5,
            backupCount=5,
            encoding='utf-8')  # RotatingFileHandler rotates and backs up the log
        fh.setLevel(logging.INFO)
        fh.setFormatter(formatter)
        self.logger.addHandler(fh)

        # Create a StreamHandler for console output
        formatter = colorlog.ColoredFormatter(
            '%(log_color)s[%(asctime)s] [%(filename)s:%(lineno)d] [%(module)s:%(funcName)s] [%(levelname)s]- %(message)s',
            log_colors=log_colors_config)  # log output format
        ch = colorlog.StreamHandler()
        ch.setLevel(logging.DEBUG)
        ch.setFormatter(formatter)
        self.logger.addHandler(ch)

        if level == 'info':
            self.logger.info(message)
        elif level == 'debug':
            self.logger.debug(message)
        elif level == 'warning':
            self.logger.warning(message)
        elif level == 'error':
            self.logger.error(message)

        # Removing the handlers avoids duplicated log output
        self.logger.removeHandler(ch)
        self.logger.removeHandler(fh)
        if self.handler is not None:
            self.logger.removeHandler(self.handler)
        # if self.parent.splitter.splitterState == SplitterState.expanded:
        #     self.handler.new_record.disconnect(self.textEdit.appendPlainText)
        fh.close()  # close the open file
Example #21
class SmoresFileLog(logging.Handler):
    def __init__(self, name, mode, maxsize=0, backup=1):
        global handler_list
        logging.Handler.__init__(self)
        self._handler = RotatingFileHandler(name,
                                            mode,
                                            maxBytes=maxsize,
                                            backupCount=backup,
                                            encoding='UTF-8')
        if name not in handler_list:
            handler_list.append(name)
            try:
                self._handler.doRollover()
            except (PermissionError, ValueError):
                pass

    def setFormatter(self, fmt):
        logging.Handler.setFormatter(self, fmt)
        self._handler.setFormatter(fmt)

    def _format_record(self, record):
        # ensure that exc_info and args have been stringified. Removes any
        # chance of unpickleable things inside and possibly reduces message size
        # sent over the pipe
        if record.args:
            record.msg = record.msg % record.args
            record.args = None
        if record.exc_info:
            dummy = self.format(record)
            record.exc_info = None
        return record

    def emit(self, record):
        try:
            s = self._format_record(record)
            self._handler.emit(s)
        except (KeyboardInterrupt, SystemExit):
            raise
        except:
            self.handleError(record)

    def close(self):
        self._handler.close()
        logging.Handler.close(self)
Example #22
class Log(object):
  def __init__(self, log_id, log_path, log_level=logging.INFO):
    self.logger_ = logging.getLogger(log_id)
    self._handler = RotatingFileHandler(log_path, mode='a', maxBytes=100 * 1024 * 1024, backupCount=2)
    self._handler.setFormatter(logging.Formatter("%(asctime)s-%(filename)s:%(lineno)d[%(levelname)s]:%(message)s"))
    self.logger_.setLevel(log_level)
    self._handler.setLevel(log_level)
    self.logger_.addHandler(self._handler)

  def __del__(self):
    self._handler.close()

  def setdebuglevel(self):
    self.logger_.setLevel(logging.DEBUG)

  def setinfolevel(self):
    self.logger_.setLevel(logging.INFO)

  @property
  def log(self):
    return self.logger_
Example #23
def monitor_lsm_free():
    lsm_free_log = LOGS_PATH + "/sdk_lsm_free.log"
    logger = logging.getLogger('lsm_free')
    handler = RotatingFileHandler(lsm_free_log,
                                  maxBytes=1024 * 1024 * 1024,
                                  backupCount=3)
    logger.addHandler(handler)
    pool = redis.ConnectionPool(host=REDIS_HOST, port=REDIS_PORT, db=0)
    r = redis.StrictRedis(connection_pool=pool)
    p = r.pubsub()
    p.subscribe(TOPIC_VOD_LSM)
    while True:
        for i_item in p.listen():
            if 'data' in i_item:
                if type(i_item['data']) is str:
                    a = eval(i_item['data'])
                    # lsm_free unit : B ---> MB
                    lsm_free = int(a['lsm_free']) / 1024 / 1024
                    if lsm_free <= 10:
                        lsm_free = LSM_FREE_LEVEL_A
                    elif 10 < lsm_free <= 50:
                        lsm_free = LSM_FREE_LEVEL_B
                    elif 50 < lsm_free <= 100:
                        lsm_free = LSM_FREE_LEVEL_C
                    elif 100 < lsm_free <= 300:
                        lsm_free = LSM_FREE_LEVEL_D
                    elif 300 < lsm_free <= 600:
                        lsm_free = LSM_FREE_LEVEL_E
                    else:
                        lsm_free = LSM_FREE_LEVEL_F
                    timestamp = long(a['timestamp'])
                    peer_id = str(a['peer_id'])
                    combine = {
                        "timestamp": timestamp,
                        "peer_id": peer_id,
                        "lsm_level": lsm_free
                    }
                    combine_json = json.dumps(combine)
                    logger.warning(combine_json)
                handler.close()
Example #24
def monitor_p2p_ratio():
    online_combine_log = LOGS_PATH + "/p2p_ratio.log"
    logger = logging.getLogger()
    handler = RotatingFileHandler(online_combine_log,
                                  maxBytes=1024 * 1024 * 1024,
                                  backupCount=3)
    logger.addHandler(handler)
    pool = redis.ConnectionPool(host=REDIS_HOST, port=REDIS_PORT, db=0)
    r = redis.StrictRedis(connection_pool=pool)
    p = r.pubsub()
    p.subscribe(TOPIC_FLOW)
    while True:
        for i_item in p.listen():
            if 'data' in i_item:
                if type(i_item['data']) is str:
                    p2p_data = eval(i_item['data'])
                    timestamp = long(p2p_data['timestamp'])
                    p2p_download = int(p2p_data['p2p_download'])
                    cdn_download = int(p2p_data['cdn_download'])
                    file_id = str(p2p_data['file_id'])
                    if p2p_download > 0 or cdn_download > 0:
                        p2p_combine = {
                            "timestamp": timestamp,
                            "p2p_download": p2p_download,
                            "cdn_download": cdn_download,
                            "file_id": file_id,
                            "p2p_ratio": float(p2p_download * 100 /
                                               (p2p_download + cdn_download))
                        }
                        combine_json = json.dumps(p2p_combine)
                        logger.warning(combine_json)
                handler.close()
Example #25
class FileCollector(object):
  def __init__(self, remote_ip, remote_dir='/letv/crawler_delta', local_dir='./in'):
    self._remote_ip = remote_ip
    self._remote_dir = remote_dir
    self._local_dir = local_dir
    self._init_dir()
    self._init_log()

  def _init_dir(self):
    if not os.path.exists(self._local_dir):
      os.mkdir(self._local_dir)

  def _init_log(self):
    log_name = "file_collector_%s.error" % self._remote_ip
    self._handler = RotatingFileHandler(log_name, mode='a', maxBytes=100 * 1024 * 1024, backupCount=2)
    self._handler.setFormatter(logging.Formatter('[%(levelname)s %(asctime)s %(filename)s:%(lineno)d] %(message)s'))
    #self._handler.setLevel(logging.DEBUG)
    self._logger = logging.getLogger(log_name)
    self._logger.addHandler(self._handler)
    self._logger.setLevel(logging.DEBUG)

  def __del__(self):
    self._handler.close()
    self._logger.removeHandler(self._handler)

  def _ls_file_list(self):
    cmd = 'ssh %s ls %s' % (self._remote_ip, self._remote_dir)
    #self._logger.debug('execute cmd:%s', cmd)
    sta, result = call_cmd(cmd)
    #print sta, result
    if sta == 0:
      #self._logger.debug('success get filelist:\n%s', result)
      return result.split()
    else:
      return None

  def _scp_file(self, filename, filename_tmp):
    cmd = 'scp search@%s:%s/%s %s/%s' % (self._remote_ip, self._remote_dir, filename, self._local_dir, filename_tmp)
    #self._logger.debug('execute cmd:%s', cmd)
    sta, result = call_cmd(cmd)
    return True if sta==0 else False

  def _delete_file_remote(self, filename):
    cmd = 'ssh search@%s rm %s/%s' % (self._remote_ip, self._remote_dir, filename)
    #self._logger.debug('execute cmd:%s', cmd)
    sta, result = call_cmd(cmd)
    return True if sta == 0 else False

  def _rename_file(self, filename, filename_tmp):
    if not filename or not filename_tmp:
      return
    local_filename = '%s/%s' % (self._local_dir, filename)
    local_filename_tmp = '%s/%s' % (self._local_dir, filename_tmp)
    try:
      os.rename(local_filename_tmp, local_filename)
    except Exception:
      self._logger.exception('failed to rename %s to %s' % (local_filename_tmp, local_filename))

  def run(self):
    file_list = self._ls_file_list()
    if not file_list:
      return
    for filename in file_list:
      if not filename.endswith('.txt'):
        continue
      filename_tmp = '%s_copying' % filename
      if self._scp_file(filename, filename_tmp):
        self._rename_file(filename, filename_tmp)
        self._logger.debug('success scp file filename:[%s]', filename)
        if self._delete_file_remote(filename):
          self._logger.debug('success delete remote file filename:[%s]', filename)
        else:
          self._logger.error('failed delete remote file filename:[%s]', filename)
Example #26
class TaskProcess(Process):
    """Process taking care of performing the measures.

    When started, this process sets up a logger redirecting all records to a
    queue. It then redirects stdout and stderr to the logging system. As long
    as it is not stopped, it waits for the main process to send a measure
    through the pipe. Upon reception of the `ConfigObj` object describing the
    measure, it rebuilds it, sets up a logger for that specific measure and,
    if necessary, starts a spy transmitting the value of all monitored
    entries to the main process. It finally runs the checks of the measure
    and runs it. It can be interrupted by setting an event; upon exit it
    closes the communication pipe and signals all listeners that it is
    closing.

    Parameters
    ----------
    pipe : double ended multiprocessing pipe
        Pipe used to communicate with the parent process which is transferring
        the measure to perform.
    log_queue : multiprocessing queue
        Queue in which all log records are sent to be processed later in the
        main process.
    monitor_queue : multiprocessing queue
        Queue in which all the information the user asked to monitor during
        the measurement is sent to be processed in the main process.
    task_pause : multiprocessing event
        Event set when the user asked the running measurement to pause.
    task_paused : multiprocessing event
        Event set when the current measure is paused.
    task_stop : multiprocessing event
        Event set when the user asked the running measurement to stop.
    process_stop : multiprocessing event
        Event set when the user asked the process to stop.

    Attributes
    ----------
    meas_log_handler : log handler
        Log handler used to save the running measurement specific records.
    see `Parameters`

    Methods
    -------
    run():
        Method called when the new process starts.

    """

    def __init__(self, pipe, log_queue, monitor_queue, task_pause, task_paused,
                 task_stop, process_stop):
        super(TaskProcess, self).__init__(name='MeasureProcess')
        self.daemon = True
        self.task_pause = task_pause
        self.task_paused = task_paused
        self.task_stop = task_stop
        self.process_stop = process_stop
        self.pipe = pipe
        self.log_queue = log_queue
        self.monitor_queue = monitor_queue
        self.meas_log_handler = None

    def run(self):
        """Method called when the new process starts.

        For a complete description of the workflow see the class
        docstring.

        """
        self._config_log()
        # Ugly patch to avoid pyvisa complaining about missing filters
        warnings.simplefilter("ignore")

        # Redirecting stdout and stderr to the logging system.
        logger = logging.getLogger()
        redir_stdout = StreamToLogRedirector(logger)
        sys.stdout = redir_stdout
        redir_stderr = StreamToLogRedirector(logger, 'stderr')
        sys.stderr = redir_stderr
        logger.info('Logger parametrised')

        logger.info('Process running')
        self.pipe.send('READY')
        while not self.process_stop.is_set():

            # Prevent us from crash if the pipe is closed at the wrong moment.
            try:

                # Wait for a measurement.
                while not self.pipe.poll(2):
                    if self.process_stop.is_set():
                        break

                if self.process_stop.is_set():
                    break

                # Get the measure.
                name, config, build, runtime, mon_entries = self.pipe.recv()

                # Build it by using the given build dependencies.
                root = build_task_from_config(config, build, True)

                # Give all runtime dependencies to the root task.
                root.run_time = runtime

                logger.info('Task built')

                # There are entries in the database we are supposed to
                # monitor start a spy to do it.
                if mon_entries:
                    spy = MeasureSpy(
                        self.monitor_queue, mon_entries,
                        root.task_database)

                # Set up the logger for this specific measurement.
                if self.meas_log_handler is not None:
                    logger.removeHandler(self.meas_log_handler)
                    self.meas_log_handler.close()
                    self.meas_log_handler = None

                log_path = os.path.join(
                    root.get_from_database('default_path'),
                    name + '.log')
                if os.path.isfile(log_path):
                    os.remove(log_path)
                self.meas_log_handler = RotatingFileHandler(log_path,
                                                            mode='w',
                                                            maxBytes=10**6,
                                                            backupCount=10)
                aux = '%(asctime)s | %(levelname)s | %(message)s'
                formatter = logging.Formatter(aux)
                self.meas_log_handler.setFormatter(formatter)
                logger.addHandler(self.meas_log_handler)

                # Pass the events signaling the task it should stop or pause
                # to the task and make the database ready.
                root.should_pause = self.task_pause
                root.paused = self.task_paused
                root.should_stop = self.task_stop
                root.task_database.prepare_for_running()

                # Perform the checks.
                check, errors = root.check(test_instr=True)

                # They pass perform the measure.
                if check:
                    logger.info('Check successful')
                    root.perform_(root)
                    result = ['', '', '']
                    if self.task_stop.is_set():
                        result[0] = 'INTERRUPTED'
                        result[2] = 'Measure {} was stopped'.format(name)
                    else:
                        result[0] = 'COMPLETED'
                        result[2] = 'Measure {} succeeded'.format(name)

                    if self.process_stop.is_set():
                        result[1] = 'STOPPING'
                    else:
                        result[1] = 'READY'

                    self.pipe.send(tuple(result))

                # They fail, mark the measure as failed and go on.
                else:
                    mes = 'Tests failed, see log for full records.'
                    self.pipe.send(('FAILED', 'READY', mes))

                    # Log the tests that failed.
                    fails = errors.iteritems()
                    message = '\n'.join('{} : {}'.format(path, mes)
                                        for path, mes in fails)
                    logger.critical(message)

                # If a spy was started kill it
                if mon_entries:
                    spy.close()
                    del spy

            except IOError:
                pass

        # Clean up before closing.
        logger.info('Process shutting down')
        if self.meas_log_handler:
            self.meas_log_handler.close()
        self.log_queue.put_nowait(None)
        self.monitor_queue.put_nowait((None, None))
        self.pipe.close()

    def _config_log(self):
        """Configuring the logger for the process.

        Sending all record to a multiprocessing queue.

        """
        config_worker = {
            'version': 1,
            'disable_existing_loggers': True,
            'handlers': {
                'queue': {
                    'class': 'hqc_meas.utils.log.tools.QueueHandler',
                    'queue': self.log_queue,
                },
            },
            'root': {
                'level': 'DEBUG',
                'handlers': ['queue']
            },
        }
        logging.config.dictConfig(config_worker)
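_config_log only routes the child's records into log_queue; something in the main process still has to drain that queue. A minimal sketch of that consumer side using the standard logging.handlers.QueueListener (an assumption for illustration; the project ships its own QueueHandler, and the real consumer sits outside this excerpt):

import logging.handlers
import multiprocessing

log_queue = multiprocessing.Queue()

# handler that should receive the worker's records in the main process
file_handler = logging.handlers.RotatingFileHandler(
    "measures.log", maxBytes=10**6, backupCount=10)
listener = logging.handlers.QueueListener(log_queue, file_handler)
listener.start()
# ... create and start TaskProcess(pipe, log_queue, monitor_queue, ...) ...
listener.stop()  # flushes remaining records and stops the listener thread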
Example #27
class TaskProcess(Process):
    """Process taking care of performing the measures.

    When started, this process sets up a logger redirecting all records to a
    queue. It then redirects stdout and stderr to the logging system. As long
    as there are measures to perform, it asks the main process to send it
    measures through a pipe. Upon reception of the `ConfigObj` object describing
    the measure, it rebuilds it, sets up a logger for that specific measure and,
    if necessary, starts a spy transmitting the value of all monitored entries to
    the main process. It finally runs the checks of the measure and runs it.
    It can be interrupted by setting an event; upon exit it closes the
    communication pipe and signals all listeners that it is closing.

    Parameters
    ----------
    pipe : double ended multiprocessing pipe
        Pipe used to communicate with the parent process which is transferring
        the measure to perform.
    log_queue : multiprocessing queue
        Queue in which all log records are sent to be processed later in the main
        process.
    monitor_queue : multiprocessing queue
        Queue in which all the information the user asked to monitor during the
        measurement is sent to be processed in the main process.
    task_stop : multiprocessing event
        Event set when the user asked the running measurement to stop.
    process_stop : multiprocessing event
        Event set when the user asked the process to stop.

    Attributes
    ----------
    meas_log_handler : log handler
        Log handler used to save the running measurement specific records.
    see `Parameters`

    Methods
    -------
    run():
        Method called when the new process starts.

    """

    def __init__(self, pipe, log_queue, monitor_queue, task_stop, process_stop):
        super(TaskProcess, self).__init__(name = 'MeasureProcess')
        self.task_stop = task_stop
        self.process_stop = process_stop
        self.pipe = pipe
        self.log_queue = log_queue
        self.monitor_queue = monitor_queue
        self.meas_log_handler = None

    def run(self):
        """Method called when the new process starts.

        For a complete description of the workflow see the class docstring.
        """
        self._config_log()
        # Ugly patch to avoid pyvisa complaining about missing filters
        warnings.simplefilter("ignore")

        # Redirecting stdout and stderr to the logging system.
        logger = logging.getLogger()
        redir_stdout = StreamToLogRedirector(logger)
        sys.stdout = redir_stdout
        logger.info('Logger parametrised')

        print 'Process running'
        while not self.process_stop.is_set():
            try:
                # Request a new measure to perform from the main process
                print 'Need task'
                self.pipe.send('Need task')

                # Get the answer
                self.pipe.poll(None)
                name, config, monitored_entries = self.pipe.recv()

                if config != 'STOP':
                    # If a real measurement was sent, build it.
                    task = IniConfigTask().build_task_from_config(config)
                    print 'Task built'

                    # There are entries in the database we are supposed to
                    # monitor start a spy to do it.
                    if monitored_entries is not None:
                        spy = MeasureSpy(self.monitor_queue, monitored_entries,
                                         task.task_database)

                    # Set up the logger for this specific measurement.
                    if self.meas_log_handler != None:
                        logger.removeHandler(self.meas_log_handler)
                        self.meas_log_handler.close()
                        self.meas_log_handler = None
                        
                    log_path = os.path.join(
                                        task.get_from_database('default_path'),
                                        name + '.log')
                    if os.path.isfile(log_path):
                        os.remove(log_path)
                    self.meas_log_handler = RotatingFileHandler(log_path,
                                                            mode = 'w',
                                                            maxBytes = 10**6,
                                                            backupCount = 10)
                    aux = '%(asctime)s | %(levelname)s | %(message)s'
                    formatter = logging.Formatter(aux)
                    self.meas_log_handler.setFormatter(formatter)
                    logger.addHandler(self.meas_log_handler)

                    # Clear the event signaling the task it should stop, pass it
                    # to the task and make the database ready.
                    self.task_stop.clear()
                    task.should_stop = self.task_stop
                    task.task_database.prepare_for_running()

                    # Perform the checks.
                    check = task.check(test_instr = True)
                    if check[0]:
                        print 'Check successful'
                        # Perform the measure
                        task.process()
                        self.pipe.send('Task processed')
                        if self.task_stop.is_set():
                            print 'Task interrupted'
                        else:
                            print 'Task processed'
                    else:
                        message = '\n'.join('{} : {}'.format(path, mes)
                                    for path, mes in check[1].iteritems())
                        logger.critical(message)

                    # If a spy was started kill it
                    if monitored_entries is not None:
                        spy.close()
                        del spy

            except IOError:
                pass

        # Clean up before closing.
        self.pipe.send('Closing')
        print 'Process shutting down'
        if self.meas_log_handler:
            self.meas_log_handler.close()
        self.log_queue.put_nowait(None)
        self.pipe.close()

    def _config_log(self):
        """Configuring the logger for the process. Sending all record to a
        multiprocessing queue.
        """
        config_worker = {
            'version': 1,
            'disable_existing_loggers': True,
            'handlers': {
                'queue': {
                    'class': 'hqc_meas.log_facility.QueueHandler',
                    'queue': self.log_queue,
                },
            },
            'root': {
                'level': 'DEBUG',
                'handlers': ['queue']
            },
        }
        logging.config.dictConfig(config_worker)
        if os.name == 'posix':
            # On POSIX, the setup logger will have been configured in the
            # parent process, but should have been disabled following the
            # dictConfig call.
            # On Windows, since fork isn't used, the setup logger won't
            # exist in the child, so it would be created and the message
            # would appear - hence the "if posix" clause.
            logger = logging.getLogger('setup')
            logger.critical('Should not appear, because of disabled logger ...')
Example #28
File: smolt.py Project: gdenning/mythtv
            '-distro.html':self.get_distro_specific_html(),
            '.rst':'\n'.join(map(to_ascii, self.getProfile())),
        }
        logdir = os.path.expanduser('~/.smolt/')
        try:
            if not os.path.exists(logdir):
                os.mkdir(logdir, 0o700)

            for k, v in log_matrix.items():
                filename = os.path.expanduser(os.path.join(
                        logdir, 'submission%s' % k))
                r = RotatingFileHandler(filename, \
                        maxBytes=1000000, backupCount=9)
                r.stream.write(v)
                r.doRollover()
                r.close()
                os.remove(filename)
        except:
            pass
        del logdir
        del log_matrix


        debug('sendHostStr: %s' % serialized_host_obj_machine)
        debug('Sending Host')

        if batch:
            entry_point = "/client/batch_add_json"
            logging.debug('Submitting in asynchronous mode')
        else:
            entry_point = "/client/add_json"
Example #29
class MultiprocessingLogHandler(logging.Handler):
    """
        by zzzeek on SO:
        http://stackoverflow.com/questions/641420/how-should-i-log-while-using-multiprocessing-in-python
    """
    def __init__(self, *args, **kwargs):
        logging.Handler.__init__(self)
        self.lock = None

        if 'klass' in kwargs:
            klass = kwargs['klass']
            del kwargs['klass']
            self._handler = klass(*args, **kwargs)
        else:
            self._handler = RotatingFileHandler(*args, **kwargs)
        self.queue = multiprocessing.Queue(-1)

        t = threading.Thread(target=self.receive)
        t.daemon = True
        t.start()

    def setFormatter(self, fmt):
        logging.Handler.setFormatter(self, fmt)
        self._handler.setFormatter(fmt)

    def receive(self):
        while True:
            try:
                record = self.queue.get()
                self._handler.emit(record)
            except (KeyboardInterrupt, SystemExit):
                raise
            except EOFError:
                break
            except:
                traceback.print_exc(file=sys.stderr)

    def send(self, s):
        self.queue.put_nowait(s)

    def _format_record(self, record):
        # ensure that exc_info and args
        # have been stringified.  Removes any chance of
        # unpickleable things inside and possibly reduces
        # message size sent over the pipe
        if record.args:
            record.msg = record.msg % record.args
            record.args = None
        if record.exc_info:
            record.exc_info = None

        return record

    def emit(self, record):
        try:
            s = self._format_record(record)
            self.send(s)
        except (KeyboardInterrupt, SystemExit):
            raise
        except:
            self.handleError(record)

    def close(self):
        self._handler.close()
        logging.Handler.close(self)
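Unlike the other MultiProcessingLog variants above, this one takes an optional klass keyword so the wrapped file handler can be swapped out. A hedged sketch of both construction paths (file names and limits are illustrative):

import logging
from logging.handlers import TimedRotatingFileHandler

# default path: wraps a size-based RotatingFileHandler
size_handler = MultiprocessingLogHandler(
    "app.log", maxBytes=10 * 1024 * 1024, backupCount=5)

# klass path: wrap a time-based handler instead
timed_handler = MultiprocessingLogHandler(
    "app.log", when="midnight", backupCount=7,
    klass=TimedRotatingFileHandler)

logging.getLogger().addHandler(timed_handler)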
Example #30
class Logger(Singleton):
    def __init__(self):
        self.__log = None
        self.__hdlr = None


    def __del__(self):
        self.close()

    '''
    introduction:
        get an instance of the logger
    @parameter:
        log_instance: id of the logger
        file_name: name of the log file to produce
        max_bytes: maximum size of the file
        backup_count: number of rollover backups to keep
        timeopen: 0 ----- timestamps off, anything else ----- timestamps on

    return:
        0 ----- success
        -1 ----- failure
    '''
    def get_log(self, log_instance, file_name, max_bytes, backup_count, timeopen = 0):
        result = 0
        try:
            self.__log = logging.getLogger(log_instance)
        except:
            self.__log = None
            result = -1
        else:
            # print id(self.__log)
            self.__log.setLevel(logging.DEBUG)
            try:
                self.__hdlr = RotatingFileHandler(file_name, maxBytes = max_bytes, backupCount = backup_count)
            except:
                self.__hdlr = None
                result = -1
            else:
                self.__hdlr.setLevel(logging.DEBUG)
                formatter = None
                if 0 != timeopen:
                    formatter = logging.Formatter('%(asctime)s - %(message)s')
                else:
                    formatter = logging.Formatter('%(message)s')
                self.__hdlr.setFormatter(formatter)
                self.__log.addHandler(self.__hdlr)
        finally:
            return result

    def write(self, value):
        if None != self.__log and None != self.__hdlr:
            self.__log.debug(value)

    def flush(self):
        if None != self.__hdlr:
            self.__hdlr.flush()

    def close(self):
        if None != self.__log:
            self.__log.removeHandler(self.__hdlr)
            self.__log = None

        if None != self.__hdlr:
            self.__hdlr.flush()
            self.__hdlr.close()
            self.__hdlr = None
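A short usage sketch following the return-code convention documented above (the file name and limits are illustrative, and constructing Logger() directly assumes the Singleton base permits it):

log = Logger()
if log.get_log("worker", "worker.log",
               max_bytes=10 * 1024 * 1024, backup_count=5,
               timeopen=1) == 0:     # 0 means setup succeeded
    log.write("rotating log ready")  # emitted at DEBUG level
    log.flush()
    log.close()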
Example #31
 def close(self):
     _RotatingFileHandler.close(self)
     _remove_from_reopenable(self._wr)
Example #32
class MultiProcessingLog(logging.Handler):
    """
    Synchronised parallel multi processing log.
  """

    def __init__(self, name, mode, maxsize, rotate, encoding="utf-8", delay=0):
        logging.Handler.__init__(self)

        self._handler = RotatingFileHandler(name, mode, maxsize, rotate, encoding, delay)
        self.queue = multiprocessing.Queue(-1)

        t = threading.Thread(target=self.receive)
        t.daemon = True
        t.start()

    def setFormatter(self, fmt):
        """ Default setFormatter impl. """
        logging.Handler.setFormatter(self, fmt)
        self._handler.setFormatter(fmt)

    def receive(self):
        """ Receives one record to log. """
        while True:
            try:
                record = self.queue.get()
                if self._handler:
                    self._handler.emit(record)
            except (KeyboardInterrupt, SystemExit):
                raise
            except EOFError:
                break
            except:
                traceback.print_exc(file=sys.stderr)

    def send(self, s):
        """ Puts to nowait queue. """
        self.queue.put_nowait(s)

    def _format_record(self, record):
        # ensure that exc_info and args
        # have been stringified.  Removes any chance of
        # unpickleable things inside and possibly reduces
        # message size sent over the pipe
        if record.args:
            record.msg = record.msg % record.args
            record.args = None
        if record.exc_info:
            # noinspection PyUnusedLocal
            dummy = self.format(record)
            record.exc_info = None

        return record

    def emit(self, record):
        try:
            s = self._format_record(record)
            self.send(s)
        except (KeyboardInterrupt, SystemExit):
            raise
        except:
            self.handleError(record)

    def close(self):
        self._handler.close()
        self._handler = None
        logging.Handler.close(self)
Example #33
File: utils.py Project: CSCG/pytagger
class MultiProcessingLogger(logging.Handler):
    """A multiprocessing-safe logger with built in log rolling/backups"""
    def __init__(self, name, mode='a', maxsize=10000, rotate=100):
        """Create a new MultiprocessingLogger for a file named *name*

        :param name: The name of the log file to be written to
        :param mode: The mode in which the file should be opened in
        :param maxsize: The number of bytes this log file should roll over at
        :param rotate: The number of rolled log files to keep hanging around
        """
        logging.Handler.__init__(self)

        self._handler = RotatingFileHandler(name, mode, maxsize, rotate)
        self.queue = multiprocessing.Queue(-1)

        t = threading.Thread(target=self.receive)
        t.daemon = True
        t.start()

    def setFormatter(self, fmt):
        logging.Handler.setFormatter(self, fmt)
        self._handler.setFormatter(fmt)

    def receive(self):
        while True:
            try:
                record = self.queue.get()
                self._handler.emit(record)
            except (KeyboardInterrupt, SystemExit):
                raise
            except EOFError:
                break
            except:
                traceback.print_exc(file=sys.stderr)

    def send(self, s):
        self.queue.put_nowait(s)

    def _format_record(self, record):
        # ensure that exc_info and args
        # have been stringified.  Removes any chance of
        # unpickleable things inside and possibly reduces
        # message size sent over the pipe
        if record.args:
            record.msg = record.msg % record.args
            record.args = None
        if record.exc_info:
            dummy = self.format(record)
            record.exc_info = None

        return record

    def emit(self, record):
        try:
            s = self._format_record(record)
            self.send(s)
        except (KeyboardInterrupt, SystemExit):
            raise
        except:
            self.handleError(record)

    def close(self):
        self._handler.close()
        logging.Handler.close(self)
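Given the documented defaults (roll over at 10000 bytes, keep up to 100 rolled files), a minimal hedged instantiation (the file name is illustrative):

import logging

mp_log = MultiProcessingLogger("tagger.log")
mp_log.setFormatter(logging.Formatter("%(asctime)s %(message)s"))
logging.getLogger().addHandler(mp_log)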
Example #34
    try:
        mgr = LciManager(reactor, config, log)
        # we ignore SIGTERM because Squid will close the log FH, which gives
        # us a much cleaner signal that we're to shut down.
        signal.signal(signal.SIGTERM, signal.SIG_IGN)
        mgr.start()
    except ConfigParser.Error, why:
        error("Configuration file: %s" % why)
    except Exception, why:
        error("Error: %s " % why)
    except:
        error("Unknown error.")
        
    # clean up logging
    hdlr.flush()
    hdlr.close()
    logging.shutdown()

def error(msg):
    "Something really bad has happened. Should only be used during startup."
    logging.critical(msg)
    sys.stderr.write("LCI FATAL: %s\n" % msg)
    sys.exit(1)


############################################################################

class ManagerState:
    "Holds the manager's state in an easily persistable way."
    def __init__(self):
        self.groups = {} # key is hashed group_uri; value is set of req uris