def setup_logfile_logger(log_path, log_level=None, log_format=None, date_format=None):
    """Attach a WatchedFileHandler writing to *log_path* to the root logger.

    Falls back to the module default record/date formats when none are
    supplied; an unrecognised *log_level* name degrades to ``logging.ERROR``.
    Returns the handler so callers can adjust or remove it later.
    """
    file_handler = WatchedFileHandler(log_path, mode='a', encoding='utf-8', delay=0)
    if log_level:
        # Level names are looked up case-insensitively in LOG_LEVELS.
        file_handler.setLevel(LOG_LEVELS.get(log_level.lower(), logging.ERROR))
    fmt = log_format or '%(asctime)s [%(name)s][%(levelname)s] %(message)s'
    datefmt = date_format or '%Y-%m-%d %H:%M:%S'
    file_handler.setFormatter(logging.Formatter(fmt, datefmt=datefmt))
    root_logger.addHandler(file_handler)
    return file_handler
def get_logger(logname, logfile, loglevel, propagate):
    """Create and return a logger object.

    *logfile* may be the literal string ``'STDOUT'``/``'STDERR'`` for a
    stream handler, or a filesystem path for a WatchedFileHandler.
    Exits the process when the log file cannot be opened.
    """
    streams = {'STDOUT': sys.stdout, 'STDERR': sys.stderr}
    try:
        target = streams.get(logfile)
        if target is not None:
            handler = logging.StreamHandler(target)
        else:
            handler = WatchedFileHandler(logfile)
    except (PermissionError, FileNotFoundError) as err:  # pytype: disable=name-error
        print(err)
        sys.exit(-1)
    new_logger = logging.getLogger(logname)
    handler.setFormatter(logging.Formatter(
        '%(asctime)s %(name)-6s %(levelname)-8s %(message)s',
        '%b %d %H:%M:%S'))
    new_logger.addHandler(handler)
    new_logger.propagate = propagate
    new_logger.setLevel(loglevel)
    return new_logger
def setup_log_file(log_file_path):
    """Attach a DEBUG-level WatchedFileHandler for *log_file_path* to the root logger."""
    from logging import DEBUG, getLogger, Formatter
    from logging.handlers import WatchedFileHandler

    file_handler = WatchedFileHandler(str(log_file_path))
    file_handler.setFormatter(Formatter(log_format))
    file_handler.setLevel(DEBUG)
    getLogger('').addHandler(file_handler)
def _file_handler(logger_name: str, formatter: logging.Formatter, log_path: str):
    """Build a WatchedFileHandler for *logger_name* under directory *log_path*.

    The file is named after the snake-cased logger name; dots are replaced
    before snake-casing so they survive the conversion.

    Fix: the path was built with ``f"{log_path}{name}.log"``, which fused
    directory and file name when *log_path* had no trailing separator
    (e.g. ``logsfoo.log``). ``os.path.join`` handles both forms correctly.
    """
    os.makedirs(log_path, exist_ok=True)
    path = os.path.join(log_path, f"{snakecase(logger_name.replace('.', 'x'))}.log")
    file_handler = WatchedFileHandler(path)
    file_handler.setFormatter(formatter)
    return file_handler
def setup_logger():
    """Configure basic INFO logging and attach file handlers for LOG_FILE.

    Fix: the TimedRotatingFileHandler previously got no formatter, so its
    records lacked the timestamp/level prefix the watched handler had;
    both file handlers now share one formatter.

    NOTE(review): attaching both a WatchedFileHandler and a
    TimedRotatingFileHandler to the same LOG_FILE duplicates every record
    and lets two handlers compete over rotation — confirm this is intended.
    """
    logging.basicConfig(level=logging.INFO)
    formatter = logging.Formatter(
        "%(asctime)s - %(levelname)s - %(filename)s@%(funcName)s:%(lineno)s",
        "%Y-%m-%d %H:%M:%S %z")
    watched = WatchedFileHandler(LOG_FILE)
    watched.setFormatter(formatter)
    logger.addHandler(watched)
    rotating = TimedRotatingFileHandler(LOG_FILE, when="d", interval=1, backupCount=100)
    rotating.setFormatter(formatter)
    logger.addHandler(rotating)
def getLogger(name, level=logging.INFO, handlers=None):
    """Return a logger for *name*, wiring up the requested handlers.

    ``handlers`` is a mapping whose recognised keys are ``"console"``,
    ``"file"`` (its value must provide a ``'logfile'`` path) and
    ``"syslog"``. With no handlers the logger is returned untouched.

    Fixes: the default was a shared mutable list (``handlers=[]``), and a
    list argument containing ``"file"`` crashed on ``handlers['file']``.
    The parameter now defaults to ``None`` and is documented as a mapping.
    """
    logger = logging.getLogger(name)
    if handlers is None:
        handlers = {}
    if len(handlers) != 0:
        logger.setLevel(level)
        if "console" in handlers:
            strm = StreamHandler()
            strm.setLevel(level)
            strm.setFormatter(logging.Formatter('%(message)s'))
            logger.addHandler(strm)
        if "file" in handlers:
            conf = handlers['file']
            fl = WatchedFileHandler(conf['logfile'])
            fl.setLevel(level)
            fl.setFormatter(logging.Formatter(
                '%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
            logger.addHandler(fl)
        if "syslog" in handlers:
            sysl = SysLogHandler(address='/dev/log',
                                 facility=SysLogHandler.LOG_SYSLOG)
            sysl.setLevel(level)
            # Include the pid so concurrent processes can be told apart.
            formatter = logging.Formatter(
                '%(name)s[' + str(os.getpid()) + '] %(levelname)-8s: %(message)s')
            sysl.setFormatter(formatter)
            logger.addHandler(sysl)
    return logger
def get_log(self, log_name):
    """Return a cached logger for *log_name*, creating and registering it on first use.

    The logger writes INFO+ records to ``<Config.LOG_DIR>/<log_name>.log``
    and ERROR+ records to ``<Config.LOG_DIR>/<log_name>.err``; when
    ``Config.DEBUG`` is set it additionally echoes DEBUG+ to the console.
    """
    # Serve from the registry so repeated calls share one configured logger.
    if log_name in self.registered_logs:
        return self.registered_logs[log_name]
    logger = logging.getLogger(log_name)
    # Drop any handlers a previous run (or a library) attached to this name.
    logger.handlers.clear()
    logger.setLevel(logging.INFO)
    # Init File Handler
    file_name = os.path.join(Config.LOG_DIR, '%s.log' % log_name)
    file_handler = WatchedFileHandler(file_name)
    file_handler.setLevel(logging.INFO)
    file_handler.setFormatter(logging.Formatter(
        '%(asctime)s %(levelname)s: %(message)s ')
    )
    logger.addHandler(file_handler)
    # Second file handler restricted to ERROR and above (.err file).
    file_name = os.path.join(Config.LOG_DIR, '%s.err' % log_name)
    file_handler = WatchedFileHandler(file_name)
    file_handler.setLevel(logging.ERROR)
    file_handler.setFormatter(logging.Formatter(
        '%(asctime)s %(levelname)s: %(message)s ')
    )
    logger.addHandler(file_handler)
    if Config.DEBUG:
        console_handler = logging.StreamHandler()
        console_handler.setLevel(logging.DEBUG)
        console_format = logging.Formatter('%(asctime)s %(levelname)s: %(message)s')
        console_handler.setFormatter(console_format)
        logger.addHandler(console_handler)
    self.registered_logs[log_name] = logger
    return logger
def init_app(app): """Initialize application logger""" # Use default config in DEBUG mode, configure logger otherwise if not app.debug and app.config['LOGGER_ENABLED']: log_level = app.config['LOGGER_LEVEL'] log_dir = Path(app.config['LOGGER_DIR']) log_format = app.config['LOGGER_FORMAT'] # Remove Flask default handler app.logger.removeHandler(default_handler) # Use WatchedFileHandler to reopen if logrotate rotates the log file file_handler = WatchedFileHandler(log_dir / 'pyodhean.log') # Create record formatter formatter = RequestFormatter(log_format) formatter.converter = time.gmtime file_handler.setFormatter(formatter) # Add our custom handler app.logger.addHandler(file_handler) # Set logging level app.logger.setLevel(log_level)
def main(args):
    """Configure the "localftp" logger and launch the FTP test server.

    When a log file is configured it captures the full stream and the
    console is limited to WARNING; otherwise the console gets everything.

    Fixes: the console handler was added to the logger twice (every record
    appeared twice) and the logger level was set a second, redundant time.
    """
    logger = logging.getLogger("localftp")
    logger.setLevel(args.log_level)
    formatter = logging.Formatter(
        "%(asctime)s %(levelname)-7s %(name)s|%(message)s")
    ch = logging.StreamHandler()
    if args.log_path is None:
        ch.setLevel(args.log_level)
    else:
        fh = WatchedFileHandler(args.log_path)
        fh.setLevel(args.log_level)
        fh.setFormatter(formatter)
        logger.addHandler(fh)
        ch.setLevel(logging.WARNING)
    ch.setFormatter(formatter)
    logger.addHandler(ch)
    # Fall back to a scratch directory when no usable work dir was given.
    work_dir = args.work if args.work and os.path.isdir(
        args.work) else tempfile.mkdtemp()
    return serve(work_dir, "testuser", "password", "127.0.0.1", args.port)
def log_setup():
    """Route root-logger output (INFO and above) to LOG_FILE."""
    handler = WatchedFileHandler(LOG_FILE)
    handler.setFormatter(
        logging.Formatter('%(asctime)s %(levelname)s %(message)s'))
    root = logging.getLogger()
    root.addHandler(handler)
    root.setLevel(logging.INFO)
def setup_logging(console_level, log_file=None):
    '''
    Log to stderr with given console level (0: warnings, 1: info, 2+: debug).
    Log to log file if path given.
    '''
    from logging import DEBUG, INFO, WARNING, Formatter
    from logging.handlers import WatchedFileHandler

    logging.getLogger().setLevel(DEBUG)
    if console_level < 3:
        # TODO: filter in a handler instead in logger
        logging.getLogger('botocore').setLevel(INFO)
    console = logging.StreamHandler()
    console.setFormatter(Formatter(log_format))
    # 0 -> WARNING, 1 -> INFO, anything else -> DEBUG
    console.setLevel({0: WARNING, 1: INFO}.get(console_level, DEBUG))
    logging.getLogger().addHandler(console)
    if log_file:
        file_handler = WatchedFileHandler(str(log_file))
        file_handler.setFormatter(Formatter(log_format))
        file_handler.setLevel(DEBUG)
        logging.getLogger().addHandler(file_handler)
def _setup_logger(self, level, log_file):
    """Setup log level and log file if set"""
    # No level configured -> leave logging untouched.
    if not level:
        return
    # Already configured (handlers present) -> don't stack duplicates.
    if logger.handlers:
        return
    # Old-Python compatibility guard: NullHandler appeared in 2.7.
    if hasattr(logging, 'NullHandler'):
        logger.addHandler(logging.NullHandler())
    formatter = logging.Formatter(
        '[%(levelname)s] %(asctime)s - %(module)s.%(funcName)s() '
        '- %(message)s')
    # Map the textual level (e.g. "debug") onto its logging constant.
    level = getattr(logging, level.upper())
    logger.setLevel(level)
    handler = logging.StreamHandler()
    logger.addHandler(handler)
    handler.setFormatter(formatter)
    if not log_file:
        return
    try:
        _handler = WatchedFileHandler(log_file)
    except IOError:
        # Keep running with the stream handler only; report the fallback.
        logger.error("Could not write to %s, falling back to stdout",
                     log_file)
    else:
        logger.addHandler(_handler)
        _handler.setFormatter(formatter)
def __init__(self, application):
    """Wrap *application* and route api_logger output to the API log file."""
    self.application = application
    # WatchedFileHandler re-opens the file if logrotate moves it aside.
    handler = WatchedFileHandler(settings.API_LOG)
    handler.setFormatter(logging.Formatter(LOGFORMAT, DATEFORMAT))
    api_logger.setLevel(logging.DEBUG)
    api_logger.addHandler(handler)
def setup_logger(configs):
    """Setup logging

    Args:
        configs (dict): logging configuration

    Returns:
        logging.logger: the configured logger
    """
    # TO-DO: use logging.config.dictConfig instead
    logger = logging.getLogger(LOGGER)
    level = getattr(logging, configs['level'])
    logger.setLevel(level)
    fmt = logging.Formatter(
        '%(threadName)s::%(levelname)s::%(asctime)s'
        '::%(lineno)d::(%(funcName)s) %(message)s'
    )
    # A 'file' entry selects a watched log file; otherwise log to stdout.
    if configs.get('file', False):
        handler = WatchedFileHandler(configs.get('file'))
    else:
        handler = logging.StreamHandler(sys.stdout)
    handler.setLevel(level)
    handler.setFormatter(fmt)
    logger.addHandler(handler)
    return logger
def init_server(level, sentry_dsn, release, logfile, with_gr_name=True):
    """Configure root logging for a gevent server: stdout, Sentry, optional file.

    NOTE(review): setting 'sentry.errors' to level 1000 silences the Sentry
    client's own error reporting — confirm that is intentional.
    """
    patch_gevent_hub_print_exception()
    root = logging.getLogger()
    root.setLevel(level)
    fmter = ServerLogFormatter(with_gr_name=with_gr_name)
    # Console output on stdout.
    std = logging.StreamHandler(stream=sys.stdout)
    std.setFormatter(fmter)
    root.addHandler(std)
    # Ship ERROR+ records to Sentry via the gevent-friendly transport.
    hdlr = SentryHandler(
        raven.Client(sentry_dsn, transport=GeventedHTTPTransport,
                     release=release))
    hdlr.setLevel(logging.ERROR)
    root.addHandler(hdlr)
    logging.getLogger('sentry.errors').setLevel(1000)
    if logfile:
        from logging.handlers import WatchedFileHandler
        filehdlr = WatchedFileHandler(logfile)
        filehdlr.setFormatter(fmter)
        root.addHandler(filehdlr)
def create_app(config_file='settings.py'):
    """Flask application factory: wires extensions, REST resources and logging.

    Returns the configured app; logging goes to ``server.log`` at INFO in
    debug config, ERROR otherwise.
    """
    app = Flask(__name__)
    jwt = JWT(app, authenticate, identity)  # Auto Creates /auth endpoint
    app.config.from_pyfile(config_file)
    api = Api(app)
    db.init_app(app)
    app.cli.add_command(create_tables)  # To interact with app from CLI
    api.add_resource(UserRegister, '/users/register')
    api.add_resource(UserList, '/users')
    api.add_resource(User, '/users/<int:user_id>')
    api.add_resource(CreatePost, '/posts/create')
    # Logging
    log_level = logging.INFO if app.config['DEBUG'] else logging.ERROR
    handler = WatchedFileHandler('server.log')
    formatter = logging.Formatter('%(asctime)s | %(levelname)s: %(message)s', '%d-%m-%Y %H:%M:%S')
    handler.setFormatter(formatter)
    root = logging.getLogger()
    root.setLevel(log_level)
    root.addHandler(handler)
    logging.info('\n------------------- Starting Server -------------------')
    return app
def init(cls, config):
    """Wire console and/or file handlers onto the root logger per *config* flags."""
    formatter = logging.Formatter(config.FORMAT)
    root = logging.getLogger('')
    root.setLevel(config.ROOT_LEVEL)
    if config.LOG_TO_CONSOLE:
        console = logging.StreamHandler()
        console.setLevel(config.CONSOLE_LEVEL)
        console.setFormatter(formatter)
        root.addHandler(console)

    def _add_file(base_dir):
        # File handlers share level/format; WatchedFileHandler survives
        # logrotate moving the file out from under the process.
        handler = WatchedFileHandler(
            os.path.join(base_dir, cls.generate_log_file_name()),
            encoding='utf-8')
        handler.setLevel(config.FILE_LEVEL)
        handler.setFormatter(formatter)
        root.addHandler(handler)

    if config.LOG_TO_FILE:
        _add_file(config.FILE_BASE)
    if config.TEMP_LOG:
        # Temp log lands in the current working directory.
        _add_file('')
def _initLogging(self):
    """Set up logging: console DEBUG when interactive, else syslog (+ optional error file)."""
    logging.basicConfig()
    logger = logging.getLogger()
    if self.interactive:
        logger.setLevel(logging.DEBUG)
    else:
        logger.removeHandler(
            logger.handlers[0])  # get rid of the default one
        if self.error_log:
            handler = WatchedFileHandler(
                filename=self.error_log
            )  # something that can handle logrotate
            handler.setFormatter(
                logging.Formatter(
                    fmt=
                    '\n\n%(asctime)s pid:%(process)d thread: %(thread)d\n%(module)s - %(lineno)d\n%(message)s'
                ))
            handler.setLevel(logging.ERROR)
            logger.addHandler(handler)
        # Daemon mode: everything INFO+ goes to syslog, tagged with the
        # process name and pid.
        handler = SysLogHandler(address='/dev/log',
                                facility=SysLogHandler.LOG_DAEMON)
        handler.setFormatter(
            logging.Formatter(fmt=self.proc_name +
                              '[%(process)d]: %(message)s'))
        logger.addHandler(handler)
        logger.setLevel(logging.INFO)
async def init_app():
    """Build the aiohttp application: logging, shared state, and routes."""
    # Setup logger
    if DEBUG_MODE:
        print("debug mode")
        logging.basicConfig(level=logging.DEBUG)
    else:
        # Non-debug: aiohttp server records go to LOG_FILE with daily rotation.
        root = logging.getLogger('aiohttp.server')
        logging.basicConfig(level=logging.WARN)
        handler = WatchedFileHandler(LOG_FILE)
        formatter = logging.Formatter("%(asctime)s;%(levelname)s;%(message)s", "%Y-%m-%d %H:%M:%S %z")
        handler.setFormatter(formatter)
        root.addHandler(handler)
        root.addHandler(TimedRotatingFileHandler(LOG_FILE, when="d", interval=1, backupCount=100))
    app = web.Application()
    # Global vars
    app['clients'] = {}  # Keep track of connected clients
    app['limit'] = {}  # Limit messages based on IP
    app['active_messages'] = set()  # Avoid duplicate messages from being processes simultaneously
    app['subscriptions'] = {}  # Store subscription UUIDs, this is used for targeting callback accounts
    app.add_routes([web.get('/', websocket_handler)])  # All WS requests
    app.add_routes([web.post(f'/callback/{CALLBACK_TOKEN}', callback_handler)])  # http/https callback from node
    return app
def add_file_handler(self, log_file):
    """Attach a WatchedFileHandler for *log_file*, mirroring the logger's current level."""
    handler = WatchedFileHandler(log_file)
    handler.setLevel(self.logger.level)
    handler.setFormatter(logging.Formatter(
        '%(asctime)s - %(levelname)s - %(message)s',
        "%Y-%m-%d %H:%M:%S"))
    self.logger.addHandler(handler)
def init_logging(name: str, log_level: int, *, log_file: Optional[str] = None, foreground: Optional[bool] = False):
    """Configure the root logger with exactly one sink.

    Console when *foreground* is true, else *log_file* when given, else
    syslog — in which case syslog's fd is duplicated onto stdout/stderr so
    stray prints and tracebacks also land in syslog.
    """
    rootlogger = logging.getLogger()
    rootlogger.setLevel(log_level)
    if foreground:
        console = logging.StreamHandler()
        console.setFormatter(
            logging.Formatter(
                '%(asctime)s {}: %(levelname)s: (%(name)s) %(message)s'.format(
                    name)))
        rootlogger.addHandler(console)
    elif log_file:
        logfile = WatchedFileHandler(log_file)
        logfile.setFormatter(
            logging.Formatter(
                '%(asctime)s {}: %(levelname)s: (%(name)s) %(message)s'.format(
                    name)))
        rootlogger.addHandler(logfile)
    else:
        # Syslog adds its own timestamp, so the format omits %(asctime)s.
        syslog = SysLogHandler('/dev/log')
        syslog.setFormatter(
            logging.Formatter(
                '{}: %(levelname)s: (%(name)s) %(message)s'.format(name)))
        rootlogger.addHandler(syslog)
        # Duplicate syslog's file descriptor to stout/stderr.
        syslog_fd = syslog.socket.fileno()
        os.dup2(syslog_fd, 1)
        os.dup2(syslog_fd, 2)
def set_logger():
    """Configure the module logger once: DEBUG file + console handlers."""
    # Idempotent: a logger that already has handlers is left alone.
    if logger.hasHandlers():
        return
    logger.setLevel(logging.DEBUG)
    formatter = CustomFormatter(
        '[%(levelname)s %(asctime)s] %(message)s',
        datefmt='%Y-%m-%d %H:%M:%S.%f'
    )
    for handler in (WatchedFileHandler('logs/output.log', encoding='utf8'),
                    StreamHandler()):
        handler.setFormatter(formatter)
        handler.setLevel(logging.DEBUG)
        logger.addHandler(handler)
        logger.info('Added logging handler: ' + str(handler))
    logger.info('Set new logger up.')
    return
def __init__(self, config_file, logger=None):
    """Load configuration and wire up logging, signal handling and GPIO pins."""
    self.config_file = config_file
    self.config = self._load_config(config_file)
    logging.basicConfig(format=self.config['log_format'])
    # An injected logger wins; otherwise use the root logger.
    if logger:
        self.log = logger
    else:
        self.log = logging.getLogger()
    if self.config['log_file']:
        # WatchedFileHandler re-opens the file after logrotate moves it.
        fh = WatchedFileHandler(self.config['log_file'])
        formatter = logging.Formatter(self.config['log_format'])
        fh.setFormatter(formatter)
        self.log.addHandler(fh)
    # 'log_level' holds a name such as "INFO"; resolve to the constant.
    self.log.setLevel(getattr(logging, self.config['log_level']))
    self.log.info('Loaded config')
    self.log.debug('Config: %s', self.config)
    # Events used by the signal handlers to request shutdown/reload.
    self.end_event = threading.Event()
    self.reload_event = threading.Event()
    self.__setup_signal_handlers()
    self.pin_state = False
    self._setup_pins()
    self.log.info('FanDriverDaemon inited')
def __prepare_logger(self):
    """Create the non-propagating 'app' logger from the LOG_* config entries."""
    self.logger = logging.getLogger('app')
    self.logger.propagate = False
    log_level = self.config.log.get("LOG_LEVEL") or self.DEFAULT_LOG_LEVEL
    log_level = log_level.upper()
    # Resolve the level name (e.g. "INFO") to its logging constant.
    log_level = getattr(logging, log_level)
    if "LOG_FILE" in self.config.log:
        from logging.handlers import WatchedFileHandler
        handler = WatchedFileHandler(self.config.log["LOG_FILE"])
        self.logger.addHandler(handler)
    if self.config.log.get("DEBUG"):
        # Debug mode also mirrors records to stdout and forces DEBUG level.
        handler = logging.StreamHandler(stream=sys.stdout)
        log_level = logging.DEBUG
        self.logger.addHandler(handler)
    log_format = self.config.log.get(
        "LOG_FORMAT") or self.DEFAULT_LOG_FORMAT
    log_format = logging.Formatter(log_format)
    self.logger.setLevel(log_level)
    # Apply the final level and format uniformly to every handler added above.
    for handler in self.logger.handlers:
        handler.setLevel(log_level)
        handler.setFormatter(log_format)
    self.logger.info("Logger created. Environment type set to %s"
                     % self.envtype)
def main():
    """Start the corgi Tornado server, logging to a configured file or the console."""
    # Set up our log level
    try:
        filename = config['server.logging_filename']
        handler = WatchedFileHandler(filename)
    except KeyError:
        # No log file configured -> fall back to a console handler.
        handler = StreamHandler()
    handler.setFormatter(logging.Formatter(config['server.logging_format']))
    root_logger = logging.getLogger('')
    root_logger.setLevel(int(config['server.logging_level']))
    root_logger.addHandler(handler)
    settings = {
    }
    if 'debug' in config:
        log.info('Enabling Tornado Web debug mode')
        settings['debug'] = config['debug']
    host = config['server.socket_host']
    port = int(config['server.socket_port'])
    application = tornado.web.Application([
        (r"/event", EventHandler),
    ], **settings)
    if config.get('dry-run'):
        log.info('In dry-run mode')
    log.info('Starting corgi server http://%s:%d/' % (host, port))
    http_server = tornado.httpserver.HTTPServer(application)
    http_server.listen(port, host)
    # Blocks until the IOLoop is stopped.
    tornado.ioloop.IOLoop.instance().start()
def add_logger(logger_name, file_name=None, parent=None, propagate=False, log_level='INFO', log_dir='/var/log/natrixclient'):
    """Add a new logger

    :param logger_name:
    :param file_name:
    :param parent:
    :param propagate:
    :param log_level:
    :param log_dir:
    :return:
    """
    new_logger = logging.getLogger(logger_name)
    new_logger.propagate = propagate
    new_logger.parent = parent
    # Default the file name to the logger name.
    if file_name is None:
        file_name = logger_name
    log_file_name = '{log_dir}/{file_name}.log'.format(log_dir=log_dir,
                                                       file_name=file_name)
    handler = WatchedFileHandler(filename=log_file_name)
    handler.setLevel(log_level)
    handler.setFormatter(logging.Formatter(fmt=FILE_LOGGING_FORMAT,
                                           datefmt=FILE_LOGGING_DATE_FORMAT))
    new_logger.addHandler(handler)
def __init__(self, name):
    """Register the plugin under *name* and configure its logger."""
    self.name = name
    self.plugins.register(self.name, self)
    self._methods = {}
    # Debug mode is toggled by the presence of a vim buffer named DEBUG-<plugin>.
    self.debug = bool('DEBUG-%s' % self.name in get_vim_buffers_names())
    self.log = logging.getLogger(self.name)
    self.log.setLevel(logging.DEBUG if self.debug else logging.INFO)
    self.settings.option('LOG_PATH', default=None)
    if self.settings['LOG_PATH']:
        if not os.path.exists(self.settings['LOG_PATH']):
            os.makedirs(self.settings['LOG_PATH'])
        log_file_name = '%s.log' % os.path.join(self.settings['LOG_PATH'],
                                                self.name)
        handler = WatchedFileHandler(log_file_name, 'w')
    else:
        # No log path configured: discard all output.
        handler = logging.FileHandler('/dev/null')
    fm = logging.Formatter(
        '%(asctime)s %(levelname)s: %(message)s',
        '%H:%M:%S'
    )
    handler.setFormatter(fm)
    self.log.addHandler(handler)
    self.log.debug('Plugin name: %r', self.name)
def set_logger(debug=False, logfile=None):
    """Configure the root logger: console output plus an optional log file.

    Fixes: the old code assigned ``logging.level = ...`` — a meaningless
    attribute on the logging *module*, not a configuration call — and
    swallowed every file-handler error with a silent ``except: pass``.
    File-handler failures are still non-fatal (console logging proceeds)
    but are now reported on stderr, and only OSError is caught.
    """
    log_formatter = logging.Formatter(
        "%(asctime)s %(levelname)s %(process)d %(filename)s:%(funcName)s:%(lineno)d %(message)s")
    root_logger = logging.getLogger()
    root_logger.setLevel(logging.DEBUG if debug else logging.INFO)
    if logfile:
        try:
            logdir = os.path.dirname(logfile)
            if logdir and not os.path.isdir(logdir):
                os.makedirs(logdir)
            file_handler = WatchedFileHandler(logfile)
            file_handler.setFormatter(log_formatter)
            root_logger.addHandler(file_handler)
        except OSError as exc:
            # Best-effort: keep console-only logging, but say why.
            print('Could not set up log file %s: %s' % (logfile, exc),
                  file=sys.stderr)
    console_handler = logging.StreamHandler()
    console_handler.setFormatter(log_formatter)
    root_logger.addHandler(console_handler)
async def init_app():
    """ Initialize the main application instance and return it"""

    async def close_redis(app):
        """Close redis connections"""
        log.server_logger.info('Closing redis connections')
        app['rdata'].close()

    async def open_redis(app):
        """Open redis connections"""
        log.server_logger.info("Opening redis connections")
        app['rdata'] = await aioredis.create_redis_pool(
            (redis_host, redis_port),
            db=int(os.getenv('REDIS_DB', '2')),
            encoding='utf-8',
            minsize=2,
            maxsize=15)
        # Global vars
        app['clients'] = {}  # Keep track of connected clients
        app['last_msg'] = {}  # Last time a client has sent a message
        app['active_messages'] = set()  # Avoid duplicate messages from being processes simultaneously
        app['cur_prefs'] = {}  # Client currency preferences
        app['subscriptions'] = {}  # Store subscription UUIDs, this is used for targeting callback accounts
        app['active_work'] = set()  # Keep track of active work requests to prevent duplicates

    # Setup logger
    if debug_mode:
        logging.basicConfig(level=logging.DEBUG)
    else:
        root = logging.getLogger('aiohttp.server')
        logging.basicConfig(level=logging.INFO)
        if options.log_to_stdout:
            handler = logging.StreamHandler(sys.stdout)
            formatter = logging.Formatter("%(asctime)s;%(levelname)s;%(message)s", "%Y-%m-%d %H:%M:%S %z")
            handler.setFormatter(formatter)
            root.addHandler(handler)
        else:
            # File logging: watched handler plus daily rotation.
            handler = WatchedFileHandler(log_file)
            formatter = logging.Formatter("%(asctime)s;%(levelname)s;%(message)s", "%Y-%m-%d %H:%M:%S %z")
            handler.setFormatter(formatter)
            root.addHandler(handler)
            root.addHandler(TimedRotatingFileHandler(log_file, when="d", interval=1, backupCount=100))
    app = web.Application()
    # Permissive CORS so browser clients on any origin can reach the HTTP API.
    cors = aiohttp_cors.setup(app, defaults={
        "*": aiohttp_cors.ResourceOptions(
            allow_credentials=True,
            expose_headers="*",
            allow_headers="*",
        )
    })
    app.add_routes([web.get('/', websocket_handler)])  # All WS requests
    app.add_routes([web.post('/callback', callback)])  # HTTP Callback from node
    # HTTP API
    users_resource = cors.add(app.router.add_resource("/api"))
    cors.add(users_resource.add_route("POST", http_api))
    alerts_resource = cors.add(app.router.add_resource("/alerts/{lang}"))
    cors.add(alerts_resource.add_route("GET", alerts_api))
    #app.add_routes([web.post('/callback', callback)])
    app.on_startup.append(open_redis)
    app.on_shutdown.append(close_redis)
    return app
def parseArgs():
    """parse arguments

    @return: options
    """
    opt = optparse.OptionParser()
    opt.add_option(
        "--maxWait",
        "-W",
        type="int",
        action="store",
        default=0,
        help="max wait in seconds. 0=> forever",
    )
    opt.add_option("--noWait", action="store_true", default=False, help="do not wait")
    opt.add_option(
        "--rd",
        "-r",
        action="store_true",
        default=False,
        help="drop read cache",
    )
    opt.add_option(
        "--wr",
        "-w",
        action="store_true",
        default=False,
        help="drop write cache",
    )
    opt.add_option(
        "--verbose",
        action="store_true",
        default=False,
        help="more verbose output",
    )
    options, _ = opt.parse_args()
    # Verbosity only widens the module logger's threshold.
    if options.verbose:
        logLevel = logging.DEBUG
    else:
        logLevel = logging.WARN
    _logger.setLevel(logLevel)
    handler = WatchedFileHandler(_LOG_FILE)
    handler.setLevel(logLevel)
    logFmt = "%(asctime)s - %(message)s"
    handler.setFormatter(logging.Formatter(logFmt))
    _logger.addHandler(handler)
    # At least one cache-drop mode is required.
    if not options.rd and not options.wr:
        _logger.error("Must provide one or more of --rd/--wr")
        exit(1)
    return options
def _logger(level, out_stream, name=None, log_file=None,
            log_file_level=logging.DEBUG, milliseconds=False):
    """Create the actual logger instance, logging at the given level

    if name is None, it will get args[0] without the extension (e.g. gina).
    'out_stream must be passed, the recommended value is sys.stderr'
    """
    if name is None:
        # Determine the logger name from the script name
        name = sys.argv[0]
        name = re.sub('.py[oc]?$', '', name)
    # We install our custom handlers and formatters on the root logger.
    # This means that if the root logger is used, we still get correct
    # formatting. The root logger should probably not be used.
    root_logger = logging.getLogger()
    # reset state of root logger
    reset_root_logger()
    # Make it print output in a standard format, suitable for
    # both command line tools and cron jobs (command line tools often end
    # up being run from inside cron, so this is a good thing).
    hdlr = logging.StreamHandler(out_stream)
    # We set the level on the handler rather than the logger, so other
    # handlers with different levels can be added for things like debug
    # logs.
    root_logger.setLevel(0)
    hdlr.setLevel(level)
    if milliseconds:
        # Python default datefmt includes milliseconds.
        formatter = LaunchpadFormatter(datefmt=None)
    else:
        # Launchpad default datefmt does not include milliseconds.
        formatter = LaunchpadFormatter()
    hdlr.setFormatter(formatter)
    root_logger.addHandler(hdlr)
    # Add an optional aditional log file.
    if log_file is not None:
        handler = WatchedFileHandler(log_file, encoding="UTF8")
        handler.setFormatter(formatter)
        handler.setLevel(log_file_level)
        root_logger.addHandler(handler)
    # Create our logger
    logger = logging.getLogger(name)
    # Set the global log
    log._log = logger
    # Inform the user the extra log file is in operation.
    if log_file is not None:
        log.info(
            "Logging %s and higher messages to %s" % (
                logging.getLevelName(log_file_level), log_file))
    return logger
async def init_app():
    """Initialize the main application instance and return it.

    Fix: ``logging.basicConfig(level=logging.INFO)`` was called twice in
    the non-debug branch; the second call is a no-op once the root logger
    has handlers, so the redundant duplicate has been removed.
    """

    async def close_redis(app):
        """Close redis connections"""
        log.server_logger.info('Closing redis connections')
        app['rdata'].close()

    async def open_redis(app):
        """Open redis connections and seed the per-app state containers."""
        log.server_logger.info("Opening redis connections")
        app['rdata'] = await create_redis_pool(
            (os.getenv('REDIS_HOST', 'localhost'), 6379),
            db=int(os.getenv('REDIS_DB', '2')),
            encoding='utf-8',
            minsize=2,
            maxsize=50)
        app['clients'] = {}  # Keep track of connected clients
        app['last_msg'] = {}  # Last time a client has sent a message
        app['active_messages'] = set()  # Avoid duplicate messages from being processes simultaneously
        app['cur_prefs'] = {}  # Client currency preferences
        app['subscriptions'] = {}  # Store subscription UUIDs, this is used for targeting new operation pushes

    # Setup logger
    if debug_mode:
        logging.basicConfig(level=logging.DEBUG)
    else:
        logging.basicConfig(level=logging.INFO)
        root = logging.getLogger('aiohttp.server')
        formatter = logging.Formatter("%(asctime)s;%(levelname)s;%(message)s",
                                      "%Y-%m-%d %H:%M:%S %z")
        if options.log_to_stdout:
            handler = logging.StreamHandler(sys.stdout)
            handler.setFormatter(formatter)
            root.addHandler(handler)
        else:
            # File logging: watched handler plus daily rotation.
            handler = WatchedFileHandler(log_file)
            handler.setFormatter(formatter)
            root.addHandler(handler)
            root.addHandler(
                TimedRotatingFileHandler(log_file,
                                         when="d",
                                         interval=1,
                                         backupCount=100))
    app = web.Application()
    app.add_routes([web.get('/', websocket_handler)])  # All WS requests
    app.add_routes([web.post('/rawrpc', whitelist_rpc)])  # HTTP API
    app.add_routes([web.post('/v1', http_api)])  # HTTP API
    app.on_startup.append(open_redis)
    app.on_shutdown.append(close_redis)
    return app
def create_app(config_class=DevelopmentConfig):
    """Flask application factory: extensions, REST resources, routes and logging."""
    app = Flask(__name__)
    app.config.from_object(config_class)
    load_dotenv()
    jwt.init_app(app)
    # CORS(app)
    api = Api(app)
    db.init_app(app)
    app.cli.add_command(create_tables)  # To interact with app from CLI
    b_crypt.init_app(app)
    ma.init_app(app)
    migrate = Migrate(app, db)
    # oauth.init_app(app)
    # USER API
    api.add_resource(UserRegister, '/user/register')
    api.add_resource(UserLogin, '/user/login')
    api.add_resource(UserLogout, '/user/logout')
    api.add_resource(UserPasswordRestoreRequest, '/user/restore')
    api.add_resource(UserPasswordReSetter, '/user/restore/<string:token>')
    api.add_resource(User, '/user/<int:_id>')
    api.add_resource(UserList, '/users/<int:limit>')
    api.add_resource(TokenRefresher, '/user/refreshing')
    api.add_resource(UserEmail2FA, '/user/fa2_auth/<string:token>')
    # REQUEST API
    api.add_resource(RequestsList, '/requests')
    api.add_resource(RequestCreation, '/requests/new')
    print(f"App current configuration: {config_class.CONFIG_NAME}")
    # OAuth API
    # api.add_resource(GithubLogin, "/login/oauth/github")
    # api.add_resource(GithubAuthorize, "/login/oauth/github/authorized")
    # CONFIRMATION API
    api.add_resource(Confirmation, '/user/confirmation/<string:confirmation_id>')
    # api.add_resource(User, '/users/<int:user_id>')
    api.add_resource(Content, '/content')

    @app.route('/')
    def home():
        return render_template("index.html")

    # Logging: file handler on the root logger; INFO in debug, ERROR otherwise.
    log_level = logging.INFO if app.config['DEBUG'] else logging.ERROR
    handler = WatchedFileHandler('server.log')
    formatter = logging.Formatter('%(asctime)s | %(levelname)s: %(message)s', '%d-%m-%Y %H:%M:%S')
    handler.setFormatter(formatter)
    root = logging.getLogger()
    root.setLevel(log_level)
    root.addHandler(handler)
    logging.info('\n------------------- Starting Server -------------------')
    return app
def get_logger(logname, logfile, loglevel, propagate):
    """Return a logger named *logname* writing to *logfile* at *loglevel*."""
    new_logger = logging.getLogger(logname)
    handler = WatchedFileHandler(logfile)
    handler.setFormatter(logging.Formatter(
        '%(asctime)s %(name)-6s %(levelname)-8s %(message)s',
        '%b %d %H:%M:%S'))
    new_logger.addHandler(handler)
    new_logger.propagate = propagate
    new_logger.setLevel(loglevel)
    return new_logger
def main(args):
    """Configure logging, assemble the aiohttp web services, and serve forever."""
    log = logging.getLogger("addisonarches.web")
    log.setLevel(args.log_level)
    formatter = logging.Formatter(
        "%(asctime)s %(levelname)-7s %(name)s|%(message)s")
    ch = logging.StreamHandler()
    if args.log_path is None:
        ch.setLevel(args.log_level)
    else:
        # With a log file in play, the console only shows warnings and up.
        fh = WatchedFileHandler(args.log_path)
        fh.setLevel(args.log_level)
        fh.setFormatter(formatter)
        log.addHandler(fh)
        ch.setLevel(logging.WARNING)
    ch.setFormatter(formatter)
    log.addHandler(ch)
    loop = asyncio.SelectorEventLoop()
    asyncio.set_event_loop(loop)
    down = asyncio.Queue(loop=loop)
    up = asyncio.Queue(loop=loop)
    #TODO: Read service name from CLI
    service = "dev"  # Cf qa, demo, prod, etc
    tok = token(args.connect, service, APP_NAME)
    node = create_udp_node(loop, tok, down, up)
    loop.create_task(node(token=tok))
    app = aiohttp.web.Application()
    assets = Assets(app, **vars(args))
    reg = Registration(app, tok, down, up, **vars(args))
    transitions = Transitions(app, **vars(args))
    work = Workflow(app, tok, down, up, **vars(args))
    for svc in (assets, reg, transitions, work):
        log.info("{0.__class__.__name__} object serves {1}".format(
            svc, ", ".join(svc.routes.keys())))
    handler = app.make_handler()
    f = loop.create_server(handler, args.host, args.port)
    srv = loop.run_until_complete(f)
    log.info("Serving on {0[0]}:{0[1]}".format(srv.sockets[0].getsockname()))
    try:
        loop.run_forever()
    except KeyboardInterrupt:
        pass
    finally:
        # Orderly shutdown: drain connections, close the server, then the app.
        loop.run_until_complete(handler.finish_connections(1.0))
        srv.close()
        loop.run_until_complete(srv.wait_closed())
        loop.run_until_complete(app.finish())
        loop.close()
def getDaemonLogger(filePath, log_format=None, loglevel=logging.INFO):
    """Attach a WatchedFileHandler at *filePath* to the root logger.

    Returns ``(logger, handler)`` on success, or ``(exception, None)``
    when the handler cannot be created.
    """
    root = logging.getLogger()
    root.setLevel(loglevel)
    try:
        handler = WatchedFileHandler(filePath)
    except Exception as e:  # pylint: disable=broad-except
        return e, None
    handler.setFormatter(logging.Formatter(log_format or '%(asctime)s %(msg)s'))
    root.addHandler(handler)
    return root, handler
def main():
    """CLI entry point: parse args, optionally daemonize, start SOCKS + HTTP servers.

    Fix: the pidfile was written with Python 2 ``print >> f, pid`` syntax
    — a SyntaxError under Python 3 (the rest of the file uses f-strings).
    It now uses ``print(pid, file=f)`` inside a ``with`` block so the file
    is always closed.
    """
    if len(sys.argv) < 5:
        usage(f=sys.stderr)
        sys.exit(-1)
    http_ip = sys.argv[1]
    http_port = int(sys.argv[2])
    socksip = sys.argv[3]
    socksport = int(sys.argv[4])
    opts, _ = getopt.gnu_getopt(sys.argv[5:], "hdp:l:",
                                ["help", "debug", "pidfile=", "logfile="])
    for o, a in opts:
        if o == "-h" or o == "--help":
            usage()
            sys.exit()
        if o == "-d" or o == "--debug":
            options.logginglevel = logging.DEBUG
        elif o == "-p" or o == "--pidfile":
            options.daemonize = True
            options.pidfile = a
        elif o == "-l" or o == "--logfile":
            options.daemonize = True
            options.logfile = a
    if options.daemonize:
        pid = os.fork()
        if pid != 0:
            # Parent process: record the child's pid and exit.
            with open(options.pidfile, "w") as f:
                print(pid, file=f)
            sys.exit(0)
    if options.daemonize:
        # Daemon mode: log to the (logrotate-safe) file handler.
        logger = logging.getLogger()
        logger.setLevel(options.logginglevel)
        ch = WatchedFileHandler(options.logfile)
        ch.setFormatter(logging.Formatter(
            '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s'))
        logger.addHandler(ch)
    else:
        logging.basicConfig(
            format='[%(asctime)s][%(name)s][%(levelname)s] - %(message)s',
            datefmt='%Y-%d-%m %H:%M:%S',
            level=options.logginglevel,
        )
    socks = SocksServer(socksip, socksport, SocksRelayFactory(),
                        timeout=30, maxclient=500)
    socks.start()
    globalvars.socksip = socksip
    globalvars.socksport = socksport
    globalvars.sockstimeout = 60
    # Blocks serving the meek HTTP front end forever.
    WSGIServer((http_ip, http_port), meek_server_application,
               log=None).serve_forever()
def main():
    """Entry point for aq_notifyd: configure logging, then run once or loop.

    Fix: the per-logger level override loop used ``logging._levelNames``,
    a private mapping that exists only in Python 2's logging module; it
    now uses the public ``logging.getLevelName`` round-trip (which returns
    an int for a valid level name, a string otherwise).
    """
    parser = argparse.ArgumentParser(description="Send out broker notifications")
    parser.add_argument("-c", "--config", dest="config",
                        help="location of the broker configuration file")
    parser.add_argument("--one_shot", action="store_true",
                        help="do just a single run and then exit")
    parser.add_argument("--debug", action="store_true",
                        help="turn on debug logs on stderr")
    opts = parser.parse_args()
    config = Config(configfile=opts.config)
    # These modules must be imported after the configuration has been
    # initialized
    from aquilon.aqdb.db_factory import DbFactory
    db = DbFactory()
    if opts.debug:
        level = logging.DEBUG
        logging.basicConfig(level=level, stream=sys.stderr,
                            format='%(asctime)s [%(levelname)s] %(message)s')
    else:
        level = logging.INFO
        logfile = os.path.join(config.get("broker", "logdir"), "aq_notifyd.log")
        handler = WatchedFileHandler(logfile)
        handler.setLevel(level)
        formatter = logging.Formatter('%(asctime)s [%(levelname)s] %(message)s')
        handler.setFormatter(formatter)
        rootlog = logging.getLogger()
        rootlog.addHandler(handler)
        rootlog.setLevel(level)
    # Apply configured log settings
    for logname, level in config.items("logging"):
        numeric_level = logging.getLevelName(level)
        # Unknown names come back as the string "Level <name>" — skip them.
        if not isinstance(numeric_level, int):
            continue
        logging.getLogger(logname).setLevel(numeric_level)
    logger = logging.getLogger("aq_notifyd")
    if opts.one_shot:
        update_index_and_notify(config, logger, db)
    else:
        # Daemon mode: exit cleanly on SIGTERM/SIGINT.
        signal.signal(signal.SIGTERM, exit_handler)
        signal.signal(signal.SIGINT, exit_handler)
        run_loop(config, logger, db)
def make_api_logger():
    """Build and return the REST API logger, which writes DEBUG-and-up
    records to the API log file configured in settings."""
    # Imported lazily: the settings module itself imports this logger
    # module, so a top-level import would be circular.
    from nailgun.settings import settings

    api_logger = logging.getLogger("nailgun-api")
    api_logger.setLevel(logging.DEBUG)

    file_handler = WatchedFileHandler(settings.API_LOG)
    file_handler.setFormatter(formatter)
    api_logger.addHandler(file_handler)
    return api_logger
def configure_logger(self):
    """Create and return an INFO-level logger named after this object,
    writing to a watched file inside ``self.logging_folder``."""
    log = logging.getLogger(self.name)
    log.setLevel(logging.INFO)

    line_format = logging.Formatter('%(levelname)s %(asctime)s %(message)s')
    # Fall back to "<name>.log" when no explicit logname is set.
    filename = getattr(self, 'logname', '{}.log'.format(self.name))

    file_handler = WatchedFileHandler(
        '{}/{}'.format(self.logging_folder, filename))
    file_handler.setFormatter(line_format)
    file_handler.setLevel(logging.INFO)
    log.addHandler(file_handler)
    return log
def main(args):
    """Demo entry point: configure logging, run a UDP node on an asyncio
    loop, and send a single 'Hello World!' Alert parcel via the router."""
    log = logging.getLogger(APP_NAME)
    log.setLevel(args.log_level)
    formatter = logging.Formatter(
        "%(asctime)s %(levelname)-7s %(name)s|%(message)s")
    ch = logging.StreamHandler()
    if args.log_path is None:
        # Console-only logging at the requested level.
        ch.setLevel(args.log_level)
    else:
        # Log to file at the requested level; the console handler is
        # demoted to warnings-and-above.
        fh = WatchedFileHandler(args.log_path)
        fh.setLevel(args.log_level)
        fh.setFormatter(formatter)
        log.addHandler(fh)
        ch.setLevel(logging.WARNING)
    ch.setFormatter(formatter)
    log.addHandler(ch)
    # NOTE(review): Queue(loop=...) and asyncio.Task.all_tasks(loop=...)
    # were removed in Python 3.10 — this block assumes an older asyncio.
    loop = asyncio.SelectorEventLoop()
    asyncio.set_event_loop(loop)
    down = asyncio.Queue(loop=loop)
    up = asyncio.Queue(loop=loop)
    tok = token(args.connect, APP_NAME)
    node = create_udp_node(loop, tok, down, up)
    loop.create_task(node(token=tok))
    loop.create_task(queue_logger(loop, up))
    msg = parcel(
        tok,
        Alert(datetime.datetime.now(), "Hello World!"),
        via=Address(tok.namespace, tok.user, tok.service,
                    turberfield.ipc.demo.router.APP_NAME)
    )
    log.info("Sending message: {}".format(msg))
    # Enqueue the outbound message from outside the loop thread safely.
    loop.call_soon_threadsafe(functools.partial(down.put_nowait, msg))
    try:
        loop.run_forever()
    except KeyboardInterrupt:
        # Ctrl-C: cancel outstanding tasks and close held resources
        # before the loop is torn down.
        for task in asyncio.Task.all_tasks(loop=loop):
            task.cancel()
        for resource in resources:
            resource.close()
    finally:
        loop.close()
def configure_logging(loglevel, logfile=None):
    """Configure the module-level logger.

    :param loglevel: one of 'DEBUG', 'INFO', 'WARNING', 'ERROR'
        (case-insensitive).
    :param logfile: path of a log file; when None, a StreamHandler
        (default stream) is used instead.
    :raises ValueError: if *loglevel* is not a recognised level name.
    """
    loglevel = loglevel.upper()
    loglevels = ('DEBUG', 'INFO', 'WARNING', 'ERROR')
    if loglevel not in loglevels:
        # ValueError is more precise than the bare Exception the original
        # raised, and is still caught by any ``except Exception`` caller.
        raise ValueError('Loglevel must be one of {}'.format(loglevels))
    logger.setLevel(getattr(logging, loglevel))
    if logfile:
        handler = WatchedFileHandler(logfile)
    else:
        handler = StreamHandler()
    handler.setFormatter(
        logging.Formatter('[%(asctime)s] %(levelname)s - %(message)s',
                          '%m-%d %H:%M:%S'))
    logger.addHandler(handler)
def __init__(self, name, level=None):
    """Initialise the logger with a stream handler and, when a logfile
    is configured in yagi, a watched-file handler; both share FORMAT."""
    formatter = logging.Formatter(FORMAT)
    default_level = logging.getLevelName(
        yagi.config.get('logging', 'default_level'))
    logging.Logger.__init__(self, name, default_level)

    attached = [logging.StreamHandler()]
    logfile = yagi.config.get('logging', 'logfile')
    if logfile:
        attached.append(WatchedFileHandler(filename=logfile))

    for handler in attached:
        handler.setFormatter(formatter)
        logging.Logger.addHandler(self, handler)
def get_logger(name):
    """Return a configured, non-propagating logger.

    The logger writes to LOG_FILE and, when PRINT_STDOUT is truthy, to
    the console as well.

    BUG FIX: the original attached fresh handlers on *every* call, so
    repeated get_logger(name) calls duplicated each log line; handlers
    are now attached only once per logger.  The local previously named
    ``format`` (shadowing the builtin) is renamed.
    """
    logger = logging.getLogger(name)
    logger.propagate = False
    if logger.handlers:
        # Already configured by an earlier call.
        return logger
    formatter = logging.Formatter(
        "[%(levelname)s] %(asctime)s - %(name)s - %(message)s",
        datefmt="%m-%d-%Y %H:%M:%S")
    if PRINT_STDOUT:
        stream_handler = logging.StreamHandler()
        stream_handler.setLevel(LOG_LEVEL)
        stream_handler.setFormatter(formatter)
        logger.addHandler(stream_handler)
    file_handler = WatchedFileHandler(LOG_FILE)
    file_handler.setLevel(LOG_LEVEL)
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)
    return logger
def init(config_file):
    """Initialise the module-level ``logger`` and ``config`` globals.

    Scans sys.argv for ``--config=PATH``, ``--logfile=PATH`` and
    ``--verbose`` style options, removing every ``--`` option from
    sys.argv as it goes (unknown ``--`` options are silently dropped;
    non-option arguments are left in place).

    :param config_file: default configuration path, overridden by a
        ``--config=PATH`` option.
    """
    global logger
    global config
    log_file = None
    verbose = False
    n = 0
    count = len(sys.argv)
    while n < count:
        arg = sys.argv[n]
        if arg.startswith('--'):
            buf = arg[2:]
            at = buf.find('=')
            if at != -1:
                # "--var=val" form: split on the first '='.
                var = buf[:at]
                val = buf[at + 1:]
            else:
                # Bare "--flag" form.
                var = buf
                val = None
            # Consume the option in place.  Note: n is deliberately NOT
            # advanced — the next argument has shifted into slot n.
            del sys.argv[n]
            count -= 1
            if var == 'config':
                config_file = val
            elif var == 'logfile':
                log_file = val
            elif var == 'verbose':
                verbose = True
        else:
            # Not an option: leave it for later processing and move on.
            n += 1
    logger = logging.getLogger('app')
    if log_file:
        logger_handler = WatchedFileHandler(log_file)
    else:
        logger_handler = logging.StreamHandler(stream=sys.stdout)
    if verbose:
        logger.setLevel(logging.DEBUG)
        logger_handler.setLevel(logging.DEBUG)
    else:
        logger.setLevel(logging.INFO)
        logger_handler.setLevel(logging.INFO)
    formatter = logging.Formatter(
        fmt='%(levelname)s %(asctime)s.%(msecs)03d %(message)s',
        datefmt='%Y-%m-%d %H:%M:%S')
    logger_handler.setFormatter(formatter)
    logger.addHandler(logger_handler)
    # NOTE(review): ``ConfigParser.ConfigParser`` is the Python 2 module
    # spelling (``configparser`` in Python 3).
    config = ConfigParser.ConfigParser()
    config.read([config_file])
def handle(self, *args, **options):
    """Management-command entry point: start/stop/restart a daemonised
    SQS queue worker identified by ``--queue-alias``.

    Raises a plain Exception for an unsupported action or a missing
    queue alias; otherwise delegates to a daemon runner.
    """
    self.validate()
    _action = options.get('action')
    if _action not in ('start', 'restart', 'stop'):
        raise Exception('{} is not supported action.'.format(_action))
    if not options.get('queue_alias'):
        raise Exception("Please set --queue-alias options.")
    _queue = RegisteredQueue(**options)
    # Set logger up — only on first use; re-entry keeps existing handlers.
    if not logger.handlers:
        _formatter = logging.Formatter(
            fmt='[%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d '
                + django_sqs.PROJECT + '] %(message)s',
            datefmt='%Y-%m-%d %H:%M:%S')
        _handler = WatchedFileHandler(_queue.options['output_log_path'])
        _handler.setFormatter(_formatter)
        logger.addHandler(_handler)
        logger.setLevel(logging.DEBUG)
        logger.info('Set new logger up.')
    # Close the DB connection now and let Django reopen it when it
    # is needed again. The goal is to make sure that every
    # process gets its own connection
    from django.db import connection
    connection.close()
    # For 'start': refuse to launch a second worker if the PID file
    # points at any process that is still alive.
    if 'start' == _action and os.path.isfile(_queue.options['pid_file_path']):
        with open(_queue.options['pid_file_path'], 'r') as _pid_file:
            for _pid in _pid_file:
                try:
                    _pid = int(_pid.rstrip('\n'))
                except (AttributeError, ValueError):
                    # Unreadable line: -1 never matches a live PID.
                    _pid = -1
                logger.info('PID file exists already, so checking whether PID({}) is running.'.format(_pid))
                if pid_exists(_pid):
                    logger.info('PID({}) is already running, so exit this process.'.format(_pid))
                    return
    _runner = CustomDaemonRunner(_queue, (__name__, _action))
    logger.info('Initiated daemon runner to {} {}.'.format(_action, _queue.options['queue_name']))
    _runner.do_action()
    logger.info('Exit process for {}.'.format(_queue))
def initialize_root_logger():
    """Attach a watched-file handler and a console handler to the root
    logger, using the configured level and log directory, and return it."""
    root = logging.getLogger()
    root.setLevel(settings.config['logging.level'])

    # A WatchedFileHandler plays nicely with an external rotation tool:
    # it reopens the file by name after the rotator moves it aside.
    log_path = os.path.join(
        settings.config['logdir'],
        '{0}.log'.format(settings.config['server-producer']))
    file_handler = WatchedFileHandler(log_path)
    file_handler.setFormatter(logging.Formatter(
        fmt='%(asctime)s|%(name)s|%(levelname)s|%(message)s'))
    root.addHandler(file_handler)

    console_handler = logging.StreamHandler()
    console_handler.setFormatter(logging.Formatter(
        fmt='%(asctime)s|%(levelname)s|%(message)s'))
    root.addHandler(console_handler)

    return root
def _create_handlers(filename=LOG_FILEPATH, level=LOG_LEVEL):
    """Build the module's file and syslog handlers, publish them through
    the module-level ``handlers`` list, and return that list."""
    global handlers
    formatter = logging.Formatter(format)

    # WatchedFileHandler watches the file it is logging to; if the file
    # changes (e.g. via logrotate), it is closed and reopened by name.
    file_handler = WatchedFileHandler(filename)
    # The syslog stream is consumed by internal log monitoring tools.
    syslog_handler = SysLogHandler(facility=LOG_LOCAL3)

    for h in (file_handler, syslog_handler):
        h.setFormatter(formatter)
        h.setLevel(level)

    handlers = [file_handler, syslog_handler]
    return handlers
def setup_logging(log_level=logging.INFO, filename=None, stream=sys.stderr):
    """Configure the root logger.

    :param log_level: level for the root logger; ``logging.NOTSET``
        disables configuration entirely (the function returns at once).
    :param filename: when given, log to this file — preferring a
        WatchedFileHandler, falling back to a size-based
        RotatingFileHandler (50 MB, 7 backups).
    :param stream: stream used when no *filename* is given.
    """
    if log_level == logging.NOTSET:
        return
    logger = logging.getLogger()
    logger.setLevel(log_level)
    if filename:
        # BUG FIX: the original used a bare ``except:``, which would
        # also swallow KeyboardInterrupt/SystemExit; only the expected
        # failure modes (handler unavailable, file unopenable) are
        # caught now.
        try:
            from logging.handlers import WatchedFileHandler
            handler = WatchedFileHandler(filename)
        except (ImportError, OSError):
            from logging.handlers import RotatingFileHandler
            handler = RotatingFileHandler(filename, maxBytes=52428800,
                                          backupCount=7)
    else:
        handler = logging.StreamHandler(stream)
    handler.setFormatter(logging.Formatter(
        '%(asctime)s %(levelname)-8s %(message)s',
        '%Y-%m-%d %H:%M:%S'))
    logger.addHandler(handler)
def register_loggers(app):
    """Attach the configured log handler to the Flask app logger.

    Reads DOORMAN_LOGGING_FILENAME / _LEVEL / _FORMAT from ``app.config``.
    A filename of 'sys.stdout' logs to standard output; anything else is
    treated as a file path.  No-op when the app runs in debug mode.
    """
    if app.debug:
        return

    import logging
    import sys
    from logging.handlers import WatchedFileHandler

    log_fname = app.config['DOORMAN_LOGGING_FILENAME']
    if log_fname == 'sys.stdout':
        # BUG FIX: the original passed the *string* 'sys.stdout' to
        # StreamHandler, so every emit would fail (strings have no
        # usable write/flush for logging); pass the actual stream.
        handler = logging.StreamHandler(sys.stdout)
    else:
        handler = WatchedFileHandler(log_fname)

    levelname = app.config['DOORMAN_LOGGING_LEVEL']
    if levelname in ('DEBUG', 'INFO', 'WARN', 'WARNING', 'ERROR', 'CRITICAL'):
        handler.setLevel(getattr(logging, levelname))

    formatter = logging.Formatter(app.config['DOORMAN_LOGGING_FORMAT'])
    handler.setFormatter(formatter)
    app.logger.addHandler(handler)
def __create_producer_logger(self, producer, info):
    """Create a dedicated, non-propagating logger for *producer* that
    writes raw messages both to a watched log file and to the console."""
    self.log('Inside Server.__create_producer_logger, Producer: {0}, Info: {1}'.format(producer, info), level=logging.DEBUG)
    producer_logger = logging.getLogger(producer)
    producer_logger.setLevel(logging.DEBUG)
    producer_logger.propagate = False

    # A WatchedFileHandler cooperates with an external rotation tool:
    # the handler reopens the file when it is moved aside.
    log_path = os.path.join(settings.config['logdir'],
                            '{0}.log'.format(producer))
    file_handler = WatchedFileHandler(log_path)
    file_handler.setLevel(info['logging.level'])
    file_handler.setFormatter(logging.Formatter(fmt='%(message)s'))
    producer_logger.addHandler(file_handler)

    console_handler = logging.StreamHandler()
    console_handler.setLevel(logging.DEBUG)
    console_handler.setFormatter(logging.Formatter(fmt='%(message)s'))
    producer_logger.addHandler(console_handler)
def init(cls, config):
    """Configure the root logger from *config*: optional console and
    watched-file handlers sharing one formatter."""
    formatter = logging.Formatter(config.FORMAT)
    root_logger = logging.getLogger('')
    root_logger.setLevel(config.ROOT_LEVEL)

    if config.LOG_TO_CONSOLE:
        console = logging.StreamHandler()
        console.setLevel(config.CONSOLE_LEVEL)
        console.setFormatter(formatter)
        root_logger.addHandler(console)

    if config.LOG_TO_FILE:
        log_file = WatchedFileHandler(
            os.path.join(config.FILE_BASE, cls.generate_log_file_name()),
            encoding='utf-8'
        )
        log_file.setLevel(config.FILE_LEVEL)
        log_file.setFormatter(formatter)
        root_logger.addHandler(log_file)
def main(args):
    """Router demo entry point: configure logging and run a UDP node on
    an asyncio event loop until interrupted."""
    log = logging.getLogger(APP_NAME)
    log.setLevel(args.log_level)
    formatter = logging.Formatter(
        "%(asctime)s %(levelname)-7s %(name)s|%(message)s")
    ch = logging.StreamHandler()
    if args.log_path is None:
        # Console-only logging at the requested level.
        ch.setLevel(args.log_level)
    else:
        # Log to file at the requested level; the console handler is
        # demoted to warnings-and-above.
        fh = WatchedFileHandler(args.log_path)
        fh.setLevel(args.log_level)
        fh.setFormatter(formatter)
        log.addHandler(fh)
        ch.setLevel(logging.WARNING)
    ch.setFormatter(formatter)
    log.addHandler(ch)
    # NOTE(review): Queue(loop=...) and asyncio.Task.all_tasks(loop=...)
    # were removed in Python 3.10 — this block assumes an older asyncio.
    loop = asyncio.SelectorEventLoop()
    asyncio.set_event_loop(loop)
    down = asyncio.Queue(loop=loop)
    up = asyncio.Queue(loop=loop)
    tok = token(args.connect, APP_NAME)
    node = create_udp_node(loop, tok, down, up)
    loop.create_task(node(token=tok))
    log.info("Starting router node...")
    try:
        loop.run_forever()
    except KeyboardInterrupt:
        # Ctrl-C: cancel outstanding tasks and close held resources
        # before the loop is torn down.
        for task in asyncio.Task.all_tasks(loop=loop):
            task.cancel()
        for resource in resources:
            resource.close()
    finally:
        loop.close()
class TextLogger(Logger):
    """Logs messages with plain text. Rotating-friendly."""

    FORMAT = "%(asctime)s [%(protocol)s:%(pid)s] %(srcname)s >> %(text)s"

    def __init__(self, filename, tz=timezone.utc):
        # delay=True: the file is opened on first write only, and a
        # WatchedFileHandler reopens it if a rotator moves it away.
        self.loghandler = WatchedFileHandler(filename, encoding="utf-8",
                                             delay=True)
        self.loghandler.setLevel(logging.INFO)
        self.loghandler.setFormatter(logging.Formatter("%(message)s"))
        self.tz = tz

    def log(self, msg: Message):
        # Render FORMAT ourselves and hand the finished line straight to
        # the handler, bypassing the logging framework's own formatting.
        fields = msg._asdict()
        fields["asctime"] = datetime.fromtimestamp(
            msg.time, self.tz).strftime("%Y-%m-%d %H:%M:%S")
        fields["srcname"] = msg.src.alias
        fields["srcid"] = msg.src.id
        record = logging.makeLogRecord({"msg": self.FORMAT % fields})
        self.loghandler.emit(record)

    def commit(self):
        # Nothing buffered: each record was emitted immediately in log().
        pass
def initialize_logging(name="unknown"):
    """Initializes the logging module.

    This initializes pythons logging module:

    * set loglevel (from nodes config)
    * set logfile
    * define new loglevel BAN with priority 90
    * format log messages
    * log to file as well as stderr

    Kwargs:
        name (string): name of the module initializing the logger
    """
    c = config.Config()
    if not c.logLevel:
        c.logLevel = logging.DEBUG
    if not c.logFile:
        c.logFile = "/dev/null"
    logging.BAN = 90
    logging.addLevelName(logging.BAN, 'BAN')
    logger = logging.getLogger(name)
    logger.ban = lambda msg, *args: logger._log(logging.BAN, msg, args)
    formatter = logging.Formatter('%(asctime)s - %(name)s\t%(levelname)s\t%(message)s')
    logger.setLevel(c.logLevel)
    if name == "fail2ban-p2p":
        # Only add new handlers when called from main.py
        try:
            log2file = WatchedFileHandler(c.logFile)
            log2file.setFormatter(formatter)
            log2file.setLevel(c.logLevel)
            logger.addHandler(log2file)
        except OSError:
            # BUG FIX: this was a bare ``except:``, which also swallowed
            # SystemExit/KeyboardInterrupt; only the expected file-open
            # failure is caught now.
            print("--- WARNING --- LOGFILE " + c.logFile + " IS EITHER NONEXISTENT OR NOT WRITABLE")
        log2stderr = logging.StreamHandler(sys.stderr)
        log2stderr.setFormatter(formatter)
        log2stderr.setLevel(c.logLevel)
        logger.addHandler(log2stderr)
    return logger
def setup():
    """Parse the config file, wire up logging, open the database, and
    instantiate one pushkin per configured app ID (exits if none)."""
    cfg = parse_config()
    logging.getLogger().setLevel(
        getattr(logging, cfg.get('log', 'loglevel').upper()))
    logfile = cfg.get('log', 'logfile')
    if logfile != '':
        handler = WatchedFileHandler(logfile)
        handler.addFilter(RequestIdFilter())
        formatter = logging.Formatter(
            '%(asctime)s [%(process)d] %(levelname)-5s '
            '%(request_id)s %(name)s %(message)s'
        )
        handler.setFormatter(formatter)
        logging.getLogger().addHandler(handler)
    else:
        logging.basicConfig()
    ctx = SygnalContext()
    ctx.database = sygnal.db.Db(cfg.get('db', 'dbfile'))
    # Entries look like "<app_id>.type = <module>"; build a pushkin for
    # each one.
    for key, val in cfg.items('apps'):
        parts = key.rsplit('.', 1)
        if len(parts) < 2:
            continue
        if parts[1] == 'type':
            # BUG FIX: narrowed from a bare ``except:`` — still logged
            # and re-raised, but no longer traps SystemExit and friends.
            try:
                pushkins[parts[0]] = make_pushkin(val, parts[0])
            except Exception:
                logger.exception("Failed to load module for kind %s", val)
                raise
    if len(pushkins) == 0:
        logger.error("No app IDs are configured. Edit sygnal.conf to define some.")
        sys.exit(1)
    for p in pushkins:
        pushkins[p].cfg = cfg
        pushkins[p].setup(ctx)
    logger.info("Configured with app IDs: %r", pushkins.keys())
    # BUG FIX: "Setup completed" was logged at ERROR level; it is a
    # purely informational success message.
    logger.info("Setup completed")
def __initLogging(self, file=None, level=1):
    """Attach one handler to the 'webks' logger: stdout for ``'-'``,
    stderr for empty/None, otherwise a watched log file.  A second call
    is a no-op once handlers exist."""
    webks_log = logging.getLogger("webks")
    if webks_log.handlers:
        # Already configured — don't stack duplicate handlers.
        return
    if file == '-':
        out = logging.StreamHandler(sys.stdout)
    elif file is None or file == "":
        out = logging.StreamHandler(sys.stderr)
    else:
        out = WatchedFileHandler(file)
    # Time format: Jun 24 10:16:54
    out.setFormatter(logging.Formatter(
        '%(asctime)s %(levelname)s: %(message)s', '%b %2d %H:%M:%S'))
    webks_log.addHandler(out)
    webks_log.setLevel(level)
    webks_log.info("Logging initialized.")
def setup_watched_file_handler(logger=None, level=logging.DEBUG, format=FORMAT,
                               encoding='utf-8', delay=False, directory='.'):
    """You may pass either a name or an existing logger as the first
    argument.  This attaches a WatchedFileHandler to the specified
    logger and returns the logger object.

    The WatchedFileHandler detects when the log file is moved, so it is
    compatible with the logrotate daemon.
    """
    if isinstance(logger, str):
        logger = logging.getLogger(logger)
    # The log file is named "<logger name>.<encoding>.log".
    filename = '.'.join((logger.name, encoding, 'log'))
    handler = WatchedFileHandler(os.path.join(directory, filename),
                                 delay=delay, encoding=encoding)
    if format:
        handler.setFormatter(logging.Formatter(format))
    logger.addHandler(handler)
    logger.setLevel(level)
    return logger