def configurelogging():
    """Attach a rotating-file (INFO) and/or stdout (DEBUG) handler to _LOGGER."""
    _LOGGER.setLevel(logging.DEBUG)
    fmt = logging.Formatter(
        '%(asctime)s - [%(process)d] - %(levelname)s - %(message)s',
        '%Y-%m-%d %H:%M:%S')
    selected = []
    log_level = logging.INFO
    # Only log to file when the log directory is writable.
    if os.access(os.path.dirname(LOG_FILE), os.W_OK):
        # NOTE(review): `M` is presumably a megabytes constant defined
        # elsewhere in this file — confirm.
        selected.append(logging.handlers.RotatingFileHandler(
            LOG_FILE, maxBytes=5 * M, backupCount=5))
    # Dev mode, or no file handler available: log to stdout at DEBUG.
    if os.path.exists(ENABLE_DEV_LOGGING_FILE) or not selected:
        selected.append(logging.StreamHandler(sys.stdout))
        log_level = logging.DEBUG
    # Configure and add all handlers
    for handler in selected:
        handler.setLevel(log_level)
        handler.setFormatter(fmt)
        _LOGGER.addHandler(handler)
    # Die quietly on broken pipes instead of raising.
    signal.signal(signal.SIGPIPE, signal.SIG_DFL)
def setup_logger(log_file=settings.LOG_FILE, level=settings.LOG_LEVEL,
                 stdout=settings.LOG_TO_STDOUT):
    """Configure the root logger with optional rotating-file and console output.

    :param log_file: rotating log file path; falsy disables file logging
    :param level: level applied to the root logger and every handler
    :param stdout: if true, also attach a console StreamHandler
    """
    handlers = []
    error_format = '%(asctime)s %(levelname)s:%(name)s: %(message)s; (%(filename)s:%(lineno)d).'
    formatter = CustomFormatter({logging.CRITICAL: error_format})
    if log_file:
        log_dir = os.path.dirname(log_file)
        # BUG FIX: os.mkdir creates only a single level and raises when
        # log_file has no directory component (dirname == ''); makedirs
        # handles nested paths, and the empty-dir case is skipped entirely.
        if log_dir and not os.path.exists(log_dir):
            os.makedirs(log_dir)
        handlers.append(
            logging.handlers.RotatingFileHandler(log_file, encoding='utf8',
                                                 maxBytes=100000000,
                                                 backupCount=5))
    if stdout:
        handlers.append(logging.StreamHandler())
    root_logger = logging.getLogger()
    root_logger.setLevel(level)
    root_logger.propagate = False
    for h in handlers:
        h.setFormatter(formatter)
        h.setLevel(level)
        root_logger.addHandler(h)
def initLogging(quiet, verbose, log_dir):
    """Initialize logging

    :param boolean quiet: whether to log in console or not
    :param boolean verbose: use DEBUG level for console logging
    :param string log_dir: directory for log files
    """
    root = logging.getLogger()
    root.setLevel(logging.DEBUG)
    if not os.path.exists(log_dir):
        os.makedirs(log_dir)
    file_handler = logging.handlers.RotatingFileHandler(
        os.path.join(log_dir, 'dobby.log'),
        maxBytes=2097152, backupCount=3, encoding='utf-8')
    file_handler.setFormatter(logging.Formatter(
        '%(asctime)s:%(levelname)s:%(name)s:%(message)s',
        datefmt='%m/%d/%Y %H:%M:%S'))
    new_handlers = [file_handler]
    if not quiet:
        console = logging.StreamHandler(sys.stdout)
        console.setFormatter(
            logging.Formatter('%(levelname)-8s:%(name)-32s:%(message)s'))
        console.setLevel(logging.DEBUG if verbose else logging.INFO)
        new_handlers.append(console)
    # Replace whatever handlers the root logger had.
    root.handlers = new_handlers
def init_logging(config):
    """Configure root logging from *config*: rotating file, console, or both."""
    format_simple = '[%(levelname)8s]: %(message)s'
    format_with_ts = '%(asctime)s [%(levelname)8s]: %(message)s'
    log_file = Config.get_str(config, ConfigKey.LOG_FILE)
    log_level = Config.get_loglevel(config, ConfigKey.LOG_LEVEL,
                                    Constant.DEFAULT_LOGLEVEL)
    print_console = Config.get_bool(config, ConfigKey.LOG_PRINT, False)
    handlers = []
    if log_file:
        max_bytes = Config.get_int(config, ConfigKey.LOG_MAX_BYTES,
                                   Constant.DEFAULT_LOG_MAX_BYTES)
        max_count = Config.get_int(config, ConfigKey.LOG_MAX_COUNT,
                                   Constant.DEFAULT_LOG_MAX_COUNT)
        file_handler = logging.handlers.RotatingFileHandler(
            log_file, maxBytes=int(max_bytes), backupCount=int(max_count))
        file_handler.setFormatter(logging.Formatter(format_with_ts))
        handlers.append(file_handler)
    # Console output when requested, or as a fallback when no file is set.
    if print_console or not log_file:
        handlers.append(logging.StreamHandler(sys.stdout))
    logging.basicConfig(
        format=format_with_ts if log_file else format_simple,
        level=log_level,
        handlers=handlers)
def configure_logger(self, detached):
    """ Configure the logging system """
    level_name = self.log_conf['level'].upper()
    # Reject level names that logging does not define.
    if not hasattr(logging, level_name):
        raise mcadminpanel.agent.errors.ConfigurationError(
            'Improperly configured log level: {}'.format(level_name),
        )
    log_level = getattr(logging, level_name)
    file_handler = logging.handlers.TimedRotatingFileHandler(
        self.log_conf['file'], when='midnight',
    )
    file_handler.setLevel(log_level)
    handlers = [file_handler]
    if not detached:
        # Mirror logs to the console only when running in the foreground.
        console = logging.StreamHandler()
        console.setLevel(log_level)
        handlers.append(console)
    logging.basicConfig(
        level=log_level,
        datefmt=self.log_conf['date_format'],
        format=self.log_conf['format'],
        handlers=handlers,
    )
def remove_handlers():
    """Remove root services_logging handlers."""
    # NOTE(review): FileHandler subclasses StreamHandler, so this also drops
    # any file handlers attached to the root logger — confirm that is intended.
    kept = [h for h in logging.root.handlers
            if not isinstance(h, logging.StreamHandler)]
    logging.root.handlers = kept
def get_handlers(app):
    """Return log handlers for *app*: JSON in production, human-readable in debug."""
    standard_formatter = CustomLogFormatter(LOG_FORMAT, TIME_FORMAT)
    json_formatter = JSONFormatter(LOG_FORMAT, TIME_FORMAT)
    stream_handler = logging.StreamHandler(sys.stdout)
    handlers = []
    if not app.debug:
        # machine readable json to stdout only (file logging not needed)
        handlers.append(configure_handler(stream_handler, app, json_formatter))
    else:
        # turn off 200 OK static logs in development
        def is_200_static_log(log):
            msg = log.getMessage()
            return not ('GET /static/' in msg and ' 200 ' in msg)

        logging.getLogger('werkzeug').addFilter(is_200_static_log)
        # human readable stdout logs
        handlers.append(
            configure_handler(stream_handler, app, standard_formatter))
    return handlers
def __init__(self, url, validate_certificate=True):
    """Parameters:

    url: of the HTTP/HTTPS endpoint. Logs will be sent as a POST request.
        url can be of the form http(s)://user:pass@host/q; the request will
        then include the appropriate headers to perform authentication.

    validate_certificate: if False, SSL certificates will not be validated.
        Useful to test the system with "snakeoil" (i.e. self-signed)
        certificates.
    """
    self.url = urllib.parse.urlparse(url)
    user, _pass = self._username, self._password
    self._stripped_url = self._strip_user_pass()
    pw_manager = urllib.request.HTTPPasswordMgrWithDefaultRealm()
    pw_manager.add_password(realm=None, uri=self._stripped_url,
                            user=user, passwd=_pass)
    auth_handler = urllib.request.HTTPBasicAuthHandler(pw_manager)
    handlers = []
    if not validate_certificate:
        # Accept self-signed certificates: disable hostname and cert checks.
        ctx = ssl.create_default_context()
        ctx.check_hostname = False
        ctx.verify_mode = ssl.CERT_NONE
        handlers.append(urllib.request.HTTPSHandler(context=ctx))
    handlers.append(auth_handler)
    self._opener = urllib.request.build_opener(*handlers).open
    super().__init__()
def setup(self):
    """Route all logging through a queue; a QueueListener fans records out."""
    captureWarnings(True)
    # Remove all existing log handlers and install a single QueueHandler.
    self.root_logger = getLogger()
    # BUG FIX: iterate over a copy — removing from the live handler list
    # while iterating it skips every other handler.
    for handler in self.root_logger.handlers[:]:
        self.root_logger.removeHandler(handler)
    self.root_logger.setLevel(DEBUG)
    self.queue = queue.Queue()
    self.queue_handler = logging.handlers.QueueHandler(self.queue)
    self.root_logger.addHandler(self.queue_handler)
    # Make loggers of 3rd party modules less noisy.
    for other in ('quamash', 'vext'):
        getLogger(other).setLevel(WARNING)
    # Create a QueueListener that reads from the queue and sends log
    # records to the actual log handlers.
    handlers = [self.create_stderr_logger()]
    if self.runtime_settings.log_file:
        handlers.append(self.create_file_logger())
    self.queue_listener = logging.handlers.QueueListener(
        self.queue, *handlers, respect_handler_level=True)
    self.queue_listener.start()
def pending_logging():
    # type: () -> Generator
    """Contextmanager to pend logging all logs temporary.

    For example::

    >>> with pending_logging():
    >>>     logger.warning('Warning message!')  # not flushed yet
    >>>     some_long_process()
    >>>
    Warning message!  # the warning is flushed here
    """
    logger = logging.getLogger(NAMESPACE)
    memhandler = MemoryHandler()
    detached = []
    try:
        # Park every existing handler so nothing is emitted while buffering.
        for handler in list(logger.handlers):
            logger.removeHandler(handler)
            detached.append(handler)
        logger.addHandler(memhandler)
        yield memhandler
    finally:
        logger.removeHandler(memhandler)
        for handler in detached:
            logger.addHandler(handler)
        # Replay everything that was buffered.
        memhandler.flushTo(logger)
def __init__(self, name, debug=False, log_dir=None, do_log_name=False):
    """Create a logger whose records are dispatched by a background listener.

    :param name: logger name; also the log file basename when log_dir is set
    :param debug: use DEBUG level instead of INFO
    :param log_dir: if given, additionally log to <log_dir>/<name>.log
    :param do_log_name: include the logger name in each record
    """
    level = logging.DEBUG if debug else logging.INFO
    parts = ["%(asctime)s"]
    if do_log_name:
        parts.append("%(name)s")
    parts.append("%(levelname).1s|%(message)s")
    formatter = logging.Formatter(fmt="|".join(parts), datefmt="%H:%M:%S")
    console = logging.StreamHandler()
    console.setFormatter(formatter)
    sinks = [console]
    if log_dir is not None:
        os.makedirs(log_dir, exist_ok=True)
        file_sink = logging.FileHandler(os.path.join(log_dir, name + ".log"))
        file_sink.setFormatter(formatter)
        sinks.append(file_sink)
    # Bounded queue decouples callers from slow handlers.
    self._queue = queue.Queue(1000)
    self._handler = logging.handlers.QueueHandler(self._queue)
    self._listener = logging.handlers.QueueListener(self._queue, *sinks)
    self._logger = logging.getLogger(name)
    self._logger.setLevel(level)
    self._logger.addHandler(self._handler)
    self._logger.propagate = False
    self._listener.start()
def get_handlers(names, level, reset=True):
    """Resolve *names* into instantiated logging handlers.

    Items may be Handler instances, registered shortcut strings, or
    handler factories taking *level*; shortcut factories are instantiated
    lazily and cached back into the registry.
    """
    if reset:
        reset_handlers()
    handlers = []
    for item in extract_items(names):
        if isinstance(item, logging.Handler):
            resolved = item
        elif isinstance(item, str):
            try:
                resolved = registered_handlers[item]
            except KeyError as e:
                msg = '"{}" unrecognized handler shortcut'.format(item)
                raise KeyError(msg) from e
            if not isinstance(resolved, logging.Handler):
                # Shortcut mapped to a factory: instantiate once and cache.
                resolved = resolved(level)
                registered_handlers[item] = resolved
        elif issubclass(item, logging.Handler):
            raise ValueError('Cannot instantiate from ...Handler class ')
        else:
            resolved = item(level)
        handlers.append(resolved)
    return handlers
def get_logger():
    """Return the package logger, configuring it on first use (singleton)."""
    global logger
    if not logger:
        logger = logging.getLogger(__package__)
        conf = get_conf()['logging']
        handlers = []
        log_dst = conf['log_destination']
        if log_dst in ('both', 'file'):
            log_file = conf['log_file']
            if not log_file:
                raise Exception(
                    "Logging set to '%s' but log_file is not set." % log_dst)
            # Add the log message handler to the logger
            handlers.append(logging.handlers.RotatingFileHandler(
                log_file,
                maxBytes=int(conf['max_bytes']),
                backupCount=int(conf['backup_count'])))
        if log_dst in ('both', 'console'):
            handlers.append(logging.StreamHandler(sys.stdout))
        fmt = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
        for handler in handlers:
            handler.setFormatter(fmt)
            logger.addHandler(handler)
        logger.setLevel(conf['log_level'])
    return logger
def _init_logger(logFilePath):
    """Install DEBUG-level console (and optional rotating-file) logging on root."""
    import logging.handlers
    import sys
    formatter = logging.Formatter(
        fmt="%(asctime)s: %(filename)s:%(lineno)d %(levelname)s:%(name)s: %(message)s",
        datefmt="%Y-%m-%d %H:%M:%S")
    handlers = [logging.StreamHandler()]
    if logFilePath is not None and len(logFilePath.strip()) > 0:
        handlers.append(logging.handlers.RotatingFileHandler(
            logFilePath, encoding='utf-8', maxBytes=1000000, backupCount=1))
    root_logger = logging.getLogger()
    # Default root logger contains a FileHandler that writes with cp1252 codec. Screw that.
    root_logger.handlers = []
    root_logger.setLevel(logging.DEBUG)
    for h in handlers:
        h.setFormatter(formatter)
        h.setLevel(logging.DEBUG)
        root_logger.addHandler(h)
    logging.info("Started logging")
    # Route uncaught exceptions through the logger.
    sys.excepthook = _unhandled_exception
def setup(use_console=True, use_file=False):
    """Configure root DEBUG logging to console and/or a daily-rotating file."""
    handlers = []
    if use_console:
        console = logging.StreamHandler()
        console.setLevel(logging.DEBUG)
        console.setFormatter(logging.Formatter(
            '{name:40} [{levelname:^10}] : {message}', style='{'))
        handlers.append(console)
    if use_file:
        rotating = logging.handlers.TimedRotatingFileHandler(
            LOG_BASE_FILENAME, when='d', interval=1, backupCount=7)
        rotating.setLevel(logging.DEBUG)
        rotating.setFormatter(logging.Formatter(
            '{asctime} {name:40} [{levelname:^10}] : {message}', style='{'))
        handlers.append(rotating)
    logging.basicConfig(level=logging.DEBUG, handlers=handlers)
    # Route warnings.warn() messages into the logging system too.
    logging.captureWarnings(True)
def pending_warnings() -> Generator[logging.Handler, None, None]:
    """Contextmanager to pend logging warnings temporary.

    Similar to :func:`pending_logging`.
    """
    logger = logging.getLogger(NAMESPACE)
    memhandler = MemoryHandler()
    memhandler.setLevel(logging.WARNING)
    detached = []
    try:
        # Detach only the warning stream handlers; all others keep emitting.
        for handler in list(logger.handlers):
            if isinstance(handler, WarningStreamHandler):
                logger.removeHandler(handler)
                detached.append(handler)
        logger.addHandler(memhandler)
        yield memhandler
    finally:
        logger.removeHandler(memhandler)
        for handler in detached:
            logger.addHandler(handler)
        # Replay the buffered warnings through the restored handlers.
        memhandler.flushTo(logger)
def init_logger(self) -> None:
    """Set up rotating-file, console-widget, and (in debug) stream logging."""
    try:
        log_path = self.get_app_config_path()
    except AttributeError:
        # Fall back to a per-platform default config directory.
        if sys.platform == 'win32':
            log_path = os.path.join(QDir.homePath(), 'AppData', 'Local',
                                    qApp.applicationName().lower())
        elif sys.platform == 'darwin':
            log_path = os.path.join(QDir.homePath(), 'Library', 'Preferences',
                                    qApp.applicationName().lower())
        else:
            log_path = os.path.join(QDir.homePath(), '.config',
                                    qApp.applicationName().lower())
    os.makedirs(log_path, exist_ok=True)
    self.console = ConsoleWidget(self)
    # BUG FIX: self.consoleLogger is used in the handler list below, but its
    # assignment was commented out, causing an AttributeError.
    self.consoleLogger = ConsoleHandler(self.console)
    handlers = [
        logging.handlers.RotatingFileHandler(
            os.path.join(log_path,
                         '%s.log' % qApp.applicationName().lower()),
            maxBytes=1000000, backupCount=1),
        self.consoleLogger
    ]
    if self.parser.isSet(self.debug_option) or self.verboseLogs:
        # noinspection PyTypeChecker
        handlers.append(logging.StreamHandler())
def init_logger(self) -> None:
    """Configure file, console-widget, and optional stream logging, plus
    a custom logger class and an excepthook that logs uncaught errors."""
    try:
        log_path = QStandardPaths.writableLocation(
            QStandardPaths.AppConfigLocation).replace(
                qApp.applicationName(), qApp.applicationName().lower())
    except AttributeError:
        # Fallback when AppConfigLocation is unavailable.
        home = QDir.homePath()
        app = qApp.applicationName().lower()
        if sys.platform == 'win32':
            log_path = os.path.join(home, 'AppData', 'Local', app)
        elif sys.platform == 'darwin':
            log_path = os.path.join(home, 'Library', 'Preferences', app)
        else:
            log_path = os.path.join(home, '.config', app)
    os.makedirs(log_path, exist_ok=True)
    self.console = ConsoleWidget(self)
    self.consoleLogger = ConsoleHandler(self.console)
    log_file = os.path.join(log_path,
                            '%s.log' % qApp.applicationName().lower())
    handlers = [
        logging.handlers.RotatingFileHandler(log_file, maxBytes=1000000,
                                             backupCount=1),
        self.consoleLogger,
    ]
    if self.parser.isSet(self.debug_option) or self.verboseLogs:
        # noinspection PyTypeChecker
        handlers.append(logging.StreamHandler())
    logging.setLoggerClass(VideoLogger)
    logging.basicConfig(handlers=handlers,
                        format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
                        datefmt='%Y-%m-%d %H:%M',
                        level=logging.INFO)
    logging.captureWarnings(capture=True)
    sys.excepthook = MainWindow.log_uncaught_exceptions
def suppress_logging() -> Generator[MemoryHandler, None, None]:
    """Contextmanager to suppress logging all logs temporary.

    For example::

    >>> with suppress_logging():
    >>>     logger.warning('Warning message!')  # suppressed
    >>>     some_long_process()
    >>>
    """
    logger = logging.getLogger(NAMESPACE)
    memhandler = MemoryHandler()
    detached = []
    try:
        # Park all handlers; records land in the memory handler instead.
        for handler in list(logger.handlers):
            logger.removeHandler(handler)
            detached.append(handler)
        logger.addHandler(memhandler)
        yield memhandler
    finally:
        # Buffered records are deliberately NOT flushed — they are dropped.
        logger.removeHandler(memhandler)
        for handler in detached:
            logger.addHandler(handler)
def _extract_handlers(site_config, handlers_group):
    """Instantiate every handler configured under ``logging/0/<handlers_group>``."""
    handler_prefix = f'logging/0/{handlers_group}'
    factories = {
        'file': _create_file_handler,
        'filelog': _create_filelog_handler,
        'syslog': _create_syslog_handler,
        'stream': _create_stream_handler,
    }
    handlers = []
    for i, handler_config in enumerate(site_config.get(handler_prefix)):
        handler_type = handler_config['type']
        if handler_type == 'graylog':
            # Graylog is the only handler allowed to fail initialization.
            hdlr = _create_graylog_handler(site_config,
                                           f'{handler_prefix}/{i}')
            if hdlr is None:
                getlogger().warning('could not initialize the '
                                    'graylog handler; ignoring ...')
                continue
        elif handler_type in factories:
            hdlr = factories[handler_type](site_config,
                                           f'{handler_prefix}/{i}')
        else:
            # Should not enter here
            raise AssertionError(f"unknown handler type: {handler_type}")
        level = site_config.get(f'{handler_prefix}/{i}/level')
        fmt = site_config.get(f'{handler_prefix}/{i}/format')
        datefmt = site_config.get(f'{handler_prefix}/{i}/datefmt')
        hdlr.setFormatter(RFC3339Formatter(fmt=fmt, datefmt=datefmt))
        hdlr.setLevel(_check_level(level))
        handlers.append(hdlr)
    return handlers
def _extract_handlers(handlers_dict):
    """Create log handlers from a ``{filename: options}`` mapping.

    ``'&1'``/``'&2'`` map to stdout/stderr stream handlers; any other key
    becomes a rotating file handler (optionally timestamped; append or
    truncate per the ``append`` option).

    :raises ConfigurationError: if the mapping is empty or an entry is not
        itself a mapping.
    """
    if not handlers_dict:
        raise ConfigurationError('no handlers are defined for logger')
    handlers = []
    for filename, handler_config in handlers_dict.items():
        if not isinstance(handler_config, collections.abc.Mapping):
            raise ConfigurationError('handler %s is not a dictionary'
                                     % filename)
        level = handler_config.get('level', 'debug').lower()
        fmt = handler_config.get('format', '%(message)s')
        datefmt = handler_config.get('datefmt', '%FT%T')
        append = handler_config.get('append', False)
        timestamp = handler_config.get('timestamp', None)
        if filename == '&1':
            hdlr = StreamHandler(stream=sys.stdout)
        elif filename == '&2':
            hdlr = StreamHandler(stream=sys.stderr)
        else:
            if timestamp:
                # Inject a formatted timestamp before the file extension.
                basename, ext = os.path.splitext(filename)
                filename = '%s_%s%s' % (
                    basename, datetime.now().strftime(timestamp), ext)
            hdlr = RotatingFileHandler(filename,
                                       mode='a+' if append else 'w+')
        hdlr.setFormatter(logging.Formatter(fmt=fmt, datefmt=datefmt))
        # BUG FIX: Handler.setLevel() accepts only upper-case level names;
        # the lower-cased config value raised "Unknown level" ValueError.
        hdlr.setLevel(level.upper())
        handlers.append(hdlr)
    return handlers
def setup(name=None, level='info', short=False, pprint=False, format=None):
    """Configure root logging with a trace-file handler plus a stream handler.

    :param name: used to derive the trace file path
    :param level: 'debug' or 'info' for the stream handler
    :param short: use the short message format
    :param pprint: pretty-print records in the stream handler
    :param format: explicit format string; derived from *short* when None
    """
    level = _get_level(level)
    assert level in ('debug', 'info')
    short = _get_short(short)
    format = _get_format(format, short)
    _add_trace_level()
    handlers = []
    # add trace file handler
    path = _get_debug_path(name)
    s.shell.cron_rm_path_later(path, hours=24)
    trace_handler = logging.handlers.WatchedFileHandler(path)
    handlers.append(
        _make_handler(trace_handler, 'trace', '%(message)s', False,
                      _TraceOnly))
    # add the stream handler
    handlers.append(
        _make_handler(logging.StreamHandler(), level, format, pprint,
                      _NotTrace))
    # rm all root handlers
    # BUG FIX: iterate a copy — the original list comprehension removed
    # handlers while iterating the live list, skipping every other one.
    for existing in logging.root.handlers[:]:
        logging.root.removeHandler(existing)
    for h in handlers:
        logging.root.addHandler(h)
    logging.root.setLevel('TRACE')
def get_logger(name):
    """Return *name*'s logger with console and file handlers at LOG_LEVEL.

    The file handler writes to <repo root>/out/<name>.log, which must exist.
    """
    # Config from https://docs.python-guide.org/writing/logging/
    logger = logging.getLogger(name)
    level = getattr(logging, LOG_LEVEL)
    formatter = logging.Formatter(
        "%(asctime)s %(name)s %(levelname)s: %(message)s")
    console_handler = logging.StreamHandler()
    console_handler.setFormatter(formatter)
    console_handler.setLevel(level)
    root_dir = Path(__file__).absolute().parent.parent
    out = root_dir / "out" / f"{name}.log"
    assert out.parent.exists()
    file_handler = logging.FileHandler(str(out))
    file_handler.setLevel(level)
    file_handler.setFormatter(formatter)
    for handler in (console_handler, file_handler):
        logger.addHandler(handler)
    logging.getLogger("fastapi").setLevel(level)
    return logger
def init_logger(self) -> None:
    """Set up rotating-file logging (plus console when the DEBUG env var is set)."""
    try:
        log_path = QStandardPaths.writableLocation(
            QStandardPaths.AppConfigLocation).lower()
    except AttributeError:
        # Fallback when AppConfigLocation is unavailable.
        home = QDir.homePath()
        if sys.platform == 'win32':
            log_path = os.path.join(home, 'AppData', 'Local',
                                    qApp.applicationName().lower())
        elif sys.platform == 'darwin':
            log_path = os.path.join(home, 'Library', 'Preferences',
                                    qApp.applicationName()).lower()
        else:
            log_path = os.path.join(home, '.config',
                                    qApp.applicationName()).lower()
    os.makedirs(log_path, exist_ok=True)
    handlers = [logging.handlers.RotatingFileHandler(
        os.path.join(log_path, '%s.log' % qApp.applicationName().lower()),
        maxBytes=1000000, backupCount=1)]
    if os.getenv('DEBUG', False):
        handlers.append(logging.StreamHandler())
    logging.basicConfig(
        handlers=handlers,
        format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
        datefmt='%Y-%m-%d %H:%M',
        level=logging.INFO)
    logging.captureWarnings(capture=True)
    sys.excepthook = self.log_uncaught_exceptions
def init_logging(logFile: str = './log.log', level: int = logging.INFO,
                 formatter: str = _default_format,
                 isAddStreamHandler: bool = True):
    """Initialise process-wide logging exactly once.

    :param logFile: log file path; empty/None disables file logging
    :param level: logging level applied to handlers and root
    :param formatter: %-style format string
    :param isAddStreamHandler: add a console handler (forced when no file)
    """
    global __logger
    if __logger:
        return  # already initialised
    __logger = True
    # BUG FIX: setFormatter() requires a Formatter instance; the raw format
    # string was previously passed through unchanged.
    fmt = logging.Formatter(formatter)
    handlers = []
    if isAddStreamHandler or str_utils.is_null_or_empty(logFile):
        consoleLog = logging.StreamHandler()
        consoleLog.setFormatter(fmt)
        consoleLog.setLevel(level)
        handlers.append(consoleLog)
    if not str_utils.is_null_or_empty(logFile):
        fileLog = logging.handlers.TimedRotatingFileHandler(
            filename=logFile, when='d', interval=1, backupCount=0,
            encoding='utf-8', delay=False, utc=False)
        fileLog.setFormatter(fmt)
        fileLog.setLevel(level)
        # BUG FIX: the file handler was created but never registered.
        handlers.append(fileLog)
    logging.basicConfig(handlers=handlers, level=level, format=formatter)
def initLogging(quiet, verbose, log_dir):
    """Initialize logging

    :param boolean quiet: whether to log in console or not
    :param boolean verbose: use DEBUG level for console logging
    :param string log_dir: directory for log files
    """
    root = logging.getLogger()
    root.setLevel(logging.DEBUG)
    if not os.path.exists(log_dir):
        os.makedirs(log_dir)
    # File output always rotates at 2 MiB, keeping 3 backups.
    file_handler = logging.handlers.RotatingFileHandler(
        os.path.join(log_dir, 'dobby.log'),
        maxBytes=2097152, backupCount=3, encoding='utf-8')
    file_handler.setFormatter(logging.Formatter(
        '%(asctime)s:%(levelname)s:%(name)s:%(message)s',
        datefmt='%m/%d/%Y %H:%M:%S'))
    configured = [file_handler]
    if not quiet:
        console = logging.StreamHandler(sys.stdout)
        console.setFormatter(
            logging.Formatter('%(levelname)-8s:%(name)-32s:%(message)s'))
        console.setLevel(logging.DEBUG if verbose else logging.INFO)
        configured.append(console)
    root.handlers = configured
def pending_warnings():
    # type: () -> Generator
    """Contextmanager to pend logging warnings temporary.

    Similar to :func:`pending_logging`.
    """
    logger = logging.getLogger(NAMESPACE)
    memhandler = MemoryHandler()
    memhandler.setLevel(logging.WARNING)
    detached = []
    try:
        # Only the warning stream handlers are parked; others keep emitting.
        for handler in list(logger.handlers):
            if isinstance(handler, WarningStreamHandler):
                logger.removeHandler(handler)
                detached.append(handler)
        logger.addHandler(memhandler)
        yield memhandler
    finally:
        logger.removeHandler(memhandler)
        for handler in detached:
            logger.addHandler(handler)
        # Replay the buffered warnings through the restored handlers.
        memhandler.flushTo(logger)
def init_logging(log_base=None, file_level=logging.DEBUG,
                 console_level=logging.NOTSET):
    """ Configure logging for components (servers, routers, gateways). """
    default_formatter = Rfc3339Formatter(
        "%(asctime)s [%(levelname)s] (%(threadName)s) %(message)s")
    global _dispatch_formatter
    _dispatch_formatter = DispatchFormatter(default_formatter)
    handlers = []

    def _register(handler, lvl):
        # Shared per-handler configuration.
        handler.setLevel(lvl)
        handler.setFormatter(_dispatch_formatter)
        handlers.append(handler)

    if log_base:
        # One rotating file per level at or above file_level.
        for lvl in sorted(logging._levelToName):
            if lvl < file_level:
                continue
            log_file = "%s.%s" % (log_base, logging._levelToName[lvl])
            _register(_RotatingErrorHandler(log_file,
                                            maxBytes=LOG_MAX_SIZE,
                                            backupCount=LOG_BACKUP_COUNT,
                                            encoding="utf-8"), lvl)
    if console_level:
        _register(_ConsoleErrorHandler(), console_level)
    # Use logging.DEBUG here, so that the handlers themselves can decide what
    # to filter.
    logging.basicConfig(level=logging.DEBUG, handlers=handlers)
def configure_logging():
    """Attach a 5 MiB rotating-file (INFO) and/or stdout (DEBUG) handler to _LOGGER."""
    _LOGGER.setLevel(logging.DEBUG)
    record_format = logging.Formatter(
        '%(asctime)s - [%(process)d] - %(levelname)s - %(message)s',
        '%Y-%m-%d %H:%M:%S')
    chosen = []
    level = logging.INFO
    # File logging only when the log directory is writable.
    if os.access(os.path.dirname(LOG_FILE), os.W_OK):
        chosen.append(logging.handlers.RotatingFileHandler(
            LOG_FILE, maxBytes=5 * 1024 * 1024, backupCount=5))
    # Dev-mode marker file, or no file handler: log to stdout at DEBUG.
    if os.path.exists(ENABLE_DEV_LOGGING_FILE) or not chosen:
        chosen.append(logging.StreamHandler(sys.stdout))
        level = logging.DEBUG
    # Configure and add all handlers
    for handler in chosen:
        handler.setLevel(level)
        handler.setFormatter(record_format)
        _LOGGER.addHandler(handler)
    # Die quietly on broken pipes instead of raising.
    signal.signal(signal.SIGPIPE, signal.SIG_DFL)
def get_logger():
    """Lazily build and return the package-level logger (singleton)."""
    global logger
    if logger:
        return logger
    logger = logging.getLogger(__package__)
    handlers = []
    log_dst = get_conf()['logging']['log_destination']
    if log_dst in ('both', 'file'):
        log_file = get_conf()['logging']['log_file']
        if not log_file:
            raise Exception("Logging set to '%s' but log_file is not set."
                            % log_dst)
        # Add the log message handler to the logger
        max_bytes = int(get_conf()['logging']['max_bytes'])
        backup_count = int(get_conf()['logging']['backup_count'])
        handlers.append(logging.handlers.RotatingFileHandler(
            log_file, maxBytes=max_bytes, backupCount=backup_count))
    if log_dst in ('both', 'console'):
        handlers.append(logging.StreamHandler(sys.stdout))
    fmt = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
    for handler in handlers:
        handler.setFormatter(fmt)
        logger.addHandler(handler)
    logger.setLevel(get_conf()['logging']['log_level'])
    return logger
def _close_logger(self):
    """Close every handler on self.logger, detach them, and drop the logger."""
    if self.logger:
        closed = []
        for handler in self.logger.handlers:
            if hasattr(handler, "close") and callable(handler.close):
                handler.close()
                closed.append(handler)
        # BUG FIX: map() is lazy in Python 3, so the original
        # map(self.logger.removeHandler, handlers) never ran.
        for handler in closed:
            self.logger.removeHandler(handler)
        self.logger = None
def __init__(self, db: Optional[str],
             modules: List[Union[base.Module, base.ModuleGroup]]) -> None:
    # Sort modules into connections/observers/handlers, validate the DB
    # schema, and wire up command dispatch, the event queue, and the
    # (optional) web server.
    modules = _flatten(modules)
    connections = []
    observers = []
    handlers = []
    for m in modules:
        # A module may play several roles at once, hence separate `if`s
        # rather than elif; anything matching none of them is rejected.
        if isinstance(m, base.Connection):
            connections.append(m)
        if isinstance(m, base.Observer):
            observers.append(m)
        if isinstance(m, base.Handler):
            handlers.append(m)
        if not isinstance(m, (base.Connection, base.Observer, base.Handler)):
            raise TypeError(
                f'{type(m).__name__} is not a Connection, Observer, or Handler.'
            )
    self.connections = connections
    self.observers = observers
    # Check for duplicate commands.
    commands: Dict[str, base.Handler[Any]] = {}
    for handler in handlers:
        for command in getattr(handler, 'commands', []):
            if command in commands:
                raise ValueError(
                    f"Both {type(commands[command])} and {type(handler)} register '{command}'."
                )
            commands[command] = handler
    if db is not None:
        data.startup(db)
        bot_data = data.Namespace('impbot.core.bot.Bot')
        db_version = int(bot_data.get('schema_version'))
        # Refuse to run against a database with a mismatched schema version.
        if db_version != data.SCHEMA_VERSION:
            logger.critical(
                f'Impbot is at schema version {data.SCHEMA_VERSION}, database is '
                f'at {db_version}')
            sys.exit(1)
    # The lambda handler always comes first, ahead of user handlers.
    self.handlers: List[base.Handler[Any]] = [lambda_event.LambdaHandler()]
    self.handlers.extend(handlers)
    self._queue: queue.Queue[base.Event] = queue.Queue()
    # If any connection is a web server, register routes on the first one.
    ws = [c for c in connections if isinstance(c, web.WebServerConnection)]
    if ws:
        self.web: Optional[web.WebServerConnection] = ws[0]
        self.web.init_routes(self.connections, self.handlers)
    else:
        self.web = None
    # Initialize the handler thread here, but we'll start it in main().
    self._handler_thread = threading.Thread(name='Event handler',
                                            target=self.handle_queue)
def open(self):
    # Open self.http_url (optionally through self.proxy_url with proxy
    # auth) and return "OK" on success, or the stack trace text on failure.
    # (Comments translated from Japanese.)
    # URL connection handling
    try:
        handlers = []
        if self.proxy_url:
            proxy = urlparse.urlsplit(self.proxy_url)
            proxy_scheme = proxy.scheme
            proxy_username = proxy.username
            proxy_password = proxy.password
            proxy_hostname = proxy.hostname
            proxy_port = proxy.port
            if not proxy_port:
                # NOTE(review): default_proxy_port is presumably already a
                # ":port" string, since the else-branch prepends ":" — confirm.
                proxy_port = self.default_proxy_port
            else:
                proxy_port = ":" + str(proxy_port)
            proxy_handler = urllib2.ProxyHandler({proxy_scheme: proxy_scheme + "://" + proxy_hostname + proxy_port})
            handlers.append(proxy_handler)
            if proxy_hostname and proxy_username and proxy_password:
                # Register both basic and digest auth for the proxy.
                proxy_auth_pwmgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
                proxy_auth_pwmgr.add_password(None, proxy_hostname, proxy_username, proxy_password)
                proxy_basic_auth_handler = urllib2.ProxyBasicAuthHandler(proxy_auth_pwmgr)
                proxy_digest_auth_handler = urllib2.ProxyDigestAuthHandler(proxy_auth_pwmgr)
                handlers.append(proxy_basic_auth_handler)
                handlers.append(proxy_digest_auth_handler)
        http = urlparse.urlsplit(self.http_url)
        http_scheme = http.scheme
        http_username = http.username
        http_password = http.password
        http_hostname = http.hostname
        http_port = http.port
        if not http_port:
            http_port = self.default_http_port
        else:
            http_port = ":" + str(http_port)
        if http_hostname and http_username and http_password:
            # Register both basic and digest auth for the endpoint itself.
            http_auth_pwmgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
            http_auth_pwmgr.add_password(None, http_hostname, http_username, http_password)
            http_basic_auth_handler = urllib2.HTTPBasicAuthHandler(http_auth_pwmgr)
            http_digest_auth_handler = urllib2.HTTPDigestAuthHandler(http_auth_pwmgr)
            handlers.append(http_basic_auth_handler)
            handlers.append(http_digest_auth_handler)
        opener = urllib2.build_opener()
        for handler in handlers:
            opener.add_handler(handler)
        urllib2.install_opener(opener)
        # NOTE(review): the URL is rebuilt by plain concatenation — query and
        # fragment are appended without '?' / '#' separators; confirm whether
        # urlparse.urlunsplit() was intended instead.
        fh = urllib2.urlopen(http_scheme + "://" + http_hostname + http_port + http.path + http.query + http.fragment, timeout=self.timeout)
        data = fh.read()
        self.logger.debug("url open %s - %s, data size = %d" % (self.proxy_url, self.http_url, len(data)))
        # Return OK if nothing went wrong
        return "OK"
    except:
        trace = traceback.format_exception(sys.exc_info()[0], sys.exc_info()[1], sys.exc_info()[2])
        self.logger.info("failed in open url");
        self.logger.info("%s" % (trace));
        # On failure return something other than OK; the stack trace makes
        # the problem easy to understand, so return that.
        return " ".join(trace)
def _close_logger(self):
    # type: () -> None
    """Close every handler on self.logger, detach them, and drop the logger."""
    if self.logger:
        closed = []
        for handler in self.logger.handlers:
            if hasattr(handler, 'close') and callable(handler.close):
                handler.close()
                closed.append(handler)
        # BUG FIX: map() is lazy in Python 3, so the original
        # map(self.logger.removeHandler, handlers) never ran.
        for handler in closed:
            self.logger.removeHandler(handler)
        self.logger = None
def __init__(self, app_name, init=False):
    """Set up console (and, on Linux, syslog) logging for *app_name*.

    :param app_name: name used for the application logger
    :param init: when false, stop after logger setup
    """
    self.app_name = app_name
    handlers = [logging.StreamHandler()]
    # BUG FIX: sys.platform is 'linux' on Python 3 ('linux2' was Python 2),
    # so the syslog handler was never added; this file already relies on the
    # Python-3-only basicConfig(handlers=...) parameter.
    if sys.platform.startswith('linux'):
        handlers.append(logging.handlers.SysLogHandler(address='/dev/log'))
    logging.basicConfig(level=logging.DEBUG,
                        format='%(asctime)s %(name)s: %(message)s',
                        datefmt='%b %d %H:%M:%S',
                        handlers=handlers)
    self.logger = logging.getLogger(app_name)
    if not init:
        return
def configure(*args, **kwargs):
    """
    Configure logging.  Borrowed from logging.basicConfig

    Uses the IndentFormatter instead of the regular Formatter

    Also, opts the caller into Syslog output, unless syslog could not be
    opened for some reason or another, in which case a warning will be
    printed to the other log handlers.
    """
    # NOTE(review): Python 2 code (print >> and "except X, e" syntax).
    # `level` is collected but not applied within this visible span — the
    # function may continue beyond what is shown here; confirm.
    level = kwargs.setdefault('level', logging.INFO)
    handlers = []
    # Add stderr output.
    handlers.append(logging.StreamHandler())

    def terrible_log_output(s):
        # Last-resort output when the logging system itself is unusable.
        import sys
        print >>sys.stderr, s

    # Candidate syslog socket paths, probed in order.
    places = [
        # Linux
        '/dev/log',
        # FreeBSD
        '/var/run/log',
        # Macintosh
        '/var/run/syslog',
    ]
    default_syslog_address = places[0]
    for p in places:
        if path.exists(p):
            default_syslog_address = p
            break
    syslog_address = kwargs.setdefault('syslog_address',
                                       default_syslog_address)
    try:
        # Add syslog output.
        handlers.append(logging.handlers.SysLogHandler(syslog_address))
    except EnvironmentError, e:
        # Missing socket, no permission, or nothing listening: warn and
        # continue with the remaining handlers.
        if e.errno in [errno.ENOENT, errno.EACCES, errno.ECONNREFUSED]:
            message = ('wal-e: Could not set up syslog, '
                       'continuing anyway. '
                       'Reason: {0}').format(errno.errorcode[e.errno])
            terrible_log_output(message)
def load_module(module_path, settings):
    # Scan *module_path* for .py files and collect Tornado URL specs
    # (url, RequestHandler subclass, settings) from each module.
    # NOTE(review): Python 2 code ("except TypeError, e"); `handlers` is
    # built but never returned in this visible span — likely a missing
    # `return handlers` or a truncated chunk; confirm.
    handlers = []
    modules = [(x[:-3], os.path.join(module_path, x))
               for x in os.listdir(module_path) if x.endswith('.py')]
    for name, path in modules:
        m = imp.load_source(name, path)
        for x in dir(m):
            try:
                # Keep only RequestHandler subclasses exposed by the module.
                if issubclass(getattr(m, x), tornado.web.RequestHandler):
                    handlers.append((m.url, getattr(m, x), settings))
            except TypeError, e:
                # issubclass raises for non-class attributes; skip them.
                pass
def __init__(self, sinks: list, context: str = ""):
    """Attach one queue-backed handler per sink and start its listener.

    :param sinks: logging handlers that consume records in the background
    :param context: logger name for self.logger
    """
    self.context = context
    self.logger = logging.getLogger(context)
    queue_handlers = []
    for sink in sinks:
        # Unbounded queue between producers and this sink's listener thread.
        records = queue.Queue(-1)
        handler = logging.handlers.QueueHandler(records)
        listener = logging.handlers.QueueListener(records, sink)
        listener.start()
        queue_handlers.append(handler)
    logging.basicConfig(handlers=queue_handlers)
def getLogger(args=None, stderr=None, logfile=None, name=None):
    '''to log to stderr set stderr = a level from logging
    to log to ../log/foo.log set logfile = a level from logging and set name = "foo"'''
    if name == None:
        # Get the filename of the caller
        _, filename, _, _, _, _ = inspect.getouterframes(
            inspect.currentframe())[1]
        name = os.path.basename(filename)
    logger = logging.getLogger(name)
    # for some reason the expression 'args == None' causes a TypeError
    # (motivating isinstance)
    if stderr == None and logfile == None \
            and not isinstance(args, argparse.Namespace):
        raise ValueError("You must set at least one of stderr or logfile to a logging level (or args)")
    if (stderr != None or logfile != None) \
            and isinstance(args, argparse.Namespace):
        raise ValueError("You must set (stderr and/or logfile) or args, but not both")
    if isinstance(args, argparse.Namespace):
        stderr = loggingMap[args.stderr]
        logfile = loggingMap[args.logfile]
    handlers = []
    if stderr != None:
        console = logging.StreamHandler(sys.stderr)
        console.setLevel(stderr)
        console.setFormatter(FORMATTER_STDERR)
        handlers.append(console)
    if logfile != None:
        filename = os.path.join(LOGDIR, name) + ".log"
        rotating = logging.handlers.RotatingFileHandler(
            filename, mode='a', maxBytes=MAX_LOGFILE_BYTES,
            backupCount=BACKUP_COUNT)
        rotating.setLevel(logfile)
        rotating.setFormatter(FORMATTER_LOGFILE)
        handlers.append(rotating)
    # The logger itself must pass everything the most verbose handler wants.
    min_level = min([handler.level for handler in handlers])
    for handler in handlers:
        logger.addHandler(handler)
    logger.setLevel(min_level)
    logger.debug("New logger instance")
    if logfile != None:
        logger.debug("Recording logs in %s" % filename)
    func = lambda typ, value, traceback: uncaughtException(
        logger, typ, value, traceback)
    sys.excepthook = func
    return logger
def configure_guts(*args, **kwargs):
    """
    Borrowed from logging.basicConfig.

    Uses the IndentFormatter instead of the regular Formatter.
    NOTE(review): syslog output is currently disabled; ``syslog_address``
    is still injected into kwargs for any caller that inspects it.
    """
    syslog_address = kwargs.setdefault('syslog_address', '/dev/log')
    handlers = []
    # Mirror basicConfig: only act on a pristine root logger.
    if len(logging.root.handlers) == 0:
        filename = kwargs.get("filename")
        if filename:
            mode = kwargs.get("filemode", 'a')
            handlers.append(logging.FileHandler(filename, mode))
        else:
            stream = kwargs.get("stream")
            handlers.append(logging.StreamHandler(stream))
        # TODO(review): syslog support was commented out (Python-2-era
        # except syntax). To restore it, append
        # logging.handlers.SysLogHandler(syslog_address) and silently skip
        # EnvironmentError with errno.ENOENT (socket missing).
        fs = kwargs.get("format", logging.BASIC_FORMAT)
        dfs = kwargs.get("datefmt", None)
        fmt = IndentFormatter(fs, dfs)
        for handler in handlers:
            handler.setFormatter(fmt)
            logging.root.addHandler(handler)
        level = kwargs.get("level")
        if level is not None:
            logging.root.setLevel(level)
def main():
    """Run a TCP log-record receiver, echoing records to console and a rotating file."""
    formatter = server()
    stream_handler = logging.StreamHandler()
    stream_handler.setFormatter(formatter)
    file_handler = logging.handlers.RotatingFileHandler(
        '/Users/balsamo/LDR_Logs/log.txt',
        maxBytes=1000000000,
        backupCount=5
    )
    file_handler.setFormatter(formatter)
    logging.basicConfig(handlers=[stream_handler, file_handler])
    tcpserver = LogRecordSocketReceiver()
    print('About to start TCP server...')
    tcpserver.serve_until_stopped()
def configure_logging():
    """Attach a syslog handler (and optionally stderr) to the module logger."""
    _LOGGER.setLevel(LOG_LEVEL)
    formatter = logging.Formatter(
        "SMAPIv3: [%(process)d] - %(levelname)s - %(message)s")
    # Syslog is always on; stderr mirroring is opt-in.
    sinks = [logging.handlers.SysLogHandler(address="/dev/log",
                                            facility=LOG_SYSLOG_FACILITY)]
    if LOG_TO_STDERR:
        sinks.append(logging.StreamHandler(sys.stderr))
    for sink in sinks:
        sink.setLevel(LOG_LEVEL)
        sink.setFormatter(formatter)
        _LOGGER.addHandler(sink)
def initialize_logging(app):
    """Configure the root logger from an app's config mapping.

    Uses a WatchedFileHandler at config["LOG_PATH"]; if the file cannot
    be opened, falls back to a StreamHandler and logs a warning with the
    original failure attached.

    :param app: object with a dict-like .config providing LOG_FORMAT,
        LOG_DATE_FORMAT, LOG_PATH and LOG_LEVEL
    """
    config = app.config
    formatter = logging.Formatter(fmt=config["LOG_FORMAT"],
                                  datefmt=config["LOG_DATE_FORMAT"])
    root_logger = logging.getLogger()
    exc_info = None
    try:
        handlers = [logging.handlers.WatchedFileHandler(config["LOG_PATH"])]
    except Exception:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # are no longer swallowed; fall back to stderr best-effort.
        handlers = [logging.StreamHandler()]
        exc_info = sys.exc_info()
    root_logger.setLevel(config["LOG_LEVEL"])
    # Iterate over a copy: removing while iterating the live handler
    # list skips every other handler.
    for handler in root_logger.handlers[:]:
        root_logger.removeHandler(handler)
    for handler in handlers:
        handler.setFormatter(formatter)
        root_logger.addHandler(handler)
    if exc_info is not None:
        logger = logging.getLogger("slickqaweb.logging.initialize_logging")
        logger.warning("Unable to write to log file %s: ",
                       config["LOG_PATH"], exc_info=exc_info)
def configure(config):
    '''Reconfigure the logger once the full configuration is available.'''
    logger = get()
    logger.removeHandler(_TEMP_HANDLER)
    # Build the real handler list from configuration.
    new_handlers = []
    if 'logging-std' in config:
        std_handler = logging.StreamHandler()
        level_name = config['logging-std'].get('level', 'DEBUG')
        std_handler.setLevel(_LEVELS[level_name])
        new_handlers.append(std_handler)
    _TEMP_HANDLER.setTargets(new_handlers)
    for std_handler in new_handlers:
        std_handler.setFormatter(_FORMATTER)
        logger.addHandler(std_handler)
    # Replay messages buffered before configuration was known.
    _TEMP_HANDLER.flushAll()
def pending_logging():
    # type: () -> Generator
    """contextmanager to pend logging all logs temporary."""
    logger = logging.getLogger()
    memhandler = MemoryHandler()
    try:
        # Detach every existing handler, remembering them for restoration.
        saved = []
        for handler in list(logger.handlers):
            logger.removeHandler(handler)
            saved.append(handler)
        logger.addHandler(memhandler)
        yield memhandler
    finally:
        logger.removeHandler(memhandler)
        for handler in saved:
            logger.addHandler(handler)
        # Replay the buffered records through the restored handlers.
        memhandler.flushTo(logger)
def setup_logging():
    """Sets up logging in a syslog format by log level.

    Reads the module-level OPTION_GROUP (OptionParser results) for
    loglevel/syslog/logfile settings and installs matching handlers on
    the root logger, replacing any existing ones.
    """
    stderr_log_format = "%(levelname) -8s %(asctime)s %(funcName)s line:%(lineno)d: %(message)s"
    file_log_format = "%(asctime)s - %(levelname)s - %(message)s"
    syslog_format = "%(processName)s[%(process)d] - %(message)s"
    LOGGER.setLevel(level=OPTION_GROUP.loglevel)
    handlers = []
    if OPTION_GROUP.syslog:
        handlers.append(
            logging.handlers.SysLogHandler(facility=OPTION_GROUP.syslog,
                                           address='/dev/log'))
        # Minimal format on purpose: syslogd adds timestamp and level.
        handlers[-1].setFormatter(logging.Formatter(syslog_format))
    if OPTION_GROUP.logfile:
        handlers.append(logging.FileHandler(OPTION_GROUP.logfile))
        # BUG FIX: was handlers[0], which clobbered the syslog handler's
        # formatter and left the file handler unformatted when both
        # syslog and logfile were enabled.
        handlers[-1].setFormatter(logging.Formatter(file_log_format))
    if not handlers:
        handlers.append(logging.StreamHandler())
        handlers[-1].setFormatter(logging.Formatter(stderr_log_format))
    # Remove all the old handler(s); iterate a copy because removing
    # while iterating the live list skips every other handler.
    for handler in logging.root.handlers[:]:
        logging.root.removeHandler(handler)
    # Add our new handler(s) back in
    for handler in handlers:
        logging.root.addHandler(handler)
    return
def init_log(args):
    """Configure root logging: console, optional timed-rotating file, forwarder.

    Returns the LogForwarder instance so the caller can wire it up.
    """
    root = logging.getLogger()
    root.setLevel(logging.NOTSET)  # we use our custom filter only
    threshold = logging.WARNING + args.quiet * 10 - args.verbose * 10
    source_filter = SourceFilter(threshold, "master")
    console = logging.StreamHandler()
    console.setFormatter(
        logging.Formatter("%(levelname)s:%(source)s:%(name)s:%(message)s"))
    installed = [console]
    if args.log_file:
        rotating = logging.handlers.TimedRotatingFileHandler(
            args.log_file, when="midnight", backupCount=args.log_backup_count)
        rotating.setFormatter(logging.Formatter(
            "%(asctime)s %(levelname)s:%(source)s:%(name)s:%(message)s"))
        installed.append(rotating)
    log_forwarder = LogForwarder()
    installed.append(log_forwarder)
    for handler in installed:
        handler.addFilter(source_filter)
        root.addHandler(handler)
    return log_forwarder
def main():
    '''Main. Parse cmdline, read config etc.'''
    args = arg_parse()
    config = RawConfigParser()
    config.read(args.config)
    # Single-argument parenthesized print is valid Python 2 AND 3
    # (the original `print "..."` statement is a SyntaxError on py3).
    print("Setting timezone to UTC")
    os.environ["TZ"] = "UTC"
    time.tzset()
    handlers = []
    if args.log:
        handlers.append(
            logging.handlers.TimedRotatingFileHandler(args.log,
                                                      "midnight",
                                                      backupCount=7))
    handlers.append(logging.StreamHandler())
    if args.verbose:
        loglevel = logging.DEBUG
    else:
        loglevel = logging.INFO
    for handler in handlers:
        handler.setFormatter(logging.Formatter("[%(levelname)s: %(asctime)s :"
                                               " %(name)s] %(message)s",
                                               '%Y-%m-%d %H:%M:%S'))
        handler.setLevel(loglevel)
        logging.getLogger('').setLevel(loglevel)
        logging.getLogger('').addHandler(handler)
    logging.getLogger("posttroll").setLevel(logging.INFO)
    logger = logging.getLogger("segment_gatherer")
    gatherer = SegmentGatherer(config, args.config_item)
    gatherer.set_logger(logger)
    gatherer.run()
def setup_logging(job, daemon=False, verbose=False):
    """Configure the root logger for a job run.

    :param job: job name; logs go under <job_logs_directory>/<job>/
    :param daemon: rotate at midnight when True, else one dated file
    :param verbose: DEBUG level when True, else INFO
    """
    log_folder = '%s/%s' % (get_config('webapp')['job_logs_directory'], job)
    # makedirs(exist_ok=True) creates missing parents too and avoids the
    # exists()/mkdir() race of the original (which also failed outright
    # when the parent directory was absent).
    os.makedirs(log_folder, exist_ok=True)
    log_filename = '%s/log' % log_folder
    logger = logging.getLogger()
    level = logging.DEBUG if verbose else logging.INFO
    logger.setLevel(level)
    handlers = []
    if daemon:
        handlers.append(logging.handlers.TimedRotatingFileHandler(
            filename=log_filename, when='midnight'))
    else:
        handlers.append(logging.FileHandler(
            filename='%s.%s' % (log_filename, time.strftime('%Y-%m-%d'))))
    handlers.append(logging.StreamHandler())
    for handler in handlers:
        handler.setLevel(level)
        handler.setFormatter(
            logging.Formatter('%(asctime)s %(levelname)s %(message)s'))
        logger.addHandler(handler)
def setup_logging(self, job, daemon=False, verbose=False):
    """Configure the root logger for *job*, logging to a file and the console.

    :param job: job name used to build the log filename
    :param daemon: rotate at midnight when True, else one dated file
    :param verbose: DEBUG level when True, else INFO
    """
    # exist_ok=True avoids the check-then-create race of the original
    # `if os.path.exists(...) is False` test (also an anti-idiom).
    os.makedirs(JOB_LOGS_DIRECTORY, exist_ok=True)
    log_filename = '%s/%s.log' % (JOB_LOGS_DIRECTORY, job)
    logger = logging.getLogger()
    level = logging.DEBUG if verbose else logging.INFO
    logger.setLevel(level)
    handlers = []
    if daemon:
        handlers.append(logging.handlers.TimedRotatingFileHandler(
            filename=log_filename, when='midnight'))
    else:
        handlers.append(logging.FileHandler(
            filename='%s.%s' % (log_filename, time.strftime('%Y-%m-%d'))))
    handlers.append(logging.StreamHandler())
    for handler in handlers:
        handler.setLevel(level)
        handler.setFormatter(
            logging.Formatter('%(asctime)s %(levelname)s %(message)s'))
        logger.addHandler(handler)
def init_log(args):
    """Configure root logging: console, optional rotating file, ring buffer.

    Returns the LogBuffer so callers can inspect recent records.
    """
    root = logging.getLogger()
    root.setLevel(logging.NOTSET)  # we use our custom filter only
    threshold = logging.WARNING + args.quiet * 10 - args.verbose * 10
    source_filter = SourceFilter(threshold, "master")
    console = logging.StreamHandler()
    console.setFormatter(
        logging.Formatter("%(levelname)s:%(source)s:%(name)s:%(message)s"))
    installed = [console]
    if args.log_file:
        rotating = logging.handlers.RotatingFileHandler(
            args.log_file, maxBytes=args.log_max_size * 1024,
            backupCount=args.log_backup_count)
        rotating.setFormatter(logging.Formatter(
            "%(asctime)s %(levelname)s:%(source)s:%(name)s:%(message)s"))
        installed.append(rotating)
    log_buffer = LogBuffer(1000)
    installed.append(LogBufferHandler(log_buffer))
    for handler in installed:
        handler.addFilter(source_filter)
        root.addHandler(handler)
    return log_buffer
def initialize_logging(configuration):
    """Configure root logging from the [Logging] section of *configuration*.

    Honors 'logfile', 'level', 'stdout', 'format' and 'dateformat'
    options; falls back to stdout-only logging (with a warning) if the
    log file cannot be opened.

    :param configuration: a configparser.ConfigParser with a [Logging] section
    """
    assert isinstance(configuration, configparser.ConfigParser)
    section = configuration['Logging']
    logfile = section.get('logfile')
    level = section.get('level', 'DEBUG')
    stdout = section.getboolean('stdout', True)
    log_format = section.get('format', '[{asctime}|{levelname:<8}|{name}]: {message}')
    dateformat = section.get('dateformat', '%x %I:%M:%S %p')
    formatter = logging.Formatter(fmt=log_format, datefmt=dateformat, style='{')
    root_logger = logging.getLogger()
    handlers = []
    exc_info = None
    try:
        if logfile:  # covers both None and ''
            handlers.append(logging.handlers.WatchedFileHandler(logfile))
            if stdout:
                handlers.append(logging.StreamHandler())
        else:
            # if there is no logfile, you have to have stdout logging
            handlers.append(logging.StreamHandler())
    except OSError:
        # Broadened from PermissionError: a missing directory raises
        # FileNotFoundError, which is also an OSError.
        handlers = [logging.StreamHandler()]
        exc_info = sys.exc_info()
    root_logger.setLevel(level)
    # Iterate over a copy: removing while iterating the live handler
    # list skips every other handler.
    for handler in root_logger.handlers[:]:
        root_logger.removeHandler(handler)
    for handler in handlers:
        handler.setFormatter(formatter)
        root_logger.addHandler(handler)
    if exc_info is not None:
        logger = logging.getLogger("slick-reporter.initialize_logging")
        # BUG FIX: logging uses %-style formatting by default; the
        # original '{}' placeholder was never substituted and the extra
        # argument triggered an internal formatting error.
        logger.warning("Unable to write to log file %s: ", logfile,
                       exc_info=exc_info)
def setup_logging():
    """Sets up logging in a syslog format by log level.

    Reads the module-level option_group for debug/verbose/syslog/logfile
    settings and installs matching handlers on the root logger,
    replacing any existing ones.
    """
    stderr_log_format = "%(levelname) -8s %(asctime)s %(funcName)s line:%(lineno)d: %(message)s"
    file_log_format = "%(asctime)s - %(levelname)s - %(message)s"
    if option_group.debug:
        logger.setLevel(level=logging.DEBUG)
    elif option_group.verbose:
        logger.setLevel(level=logging.INFO)
    else:
        logger.setLevel(level=logging.WARNING)
    handlers = []
    if option_group.syslog:
        # No formatter on purpose: syslogd adds timestamp and level.
        handlers.append(
            logging.handlers.SysLogHandler(facility=option_group.syslog))
    if option_group.logfile:
        handlers.append(logging.FileHandler(option_group.logfile))
        # BUG FIX: was handlers[0], which re-formatted the syslog
        # handler and left the file handler unformatted when both
        # syslog and logfile were configured.
        handlers[-1].setFormatter(logging.Formatter(file_log_format))
    if not handlers:
        handlers.append(logging.StreamHandler())
        handlers[-1].setFormatter(logging.Formatter(stderr_log_format))
    # Iterate over a copy: removing while iterating the live handler
    # list skips every other handler.
    for handler in logging.root.handlers[:]:
        logging.root.removeHandler(handler)
    for handler in handlers:
        # logging.root is the same object logger.root referenced;
        # use the module attribute directly for clarity.
        logging.root.addHandler(handler)
    return
def generate_draft(self): """Create a first draft of the pseudo config file for logging.""" # Write static data to the pseudo config file. self.draft.write( """ [formatters] keys=console,file [formatter_console] format=%(message)s [formatter_file] format=%(asctime)s %(levelname)-8s %(name)-30s %(message)s datefmt=%Y-%m-%dT%H:%M:%S [loggers] keys=root [handler_null] class=libs.LoggingSetup.NullHandler args=() """ ) # Add handlers. handlers = [] if not self.console_quiet: handlers.append('console') self.draft.write( """ [handler_console] class=libs.LoggingSetup.ConsoleHandler level=DEBUG formatter=console args=() """ ) if self.log_file: handlers.append('file') self.draft.write( """ [handler_file] class=libs.LoggingSetup.TimedRotatingFileHandler level=DEBUG formatter=file args=('%s','D',30,5) """ % self.log_file ) if not handlers: handlers.append('null') self.draft.write( """ [logger_root] level={level} handlers={handlers} [handlers] keys={handlers} """.format(level=self.level, handlers=','.join(handlers)) )
def setup(name=None, level='info', short=False, pprint=False, format=None):
    """Install trace-file and stream handlers on the root logger.

    :param name: basename used to derive the trace debug file path
    :param level: stream handler level; must resolve to 'debug' or 'info'
    :param short: use the short log format
    :param pprint: pretty-print log payloads in the stream handler
    :param format: explicit format string; derived from `short` if None
    """
    level = _get_level(level)
    assert level in ('debug', 'info')
    short = _get_short(short)
    format = _get_format(format, short)
    handlers = []
    _add_trace_level()
    # add trace file handler; the file is scheduled for cleanup in 24h
    path = _get_debug_path(name)
    s.shell.cron_rm_path_later(path, hours=24)
    handler = logging.handlers.WatchedFileHandler(path)
    handlers.append(_make_handler(handler, 'trace', '%(message)s', False, _TraceOnly))
    # add the stream handler
    handlers.append(_make_handler(logging.StreamHandler(), level, format, pprint, _NotTrace))
    # BUG FIX: the original removed handlers with a side-effecting list
    # comprehension over the LIVE handler list — mutating while
    # iterating skips every other handler. Iterate a copy, with plain
    # for-loops instead of throwaway lists.
    for existing in logging.root.handlers[:]:
        logging.root.removeHandler(existing)
    for new_handler in handlers:
        logging.root.addHandler(new_handler)
    logging.root.setLevel('TRACE')