def __init__(self, tag='', facility=ANACONDA_SYSLOG_FACILITY,
             identifier=ANACONDA_SYSLOG_IDENTIFIER):
    self.tag = tag
    JournalHandler.__init__(self,
                            SYSLOG_FACILITY=facility,
                            SYSLOG_IDENTIFIER=identifier)

def emit(self, record):
    if self.tag:
        original_msg = record.msg
        record.msg = '%s: %s' % (self.tag, original_msg)
        JournalHandler.emit(self, record)
        record.msg = original_msg
    else:
        JournalHandler.emit(self, record)

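# A minimal usage sketch for the tag-prefixing handler above. The enclosing
# class name (TaggedJournalHandler) and the ANACONDA_SYSLOG_* constants are
# assumptions; the source only shows the two methods.
import logging

log = logging.getLogger('anaconda.storage')
log.addHandler(TaggedJournalHandler(tag='storage'))  # hypothetical class name
log.warning('device scan failed')  # journal entry: 'storage: device scan failed'
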
def get_main_logger(use_console=True, use_journal=False, use_logbuf=True,
                    console_color=True, log_default_delta=0):
    """
    Returns the top-level logger object. This is the only API call from
    this file that should be used outside.
    """
    global LOGGER
    if LOGGER is not None:
        return LOGGER
    logging.addLevelName(TRACE, 'TRACE')
    logging.setLoggerClass(MPMLogger)
    LOGGER = logging.getLogger('MPM')
    if use_console:
        console_handler = ColorStreamHandler() if console_color \
            else logging.StreamHandler()
        console_formatter = logging.Formatter(
            "[%(name)s] [%(levelname)s] %(message)s")
        console_handler.setFormatter(console_formatter)
        LOGGER.addHandler(console_handler)
    if use_journal:
        from systemd.journal import JournalHandler
        journal_handler = JournalHandler(SYSLOG_IDENTIFIER='usrp_hwd')
        journal_formatter = logging.Formatter(
            '[%(levelname)s] [%(module)s] %(message)s')
        journal_handler.setFormatter(journal_formatter)
        LOGGER.addHandler(journal_handler)
    if use_logbuf:
        queue_handler = LossyQueueHandler(LOGGER.py_log_buf)
        LOGGER.addHandler(queue_handler)
    # Set default level:
    from usrp_mpm import prefs
    mpm_prefs = prefs.get_prefs()
    default_log_level = int(min(
        mpm_prefs.get_log_level() - log_default_delta * 10, CRITICAL))
    default_log_level = max(TRACE,
                            default_log_level - (default_log_level % 10))
    LOGGER.setLevel(default_log_level)
    # Connect to C++ logging:
    if LOGGER.cpp_log_buf is not None:
        lib_logger = LOGGER.getChild('lib')

        def log_from_cpp():
            " Callback for logging from C++ "
            log_level, component, message = LOGGER.cpp_log_buf.pop()
            if log_level:
                lib_logger.log(log_level, "[%s] %s",
                               component, message.strip())

        LOGGER.cpp_log_buf.set_notify_callback(log_from_cpp)
    # Flush errors stuck in the prefs module:
    log = LOGGER.getChild('prefs')
    for err_msg in mpm_prefs.get_log_errors():
        log.error(err_msg)
    return LOGGER

def add_journal_handler():
    from systemd.journal import \
        JournalHandler  # pylint: disable=import-outside-toplevel
    journald_handler = JournalHandler()
    # set a formatter to include the level name
    journald_handler.setFormatter(
        logging.Formatter('[%(levelname)s] %(message)s'))
    # add the journald handler to the current logger
    LOG.addHandler(journald_handler)

def get_main_logger(use_console=True, use_journal=False, use_logbuf=True,
                    console_color=True, log_default_delta=0):
    """
    Returns the top-level logger object. This is the only API call from
    this file that should be used outside.
    """
    global LOGGER
    if LOGGER is not None:
        return LOGGER
    logging.addLevelName(TRACE, 'TRACE')
    logging.setLoggerClass(MPMLogger)
    LOGGER = logging.getLogger('MPM')
    if use_console:
        console_handler = ColorStreamHandler() if console_color \
            else logging.StreamHandler()
        console_formatter = logging.Formatter(
            "[%(name)s] [%(levelname)s] %(message)s")
        console_handler.setFormatter(console_formatter)
        LOGGER.addHandler(console_handler)
    if use_journal:
        from systemd.journal import JournalHandler
        journal_handler = JournalHandler(SYSLOG_IDENTIFIER='usrp_hwd')
        journal_formatter = logging.Formatter(
            '[%(levelname)s] [%(module)s] %(message)s')
        journal_handler.setFormatter(journal_formatter)
        LOGGER.addHandler(journal_handler)
    if use_logbuf:
        queue_handler = LossyQueueHandler(LOGGER.py_log_buf)
        LOGGER.addHandler(queue_handler)
    # Set default level:
    from usrp_mpm import prefs
    mpm_prefs = prefs.get_prefs()
    default_log_level = int(min(
        mpm_prefs.get_log_level() - log_default_delta * 10, CRITICAL))
    default_log_level = max(TRACE,
                            default_log_level - (default_log_level % 10))
    LOGGER.setLevel(default_log_level)
    # Connect to C++ logging:
    if LOGGER.cpp_log_buf is not None:
        lib_logger = LOGGER.getChild('lib')

        def log_from_cpp():
            " Callback for logging from C++ "
            log_level, component, message = LOGGER.cpp_log_buf.pop()
            if log_level:
                lib_logger.log(log_level, "[%s] %s",
                               component, message.strip())

        LOGGER.cpp_log_buf.set_notify_callback(log_from_cpp)
    # Flush errors stuck in the prefs module:
    log = LOGGER.getChild('prefs')
    for err_key, err_msg in mpm_prefs.get_log_errors():
        log.error('%s: %s', err_key, err_msg)
    return LOGGER

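# A minimal sketch of calling get_main_logger() above. The helper names it
# relies on (MPMLogger, TRACE, LossyQueueHandler, ColorStreamHandler) live in
# the same module in the real project; this only shows the call pattern.
LOG = get_main_logger(use_console=True, use_journal=True)
LOG.info("MPM logging initialized")
periph_log = LOG.getChild('periphs')  # hypothetical per-subsystem child logger
periph_log.debug("probing peripherals")
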
def setup_logging(options, settings):
    """ Set up the log handler and the log level """
    logger = logging.getLogger("pdud")
    if options.journal:
        from systemd.journal import JournalHandler
        handler = JournalHandler(SYSLOG_IDENTIFIER="pdudaemon")
        handler.setFormatter(logging.Formatter(logging_FORMAT_JOURNAL))
    elif options.logfile == "-" or not options.logfile:
        handler = logging.StreamHandler(sys.stdout)
        handler.setFormatter(logging.Formatter(logging_FORMAT))
    else:
        handler = WatchedFileHandler(options.logfile)
        handler.setFormatter(logging.Formatter(logging_FORMAT))
    logger.addHandler(handler)
    settings_level = settings.get('daemon', {}).get('logging_level', None)
    if settings_level:
        options.loglevel = settings_level.upper()
    else:
        options.loglevel = options.loglevel.upper()
    if options.loglevel == "DEBUG":
        logger.setLevel(logging.DEBUG)
    elif options.loglevel == "INFO":
        logger.setLevel(logging.INFO)
    elif options.loglevel == "WARNING":
        logger.setLevel(logging.WARNING)
    else:
        logger.setLevel(logging.ERROR)

def __init__(self):
    logger = logging.getLogger()
    # instantiate the JournalHandler to hook into systemd journald
    journald_handler = JournalHandler()
    # set a formatter to include the level name
    journald_handler.setFormatter(
        logging.Formatter('[%(levelname)s] %(message)s'))
    # add the journald handler to the current logger
    logger.addHandler(journald_handler)
    logging.getLogger().setLevel(logging.INFO)

def setup_logging(log):
    if enable_journal:
        root_logger = log.root
        h = JournalHandler(SYSLOG_IDENTIFIER="ooni-api")
        formatter = logging.Formatter("%(levelname)s %(message)s")
        h.setFormatter(formatter)
        root_logger.addHandler(h)
        root_logger.setLevel(logging.DEBUG)
    else:
        log.setLevel(logging.DEBUG)
        logging.basicConfig(format="%(message)s")

def on_prepare(self):
    try:
        from systemd.journal import JournalHandler
        self._journal = JournalHandler(SYSLOG_IDENTIFIER='io.github.Pithos')
        self._journal.setFormatter(logging.Formatter())
        self._logger = logging.getLogger()
        self.preferences_dialog = LoggingPluginPrefsDialog(
            self.window, self.settings)
    except ImportError:
        return _('Systemd Python module not found')

def journald_setup():
    """Set up and start journald logging"""
    if DEBUG_MODE:
        LOG.setLevel(logging.DEBUG)
        LOG.addHandler(logging.StreamHandler(sys.stderr))
    with suppress(ImportError):
        from systemd.journal import JournalHandler
        journal_handler = JournalHandler()
        log_entry_format = '[%(levelname)s] %(message)s'
        journal_handler.setFormatter(logging.Formatter(log_entry_format))
        LOG.setLevel(logging.INFO)
        LOG.addHandler(journal_handler)

def setup_logging(options):
    """ Set up the log handler and the log level """
    logger = logging.getLogger("pdud")
    if options.journal:
        from systemd.journal import JournalHandler
        handler = JournalHandler(SYSLOG_IDENTIFIER="pdudaemon")
        handler.setFormatter(logging.Formatter(logging_FORMAT_JOURNAL))
    elif options.logfile == "-" or not options.logfile:
        handler = logging.StreamHandler(sys.stdout)
        handler.setFormatter(logging.Formatter(logging_FORMAT))
    else:
        handler = WatchedFileHandler(options.logfile)
        handler.setFormatter(logging.Formatter(logging_FORMAT))
    logger.addHandler(handler)
    options.loglevel = options.loglevel.upper()
    if options.loglevel == "DEBUG":
        logger.setLevel(logging.DEBUG)
    elif options.loglevel == "INFO":
        logger.setLevel(logging.INFO)
    elif options.loglevel == "WARNING":
        logger.setLevel(logging.WARNING)
    else:
        logger.setLevel(logging.ERROR)

def init(self, log_level):
    self._file_formatter = logging.Formatter(
        "{asctime} [{name:<15.15}] [{levelname:<8.8}]: {message}",
        style="{")
    self._syslog_formatter = logging.Formatter(
        "[{name:>15.15}] [{levelname:<8.8}]: {message}", style="{")
    self._console_formatter = ColoredFormatter(
        "{log_color} * {reset}{message}",
        style="{",
        log_colors={
            'DEBUG': 'bold_cyan',
            'INFO': 'bold_blue',
            'WARNING': 'bold_yellow',
            'ERROR': 'bold_red'
        })
    if HAS_SYSTEMD:
        self._log_handler = JournalHandler(SYSLOG_IDENTIFIER="clover-config")
        self._log_handler.setFormatter(self._syslog_formatter)
    else:
        log_path = os.path.join(os.path.expanduser("~"),
                                ".local", "share", "clover-config")
        if not os.path.exists(log_path):
            os.makedirs(log_path)
        log_file = os.path.join(log_path, "clover-config.log")
        self._log_handler = logging.handlers.RotatingFileHandler(
            log_file,
            # 1 MB per file, 10 files total (1 active + 9 backups)
            maxBytes=1048576,
            backupCount=9)
        self._log_handler.setFormatter(self._file_formatter)
    self._log_handler.setLevel(logging.INFO)
    self._console_handler = logging.StreamHandler()
    self._console_handler.setFormatter(self._console_formatter)
    self.set_log_level(log_level)
    self.root = logging.getLogger()
    self.root.name = "clover-config"
    self.root.setLevel(logging.DEBUG)
    self.root.addHandler(self._log_handler)
    self.root.addHandler(self._console_handler)
    if HAS_SYSTEMD:
        self.root.debug("Using journald logging system...")
    else:
        self.root.debug("Logging to `{}`".format(log_file))

def do_command_line(self, command_line):
    options = command_line.get_options_dict()
    # First, get rid of existing logging handlers due to call in header as per
    # http://stackoverflow.com/questions/1943747/python-logging-before-you-run-logging-basicconfig
    logging.root.handlers = []
    # Show the version on local instance and exit
    if options.contains('version'):
        # Broken bindings...
        type(command_line).do_print_literal(
            command_line, "Pithos {}\n".format(self.version))
        return 0
    handlers = []
    try:
        from systemd.journal import JournalHandler
        journal = JournalHandler(SYSLOG_IDENTIFIER=self.props.application_id)
        # We can be more verbose with the journal and filter it later,
        # and we don't need fancy formatting as it's part of the structure
        journal.setLevel(logging.INFO)
        journal.setFormatter(logging.Formatter())
        handlers.append(journal)
    except ImportError:
        pass
    # Set the logging level to show debug messages
    if options.contains('debug'):
        log_level = logging.DEBUG
    elif options.contains('verbose'):
        log_level = logging.INFO
    else:
        log_level = logging.WARN
    stream = logging.StreamHandler()
    stream.setLevel(log_level)
    stream.setFormatter(logging.Formatter(
        fmt='%(levelname)s - %(module)s:%(funcName)s:%(lineno)d - %(message)s'))
    handlers.append(stream)
    logging.basicConfig(level=logging.NOTSET, handlers=handlers)
    self.test_mode = options.lookup_value('test')
    self.do_activate()
    return 0

def set_journal_handler(event):
    request = event.request
    params = {
        'TENDERS_API_VERSION': VERSION,
        'TAGS': 'python,api',
        'USER': str(request.authenticated_userid or ''),
        # 'ROLE': str(request.authenticated_role),
        'CURRENT_URL': request.url,
        'CURRENT_PATH': request.path_info,
        'REMOTE_ADDR': request.remote_addr or '',
        'USER_AGENT': request.user_agent or '',
        'REQUEST_METHOD': request.method,
        'AWARD_ID': '',
        'BID_ID': '',
        'COMPLAINT_ID': '',
        'CONTRACT_ID': '',
        'DOCUMENT_ID': '',
        'QUESTION_ID': '',
        'TENDER_ID': '',
        'TIMESTAMP': get_now().isoformat(),
        'REQUEST_ID': request.environ.get('REQUEST_ID', ''),
        'CLIENT_REQUEST_ID': request.headers.get('X-Client-Request-ID', ''),
    }
    # Copy the handler list before removing: mutating a list while
    # iterating over it skips entries.
    for i in list(LOGGER.handlers):
        LOGGER.removeHandler(i)
    LOGGER.addHandler(JournalHandler(**params))

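# The **params call above works because JournalHandler attaches extra keyword
# arguments to every record as structured journal fields. A reduced sketch
# (logger name and field values here are placeholders):
import logging
from systemd.journal import JournalHandler

lg = logging.getLogger('demo')
lg.addHandler(JournalHandler(TAGS='python,api', REQUEST_METHOD='GET'))
lg.info('fields are queryable, e.g.: journalctl REQUEST_METHOD=GET')
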
def __init__(self, guild_settings_file, enabled_cogs_directory,
             *arguments, **keyword_arguments):
    """Initialize the bot with a logger and a dictionary of guild settings."""
    super().__init__(*arguments, **keyword_arguments)
    # Create a new logger.
    self._logger = logging.getLogger(__name__)
    self._logger.addHandler(JournalHandler())
    self._logger.setLevel(logging.INFO)
    # Remove the default help command.
    self.remove_command("help")
    # Load all cogs in the enabled cogs directory.
    self.load_cogs(enabled_cogs_directory)
    # Load the guild settings from the guild settings file. If the file
    # doesn't exist, create a new empty dictionary.
    self._guild_settings_file = guild_settings_file
    try:
        with open(self._guild_settings_file, "r") as open_guild_settings_file:
            self._guild_settings = json.load(open_guild_settings_file)
    except FileNotFoundError:
        self._guild_settings = {}

def main():
    parser = argparse.ArgumentParser(usage='usage: %(prog)s [options]')
    parser.add_argument('-c', '--config', action='store', type=str,
                        dest='config', default='/etc/rpigw/config.yml',
                        help='Config file path')
    args = parser.parse_args()
    log = logging.getLogger('rpigw')
    log.addHandler(JournalHandler())
    log.setLevel(logging.DEBUG)
    log.info('Starting rpigw...')
    config = Config(args.config).read()
    gsm = Gsm(config)
    iqrf = Iqrf(config)
    sms = gsm.read_sms('ALL')
    log.info('Deleting old text messages...')
    if config['app']['enable-sms-feedback']:
        log.info('Text message feedback is enabled.')
    else:
        log.info('Text message feedback is disabled.')
    for i in sms:
        if i:
            gsm.delete_sms(i['id'])
    try:
        read_sms(gsm, iqrf, config, log)
    except Exception as error:
        log.error('An error occurred: %s %s', type(error), error)
    return 0

def configure_logging(logging_type, loglevel, logfile):
    numeric_level = getattr(logging, loglevel.upper(), None)
    if not isinstance(numeric_level, int):
        raise ValueError('Invalid log level: %s' % loglevel)
    if logging_type == 'stdout':
        logging.basicConfig(
            format='%(asctime)s [%(levelname)s] %(name)s: %(message)s',
            level=numeric_level)
    elif logging_type == 'file':
        assert logfile is not None
        logging.basicConfig(
            filename=logfile,
            format='%(asctime)s [%(levelname)s] %(name)s: %(message)s',
            level=numeric_level)
    elif logging_type == 'journald':
        try:
            from systemd.journal import JournalHandler
        except ImportError:
            print('Journald logging needs the systemd-python package. '
                  'Try installing it with "pip install systemd-python".')
            sys.exit(1)
        logging.basicConfig(handlers=[JournalHandler()],
                            format='%(name)s: %(message)s',
                            level=numeric_level)
    else:
        assert False

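# A minimal sketch of driving configure_logging() above; the 'journald'
# branch only works where systemd-python is installed.
configure_logging('journald', 'info', None)
logging.getLogger('myapp').info('hello journal')  # 'myapp' is a placeholder
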
def main() -> None:
    global conf
    log.info("Analysis starting")
    cp = ConfigParser()
    with open("/etc/ooni/analysis.conf") as f:
        cp.read_file(f)
    conf = parse_args()
    if conf.devel or conf.stdout or not has_systemd:
        format = "%(relativeCreated)d %(process)d %(levelname)s %(name)s %(message)s"
        logging.basicConfig(stream=sys.stdout, level=logging.DEBUG, format=format)
    else:
        log.addHandler(JournalHandler(SYSLOG_IDENTIFIER="analysis"))
        log.setLevel(logging.DEBUG)
    log.info("Logging started")
    conf.output_directory = (
        Path("./var/lib/analysis") if conf.devel else Path("/var/lib/analysis")
    )
    os.makedirs(conf.output_directory, exist_ok=True)
    if conf.backup_db:
        backup_to_s3.log = log
        backup_to_s3.run_backup(conf, cp)
        return
    try:
        if conf.update_citizenlab:
            update_citizenlab_test_lists(conf)
    except Exception as e:
        log.error(str(e), exc_info=e)
    log.info("done")

def main():
    delay = timedelta(seconds=10)
    log.addHandler(JournalHandler(SYSLOG_IDENTIFIER="clickhouse_feeder"))
    log.setLevel(logging.DEBUG)
    cp = ConfigParser()
    with Path("/etc/ooni/clickhouse_feeder.conf").open() as f:
        cp.read_file(f)
    conf = dict(cp["DEFAULT"])
    log.info("Starting clickhouse_feeder")
    lockfile_f = Path("/var/lib/analysis/clickhouse_feeder.lock")
    tstamp_f = Path("/var/lib/analysis/clickhouse_feeder.tstamp")
    jsonl_tstamp_f = Path("/var/lib/analysis/clickhouse_feeder.jsonl.tstamp")
    pg_conn = setup_pg_connection(conf)
    click_conn = setup_click_connection(conf)
    while True:
        time.sleep(1)
        log.info("locking")
        with FileLock(lockfile_f):
            log.debug("lock acquired")
            try:
                old_tstamp = tstamp_f.read_text().strip()
                new_tstamp = datetime.utcnow() - delay
                new_tstamp = new_tstamp.strftime("%Y%m%d%H%M%S")
                if new_tstamp <= old_tstamp:
                    continue
                sync_clickhouse_fastpath(pg_conn, click_conn,
                                         old_tstamp, new_tstamp)
                # Update timestamp only on success
                tstamp_f.write_text(new_tstamp)
                # Update JSONL table
                old_tstamp = jsonl_tstamp_f.read_text().strip()
                new_tstamp = datetime.utcnow() - timedelta(minutes=80)
                new_tstamp = new_tstamp.strftime("%Y%m%d%H%M%S")
                if new_tstamp <= old_tstamp:
                    continue
                found = sync_clickhouse_jsonl(
                    pg_conn, click_conn, old_tstamp, new_tstamp
                )
                if found:
                    # Update timestamp only if data was found
                    jsonl_tstamp_f.write_text(new_tstamp)
            except psycopg2.OperationalError:
                log.warning("Reconnecting to PG")
                pg_conn = setup_pg_connection(conf)
            # ValueError must come before the broad Exception handler,
            # otherwise it can never be reached.
            except ValueError as e:
                log.error(e)
            except Exception:
                log.error("Unhandled exception", exc_info=True)
                log.info("Reconnecting to PG")
                pg_conn = setup_pg_connection(conf)
                log.info("Reconnecting to Clickhouse")
                click_conn = setup_click_connection(conf)
            # except psycopg2.OperationalError:
            #     log.warning("Reconnecting to CH")
            #     click_conn = setup_click_connection(conf)

def _setup_logging(self) -> None:
    root = logging.getLogger()
    if self.verbose:
        root.setLevel(logging.DEBUG)
    else:
        root.setLevel(logging.INFO)
    if self.journal:
        handler = JournalHandler()
        formatter = logging.Formatter('%(message)s')
    else:
        handler = logging.StreamHandler(sys.stderr)
        formatter = logging.Formatter('%(levelname)s: %(message)s')
    handler.setFormatter(formatter)
    root.addHandler(handler)
    self.logger = logging.getLogger("core")

def _setup_systemd_logger():
    try:
        from systemd.journal import JournalHandler
    except ImportError:
        raise util.ConfigException(
            "OS log for systemd not supported; module not installed",
            'log_os_logs')
    return JournalHandler()

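# A sketch of how a caller might consume _setup_systemd_logger() above,
# falling back to stderr when systemd-python is missing. The fallback wiring
# is an assumption; util.ConfigException comes from the surrounding project.
import logging
import sys

try:
    handler = _setup_systemd_logger()
except Exception:  # util.ConfigException in the real project
    handler = logging.StreamHandler(sys.stderr)
logging.getLogger().addHandler(handler)
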
def setup():
    os.environ["TZ"] = "UTC"
    global conf
    ap = ArgumentParser(__doc__)
    ap.add_argument("--start-day", type=lambda d: parse_date(d))
    ap.add_argument("--end-day", type=lambda d: parse_date(d))
    ap.add_argument("--devel", action="store_true", help="Devel mode")
    ap.add_argument("--noapi", action="store_true", help="Do not start API feeder")
    ap.add_argument("--stdout", action="store_true", help="Log to stdout")
    ap.add_argument("--db-uri", help="Database DSN or URI.")
    ap.add_argument(
        "--update",
        action="store_true",
        help="Update summaries and files instead of logging an error",
    )
    ap.add_argument("--stop-after", type=int,
                    help="Stop after feeding N measurements", default=None)
    ap.add_argument(
        "--no-write-to-db",
        action="store_true",
        help="Do not insert measurement in database",
    )
    ap.add_argument(
        "--keep-s3-cache",
        action="store_true",
        help="Keep files downloaded from S3 in the local cache",
    )
    conf = ap.parse_args()
    if conf.devel or conf.stdout or no_journal_handler:
        format = "%(relativeCreated)d %(process)d %(levelname)s %(name)s %(message)s"
        logging.basicConfig(stream=sys.stdout, level=logging.DEBUG, format=format)
    else:
        log.addHandler(JournalHandler(SYSLOG_IDENTIFIER="fastpath"))
        log.setLevel(logging.DEBUG)
    # Run inside current directory in devel mode
    root = Path(os.getcwd()) if conf.devel else Path("/")
    conf.conffile = root / "etc/ooni/fastpath.conf"
    log.info("Using conf file %s", conf.conffile)
    cp = ConfigParser()
    with open(conf.conffile) as f:
        cp.read_file(f)
    conf.collector_hostnames = cp["DEFAULT"]["collectors"].split()
    log.info("collectors: %s", conf.collector_hostnames)
    conf.s3_access_key = cp["DEFAULT"]["s3_access_key"].strip()
    conf.s3_secret_key = cp["DEFAULT"]["s3_secret_key"].strip()
    if conf.db_uri is None:
        conf.db_uri = cp["DEFAULT"]["db_uri"].strip()
    setup_dirs(conf, root)

def setup_logging(args):
    global logger
    kwargs = get_basicconfig_kwargs(args)
    logging.basicConfig(**kwargs)
    logger = logging.getLogger()
    if args.foreground:
        return
    # if not running in foreground, also log to journald
    logger.addHandler(JournalHandler())

def get_daemon_logger(filepath, log_format=None, loglevel=logging.INFO,
                      journal=False):
    logger = logging.getLogger()
    logger.setLevel(loglevel)
    try:
        if journal:
            from systemd.journal import JournalHandler
            handler = JournalHandler(SYSLOG_IDENTIFIER=basename(sys.argv[0]))
        elif filepath:
            handler = WatchedFileHandler(filepath)
        else:
            handler = StreamHandler()
    except Exception as e:  # pylint: disable=broad-except
        print("Fatal error creating client_logger: " + str(e))
        sys.exit(os.EX_OSERR)
    if log_format:
        handler.setFormatter(logging.Formatter(log_format))
    logger.addHandler(handler)
    return logger, handler

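# Hedged usage sketch for get_daemon_logger() above; the log path and format
# string are placeholders.
logger, handler = get_daemon_logger(
    '/var/log/mydaemon.log',
    log_format='%(asctime)s %(levelname)s %(message)s',
    journal=False)
logger.info('daemon started')
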
def setup_logging(development):
    handlers = [JournalHandler()]
    if development:
        handlers.append(logging.StreamHandler())
    logging.basicConfig(
        level=logging.DEBUG,
        format='%(levelname).1s: %(message)s',
        handlers=handlers,
    )

def loadLogging(self) -> None:
    self.Logger = logging.getLogger('PhaazeDB')
    self.Logger.setLevel(logging.DEBUG)
    SHF: logging.Formatter = logging.Formatter("[%(levelname)s]: %(message)s")
    if CliArgs.get('logging', 'console') == "systemd" and 'systemd' in sys.modules:
        JH: JournalHandler = JournalHandler()
        JH.setFormatter(SHF)
        self.Logger.addHandler(JH)
    else:
        SH: logging.StreamHandler = logging.StreamHandler()
        SH.setFormatter(SHF)
        self.Logger.addHandler(SH)

def __init__(self, bus_name: BusName, obj_path: str, iface: str,
             data: dict) -> None:
    assert iface, "No interface provided for PropertiesObject"
    super().__init__(bus_name, obj_path)
    self.properties = data
    self.id = obj_path
    self.iface = iface
    self.log = logging.getLogger(obj_path)
    self.log.addHandler(
        JournalHandler(level=logging.DEBUG, SYSLOG_IDENTIFIER=obj_path))

def setup_logger(log_level, use_stderr=False):
    if use_stderr or os.environ.get('PLATFORM') == 'docker':
        logging_handler = logging.StreamHandler(sys.stderr)
    elif JournalHandler:
        logging_handler = JournalHandler()
    # Fallbacks when JournalHandler isn't available.
    elif sys.platform == 'linux':
        logging_handler = logging.handlers.SysLogHandler(address='/dev/log')
    elif sys.platform == 'darwin':
        logging_handler = logging.handlers.SysLogHandler(address='/var/run/syslog')
    elif sys.platform.lower() in ['windows', 'win32']:
        logging_handler = logging.handlers.SysLogHandler()
    else:
        # Unknown platform, revert to stderr
        logging_handler = logging.StreamHandler(sys.stderr)
    logging_handler.setFormatter(
        logging.Formatter(fmt='{levelname[0]}{asctime}.{msecs:03.0f} '
                              '{filename}:{lineno}] {message}',
                          datefmt='%m%d %H:%M:%S', style='{'))
    logger = logging.getLogger()
    logger.setLevel(log_level)
    logger.addHandler(logging_handler)

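# A minimal sketch of wiring setup_logger() above. The elif chain suggests
# that JournalHandler is set to None at module level when systemd-python
# cannot be imported; that guarded import is assumed here.
import logging

setup_logger(logging.DEBUG)
logging.getLogger(__name__).info('root logger configured')
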
def main(couchdb_url=None, couchdb_db='openprocurement', seq_file=None,
         dump_dir=None):
    if JournalHandler:
        params = {
            'TAGS': 'python,concord',
        }
        LOGGER.addHandler(JournalHandler(**params))
    if couchdb_url:
        server = Server(url=couchdb_url,
                        session=Session(retry_delays=range(10)))
    else:
        server = Server(session=Session(retry_delays=range(10)))
    for i in range(10):
        try:
            db = server[couchdb_db]
        except Exception:
            sleep(i)
        else:
            break
    else:
        db = server[couchdb_db]
    if dump_dir and not os.path.isdir(dump_dir):
        os.mkdir(dump_dir)
    if seq_file and os.path.isfile(seq_file):
        with open(seq_file) as f:
            fdata = f.read()
        last_seq = int(fdata) if fdata.isdigit() else 0
    else:
        last_seq = 0
    seq_block = last_seq // 100
    while True:
        cc = db.changes(timeout=55000, since=last_seq, feed='longpoll',
                        filter='_view', view='conflicts/all',
                        include_docs=True, conflicts=True)
        # wait([
        #     spawn(conflicts_resolve, db, c, dump_dir)
        #     for c in cc[u'results']
        # ])
        for c in cc[u'results']:
            conflicts_resolve(db, c, dump_dir)
        last_seq = cc[u'last_seq']
        if seq_file and seq_block < last_seq // 100:
            with open(seq_file, 'w+') as f:
                f.write(str(last_seq))
            seq_block = last_seq // 100

def trenew(args):
    ### This does the "work" of the daemon
    # setup logging
    logger = logging.getLogger('trenew')
    try:
        logger.addHandler(JournalHandler())
    except Exception:
        logger.addHandler(SysLogHandler(address='/dev/log', facility='daemon'))
    if not args.background:
        logger.addHandler(logging.StreamHandler(sys.stdout))
    if args.verbose:
        logger.setLevel(logging.DEBUG)
    else:
        logger.setLevel(logging.INFO)
    logger.debug("trenew thread started with args: %s" % args)
    # if args.log_file == True:
    #     fh = logging.FileHandler(args.log_file)
    #     fh.setLevel(logging.DEBUG)
    #     formatstr = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    #     formatter = logging.Formatter(formatstr)
    #     fh.setFormatter(formatter)
    #     logger.addHandler(fh)
    while True:
        logger.debug("entering loop")
        # logger.info("this is an INFO message")
        # logger.error("this is an ERROR message")
        logger.info("running %s" % args.aklog_path)
        try:
            aklog_call = subprocess.run(
                [args.aklog_path, '-d', args.aklog_options],
                text=True, check=args.exit_immediately)
        except TypeError:
            # Python < 3.7 has no text= keyword
            aklog_call = subprocess.run(
                [args.aklog_path, '-d', args.aklog_options],
                check=args.exit_immediately)
        logger.debug(aklog_call.stdout)
        logger.debug(aklog_call.stderr)
        logger.debug(aklog_call)
        if aklog_call.returncode != 0 and args.obsess != 0:
            logger.warning("aklog returned %d" % aklog_call.returncode)
            sleepTime = setSleep(args.obsess)
            logger.debug("we think we do not have a good token, "
                         "obsessing every %d seconds", sleepTime)
        else:
            sleepTime = setSleep(args.keep_alive)
            logger.debug("we think we do have a good token, "
                         "sleeping for %d seconds", sleepTime)
        time.sleep(sleepTime)

def enable():
    '''Enable global logging

    Use :py:mod:`logging` module from standard library to log messages.

    >>> import qubes.log
    >>> qubes.log.enable()          # doctest: +SKIP
    >>> import logging
    >>> logging.warning('Foobar')   # doctest: +SKIP
    '''
    if logging.root.handlers:
        return

    def handle_exception(exc_type, exc_value, exc_traceback):
        if issubclass(exc_type, KeyboardInterrupt):
            sys.__excepthook__(exc_type, exc_value, exc_traceback)
            return
        logging.critical("Uncaught exception",
                         exc_info=(exc_type, exc_value, exc_traceback))

    sys.excepthook = handle_exception

    logging.root.setLevel(logging.INFO)
    if systemd.daemon.booted():
        logging.root.addHandler(JournalHandler(SYSLOG_IDENTIFIER='qubesd'))
        return

    handler_console = logging.StreamHandler(sys.stderr)
    handler_console.setFormatter(formatter_console)
    logging.root.addHandler(handler_console)

    if os.path.exists('/var/log/qubes'):
        log_path = '/var/log/qubes/qubes.log'
    else:
        # for tests, travis etc.
        log_path = '/tmp/qubes.log'
    old_umask = os.umask(0o007)
    try:
        handler_log = logging.FileHandler(log_path, 'a', encoding='utf-8')
        fcntl.fcntl(handler_log.stream.fileno(),
                    fcntl.F_SETFD, fcntl.FD_CLOEXEC)
    finally:
        os.umask(old_umask)
    handler_log.setFormatter(formatter_log)
    logging.root.addHandler(handler_log)

def setup_logging(self, logger_name="PTLogger", logging_level=20,
                  log_to_journal=False):
    self._logging_level = logging_level
    self._log_to_journal = log_to_journal
    self._journal_log = logging.getLogger(logger_name)
    if not self._added_handler and log_to_journal:
        self._journal_log.addHandler(JournalHandler())
        self._added_handler = True
    self._journal_log.setLevel(self._logging_level)
    self._journal_log.info("Logger created.")

def configure_logging(level: str = "INFO"):
    """Configure our logging - stderr by default but logging nicely to the
    journal under systemd."""
    global CONFIGURED
    if not CONFIGURED:
        under_systemd = "INVOCATION_ID" in environ
        kwargs: Dict[str, Any] = dict(level=level)
        if under_systemd:
            kwargs["format"] = "%(message)s"
            kwargs["handlers"] = [JournalHandler()]
        else:
            kwargs["format"] = "%(asctime)s %(levelname)-8s %(name)-35s - %(message)s"
            kwargs["stream"] = stderr
        logging.basicConfig(**kwargs)
        turn_down_noisy_loggers()
        CONFIGURED = True

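# Usage sketch for configure_logging() above. systemd (v232 and later) sets
# INVOCATION_ID in a unit's environment, so checking for it is a cheap way to
# detect "running under systemd" without importing any systemd module.
configure_logging("DEBUG")
logging.getLogger("worker").debug("journal or stderr chosen automatically")
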
class JournalLoggingPlugin(PithosPlugin):
    preference = 'journald-logging'
    description = _('Store logs with the journald service')

    _logging_changed_handler = None

    def on_prepare(self):
        try:
            from systemd.journal import JournalHandler
            self._journal = JournalHandler(SYSLOG_IDENTIFIER='io.github.Pithos')
            self._journal.setFormatter(logging.Formatter())
            self._logger = logging.getLogger()
            self.preferences_dialog = LoggingPluginPrefsDialog(
                self.window, self.settings)
        except ImportError:
            self.prepare_complete(error=_('Systemd Python module not found'))
        else:
            self.prepare_complete()

    def on_enable(self):
        self._on_logging_changed(None, self.settings['data'] or 'verbose')
        self._logger.addHandler(self._journal)
        self._logging_changed_handler = self.preferences_dialog.connect(
            'logging-changed', self._on_logging_changed)

    def _on_logging_changed(self, prefs_dialog, level):
        self.settings['data'] = level
        self._journal.setLevel(LOG_LEVELS[level])
        logging.info('setting journald logging level to: {}'.format(level))

    def on_disable(self):
        if self._logging_changed_handler:
            self.preferences_dialog.disconnect(self._logging_changed_handler)
        self._logger.removeHandler(self._journal)