class JournalLoggingPlugin(PithosPlugin):
    """Pithos plugin that mirrors application logging to systemd-journald."""

    preference = 'journald-logging'
    description = _('Store logs with the journald service')

    # Signal-handler id returned by preferences_dialog.connect();
    # None while not connected.
    _logging_changed_handler = None

    def on_prepare(self):
        """Create the journal handler and prefs dialog, then report readiness."""
        try:
            from systemd.journal import JournalHandler
            self._journal = JournalHandler(
                SYSLOG_IDENTIFIER='io.github.Pithos')
            # Journald stores severity/metadata structurally, so a bare
            # Formatter (message only) is sufficient.
            self._journal.setFormatter(logging.Formatter())
            self._logger = logging.getLogger()
            self.preferences_dialog = LoggingPluginPrefsDialog(
                self.window, self.settings)
        except ImportError:
            self.prepare_complete(error=_('Systemd Python module not found'))
        else:
            self.prepare_complete()

    def on_enable(self):
        """Attach the journal handler and follow level changes from prefs."""
        self._on_logging_changed(None, self.settings['data'] or 'verbose')
        self._logger.addHandler(self._journal)
        self._logging_changed_handler = self.preferences_dialog.connect(
            'logging-changed', self._on_logging_changed)

    def _on_logging_changed(self, prefs_dialog, level):
        """Persist the chosen level name and apply it to the journal handler."""
        self.settings['data'] = level
        self._journal.setLevel(LOG_LEVELS[level])
        # Lazy %-args: the message is only built if INFO is actually emitted.
        logging.info('setting journald logging level to: %s', level)

    def on_disable(self):
        """Detach the journal handler and drop the prefs-dialog connection."""
        if self._logging_changed_handler:
            self.preferences_dialog.disconnect(self._logging_changed_handler)
            # FIX: reset so a repeated disable can't disconnect a stale id.
            self._logging_changed_handler = None
        self._logger.removeHandler(self._journal)
def get_main_logger(use_console=True, use_journal=False, use_logbuf=True,
                    console_color=True, log_default_delta=0):
    """
    Returns the top-level logger object. This is the only API call from
    this file that should be used outside.

    The logger is built once and memoized in the module-global LOGGER;
    subsequent calls return the same object regardless of arguments.
    """
    global LOGGER
    if LOGGER is not None:
        return LOGGER
    # Register the custom TRACE level and MPMLogger class before the
    # 'MPM' logger is created, so it picks both up.
    logging.addLevelName(TRACE, 'TRACE')
    logging.setLoggerClass(MPMLogger)
    LOGGER = logging.getLogger('MPM')
    if use_console:
        console_handler = ColorStreamHandler() if console_color else logging.StreamHandler()
        console_formatter = logging.Formatter("[%(name)s] [%(levelname)s] %(message)s")
        console_handler.setFormatter(console_formatter)
        LOGGER.addHandler(console_handler)
    if use_journal:
        # Deferred import: python-systemd may not be installed.
        from systemd.journal import JournalHandler
        journal_handler = JournalHandler(SYSLOG_IDENTIFIER='usrp_hwd')
        journal_formatter = logging.Formatter('[%(levelname)s] [%(module)s] %(message)s')
        journal_handler.setFormatter(journal_formatter)
        LOGGER.addHandler(journal_handler)
    if use_logbuf:
        # py_log_buf is provided by MPMLogger — presumably a bounded buffer;
        # TODO confirm against the MPMLogger definition.
        queue_handler = LossyQueueHandler(LOGGER.py_log_buf)
        LOGGER.addHandler(queue_handler)
    # Set default level:
    from usrp_mpm import prefs
    mpm_prefs = prefs.get_prefs()
    # Each delta step moves one logging level (levels are multiples of 10);
    # clamp to CRITICAL above and TRACE below, rounded down to a level boundary.
    default_log_level = int(
        min(mpm_prefs.get_log_level() - log_default_delta * 10, CRITICAL))
    default_log_level = max(TRACE, default_log_level - (default_log_level % 10))
    LOGGER.setLevel(default_log_level)
    # Connect to C++ logging:
    if LOGGER.cpp_log_buf is not None:
        lib_logger = LOGGER.getChild('lib')

        def log_from_cpp():
            " Callback for logging from C++ "
            log_level, component, message = LOGGER.cpp_log_buf.pop()
            if log_level:
                lib_logger.log(log_level, "[%s] %s", component,
                               message.strip())
        LOGGER.cpp_log_buf.set_notify_callback(log_from_cpp)
    # Flush errors stuck in the prefs module:
    log = LOGGER.getChild('prefs')
    for err_msg in mpm_prefs.get_log_errors():
        log.error(err_msg)
    return LOGGER
def add_journal_handler():
    """Attach a systemd-journald handler (with level-name prefix) to LOG."""
    # Deferred import so the module still loads without python-systemd.
    from systemd.journal import \
        JournalHandler  # pylint: disable=import-outside-toplevel

    handler = JournalHandler()
    # Include the level name in each journal entry.
    handler.setFormatter(logging.Formatter('[%(levelname)s] %(message)s'))
    LOG.addHandler(handler)
def get_main_logger(
        use_console=True,
        use_journal=False,
        use_logbuf=True,
        console_color=True,
        log_default_delta=0
):
    """
    Returns the top-level logger object. This is the only API call from
    this file that should be used outside.

    Built once and memoized in the module-global LOGGER; later calls
    return the cached logger regardless of arguments.
    """
    global LOGGER
    if LOGGER is not None:
        return LOGGER
    # Register TRACE and the MPMLogger class before creating the 'MPM' logger.
    logging.addLevelName(TRACE, 'TRACE')
    logging.setLoggerClass(MPMLogger)
    LOGGER = logging.getLogger('MPM')
    if use_console:
        console_handler = ColorStreamHandler() if console_color else logging.StreamHandler()
        console_formatter = logging.Formatter("[%(name)s] [%(levelname)s] %(message)s")
        console_handler.setFormatter(console_formatter)
        LOGGER.addHandler(console_handler)
    if use_journal:
        # Deferred import: python-systemd may not be installed.
        from systemd.journal import JournalHandler
        journal_handler = JournalHandler(SYSLOG_IDENTIFIER='usrp_hwd')
        journal_formatter = logging.Formatter('[%(levelname)s] [%(module)s] %(message)s')
        journal_handler.setFormatter(journal_formatter)
        LOGGER.addHandler(journal_handler)
    if use_logbuf:
        queue_handler = LossyQueueHandler(LOGGER.py_log_buf)
        LOGGER.addHandler(queue_handler)
    # Set default level:
    from usrp_mpm import prefs
    mpm_prefs = prefs.get_prefs()
    # Each delta step is one logging level (multiples of 10); clamp to
    # [TRACE, CRITICAL] and round down to a level boundary.
    default_log_level = int(min(
        mpm_prefs.get_log_level() - log_default_delta * 10,
        CRITICAL
    ))
    default_log_level = max(TRACE, default_log_level - (default_log_level % 10))
    LOGGER.setLevel(default_log_level)
    # Connect to C++ logging:
    if LOGGER.cpp_log_buf is not None:
        lib_logger = LOGGER.getChild('lib')

        def log_from_cpp():
            " Callback for logging from C++ "
            log_level, component, message = LOGGER.cpp_log_buf.pop()
            if log_level:
                lib_logger.log(log_level, "[%s] %s", component, message.strip())
        LOGGER.cpp_log_buf.set_notify_callback(log_from_cpp)
    # Flush errors stuck in the prefs module:
    log = LOGGER.getChild('prefs')
    for err_key, err_msg in mpm_prefs.get_log_errors():
        log.error('%s: %s', err_key, err_msg)
    return LOGGER
def __init__(self):
    """Hook the root logger up to systemd-journald at INFO level."""
    root_logger = logging.getLogger()

    # Instantiate the JournalHandler and give it a level-name prefix.
    handler = JournalHandler()
    handler.setFormatter(logging.Formatter('[%(levelname)s] %(message)s'))

    root_logger.addHandler(handler)
    root_logger.setLevel(logging.INFO)
def do_command_line(self, command_line): options = command_line.get_options_dict() # First, get rid of existing logging handlers due to call in header as per # http://stackoverflow.com/questions/1943747/python-logging-before-you-run-logging-basicconfig logging.root.handlers = [] # Show the version on local instance and exit if options.contains('version'): # Broken bindings... type(command_line).do_print_literal( command_line, "Pithos {}\n".format(self.version)) return 0 handlers = [] try: from systemd.journal import JournalHandler journal = JournalHandler( SYSLOG_IDENTIFIER=self.props.application_id) # We can be more verbose with the journal and filter it later # and don't need fancy formatting as its part of the structure journal.setLevel(logging.INFO) journal.setFormatter(logging.Formatter()) handlers.append(journal) except ImportError: pass # Set the logging level to show debug messages if options.contains('debug'): log_level = logging.DEBUG elif options.contains('verbose'): log_level = logging.INFO else: log_level = logging.WARN stream = logging.StreamHandler() stream.setLevel(log_level) stream.setFormatter( logging.Formatter( fmt= '%(levelname)s - %(module)s:%(funcName)s:%(lineno)d - %(message)s' )) handlers.append(stream) logging.basicConfig(level=logging.NOTSET, handlers=handlers) self.test_mode = options.lookup_value('test') self.do_activate() return 0
def setup_logging(log):
    """Route logs to journald when enable_journal is set, else to stderr
    via basicConfig. Either way the effective level is DEBUG."""
    if not enable_journal:
        log.setLevel(logging.DEBUG)
        logging.basicConfig(format="%(message)s")
        return

    journal_handler = JournalHandler(SYSLOG_IDENTIFIER="ooni-api")
    journal_handler.setFormatter(logging.Formatter("%(levelname)s %(message)s"))
    log.root.addHandler(journal_handler)
    log.root.setLevel(logging.DEBUG)
def journald_setup(): """Set up and start journald logging""" if DEBUG_MODE: LOG.setLevel(logging.DEBUG) LOG.addHandler(logging.StreamHandler(sys.stderr)) with suppress(ImportError): from systemd.journal import JournalHandler journal_handler = JournalHandler() log_entry_format = '[%(levelname)s] %(message)s' journal_handler.setFormatter(logging.Formatter(log_entry_format)) LOG.setLevel(logging.INFO) LOG.addHandler(journal_handler)
def add_stderr_journal_handler(logger=logging.root, level=logging.INFO):
    """Attach a journald handler (or a StreamHandler fallback) to *logger*.

    Lowers the logger's level to *level* if it is currently stricter.
    Returns the handler that was added.
    """
    if logger.getEffectiveLevel() > level:
        logger.setLevel(level)
    try:
        from systemd.journal import JournalHandler
        from idb import __version__
        h = JournalHandler(
            SYSLOG_IDENTIFIER=os.path.basename(sys.argv[0]),
            VERSION=__version__)
    except Exception:
        # FIX: was a bare `except:`, which also swallowed KeyboardInterrupt
        # and SystemExit. Any failure to build the journal handler (missing
        # systemd bindings, import errors) still falls back to stderr.
        h = logging.StreamHandler()
    h.setFormatter(logging.Formatter(TIMELESS_FORMAT))
    h.setLevel(level)
    logger.addHandler(h)
    return h
def do_command_line(self, command_line):
    """Configure logging from the parsed CLI options, then activate."""
    options = command_line.get_options_dict()

    # First, get rid of existing logging handlers due to call in header as per
    # http://stackoverflow.com/questions/1943747/python-logging-before-you-run-logging-basicconfig
    logging.root.handlers = []

    # Show the version on local instance and exit
    if options.contains('version'):
        # Broken bindings...
        type(command_line).do_print_literal(command_line, "Pithos {}\n".format(self.version))
        return 0

    handlers = []
    try:
        from systemd.journal import JournalHandler
        journal_handler = JournalHandler(SYSLOG_IDENTIFIER=self.props.application_id)
        # We can be more verbose with the journal and filter it later
        # and don't need fancy formatting as its part of the structure
        journal_handler.setLevel(logging.INFO)
        journal_handler.setFormatter(logging.Formatter())
    except ImportError:
        pass
    else:
        handlers.append(journal_handler)

    # Map the CLI verbosity flags onto a console log level.
    if options.contains('debug'):
        console_level = logging.DEBUG
    elif options.contains('verbose'):
        console_level = logging.INFO
    else:
        console_level = logging.WARN

    console_handler = logging.StreamHandler()
    console_handler.setLevel(console_level)
    console_handler.setFormatter(logging.Formatter(fmt='%(levelname)s - %(module)s:%(funcName)s:%(lineno)d - %(message)s'))
    handlers.append(console_handler)

    # Root level stays NOTSET; the handlers filter individually.
    logging.basicConfig(level=logging.NOTSET, handlers=handlers)

    self.test_mode = options.lookup_value('test')

    self.do_activate()
    return 0
def _setup_logging(self) -> None: root = logging.getLogger() if self.verbose: root.setLevel(logging.DEBUG) else: root.setLevel(logging.INFO) if self.journal: handler = JournalHandler() formatter = logging.Formatter('%(message)s') else: handler = logging.StreamHandler(sys.stderr) formatter = logging.Formatter('%(levelname)s: %(message)s') handler.setFormatter(formatter) root.addHandler(handler) self.logger = logging.getLogger("core")
def get_daemon_logger(filepath, log_format=None, loglevel=logging.INFO, journal=False):
    """Configure the root logger with a journald, watched-file, or stream
    handler and return (logger, handler).

    Exits the process with os.EX_OSERR if the handler cannot be created.
    """
    root = logging.getLogger()
    root.setLevel(loglevel)
    try:
        if journal:
            # Deferred import: python-systemd may not be installed.
            from systemd.journal import JournalHandler
            log_handler = JournalHandler(SYSLOG_IDENTIFIER=basename(sys.argv[0]))
        elif filepath:
            log_handler = WatchedFileHandler(filepath)
        else:
            log_handler = StreamHandler()
    except Exception as err:  # pylint: disable=broad-except
        print("Fatal error creating client_logger: " + str(err))
        sys.exit(os.EX_OSERR)
    if log_format:
        log_handler.setFormatter(logging.Formatter(log_format))
    root.addHandler(log_handler)
    return root, log_handler
def setup_logging(options): logger = logging.getLogger("pdud") """ Setup the log handler and the log level """ if options.journal: from systemd.journal import JournalHandler handler = JournalHandler(SYSLOG_IDENTIFIER="pdudaemon") handler.setFormatter(logging.Formatter(logging_FORMAT_JOURNAL)) elif options.logfile == "-" or not options.logfile: handler = logging.StreamHandler(sys.stdout) handler.setFormatter(logging.Formatter(logging_FORMAT)) else: handler = WatchedFileHandler(options.logfile) handler.setFormatter(logging.Formatter(logging_FORMAT)) logger.addHandler(handler) options.loglevel = options.loglevel.upper() if options.loglevel == "DEBUG": logger.setLevel(logging.DEBUG) elif options.loglevel == "INFO": logger.setLevel(logging.INFO) elif options.loglevel == "WARNING": logger.setLevel(logging.WARNING) else: logger.setLevel(logging.ERROR)
def setup_logging(options, settings):
    """Setup the log handler and the log level.

    The level from settings['daemon']['logging_level'] (when present)
    overrides options.loglevel.
    """
    # FIX: the original placed this docstring as a stray (no-op) string
    # statement after the first line; it is now a real docstring.
    logger = logging.getLogger("pdud")
    if options.journal:
        from systemd.journal import JournalHandler
        handler = JournalHandler(SYSLOG_IDENTIFIER="pdudaemon")
        handler.setFormatter(logging.Formatter(logging_FORMAT_JOURNAL))
    elif options.logfile == "-" or not options.logfile:
        handler = logging.StreamHandler(sys.stdout)
        handler.setFormatter(logging.Formatter(logging_FORMAT))
    else:
        handler = WatchedFileHandler(options.logfile)
        handler.setFormatter(logging.Formatter(logging_FORMAT))
    logger.addHandler(handler)
    settings_level = settings.get('daemon', {}).get('logging_level', None)
    if settings_level:
        options.loglevel = settings_level.upper()
    else:
        options.loglevel = options.loglevel.upper()
    # Unrecognized level names fall back to ERROR (same as before).
    levels = {
        "DEBUG": logging.DEBUG,
        "INFO": logging.INFO,
        "WARNING": logging.WARNING,
    }
    logger.setLevel(levels.get(options.loglevel, logging.ERROR))
def get_main_logger(): """ Returns a main logger object for use by the Gtk.Application. Should not be used by normal applications. Returns: :logging.Logger: The logger for the main application """ # Use verbose debug logging for now. console_loglevel = VERBOSITY_LEVELS[2] file_loglevel = VERBOSITY_LEVELS[2] console_fmt = logging.Formatter('%(name)s: %(levelname)s %(message)s') file_fmt = logging.Formatter( '%(asctime)s - %(name)s: %(levelname)s %(message)s') log = logging.getLogger('toggledarkly') console_log = logging.StreamHandler() console_log.setFormatter(console_fmt) console_log.setLevel(console_loglevel) log.addHandler(console_log) file_log = handlers.RotatingFileHandler(LOG_FILE_PATH, maxBytes=(1048576 * 5), backupCount=5) file_log.setFormatter(file_fmt) file_log.setLevel(file_loglevel) log.addHandler(file_log) if SYSTEMD_SUPPORT: journald_log = JournalHandler() journald_log.setLevel(file_loglevel) journald_log.setFormatter(console_fmt) log.addHandler(journald_log) log.setLevel(VERBOSITY_LEVELS[2]) return log
def get_daemon_logger(filepath, log_format=None, loglevel=logging.INFO, journal=False):
    """Configure the root logger for daemon use.

    Args:
        filepath: path for a WatchedFileHandler; ignored when journal=True.
        log_format: optional format string applied to the chosen handler.
        loglevel: level set on the root logger (default INFO).
        journal: log to systemd-journald instead of a file/stream.

    Returns:
        (logger, handler) tuple.

    Exits the process with os.EX_OSERR if the handler cannot be created.
    """
    logger = logging.getLogger()
    logger.setLevel(loglevel)
    try:
        if journal:
            # Deferred import: python-systemd may not be installed.
            from systemd.journal import JournalHandler
            handler = JournalHandler(SYSLOG_IDENTIFIER=basename(sys.argv[0]))
        elif filepath:
            handler = WatchedFileHandler(filepath)
        else:
            handler = StreamHandler()
    except Exception as e:  # pylint: disable=broad-except
        print("Fatal error creating client_logger: " + str(e))
        sys.exit(os.EX_OSERR)
    if (log_format):
        handler.setFormatter(logging.Formatter(log_format))
    logger.addHandler(handler)
    return logger, handler
def setup_logger(log_level, use_stderr=False):
    """Install a single root-logger handler chosen by platform/environment
    (stderr, journald, or syslog) and set the root level."""
    if use_stderr or os.environ.get('PLATFORM') == 'docker':
        handler = logging.StreamHandler(sys.stderr)
    elif JournalHandler:
        handler = JournalHandler()
    # Fallbacks when JournalHandler isn't available.
    elif sys.platform == 'linux':
        handler = logging.handlers.SysLogHandler(address='/dev/log')
    elif sys.platform == 'darwin':
        handler = logging.handlers.SysLogHandler(address='/var/run/syslog')
    elif sys.platform.lower() in ['windows', 'win32']:
        handler = logging.handlers.SysLogHandler()
    else:
        # Unknown platform, revert to stderr
        handler = logging.StreamHandler(sys.stderr)

    log_format = logging.Formatter(
        fmt='{levelname[0]}{asctime}.{msecs:03.0f} (unknown):{lineno}] {message}',
        datefmt='%m%d %H:%M:%S',
        style='{')
    handler.setFormatter(log_format)

    root = logging.getLogger()
    root.setLevel(log_level)
    root.addHandler(handler)
# Demo script: logging to a file via basicConfig and to journald via a
# dedicated logger, showing how propagate affects the root logger.
import logging
from systemd.journal import JournalHandler

# Logging to a file
# root logger configure
logging.basicConfig(filename="example.log", level=logging.WARN,
                    format="%(levelname)s %(asctime)s %(message)s")
# root logger log
logging.warning("this is a formatted warning")

# Logging to the sytemd journal
jlogger = logging.getLogger("journal-logger")
jhandler = JournalHandler()
jformatter = logging.Formatter(fmt="%(levelname)s %(message)s")
jhandler.setFormatter(jformatter)
jlogger.addHandler(jhandler)

# This message will propagate to the root logger as well
jlogger.warning("This is a warning sent to the journal")

# Prevent the message propagating to the root logger
jlogger.propagate = False

## journalctl | tail
jlogger.warning("Warning ONLY to journal")

# Within an exception handler you can print a stack trace:
# NOTE(review): this chunk is truncated — the except body of bad_idea()
# is missing from this view.
def bad_idea():
    try:
        c = 1 / 0  # Easy way to force an exception
    except:
# auto-cpufreq module header: imports, journald logging setup, and constants.
import distro
import time
import click
import warnings
from math import isclose
from pathlib import Path
from subprocess import getoutput, call, run, check_output, DEVNULL
import logging
import random
import time  # NOTE(review): duplicate of the `import time` above.
from systemd.journal import JournalHandler

# setup logging
logger = logging.getLogger(__name__)
journald_handler = JournalHandler()
journald_handler.setFormatter(
    logging.Formatter('[%(levelname)s] auto-cpufreq: %(message)s'))
logger.addHandler(journald_handler)
logger.setLevel(logging.INFO)

warnings.filterwarnings("ignore")

# ToDo:
# - re-enable CPU fan speed display and make more generic and not only for thinkpad
# - replace get system/CPU load from: psutil.getloadavg() | available in 5.6.2)

SCRIPTS_DIR = Path("/usr/local/share/auto-cpufreq/scripts/")

# from the highest performance to the lowest
# NOTE(review): this chunk is truncated — the tuple is not closed in this view.
ALL_GOVERNORS = (
    "performance",
    "ondemand",
def main(self, args):
    """Entry point: configure logging, merge CLI options with the stored
    configuration, and install/update the kernel, initrd, and (optionally)
    the EFI stub/loader entries.

    Returns 0 on success; calls exit() for early-termination paths.

    NOTE(review): this block was reassembled from an extraction-mangled
    source (string literals were split across lines); the long message
    strings below are best-effort reconstructions — confirm against the
    upstream kernelstub source.
    """
    # --- Logging: console + rotating file (+ journald when available) ---
    log_file_path = '/var/log/kernelstub.log'
    if args.log_file:
        log_file_path = args.log_file

    verbosity = 0
    if args.verbosity:
        verbosity = args.verbosity
    if verbosity > 2:
        verbosity = 2

    if args.print_config:
        verbosity = 1

    level = {
        0: logging.WARNING,
        1: logging.INFO,
        2: logging.DEBUG,
    }

    console_level = level[verbosity]
    file_level = level[2]

    stream_fmt = logging.Formatter(
        '%(name)-21s: %(levelname)-8s %(message)s')
    file_fmt = logging.Formatter(
        '%(asctime)s - %(name)-21s: %(levelname)-8s %(message)s')
    log = logging.getLogger('kernelstub')

    console_log = logging.StreamHandler()
    console_log.setFormatter(stream_fmt)
    console_log.setLevel(console_level)

    file_log = handlers.RotatingFileHandler(
        log_file_path, maxBytes=(1048576*5), backupCount=5)
    file_log.setFormatter(file_fmt)
    file_log.setLevel(file_level)

    log.addHandler(console_log)
    log.addHandler(file_log)

    if systemd_support:
        journald_log = JournalHandler()
        journald_log.setLevel(file_level)
        journald_log.setFormatter(stream_fmt)
        log.addHandler(journald_log)

    log.setLevel(logging.DEBUG)

    log.debug('Got command line options: %s' % args)

    # --- Figure out runtime options ---
    no_run = False
    if args.dry_run:
        no_run = True

    config = Config.Config()
    configuration = config.config['user']

    if args.esp_path:
        configuration['esp_path'] = args.esp_path

    root_path = "/"
    if args.root_path:
        root_path = args.root_path

    opsys = Opsys.OS()

    if args.kernel_path:
        log.debug(
            'Manually specified kernel path:\n ' +
            ' %s' % args.kernel_path)
        opsys.kernel_path = args.kernel_path
    else:
        opsys.kernel_path = os.path.join(root_path, opsys.kernel_name)

    if args.initrd_path:
        log.debug(
            'Manually specified initrd path:\n ' +
            ' %s' % args.initrd_path)
        opsys.initrd_path = args.initrd_path
    else:
        opsys.initrd_path = os.path.join(root_path, opsys.initrd_name)

    # FIX: log.exception() outside an except block logs a bogus
    # "NoneType: None" traceback; use log.error() for plain messages.
    if not os.path.exists(opsys.kernel_path):
        log.error('Can\'t find the kernel image! \n\n'
                  'Please use the --kernel-path option to specify '
                  'the path to the kernel image')
        exit(0)

    if not os.path.exists(opsys.initrd_path):
        log.error('Can\'t find the initrd image! \n\n'
                  'Please use the --initrd-path option to specify '
                  'the path to the initrd image')
        exit(0)

    # Check for kernel parameters. Without them, stop and fail
    if args.k_options:
        configuration['kernel_options'] = self.parse_options(
            args.k_options.split())
    else:
        try:
            configuration['kernel_options']
        except KeyError:
            # FIX: added the missing space in "from default" (the original
            # message read "fromdefault").
            error = ("cmdline was 'InvalidConfig'\n\n"
                     "Could not find any valid configuration. This "
                     "probably means that the configuration file is "
                     "corrupt. Either remove it to regenerate it from "
                     "default or fix the existing one.")
            log.exception(error)
            # FIX: removed the unreachable exit(168) that followed this raise.
            raise CmdLineError("No Kernel Parameters found")

    log.debug(config.print_config())

    if args.preserve_live and configuration['live_mode']:
        configuration['live_mode'] = True
        log.warning(
            'Live mode is enabled!\n'
            'Kernelstub is running in live environment mode. This usually '
            'means that you are running a live disk, and kernelstub should '
            'not run. We are thus exiting with 0.\n'
            'If you are not running a live disk, please run '
            '`sudo kernelstub` to disable live mode.'
        )
        exit(0)

    configuration['live_mode'] = False

    if args.setup_loader:
        configuration['setup_loader'] = True
    if args.off_loader:
        configuration['setup_loader'] = False

    if args.install_stub:
        configuration['manage_mode'] = False
    if args.manage_mode:
        configuration['manage_mode'] = True

    log.debug('Checking configuration integrity...')
    try:
        kernel_opts = configuration['kernel_options']
        esp_path = configuration['esp_path']
        setup_loader = configuration['setup_loader']
        manage_mode = configuration['manage_mode']
        force = configuration['force_update']
    except KeyError:
        # log.exception is correct here — we are inside an except block.
        # FIX: corrected the "can\'nt" typo in the message.
        log.exception(
            'Malformed configuration! \n'
            'The configuration we got is bad, and we can\'t continue. '
            'Please check the config files and make sure they are correct. '
            'If you can\'t figure it out, then deleting them should fix '
            'the errors and cause kernelstub to regenerate them from '
            'Default. \n\n You can use "-vv" to get the configuration used.')
        log.debug('Configuration we got: \n\n%s' % config.print_config())
        exit(169)

    if args.add_options:
        add_opts = args.add_options.split(" ")
        add_opts = config.parse_options(add_opts)
        for opt in add_opts:
            if opt not in kernel_opts:
                kernel_opts.append(opt)
        configuration['kernel_options'] = kernel_opts

    if args.remove_options:
        rem_opts = args.remove_options.split(" ")
        rem_opts = config.parse_options(rem_opts)
        kernel_opts = list(set(kernel_opts) - set(rem_opts))
        configuration['kernel_options'] = kernel_opts

    # FIX: replaced `== True` with truthiness and merged the two checks.
    if args.force_update or configuration['force_update']:
        force = True

    log.debug('Structing objects')
    drive = Drive.Drive(root_path=root_path, esp_path=esp_path)
    nvram = Nvram.NVRAM(opsys.name, opsys.version)
    installer = Installer.Installer(nvram, opsys, drive)

    # Log some helpful information, to file and optionally console
    info = (
        ' OS:..................%s %s\n' % (opsys.name_pretty, opsys.version) +
        ' Root partition:......%s\n' % drive.root_fs +
        ' Root FS UUID:........%s\n' % drive.root_uuid +
        ' ESP Path:............%s\n' % esp_path +
        ' ESP Partition:.......%s\n' % drive.esp_fs +
        ' ESP Partition #:.....%s\n' % drive.esp_num +
        ' NVRAM entry #:.......%s\n' % nvram.os_entry_index +
        ' Boot Variable #:.....%s\n' % nvram.order_num +
        ' Kernel Boot Options:.%s\n' % " ".join(kernel_opts) +
        ' Kernel Image Path:...%s\n' % opsys.kernel_path +
        ' Initrd Image Path:...%s\n' % opsys.initrd_path +
        ' Force-overwrite:.....%s\n' % str(force))
    log.info('System information: \n\n%s' % info)

    if args.print_config:
        all_config = (
            ' ESP Location:..................%s\n' % configuration['esp_path'] +
            ' Management Mode:...............%s\n' % configuration['manage_mode'] +
            ' Install Loader configuration:..%s\n' % configuration['setup_loader'] +
            ' Configuration version:.........%s\n' % configuration['config_rev'])
        log.info('Configuration details: \n\n%s' % all_config)
        exit(0)

    log.debug('Setting up boot...')
    kopts = 'root=UUID=%s ro %s' % (drive.root_uuid, " ".join(kernel_opts))
    log.debug('kopts: %s' % kopts)

    installer.setup_kernel(
        kopts, setup_loader=setup_loader, overwrite=force, simulate=no_run)
    try:
        installer.backup_old(
            kopts, setup_loader=setup_loader, simulate=no_run)
    except Exception as e:
        log.debug('Couldn\'t back up old kernel. \nThis might just mean ' +
                  'You don\'t have an old kernel installed. If you do, try ' +
                  'with -vv to see debuging information')
        log.debug(e)

    installer.copy_cmdline(simulate=no_run)

    if not manage_mode:
        installer.setup_stub(kopts, simulate=no_run)

    log.debug('Saving configuration to file')
    config.config['user'] = configuration
    config.save_config()
    log.debug('Setup complete!\n\n')
    return 0
""" Its the relay """ # outputWaterPump = 0 outputWaterPump = int(config['default']['outputWaterPump']) # Telegram token token = config['telegram']['telegramToken'] chatId = config['telegram']['chatId'] # constants """ Time to keep lights on """ lightsTimeOn = 60 * 60 * 4 # 10 hours """ LOGGING """ log = logging.getLogger('SpaceVegetables') log_fmt = logging.Formatter("%(levelname)s %(message)s") log_ch = JournalHandler() log_ch.setFormatter(log_fmt) log.addHandler(log_ch) log.setLevel(logging.DEBUG) s = SimpleThreadedXMLRPCServer((IP_address, 8000), allow_none=True) s.register_introspection_functions() #enables use of s.system.listMethods() log.info("Starting XMLRPC Server") """ Next functions will be displayed using threads """ """ This is a function to delete text from the LCD The element argument is what you want to delete Remove text is draw a rectangle where text is written 0 = lights 1 = air pump 2 = water pump
# Cloudflare dynamic-DNS updater configuration.
# NOTE(review): credentials are blank here — presumably filled in by the
# operator before deployment; confirm they are not expected from env vars.
CF_USER = ''
CF_API_KEY = ''
DNS_ZONE_NAME = ''
DNS_ZONE_ID = ''
RECORD_NAME = ''
# -----------------------------
API_ENDPOINT = 'https://api.cloudflare.com/client/v4/'
API_HEADERS = {'X-Auth-Email': CF_USER, 'X-Auth-Key': CF_API_KEY}
# -----------------------------
# SET UP LOGGING
# -----------------------------
logHandler = JournalHandler(SYSLOG_IDENTIFIER='cf-dynamic-dns')
formatter = logging.Formatter('[%(levelname)s] - %(message)s')
logHandler.setFormatter(formatter)
logger = logging.getLogger('cf-dynamic-dns')
# Keep messages out of the root logger; journald only.
logger.propagate = False
logger.setLevel(logging.DEBUG)
logger.addHandler(logHandler)
# -----------------------------


def main():
    externalIP = GetExternalIP()
    records = GetZoneRecords()
    if (records.get('success')):
        record = records.get('result')[0]
        recordID = record.get('id')
        recordIP = record.get('content')
        # NOTE(review): main() is truncated in this chunk.
# Module-level logger wired to systemd-journald: INFO threshold on both
# the logger and the handler, with timestamp/name/level formatting.
import logging
from systemd.journal import JournalHandler

logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
handler = JournalHandler()
handler.setLevel(logging.INFO)
formatter = logging.Formatter(
    '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
# NOTE(review): this chunk starts mid-statement — the opening
# `... = logging.Formatter(` for this format string is outside this view.
    '%(asctime)s - %(name)-21s: %(levelname)-8s %(message)s')

log = logging.getLogger('pyflatpak')

console_log = logging.StreamHandler()
console_log.setFormatter(stream_fmt)
console_log.setLevel(console_level)

file_log = handlers.RotatingFileHandler(log_file_path,
                                        maxBytes=(1048576 * 5),
                                        backupCount=5)
file_log.setFormatter(file_fmt)
file_log.setLevel(file_level)

log.addHandler(console_log)
# File logging is deliberately left disabled here.
#log.addHandler(file_log)

if SYSTEMD_SUPPORT:
    journald_log = JournalHandler()
    journald_log.setLevel(file_level)
    journald_log.setFormatter(stream_fmt)
    log.addHandler(journald_log)

log.setLevel(logging.DEBUG)

remotes = Remotes.Remotes()


def validate(url):
    # A flatpak repo URL is identified purely by its .flatpakrepo suffix.
    valid_fp_url = url.endswith('.flatpakrepo')
    return valid_fp_url
# DNS service header: imports, journald logging, and a leveled log() helper.
import sys, dns.resolver, signal, logging, ssl, time
from select import epoll, EPOLLIN, EPOLLHUP
from systemd.journal import JournalHandler
from socket import *


class CustomAdapter(logging.LoggerAdapter):
    # Prefixes every message with the '[origin]' passed via extra.
    def process(self, msg, kwargs):
        return '[{}] {}'.format(self.extra['origin'], msg), kwargs


## == Setup logging:
logger = logging.getLogger()  # __name__
journald_handler = JournalHandler()
journald_handler.setFormatter(
    logging.Formatter('[{levelname}] {message}', style='{'))
logger.addHandler(journald_handler)
logger.setLevel(logging.DEBUG)

# Application-specific verbosity threshold (separate from logging levels).
LOG_LEVEL = 5


def log(*msg, origin='UNKNOWN', level=5, **kwargs):
    # Drop messages above the configured verbosity.
    if level <= LOG_LEVEL:
        # Normalize all parts to str (bytes are decoded with backslash escapes).
        msg = [
            item.decode('UTF-8', errors='backslashreplace')
            if type(item) == bytes else item for item in msg
        ]
        msg = [str(item) if type(item) != str else item for item in msg]
        log_adapter = CustomAdapter(logger, {'origin': origin})
        if level <= 1:
            print('[!] ' + ' '.join(msg))
            # NOTE(review): this chunk is truncated — the rest of log()
            # is not visible here.
def get_args(self):
    """Parse and validate CLI arguments, configure the module logger, and
    return the parsed namespace."""
    arg_parser = argparse.ArgumentParser(
        description="Performance monitor server.")
    arg_parser.add_argument('-p', '--port', type=int,
                            default=config.get_default_port(), help='device')
    arg_parser.add_argument('-i', '--interval', type=int, default=5,
                            help='stats interval')
    arg_parser.add_argument('-d', '--device', type=str, default='sda',
                            help='disk device name')
    arg_parser.add_argument('-s', '--smart', default=False,
                            action="store_true",
                            help='smartctl data (root required)')
    arg_parser.add_argument('-o', '--log_handler', type=str,
                            default='stderr', choices=['journal', 'stderr'],
                            help='log handler')
    arg_parser.add_argument('-l', '--log_level', type=str, default='info',
                            choices=['debug', 'info'], help='log level')
    arg_parser.add_argument('-t', '--test', type=str, default='',
                            help='test routines')
    args = arg_parser.parse_args()

    # Validate the polling interval and normalize the device name
    # (strip any leading path such as /dev/).
    if args.interval < 1:
        raise Exception(
            f'parameter error: invalid interval: {args.interval}')
    args.device = args.device.split('/')[-1]
    if args.device == '':
        raise Exception(
            f'parameter error: invalid disk device name: "{args.device}"')

    # Attach the requested log handler to the module logger.
    if args.log_handler == 'journal':
        chosen_handler = JournalHandler()
        chosen_handler.setFormatter(
            logging.Formatter('%(levelname)s: %(message)s'))
    else:
        chosen_handler = logging.StreamHandler()
        chosen_handler.setFormatter(
            logging.Formatter('%(asctime)s - %(levelname)s: %(message)s'))
    log.addHandler(chosen_handler)
    log.setLevel(getattr(logging, args.log_level.upper()))

    log.info('Parameters: {}'.format(str(args)))
    return args
# xcape helper daemon header: imports, journald logging, and key mappings.
import time
import logging
import signal
import traceback
import Xlib
import Xlib.display
import re
from subprocess import run, PIPE
from contextlib import contextmanager
from systemd.journal import JournalHandler

# Module logger: INFO to journald only (console handler left disabled).
journald = JournalHandler()
fmt = logging.Formatter("[%(levelname)s] %(message)s")
journald.setFormatter(fmt)
logger = logging.getLogger(__name__)
# logger.setLevel(logging.DEBUG)
logger.setLevel(logging.INFO)
logger.addHandler(journald)
# console = logging.StreamHandler()
# console.setFormatter(fmt)
# logger.addHandler(console)

# xcape mappings: tapping the key on the left emits the key on the right.
# NOTE(review): this chunk is truncated — the tuple is not closed here.
XCAPE_CODES = (
    "Control_L=Escape;"
    "Control_R=Return;"
    "Super_L=Tab;"
    "Super_R=backslash;"
    "Alt_R=space;"
    "ISO_Level3_Shift=XF86Search"
# P2PoW logging: colored console output plus systemd journal output.
import logging
from systemd.journal import JournalHandler
from colorlog import ColoredFormatter

# Log Format, colored format
log_formatter_file = logging.Formatter(
    '%(asctime)s | %(levelname)s | %(message)s')
log_formatter_journal = logging.Formatter('%(levelname)s | %(message)s')
log_formatter_colored = ColoredFormatter(
    "%(asctime)s | %(log_color)s%(levelname)s%(reset)s | %(message)s%(reset)s")

# Setup Journal Handler (Linux/Unix systemd log)
journal_handler = JournalHandler()
journal_handler.setFormatter(log_formatter_journal)

# Setup Stream Handler (i.e. console)
stream_handler = logging.StreamHandler()
stream_handler.setFormatter(log_formatter_colored)

# Get our app_logger
app_log = logging.getLogger('P2PoW')
app_log.setLevel(logging.DEBUG)

# Add Handlers
app_log.addHandler(journal_handler)
app_log.addHandler(stream_handler)
# Keep messages out of the root logger.
app_log.propagate = False


def logFile(fileOutput):
    # create file handler for logger.
    # NOTE(review): this chunk is truncated — the body of logFile() is
    # not visible here.
# gandi_ddns: dynamic-DNS updater header — imports, journald logging, and
# the external-IP lookup helper.
import ipaddress
import json
import logging
import os
import sys
import requests
import yaml
from systemd.journal import JournalHandler

LOGGER = logging.getLogger("gandi_ddns")
CONFIG_FILE = "config.yaml"
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))

# All output goes to journald as the bare message text.
JOURNAL_HANDLER = JournalHandler()
JOURNAL_HANDLER.setFormatter(logging.Formatter("%(message)s"))
LOGGER.addHandler(JOURNAL_HANDLER)
LOGGER.setLevel(logging.DEBUG)

# Could be any service that just gives us a simple raw ASCII IP address (not HTML etc)
EXTERNAL_IP_URL = "https://api.ipify.org"


def get_ip():
    """ Retuns the external ip address of the machine. """
    try:
        resp = requests.get(EXTERNAL_IP_URL, timeout=3)
    except Exception:
        LOGGER.critical("Failed to retrieve external IP.", exc_info=True)
        # NOTE(review): this chunk is truncated — the rest of get_ip()
        # is not visible here.
# Door controller: logs to both journald and /var/log/door.log.
from threading import Lock, Thread
from time import sleep
import logging
from systemd.journal import JournalHandler

logger = logging.getLogger(__name__)
journalHandler = JournalHandler()
journalHandler.setFormatter(logging.Formatter('[%(levelname)s] %(message)s'))
logger.addHandler(journalHandler)
fileHandler = logging.FileHandler('/var/log/door.log')
fileHandler.setFormatter(logging.Formatter("%(asctime)s|%(levelname)s;|%(message)s"))
logger.addHandler(fileHandler)
logger.setLevel(logging.INFO)


class Door(object):
    """Drives a door relay and its indicator LEDs through a PiFace Digital
    board. LED/relay pin assignments come from the caller."""

    def __init__(self, pifacedigital, output_pins, input_pins, relay_number):
        # Serializes access to the hardware from multiple threads.
        self.lock = Lock()
        self.pifacedigital = pifacedigital
        # Output pin order is fixed by the caller: green/red door-switch
        # LEDs, then red/green RFID-reader LEDs.
        self.door_switch_green_led_output_pin = output_pins[0]
        self.door_switch_red_led_output_pin = output_pins[1]
        self.rfid_reader_red_led_output_pin = output_pins[2]
        self.rfid_reader_green_led_output_pin = output_pins[3]
        self.door_state_input_pin = input_pins[0]
        self.door_relay_number = relay_number
        # NOTE(review): semantics of state (locked vs unlocked) are not
        # visible in this chunk — confirm before relying on it.
        self.state = False

    def update_leds(self):
        # Red LEDs track a truthy state; the falsy branch is not visible.
        if self.state:
            self.pifacedigital.leds[self.door_switch_red_led_output_pin].turn_on()
            self.pifacedigital.leds[self.rfid_reader_red_led_output_pin].turn_on()
            # NOTE(review): this chunk is truncated — the rest of
            # update_leds() is not visible here.
def __init__(self, **kw):
    """Initialise the swap manager from keyword configuration options.

    Sets up: configuration attributes, a logger (journald or stderr),
    a root-privilege check, process/IO niceness, internal scan state,
    regexes for parsing /proc files, and a thread pool of workers.
    """
    # configuration
    self.mem_threshold = kw.get("mem_threshold")
    self.max_accepted_swap_size = kw.get("max_accepted_swap_size")
    self.poll_proc_swap_interval = kw.get("poll_proc_swap_interval")
    self.poll_proc_active_interval = kw.get("poll_proc_active_interval")
    self.idle_after_system_busy = kw.get("idle_after_busy")
    self.idle_after_completed = kw.get("idle_after_completed")
    self.proc_threshold = kw.get("max_active_processes")
    self.perform_swap_off = kw.get("swapoffon")
    self.num_workers = kw.get("num_parallel")
    self.develop_mode = kw.get("develop")
    self.spawn_timeout = kw.get("spawn_timeout")
    verbose = kw.get("verbose")
    systemdlog = kw.get("systemd_logger")
    # create logger: journald when requested, plain stderr otherwise
    self.logger = logging.getLogger(__name__)
    if systemdlog:
        from systemd.journal import JournalHandler
        handler = JournalHandler()
    else:
        handler = logging.StreamHandler()
    handler.setFormatter(logging.Formatter("[%(levelname)s] %(message)s"))
    # Drop any handlers installed by an earlier instance before adding ours.
    self.logger.handlers = []
    self.logger.addHandler(handler)
    self.logger.setLevel(
        logging.ERROR if verbose == "error" else logging.
        WARNING if verbose == "warning" else logging.INFO
        if verbose == "info" else logging.DEBUG
        if verbose == "dump" else logging.NOTSET)
    # Root is required (the tool reads other processes' /proc entries).
    if os.geteuid() != 0:
        self.logerror(
            "It seems that this application is not run as root. Exiting.")
        sys.exit(1)
    for d in kw:
        self.logdump("Option %s value %s (type %s)", d, kw[d],
                     str(type(kw[d])))
    if not kw.get("avoid_renicing", False):
        os.nice(20)
        self.loginfo("set nice value to 20")
        # Best effort: lower IO priority too, if psutil is available.
        # NOTE(review): nesting of this try block under the renicing branch
        # is inferred from the mangled source — confirm against upstream.
        try:
            import psutil
            p = psutil.Process(os.getpid())
            p.ionice(psutil.IOPRIO_CLASS_IDLE)
            self.loginfo("set io nice value to idle")
        except ImportError:
            self.logwarning(
                "Cannot import psutil, ionice value will not be modified.")
    if self.spawn_timeout > 0 and sys.version_info.major < 3:
        self.logerror("option spawn_timeout not available in python 2")
        sys.exit(1)
    # state
    self.state = self.STATE_DESWAP
    self.current_process = None
    self.f_memory = None
    self.remaining_items = SmapsFileWrapper()
    self.remaining_items_iter = iter(self.remaining_items)
    self.total_amount_to_deswap = 0
    self.current_process_swapsize = 0
    # cache
    self.lastNumActiveProcs = None
    self.lastProcScan = {}
    # regexes (parse smaps ranges, free/swap totals, process state/name)
    self.condense_smap = re.compile(
        r"^(Swap:.*)|(([0-9a-fA-F]+)\s*-\s*([0-9a-fA-F]+)\s.*)$",
        re.MULTILINE)
    self.freere = re.compile(r"Mem:.*\s([0-9]+)$")
    self.swapusedre = re.compile(r"Swap:\s+[0-9]+\s+([0-9]+)\s")
    self.statere = re.compile(r"State:\s*([A-Z])")
    self.namere = re.compile(r"Name:\s*(.*)")
    # initialize: take a first /proc snapshot, then start the worker pool
    self.scan_proc(self.SCAN_PROC_ACTIVE | self.SCAN_PROC_SWAP_DETAILS)
    self.executor = ThreadPoolExecutor(max_workers=self.num_workers)
    self.futures_pending = set()
def main(self, args):
    """ Do the thing """
    # ---- Logging setup ----------------------------------------------------
    log_file_path = '/var/log/kernelstub.log'
    if args.log_file:
        log_file_path = args.log_file

    # Clamp verbosity to 0..2 (WARNING/INFO/DEBUG on the console).
    verbosity = 0
    if args.verbosity:
        verbosity = args.verbosity
    if verbosity > 2:
        verbosity = 2

    # --print-config implies at least INFO so the tables are visible.
    if args.print_config:
        verbosity = 1

    level = {
        0 : logging.WARNING,
        1 : logging.INFO,
        2 : logging.DEBUG,
    }

    console_level = level[verbosity]
    file_level = level[2]  # the file log always captures DEBUG

    stream_fmt = logging.Formatter(
        '%(name)-21s: %(levelname)-8s %(message)s')
    file_fmt = logging.Formatter(
        '%(asctime)s - %(name)-21s: %(levelname)-8s %(message)s')
    log = logging.getLogger('kernelstub')

    console_log = logging.StreamHandler()
    console_log.setFormatter(stream_fmt)
    console_log.setLevel(console_level)

    # Rotating file log: 5 MiB per file, 5 backups.
    file_log = handlers.RotatingFileHandler(
        log_file_path, maxBytes=(1048576*5), backupCount=5)
    file_log.setFormatter(file_fmt)
    file_log.setLevel(file_level)

    log.addHandler(console_log)
    log.addHandler(file_log)

    # Mirror into journald when the systemd bindings are available.
    if SYSTEMD_SUPPORT:
        journald_log = JournalHandler()
        journald_log.setLevel(file_level)
        journald_log.setFormatter(stream_fmt)
        log.addHandler(journald_log)

    log.setLevel(logging.DEBUG)

    # Figure out our command line options.
    log.debug('Got command line options: %s', args)

    if args.dry_run:
        log.warning(
            'DEPRECATED!\n\n'
            'The simulate or dry-run option has been removed from '
            'kernelstub and no longer functions. This will be removed in a '
            'future version. Since you likely intend no action, we will now '
            'exit.'
        )
        exit()

    config = Config.Config()
    configuration = config.config['user']

    if args.esp_path:
        configuration['esp_path'] = args.esp_path

    root_path = "/"
    if args.root_path:
        root_path = args.root_path

    opsys = Opsys.OS()

    # Kernel/initrd paths: explicit CLI overrides, else derived from the OS.
    if args.kernel_path:
        log.debug('Manual kernel path:\n %s', args.kernel_path)
        opsys.kernel_path = args.kernel_path
    else:
        opsys.kernel_path = os.path.join(root_path, opsys.kernel_name)

    if args.initrd_path:
        log.debug('Manual initrd path:\n %s', args.initrd_path)
        opsys.initrd_path = args.initrd_path
    else:
        opsys.initrd_path = os.path.join(root_path, opsys.initrd_name)

    # NOTE(review): log.exception outside an except block emits a
    # "NoneType: None" pseudo-traceback; log.error would be cleaner here.
    if not os.path.exists(opsys.kernel_path):
        log.exception(
            'Can\'t find the kernel image! \n\n Please use the '
            '--kernel-path option to specify the path to the kernel image'
        )
        exit(0)

    if not os.path.exists(opsys.initrd_path):
        log.exception(
            'Can\'t find the initrd image! \n\n Please use the '
            '--initrd-path option to specify the path to the initrd image'
        )
        exit(0)

    # Check for kernel parameters. Without them, stop and fail
    if args.k_options:
        configuration['kernel_options'] = config.parse_options(
            args.k_options.split())
    else:
        try:
            configuration['kernel_options']
        except KeyError:
            error = (
                'cmdline was "InvalidConfig"\n\n Could not find any valid '
                'configuration. This probably means that the configuration '
                'file is corrupt. Either remove it to regenerate it from '
                'default or fix the existing one.'
            )
            log.exception(error)
            raise CmdLineError("No Kernel Parameters found")
            # NOTE(review): unreachable — the raise above leaves this frame.
            exit(168)

    log.debug(config.print_config())

    if args.preserve_live and configuration['live_mode']:
        configuration['live_mode'] = True
        log.warning(
            'Live mode is enabled!\n'
            'Kernelstub is running in live environment mode. This usually '
            'means that you are running a live disk, and kernelstub should '
            'not run. We are thus exiting with 0.\n'
            'If you are not running a live disk, please run '
            '`sudo kernelstub` to disable live mode.'
        )
        exit(0)

    # Not preserving live mode: clear the flag for normal operation.
    configuration['live_mode'] = False

    if args.setup_loader:
        configuration['setup_loader'] = True
    if args.off_loader:
        configuration['setup_loader'] = False

    if args.install_stub:
        configuration['manage_mode'] = False
    if args.manage_mode:
        configuration['manage_mode'] = True

    log.debug('Checking configuration integrity...')
    try:
        kernel_opts = configuration['kernel_options']
        esp_path = configuration['esp_path']
        setup_loader = configuration['setup_loader']
        manage_mode = configuration['manage_mode']
        force = configuration['force_update']
        live_mode = configuration['live_mode']
    except KeyError:
        log.exception(
            'Malformed configuration! \n'
            'The configuration we got is bad, and we can\'t continue. '
            'Please check the config files and make sure they are correct. '
            'If you can\'t figure it out, then deleting them should fix '
            'the errors and cause kernelstub to regenerate them from '
            'Default. \n\n You can use "-vv" to get the configuration used.'
        )
        log.debug('Configuration we got: \n\n%s', config.print_config())
        exit(169)

    # Merge CLI-added options, de-duplicated against the existing list.
    if args.add_options:
        add_opts = args.add_options.split(" ")
        add_opts = config.parse_options(add_opts)
        for opt in add_opts:
            if opt not in kernel_opts:
                kernel_opts.append(opt)
        configuration['kernel_options'] = kernel_opts

    if args.remove_options:
        rem_opts = args.remove_options.split(" ")
        rem_opts = config.parse_options(rem_opts)
        # Set difference loses the original ordering of kernel_opts.
        kernel_opts = list(set(kernel_opts) - set(rem_opts))
        configuration['kernel_options'] = kernel_opts

    if args.force_update:
        force = True
    if configuration['force_update'] is True:
        force = True

    log.debug('Structing objects')
    drive = Drive.Drive(root_path=root_path, esp_path=esp_path)
    nvram = Nvram.NVRAM(opsys.name, opsys.version)
    installer = Installer.Installer(nvram, opsys, drive)

    # Log some helpful information, to file and optionally console
    data_system = {
        'Root:': drive.root_fs,
        'ESP:': drive.esp_fs,
        'Kernel Path:': opsys.kernel_path,
        'Initrd Path:': opsys.initrd_path,
        'Boot Options:': " ".join(kernel_opts),
    }
    data_debug = {
        'OS:': "{} {}".format(opsys.name_pretty, opsys.version),
        'ESP Partition #:': drive.esp_num,
        'NVRAM entry #:': nvram.os_entry_index,
        'Boot Variable #:': nvram.order_num,
        'Root FS UUID:': drive.root_uuid,
    }
    data_config = {
        'Kernel Options:': " ".join(kernel_opts),
        'ESP Path:': esp_path,
        'Install loader config:': setup_loader,
        'Management Mode:': manage_mode,
        'Force Overwrite:': str(force),
        'Live Disk Mode:': live_mode,
        'Config revision:': configuration['config_rev']
    }

    if args.print_config:
        log.info(
            'System information:\n\n%s', self.mktable(data_system, 22)
        )
        log.debug(
            'Debug information:\n\n%s', self.mktable(data_debug, 22)
        )
        log.info(
            'Active configuration details:\n\n%s',
            self.mktable(data_config, 22)
        )
        exit(0)

    log.info(
        'System information:\n\n%s', self.mktable(data_system, 16)
    )
    log.debug(
        'Debug information:\n\n%s', self.mktable(data_debug, 16)
    )
    log.debug(
        'Active configuration:\n\n%s', self.mktable(data_config, 22)
    )

    # NOTE(review): dead code — args.print_config was handled above and
    # already exits; this duplicate block can never run.
    if args.print_config:
        log.info(
            'Active configuration details:\n\n%s',
            self.mktable(data_config, 22)
        )
        exit(0)

    log.debug('Setting up boot...')
    kopts = 'root=UUID={uuid} ro {options}'.format(
        uuid=drive.root_uuid,
        options=" ".join(kernel_opts)
    )
    log.debug('kopts: %s', kopts)

    installer.setup_kernel(
        kopts, setup_loader=setup_loader, overwrite=force)
    # Backing up the previous kernel is best-effort: a missing old kernel
    # is normal on fresh installs.
    try:
        installer.backup_old(
            kopts, setup_loader=setup_loader)
    except Exception as e_e:
        log.debug(
            'Couldn\'t back up old kernel. \nThis might just mean you '
            'don\'t have an older kernel installed. If you do, try with -vv'
            ' to see debugging information'
        )
        log.debug(e_e)

    installer.copy_cmdline()

    if not manage_mode:
        installer.setup_stub(kopts)

    log.debug('Saving configuration to file')
    config.config['user'] = configuration
    config.save_config()
    log.debug('Setup complete!\n\n')
    return 0
# Choose a logging destination appropriate for the runtime platform.
if os.environ.get('PLATFORM') == 'docker':
    # In a container, log to stderr so the container runtime collects it.
    logging_handler = logging.StreamHandler(sys.stderr)
elif JournalHandler:
    # JournalHandler is truthy only when the systemd bindings imported OK.
    logging_handler = JournalHandler()
# Fallbacks when JournalHandler isn't available.
elif sys.platform == 'linux':
    logging_handler = logging.handlers.SysLogHandler(address='/dev/log')
elif sys.platform == 'darwin':
    logging_handler = logging.handlers.SysLogHandler(address='/var/run/syslog')
elif sys.platform.lower() in ['windows', 'win32']:
    logging_handler = logging.handlers.SysLogHandler()
else:
    # Unknown platform, revert to stderr
    logging_handler = logging.StreamHandler(sys.stderr)
# glog-like prefix: level initial, timestamp with millis, line number.
logging_handler.setFormatter(
    logging.Formatter(fmt='{levelname[0]}{asctime}.{msecs:03.0f} '
                      '(unknown):{lineno}] {message}',
                      datefmt='%m%d %H:%M:%S', style='{'))
logger = logging.getLogger()
logger.setLevel(_parsed_args.log_level)
logger.addHandler(logging_handler)
_pima_server = AlarmServer()  # type: AlarmServer
_pima_server.start()
_mqtt_client = None  # type: typing.Optional[mqtt.Client]
_mqtt_topics = {}  # type: typing.Dict[str, str]
# MQTT topic layout under the configured base topic.
if _parsed_args.mqtt_host:
    _mqtt_topics['pub'] = os.path.join(_parsed_args.mqtt_topic, 'status')
    _mqtt_topics['sub'] = os.path.join(_parsed_args.mqtt_topic, 'command')
    _mqtt_topics['lwt'] = os.path.join(_parsed_args.mqtt_topic, 'LWT')
class LogManager:
    """Configures the root logger: journald (or a rotating file when
    journald is unavailable) plus a colourised console handler."""

    def __init__(self):
        # Formatters and handlers are created lazily in init().
        self.root = None
        self._file_formatter = None
        self._syslog_formatter = None
        self._console_formatter = None
        self._log_handler = None
        self._console_handler = None

    def init(self, log_level):
        """Install handlers on the root logger.

        log_level: one of "debug"/"info"/"warning"/"error"; controls only
        the console handler's threshold (see set_log_level).
        """
        self._file_formatter = logging.Formatter(
            "{asctime} [{name:<15.15}] [{levelname:<8.8}]: {message}",
            style="{")
        self._syslog_formatter = logging.Formatter(
            "[{name:>15.15}] [{levelname:<8.8}]: {message}", style="{")
        self._console_formatter = ColoredFormatter(
            "{log_color} * {reset}{message}",
            style="{",
            log_colors={
                'DEBUG': 'bold_cyan',
                'INFO': 'bold_blue',
                'WARNING': 'bold_yellow',
                'ERROR': 'bold_red'
            })
        if HAS_SYSTEMD:
            self._log_handler = JournalHandler(
                SYSLOG_IDENTIFIER="clover-config")
            self._log_handler.setFormatter(self._syslog_formatter)
        else:
            # No journald: fall back to a rotating file under ~/.local/share.
            log_path = os.path.join(os.path.expanduser("~"), ".local",
                                    "share", "clover-config")
            if not os.path.exists(log_path):
                os.makedirs(log_path)
            log_file = os.path.join(log_path, "clover-config.log")
            self._log_handler = logging.handlers.RotatingFileHandler(
                log_file,
                # 1MB size and 10 files
                maxBytes=1048576,
                backupCount=9)
            self._log_handler.setFormatter(self._file_formatter)
        # Persistent log captures INFO and above regardless of console level.
        # NOTE(review): placement outside the else-branch is inferred from
        # the mangled source — confirm it should apply to both handlers.
        self._log_handler.setLevel(logging.INFO)
        self._console_handler = logging.StreamHandler()
        self._console_handler.setFormatter(self._console_formatter)
        self.set_log_level(log_level)
        self.root = logging.getLogger()
        self.root.name = "clover-config"
        self.root.setLevel(logging.DEBUG)
        self.root.addHandler(self._log_handler)
        self.root.addHandler(self._console_handler)
        if HAS_SYSTEMD:
            self.root.debug("Using journald logging system...")
        else:
            self.root.debug("Logging to `{}`".format(log_file))

    def __getattr__(self, name):
        # Any unknown attribute resolves to a named child logger, so
        # `log_manager.something.info(...)` works.
        return logging.getLogger(name)

    def die(self,
            code,
            message="An error occured during the execution of the current action"
            ):
        """Log `message` as an error and terminate with `code.value`.

        NOTE(review): "occured" typo in the user-visible default message.
        """
        self.root.error("%s - aborting...", message)
        self.root.debug("Exiting with exit code %d", code)
        sys.exit(code.value)

    def set_log_level(self, log_level):
        """Map a textual level name onto the console handler's threshold."""
        log_levels = {
            "debug": logging.DEBUG,
            "info": logging.INFO,
            "warning": logging.WARNING,
            "error": logging.ERROR
        }
        level = log_levels[log_level]
        self._console_handler.setLevel(level)
class JournalLoggingPlugin(PithosPlugin):
    """Pithos plugin that mirrors application logging into systemd-journald,
    with a preferences dialog to pick the journald log level."""

    preference = 'journald-logging'
    description = _('Store logs with the journald service')

    # GObject signal-handler id for the prefs dialog; None until enabled.
    _logging_changed_handler = None

    def on_prepare(self):
        # Everything that depends on the systemd python bindings lives in
        # the try-body, so a missing module surfaces as a user-visible error.
        try:
            from systemd.journal import JournalHandler
            journal = JournalHandler(SYSLOG_IDENTIFIER='io.github.Pithos')
            self._journal = journal
            journal.setFormatter(logging.Formatter())
            self._logger = logging.getLogger()
            self.preferences_dialog = LoggingPluginPrefsDialog(
                self.window, self.settings)
        except ImportError:
            self.prepare_complete(error=_('Systemd Python module not found'))
        else:
            self.prepare_complete()

    def on_enable(self):
        # Apply the persisted level (default 'verbose'), attach the journald
        # handler to the root logger, and follow future changes from prefs.
        initial_level = self.settings['data'] or 'verbose'
        self._on_logging_changed(None, initial_level)
        self._logger.addHandler(self._journal)
        self._logging_changed_handler = self.preferences_dialog.connect(
            'logging-changed', self._on_logging_changed)

    def _on_logging_changed(self, prefs_dialog, level):
        # Persist the chosen level, then apply it to the journald handler
        # only (the root logger itself is untouched).
        self.settings['data'] = level
        self._journal.setLevel(LOG_LEVELS[level])
        logging.info('setting journald logging level to: {}'.format(level))

    def on_disable(self):
        self._logger.removeHandler(self._journal)
        if self._logging_changed_handler:
            self.preferences_dialog.disconnect(self._logging_changed_handler)