def log_validator(args):
    """Print prysm validator information from the systemd journal.

    Dispatches on args.subcommand:
      proposals    -- recent "Submitted new block" entries
      attestations -- recent "Submitted new attestations" entries
      performance  -- per-epoch voting summaries
      status       -- service status plus vote/balance summaries

    args.rows bounds how many journal entries are shown; args.epoch, when
    set, filters out entries newer than that epoch.
    """
    if args.testnet:
        service = validator_testnet_service
    else:
        service = validator_service

    # Read this boot's INFO-level entries for the chosen validator unit.
    j = journal.Reader()
    j.this_boot()
    j.log_level(journal.LOG_INFO)
    j.add_match(_SYSTEMD_UNIT=service)

    if args.subcommand == "proposals":
        j.add_match(MESSAGE="Submitted new block")
        j.seek_tail()
        print(" Time Slot Root Atts Deps Graffiti")
        i = 0
        # Walk backwards from the newest entry until args.rows are printed.
        while i < args.rows:
            msg = j.get_previous()
            if not msg:
                i += 1
                continue
            slot = int(msg['SLOT'])
            # Skip entries past the requested epoch (32 slots per epoch).
            if args.epoch and slot > args.epoch * 32:
                continue
            root = msg['BLOCKROOT']
            atts = msg['NUMATTESTATIONS']
            deps = msg['NUMDEPOSITS']
            # BUG FIX: this previously read base64.b64decode(body["graffiti"]),
            # but no name `body` exists in this function (NameError).  The
            # graffiti comes from the journal entry itself.
            # TODO(review): confirm the exact journal field name in live logs.
            graffiti_temp = base64.b64decode(msg['GRAFFITI'])
            graffiti = graffiti_temp.decode('utf8').replace("\00", " ")
            ts = msg['_SOURCE_REALTIME_TIMESTAMP']
            tstr = ts.strftime("%H:%M:%S")
            print("{} {:>7} {} {} {} {}".format(tstr, slot, root, atts,
                                                deps, graffiti))
            i += 1

    if args.subcommand == "attestations":
        j.add_match(MESSAGE="Submitted new attestations")
        j.seek_tail()
        print(" time source target slot index agr committee sourceroot targetroot beaconroot")
        i = 0
        while i < args.rows:
            msg = j.get_previous()
            if not msg:
                i += 1
                continue
            slot = int(msg['SLOT'])
            if args.epoch and slot > args.epoch * 32:
                continue
            # Index lists arrive as "[1 2 3]"; strip the brackets and split.
            aggidx = [int(m) for m in msg["AGGREGATORINDICES"][1:-1].split()]
            attidx = [int(m) for m in msg["ATTESTERINDICES"][1:-1].split()]
            source = int(msg['SOURCEEPOCH'])
            target = int(msg['TARGETEPOCH'])
            committee = int(msg['COMMITTEEINDEX'])
            targetroot = msg['TARGETROOT']
            sourceroot = msg['SOURCEROOT']
            beaconroot = msg['BEACONBLOCKROOT']
            ts = msg['_SOURCE_REALTIME_TIMESTAMP']
            tstr = ts.strftime("%H:%M:%S")
            for idx in attidx:
                # Mark attesters that also acted as aggregators with a check.
                if idx in aggidx:
                    print("{} {:>8} {:>8} {:>9} {:>8} ✓ {:>8} {} {} {}".format(
                        tstr, source, target, slot, idx, committee,
                        sourceroot, targetroot, beaconroot))
                else:
                    print("{} {:>8} {:>8} {:>9} {:>8} {:>8} {} {} {}".format(
                        tstr, source, target, slot, idx, committee,
                        sourceroot, targetroot, beaconroot))
            i += 1

    if args.subcommand == "performance":
        j.add_match(MESSAGE="Previous epoch voting summary")
        j.seek_tail()
        print(" time pubkey epoch source target head inc. dist.")
        i = 0
        while i < args.rows:
            msg = j.get_previous()
            if not msg:
                i += 1
                continue
            epoch = int(msg['EPOCH'])
            if args.epoch and epoch > args.epoch:
                continue
            inclusion = int(msg['INCLUSIONDISTANCE'])
            source = "✓" if msg['CORRECTLYVOTEDSOURCE'] == "true" else "⨯"
            target = "✓" if msg['CORRECTLYVOTEDTARGET'] == "true" else "⨯"
            head = "✓" if msg['CORRECTLYVOTEDHEAD'] == "true" else "⨯"
            pubkey = msg['PUBKEY']
            ts = msg['_SOURCE_REALTIME_TIMESTAMP']
            tstr = ts.strftime("%H:%M:%S")
            # An inclusion distance of 33+ slots means the attestation missed.
            if inclusion < 33:
                print("{} {} {:>8} {:^5} {:^5} {:^5} {:>5}".format(
                    tstr, pubkey, epoch, source, target, head, inclusion))
            else:
                print("{} {} {:>8} {:^5} {:^5} {:^5} {:>5}".format(
                    tstr, pubkey, epoch, source, target, head, "miss"))
            i += 1

    if args.subcommand == "status":
        print("Validator summary since last launch:\n")
        resp = read_status(service)
        for key in resp:
            print('{:<10}: {}'.format(key, resp[key]))

        j.add_match(MESSAGE="Vote summary since launch")
        j.seek_tail()
        msg = j.get_previous()
        if not msg:
            return
        source = msg['CORRECTLYVOTEDSOURCEPCT']
        target = msg['CORRECTLYVOTEDTARGETPCT']
        head = msg['CORRECTLYVOTEDHEADPCT']
        inclusion = msg['AVERAGEINCLUSIONDISTANCE']
        attinclusion = msg['ATTESTATIONSINCLUSIONPCT']
        epochs = msg['NUMBEROFEPOCHS']
        print("\n")
        print("Average Inclusion Distance : {}".format(inclusion))
        print("Correctly Voted Source     : {}".format(source))
        print("Correctly Voted Target     : {}".format(target))
        print("Correctly Voted Head       : {}".format(head))
        print("Attestations Inclusion     : {}".format(attinclusion))
        print("\n")
        print("Number of Epochs running   : {}".format(epochs))

        # Re-filter the journal for balance history, then compute annualized
        # earnings from balances now, one hour ago, and one day ago.
        j.flush_matches()
        j.add_match(_SYSTEMD_UNIT=service)
        j.add_match(MESSAGE="Previous epoch voting summary")
        bals_now = balances(j)
        td = Time() - 3600
        bals_hour = balances(j, td)
        td = td - 82800  # another 23h back: 3600 + 82800 = 86400s = 1 day
        bals_day = balances(j, td)
        # 8760 hours / 365 days per year annualize the hourly/daily deltas.
        hourly = {pubkey: (bal - bals_hour[pubkey]) * 8760 / bals_hour[pubkey]
                  for pubkey, bal in bals_now.items()}
        daily = {pubkey: (bal - bals_day[pubkey]) * 365 / bals_day[pubkey]
                 for pubkey, bal in bals_now.items()}
        # Fraction of a year (31536000s) elapsed since genesis, for the
        # all-time return on the initial 32-ETH deposit.
        time_fraction = 31536000 / (Time() - genesis)
        total = {pubkey: (bal - 32) * time_fraction / 32
                 for pubkey, bal in bals_now.items()}
        print("\n Public Key Balance Hourly Daily Total")
        for pubkey, bal in bals_now.items():
            print("{:<20}{:>.9f} {:.2%} {:.2%} {:.2%}".format(
                pubkey, bal, hourly[pubkey], daily[pubkey], total[pubkey]))
def __init__(self, matches={}): self.j = journal.Reader() for k, v in matches.items(): self.j.add_match("%s=%s" % (k, v)) self.j.seek_tail() self.j.get_previous()
def test_reader_init_path(tmpdir): j1 = journal.Reader(path=tmpdir.strpath) journal.Reader(0, path=tmpdir.strpath) j2 = journal.Reader(path=tmpdir.strpath) journal.Reader(path=tmpdir.strpath)
def log_beacon(args):
    """Print prysm beacon-chain information from the systemd journal.

    Dispatches on args.subcommand: "warn" (recent warnings), "status"
    (service status, peers, chain head) and "blocks" (per-epoch blocks).
    """
    if args.testnet:
        service = beacon_testnet_service
    else:
        service = beacon_service

    j = journal.Reader()
    j.this_boot()
    # "warn" reads warning-level entries; everything else reads INFO.
    if args.subcommand == "warn":
        j.log_level(journal.LOG_WARNING)
    else:
        j.log_level(journal.LOG_INFO)
    j.add_match(_SYSTEMD_UNIT=service)

    if args.subcommand == "warn":
        # Walk backwards from the newest entry, printing up to args.rows.
        j.seek_tail()
        for i in range(args.rows):
            msg = j.get_previous()
            if not msg:
                continue
            message = msg['MESSAGE']
            # The error text may live under ERR or ERROR depending on the
            # logger that produced the entry.
            errormsg = msg.get('ERR', '')
            if not errormsg:
                errormsg = msg.get('ERROR', '')
            if errormsg:
                errormsg = "Error :"+errormsg
            block = msg.get('BLOCKSLOT', '')
            if block:
                block = "Slot : "+block
            datetime = msg['_SOURCE_REALTIME_TIMESTAMP']
            time = datetime.strftime("%H:%M:%S")
            print("{} -- {} {} {}".format(time, message, errormsg, block))
        return

    if args.subcommand == "status":
        print("Beacon status:\n")
        resp = read_status(service)
        for key in resp:
            print(' {:<10}: {}'.format(key, resp[key]))
        peers = get_peers(args.testnet)
        # Count only peers currently in the CONNECTED state.
        numpeers = len([1 for m in peers if m['connectionState'] == "CONNECTED"])
        print (" Peers : {}".format(numpeers))
        print("\nChain Head:\n")
        chainhead = get_chainhead(args.testnet)
        # Block roots arrive base64-encoded; display as truncated hex.
        chainhead['headBlockRoot'] = "0x"+base64.b64decode(chainhead['headBlockRoot']).hex()[:12]
        chainhead['finalizedBlockRoot'] = "0x"+base64.b64decode(chainhead['finalizedBlockRoot']).hex()[:12]
        chainhead['justifiedBlockRoot'] = "0x"+base64.b64decode(chainhead['justifiedBlockRoot']).hex()[:12]
        chainhead['previousJustifiedBlockRoot'] = "0x"+base64.b64decode(chainhead['previousJustifiedBlockRoot']).hex()[:12]
        for key, val in chainhead.items():
            print ( " {:<27} : {}".format(key, val))
        return

    if args.subcommand == "blocks":
        if args.stream:
            print("Epoch Slot Proposer Att Dep Slsh Exits Graffiti root")
            stream_blocks(args)
        else:
            chainhead = get_chainhead(args.testnet)
            # epoch == -1 means "start from the current head epoch".
            if args.epoch == -1:
                epoch = int(chainhead['headEpoch'])
            else:
                epoch = args.epoch
            print("Epoch Slot Proposer Att Dep Slsh Exits Graffiti root")
            ret = args.rows
            # print_epoch_blocks presumably returns the number of rows still
            # wanted; loop back one epoch at a time until it reaches zero.
            while ret:
                ret = print_epoch_blocks(epoch, args.testnet, ret)
                epoch -= 1
            return
def _reset_journal(self): """ Closes any open journal and loads the journal file located at self._journal_path """ try: if self._journal: self._journal.close() self._journal = None self._poll = None # open the journal, limiting it to read logs since boot self._journal = journal.Reader(path=self._journal_path) self._journal.this_boot() # add any filters for match in self._matches: self._journal.add_match(match) # load the checkpoint cursor if it exists cursor = self._checkpoint.get_checkpoint(self._checkpoint_name) skip_to_end = True # if we have a checkpoint see if it's current if cursor is not None: try: self._journal.seek_cursor(cursor) entry = self._journal.get_next() timestamp = entry.get("__REALTIME_TIMESTAMP", None) if timestamp: current_time = datetime.datetime.utcnow() delta = current_time - timestamp if delta.total_seconds( ) < self._staleness_threshold_secs: skip_to_end = False else: global_log.log( scalyr_logging.DEBUG_LEVEL_0, "Checkpoint is older than %d seconds, skipping to end" % self._staleness_threshold_secs, ) except Exception as e: global_log.warn( "Error loading checkpoint: %s. Skipping to end." % six.text_type(e)) if skip_to_end: # seek to the end of the log # NOTE: we need to back up a single item, otherwise journald returns # random entries self._journal.seek_tail() self._journal.get_previous() # configure polling of the journal file self._poll = select.poll() mask = self._journal.get_events() self._poll.register(self._journal, mask) except Exception as e: global_log.warn("Failed to reset journal %s\n%s" % (six.text_type(e), traceback.format_exc()))
def demux_one_journal(j):
    """Split one journal's entries into per-service log files.

    j: a dict describing one journal source; uses key 'name' (required)
    and optional 'journal_dir' and 'subdir' keys.  Relies on the
    module-level working_dir, service_names and timestamp.  Returns True.
    """
    print("Gathering journals from " + j['name'])
    # open the journal from a specific directory, or use the host journal
    if 'journal_dir' in j:
        print(" Using journal dir " + j['journal_dir'])
        jreader = journal.Reader(path=j['journal_dir'])
    else:
        print(" Using host journal")
        jreader = journal.Reader()
    # the path to where we will save the journal for this host/container
    j_dir = working_dir + '/logs'
    if 'subdir' in j:
        j_dir = j_dir + '/' + j['subdir']
    d_dir = j_dir
    j_dir = j_dir + '/' + j['name']
    d_dir = d_dir + '/deprecations/' + j['name']
    # Create regular logs directory
    if not os.path.isdir(j_dir):
        os.makedirs(j_dir)
    # Create deperecations directory
    if not os.path.isdir(d_dir):
        os.makedirs(d_dir)
    # Map output filename -> open file handle; opened lazily, closed below.
    output_files = {}
    # for each journal entry, try to match it with the services we care about
    # and split each service out into its own list of journal entries
    for entry in jreader:
        if 'MESSAGE' not in entry:
            continue
        if '_SYSTEMD_UNIT' not in entry:
            continue
        unit = entry['_SYSTEMD_UNIT']
        # Keep only entries whose unit name contains one of service_names.
        if not next((s for s in service_names if s in unit), None):
            continue
        # write each matched service journal entry out
        s_name = '/' + unit + '.journal-' + timestamp + '.log'
        j_filename = j_dir + s_name
        message = str(entry['MESSAGE'])
        message_time = str(entry['__REALTIME_TIMESTAMP'])
        result_message = f"{message_time} {unit} {message}\n"
        if j_filename not in output_files:
            output_files[j_filename] = open(j_filename, 'w')
        output_files[j_filename].write(result_message)
        # 'eprecat' matches both "deprecated" and "Deprecation" spellings.
        if 'eprecat' not in message:
            continue
        d_filename = d_dir + s_name
        if d_filename not in output_files:
            output_files[d_filename] = open(d_filename, 'w')
        output_files[d_filename].write(result_message)
    for fd in output_files.values():
        fd.close()
    # We created directories regardless if they needed or not. We should drop empty ones.
    empty_dirs = set([j_dir, d_dir]) - set(
        [os.path.dirname(fn) for fn in output_files.keys()])
    for e_dir in empty_dirs:
        try:
            os.rmdir(e_dir)
        except OSError:
            # Directory not empty (or already gone) — leave it.
            continue
    print(''.join([' Written ' + k + '\n' for k in output_files.keys()]))
    return True
def __init__(self): super().__init__() self.reader = journal.Reader() self.reader.this_boot() self.reader.seek_head()
#!/usr/bin/env python3 import re import select from pathlib import Path from systemd import journal alloc_re = re.compile( r"(.*)\-(\b[0-9a-f]{8}\-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-\b[0-9a-f]{12}\b)" ) alloc_base = Path('/allocs') j = journal.Reader(flags=journal.SYSTEM_ONLY) j.log_level(journal.LOG_INFO) j.add_match(_SYSTEMD_UNIT="docker.service") j.seek_tail() j.get_previous() p = select.poll() p.register(j, j.get_events()) handlers = {} class NomadLogHandler: def __init__(self, log_dir, log_name, max_bytes, backup_count): self.log_dir = log_dir self.log_name = log_name self.max_bytes = max_bytes
def test_reader_this_machine(tmpdir): j = journal.Reader(path=tmpdir.strpath) with j: j.this_machine() j.this_machine(TEST_MID) j.this_machine(TEST_MID.hex)
def test_reader_has_persistent_files(tmpdir): j = journal.Reader(path=tmpdir.strpath) with j: with skip_oserror(errno.ENOSYS): ans = j.has_runtime_files() assert ans is False
def test_reader_messageid_match(tmpdir): j = journal.Reader(path=tmpdir.strpath) with j: j.messageid_match(id128.SD_MESSAGE_JOURNAL_START) j.messageid_match(id128.SD_MESSAGE_JOURNAL_STOP.hex)
def test_reader_init_path_nondirectory_fd(): with pytest.raises(OSError): journal.Reader(0, path=0)
def test_reader_init_path_invalid_fd(): with pytest.raises(OSError): journal.Reader(0, path=-1)
def scan(self): reader = journal.Reader(**self._reader_settings) reader.this_boot() self._scan(reader)
def display(self):
    """Collect CUPS error-log output (journal and/or error_log file),
    restore debug-logging settings if we changed them, and store the
    results in self.answers.  Returns True when no logs were collected
    (so the user is shown a manual journalctl command), False otherwise.
    """
    answers = self.troubleshooter.answers
    parent = self.troubleshooter.get_window()
    self.answers = {}
    checkpoint = answers.get('error_log_checkpoint')
    cursor = answers.get('error_log_cursor')
    timestamp = answers.get('error_log_timestamp')
    # If we already captured logs on a previous pass, don't fetch again.
    if ('error_log' in self.persistent_answers or
        'journal' in self.persistent_answers):
        checkpoint = None
        cursor = None

    def fetch_log(c):
        # Download /admin/log/error_log over the CUPS connection into a
        # temp file; returns the open file on success, None on failure.
        prompt = c._get_prompt_allowed()
        c._set_prompt_allowed(False)
        c._connect()
        with NamedTemporaryFile(delete=False) as tmpf:
            success = False
            try:
                c.getFile('/admin/log/error_log', file=tmpf)
                success = True
            except cups.HTTPError:
                try:
                    os.remove(tmpf.name)
                except OSError:
                    pass
            c._set_prompt_allowed(prompt)
            if success:
                return tmpf.file
        return None

    now = datetime.datetime.fromtimestamp(time.time()).strftime("%F %T")
    self.authconn = self.troubleshooter.answers['_authenticated_connection']
    if 'error_log_debug_logging_set' in answers:
        # We turned debug logging on earlier; turn it back off now.
        try:
            self.op = TimedOperation(self.authconn.adminGetServerSettings,
                                     parent=parent)
            settings = self.op.run()
        except cups.IPPError:
            return False

        settings[cups.CUPS_SERVER_DEBUG_LOGGING] = '0'
        orig_settings = answers['cups_server_settings']
        settings['MaxLogSize'] = orig_settings.get('MaxLogSize', '2000000')
        success = False

        def set_settings(connection, settings):
            connection.adminSetServerSettings(settings)

            # Now reconnect.
            attempt = 1
            while attempt <= 5:
                try:
                    time.sleep(1)
                    connection._connect()
                    break
                except RuntimeError:
                    # Connection failed
                    attempt += 1

        try:
            self.op = TimedOperation(set_settings,
                                     (self.authconn, settings),
                                     parent=parent)
            self.op.run()
            self.persistent_answers['error_log_debug_logging_unset'] = True
        except cups.IPPError:
            pass

    self.answers = {}
    if journal and cursor is not None:
        # Read journal entries for cups.service from the saved cursor on.
        def journal_format(x):
            try:
                priority = "XACEWNIDd"[x['PRIORITY']]
            except (IndexError, TypeError):
                priority = " "

            return (priority + " " +
                    x['__REALTIME_TIMESTAMP'].strftime("[%m/%b/%Y:%T]") + " " +
                    x['MESSAGE'])

        r = journal.Reader()
        r.seek_cursor(cursor)
        r.add_match(_SYSTEMD_UNIT="cups.service")
        self.answers['journal'] = [journal_format(x) for x in r]

    if checkpoint is not None:
        # Fetch the error_log file and keep only lines past the checkpoint.
        self.op = TimedOperation(fetch_log, (self.authconn, ), parent=parent)
        tmpfname = self.op.run()
        if tmpfname is not None:
            # NOTE(review): this file handle is never closed explicitly.
            f = open(tmpfname)
            f.seek(checkpoint)
            lines = f.readlines()
            os.remove(tmpfname)
            self.answers = {'error_log': [x.strip() for x in lines]}

    if (len(self.answers.get('journal', [])) +
        len(self.answers.get('error_log', []))) == 0:
        # Nothing captured: offer a manual journalctl command instead.
        cmd = ("su -c 'journalctl -u cups.service "
               "--since=\"%s\" --until=\"%s\"' > troubleshoot-logs.txt" %
               (timestamp, now))
        self.entry.set_text(cmd)
        return True

    return False
return False if os.path.isfile(cmd_log): try: hdlr = open(cmd_log, 'rb') output( "Use log file : %s" % cmd_log ) output( "Use encoding : %s" % self.encoding ) test_lines = self.file_lines_gen(hdlr) except IOError, e: output( e ) return False elif cmd_log == "systemd-journal": # pragma: no cover if not journal: output( "Error: systemd library not found. Exiting..." ) return False myjournal = journal.Reader(converters={'__CURSOR': lambda x: x}) journalmatch = self._journalmatch self.setDatePattern(None) if journalmatch: try: for element in journalmatch: if element == "+": myjournal.add_disjunction() else: myjournal.add_match(element) except ValueError: output( "Error: Invalid journalmatch: %s" % shortstr(" ".join(journalmatch)) ) return False output( "Use journal match : %s" % " ".join(journalmatch) ) test_lines = journal_lines_gen(myjournal) else:
#!/usr/bin/env python3 """ - Read the system log and dump all lines. - Use filter to dump only lines related to systemd - Use map to extract only date and time """ from systemd import journal def journal_filter(msg, identifier): return msg.get("SYSLOG_IDENTIFIER") == identifier def journal_map_datetime(logentry): return logentry["__REALTIME_TIMESTAMP"].strftime("%Y-%m-%d %H:%M:%S") def journal_get_entries(jr): jr.seek_head() systemd_filter = filter(lambda entry: journal_filter(entry, "systemd"), jr) return map(journal_map_datetime, systemd_filter) jreader = journal.Reader() print(list(journal_get_entries(jreader)))
def display(self):
    """Collect CUPS error-log output (journal and/or error_log file),
    restore debug-logging settings if we changed them, and store the
    results in self.answers.  Always returns False (non-interactive page).
    """
    answers = self.troubleshooter.answers
    parent = self.troubleshooter.get_window()
    self.answers = {}
    checkpoint = answers.get('error_log_checkpoint')
    cursor = answers.get('error_log_cursor')
    # If we already captured logs on a previous pass, don't fetch again.
    if ('error_log' in self.persistent_answers or
        'journal' in self.persistent_answers):
        checkpoint = None
        cursor = None

    def fetch_log(c):
        # Download /admin/log/error_log over the CUPS connection into a
        # temp file; returns the filename on success, None on failure.
        prompt = c._get_prompt_allowed()
        c._set_prompt_allowed(False)
        c._connect()
        (tmpfd, tmpfname) = tempfile.mkstemp()
        os.close(tmpfd)
        success = False
        try:
            c.getFile('/admin/log/error_log', tmpfname)
            success = True
        except cups.HTTPError:
            try:
                os.remove(tmpfname)
            except OSError:
                pass
        c._set_prompt_allowed(prompt)
        if success:
            return tmpfname
        return None

    self.authconn = self.troubleshooter.answers['_authenticated_connection']
    if 'error_log_debug_logging_set' in answers:
        # We turned debug logging on earlier; turn it back off now.
        try:
            self.op = TimedOperation(self.authconn.adminGetServerSettings,
                                     parent=parent)
            settings = self.op.run()
        except cups.IPPError:
            return False

        settings[cups.CUPS_SERVER_DEBUG_LOGGING] = '0'
        orig_settings = answers['cups_server_settings']
        settings['MaxLogSize'] = orig_settings.get('MaxLogSize', '2000000')
        success = False

        def set_settings(connection, settings):
            connection.adminSetServerSettings(settings)

            # Now reconnect.
            attempt = 1
            while attempt <= 5:
                try:
                    time.sleep(1)
                    connection._connect()
                    break
                except RuntimeError:
                    # Connection failed
                    attempt += 1

        try:
            self.op = TimedOperation(set_settings,
                                     (self.authconn, settings),
                                     parent=parent)
            self.op.run()
            self.persistent_answers['error_log_debug_logging_unset'] = True
        except cups.IPPError:
            pass

    self.answers = {}
    # PEP 8 fix: use `is not None` (identity) rather than `!= None`.
    if journal and cursor is not None:
        # Read journal entries from cupsd starting at the saved cursor.
        def journal_format(x):
            try:
                priority = "XACEWNIDd"[x['PRIORITY']]
            except (IndexError, TypeError):
                priority = " "

            return (priority + " " +
                    x['__REALTIME_TIMESTAMP'].strftime("[%m/%b/%Y:%T]") + " " +
                    x['MESSAGE'])

        r = journal.Reader()
        r.seek_cursor(cursor)
        r.add_match(_COMM="cupsd")
        self.answers['journal'] = [journal_format(x) for x in r]

    if checkpoint is not None:
        # Fetch the error_log file and keep only lines past the checkpoint.
        self.op = TimedOperation(fetch_log, (self.authconn, ), parent=parent)
        tmpfname = self.op.run()
        if tmpfname is not None:
            # Resource-leak fix: the file was previously opened and never
            # closed; use a context manager.
            with open(tmpfname) as f:
                f.seek(checkpoint)
                lines = f.readlines()
            os.remove(tmpfname)
            self.answers = {'error_log': [x.strip() for x in lines]}

    return False
def __init__(self): self.journal_reader = journal.Reader()
# # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. """ Prints out journal entries with no or bad catalog explanations. """ import re from systemd import journal, id128 j = journal.Reader() logged = set() pattern = re.compile('@[A-Z0-9_]+@') mids = {v: k for k, v in id128.__dict__.items() if k.startswith('SD_MESSAGE')} freq = 1000 def log_entry(x): if 'CODE_FILE' in x: # some of our code was using 'CODE_FUNCTION' instead of 'CODE_FUNC' print('{}:{} {}'.format( x.get('CODE_FILE', '???'), x.get('CODE_LINE', '???'), x.get('CODE_FUNC', None) or x.get('CODE_FUNCTION', '???')))
def do_command_line(self, command_line):
    """Handle the application command line.

    Supports --last-logs (dump this boot's Pithos journal entries and
    exit), --version, and the --debug/--verbose/--test logging and
    activation options.  Returns the process exit status.
    """
    options = command_line.get_options_dict()

    # Show the Pithos log since last reboot and exit
    if options.contains('last-logs'):
        try:
            from systemd import journal
            from os.path import basename
        except ImportError:
            self._print(command_line, _('Systemd Python module not found'))
            return 1

        # We want the version also since the logging plugin misses
        # logging messages before it's enabled.
        self._print(command_line, 'Pithos {}'.format(self.version))
        reader = journal.Reader()
        reader.this_boot()
        reader.add_match(SYSLOG_IDENTIFIER='io.github.Pithos')

        _PRIORITY_TO_LEVEL = {
            journal.LOG_DEBUG: 'DEBUG',
            journal.LOG_INFO: 'INFO',
            journal.LOG_WARNING: 'WARNING',
            journal.LOG_ERR: 'ERROR',
            # BUG FIX: this user-visible level name was misspelled 'CRTICIAL'.
            journal.LOG_CRIT: 'CRITICAL',
            journal.LOG_ALERT: 'ALERT',
        }

        got_logs = False
        for entry in reader:
            try:
                got_logs = True
                level = _PRIORITY_TO_LEVEL[entry['PRIORITY']]
                line = entry['CODE_LINE']
                function = entry['CODE_FUNC']
                module = basename(entry['CODE_FILE'])[:-3]
                message = entry['MESSAGE']
            except KeyError:
                # Entry lacks the expected fields; dump it raw instead.
                self._print(command_line, _('Error Reading log entry, printing complete entry'))
                log_line = '\n'.join(('{}: {}'.format(k, v) for k, v in entry.items()))
            else:
                log_line = '{} - {}:{}:{} - {}'.format(level, module, function, line, message)
            self._print(command_line, log_line)

        if not got_logs:
            self._print(command_line, _('No logs for Pithos present for this boot.'))
        return 0

    # Show the version on local instance and exit
    if options.contains('version'):
        self._print(command_line, 'Pithos {}'.format(self.version))
        return 0

    # Set the logging level to show debug messages
    if options.contains('debug'):
        log_level = logging.DEBUG
    elif options.contains('verbose'):
        log_level = logging.INFO
    else:
        log_level = logging.WARN
    stream = logging.StreamHandler()
    stream.setLevel(log_level)
    stream.setFormatter(logging.Formatter(fmt='%(levelname)s - %(module)s:%(funcName)s:%(lineno)d - %(message)s'))
    logging.basicConfig(level=logging.NOTSET, handlers=[stream])

    self.test_mode = options.lookup_value('test')
    self.do_activate()
    return 0
def __init__(self, unit): self.reader = journal.Reader() self.reader.add_match(_SYSTEMD_UNIT=unit)
from systemd import journal journal_stream = journal.Reader().this_boot().add_match( _SYSTEMD_UNIT="minecraft").seek_tail() print(journal_stream)