def probe(self):
    """Measure how long a pending reboot has been outstanding.

    If ``/run/reboot-required`` exists, its first-seen timestamp is persisted
    in the cookie so the reported age keeps growing across check runs even if
    the file's mtime changes. The packages that requested the reboot are read
    from ``/run/reboot-required.pkgs`` when available.

    :return: list of two metrics: ``age`` (seconds, 0 when no reboot is
        pending) and ``packages`` (a set of package names; ``None`` when the
        pkgs file could not be read; empty set when no reboot is pending).
    """
    with nagiosplugin.Cookie(COOKIE_FILE) as cookie:
        if os.path.isfile("/run/reboot-required"):
            _log.info("reboot-required file found")
            # First run: remember the marker file's mtime; later runs keep
            # the stored value so the age is measured from first detection.
            timestamp = cookie.get(
                'timestamp',
                default=os.path.getmtime("/run/reboot-required"))
            cookie['timestamp'] = timestamp
            file_age = datetime.datetime.now().timestamp() - timestamp
            try:
                with open("/run/reboot-required.pkgs") as file:
                    _log.info("reboot-required.pkgs file read")
                    packages = set(file.read().splitlines())
                    _log.info(packages)
            except (OSError, UnicodeDecodeError):
                # Narrowed from bare Exception: only file-access/decode
                # problems should degrade to "packages unknown".
                packages = None
            return [
                nagiosplugin.Metric('age', file_age, uom="s"),
                nagiosplugin.Metric('packages', packages)
            ]
        else:
            # No reboot pending: forget any stored timestamp.
            cookie.pop('timestamp', None)
            _log.info("File not found")
            return [
                nagiosplugin.Metric('age', 0, uom="s"),
                nagiosplugin.Metric('packages', set())
            ]
def request_rate(self, records):
    """Create request rate metric (does not depend on url filters).

    In its current implementation, the request rate computation lies a
    little bit: we compute the rate of requests seen in the log file
    between the last check invocation and now. This must not necessarily
    equal the request rate as computed by the requests' timestamps.
    """
    now = datetime.datetime.now()
    with nagiosplugin.Cookie(self.statefile) as cookie:
        previous = cookie.get("last_run", None)
        # Persist the current invocation time for the next run.
        cookie["last_run"] = now.timetuple()[0:6]
    if not previous:
        # First invocation (or lost state): no interval to divide by.
        logging.info("cannot find last_run in state file")
        return nagiosplugin.Metric(
            "rate", 0, "req/s", min=0, context="default")
    elapsed = (now - datetime.datetime(*previous)).total_seconds()
    # Clamp to at least one second to avoid division blow-ups on
    # back-to-back invocations.
    return nagiosplugin.Metric(
        "rate",
        len(records) / max(elapsed, 1),
        "req/s",
        min=0,
        context="default",
    )
def parse(self):
    """Extract tuples (t_tot, is_error, urlpath) from new log lines.

    Tails the log file (position tracked via the state cookie) and returns
    a structured numpy array with fields ``t_tot`` (int32), ``err``
    (uint16 flag: status not 2xx/3xx) and ``url`` (80-byte string).
    """
    cookie = nagiosplugin.Cookie(self.statefile)
    records = []
    with nagiosplugin.LogTail(self.logfile, cookie) as lf:
        for line in lf:
            # Decode once instead of twice (match + exclude both need it).
            decoded = line.decode("iso-8859-1")
            match = self.r_logline.search(decoded)
            if not match:
                logging.debug("ignoring line: %s", line.strip())
                continue
            if self.r_exclude.search(decoded):
                logging.debug("hit exclude pattern in line: %s", line.strip())
                continue
            t_tot, stat, url = match.groups()
            err = not (stat.startswith("2") or stat.startswith("3"))
            records.append((t_tot, err, url))
    return numpy.array(
        records,
        dtype=[
            ("t_tot", numpy.int32),
            ("err", numpy.uint16),
            # "S80" replaces the legacy "80a" alias, which was removed
            # in NumPy 2.0; both mean an 80-byte bytes field.
            ("url", "S80"),
        ],
    )
def probe(self):
    """
    Querys the REST-API and create throughput metrics.

    Reads cumulative in/out byte counters from the firewall's XML API,
    compares them with the values stored in the state file from the
    previous run, and converts the delta into bits per second.

    :return: a throughput metric.
    :raises np.CheckError: when no valid counter value could be read,
        when a counter went backwards (e.g. device reset), or on the
        first run when no usable time delta exists yet.
    """
    _log.info('Reading XML from: %s', self.xml_obj.build_request_url())
    api_outbytes, api_inbytes = 0, 0
    current_time = get_time()
    soup = self.xml_obj.read()
    ifnet = soup.find('ifnet')
    # NOTE(review): each iteration overwrites api_inbytes/api_outbytes,
    # so only the LAST <entry> wins — presumably the API returns exactly
    # one entry for the requested interface; confirm against the API.
    for item in ifnet.find_all('entry'):
        api_inbytes = Finder.find_item(item, 'ibytes')
        api_outbytes = Finder.find_item(item, 'obytes')
    _log.debug('Path to statefile: %r' % get_statefile_path())
    with np.Cookie(get_statefile_path()) as cookie:
        # Fall back to the current readings on the first run, which makes
        # the first computed delta zero.
        old_inbytes = cookie.get(self.host + self.interface_name + 'i', api_inbytes)
        old_outbytes = cookie.get(self.host + self.interface_name + 'o', api_outbytes)
        old_time = cookie.get(self.host + self.interface_name + 't', current_time)
        # NOTE(review): `not api_inbytes` also rejects a literal 0/"0"
        # counter value — looks intentional as a "no data" guard, but a
        # freshly-reset interface would trip it; verify.
        if not api_inbytes or not api_outbytes or float(api_inbytes) < 0 or float(api_outbytes) < 0:
            raise np.CheckError('Couldn\'t get a valid value!')
        # Store the new readings before validating deltas so the next run
        # always compares against the latest sample.
        cookie[self.host + self.interface_name + 'i'] = api_inbytes
        cookie[self.host + self.interface_name + 'o'] = api_outbytes
        cookie[self.host + self.interface_name + 't'] = current_time
    # Counters must be monotonically increasing; a decrease means the
    # device counters were reset since the last check.
    if float(api_inbytes) < float(old_inbytes) or float(api_outbytes) < float(old_outbytes):
        raise np.CheckError('Couldn\'t get a valid value: Found throughput less then old!')
    diff_time = int(current_time) - int(old_time)
    if diff_time > 0:
        # bytes/sec -> bits/sec (x8), rounded to two decimals.
        in_bits_per_second = round(
            ((float(api_inbytes) - float(old_inbytes)) / diff_time) * 8, 2)
        out_bits_per_second = round(
            ((float(api_outbytes) - float(old_outbytes)) / diff_time) * 8, 2)
    else:
        raise np.CheckError(
            'Difference between old timestamp and new timestamp is less '
            'or equal 0: If it is the first time you run the script, '
            'please execute it again!')
    return [
        np.Metric('in_bps_' + str(self.interface_name), in_bits_per_second, min=0),
        np.Metric('out_bps_' + str(self.interface_name), out_bits_per_second, min=0)]
def parse_log(self):
    """Yields ttot and error status for each log line."""
    state = nagiosplugin.Cookie(self.statefile)
    with nagiosplugin.LogTail(self.logfile, state) as new_lines:
        for raw in new_lines:
            parsed = self.r_logline.search(raw.decode())
            if parsed is None:
                continue
            ttot, status = parsed.groups()
            # Anything outside 2xx/3xx counts as an error.
            yield int(ttot), not status.startswith(('2', '3'))
def _get_growth_rate(self, current_value):
    """Return the change in a metric's value since the previous execution.

    Varnishstat often reports cumulative values for its metrics. As we're
    only interested in relative changes we need to save state between
    executions of the plugin (done via nagiosplugin.Cookie). The first
    execution will therefore only ever report 0 and save state.

    :param current_value: value read from varnishstat in this execution
    :return: change in value since the last execution (0 on first run)
    """
    self._create_tmp_dir()
    with nag.Cookie(statefile=join(self.tmpdir, self.metric)) as cookie:
        historic_value = cookie.get(self.metric)
        # No saved state yet -> report 0; otherwise report the delta.
        if historic_value is None:
            metric_value = 0
        else:
            metric_value = current_value - historic_value
        # Both branches persist the new reading, so do it once here.
        cookie[self.metric] = current_value
        cookie.commit()
    return metric_value
def setUp(self):
    """Create a throwaway log file and a cookie backed by a temp state file."""
    tmpfile = tempfile.NamedTemporaryFile
    self.lf = tmpfile(prefix='log.')
    self.cf = tmpfile(prefix='cookie.')
    # The cookie persists its state in the temporary cookie file.
    self.cookie = nagiosplugin.Cookie(self.cf.name)
def __init__(self, logfile, statefile):
    """Remember the log file to tail and attach a state cookie.

    :param logfile: path of the log file to read
    :param statefile: path where the cookie persists its state
    """
    # Independent assignments; the cookie wraps the state file path.
    self.cookie = nagiosplugin.Cookie(statefile)
    self.logfile = logfile