def getLinesFromFile(theFile, start, stop):
    """Return (usePreviousLog, linesUsed) for log entries in (start, stop].

    usePreviousLog is True when this file does not reach back to *start*
    (or does not exist, or holds no data lines), signalling the caller to
    also scan older rotated logs.
    """
    usePreviousLog = False
    linesUsed = []
    if os.path.exists(theFile):
        # Rotated logs are gzip-compressed; match on the ".gz" suffix rather
        # than searching for "gz" anywhere in the path (which also matched
        # e.g. directory names containing "gz").
        opener = gzip.open if theFile.endswith(".gz") else open
        with opener(theFile, "rt") as fp:
            fileLines = [x.rstrip() for x in fp.readlines()]
        # Drop comment lines; reverse so lines[-1] is the oldest entry.
        lines = [l for l in fileLines if not l.startswith("#")]
        lines.reverse()
        if not lines:
            # No data lines at all (e.g. header-only file); the original code
            # raised IndexError on lines[-1] here.  Older logs may still
            # cover the window, so ask for them.
            return True, linesUsed
        if getDateTimeFromLogLine(lines[-1]) > start:
            usePreviousLog = True
        linesUsed = [
            l for l in lines
            if start < getDateTimeFromLogLine(l) <= stop
        ]
    else:
        collectd.notice("# File {} does not exist".format(theFile))
        usePreviousLog = True
    return usePreviousLog, linesUsed
def __init__(self, formatter, host, port, ttl=255, interface=None):
    """Build a UDP writer sending formatted values to host:port.

    interface may be an interface name or an IP address; when given, it is
    resolved to an IP and used as the multicast egress interface.
    """
    collectd.debug("%s formatter=%s host=%s, port=%s ttl=%s interface=%s"
                   % ('write_socket_json', formatter, host, port, ttl, interface))
    super(UdpWriter, self).__init__(formatter)
    self.host = host
    self.port = int(port)
    self.interface = interface
    self.ttl = ttl
    self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    if self.interface:
        # Crude test to distinguish between interface names and IP addresses.
        interface_ip = None
        try:
            if socket.gethostbyname(self.interface) == self.interface:
                interface_ip = self.interface
        except socket.gaierror:
            try:
                import netifaces
                # NOTE(review): indexes ifaddresses() by 0 — netifaces
                # normally keys by address family (e.g. netifaces.AF_INET);
                # confirm this returns the intended entry.
                interface_ip = netifaces.ifaddresses(self.interface)[0]['addr']
            # Bug fix: `except (...), msg` is Python-2-only syntax and a
            # SyntaxError on Python 3; `as` works on both.
            except (ImportError, OSError, ValueError) as msg:
                collectd.notice("%s error setting interface: %s"
                                % ('write_socket_json', msg))
        if interface_ip:
            try:
                self.sock.setsockopt(socket.IPPROTO_IP,
                                     socket.IP_MULTICAST_IF,
                                     socket.inet_aton(interface_ip))
            except socket.error as msg:
                collectd.notice("%s error setting interface: %s"
                                % ('write_socket_json', msg))
def write(self, values_dict):
    """Queue one values dict for asynchronous flushing; drop it (with a
    notice) when the output buffer is already full."""
    collectd.debug('%s.write_callback: values_object=%s' % ('$NAME', values_dict))
    buf = self.buffer
    try:
        buf.put_nowait(values_dict)
    except queue.Full:
        collectd.notice("%s output buffer full" % (self))
def logger(t, msg):
    """Dispatch msg to the collectd log function named by the tag t
    ('err', 'warn', 'info'); any other tag goes to notice()."""
    text = '%s: %s' % (NAME, msg)
    sinks = {
        'err': collectd.error,
        'warn': collectd.warning,
        'info': collectd.info,
    }
    sinks.get(t, collectd.notice)(text)
def config(config):
    """collectd config callback: pick the Interval option out of the
    plugin's configuration tree."""
    for node in config.children:
        option = node.key.lower()
        value = node.values[0]
        if option == "interval":
            collectd.notice("iotop-worker: config: got interval value %s" % (value,))
            DATA["interval"] = int(value)
def shutdown(data):
    """collectd shutdown callback: signal the worker, wait a little past one
    interval, and terminate it if it did not exit on its own."""
    collectd.notice("iotop-worker: shutdown: start shutdown")
    data["close_event"].set()
    worker_proc = data["process"]
    grace = data["interval"] + 1
    worker_proc.join(timeout=grace)
    if worker_proc.is_alive():
        collectd.notice("iotop-worker: shutdown: worker needs terminating")
        worker_proc.terminate()
        worker_proc.join()
    data["queue"].close()
def logger(t, msg):
    """Route msg to a collectd log level by tag: 'err', 'warn', or 'verb'
    (info, only when VERBOSE_LOGGING is on); everything else — including
    'verb' with verbose logging off — goes to notice()."""
    text = '%s: %s' % (NAME, msg)
    if t == 'err':
        collectd.error(text)
        return
    if t == 'warn':
        collectd.warning(text)
        return
    if t == 'verb' and VERBOSE_LOGGING:
        collectd.info(text)
        return
    collectd.notice(text)
def logger(level, message):
    # Map a short level tag to a collectd log call; unknown tags fall back
    # to notice().  'verb' messages are emitted only when verbose logging is
    # enabled in the root config instance.
    # NOTE(review): indentation reconstructed from collapsed source — the
    # final else is read as belonging to the outer chain, so 'verb' with
    # verbose logging off emits nothing; confirm against the original layout.
    if level == 'err':
        collectd.error("%s: %s" % (NAME, message))
    elif level == 'warn':
        collectd.warning("%s: %s" % (NAME, message))
    elif level == 'verb':
        if CONFIG_INSTANCES['root_config']['VERBOSE_LOGGING']:
            collectd.info("%s: %s" % (NAME, message))
    else:
        collectd.notice("%s: %s" % (NAME, message))
def logger(t, msg):
    # Map a short level tag to a collectd log call; unknown tags fall back
    # to notice().  'verb' messages are emitted only when VERBOSE_LOGGING is
    # set in CONFIG_ROOT.
    # NOTE(review): indentation reconstructed from collapsed source — the
    # final else is read as belonging to the outer chain, so 'verb' with
    # verbose logging off emits nothing; confirm against the original layout.
    if t == 'err':
        collectd.error('%s: %s' % (NAME, msg))
    elif t == 'warn':
        collectd.warning('%s: %s' % (NAME, msg))
    elif t == 'verb':
        if CONFIG_ROOT['root']['VERBOSE_LOGGING']:
            collectd.info('%s: %s' % (NAME, msg))
    else:
        collectd.notice('%s: %s' % (NAME, msg))
def logger(t, msg):
    # Map a short level tag to a collectd log call; unknown tags fall back
    # to notice().  'verb' messages are emitted only when the puppet-reports
    # config has verbose enabled.
    # NOTE(review): indentation reconstructed from collapsed source — the
    # final else is read as belonging to the outer chain, so 'verb' with
    # verbose off emits nothing; confirm against the original layout.
    if t == 'err':
        collectd.error('%s: %s' % (NAME, msg))
    elif t == 'warn':
        collectd.warning('%s: %s' % (NAME, msg))
    elif t == 'verb':
        if PuppetReportsConfig.verbose:
            collectd.info('%s: %s' % (NAME, msg))
    else:
        collectd.notice('%s: %s' % (NAME, msg))
def logger(self, t, msg):
    # Instance variant: map a short level tag to a collectd log call;
    # unknown tags fall back to notice().  'verb' messages are emitted only
    # when this instance's verbose flag is set.
    # NOTE(review): indentation reconstructed from collapsed source — the
    # final else is read as belonging to the outer chain, so 'verb' with
    # self.verbose off emits nothing; confirm against the original layout.
    if t == 'err':
        collectd.error('%s: %s' % (NAME, msg))
    elif t == 'warn':
        collectd.warning('%s: %s' % (NAME, msg))
    elif t == 'verb':
        if self.verbose:
            collectd.info('%s: %s' % (NAME, msg))
    else:
        collectd.notice('%s: %s' % (NAME, msg))
def logger(t, msg):
    # Map a short level tag to a collectd log call; unknown tags fall back
    # to notice().  'verb' messages are emitted only when the module-level
    # VERBOSE_LOGGING flag is on.
    # NOTE(review): indentation reconstructed from collapsed source — the
    # final else is read as belonging to the outer chain, so 'verb' with
    # VERBOSE_LOGGING off emits nothing; confirm against the original layout.
    if t == "err":
        collectd.error("%s: %s" % (NAME, msg))
    elif t == "warn":
        collectd.warning("%s: %s" % (NAME, msg))
    elif t == "verb":
        if VERBOSE_LOGGING:
            collectd.info("%s: %s" % (NAME, msg))
    else:
        collectd.notice("%s: %s" % (NAME, msg))
def message(self, level, text):
    """Log text, prefixed with its one-letter level, via the matching
    collectd function: E=error, W=warning, N=notice, I=info; anything
    else goes to debug."""
    text = '%s: %s' % (level, text)
    handlers = {
        'E': collectd.error,
        'W': collectd.warning,
        'N': collectd.notice,
        'I': collectd.info,
    }
    handlers.get(level, collectd.debug)(text)
def getLinesToParse():
    """Collect stats_count.log lines written since the previous invocation,
    including lines from rotated logs covering the (start, stop] window."""
    global LASTREADDATETIME
    currentLogFile = "/var/log/bro/current/stats_count.log"
    stop = datetime.now()
    start = LASTREADDATETIME
    LASTREADDATETIME = stop
    if start is None:
        # First run: look back 15 minutes.
        start = stop - timedelta(seconds=900)
        # NOTE(review): this rewinds the high-water mark to *start* rather
        # than *stop*, so the next run re-reads this window — confirm intended.
        LASTREADDATETIME = start
    collectd.notice("# Start: {}".format(start))
    collectd.notice("# Stop: {}".format(stop))
    collectd.notice("# Curr Log File: {}".format(currentLogFile))
    _, linesUsed = getLinesFromFile(currentLogFile, start, stop)
    # Bug fix: the original guard was `if usePreviousLog or True:` — a
    # tautology, so rotated logs were scanned unconditionally and the
    # usePreviousLog flag was dead.  The dead condition is removed; the
    # always-scan behavior is preserved.
    previousLogFiles = getPreviousLogFiles(start, stop)
    for f in previousLogFiles:
        collectd.notice("# Curr Log File: {}".format(f))
        _, extraLines = getLinesFromFile(f, start, stop)
        linesUsed += extraLines
    return linesUsed
def init(data):
    """collectd init callback: create the IPC primitives and launch the
    iotop worker process, stashing handles in the shared data dict."""
    import multiprocessing as mp
    work_queue = mp.Queue()
    close_event = mp.Event()
    worker_proc = mp.Process(
        target=worker,
        args=(close_event, work_queue, data["interval"]),
    )
    data["queue"] = work_queue
    data["close_event"] = close_event
    data["process"] = worker_proc
    collectd.notice("iotop-worker: init: start process")
    worker_proc.start()
def emit(self, record):
    """
    Emits a log record to the appropriate collectd log function

    Arguments
    record -- str log record to be emitted
    """
    if record.msg is None:
        return
    text = '%s : %s' % (self.plugin, record.msg)
    if record.levelname == 'ERROR':
        collectd.error(text)
    elif record.levelname == 'WARNING':
        collectd.warning(text)
    elif record.levelname == 'NOTICE':
        collectd.notice(text)
    elif record.levelname == 'INFO':
        collectd.info(text)
    elif record.levelname == 'DEBUG':
        # Bug fix: DEBUG records were routed to collectd.info(); send them
        # to collectd.debug() (matching the other emit() handler in this file).
        collectd.debug(text)
def configure_callback(config):
    """collectd config callback: load types.db files and register a writer
    per Udp/Tcp config entry; complain about anything else."""
    global WRITERS
    global TYPES_DICT
    for node in config.children:
        key = node.key.lower()
        collectd.debug("%s key=%s node.values=%s" % ('write_socket_json', key, node.values))
        if key == 'typesdb':
            TYPES_DICT.update(read_typesdb(node.values[0]))
            continue
        if key == 'udp':
            WRITERS.append(UdpWriter(json_formatter, *node.values))
        elif key == 'tcp':
            WRITERS.append(TcpWriter(json_formatter, *node.values))
        else:
            collectd.notice("%s configuration error: unknown key %s" % ('write_socket_json', key))
def notify(n):
    """collectd notification callback: log and dispatch an alert record
    when a license key (lk) is configured."""
    if not lk:
        return
    p = {'lk': lk}
    # Alert id: the non-empty string identifier parts joined with ':'.
    p['alert_id'] = ':'.join(
        filter(lambda x: isinstance(x, basestring) and bool(x),
               [n.plugin, n.plugin_instance, n.type, n.type_instance]))
    if n.severity == NOTIF_FAILURE:
        p['status'] = "FAILURE"
    elif n.severity == NOTIF_WARNING:
        p['status'] = "WARNING"
    elif n.severity == NOTIF_OKAY:
        p['status'] = "OK"
    else:
        # Bug fix: an unrecognized severity previously left 'status' unset,
        # so the format() call below raised KeyError and the alert was lost.
        p['status'] = "UNKNOWN"
    p['message'] = n.message
    collectd.notice("[{status}] {alert_id}: {message}".format(**p))
    dispatch('alerts/store', p)
def configure_callback(config):
    # collectd config callback (plugin TEMPLATE: the $NAME, $WRITER and
    # $FORMAT placeholders and the #if/#endif markers are expanded by a
    # generator before this file is installed — do not edit them here).
    # Loads types.db files and registers one writer per Udp/Tcp entry.
    global WRITERS
    global TYPES_DICT
    for node in config.children:
        key = node.key.lower()
        collectd.debug("%s key=%s node.values=%s" % ('$NAME', key, node.values))
        if key == 'typesdb':
            TYPES_DICT.update(read_typesdb(node.values[0]))
#if "$WRITER" eq "socket"
        elif key == 'udp':
            WRITERS.append(UdpWriter($FORMAT_formatter, *node.values))
        elif key == 'tcp':
            WRITERS.append(TcpWriter($FORMAT_formatter, *node.values))
#        elif key == 'unix':
#            WRITERS.append(UnixWriter($FORMAT_formatter, *node.values))
#endif
        else:
            collectd.notice("%s configuration error: unknown key %s" % ('$NAME', key))
def reader(input_data=None):
    """collectd read callback: average the collected per-link/peer/function
    latency samples and dispatch one value per (peer, function) pair."""
    global LINKS
    collectd.notice('##### Python RRD: Reading')
    parseData()
    dsType = "func_latency"
    for l in LINKS:
        plugin = "armore-" + "__".join(l)
        for p in LINKS[l]:
            for f in LINKS[l][p]:
                samples = LINKS[l][p][f]
                if not samples:
                    # Robustness fix: an empty sample list previously raised
                    # ZeroDivisionError in the average below; skip it instead.
                    continue
                typeInstance = "{}_{}".format(p, f)
                collectd.notice("{} {} {}".format(dsType, typeInstance, plugin))
                v = collectd.Values(type=dsType, type_instance=typeInstance, plugin=plugin)
                v.values = [round(sum(samples) / len(samples), 6)]
                v.interval = 900
                v.dispatch()
def emit(self, record):
    """
    Emit a log record via the matching collectd log function.

    INFO and DEBUG records are forwarded only when self.verbose is True;
    any failure while logging is itself reported as a warning.

    Arguments
    record -- str log record to be emitted
    """
    try:
        if record.msg is None:
            return
        level = record.levelname
        text = '%s : %s' % (self.plugin, record.msg)
        if level == 'ERROR':
            collectd.error(text)
        elif level == 'WARNING':
            collectd.warning(text)
        elif level == 'NOTICE':
            collectd.notice(text)
        elif level == 'INFO' and self.verbose is True:
            collectd.info(text)
        elif level == 'DEBUG' and self.verbose is True:
            collectd.debug(text)
    except Exception as e:
        collectd.warning(('{p} [ERROR]: Failed to write log statement due '
                          'to: {e}').format(p=self.plugin, e=e))
# NOTE(review): fragment — tail of a UdpWriter-style __init__ (multicast
# interface/TTL setup, $NAME template variant) followed by flush(); the
# enclosing class header is outside this chunk.  Indentation reconstructed;
# Python-2-only `except X, msg` syntax kept byte-identical.
            if interface_ip:
                try:
                    # Bind multicast egress to the resolved interface IP.
                    self.sock.setsockopt(socket.IPPROTO_IP,
                                         socket.IP_MULTICAST_IF,
                                         socket.inet_aton(interface_ip))
                except socket.error, msg:
                    collectd.notice("%s error setting interface: %s" % ('$NAME', msg))
            else:
                # Fudge self.interface to make self.__repr__() look better
                self.interface = '<invalid>'
        if self.ttl:
            try:
                self.sock.setsockopt(socket.IPPROTO_IP,
                                     socket.IP_MULTICAST_TTL,
                                     self.ttl)
            except socket.error, msg:
                collectd.notice("%s error setting TTL to %d for host %s port %s: %s"
                                % ('$NAME', self.ttl, self.host, self.port, msg))
                # Fudge self.ttl to make self.__repr__() look better
                self.ttl = '<invalid>'

    def flush(self, values):
        # Format the buffered values and send them as one UDP datagram;
        # send failures are logged, not raised.
        message = self.formatter(values)
        try:
            collectd.debug("%s.UdpWriter.flush: %s:%s %s"
                           % ('$NAME', self.host, self.port, message))
            self.sock.sendto(message, (self.host, self.port))
        except (TypeError, socket.error), msg:
            collectd.warning("%s error sending to host %s port %s: %s"
                             % ('$NAME', self.host, self.port, msg))
def warning(self, msg):
    """Log msg at collectd's notice level, prefixed with the plugin name."""
    text = "{name}: {msg}".format(name=PLUGIN_NAME, msg=msg)
    collectd.notice(text)
def log_notice(self, msg):
    """Forward msg verbatim to collectd's notice-level log."""
    collectd.notice(msg)
def notice(self, message):
    """Log a notice-level message through collectd, tagged with this
    plugin's name."""
    text = '%s plugin: %s' % (self.name, message)
    collectd.notice(text)
def shutdowner():
    """collectd shutdown callback: announce that the plugin is stopping."""
    collectd.notice('##### Python RRD: Shutting Down')
def warning(self, msg):
    """Log msg at collectd's notice level, prefixed with the plugin name."""
    text = '{name}: {msg}'.format(name=PLUGIN_NAME, msg=msg)
    collectd.notice(text)
def notice(self, message):
    """Log a notice-level message through collectd, tagged with this
    plugin's name."""
    text = '%s plugin: %s' % (self.name, message)
    collectd.notice(text)
def initer():
    """collectd init callback: announce that the plugin is starting."""
    collectd.notice('##### Python RRD: Initializing')
# NOTE(review): fragment — tail of UdpWriter.__init__ (multicast
# interface/TTL setup, write_socket_json variant) followed by flush(); the
# enclosing class header is outside this chunk.  Indentation reconstructed;
# Python-2-only `except X, msg` syntax kept byte-identical.
            if interface_ip:
                try:
                    # Bind multicast egress to the resolved interface IP.
                    self.sock.setsockopt(socket.IPPROTO_IP,
                                         socket.IP_MULTICAST_IF,
                                         socket.inet_aton(interface_ip))
                except socket.error, msg:
                    collectd.notice("%s error setting interface: %s" % ('write_socket_json', msg))
            else:
                # Fudge self.interface to make self.__repr__() look better
                self.interface = '<invalid>'
        if self.ttl:
            try:
                self.sock.setsockopt(socket.IPPROTO_IP,
                                     socket.IP_MULTICAST_TTL,
                                     self.ttl)
            except socket.error, msg:
                collectd.notice("%s error setting TTL to %d for host %s port %s: %s"
                                % ('write_socket_json', self.ttl, self.host, self.port, msg))
                # Fudge self.ttl to make self.__repr__() look better
                self.ttl = '<invalid>'

    def flush(self, values):
        # Format the buffered values and send them as one UDP datagram;
        # send failures are logged, not raised.
        message = self.formatter(values)
        try:
            collectd.debug("%s.UdpWriter.flush: %s:%s %s"
                           % ('write_socket_json', self.host, self.port, message))
            self.sock.sendto(message, (self.host, self.port))
        except (TypeError, socket.error), msg:
            collectd.warning("%s error sending to host %s port %s: %s"
                             % ('write_socket_json', self.host, self.port, msg))
def configer(ObjConfiguration):
    """collectd config callback: announce configuration (no options read)."""
    collectd.notice('##### Python RRD: Configuring')
def read_typesdb(path):
    """
    Read a Collectd types.db file.

    :param path: Path to types.db file.
    :returns: Dictionary where the keys are the "type" and values are lists
        of ``(dsname, dstype, dsmin, dsmax)`` tuples.  ``dsmin`` and
        ``dsmax`` are floats, or the character ``U`` if undefined.

    This function should be called for each types.db file, updating the
    dictionary each time.

    >>> types_dict = {}
    >>> types_dict.update(read_typesdb('/usr/share/collectd/types.db'))
    >>> types_dict.update(read_typesdb('/usr/local/share/collectd/types.db'))

    Since Collectd 5.5 the Python plugin implements `collectd.get_dataset()`
    and `read_typesdb()` is no longer required.
    """
    types_dict = {}
    try:
        with open(path) as fp:
            for line in fp:
                fields = re.split(r'[,\s]+', line.strip())
                # Skip comments
                if fields[0].startswith('#'):
                    continue
                name = fields[0]
                if len(fields) < 2:
                    collectd.notice("configuration error: %s in %s is missing definition"
                                    % (name, path))
                    continue
                types_dict[name] = []
                for field in fields[1:]:
                    fields2 = field.split(':')
                    if len(fields2) < 4:
                        collectd.notice("configuration error: %s %s has wrong format"
                                        % (name, field))
                        continue
                    dsname = fields2[0]
                    dstype = fields2[1].lower()
                    dsmin = fields2[2]
                    dsmax = fields2[3]
                    if dsmin != 'U':
                        dsmin = float(fields2[2])
                    if dsmax != 'U':
                        dsmax = float(fields2[3])
                    types_dict[name].append((dsname, dstype, dsmin, dsmax))
                    collectd.debug("read_types_db: types_dict[%s]=%s"
                                   % (name, types_dict[name]))
    # Bug fix: `except IOError, msg` is Python-2-only syntax; `as` works on
    # both Python 2.6+ and 3.
    except IOError as msg:
        collectd.notice("configuration error: %s - %s" % (path, msg))
    # Bug fix: the dict was built but never returned, so callers doing
    # TYPES_DICT.update(read_typesdb(...)) received None and crashed.
    return types_dict
def worker(close_event, queue, interval):
    """Worker-process body: run `iotop` in batch mode, parse its periodic
    'Actual DISK READ/WRITE' lines into (unix_ts, read_bps, write_bps)
    tuples, and push them onto the queue until close_event is set or the
    iotop output ends.  Always cleans up the subprocess on exit."""
    import subprocess
    import time
    from datetime import datetime, timedelta
    proc = None
    try:
        # -o only active, -qq quiet headers, -t timestamps, -k kilobytes,
        # -d sampling delay (assumed from iotop's documented flags —
        # TODO confirm against the deployed iotop version).
        proc = subprocess.Popen(
            ["iotop", "-oqqtkd", str(interval)],
            stdout=subprocess.PIPE,
            universal_newlines=True,
        )
        collectd.notice("iotop-worker: worker: iotop started")
        # Skip first two lines, since they contain the first statistic, that is
        # often wrong:
        proc.stdout.readline()
        proc.stdout.readline()
        while proc.returncode is None and not close_event.is_set():
            line = proc.stdout.readline().strip()
            if not line:
                collectd.info("iotop-worker: worker: EOL")
                break
            # Format of the iotop line:
            # 11:22:33 Actual DISK READ: 0.00 K/s | Actual DISK WRITE: 0.00 K/s
            # Skip everything that is not this line:
            if "Actual DISK READ" not in line:
                continue
            # Convert 'HH:MM:SS' to unix time (assumes that it on the current
            # day)
            now = datetime.now()
            ts = datetime.strptime(line.split()[0], "%H:%M:%S")
            ts = now.replace(hour=ts.hour, minute=ts.minute,
                             second=ts.second, microsecond=0)
            # If the resulting time is in the future, substract days (normally
            # just one):
            while ts > now:
                ts = ts - timedelta(days=1)
            # If the timestamp is to old, just ignore it:
            if ts <= now - timedelta(days=1):
                continue
            # Convert it to unix time, without the microsecond floaty bits:
            ts = int(time.mktime(ts.timetuple()))
            # Convert XX.YY KB/s to a integer bitrate
            actual_read = int(float(line.split()[4]) * 1000 * 8)
            actual_write = int(float(line.split()[10]) * 1000 * 8)
            collectd.info("iotop-worker: worker: submitted to queue")
            # Put it in the queue:
            queue.put((ts, actual_read, actual_write))
    finally:
        # Best-effort teardown: close the queue, then make sure iotop is gone.
        queue.close()
        if proc and proc.returncode is None:
            try:
                collectd.notice("iotop-worker: worker: iotop kill")
                proc.kill()
                collectd.notice("iotop-worker: worker: iotop communicate")
                proc.communicate()
                collectd.notice("iotop-worker: worker: iotop close stdout")
                proc.stdout.close()
                collectd.notice("iotop-worker: worker: iotop wait")
                proc.wait()
                if proc.returncode is None:
                    proc.terminate()
            except OSError:
                # ignore errors when terminating iotop, we have done our best
                pass
        collectd.notice("iotop-worker: worker exits")