def read_vsys_data(command, version):
    """Runs vsys 'command' and returns results as dict.

    See command notes for description of returned data format.

    Args:
      command: str, name of script or command to execute in vsys backend.
      version: int, expected version of backend response.
    Returns:
      dict, results of 'command'; empty dict on any validation failure.
    """
    # Send request through vsys (for slice context).
    data = read_vsys_data_direct(command)
    # The backend reply must carry both a payload and a version stamp.
    for field in ('data', 'version'):
        if field not in data:
            collectd.error('%s: returned value has no "%s" field.' % (command, field))
            return {}
    # Reject replies that answer a different request than the one we sent.
    if 'message_type' in data and data['message_type'] != command:
        collectd.error('Returned message_type does not match request.')
        collectd.error('Requested: %s' % command)
        collectd.error('Received : %s' % data['message_type'])
        return {}
    # A version mismatch is only worth a warning, not a rejection.
    if data['version'] != version:
        collectd.warning('%s: version mismatch: found (%d), expected (%d)' % (
            command, data['version'], version))
    return data['data']
def configure_callback(conf):
    """Receive configuration block"""
    global REDIS_HOST, REDIS_PORT, VERBOSE_LOGGING, METRICS_HASH
    global RECORD_AS_COUNTER, RECORD_AS_DERIVE, RECORD_AS_GAUGE, RECORD_AS_ABSOLUTE
    # Map each config key to (module-global name, converter).
    handlers = {
        'Host': ('REDIS_HOST', None),
        'Port': ('REDIS_PORT', int),
        'Verbose': ('VERBOSE_LOGGING', bool),
        'Metrics_Hash': ('METRICS_HASH', None),
        'Counter': ('RECORD_AS_COUNTER', bool),
        'Derive': ('RECORD_AS_DERIVE', bool),
        'Gauge': ('RECORD_AS_GAUGE', bool),
        'Absolute': ('RECORD_AS_ABSOLUTE', bool),
    }
    for node in conf.children:
        if node.key in handlers:
            name, convert = handlers[node.key]
            raw = node.values[0]
            globals()[name] = convert(raw) if convert else raw
        else:
            collectd.warning('redis_metrics plugin: Unknown config key: %s.' % node.key)
    log_verbose('Configured with host=%s, port=%s' % (REDIS_HOST, REDIS_PORT))
def config_callback(conf):
    """Populate the CONF dict from the collectd configuration block."""
    collectd.debug('config callback')
    # Lowercased config key -> (CONF key, converter or None for passthrough).
    scalar_opts = {
        'debug': ('debug', str2bool),
        'verbose': ('verbose', str2bool),
        'cluster': ('cluster', None),
        'scli_wrap': ('scli_wrap', None),
        'user': ('scli_user', None),
        'password': ('scli_password', None),
        'ignoreselected': ('ignoreselected', str2bool),
    }
    for node in conf.children:
        key = node.key.lower()
        values = node.values
        collectd.debug('Reading config %s: %s' % (key, " ".join(str(v) for v in values)))
        if key == 'pools':
            # 'pools' keeps the whole value list, not just the first entry.
            CONF['pools'] = values
        elif key in scalar_opts:
            dest, convert = scalar_opts[key]
            CONF[dest] = convert(values[0]) if convert else values[0]
        else:
            collectd.warning('ScaleIO: unknown config key: %s' % (key))
def get_stats(socket):
    """
    Makes two calls to haproxy to fetch server info and server stats.
    Returns the dict containing metric name as the key and a tuple of
    metric value and the dict of dimensions if any
    """
    if socket is None:
        collectd.error("Socket configuration parameter is undefined. Couldn't get the stats")
        return
    stats = []
    haproxy = HAProxySocket(socket)
    try:
        server_info = haproxy.get_server_info()
        server_stats = haproxy.get_server_stats()
    except IOError:
        # BUG FIX: the 'socket' parameter (a path string) shadows the socket
        # module, so 'except socket.error:' raised AttributeError instead of
        # catching connection failures.  In Python 2, socket.error subclasses
        # IOError, so catching IOError covers the intended failure.
        collectd.warning(
            'status err Unable to connect to HAProxy socket at %s' % socket)
        return stats
    # Top-level server info: plain name -> value pairs, no dimensions.
    for key, val in server_info.iteritems():
        try:
            stats.append((key, int(val), None))
        except (TypeError, ValueError):
            # Non-numeric info fields are skipped deliberately.
            pass
    # Per-proxy stats, filtered to the configured proxy monitors.
    for statdict in server_stats:
        if not (statdict['svname'].lower() in PROXY_MONITORS or
                statdict['pxname'].lower() in PROXY_MONITORS):
            continue
        for metricname, val in statdict.items():
            try:
                stats.append((metricname, int(val),
                              {'proxy_name': statdict['pxname'],
                               'service_name': statdict['svname']}))
            except (TypeError, ValueError):
                pass
    return stats
def librato_flush_metrics(gauges, counters, data):
    """
    POST a collection of gauges and counters to Librato Metrics.
    """
    payload = json.dumps({'gauges': gauges, 'counters': counters})
    request = urllib2.Request(
        "%s%s" % (config['api'], config['api_path']),
        payload,
        {
            'Content-Type': 'application/json',
            'User-Agent': config['user_agent'],
            'Authorization': 'Basic %s' % config['auth_header'],
        })
    try:
        handle = urllib2.urlopen(request, timeout=config['flush_timeout_secs'])
        response = handle.read()
        handle.close()
    except urllib2.HTTPError as error:
        # The HTTP error body usually explains the rejection; include it.
        collectd.warning('%s: Failed to send metrics to Librato: Code: %d. Response: %s' % \
            (plugin_name, error.code, error.read()))
    except IOError as error:
        collectd.warning('%s: Error when sending metrics Librato (%s)' % \
            (plugin_name, error.reason))
def read(data=None):
    """Read callback: dispatch gnocchi storage summary gauges.

    Emits the 'measures' and 'metrics' counts as gauge values and warns
    when the collection itself overran the configured interval.
    """
    starttime = time.time()
    gnocchi = client.Client(session=keystone_session)
    try:
        status = gnocchi.status.get()
        summary = status['storage']['summary']
        # Dispatch the two summary counters with identical metadata.
        for name in ('measures', 'metrics'):
            metric = collectd.Values()
            metric.plugin = 'gnocchi_status'
            metric.interval = INTERVAL
            metric.type = 'gauge'
            metric.type_instance = name
            metric.values = [summary[name]]
            metric.dispatch()
    except Exception as err:
        collectd.error(
            'gnocchi_status: Exception getting status: {}'
            .format(err))
    timediff = time.time() - starttime
    if timediff > INTERVAL:
        collectd.warning(
            'gnocchi_status: Took: {} > {}'
            .format(round(timediff, 2), INTERVAL))
def logger(t, msg):
    """Route msg to the collectd log level selected by t ('err'/'warn'/'verb')."""
    line = "%s: %s" % (NAME, msg)
    if t == "err":
        collectd.error(line)
    # NOTE: 'err' is intentionally not part of the chain below, matching the
    # original control flow ('warn' and 'verb' are mutually exclusive).
    if t == "warn":
        collectd.warning(line)
    elif t == "verb" and VERBOSE_LOGGING == True:
        collectd.info(line)
def configure_callback(conf):
    """Received configuration information"""
    # Start from module defaults; each recognized key overrides one entry.
    cfg = {
        'host': TRAEFIK_HOST,
        'port': TRAEFIK_PORT,
        'verboseLogging': VERBOSE_LOGGING,
        'version': TRAEFIK_VERSION,
        'instance': TRAEFIK_INSTANCE,
    }
    for node in conf.children:
        if node.key == 'Host':
            cfg['host'] = node.values[0]
        elif node.key == 'Port':
            cfg['port'] = int(node.values[0])
        elif node.key == 'Verbose':
            cfg['verboseLogging'] = bool(node.values[0])
        elif node.key == 'Version':
            cfg['version'] = node.values[0]
        elif node.key == 'Instance':
            cfg['instance'] = node.values[0]
        else:
            collectd.warning('traefik plugin: Unknown config key: %s.' % node.key)
            continue
    log_verbose(cfg['verboseLogging'],
                'traefik plugin configured with host = %s, port = %s, verbose logging = %s, version = %s, instance = %s' % (
                    cfg['host'], cfg['port'], cfg['verboseLogging'],
                    cfg['version'], cfg['instance']))
    cfg['url'] = 'http://' + cfg['host'] + ':' + str(cfg['port']) + '/health'
    CONFIGS.append(cfg)
def read(*args, **kwargs):
    """Read callback: query nvidia XML output and dispatch per-GPU gauges.

    Walks each <gpu> element for every configured query in 'gpu_queries'
    and dispatches the first numeric token of the matched element's text.
    """
    global gpu_queries
    vl = collectd.Values(type='gauge')
    vl.plugin = "python.gpu_monitor"
    data = _subproc_call()
    root = xml.etree.ElementTree.fromstring(data)
    for gpu in root.getiterator('gpu'):
        vl.plugin_instance = 'cuda-{}'.format(gpu.attrib['id'])
        for _type, nest in gpu_queries.items():
            for instance in nest:
                if _type == "general":
                    # root level
                    query = instance
                else:
                    # sublevel
                    query = "{}/{}".format(_type, instance)
                try:
                    value = float(gpu.find(query).text.split(" ")[0])
                except (AttributeError, ValueError, TypeError):
                    # BUG FIX: was a bare 'except:', which also swallowed
                    # KeyboardInterrupt/SystemExit.  AttributeError covers
                    # find() returning None; ValueError/TypeError cover text
                    # that does not parse as a float.
                    collectd.warning("Could not find query {}".format(query))
                else:
                    vl.dispatch(type_instance=query, values=[value])
def configure_callback(conf):
    """Receive configuration block"""
    # Recognized keys with their defaults; anything else is warned about.
    settings = {'ip': None, 'interval': 10,
                'graphite_host': None, 'graphite_port': None}
    for node in conf.children:
        key = node.key
        if key in settings:
            settings[key] = node.values[0]
        else:
            collectd.warning('neutron_api_local_check: Unknown config key: {}'
                             .format(key))
            continue
    CONFIGS['ip'] = settings['ip']
    CONFIGS['interval'] = settings['interval']
    CONFIGS['graphite_host'] = settings['graphite_host']
    CONFIGS['graphite_port'] = settings['graphite_port']
def read_callback():
    """Poll the Avi controller and dispatch cluster/inventory metrics.

    Any exception (including authentication failures) is logged as a
    warning rather than propagated, so the read loop keeps running.
    """
    ipport = ':'.join([CONFIG['controller_ip'], CONFIG['controller_port']])
    try:
        api = ApiSession.get_session(controller_ip=ipport,
                                     username=CONFIG['username'],
                                     password=CONFIG['password'],
                                     tenant=CONFIG['tenant'])
        # Endpoint -> dispatcher for its JSON payload.
        metric_config = {
            'cluster/runtime': dispatch_cluster_runtime,
            'serviceengine-inventory': partial(dispatch_inventory, plugin='avi_serviceengine'),
            'virtualservice-inventory': partial(dispatch_inventory, plugin='avi_virtualservice'),
        }
        for endpoint, dispatch_fn in metric_config.iteritems():
            resp = api.get(endpoint)
            if resp.status_code != 200:
                # BUG FIX: collectd.error() takes a single message string, not
                # logging-style lazy arguments; the extra positional args made
                # this call raise TypeError, which the outer except swallowed,
                # hiding the real error.
                collectd.error('failed to collect %s stats: %s'
                               % (endpoint, resp.text))
                continue
            dispatch_fn(data=resp.json())
    except Exception as e:
        collectd.warning(str(e))
    return
def configure_callback(conf):
    """Receive configuration block"""
    ip = None
    port = 11211  # default memcached port
    interval = 10
    graphite_host = None
    graphite_port = None
    for node in conf.children:
        key = node.key
        val = node.values[0]
        if key == 'ip':  # memcached IP address
            ip = val
        elif key == 'interval':
            interval = val
        elif key == 'port':  # memcached port.
            # BUG FIX: this line read 'port == port' (a no-op comparison),
            # so a configured port was silently ignored and the default kept.
            port = val
        elif key == 'graphite_host':
            graphite_host = val
        elif key == 'graphite_port':
            graphite_port = val
        else:
            collectd.warning(
                'memcached_status: Unknown config key: {}'.format(key))
            continue
    CONFIGS['ip'] = ip
    CONFIGS['port'] = port
    CONFIGS['interval'] = interval
    CONFIGS['graphite_host'] = graphite_host
    CONFIGS['graphite_port'] = graphite_port
def configure_callback(conf):
    """Received configuration information"""
    settings = {"hosts": ZK_HOSTS, "port": ZK_PORT, "instance": ZK_INSTANCE}
    for node in conf.children:
        if node.key == "Hosts":
            if len(node.values[0]) > 0:
                settings["hosts"] = [host.strip()
                                     for host in node.values[0].split(",")]
            else:
                log(("ERROR: Invalid Hosts string. "
                     "Using default of %s") % settings["hosts"])
        elif node.key == "Port":
            # collectd delivers numeric values as floats; plain ints are
            # accepted too.
            if isinstance(node.values[0], (float, int)) and node.values[0] > 0:
                settings["port"] = node.values[0]
            else:
                log(("ERROR: Invalid Port number. "
                     "Using default of %s") % settings["port"])
        elif node.key == "Instance":
            if len(node.values[0]) > 0:
                settings["instance"] = node.values[0]
            else:
                log(("ERROR: Invalid Instance string. "
                     "Using default of %s") % settings["instance"])
        else:
            collectd.warning("zookeeper plugin: Unknown config key: %s." % node.key)
            continue
    log("Configured with %s." % settings)
    CONFIGS.append(settings)
def dispatch_values(values, host, plugin, plugin_instance, metric_type,
                    type_instance=None):
    """
    Dispatch metrics to collectd.
    """
    # Human-readable identity of this metric, used only for error reporting.
    path = "{0}.{1}.{2}.{3}.{4}".format(host, plugin, plugin_instance,
                                        metric_type, type_instance)
    try:
        metric = collectd.Values()
        metric.host = host
        metric.plugin = plugin
        metric.type = metric_type
        # Optional fields are set only when provided.
        if plugin_instance:
            metric.plugin_instance = plugin_instance
        if type_instance:
            metric.type_instance = type_instance
        metric.values = [values]
        metric.dispatch()
    except Exception as ex:
        collectd.warning("Failed to dispatch %s. Exception %s" % (path, ex))
def logger(self, t, msg):
    """Emit msg at the collectd log level named by t ('err'/'warn'/'verb')."""
    line = '%s: %s' % (self.name, msg)
    if t == 'err':
        collectd.error(line)
    # 'err' deliberately stays outside the chain below (original behavior).
    if t == 'warn':
        collectd.warning(line)
    elif t == 'verb' and self.verbose:
        collectd.info(line)
def configure_callback(conf):
    """Receive configuration block"""
    global config
    for node in conf.children:
        if node.key == 'AuthURL':
            config['auth_url'] = node.values[0]
        elif node.key == 'Username':
            config['username'] = node.values[0]
        elif node.key == 'Password':
            config['password'] = node.values[0]
        elif node.key == 'Tenant':
            config['tenant'] = node.values[0]
        elif node.key == 'EndpointType':
            config['endpoint_type'] = node.values[0]
        elif node.key == 'Verbose':
            config['verbose_logging'] = node.values[0]
        else:
            collectd.warning('%s plugin: Unknown config key: %s.'
                             % (plugin_name, node.key))
    # All four credentials are mandatory; report each missing one.
    if 'auth_url' not in config:
        log_error('AuthURL not defined')
    if 'username' not in config:
        log_error('Username not defined')
    if 'password' not in config:
        log_error('Password not defined')
    if 'tenant' not in config:
        log_error('Tenant not defined')
    # SECURITY FIX: never write the password itself to the log.
    log_verbose(
        "Configured with auth_url=%s, username=%s, password=%s, tenant=%s, "
        % (config['auth_url'], config['username'], '****', config['tenant'])
        + " endpoint_type=%s" % (config['endpoint_type']))
def dispatch_value(info, key, type, plugin_instance=None, type_instance=None):
    """Read a key from info response data and dispatch a value"""
    if key not in info:
        collectd.warning('redis_info plugin: Info key not found: %s' % key)
        return
    if plugin_instance is None:
        plugin_instance = 'unknown redis'
        collectd.error('redis_info plugin: plugin_instance is not set, Info key: %s' % key)
    # Fall back to the info key itself as the type instance.
    type_instance = type_instance or key
    raw = info[key]
    try:
        value = int(raw)
    except ValueError:
        # Not an integer; treat as float.
        value = float(raw)
    log_verbose('Sending value: %s=%s' % (type_instance, value))
    val = collectd.Values(plugin='redis_info')
    val.type = type
    val.type_instance = type_instance
    val.plugin_instance = plugin_instance
    val.values = [value]
    # NOTE(review): the non-empty meta dict is kept as-is from the original;
    # presumably a workaround for a write plugin -- confirm before removing.
    val.meta = {'0': True}
    val.dispatch()
def configure_callback(conf):
    """Received configuration information"""
    global VERBOSE_LOGGING
    # Guard clause: the only recognized key is 'Verbose'.
    if conf.key != 'Verbose':
        collectd.warning('nfsv4 plugin: Unknown config key: %s.' % conf.key)
        return
    VERBOSE_LOGGING = bool(conf.values[0])
def configure_callback(conf):
    """Received configuration information"""
    settings = {
        "host": MARATHON_HOST,
        "port": MARATHON_PORT,
        "instance": MARATHON_INSTANCE,
        "verbose_logging": VERBOSE_LOGGING,
    }
    for node in conf.children:
        if node.key == "Host":
            settings["host"] = node.values[0]
        elif node.key == "Port":
            settings["port"] = int(node.values[0])
        elif node.key == "Verbose":
            settings["verbose_logging"] = bool(node.values[0])
        elif node.key == "Instance":
            settings["instance"] = node.values[0]
        else:
            collectd.warning("marathon plugin: Unknown config key: %s." % node.key)
    settings["metrics_url"] = ("http://" + settings["host"] + ":"
                               + str(settings["port"]) + "/metrics")
    CONFIGS.append(settings)
    log_verbose("Configured marathon host with host=%s, port=%s"
                % (settings["host"], settings["port"]),
                settings["verbose_logging"])
def write_stats(values, types, base_path=None, client=None):
    """ Actually write the stats to statsd! """
    for idx, value in enumerate(values.values):
        value = int(value)
        # Compute the base path lazily, once, on the first value.
        if base_path is None:
            base_path = stats_path(values)
        # Multi-value types get the data-source name appended.
        if len(values.values) > 1:
            path = '.'.join((base_path, types[values.type][idx]['name']))
        else:
            path = base_path
        collectd.info('%s: %s = %s' % (values.plugin, path, value))
        if client is None:
            # No statsd client, be noisy
            message = 'Statsd client is None, not sending metrics!'
            collectd.warning(message)
            # Raise an exception so we aren't *too* noisy.
            raise RuntimeError(message)
        # Intentionally *not* wrapped in a try/except so that an
        # exception here causes collectd to slow down trying to write
        # stats.
        client.gauge(path, value)
def configure_callback(conf):
    """Received configuration information.

    BUG FIXES vs. the original:
      * SOLR_INSTANCE was only assigned inside the 'Instance' branch, so any
        other top-level node raised NameError when the instance URL was built;
        the per-node name now always has a 'default' fallback.
      * the unknown-key warning reported node.key instead of child.key.
    """
    global SOLR_HOST, SOLR_PORT, SOLR_INSTANCES, VERBOSE_LOGGING
    solr_instance = 'default'
    for node in conf.children:
        if node.key == "Instance":
            # if the instance is named, get the first given name
            if len(node.values):
                if len(node.values) > 1:
                    collectd.info("%s: Ignoring extra instance names (%s)"
                                  % (__name__, ", ".join(node.values[1:])))
                solr_instance = node.values[0]
            # else register an empty name instance
            else:
                solr_instance = 'default'
        for child in node.children:
            if child.key == 'Host':
                SOLR_HOST = child.values[0]
            elif child.key == 'Port':
                SOLR_PORT = int(child.values[0])
            elif child.key == 'Verbose':
                VERBOSE_LOGGING = bool(child.values[0])
            else:
                collectd.warning('solr_info plugin: Unknown config key: %s.'
                                 % child.key)
        # add this instance to the dict of instances
        SOLR_INSTANCES[solr_instance] = ("http://" + SOLR_HOST + ":"
                                         + str(SOLR_PORT) + "/solr/"
                                         + solr_instance)
        continue
    log_verbose('Configured with host=%s, port=%s, instance=%s'
                % (SOLR_HOST, SOLR_PORT, solr_instance))
def configure_callback(conf):
    """Received configuration information"""
    # Defaults come from the module-level constants.
    settings = {
        'host': MESOS_HOST,
        'port': MESOS_PORT,
        'verboseLogging': VERBOSE_LOGGING,
        'version': MESOS_VERSION,
        'instance': MESOS_INSTANCE,
    }
    # Config key -> (settings key, converter).
    dispatch = {
        'Host': ('host', None),
        'Port': ('port', int),
        'Verbose': ('verboseLogging', bool),
        'Version': ('version', None),
        'Instance': ('instance', None),
    }
    for node in conf.children:
        if node.key in dispatch:
            dest, convert = dispatch[node.key]
            raw = node.values[0]
            settings[dest] = convert(raw) if convert else raw
        else:
            collectd.warning('mesos-slave plugin: Unknown config key: %s.' % node.key)
            continue
    log_verbose('true',
                'mesos-slave plugin configured with host = %s, port = %s, verbose logging = %s, version = %s, instance = %s'
                % (settings['host'], settings['port'], settings['verboseLogging'],
                   settings['version'], settings['instance']))
    settings['mesos_url'] = ("http://" + settings['host'] + ":"
                             + str(settings['port']) + "/metrics/snapshot")
    CONFIGS.append(settings)
def parse_info(info_lines):
    """Parse info response from Redis.

    Returns a dict keyed by info field name; multi-value fields
    (e.g. "db0:keys=10,expire=0") become nested dicts.
    """
    info = {}
    for line in info_lines:
        if "" == line or line.startswith('#'):
            continue
        if ':' not in line:
            collectd.warning('redis_info plugin: Bad format for info line: %s' % line)
            continue
        # BUG FIX: split on the first ':' only -- values may themselves
        # contain colons, which made the bare split() raise ValueError and
        # abort the whole parse.
        key, val = line.split(':', 1)
        # Handle multi-value keys (for dbs).
        # db lines look like "db0:keys=10,expire=0"
        if ',' in val:
            split_val = val.split(',')
            val = {}
            for sub_val in split_val:
                k, _, v = sub_val.rpartition('=')
                val[k] = v
        info[key] = val
    # Compatibility with pre-2.6 redis field naming.
    info["changes_since_last_save"] = info.get("changes_since_last_save",
                                               info.get("rdb_changes_since_last_save"))
    return info
def handle_config(root):
    """Register a pair of read callbacks for every configured dump1090 Instance.

    For each <Instance> block with a URL, two collectd read callbacks are
    registered: one at the default interval and one fixed at 60 seconds.
    Both receive (instance_name, hostname-from-URL, url) as callback data.
    """
    for child in root.children:
        instance_name = None
        if child.key == 'Instance':
            instance_name = child.values[0]
            url = None
            # The URL is the only recognized option inside an Instance block.
            for ch2 in child.children:
                if ch2.key == 'URL':
                    url = ch2.values[0]
            if not url:
                collectd.warning('No URL found in dump1090 Instance ' + instance_name)
            else:
                # Default-interval poller.
                collectd.register_read(callback=handle_read, data=(instance_name, urlparse.urlparse(url).hostname, url), name='dump1090.' + instance_name)
                # Separate 60-second poller; distinct name avoids a
                # registration collision with the one above.
                collectd.register_read(
                    callback=handle_read_1min,
                    data=(instance_name, urlparse.urlparse(url).hostname, url),
                    name='dump1090.' + instance_name + '.1min',
                    interval=60)
        else:
            collectd.warning('Ignored config entry: ' + child.key)
def config(self, obj):
    """Parse the collectd config block into mongodb connection attributes.

    BUG FIXES vs. the original:
      * 'Port' logged ``"..." + self.mongo_port`` (str + int -> TypeError);
        non-string values are now wrapped in str().
      * the Host/ConnectionPoolStatus/ServerStats/DBStats log lines referenced
        attributes that do not exist (self.Host, self.ConnectionPoolStatus,
        ...), raising AttributeError; they now log the attribute actually set.
      * "Unkown" typo in the unknown-key warning.
    """
    for node in obj.children:
        if node.key == "Port":
            self.mongo_port = int(node.values[0])
            collectd.info("mongodb plugin: Port " + str(self.mongo_port))
        elif node.key == "Host":
            self.mongo_host = node.values[0]
            collectd.info("mongodb plugin: Host " + self.mongo_host)
        elif node.key == "User":
            self.mongo_user = node.values[0]
        elif node.key == "Password":
            self.mongo_password = node.values[0]
        elif node.key == "Databases":
            self.mongo_dbs = node.values
            collectd.info("mongodb plugin: Databases " + str(self.mongo_dbs))
        elif node.key == "ConnectionPoolStatus":
            self.includeConnPoolMetrics = node.values
            collectd.info("mongodb plugin: ConnectionPoolStatus "
                          + str(self.includeConnPoolMetrics))
        elif node.key == "ServerStats":
            self.includeServerStatsMetrics = node.values
            collectd.info("mongodb plugin: ServerStats "
                          + str(self.includeServerStatsMetrics))
        elif node.key == "DBStats":
            self.includeDbstatsMetrics = node.values
            collectd.info("mongodb plugin: DBStats "
                          + str(self.includeDbstatsMetrics))
        else:
            collectd.warning("mongodb plugin: Unknown configuration key %s" % node.key)
def influxdb_parse_types_file(path):
    """Parse a collectd types.db file into the global 'types' mapping.

    Each type name maps to a list of [name, kind, min, max] data-source
    field lists.  FIX: the file is now opened with a context manager so it
    is closed even if parsing raises (the original leaked the handle).
    """
    global types
    with open(path, "r") as f:
        for line in f:
            fields = line.split()
            if len(fields) < 2:
                continue
            type_name = fields[0]
            # Lines starting with '#' are comments.
            if type_name[0] == "#":
                continue
            v = []
            for ds in fields[1:]:
                ds = ds.rstrip(",")
                ds_fields = ds.split(":")
                if len(ds_fields) != 4:
                    collectd.warning("%s: cannot parse data source "
                                     "%s on type %s"
                                     % (plugin_name, ds, type_name))
                    continue
                v.append(ds_fields)
            types[type_name] = v
def kairosdb_parse_types_file(path):
    """Load a collectd types.db file into the global 'types' mapping.

    FIX: uses a context manager so the file is closed even when a parse
    error raises mid-file (the original only closed on the happy path).
    """
    global types
    with open(path, 'r') as f:
        for line in f:
            fields = line.split()
            if len(fields) < 2:
                continue
            type_name = fields[0]
            # Skip comment lines.
            if type_name[0] == '#':
                continue
            v = []
            for ds in fields[1:]:
                ds = ds.rstrip(',')
                ds_fields = ds.split(':')
                if len(ds_fields) != 4:
                    collectd.warning('kairosdb_writer: cannot parse data source %s on type %s'
                                     % (ds, type_name))
                    continue
                v.append(ds_fields)
            types[type_name] = v
def librato_flush_metrics(gauges, counters, data):
    """
    POST a collection of gauges and counters to Librato Metrics.
    """
    url = "%s%s" % (config['api'], config['api_path'])
    body = json.dumps({'gauges': gauges, 'counters': counters})
    # Basic-auth header was prepared once at configure time.
    headers = {
        'Authorization': 'Basic %s' % config['auth_header'],
        'User-Agent': config['user_agent'],
        'Content-Type': 'application/json',
    }
    req = urllib2.Request(url, body, headers)
    try:
        conn = urllib2.urlopen(req, timeout=config['flush_timeout_secs'])
        response = conn.read()
        conn.close()
    except urllib2.HTTPError as error:
        body = error.read()
        collectd.warning('%s: Failed to send metrics to Librato: Code: %d. Response: %s' % \
            (plugin_name, error.code, body))
    except IOError as error:
        collectd.warning('%s: Error when sending metrics Librato (%s)' % \
            (plugin_name, error.reason))
def configure_callback(conf):
    """Receive configuration block"""
    settings = {'host': None, 'port': None, 'auth': None, 'instance': None}
    for node in conf.children:
        key = node.key.lower()
        val = node.values[0]
        if key == 'host':
            settings['host'] = val
        elif key == 'port':
            settings['port'] = int(val)
        elif key == 'auth':
            settings['auth'] = val
        elif key == 'verbose':
            # Verbosity is sticky: once enabled it cannot be turned back off.
            global VERBOSE_LOGGING
            VERBOSE_LOGGING = bool(node.values[0]) or VERBOSE_LOGGING
        elif key == 'instance':
            settings['instance'] = val
        else:
            collectd.warning('redis_info plugin: Unknown config key: %s.' % key)
            continue
    log_verbose('Configured with host=%s, port=%s, instance name=%s, using_auth=%s'
                % (settings['host'], settings['port'], settings['instance'],
                   settings['auth'] is not None))
    CONFIGS.append(settings)
def configure_callback(conf):
    """Received configuration information.

    BUG FIX: MESOS_VERSION was missing from the global declaration, so the
    'Version' branch created a function-local and -- when the config had no
    Version key -- the comparisons below raised UnboundLocalError.
    """
    global MESOS_HOST, MESOS_PORT, MESOS_URL, MESOS_VERSION, VERBOSE_LOGGING, STATS_CUR
    for node in conf.children:
        if node.key == 'Host':
            MESOS_HOST = node.values[0]
        elif node.key == 'Port':
            MESOS_PORT = int(node.values[0])
        elif node.key == 'Verbose':
            VERBOSE_LOGGING = bool(node.values[0])
        elif node.key == 'Version':
            MESOS_VERSION = node.values[0]
        else:
            collectd.warning('mesos-slave plugin: Unknown config key: %s.'
                             % node.key)
    # Pick the stat set matching the configured mesos version; anything
    # newer than 0.21.x falls back to the 0.21 set.
    if MESOS_VERSION == "0.19.0" or MESOS_VERSION == "0.19.1":
        STATS_CUR = dict(STATS_MESOS.items() + STATS_MESOS_019.items())
    elif MESOS_VERSION == "0.20.0" or MESOS_VERSION == "0.20.1":
        STATS_CUR = dict(STATS_MESOS.items() + STATS_MESOS_020.items())
    elif MESOS_VERSION == "0.21.0" or MESOS_VERSION == "0.21.1":
        STATS_CUR = dict(STATS_MESOS.items() + STATS_MESOS_021.items())
    else:
        STATS_CUR = dict(STATS_MESOS.items() + STATS_MESOS_021.items())
    MESOS_URL = "http://" + MESOS_HOST + ":" + str(
        MESOS_PORT) + "/metrics/snapshot"
    log_verbose(
        'mesos-slave plugin configured with version=%s, host=%s, port=%s, url=%s'
        % (MESOS_VERSION, MESOS_HOST, MESOS_PORT, MESOS_URL))
def parse_types_file(path):
    """Parse a collectd types.db file.

    Returns a dict mapping each type name to a list of
    [name, kind, min, max] data-source field lists.

    FIX: the file is opened with a context manager so the handle is closed
    even if parsing raises (the original leaked it on error).
    """
    types = {}
    with open(path, 'r') as f:
        for line in f:
            fields = line.split()
            if len(fields) < 2:
                continue
            type_name = fields[0]
            # '#' introduces a comment line.
            if type_name[0] == '#':
                continue
            v = []
            for ds in fields[1:]:
                ds = ds.rstrip(',')
                ds_fields = ds.split(':')
                if len(ds_fields) != 4:
                    collectd.warning(
                        'collectd2python: cannot parse data source %s on type %s'
                        % (ds, type_name))
                    continue
                v.append(ds_fields)
            types[type_name] = v
    return types
def configure_callback(config):
    """Build one CONFIG entry from the collectd configuration block."""
    cfg = {
        'host': '127.0.0.1',
        'port': 6379,
        'password': '******',
        'sentinel_port': 26379,
        'sentinel_name': 'mymaster',
        'redis_info': {},
    }
    pattern = re.compile(r'Redis_(.*)$', re.M | re.I)
    for node in config.children:
        k, v = node.key, node.values[0]
        if k == 'Host':
            cfg['host'] = v
        elif k == 'Port':
            cfg['port'] = int(v)
        elif k == 'Password':
            cfg['password'] = v
        elif k == 'Sentinel_port':
            cfg['sentinel_port'] = int(v)
        elif k == 'Sentinel_name':
            cfg['sentinel_name'] = v
        else:
            # Keys of the form 'Redis_<name>' select extra info fields.
            match = pattern.search(k)
            if match:
                cfg['redis_info'][match.group(1)] = v
            else:
                collectd.warning('unknown config key: %s' % (k))
    CONFIG.append(cfg)
def configure_callback(conf):
    """Receive configuration block"""
    settings = {'host': None, 'port': None, 'auth': None, 'instance': None}
    for node in conf.children:
        key = node.key.lower()
        val = node.values[0]
        log_verbose('Analyzing config %s key (value: %s)' % (key, val))
        matched = re.search(r'redis_(.*)$', key, re.M | re.I)
        if key == 'host':
            settings['host'] = val
        elif key == 'port':
            settings['port'] = int(val)
        elif key == 'auth':
            settings['auth'] = val
        elif key == 'verbose':
            # Sticky flag: once verbose is enabled it stays enabled.
            global VERBOSE_LOGGING
            VERBOSE_LOGGING = bool(node.values[0]) or VERBOSE_LOGGING
        elif key == 'instance':
            settings['instance'] = val
        elif matched:
            log_verbose('Matching expression found: key: %s - value: %s'
                        % (matched.group(1), val))
            global REDIS_INFO
            # NOTE: keyed by the (name, value) tuple, as in the original.
            REDIS_INFO[matched.group(1), val] = True
        else:
            collectd.warning('redis_info plugin: Unknown config key: %s.' % key)
            continue
    log_verbose('Configured with host=%s, port=%s, instance name=%s, using_auth=%s'
                % (settings['host'], settings['port'], settings['instance'],
                   settings['auth'] is not None))
    CONFIGS.append(settings)
def parse_info(info_lines):
    """Parse info response from Redis.

    Multi-value fields are flattened into "<key>_<subkey>" entries, e.g.
    "db0:keys=10,expire=0" becomes db0_keys and db0_expire.
    """
    info = {}
    for line in info_lines:
        if "" == line or line.startswith('#'):
            continue
        if ':' not in line:
            collectd.warning('redis_info plugin: Bad format for info line: %s' % line)
            continue
        # BUG FIX: split on the first ':' only -- values may themselves
        # contain colons, which made the bare split() raise ValueError and
        # abort the whole parse.
        key, val = line.split(':', 1)
        # Handle multi-value keys (for dbs and slaves).
        # db lines look like "db0:keys=10,expire=0"
        # slave lines look like "slave0:ip=192.168.0.181,port=6379,state=online,offset=1650991674247,lag=1"
        if ',' in val:
            split_val = val.split(',')
            for sub_val in split_val:
                k, _, v = sub_val.rpartition('=')
                sub_key = "{0}_{1}".format(key, k)
                info[sub_key] = v
        else:
            info[key] = val
    # compatibility with pre-2.6 redis (used changes_since_last_save)
    info["changes_since_last_save"] = info.get("changes_since_last_save",
                                               info.get("rdb_changes_since_last_save"))
    return info
def configure_callback(conf):
    """Received configuration information.

    BUG FIX: ES_CLUSTER was missing from the global declaration, so a
    configured 'Cluster' value was stored in a function-local and silently
    discarded instead of updating the module-level setting.
    """
    global ES_HOST, ES_PORT, ES_URL, ES_VERSION, ES_CLUSTER, VERBOSE_LOGGING, STATS_CUR
    for node in conf.children:
        if node.key == 'Host':
            ES_HOST = node.values[0]
        elif node.key == 'Port':
            ES_PORT = int(node.values[0])
        elif node.key == 'Verbose':
            VERBOSE_LOGGING = bool(node.values[0])
        elif node.key == 'Cluster':
            ES_CLUSTER = node.values[0]
        elif node.key == "Version":
            ES_VERSION = node.values[0]
        else:
            collectd.warning('elasticsearch plugin: Unknown config key: %s.'
                             % node.key)
    # The node-stats endpoint moved between the 0.9 and 1.0 APIs.
    if ES_VERSION == "1.0":
        ES_URL = "http://" + ES_HOST + ":" + str(
            ES_PORT) + "/_nodes/_local/stats/transport,http,process,jvm,indices"
        STATS_CUR = dict(STATS.items() + STATS_ES1.items())
    else:
        ES_URL = "http://" + ES_HOST + ":" + str(
            ES_PORT) + "/_cluster/nodes/_local/stats?http=true&process=true&jvm=true&transport=true"
        STATS_CUR = dict(STATS.items() + STATS_ES09.items())
    log_verbose('Configured with version=%s, host=%s, port=%s, url=%s'
                % (ES_VERSION, ES_HOST, ES_PORT, ES_URL))
def configure_callback(conf):
    """Received configuration information.

    BUG FIX: MESOS_VERSION was not in the global declaration, so assigning
    it in the 'Version' branch made it function-local; configs without a
    Version key then hit UnboundLocalError at the comparisons below.
    """
    global MESOS_HOST, MESOS_PORT, MESOS_URL, MESOS_VERSION, VERBOSE_LOGGING, STATS_CUR
    for node in conf.children:
        if node.key == 'Host':
            MESOS_HOST = node.values[0]
        elif node.key == 'Port':
            MESOS_PORT = int(node.values[0])
        elif node.key == 'Verbose':
            VERBOSE_LOGGING = bool(node.values[0])
        elif node.key == 'Version':
            MESOS_VERSION = node.values[0]
        else:
            collectd.warning('mesos-slave plugin: Unknown config key: %s.'
                             % node.key)
    # Select the stat set for the configured version; unknown versions fall
    # back to the 0.21 set.
    if MESOS_VERSION == "0.19.0" or MESOS_VERSION == "0.19.1":
        STATS_CUR = dict(STATS_MESOS.items() + STATS_MESOS_019.items())
    elif MESOS_VERSION == "0.20.0" or MESOS_VERSION == "0.20.1":
        STATS_CUR = dict(STATS_MESOS.items() + STATS_MESOS_020.items())
    elif MESOS_VERSION == "0.21.0" or MESOS_VERSION == "0.21.1":
        STATS_CUR = dict(STATS_MESOS.items() + STATS_MESOS_021.items())
    else:
        STATS_CUR = dict(STATS_MESOS.items() + STATS_MESOS_021.items())
    MESOS_URL = "http://" + MESOS_HOST + ":" + str(MESOS_PORT) + "/metrics/snapshot"
    log_verbose('mesos-slave plugin configured with version=%s, host=%s, port=%s, url=%s'
                % (MESOS_VERSION, MESOS_HOST, MESOS_PORT, MESOS_URL))
def read_stats(path=MGMT_SOCK_PATH):
    """
    Requests stats over the given UNIX socket, and returns them as a
    file-like object.

    Sends "STATS\\n" and accumulates the response.  Re-raises
    socket.timeout if the peer stalls mid-read; the socket is always
    closed before returning or propagating.
    """
    sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    message = ""
    try:
        sock.connect(path)
        sock.send("STATS\n")
        # Timeout applies to the recv() calls below; connect/send above
        # still use the default (blocking) behavior.
        sock.settimeout(0.5)
        while True:
            buf = sock.recv(BUFFER_SIZE)
            message += buf
            # A short read is treated as end-of-response.
            # NOTE(review): this heuristic can truncate a reply that merely
            # arrives in a chunk smaller than BUFFER_SIZE -- confirm the
            # server always sends the full response in one burst.
            if len(buf) < BUFFER_SIZE:
                break
    except socket.timeout:
        collectd.warning("Socket read timeout")
        raise
    finally:
        # Always release the socket, on success and on error alike.
        sock.close()
    if message:
        log_verbose("Read %s from sock" % len(message))
    return StringIO.StringIO(message)
def configure_callback(conf):
    """Received configuration information"""
    zk_hosts = ZK_HOSTS
    zk_port = ZK_PORT
    zk_instance = ZK_INSTANCE
    for node in conf.children:
        if node.key == 'Hosts':
            if len(node.values[0]) > 0:
                zk_hosts = [host.strip() for host in node.values[0].split(',')]
            else:
                log(('ERROR: Invalid Hosts string. '
                     'Using default of %s') % zk_hosts)
        elif node.key == 'Port':
            # FIX: accept ints as well as floats.  collectd hands numeric
            # config values over as floats, but a plain int is equally valid;
            # the sibling zookeeper config handler in this file already
            # accepts both.
            if isinstance(node.values[0], (float, int)) and node.values[0] > 0:
                zk_port = node.values[0]
            else:
                log(('ERROR: Invalid Port number. '
                     'Using default of %s') % zk_port)
        elif node.key == 'Instance':
            if len(node.values[0]) > 0:
                zk_instance = node.values[0]
            else:
                log(('ERROR: Invalid Instance string. '
                     'Using default of %s') % zk_instance)
        else:
            collectd.warning('zookeeper plugin: Unknown config key: %s.'
                             % node.key)
            continue
    config = {'hosts': zk_hosts, 'port': zk_port, 'instance': zk_instance}
    log('Configured with %s.' % config)
    CONFIGS.append(config)
def logger(t, msg):
    """Send msg to collectd's error/warning/info channel per tag t."""
    formatted = '%s: %s' % (NAME, msg)
    if t == 'err':
        collectd.error(formatted)
    # Matching the original: 'err' is checked independently of warn/verb.
    if t == 'warn':
        collectd.warning(formatted)
    elif t == 'verb' and VERBOSE_LOGGING == True:
        collectd.info(formatted)
def config_callback(self, conf):
    """Takes a collectd conf object and fills in the local config."""
    # Options stored verbatim as strings: config key -> attribute name.
    str_opts = {
        "Username": "username",
        "Password": "password",
        "TenantName": "tenant",
        "AuthURL": "auth_url",
        "Prefix": "prefix",
    }
    # Options coerced to float, stored under the listed attribute.
    float_opts = {
        "AllocationRatioCores": "AllocationRatioCores",
        "AllocationRatioRam": "AllocationRatioRam",
        "ReservedNodeCores": "ReservedNodeCores",
        "ReservedNodeRamMB": "ReservedNodeRamMB",
        "ReservedCores": "ReservedCores",
        "ReservedRamMB": "ReservedRamMB",
        "Interval": "interval",
    }
    for node in conf.children:
        value = node.values[0]
        if node.key in str_opts:
            setattr(self, str_opts[node.key], value)
        elif node.key in float_opts:
            setattr(self, float_opts[node.key], float(value))
        elif node.key == "Verbose":
            # Only the literal strings 'True'/'true' switch the flag on.
            if value in ['True', 'true']:
                self.verbose = True
        elif node.key == "Debug":
            if value in ['True', 'true']:
                self.debug = True
        else:
            collectd.warning("%s: unknown config key: %s"
                             % (self.prefix, node.key))
def configure_callback(conf):
    """Receive configuration block"""
    recognized = ('ip', 'interval', 'graphite_host', 'graphite_port')
    parsed = dict.fromkeys(recognized)
    parsed['interval'] = 10  # default polling interval
    for node in conf.children:
        if node.key in recognized:
            parsed[node.key] = node.values[0]
        else:
            collectd.warning(
                'nova_cloud_stats: Unknown config key: {}'.format(node.key))
            continue
    # Authenticate once at configure time and cache the token reference.
    auth_ref = get_auth_ref()
    CONFIGS['ip'] = parsed['ip']
    CONFIGS['auth_ref'] = auth_ref
    CONFIGS['interval'] = parsed['interval']
    CONFIGS['graphite_host'] = parsed['graphite_host']
    CONFIGS['graphite_port'] = parsed['graphite_port']
def parse_types_file(path):
    """Load the collectd types.db at 'path' into COLLECTD_TYPES.

    Each type name maps to a list of [name, kind, min, max] field lists.
    """
    global COLLECTD_TYPES
    with open(path, 'r') as handle:
        for raw in handle:
            tokens = raw.split()
            if len(tokens) < 2:
                continue
            type_name = tokens[0]
            # '#' marks a comment line.
            if type_name.startswith('#'):
                continue
            sources = []
            for spec in tokens[1:]:
                spec = spec.rstrip(',')
                parts = spec.split(':')
                if len(parts) == 4:
                    sources.append(parts)
                else:
                    collectd.warning('amqp-writer: cannot parse data source %s on type %s' % (spec, type_name))
            COLLECTD_TYPES[type_name] = sources
def rest_request(self, url, path, *args, **kwargs):
    """ Makes REST call to Hadoop API endpoint.

    Joins url/path and any extra positional args into the request URL,
    appending kwargs as a query string.  Returns the response body as a
    string, or None on any failure (404s are silently treated as "no
    data"; other HTTP/URL errors are logged first).
    """
    # Normalize slashes so url/path join with exactly one separator.
    url = url.rstrip('/')+"/"+path.lstrip('/')
    if args:
        for arg in args:
            url = url.rstrip('/')+"/"+arg.lstrip('/')
    if kwargs:
        # NOTE(review): values are not URL-encoded here -- presumably all
        # callers pass safe tokens; confirm before adding new call sites.
        query = '&'.join(['{0}={1}'.format(key, value) for key, value in kwargs.iteritems()])
        url = urljoin(url, '?' + query)
    try:
        req = urllib2.Request(url)
        data = urllib2.urlopen(req)
        resp = data.read()
        return resp
    except (urllib2.HTTPError, urllib2.URLError) as e:
        # 404 is an expected "endpoint absent" case and stays quiet.
        if not (isinstance(e, urllib2.HTTPError) and e.code == 404):
            collectd.warning("hadoop : Unable to make request at ({0}) {1}".format(e, url))
        return None
    # TODO: figure out what other specific exceptions should be caught
    # and remove the generic exception
    except Exception:
        return None
def multi_config(self, obj):
    """Parse one configuration block and append its settings to CONFIGS."""
    # Config key -> (settings field, converter). Values other than Port are
    # stored as-is.
    dispatch = {
        'Port': ('admin_port', int),
        'Host': ('host', lambda v: v),
        'Name': ('plugin_name', lambda v: v),
        'Vhost': ('vhost', lambda v: v),
        'Username': ('username', lambda v: v),
        'Password': ('password', lambda v: v),
    }
    settings = {'admin_port': None, 'host': None, 'plugin_name': None,
                'vhost': None, 'username': None, 'password': None}
    for node in obj.children:
        entry = dispatch.get(node.key)
        if entry is None:
            collectd.warning("%s: Unknown configuration key %s" % (PLUGIN_NAME,
                                                                   node.key))
            continue
        field, convert = entry
        settings[field] = convert(node.values[0])
    CONFIGS.append(settings)
def configurator(collectd_conf):
    """ configure the cadvisor metrics collector
    options:
        host: ip of target mesos host
        port: port of target mesos host
        config_file: path to cadvisor.yaml
    """
    global client
    collectd.info('Loading CAdvisorMetrics plugin')
    # Lower-cased config key -> (config dict key, value converter).
    converters = {
        'host': ('host', lambda v: v),
        'port': ('port', int),
        'configfile': ('config_file', lambda v: v),
    }
    config = {}
    for item in collectd_conf.children:
        entry = converters.get(item.key.lower())
        if entry is None:
            collectd.warning(
                'cadvisor plugin: unknown config key {} = {}'.format(
                    item.key, item.values[0]))
            continue
        name, cast = entry
        config[name] = cast(item.values[0])
    client = CAdvisorMetrics(config)
def librato_parse_types_file(path):
    """Parse one or more collectd types.db files into the `types` global.

    Each entry maps a type name to a list of 4-field data-source specs
    ([name, kind, min, max]); malformed specs are warned about and skipped.

    Args:
      path: iterable of str, paths to types.db files.
    """
    global types
    for file_path in path:
        # BUG FIX: use a with-statement so the handle is closed even when
        # parsing raises; the original open()/close() pair leaked it on error.
        with open(file_path, 'r') as f:
            for line in f:
                fields = line.split()
                # Skip blank/short lines and comment lines.
                if len(fields) < 2:
                    continue
                type_name = fields[0]
                if type_name[0] == '#':
                    continue
                v = []
                for ds in fields[1:]:
                    ds = ds.rstrip(',')
                    ds_fields = ds.split(':')
                    if len(ds_fields) != 4:
                        collectd.warning('%s: cannot parse data source ' \
                                         '%s on type %s' % (plugin_name, ds, type_name))
                        continue
                    v.append(ds_fields)
                types[type_name] = v
def dispatch_value(info, key, type, plugin_instance=None, type_instance=None):
    """Read a key from info response data and dispatch a value"""
    if key not in info:
        collectd.warning('redis_info plugin: Info key not found: %s' % key)
        return
    if plugin_instance is None:
        # Keep dispatching under a placeholder instance, but flag the caller.
        plugin_instance = 'unknown redis'
        collectd.error(
            'redis_info plugin: plugin_instance is not set, Info key: %s' % key)
    type_instance = type_instance or key
    raw = info[key]
    try:
        value = int(raw)
    except ValueError:
        # Non-integer metrics (e.g. ratios) fall back to float.
        value = float(raw)
    log_verbose('Sending value: %s=%s' % (type_instance, value))
    metric = collectd.Values(plugin='redis_info')
    metric.type = type
    metric.type_instance = type_instance
    metric.plugin_instance = plugin_instance
    metric.values = [value]
    metric.dispatch()
def configure_callback(conf):
    """Store the MirrorDir setting into the module-global MIRROR_DIR."""
    global MIRROR_DIR
    for node in conf.children:
        if node.key != "MirrorDir":
            collectd.warning("pypi_mirror plugin: Unknown config key: %s." % node.key)
            continue
        MIRROR_DIR = node.values[0]
def kairosdb_send_http_data(data, json):
    """POST a JSON datapoints payload to the KairosDB HTTP API.

    Args:
      data: dict holding 'lock' (threading lock), 'conn' (httplib connection)
          and connection settings consumed by kairosdb_connect().
      json: str, JSON-encoded body for /api/v1/datapoints.

    Returns:
      True on HTTP 204, False on any failure, None when no connection
      could be established.
    """
    collectd.debug('Json=%s' % json)
    data['lock'].acquire()
    # BUG FIX: the lock was acquired but only released on the early
    # no-connection return; the success and error paths left it held,
    # deadlocking every subsequent writer. try/finally guarantees release.
    try:
        if not kairosdb_connect(data):
            collectd.warning('kairosdb_writer: no connection to kairosdb server')
            return
        response = ''
        try:
            headers = {'Content-type': 'application/json',
                       'Connection': 'keep-alive'}
            data['conn'].request('POST', '/api/v1/datapoints', json, headers)
            res = data['conn'].getresponse()
            response = res.read()
            collectd.debug('Response code: %d' % res.status)
            if res.status == 204:
                exit_code = True
            else:
                collectd.error(response)
                exit_code = False
        # BUG FIX: 'except X, e' is py2-only syntax; 'as e' works on 2.6+/3.x.
        except httplib.ImproperConnectionState as e:
            collectd.error('Lost connection to kairosdb server: %s' % e.message)
            # Drop the stale connection so the next call reconnects.
            data['conn'] = None
            exit_code = False
        return exit_code
    finally:
        data['lock'].release()
def kairosdb_parse_types_file(path):
    """Parse a collectd types.db file into the `types` global.

    Each entry maps a type name to a list of 4-field data-source specs
    ([name, kind, min, max]); malformed specs are warned about and skipped.

    Args:
      path: str, path to a types.db file.
    """
    global types
    # BUG FIX: use a with-statement so the handle is closed even when
    # parsing raises; the original open()/close() pair leaked it on error.
    with open(path, 'r') as f:
        for line in f:
            fields = line.split()
            # Skip blank/short lines and comment lines.
            if len(fields) < 2:
                continue
            type_name = fields[0]
            if type_name[0] == '#':
                continue
            v = []
            for ds in fields[1:]:
                ds = ds.rstrip(',')
                ds_fields = ds.split(':')
                if len(ds_fields) != 4:
                    collectd.warning('kairosdb_writer: cannot parse data source %s on type %s' % (
                        ds, type_name))
                    continue
                v.append(ds_fields)
            types[type_name] = v
def plugin_write(vl, config):
    """Write callback: push one collectd value list to AWS CloudWatch.

    Args:
      vl: collectd Values object (plugin, time, values, ...).
      config: object exposing an aws_region attribute.
    """
    try:
        session = boto3.session.Session(region_name=config.aws_region)
        # Short timeouts so a slow CloudWatch endpoint cannot stall collectd.
        client_config = botocore.client.Config(connect_timeout=5, read_timeout=5)
        client = session.client('cloudwatch', config=client_config)
        metrics_list = list(metrics(vl, config))
        ts = datetime.fromtimestamp(vl.time)
        data = []
        for i, v in enumerate(vl.values):
            fullname, unit, dims = metrics_list[i]
            # CloudWatch rejects MetricName longer than 255 characters.
            name = fullname[:255]
            if len(name) < len(fullname):
                collectd.warning('Metric name was truncated for CloudWatch: {}'.format(fullname))
            data.append(dict(
                MetricName=name,
                Timestamp=ts,
                Value=v,
                Unit=unit,
                Dimensions=dims
            ))
        client.put_metric_data(Namespace=vl.plugin, MetricData=data)
    # BUG FIX: 'except Exception, e' is py2-only syntax and a SyntaxError on
    # py3; 'as e' is valid on both. Broad catch is deliberate: a write
    # callback must never propagate into collectd.
    except Exception as e:
        collectd.error(str(e))
def collectd_config(config):
    """Read 'asus' config nodes into the module-level CONFIG dict."""
    for node in config.children:
        # Only nodes whose key is (case-insensitively) 'asus' are ours.
        if node.key.lower() != 'asus':
            continue
        k = node.values[0]
        v = node.values[1]
        if k not in list(CONFIG.keys()):
            collectd.warning(
                'asus plugin: ignored unknown config setting {k} with '
                'value {v}'.format(k=k, v=v))
            continue
        if k == 'ssh_key':
            # The key material arrives inline; persist it to a file so that
            # ssh can be pointed at a path instead.
            with NamedTemporaryFile(mode='w', delete=False) as key_file:
                key_file.write(v)
                v = key_file.name
        CONFIG[k] = v
        collectd.debug(
            'asus plugin: set config key "{k}" to "{v}"'.format(k=k, v=v))
    collectd.debug('Config is {}'.format(CONFIG))
def write_callback(v, data=None):
    """Serialize a collectd value list to JSON and publish it to Kafka.

    Args:
      v: collectd Values object.
      data: unused callback payload (collectd write-callback signature).
    """
    if v.type not in TYPES:
        collectd.warning(
            'collectd2kafka: cannot handle type %s. check types.db file' % v.type)
        return
    v_type = TYPES[v.type]
    if len(v_type) != len(v.values):
        collectd.warning('collectd2kafka: more values than types %s' % v.type)
        return
    metric = {}
    metric['host'] = v.host
    metric['plugin'] = v.plugin
    metric['plugin_instance'] = v.plugin_instance
    metric['type'] = v.type
    metric['type_instance'] = v.type_instance
    metric['time'] = v.time
    metric['interval'] = v.interval
    # BUG FIX: the original published one message per value, each carrying
    # only a partial (growing) values list, and kept an index 'i' that was
    # never incremented (its v_type lookups were dead code). Build the full
    # list, then publish the complete metric exactly once.
    metric['values'] = list(v.values)
    producer.send_messages(TOPIC, json.dumps(metric))
def parse_info(info_lines):
    """Parse info response from Redis.

    Args:
      info_lines: iterable of str, lines of a redis INFO reply.

    Returns:
      dict mapping info keys to str values, or to sub-dicts for
      comma-separated multi-value lines (db*/slave* entries).
    """
    info = {}
    for line in info_lines:
        if "" == line or line.startswith('#'):
            continue
        if ':' not in line:
            collectd.warning('redis_info plugin: Bad format for info line: %s' % line)
            continue
        # BUG FIX: split on the first ':' only -- values such as file paths
        # or executable locations legitimately contain ':' and made the
        # two-target unpacking raise ValueError.
        key, val = line.split(':', 1)
        # Handle multi-value keys (for dbs and slaves).
        # db lines look like "db0:keys=10,expire=0"
        # slave lines look like "slave0:ip=192.168.0.181,port=6379,state=online,offset=1650991674247,lag=1"
        if ',' in val:
            split_val = val.split(',')
            val = {}
            for sub_val in split_val:
                k, _, v = sub_val.rpartition('=')
                val[k] = v
        info[key] = val
    info["changes_since_last_save"] = info.get("changes_since_last_save",
                                               info.get("rdb_changes_since_last_save"))
    # For each slave add an additional entry that is the replication delay
    regex = re.compile(r"slave\d+")  # raw string: avoid invalid escape '\d'
    for key in info:
        if regex.match(key):
            info[key]['delay'] = int(info['master_repl_offset']) - int(info[key]['offset'])
    return info
def report_cpuavg_for_system(stat_path):
    """Reports whole-system, average cpu usage.

    Args:
      stat_path: str, path to filename with /proc/stat contents.
    """
    if not os.path.exists(stat_path):
        collectd.error('stat path does not exist: %s' % stat_path)
        return
    with open(stat_path, 'r') as stat_file:
        cpu_lines = [l for l in stat_file if l.startswith('cpu ')]
    # There can be only one [cpu avg].
    if len(cpu_lines) == 1:
        fields = cpu_lines[0].strip().split()
        if len(fields) >= 9:
            # Columns 1..8 of the 'cpu ' line, in /proc/stat order.
            counters = ('user', 'nice', 'system', 'idle', 'wait',
                        'interrupt', 'softirq', 'steal')
            for index, counter in enumerate(counters, start=1):
                submit_cputotal(counter, int(fields[index]))
        else:
            collectd.warning('Found too few fields (%s) in stat file: %s' %
                             (len(fields), stat_path))
    submit_cpucores()