Esempio n. 1
0
 def add_read_callback(self, callback, **kwargs):
     """
     Register a read callback with collectd. kwargs will be passed to
     collectd.register_read. The callback will be called by collectd
     without arguments.
     """
     # Thin pass-through wrapper: callers can attach register_read options
     # (e.g. interval or name) via kwargs.
     collectd.register_read(callback, **kwargs)
def process_watch_config(conf):
    """
    Parse the plugin's collectd <Module> configuration block and register
    the read callback.

    Recognized keys: MinRuntimeSeconds, FilterProcesses, MinCPUPercent,
    MinMemoryPercent, ReportDockerContainerNames. Raises for unknown keys
    or out-of-range percentage values.
    """
    global MIN_RUN_INTERVAL
    global DEBUG_DO_FILTER_PROCESSES
    global MIN_CPU_USAGE_PERCENT
    global MIN_MEM_USAGE_PERCENT
    global REPORT_DOCKER_CONTAINER_NAMES

    for kv in conf.children:
        if kv.key == 'MinRuntimeSeconds':
            # int() will throw exception if invalid
            MIN_RUN_INTERVAL = int(kv.values[0])
        elif kv.key == 'FilterProcesses':
            DEBUG_DO_FILTER_PROCESSES = kv.values[0]
            # collectd may hand us a bool or a raw string; coerce strings
            if not isinstance(DEBUG_DO_FILTER_PROCESSES, bool):
                DEBUG_DO_FILTER_PROCESSES = str2bool(kv.values[0])
        elif kv.key == 'MinCPUPercent':
            # Parse once and reject anything outside 1..100. The original
            # test (== 0 or > 100) let negative values slip through.
            percent = int(kv.values[0])
            if percent < 1 or percent > 100:
                raise Exception('invalid value for ' + kv.key)
            MIN_CPU_USAGE_PERCENT = percent
        elif kv.key == 'MinMemoryPercent':
            percent = int(kv.values[0])
            if percent < 1 or percent > 100:
                raise Exception('invalid value for ' + kv.key)
            MIN_MEM_USAGE_PERCENT = percent
        elif kv.key == 'ReportDockerContainerNames':
            REPORT_DOCKER_CONTAINER_NAMES = kv.values[0]
            if not isinstance(REPORT_DOCKER_CONTAINER_NAMES, bool):
                REPORT_DOCKER_CONTAINER_NAMES = str2bool(kv.values[0])
        else:
            raise Exception('unknown config parameter')
    collectd.register_read(send_metrics)
Esempio n. 3
0
 def __init__(self,direction,unit):
   # Open a sysfs network-statistics counter file and register a collectd
   # read callback for it.
   #
   # NOTE(review): the format string below has three placeholders
   # ({0}, {1}, {2}) but .format() receives only two arguments, so this
   # raises IndexError at construction time. Presumably {0} should be the
   # interface name — TODO confirm against the caller.
   self.unit = unit
   self.direction = direction
   # collectd calls the unit "octets"; the sysfs files are named *_bytes
   if self.unit == "octets":
     self.unit = "bytes"
   self.file_handle = open("/sys/class/net/{0}/statistics/{1}_{2}".format(self.direction,self.unit))
   collectd.register_read(self.get)
   # NOTE(review): 'data' is not defined in this scope (NameError), and
   # 'Bandwith' looks like a misspelling of 'Bandwidth' — verify both.
   self.bandwidth = Bandwith(data)
Esempio n. 4
0
def setup_collectd():
    """Wire the config/init/read/shutdown callbacks into collectd."""
    hooks = (
        (collectd.register_config, config),
        (collectd.register_init, init),
        (collectd.register_read, read),
        (collectd.register_shutdown, shutdown),
    )
    for register, callback in hooks:
        register(callback)
Esempio n. 5
0
def maasi_init():
    """Log startup and register the maasi write and read callbacks."""
    logging.info('initiating maasi daemon')
    callback_data = {
        'server_uri': server_uri,
        'interval': interval,
    }
    collectd.register_write(maasi_collect, data=callback_data)
    collectd.register_read(maasi_send)
Esempio n. 6
0
    def configure_callback(self, config):
        """
        Parse the collectd config block and register one read callback per
        enabled Ceph collector.
        """
        # Config key -> attribute holding the enable flag.
        flag_attrs = {
            'CephRadosBench': 'ceph_rados_bench',
            'CephMONStats': 'ceph_mon_stats',
            'CephOSDStats': 'ceph_osd_stats',
            'CephPGStats': 'ceph_pg_stats',
            'CephPoolStats': 'ceph_pool_stats',
        }
        # Config key -> attribute holding the per-collector interval.
        interval_attrs = {
            'CephRadosBenchInterval': 'ceph_rados_bench_interval',
            'CephMONStatsInterval': 'ceph_mon_stats_interval',
            'CephOSDStatsInterval': 'ceph_osd_stats_interval',
            'CephPGStatsInterval': 'ceph_pg_stats_interval',
            'CephPoolStatsInterval': 'ceph_pool_stats_interval',
        }

        for node in config.children:
            val = str(node.values[0])
            if node.key in flag_attrs:
                setattr(self, flag_attrs[node.key], val in ('True', 'true'))
            elif node.key == 'CephCluster':
                self.ceph_cluster = val
            elif node.key in interval_attrs:
                setattr(self, interval_attrs[node.key], int(float(val)))
            else:
                collectd.warning(
                    'collectd-ceph-storage: Unknown config key: {}'
                    .format(node.key))

        if not self.ceph_cluster:
            collectd.warning('collectd-ceph-storage: CephCluster Undefined')

        # (flag attribute, log message, callback, interval attribute, name)
        collectors = (
            ('ceph_rados_bench', 'Registered Ceph Rados Bench',
             self.read_ceph_rados_bench, 'ceph_rados_bench_interval',
             'ceph-rados-bench'),
            ('ceph_mon_stats', 'Registered Ceph Mon',
             self.read_ceph_mon, 'ceph_mon_stats_interval', 'ceph-monitor'),
            ('ceph_osd_stats', 'Registered Ceph OSD',
             self.read_ceph_osd, 'ceph_osd_stats_interval', 'ceph-osd'),
            ('ceph_pg_stats', 'Registered Ceph PG',
             self.read_ceph_pg, 'ceph_pg_stats_interval', 'ceph-pg'),
            ('ceph_pool_stats', 'Registered Ceph Pool',
             self.read_ceph_pool, 'ceph_pool_stats_interval', 'ceph-pool'),
        )
        for flag_attr, message, callback, interval_attr, name in collectors:
            if getattr(self, flag_attr):
                collectd.info(message)
                collectd.register_read(
                    callback, getattr(self, interval_attr), name=name)
def configure(configobj):
    """Read the optional 'interval' setting (default 10) and register the
    gnocchi status read callback."""
    global INTERVAL

    settings = {child.key: child.values for child in configobj.children}
    INTERVAL = settings.get('interval', [10])[0]
    collectd.info('gnocchi_status: Interval: {}'.format(INTERVAL))
    collectd.register_read(read, INTERVAL)
def configure_callback(conf):
    """
    Receive and parse the plugin configuration block, then build a
    RedisCollector and register its read callback with collectd.
    """
    host = None
    port = None
    auth = None
    instance = None
    # llen_keys: db index -> list of keys whose list length to report
    llen_keys = {}
    # metric_types: (metric name, configured value) tuples enabled below
    metric_types = {}
    verbose = False

    for node in conf.children:
        key = node.key.lower()
        val = node.values[0]
        # Keys of the form "redis_<metric>" select extra metrics to emit.
        searchObj = re.search(r'redis_(.*)$', key, re.M | re.I)

        if key == 'host':
            host = val
        elif key == 'port':
            port = int(val)
        elif key == 'auth':
            auth = val
        elif key == 'verbose':
            verbose = node.values[0]
        elif key == 'instance':
            instance = val
        elif key == 'sendlistlength':
            # SendListLength needs two values: <db index> <key name>.
            if (len(node.values)) == 2:
                llen_keys.setdefault(int(node.values[0]), []).append(node.values[1])
            else:
                collectd.warning("redis_info plugin: monitoring length of keys requires both \
                                    database index and key value")

        elif searchObj:
            metric_types[searchObj.group(1), val] = True
        else:
            collectd.warning('redis_info plugin: Unknown config key: %s.' %
                             key)
            continue

    if verbose:
        collectd.info('Configured with host=%s, port=%s, instance name=%s, using_auth=%s, llen_keys=%s'
            % (host, port, instance, (auth is not None), llen_keys))

    collector = RedisCollector(**{
        'host': host,
        'port': port,
        'auth': auth,
        'instance': instance,
        'metric_types': metric_types,
        'verbose': verbose,
        'llen_keys': llen_keys})

    # One read callback per configured instance, distinguished by name.
    collectd.register_read(collector.read_callback, name="%s:%s:%s" % (host, port, instance))
Esempio n. 9
0
    def configure_callback(self, configobj):
        """
        Parse the swift-stat <Module> config block, validate that all
        required settings are present, then open the Swift session and
        register the read callback.
        """
        # Simple string options: config key -> attribute name.
        option_attrs = {
            'Prefix': 'prefix',
            'User': 'user',
            'Password': 'password',
            'AuthURL': 'authurl',
            'AuthVersion': 'authversion',
            'Project': 'project',
        }
        for node in configobj.children:
            value = str(node.values[0])
            if node.key == 'Interval':
                self.interval = int(float(value))
            elif node.key in option_attrs:
                setattr(self, option_attrs[node.key], value)
            else:
                collectd.warning(
                    'collectd-swift-stat: Unknown config key: {}'
                    .format(node.key))

        # Every option is mandatory; report each one that is missing.
        read_plugin = True
        required = (
            (self.prefix, 'Prefix'),
            (self.user, 'User'),
            (self.password, 'Password'),
            (self.authurl, 'AuthURL'),
            (self.authversion, 'AuthVersion'),
            (self.project, 'Project'),
        )
        for attr_value, label in required:
            if not attr_value:
                collectd.error(
                    'collectd-swift-stat: {} Undefined'.format(label))
                read_plugin = False

        if not read_plugin:
            collectd.error('collectd_swift_stat: Invalid configuration')
            return

        # NOTE(review): this writes the password to the collectd log in
        # clear text — consider redacting.
        collectd.info(
            'swift_stat: Connecting with user={}, password={}, tenant={}, auth_url={},'
            ' auth_version={}'.format(
                self.user, self.password, self.project, self.authurl, self.authversion))

        self.swift_conn = self.create_swift_session()
        collectd.register_read(self.read_swift_stat, self.interval)
Esempio n. 10
0
def configure(cfg):
    """Parse interval/interfaces/namespaces settings and register the read
    callback (interval is required; the lists default to empty)."""
    global INTERVAL
    global interfaces
    global namespaces

    settings = {child.key: child.values for child in cfg.children}
    interfaces = []
    namespaces = []
    INTERVAL = settings['interval'][0]
    collectd.register_read(read, INTERVAL)
    interfaces = settings.get('interfaces', [])
    namespaces = settings.get('namespaces', [])
 def config_callback(conf):
     """Collectd config callback: apply Host/Port/Verbose settings to the
     amq object and register the read callback."""
     setters = {
         'Host': lambda v: setattr(amq, 'host', v),
         'Port': lambda v: setattr(amq, 'port', int(v)),
         'Verbose': lambda v: setattr(amq, 'verbose_logging', bool(v)),
     }
     for node in conf.children:
         apply_setting = setters.get(node.key)
         if apply_setting is None:
             log.warning('activemq_info plugin: Unknown config key: %s.'
                         % node.key)
         else:
             apply_setting(node.values[0])
     amq.log_verbose('Configured with host={0}, port={1}'.format(
         amq.host, amq.port))
     collectd.register_read(read_callback)
Esempio n. 12
0
 def configure_callback(self, conf):
     """
     Receive configuration block.

     Supported keys:
       Interval - read interval in seconds
       Sensor   - maps a w1 sensor id (first value) to a display name
                  (second value)
     Registers the read callback once the block is parsed.
     """
     for node in conf.children:
         if node.key == 'Interval':
             self._interval = int(node.values[0])
         elif node.key == 'Sensor':
             sensors[node.values[0]] = node.values[1]
             collectd.warning('Sensor: ' + node.values[0] + ' Named: ' +
             node.values[1])
         else:
             collectd.warning('collectd-w1 plugin: Unknown config key: %s.' % node.key)
     # BUG FIX: register on this instance instead of the module-level
     # singleton 'mon' — the method otherwise ignores the object that was
     # actually configured.
     collectd.register_read(self.read_callback, self._interval)
     self.log_verbose('Configured with interval=%s' %
                      (self._interval,))
def configure(configobj):
    """
    Parse connection settings from the collectd config block, connect the
    RabbitMQ management client and register the read callback.

    NOTE(review): every key below is accessed unconditionally, so a missing
    setting raises KeyError; the username and password are also written to
    the collectd log in clear text.
    """
    global INTERVAL
    global cl
    global queues_to_count

    config = {c.key: c.values for c in configobj.children}
    INTERVAL = config['interval'][0]
    host = config['host'][0]
    port = int(config['port'][0])
    username = config['username'][0]
    password = config['password'][0]
    queues_to_count = config['message_count']
    collectd.info('rabbitmq_monitoring: Interval: {}'.format(INTERVAL))
    cl = Client('{}:{}'.format(host, port), username, password)
    collectd.info('rabbitmq_monitoring: Connecting to: {}:{} as user:{} password:{}'.format(host, port, username, password))
    collectd.info('rabbitmq_monitoring: Counting messages on: {}'.format(queues_to_count))
    collectd.register_read(read, INTERVAL)
Esempio n. 14
0
 def config_cb(self, config, data=None):
     """Map the collectd config block and register only the callbacks that
     the corresponding Module.* config keys enable."""
     self.config = util.map_collectd_config(config)
     if "Module.config" in self.config:
         self._log("config_cb: {!r}".format(self.config))
     hooks = (
         ("Module.init", collectd.register_init, util.init_closure),
         ("Module.read", collectd.register_read, util.read_closure),
         ("Module.write", collectd.register_write, util.write_closure),
         ("Module.notification", collectd.register_notification,
          util.notification_closure),
         ("Module.flush", collectd.register_flush, util.flush_closure),
         ("Module.log", collectd.register_log, util.log_closure),
         ("Module.shutdown", collectd.register_shutdown,
          util.shutdown_closure),
     )
     for key, register, closure in hooks:
         if key in self.config:
             register(closure(self), name=self.__module__)
Esempio n. 15
0
def handle_config(root):
    """
    Parse dump1090 <Instance> blocks and register per-instance read
    callbacks.

    Each Instance must contain a URL; for every valid instance a default
    read callback and a 60-second "1min" callback are registered.
    """
    for child in root.children:
        if child.key == 'Instance':
            instance_name = child.values[0]
            url = None
            for ch2 in child.children:
                if ch2.key == 'URL':
                    url = ch2.values[0]
            if not url:
                collectd.warning('No URL found in dump1090 Instance ' + instance_name)
            else:
                # Parse the URL once and share the data tuple between both
                # registrations (previously it was re-parsed for each one).
                read_data = (instance_name, urlparse.urlparse(url).hostname, url)
                collectd.register_read(callback=handle_read,
                                       data=read_data,
                                       name='dump1090.' + instance_name)
                collectd.register_read(callback=handle_read_1min,
                                       data=read_data,
                                       name='dump1090.' + instance_name + '.1min',
                                       interval=60)

        else:
            collectd.warning('Ignored config entry: ' + child.key)
def plugin_config(collectd_config):
    """Parse the RabbitMQ plugin configuration and register the read
    callback (fixed 10 second interval)."""
    settings = {'url': 'http://localhost:55672/api/', 'monitored_queues': []}
    simple_keys = {'User': 'user', 'Password': 'password', 'URL': 'url'}
    for child in collectd_config.children:
        if child.key in simple_keys:
            settings[simple_keys[child.key]] = child.values[0]
        elif child.key == 'MonitorQueue':
            # One value means the default vhost "/", two mean vhost + queue.
            values = child.values
            if len(values) == 2:
                vhost, queue = values[0:2]
            elif len(values) == 1:
                vhost, queue = "/", values[0]
            else:
                raise RuntimeError("MonitorQueue expects 1 or 2 params, no more.")
            settings['monitored_queues'].append((vhost, queue))
        elif child.key.lower() == 'verbose':
            pass
        else:
            raise RuntimeError("Unknown config item %s" % child.key)

    collectd.register_read(read_rabbitmq_metrics, 10, settings)
Esempio n. 17
0
    def configure_callback(self, conf):
        """Parse the config block: five-value entries become DATA records,
        an 'Interval' entry overrides the read interval."""
        self.message('I', 'Configuring callback...')

        for node in conf.children:
            if len(node.values) == 5:
                frm, dest, amount, direction, reciprocal = node.values
                self.DATA.append({
                    'label': node.key,
                    'from': frm,
                    'to': dest,
                    'amount': amount,
                    'direction': direction,
                    'reciprocal': reciprocal,
                })
            elif node.key == 'Interval':
                self.INTERVAL = int(node.values[0])

        self.message('I', "Data: %s" % str(self.DATA))
        interval_text = self.INTERVAL if self.INTERVAL > 0 else 'default'
        self.message('I', "Interval: %s" % interval_text)

        # Register the read callback to be able to set the correct interval
        collectd.register_read(self.read_callback, interval=self.INTERVAL)
Esempio n. 18
0
    # only log changes
    if unreachable_list_changed is True:
        if obj.unreachable_servers:
            collectd.info("%s unreachable servers: %s" %
                          (PLUGIN, obj.unreachable_servers))
        else:
            collectd.info("%s all servers are reachable" % PLUGIN)

    # The sample published to the database is simply the number
    # of reachable servers if one is selected
    if not obj.selected_server:
        sample = 0
    else:
        sample = len(obj.reachable_servers)

    # Dispatch usage value to collectd
    val = collectd.Values(host=obj.hostname)
    val.plugin = 'ntpq'
    val.type = 'absolute'
    val.type_instance = 'reachable'
    val.dispatch(values=[sample])

    return 0


# register the config, init and read functions; the read callback fires
# every PLUGIN_INTERVAL seconds
collectd.register_config(config_func)
collectd.register_init(init_func)
collectd.register_read(read_func, interval=PLUGIN_INTERVAL)
Esempio n. 19
0
    # send keyspace hits and misses, if they exist
    if 'keyspace_hits' in info: dispatch_value(info, 'keyspace_hits', 'derive', plugin_instance)
    if 'keyspace_misses' in info: dispatch_value(info, 'keyspace_misses', 'derive', plugin_instance)

    # send replication stats, but only if they exist (some belong to master only, some to slaves only)
    if 'master_repl_offset' in info: dispatch_value(info, 'master_repl_offset', 'gauge', plugin_instance)
    if 'master_last_io_seconds_ago' in info: dispatch_value(info, 'master_last_io_seconds_ago', 'gauge', plugin_instance)
    if 'slave_repl_offset' in info: dispatch_value(info, 'slave_repl_offset', 'gauge', plugin_instance)

    # database and vm stats
    for key in info:
        if key.startswith('repl_'):
            dispatch_value(info, key, 'gauge', plugin_instance)
        if key.startswith('vm_stats_'):
            dispatch_value(info, key, 'gauge', plugin_instance)
        if key.startswith('db'):
            dispatch_value(info[key], 'keys', 'counter', plugin_instance, '%s-keys' % key)
        if key.startswith('slave'):
            dispatch_value(info[key], 'delay', 'gauge', plugin_instance, '%s-delay' % key)

def log_verbose(msg):
    """Forward msg to collectd.info, but only when verbose logging is on."""
    if VERBOSE_LOGGING:
        collectd.info('redis plugin [verbose]: %s' % msg)


# register callbacks: configure_callback parses the config block,
# read_callback runs on every collectd read cycle
collectd.register_config(configure_callback)
collectd.register_read(read_callback)
Esempio n. 20
0
def configure_callback(conf):
    """
    Receive and parse the plugin configuration block.

    Builds the Solr base URL, optional HTTPS and basic-auth handlers and
    the module_config dict, then registers the read callback. When the
    Testing flag is set, module_config is returned instead (for tests).

    Raises:
        KeyError: if a required setting (Host, Port) is missing.
    """
    plugin_conf = {}
    cluster = "default"
    interval = DEFAULT_INTERVAL
    username = None
    password = None
    custom_dimensions = {}
    enhanced_metrics = False
    exclude_optional_metrics = set()
    include_optional_metrics = set()
    ssl_keys = {}
    http_timeout = DEFAULT_API_TIMEOUT
    testing = False

    required_keys = frozenset(("Host", "Port"))

    for val in conf.children:
        if val.key in required_keys:
            plugin_conf[val.key] = val.values[0]
        elif val.key == "Username" and val.values[0]:
            username = val.values[0]
        elif val.key == "Password" and val.values[0]:
            password = val.values[0]
        elif val.key == "Interval" and val.values[0]:
            interval = val.values[0]
        elif val.key == "Cluster" and val.values[0]:
            cluster = val.values[0]
            custom_dimensions["cluster"] = val.values[0]
        elif val.key == "Dimension":
            # Dimensions are key/value pairs, so exactly two values needed.
            if len(val.values) == 2:
                custom_dimensions.update({val.values[0]: val.values[1]})
            else:
                collectd.warning(
                    "WARNING: Check configuration setting for %s" % val.key)
        elif val.key == "EnhancedMetrics" and val.values[0]:
            enhanced_metrics = str_to_bool(val.values[0])
        elif val.key == "IncludeMetric" and val.values[0]:
            include_optional_metrics.add(val.values[0])
        elif val.key == "ExcludeMetric" and val.values[0]:
            exclude_optional_metrics.add(val.values[0])
        elif val.key == "ssl_keyfile" and val.values[0]:
            ssl_keys["ssl_keyfile"] = val.values[0]
        elif val.key == "ssl_certificate" and val.values[0]:
            ssl_keys["ssl_certificate"] = val.values[0]
        elif val.key == "ssl_ca_certs" and val.values[0]:
            ssl_keys["ssl_ca_certs"] = val.values[0]
        elif val.key == "Testing" and val.values[0]:
            testing = str_to_bool(val.values[0])

    for key in required_keys:
        try:
            val = plugin_conf[key]
        except KeyError:
            raise KeyError("Missing required config setting: %s" % key)

    base_url = "http://{0}:{1}/solr".format(plugin_conf["Host"],
                                            plugin_conf["Port"])

    https_handler = None
    if "ssl_certificate" in ssl_keys and "ssl_keyfile" in ssl_keys:
        base_url = "https" + base_url[4:]
        # BUG FIX: ssl_ca_certs is optional; the original indexed it
        # directly and raised KeyError whenever only the key/cert pair was
        # configured. Use .get() so a missing value defaults to None.
        https_handler = urllib_ssl_handler.HTTPSHandler(
            key_file=ssl_keys["ssl_keyfile"],
            cert_file=ssl_keys["ssl_certificate"],
            ca_certs=ssl_keys.get("ssl_ca_certs"))

    # Auth handler to handle basic http authentication.
    auth = urllib.request.HTTPPasswordMgrWithDefaultRealm()
    if username is None and password is None:
        username = password = ""

    auth.add_password(None, uri=base_url, user=username, passwd=password)
    auth_handler = urllib.request.HTTPBasicAuthHandler(auth)
    if https_handler:
        opener = urllib.request.build_opener(auth_handler, https_handler)
    else:
        opener = urllib.request.build_opener(auth_handler)

    module_config = {
        "state": None,
        "member_id": ("{0}:{1}".format(plugin_conf["Host"],
                                       plugin_conf["Port"])),
        "plugin_conf": plugin_conf,
        "cluster": cluster,
        "interval": interval,
        "ssl_keys": ssl_keys,
        "base_url": base_url,
        "opener": opener,
        "username": username,
        "password": password,
        "http_timeout": http_timeout,
        "custom_dimensions": custom_dimensions,
        "enhanced_metrics": enhanced_metrics,
        "include_optional_metrics": include_optional_metrics,
        "exclude_optional_metrics": exclude_optional_metrics,
    }

    if testing:
        # for testing purposes
        return module_config

    collectd.register_read(read_metrics,
                           data=module_config,
                           name=module_config["member_id"])
def restore_sigchld():
    """
    Reinstall the default SIGCHLD handler on Python <= 2.6.

    Without this the collectd exec plugin breaks; see
    https://github.com/deniszh/collectd-iostat-python/issues/2 for details.
    """
    version = sys.version_info
    if version[0] == 2 and version[1] <= 6:
        signal.signal(signal.SIGCHLD, signal.SIG_DFL)


if __name__ == '__main__':
    # Standalone mode: print one snapshot of the disk statistics and exit.
    iostat = IOStat()
    ds = iostat.get_diskstats()

    for disk in ds:
        for metric in ds[disk]:
            # NOTE(review): string.maketrans was removed in Python 3 (use
            # str.maketrans instead) — this branch only works on Python 2.
            tbl = string.maketrans('/-%', '___')
            metric_name = metric.translate(tbl)
            print("%s.%s:%s" % (disk, metric_name, ds[disk][metric]))

    sys.exit(0)
else:
    # Running inside collectd: import its API and register callbacks.
    import collectd

    iomon = IOMon()

    # Register callbacks
    collectd.register_init(restore_sigchld)
    collectd.register_config(iomon.configure_callback)
    collectd.register_read(iomon.read_callback)
Esempio n. 22
0
    VALUES.plugin_instance = INTERFACE
    VALUES.type = 'gauge'
    VALUES.type_instance = 'stations-count'
    # If no clients are connected, just send 0 to the metrics storage backend,
    # otherwise, send the count and the attributes of clients
    if results[-1] == -1:
        VALUES.dispatch(values=[0])
    else:
        VALUES.dispatch(values=[len(results)])
        # Browse the stations returned by the kernel
        for station in results:
            # If we shouldn't send data for every clients, we check the MAC address
            if len(CLIENTS) > 0:
                if station.mac_addr in CLIENTS:
                    send_station_stats(station)
            # If not, just send the data
            else:
                send_station_stats(station)

    # Clean a few values to avoid memory leak
    del (msg)
    del (cb)
    del (results)


# Register the callbacks collectd invokes at each stage of the daemon's
# life cycle: init, config parsing, periodic reads and shutdown.
collectd.register_init(init)
collectd.register_config(config_function)
collectd.register_read(read_function)
collectd.register_shutdown(terminate_function)
Esempio n. 23
0
def read_config(conf):
    """
    Reads the configurations provided by the user.

    Validates the required Host/Port/MetricsKey settings, assembles the
    module_config dict (base URL, auth args, metric filters) and registers
    the read callback — with the configured interval when one was given.

    Raises:
        ValueError: when a required setting is missing.
    """
    module_config = {
        "member_id": None,
        "plugin_config": {},
        "username": None,
        "api_token": None,
        "path": "",
        "opener": None,
        "metrics_key": None,
        "custom_dimensions": {},
        "enhanced_metrics": False,
        "include_optional_metrics": set(),
        "exclude_optional_metrics": set(),
        "http_timeout": DEFAULT_API_TIMEOUT,
        "jobs_last_timestamp": {},
        "ssl_keys": {
            "enabled": False,
            "ssl_cert_validation": True
        },
        "exclude_job_metrics": False,
    }

    interval = None
    testing = False

    required_keys = ("Host", "Port")

    for val in conf.children:
        if val.key in required_keys:
            module_config["plugin_config"][val.key] = val.values[0]
        elif val.key == "Interval" and val.values[0]:
            interval = val.values[0]
        # The original guarded these with "val.key in auth_keys and ...",
        # which was redundant: each branch already compares against one
        # literal key.
        elif val.key == "Username" and val.values[0]:
            module_config["username"] = val.values[0]
        elif val.key == "APIToken" and val.values[0]:
            module_config["api_token"] = val.values[0]
        elif val.key == "Path" and val.values[0]:
            module_config["path"] = val.values[0]
        elif val.key == "MetricsKey" and val.values[0]:
            module_config["metrics_key"] = val.values[0]
        elif val.key == "Dimension":
            if len(val.values) == 2:
                module_config["custom_dimensions"].update(
                    {val.values[0]: val.values[1]})
            else:
                collectd.warning(
                    "WARNING: Dimension Key Value format required")
        elif val.key == "EnhancedMetrics" and val.values[0]:
            module_config["enhanced_metrics"] = str_to_bool(val.values[0])
        elif val.key == "IncludeMetric" and val.values[0] and val.values[
                0] not in NODE_METRICS:
            module_config["include_optional_metrics"].add(val.values[0])
        elif val.key == "ExcludeMetric" and val.values[0] and val.values[
                0] not in NODE_METRICS:
            module_config["exclude_optional_metrics"].add(val.values[0])
        elif val.key == "ExcludeJobMetrics" and val.values[0]:
            module_config["exclude_job_metrics"] = str_to_bool(val.values[0])
        elif val.key == "ssl_enabled" and val.values[0]:
            module_config["ssl_keys"]["enabled"] = str_to_bool(val.values[0])
        elif val.key == "ssl_keyfile" and val.values[0]:
            module_config["ssl_keys"]["ssl_keyfile"] = val.values[0]
        elif val.key == "ssl_certificate" and val.values[0]:
            module_config["ssl_keys"]["ssl_certificate"] = val.values[0]
        elif val.key == "ssl_ca_certs" and val.values[0]:
            module_config["ssl_keys"]["ssl_ca_certs"] = val.values[0]
        elif val.key == "ssl_cert_validation" and val.values[0]:
            # Doesn't use str_to_bool because the function defaults to
            # false and we want to default to true.
            if val.values[0].strip().lower() == "false":
                module_config["ssl_keys"]["ssl_cert_validation"] = False
        elif val.key == "Testing" and str_to_bool(val.values[0]):
            testing = True

    # Make sure all required config settings are present, and log them
    collectd.info("Using config settings:")
    for key in required_keys:
        val = module_config["plugin_config"].get(key)
        if val is None:
            raise ValueError("Missing required config setting: %s" % key)
        collectd.info("%s=%s" % (key, val))

    if module_config["metrics_key"] is None:
        # BUG FIX: the message previously said "Metrics_Key", which matches
        # no actual config option; the real key is "MetricsKey".
        raise ValueError("Missing required config setting: MetricsKey")

    module_config["member_id"] = "%s:%s" % (
        module_config["plugin_config"]["Host"],
        module_config["plugin_config"]["Port"],
    )

    module_config["base_url"] = "http://%s:%s%s/" % (
        module_config["plugin_config"]["Host"],
        module_config["plugin_config"]["Port"],
        module_config["path"],
    )
    collectd.info("Using base_url %s" % module_config["base_url"])

    if module_config["ssl_keys"]["enabled"] or (
            "ssl_certificate" in module_config["ssl_keys"]
            and "ssl_keyfile" in module_config["ssl_keys"]):
        module_config["base_url"] = "https" + module_config["base_url"][4:]

    if module_config["username"] is None and module_config["api_token"] is None:
        module_config["username"] = module_config["api_token"] = ""
    collectd.info("Using username '%s'" % module_config["username"])

    module_config["auth_args"] = get_auth_args(module_config)

    collectd.debug("module_config: (%s)" % str(module_config))

    if testing:
        # for testing purposes
        return module_config

    if interval is not None:
        collectd.register_read(read_metrics,
                               interval,
                               data=module_config,
                               name=module_config["member_id"])
    else:
        collectd.register_read(read_metrics,
                               data=module_config,
                               name=module_config["member_id"])
Esempio n. 24
0
        try:
            with Client() as c:
                temperatures = c.call('disk.temperatures', self.disks,
                                      self.powermode, self.smartctl_args)

            for disk, temp in temperatures.items():
                if temp is not None:
                    self.dispatch_value(disk,
                                        'temperature',
                                        temp,
                                        data_type='temperature')
        except Exception:
            collectd.error(traceback.format_exc())

    def dispatch_value(self, name, instance, value, data_type=None):
        """Dispatch one value to collectd under the 'disktemp' plugin.

        NOTE(review): the 'instance' parameter is currently unused; kept
        for interface compatibility.
        """
        metric = collectd.Values(plugin='disktemp', plugin_instance=name)
        if data_type:
            metric.type = data_type
        metric.values = [value]
        # Non-empty meta dict, presumably to work around a collectd Python
        # plugin meta-handling quirk — confirm before removing.
        metric.meta = {'0': True}
        metric.dispatch(interval=READ_INTERVAL)


# Module-level singleton; collectd calls init once and read on each cycle.
disktemp = DiskTemp()

collectd.register_init(disktemp.init)
collectd.register_read(disktemp.read, READ_INTERVAL)
Esempio n. 25
0
    def dispatch_value(self, type_instance, value, value_type,
                       plugin_instance):
        """Emit a single metric value under the 'solr' plugin."""
        metric = collectd.Values(plugin='solr',
                                 type=value_type,
                                 type_instance=type_instance,
                                 plugin_instance=plugin_instance)
        metric.values = [value]
        metric.dispatch()

    def read_callback(self):
        """Collect SolrCloud overseer/leader metrics and dispatch them."""
        log_verbose('Read Callback Called')
        solr = Solr(self.SOLR_HOST, self.SOLR_PORT, self.SOLR_STATUS)
        self.dispatch_value('leader', solr.get_leader(), 'gauge',
                            self.SOLR_INSTANCE)
        self.dispatch_value('overseer_queue_size',
                            solr.get_overseer_queue_size(), 'gauge',
                            self.SOLR_INSTANCE)
        # NOTE(review): both 'overseer_work_queue_size' and
        # 'overseer_collection_queue_size' call
        # get_overseer_collection_queue_size(); the first one probably
        # should call a work-queue getter — verify against the Solr class.
        self.dispatch_value('overseer_work_queue_size',
                            solr.get_overseer_collection_queue_size(), 'gauge',
                            self.SOLR_INSTANCE)
        self.dispatch_value('overseer_collection_queue_size',
                            solr.get_overseer_collection_queue_size(), 'gauge',
                            self.SOLR_INSTANCE)


# register callbacks: configure first, then periodic reads at the
# plugin-configured interval
plugin = SolrPlugin()
collectd.register_config(plugin.configure_callback)
collectd.register_read(plugin.read_callback, plugin.SOLR_INTERVAL)
Esempio n. 26
0
        self.interval = DEFAULT_INTERVAL
        self.port = None
        self.conf = {}

    def config(self, cfg):
        """Pull 'interval' and 'port' from the config block, then hand the
        assembled settings to the base class initialiser."""
        for node in cfg.children:
            if node.key == INTERVAL:
                self.interval = node.values[0]
            if node.key == PORT:
                self.port = node.values[0]
        self.conf.update(
            interval=self.interval,
            port=self.port,
            name='prometheusnginx',
        )
        super(PrometheusNginx, self).__init__(self.conf)

    def read_temp(self):
        # One-shot bootstrap callback: unregister itself and re-register
        # the real reader so the interval parsed from config takes effect.
        collectd.unregister_read(self.read_temp)
        collectd.register_read(self.read, interval=int(self.interval))


def init():
    # Restore default SIGCHLD handling so child processes are reaped normally.
    signal.signal(signal.SIGCHLD, signal.SIG_DFL)


# Module-level wiring: instantiate the plugin and register its callbacks.
OBJ = PrometheusNginx()
collectd.register_init(init)
collectd.register_config(OBJ.config)
collectd.register_read(OBJ.read_temp)
Esempio n. 27
0
 def read_temp(self):
     # One-shot: swap itself for read() registered at the configured interval.
     collectd.unregister_read(self.read_temp)
     collectd.register_read(self.read, interval=int(self.interval))
Esempio n. 28
0
                    if dict_postgres[doc]['_documentType'] not in self.documentsTypes:
                        del dict_postgres[doc]
                self.dispatch_data(deepcopy(dict_postgres))
        except Exception as e:
                #collectd.error("%s" % traceback.format_exc())
            collectd.error("Couldn't read and gather the postgres metrics due to the exception :%s" % e)
            return

    @staticmethod
    def dispatch_data(dict_disks_copy):
        """Log and dispatch every collected detail document."""
        # Only the values are used; the type keys are irrelevant here.
        for details in dict_disks_copy.values():
            collectd.info("Plugin Postgres: Values: " + json.dumps(details))
            collectd.info("final details are : %s" % details)
            dispatch(details)

    def get_size(self, data):
        """Convert a count of 8 KiB pages into megabytes."""
        size_in_bytes = data * 8192  # 8192-byte pages
        return size_in_bytes / (1024 * 1024)

    def read_temp(self):
        """One-shot read callback: unregister itself and re-register the
        real read() with the interval learned during configuration."""
        collectd.unregister_read(self.read_temp)
        collectd.register_read(self.read, interval=int(self.interval))

def init():
    # Restore default SIGCHLD handling so child processes are reaped normally.
    signal.signal(signal.SIGCHLD, signal.SIG_DFL)


# Module-level wiring for the postgres stats plugin.
obj = PostgresStats()
collectd.register_config(obj.read_config)
collectd.register_read(obj.read_temp)
Esempio n. 29
0
                                avg_doc_size, mongo_db)
                if 'wiredTiger' in collection_stats:
                    if 'cursor' in collection_stats['wiredTiger']:
                        for k, v in collection_stats['wiredTiger'][
                                'cursor'].items():
                            self.submit('collection_stats',
                                        (collection + '-' + k), v, mongo_db)

        con.close()

    def config(self, obj):
        """collectd config callback: copy <Module> settings onto the plugin.

        Recognised keys: Port (coerced to int), Host, User, Password, and
        Database (all values are kept, so several databases can be listed).
        Unrecognised keys are logged and ignored.
        """
        for node in obj.children:
            if node.key == 'Port':
                self.mongo_port = int(node.values[0])
            elif node.key == 'Host':
                self.mongo_host = node.values[0]
            elif node.key == 'User':
                self.mongo_user = node.values[0]
            elif node.key == 'Password':
                self.mongo_password = node.values[0]
            elif node.key == 'Database':
                # keep the full list of configured database names
                self.mongo_db = node.values
            else:
                # fixed typo in the log message ("Unkown" -> "Unknown")
                collectd.warning(
                    "mongodb plugin: Unknown configuration key %s" % node.key)


# Instantiate and register: reads run at collectd's default interval.
mongodb = MongoDB()
collectd.register_read(mongodb.do_server_status)
collectd.register_config(mongodb.config)
Esempio n. 30
0
        data[PLUGIN_INS] = domain.name()
        data[UTC] = str(datetime.datetime.utcnow())
        data[TAGS] = "vm_static"

        try:
            state, maxmem, mem, cpus, cput = domain.info()
            data[NO_OF_VCPU] = int(cpus)
            data[CPU_TIME] = cput
            data[VM_STATE] = get_vm_state(state)
            data[ALLOCATED_MEMORY] = float(maxmem) / 1024
            collectd.info("Memory stats collected for VM: %s" %
                          (domain.name()))
        except libvirt.libvirtError as e:
            collectd.warning("Unable to collect stats for"
                             " VM: %s, Reason: %s" %
                             (domain.name(), e.get_error_message()))

        collectd.info("Collected VM static data.")
        return data

    def read_temp(self):
        """One-shot read callback: unregister itself and re-register the
        real read() with the interval learned during configuration."""
        collectd.unregister_read(self.read_temp)
        collectd.register_read(self.read, interval=int(self.interval))


# Module-level wiring for the libvirt static-data plugin.
collectd.info("Registering '%s' ... " % PLUGIN_NAME)
virt = LibvirtStatic()
collectd.register_config(virt.read_config)
collectd.register_read(virt.read_temp)
collectd.info("Registered '%s' plugin successfully :)" % PLUGIN_NAME)
Esempio n. 31
0
            "_tag_clustermetrics": "ResourceManager",
            "modelerType": "ClusterMetrics",
            "NumLostNMs": 0,
            "time": 1543301379,
            "_tag_appName": "hadoopapp1",
            "NumUnhealthyNMs": 0,
            "AMRegisterDelayAvgTime": 0,
            "NumActiveNMs": 0,
            "AMLaunchDelayAvgTime": 0
        }]
        for doc in docs:
            self.add_common_params(doc, doc['_documentType'])
            write_json.write(doc)

    def read(self):
        # Periodic read callback: delegate to the actual collector.
        self.collect_data()

    def read_temp(self):
        """
        Collectd first calls register_read with its default interval, so this
        temporary callback unregisters itself and re-registers read() with the
        interval obtained from the configuration via the register_config callback.
        """
        collectd.unregister_read(self.read_temp)  # pylint: disable=E1101
        collectd.register_read(self.read, interval=int(self.interval))  # pylint: disable=E1101


# Module-level wiring for the YARN stats plugin.
namenodeinstance = YarnStats()
collectd.register_config(namenodeinstance.read_config)  # pylint: disable=E1101
collectd.register_read(namenodeinstance.read_temp)  # pylint: disable=E1101
Esempio n. 32
0
 def read_temp(self):
     """Collectd first calls register_read with its default interval, so this
     temporary callback unregisters itself and re-registers read() with the
     interval obtained from conf by the register_config callback."""
     collectd.unregister_read(self.read_temp)
     collectd.register_read(self.read, interval=int(self.interval))
Esempio n. 33
0
def config(config_values):
    """collectd config callback for the HAProxy plugin.

    Parses the <Module> block and registers the metric-collection read
    callback. When Testing is enabled the parsed config dict is returned
    instead, so tests can inspect it.

    Args:
    config_values (collectd.Config): Object containing config values
    """
    proxy_monitors = []
    excluded_metrics = set()
    custom_dimensions = {}
    socket = DEFAULT_SOCKET
    interval = None
    enhanced_metrics = False
    testing = False

    for node in config_values.children:
        key = node.key
        vals = node.values
        if key == "ProxyMonitor" and vals[0]:
            proxy_monitors.append(vals[0])
        elif key == "Socket" and vals[0]:
            socket = vals[0]
        elif key == "Interval" and vals[0]:
            interval = vals[0]
        elif key == "EnhancedMetrics" and vals[0]:
            enhanced_metrics = _str_to_bool(vals[0])
        elif key == "ExcludeMetric" and vals[0]:
            excluded_metrics.add(vals[0])
        elif key == "Testing" and vals[0]:
            testing = _str_to_bool(vals[0])
        elif key == 'Dimension':
            # Dimensions are key/value pairs attached to every datapoint.
            if len(vals) == 2:
                custom_dimensions[vals[0]] = vals[1]
            else:
                collectd.warning("WARNING: Check configuration \
                                            setting for %s" % key)
        else:
            collectd.warning('Unknown config key: %s' % key)

    # Fall back to the default proxies when none were configured.
    if not proxy_monitors:
        proxy_monitors += DEFAULT_PROXY_MONITORS

    module_config = {
        'socket': socket,
        'proxy_monitors': proxy_monitors,
        'interval': interval,
        'enhanced_metrics': enhanced_metrics,
        'excluded_metrics': excluded_metrics,
        'custom_dimensions': custom_dimensions,
        'testing': testing,
    }

    if testing:
        # Unit tests only want the parsed configuration back.
        return module_config

    read_kwargs = {}
    if interval:
        read_kwargs['interval'] = interval
    collectd.register_read(collect_metrics, data=module_config,
                           name='node_' + module_config['socket'] + '_' +
                           "_".join(proxy_monitors),
                           **read_kwargs)
Esempio n. 34
0
                previous_values[device][i] = value
                continue

            # else we have a previous value
            previous_value = previous_values[device][i]

            delta = None

            # we have wrapped around
            if previous_value > value:
                delta = 4294967296 - previous_value + value
            else:
                delta = value - previous_value

            # field 9 is not a counter
            if i == 9:
                delta = value

            # record the new previous value
            previous_values[device][i] = value

            values.dispatch(plugin_instance=device,
                            type_instance=field_map[i],
                            values=[delta])

    fh.close()


# Register the diskstats read and config callbacks with collectd.
collectd.register_read(diskstats_read)
collectd.register_config(diskstats_config)
Esempio n. 35
0
    def __init__(self, port, key, value):
        # Delegate to the generic OOMValue with the fixed type 'temperature'.
        OOMValue.__init__(self, 'temperature', port, key, value)


def read_callback(data=None):
    """collectd read callback: poll optical-module DOM data on every port.

    For each port that exposes DOM data, dispatch laser TX/RX output power,
    TX bias current, supply voltage and module temperature.
    """
    for port in oom.oom_get_portlist():
        dom = oom.oom_get_memory(port, 'DOM')
        if dom is None:
            # Port has no digital optical monitoring data; skip it.
            continue

        power_keys = ('TX1_POWER', 'TX2_POWER', 'TX3_POWER', 'TX4_POWER',
                      'RX1_POWER', 'RX2_POWER', 'RX3_POWER', 'RX4_POWER')
        for key in power_keys:
            if key in dom:
                # Raw values are scaled down by 1000 before dispatch.
                LaserOutputPower(port, key[:3].lower(),
                                 dom[key] / 1000.0).dispatch()

        for key in ('TX1_BIAS', 'TX2_BIAS', 'TX3_BIAS', 'TX4_BIAS'):
            if key in dom:
                LaserBiasCurrent(port, key[:3].lower(),
                                 dom[key] / 1000.0).dispatch()

        if 'SUPPLY_VOLTAGE' in dom:
            SupplyVoltage(port, 'supply', dom['SUPPLY_VOLTAGE']).dispatch()

        if 'TEMPERATURE' in dom:
            ModuleTemperature(port, 'module', dom['TEMPERATURE']).dispatch()


# Poll at collectd's default read interval.
collectd.register_read(read_callback)
                        values=[highest]).dispatch()
        collectd.Values(plugin='geth_status',
                        type_instance='sync',
                        type='gauge',
                        values=[round(sync_percent, 3)]).dispatch()
    else:
        collectd.Values(plugin='geth_status',
                        type_instance='service',
                        type='gauge',
                        values=[geth_service]).dispatch()
        collectd.Values(plugin='geth_status',
                        type_instance='ssd_free',
                        type='gauge',
                        values=[ssd_free]).dispatch()
        collectd.Values(plugin='geth_status',
                        type_instance='sync',
                        type='gauge',
                        values=[-1]).dispatch()
        collectd.Values(plugin='geth_status',
                        type_instance='peers',
                        type='gauge',
                        values=[-1]).dispatch()


# This module only makes sense when loaded by collectd, not run directly.
if __name__ != '__main__':
    collectd.register_init(init)
    collectd.register_config(conf)
    collectd.register_read(read_geth_stats)
else:
    raise SystemExit('Nope - it is a plugin reporting to collected.')
Esempio n. 37
0
   collectd.info('buddyinfo plugin: configuring host: %s' % (host_name)) 

def initer():
   # One-time setup: detect the host type and prime the stats cache.
   get_host_type()
   collectd.info('buddyinfo plugin: host of type: %s' % (host_type))
   collectd.info('buddyinfo initer: white list: %s ' % (white_list))
   init_stats_cache()
   collectd.info('buddyinfo init: stats_cache: %s ' % (stats_cache))

def reader(input_data=None):
   # Read callback: collect buddyinfo and rotate the current cache.
   collect_buddyinfo()
   swap_current_cache()

def writer(metric, data=None):
   # Write callback: debug-log each value carried by the metric.
   for i in metric.values:
      collectd.debug("%s (%s): %f" % (metric.plugin, metric.type, i))

def shutdown():
   # Shutdown callback: nothing to clean up, just log.
   collectd.info("buddyinfo plugin shutting down")

#== Callbacks ==#
# buddyinfo data only exists on Linux; register nothing elsewhere.
if (os_name == 'Linux'):
   collectd.register_config(configer)
   collectd.register_init(initer)
   collectd.register_read(reader)
   collectd.register_write(writer)
   collectd.register_shutdown(shutdown)
else:
   collectd.warning('buddyinfo plugin currently works for Linux only')

Esempio n. 38
0
        portstat = PortStat()
        ports = portstat.get_portstats("localhost")
        for port_object in ports:
            stat = portstat.parse_ports(port_object)
            port_name = port_object["Object"]["IntfRef"]
            print("%s : %s" % (port_name, stat))
            self.sendToCollect('derive', port_name, stat)


if __name__ == '__main__':
    # Standalone mode: print one round of port stats and exit.
    portstat = PortStat()
    portmon = PortMon()
    ports = portstat.get_portstats("localhost")
    for port_object in ports:
        stat = portstat.parse_ports(port_object)
        port_name = json.dumps(port_object["Object"]["IntfRef"])
        print("bps %s : %s" % (port_name, stat))
        portmon.sendToCollect('derive', port_name, stat)

    sys.exit(0)
else:
    # Plugin mode: collectd is only importable inside the daemon.
    import collectd

    portmon = PortMon()

    # Register callbacks

    collectd.register_init(portmon.init_callback)
    collectd.register_config(portmon.configure_callback)
    collectd.register_read(portmon.read_callback)
Esempio n. 39
0
            The data is read from all actions defined in SERVICE_ACTIONS.
            This function returns a dict in the following format:
            {instance: (value_type, value)} where value_type and instance are
            mapped from VALUES and CONVERSION.
        """
        values = {}

        # Don't try to gather data if the connection is not available
        if self._fc is None:
            return values

        # Combine all values available in SERVICE_ACTIONS into a dict
        for service, action in self.SERVICE_ACTIONS:
            values.update(self._fc.call_action(service, action))

        # Construct a dict: {instance: (value_type, value)} from the queried
        # results applying a conversion (if defined)
        result = {
            instance:
            (value_type,
             self.CONVERSION.get(key, lambda x: x)(values.get(key)))
            for key, (instance, value_type) in self.VALUES.items()
        }
        return result


# Wire the FRITZ!Box collector into collectd.
FC = FritzCollectd()
collectd.register_config(FC.callback_configure)
collectd.register_init(FC.callback_init)
collectd.register_read(FC.callback_read)
Esempio n. 40
0
    def cache(self):
        # The CACHE section of the stats payload.
        return self.stats()["CACHE"]

    def diff_from(self, old_time):
        """Return the number of seconds elapsed since *old_time* as a float.

        Uses timedelta.total_seconds() so differences of a day or more are
        counted correctly; the previous version read only .seconds and
        silently dropped the .days component.
        """
        return (datetime.datetime.now() - old_time).total_seconds()

    def stats_for(self, key):
        """Return the tracked stat fields for cache entry *key*.

        Fields missing from the entry come back as None.
        """
        entry_stats = self.cache()[key]["stats"]
        return {field: entry_stats.get(field) for field in self.keys_to_track}

    def all_stats(self):
        """Collect stats for every known key, re-keyed by its mapped name."""
        return {
            self.field_value_mapping[cache_key]: self.stats_for(cache_key)
            for cache_key in self.keys
        }

    def info(self, message):
        # Log only when verbose mode was enabled via configuration.
        if self.verbose:
            collectd.info("solr plugin [verbose]: %s" % (message))


# Instantiate and register the Solr server monitor.
server = SolrServer()
collectd.register_config(server.configure)
collectd.register_read(server.read)
            gw_stats = load(f)
    except IOError as e:
        collectd.error('ttn_gw plugin: Cannot read gateway stats file %s' % e)
        collectd.error('ttn_gw plugin: (gateway not runing?)')
        return
    except ValueError:
        collectd.error('ttn_gw plugin: Cannot parse gateway stats file')
        return

    new_data = False
    if last_time != gw_stats['time']:
        new_data = True
        last_time = gw_stats['time']

    current = gw_stats['current']
    keys = ('up_radio_packets_received', 'up_radio_packets_crc_good',
            'up_radio_packets_crc_bad', 'up_radio_packets_crc_absent',
            'up_radio_packets_dropped', 'up_radio_packets_forwarded',
            'down_radio_packets_succes', 'down_radio_packets_failure',
            'down_radio_packets_collision_packet',
            'down_radio_packets_collision_beacon',
            'down_radio_packets_too_early', 'down_radio_packets_too_late',
            'down_beacon_packets_queued', 'down_beacon_packets_send',
            'down_beacon_packets_rejected')
    for key in keys:
        dispatch(key, current[key], new_data)


# Read at the module-level stat_interval.
collectd.register_config(config)
collectd.register_read(read, stat_interval)
Esempio n. 42
0
            self.submit('counter', 'object_count', db_stats['objects'], mongo_db)
            self.submit('counter', 'collections', db_stats['collections'], mongo_db)
            self.submit('counter', 'num_extents', db_stats['numExtents'], mongo_db)
            self.submit('counter', 'indexes', db_stats['indexes'], mongo_db)

            # stats sizes
            self.submit('file_size', 'storage', db_stats['storageSize'], mongo_db)
            self.submit('file_size', 'index', db_stats['indexSize'], mongo_db)
            self.submit('file_size', 'data', db_stats['dataSize'], mongo_db)

        #con.disconnect()

    def config(self, obj):
        """collectd config callback: copy <Module> settings onto the plugin.

        Recognised keys: Port (coerced to int), Host, User, Password, and
        Database (all values are kept, so several databases can be listed).
        Unrecognised keys are logged and ignored.
        """
        for node in obj.children:
            if node.key == 'Port':
                self.mongo_port = int(node.values[0])
            elif node.key == 'Host':
                self.mongo_host = node.values[0]
            elif node.key == 'User':
                self.mongo_user = node.values[0]
            elif node.key == 'Password':
                self.mongo_password = node.values[0]
            elif node.key == 'Database':
                # keep the full list of configured database names
                self.mongo_db = node.values
            else:
                # fixed typo in the log message ("Unkown" -> "Unknown")
                collectd.warning("mongodb plugin: Unknown configuration key %s" % node.key)

# Instantiate and register: reads run at collectd's default interval.
mongodb = MongoDB()
collectd.register_read(mongodb.do_server_status)
collectd.register_config(mongodb.config)
Esempio n. 43
0
def collectd_init():
    # Register the accounting read callback under a stable per-module name.
    collectd.register_read(retrieve_accounting,
                           interval=INTERVAL,
                           name='python.%s' % retrieve_accounting.__module__)
            self.log_verbose(traceback.print_exc())
            self.log_verbose("plugin %s run into exception" % (self.plugin_name))
            self.log_verbose(exp.message)


# NOTE(review): the __main__ branch uses Python 2 print statements, so this
# module only runs under Python 2 when executed directly.
if __name__ == '__main__':
    # Standalone mode: take two samples 10s apart and print their delta.
    print "************\n"
    # print str(get_intergrp_stat())
    # print "***********\n"
    # print str(get_internet_stat())
    # print "************\n"
    # print str(get_intragrp_stat())
    # print "**********\n"
    # print str(get_policy_stat())
    # print '------------------\n'
    a = get_internet_stat()
    print a
    import time
    time.sleep(10)
    print '------------------\n'
    b = get_internet_stat()
    c = get_delta_value(a, b)
    print '---delta value----'
    print c
else:
    import collectd
    firewall_status = FireWallUserStatMon()
    collectd.register_config(firewall_status.configure_callback)
    collectd.register_init(firewall_status.init)
    collectd.register_read(firewall_status.read_callback)
Esempio n. 45
0
def configure_callback(conf):
    """Received configuration information"""
    plugin.config_callback(conf)
    # Register the read callback with the interval taken from the config.
    collectd.register_read(read_callback, plugin.interval)
Esempio n. 46
0
    def init(self):
        # Init callback: just announce that the plugin is up.
        collectd.info("rebellion_monitor plugin has been initialized.")

    def read(self):
        """Count running rebellion.service containers and dispatch the count."""
        try:
            query = {"filters": '{"name": ["rebellion.service"]}'}
            response = requests.get(
                "{}/containers/json".format(self.DOCKER_URL_PREFIX),
                params=query,
                timeout=self.TIMEOUT)
            containers = response.json()
            metric = collectd.Values()
            metric.plugin = self.NAME
            metric.plugin_instance = "rebellion_service"
            metric.type = "val"
            metric.values = [len(containers)]
            metric.dispatch()
        except Exception as e:
            # Best-effort: report the failure instead of crashing collectd.
            collectd.error(
                "rebellion_monitor.read() failed, exception: {}".format(e))

    def shutdown(self):
        # Shutdown callback: just announce the plugin is going away.
        collectd.info("rebellion_monitor plugin has been shutdown.")


# Only register when loaded by collectd's python plugin, not as a script.
if __name__ != "__main__":
    rebellion_monitor = Plugin()
    collectd.register_init(rebellion_monitor.init)
    collectd.register_read(rebellion_monitor.read,
                           rebellion_monitor.READ_INTERVAL)
    collectd.register_shutdown(rebellion_monitor.shutdown)
Esempio n. 47
0
        if self.includeServerStatsMetrics:
            for root_metric_key in self.includeServerStatsMetrics.iterkeys():
                if server_status.has_key(root_metric_key):
                    metrics_to_collect[root_metric_key] = deepcopy(SERVER_STATUS_METRICS[root_metric_key])
        else:
            metrics_to_collect = deepcopy(SERVER_STATUS_METRICS)
        # rename "." lock to be "GLOBAL"
        if metrics_to_collect["locks"].has_key("."):
            print(SERVER_STATUS_METRICS["locks"])
            global_lock_data = metrics_to_collect["locks"].pop(".")
            metrics_to_collect["locks"]["GLOBAL"] = global_lock_data

            print(SERVER_STATUS_METRICS["locks"])
        for db_name in self.mongo_dbs:
            metrics_to_collect["locks"][db_name] = deepcopy(SERVER_STATUS_METRICS["locks"]["."])

        self.recursive_submit(metrics_to_collect, server_status)


    def publish_data(self):
        # One read cycle: server status, connection pools and db stats.
        self.publish_server_status()
        self.publish_connection_pool_metrics()
        self.publish_dbstats()


# Module-level wiring: connect on init, disconnect on shutdown.
mongodb = MongoDB()
collectd.register_read(mongodb.publish_data)
collectd.register_config(mongodb.config)
collectd.register_init(mongodb.connect)
collectd.register_shutdown(mongodb.disconnect)
                data=''
                for row in range(self.hosts[hostname]['rrdupdates'].GetRows()):
                    epoch = self.hosts[hostname]['rrdupdates'].GetRowTime(row)
                    if isHost:
                        dv = str(self.hosts[hostname]['rrdupdates'].GetHostData(param,row))
                    else:
                        dv = str(self.hosts[hostname]['rrdupdates'].GetVMData(uuid,param,row))
                    if epoch > max_time:
                        max_time = epoch
                        data = dv
                result[param] = data
        return result

    def _LogVerbose(self, msg):
        ''' Be verbose, if self.verbose is True'''
        if not self.verbose:
            return
        collectd.info('xenserver-collectd [verbose]: %s' % msg)


# Hooks
xenserverCollectd = XenServerCollectd()
if __name__ == "__main__":
    import ipdb
    ipdb.set_trace()

collectd.register_config(xenserverCollectd.Config)
collectd.register_init(xenserverCollectd.Connect)
collectd.register_read(xenserverCollectd.Read)
collectd.register_shutdown(xenserverCollectd.Shutdown)
Esempio n. 49
0
    return


def flush_cb(timeout, identifier, data=None):
    # Stub flush callback: nothing buffered, nothing to do.
    return

def log_cb(severity, message, data=None):
    # Stub log callback: discard messages.
    return


## Register the call-back functions

data = "stub-string"         # placeholder
name = init_cb.__module__    # the default
interval = 10                # the default

collectd.register_config(config_cb, data, name)
collectd.register_init(init_cb, data, name)
collectd.register_shutdown(shutdown_cb, data, name)

# register_read takes an extra interval argument before data/name.
collectd.register_read(read_cb, interval, data, name)
collectd.register_write(write_cb, data, name)
collectd.register_notification(notification_cb, data, name)

collectd.register_flush(flush_cb, data, name)
collectd.register_log(log_cb, data, name)

## Local Variables:
## mode: python
## End:
Esempio n. 50
0
import collectd
import os


def config_func(config):
    # This plugin has no configuration options.
    pass


def read_func():
    """Dispatch 1 if /var/run/reboot-required exists, otherwise 0."""
    required = 1 if os.path.isfile("/var/run/reboot-required") else 0

    stat = collectd.Values(type="gauge")
    stat.plugin = "reboot-required"
    stat.dispatch(values=[required])


# Wire the reboot-required check into collectd.
collectd.register_config(config_func)
collectd.register_read(read_func)
Esempio n. 51
0
        self.dispatch_failed()

    def dispatch_failed(self):
        # Report resque's global failed-job counter as a gauge.
        type_instance = "failed"
        value = int(self.redis_client.get("resque:stat:failed"))
        val = collectd.Values(plugin=PLUGIN_NAME, type_instance=type_instance, values=[value], type="gauge")
        self.info("Sending value: %s=%s" % (type_instance, value))
        val.dispatch()

    def dispatch_queue_sized(self):
        # One gauge per resque queue, named "queue-<name>".
        for key in self.queues():
            value = self.queue_size(key)
            type_instance = "queue-%s" % (key)
            val = collectd.Values(plugin=PLUGIN_NAME, type_instance=type_instance, values=[value], type="gauge")
            self.info("Sending value: %s=%s" % (type_instance, value))
            val.dispatch()

    def queues(self):
        """All resque queue names, minus the '*' wildcard entry."""
        return self.redis_client.smembers("resque:queues") - {"*"}

    def queue_size(self, key):
        # Length of the redis list backing the queue.
        return self.redis_client.llen("resque:queue:%s" % key)

    def info(self, message):
        # NOTE(review): log prefix says "solr plugin" but this is the resque
        # monitor — looks like a copy/paste leftover; confirm intent before
        # changing the runtime string.
        if self.verbose:
            collectd.info("solr plugin [verbose]: %s" % (message))

# Instantiate and register the resque monitor with collectd.
monitor = ResqueMonitor()
collectd.register_config(monitor.configure)
collectd.register_read(monitor.read)
Esempio n. 52
0
    http.request('GET', '/frame.asp', headers=headers)

    # Data
    request = http.request('GET',
                           '/html/amp/ethinfo/ethinfo.asp',
                           headers=headers)
    data = request.data.decode('UTF-8').split('\r\n')

    eths = []
    for d in data:
        if "var userEthInfos" in d:
            d = d.replace("var userEthInfos = new Array(",
                          "").replace(",null);",
                                      "").replace(")", "").replace("\"", "")
            eths = d.split("new LANStats(")

    for eth in eths:
        if (eth):
            tmp = eth.split(",")
            name = re.search("(?<=AMP\.).*(?=\.Statistics)", tmp[0])

            v_tmp = collectd.Values(plugin='huawei_collectd',
                                    type='if_octets',
                                    type_instance=name.group(0).replace(
                                        ".", "_"))
            v_tmp.dispatch(values=[tmp[4], tmp[2]])


# Wire the Huawei router scraper into collectd.
collectd.register_config(config)
collectd.register_read(getInfo)
Esempio n. 53
0
def configure_callback(conf):
    """Received configuration information"""
    plugin.config_callback(conf)
    # Register the read callback with the interval taken from the config.
    collectd.register_read(read_callback, plugin.interval)
Esempio n. 54
0
        key = item.key.lower()
        val = item.values[0]
        if key == 'host':
            config['host'] = val
        elif key == 'port':
            config['port'] = int(val)
        elif key == 'separator':
            config[key] = val
        elif key == 'trackingname':
            config['tracking_name'] = val
        elif key == 'configfile':
            config['config_file'] = val
        else:
            collectd.warning('mesos-master plugin: unknown config key {} = {}'.format(item.key, val))

    #
    # this cannot be overridden
    #
    config['master'] = True

    client = MesosMaster(config)


def reader():
    # Read callback: fetch and emit all master metrics via the global client.
    global client
    client.emit_metrics(client.fetch_metrics())


collectd.register_config(configurator)
collectd.register_read(reader)
import http_check


NAME = "check_local_endpoint"


class CheckLocalEndpoint(http_check.HTTPCheckPlugin):
    """HTTP check specialised only by its collectd plugin name."""

    def __init__(self, *args, **kwargs):
        super(CheckLocalEndpoint, self).__init__(*args, **kwargs)
        self.plugin = NAME


# Module-level wiring: thin wrappers delegate to the plugin instance.
plugin = CheckLocalEndpoint(collectd)


def config_callback(conf):
    plugin.config_callback(conf)


def notification_callback(notification):
    plugin.notification_callback(notification)


def read_callback():
    plugin.conditional_read_callback()


collectd.register_config(config_callback)
collectd.register_notification(notification_callback)
collectd.register_read(read_callback, base.INTERVAL)
Esempio n. 56
0
        }
        yield {'type': 'pg_data_bytes', 'values': pgmap['data_bytes']}
        yield {'type': 'pg_count', 'values': pgmap['num_pgs']}

        for state in pgmap['pgs_by_state']:
            yield {
                'type': 'pg_state_count',
                'type_instance': state['state_name'],
                'values': state['count']
            }


# Module-level wiring: thin wrappers delegate to the plugin instance.
plugin = CephMonPlugin(collectd)


def init_callback():
    plugin.restore_sigchld()


def config_callback(conf):
    plugin.config_callback(conf)


def read_callback():
    plugin.read_callback()


collectd.register_init(init_callback)
collectd.register_config(config_callback)
collectd.register_read(read_callback, INTERVAL)
Esempio n. 57
0
class Plugin(object):
    """Reports how long the local Docker daemon takes to answer a minimal
    'GET /containers/json?limit=1' request."""

    DOCKER_PS_URL = "http://docker.lain:2375/containers/json"
    READ_INTERVAL = 60  # 60 seconds
    TIMEOUT = 5  # 5 seconds

    def init(self):
        collectd.info("docker_daemon_monitor plugin has been initialized.")

    def read(self):
        # Time a single round-trip against the daemon and dispatch it.
        metric = collectd.Values()
        metric.plugin = "lain.cluster.docker_daemon"
        metric.plugin_instance = "docker_ps_time"
        metric.type = "val"
        started = time.time()
        requests.get(
            self.DOCKER_PS_URL, params={"limit": 1}, timeout=self.TIMEOUT)
        metric.values = [time.time() - started]
        metric.dispatch()

    def shutdown(self):
        collectd.info("docker_daemon_monitor plugin has been shutdown.")


docker_daemon = Plugin()

# Only register when loaded by collectd's python plugin, not as a script.
if __name__ != "__main__":
    collectd.register_init(docker_daemon.init)
    collectd.register_read(docker_daemon.read, docker_daemon.READ_INTERVAL)
    collectd.register_shutdown(docker_daemon.shutdown)
Esempio n. 58
0
            # Note that we have to carefully descend down equivilent paths in 
            # both the schema and data nested dictionaries.
            for result in flatten_cephdata(value, data[key], daemonname, sep=sep, prefix=name):
                yield result

def cephtype(bitfield):
    """Decode a Ceph perf-counter value-type bitmask.

    Bit meanings:

            float = 0x1
            int   = 0x2
            avg   = 0x4
            count = 0x8

    Returns a 4-tuple of booleans (is_float, is_int, is_avg, is_count).
    """
    return tuple(bool(bitfield & mask) for mask in (0x1, 0x2, 0x4, 0x8))

# --- Main program ---

# Register functions.
collectd.register_init(init)
collectd.register_config(config)
collectd.register_read(read_all)
Esempio n. 59
0
        for snapshot in snapshots:
            try:
                tenant_id = getattr(snapshot, 'os-extended-snapshot-attributes:project_id')
            except AttributeError:
                continue
            try:
                data_tenant = data[self.prefix]["tenant-%s" % tenants[tenant_id]]
            except KeyError:
                continue
            data_tenant['volume-snapshots']['count'] += 1
            data_tenant['volume-snapshots']['bytes'] += (snapshot.size * 1024 * 1024 * 1024)

        return data

# Construct the plugin eagerly; log (but do not crash collectd) on failure.
try:
    plugin = CinderPlugin()
except Exception as exc:
    collectd.error("openstack-cinder: failed to initialize cinder plugin :: %s :: %s"
            % (exc, traceback.format_exc()))

def configure_callback(conf):
    """Received configuration information"""
    plugin.config_callback(conf)

def read_callback():
    """Callback triggerred by collectd on read"""
    plugin.read_callback()

collectd.register_config(configure_callback)
collectd.register_read(read_callback, plugin.interval)
Esempio n. 60
0
        # osd perf data
        for osd in json_data['osd_stats']:
            osd_id = "osd-%s" % osd['osd']
            data[ceph_cluster][osd_id] = {}
            data[ceph_cluster][osd_id]['kb_used'] = osd['kb_used']
            data[ceph_cluster][osd_id]['kb_total'] = osd['kb']
            data[ceph_cluster][osd_id]['snap_trim_queue_len'] = osd['snap_trim_queue_len']
            data[ceph_cluster][osd_id]['num_snap_trimming'] = osd['num_snap_trimming']
            data[ceph_cluster][osd_id]['apply_latency_ms'] = osd['fs_perf_stat']['apply_latency_ms']
            data[ceph_cluster][osd_id]['commit_latency_ms'] = osd['fs_perf_stat']['commit_latency_ms']

        return data

# Construct the plugin eagerly; log (but do not crash collectd) on failure.
try:
    plugin = CephPGPlugin()
except Exception as exc:
    collectd.error("ceph-pg: failed to initialize ceph pg plugin :: %s :: %s"
            % (exc, traceback.format_exc()))

def configure_callback(conf):
    """Received configuration information"""
    plugin.config_callback(conf)

def read_callback():
    """Callback triggerred by collectd on read"""
    plugin.read_callback()

collectd.register_config(configure_callback)
collectd.register_read(read_callback, plugin.interval)