def setup_collectd():
    """
    Registers callback functions with collectd
    """
    collectd.register_init(init)
    collectd.register_config(read_config)
    collectd.register_shutdown(shutdown)
def setup_collectd():
    """
    Registers callback functions with collectd
    """
    collectd.register_init(init)
    collectd.register_config(config)
    collectd.register_shutdown(shutdown)
Example #3
 def __init__(self, name=None):
     self.name = name
     if name:
         collectd.register_config(self.configure, name=self.name)
     else:
         collectd.register_config(self.configure)
     collectd.register_init(self.initialize)
     collectd.register_shutdown(self.shutdown)
def register_plugin(collectd):
    """Bind plugin hooks to collectd and viceversa."""
    config = Config.instance()

    # Set up logging
    log_handler = CollectdLogHandler(collectd=collectd, config=config)
    ROOT_LOGGER.addHandler(log_handler)
    ROOT_LOGGER.setLevel(logging.DEBUG)

    # Create the collectd plugin instance
    instance = Plugin(collectd=collectd, config=config)

    # Register plugin callbacks
    collectd.register_config(instance.config)
    collectd.register_shutdown(instance.shutdown)
    collectd.register_notification(instance.notify)
Example #5
	def __init__(self,typeinfo):
		self.nameserver="unknown"
		self.cluster="none"
		self.ns=None
		self.ip="0.0.0.0"
		self.publishTimeout=600
		self.q = multiprocessing.Queue()
		self.qthread = None
		self.typesdb = "/usr/share/collectd/types.db"
		self.types = {}
		self.typeinfo = typeinfo
		self.cachedValues = {}
	
		collectd.register_config(self.config)
		collectd.register_init(self.init)
		collectd.register_write(self.write)
		collectd.register_shutdown(self.shutdown)
Example #6
    def __init__(self, typeinfo):
        self.nameserver = "unknown"
        self.cluster = "none"
        self.ns = None
        self.ip = "0.0.0.0"
        self.publishTimeout = 600
        self.q = multiprocessing.Queue()
        self.qthread = None
        self.typesdb = "/usr/share/collectd/types.db"
        self.types = {}
        self.typeinfo = typeinfo
        self.cachedValues = {}

        collectd.register_config(self.config)
        collectd.register_init(self.init)
        collectd.register_write(self.write)
        collectd.register_shutdown(self.shutdown)
def register_plugin(collectd):
    "Bind plugin hooks to collectd and viceversa"

    config = Config.instance()

    # Set up logging
    log_handler = CollectdLogHandler(collectd=collectd)
    log_handler.cfg = config
    ROOT_LOGGER.addHandler(log_handler)
    ROOT_LOGGER.setLevel(logging.NOTSET)

    # Create the collectd plugin instance
    instance = Plugin(collectd=collectd, config=config)

    # Register plugin callbacks
    collectd.register_config(instance.config)
    collectd.register_write(instance.write)
    collectd.register_shutdown(instance.shutdown)
 def config_cb(self, config, data=None):
     self.config = util.map_collectd_config(config)
     if "Module.config" in self.config:
         self._log("config_cb: {!r}".format(self.config))
     if "Module.init" in self.config:
         collectd.register_init(util.init_closure(self), name=self.__module__)
     if "Module.read" in self.config:
         collectd.register_read(util.read_closure(self), name=self.__module__)
     if "Module.write" in self.config:
         collectd.register_write(util.write_closure(self), name=self.__module__)
     if "Module.notification" in self.config:
         collectd.register_notification(util.notification_closure(self), name=self.__module__)
     if "Module.flush" in self.config:
         collectd.register_flush(util.flush_closure(self), name=self.__module__)
     if "Module.log" in self.config:
         collectd.register_log(util.log_closure(self), name=self.__module__)
     if "Module.shutdown" in self.config:
         collectd.register_shutdown(util.shutdown_closure(self), name=self.__module__)
Example #9
def register_plugin(collectd):
    "Bind plugin hooks to collectd and viceversa"

    config = Config.instance()

    # Set up logging
    log_handler = CollectdLogHandler(collectd=collectd)
    log_handler.cfg = config
    ROOT_LOGGER.addHandler(log_handler)
    ROOT_LOGGER.setLevel(logging.NOTSET)

    # Create the collectd plugin instance
    instance = Plugin(collectd=collectd, config=config)

    # Register plugin callbacks
    collectd.register_config(instance.config)
    collectd.register_write(instance.write)
    collectd.register_shutdown(instance.shutdown)
Example #10
    def config(cfg):
        # Handle legacy config (not multiple-endpoint capable)
        if not any([n.key == 'Endpoint' for n in cfg.children]):
            # Create fake intermediary Endpoint node
            cfg.children = (collectd.Config('Endpoint', cfg, ('default', ),
                                            cfg.children), )

        endpoints = []
        for node in cfg.children:
            if node.key == 'Endpoint':
                endpoint = WriteWarp10.config_endpoint(node)
                if endpoint:
                    if any(e['name'] == endpoint['name'] for e in endpoints):
                        collectd.warning('write_warp10 plugin: Duplicate '
                                         'endpoint: %s' % endpoint['name'])
                    else:
                        endpoints.append(endpoint)
            else:
                collectd.warning('write_warp10 plugin: Unknown config key: '
                                 '%s' % node.key)

        if endpoints:
            for e in endpoints:
                ww10 = WriteWarp10(e['url'], e['token'], e['flush_interval'],
                                   e['flush_retry_interval'], e['buffer_size'],
                                   e['default_labels'], e['rewrite_rules'],
                                   e['rewrite_limit'])
                collectd.info('write_warp10 plugin: register init write and '
                              'shutdown functions')
                collectd.register_init(ww10.init,
                                       name='write_warp10/%s' % e['name'])
                collectd.register_write(ww10.write,
                                        name='write_warp10/%s' % e['name'])
                collectd.register_shutdown(ww10.shutdown,
                                           name='write_warp10/%s' % e['name'])
        else:
            collectd.warning('write_warp10 plugin: No valid endpoints found')
    def wallarm_init(self):
        for typedb_file in self.config['types_db']:
            try:
                self.wallarm_parse_types_file(typedb_file)
            except IOError as e:
                msg = "{0}: Unable to open TypesDB file '{1}': {2}.".format(
                    self.plugin_name,
                    typedb_file,
                    str(e)
                )
                self.log('warning', msg)

        if not len(self.types):
            msg = (
                "{0}: Didn't find any valid type in TypesDB files: {1}".format(
                    self.plugin_name,
                    self.config['types_db'],
                )
            )
            self.log('error', msg)
            raise ValueError(msg)

        self.last_try_time = self.get_time()
        self.last_flush_time = self.get_time()
        self.main_queue = Queue.Queue()
        self.send_queue = []
        self.measr_avg_size = self.config['measr_avg_size']
        self.update_queue_size()
        self.shutdown_event = threading.Event()
        self.send_lock = threading.Lock()

        self.send_thread = threading.Thread(target=self.send_watchdog)
        self.send_thread.start()

        collectd.register_write(self.wallarm_write)
        collectd.register_shutdown(self.shutdown_callback)
        """Collectd write callback"""
        # pylint: disable=broad-except
        # pass arguments to the writer
        try:
            self._writer.write(vl, data)
        except Exception as exc:
            if collectd is not None:
                collectd.error('Exception during write: %s' % exc)

    def shutdown(self):
        """Shutdown callback"""
        # pylint: disable=broad-except
        collectd.info("SHUTDOWN")
        try:
            self._writer.flush()
        except Exception as exc:
            if collectd is not None:
                collectd.error('Exception during shutdown: %s' % exc)


# The collectd plugin instance
# pylint: disable=invalid-name
instance = Plugin()
# pylint: enable=invalid-name

# Register plugin callbacks
collectd.register_init(instance.init)
collectd.register_config(instance.config)
collectd.register_write(instance.write)
collectd.register_shutdown(instance.shutdown)
Example #13
    for child in config.children:
      parse_config(child, depth+1)


def plugin_configure(config):
  """Handles configuring for this module. Called by collectd."""
  collectd.info('Configuring collectd-mlab plugin.')
  parse_config(config)


def plugin_initialize():
  """Initializes global variables during collectd plugin initialization."""
  global _PROC_PID_STAT
  collectd.info('Initializing collectd-mlab plugin.')
  _PROC_PID_STAT = '/proc/%s/stat' % os.getpid()


def plugin_shutdown():
  """Runs any shutdown routines during collectd plugin shutdown."""
  collectd.info('Shutting down collectd-mlab plugin.')


if should_register_plugin:
  # Register callbacks. Order is important.
  collectd.register_config(plugin_configure)
  collectd.register_init(plugin_initialize)
  collectd.register_read(plugin_read)
  # The mlab plugin has no write support today.
  # collectd.register_write(write)
  collectd.register_shutdown(plugin_shutdown)
Example #14
    VALUES.plugin_instance = INTERFACE
    VALUES.type = 'gauge'
    VALUES.type_instance = 'stations-count'
    # If no clients are connected, just send 0 to the metrics storage backend,
    # otherwise, send the count and the attributes of clients
    if results[-1] == -1:
        VALUES.dispatch(values=[0])
    else:
        VALUES.dispatch(values=[len(results)])
        # Browse the stations returned by the kernel
        for station in results:
            # If we shouldn't send data for every client, we check the MAC address
            if len(CLIENTS) > 0:
                if station.mac_addr in CLIENTS:
                    send_station_stats(station)
            # If not, just send the data
            else:
                send_station_stats(station)

    # Clean up a few values to avoid a memory leak
    del msg
    del cb
    del results


# Register various functions called during the various stages of the daemon
collectd.register_init(init)
collectd.register_config(config_function)
collectd.register_read(read_function)
collectd.register_shutdown(terminate_function)
Example #15

def uninit():
    global handler
    handler.stop()


def write(data):
    global rows
    queue.append(data)
    rows += 1


def read(data=None):
    # stats about me :)
    global rows
    global handler
    v1 = collectd.Values(type='gauge', interval=10)
    v1.plugin = 'pgstore-rows'
    v1.dispatch(values=[rows / 10])
    resetrows()
    v2 = collectd.Values(type='gauge', interval=10)
    v2.plugin = 'pgstore-threads'
    v2.dispatch(values=[handler.threadcount()])


collectd.register_write(write)
collectd.register_read(read)
collectd.register_init(init)
collectd.register_shutdown(uninit)
                max_time = 0
                data = ''
                for row in range(self.hosts[hostname]['rrdupdates'].GetRows()):
                    epoch = self.hosts[hostname]['rrdupdates'].GetRowTime(row)
                    if isHost:
                        dv = str(
                            self.hosts[hostname]['rrdupdates'].GetHostData(
                                param, row))
                    else:
                        dv = str(self.hosts[hostname]['rrdupdates'].GetVMData(
                            uuid, param, row))
                    if epoch > max_time:
                        max_time = epoch
                        data = dv
                result[param] = data
        return result

    def _LogVerbose(self, msg):
        ''' Be verbose, if self.verbose is True'''
        if not self.verbose:
            return
        collectd.info('xenserver-collectd [verbose]: %s' % msg)


# Hooks
xenserverCollectd = XenServerCollectd()
collectd.register_config(xenserverCollectd.Config)
collectd.register_init(xenserverCollectd.Connect)
collectd.register_read(xenserverCollectd.Read)
collectd.register_shutdown(xenserverCollectd.Shutdown)
    :param values: Instance of `collectd.Values`.

    An example of `values` is shown below. It may also contain `plugin_instance`
    and `type_instance` attributes. The `dsname`, `dstype`, `dsmin` and
    `dsmax` fields are not present in `collectd.Values`. They are
    added in the `BaseFormatter.convert_values_to_dict()` method if possible.

      collectd.Values(type='load', plugin='load', host='localhost', time=1432083347.3517618,
                      interval=300.0, values=[0.0, 0.01, 0.050000000000000003])

    """

    collectd.debug('%s.write_callback: values_object=%s' % ('$NAME', values))


    # Add dataset from types.db files.
    #
    values_dict = add_typesdb_info_to_values(values_to_dict(values), TYPES_DICT)

    with LOCK:
        for writer in WRITERS:
            writer.write(values_dict)


# Register callbacks
#
collectd.register_config(configure_callback)
collectd.register_shutdown(shutdown_callback)
collectd.register_write(write_callback)
collectd.register_init(init_callback)
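
A note on the write callback above: the docstring describes the shape of the `collectd.Values` object handed to `write_callback`. The plugin's own `values_to_dict()` helper is not shown in this excerpt, but a minimal, purely illustrative sketch of such a conversion (the attribute names are the standard `collectd.Values` fields; the helper name is hypothetical) could look like this:

def values_to_dict_sketch(values):
    """Illustrative only: flatten a collectd.Values object into a plain dict."""
    out = {
        'host': values.host,
        'plugin': values.plugin,
        'type': values.type,
        'time': values.time,
        'interval': values.interval,
        'values': list(values.values),
    }
    # plugin_instance and type_instance are optional, so copy them only if set.
    for attr in ('plugin_instance', 'type_instance'):
        if getattr(values, attr, ''):
            out[attr] = getattr(values, attr)
    return out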
Example #18
    def dispatch_data(self):
        collectd.debug("Plugin apache_trans: Values dispatched = " + json.dumps(self.values))
        dispatch(self.values)

    def read(self):
        """ Collectd read callback to gather metrics
            data from the access log and submit them """
        try:
            self.init()
            self.gather_metrics()
            if len(self.values) > 0:
                self.add_common_params()
                self.dispatch_data()
            else:
                collectd.error("No values are present to dispatch")
                return
        except Exception as e:
            collectd.error("Couldn't gather metrics due to the exception %s" % e)
            return

    def shutdown(self):
        """ Collectd plugin shutdown callback """
        self.logwatch.killed = True
        self.logwatch.join(1)


a_log = ApacheLog()
collectd.register_config(a_log.configure)
collectd.register_read(a_log.read)
collectd.register_shutdown(a_log.shutdown)
Example #19
            if node.key == 'Port':
                self.port = int(node.values[0])
            elif node.key == 'Host':
                self.host = node.values[0]
            elif node.key == 'User':
                self.user = node.values[0]
            elif node.key == 'Password':
                self.password = node.values[0]
            else:
                collectd.warning(
                    "vcenter plugin: Unkown configuration key %s" %
                    node.key
                )


def main():
    args = getArgs()
    vCenterStat = VCenterStat(args.host, args.port, args.user, args.password)
    return vCenterStat.read()

if __name__ == '__main__':
    main()
else:
    import collectd

    vcenter = VCenter()
    collectd.register_init(vcenter.init)
    collectd.register_shutdown(vcenter.shutdown)
    collectd.register_read(vcenter.read)
    collectd.register_config(vcenter.config)
Example #20
        # The following formula handles wrapping COUNTER data types
        # around since COUNTERs should never be negative.
        # Taken from: https://collectd.org/wiki/index.php/Data_source
        if data_source_type == 'COUNTER' and new_value < old_value:
            if old_value < 2 ** 32:
                new_row.value = (2 ** 32 - old_value + new_value) / time_delta
            else:
                new_row.value = (2 ** 64 - old_value + new_value) / time_delta

            if collectd_sample.plugin == 'cpu' and collectd_sample.type == 'cpu' and collectd_sample.type_instance == 'wait':
                # in virtualized environments, iowait sometimes wraps around and then back
                if new_row.value > (2 ** 31):
                    new_row.raw_value = previous_row.raw_value
                    new_row.value = previous_row.value
        else:
            # the default formula for a counter that has not wrapped
            new_row.value = (new_value - old_value) / time_delta

######################
# Register callbacks

try:
    collectd.register_config(memsql_config, MEMSQL_DATA)
    collectd.register_init(memsql_init, MEMSQL_DATA)
    collectd.register_write(memsql_write, MEMSQL_DATA)
    collectd.register_shutdown(memsql_shutdown, MEMSQL_DATA)
    collectd.register_read(memsql_read, 1, MEMSQL_DATA)
except:
    # collectd not available
    pass
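
The COUNTER branch above implements the wrap-around formula from the collectd Data_source wiki page referenced in the comment. Pulled out as a standalone function (a sketch for illustration, not part of the memsql plugin), the arithmetic is:

def counter_rate(old_value, new_value, time_delta):
    """Rate of a COUNTER data source, handling 32-bit and 64-bit wrap-around."""
    if new_value < old_value:
        # The counter wrapped; pick the boundary based on the old value's width.
        if old_value < 2 ** 32:
            return (2 ** 32 - old_value + new_value) / time_delta
        return (2 ** 64 - old_value + new_value) / time_delta
    # No wrap: plain difference over elapsed time.
    return (new_value - old_value) / time_delta

# Example: a 32-bit counter that wrapped from 4294967290 to 5 over 10 seconds
# yields (2**32 - 4294967290 + 5) / 10.0 == 1.1 units per second.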
Example #21
        Unit: CPU Cores
        '''
        cpu_delta = stats["cpu_stats"]["cpu_usage"]["total_usage"] - stats[
            "precpu_stats"]["cpu_usage"]["total_usage"]
        system_delta = stats["cpu_stats"]["system_cpu_usage"] - stats[
            "precpu_stats"]["system_cpu_usage"]
        if cpu_delta > 0 and system_delta > 0:
            return (float(cpu_delta) / system_delta
                    ) * len(stats["cpu_stats"]["cpu_usage"]["percpu_usage"])

        return 0

    def read(self):
        try:
            container_ids = self.__get_all_container_ids()
            self.read_docker_used_cpu_cores(container_ids)
            time.sleep(10)
            self.read_docker_reserved_cpu_cores(container_ids)
        except Exception as e:
            collectd.error("read() failed, exception: {}".format(e))

    def shutdown(self):
        collectd.info("node_monitor plugin has been shutdown.")


if __name__ != "__main__":
    node_monitor = Plugin()
    collectd.register_init(node_monitor.init)
    collectd.register_read(node_monitor.read, node_monitor.READ_INTERVAL)
    collectd.register_shutdown(node_monitor.shutdown)
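
For reference, the core-count formula in __calculate_cpu_cores() works out as follows with made-up numbers (a quick worked example, not part of the plugin):

# Between two samples the container consumed 2000000 units of CPU time while
# the whole host consumed 8000000, and the host reports 4 per-CPU entries:
cpu_delta = 2000000
system_delta = 8000000
num_cpus = 4
cores_used = (float(cpu_delta) / system_delta) * num_cpus  # -> 1.0 core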
Example #22
class Plugin(object):
    DOCKER_INFO_URL = "http://docker.lain:2375/info"
    READ_INTERVAL = 60  # 60 seconds
    TIMEOUT = 5  # 5 seconds

    def init(self):
        collectd.info("docker_daemon_monitor plugin has been initialized.")

    def read(self):
        metric = collectd.Values()
        metric.plugin = "lain.cluster.docker_daemon"
        metric.plugin_instance = "docker_info_time"
        metric.type = "val"
        start_at = time.time()
        requests.get(self.DOCKER_INFO_URL, timeout=self.TIMEOUT)
        docker_info_time = time.time() - start_at
        metric.values = [docker_info_time]
        metric.dispatch()

    def shutdown(self):
        collectd.info("docker_daemon_monitor plugin has been shutdown.")


docker_daemon = Plugin()

if __name__ != "__main__":
    collectd.register_init(docker_daemon.init)
    collectd.register_read(docker_daemon.read, docker_daemon.READ_INTERVAL)
    collectd.register_shutdown(docker_daemon.shutdown)
Example #23
	data["config"]["amqp_delivery_mode"] = int(data["config"]["amqp_delivery_mode"])

	data["config"]["threads"] = int(data["config"]["threads"])
	if data["config"]["threads"] < 1:
		data["config"]["threads"] = 1

def init(data={}):
	collectd.info("2AMQP: Initializing...")

	data["2amqp"] = Collectd2AMQP(config=data["config"])

def write(vl, data={}):
	data["2amqp"].write(vl)

def shutdown(data={}):
	collectd.info("2AMQP: Shutting down...")

	data["2amqp"].shutdown()



data = {}

collectd.info("2AMQP: Starting plugin...")

collectd.register_config(callback=config, data=data)
collectd.register_init(callback=init, data=data)
collectd.register_write(callback=write, data=data)
collectd.register_shutdown(callback=shutdown, data=data)
Example #24
                getattr(vl, 'type_instance', None)
        ]:
            if item:
                item = str(item).lower().replace('-', '_')
                if item not in name_segments:
                    name_segments.append(item)

        name = '.'.join(name_segments)
        if name == 'load':
            self._store(name + '1', vl.time, vl.values[0])
            self._store(name + '5', vl.time, vl.values[1])
            self._store(name + '15', vl.time, vl.values[2])
        elif vl.plugin in ['interface', 'netlink'] and len(vl.values) > 1:
            for key, value in zip(['tx', 'rx'], vl.values):
                self._store(name + '.' + key, vl.time, value)
        else:
            self._store(name, vl.time, vl.values[0])

    def collectd_shutdown(self):
        """Callback when collectd shutdown."""
        self.server.stop()


kc = KatcpCollectd()

collectd.register_config(kc.collectd_configure, name='katcp_collectd')
collectd.register_write(kc.collectd_write)
collectd.register_init(kc.collectd_init)
collectd.register_shutdown(kc.collectd_shutdown)
#
    send_stats(a)
    clean()


def send_stats(data=None):
    order = ("Getattr Setattr Lookup Access Readlink Read Write Create Mkdir Symlink Mknod Remove Rmdir Rename Link Readdir RdirPlus Fsstat Fsinfo PathConf Commit")
    for x in order.split():
        dispatch_stat(data[x], x.lower())


def dispatch_stat(value, name):
    """Read a key from info response data and dispatch a value"""
    if value is None:
        collectd.warning('nfs plugin: Value not found for %s' % name)
        return
    if value < 0:
        collectd.warning('nfs plugin: Value is negative for %s' % name)
        value = 0
    collectd.info('Sending value[counter]: %s=%s' % (name, value))

    val = collectd.Values(plugin='nfs')
    val.type = 'counter'
    val.type_instance = name
    val.values = [value]
    #print val
    val.dispatch()


collectd.register_read(read)
collectd.register_shutdown(clean)
#
#     The callback function will be called without parameters, except for
#     data if it was supplied.

collectd.register_init(handle_init)

# register_write(...)
# register_write(callback[, data][, name]) -> identifier
#
#     Register a callback function to receive values dispatched by other plugins
#
#     'callback' is a callable object that will be called every time a value
#         is dispatched.
#     'data' is an optional object that will be passed back to the callback
#         function every time it is called.
#     'name' is an optional identifier for this callback. The default name
#         is 'python.<module>'.
#         Every callback needs a unique identifier, so if you want to
#         register this callback multiple time from the same module you need
#         to specify a name here.
#     'identifier' is the full identifier assigned to this callback.
#
#     The callback function will be called with one or two parameters:
#     values: A Values object which is a copy of the dispatched values.
#     data: The optional data parameter passed to the register function.
#         If the parameter was omitted it will be omitted here, too.

# collectd.register_write(handle_write) # Registered as part of handle_init

collectd.register_shutdown(handle_shutdown)
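
The commented notes above spell out register_write()'s callback, data and name parameters. A minimal sketch that registers one callable twice with both optional arguments (the handler and names here are placeholders, not part of the module being documented):

def handle_write_example(values, data=None):
    # 'values' is a copy of the dispatched collectd.Values object;
    # 'data' is whatever object was passed at registration time.
    collectd.info('got %d value(s) from plugin %s' % (len(values.values), values.plugin))

# Each registration needs a unique name when the same module registers twice.
collectd.register_write(handle_write_example, data={'target': 'a'}, name='example/a')
collectd.register_write(handle_write_example, data={'target': 'b'}, name='example/b')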
Example #27
        except Exception as e:
            collectd.error(
                "read_docker_used_cpu_cores() failed, exception: {}".format(e))

    def read(self):
        self.read_docker_used_cpu_cores()

    def shutdown(self):
        collectd.info("node_monitor plugin has been shutdown.")

    def __calculate_cpu_cores(self, stats):
        '''
        Unit: CPU Cores
        '''
        cpu_delta = stats["cpu_stats"]["cpu_usage"]["total_usage"] - stats[
            "precpu_stats"]["cpu_usage"]["total_usage"]
        system_delta = stats["cpu_stats"]["system_cpu_usage"] - stats[
            "precpu_stats"]["system_cpu_usage"]
        if cpu_delta > 0 and system_delta > 0:
            return (float(cpu_delta) / system_delta
                    ) * len(stats["cpu_stats"]["cpu_usage"]["percpu_usage"])

        return 0


if __name__ != "__main__":
    node_monitor = Plugin()
    collectd.register_init(node_monitor.init)
    collectd.register_read(node_monitor.read, node_monitor.READ_INTERVAL)
    collectd.register_shutdown(node_monitor.shutdown)
Example #28
                env['host_counter_ids'].append(metric)

    collectd.info("create_environment: configured to grab %d host counters" %
                  (len(env['host_counter_ids'])))

    env['vm_counter_ids'] = []
    if config['vm_counters'] == "all":
        env['vm_counter_ids'] = env['lookup_vm']
    else:
        for metric in env['lookup_vm']:
            if ids_counters_dict[metric.counterId] in config['vm_counters']:
                env['vm_counter_ids'].append(metric)

    collectd.info(
        "create_environment: configured to grab %d virtual_machine counters" %
        (len(env['vm_counter_ids'])))

    Disconnect(service_instance)

    return env


################################################################################
# COLLECTD REGISTRATION
################################################################################

collectd.register_config(configure_callback)
collectd.register_init(init_callback)
collectd.register_read(callback=read_callback, interval=INTERVAL)
collectd.register_shutdown(shutdown_callback)
            new_watts = {}
            for ch in chans:
                new_watts['ch%d' % ch] = int(ch[0].childNodes[0].childNodes[0].nodeValue)
            self.watts = new_watts

    def latest_watts(self):
        return self.watts

reader_thread = CollectCostReader()

def collectcost_init():
    reader_thread.start()

def collectcost_read():
    watts = reader_thread.latest_watts()

    for ch, w in watts.items():
        val = collectd.Values(plugin='currentcost')
        val.type = 'gauge'
        val.type_instance = ch
        val.values = [w]
        val.dispatch()

def collectcost_shutdown():
    reader_thread.stop()


collectd.register_init(collectcost_init)
collectd.register_read(collectcost_read)
collectd.register_shutdown(collectcost_shutdown)
Example #30
    def notify(self, n):
        """Collectd notification callback"""
        # type='gauge',type_instance='link_status',plugin='ovs_events',plugin_instance='br0',
        # host='silv-vmytnyk-nos.ir.intel.com',time=1476441572.7450583,severity=4,
        # message='link state of "br0" interface has been changed to "UP"')
        collectd_event_severity_map = {
            collectd.NOTIF_FAILURE : 'CRITICAL',
            collectd.NOTIF_WARNING : 'WARNING',
            collectd.NOTIF_OKAY : 'NORMAL'
        }
        fault = Fault(self.get_event_id())
        fault.event_severity = collectd_event_severity_map[n.severity]
        fault.specific_problem = '{}-{}'.format(n.plugin_instance, n.type_instance)
        fault.alarm_condition = n.message
        self.event_send(fault)

    def shutdown(self):
        """Collectd shutdown callback"""
        # stop the timer
        self.stop_timer()

# The collectd plugin instance
plugin_instance = VESPlugin()

# Register plugin callbacks
collectd.register_config(plugin_instance.config)
collectd.register_init(plugin_instance.init)
collectd.register_write(plugin_instance.write)
collectd.register_notification(plugin_instance.notify)
collectd.register_shutdown(plugin_instance.shutdown)
Example #31
            collectd.warning('fritzcollectd: Unknown config %s' % node.key)
    CONFIGS.append(FritzCollectd(**params))


def callback_init():
    """ Init callback """
    for config in CONFIGS:
        config.init()


def callback_read():
    """ Read callback """
    for config in CONFIGS:
        try:
            config.read()
        except XMLSyntaxError:
            collectd.warning('fritzcollectd: Invalid data received, '
                             'attempting to reconnect')
            config.init()


def callback_shutdown():
    """ Shutdown callback """
    del CONFIGS[:]


collectd.register_config(callback_configure)
collectd.register_init(callback_init)
collectd.register_read(callback_read)
collectd.register_shutdown(callback_shutdown)
                    collectd.debug("End XML Tag Fragment found: " + rawline)
                    try:
                        xmltree = ET.fromstring(rawxml)
                        if xmltree.tag == 'InstantaneousDemand':
                            write_to_collectd(getInstantDemandKWh(xmltree))
                            # collectd.debug(getInstantDemandKWh(xmltree))
                        else:
                            # collectd.info("ravencollectd: Unrecognised (not implemented) XML Fragment")
                            # collectd.info(rawxml)
                            pass
                    except Exception as e:
                      collectd.warning("ravencollectd: Exception triggered: " + str(e))
                    # reset rawxml
                    rawxml = ""
                    return
                # if it starts with a space, it's inside the fragment
                else:
                    # rawxml = rawxml + rawline
                    # collectd.debug("ravencollectd: Normal inner XML Fragment: " + str(rawxml))
                    pass
            else:
                pass
    else:
        collectd.warning("ravencollectd: Was asked to begin reading/writing data without opening connections.")


collectd.register_init(initialise_plugin)
collectd.register_config(config_plugin)
collectd.register_read(read_data)
collectd.register_shutdown(close_plugin)
Example #33
        for child in config.children:
            parse_config(child, depth + 1)


def plugin_configure(config):
    """Handles configuring for this module. Called by collectd."""
    collectd.info('Configuring collectd-mlab plugin.')
    parse_config(config)


def plugin_initialize():
    """Initializes global variables during collectd plugin initialization."""
    global _PROC_PID_STAT
    collectd.info('Initializing collectd-mlab plugin.')
    _PROC_PID_STAT = '/proc/%s/stat' % os.getpid()


def plugin_shutdown():
    """Runs any shutdown routines during collectd plugin shutdown."""
    collectd.info('Shutting down collectd-mlab plugin.')


if should_register_plugin:
    # Register callbacks. Order is important.
    collectd.register_config(plugin_configure)
    collectd.register_init(plugin_initialize)
    collectd.register_read(plugin_read)
    # The mlab plugin has no write support today.
    # collectd.register_write(write)
    collectd.register_shutdown(plugin_shutdown)
    try:
        platform.system()
    except:
        log("executing SIGCHLD workaround")
        signal.signal(signal.SIGCHLD, signal.SIG_DFL)
    if __name__ != "__main__":
        DOGSTATSD_INSTANCE.init_callback()


# Note: Importing collectd_dogstatsd registers its own endpoints

if __name__ != "__main__":
    # when running inside plugin
    collectd.register_init(restore_sigchld)
    collectd.register_config(plugin_config)
    collectd.register_shutdown(DOGSTATSD_INSTANCE.register_shutdown)

else:
    # outside plugin just collect the info
    restore_sigchld()
    send()
    log(
        json.dumps(get_host_info(),
                   sort_keys=True,
                   indent=4,
                   separators=(',', ': ')))
    if len(sys.argv) < 2:
        while True:
            time.sleep(INTERVAL)
            send()
Example #35
        for item in [vl.plugin, getattr(vl, 'plugin_instance', None),
                     vl.type, getattr(vl, 'type_instance', None)]:
            if item:
                item = str(item).lower().replace('-', '_')
                if item not in name_segments:
                    name_segments.append(item)

        name = '.'.join(name_segments)
        if name == 'load':
            self._store(name + '1', vl.time, vl.values[0])
            self._store(name + '5', vl.time, vl.values[1])
            self._store(name + '15', vl.time, vl.values[2])
        elif vl.plugin in ['interface', 'netlink'] and len(vl.values) > 1:
            for key, value in zip(['tx', 'rx'], vl.values):
                self._store(name + '.' + key, vl.time, value)
        else:
            self._store(name, vl.time, vl.values[0])

    def collectd_shutdown(self):
        """Callback when collectd shutdown."""
        self.server.stop()

kc = KatcpCollectd()

collectd.register_config(kc.collectd_configure, name='katcp_collectd')
collectd.register_write(kc.collectd_write)
collectd.register_init(kc.collectd_init)
collectd.register_shutdown(kc.collectd_shutdown)
#
            paramList = self.hosts[hostname]['rrdupdates'].GetVMParamList(uuid)
        for param in paramList:
            if param != '':
                max_time=0
                data=''
                for row in range(self.hosts[hostname]['rrdupdates'].GetRows()):
                    epoch = self.hosts[hostname]['rrdupdates'].GetRowTime(row)
                    if isHost:
                        dv = str(self.hosts[hostname]['rrdupdates'].GetHostData(param,row))
                    else:
                        dv = str(self.hosts[hostname]['rrdupdates'].GetVMData(uuid,param,row))
                    if epoch > max_time:
                        max_time = epoch
                        data = dv
                result[param] = data
        return result

    def _LogVerbose(self, msg):
        ''' Be verbose, if self.verbose is True'''
        if not self.verbose:
            return
        collectd.info('xenserver-collectd [verbose]: %s' % msg)


# Hooks
xenserverCollectd = XenServerCollectd()
collectd.register_config(xenserverCollectd.Config)
collectd.register_init(xenserverCollectd.Connect)
collectd.register_read(xenserverCollectd.Read)
collectd.register_shutdown(xenserverCollectd.Shutdown)
Example #37
            collectd.warning('fritzcollectd: Unknown config %s' % node.key)
    CONFIGS.append(FritzCollectd(**params))


def callback_init():
    """ Init callback """
    for config in CONFIGS:
        config.init()


def callback_read():
    """ Read callback """
    for config in CONFIGS:
        try:
            config.read()
        except XMLSyntaxError:
            collectd.warning('fritzcollectd: Invalid data received, '
                             'attempting to reconnect')
            config.init()


def callback_shutdown():
    """ Shutdown callback """
    del CONFIGS[:]


collectd.register_config(callback_configure)
collectd.register_init(callback_init)
collectd.register_read(callback_read)
collectd.register_shutdown(callback_shutdown)
Example #38
    def __init__(self, name):
        self.name = name

        collectd.register_config(self.configure, name=self.name)
        collectd.register_init(self.initialize)
        collectd.register_shutdown(self.shutdown)
Example #39
                        if xmltree.tag == 'InstantaneousDemand':
                            write_to_collectd(getInstantDemandKWh(xmltree))
                            # collectd.debug(getInstantDemandKWh(xmltree))
                        else:
                            # collectd.info("ravencollectd: Unrecognised (not implemented) XML Fragment")
                            # collectd.info(rawxml)
                            pass
                    except Exception as e:
                        collectd.warning(
                            "ravencollectd: Exception triggered: " + str(e))
                    # reset rawxml
                    rawxml = ""
                    return
                # if it starts with a space, it's inside the fragment
                else:
                    # rawxml = rawxml + rawline
                    # collectd.debug("ravencollectd: Normal inner XML Fragment: " + str(rawxml))
                    pass
            else:
                pass
    else:
        collectd.warning(
            "ravencollectd: Was asked to begin reading/writing data without opening connections."
        )


collectd.register_init(initialise_plugin)
collectd.register_config(config_plugin)
collectd.register_read(read_data)
collectd.register_shutdown(close_plugin)
    See https://github.com/deniszh/collectd-iostat-python/issues/2 for
    details.
    """
    try:
        platform.system()
    except:
        log("executing SIGCHLD workaround")
        signal.signal(signal.SIGCHLD, signal.SIG_DFL)
    if __name__ != "__main__":
        DOGSTATSD_INSTANCE.init_callback()


# Note: Importing collectd_dogstatsd registers its own endpoints

if __name__ != "__main__":
    # when running inside plugin
    collectd.register_init(restore_sigchld)
    collectd.register_config(plugin_config)
    collectd.register_shutdown(DOGSTATSD_INSTANCE.register_shutdown)

else:
    # outside plugin just collect the info
    restore_sigchld()
    send()
    log(json.dumps(get_host_info(), sort_keys=True,
                   indent=4, separators=(',', ': ')))
    if len(sys.argv) < 2:
        while True:
            time.sleep(INTERVAL)
            send()
Example #41
    def init(self):
        collectd.info("rebellion_monitor plugin has been initialized.")

    def read(self):
        try:
            params = {"filters": '{"name": ["rebellion.service"]}'}
            containers = requests.get(
                "{}/containers/json".format(self.DOCKER_URL_PREFIX),
                params=params,
                timeout=self.TIMEOUT).json()
            metric = collectd.Values()
            metric.plugin = self.NAME
            metric.plugin_instance = "rebellion_service"
            metric.type = "val"
            metric.values = [len(containers)]
            metric.dispatch()
        except Exception as e:
            collectd.error(
                "rebellion_monitor.read() failed, exception: {}".format(e))

    def shutdown(self):
        collectd.info("rebellion_monitor plugin has been shutdown.")


if __name__ != "__main__":
    rebellion_monitor = Plugin()
    collectd.register_init(rebellion_monitor.init)
    collectd.register_read(rebellion_monitor.read,
                           rebellion_monitor.READ_INTERVAL)
    collectd.register_shutdown(rebellion_monitor.shutdown)
    def init_callback(self):
        collectd.register_read(self.read_callback, interval=self.interval)

    def shutdown_callback(self):
        for cnr in self.jmx_threads:
            self.jmx_threads[cnr].stop = True


logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
handler = logging.StreamHandler(sys.stdout)
formatter = logging.Formatter(
    '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
# handler = logging.FileHandler('/usr/share/collectd/docker-kafka-collectd-plugin')
# handler.setFormatter(formatter)
# logger.addHandler(handler)

if __name__ == '__main__':
    plugin = DockerKafkaPlugin()
    while True:
        plugin.read_callback()
else:
    import collectd

    plugin = DockerKafkaPlugin()
    collectd.register_config(plugin.configure_callback)
    collectd.register_init(plugin.init_callback)
    collectd.register_shutdown(plugin.shutdown_callback)
Example #43
def configure_callback(conf):
    '''
    Configures plugin with config provided from collectd.
    '''
    LOGGER.info('Starting Consul Plugin configuration.')
    # Default values of config options
    api_host = 'localhost'
    api_port = 8500
    api_protocol = 'http'
    telemetry_server = False
    telemetry_host = 'localhost'
    telemetry_port = 8125
    acl_token = None
    sfx_token = None
    ssl_certs = {'ca_cert': None, 'client_cert': None, 'client_key': None}
    enhanced_metrics = False
    exclude_metrics = []
    include_metrics = []
    custom_dimensions = {}
    default_telemetry_regex = re.compile('|'.join(
        '(?:{0})'.format(re.escape(x)) for x in default_telemetry))

    for node in conf.children:
        if node.key == 'ApiHost':
            api_host = node.values[0]
        elif node.key == 'ApiPort':
            api_port = int(node.values[0])
        elif node.key == 'ApiProtocol':
            api_protocol = node.values[0]
        elif node.key == 'TelemetryServer':
            telemetry_server = _str_to_bool(node.values[0])
        elif node.key == 'TelemetryHost':
            telemetry_host = node.values[0]
        elif node.key == 'TelemetryPort':
            telemetry_port = int(node.values[0])
        elif node.key == 'AclToken':
            acl_token = node.values[0]
        elif node.key == 'Dimensions' or node.key == 'Dimension':
            custom_dimensions.update(_dimensions_str_to_dict(node.values[0]))
        elif node.key == 'SfxToken':
            sfx_token = node.values[0]
        elif node.key == 'CaCertificate':
            ssl_certs['ca_cert'] = node.values[0]
        elif node.key == 'ClientCertificate':
            ssl_certs['client_cert'] = node.values[0]
        elif node.key == 'ClientKey':
            ssl_certs['client_key'] = node.values[0]
        elif node.key == 'Debug':
            log_handler.enable_debug = _str_to_bool(node.values[0])
        elif node.key == 'EnhancedMetrics':
            enhanced_metrics = _str_to_bool(node.values[0])
        elif node.key == 'ExcludeMetric':
            exclude_metrics.append(re.escape(node.values[0]))
        elif node.key == 'IncludeMetric':
            include_metrics.append(re.escape(node.values[0]))

    # the values of the 'exclude_metric' parameter are used
    # to block metrics using prefix matching - e.g. for the config
    # exclude_metric "consul.http", we block all metrics from
    # 'agent/metrics' endpoint or from udp packets starting with
    # "consul.http".

    # compile a combined regex for all metrics to exclude
    # from the /metrics endpoint or the udp packets.
    if exclude_metrics:
        exclude_metrics_regex = re.compile('|'.join('(?:{0})'.format(x)
                                                    for x in exclude_metrics))
    else:
        exclude_metrics_regex = None

    if include_metrics:
        include_metrics_regex = re.compile('|'.join('(?:{0})'.format(x)
                                                    for x in include_metrics))
    else:
        include_metrics_regex = None

    plugin_conf = {
        'api_host': api_host,
        'api_port': api_port,
        'api_protocol': api_protocol,
        'telemetry_server': telemetry_server,
        'telemetry_host': telemetry_host,
        'telemetry_port': telemetry_port,
        'acl_token': acl_token,
        'sfx_token': sfx_token,
        'ssl_certs': ssl_certs,
        'default_telemetry_regex': default_telemetry_regex,
        'enhanced_metrics': enhanced_metrics,
        'exclude_metrics_regex': exclude_metrics_regex,
        'include_metrics_regex': include_metrics_regex,
        'custom_dimensions': custom_dimensions,
        'debug': log_handler.enable_debug
    }

    LOGGER.debug('Plugin Configurations - ')
    for k, v in plugin_conf.items():
        if k == 'exclude_metrics_regex':
            k, v = 'exclude_metrics', exclude_metrics
        elif k == 'include_metrics_regex':
            k, v = 'include_metrics', include_metrics
        LOGGER.debug('{0} : {1}'.format(k, v))

    consul_plugin = ConsulPlugin(plugin_conf)

    collectd.register_read(consul_plugin.read,
                           name='{0}:{1}'.format(api_host, api_port))
    collectd.register_shutdown(consul_plugin.shutdown)
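
As the comments inside configure_callback() note, ExcludeMetric values act as prefixes: each one is re.escape'd and OR'ed into a single compiled pattern. A small illustration of that prefix behaviour (the metric names below are invented):

import re

exclude_metrics = ['consul.http', 'consul.raft.apply']
pattern = re.compile('|'.join('(?:{0})'.format(re.escape(x)) for x in exclude_metrics))

# re.match anchors at the start of the string, so the combined pattern
# matches any metric whose name starts with one of the configured prefixes.
assert pattern.match('consul.http.GET.v1.kv') is not None
assert pattern.match('consul.rpc.query') is None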
Example #44
def init():
    global handler
    handler = Handler(queue)
    handler.start()

def uninit():
    global handler
    handler.stop()

def write(data):
    global rows
    queue.append(data)
    rows += 1

def read(data=None):
    # stats about me :)
    global rows
    global handler
    v1 = collectd.Values(type='gauge', interval=10)
    v1.plugin='pgstore-rows'
    v1.dispatch(values=[rows / 10])
    resetrows()
    v2 = collectd.Values(type='gauge', interval=10)
    v2.plugin='pgstore-threads'
    v2.dispatch(values=[handler.threadcount()])

collectd.register_write(write)
collectd.register_read(read)
collectd.register_init(init)
collectd.register_shutdown(uninit)
Example #45
   collectd.info('buddyinfo plugin: configuring host: %s' % (host_name)) 

def initer():
   get_host_type()
   collectd.info('buddyinfo plugin: host of type: %s' % (host_type))
   collectd.info('buddyinfo initer: white list: %s ' % (white_list))
   init_stats_cache()
   collectd.info('buddyinfo init: stats_cache: %s ' % (stats_cache))

def reader(input_data=None):
   collect_buddyinfo()
   swap_current_cache()

def writer(metric, data=None):
   for i in metric.values:
      collectd.debug("%s (%s): %f" % (metric.plugin, metric.type, i))

def shutdown():
   collectd.info("buddyinfo plugin shutting down")

#== Callbacks ==#
if (os_name == 'Linux'):
   collectd.register_config(configer)
   collectd.register_init(initer)
   collectd.register_read(reader)
   collectd.register_write(writer)
   collectd.register_shutdown(shutdown)
else:
   collectd.warning('buddyinfo plugin currently works for Linux only')

Example #46
        if self.includeServerStatsMetrics:
            for root_metric_key in self.includeServerStatsMetrics.iterkeys():
                if server_status.has_key(root_metric_key):
                    metrics_to_collect[root_metric_key] = deepcopy(SERVER_STATUS_METRICS[root_metric_key])
        else:
            metrics_to_collect = deepcopy(SERVER_STATUS_METRICS)
        # rename "." lock to be "GLOBAL"
        if metrics_to_collect["locks"].has_key("."):
            print(SERVER_STATUS_METRICS["locks"])
            global_lock_data = metrics_to_collect["locks"].pop(".")
            metrics_to_collect["locks"]["GLOBAL"] = global_lock_data

            print(SERVER_STATUS_METRICS["locks"])
        for db_name in self.mongo_dbs:
            metrics_to_collect["locks"][db_name] = deepcopy(SERVER_STATUS_METRICS["locks"]["."])

        self.recursive_submit(metrics_to_collect, server_status)


    def publish_data(self):
        self.publish_server_status()
        self.publish_connection_pool_metrics()
        self.publish_dbstats()


mongodb = MongoDB()
collectd.register_read(mongodb.publish_data)
collectd.register_config(mongodb.config)
collectd.register_init(mongodb.connect)
collectd.register_shutdown(mongodb.disconnect)
Example #47
        """ Immitates class passed in by collectd """
        def __init__(self, name, intvl, acclog, acclog_fmt):
            self.children = []
            self.children.append(NodeMock('name', name))
            self.children.append(NodeMock('interval', intvl))
            self.children.append(NodeMock('accesslog', acclog))
            self.children.append(NodeMock('accesslogformat', acclog_fmt))


    from time import sleep
    sleep_time = 1
    cfg = ConfigMock(
        'serverX_requests', sleep_time,
        '/etc/httpd/logs/ssl_access.log',
         '%h %l %u %t \"%r\" %>s %b \"%{Referer}i\"'\
        ' \"%{User-Agent}i\" %k %I %O %D')
    alog = ApacheLog(debug=True)
    alog.configure(cfg)
    try:
        while True:
            alog.read()
            sleep(sleep_time)
    except KeyboardInterrupt:
        alog.shutdown()
else:
    import collectd
    alog = ApacheLog()
    collectd.register_config(alog.configure)
    collectd.register_read(alog.read)
    collectd.register_shutdown(alog.shutdown)
Example #48
    return


def flush_cb(timeout, identifier, data=None):
    return

def log_cb(severity, message, data=None):
    return


## Register the call-back functions

data = "stub-string"         # placeholder
name = init_cb.__module__    # the default
interval = 10                # the default

collectd.register_config(config_cb, data, name)
collectd.register_init(init_cb, data, name)
collectd.register_shutdown(shutdown_cb, data, name)

collectd.register_read(read_cb, interval, data, name)
collectd.register_write(write_cb, data, name)
collectd.register_notification(notification_cb, data, name)

collectd.register_flush(flush_cb, data, name)
collectd.register_log(log_cb, data, name)

## Local Variables:
## mode: python
## End:
    vl = collectd.Values(plugin="iotop_wrapper",
                         time=values[-1][0],
                         type="bitrate")
    vl.interval = data["interval"]

    vl.dispatch(type_instance="actual_read", values=(values[-1][1], ))
    vl.dispatch(type_instance="actual_read_min",
                values=(min(v[1] for v in values), ))
    vl.dispatch(type_instance="actual_read_max",
                values=(max(v[1] for v in values), ))
    vl.dispatch(
        type_instance="actual_read_avg",
        values=(sum(float(v[1]) for v in values) / len(values), ),
    )

    vl.dispatch(type_instance="actual_write", values=(values[-1][2], ))
    vl.dispatch(type_instance="actual_write_min",
                values=(min(v[2] for v in values), ))
    vl.dispatch(type_instance="actual_write_max",
                values=(max(v[2] for v in values), ))
    vl.dispatch(
        type_instance="actual_write_avg",
        values=(sum(float(v[2]) for v in values) / len(values), ),
    )


collectd.register_config(config)
collectd.register_init(init, data=DATA)
collectd.register_shutdown(shutdown, data=DATA)
collectd.register_read(read, data=DATA)
Example #50
    get_host_type()
    collectd.info('buddyinfo plugin: host of type: %s' % (host_type))
    collectd.info('buddyinfo initer: white list: %s' % (white_list))
    init_stats_cache()
    collectd.info('buddyinfo init: stats_cache: %s' % (stats_cache))


def reader(input_data=None):
    collect_buddyinfo()
    swap_current_cache()


def writer(metric, data=None):
    for i in metric.values:
        collectd.debug('%s (%s): %f' % (metric.plugin, metric.type, i))


def shutdown():
    collectd.info('buddyinfo plugin shutting down')


#== Callbacks ==#
if (os_name == 'Linux'):
    collectd.register_config(configer)
    collectd.register_init(initer)
    collectd.register_read(reader)
    collectd.register_write(writer)
    collectd.register_shutdown(shutdown)
else:
    collectd.warning('buddyinfo plugin currently works for Linux only')
            val.type = 'gauge'
            val.type_instance = 'power_factor'
            val.values = [socket.power_factor]
            val.dispatch()

            val.type = 'gauge'
            val.type_instance = 'load_on'
            val.values = [socket.powered]
            val.dispatch()

            val.type = 'frequency'
            val.type_instance = 'grid'
            val.values = [socket.frequency]
            val.dispatch()


def shutdown_func():
    global instances

    for inst in instances:
        if inst['socket'] != None:
            inst['socket'].disconnect()

    instances = []


collectd.register_config(config_func)
collectd.register_init(init_func)
collectd.register_read(read_func)
collectd.register_shutdown(shutdown_func)
Example #52
class Plugin(object):
    DOCKER_PS_URL = "http://docker.lain:2375/containers/json"
    READ_INTERVAL = 60  # 60 seconds
    TIMEOUT = 5  # 5 seconds

    def init(self):
        collectd.info("docker_daemon_monitor plugin has been initialized.")

    def read(self):
        metric = collectd.Values()
        metric.plugin = "lain.cluster.docker_daemon"
        metric.plugin_instance = "docker_ps_time"
        metric.type = "val"
        start_at = time.time()
        requests.get(
            self.DOCKER_PS_URL, params={"limit": 1}, timeout=self.TIMEOUT)
        docker_ps_time = time.time() - start_at
        metric.values = [docker_ps_time]
        metric.dispatch()

    def shutdown(self):
        collectd.info("docker_daemon_monitor plugin has been shutdown.")


docker_daemon = Plugin()

if __name__ != "__main__":
    collectd.register_init(docker_daemon.init)
    collectd.register_read(docker_daemon.read, docker_daemon.READ_INTERVAL)
    collectd.register_shutdown(docker_daemon.shutdown)
Example #53
        """Collectd write callback"""
        # pylint: disable=broad-except
        # pass arguments to the writer
        try:
            self._writer.write(vl, data)
        except Exception as exc:
            if collectd is not None:
                collectd.error('Exception during write: %s' % exc)

    def shutdown(self):
        """Shutdown callback"""
        # pylint: disable=broad-except
        collectd.info("SHUTDOWN")
        try:
            self._writer.flush()
        except Exception as exc:
            if collectd is not None:
                collectd.error('Exception during shutdown: %s' % exc)


# The collectd plugin instance
# pylint: disable=invalid-name
instance = Plugin()
# pylint: enable=invalid-name

# Register plugin callbacks
collectd.register_init(instance.init)
collectd.register_config(instance.config)
collectd.register_write(instance.write)
collectd.register_shutdown(instance.shutdown)