Example #1
def setup_collectd():
    """
    Registers callback functions with collectd
    """
    collectd.register_init(init)
    collectd.register_config(config)
    collectd.register_shutdown(shutdown)
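The init, config, and shutdown callbacks registered by setup_collectd() are defined elsewhere in that module. As a minimal sketch (an assumption, not part of the original example; the collectd module is only importable inside collectd's embedded Python interpreter), they could look like this:

import collectd

def init():
    collectd.info("plugin initialized")

def config(conf):
    # conf is a collectd.Config node; its children carry the <Module> settings
    for node in conf.children:
        collectd.info("config option %s = %r" % (node.key, node.values))

def shutdown():
    collectd.info("plugin shutting down")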
Example #3
 def __init__(self, name=None):
     self.name = name
     if name:
         collectd.register_config(self.configure, name=self.name)
     else:
         collectd.register_config(self.configure)
     collectd.register_init(self.initialize)
     collectd.register_shutdown(self.shutdown)
Example #4
    def register(cls):
        assert collectd is not None

        LOG.info("Register plugin: %s", cls)

        log_handler = CollectdLogHandler(collectd=collectd)
        log_handler.setFormatter(logging.Formatter(LOG_FORMAT))
        logging.getLogger('collectd_pandas').addHandler(log_handler)

        instance = cls()
        collectd.register_config(instance.configure)
        collectd.register_init(instance.initialize)
        collectd.register_write(instance.write)
        LOG.info("Plugin registered as: %r.", instance)
        return instance
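CollectdLogHandler used above is not shown in the snippet. A plausible minimal implementation (an assumption) is a logging.Handler that forwards Python log records to collectd's own log functions:

import logging

class CollectdLogHandler(logging.Handler):
    # Assumed implementation: route stdlib logging into collectd's logger.
    def __init__(self, collectd):
        super(CollectdLogHandler, self).__init__()
        self._collectd = collectd

    def emit(self, record):
        message = self.format(record)
        if record.levelno >= logging.ERROR:
            self._collectd.error(message)
        elif record.levelno >= logging.WARNING:
            self._collectd.warning(message)
        else:
            self._collectd.info(message)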
Example #5
    def __init__(self, typeinfo):
        self.nameserver = "unknown"
        self.cluster = "none"
        self.ns = None
        self.ip = "0.0.0.0"
        self.publishTimeout = 600
        self.q = multiprocessing.Queue()
        self.qthread = None
        self.typesdb = "/usr/share/collectd/types.db"
        self.types = {}
        self.typeinfo = typeinfo
        self.cachedValues = {}

        collectd.register_config(self.config)
        collectd.register_init(self.init)
        collectd.register_write(self.write)
        collectd.register_shutdown(self.shutdown)
Example #6
	def __init__(self,typeinfo):
		self.nameserver="unknown"
		self.cluster="none"
		self.ns=None
		self.ip="0.0.0.0"
		self.publishTimeout=600
		self.q = multiprocessing.Queue()
		self.qthread = None
		self.typesdb = "/usr/share/collectd/types.db"
		self.types = {}
		self.typeinfo = typeinfo
		self.cachedValues = {}
	
		collectd.register_config(self.config)
		collectd.register_init(self.init)
		collectd.register_write(self.write)
		collectd.register_shutdown(self.shutdown)
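Examples #5 and #6 point self.typesdb at /usr/share/collectd/types.db but do not show how that file is parsed. A hedged sketch of such a parser (not from the original code): each non-comment line of types.db is a type name followed by comma-separated data-source specs of the form name:type:min:max.

def parse_typesdb(path):
    types = {}
    with open(path) as f:
        for line in f:
            line = line.strip()
            if not line or line.startswith('#'):
                continue
            name, specs = line.split(None, 1)
            types[name] = [tuple(spec.strip().split(':'))
                           for spec in specs.split(',')]
    return types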
Example #7
def register_plugin(collectd):
    "Bind plugin hooks to collectd and vice versa"

    config = Config.instance()

    # Set up logging
    log_handler = CollectdLogHandler(collectd=collectd, config=config)
    ROOT_LOGGER.addHandler(log_handler)
    ROOT_LOGGER.setLevel(logging.DEBUG)

    # Create the collectd plugin instance
    instance = Plugin(collectd=collectd, config=config)

    # Register plugin callbacks
    collectd.register_init(instance.init)
    collectd.register_config(instance.config)
    collectd.register_write(instance.write)
    collectd.register_shutdown(instance.shutdown)
Example #8
 def config_cb(self, config, data=None):
     self.config = util.map_collectd_config(config)
     if "Module.config" in self.config:
         self._log("config_cb: {!r}".format(self.config))
     if "Module.init" in self.config:
         collectd.register_init(util.init_closure(self), name=self.__module__)
     if "Module.read" in self.config:
         collectd.register_read(util.read_closure(self), name=self.__module__)
     if "Module.write" in self.config:
         collectd.register_write(util.write_closure(self), name=self.__module__)
     if "Module.notification" in self.config:
         collectd.register_notification(util.notification_closure(self), name=self.__module__)
     if "Module.flush" in self.config:
         collectd.register_flush(util.flush_closure(self), name=self.__module__)
     if "Module.log" in self.config:
         collectd.register_log(util.log_closure(self), name=self.__module__)
     if "Module.shutdown" in self.config:
         collectd.register_shutdown(util.shutdown_closure(self), name=self.__module__)
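util.map_collectd_config used in Example #8 is not shown. A plausible sketch (an assumption) that would produce keys such as "Module.read" is a walk that flattens the collectd.Config tree into a dict keyed by dotted node paths:

def map_collectd_config(config):
    # Assumed helper: flatten a collectd.Config tree into {"Parent.Child": values}.
    mapped = {}

    def walk(node, prefix):
        path = "%s.%s" % (prefix, node.key) if prefix else node.key
        mapped[path] = node.values
        for child in node.children:
            walk(child, path)

    walk(config, "")
    return mapped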
Example #9
def configure(config, data=None):
    """
    Extract the statsd configuration data from the Config object passed in
    by collectd.
    """
    data = {
        'conf': DEFAULTS.copy(),
    }

    # The root node of the config is the Module block. The actual
    # configuration items are in `config.children`.
    for item in config.children:
        key = item.key.lower()

        # First, check if this is an expected configuration option.
        if key not in DEFAULTS:
            collectd.warning('Unexpected configuration key: %s!' % item.key)
            continue

        # Second, check only a single value was provided (we don't expect
        # any multiple-value configuration items, or items with no value).
        value_count = len(item.values)
        if value_count < 1:
            collectd.warning(
                'Must provide a value for configuration key: %s!' %
                item.key)
            continue
        elif value_count > 1:
            collectd.warning('Too many values for configuration key: %s!' %
                             item.key)
            collectd.warning('Expected 1, got %i' % value_count)
            continue

        # We've sanity-checked, so now we can use the value
        data['conf'][key] = item.values[0]

    data['types'] = parse_types(data['conf'].pop('typesdb'))
    collectd.register_init(initialize, data=data)
Example #10
def configure(config, data=None):
    """
    Extract the statsd configuration data from the Config object passed in
    by collectd.
    """
    data = {
        'conf': DEFAULTS.copy(),
    }

    # The root node of the config is the Module block. The actual
    # configuration items are in `config.children`.
    for item in config.children:
        key = item.key.lower()

        # First, check if this is an expected configuration option.
        if key not in DEFAULTS:
            collectd.warning('Unexpected configuration key: %s!' % item.key)
            continue

        # Second, check only a single value was provided (we don't expect
        # any multiple-value configuration items, or items with no value).
        value_count = len(item.values)
        if value_count < 1:
            collectd.warning(
                'Must provide a value for configuration key: %s!' % item.key)
            continue
        elif value_count > 1:
            collectd.warning('Too many values for configuration key: %s!' %
                             item.key)
            collectd.warning('Expected 1, got %i' % value_count)
            continue

        # We've sanity-checked, so now we can use the value
        data['conf'][key] = item.values[0]

    data['types'] = parse_types(data['conf'].pop('typesdb'))
    print("Registering with collectd")
    collectd.register_init(initialize, data=data)
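initialize() in Examples #9 and #10 is not shown; since it is registered with data=data, collectd will call it with the dict built in configure(). A hedged sketch (read_stats is a hypothetical name for the plugin's read callback):

import collectd

def initialize(data=None):
    # data is the dict assembled in configure() above
    collectd.info('statsd plugin configured: %r' % data['conf'])
    collectd.register_read(read_stats, data=data)  # read_stats: hypothetical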
Example #11
    def config(cfg):
        # Handle legacy config (not multiple-endpoint capable)
        if not any([n.key == 'Endpoint' for n in cfg.children]):
            # Create fake intermediary Endpoint node
            cfg.children = (collectd.Config('Endpoint', cfg, ('default', ),
                                            cfg.children), )

        endpoints = []
        for node in cfg.children:
            if node.key == 'Endpoint':
                endpoint = WriteWarp10.config_endpoint(node)
                if endpoint:
                    if any(e['name'] == endpoint['name'] for e in endpoints):
                        collectd.warning('write_warp10 plugin: Duplicate '
                                         'endpoint: %s' % endpoint['name'])
                    else:
                        endpoints.append(endpoint)
            else:
                collectd.warning('write_warp10 plugin: Unknown config key: '
                                 '%s' % node.key)

        if endpoints:
            for e in endpoints:
                ww10 = WriteWarp10(e['url'], e['token'], e['flush_interval'],
                                   e['flush_retry_interval'], e['buffer_size'],
                                   e['default_labels'], e['rewrite_rules'],
                                   e['rewrite_limit'])
                collectd.info('write_warp10 plugin: register init write and '
                              'shutdown functions')
                collectd.register_init(ww10.init,
                                       name='write_warp10/%s' % e['name'])
                collectd.register_write(ww10.write,
                                        name='write_warp10/%s' % e['name'])
                collectd.register_shutdown(ww10.shutdown,
                                           name='write_warp10/%s' % e['name'])
        else:
            collectd.warning('write_warp10 plugin: No valid endpoints found')
Example #12
def restore_sigchld():
    """
    Restore SIGCHLD handler for python <= v2.6
    It will BREAK exec plugin!!!
    See https://github.com/deniszh/collectd-iostat-python/issues/2 for details
    """
    if sys.version_info[0] == 2 and sys.version_info[1] <= 6:
        signal.signal(signal.SIGCHLD, signal.SIG_DFL)


if __name__ == '__main__':
    iostat = IOStat()
    ds = iostat.get_diskstats()

    tbl = string.maketrans('/-%', '___')
    for disk in ds:
        for metric in ds[disk]:
            metric_name = metric.translate(tbl)
            print("%s.%s:%s" % (disk, metric_name, ds[disk][metric]))

    sys.exit(0)
else:
    import collectd

    iomon = IOMon()

    # Register callbacks
    collectd.register_init(restore_sigchld)
    collectd.register_config(iomon.configure_callback)
Example #13
        # Monitor block in config, one per Monitis custom monitor
        elif child.key == 'Monitor':

            # reset defaults
            monitor_delta = monitor_defaults['delta']

            # get monitor block child values
            for mon_child in child.children:
                if mon_child.key in ['Id', 'ID']:
                    monitor_id = str(int(mon_child.values[0]))
                elif mon_child.key == 'Name':
                    monitor_name = mon_child.values[0]
                elif mon_child.key == 'Delta':
                    monitor_delta = mon_child.values[0]
                elif mon_child.key == 'ResultParam':
                    result_param = mon_child.values[0]

            # TODO raise exception if name or ID are missing
            monitors[monitor_name] = deepcopy(monitor_defaults)
            monitors[monitor_name]['id'] = monitor_id
            monitors[monitor_name]['delta'] = monitor_delta
            monitors[monitor_name]['result_param'] = result_param
        else:
            # ignore any unknown directives/blocks
            pass


collectd.register_config(config)
collectd.register_init(writer_init)
Example #14
            else:
                collectd.warning("Unrecognized conf parameter %s - ignoring" % node.values[0])

    def init_callback(self):
        self.client = DockerClient(base_url=self.BASE_URL)

    def read_callback(self):
        for container in self.client.containers():
            if not container["Status"].startswith("Up"):
                continue
            stats = next(self.client.stats(container))
            t = stats["read"]
            for key, value in stats.items():
                klass = self.CLASSES.get(key)
                if klass:
                    klass.read(container, value, t)


plugin = DockerPlugin()

if __name__ == "__main__":
    if len(sys.argv) > 1:
        plugin.BASE_URL = sys.argv[1]
    plugin.init_callback()
    plugin.read_callback()

else:
    collectd.register_config(plugin.configure_callback)
    collectd.register_init(plugin.init_callback)
    collectd.register_read(plugin.read_callback)
Example #15
        if self.includeServerStatsMetrics:
            for root_metric_key in self.includeServerStatsMetrics:
                if root_metric_key in server_status:
                    metrics_to_collect[root_metric_key] = deepcopy(SERVER_STATUS_METRICS[root_metric_key])
        else:
            metrics_to_collect = deepcopy(SERVER_STATUS_METRICS)
        # rename "." lock to be "GLOBAL"
        if "." in metrics_to_collect["locks"]:
            print(SERVER_STATUS_METRICS["locks"])
            global_lock_data = metrics_to_collect["locks"].pop(".")
            metrics_to_collect["locks"]["GLOBAL"] = global_lock_data

            print(SERVER_STATUS_METRICS["locks"])
        for db_name in self.mongo_dbs:
            metrics_to_collect["locks"][db_name] = deepcopy(SERVER_STATUS_METRICS["locks"]["."])

        self.recursive_submit(metrics_to_collect, server_status)


    def publish_data(self):
        self.publish_server_status()
        self.publish_connection_pool_metrics()
        self.publish_dbstats()


mongodb = MongoDB()
collectd.register_read(mongodb.publish_data)
collectd.register_config(mongodb.config)
collectd.register_init(mongodb.connect)
collectd.register_shutdown(mongodb.disconnect)
Example #16
            else:
                collectd.warning('unrecognized ds_type {}'.format(ds_type))
                new_value = value

            sample.values[i] = new_value

        points.extend(sample.values)
        columns.extend(('host', 'type'))
        points.extend((sample.host, sample.type))

        if sample.plugin_instance:
            columns.append('plugin_instance')
            points.append(sample.plugin_instance)

        if sample.type_instance:
            columns.append('type_instance')
            points.append(sample.type_instance)

        data = {'name': sample.plugin, 'columns': columns, 'points': [points]}

        self._queues[identifier].put(data)
        self._flush()


db = InfluxDB()
collectd.register_config(db.config)
collectd.register_flush(db.flush)
collectd.register_init(db.init)
collectd.register_shutdown(db.shutdown)
collectd.register_write(db.write)
Example #17
                        if xmltree.tag == 'InstantaneousDemand':
                            write_to_collectd(getInstantDemandKWh(xmltree))
                            # collectd.debug(getInstantDemandKWh(xmltree))
                        else:
                            # collectd.info("ravencollectd: Unrecognised (not implemented) XML Fragment")
                            # collectd.info(rawxml)
                            pass
                    except Exception as e:
                        collectd.warning(
                            "ravencollectd: Exception triggered: " + str(e))
                    # reset rawxml
                    rawxml = ""
                    return
                # if it starts with a space, it's inside the fragment
                else:
                    # rawxml = rawxml + rawline
                    # collectd.debug("ravencollectd: Normal inner XML Fragment: " + str(rawxml))
                    pass
            else:
                pass
    else:
        collectd.warning(
            "ravencollectd: Was asked to begin reading/writing data without opening connections."
        )


collectd.register_init(initialise_plugin)
collectd.register_config(config_plugin)
collectd.register_read(read_data)
collectd.register_shutdown(close_plugin)
Example #18
 def cb_config(self, config):
     iid = next(self.iid)
     new = Plugin(iid, config)
     self.instances.append(new)
     collectd.register_init(new.cb_init)
Example #19
                data[ceph_cluster]['cluster']['max_latency'] = float(
                    value) * 1000
            elif key == 'Min':
                data[ceph_cluster]['cluster']['min_latency'] = float(
                    value) * 1000

        return data


try:
    plugin = CephLatencyPlugin()
except Exception as exc:
    collectd.error(
        "ceph-latency: failed to initialize ceph latency plugin :: %s :: %s" %
        (exc, traceback.format_exc()))


def configure_callback(conf):
    """Received configuration information"""
    plugin.config_callback(conf)
    collectd.register_read(read_callback, plugin.interval)


def read_callback():
    """Callback triggerred by collectd on read"""
    plugin.read_callback()


collectd.register_init(CephLatencyPlugin.reset_sigchld)
collectd.register_config(configure_callback)
Example #20
    def __init__(self, name):
        self.name = name

        collectd.register_config(self.configure, name=self.name)
        collectd.register_init(self.initialize)
        collectd.register_shutdown(self.shutdown)
Example #21
            collectd.warning('fritzcollectd: Unknown config %s' % node.key)
    CONFIGS.append(FritzCollectd(**params))


def callback_init():
    """ Init callback """
    for config in CONFIGS:
        config.init()


def callback_read():
    """ Read callback """
    for config in CONFIGS:
        try:
            config.read()
        except XMLSyntaxError:
            collectd.warning('fritzcollectd: Invalid data received, '
                             'attempting to reconnect')
            config.init()


def callback_shutdown():
    """ Shutdown callback """
    del CONFIGS[:]


collectd.register_config(callback_configure)
collectd.register_init(callback_init)
collectd.register_read(callback_read)
collectd.register_shutdown(callback_shutdown)
Example #22
        vms_domains.append((d, dom.name()))
        count += 1

    vms_dispatch_one('totals.vm_count', 'absolute', count)

    # For each stat,
    for stat in PLUGIN_STATS:
        key = PLUGIN_STATS[stat][0]
        type = PLUGIN_STATS[stat][1]
        scale = PLUGIN_STATS[stat][2]
        total = 0

        # For each domain, 
        for d, name in vms_domains:
            # Get value and scale
            value = float(vms.commands.get(d, key)[0]) * scale

            # Dispatch
            vms_dispatch_one('domains.' + name + '.' + stat,
                             type, value)

            # Add to total
            total = total + value

        # Dispatch total value
        vms_dispatch_one('totals.' + stat, type, total)

collectd.register_init(vms_collectd_init)
collectd.register_read(vms_collectd_read)
Example #23
            new_watts = {}
            for i, ch in enumerate(chans):
                new_watts['ch%d' % i] = int(
                    ch[0].childNodes[0].childNodes[0].nodeValue)
            self.watts = new_watts

    def latest_watts(self):
        return self.watts

reader_thread = CollectCostReader()

def collectcost_init():
    reader_thread.start()

def collectcost_read():
    watts = reader_thread.latest_watts()

    for ch, w in watts.items():
        val = collectd.Values(plugin='currentcost')
        val.type = 'gauge'
        val.type_instance = ch
        val.values = [w]
        val.dispatch()

def collectcost_shutdown():
    reader_thread.stop()


collectd.register_init(collectcost_init)
collectd.register_read(collectcost_read)
collectd.register_shutdown(collectcost_shutdown)
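CollectCostReader above is only partially shown. A hedged skeleton of such a reader (assumptions throughout): a daemon thread that keeps self.watts updated from the meter and exposes the stop() used by the shutdown callback.

import threading

class CollectCostReader(threading.Thread):
    def __init__(self):
        super(CollectCostReader, self).__init__()
        self.daemon = True
        self.watts = {}
        self._stopped = threading.Event()

    def run(self):
        while not self._stopped.is_set():
            # read and parse the meter's XML here, then update self.watts
            self._stopped.wait(5)

    def stop(self):
        self._stopped.set()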
Example #24
                new_value = value

            sample.values[i] = new_value

        points.extend(sample.values)
        columns.extend(('host', 'type'))
        points.extend((sample.host, sample.type))

        if sample.plugin_instance:
            columns.append('plugin_instance')
            points.append(sample.plugin_instance)

        if sample.type_instance:
            columns.append('type_instance')
            points.append(sample.type_instance)

        data = {'name': sample.plugin,
                'columns': columns,
                'points': [points]}

        self._queues[identifier].put(data)
        self._flush()


db = InfluxDB()
collectd.register_config(db.config)
collectd.register_flush(db.flush)
collectd.register_init(db.init)
collectd.register_shutdown(db.shutdown)
collectd.register_write(db.write)
Example #25
            The data is read from all actions defined in SERVICE_ACTIONS.
            This function returns a dict in the following format:
            {instance: (value_type, value)} where value_type and instance are
            mapped from VALUES and CONVERSION.
        """
        values = {}

        # Don't try to gather data if the connection is not available
        if self._fc is None:
            return values

        # Combine all values available in SERVICE_ACTIONS into a dict
        for service, action in self.SERVICE_ACTIONS:
            values.update(self._fc.call_action(service, action))

        # Construct a dict: {instance: (value_type, value)} from the queried
        # results applying a conversion (if defined)
        result = {
            instance:
            (value_type,
             self.CONVERSION.get(key, lambda x: x)(values.get(key)))
            for key, (instance, value_type) in self.VALUES.items()
        }
        return result


FC = FritzCollectd()
collectd.register_config(FC.callback_configure)
collectd.register_init(FC.callback_init)
collectd.register_read(FC.callback_read)
Example #26
            paramList = self.hosts[hostname]['rrdupdates'].GetVMParamList(uuid)
        for param in paramList:
            if param != '':
                max_time=0
                data=''
                for row in range(self.hosts[hostname]['rrdupdates'].GetRows()):
                    epoch = self.hosts[hostname]['rrdupdates'].GetRowTime(row)
                    if isHost:
                        dv = str(self.hosts[hostname]['rrdupdates'].GetHostData(param,row))
                    else:
                        dv = str(self.hosts[hostname]['rrdupdates'].GetVMData(uuid,param,row))
                    if epoch > max_time:
                        max_time = epoch
                        data = dv
                result[param] = data
        return result

    def _LogVerbose(self, msg):
        ''' Be verbose, if self.verbose is True'''
        if not self.verbose:
            return
        collectd.info('xenserver-collectd [verbose]: %s' % msg)


# Hooks
xenserverCollectd = XenServerCollectd()
collectd.register_config(xenserverCollectd.Config)
collectd.register_init(xenserverCollectd.Connect)
collectd.register_read(xenserverCollectd.Read)
collectd.register_shutdown(xenserverCollectd.Shutdown)
        """Collectd write callback"""
        # pylint: disable=broad-except
        # pass arguments to the writer
        try:
            self._writer.write(vl, data)
        except Exception as exc:
            if collectd is not None:
                collectd.error('Exception during write: %s' % exc)

    def shutdown(self):
        """Shutdown callback"""
        # pylint: disable=broad-except
        collectd.info("SHUTDOWN")
        try:
            self._writer.flush()
        except Exception as exc:
            if collectd is not None:
                collectd.error('Exception during shutdown: %s' % exc)


# The collectd plugin instance
# pylint: disable=invalid-name
instance = Plugin()
# pylint: enable=invalid-name

# Register plugin callbacks
collectd.register_init(instance.init)
collectd.register_config(instance.config)
collectd.register_write(instance.write)
collectd.register_shutdown(instance.shutdown)
Example #28
            m_time *= CONFIG['floor_time_secs']

        measurement = "%s\t%d\t%d" % (metric_name, value, m_time)
        output.append(measurement)

    sensu_queue_measurements(output, data)


def sensu_init():
    """Prepare to send data to Sensu"""
    import threading

    try:
        sensu_parse_types_file(CONFIG['types_db'])
    except:
        msg = '%s: ERROR: Unable to open TypesDB file: %s.' % \
              (PLUGIN_NAME, CONFIG['types_db'])
        raise Exception(msg)

    data_init = {
        'lock': threading.Lock(),
        'last_flush_time': get_time(),
        'output': [],
    }

    collectd.register_write(sensu_write, data=data_init)


collectd.register_config(sensu_config)
collectd.register_init(sensu_init)
Example #29
        metrics_list = list(metrics(vl, config))
        ts = datetime.fromtimestamp(vl.time)
        data = []

        for i, v in enumerate(vl.values):
            fullname, unit, dims = metrics_list[i]
            name = fullname[:255]
            if len(name) < len(fullname):
                collectd.warning('Metric name was truncated for CloudWatch: {}'.format(fullname))

            data.append(dict(
                MetricName=name,
                Timestamp=ts,
                Value=v,
                Unit=unit,
                Dimensions=dims
            ))

        client.put_metric_data(Namespace=vl.plugin, MetricData=data)
    except Exception as e:
        collectd.error(str(e))

def plugin_init():
    collectd.info('Initializing write_cloudwatch')
    signal.signal(signal.SIGCHLD, signal.SIG_DFL)

config = Config()
collectd.register_config(plugin_config, config)
collectd.register_init(plugin_init)
collectd.register_write(plugin_write, config)
Example #30
    for sensor in sensors:
        sensor_path = '%s/%s/%s' % (OWFS_PATH, bus, sensor)
        try:
            sensor_value = float(read_file('%s/fasttemp' % sensor_path))
            val = collectd.Values(plugin=PLUGIN)
            val.type = 'temperature'
            val.plugin_instance = sensor
            val.values = [sensor_value]
            val.dispatch(interval=INTERVAL)
        except Exception as e:
            log_warning("could not read from sensor %s (%s)" % (sensor_path, e))

def collectd_configure(configuration):
    global OWFS_PATH, INTERVAL

    for node in configuration.children:
        if node.key.upper() == 'OWFSPATH':
            OWFS_PATH = str(node.values[0])
        elif node.key.upper() == 'INTERVAL':
            INTERVAL = int(node.values[0])

def collectd_init():
    discovered_busses = [bus for bus in os.listdir(OWFS_PATH) if bus.startswith('bus.')]

    for bus in discovered_busses:
        collectd.register_read(process_bus, data=bus, interval=INTERVAL, name='python.%s.%s' % (process_bus.__module__, bus))
        log_info("registered %s" % bus)

collectd.register_config(collectd_configure)
collectd.register_init(collectd_init)
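The read_file(), log_info(), and log_warning() helpers used above are not shown. Minimal sketches (assumptions; the log prefix is made up):

def read_file(path):
    with open(path) as f:
        return f.read().strip()

def log_info(msg):
    collectd.info('owfs plugin: %s' % msg)  # prefix is an assumption

def log_warning(msg):
    collectd.warning('owfs plugin: %s' % msg)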
Example #31
                    new_value = value - old_value

                if (isinstance(new_value, (float, int)) and
                        data['differentiate_values_over_time']):
                    interval = time - old_time
                    if interval < 1:
                        interval = 1
                    new_value = new_value / interval

            # update previous value
            data['values'][metric] = ( time, value )

        else:
            new_value = value

        if new_value is not None:
            line = '%s %f %d' % ( metric, new_value, time )
            lines.append(line)

        i += 1

    data['lock'].release()

    lines.append('')
    carbon_write_data(data, '\n'.join(lines))

collectd.register_config(carbon_config)
collectd.register_init(carbon_init)

Example #32
"""
CollectdCloudWatchPlugin plugin
"""
import collectd
import traceback

from cloudwatch.modules.configuration.confighelper import ConfigHelper
from cloudwatch.modules.flusher import Flusher
from cloudwatch.modules.logger.logger import get_logger
from cloudwatch.modules.collectd_integration.dataset import get_dataset_resolver


_LOGGER = get_logger(__name__)

def aws_init():
    """
    Collectd callback entry used to initialize plugin
    """
    config = ConfigHelper()
    flusher = Flusher(config_helper=config, dataset_resolver=get_dataset_resolver())
    collectd.register_write(aws_write, data=flusher)
    _LOGGER.info('Initialization finished successfully.')

def aws_write(vl, flusher):
    """
    Collectd callback entry used to write metric data
    """
    flusher.add_metric(vl)

collectd.register_init(aws_init)
Example #33
    if [vl.plugin,vl.type,vl.plugin_instance,vl.type_instance] in d[vl.host]:
      pass
    else:
      # add service
      d[vl.host].append([vl.plugin,vl.type,vl.plugin_instance,vl.type_instance])
      pluginname = vl.plugin + "/" + vl.type
      if len(vl.plugin_instance) != 0:
        pluginname = vl.plugin + "-" + vl.plugin_instance + "/" + vl.type
      if len(vl.type_instance) != 0:
        pluginname = pluginname + "-" + vl.type_instance
      sendCommand("-o SERVICE -a add -v \""+vl.host+";"+pluginname+";generic-service\"")
      sendCommand("-o SERVICE -a setparam -v \""+vl.host+";"+pluginname+";command;check_collectd\"")
      sendCommand("-o SERVICE -a setparam -v \""+vl.host+";"+pluginname+";normal_check_interval;1\"")
      sendCommand("-o SERVICE -a setparam -v \""+vl.host+";"+pluginname+";retry_check_interval;5\"")
      sendCommand("-o SERVICE -a setparam -v \""+vl.host+";"+pluginname+";check_period;24x7\"")
      sendCommand("-o SERVICE -a setparam -v \""+vl.host+";"+pluginname+";max_check_attempts;5\"")
      sendCommand("-o SERVICE -a setparam -v \""+vl.host+";"+pluginname+";active_checks_enabled;1\"")
      sendCommand("-o SERVICE -a setparam -v \""+vl.host+";"+pluginname+";notif_period;24x7\"")
      sendCommand("-o SERVICE -a setparam -v \""+vl.host+";"+pluginname+";args;!"+pluginname+"!\"")
      sendCommand("-o SERVICE -a setcg -v \""+vl.host+";"+pluginname+";Supervisors\"")

      # Regenerates the centreon configuration
      #TODO: currently we flood the centreon regenerating each service that we add.
      sendCommand("-a POLLERGENERATE -v 1")
      sendCommand("-a CFGMOVE -v 1")
      sendCommand("-a POLLERRELOAD -v 1")
      

collectd.register_init(init)
collectd.register_write(write)
Example #34
        data[ceph_cluster]['cluster'] = {}
        for key, value in results:
            if key == 'Average':
                data[ceph_cluster]['cluster']['avg_latency'] = float(value) * 1000
            elif key == 'Stddev':
                data[ceph_cluster]['cluster']['stddev_latency'] = float(value) * 1000
            elif key == 'Max':
                data[ceph_cluster]['cluster']['max_latency'] = float(value) * 1000
            elif key == 'Min':
                data[ceph_cluster]['cluster']['min_latency'] = float(value) * 1000

        return data

try:
    plugin = CephLatencyPlugin()
except Exception as exc:
    collectd.error("ceph-latency: failed to initialize ceph latency plugin :: %s :: %s"
            % (exc, traceback.format_exc()))

def configure_callback(conf):
    """Received configuration information"""
    plugin.config_callback(conf)
    collectd.register_read(read_callback, plugin.interval)

def read_callback():
    """Callback triggerred by collectd on read"""
    plugin.read_callback()

collectd.register_init(CephLatencyPlugin.reset_sigchld)
collectd.register_config(configure_callback)
Example #35
            }

        if ds_type == 'GAUGE':
            gauges.append(measurement)
        else:
            counters.append(measurement)

    librato_queue_measurements(gauges, counters, data)

def librato_init():
    import threading

    try:
        librato_parse_types_file(config['types_db'])
    except:
        msg = '%s: ERROR: Unable to open TypesDB file: %s.' % \
              (plugin_name, config['types_db'])
        raise Exception(msg)

    d = {
        'lock' : threading.Lock(),
        'last_flush_time' : get_time(),
        'gauges' : [],
        'counters' : []
        }

    collectd.register_write(librato_write, data=d)

collectd.register_config(librato_config)
collectd.register_init(librato_init)
Example #36
        elif kv.key == 'MinCPUPercent':
            if int(kv.values[0]) == 0 or int(kv.values[0]) > 100:
                raise Exception('invalid value for ' + kv.key)
            MIN_CPU_USAGE_PERCENT = int(kv.values[0])
        elif kv.key == 'MinMemoryPercent':
            if int(kv.values[0]) == 0 or int(kv.values[0]) > 100:
                raise Exception('invalid value for ' + kv.key)
            MIN_MEM_USAGE_PERCENT = int(kv.values[0])
        elif kv.key == 'ReportDockerContainerNames':
            REPORT_DOCKER_CONTAINER_NAMES = kv.values[0]
            if type(REPORT_DOCKER_CONTAINER_NAMES).__name__ != 'bool':
                REPORT_DOCKER_CONTAINER_NAMES = str2bool(kv.values[0])
        else:
            raise Exception('unknown config parameter')
    collectd.register_read(send_metrics)


def write_metrics(mmaps, plugin_name):
    for name, mmap in mmaps.items():
        for metric, val in mmap.items():
            write_val(plugin_name, name, metric, val)


def send_metrics():
    pmaps = get_processes_info()
    write_metrics(pmaps, PROCESS_PLUGIN_NAME)


collectd.register_init(process_watch_init)
collectd.register_config(process_watch_config)
Example #37
                    type_instance in self.include):
                self.dispatch_value(val_type, type_instance, value)


def restore_sigchld():
    """
    Restore SIGCHLD handler for python <= v2.6
    It will BREAK exec plugin!!!
    See https://github.com/deniszh/collectd-iostat-python/issues/2 for details
    """
    if sys.version_info[0] == 2 and sys.version_info[1] <= 6:
        signal.signal(signal.SIGCHLD, signal.SIG_DFL)


if __name__ == '__main__':
    vmstat = VMStat()
    ds = vmstat.get_vmstats()

    for metric in ds:
        print("%s:%s" % (metric, ds[metric]))

    sys.exit(0)
else:
    import collectd

    vmmon = VMMon()

    # Register callbacks
    collectd.register_init(restore_sigchld)
    collectd.register_config(vmmon.configure_callback)
Example #38
    val = convert_unit_to_bytes(val, "KB", 1)
    val = convert_unit_to_bytes(val, "MB", 2)
    val = convert_unit_to_bytes(val, "GB", 3)
    val = convert_unit_to_bytes(val, "TB", 4)
    val = convert_unit_to_bytes(val, "PB", 5)
    return val

def convert_unit_to_bytes(val, unit, power):
    m = re.search(r'([0-9.]+) ' + unit, val, re.I)
    if m:
        return str(int(float(m.group(1)) * (1024 ** power)))
    return val

def str2bool(v):
    if isinstance(v, bool):
        return v
    return v.lower() in ("yes", "true", "t", "1")

def my_debug(msg):
    if CONF['debug']:
        collectd.info('ScaleIO: %s' % (msg))

def my_verbose(msg):
    if CONF['verbose']:
        collectd.info('ScaleIO: %s' % (msg))

# register callback functions
collectd.register_config(config_callback)
collectd.register_init(init_callback)
collectd.register_read(read_callback)
Example #39
   collectd.info('buddyinfo plugin: configuring host: %s' % (host_name)) 

def initer():
   get_host_type()
   collectd.info('buddyinfo plugin: host of type: %s' % (host_type))
   collectd.info('buddyinfo initer: white list: %s ' % (white_list))
   init_stats_cache()
   collectd.info('buddyinfo init: stats_cache: %s ' % (stats_cache))

def reader(input_data=None):
   collect_buddyinfo()
   swap_current_cache()

def writer(metric, data=None):
   for i in metric.values:
      collectd.debug("%s (%s): %f" % (metric.plugin, metric.type, i))

def shutdown():
   collectd.info("buddyinfo plugin shutting down")

#== Callbacks ==#
if (os_name == 'Linux'):
   collectd.register_config(configer)
   collectd.register_init(initer)
   collectd.register_read(reader)
   collectd.register_write(writer)
   collectd.register_shutdown(shutdown)
else:
   collectd.warning('buddyinfo plugin currently works for Linux only')

Example #40
            dict_cassandra = self.collect_data()
            if not dict_cassandra:
                collectd.error(
                    "Plugin CASSANDRA: Unable to fetch data for CASSANDRA.")
                return

            # dispatch data to collectd, copying by value
            self.dispatch_data(deepcopy(dict_cassandra))
        except Exception as e:
            collectd.error(
                "Couldn't read and gather the cassandra metrics due to the exception :%s due to %s"
                % (e, traceback.format_exc()))
            return

    def read_temp(self):
        """Collectd first calls register_read. At that time default interval is taken,
        hence temporary function is made to call, the read callback is unregistered
        and read() is called again with interval obtained from conf by register_config callback."""
        collectd.unregister_read(self.read_temp)
        collectd.register_read(self.read, interval=int(self.interval))


def init():
    """When new process is formed, action to SIGCHLD is reset to default behavior."""
    signal.signal(signal.SIGCHLD, signal.SIG_DFL)


OBJ = CassandraStats()
collectd.register_init(init)
collectd.register_config(OBJ.config)
collectd.register_read(OBJ.read_temp)
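The read_temp trick in Example #40 generalizes to any plugin whose read interval only becomes known in the config callback. A hedged standalone sketch of the same pattern:

import collectd

class IntervalPlugin(object):
    def __init__(self):
        self.interval = 10  # default, overwritten by config()

    def config(self, conf):
        for node in conf.children:
            if node.key == 'Interval':
                self.interval = int(node.values[0])

    def read_temp(self):
        # swap the throwaway callback for the real one at the right interval
        collectd.unregister_read(self.read_temp)
        collectd.register_read(self.read, interval=self.interval)

    def read(self):
        pass  # dispatch values here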
Example #41
    return


def flush_cb(timeout, identifier, data=None):
    return

def log_cb(severity, message, data=None):
    return


## Register the call-back functions

data = "stub-string"         # placeholder
name = init_cb.__module__    # the default
interval = 10                # the default

collectd.register_config(config_cb, data, name)
collectd.register_init(init_cb, data, name)
collectd.register_shutdown(shutdown_cb, data, name)

collectd.register_read(read_cb, interval, data, name)
collectd.register_write(write_cb, data, name)
collectd.register_notification(notification_cb, data, name)

collectd.register_flush(flush_cb, data, name)
collectd.register_log(log_cb, data, name)

## Local Variables:
## mode: python
## End:
Example #42
                self.dispatch_value(self.plugin_name, host,
                                    "mod_dl_dst_n_packets", key, value[1])
        except Exception as exp:
            self.log_verbose(traceback.format_exc())
            self.log_verbose("plugin %s ran into an exception" % self.plugin_name)
            self.log_verbose(str(exp))


if __name__ == '__main__':
    stat_1_1 = parse_table_in("1,2")
    stat_6_1 = parse_table_6(6)
    print('***********')
    print(stat_1_1)
    print(stat_6_1)
    import time
    time.sleep(5)
    stat_1_2 = parse_table_in("1,2")
    stat_6_2 = parse_table_6(6)
    print('***********')
    print(stat_1_2)
    print(stat_6_2)

    print('***********')
    print(get_delta_value(stat_1_1, stat_1_2))
    print(get_delta_value(stat_6_1, stat_6_2))
else:
    import collectd
    vrouter_status = VRouterTrafficStatMon()
    collectd.register_config(vrouter_status.configure_callback)
    collectd.register_init(vrouter_status.init)
    collectd.register_read(vrouter_status.read_callback)
Example #43
            print('PUTVAL', identifier,
                  ':'.join(map(str, [int(self.time)] + self.values)))

    class ExecCollectd:
        def Values(self):
            return ExecCollectdValues()

        def warning(self, msg):
            print('WARNING:', msg)

        def info(self, msg):
            print('INFO:', msg)

        def register_read(self, docker_plugin):
            pass

    collectd = ExecCollectd()
    plugin = DockerPlugin()
    if len(sys.argv) > 1:
        plugin.docker_url = sys.argv[1]

    if plugin.init_callback():
        plugin.read_callback()

# Normal plugin execution via CollectD
else:
    import collectd
    plugin = DockerPlugin()
    collectd.register_config(plugin.configure_callback)
    collectd.register_init(plugin.init_callback)
Example #44
        except Exception as e:
            collectd.error(
                "read_docker_used_cpu_cores() failed, exception: {}".format(e))

    def read(self):
        self.read_docker_used_cpu_cores()

    def shutdown(self):
        collectd.info("node_monitor plugin has been shutdown.")

    def __calculate_cpu_cores(self, stats):
        '''
        Unit: CPU Cores
        '''
        cpu_delta = stats["cpu_stats"]["cpu_usage"]["total_usage"] - stats[
            "precpu_stats"]["cpu_usage"]["total_usage"]
        system_delta = stats["cpu_stats"]["system_cpu_usage"] - stats[
            "precpu_stats"]["system_cpu_usage"]
        if cpu_delta > 0 and system_delta > 0:
            return (float(cpu_delta) / system_delta
                    ) * len(stats["cpu_stats"]["cpu_usage"]["percpu_usage"])

        return 0


if __name__ != "__main__":
    node_monitor = Plugin()
    collectd.register_init(node_monitor.init)
    collectd.register_read(node_monitor.read, node_monitor.READ_INTERVAL)
    collectd.register_shutdown(node_monitor.shutdown)
Example #45
            if i > 0:
                json += ','

            json += '{'
            json += '"name":"%s",' % new_name
            json += '"datapoints":[[%d, %f]],' % (timestamp, new_value)
            json += '"tags": {'

            first = True
            for tn, tv in tags.items():
                if first:
                    first = False
                else:
                    json += ", "

                json += '"%s": "%s"' % (tn, tv)
                
            json += '}'

            json += '}'
        i += 1

    json += ']'

    collectd.debug(json)
    kairosdb_send_http_data(data, json)


collectd.register_config(kairosdb_config)
collectd.register_init(kairosdb_init)
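The snippet above assembles JSON by string concatenation, which breaks on names or tag values that need escaping. A hedged alternative (assuming the same payload shape) builds the structure natively and serializes it once:

import json

def build_payload(points):
    # points: iterable of (name, timestamp, value, tags) tuples
    return json.dumps([{'name': name,
                        'datapoints': [[ts, value]],
                        'tags': tags}
                       for name, ts, value, tags in points])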
Example #46
            else:
                collectd.warning('%s plugin: Unknown config key: %s.' % (self.plugin_name, node.key))
    
    def check_url(self, url):
        response = urllib2.urlopen(url, timeout=3)
        code = response.getcode()
        if 200 <= code < 400:
            values = [1]
        else:
            values = [0]

        metric = collectd.Values()
        metric.plugin = 'collect_url_check'
        metric.type = 'gauge'
        metric.values = values
        metric.dispatch()
            
    def read(self):
        for url in self.urls:
            self.check_url(url)
    
    
if __name__ == '__main__':
    pass

else:
    url_checker = URLChecker()
    collectd.register_init(url_checker.init)
    collectd.register_config(url_checker.config)
    collectd.register_read(url_checker.read)
Example #47
            with Client() as c:
                temperatures = c.call('disk.temperatures', self.disks,
                                      self.powermode)

            for disk, temp in temperatures.items():
                if temp is not None:
                    self.dispatch_value(disk,
                                        'temperature',
                                        temp,
                                        data_type='temperature')
        except CallTimeout:
            collectd.error("Timeout collecting disk temperatures")
        except Exception:
            collectd.error(traceback.format_exc())

    def dispatch_value(self, name, instance, value, data_type=None):
        val = collectd.Values()
        val.plugin = 'disktemp'
        val.plugin_instance = name
        if data_type:
            val.type = data_type
        val.values = [value]
        val.meta = {'0': True}
        val.dispatch(interval=READ_INTERVAL)


disktemp = DiskTemp()

collectd.register_init(disktemp.init)
collectd.register_read(disktemp.read, READ_INTERVAL)
Example #48
    metric['interval'] = v.interval

    # prepare metric values lists
    metric['values'] = []
    metric['dstypes'] = []
    metric['dsnames'] = []

    # we update shared recorded values, so lock to prevent race conditions
    data['lock'].acquire()

    i = 0
    for value in v.values:
        ds_name = v_type[i][0]
        ds_type = v_type[i][1]

        metric['dsnames'].append(ds_name)
        metric['dstypes'].append(ds_type)
        metric['values'].append(str_to_num(value))

        i += 1

    data['hotqueue'].put([
        metric,
    ])

    data['lock'].release()


collectd.register_config(redis_queue_config)
collectd.register_init(redis_queue_init)
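redis_queue_init and the shared data dict above are not shown. A plausible sketch (assuming the hotqueue package and a Redis instance on localhost; the queue name is made up):

import threading
from hotqueue import HotQueue

data = {
    'lock': threading.Lock(),
    'hotqueue': HotQueue('collectd-metrics', host='localhost', port=6379),
}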
Example #49
                             alarm_state=_alarm_state,
                             entity_type_id=fm_constants.FM_ENTITY_TYPE_HOST,
                             entity_instance_id=obj.entity_id,
                             severity=_severity_num,
                             reason_text=reason,
                             alarm_type=base_obj.alarm_type,
                             probable_cause=base_obj.cause,
                             proposed_repair_action=base_obj.repair,
                             service_affecting=base_obj.service_affecting,
                             suppression=base_obj.suppression)

        alarm_uuid = api.set_fault(fault)
        if is_uuid_like(alarm_uuid) is False:
            collectd.error("%s %s:%s set_fault failed:%s" %
                           (PLUGIN, base_obj.id, obj.entity_id, alarm_uuid))
            return 0

    # update the lists now that
    base_obj._manage_alarm(obj.entity_id, severity_str)

    collectd.info("%s %s alarm %s:%s %s:%s thld:%2.2f value:%2.2f" %
                  (PLUGIN, _alarm_state, base_obj.id, severity_str,
                   obj.instance, obj.entity_id, obj.threshold, obj.value))

    # Debug only: comment out for production code.
    # obj._state_audit("change")


collectd.register_init(init_func)
collectd.register_notification(notifier_func)
Example #50
def generate_metrics(
    target,
    success,
    failed,
    latency,
):
    droprate = failed / float(success + failed)
    v = collectd.Values(plugin='exping_udp',
                        type='exping_udp',
                        plugin_instance=target)
    if latency.size > 0:
        v.dispatch(values=[
            droprate,
            np.mean(latency),
            np.std(latency),
            np.amin(latency),
            np.amax(latency),
            np.percentile(latency, 99),
            np.percentile(latency, 95),
            np.percentile(latency, 90),
        ])
    else:
        v.dispatch(values=[droprate, 0, 0, 0, 0, 0, 0, 0])


collectd.register_config(read_config)
collectd.register_read(read_data)
collectd.register_init(start_monitoring)
collectd.register_shutdown(shutdown)

Example #51
def init_callback():
    """Initialization block"""
    global config
    connect(config)
    log_verbose('Got a valid connection to Heat API')


def read_callback(data=None):
    global config
    if 'util' not in config:
        log_warning("Connection has not been done. Retrying")
        connect(config)

    try:
        info = config['util'].get_stats()
        log_verbose(pformat(info))
        for key, value in info.items():
            dispatch_value(value, key, 'heat', config['util'].last_stats, '',
                           '', 'openstack')
    except Exception as e:
        log_warning("Problem while reading, trying to authenticate (%s)" % e)
        log_warning("Trying to reconnect (%s)" % e)
        connect(config)


collectd.register_config(configure_callback)
collectd.register_init(init_callback)
collectd.register_read(read_callback)
Example #52
                    private += int(line.split()[1])
                elif line.startswith("Pss"):
                    pss += 0.5 + float(line.split()[1])

            F.close()

            if pss > 0:
                shared = pss - private

            M.values = [1024 * int(private + shared)]  # in bytes

        else:
            # rough, but quick estimate
            # I'd use `with` statement, but not sure if it's present in Python 2.6
            statm = open("/proc/%s/statm" % pid, "rt")
            S = statm.readline().split()
            statm.close()
            statm = S

            shared = int(statm[2]) * PAGESIZE
            Rss = int(statm[1]) * PAGESIZE
            private = Rss - shared
            M.values = [int(private) + int(shared)]

        M.dispatch()


collectd.register_config(config_memory)
collectd.register_init(init_memory)
collectd.register_read(read_memory)
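PAGESIZE used in the /proc/<pid>/statm branch above is defined elsewhere; one common way to obtain it (an assumption about the original):

import resource

PAGESIZE = resource.getpagesize()  # bytes per page, typically 4096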
Example #53
        errorIndication = ntfOrg.sendNotification(
            ntforg.CommunityData(host['community']),
            ntforg.UdpTransportTarget(
                (host['hostname'], host['port'])), 'trap',
            ntforg.MibVariable('SNMPv2-MIB', host['oid']),
            (ntforg.MibVariable('SNMPv2-MIB', 'sysName', 0), 'new name'))
        if errorIndication:
            self.log('info', 'Notification was not sent: %s' % errorIndication)

    def wallarm_snmp_notify(self, notification):
        hosts = self.get_dest_hosts()
        for host in hosts:
            thr = Thread(target=send_trap, args=(host, notification))
            thr.start()

    def wallarm_snmp_notify_config(self, cfg):
        # TODO(adanin) Add a real config parsing
        self.config['configured'] = True

    def wallarm_snmp_notify_init(self):
        if self.config['configured']:
            collectd.register_notification(self.wallarm_snmp_notify)
        else:
            self.log('warning',
                     'A configuration error occurred; aborting initialization')


plugin = WallarmSNMPNotify(plugin_name)
collectd.register_config(plugin.wallarm_snmp_notify_config)
collectd.register_init(plugin.wallarm_snmp_notify_init)
Example #54
        inP = portstat.parse_inPorts(port_object)
        self.sendToCollect('derive', port_name + inPn, inP)

    def read_callback(self):

        print("Read callback called")
        portstat = PortStat()
        ports = portstat.get_portstats("localhost")
        for port_object in ports:
            self.collectStats(portstat, port_object)


if __name__ == '__main__':
    portstat = PortStat()
    portmon = PortMon()
    ports = portstat.get_portstats("localhost")
    for port_object in ports:
        portmon.collectStats(portstat, port_object)

    sys.exit(0)
else:
    import collectd

    portmon = PortMon()

    # Register callbacks

    collectd.register_init(portmon.init_callback)
    collectd.register_config(portmon.configure_callback)
    collectd.register_read(portmon.read_callback)
Example #55
        except Exception as exp:
            self.log_verbose(traceback.format_exc())
            self.log_verbose("plugin %s ran into an exception" % self.plugin_name)
            self.log_verbose(str(exp))


if __name__ == '__main__':
    print "************\n"
    # print str(get_intergrp_stat())
    # print "***********\n"
    # print str(get_internet_stat())
    # print "************\n"
    # print str(get_intragrp_stat())
    # print "**********\n"
    # print str(get_policy_stat())
    # print '------------------\n'
    a = get_internet_stat()
    print a
    import time
    time.sleep(10)
    print '------------------\n'
    b = get_internet_stat()
    c = get_delta_value(a, b)
    print '---delta value----'
    print c
else:
    import collectd
    firewall_status = FireWallUserStatMon()
    collectd.register_config(firewall_status.configure_callback)
    collectd.register_init(firewall_status.init)
    collectd.register_read(firewall_status.read_callback)
Example #56
        json_data = json.loads(output)

        data[ceph_cluster]['mon']['number'] = len(json_data['mons'])
        data[ceph_cluster]['mon']['quorum'] = len(json_data['quorum'])

        return data


try:
    plugin = CephMonPlugin()
except Exception as exc:
    collectd.error(
        "ceph-mon: failed to initialize ceph mon plugin :: %s :: %s" %
        (exc, traceback.format_exc()))


def configure_callback(conf):
    """Received configuration information"""
    plugin.config_callback(conf)
    collectd.register_read(read_callback, plugin.interval)


def read_callback():
    """Callback triggerred by collectd on read"""
    plugin.read_callback()


collectd.register_init(CephMonPlugin.reset_sigchld)
collectd.register_config(configure_callback)
Example #57
class Plugin(object):
    DOCKER_PS_URL = "http://docker.lain:2375/containers/json"
    READ_INTERVAL = 60  # 60 seconds
    TIMEOUT = 5  # 5 seconds

    def init(self):
        collectd.info("docker_daemon_monitor plugin has been initialized.")

    def read(self):
        metric = collectd.Values()
        metric.plugin = "lain.cluster.docker_daemon"
        metric.plugin_instance = "docker_ps_time"
        metric.type = "val"
        start_at = time.time()
        requests.get(
            self.DOCKER_PS_URL, params={"limit": 1}, timeout=self.TIMEOUT)
        docker_ps_time = time.time() - start_at
        metric.values = [docker_ps_time]
        metric.dispatch()

    def shutdown(self):
        collectd.info("docker_daemon_monitor plugin has been shutdown.")


docker_daemon = Plugin()

if __name__ != "__main__":
    collectd.register_init(docker_daemon.init)
    collectd.register_read(docker_daemon.read, docker_daemon.READ_INTERVAL)
    collectd.register_shutdown(docker_daemon.shutdown)
Example #58
def get_stats():
    '''
    Collectd routine to actually get and dispatch the statistics
    '''
    # If we are not correctly initialized, initialize us once more.
    # Something happened after the first init and we have lost state
    if 'client' not in data or data['client'] is None:
        shutdown()
        initialize()
    # And let's fetch our data
    try:
        stats = data['client'].session_stats()
    except transmissionrpc.error.TransmissionError:
        shutdown()
        initialize()
        return  # On this run, just fail to return anything
    # Let's get our data
    for category, catmetrics in metrics.items():
        for metric in catmetrics:
            vl = collectd.Values(type='gauge',
                                 plugin=PLUGIN_NAME,
                                 type_instance='%s-%s' % (category, metric))
            vl.dispatch(values=[field_getter(stats, metric, category)])


# Register our functions
collectd.register_config(config)
collectd.register_init(initialize)
collectd.register_read(get_stats)
collectd.register_shutdown(shutdown)
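field_getter() used in get_stats() above is not shown. A hedged sketch (assuming transmissionrpc's session_stats() object exposes per-category dicts such as cumulative_stats, plus plain attributes):

def field_getter(stats, metric, category):
    try:
        return getattr(stats, '%s_stats' % category)[metric]
    except (AttributeError, KeyError, TypeError):
        return getattr(stats, metric, 0)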