Example #1
def config_callback(conf):
    collectd.debug('config callback')
    for node in conf.children:
        key = node.key.lower()
        values = node.values
        collectd.debug('Reading config %s: %s' % (key, " ".join(str(v) for v in values)))

        if key == 'debug':
            CONF['debug'] = str2bool(values[0])
        elif key == 'verbose':
            CONF['verbose'] = str2bool(values[0])
        elif key == 'cluster':
            CONF['cluster'] = values[0]
        elif key == 'pools':
            CONF['pools'] = values
        elif key == 'scli_wrap':
            CONF['scli_wrap'] = values[0]
        elif key == 'user':
            CONF['scli_user'] = values[0]
        elif key == 'password':
            CONF['scli_password'] = values[0]
        elif key == 'ignoreselected':
            CONF['ignoreselected'] = str2bool(values[0])
        else:
            collectd.warning('ScaleIO: unknown config key: %s' % (key))
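For context, this callback would be driven by a <Module> block in collectd.conf along these lines (a hypothetical sketch; the module name and values are illustrative, and key matching above is case-insensitive):

<Plugin python>
    Import "scaleio"
    <Module scaleio>
        Cluster "cluster01"
        Pools "pool01" "pool02"
        User "admin"
        Password "secret"
        Verbose false
        IgnoreSelected false
    </Module>
</Plugin>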
Example #2
def ts3_read():
	global PLUGIN_NAME

	global TYPE_GAUGE
	global TYPE_BYTES

	collectd.debug('ts3_read:')

	# Reconnect if connection was lost or TS3 is not up yet
	try:
		__connectTS3()
	except Exception as e:
		collectd.debug('FAILED\n')
		collectd.warning(str(e) + '\n')

	try:
	stats = __getStatsTS3()

		collectd.debug('ok')

		for sid in stats.keys():
			server = stats[sid]
			
			__newCollectdValue(PLUGIN_NAME, TYPE_GAUGE, 'ts3vs' + str(sid) + '_clients_online', [server['clients_online']])
			__newCollectdValue(PLUGIN_NAME, TYPE_GAUGE, 'ts3vs' + str(sid) + '_filetransfer_count', [server['filetransfer_count']])
			__newCollectdValue(PLUGIN_NAME, TYPE_BYTES, 'ts3vs' + str(sid) + '_filetransfer_speed', [server['filetransfer_speed']])
	except Exception as e:
		collectd.warning(str(e))
		collectd.debug('SKIP')

	collectd.debug('\n')
Example #3
    def dispatch_nodes(self):
        """
        Dispatches nodes stats.
        """
        name = self.generate_vhost_name('')
        node_names = []
        stats = self.rabbit.get_nodes()
        collectd.debug("Node stats for {} {}".format(name, stats))
        for node in stats:
            node_name = node['name'].split('@')[1]
            if node_name in node_names:
                # If we have already seen this node_name, append an index to keep it unique
                node_name = '%s%s' % (node_name, len(node_names))
            node_names.append(node_name)
            collectd.debug("Getting stats for %s node" % node_names)
            for stat_name in self.node_stats:
                value = node.get(stat_name, 0)
                self.dispatch_values(value, name, node_name, None, stat_name)

                details = node.get("%s_details" % stat_name, None)
                if not details:
                    continue
                for detail in self.message_details:
                    value = details.get(detail, 0)
                    self.dispatch_values(value, name, node_name, None,
                                         "%s_details" % stat_name, detail)
Example #4
    def write(self, values_dict):
        collectd.debug('%s.write_callback: values_object=%s' % ('$NAME', values_dict))

        try:
            self.buffer.put_nowait(values_dict)
        except queue.Full:
            collectd.notice("%s output buffer full" % (self))
Example #5
    def dispatch_message_stats(self, data, vhost, plugin, plugin_instance):
        """
        Sends message stats to collectd.
        """
        if not data:
            collectd.debug("No data for %s in vhost %s" % (plugin, vhost))
            return

        vhost = self.generate_vhost_name(vhost)

        if 'message_stats' not in data:
            return

        for name in self.message_stats:
            collectd.debug("Dispatching stat %s for %s in %s" %
                           (name, plugin_instance, vhost))

            value = data['message_stats'].get(name, 0)
            self.dispatch_values(value, vhost, plugin, plugin_instance, name)

            details = data['message_stats'].get("%s_details" % name, None)
            if not details:
                continue
            for detail in self.message_details:
                self.dispatch_values(
                    (details.get(detail, 0)), vhost, plugin, plugin_instance,
                    "%s_details" % name, detail)
Example #6
def kairosdb_write_telnet_metrics(data, types_list, v, name, tags):
    timestamp = v.time
    
    tag_string = ""
    
    for tn, tv in tags.items():
        tag_string += "%s=%s " % (tn, tv)

    lines = []
    for i, value in enumerate(v.values):
        ds_name = types_list[i][0]
        new_name = "%s.%s" % (name, ds_name)
        collectd.debug("metric new_name= %s" % new_name)

        if value is not None:
            line = 'put %s %d %f %s' % (new_name, timestamp, value, tag_string)
            collectd.debug(line)
            lines.append(line)

    lines.append('')
    kairosdb_send_telnet_data(data, '\n'.join(lines))
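For reference, the payload built above follows KairosDB's telnet line protocol, one "put" command per data source; hypothetical output (the metric names, timestamp, and tags are illustrative):

put collectd.cpu.0.idle 1432083347 98.500000 host=web01
put collectd.load.shortterm 1432083347 0.050000 host=web01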
Example #7
def write(vl, datas=None):
    global cw_ec2, NAMESPACE, METRICS, INSTANCE_ID, AS_GRP_NAME

    # Get config for current p/t, if not exists, do nothing
    if METRICS.get(vl.plugin) and METRICS[vl.plugin].get(vl.type):
        # Get default plugin unit
        unit = METRICS[vl.plugin][vl.type].get('unit', 'None')

        # Build Metric Name (like FS, because I can't have a beautiful CamelCase with collectd's name)
        if vl.plugin_instance:
            metric_name = '{p}-{pi}/{t}'.format(p=vl.plugin, pi=vl.plugin_instance, t=vl.type)
        else:
            metric_name = '{p}/{t}'.format(p=vl.plugin, t=vl.type)

        # Append type_instance to metric_name and get the unit of the metric if exists
        if vl.type_instance:
            metric_name = '{m}-{ti}'.format(m=metric_name, ti=vl.type_instance)
            if METRICS[vl.plugin][vl.type].get('type_instance', False):
                unit = METRICS[vl.plugin][vl.type]['type_instance'].get(vl.type_instance, unit)

        dimensions = {'InstanceId': INSTANCE_ID}
        if AS_GRP_NAME:
            as_dimensions = {'AutoScalingGroupName': AS_GRP_NAME}

        # Needed ?
        for i in vl.values:
            try:
                collectd.debug(('Putting {metric}={value} {unit} to {namespace} {dimensions}').format(metric=metric_name, value=i, unit=unit, namespace=NAMESPACE, dimensions=dimensions))
                cw_ec2.put_metric_data(namespace=NAMESPACE, name=metric_name, value=float(i), unit=unit, dimensions=dimensions)
                if AS_GRP_NAME:
                    collectd.debug(('Putting {metric}={value} {unit} to {namespace} {dimensions}').format(metric=metric_name, value=i, unit=unit, namespace=NAMESPACE, dimensions=as_dimensions))
                    cw_ec2.put_metric_data(namespace=NAMESPACE, name=metric_name, value=float(i), unit=unit, dimensions=as_dimensions)
            except boto.exception.EC2ResponseError:
                print_boto_error()
                collectd.warning(('Failed to put {metric}={value} {unit} to {namespace} {dimensions}').format(metric=metric_name, value=i, unit=unit, namespace=NAMESPACE, dimensions=dimensions))
Example #8
    def __init__(self, formatter, host, port, ttl=255, interface=None):
        collectd.debug("%s formatter=%s host=%s, port=%s ttl=%s interface=%s" %
                       ('write_socket_json', formatter, host, port, ttl, interface))

        super(UdpWriter, self).__init__(formatter)

        self.host = host
        self.port = int(port)
        self.interface = interface
        self.ttl = ttl


        self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)

        if self.interface:
            # Crude test to distinguish between interface names and IP addresses.
            interface_ip = None
            try:
                if socket.gethostbyname(self.interface) == self.interface:
                    interface_ip = self.interface
            except socket.gaierror:
                try:
                    import netifaces
                    interface_ip = netifaces.ifaddresses(self.interface)[0]['addr']
                except (ImportError, OSError, ValueError) as msg:
                    collectd.notice("%s error setting interface: %s" % ('write_socket_json', msg))

            if interface_ip:
                try:
                    self.sock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_IF, socket.inet_aton(interface_ip))
                except socket.error as msg:
                    collectd.notice("%s error setting interface: %s" % ('write_socket_json', msg))
Example #9
def config(conf):
    collectd.debug('Configuring Stuff')
    global REGION, AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, NAMESPACE, METRICS, INSTANCE_ID

    metrics_config = None

    for node in conf.children:
        if node.key == 'region':
            REGION = node.values[0]
        if node.key == 'aws_access_key_id':
            AWS_ACCESS_KEY_ID = node.values[0]
        if node.key == 'aws_secret_access_key':
            AWS_SECRET_ACCESS_KEY = node.values[0]
        if node.key == 'namespace':
            NAMESPACE = node.values[0]
        if node.key == 'metrics_config':
            metrics_config = node.values[0]

    if not metrics_config:
        collectd.warning("Missing YAML plugins configuration; please define metrics_config")
        return

    collectd.debug('Loading YAML plugins configuration')
    try:
        stream = open(metrics_config)
        METRICS = yload(stream)
    except Exception:
        collectd.warning(("Couldn't load YAML plugins configuration {0}").format(metrics_config))

    # get instance ID
    INSTANCE_ID = boto.utils.get_instance_metadata()['instance-id']
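For these callbacks to take effect, the plugin would register them with collectd at import time; a minimal sketch using the standard registration hooks (the function names match the definitions above):

collectd.register_config(config)
collectd.register_write(write)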
Example #10
 def get_queue_names(self, vhost_name=None):
     """
     Returns a list of all queue names.
     """
     collectd.debug("Getting queue names for %s" % vhost_name)
     all_queues = self.get_queues(vhost_name)
     return self.get_names(all_queues)
Example #11
    def get_stats(self, stat_type, stat_name, vhost_name):
        """
        Returns a dictionary of stats.
        """
        collectd.debug("Getting stats for %s %s%s in %s" %
                       (stat_name or 'all',
                        stat_type,
                        's' if not stat_name else '',
                        vhost_name))

        if stat_type not in ('exchange', 'queue'):
            raise ValueError("Unsupported stat type {0}".format(stat_type))
        stat_name_func = getattr(self, 'get_{0}_names'.format(stat_type))
        if not vhost_name:
            vhosts = self.get_vhost_names()
        else:
            vhosts = [vhost_name]

        stats = dict()
        for vhost in vhosts:
            if not stat_name:
                names = stat_name_func(vhost)
            else:
                names = [stat_name]
            for name in names:
                if not self.config.is_ignored(stat_type, name):
                    stats[name] = self.get_info("{0}s".format(stat_type),
                                                vhost,
                                                name)
        return stats
Example #12
def kairosdb_send_http_data(data, json):
    collectd.debug('Json=%s' % json)
    data['lock'].acquire()
    
    if not kairosdb_connect(data):
        data['lock'].release()
        collectd.warning('kairosdb_writer: no connection to kairosdb server')
        return

    response = ''
    try:
        headers = {'Content-type': 'application/json', 'Connection': 'keep-alive'}
        data['conn'].request('POST', '/api/v1/datapoints', json, headers)
        res = data['conn'].getresponse()
        response = res.read()
        collectd.debug('Response code: %d' % res.status)

        if res.status == 204:
            exit_code = True
        else:
            collectd.error(response)
            exit_code = False

    except httplib.ImproperConnectionState as e:
        collectd.error('Lost connection to kairosdb server: %s' % e)
        data['conn'] = None
        exit_code = False
    finally:
        # Always release the lock, even on failure paths.
        data['lock'].release()

    return exit_code
Example #13
def write_callback(values):
    """
    Pass values_object to all `WRITERS`.

    :param values: Instance of `collectd.Values`.

    An example of `values` is shown below. It may also contain `plugin_instance`
    and `type_instance` attributes. The `dsname`, `dstype`, `dsmin` and
    `dsmax` fields are not present in `collectd.Values`. They are
    added in the `BaseFormatter.convert_values_to_dict()` method if possible.

      collectd.Values(type='load', plugin='load', host='localhost', time=1432083347.3517618,
                      interval=300.0, values=[0.0, 0.01, 0.050000000000000003])

    """

    collectd.debug('%s.write_callback: values_object=%s' % ('$NAME', values))


    # Add dataset from types.db files.
    #
    values_dict = add_typesdb_info_to_values(values_to_dict(values), TYPES_DICT)

    with LOCK:
        for writer in WRITERS:
            writer.write(values_dict)
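The values_to_dict helper is defined elsewhere in this plugin; a minimal sketch of the assumed conversion, using only the documented collectd.Values attributes (the field names mirror the call above, but the body is an assumption):

def values_to_dict(values):
    # Assumed helper: flatten a collectd.Values object into a plain dict.
    # plugin_instance and type_instance may be absent, per the docstring above.
    return {
        'host': values.host,
        'plugin': values.plugin,
        'plugin_instance': getattr(values, 'plugin_instance', ''),
        'type': values.type,
        'type_instance': getattr(values, 'type_instance', ''),
        'time': values.time,
        'interval': values.interval,
        'values': list(values.values),
    }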
Example #14
def _post_metrics(metrics, module_config):
    """
    Posts metrics to collectd.
    :param metrics: Array of Metrics objects
    """
    for metric in metrics:
        datapoint = collectd.Values()
        datapoint.type = DEFAULT_METRIC_TYPE
        datapoint.type_instance = metric.name
        datapoint.plugin = PLUGIN_NAME
        datapoint.plugin_instance = _format_dimensions(metric.dimensions,
                                                       module_config[
                                                           'field_length'])
        datapoint.values = (metric.value,)
        pprint_dict = {
            'plugin': datapoint.plugin,
            'plugin_instance': datapoint.plugin_instance,
            'type': datapoint.type,
            'type_instance': datapoint.type_instance,
            'values': datapoint.values,
            'interval': module_config['interval']
        }
        collectd.debug(pprint.pformat(pprint_dict))
        datapoint.dispatch()
Example #15
def configure_callback(configuration, conf):
    collectd.debug("CouchDB plugin configure callback")
    for node in conf.children:
        if node.key.lower() == 'url':
            configuration['url'] = node.values[0].rstrip("/")
        else:
            raise RuntimeError("Unknown configuration key %s" % node.key)
Example #16
    def __init__(self, formatter):
        collectd.debug("BaseWriter.__init__: formatter=%s, MAX_BUFFER_SIZE=%s" %
                       (formatter, self.MAX_BUFFER_SIZE))

        threading.Thread.__init__(self)
        self.buffer = queue.Queue(maxsize=self.MAX_BUFFER_SIZE)
        self.formatter = formatter
Example #17
    def submit(self, value_type, instance, value, context):
        plugin_instance = []
        if self.prefix:
            plugin_instance.append(self.prefix)

        plugin_instance.append(self.node_id)
        plugin_instance.append(context)

        plugin_instance = ".".join(plugin_instance)

        data = pprint.pformat((value_type, plugin_instance, instance,
                               value, context))
        collectd.debug("Dispatching: %s" % data)

        val = collectd.Values()
        if self.host_name:
            val.host = self.host_name
        val.plugin = self.plugin_name
        val.plugin_instance = plugin_instance
        val.type = value_type
        val.type_instance = instance.lower().replace('-', '_')
        # HACK with this dummy dict in place JSON parsing works
        # https://github.com/collectd/collectd/issues/716
        val.meta = {'0': True}
        val.values = [value, ]
        val.dispatch()
Example #18
def dispatch_values(values, host, plugin, plugin_instance, metric_type,
                    type_instance=None):
    '''
    dispatch metrics to collectd
    Args:
      values (tuple): the values to dispatch
      host: (str): the name of the vhost
      plugin (str): the name of the plugin. Should be queue/exchange
      plugin_instance (str): the queue/exchange name
      metric_type: (str): the name of metric
      type_instance: Optional
    '''

    collectd.debug("Dispatching %s %s %s %s %s\n\t%s " % (host, plugin,
                   plugin_instance, metric_type, type_instance, values))

    metric = collectd.Values()
    if host:
        metric.host = host
    metric.plugin = plugin
    if plugin_instance:
        metric.plugin_instance = plugin_instance
    metric.type = metric_type
    if type_instance:
        metric.type_instance = type_instance
    metric.values = values
    metric.dispatch()
Example #19
def dispatch_message_stats(data, vhost, plugin, plugin_instance):
    if not data:
        collectd.debug("No data for %s in vhost %s" % (plugin, vhost))
        return

    for name in MESSAGE_STATS:
        dispatch_values((data.get(name, 0),), vhost, plugin, plugin_instance, name)
Example #20
def dispatch_values(values, host, plugin, plugin_instance, metric_type,
                    type_instance=None):
    '''
    dispatch metrics to collectd
    Args:
      values (tuple): the values to dispatch
      host: (str): the name of the vhost
      plugin (str): the name of the plugin. Should be queue/exchange
      plugin_instance (str): the queue/exchange name
      metric_type: (str): the name of metric
      type_instance: Optional
    '''

    collectd.debug("Dispatching %s %s %s %s %s\n\t%s " % (host, plugin,
                   plugin_instance, metric_type, type_instance, values))

    metric = collectd.Values()
    if host:
        metric.host = host
    metric.plugin = plugin
    if plugin_instance:
        metric.plugin_instance = plugin_instance
    metric.type = metric_type
    if type_instance:
        metric.type_instance = type_instance
    metric.values = values
    # Tiny hack to fix bug with write_http plugin in Collectd versions < 5.5.
    # See https://github.com/phobos182/collectd-elasticsearch/issues/15 for details
    metric.meta = {'0': True}
    metric.dispatch()
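A hypothetical call matching the signature documented above (all names are illustrative): dispatching the value 42 for the "messages" metric of queue "my_queue" on vhost "myvhost" would look like:

dispatch_values((42,), 'myvhost', 'queues', 'my_queue', 'messages')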
Example #21
def read():
    """
    Makes API calls to Couchbase and records metrics to collectd.
    """
    for module_config in CONFIGS:
        for request_type in module_config['api_urls']:
            collectd.info("Request type " + request_type + " for responce: " +
                          module_config['api_urls'].get(request_type))
            resp_obj = _api_call(module_config['api_urls'].get(request_type),
                                 module_config['opener'])
            if resp_obj is None:
                continue

            # 1. Prepare dimensions list
            collect_target = module_config['plugin_config'].get(
                    'CollectTarget')
            dimensions = _build_dimensions(collect_target, module_config)
            collectd.debug("Using dimensions:")
            collectd.debug(pprint.pformat(dimensions))

            # 2. Parse metrics
            metrics = _parse_metrics(resp_obj, dimensions, request_type,
                                     module_config)

            # 3. Post metrics
            _post_metrics(metrics, module_config)
Example #22
 def get_vhost_names(self):
     """
     Returns a list of vhost names.
     """
     collectd.debug("Getting vhost names")
     all_vhosts = self.get_vhosts()
     return self.get_names(all_vhosts) or list()
Example #23
 def get_exchange_names(self, vhost_name=None):
     """
     Returns a list of all exchange names.
     """
     collectd.debug("Getting exchange names for %s" % vhost_name)
     all_exchanges = self.get_exchanges(vhost_name)
     return self.get_names(all_exchanges)
Example #24
def dispatch_values(values, vhost, plugin, plugin_instance, metric_type,
                    type_instance=None):
    '''
    dispatch metrics to collectd
    Args:
      values (tuple): the values to dispatch
      vhost: (str): the name of the vhost
      plugin (str): the name of the plugin. Should be queue/exchange
      plugin_instance (str): the queue/exchange name
      metric_type: (str): the name of metric
      type_instance: Optional
    '''

    collectd.debug("Dispatching %s %s %s %s %s\n\t%s " % (vhost, plugin,
                   plugin_instance, metric_type, type_instance, values))

    metric = collectd.Values()
    metric.plugin = 'rabbitmq'
    if plugin_instance:
        # this might be "vhost-default-queues-foo" where the vhost is named
        # "default", the plugin is "queues" or "exchanges" and the instance is
        # the name of the queue or the exchange
        metric.plugin_instance = "vhost-%s-%s-%s" % (vhost, plugin, plugin_instance)
    else:
        metric.plugin_instance = plugin
    metric.type = metric_type
    if type_instance:
        metric.type_instance = type_instance
    metric.values = values
    metric.dispatch()
Example #25
def init():
    collectd.debug('initing stuff')
    global sock
    sock = socket()
    try:
        sock.connect((CARBON_SERVER,CARBON_PORT))
    except Exception:
        collectd.warning("Couldn't connect to %(server)s on port %(port)d, is carbon-agent.py running?" % { 'server':CARBON_SERVER, 'port':CARBON_PORT })
Example #26
    def get_exchange_stats(self, exchange_name=None, vhost_name=None):
        """
        Returns a dictionary of stats for exchange_name.
        """
        collectd.debug("Getting exchange stats for %s in %s" %
                       (exchange_name, vhost_name))

        return self.get_stats('exchange', exchange_name, vhost_name)
Example #27
def close_plugin():
    '''This will clean up all opened connections'''
    global ser
    if ser is not None:
        ser.close()
        collectd.info("ravencollectd: Serial port closed.")
    else:
        collectd.debug("ravencollectd: Asking to close serial port, but it was never open.")
Example #28
def rabbit_api_call(url, user, password):
	collectd.debug("Polling on RabbitMQ admin API %s" % url)
	r = requests.get(url, auth=requests.auth.HTTPBasicAuth(user, password))
	if r.status_code == 200:
		return r.json()
	else:
		collectd.error("url %s returned status %d" % (url, r.status_code))
		return {}
Example #29
 def dispatch_exchanges(self, vhost_name):
     """
     Dispatches exchange data for vhost_name.
     """
     collectd.debug("Dispatching exchange data for {0}".format(vhost_name))
     stats = self.rabbit.get_exchange_stats(vhost_name=vhost_name)
     for exchange_name, value in stats.items():
         self.dispatch_message_stats(value, vhost_name, 'exchanges',
                                     exchange_name)
Example #30
def read():
    """
    Reads and dispatches data.
    """
    collectd.debug("Reading data from rabbit and dispatching")
    if not PLUGIN:
        collectd.warning('Plugin not ready')
        return
    PLUGIN.read()
Example #31
    def __init__(self, alarm_id):

        self.name = None
        self.state = LINK_UP
        self.timestamp = float(0)
        self.severity = fm_constants.FM_ALARM_SEVERITY_CLEAR
        self.alarm_id = alarm_id
        self.state_change = True
        self.port_alarm = False

        collectd.debug("%s LinkObject constructor: %s" % (PLUGIN, alarm_id))
Example #32
    def dispatch_values(values,
                        host,
                        plugin,
                        plugin_instance,
                        metric_type,
                        type_instance=None):
        """
        Dispatch metrics to collectd.

        :param values (tuple or list): The values to dispatch. It will be
                                       coerced into a list.
        :param host: (str): The name of the vhost.
        :param plugin (str): The name of the plugin. Should be
                             queue/exchange.
        :param plugin_instance (str): The queue/exchange name.
        :param metric_type: (str): The name of metric.
        :param type_instance: Optional.

        """
        path = "{0}.{1}.{2}.{3}.{4}".format(host, plugin, plugin_instance,
                                            metric_type, type_instance)

        collectd.debug("Dispatching %s values: %s" % (path, values))

        try:
            metric = collectd.Values()
            metric.host = ""

            metric.plugin = plugin

            if plugin_instance:
                metric.plugin_instance = "{0}_{1}".format(
                    host, plugin_instance)
            else:
                metric.plugin_instance = host

            metric.type = metric_type

            if type_instance:
                metric.type_instance = type_instance

            if utils.is_sequence(values):
                metric.values = values
            else:
                metric.values = [values]
            # Tiny hack to fix bug with write_http plugin in Collectd
            # versions < 5.5.
            # See https://github.com/phobos182/collectd-elasticsearch/issues/15
            # for details
            metric.meta = {'0': True}
            metric.dispatch()
        except Exception as ex:
            collectd.warning("Failed to dispatch %s. Exception %s" %
                             (path, ex))
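The utils.is_sequence helper referenced above is not part of this example; a minimal sketch of the assumed behavior (the name is taken from the call above, the body is an assumption):

def is_sequence(arg):
    # Assumed helper: lists and tuples count as sequences; strings do not.
    return isinstance(arg, (list, tuple))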
Example #33
def dispatch_message_stats(data, vhost, plugin, plugin_instance):
    """
    Sends message stats to collectd.
    """
    if not data:
        collectd.debug("No data for %s in vhost %s" % (plugin, vhost))
        return

    for name in MESSAGE_STATS:
        dispatch_values((data.get(name, 0),), vhost, plugin,
                        plugin_instance, name)
Example #34
    def dispatch_values(values,
                        host,
                        plugin,
                        plugin_instance,
                        metric_type,
                        type_instance=None):
        """
        Dispatch metrics to collectd.

        :param values (tuple or list): The values to dispatch. It will be
                                       coerced into a list.
        :param host: (str): The name of the vhost.
        :param plugin (str): The name of the plugin. Should be
                             queue/exchange.
        :param plugin_instance (str): The queue/exchange name.
        :param metric_type: (str): The name of metric.
        :param type_instance: Optional.

        """
        path = "{0}.{1}.{2}.{3}.{4}".format(host, plugin, plugin_instance,
                                            metric_type, type_instance)

        collectd.debug("Dispatching %s values: %s" % (path, values))

        metric = collectd.Values()

        # kayn: the host has to be the hostname, not some general value,
        # which messes up graphite. The value of the 'host' variable was
        # folded into the 'plugin' element below instead.
        #metric.host = host

        metric.host = socket.gethostname()

        #metric.plugin = plugin
        metric.plugin = host + '.' + plugin

        if plugin_instance:
            metric.plugin_instance = plugin_instance

        metric.type = metric_type

        if type_instance:
            metric.type_instance = type_instance

        if utils.is_sequence(values):
            metric.values = values
        else:
            metric.values = [values]
        # Tiny hack to fix bug with write_http plugin in Collectd
        # versions < 5.5.
        # See https://github.com/phobos182/collectd-elasticsearch/issues/15
        # for details
        metric.meta = {'0': True}
        metric.dispatch()
Example #35
def read_cpu_wait(data=None):
    collectd.debug("Reading: " + repr(data))
    for pid, host in discover().items():
        # /var/lib/collectd/rrd/kvm_HOST/cpu_kvm/cpu-wait.rrd
        M = collectd.Values("gauge")
        M.host = "kvm_" + host
        M.plugin = "cpu_kvm"
        M.type_instance = "cpu_wait"
        with open("/proc/%s/stat" % pid, 'r') as f:
            (user, system) = f.readline().split(' ')[15:17]
        M.values = [int(user) + int(system)]
        M.dispatch()
Example #36
    def __init__(self, name):

        self.name = name
        self.state = LINK_UP
        self.timestamp = datetime.datetime.now()
        self.severity = fm_constants.FM_ALARM_SEVERITY_CLEAR
        self.alarm_id = OVS_IFACE_ALARMID
        self.state_change = True

        collectd.debug("%s InterfaceObject constructor: %s" %
                       (PLUGIN, self.name))
Example #37
 def dispatch_data(self, dict_disks_copy):
     """Dispatches dictionary to collectd."""
     for disk_name, disk_info in dict_disks_copy.items():
         # delete readbyte, writebyte, readcount and writecount field
         del dict_disks_copy[disk_name][READBYTE], dict_disks_copy[
             disk_name][WRITEBYTE], dict_disks_copy[disk_name][
                 READCOUNT], dict_disks_copy[disk_name][WRITECOUNT]
         collectd.info("Plugin disk_stat: Successfully sent to collectd.")
         collectd.debug("Plugin disk_stat: Values: " +
                        json.dumps(disk_info))
         utils.dispatch(disk_info)
Example #38
 def dispatch_queues(self, vhost_name):
     """
     Dispatches queue data for vhost_name.
     """
     collectd.debug("Dispatching queue data for {0}".format(vhost_name))
     stats = self.rabbit.get_queue_stats(vhost_name=vhost_name)
     for queue_name, value in stats.items():
         self.dispatch_message_stats(value, vhost_name, 'queues',
                                     queue_name)
         self.dispatch_queue_stats(value, vhost_name, 'queues',
                                   queue_name)
Example #39
def config(conf):
    global INTERVAL, LOGINS_WINDOW
    for node in conf.children:
        key = node.key.lower()
        val = node.values[0]
        if key == "interval":
            INTERVAL = val
        elif key == "window":
            LOGINS_WINDOW = datetime.timedelta(seconds=val)

        collectd.debug('{} plugin config: {} = {}'.format(
            PLUGIN_NAME, key, val))
    collectd.register_read(read, INTERVAL)
Example #40
 def get_names(items):
     """
     Return URL encoded names.
     """
     collectd.debug("Getting names for %s" % items)
     names = list()
     for item in items:
         name = item.get('name', None)
         if name:
             name = urllib.quote(name, '')
             names.append(name)
     return names
Example #41
def read():
    """
    Retrieve metrics and dispatch data.
    """
    collectd.debug('Reading data from qdrouterd and dispatching')
    for config in CONFIGS:
        INSTANCES.append(CollectdPlugin(config))
    for instance in INSTANCES:
        instance.read()
    for instance in INSTANCES:
        instance.close()
    INSTANCES.clear()
Example #42
 def connection_available(self, port):
     """Check if jolokia client is up."""
     try:
         jolokia_url = "http://127.0.0.1:%s/jolokia/version" % port
         resp = requests.get(jolokia_url)
         if resp.status_code == 200:
             collectd.debug(
                 "Plugin %s: Jolokia Connection available in port %s" %
                 (self.plugin_name, port))
             return True
     except requests.exceptions.ConnectionError:
         pass
     return False
Example #43
    def dispatch_data(self, doc_name, result):
        """Dispatch data to collectd."""
        if doc_name == "zookeeperStats":
            for item in ["packetsSent", "packetsReceived"]:
                del result[item]

        collectd.info(
            "Plugin zookeeperjmx: Succesfully sent %s doctype to collectd." %
            doc_name)
        collectd.debug("Plugin zookeeperjmx: Values dispatched =%s" %
                       json.dumps(result))
        utils.dispatch(result)
Example #44
def read_cpu(data=None):
    collectd.debug("Reading: " + repr(data))
    for pid, host in discover().items():
        # /var/lib/collectd/rrd/kvm_HOST/cpu_kvm/cpu-usage.rrd
        M = collectd.Values("derive")  # or try "counter"
        M.host = "kvm_" + host
        M.plugin = "cpu_kvm"
        M.type_instance = "cpu_usage"
        # import os
        # os.sysconf("SC_CLK_TCK")
        with open("/proc/%s/stat" % pid, 'r') as f:
            (user, system) = f.readline().split(' ')[13:15]
        M.values = [int(user) + int(system)]
        M.dispatch()
Example #45
    def message(self, level, text):
        text = '%s: %s' % (level, text)

        if level == 'E':
            collectd.error(text)
        elif level == 'W':
            collectd.warning(text)
        elif level == 'N':
            collectd.notice(text)
        elif level == 'I':
            collectd.info(text)
        else:
            collectd.debug(text)
Example #46
def make_api_call(data, url):
    collectd.debug("GETTING THIS  URL %s" % url)
    try:
        key_file, cert_file, ca_certs = get_ssl_params(data)
        opener = urllib2.build_opener(
            urllib_ssl_handler.HTTPSHandler(key_file=key_file,
                                            cert_file=cert_file,
                                            ca_certs=ca_certs))

        response = opener.open(url)
        return response
    except (urllib2.HTTPError, urllib2.URLError) as e:
        collectd.error("ERROR: API call failed: (%s) %s" % (e, url))
Example #47
    def dispatch_data(self, result, doc):
        """Dispatch data to collectd."""
        if doc == "haproxyStats":
            collectd.info("Plugin haproxy: Succesfully sent %s doctype to collectd." % doc)
            collectd.debug("Plugin haproxy: Values dispatched =%s" % json.dumps(result))

            utils.dispatch(result)

        elif doc == "frontendStats" or doc == "backendStats":
            for pxname in result.keys():
                collectd.info("Plugin haproxy: Succesfully sent %s of %s to collectd." % (doc, pxname))
                collectd.debug("Plugin haproxy: Values dispatched =%s" % json.dumps(result[pxname]))
                utils.dispatch(result[pxname])
Example #48
def configer(config):
    global instances
    collectd.debug('Configuring Stuff')

    # config nodes expose: children, key, parent, values
    for c in config.children:
        if c.key == 'server':
            hostname = None
            port = 0
            for srv in c.children:
                if srv.key == 'hostname':
                    hostname = '.'.join(srv.values)
                elif srv.key == 'port':
                    port = int(srv.values[0])
            if hostname:
                instances[hostname] = port
Example #49
def configure_callback(conf):
    collectd.debug("hadoop: Start configuring")
    for node in conf.children:
        if node.key == 'Ports':
            global PORTS
            PORTS = node.values
        if node.key == 'Host':
            global HOST
            HOST = node.values
        if node.key == 'Url':
            global URL
            URL = node.values
    collectd.debug("hadoop: Finish configuring")
Example #50
 def knife(self, *args):
     cmd = ['knife']+list(args)+['-u', self.node_name, '-s', self.server]
     collectd.debug('Running %s'%(' '.join(cmd)))
     p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
     out, err = p.communicate()
     lines = out.splitlines()
     if lines[0] == 'No knife configuration file found':
         del lines[0]
     out = ''.join(lines)
     try:
         return json.loads(out)
     except ValueError:
         collectd.error('Unable to decode %r'%out)
Example #51
def _get_object(alarm_id, eid):
    """
    Get the plugin object for the specified alarm id and eid
    """

    base_obj = _get_base_object(alarm_id)
    if len(base_obj.instance_objects):
        try:
            return (base_obj.instance_objects[eid])
        except KeyError:
            collectd.debug("%s %s has no instance objects" %
                           (PLUGIN, base_obj.plugin))
    return base_obj
Example #52
def _parse_metrics(obj_to_parse, dimensions, request_type, module_config):
    metrics = []
    if request_type == REQUEST_TYPE_NODE:
        if 'storageTotals' in obj_to_parse:
            value = obj_to_parse['storageTotals']
            metric_name_pref = 'storage'
            metrics.extend(
                _parse_with_prefix(metric_name_pref, value, dimensions,
                                   module_config))
    elif request_type == REQUEST_TYPE_NODE_STAT:
        if 'nodes' in obj_to_parse:
            value = obj_to_parse['nodes']
            metric_name_pref = 'nodes'
            for node in value:
                if 'thisNode' in node and node['thisNode'] is True:
                    dimensions = dict(dimensions)
                    dimensions['node'] = node.get('hostname')
                    metrics.extend(
                        _parse_with_prefix(metric_name_pref, node, dimensions,
                                           module_config))
    elif request_type == REQUEST_TYPE_BUCKET:
        if 'quota' in obj_to_parse:
            value = obj_to_parse['quota']
            metric_name_pref = 'bucket.quota'
            metrics.extend(
                _parse_with_prefix(metric_name_pref, value, dimensions,
                                   module_config))
        if 'basicStats' in obj_to_parse:
            value = obj_to_parse['basicStats']
            metric_name_pref = 'bucket.basic'
            metrics.extend(
                _parse_with_prefix(metric_name_pref, value, dimensions,
                                   module_config))
    elif request_type == REQUEST_TYPE_BUCKET_STAT:
        if 'op' in obj_to_parse:
            value = obj_to_parse['op']
            samples = value.get('samples')
            metric_name_pref = 'bucket.op'
            for key_sample, value_sample in samples.items():
                if isinstance(value_sample, list):
                    metric_value = value_sample[-1]
                    metric = _process_metric(metric_name_pref, key_sample,
                                             metric_value, dimensions,
                                             module_config)
                    if metric:
                        metrics.append(metric)

    collectd.debug("End parsing: " + str(len(metrics)))
    for metric in metrics:
        collectd.debug(str(metric))
    return metrics
Example #53
def read_metrics(module_config):
    '''
    Registered read call back function that collects
    metrics from all endpoints
    '''
    collectd.debug('Executing read_metrics callback')

    alive = get_response(module_config['base_url'], 'ping', module_config)

    if alive is not None:
        prepare_and_dispatch_metric(module_config,
                                    NODE_STATUS_METRICS['ping'].name, alive,
                                    NODE_STATUS_METRICS['ping'].type)

    if module_config['computer_metrics']:
        resp_obj = get_response(module_config['base_url'], 'computer',
                                module_config)

        if resp_obj is not None:
            report_computer_status(module_config, resp_obj['computer'])

    resp_obj = get_response(module_config['base_url'], 'metrics',
                            module_config)

    if resp_obj is not None:
        parse_and_post_metrics(module_config, resp_obj['gauges'])

    resp_obj = get_response(module_config['base_url'], 'healthcheck',
                            module_config)

    if resp_obj is not None:
        parse_and_post_healthcheck(module_config, resp_obj)

    if module_config['job_metrics']:
        resp_obj = get_response(module_config['base_url'], 'jenkins',
                                module_config)

        if resp_obj is not None:
            if "jobs" in resp_obj and resp_obj['jobs']:
                jobs_data = resp_obj['jobs']
                for job in jobs_data:
                    if job['name'] in module_config['jobs_last_timestamp']:
                        last_timestamp = module_config['jobs_last_timestamp'][
                            job['name']]
                    else:
                        last_timestamp = int(time.time() * 1000) - (60 * 1000)
                        module_config['jobs_last_timestamp'][
                            job['name']] = last_timestamp
                    read_and_post_job_metrics(module_config,
                                              module_config['base_url'],
                                              job['name'], last_timestamp)
Example #54
    def get_metrics_and_dimensions(self):
        metrics = {}
        base_url = ("http://%s:%s/api" % (self.host, self.port))

        for endpoint in self.api_endpoints:
            resp_list = self._api_call("%s/%s" % (base_url, endpoint))

            base_name = endpoint.rstrip('s')  # Report dimensions as singular
            for resp in resp_list:
                dimensions = extract_dimensions(resp)
                collectd.debug("Using dimensions:")
                collectd.debug(pprint.pformat(dimensions))
                metrics = self.determine_metrics(resp, base_name=base_name)
                yield metrics, dimensions
Example #55
def config(cfg):
    global countup_db_path
    global logfile
    global loglevel
    global interval
    global environ

    for child in cfg.children:
        if child.key == "CountupDBPath":
            collectd.debug("[config] config arg set key %s: %s" %
                           (child.key, child.values[0]))
            countup_db_path = child.values[0]

        if child.key == "LogFile":
            collectd.debug("[config] config arg set key %s: %s" %
                           (child.key, child.values[0]))
            logfile = child.values[0]

        if child.key == "LogLevel":
            collectd.debug("[config] config arg set key %s: %s" %
                           (child.key, child.values[0]))
            loglevel = int(child.values[0])

        if child.key == "Environ":
            for _value in child.values:
                collectd.debug("[config] config arg set key %s: %s" %
                               (child.key, _value))
                pieces = _value.split("=", 1)
                environ[pieces[0]] = pieces[1]
Example #56
    def dispatch_router(self):
        """
        Dispatch general router data
        """
        collectd.debug('Dispatching general router data')

        objects = self.query('org.apache.qpid.dispatch.router')

        router = objects[0]
        for stat_name in self.router_stats:
            if stat_name != 'id':
                value = str(getattr(router, stat_name))
                self.dispatch_values(value, self.config.host, 'router',
                                     router.id, uncamelcase(stat_name))
Example #57
    def dispatch_memory(self):
        """
        Dispatch memory data
        """
        collectd.debug('Dispatching memory data')

        objects = self.query('org.apache.qpid.dispatch.allocator')

        for mem in objects:
            for stat_name in self.mem_stats:
                if stat_name != 'identity':
                    value = str(getattr(mem, stat_name))
                    self.dispatch_values(value, self.config.host, 'memory',
                                         mem.identity, uncamelcase(stat_name))
Example #58
def read_func():
    total = 0
    domain_counter = {}
    for d in domains:
        domain_counter[d] = 0
    records = utmp.UtmpRecord()
    for rec in records:
        if rec.ut_type == USER_PROCESS:
            host = rec.ut_host
            for d in domains:
                collectd.debug("HERE: %s %s" % (host, d))
                if host.endswith(d):
                    collectd.debug('Matches')
                    domain_counter[d] = domain_counter[d] + 1
            total = total + 1
    records.endutent()
    datapoint = collectd.Values(plugin='sessions', )
    datapoint.type = 'count'
    datapoint.type_instance = 'total_sessions'
    datapoint.values = [total]
    collectd.debug('Dispatching a value of %s for total sessions' % total)
    datapoint.dispatch()

    for d in domains:
        datapoint = collectd.Values(plugin='sessions', )
        datapoint.type = 'count'
        datapoint.type_instance = d
        datapoint.values = [domain_counter[d]]
        collectd.debug('Dispatching a value of %s for domain sessions %s' %
                       (domain_counter[d], d))
        datapoint.dispatch()
Example #59
def read_metrics(data):
    '''
    Registered read call back function that collects
    metrics from all endpoints
    '''
    collectd.debug("STARTED FETCHING METRICS")
    map_id_to_url(data, 'members')
    get_self_metrics(data, 'self')
    get_store_metrics(data, 'store')
    if data['state'] == LEADER:
        get_leader_metrics(data, 'leader')
    # get optional metrics
    if data['enhanced_metrics'] or len(data['include_optional_metrics']) > 0:
        get_optional_metrics(data, 'metrics')
Example #60
    def __init__(self, plugin, url=""):

        # static variables set in init_func
        self.plugin = plugin  # the name of this plugin
        self.hostname = ''  # the name of this host
        self.port = 0  # the port number for this plugin
        self.base_eid = ''  # the base entity id host=<hostname>
        self.controller = False  # set true if node is controller

        # dynamic gate variables
        self.virtual = False  # set to True if host is virtual
        self._config_complete = False  # set to True once config is complete
        self.config_done = False  # set true if config_func completed ok
        self.init_complete = False  # set true if init_func completed ok
        self.fm_connectivity = False  # set true when fm connectivity ok

        self.alarm_type = fm_constants.FM_ALARM_TYPE_7  # OPERATIONAL
        self.cause = fm_constants.ALARM_PROBABLE_CAUSE_50  # THRESHOLD CROSS
        self.suppression = True
        self.service_affecting = False

        # dynamic variables set in read_func
        self.usage = float(0)  # last usage value recorded as float
        self.value = float(0)  # last read value
        self.audits = 0  # number of audit since init
        self.enabled = False  # tracks a plugin's enabled state
        self.alarmed = False  # tracks the current alarmed state
        self.mode = ''  # mode specific to plugin

        # http and json specific variables
        self.url = url  # target url
        self.jresp = None  # used to store the json response
        self.resp = ''

        self.objects = []  # list of plugin specific objects
        self.cmd = ''  # plugin specific command string

        # Log controls
        self.config_logged = False  # used to log once the plugin config
        self.error_logged = False  # used to prevent log flooding
        self.log_throttle_count = 0  # used to count throttle logs
        self.INIT_LOG_THROTTLE = 10  # the init log throttle threshold
        self.CONFIG_LOG_THROTTLE = 50  # the config log throttle threshold
        self.http_retry_count = 0  # track http error cases
        self.HTTP_RETRY_THROTTLE = 6  # http retry threshold
        self.phase = 0  # tracks current phase; init, sampling

        collectd.debug("%s Common PluginObject constructor [%s]" %
                       (plugin, url))