Example #1
def setup_collectd():
    """
    Registers callback functions with collectd
    """
    collectd.register_init(init)
    collectd.register_config(config)
    collectd.register_shutdown(shutdown)
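For context, here is a minimal, hypothetical sketch of the three callbacks referenced above; init, config, and shutdown are not part of this excerpt, so the bodies below are assumptions, only the signatures follow collectd's Python plugin API:

def init():
    # no arguments; called once after all plugins are loaded
    collectd.info('plugin initialized')

def config(conf):
    # conf is a collectd.Config node; each child carries .key and .values
    for node in conf.children:
        collectd.info('config %s = %s' % (node.key, node.values))

def shutdown():
    collectd.info('plugin shutting down')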
Example #2
    def __init__(self, typeinfo):
        self.nameserver = "unknown"
        self.cluster = "none"
        self.ns = None
        self.ip = "0.0.0.0"
        self.publishTimeout = 600
        self.q = multiprocessing.Queue()
        self.qthread = None
        self.typesdb = "/usr/share/collectd/types.db"
        self.types = {}
        self.typeinfo = typeinfo
        self.cachedValues = {}

        collectd.register_config(self.config)
        collectd.register_init(self.init)
        collectd.register_write(self.write)
        collectd.register_shutdown(self.shutdown)
def register_plugin(collectd):
    "Bind plugin hooks to collectd and viceversa"

    config = Config.instance()

    # Set up logging
    log_handler = CollectdLogHandler(collectd=collectd)
    log_handler.cfg = config
    ROOT_LOGGER.addHandler(log_handler)
    ROOT_LOGGER.setLevel(logging.NOTSET)

    # Creates collectd plugin instance
    instance = Plugin(collectd=collectd, config=config)

    # Register plugin callbacks
    collectd.register_config(instance.config)
    collectd.register_write(instance.write)
    collectd.register_shutdown(instance.shutdown)
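The CollectdLogHandler used above is defined elsewhere; a plausible minimal sketch (an assumption, not the actual class) simply forwards Python logging records to collectd's log functions:

import logging

class CollectdLogHandler(logging.Handler):
    # Hypothetical bridge from the Python logging module to collectd.
    def __init__(self, collectd=None):
        logging.Handler.__init__(self)
        self.collectd = collectd
        self.cfg = None  # the snippet above attaches a Config here

    def emit(self, record):
        msg = self.format(record)
        if record.levelno >= logging.ERROR:
            self.collectd.error(msg)
        elif record.levelno >= logging.WARNING:
            self.collectd.warning(msg)
        else:
            self.collectd.info(msg)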
        elif kv.key == 'MinCPUPercent':
            if not 1 <= int(kv.values[0]) <= 100:
                raise Exception('invalid value for ' + kv.key)
            MIN_CPU_USAGE_PERCENT = int(kv.values[0])
        elif kv.key == 'MinMemoryPercent':
            if not 1 <= int(kv.values[0]) <= 100:
                raise Exception('invalid value for ' + kv.key)
            MIN_MEM_USAGE_PERCENT = int(kv.values[0])
        elif kv.key == 'ReportDockerContainerNames':
            REPORT_DOCKER_CONTAINER_NAMES = kv.values[0]
            if not isinstance(REPORT_DOCKER_CONTAINER_NAMES, bool):
                REPORT_DOCKER_CONTAINER_NAMES = str2bool(kv.values[0])
        else:
            raise Exception('unknown config parameter')
    collectd.register_read(send_metrics)


def write_metrics(mmaps, plugin_name):
    for name, mmap in mmaps.iteritems():
        for metric, val in mmap.iteritems():
            write_val(plugin_name, name, metric, val)


def send_metrics():
    pmaps = get_processes_info()
    write_metrics(pmaps, PROCESS_PLUGIN_NAME)


collectd.register_init(process_watch_init)
collectd.register_config(process_watch_config)
Example #5
            if 'lastHeartbeat' in m:
                self.submit(rs_name, t, '{0}-last_heartbeat'.format(n), tstofloat(m['lastHeartbeat']))

            if 'lastHeartbeatRecv' in m:
                self.submit(rs_name, t, '{0}-last_heartbeat_recv'.format(n), tstofloat(m['lastHeartbeatRecv']))
            if 'pingMs' in m:
                self.submit(rs_name, t, '{0}-ping_ms'.format(n), m['pingMs'])

        if self_optime is not None and primary_optime is not None:
            n = "self-{0}".format(self_port)
            self.submit(rs_name, t, '{0}-replication_lag'.format(n), int(primary_optime - self_optime))

    def config(self, obj):
        for node in obj.children:
            if node.key == 'Port':
                self.mongo_port = int(node.values[0])
            elif node.key == 'Host':
                self.mongo_host = node.values[0]
            elif node.key == 'User':
                self.mongo_user = node.values[0]
            elif node.key == 'Password':
                self.mongo_password = node.values[0]
            else:
                collectd.warning("mongodb_replset plugin: Unkown configuration key %s" % node.key)


mongodb_replset = MongoDBReplSet()
collectd.register_config(mongodb_replset.config)
collectd.register_read(mongodb_replset.do_status)
Example #6
            self.submit('counter', 'object_count', db_stats['objects'], mongo_db)
            self.submit('counter', 'collections', db_stats['collections'], mongo_db)
            self.submit('counter', 'num_extents', db_stats['numExtents'], mongo_db)
            self.submit('counter', 'indexes', db_stats['indexes'], mongo_db)

            # stats sizes
            self.submit('file_size', 'storage', db_stats['storageSize'], mongo_db)
            self.submit('file_size', 'index', db_stats['indexSize'], mongo_db)
            self.submit('file_size', 'data', db_stats['dataSize'], mongo_db)

        #con.disconnect()

    def config(self, obj):
        for node in obj.children:
            if node.key == 'Port':
                self.mongo_port = int(node.values[0])
            elif node.key == 'Host':
                self.mongo_host = node.values[0]
            elif node.key == 'User':
                self.mongo_user = node.values[0]
            elif node.key == 'Password':
                self.mongo_password = node.values[0]
            elif node.key == 'Database':
                self.mongo_db = node.values
            else:
                collectd.warning("mongodb plugin: Unkown configuration key %s" % node.key)

mongodb = MongoDB()
collectd.register_read(mongodb.do_server_status)
collectd.register_config(mongodb.config)
        """Collectd write callback"""
        # pylint: disable=broad-except
        # pass arguments to the writer
        try:
            self._writer.write(vl, data)
        except Exception as exc:
            if collectd is not None:
                collectd.error('Exception during write: %s' % exc)

    def shutdown(self):
        """Shutdown callback"""
        # pylint: disable=broad-except
        collectd.info("SHUTDOWN")
        try:
            self._writer.flush()
        except Exception as exc:
            if collectd is not None:
                collectd.error('Exception during shutdown: %s' % exc)


# The collectd plugin instance
# pylint: disable=invalid-name
instance = Plugin()
# pylint: enable=invalid-name

# Register plugin callbacks
collectd.register_init(instance.init)
collectd.register_config(instance.config)
collectd.register_write(instance.write)
collectd.register_shutdown(instance.shutdown)
Example #8
   collectd.info('buddyinfo plugin: configuring host: %s' % (host_name)) 

def initer():
   get_host_type()
   collectd.info('buddyinfo plugin: host of type: %s' % (host_type))
   collectd.info('buddyinfo initer: white list: %s ' % (white_list))
   init_stats_cache()
   collectd.info('buddyinfo init: stats_cache: %s ' % (stats_cache))

def reader(input_data=None):
   collect_buddyinfo()
   swap_current_cache()

def writer(metric, data=None):
   for i in metric.values:
      collectd.debug("%s (%s): %f" % (metric.plugin, metric.type, i))

def shutdown():
   collectd.info("buddyinfo plugin shutting down")

#== Callbacks ==#
if os_name == 'Linux':
   collectd.register_config(configer)
   collectd.register_init(initer)
   collectd.register_read(reader)
   collectd.register_write(writer)
   collectd.register_shutdown(shutdown)
else:
   collectd.warning('buddyinfo plugin currently works for Linux only')

            else: 
                collectd.warning("Unrecognized conf parameter %s - ignoring" % node.values[0])

    def init_callback(self):
        self.client = DockerClient(base_url=self.BASE_URL)

    def read_callback(self):
        for container in self.client.containers():
            if not container["Status"].startswith("Up"):
                continue
            stats = self.client.stats(container).next()
            t = stats["read"]
            for key, value in stats.items():
                klass = self.CLASSES.get(key)
                if klass:
                    klass.read(container, value, t)


plugin = DockerPlugin()

if __name__ == "__main__":
    if len(sys.argv) > 1:
        plugin.BASE_URL = sys.argv[1]
    plugin.init_callback()
    plugin.read_callback()

else:
    collectd.register_config(plugin.configure_callback)
    collectd.register_init(plugin.init_callback)
    collectd.register_read(plugin.read_callback)
Example #10
            if rlat is not None:
                distance = greatcircle(rlat, rlon, a['lat'], a['lon'])
                if distance > max_range:
                    max_range = distance
            if 'lat' in a.get('mlat', ()):
                mlat += 1

    V.dispatch(plugin_instance=instance_name,
               host=host,
               type='dump1090_aircraft',
               type_instance='recent',
               time=aircraft_data['now'],
               values=[total, with_pos])
    V.dispatch(plugin_instance=instance_name,
               host=host,
               type='dump1090_mlat',
               type_instance='recent',
               time=aircraft_data['now'],
               values=[mlat])

    if max_range > 0:
        V.dispatch(plugin_instance=instance_name,
                   host=host,
                   type='dump1090_range',
                   type_instance='max_range',
                   time=aircraft_data['now'],
                   values=[max_range])


collectd.register_config(callback=handle_config, name='dump1090')

Example #11
        ports = portstat.get_bfdstats("localhost")
        for port_object in ports:
            port_name = port_object["Object"]["IpAddr"]
            stat_rx = portstat.parse_bfdrx(port_object)
            self.sendToCollect('derive', port_name, stat_rx)


if __name__ == '__main__':
    portstat = BfdStat()
    portmon = BfdMon()
    ports = portstat.get_bfdstats("localhost")
    if len(ports) == 0:
        dummy_port = "dummy"
        # at module level, dispatch through the BfdMon instance created above
        portmon.sendToCollect('derive', dummy_port, 0)
    for port_object in ports:
        port_name = json.dumps(port_object["Object"]["IpAddr"])
        stat_rx = portstat.parse_bfdrx(port_object)
        portmon.sendToCollect('derive', port_name, stat_rx)

    sys.exit(0)
else:
    import collectd

    portmon = BfdMon()

    # Register callbacks

    collectd.register_init(portmon.init_callback)
    collectd.register_config(portmon.configure_callback)
    collectd.register_read(portmon.read_callback)
Example #12
    return stats


def pgbouncer_read(data=None):
    stats = get_stats()
    if not stats:
        collectd.error('pgbouncer plugin: No info received')
        return

    for database, metrics in stats.iteritems():
        for metric, value in metrics.iteritems():
            type_instance = '%s.%s' % (database, metric)
            val = collectd.Values(plugin='pgbouncer_info')
            val.type = 'gauge'
            val.type_instance = type_instance
            val.values = [value]
            val.dispatch()


def pgbouncer_config(c):
    for child in c.children:
        value = child.values[0]
        config.update({child.key: value})


#stats=get_stats()
#print stats
collectd.register_read(pgbouncer_read)
collectd.register_config(pgbouncer_config)
Example #13
    log.info("sqlalchemy.collectd server listening for "
             "SQLAlchemy clients on UDP %s %d" % (host, port))

    listener.listen(receiver_)

    monitor_host, monitor_port = config_dict.get("monitor", (None, None))
    if monitor_host is not None and monitor_port is not None:
        from sqlalchemy_collectd.connmon import plugin as connmon

        log.warn(
            "the connmon plugin should now be configured separately in its "
            "own <Module> section")
        connmon.start_plugin(config)


def read(data=None):
    """Extract data from received messages periodically and broadcast to
    the collectd server in which we are embedded.

    The values are sent as "external" types, meaning we are using the
    "derive" and "count" types in collectd types.db.

    """
    global receiver_
    now = time.time()
    receiver_.summarize(collectd, now)


collectd.register_config(get_config)
collectd.register_read(read)
Example #14
            collectd.info('unknown config key %s for plugin disk' % key)


def fake_disk_read():

    type_l = [
        'disk_io_time', 'disk_merged', 'disk_octets', 'disk_ops', 'disk_time'
    ]

    for i in disk_instances:
        for temp_type in type_l:
            val = collectd.Values(host=instance,
                                  plugin='disk',
                                  plugin_instance=str(i),
                                  type=temp_type)
            if temp_type == 'disk_octets':
                values_read = random.randint(0, MAX_VALUE * 1024)
                values_write = random.randint(0, MAX_VALUE * 1024)
            elif temp_type == 'disk_ops':
                values_read = random.randint(0, MAX_VALUE)
                values_write = random.randint(0, MAX_VALUE)
            else:
                values_read = random.randint(0, MAX_VALUE * 1024)
                values_write = random.randint(0, MAX_VALUE * 1024)

            val.dispatch(values=[values_read, values_write])


collectd.register_config(fake_disk_config)
collectd.register_read(fake_disk_read)
                            metric = collectd.Values()
                            metric.plugin = PLUGIN_NAME
                            metric.plugin_instance = process.process
                            metric.meta = sample.labels
                            metric.type = 'gauge'
                            metric.type_instance = metric_name
                            metric.values = [sample.value]
                            metric.dispatch()

                            collectd.debug(
                                "Name: {0} Labels: {1} Value: {2}".format(
                                    *sample))
                        else:
                            collectd.debug(
                                "Name: {0} did not match Labels: {1} "
                                "Value: {2}".format(*sample))
            except Exception as e:
                collectd.error(
                    'unable to get prometheus data %s://%s:%s/metrics with current configuration %s: %s'
                    % (process.protocol, process.host, process.port,
                       json.dumps(process.__dict__), e))


def init():
    signal.signal(signal.SIGCHLD, signal.SIG_DFL)


prom = Prometheus()
collectd.register_init(init)
collectd.register_config(prom.config)
Example #16
                    self.ceph_pool_stats_interval)

    def run_command(self, command, check_output=True):
        """Run a command for this collectd plugin. Returns a tuple with command
        success and output or False and None for output.
        """
        output = None
        try:
            if check_output:
                output = subprocess.check_output(command)
            else:
                stdin, stdout, stderr = os.popen3(' '.join(command))
                output = stdout.read()
        except Exception as exc:
            collectd.error('collectd-ceph-storage: {} exception: {}'.format(
                command, exc))
            collectd.error('collectd-ceph-storage: {} traceback: {}'.format(
                command, traceback.format_exc()))
            return False, None

        if output is None:
            collectd.error(
                'collectd-ceph-storage: failed to {}: output is None'.format(
                    command))
            return False, None
        return True, output


collectd_ceph_storage = CollectdCephStorage()
collectd.register_config(collectd_ceph_storage.configure_callback)
Example #17
            data[ALLOCATED_MEMORY] = float(memstat.get('actual')) / 1024
            # TODO: USED_MEMORY is not correct; needs more research
            data[USED_MEMORY] = round(float(memstat.get('rss')) / 1024, 2)
            # TODO: RAM_UTIL is not correct; needs more research
            data[RAM_UTIL] = round(
                ((data[USED_MEMORY] / data[ALLOCATED_MEMORY]) * 100.0), 2)
            stats = domain.getCPUStats(True)
            data[CPU_TIME] = stats[0]['cpu_time']
            collectd.info("Memory stats collected for VM: %s" %
                          (domain.name()))
        except libvirt.libvirtError as e:
            collectd.warning("Unable to collect memory stats for VM: %s,"
                             " Reason: %s" %
                             (domain.name(), e.get_error_message()))
        cpu_util = get_cpu_percentage(
            self.prev_comp_data.get(data[VMNAME], {}), data)
        if cpu_util != NAN:
            data[CPU_UTIL] = round(cpu_util, 2)
        return data

    def read_temp(self):
        collectd.unregister_read(self.read_temp)
        collectd.register_read(self.read, interval=int(self.interval))


collectd.info("Registering '%s' ... " % PLUGIN_NAME)
virt = LibvirtCompute()
collectd.register_config(virt.read_config)
collectd.register_read(virt.read_temp)
collectd.info("Registered '%s' plugin successfully :)" % PLUGIN_NAME)
Example #18
                for ii in xrange(0, len(tvector)-1):
                    intervals.append(tvector[ii+1] - tvector[ii])
                self._log("tvector:  {!r}".format(tvector))
                self._log("intervals:{!r}".format(intervals))



## collectd call-back functions
##

def config_cb(config, data):
    cfg = util.map_collectd_config(config)
    data.config(cfg)


def write_cb(vl, data):
    data.track_metric(vl)


## Register the call-back functions

data = MetricWriteTracker()

collectd.register_write(write_cb, data)
collectd.register_config(config_cb, data)
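The util.map_collectd_config helper called in config_cb is not shown; a minimal sketch of what it could do (an assumption based on its use above) is to flatten the collectd.Config tree into a plain dict:

def map_collectd_config(config):
    # Hypothetical helper: flatten a collectd.Config node into {key: value},
    # unwrapping single-element value tuples.
    cfg = {}
    for node in config.children:
        cfg[node.key] = node.values[0] if len(node.values) == 1 else list(node.values)
    return cfg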


## Local Variables:
## mode: python
## End:
Example #19
    if v.plugin_instance:
        metric_fields.append(sanitize_field(v.plugin_instance))

    metric_fields.append(v.type)
    if v.type_instance:
        metric_fields.append(sanitize_field(v.type_instance))

    time = v.time

    lines = []

    for i, value in enumerate(v.values):
        ds_name = v_type[i][0]
        ds_type = v_type[i][1]

        path_fields = metric_fields[:]
        path_fields.append(ds_name)
        metric = '.'.join(path_fields)
        lines.append('%s %f %d' % (metric, value, time))

    lock.acquire()
    channel.basic_publish(amqp.Message('\n'.join(lines)), AMQP_EXCHANGE)
    lock.release()



collectd.register_config(amqp_config)


                             self.plugin_name)
            vrouter_traffic = VRouterTraffic()
            statuses = vrouter_traffic.get_table1_delta()
            #{"nw_src":['dl_src', n_packets, n_bytes]
            #{"10.1.4.77":['ac:bc:32:98:c4:83', 513, 66011], }
            for key, value in statuses.iteritems():
                #type_instance=10.1.4.77
                #plugin_instance=ac:bc:32:98:c4:83
                #type=n_packets or n_bytes
                host = "%s__%s__%s" % (self.account_id, self.hostname,
                                       self.vm_type)
                self.dispatch_value(self.plugin_name, host, 'n_packets', key,
                                    value[0], value[1])
                self.dispatch_value(self.plugin_name, host, 'n_bytes', key,
                                    value[0], value[2])

        except Exception as exp:
            self.log_verbose(traceback.format_exc())
            self.log_verbose(exp.message)


if __name__ == '__main__':
    vpn_status = VRouterTraffic()
    result = vpn_status.get_table1_delta()
    print str(result)
else:
    import collectd
    vrouter_mon = VRouterTrafficMon()
    collectd.register_config(vrouter_mon.configure_callback)
    collectd.register_read(vrouter_mon.read_callback)
                dispatch_value(
                    queue["message_stats"]["publish_details"], "rate", "publish_rate", plugin_instance, type_instance
                )
            dispatch_value(queue["message_stats"], "deliver_no_ack", "deliver_no_ack", plugin_instance, type_instance)
            if "deliver_no_ack_details" in queue["message_stats"]:
                dispatch_value(
                    queue["message_stats"]["deliver_no_ack_details"],
                    "rate",
                    "deliver_no_ack_rate",
                    plugin_instance,
                    type_instance,
                )
            dispatch_value(queue["message_stats"], "deliver_get", "deliver_get", plugin_instance, type_instance)
            if "deliver_get_details" in queue["message_stats"]:
                dispatch_value(
                    queue["message_stats"]["deliver_get_details"],
                    "rate",
                    "deliver_get_rate",
                    plugin_instance,
                    type_instance,
                )


def shutdown():
    collectd.info("python plugin shutting down")


collectd.register_config(configure)
collectd.register_read(read)
collectd.register_shutdown(shutdown)
Example #22
def handle_write(vl, data=None):
    "The collectd write callback."
    global sock

    if vl.type not in type_db:
        return

    # Only retry once in this loop to avoid blocking collectd.
    if sock is None:
        try:
            init_socket()
        except socket.error:
            return

    lines = []
    for val_type, val in zip(type_db[vl.type], vl.values):
        key = value_key(vl, val_type)
        lines.append("%s %f %d\n" % (key, val, vl.time))

    try:
        sock.sendall(''.join(lines))
    except socket.error:
        # Wait until the next pass to reconnect
        sock = None


if collectd:
    collectd.register_config(handle_config)
    collectd.register_init(handle_init)
    collectd.register_write(handle_write)
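The write callback above assumes a type_db mapping derived from collectd's types.db, so that zip(type_db[vl.type], vl.values) pairs every value with its data-source definition. A hedged sketch of how such a mapping might be built (the real parsing code is not part of this excerpt):

def parse_types_db(path):
    # Build {type_name: [(ds_name, ds_type), ...]} from a types.db file,
    # where entries look like: if_octets  rx:DERIVE:0:U, tx:DERIVE:0:U
    db = {}
    with open(path) as f:
        for line in f:
            line = line.strip()
            if not line or line.startswith('#'):
                continue
            name, specs = line.split(None, 1)
            db[name] = [tuple(spec.strip().split(':')[:2])
                        for spec in specs.split(',')]
    return db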
Example #23
        val = item.values[0]
        if key == 'host':
            config['host'] = val
        elif key == 'port':
            config['port'] = int(val)
        elif key == 'separator':
            config[key] = val
# not applicable to slave
#        elif key == 'trackingname':
#            config['tracking_name'] = val
        elif key == 'configfile':
            config['config_file'] = val
        else:
            collectd.warning('mesos-slave plugin: unknown config key {} = {}'.format(item.key, val))

    #
    # this cannot be overridden
    #
    config['master'] = False

    client = MesosSlave(config)


def reader():
    global client
    client.emit_metrics(client.fetch_metrics())


collectd.register_config(configurator)
collectd.register_read(reader)
Example #24
            'appId': 0,
            'appAttemptId': 0,
            'stageAttemptId': 0,
            'stageId': 0,
            'time': 0,
            'timePeriodStart': 0,
            'timePeriodEnd': 0,
            'duration': 0,
            'taskCount': 0
        }]
        for doc in docs:
            self.add_common_params(doc, doc['_documentType'])
            write_json.write(doc)

    def read(self):
        self.collect_data()

    def read_temp(self):
        """
        Collectd first calls register_read. At that time default interval is taken,
        hence temporary function is made to call, the read callback is unregistered
        and read() is called again with interval obtained from conf by register_config callback.
        """
        collectd.unregister_read(self.read_temp)  # pylint: disable=E1101
        collectd.register_read(self.read, interval=int(self.interval))  # pylint: disable=E1101


sparkinstance = Spark()
collectd.register_config(sparkinstance.read_config)  # pylint: disable=E1101
collectd.register_read(sparkinstance.read_temp)  # pylint: disable=E1101
Example #25
            The data is read from all actions defined in SERVICE_ACTIONS.
            This function returns a dict in the following format:
            {instance: (value_type, value)} where value_type and instance are
            mapped from VALUES and CONVERSION.
        """
        values = {}

        # Don't try to gather data if the connection is not available
        if self._fc is None:
            return values

        # Combine all values available in SERVICE_ACTIONS into a dict
        for service, action in self.SERVICE_ACTIONS:
            values.update(self._fc.call_action(service, action))

        # Construct a dict: {instance: (value_type, value)} from the queried
        # results applying a conversion (if defined)
        result = {
            instance:
            (value_type,
             self.CONVERSION.get(key, lambda x: x)(values.get(key)))
            for key, (instance, value_type) in self.VALUES.items()
        }
        return result


FC = FritzCollectd()
collectd.register_config(FC.callback_configure)
collectd.register_init(FC.callback_init)
collectd.register_read(FC.callback_read)
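The dict comprehension above iterates self.VALUES.items() as key -> (instance, value_type) and looks up optional converters in self.CONVERSION; both tables live outside this excerpt. Their assumed shapes, with purely hypothetical entries:

    # key in the queried results -> (type_instance, collectd value type)
    VALUES = {
        'NewUptime': ('uptime', 'uptime'),
    }

    # optional per-key conversion applied before dispatch
    CONVERSION = {
        'NewUptime': int,
    }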
Example #26
                    try:
                        stats.add(exportstats.GetNFSv42IO(export_id))
                    except Exception:
                        if "v42" not in self.errors:
                            collectd.error(traceback.format_exc())
                            self.errors.add("v42")

            self.dispatch_value('read', stats.read_ops)
            self.dispatch_value('write', stats.write_ops)
            self.dispatch_value('read_bytes', stats.read_bytes)
            self.dispatch_value('write_bytes', stats.write_bytes)
        except Exception:
            collectd.error(traceback.format_exc())

    def dispatch_value(self, type_instance, value):
        val = collectd.Values()
        val.plugin = 'nfsstat'
        val.plugin_instance = 'server'
        val.type = 'nfsstat'
        val.type_instance = type_instance
        val.values = [value]
        val.meta = {'0': True}
        val.dispatch(interval=READ_INTERVAL)


nfs_stat = NFSStat()

collectd.register_config(nfs_stat.config)
collectd.register_init(nfs_stat.init)
collectd.register_read(nfs_stat.read, READ_INTERVAL)
Example #27
    def cache(self):
        return self.stats()["CACHE"]

    def diff_from(self, old_time):
        delta = datetime.datetime.now() - old_time
        return delta.seconds + (delta.microseconds / 1000000.0)

    def stats_for(self, key):
        stats = self.cache()[key]["stats"]
        ret = {}
        for key in self.keys_to_track:
            ret[key] = stats.get(key, None)
        return ret

    def all_stats(self):
        ret = {}
        for key in self.keys:
            lowered_key = self.field_value_mapping[key]
            ret[lowered_key] = self.stats_for(key)
        return ret

    def info(self, message):
        if self.verbose:
            collectd.info("solr plugin [verbose]: %s" % (message))


server = SolrServer()
collectd.register_config(server.configure)
collectd.register_read(server.read)
Example #28
        if key == 'instance':
            global instance
            instance = val
        elif key == 'disk_instances':
            global disk_instances
            disk_instances = val.split()
        else:
            collectd.info('unknown config key %s for plugin disk' % key)

def fake_disk_read():

    type_l = ['disk_io_time', 'disk_merged', 'disk_octets', 'disk_ops', 'disk_time']

    for i in disk_instances:
        for temp_type in type_l:
            val = collectd.Values(host=instance, plugin='disk', plugin_instance=str(i), type=temp_type)
            if temp_type == 'disk_octets':
                values_read = random.randint(0, MAX_VALUE*1024)
                values_write = random.randint(0, MAX_VALUE*1024)
            elif temp_type == 'disk_ops':
                values_read = random.randint(0, MAX_VALUE)
                values_write = random.randint(0, MAX_VALUE)
            else:
                values_read = random.randint(0, MAX_VALUE*1024)
                values_write = random.randint(0, MAX_VALUE*1024)

            val.dispatch(values=[values_read, values_write])

collectd.register_config(fake_disk_config)
collectd.register_read(fake_disk_read)
Example #29
            self.submit('file_size', 'index', db_stats['indexSize'], mongo_db)
            self.submit('file_size', 'data', db_stats['dataSize'], mongo_db)

        con.close()

    def config(self, obj):
        for node in obj.children:
            if node.key == 'Port':
                self.mongo_port = int(node.values[0])
            elif node.key == 'Host':
                self.mongo_host = node.values[0]
            elif node.key == 'User':
                self.mongo_user = node.values[0]
            elif node.key == 'Password':
                self.mongo_password = node.values[0]
            elif node.key == 'Database':
                self.mongo_db = node.values[0]
            elif node.key == 'Ssl_certfile':
                self.ssl_certfile = node.values[0]
            elif node.key == 'Ssl_ca_certs':
                self.ssl_ca_certs = node.values[0]
            elif node.key == 'ReplicaSet':
                self.replicaSet = node.values[0]
            else:
                collectd.warning(
                    "mongodb plugin: Unknown configuration key %s" % node.key)

mongodb = MongoDB()
collectd.register_read(mongodb.do_server_status)
collectd.register_config(mongodb.config)
Example #30
            self._batch(host, values)

        self.values = not_to_flush

    def _batch(self, host, values):
        host_id = "collectd:" + host.replace("/", "_")
        measures = {host_id: collections.defaultdict(list)}
        for v in values:
            ident, suffixes = self._serialize_identifier(v)
            for i, value in enumerate(v.values):
                if not math.isnan(value):
                    measures[host_id][ident + suffixes[i]].append({
                        "timestamp": v.time,
                        "value": value,
                    })
        try:
            self.g.metric.batch_resources_metrics_measures(measures,
                                                           create_metrics=True)
        except exceptions.BadRequest:
            # Create the resource and try again
            self._ensure_resource_exists(host_id, host)
            self.g.metric.batch_resources_metrics_measures(measures,
                                                           create_metrics=True)


g = Gnocchi()
collectd.register_config(g.config)
collectd.register_init(g.init)
Example #31
        m.shutdown_flag.set()


def generate_metrics(
    target,
    success,
    failed,
    latency,
):
    target_str = '%s:%i' % (target[0], target[1])
    # float() guards against Python 2 integer division truncating the rate
    droprate = float(failed) / (success + failed)
    v = collectd.Values(plugin='tcplat',
                        type='tcplat',
                        plugin_instance=target_str)
    v.dispatch(values=[
        droprate,
        np.mean(latency),
        np.std(latency),
        np.amin(latency),
        np.amax(latency),
        np.percentile(latency, 99),
        np.percentile(latency, 95),
        np.percentile(latency, 90),
    ])


collectd.register_config(read_config)
collectd.register_read(read_data)
collectd.register_init(start_monitoring)
collectd.register_shutdown(shutdown)
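Dispatching eight values against the single 'tcplat' type implies a custom types.db entry with eight data sources, matching the order used in generate_metrics. A plausible (hypothetical) definition:

tcplat  droprate:GAUGE:0:1, mean:GAUGE:0:U, stddev:GAUGE:0:U, min:GAUGE:0:U, max:GAUGE:0:U, p99:GAUGE:0:U, p95:GAUGE:0:U, p90:GAUGE:0:U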
Example #32
                temperatures = c.call('disk.temperatures', self.disks,
                                      self.powermode)

            for disk, temp in temperatures.items():
                if temp is not None:
                    self.dispatch_value(disk,
                                        'temperature',
                                        temp,
                                        data_type='temperature')
        except CallTimeout:
            collectd.error("Timeout collecting disk temperatures")
        except Exception:
            collectd.error(traceback.format_exc())

    def dispatch_value(self, name, instance, value, data_type=None):
        val = collectd.Values()
        val.plugin = 'disktemp'
        val.plugin_instance = name
        if data_type:
            val.type = data_type
        val.values = [value]
        val.meta = {'0': True}
        val.dispatch(interval=READ_INTERVAL)


disktemp = DiskTemp()

collectd.register_config(disktemp.config)
collectd.register_init(disktemp.init)
collectd.register_read(disktemp.read, READ_INTERVAL)
            fields = status.split()
            ndx = fields.index('Temperature:')
            temperature = float(fields[ndx + 1])
            frequency = float(fields[ndx + 16])
            signal_noise = float(fields[ndx + 41][6:])
            signal_power = float(fields[ndx + 49][6:])
            self.submit('temperature', 'gauge', temperature)
            self.submit('frequency', 'gauge', frequency)
            self.submit('signal_noise', 'gauge', signal_noise)
            self.submit('signal_power', 'gauge', signal_power)

    def config(self, obj):
        """
        Get the configuration from collectd
        """
        for child in obj.children:
            if child.key == 'Debug':
                self.debug = True
            elif child.key == 'SerialDevice':
                self.device = child.values[0]
                self.set_plugin()
            elif child.key == 'SerialSpeed':
                self.speed = int(child.values[0])

        collectd.info('SierraSerial: configuration')


sierra = SierraSerial()
collectd.register_config(sierra.config)
collectd.register_read(sierra.get_value)
Example #34
                self.dispatch_value(self.plugin_name, host, "mod_dl_dst_n_bytes", key, value[0])
                self.dispatch_value(self.plugin_name, host, "mod_dl_dst_n_packets", key, value[1])
        except Exception as exp:
            self.log_verbose(traceback.format_exc())
            self.log_verbose("plugin %s ran into an exception" % self.plugin_name)
            self.log_verbose(exp.message)


if __name__ == '__main__':
    stat_1_1 = parse_table_1(1)
    stat_6_1 = parse_table_6(6)
    print '***********'
    print stat_1_1
    print stat_6_1
    import time
    time.sleep(5)
    stat_1_2 = parse_table_1(1)
    stat_6_2 = parse_table_6(6)
    print '***********'
    print stat_1_2
    print stat_6_2

    print '***********'
    print get_delta_value(stat_1_1, stat_1_2)
    print get_delta_value(stat_6_1, stat_6_2)
else:
    import collectd
    vrouter_status = VRouterTrafficStatMon()
    collectd.register_config(vrouter_status.configure_callback)
    collectd.register_init(vrouter_status.init)
    collectd.register_read(vrouter_status.read_callback)
Example #35
                    private += int(line.split()[1])
                elif line.startswith("Pss"):
                    pss += 0.5 + float(line.split()[1])

            F.close()

            if pss > 0:
                shared = pss - private

            M.values = [1024 * int(private + shared)]  # in bytes

        else:
            # rough, but quick estimate
            # (the `with` statement is standard from Python 2.6 onwards)
            with open("/proc/%s/statm" % pid, "rt") as statm_file:
                statm = statm_file.readline().split()

            shared = int(statm[2]) * PAGESIZE
            rss = int(statm[1]) * PAGESIZE
            private = rss - shared
            M.values = [int(private) + int(shared)]

        M.dispatch()


collectd.register_config(config_memory)
collectd.register_init(init_memory)
collectd.register_read(read_memory)
Example #36
            global memory
            memory = val
        else:
            collectd.info('unknown config key %s for plugin mem' % key)


def fake_mem_read():

    plugin_instance_l = [
        'used', 'buffered', 'cached', 'free', 'slab_recl', 'slab_unrecl'
    ]

    for temp_instance in plugin_instance_l:
        val = collectd.Values(host=instance,
                              plugin='memory',
                              plugin_instance=temp_instance,
                              type='memory')
        if temp_instance == 'used':
            used_values = random.randint(0, (int(memory) / 2) * 1024 * 1024)
            val.dispatch(values=[used_values])
        elif temp_instance == 'free':
            # relies on 'used' being processed before 'free' in the list above
            free_values = int(memory) * 1024 * 1024 - used_values
            val.dispatch(values=[free_values])
        else:
            values = 0
            val.dispatch(values=[values])


collectd.register_config(fake_mem_config)
collectd.register_read(fake_mem_read)
Example #37
        values=[pg_json["pg_stats_sum"]["stat_sum"]["num_objects"]]\
    ).dispatch()
    collectd.Values(plugin="cephtool",\
        type='pg_stats_sum_num_bytes',\
        values=[pg_json["pg_stats_sum"]["stat_sum"]["num_bytes"]]\
    ).dispatch()
    collectd.Values(plugin="cephtool",\
        type='num_objects_missing_on_primary',\
        values=[pg_json["pg_stats_sum"]["stat_sum"]["num_objects_missing_on_primary"]]\
    ).dispatch()
    collectd.Values(plugin="cephtool",\
        type='num_objects_degraded',\
        values=[pg_json["pg_stats_sum"]["stat_sum"]["num_objects_degraded"]]\
    ).dispatch()
    collectd.Values(plugin="cephtool",\
        type='num_objects_unfound',\
        values=[pg_json["pg_stats_sum"]["stat_sum"]["num_objects_unfound"]]\
    ).dispatch()

    collectd.Values(plugin="cephtool",\
        type='num_monitors',\
        values=[len(mon_json["mons"])],
    ).dispatch()
    collectd.Values(plugin="cephtool",\
        type='num_monitors_in_quorum',\
        values=[len(mon_json["quorum"])],
    ).dispatch()

collectd.register_config(cephtool_config)
collectd.register_read(cephtool_read)
Example #38
                    job_hostsite = "undefined"
            except ValueError:
                collectd.info("pandajob module: current job is \'" + job + "\' (hostsite)")
                job_hostsite = "undefined"

            if job_hostsite not in metrics:
                metrics[job_hostsite] = {
                    'pending': 0, 'defined': 0, 'waiting': 0, 'assigned': 0,
                    'throttled': 0, 'activated': 0, 'sent': 0, 'starting': 0,
                    'running': 0, 'holding': 0, 'merging': 0,
                    'transferring': 0, 'finished': 0, 'failing': 0,
                    'failed': 0, 'cancelled': 0,
                }


            # Increment appropriate job status
            try:
                job_status = p[p.index('"jobstatus":')+1][1:-2].lower()
                metrics[job_hostsite][job_status] += 1
            except ValueError:
                collectd.info("pandajob module: current job is '" + job + "' (status)")
                job_status = "undefined"

        # Send the data back to collectd
        for hostsite in metrics:
            for status in metrics[hostsite]:
                val = collectd.Values(type='gauge', host='pandajobs',
                                      plugin=queue, plugin_instance=hostsite,
                                      time=timestamp, type_instance=status)
                val.dispatch(values=[metrics[hostsite][status]])

# Collectd register callback functions
collectd.register_config(config_pandajob_plugin)
collectd.register_init(init_pandajob_plugin)
collectd.register_read(read_panda_jobs, 3600)  # 3600: only poll once an hour
Example #39
                    new_value = value - old_value

                if (isinstance(new_value, (float, int)) and
                        data['differentiate_values_over_time']):
                    interval = time - old_time
                    if interval < 1:
                        interval = 1
                    new_value = new_value / interval

            # update previous value
            data['values'][metric] = (time, value)

        else:
            new_value = value

        if new_value is not None:
            line = '%s %f %d' % (metric, new_value, time)
            lines.append(line)

        i += 1

    data['lock'].release()

    lines.append('')
    carbon_write_data(data, '\n'.join(lines))

collectd.register_config(carbon_config)
collectd.register_init(carbon_init)

Example #40
            identifier += '/' + self.type
            if getattr(self, 'type_instance', None):
                identifier += '-' + self.type_instance
            print 'PUTVAL', identifier, \
                  ':'.join(map(str, [int(self.time)] + self.values))

    class ExecCollectd:
        def Values(self):
            return ExecCollectdValues()

        def warning(self, msg):
            print 'WARNING:', msg

        def info(self, msg):
            print 'INFO:', msg

    collectd = ExecCollectd()
    plugin = DockerPlugin()
    if len(sys.argv) > 1:
        plugin.docker_url = sys.argv[1]

    if plugin.init_callback():
        plugin.read_callback()

# Normal plugin execution via CollectD
else:
    import collectd
    plugin = DockerPlugin()
    collectd.register_config(plugin.configure_callback)
    collectd.register_init(plugin.init_callback)
Example #41
    parser.add_option("","--plugin-name", default="RabbitMQ")
    parser.add_option("-H", "--host", default="localhost",
        help="RabbitMQ hostname. Default localhost")
    parser.add_option("-P", "--port", default=55672,
        help="RabbitMQ Admin port. Default 55672.")
    parser.add_option("", "--username", default="guest",
        help="Username credential. Default guest.")
    parser.add_option("", "--password", default="guest",
        help="Password credential. Default guest.")
    parser.add_option("", "--vhost", default="/",
        help="Virtual host. Default /")

    opts, arg_files = parser.parse_args()

    CONFIGS = [{
        'plugin_name': opts.plugin_name,
        'host': opts.host,
        'admin_port': opts.port,
        'vhost': opts.vhost,
        'username': opts.username,
        'password': opts.password,
    }]
    co = RabbitMQ()
    co.check_run_multi_config()
    if co.debug_info:
        str_json = json.dumps(METRICS_RESULTS)
        print str_json


if not DEBUG:
    collectd_rabbitMQ = RabbitMQ()
    collectd.register_config(collectd_rabbitMQ.multi_config)
    collectd.register_read(collectd_rabbitMQ.check_run_multi_config)
    collectd.register_write(collectd_rabbitMQ.write)

if __name__ == '__main__':
    main()
Example #42
            # Intentionally *not* wrapped in a try/except so that an
            # exception here causes collectd to slow down trying to write
            # stats.
            client.gauge(path, value)
        else:
            # No statsd client, be noisy
            message = 'Statsd client is None, not sending metrics!'
            collectd.warning(message)
            # Raise an exception so we aren't *too* noisy.
            raise RuntimeError(message)


def get_stats_writer(plugin):
    """
    Returns a writer function for the given plugin. If no custom writer
    function is defined, the default write_stats function is returned.
    """
    return globals().get('write_%s' % plugin, write_stats)


def statsd_write(values, data=None):
    """
    Entry point from collectd. Dispatches to a custom writer for the
    plugin, if one exists, or calls the default writer.
    """
    writer = get_stats_writer(values.plugin)
    return writer(values, data['types'], client=data['stats'])


collectd.register_config(configure)
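Because get_stats_writer looks up write_<plugin> in the module globals, adding a custom writer is just a matter of defining a function with a matching name and the same signature statsd_write dispatches with. A hypothetical example (write_cpu is not part of the original):

def write_cpu(values, types, client=None):
    # Custom writer automatically picked up for the 'cpu' plugin.
    for v in values.values:
        client.gauge('cpu.%s' % values.type_instance, v)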
                if v not in clist:
                    clist.append(v)

def enhanceio_read(data=None):
    if not len(clist):
        return

    values = collectd.Values(plugin='enhanceio')

    for cache in clist:
        d = {}
        procfile = '/proc/enhanceio/' + cache + '/stats'
        with open(procfile, 'r') as fd:
            d = dict(line.strip().split(None, 1) for line in fd)
            for key in d:
                if key in ['read_hit_pct', 'write_hit_pct', 'dirty_write_hit_pct']:
                    values.dispatch(plugin_instance=cache, type='percent', type_instance=key, values=[d[key]])
                elif key in ['nr_blocks', 'nr_dirty', 'nr_sets']:
                    values.dispatch(plugin_instance=cache, type='gauge', type_instance=key, values=[d[key]])
                else:
                    values.dispatch(plugin_instance=cache, type='derive', type_instance=key, values=[d[key]])

            values.dispatch(plugin_instance=cache, type='disk_ops', type_instance='enhanceio_ssd_rw', values=[d['ssd_reads'], d['ssd_writes']])
            values.dispatch(plugin_instance=cache, type='df', type_instance='df_cache', values=[d['cached_blocks'], str(int(d['nr_blocks']) - int(d['cached_blocks']))])

collectd.register_read(enhanceio_read)
collectd.register_config(enhanceio_config)

Example #44
            "ReceivedBytes": 0,
            "RpcAuthorizationFailures": 0,
            "NumOpenConnections": 0,
            "RpcAuthorizationSuccesses": 0,
            "name": "Hadoop:service=NameNode,name=RpcActivityForPort8020",
            "RpcProcessingTimeNumOps": 0,
            "RpcQueueTimeNumOps": 0,
            "time": 0,
            "CallQueueLength": 0
        }]
        for doc in docs:
            self.add_common_params(doc, doc['_documentType'])
            write_json.write(doc)

    def read(self):
        self.collect_data()

    def read_temp(self):
        """
        Collectd first calls register_read. At that time default interval is taken,
        hence temporary function is made to call, the read callback is unregistered
        and read() is called again with interval obtained from conf by register_config callback.
        """
        collectd.unregister_read(self.read_temp)  # pylint: disable=E1101
        collectd.register_read(self.read, interval=int(self.interval))  # pylint: disable=E1101


namenodeinstance = Namenode()
collectd.register_config(namenodeinstance.read_config)  # pylint: disable=E1101
collectd.register_read(namenodeinstance.read_temp)  # pylint: disable=E1101
            }

        if ds_type == 'GAUGE':
            gauges.append(measurement)
        else:
            counters.append(measurement)

    librato_queue_measurements(gauges, counters, data)

def librato_init():
    import threading

    try:
        librato_parse_types_file(config['types_db'])
    except Exception:
        msg = '%s: ERROR: Unable to open TypesDB file: %s.' % \
              (plugin_name, config['types_db'])
        raise Exception(msg)

    d = {
        'lock' : threading.Lock(),
        'last_flush_time' : get_time(),
        'gauges' : [],
        'counters' : []
        }

    collectd.register_write(librato_write, data=d)

collectd.register_config(librato_config)
collectd.register_init(librato_init)
Example #46
    def request(self, service_id, time_from, time_to):
        """
        Requests stats from Fastly's API and return a dict of data. May
        contain multiple time periods.
        """
        params = {
            'from': time_from,
            'to': time_to,
            'by': "minute",
        }
        url = self.STATS_URL % {
            'service_id': service_id,
        }
        headers = {
            'Fastly-Key': self.api_key,
        }

        self.auth()
        resp = self.session.get(url, params=params, timeout=self.api_timeout)
        if resp.status_code != 200:
            self._raise("Non-200 response")

        data = resp.json()['data']
        return data


cdn_fastly = CdnFastly()
collectd.register_config(cdn_fastly.config)
collectd.register_read(cdn_fastly.read, INTERVAL)
Example #47
def restore_sigchld():
    """
    Restore SIGCHLD handler for python <= v2.6
    It will BREAK exec plugin!!!
    See https://github.com/deniszh/collectd-iostat-python/issues/2 for details
    """
    if sys.version_info[0] == 2 and sys.version_info[1] <= 6:
        signal.signal(signal.SIGCHLD, signal.SIG_DFL)


if __name__ == '__main__':
    iostat = IOStat()
    ds = iostat.get_diskstats()

    for disk in ds:
        for metric in ds[disk]:
            tbl = string.maketrans('/-%', '___')
            metric_name = metric.translate(tbl)
            print("%s.%s:%s" % (disk, metric_name, ds[disk][metric]))

    sys.exit(0)
else:
    import collectd

    iomon = IOMon()

    # Register callbacks
    collectd.register_init(restore_sigchld)
    collectd.register_config(iomon.configure_callback)
Example #48
            with_pos += 1
            if rlat is not None:
                distance = greatcircle(rlat, rlon, a['lat'], a['lon'])
                if distance > max_range:
                    max_range = distance
            if 'lat' in a.get('mlat', ()):
                mlat += 1

    V.dispatch(plugin_instance=instance_name,
               host=host,
               type='dump1090_aircraft',
               type_instance='recent',
               time=aircraft_data['now'],
               values=[total, with_pos])
    V.dispatch(plugin_instance=instance_name,
               host=host,
               type='dump1090_mlat',
               type_instance='recent',
               time=aircraft_data['now'],
               values=[mlat])

    if max_range > 0:
        V.dispatch(plugin_instance=instance_name,
                   host=host,
                   type='dump1090_range',
                   type_instance='max_range',
                   time=aircraft_data['now'],
                   values=[max_range])


collectd.register_config(callback=handle_config, name='dump1090')
Example #49
    return


def flush_cb(timeout, identifier, data=None):
    return

def log_cb(severity, message, data=None):
    return


## Register the call-back functions

data = "stub-string"         # placeholder
name = init_cb.__module__    # the default
interval = 10                # the default

collectd.register_config(config_cb, data, name)
collectd.register_init(init_cb, data, name)
collectd.register_shutdown(shutdown_cb, data, name)

collectd.register_read(read_cb, interval, data, name)
collectd.register_write(write_cb, data, name)
collectd.register_notification(notification_cb, data, name)

collectd.register_flush(flush_cb, data, name)
collectd.register_log(log_cb, data, name)

## Local Variables:
## mode: python
## End:
Example #50
                obj.NETWORKS[NETWORK_OAM] = iface(NETWORK_OAM, name, 0)
                collectd.info("%s monitoring oam interface: %s" %
                              (PLUGIN,
                               obj.NETWORKS[NETWORK_OAM].master['name']))

    return 0


# The sample read function - called on every audit interval
def read_func():

    if obj.NETWORKS[NETWORK_MGMNT].state == 0:
        obj.NETWORKS[NETWORK_MGMNT].state = 100
    else:
        obj.NETWORKS[NETWORK_MGMNT].state -= 25

    # Dispatch usage value to collectd
    val = collectd.Values(host=obj.hostname)
    val.plugin = 'interface'
    val.plugin_instance = 'mgmnt'
    val.type = 'absolute'
    val.type_instance = 'used'
    val.dispatch(values=[obj.NETWORKS[NETWORK_MGMNT].state])
    return 0


# register the config, init and read functions
collectd.register_config(config_func)
collectd.register_init(init_func)
collectd.register_read(read_func)
Example #51
        self.dispatch_failed()

    def dispatch_failed(self):
        type_instance = "failed"
        value = int(self.redis_client.get("resque:stat:failed"))
        val = collectd.Values(plugin=PLUGIN_NAME, type_instance=type_instance, values=[value], type="gauge")
        self.info("Sending value: %s=%s" % (type_instance, value))
        val.dispatch()

    def dispatch_queue_sized(self):
        for key in self.queues():
            value = self.queue_size(key)
            type_instance = "queue-%s" % (key)
            val = collectd.Values(plugin=PLUGIN_NAME, type_instance=type_instance, values=[value], type="gauge")
            self.info("Sending value: %s=%s" % (type_instance, value))
            val.dispatch()

    def queues(self):
        return self.redis_client.smembers("resque:queues") - set("*",)

    def queue_size(self, key):
        return self.redis_client.llen("resque:queue:%s" % key)

    def info(self, message):
        if self.verbose:
            collectd.info("solr plugin [verbose]: %s" % (message))

monitor = ResqueMonitor()
collectd.register_config(monitor.configure)
collectd.register_read(monitor.read)
Example #52
                json += '}'

                json += '}'
            i += 1

        json += ']'

        collectd.debug(json)
        self.kairosdb_send_http_data(data, json)

    @staticmethod
    def load_plugin_formatters(formatter_directory):
        if os.path.exists(formatter_directory):
            plugins_to_format = {}
            for filename in os.listdir(formatter_directory):
                if filename.endswith(".py"):
                    formatter_name, extension = os.path.splitext(filename)
                    plugin_formatter = imp.load_source(
                        formatter_name, formatter_directory + "/" + filename)
                    for plugin in plugin_formatter.plugins():
                        plugins_to_format[plugin] = plugin_formatter

            return plugins_to_format
        else:
            return {}


writer = KairosdbWriter()
collectd.register_config(writer.kairosdb_config)
collectd.register_init(writer.kairosdb_init)
Example #53
            if i > 0:
                json += ','

            json += '{'
            json += '"name":"%s",' % new_name
            json += '"datapoints":[[%d, %f]],' % (timestamp, new_value)
            json += '"tags": {'

            first = True
            for tn, tv in tags.iteritems():
                if first:
                    first = False
                else:
                    json += ", "

                json += '"%s": "%s"' % (tn, tv)
                
            json += '}'

            json += '}'
        i += 1

    json += ']'

    collectd.debug(json)
    kairosdb_send_http_data(data, json)


collectd.register_config(kairosdb_config)
collectd.register_init(kairosdb_init)
Example #54
        dispatch_value('variables', key, mysql_variables[key], 'gauge')

    mysql_master_status = fetch_mysql_master_stats(conn)
    for key in mysql_master_status:
        dispatch_value('master', key, mysql_master_status[key], 'gauge')

    mysql_states = fetch_mysql_process_states(conn)
    for key in mysql_states:
        dispatch_value('state', key, mysql_states[key], 'gauge')

    slave_status = fetch_mysql_slave_stats(conn)
    for key in slave_status:
        dispatch_value('slave', key, slave_status[key], 'gauge')

    response_times = fetch_mysql_response_times(conn)
    for key in response_times:
        dispatch_value('response_time_total', str(key),
                       response_times[key]['total'], 'counter')
        dispatch_value('response_time_count', str(key),
                       response_times[key]['count'], 'counter')

    innodb_status = fetch_innodb_stats(conn)
    for key in MYSQL_INNODB_STATUS_VARS:
        if key not in innodb_status: continue
        dispatch_value('innodb', key, innodb_status[key],
                       MYSQL_INNODB_STATUS_VARS[key])


collectd.register_read(read_callback)
collectd.register_config(configure_callback)
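The dispatch_value helper called throughout this example is defined earlier in the plugin and not shown. A minimal sketch consistent with the call sites above (the signature and plugin name are assumptions):

def dispatch_value(instance, key, value, value_type):
    # e.g. dispatch_value('slave', 'Seconds_Behind_Master', 12, 'gauge')
    val = collectd.Values(plugin='mysql', plugin_instance=instance,
                          type=value_type, type_instance=key)
    val.dispatch(values=[value])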
Example #55
        metrics_list = list(metrics(vl, config))
        ts = datetime.fromtimestamp(vl.time)
        data = []

        for i, v in enumerate(vl.values):
            fullname, unit, dims = metrics_list[i]
            name = fullname[:255]
            if len(name) < len(fullname):
                collectd.warning('Metric name was truncated for CloudWatch: {}'.format(fullname))

            data.append(dict(
                MetricName=name,
                Timestamp=ts,
                Value=v,
                Unit=unit,
                Dimensions=dims
            ))

        client.put_metric_data(Namespace=vl.plugin, MetricData=data)
    except Exception as e:
        collectd.error(str(e))

def plugin_init():
    collectd.info('Initializing write_cloudwatch')
    signal.signal(signal.SIGCHLD, signal.SIG_DFL)

config = Config()
collectd.register_config(plugin_config, config)
collectd.register_init(plugin_init)
collectd.register_write(plugin_write, config)
Example #56
        """Collects all data."""
        start_time = datetime.datetime.now()
        dict_linux = self.collect_data()
        end_time = datetime.datetime.now()
        elapsed_time = end_time - start_time
        if not dict_linux:
            return

        # dispatch data to collectd
        dict_linux["elapsed_time"] = elapsed_time.microseconds
        self.dispatch_data(dict_linux)

    def read_temp(self):
        """
        Collectd first calls register_read. At that time default interval is taken,
        hence temporary function is made to call, the read callback is unregistered
        and read() is called again with interval obtained from conf by register_config callback.
        """
        collectd.unregister_read(self.read_temp)
        collectd.register_read(self.read, interval=int(self.interval))


def init():
    """When new process is formed, action to SIGCHLD is reset to default behavior."""
    signal.signal(signal.SIGCHLD, signal.SIG_DFL)


OBJ = LinuxStats()
collectd.register_config(OBJ.config)
collectd.register_read(OBJ.read_temp)
        except Exception as exp:
            self.log_verbose(traceback.format_exc())
            self.log_verbose("plugin %s ran into an exception" % (self.plugin_name))
            self.log_verbose(exp.message)


if __name__ == '__main__':
    print "************\n"
    # print str(get_intergrp_stat())
    # print "***********\n"
    # print str(get_internet_stat())
    # print "************\n"
    # print str(get_intragrp_stat())
    # print "**********\n"
    # print str(get_policy_stat())
    # print '------------------\n'
    a = get_internet_stat()
    print a
    import time
    time.sleep(10)
    print '------------------\n'
    b = get_internet_stat()
    c = get_delta_value(a, b)
    print '---delta value----'
    print c
else:
    import collectd
    firewall_status = FireWallUserStatMon()
    collectd.register_config(firewall_status.configure_callback)
    collectd.register_init(firewall_status.init)
    collectd.register_read(firewall_status.read_callback)
Example #58
    collectd.info('fusionio init: stats_cache: %s ' % (stats_cache))


def reader(input_data=None):
    get_fiostats()
    collect_fiostats()
    #dispatch_metrics()
    swap_current_cache()


def writer(metric, data=None):
    for i in metric.values:
        collectd.debug("%s (%s): %f" % (metric.plugin, metric.type, i))


def shutdown():
    collectd.info("fusionio plugin shutting down")


#== Callbacks ==#
get_host_type()
if host_type == 'search' and os_name == 'Linux' and is_fio_device():
    collectd.register_config(configer)
    collectd.register_init(initer)
    collectd.register_read(reader)
    collectd.register_write(writer)
    collectd.register_shutdown(shutdown)
else:
    collectd.error('fio plugin works for search hosts only; type: %s os: %s' %
                   (host_type, os_name))
Example #59
        for snapshot in snapshots:
            try:
                tenant_id = getattr(snapshot, 'os-extended-snapshot-attributes:project_id')
            except AttributeError:
                continue
            try:
                data_tenant = data[self.prefix]["tenant-%s" % tenants[tenant_id]]
            except KeyError:
                continue
            data_tenant['volume-snapshots']['count'] += 1
            data_tenant['volume-snapshots']['bytes'] += (snapshot.size * 1024 * 1024 * 1024)

        return data

try:
    plugin = CinderPlugin()
except Exception as exc:
    collectd.error("openstack-cinder: failed to initialize cinder plugin :: %s :: %s"
            % (exc, traceback.format_exc()))

def configure_callback(conf):
    """Received configuration information"""
    plugin.config_callback(conf)

def read_callback():
    """Callback triggerred by collectd on read"""
    plugin.read_callback()

collectd.register_config(configure_callback)
collectd.register_read(read_callback, plugin.interval)
Example #60
                            self.kafka_client)
                        self.producer.send_messages(self.kafka_topic, data)
        except Exception as exp:
            self.log_verbose(traceback.format_exc())
            self.log_verbose("plugin %s ran into an exception" %
                             (self.plugin_name))
            self.log_verbose(exp.message)
            raise Exception("Failed to send msg to kafka: %s\n%s" %
                            (exp.message, traceback.format_exc()))

    def _escape_proc_name(self, proc):
        if str(proc) == '0':
            return '0'
        return (proc.replace('[', '').replace(']', '')
                    .replace('/', '.').replace('-', '_'))


if __name__ == '__main__':
    # process_status = ProcessStatus("pyth[on], /usr/bin/ss[hd], ja[va]")
    producer = create_kafka_producer("172.17.8.113:9092")
    producer.send_messages('dms-event', b'this is for test process')
    print 'aa'
else:
    import collectd
    process_status_mon = ProcessStatusMon()
    collectd.register_config(process_status_mon.configure_callback)
    collectd.register_init(process_status_mon.init)
    collectd.register_read(process_status_mon.read_callback)