def cephtool_read_osd(osd_json):
    num_in = 0
    num_up = 0
    total = 0
    for osd in osd_json:
        total = total + 1
        if osd["in"] == 1:
            num_in = num_in + 1
        if osd["up"] == 1:
            num_up = num_up + 1
    collectd.Values(plugin="cephtool",
                    type="num_osds_in",
                    values=[num_in],
                    ).dispatch()
    collectd.Values(plugin="cephtool",
                    type="num_osds_out",
                    values=[total - num_in],
                    ).dispatch()
    collectd.Values(plugin="cephtool",
                    type="num_osds_up",
                    values=[num_up],
                    ).dispatch()
    collectd.Values(plugin="cephtool",
                    type="num_osds_down",
                    values=[total - num_up],
                    ).dispatch()
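# Hedged usage sketch (an assumption, not part of the snippet above): the
# osd_json argument could come from `ceph osd dump --format=json`, whose
# "osds" array carries 0/1 "up" and "in" flags per OSD.
import json
import subprocess

def cephtool_read(data=None):
    out = subprocess.check_output(["ceph", "osd", "dump", "--format=json"])
    cephtool_read_osd(json.loads(out.decode("utf-8"))["osds"])

collectd.register_read(cephtool_read)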
def read(data=None):
    global base_url
    global headers
    req = urllib.request.Request(f'{base_url}/api/status/inputs', headers=headers)
    data = json.loads(urllib.request.urlopen(req).read().decode('utf-8'))
    for entry in data['entries']:
        input = entry['input']
        if entry['signal_scale'] == 1:
            signal_pct = entry['signal'] / 65535.0
            collectd.Values(type='gauge').dispatch(values=[signal_pct], plugin='tvheadend_input',
                                                   plugin_instance=input, type_instance='signal_pct')
        if entry['signal_scale'] == 2:
            signal_db = entry['signal'] / 1000.0
            collectd.Values(type='gauge').dispatch(values=[signal_db], plugin='tvheadend_input',
                                                   plugin_instance=input, type_instance='signal_db')
        if entry['snr_scale'] == 1:
            snr_pct = entry['snr'] / 65535.0
            collectd.Values(type='gauge').dispatch(values=[snr_pct], plugin='tvheadend_input',
                                                   plugin_instance=input, type_instance='snr_pct')
        if entry['snr_scale'] == 2:
            snr_db = entry['snr'] / 1000.0
            collectd.Values(type='gauge').dispatch(values=[snr_db], plugin='tvheadend_input',
                                                   plugin_instance=input, type_instance='snr_db')
        for var_name in ('ber', 'bps', 'cc', 'ec_bit', 'ec_block', 'subs',
                         'tc_bit', 'tc_block', 'te', 'unc', 'weight'):
            v = entry.get(var_name, None)
            if v is not None:
                collectd.Values(type='gauge').dispatch(values=[v], plugin='tvheadend_input',
                                                       plugin_instance=input, type_instance=var_name)
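# The base_url and headers globals above are assumed to be populated by a
# collectd <Module> config callback along these lines. Option names
# ("BaseUrl", "User", "Password") are illustrative assumptions, not the
# plugin's documented config; Tvheadend's HTTP API accepts Basic auth.
import base64

base_url = 'http://localhost:9981'
headers = {}

def config(conf):
    global base_url, headers
    user = password = None
    for node in conf.children:
        if node.key == 'BaseUrl':
            base_url = node.values[0]
        elif node.key == 'User':
            user = node.values[0]
        elif node.key == 'Password':
            password = node.values[0]
    if user and password:
        token = base64.b64encode(('%s:%s' % (user, password)).encode()).decode()
        headers['Authorization'] = 'Basic ' + token

collectd.register_config(config)
collectd.register_read(read)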
def read(data=None):
    starttime = time.time()
    auth = v2.Password(username=os_username, password=os_password,
                       tenant_name=os_tenant, auth_url=os_auth_url)
    sess = session.Session(auth=auth)
    gnocchi = client.Client(session=sess)
    status = gnocchi.status.get()

    metric = collectd.Values()
    metric.plugin = 'gnocchi_status'
    metric.interval = INTERVAL
    metric.type = 'gauge'
    metric.type_instance = 'measures'
    metric.values = [status['storage']['summary']['measures']]
    metric.dispatch()

    metric = collectd.Values()
    metric.plugin = 'gnocchi_status'
    metric.interval = INTERVAL
    metric.type = 'gauge'
    metric.type_instance = 'metrics'
    metric.values = [status['storage']['summary']['metrics']]
    metric.dispatch()

    timediff = time.time() - starttime
    if timediff > INTERVAL:
        collectd.warning('gnocchi_status: Took: {} > {}'.format(
            round(timediff, 2), INTERVAL))
def read(data=None):
    starttime = time.time()
    gnocchi = client.Client(session=keystone_session)
    try:
        status = gnocchi.status.get()

        metric = collectd.Values()
        metric.plugin = 'gnocchi_status'
        metric.interval = INTERVAL
        metric.type = 'gauge'
        metric.type_instance = 'measures'
        metric.values = [status['storage']['summary']['measures']]
        metric.dispatch()

        metric = collectd.Values()
        metric.plugin = 'gnocchi_status'
        metric.interval = INTERVAL
        metric.type = 'gauge'
        metric.type_instance = 'metrics'
        metric.values = [status['storage']['summary']['metrics']]
        metric.dispatch()
    except Exception as err:
        collectd.error(
            'gnocchi_status: Exception getting status: {}'.format(err))

    timediff = time.time() - starttime
    if timediff > INTERVAL:
        collectd.warning('gnocchi_status: Took: {} > {}'.format(
            round(timediff, 2), INTERVAL))
def read():
    # Read values
    global sht21, lock_handle
    try:
        if lock_handle:
            flock(lock_handle, LOCK_EX)
        temperature = sht21.read_temperature()
        humidity = sht21.read_humidity()
        if lock_handle:
            flock(lock_handle, LOCK_UN)
    except IOError as e:
        collectd.error('sht21 plugin: Could not read sensor data: %s' % e)
        return

    # Calculate values
    try:
        dewpoint = td(temperature, humidity)
    except ValueError as e:
        collectd.error('sht21 plugin: Could not calculate dew point: %s' % e)
        dewpoint = 0
    absolute_humidity = ah(temperature, humidity)

    # Dispatch values
    v_tmp = collectd.Values(plugin='sht21', type='temperature', type_instance='current')
    v_tmp.dispatch(values=[temperature])
    v_hum = collectd.Values(plugin='sht21', type='humidity', type_instance='relative_humidity')
    v_hum.dispatch(values=[humidity])
    v_abs = collectd.Values(plugin='sht21', type='gauge', type_instance='absolute_humidity')
    v_abs.dispatch(values=[absolute_humidity])
    v_dew = collectd.Values(plugin='sht21', type='temperature', type_instance='dewpoint')
    v_dew.dispatch(values=[dewpoint])
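# td() and ah() are used above but not defined in this snippet. A minimal
# sketch under assumptions: dew point via the Magnus formula (constants
# 17.62 / 243.12 degC) and absolute humidity via the ideal gas law; the
# plugin's real helpers may differ.
import math

def td(temperature, humidity):
    """Dew point (degC) from temperature (degC) and relative humidity (%)."""
    if humidity <= 0:
        raise ValueError('relative humidity must be > 0')
    gamma = math.log(humidity / 100.0) + (17.62 * temperature) / (243.12 + temperature)
    return 243.12 * gamma / (17.62 - gamma)

def ah(temperature, humidity):
    """Absolute humidity (g/m^3) from temperature (degC) and relative humidity (%)."""
    svp = 6.112 * math.exp((17.62 * temperature) / (243.12 + temperature))  # hPa
    return 216.7 * (svp * humidity / 100.0) / (273.15 + temperature)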
def read_func():
    total = 0
    domain_counter = {}
    for d in domains:
        domain_counter[d] = 0
    records = utmp.UtmpRecord()
    for rec in records:
        if rec.ut_type == USER_PROCESS:
            host = rec.ut_host
            for d in domains:
                collectd.debug("HERE: %s %s" % (host, d))
                if host.endswith(d):
                    collectd.debug('Matches')
                    domain_counter[d] = domain_counter[d] + 1
                    total = total + 1
    records.endutent()

    datapoint = collectd.Values(plugin='sessions')
    datapoint.type = 'count'
    datapoint.type_instance = 'total_sessions'
    datapoint.values = [total]
    collectd.debug('Dispatching a value of %s for total sessions' % total)
    datapoint.dispatch()

    for d in domains:
        datapoint = collectd.Values(plugin='sessions')
        datapoint.type = 'count'
        datapoint.type_instance = d
        datapoint.values = [domain_counter[d]]
        collectd.debug('Dispatching a value of %s for domain sessions %s'
                       % (domain_counter[d], d))
        datapoint.dispatch()
def read_callback():
    logger("verb", "read_callback")
    info = get_stats(OS_USERNAME, OS_PASSWORD, OS_TENANT_NAME, OS_AUTH_URL)
    if not info:
        logger("err", "No information received")
        return
    for key in info.keys():
        if key == "tenant":
            for uuid in info[key].keys():
                for field in info[key][uuid]:
                    logger('verb', 'Dispatching glance.images.tenant.%s.%s : %i'
                           % (uuid, field, int(info[key][uuid][field])))
                    path = 'glance.images.%s.%s' % (uuid, field)
                    val = collectd.Values(plugin=path)
                    val.type = 'gauge'
                    val.values = [int(info[key][uuid][field])]
                    val.dispatch()
        else:
            logger('verb', 'Dispatching %s : %i' % (key, int(info[key])))
            path = 'glance.images.%s' % (key)
            val = collectd.Values(plugin=path)
            val.type = 'gauge'
            val.values = [int(info[key])]
            val.dispatch()
def read(plugin_conf):
    sval = None
    hval = None
    if BAD_CONFIG in plugin_conf:
        val = 1
        collectd.Values(plugin=PLUGIN_NAME,
                        type_instance='plugin.conf.error',
                        type=TYPE,
                        values=[val]).dispatch()
        log('Invalid config keys found. Will not collect metrics')
        return
    if 'URL' in plugin_conf:
        sval, hval = _get_health_status(plugin_conf)
    if sval is not None and hval is not None:
        collectd.Values(plugin=PLUGIN_NAME,
                        type_instance='service.health.status',
                        plugin_instance=plugin_conf.get('Instance'),
                        type=TYPE,
                        values=[sval]).dispatch()
        collectd.Values(plugin=PLUGIN_NAME,
                        type_instance='service.health.value',
                        plugin_instance=plugin_conf.get('Instance'),
                        type=TYPE,
                        values=[hval]).dispatch()
def read():
    # Read values
    with open(DEV_TMP.format(HWMON), 'rb') as f:
        val = f.read().strip()
        temperature = float(int(val)) / 1000
    with open(DEV_HUM.format(HWMON), 'rb') as f:
        val = f.read().strip()
        humidity = float(int(val)) / 1000

    # Calculate values
    try:
        dewpoint = td(temperature, humidity)
    except ValueError as e:
        collectd.error('shtc3 plugin: Could not calculate dew point: %s' % e)
        dewpoint = 0
    absolute_humidity = ah(temperature, humidity)

    # Dispatch values
    v_tmp = collectd.Values(plugin='shtc3', type='temperature', type_instance='temperature')
    v_tmp.dispatch(values=[temperature])
    v_hum = collectd.Values(plugin='shtc3', type='humidity', type_instance='relative_humidity')
    v_hum.dispatch(values=[humidity])
    v_abs = collectd.Values(plugin='shtc3', type='gauge', type_instance='absolute_humidity')
    v_abs.dispatch(values=[absolute_humidity])
    v_dew = collectd.Values(plugin='shtc3', type='temperature', type_instance='dewpoint')
    v_dew.dispatch(values=[dewpoint])
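# Assumed sysfs constants for the read() above (illustrative; the actual
# hwmon node depends on where the kernel shtc3 driver registered). td()
# and ah() are the same dew-point/absolute-humidity helpers sketched
# after the sht21 snippet.
HWMON = 'hwmon0'
DEV_TMP = '/sys/class/hwmon/{}/temp1_input'      # millidegrees Celsius
DEV_HUM = '/sys/class/hwmon/{}/humidity1_input'  # milli-percent relative humidity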
def read():
    """
    This method has been registered as the read callback and will be called
    every polling interval to dispatch metrics.

    We emit three metrics: one gauge, a sine wave; two counters for the
    number of datapoints and notifications we've seen.

    :return: None
    """
    val = sin(time.time() * 2 * pi / 60 * FREQUENCY)
    collectd.Values(plugin=PLUGIN_NAME,
                    type_instance="sine",
                    plugin_instance=PLUGIN_INSTANCE % FREQUENCY,
                    type="gauge",
                    values=[val]).dispatch()
    collectd.Values(plugin=PLUGIN_NAME,
                    type_instance="datapoints",
                    type="counter",
                    values=[DATAPOINT_COUNT]).dispatch()
    collectd.Values(plugin=PLUGIN_NAME,
                    type_instance="notifications",
                    type="counter",
                    values=[NOTIFICATION_COUNT]).dispatch()
    global SEND
    if SEND:
        notif = collectd.Notification(plugin=PLUGIN_NAME,
                                      type_instance="started",
                                      type="objects")  # need a valid type for notification
        notif.severity = 4  # OKAY
        notif.message = "The %s plugin has just started" % PLUGIN_NAME
        notif.dispatch()
        SEND = False
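# A sketch of how this demo plugin could be wired up (the counter globals
# and the write/notification callback bodies are assumptions; only read()
# appears above):
DATAPOINT_COUNT = 0
NOTIFICATION_COUNT = 0
SEND = True

def write(vl, data=None):
    global DATAPOINT_COUNT
    DATAPOINT_COUNT += len(vl.values)

def notification(notif, data=None):
    global NOTIFICATION_COUNT
    NOTIFICATION_COUNT += 1

collectd.register_read(read)
collectd.register_write(write)
collectd.register_notification(notification)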
def read_callback():
    stats = get_all()
    if not stats:
        return

    # blarg, this should be fixed
    for key in stats.keys():
        path = "%s" % key
        value = stats[key]
        if not isinstance(value, dict):
            # must be an int
            val = collectd.Values(plugin=path)
            val.type = 'gauge'
            val.values = [int(value)]
            val.dispatch()
        else:
            # must be a hash
            for subvalue in value.keys():
                path = '%s.%s' % (key, subvalue)
                val = collectd.Values(plugin=path)
                val.type = 'gauge'
                val.values = [int(value[subvalue])]
                val.dispatch()
def read_callback():
    s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    s.connect(SOCK)
    js = ''
    while True:
        data = s.recv(4096)
        if len(data) < 1:
            break
        js += data.decode('utf8')
    s.close()

    dd = json.loads(js)
    worker_statuses = [w['status'] for w in dd['workers']]
    counter = {'total': len(worker_statuses), 'idle': 0, 'busy': 0}
    for status in worker_statuses:
        # uwsgi may report statuses other than idle/busy (e.g. cheap,
        # pause); only count the ones we track.
        if status in counter:
            counter[status] += 1

    idle = collectd.Values(type='gauge')
    idle.plugin = 'pypi_backend.uwsgi.idle'
    idle.dispatch(values=[int(counter['idle'])])
    busy = collectd.Values(type='gauge')
    busy.plugin = 'pypi_backend.uwsgi.busy'
    busy.dispatch(values=[int(counter['busy'])])
    total = collectd.Values(type='gauge')
    total.plugin = 'pypi_backend.uwsgi.total'
    total.dispatch(values=[int(counter['total'])])
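# SOCK is assumed to point at a uwsgi stats server socket, e.g. enabled
# with `stats = /run/uwsgi/stats.sock` in uwsgi.ini; the stats server
# replies with one JSON document and closes the connection, which is why
# the loop above reads until EOF.
SOCK = '/run/uwsgi/stats.sock'  # illustrative path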
def read_callback(data=None):
    global debug
    global doc, pvs, pvstate

    # EPICS PVs
    for pv in pvs:
        if pv.pvname in pvstate:
            if pvstate[pv.pvname]:
                # PV is connected
                pv.get()
                if debug:
                    collectd.info("b2-eclmon: PV get: %s - %d" % (pv.pvname, pv.value))
                if pv.value is not None:
                    # just to be sure...
                    metric = collectd.Values()
                    metric.plugin = doc[pv.pvname]['sector']
                    metric.type = doc[pv.pvname]['measure']
                    metric.plugin_instance = str(doc[pv.pvname]['id'])
                    metric.values = [round(pv.value, doc[pv.pvname]['round'])]
                    metric.meta = {'0': True}
                    metric.dispatch()

    # uSOP board voltages
    with open("/sys/bus/iio/devices/iio:device0/in_voltage7_raw", 'r') as f:
        value_raw = int(f.read())
    value_sys_V = 2 * (1.8 * value_raw) / 4095
    metric = collectd.Values()
    metric.plugin = "board"
    metric.type = "voltage"
    metric.plugin_instance = "1"
    metric.values = [round(value_sys_V, 2)]
    metric.meta = {'0': True}
    metric.dispatch()

    with open("/sys/bus/iio/devices/iio:device0/in_voltage1_raw", 'r') as f:
        value_raw = int(f.read())
    value_bus_V = 2 * (1.8 * value_raw) / 4095
    metric = collectd.Values()
    metric.plugin = "bus"
    metric.type = "voltage"
    metric.plugin_instance = "1"
    metric.values = [round(value_bus_V, 2)]
    metric.meta = {'0': True}
    metric.dispatch()
def read():
    """Read sensor data and dispatch values"""
    bme280.read_data(osrs_t, osrs_p, osrs_h)
    v_temp = collectd.Values(plugin=plugin_name, type='temperature')
    v_temp.dispatch(values=[bme280.get_temperature()])
    v_pres = collectd.Values(plugin=plugin_name, type='pressure')
    v_pres.dispatch(values=[bme280.get_pressure()])
    v_humi = collectd.Values(plugin=plugin_name, type='humidity')
    v_humi.dispatch(values=[bme280.get_humidity()])
def read_callback():
    global MEMCACHED_HOST, TYPES_DB, MEMCACHED_PORTS, COLLECTION_MODE

    # FIXME always check available arcus ports
    MEMCACHED_PORTS = get_memcached_ports()

    for port in MEMCACHED_PORTS:
        try:
            stats, stats_detail = fetch_stat(MEMCACHED_HOST, port)
            if COLLECTION_MODE == 'stat':
                # stats
                for type, entries in TYPES_DB.items():
                    if not type.startswith('arcus_stats'):
                        continue
                    varray = []
                    for dsname in entries['dsnames']:
                        if dsname in stats:
                            varray.append(str_to_num(stats[dsname]))
                        else:
                            varray.append(0)
                    value = collectd.Values(plugin='arcus_stat-%d' % port)
                    value.type = type
                    value.values = varray
                    value.dispatch()
            elif COLLECTION_MODE == 'prefix':
                # stats detail dump
                for prefix, props in stats_detail.items():
                    # TODO refine prefix
                    for type, entries in TYPES_DB.items():
                        if not type.startswith('arcus_prefixes'):
                            continue
                        if type.startswith('arcus_prefixes_meta'):
                            continue
                        varray = []
                        for dsname in entries['dsnames']:
                            if dsname in props:
                                varray.append(str_to_num(props[dsname]))
                            else:
                                varray.append(0)
                        value = collectd.Values(plugin='arcus_prefix-%d' % port)
                        value.type_instance = prefix
                        value.type = type
                        value.values = varray
                        value.dispatch()
                # number of prefixes
                nprefixes = len(stats_detail)
                value = collectd.Values(plugin='arcus_prefix-%d' % port)
                value.type_instance = 'arcus_prefixes_meta'
                value.type = 'arcus_prefixes_meta'
                value.values = [nprefixes]
                value.dispatch()
            else:
                collectd.warning('invalid mode : %s' % COLLECTION_MODE)
        except Exception:
            collectd.error('arcus_stat plugin: %s' % (traceback.format_exc()))
def read(data=None):
    # stats about me :)
    global rows
    global handler
    v1 = collectd.Values(type='gauge', interval=10)
    v1.plugin = 'pgstore-rows'
    v1.dispatch(values=[rows / 10])
    resetrows()
    v2 = collectd.Values(type='gauge', interval=10)
    v2.plugin = 'pgstore-threads'
    v2.dispatch(values=[handler.threadcount()])
def read_callback():
    with open(os.path.join(MIRROR_DIR, 'status'), 'r') as f:
        current_serial = f.read()
    with open(os.path.join(MIRROR_DIR, 'web', 'last-modified'), 'r') as f:
        last_modified_timestamp = f.read().rstrip('\n')

    cur_serial = collectd.Values(type='gauge')
    cur_serial.plugin = 'pypi_mirror.current_serial'
    cur_serial.dispatch(values=[int(current_serial)])

    last_modified_unix = calendar.timegm(
        datetime.strptime(last_modified_timestamp, "%Y%m%dT%H:%M:%S").utctimetuple())
    last_mod = collectd.Values(type='gauge')
    last_mod.plugin = 'pypi_mirror.last_modified'
    last_mod.dispatch(values=[int(last_modified_unix)])
def read_status_page(data=None):
    global _elastic_ip
    global _elastic_port
    latency = 0
    status = -1
    json_data = None
    try:
        start = time.time()
        collectd.debug('http://%s:%s/_cluster/health' % (_elastic_ip, _elastic_port))
        response = requests.get('http://%s:%s/_cluster/health' % (_elastic_ip, _elastic_port),
                                timeout=5)
        latency = time.time() - start
        json_data = response.json()
    except requests.exceptions.ConnectionError:
        collectd.error("Connection problem, cannot query http://%s:%s/_cluster/health"
                       % (_elastic_ip, _elastic_port))
        return
    except requests.exceptions.Timeout:
        collectd.error("Elasticsearch is timing out with 5 second timeout")
        return
    except ValueError:
        collectd.error("Malformed json retrieved: %s" % json_data)
        return

    if 'status' not in json_data:
        collectd.error("Status node is not present in received json")
        return

    statuses = {"green": 0, "yellow": 1, "red": 2}
    status = statuses.get(json_data["status"], -1)

    latency_gauge = collectd.Values(type='gauge', type_instance='latency')
    status_gauge = collectd.Values(type='gauge', type_instance='status')
    latency_gauge.plugin = PLUGIN_NAME
    status_gauge.plugin = PLUGIN_NAME
    latency_gauge.values = [latency]
    status_gauge.values = [status]
    latency_gauge.dispatch()
    status_gauge.dispatch()
def read_callback():
    log.verbose('beginning read_callback')
    info = get_stats()

    if not info:
        log.warning('%s: No data received' % PLUGIN_NAME)
        return

    for key, value in info.items():
        key_prefix = ''
        key_root = key
        if value not in METRIC_TYPES:
            try:
                key_prefix, key_root = key.rsplit(METRIC_DELIM, 1)
            except ValueError:
                pass
        if key_root not in METRIC_TYPES:
            continue
        key_root, val_type = METRIC_TYPES[key_root]
        if key_prefix == '':
            key_name = key_root
        else:
            key_name = METRIC_DELIM.join([key_prefix, key_root])
        log.verbose('{0}: {1}'.format(key_name, value))
        val = collectd.Values(plugin=PLUGIN_NAME, type=val_type)
        val.type_instance = key_name
        val.values = [value]
        val.meta = {'bug_workaround': True}
        val.dispatch()
def count_old_bad_pgs(curtime):
    global g_bad_states_to_pgs
    global g_linger_timeout
    pg_to_bad_time = {}
    for bad_state_name, bad_pgs in g_bad_states_to_pgs.items():
        for pgid, etime in bad_pgs.items():
            if etime > curtime:
                continue
            diff = curtime - etime
            if diff > g_linger_timeout:
                # keep the earliest time at which this PG went bad
                if pgid in pg_to_bad_time:
                    if pg_to_bad_time[pgid] >= etime:
                        pg_to_bad_time[pgid] = etime
                else:
                    pg_to_bad_time[pgid] = etime
    num_old_bad_pgs = {
        "old": 0,
        "older": 0,
        "oldest": 0,
    }
    for pgid, etime in pg_to_bad_time.items():
        if etime > curtime:
            continue
        diff = curtime - etime
        if diff > 3 * g_linger_timeout:
            num_old_bad_pgs["oldest"] = num_old_bad_pgs["oldest"] + 1
        elif diff > 2 * g_linger_timeout:
            num_old_bad_pgs["older"] = num_old_bad_pgs["older"] + 1
        elif diff > g_linger_timeout:
            num_old_bad_pgs["old"] = num_old_bad_pgs["old"] + 1
    for desc, num in num_old_bad_pgs.items():
        collectd.Values(plugin="cephtool",
                        type=('num_' + desc + "_bad_pgs"),
                        values=[num]
                        ).dispatch()
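# Hedged driver sketch: the globals' shape is inferred from the loops
# above (bad state name -> {pgid: time the PG entered that state}); the
# names and the timeout value are assumptions for illustration.
import time

g_linger_timeout = 300.0
g_bad_states_to_pgs = {"degraded": {}, "stale": {}}

def cephtool_read_bad_pgs(data=None):
    count_old_bad_pgs(time.time())

collectd.register_read(cephtool_read_bad_pgs)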
def read(data=None):
    for stream, viewer in count_hls_viewers().items():
        vl = collectd.Values(plugin='hls', type='users',
                             type_instance=stream, values=[viewer])
        vl.dispatch()
def prepare_and_dispatch_metric(module_config, name, value, _type, extra_dimensions=None):
    '''
    Prepares and dispatches a metric
    '''
    data_point = collectd.Values(plugin=PLUGIN_NAME)
    data_point.type_instance = name
    data_point.type = _type
    data_point.plugin_instance = prepare_plugin_instance(
        module_config['member_id'], module_config['custom_dimensions'], extra_dimensions)
    data_point.values = [value]
    # With some versions of CollectD, a dummy metadata map must be added
    # to each value for it to be correctly serialized to JSON by the
    # write_http plugin. See
    # https://github.com/collectd/collectd/issues/716
    data_point.meta = {'0': True}

    pprint_dict = {
        'plugin': data_point.plugin,
        'plugin_instance': data_point.plugin_instance,
        'type': data_point.type,
        'type_instance': data_point.type_instance,
        'values': data_point.values,
    }
    collectd.debug(pprint.pformat(pprint_dict))

    data_point.dispatch()
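# Illustrative call (metric name, value, and dimensions are assumptions
# based on the signature above):
# prepare_and_dispatch_metric(module_config, 'leader_uptime', 42.0, 'gauge',
#                             extra_dimensions={'cluster': 'prod'})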
def dispatch_value(info, key, type, plugin_instance=None, type_instance=None):
    """Read a key from info response data and dispatch a value"""
    if key not in info:
        collectd.warning('redis_info plugin: Info key not found: %s' % key)
        return

    if plugin_instance is None:
        plugin_instance = 'unknown redis'
        collectd.error(
            'redis_info plugin: plugin_instance is not set, Info key: %s' % key)

    if not type_instance:
        type_instance = key

    try:
        value = int(info[key])
    except ValueError:
        value = float(info[key])

    log_verbose('Sending value: %s=%s' % (type_instance, value))

    val = collectd.Values(plugin='redis_info')
    val.type = type
    val.type_instance = type_instance
    val.plugin_instance = plugin_instance
    val.values = [value]
    val.dispatch()
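# Illustrative call, assuming `info` holds the parsed output of the redis
# INFO command:
# dispatch_value(info, 'connected_clients', 'gauge', plugin_instance='6379')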
def dispatch_value(plugin_instance, info, key, type, type_instance=None):
    """Read a key from info response data and dispatch a value"""
    global REDIS_PI_DEFAULT
    if key not in info:
        log_verbose('redis_info plugin: Info key not found: %s' % key)
        return

    if not type_instance:
        type_instance = key

    try:
        value = int(info[key])
    except ValueError:
        value = float(info[key])
    except TypeError:
        log_verbose('No info for key: %s' % key)
        return

    log_verbose('redis_info plugin : Sending value: %s/%s=%s'
                % (plugin_instance, type_instance, value))

    val = collectd.Values(plugin='redis_info')
    if plugin_instance != "default" or REDIS_PI_DEFAULT:
        val.plugin_instance = plugin_instance
    val.type = type
    val.type_instance = type_instance
    val.values = [value]
    val.dispatch()
def dispatch(self):
    value = collectd.Values(plugin=self.plugin, type=self.plugin_type)
    if self.type_instance:
        value.type_instance = self.type_instance
    value.plugin_instance = self.plugin_instance
    value.host = self.hostname
    value.dispatch(values=self.value)
def dispatch_value(self, value, type, plugin_instance=None,
                   type_instance=None, dimensions={}):
    """Read a key from info response data and dispatch a value"""
    try:
        value = int(value)
    except ValueError:
        value = float(value)

    self.log_verbose('Sending value: %s=%s (%s)' % (type_instance, value, dimensions))

    val = collectd.Values(plugin='redis_info')
    val.type = type
    val.type_instance = type_instance
    val.values = [value]

    plugin_instance = self.instance
    if plugin_instance is None:
        plugin_instance = '{host}:{port}'.format(host=self.host, port=self.port)
    val.plugin_instance = "{0}{1}".format(plugin_instance, _format_dimensions(dimensions))

    # With some versions of CollectD, a dummy metadata map must be added
    # to each value for it to be correctly serialized to JSON by the
    # write_http plugin. See
    # https://github.com/collectd/collectd/issues/716
    val.meta = {'0': True}
    val.dispatch()
def netstats(data=None):
    for dir in glob("/var/run/ganeti/kvm-hypervisor/nic/*"):
        if not os.path.isdir(dir):
            continue
        hostname = os.path.basename(dir)
        for nic in glob(os.path.join(dir, "*")):
            idx = int(os.path.basename(nic))
            with open(nic) as nicfile:
                try:
                    iface = nicfile.readline().strip()
                except EnvironmentError:
                    continue
            if not os.path.isdir("/sys/class/net/%s" % iface):
                continue
            bytes_in = read_int("/sys/class/net/%s/statistics/rx_bytes" % iface)
            bytes_out = read_int("/sys/class/net/%s/statistics/tx_bytes" % iface)
            vl = collectd.Values(type="derive")
            vl.host = hostname
            vl.plugin = "interface"
            vl.type = "if_octets"
            vl.type_instance = "eth%d" % idx
            vl.dispatch(values=[bytes_out, bytes_in])
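# read_int() is referenced above but not defined in this snippet; a
# minimal sketch of what it presumably does:
def read_int(path):
    with open(path) as f:
        return int(f.read().strip())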
def dispatch_value(val_type, value):
    """Dispatch a value"""
    val = collectd.Values(plugin='cpu')
    val.type = 'cpu'
    val.type_instance = val_type
    val.values = [value]
    val.dispatch()
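# Illustrative usage from a read callback that has computed percentages
# (the type instances follow collectd's cpu plugin convention):
# dispatch_value('user', 12.5)
# dispatch_value('system', 3.1)
# dispatch_value('idle', 84.4)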
def dispatch_value(value, type_name, plugin_name, date=None,
                   type_instance=None, plugin_instance=None, host=None):
    """Dispatch a value"""
    # Build a dotted identifier from whichever optional parts are set, so
    # logging does not fail when they are None.
    ident = '.'.join(p for p in (host, plugin_name, plugin_instance,
                                 type_name, type_instance) if p)
    log_verbose('Sending value: %s=%s' % (ident, value))
    val = collectd.Values()
    val.plugin = plugin_name
    val.type = type_name
    val.values = value if isinstance(value, list) else [value]
    if plugin_instance:
        val.plugin_instance = plugin_instance
    if type_instance:
        val.type_instance = type_instance
    if host:
        val.host = host
    if date:
        val.time = date
    val.dispatch()
def _post_metrics(metrics, module_config):
    """
    Posts metrics to collectd.

    Args:
    :param metrics : Array of Metrics objects
    """
    for metric in metrics:
        datapoint = collectd.Values()
        datapoint.type = DEFAULT_METRIC_TYPE
        datapoint.type_instance = metric.name
        datapoint.plugin = PLUGIN_NAME
        datapoint.plugin_instance = _format_dimensions(metric.dimensions,
                                                       module_config["field_length"])
        datapoint.values = (metric.value,)
        # With some versions of CollectD, a dummy metadata map must be added
        # to each value for it to be correctly serialized to JSON by the
        # write_http plugin. See
        # https://github.com/collectd/collectd/issues/716
        datapoint.meta = {"0": True}

        pprint_dict = {
            "plugin": datapoint.plugin,
            "plugin_instance": datapoint.plugin_instance,
            "type": datapoint.type,
            "type_instance": datapoint.type_instance,
            "values": datapoint.values,
            "interval": module_config["interval"],
        }
        collectd.debug(pprint.pformat(pprint_dict))

        datapoint.dispatch()
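# Illustrative driver (Metric is assumed to be a simple record exposing
# name/value/dimensions, matching the attribute access above):
from collections import namedtuple

Metric = namedtuple('Metric', ['name', 'value', 'dimensions'])

# _post_metrics([Metric('requests_total', 42, {'host': 'web-1'})],
#               {'field_length': 1024, 'interval': 10})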