def _send(self):
    """
    Send data to statsd. Fire and forget. Cross fingers and it'll arrive.
    """
    if not statsd:
        return
    for metric in self.metrics:
        # Split the path into a prefix and a name
        # to work with the statsd module's view of the world.
        # It will get re-joined by the python-statsd module.
        (prefix, name) = metric.path.rsplit(".", 1)
        logging.debug("Sending %s %s|g", name, metric.value)
        if metric.metric_type == 'GAUGE':
            statsd.Gauge(prefix, self.connection).send(name, metric.value)
        else:
            # To send a counter, we send only the delta since the last
            # report, not the absolute value.
            value = metric.raw_value
            if metric.path in self.old_values:
                value = value - self.old_values[metric.path]
            self.old_values[metric.path] = metric.raw_value
            statsd.Counter(prefix, self.connection).increment(name, value)
    self.metrics = []
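A minimal sketch of the path-splitting convention used in _send() above, assuming python-statsd joins the Gauge prefix and the send() name with a dot; the metric path and host below are illustrative, not from the original:

import statsd

connection = statsd.Connection(host='127.0.0.1', port=8125)
# "app.requests.latency" must be split so python-statsd can re-join it.
prefix, name = 'app.requests.latency'.rsplit('.', 1)
# Emits roughly "app.requests.latency:42|g" over UDP.
statsd.Gauge(prefix, connection).send(name, 42)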
def send_data_to_statsd(mqtt_data):
    # mqtt_data is a comma-separated string of readings in the same order
    # as the metrics list below.
    metrics = ['air_temperature', 'wort_temperature', 'hot', 'cold', 'bubbles']
    statsd.Connection.set_defaults(host='statsd.domain.tld', port=8125)
    gauge = statsd.Gauge('Homebrew.batch_dev')
    for c, data in enumerate(mqtt_data.split(',')):
        gauge.send(metrics[c], float(data))
def redis_celery_queue_depth():
    try:
        g = statsd.Gauge("%s_posthog_celery" % (settings.STATSD_PREFIX,))
        llen = redis_instance.llen("celery")
        g.send("queue_depth", llen)
    except Exception:
        # If we can't connect to statsd, don't complain about it:
        # not every installation will have statsd available.
        return
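redis_instance is created elsewhere; a plausible sketch of its construction, assuming the redis-py client and a Django-style REDIS_URL setting (the real wiring may differ):

import redis
from django.conf import settings

# Hypothetical: a module-level client reused by the tasks above.
redis_instance = redis.from_url(settings.REDIS_URL)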
def clickhouse_lag():
    if check_ee_enabled() and settings.EE_AVAILABLE:
        from ee.clickhouse.client import sync_execute

        QUERY = """select max(_timestamp) observed_ts, now() now_ts, now() - max(_timestamp) as lag from events;"""
        lag = sync_execute(QUERY)[0][2]
        g = statsd.Gauge("%s_posthog_celery" % (settings.STATSD_PREFIX,))
        g.send("clickhouse_events_table_lag_seconds", lag)
    else:
        pass
def set_gauge(self, key, value):
    """ Set gauge value. """
    check_key(key)
    assert isinstance(value, Number)
    key = get_full_key_name(key)
    gauge = statsd.Gauge(key)
    gauge.send(None, value)
def _send(self, metric):
    """
    Send data to statsd. Fire and forget. Cross fingers and it'll arrive.
    """
    # Split the path into a prefix and a name
    # to work with the statsd module's view of the world.
    # It will get re-joined by the python-statsd module.
    (prefix, name) = metric.path.rsplit(".", 1)
    logging.debug("Sending {0} {1}|g".format(name, metric.value))
    statsd.Gauge(prefix, self.connection).send(name, metric.value)
def dec_gauge(self, key, amount=1):
    """ Decrement gauge value. """
    check_key(key)
    assert isinstance(amount, Number)
    key = get_full_key_name(key)
    gauge = statsd.Gauge(key)
    gauge.decrement(None, amount)
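check_key() and get_full_key_name() are not shown in these snippets; a minimal sketch of what they might do, with a hypothetical module-level prefix:

# Hypothetical implementations of the helpers used by set_gauge()/dec_gauge();
# KEY_PREFIX is an assumed module-level constant.
KEY_PREFIX = 'myapp'

def check_key(key):
    # Reject non-string or empty keys before they reach statsd.
    assert isinstance(key, str) and key, 'metric key must be a non-empty string'

def get_full_key_name(key):
    # Prepend the application-wide prefix, e.g. 'cache.hits' -> 'myapp.cache.hits'.
    return '%s.%s' % (KEY_PREFIX, key)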
def statsd_gauge(name, value):
    try:
        if not isinstance(value, (float, int)):
            value = float(value)
        if not ig_conf.STATSD_HOST:
            return
        else:
            gauge = statsd.Gauge(ig_conf.STATSD_PREFIX)
            gauge.send(name, value)
    except Exception as e:
        logger.exception(e)
def clickhouse_row_count():
    if is_ee_enabled() and settings.EE_AVAILABLE:
        from ee.clickhouse.client import sync_execute

        for table in CLICKHOUSE_TABLES:
            QUERY = """select count(1) freq from {table};"""
            query = QUERY.format(table=table)
            rows = sync_execute(query)[0][0]
            g = statsd.Gauge("%s_posthog_celery" % (settings.STATSD_PREFIX,))
            g.send("clickhouse_{table}_table_row_count".format(table=table), rows)
    else:
        pass
def clickhouse_lag():
    if is_ee_enabled() and settings.EE_AVAILABLE:
        from ee.clickhouse.client import sync_execute

        for table in CLICKHOUSE_TABLES:
            QUERY = """select max(_timestamp) observed_ts, now() now_ts, now() - max(_timestamp) as lag from {table};"""
            query = QUERY.format(table=table)
            lag = sync_execute(query)[0][2]
            g = statsd.Gauge("%s_posthog_celery" % (settings.STATSD_PREFIX,))
            g.send("clickhouse_{table}_table_lag_seconds".format(table=table), lag)
    else:
        pass
def setup(hass, config):
    """ Set up the StatsD component. """
    from statsd.compat import NUM_TYPES
    import statsd

    conf = config[DOMAIN]
    host = conf[CONF_HOST]
    port = util.convert(conf.get(CONF_PORT), int, DEFAULT_PORT)
    sample_rate = util.convert(conf.get(CONF_RATE), int, DEFAULT_RATE)
    prefix = util.convert(conf.get(CONF_PREFIX), str, DEFAULT_PREFIX)

    statsd_connection = statsd.Connection(host=host, port=port,
                                          sample_rate=sample_rate,
                                          disabled=False)
    meter = statsd.Gauge(prefix, statsd_connection)

    def statsd_event_listener(event):
        """ Listen for new messages on the bus and send them to StatsD. """
        state = event.data.get('new_state')
        if state is None:
            return
        if state.state in (STATE_ON, STATE_LOCKED, STATE_ABOVE_HORIZON):
            _state = 1
        elif state.state in (STATE_OFF, STATE_UNLOCKED, STATE_UNKNOWN,
                             STATE_BELOW_HORIZON):
            _state = 0
        else:
            _state = state.state
            if _state == '':
                return
            try:
                _state = float(_state)
            except ValueError:
                pass
        if not isinstance(_state, NUM_TYPES):
            return
        _LOGGER.debug('Sending %s.%s', state.entity_id, _state)
        meter.send(state.entity_id, _state)

    hass.bus.listen(EVENT_STATE_CHANGED, statsd_event_listener)
    return True
def handle(self):
    statsd_connection = statsd.Connection(
        host=self.settings.get('statsd').get('host', '127.0.0.1'),
        port=self.settings.get('statsd').get('port', 8125),
        sample_rate=self.settings.get('statsd').get('sample_rate', 1),
    )
    meter = statsd.Gauge(
        self.settings.get('statsd').get('prefix', 'sensu'),
        statsd_connection)
    key = '{}.{}'.format(self.event['client']['name'].replace('.', '_'),
                         self.event['check']['name'].replace('.', '_'))
    meter.send(key, self.event['check']['status'])
def sync_execute(query, args=None, settings=None):
    start_time = time()
    try:
        result = ch_client.execute(query, args, settings=settings)
    finally:
        execution_time = time() - start_time
        g = statsd.Gauge("%s_clickhouse_sync_execution_time" % (STATSD_PREFIX,))
        g.send("clickhouse_sync_query_time", execution_time)
        if app_settings.SHELL_PLUS_PRINT_SQL:
            print(format_sql(query, args))
            print("Execution time: %.6fs" % (execution_time,))
        if _save_query_user_id:
            save_query(query, args, execution_time)
    return result
def statsd_sensor_counts():
    """
    A periodic task to gather metrics for the number of sensors per team
    and push them to the statsd instance on the monitoring server.
    """
    gauge = statsd.Gauge('sensor.saas.sensors')
    gauge.send('count', Sensor.objects.count())
    for team in Team.objects.annotate(num_sensors=Count('sensors')):
        gauge.send("team.%s.count" % team.identifier, team.num_sensors)
    logger.info("Logged sensor counts to statsd.")
def clickhouse_part_count():
    if is_ee_enabled() and settings.EE_AVAILABLE:
        from ee.clickhouse.client import sync_execute

        QUERY = """
            select table, count(1) freq
            from system.parts
            group by table
            order by freq desc;
        """
        rows = sync_execute(QUERY)
        for (table, parts) in rows:
            g = statsd.Gauge("%s_posthog_celery" % (settings.STATSD_PREFIX,))
            g.send("clickhouse_{table}_table_parts_count".format(table=table), parts)
    else:
        pass
def send_messages(self):
    '''Main processing for sending messages.'''
    try:
        conn = mstatsd.Connection(host=self.host, port=self.port)
        self.client = mstatsd.Client(name='statsd-generator', connection=conn)
        for index in range(1, self.num_of_iterations + 1):
            print("Starting iteration " + str(index) + " of " +
                  str(self.num_of_iterations))

            counter = self.client.get_counter('teraflops')
            counter.increment(5)

            gauge = self.client.get_gauge()
            gauge.send('num_of_teraflops', random.uniform(1.0, 10.0),
                       dimensions={'origin': 'dev', 'environment': 'test'})

            histogram = self.client.get_histogram('hist')
            histogram.send('file.upload.size', random.randrange(1, 100),
                           dimensions={'version': '1.0'})

            stat_set = self.client.get_set('hist')
            stat_set.send('load_time', random.randrange(1, 100),
                          dimensions={'page_name': 'mypage.html'})

            timer = self.client.get_timer('timer')

            @timer.timed('config_db_time', dimensions={'db_name': 'mydb'})
            def time_db():
                time.sleep(0.2)
            time_db()

            with timer.time('time_block'):
                time.sleep(0.3)

            # Send some regular statsd messages
            counter = statsd.Counter('statsd_counter')
            counter += 1
            gauge = statsd.Gauge('statsd_gauge')
            gauge.send('cpu_percent', random.uniform(1.0, 100.0))

            print("Completed iteration " + str(index) + ". Sleeping for " +
                  str(self.delay) + " seconds...")
            time.sleep(self.delay)
    except Exception:
        print("Error sending statsd messages...")
        raise
def sync_execute(query, args=None, settings=None):
    with ch_pool.get_client() as client:
        start_time = time()
        settings = settings or {}
        settings["max_threads"] = 48  # :TODO: Nuke this, update configuration
        try:
            result = client.execute(query, args, settings=settings)
        finally:
            execution_time = time() - start_time
            g = statsd.Gauge("%s_clickhouse_sync_execution_time" % (STATSD_PREFIX,))
            g.send("clickhouse_sync_query_time", execution_time)
            if app_settings.SHELL_PLUS_PRINT_SQL:
                print(format_sql(query, args))
                print("Execution time: %.6fs" % (execution_time,))
            if _save_query_user_id:
                save_query(query, args, execution_time)
    return result
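A sketch of how the wrapper above might be called; the table and parameter names are illustrative, using clickhouse-driver's %(name)s parameter style:

# Illustrative call; assumes a running ClickHouse and a reachable statsd.
rows = sync_execute(
    "SELECT count() FROM events WHERE team_id = %(team_id)s",
    {"team_id": 1},
)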
def clickhouse_mutation_count():
    if is_ee_enabled() and settings.EE_AVAILABLE:
        from ee.clickhouse.client import sync_execute

        QUERY = """
            SELECT table, count(1) AS freq
            FROM system.mutations
            GROUP BY table
            ORDER BY freq DESC
        """
        rows = sync_execute(QUERY)
        for (table, muts) in rows:
            g = statsd.Gauge("%s_posthog_celery" % (settings.STATSD_PREFIX,))
            g.send("clickhouse_{table}_table_mutations_count".format(table=table), muts)
    else:
        pass
def measure_comments_per_user(request, issue):
    """Sends the number of published comments per issue and per user.

    :param request: django request
    :param issue: codereview issue object
    """
    from codereview import models
    number_of_comments = models.Comment.objects.filter(
        patch__patchset__issue=issue,
        author=request.user.id,
        draft=False).count()
    try:
        case_id = CASE_ID_RE.match(issue.subject).groups()[0]
    except AttributeError:
        return
    gauge = statsd.Gauge('{0}.codereview.number_of_comments.{case_id}'.format(
        HOSTNAME, case_id=case_id))
    gauge.send(
        '{author_name}'.format(
            author_name=request.user.username.replace('.', '_')),
        number_of_comments)
def statsd_gauge_task(slug, current_value, **kwargs):
    conn = get_statsd_conn()
    gauge = statsd.Gauge(slug, connection=conn)
    # We send an empty name here, since the slug alone identifies the metric.
    gauge.send('', current_value)
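get_statsd_conn() is defined elsewhere; a plausible sketch, assuming the host and port come from Django-style settings (the real helper may read its configuration differently):

# Hypothetical sketch of get_statsd_conn().
def get_statsd_conn():
    return statsd.Connection(
        host=getattr(settings, 'STATSD_HOST', '127.0.0.1'),
        port=getattr(settings, 'STATSD_PORT', 8125),
        sample_rate=1,
    )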
import statsd

SAMPLE_RATE = 1
HOST = "10.10.10.180"
PORT = 8125

statsd_connection = statsd.Connection(
    host=HOST,
    port=PORT,
    sample_rate=SAMPLE_RATE,
    disabled=False
)

PREFIX = 'gauge.test'
gauge = statsd.Gauge(PREFIX, statsd_connection)
gauge.send('metric1', 50)
gauge.send('metric3', 10)
def setUp(self):
    self.con = statsd.Connection(host=HOST, port=PORT, disabled=False)
    self.gauge = statsd.Gauge("core_prod", self.con)
    self.counter = statsd.Counter("core_prod", self.con)
def Gauge(name, suffix=None):
    if suffix:
        name = append_suffix(name, suffix)
    return statsd.Gauge("%s.%s" % (get_prefix(), name))
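Example use of the wrapper above, assuming get_prefix() returns something like 'myapp' and append_suffix() joins with a dot (both helpers are defined elsewhere; the metric names are made up):

# With get_prefix() == 'myapp' and dot-joining append_suffix(), this sends
# roughly "myapp.queue_depth.primary.value:42|g".
Gauge('queue_depth', suffix='primary').send('value', 42)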
import os
import time

from django.conf import settings
try:
    from django.core.management.base import NoArgsCommand as BaseCommand
except ImportError:
    from django.core.management.base import BaseCommand
import psutil
import statsd

last_disk_io = psutil.disk_io_counters()
last_net_io = psutil.net_io_counters()
gauge = statsd.Gauge('system')
time.sleep(1)


def io_change(last, current):
    return dict([(f, getattr(current, f) - getattr(last, f))
                 for f in last._fields])


while True:
    # psutil.phymem_usage() was removed in psutil 2.0;
    # virtual_memory() is the current equivalent.
    memory = psutil.virtual_memory()
    disk = psutil.disk_usage("/")
    disk_io = psutil.disk_io_counters()
    disk_io_change = io_change(last_disk_io, disk_io)
    net_io = psutil.net_io_counters()
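    # A plausible completion of the loop body, which ends at the line above
    # in the source; the metric names below are assumptions, not from the
    # original.
    net_io_change = io_change(last_net_io, net_io)
    last_disk_io, last_net_io = disk_io, net_io
    gauge.send('memory.percent', memory.percent)
    gauge.send('disk.percent', disk.percent)
    for field, value in disk_io_change.items():
        gauge.send('disk_io.%s' % field, value)
    for field, value in net_io_change.items():
        gauge.send('net_io.%s' % field, value)
    time.sleep(1)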
def messageReceived(data):
    logging.debug("Got packet {0}".format(data))
    # This is a test program, so use global variables and
    # save the addresses so they can be used later.
    global switchLongAddr
    global switchShortAddr
    switchLongAddr = data['source_addr_long']
    switchShortAddr = data['source_addr']
    clusterId = int.from_bytes(data['cluster'], byteorder='big')
    sourceAddrHex = switchLongAddr.hex()
    clusterIdHex = hex(clusterId)
    gauge = statsd.Gauge('xbee-{0}'.format(sourceAddrHex))
    clusterCmd = int(data['rf_data'][2])
    logging.debug("Packet from addr {0} cluster {1} cmd {2}".format(
        sourceAddrHex, clusterIdHex, hex(clusterCmd)))

    if clusterId == 0x13:
        # This is the device announce message.
        # Due to timing problems with the switch itself, I don't
        # respond to this message; I save the response for later, after the
        # Match Descriptor request comes in. You'll see it down below.
        pass
    elif clusterId == 0x8005:
        # This is the Active Endpoint Response. This message tells you
        # what the device can do, but it isn't constructed correctly to match
        # what the switch can do according to the spec. This is another
        # message that gets its response after I receive the Match Descriptor.
        pass
    elif clusterId == 0x0006:
        # Match Descriptor Request; this is the point where I finally
        # respond to the switch. Several messages are sent to cause the
        # switch to join with the controller at a network level and to cause
        # it to regard this controller as valid.
        #
        # First the Active Endpoint Request
        payload1 = b'\x00\x00'
        zb.send('tx_explicit',
                dest_addr_long=switchLongAddr,
                dest_addr=switchShortAddr,
                src_endpoint=b'\x00',
                dest_endpoint=b'\x00',
                cluster=b'\x00\x05',
                profile=b'\x00\x00',
                data=payload1)
        # Now the Match Descriptor Response
        payload2 = b'\x00\x00\x00\x00\x01\x02'
        zb.send('tx_explicit',
                dest_addr_long=switchLongAddr,
                dest_addr=switchShortAddr,
                src_endpoint=b'\x00',
                dest_endpoint=b'\x00',
                cluster=b'\x80\x06',
                profile=b'\x00\x00',
                data=payload2)
        # Now there are two messages directed at the hardware code
        # (rather than the network code). The switch has to
        # receive both of these to stay joined.
        payload3 = b'\x11\x01\x01'
        zb.send('tx_explicit',
                dest_addr_long=switchLongAddr,
                dest_addr=switchShortAddr,
                src_endpoint=b'\x00',
                dest_endpoint=b'\x02',
                cluster=b'\x00\xf6',
                profile=b'\xc2\x16',
                data=payload3)
        payload4 = b'\x19\x01\xfa\x00\x01'
        zb.send('tx_explicit',
                dest_addr_long=switchLongAddr,
                dest_addr=switchShortAddr,
                src_endpoint=b'\x00',
                dest_endpoint=b'\x02',
                cluster=b'\x00\xf0',
                profile=b'\xc2\x16',
                data=payload4)
    elif clusterId == 0xef:
        if clusterCmd == 0x81:
            # Per desert-home.com, instantaneous power is sent little endian.
            watts = int.from_bytes(data['rf_data'][3:4], byteorder='little')
            logging.debug('Instantaneous Power {0}W'.format(watts))
            gauge.send('instant_power', watts)
        elif clusterCmd == 0x82:
            # Minute stats: usage (watt-seconds) and uptime.
            usage = int.from_bytes(data['rf_data'][3:6], byteorder='little')
            logging.debug('Watt seconds {0}'.format(usage))
            # Convert watt-seconds to watt-hours.
            gauge.send('watt_hours', usage / 3600)
            upTime = int.from_bytes(data['rf_data'][7:10], byteorder='little')
            logging.debug('Uptime {0} seconds'.format(upTime))
            gauge.send('uptime', upTime)
    elif clusterId == 0xf0:
        logging.debug('Cluster 0xf0 processing cmd: {0}'.format(hex(clusterCmd)))
        if clusterCmd == 0xfb:
            # Note: the temp part of this packet seems to not work?
            # Temp is likely in C * 100.
            logging.debug('AlertMe Lifesign Cluster 0xf0: {0}'.format(data))
            # Decode per https://github.com/arcus-smart-home/arcusplatform/blob/a02ad0e9274896806b7d0108ee3644396f3780ad/common/arcus-protocol/src/main/irp/ame-general.irp
            # Note: status_flags indicate capabilities.
            lifesign_packet = {
                "status_flags": data['rf_data'][3],
                "msTimer": int.from_bytes(data['rf_data'][4:7], byteorder='little'),
                "psuVoltage": data['rf_data'][8:9],
                "temperature": data['rf_data'][10:11],
                "rssi": data['rf_data'][12],
                "lqi": data['rf_data'][13],
                "switch_mask": data['rf_data'][14],
                "switch_state": data['rf_data'][15],
            }
            logging.debug("RSSI = {0}, LQI = {1}, msTimer = {2}".format(
                lifesign_packet["rssi"], lifesign_packet["lqi"],
                lifesign_packet["msTimer"]))
        else:
            logging.debug('Unimplemented AlertMe general cluster')
    elif clusterId == 0xf6:
        if clusterCmd == 0xfd:
            rssi = int(data['rf_data'][3])
            logging.info('RSSI value: {0}'.format(rssi))
            gauge.send('rssi', rssi)
        elif clusterCmd == 0xfe:
            logging.info('Received Version information')
        else:
            logging.info(data['rf_data'])
    elif clusterId == 0xee:
        if clusterCmd == 0x80:
            switch_status = "OFF"
            if data['rf_data'][3] & 0x01:
                switch_status = "ON"
            logging.debug(
                "Packet from addr {0} cluster {1} Switch Status {2}".format(
                    sourceAddrHex, clusterIdHex, switch_status))
    elif clusterId == 0x11:
        # TODO: add something here to handle multiple devices sending temp.
        inbound_data = json.loads(data['rf_data'])
        # Grab the sensor name, which should be the first field in the JSON, i.e.:
        # {"Temp1":{"temp":18.46,"humi":30.41785,"dew":0.737981,"utime":1110}}
        for sensor_name in inbound_data:
            temp_c = inbound_data[sensor_name]["temp"]
            humidity = inbound_data[sensor_name]["humi"]
            # Convert temp_c to Fahrenheit.
            temp_f = temp_c * 1.8 + 32
            gauge.send('temp', temp_f)
            gauge.send('humidity', humidity)
    else:
        logging.info("Unimplemented Cluster ID %s", hex(clusterId))
from __future__ import with_statement

from decimal import Decimal

import mock
import statsd


with mock.patch('statsd.Client') as mock_client:
    instance = mock_client.return_value
    instance._send.return_value = 1

    gauge = statsd.Gauge('testing')
    gauge.send('', 10.5)
    mock_client._send.assert_called_with(mock.ANY, {'testing': '10.5|g'})

    gauge.send('', Decimal('6.576'))
    mock_client._send.assert_called_with(mock.ANY, {'testing': '6.576|g'})

    gauge.send('', 1)
    mock_client._send.assert_called_with(mock.ANY, {'testing': '1|g'})
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import freebox_v5_status.freeboxstatus
import statsd

metrics_prefix = "freebox"

fbx = freebox_v5_status.freeboxstatus.FreeboxStatus()

while True:
    timer = statsd.Timer(metrics_prefix)
    timer.start()
    fbx.update()
    timer.stop("dataAcquisitionTime")

    gauge = statsd.Gauge(metrics_prefix)
    gauge.send("connection.debit.down", fbx.status["adsl"]["synchro_speed"]["down"])
    gauge.send("connection.debit.up", fbx.status["adsl"]["synchro_speed"]["up"])
    gauge.send("network.WAN.down", fbx.status["network"]["interfaces"]["WAN"]["down"])
    gauge.send("network.WAN.up", fbx.status["network"]["interfaces"]["WAN"]["up"])
    gauge.send("network.ethernet.down", fbx.status["network"]["interfaces"]["ethernet"]["down"])
    gauge.send("network.ethernet.up", fbx.status["network"]["interfaces"]["ethernet"]["up"])
    gauge.send("network.switch.down", fbx.status["network"]["interfaces"]["switch"]["down"])
    gauge.send("network.switch.up", fbx.status["network"]["interfaces"]["switch"]["up"])
def prepare_statsd(parameters):
    r"""Sends data to statsd.

    Sends a value to statsd.

    host
        defaults to ``127.0.0.1``
    port
        defaults to ``8125``
    sample_rate
        defaults to ``1.0``
    type
        Accepted values are ``counter``, ``gauge`` and ``timer``,
        defaults to ``counter``
    value
        The value to send. Defaults to ``1.0``
    multiplier
        The amount to multiply the value by. Defaults to ``1.0``
    delta
        boolean, only used for gauge, whether to send differential values
        or absolute values. Defaults to ``False``
    prefix
        the prefix for the stat name; backreferences not allowed
    name
        the name for the stat; backreferences allowed (required)

    Example:

    .. code:: yaml

        match: Duration: (\d+.\d+)s
        statsd:
            type: timer
            value: {1}
            prefix: appserver.request
            name: duration

        statsd:
            prefix: appserver.request
            name: count
    """
    import statsd  # noqa

    statsd_connection = statsd.Connection(
        host=parameters.get('host', '127.0.0.1'),
        port=int(parameters.get('port', 8125)),
        sample_rate=float(parameters.get('sample_rate', 1.0)),
    )

    meter_type = parameters.get('type', 'counter')
    name_template = logshipper.context.prepare_template(parameters['name'])
    val_template = logshipper.context.prepare_template(
        parameters.get('value', 1))
    multiplier = float(parameters.get('multiplier', 1.0))

    if meter_type == 'counter':
        statsd_client = statsd.Counter(parameters.get('prefix'),
                                       statsd_connection)
        delta = True
    elif meter_type == 'gauge':
        statsd_client = statsd.Gauge(parameters.get('prefix'),
                                     statsd_connection)
        delta_str = str(parameters.get("delta", False)).lower()
        delta = delta_str in filters.TRUTH_VALUES
    elif meter_type == 'timer':
        statsd_client = statsd.Timer(parameters.get('prefix'),
                                     statsd_connection)
        delta = False
    else:
        raise ValueError("Unknown meter type, should be one of counter, "
                         "gauge or timer")  # pragma: nocover

    def handle_statsd(message, context):
        name = name_template.interpolate(context)
        value = val_template.interpolate(context)
        if delta:
            statsd_client.increment(name, float(value) * multiplier)
        else:
            statsd_client.send(name, float(value) * multiplier)

    return handle_statsd
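A sketch of how the factory above might be wired up; the parameter values mirror the YAML example in the docstring, and the message/context shapes handled by the returned callable are assumed from logshipper's pipeline conventions:

# Illustrative wiring of prepare_statsd().
handler = prepare_statsd({
    'type': 'timer',
    'value': '{1}',
    'prefix': 'appserver.request',
    'name': 'duration',
})
# The returned handle_statsd(message, context) is then invoked per log line
# by the pipeline, interpolating name/value templates from the match context.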
def setUp(self):
    self.gauge = statsd.Gauge('testing')
def send_stat():
    gauge1 = statsd.Gauge('CPU')
    gauge1.send('cpu_percent', psutil.cpu_percent())
    gauge1.send('mem_utilization', psutil.virtual_memory().percent)