async def monitoring_task(self):
    while not self._stopping.is_set():
        try:
            now = time.time()
            streams = await self._redis.zrangebyscore(
                "streams",
                min=0,
                max=now,
                start=self.worker_count,
                num=1,
                withscores=True,
            )
            # NOTE(sileht): The latency may not be exact with the next StreamSelector
            # based on hash+modulo
            if streams:
                latency = now - streams[0][1]
                statsd.timing("engine.streams.latency", latency)
            else:
                statsd.timing("engine.streams.latency", 0)

            statsd.gauge("engine.workers.count", self.worker_count)
            statsd.gauge("engine.processes.count", self.process_count)
            statsd.gauge("engine.workers-per-process.count", self.worker_per_process)
        except Exception:
            LOG.warning("monitoring task failed", exc_info=True)
        await self._sleep_or_stop(60)
def __exit__(self, *args):
    now = time()
    tags = [
        '{}:{}'.format(tag_name, value)
        for tag_name, value in self.tags.items()
    ]
    statsd.timing(self.metric, (now - self.start) * 1000, tags=tags)
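# A minimal usage sketch pairing the __exit__ above with the __enter__ it
# assumes; the class name StatsdTimer and the run_query call are hypothetical.
from time import time

from datadog import statsd


class StatsdTimer:
    def __init__(self, metric, **tags):
        self.metric = metric
        self.tags = tags

    def __enter__(self):
        self.start = time()
        return self

    def __exit__(self, *args):
        now = time()
        tags = ['{}:{}'.format(k, v) for k, v in self.tags.items()]
        statsd.timing(self.metric, (now - self.start) * 1000, tags=tags)


# with StatsdTimer('db.query.time', table='users'):
#     run_query()  # elapsed milliseconds are sent on exit, tagged table:users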
def on_message_create(self, event):
    if event.author.bot:
        return

    if event.channel.type is ChannelType.DM:
        return

    tags = {
        'channel_id': event.channel_id,
        'author_id': event.author.id,
        'guild_id': event.guild.id,
    }

    if event.author.id == self.client.state.me.id:
        if event.nonce in self.nonces:
            statsd.timing('latency.message_send',
                          time.time() - self.nonces[event.nonce],
                          tags=to_tags(tags))
            del self.nonces[event.nonce]

    if event.message.mention_everyone:
        tags['mentions_everyone'] = '1'  # Does Datadog support booleans? It does now.

    statsd.increment('guild.messages.create', tags=to_tags(tags))
async def monitoring_task(self) -> None:
    while not self._stopping.is_set():
        try:
            now = time.time()
            streams = await self._redis.zrangebyscore(
                "streams",
                min=0,
                max=now,
                withscores=True,
            )
            # NOTE(sileht): The latency may not be exact with the next StreamSelector
            # based on hash+modulo
            if len(streams) > self.worker_count:
                latency = now - streams[self.worker_count][1]
                statsd.timing("engine.streams.latency", latency)
            else:
                statsd.timing("engine.streams.latency", 0)

            statsd.gauge("engine.streams.backlog", len(streams))
            statsd.gauge("engine.workers.count", self.worker_count)
            statsd.gauge("engine.processes.count", self.process_count)
            statsd.gauge("engine.workers-per-process.count", self.worker_per_process)
        except asyncio.CancelledError:
            LOG.debug("monitoring task killed")
            return
        except Exception:
            LOG.error("monitoring task failed", exc_info=True)
        await self._sleep_or_stop(60)
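# Hedged sketch of the latency heuristic used above, with plain data instead
# of Redis. Scores in the "streams" sorted set are enqueue timestamps; with
# worker_count streams already being processed, the age of the next pending
# score approximates how long new work waits before a worker picks it up.
import time

now = time.time()
scores = sorted([now - 120, now - 60, now - 5])  # three pending streams
worker_count = 2
pending = scores[worker_count:]  # streams beyond what the workers cover
latency = (now - pending[0]) if pending else 0  # ~5 seconds here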
def test_context_manager(self):
    fake_socket = FakeSocket()
    with DogStatsd() as statsd:
        statsd.socket = fake_socket
        statsd.gauge('page.views', 123)
        statsd.timing('timer', 123)

    t.assert_equal('page.views:123|g\ntimer:123|ms', fake_socket.recv())
def test_context_manager(self):
    fake_socket = FakeSocket()
    with DogStatsd(telemetry_min_flush_interval=0) as statsd:
        statsd.socket = fake_socket
        statsd.gauge('page.views', 123)
        statsd.timing('timer', 123)

    metric = "page.views:123|g\ntimer:123|ms"
    assert_equal(metric, fake_socket.recv())
    assert_equal(telemetry_metrics(metrics=2, bytes_sent=len(metric)),
                 fake_socket.recv())
def test_context_manager(self):
    fake_socket = FakeSocket()
    with DogStatsd() as statsd:
        statsd.socket = fake_socket
        statsd.gauge('page.views', 123)
        statsd.timing('timer', 123)

    assert_equal_telemetry("page.views:123|g\ntimer:123|ms",
                           fake_socket.recv(),
                           telemetry=telemetry_metrics(metrics=2))
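# The three tests above rely on a FakeSocket helper; this is a minimal sketch
# of the behavior they assume (the real helper in the datadogpy test suite is
# more elaborate): it records payloads the client sends and replays them.
class FakeSocket:
    def __init__(self):
        self.payloads = []

    def send(self, payload):
        self.payloads.append(
            payload.decode() if isinstance(payload, bytes) else payload)

    def recv(self):
        return self.payloads.pop(0) if self.payloads else None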
@contextmanager  # from contextlib; required so `timed` can be used as a `with` block
def timed(metricname, tags=None):
    start = time.time()
    try:
        yield
    finally:
        statsd.timing(
            metricname,
            (time.time() - start) * 1000,
            tags=['{}:{}'.format(k, v) for k, v in (tags or {}).items()])
@contextmanager  # from contextlib; required so `timed` can be used as a `with` block
def timed(metricname, tags=None):
    start = time.time()
    try:
        yield
    finally:
        if tags and isinstance(tags, dict):
            tags = to_tags(tags)
        statsd.timing(metricname, (time.time() - start) * 1000, tags=tags)
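# Usage sketch for the two `timed` context managers above (assumes the
# module-level datadog `statsd` client they already reference; `run_job`
# is hypothetical):
#
# with timed('worker.job.duration', {'queue': 'default'}):
#     run_job()  # elapsed wall time is reported in milliseconds on exit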
def on_message(self, ws, message):
    start = datetime.now()
    logger.debug('websocket | ' + self.channel + ' | message: ' + message)
    short_href = HubTasks.get_short_path(message)
    timestamp = get_item_timestamp(short_href)
    if timestamp < websockets[self.channel]['start']:
        logger.info('item before start time: ' + short_href)
    else:
        HubTasks.verify_ordered(self.channel, short_href, websockets, "websocket")
    elapsed = (datetime.now() - start).total_seconds() * 1000
    statsd.timing('hub.locust.websocket.on_message.time', elapsed,
                  tags=['channel:' + self.channel])
def _record_timing(self, api_name):
    if settings.DEBUG:
        print('Record Timing - %s:%s' % (api_name, self.duration))

    metric = 'apiserver.timing.%s' % api_name
    statsd.timing(metric, self.duration)

    service_name = self._get_service_name(api_name)
    metric_timing_all = 'apiserver.timing.all'
    statsd.timing(
        metric_timing_all,
        self.duration,
        tags=["api_name:%s" % api_name, "service_name:%s" % service_name])
def timing(self, key, value, instance=None, tags=None, sample_rate=1):
    if tags is None:
        tags = {}
    if self.tags:
        tags.update(self.tags)
    if instance:
        tags["instance"] = instance
    if tags:
        tags = [f"{k}:{v}" for k, v in tags.items()]
    statsd.timing(self._get_key(key), value, sample_rate=sample_rate, tags=tags)
def timing(self, key, value, instance=None, tags=None, sample_rate=1):
    if tags is None:
        tags = {}
    if self.tags:
        tags.update(self.tags)
    if instance:
        tags["instance"] = instance
    if tags:
        tags = ["{}:{}".format(*i) for i in tags.items()]
    statsd.timing(self._get_key(key), value, sample_rate=sample_rate, tags=tags)
def timing(self, key, value, instance=None, tags=None, sample_rate=1):
    if tags is None:
        tags = {}
    if self.tags:
        tags.update(self.tags)
    if instance:
        tags['instance'] = instance
    if tags:
        tags = [u'{}:{}'.format(*i) for i in tags.items()]
    statsd.timing(
        self._get_key(key),
        value,
        sample_rate=sample_rate,
        tags=tags,
    )
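# Usage sketch for the `timing` wrapper variants above (assumed context: an
# enclosing class, hypothetically named Metrics, that holds default tags in
# self.tags and prefixes keys in _get_key):
#
# client = Metrics(prefix='app', tags={'env': 'prod'})
# client.timing('db.query', 12.5, instance='primary', tags={'table': 'users'})
# # -> statsd.timing('app.db.query', 12.5, sample_rate=1,
# #                  tags=['table:users', 'env:prod', 'instance:primary'])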
def on_message_create(self, event):
    tags = {
        'channel_id': event.channel_id,
        'author_id': event.author.id,
    }

    if event.guild:
        tags['guild_id'] = event.guild.id

    if event.author.id == self.client.state.me.id:
        if event.nonce in self.nonces:
            statsd.timing('latency.message_send',
                          time.time() - self.nonces[event.nonce],
                          tags=to_tags(tags))
            del self.nonces[event.nonce]

    statsd.increment('guild.messages.create', tags=to_tags(tags))
def process_timer(self, timer):
    message = timer['message']
    channel = timer['channel']
    interval = timer['interval']

    timer_id = self.get_timer_id(message, interval, channel)
    last_post_timestamp = self.db.get(
        'plugin.timers.{}.last_post_timestamp'.format(timer_id))
    last_post_timestamp = int(last_post_timestamp or 0)

    next_announce = last_post_timestamp + timer['interval']
    now = math.floor(time())
    if now < next_announce:
        return next_announce

    with self._lock:
        last_messages = get_channel_messages(channel, limit=1)
        webhook_id = 'timers:{}'.format(channel)

        do_post = True
        if len(last_messages) > 0:
            last_message = last_messages[-1]
            if self.db.sismember('plugin.timers.webhooks', last_message.webhook_id):
                do_post = False

        now = math.floor(time())
        self.db.set('plugin.timers.{}.last_post_timestamp'.format(timer_id), now)

        if do_post:
            post_message = send_webhook_message(webhook_id, channel, message)
            self.db.sadd('plugin.timers.webhooks', post_message.webhook_id)
            self.log('Announcing timer message ({} interval) in {}'.format(
                interval, channel))

        if last_post_timestamp != 0:
            statsd.timing('timers_delay', int(time()) - next_announce)

    return now + timer['interval']
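# Hedged sketch of the scheduling contract assumed above: process_timer returns
# the next timestamp at which the timer should fire, and `timers_delay` records
# how late the current firing was relative to its deadline.
last_post_timestamp, interval = 1_000, 300
deadline = last_post_timestamp + interval  # next_announce == 1_300
now = 1_320                                # fired 20 seconds late
assert now - deadline == 20                # the value sent to statsd.timing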
def request_stop(metrics, response):
    metrics['Request-Timer'].stop()
    metrics['Request-Metric-Millis'] = metrics['Request-Timer'].ms
    statsd.incr(metrics['Request-Metric-ID'])
    statsd.incr("{}.{}".format(metrics['Request-Metric-ID'], response.status_code))
    if DATADOG_METRICS:
        datadog_statsd.increment(metrics['Request-Metric-ID'])
        datadog_statsd.increment("{}.{}".format(metrics['Request-Metric-ID'],
                                                response.status_code))
        datadog_statsd.timing(metrics['Request-Metric-ID'],
                              metrics['Request-Timer'].ms)
    metrics.pop('Request-Timer')
    for name, value in metrics.items():
        response._headers[name] = (name, str(value))
async def monitoring_task(self):
    while not self._stopping.is_set():
        now = time.time()
        streams = await self._redis.zrangebyscore(
            "streams",
            min=0,
            max=now,
            start=self.worker_count,
            num=1,
            withscores=True,
        )
        if streams:
            latency = now - streams[0][1]
            statsd.timing("engine.streams.latency", latency)
        else:
            statsd.timing("engine.streams.latency", 0)

        statsd.gauge("engine.workers.count", self.worker_count)
        await self._sleep_or_stop(60)
def request_stop(metrics, response):
    metrics['Request-Timer'].stop()
    metrics['Request-Metric-Millis'] = metrics['Request-Timer'].ms
    statsd.incr(metrics['Request-Metric-ID'])
    statsd.incr(f"{metrics['Request-Metric-ID']}.{response.status_code}")
    if DATADOG_METRICS:
        datadog_statsd.increment(metrics['Request-Metric-ID'], tags=DATADOG_TAGS)
        datadog_statsd.increment(
            f"{metrics['Request-Metric-ID']}.{response.status_code}",
            tags=DATADOG_TAGS)
        datadog_statsd.timing(metrics['Request-Metric-ID'],
                              metrics['Request-Timer'].ms,
                              tags=DATADOG_TAGS)
    metrics.pop('Request-Timer')
    for name, value in metrics.items():
        response._headers[name] = (name, str(value))
from datadog import initialize, statsd
import random
import time

options = {'statsd_host': '127.0.0.1', 'statsd_port': 8125}
initialize(**options)

namespace = "testing7"

# statsd.distribution('example_metric.distribution', random.randint(0, 20), tags=["environment:dev"])
statsd.timing("%s.timing" % namespace, random.randint(1, 20), tags=["environment:dev"])
statsd.distribution("%s.distribution" % namespace, 50 + random.randint(1, 20), tags=["environment:dev"])
# time.sleep(5)
# statsd.timing("%s.timing" % namespace, random.randint(1, 20), tags=["environment:dev"])
# statsd.distribution("%s.distribution" % namespace, 50 + random.randint(1, 20), tags=["environment:dev"])
def _record_timing(service_name, duration):
    print("duration {0}:{1}".format(service_name, duration))
    metric = 'availability.timing.%s' % service_name
    statsd.timing(metric, duration)