class DatadogMetrics(object):
    """Metric backend that reports to DataDog through a ThreadStats client.

    Every metric name is qualified with ``namespace`` and tagged with the
    local hostname before submission.
    """

    def __init__(self, api_key, app_key, flush_interval=10,
                 namespace="aplt"):
        datadog.initialize(api_key=api_key, app_key=app_key)
        self._client = ThreadStats()
        self._flush_interval = flush_interval
        self._host = get_hostname()
        self._namespace = namespace

    def _prefix_name(self, name):
        # Qualify the metric name with the configured namespace.
        return "%s.%s" % (self._namespace, name)

    def start(self):
        # Roll-up and flush deliberately share the same cadence.
        self._client.start(flush_interval=self._flush_interval,
                           roll_up_interval=self._flush_interval)

    def increment(self, name, count=1, **kwargs):
        """Increment the namespaced counter ``name`` by ``count``."""
        self._client.increment(self._prefix_name(name), count,
                               host=self._host, **kwargs)

    def timing(self, name, duration, **kwargs):
        """Record ``duration`` against the namespaced timer ``name``."""
        self._client.timing(self._prefix_name(name), value=duration,
                            host=self._host, **kwargs)
def test_metric_type(self):
    """ Checks the submitted metric's metric type. """
    # Set up ThreadStats with a namespace, flushing manually.
    dog = ThreadStats(namespace="foo")
    dog.start(roll_up_interval=1, flush_in_thread=False)
    reporter = dog.reporter = self.reporter

    # Send one metric of each flavor.
    dog.gauge("gauge", 20, timestamp=100.0)
    dog.increment("counter", timestamp=100.0)
    dog.histogram('histogram.1', 20, 100.0)
    dog.flush(200.0)

    ordered = self.sort_metrics(reporter.metrics)
    (first, second, p75, p85, p95, p99, avg, cnt, max_, min_) = ordered

    # Counters and histogram counts flush as rates; everything else
    # (gauges, percentiles, avg, max, min) flushes as gauges.
    expectations = [
        (first, 'rate'),
        (second, 'gauge'),
        (p75, 'gauge'),
        (p85, 'gauge'),
        (p95, 'gauge'),
        (p99, 'gauge'),
        (avg, 'gauge'),
        (cnt, 'rate'),
        (max_, 'gauge'),
        (min_, 'gauge'),
    ]
    for metric, expected_type in expectations:
        nt.assert_equal(metric['type'], expected_type)
class DatadogMiddleware(object):
    """Django middleware reporting request timings and view exceptions
    to Datadog through a shared ThreadStats client.

    NOTE(review): uses `basestring`/`long`, so this block targets Python 2.
    """

    # Attribute stamped onto the request to carry the start time.
    DD_TIMING_ATTRIBUTE = '_dd_start_time'

    def __init__(self):
        app_name = settings.DATADOG_APP_NAME
        self.stats = ThreadStats()
        self.stats.start()
        self.error_metric = '{0}.errors'.format(app_name)
        self.timing_metric = '{0}.request_time'.format(app_name)
        self.event_tags = [app_name, 'exception']

    def process_request(self, request):
        # Stamp the request so process_response can measure elapsed time.
        setattr(request, self.DD_TIMING_ATTRIBUTE, time.time())

    def process_response(self, request, response):
        """ Submit timing metrics from the current request """
        if not hasattr(request, self.DD_TIMING_ATTRIBUTE):
            return response

        # Elapsed wall-clock time since process_request stamped it.
        started = getattr(request, self.DD_TIMING_ATTRIBUTE)
        elapsed = time.time() - started
        self.stats.histogram(self.timing_metric, elapsed,
                             tags=self._get_metric_tags(request))
        return response

    def process_exception(self, request, exception):
        """ Captures Django view exceptions as Datadog events """
        # 404s are routine; don't report them.
        if isinstance(exception, Http404):
            return

        # Formatted traceback of the exception being handled.
        stack = traceback.format_exc()

        # request.META may hold non-serializable values; coerce them.
        serializable_meta = {}
        for key, value in request.META.items():
            if isinstance(value, (list, basestring, bool, int, float, long)):
                serializable_meta[key] = value
            else:
                serializable_meta[key] = str(value)

        title = 'Exception from {0}'.format(request.path)
        text = "Traceback:\n@@@\n{0}\n@@@\nMetadata:\n@@@\n{1}\n@@@" \
            .format(stack, json.dumps(serializable_meta, indent=2))

        # Ship the event, then bump the error counter.
        self.stats.event(title, text, alert_type='error',
                         aggregation_key=request.path,
                         tags=self.event_tags)
        self.stats.increment(self.error_metric,
                             tags=self._get_metric_tags(request))

    def _get_metric_tags(self, request):
        # Tag every metric with the request path.
        return ['path:{0}'.format(request.path)]
class DatadogAdapter(BaseAdapter):
    """
    DatadogAdapter sends the given `Receipt` values to a local Datadog
    agent via dogstatsd.
    """
    # All metrics are rooted under the (slugified) register name.
    METRIC_PREFIX = 'shop.{}'.format(slugify(settings.REGISTER_NAME))

    def __init__(self):
        # Configure the dogstatsd client.
        initialize(api_key=settings.DATADOG_API_KEY)

        # Without an API key the stats thread runs in disabled mode.
        disabled = not settings.DATADOG_API_KEY
        self.statsd = ThreadStats()
        self.statsd.start(flush_interval=1, roll_up_interval=1,
                          disabled=disabled)
        logger.debug('statsd thread initialized, disabled: %s', disabled)

    def push(self, receipt):
        """
        Sends data to a local Datadog agent. The `Receipt` products are
        properly tagged using a stringify function so that they can be
        easily aggregated through Datadog backend.
        """
        try:
            when = receipt.date.timestamp()
            prefix = self.METRIC_PREFIX

            # One count per receipt.
            self.statsd.increment(
                '{prefix}.receipt.count'.format(prefix=prefix),
                timestamp=when)

            for sold in receipt.sell_set.all():
                # Tag per-item metrics by slugified product name.
                product_tags = [
                    'product:{}'.format(slugify(sold.product.name))]
                self.statsd.increment(
                    '{prefix}.receipt.items.count'.format(prefix=prefix),
                    timestamp=when, value=sold.quantity, tags=product_tags)
                self.statsd.increment(
                    '{prefix}.receipt.amount'.format(prefix=prefix),
                    timestamp=when,
                    value=float((sold.price * sold.quantity).amount),
                    tags=product_tags)

            logger.debug('pushed metrics for %d sold items',
                         receipt.sell_set.count())
        except Exception:
            raise AdapterPushFailed
class DatadogMetricsBackend(MetricsBackend):
    """Metrics backend that forwards counters and timings to Datadog
    through a ThreadStats client."""

    def __init__(self, prefix=None, **kwargs):
        # TODO(dcramer): it'd be nice if the initialize call wasn't a global
        initialize(**kwargs)
        self._stats = ThreadStats()
        self._stats.start()
        super(DatadogMetricsBackend, self).__init__(prefix=prefix)

    def __del__(self):
        # Stop the flush thread when the backend is garbage-collected.
        self._stats.stop()

    def incr(self, key, amount=1, sample_rate=1):
        """Increment the prefixed counter ``key`` by ``amount``."""
        self._stats.increment(self._get_key(key), amount,
                              sample_rate=sample_rate)

    def timing(self, key, value, sample_rate=1):
        """Record ``value`` against the prefixed timer ``key``."""
        self._stats.timing(self._get_key(key), value,
                           sample_rate=sample_rate)
def test_metric_namespace(self):
    """ Namespace prefixes all metric names. """
    # Manually-flushed ThreadStats configured with a "foo" namespace.
    dog = ThreadStats(namespace="foo")
    dog.start(roll_up_interval=1, flush_in_thread=False)
    dog.reporter = self.reporter

    # Submit a gauge and a counter, then flush.
    dog.gauge("gauge", 20, timestamp=100.0)
    dog.increment("counter", timestamp=100.0)
    dog.flush(200.0)

    # Both series come back with the namespace prepended.
    self.assertMetric(count=2)
    for prefixed_name in ("foo.gauge", "foo.counter"):
        self.assertMetric(name=prefixed_name, count=1)
class DatadogMetricsBackend(MetricsBackend):
    """Metrics backend that forwards counters and timings to Datadog
    through a ThreadStats client.

    ``kwargs`` are forwarded verbatim to ``datadog.initialize``.
    """

    def __init__(self, prefix=None, **kwargs):
        # TODO(dcramer): it'd be nice if the initialize call wasn't a global
        # FIX: configure the global Datadog client *before* starting the
        # ThreadStats flush thread, so the first flush already has
        # credentials (matches the other DatadogMetricsBackend variant).
        initialize(**kwargs)
        self._stats = ThreadStats()
        self._stats.start()
        super(DatadogMetricsBackend, self).__init__(prefix=prefix)

    def __del__(self):
        # Stop the flush thread when the backend is garbage-collected.
        self._stats.stop()

    def incr(self, key, amount=1, sample_rate=1):
        """Increment the prefixed counter ``key`` by ``amount``."""
        self._stats.increment(self._get_key(key), amount,
                              sample_rate=sample_rate)

    def timing(self, key, value, sample_rate=1):
        """Record ``value`` against the prefixed timer ``key``."""
        self._stats.timing(self._get_key(key), value,
                           sample_rate=sample_rate)
def test_tags(self):
    """Same metric posted with different tag sets flushes as separate series."""
    dog = ThreadStats()
    dog.start(roll_up_interval=10, flush_in_thread=False)
    reporter = dog.reporter = MemoryReporter()
    # Post the same metric with different tags.
    dog.gauge('gauge', 10, timestamp=100.0)
    dog.gauge('gauge', 15, timestamp=100.0, tags=['env:production', 'db'])
    dog.gauge('gauge', 20, timestamp=100.0, tags=['env:staging'])
    dog.increment('counter', timestamp=100.0)
    dog.increment('counter', timestamp=100.0, tags=['env:production', 'db'])
    dog.increment('counter', timestamp=100.0, tags=['env:staging'])
    dog.flush(200.0)
    metrics = self.sort_metrics(reporter.metrics)
    nt.assert_equal(len(metrics), 6)
    [c1, c2, c3, g1, g2, g3] = metrics
    # FIX: these assertions were wrapped in bare generator expressions,
    # which are never iterated, so they never executed.
    for c in [c1, c2, c3]:
        nt.assert_equal(c['metric'], 'counter')
    nt.assert_equal(c1['tags'], None)
    nt.assert_equal(c1['points'][0][1], 1)
    nt.assert_equal(c2['tags'], ['env:production', 'db'])
    nt.assert_equal(c2['points'][0][1], 1)
    nt.assert_equal(c3['tags'], ['env:staging'])
    nt.assert_equal(c3['points'][0][1], 1)
    for g in [g1, g2, g3]:
        nt.assert_equal(g['metric'], 'gauge')
    nt.assert_equal(g1['tags'], None)
    nt.assert_equal(g1['points'][0][1], 10)
    nt.assert_equal(g2['tags'], ['env:production', 'db'])
    nt.assert_equal(g2['points'][0][1], 15)
    nt.assert_equal(g3['tags'], ['env:staging'])
    nt.assert_equal(g3['points'][0][1], 20)
def test_tags(self):
    """Same metric posted with different tag sets flushes as separate series."""
    dog = ThreadStats()
    dog.start(roll_up_interval=10, flush_in_thread=False)
    reporter = dog.reporter = MemoryReporter()
    # Post the same metric with different tags.
    dog.gauge("gauge", 10, timestamp=100.0)
    dog.gauge("gauge", 15, timestamp=100.0, tags=["env:production", "db"])
    dog.gauge("gauge", 20, timestamp=100.0, tags=["env:staging"])
    dog.increment("counter", timestamp=100.0)
    dog.increment("counter", timestamp=100.0, tags=["env:production", "db"])
    dog.increment("counter", timestamp=100.0, tags=["env:staging"])
    dog.flush(200.0)
    metrics = self.sort_metrics(reporter.metrics)
    nt.assert_equal(len(metrics), 6)
    [c1, c2, c3, g1, g2, g3] = metrics
    # FIX: these assertions were wrapped in bare generator expressions,
    # which are never iterated, so they never executed.
    for c in [c1, c2, c3]:
        nt.assert_equal(c["metric"], "counter")
    nt.assert_equal(c1["tags"], None)
    nt.assert_equal(c1["points"][0][1], 1)
    nt.assert_equal(c2["tags"], ["env:production", "db"])
    nt.assert_equal(c2["points"][0][1], 1)
    nt.assert_equal(c3["tags"], ["env:staging"])
    nt.assert_equal(c3["points"][0][1], 1)
    for g in [g1, g2, g3]:
        nt.assert_equal(g["metric"], "gauge")
    nt.assert_equal(g1["tags"], None)
    nt.assert_equal(g1["points"][0][1], 10)
    nt.assert_equal(g2["tags"], ["env:production", "db"])
    nt.assert_equal(g2["points"][0][1], 15)
    nt.assert_equal(g3["tags"], ["env:staging"])
    nt.assert_equal(g3["points"][0][1], 20)
def test_tags(self):
    """Same metric posted with different tag sets flushes as separate series.

    Counters are normalized by the 10s roll-up interval, hence 0.1/s rates.
    """
    dog = ThreadStats()
    dog.start(roll_up_interval=10, flush_in_thread=False)
    reporter = dog.reporter = MemoryReporter()
    # Post the same metric with different tags.
    dog.gauge('gauge', 10, timestamp=100.0)
    dog.gauge('gauge', 15, timestamp=100.0, tags=['env:production', 'db'])
    dog.gauge('gauge', 20, timestamp=100.0, tags=['env:staging'])
    dog.increment('counter', timestamp=100.0)
    dog.increment('counter', timestamp=100.0, tags=['env:production', 'db'])
    dog.increment('counter', timestamp=100.0, tags=['env:staging'])
    dog.flush(200.0)
    metrics = self.sort_metrics(reporter.metrics)
    assert_equal(len(metrics), 6)
    [c1, c2, c3, g1, g2, g3] = metrics
    # FIX: these assertions were wrapped in bare generator expressions,
    # which are never iterated, so they never executed.
    for c in [c1, c2, c3]:
        assert_equal(c['metric'], 'counter')
    assert_equal(c1['tags'], None)
    assert_equal(c1['points'][0][1], 0.1)
    assert_equal(c2['tags'], ['env:production', 'db'])
    assert_equal(c2['points'][0][1], 0.1)
    assert_equal(c3['tags'], ['env:staging'])
    assert_equal(c3['points'][0][1], 0.1)
    for g in [g1, g2, g3]:
        assert_equal(g['metric'], 'gauge')
    assert_equal(g1['tags'], None)
    assert_equal(g1['points'][0][1], 10)
    assert_equal(g2['tags'], ['env:production', 'db'])
    assert_equal(g2['points'][0][1], 15)
    assert_equal(g3['tags'], ['env:staging'])
    assert_equal(g3['points'][0][1], 20)
def test_host(self):
    """Per-point host overrides survive aggregation and flushing."""
    dog = ThreadStats()
    dog.start(roll_up_interval=10, flush_in_thread=False)
    reporter = dog.reporter = MemoryReporter()
    # Post the same metric with different tags.
    dog.gauge('gauge', 12, timestamp=100.0, host='')  # unset the host
    dog.gauge('gauge', 10, timestamp=100.0)
    dog.gauge('gauge', 15, timestamp=100.0, host='test')
    dog.gauge('gauge', 15, timestamp=100.0, host='test')
    dog.increment('counter', timestamp=100.0)
    dog.increment('counter', timestamp=100.0)
    dog.increment('counter', timestamp=100.0, host='test')
    dog.increment('counter', timestamp=100.0, host='test', tags=['tag'])
    dog.increment('counter', timestamp=100.0, host='test', tags=['tag'])
    dog.flush(200.0)

    metrics = self.sort_metrics(reporter.metrics)
    assert len(metrics) == 6
    [c1, c2, c3, g1, g2, g3] = metrics

    for counter in (c1, c2, c3):
        assert counter['metric'] == 'counter'
    # Each counter series: expected (host, tags, rate).
    assert c1['host'] is None
    assert c1['tags'] is None
    assert c1['points'][0][1] == 0.2
    assert c2['host'] == 'test'
    assert c2['tags'] is None
    assert c2['points'][0][1] == 0.1
    assert c3['host'] == 'test'
    assert c3['tags'] == ['tag']
    assert c3['points'][0][1] == 0.2

    for gauge in (g1, g2, g3):
        assert gauge['metric'] == 'gauge'
    # Duplicate gauge points collapse to the last value per host.
    assert (g1['host'], g1['points'][0][1]) == (None, 10)
    assert (g2['host'], g2['points'][0][1]) == ('', 12)
    assert (g3['host'], g3['points'][0][1]) == ('test', 15)

    # Ensure histograms work as well.
    @dog.timed('timed', host='test')
    def test():
        pass
    test()
    dog.histogram('timed', 20, timestamp=300.0, host='test')
    reporter.metrics = []
    dog.flush(400)
    for metric in reporter.metrics:
        assert metric['host'] == 'test'
def test_counter(self):
    """Counters aggregate within a roll-up window and flush exactly once."""
    # Create some fake metrics.
    dog = ThreadStats()
    dog.start(roll_up_interval=10, flush_in_thread=False)
    reporter = dog.reporter = MemoryReporter()
    dog.increment("test.counter.1", timestamp=1000.0)
    dog.increment("test.counter.1", value=2, timestamp=1005.0)
    dog.increment("test.counter.2", timestamp=1015.0)
    dog.increment("test.counter.3", timestamp=1025.0)
    dog.flush(1021.0)
    # Assert they've been properly flushed.
    metrics = self.sort_metrics(reporter.metrics)
    nt.assert_equal(len(metrics), 2)
    (first, second) = metrics
    nt.assert_equal(first["metric"], "test.counter.1")
    nt.assert_equal(first["points"][0][0], 1000.0)
    nt.assert_equal(first["points"][0][1], 3)
    nt.assert_equal(second["metric"], "test.counter.2")
    # Test decrement
    dog.increment("test.counter.1", value=10, timestamp=1000.0)
    dog.decrement("test.counter.1", value=2, timestamp=1005.0)
    reporter.metrics = []
    dog.flush(1021.0)
    metrics = self.sort_metrics(reporter.metrics)
    nt.assert_equal(len(metrics), 1)
    first, = metrics
    nt.assert_equal(first["metric"], "test.counter.1")
    nt.assert_equal(first["points"][0][0], 1000.0)
    nt.assert_equal(first["points"][0][1], 8)
    # FIX: removed a stray copy-pasted assertion that re-checked the
    # stale `second` from the first flush (this flush yields one metric).
    # Flush again and make sure we're progressing.
    reporter.metrics = []
    dog.flush(1030.0)
    nt.assert_equal(len(reporter.metrics), 1)
    # Finally, make sure we've flushed all metrics.
    reporter.metrics = []
    dog.flush(1050.0)
    nt.assert_equal(len(reporter.metrics), 0)
def test_counter(self):
    """Counters aggregate within a roll-up window and flush as rates."""
    # Create some fake metrics.
    dog = ThreadStats()
    dog.start(roll_up_interval=10, flush_in_thread=False)
    reporter = dog.reporter = MemoryReporter()
    dog.increment('test.counter.1', timestamp=1000.0)
    dog.increment('test.counter.1', value=2, timestamp=1005.0)
    dog.increment('test.counter.2', timestamp=1015.0)
    dog.increment('test.counter.3', timestamp=1025.0)
    dog.flush(1021.0)
    # Assert they've been properly flushed.
    metrics = self.sort_metrics(reporter.metrics)
    nt.assert_equal(len(metrics), 2)
    (first, second) = metrics
    nt.assert_equal(first['metric'], 'test.counter.1')
    nt.assert_equal(first['points'][0][0], 1000.0)
    nt.assert_equal(first['points'][0][1], 0.3)
    nt.assert_equal(second['metric'], 'test.counter.2')
    # Test decrement
    dog.increment('test.counter.1', value=10, timestamp=1000.0)
    dog.decrement('test.counter.1', value=2, timestamp=1005.0)
    reporter.metrics = []
    dog.flush(1021.0)
    metrics = self.sort_metrics(reporter.metrics)
    nt.assert_equal(len(metrics), 1)
    first, = metrics
    nt.assert_equal(first['metric'], 'test.counter.1')
    nt.assert_equal(first['points'][0][0], 1000.0)
    nt.assert_equal(first['points'][0][1], 0.8)
    # FIX: removed a stray copy-pasted assertion that re-checked the
    # stale `second` from the first flush (this flush yields one metric).
    # Flush again and make sure we're progressing.
    reporter.metrics = []
    dog.flush(1030.0)
    nt.assert_equal(len(reporter.metrics), 1)
    # Finally, make sure we've flushed all metrics.
    reporter.metrics = []
    dog.flush(1050.0)
    nt.assert_equal(len(reporter.metrics), 0)
def test_host(self):
    """Per-point host overrides survive aggregation and flushing."""
    dog = ThreadStats()
    dog.start(roll_up_interval=10, flush_in_thread=False)
    reporter = dog.reporter = MemoryReporter()
    # Post the same metric with different tags.
    dog.gauge('gauge', 12, timestamp=100.0, host='')  # unset the host
    dog.gauge('gauge', 10, timestamp=100.0)
    dog.gauge('gauge', 15, timestamp=100.0, host='test')
    dog.gauge('gauge', 15, timestamp=100.0, host='test')
    dog.increment('counter', timestamp=100.0)
    dog.increment('counter', timestamp=100.0)
    dog.increment('counter', timestamp=100.0, host='test')
    dog.increment('counter', timestamp=100.0, host='test', tags=['tag'])
    dog.increment('counter', timestamp=100.0, host='test', tags=['tag'])
    dog.flush(200.0)
    metrics = self.sort_metrics(reporter.metrics)
    nt.assert_equal(len(metrics), 6)
    [c1, c2, c3, g1, g2, g3] = metrics
    # FIX: these assertions were wrapped in bare generator expressions,
    # which are never iterated, so they never executed.
    for c in [c1, c2, c3]:
        nt.assert_equal(c['metric'], 'counter')
    nt.assert_equal(c1['host'], None)
    nt.assert_equal(c1['tags'], None)
    nt.assert_equal(c1['points'][0][1], 0.2)
    nt.assert_equal(c2['host'], 'test')
    nt.assert_equal(c2['tags'], None)
    nt.assert_equal(c2['points'][0][1], 0.1)
    nt.assert_equal(c3['host'], 'test')
    nt.assert_equal(c3['tags'], ['tag'])
    nt.assert_equal(c3['points'][0][1], 0.2)
    for g in [g1, g2, g3]:
        nt.assert_equal(g['metric'], 'gauge')
    nt.assert_equal(g1['host'], None)
    nt.assert_equal(g1['points'][0][1], 10)
    nt.assert_equal(g2['host'], '')
    nt.assert_equal(g2['points'][0][1], 12)
    nt.assert_equal(g3['host'], 'test')
    nt.assert_equal(g3['points'][0][1], 15)
    # Ensure histograms work as well.
    @dog.timed('timed', host='test')
    def test():
        pass
    test()
    dog.histogram('timed', 20, timestamp=300.0, host='test')
    reporter.metrics = []
    dog.flush(400)
    for metric in reporter.metrics:
        assert metric['host'] == 'test'
def test_constant_tags(self):
    """ Constant tags are attached to all metrics. """
    dog = ThreadStats(constant_tags=["type:constant"])
    dog.start(roll_up_interval=1, flush_in_thread=False)
    dog.reporter = self.reporter

    # Post the same metric with different tags.
    dog.gauge("gauge", 10, timestamp=100.0)
    dog.gauge("gauge", 15, timestamp=100.0, tags=["env:production", 'db'])
    dog.gauge("gauge", 20, timestamp=100.0, tags=["env:staging"])
    dog.increment("counter", timestamp=100.0)
    dog.increment("counter", timestamp=100.0, tags=["env:production", 'db'])
    dog.increment("counter", timestamp=100.0, tags=["env:staging"])
    dog.flush(200.0)

    # Assertions on all metrics
    self.assertMetric(count=6)

    # Each series carries the constant tag appended to its own tags.
    expected_series = [
        ('gauge', 10, ["type:constant"]),
        ("gauge", 15, ["env:production", "db", "type:constant"]),
        ("gauge", 20, ["env:staging", "type:constant"]),
        ("counter", 1, ["type:constant"]),
        ("counter", 1, ["env:production", "db", "type:constant"]),
        ("counter", 1, ["env:staging", "type:constant"]),
    ]
    for series_name, series_value, series_tags in expected_series:
        self.assertMetric(name=series_name, value=series_value,
                          tags=series_tags, count=1)

    # Ensure histograms work as well.
    @dog.timed('timed', tags=['version:1'])
    def do_nothing():
        """ A function that does nothing, but being timed. """
        pass

    with patch("datadog.threadstats.base.time", return_value=300):
        do_nothing()

    dog.histogram('timed', 20, timestamp=300.0, tags=['db', 'version:2'])
    self.reporter.metrics = []
    dog.flush(400.0)

    # Histograms, and related metric types, produce 8 different metrics
    self.assertMetric(tags=["version:1", "type:constant"], count=8)
    self.assertMetric(tags=["db", "version:2", "type:constant"], count=8)
def test_host(self):
    """Per-point host overrides survive aggregation and flushing."""
    dog = ThreadStats()
    dog.start(roll_up_interval=10, flush_in_thread=False)
    reporter = dog.reporter = MemoryReporter()
    # Post the same metric with different tags.
    dog.gauge("gauge", 12, timestamp=100.0, host="")  # unset the host
    dog.gauge("gauge", 10, timestamp=100.0)
    dog.gauge("gauge", 15, timestamp=100.0, host="test")
    dog.gauge("gauge", 15, timestamp=100.0, host="test")
    dog.increment("counter", timestamp=100.0)
    dog.increment("counter", timestamp=100.0)
    dog.increment("counter", timestamp=100.0, host="test")
    dog.increment("counter", timestamp=100.0, host="test", tags=["tag"])
    dog.increment("counter", timestamp=100.0, host="test", tags=["tag"])
    dog.flush(200.0)
    metrics = self.sort_metrics(reporter.metrics)
    nt.assert_equal(len(metrics), 6)
    [c1, c2, c3, g1, g2, g3] = metrics
    # FIX: these assertions were wrapped in bare generator expressions,
    # which are never iterated, so they never executed.
    for c in [c1, c2, c3]:
        nt.assert_equal(c["metric"], "counter")
    nt.assert_equal(c1["host"], None)
    nt.assert_equal(c1["tags"], None)
    nt.assert_equal(c1["points"][0][1], 2)
    nt.assert_equal(c2["host"], "test")
    nt.assert_equal(c2["tags"], None)
    nt.assert_equal(c2["points"][0][1], 1)
    nt.assert_equal(c3["host"], "test")
    nt.assert_equal(c3["tags"], ["tag"])
    nt.assert_equal(c3["points"][0][1], 2)
    for g in [g1, g2, g3]:
        nt.assert_equal(g["metric"], "gauge")
    nt.assert_equal(g1["host"], None)
    nt.assert_equal(g1["points"][0][1], 10)
    nt.assert_equal(g2["host"], "")
    nt.assert_equal(g2["points"][0][1], 12)
    nt.assert_equal(g3["host"], "test")
    nt.assert_equal(g3["points"][0][1], 15)
    # Ensure histograms work as well.
    @dog.timed("timed", host="test")
    def test():
        pass
    test()
    dog.histogram("timed", 20, timestamp=300.0, host="test")
    reporter.metrics = []
    dog.flush(400)
    for metric in reporter.metrics:
        assert metric["host"] == "test"
def test_host(self):
    """Per-point host overrides survive aggregation and flushing."""
    dog = ThreadStats()
    dog.start(roll_up_interval=10, flush_in_thread=False)
    reporter = dog.reporter = MemoryReporter()
    # Post the same metric with different tags.
    dog.gauge('gauge', 12, timestamp=100.0, host='')  # unset the host
    dog.gauge('gauge', 10, timestamp=100.0)
    dog.gauge('gauge', 15, timestamp=100.0, host='test')
    dog.gauge('gauge', 15, timestamp=100.0, host='test')
    dog.increment('counter', timestamp=100.0)
    dog.increment('counter', timestamp=100.0)
    dog.increment('counter', timestamp=100.0, host='test')
    dog.increment('counter', timestamp=100.0, host='test', tags=['tag'])
    dog.increment('counter', timestamp=100.0, host='test', tags=['tag'])
    dog.flush(200.0)
    metrics = self.sort_metrics(reporter.metrics)
    nt.assert_equal(len(metrics), 6)
    [c1, c2, c3, g1, g2, g3] = metrics
    # FIX: these assertions were wrapped in bare generator expressions,
    # which are never iterated, so they never executed.
    for c in [c1, c2, c3]:
        nt.assert_equal(c['metric'], 'counter')
    nt.assert_equal(c1['host'], None)
    nt.assert_equal(c1['tags'], None)
    nt.assert_equal(c1['points'][0][1], 2)
    nt.assert_equal(c2['host'], 'test')
    nt.assert_equal(c2['tags'], None)
    nt.assert_equal(c2['points'][0][1], 1)
    nt.assert_equal(c3['host'], 'test')
    nt.assert_equal(c3['tags'], ['tag'])
    nt.assert_equal(c3['points'][0][1], 2)
    for g in [g1, g2, g3]:
        nt.assert_equal(g['metric'], 'gauge')
    nt.assert_equal(g1['host'], None)
    nt.assert_equal(g1['points'][0][1], 10)
    nt.assert_equal(g2['host'], '')
    nt.assert_equal(g2['points'][0][1], 12)
    nt.assert_equal(g3['host'], 'test')
    nt.assert_equal(g3['points'][0][1], 15)
    # Ensure histograms work as well.
    @dog.timed('timed', host='test')
    def test():
        pass
    test()
    dog.histogram('timed', 20, timestamp=300.0, host='test')
    reporter.metrics = []
    dog.flush(400)
    for metric in reporter.metrics:
        assert metric['host'] == 'test'
def test_constant_tags(self):
    """Constant tags are appended to every flushed series' tags."""
    dog = ThreadStats(constant_tags=['type:constant'])
    dog.start(roll_up_interval=10, flush_in_thread=False)
    reporter = dog.reporter = MemoryReporter()
    # Post the same metric with different tags.
    dog.gauge('gauge', 10, timestamp=100.0)
    dog.gauge('gauge', 15, timestamp=100.0, tags=['env:production', 'db'])
    dog.gauge('gauge', 20, timestamp=100.0, tags=['env:staging'])
    dog.increment('counter', timestamp=100.0)
    dog.increment('counter', timestamp=100.0, tags=['env:production', 'db'])
    dog.increment('counter', timestamp=100.0, tags=['env:staging'])
    dog.flush(200.0)
    metrics = self.sort_metrics(reporter.metrics)
    nt.assert_equal(len(metrics), 6)
    [c1, c2, c3, g1, g2, g3] = metrics
    # FIX: these assertions were wrapped in bare generator expressions,
    # which are never iterated, so they never executed.
    for c in [c1, c2, c3]:
        nt.assert_equal(c['metric'], 'counter')
    nt.assert_equal(c1['tags'], ['env:production', 'db', 'type:constant'])
    nt.assert_equal(c1['points'][0][1], 1)
    nt.assert_equal(c2['tags'], ['env:staging', 'type:constant'])
    nt.assert_equal(c2['points'][0][1], 1)
    nt.assert_equal(c3['tags'], ['type:constant'])
    nt.assert_equal(c3['points'][0][1], 1)
    for g in [g1, g2, g3]:
        nt.assert_equal(g['metric'], 'gauge')
    nt.assert_equal(g1['tags'], ['env:production', 'db', 'type:constant'])
    nt.assert_equal(g1['points'][0][1], 15)
    nt.assert_equal(g2['tags'], ['env:staging', 'type:constant'])
    nt.assert_equal(g2['points'][0][1], 20)
    nt.assert_equal(g3['tags'], ['type:constant'])
    nt.assert_equal(g3['points'][0][1], 10)
    # Ensure histograms work as well.
    @dog.timed('timed', tags=['version:1'])
    def test():
        pass
    test()
    dog.histogram('timed', 20, timestamp=300.0, tags=['db', 'version:2'])
    reporter.metrics = []
    dog.flush(400)
    for metric in reporter.metrics:
        assert metric['tags']  # this is enough
def test_constant_tags(self):
    """Constant tags are appended to every flushed series' tags."""
    dog = ThreadStats(constant_tags=["type:constant"])
    dog.start(roll_up_interval=10, flush_in_thread=False)
    reporter = dog.reporter = MemoryReporter()
    # Post the same metric with different tags.
    dog.gauge("gauge", 10, timestamp=100.0)
    dog.gauge("gauge", 15, timestamp=100.0, tags=["env:production", "db"])
    dog.gauge("gauge", 20, timestamp=100.0, tags=["env:staging"])
    dog.increment("counter", timestamp=100.0)
    dog.increment("counter", timestamp=100.0, tags=["env:production", "db"])
    dog.increment("counter", timestamp=100.0, tags=["env:staging"])
    dog.flush(200.0)
    metrics = self.sort_metrics(reporter.metrics)
    nt.assert_equal(len(metrics), 6)
    [c1, c2, c3, g1, g2, g3] = metrics
    # FIX: these assertions were wrapped in bare generator expressions,
    # which are never iterated, so they never executed.
    for c in [c1, c2, c3]:
        nt.assert_equal(c["metric"], "counter")
    nt.assert_equal(c1["tags"], ["env:production", "db", "type:constant"])
    nt.assert_equal(c1["points"][0][1], 1)
    nt.assert_equal(c2["tags"], ["env:staging", "type:constant"])
    nt.assert_equal(c2["points"][0][1], 1)
    nt.assert_equal(c3["tags"], ["type:constant"])
    nt.assert_equal(c3["points"][0][1], 1)
    for g in [g1, g2, g3]:
        nt.assert_equal(g["metric"], "gauge")
    nt.assert_equal(g1["tags"], ["env:production", "db", "type:constant"])
    nt.assert_equal(g1["points"][0][1], 15)
    nt.assert_equal(g2["tags"], ["env:staging", "type:constant"])
    nt.assert_equal(g2["points"][0][1], 20)
    nt.assert_equal(g3["tags"], ["type:constant"])
    nt.assert_equal(g3["points"][0][1], 10)
    # Ensure histograms work as well.
    @dog.timed("timed", tags=["version:1"])
    def test():
        pass
    test()
    dog.histogram("timed", 20, timestamp=300.0, tags=["db", "version:2"])
    reporter.metrics = []
    dog.flush(400)
    for metric in reporter.metrics:
        assert metric["tags"]  # this is enough
def test_tags(self):
    """Same metric posted with different tag sets flushes as separate series."""
    dog = ThreadStats()
    dog.start(roll_up_interval=10, flush_in_thread=False)
    reporter = dog.reporter = MemoryReporter()
    # Post the same metric with different tags.
    dog.gauge('gauge', 10, timestamp=100.0)
    dog.gauge('gauge', 15, timestamp=100.0, tags=['env:production', 'db'])
    dog.gauge('gauge', 20, timestamp=100.0, tags=['env:staging'])
    dog.increment('counter', timestamp=100.0)
    dog.increment('counter', timestamp=100.0, tags=['env:production', 'db'])
    dog.increment('counter', timestamp=100.0, tags=['env:staging'])
    dog.flush(200.0)

    metrics = self.sort_metrics(reporter.metrics)
    assert len(metrics) == 6
    [c1, c2, c3, g1, g2, g3] = metrics

    # Counters are rates normalized by the 10s roll-up interval.
    for counter in (c1, c2, c3):
        assert counter['metric'] == 'counter'
        assert counter['points'][0][1] == 0.1
    assert c1['tags'] is None
    assert c2['tags'] == ['env:production', 'db']
    assert c3['tags'] == ['env:staging']

    for gauge in (g1, g2, g3):
        assert gauge['metric'] == 'gauge'
    assert (g1['tags'], g1['points'][0][1]) == (None, 10)
    assert (g2['tags'], g2['points'][0][1]) == (['env:production', 'db'], 15)
    assert (g3['tags'], g3['points'][0][1]) == (['env:staging'], 20)
import numpy as np
from datadog import initialize


def read_properties(file):
    """Parse ``key = value`` lines from *file* into a dict.

    Lines without an ``=`` are skipped; keys and values are stripped of
    surrounding whitespace. Only the first ``=`` split is honored.
    """
    properties = {}
    for line in file:
        parts = line.split('=')
        if len(parts) > 1:
            properties[parts[0].strip()] = parts[1].strip()
    return properties


# FIX: the config file handle was never closed; a context manager
# releases it deterministically.
with open('datadog.ini', 'r') as config_file:
    dd_props = read_properties(config_file)

options = {'api_key': dd_props['api_key'], 'app_key': dd_props['app_key']}
initialize(**options)

num = 100000
refdist = {'apple': 0.35, 'orange': 0.35, 'grape': 0.25, 'durian': 0.05}

from datadog import ThreadStats

stats = ThreadStats()
stats.start()

# Draw `num` fruits from the reference distribution, counting each pick.
for _ in range(num):
    sampled_fruit = np.random.choice(list(refdist.keys()),
                                     p=list(refdist.values()))
    stats.increment('fruit.picked', 1, tags=['fruit:' + sampled_fruit])
from datadog import initialize


def read_properties(file):
    """Parse ``key = value`` lines from *file* into a dict.

    Lines without an ``=`` are skipped; keys and values are stripped of
    surrounding whitespace. Only the first ``=`` split is honored.
    """
    properties = {}
    for line in file:
        parts = line.split('=')
        if len(parts) > 1:
            properties[parts[0].strip()] = parts[1].strip()
    return properties


# FIX: the config file handle was never closed; a context manager
# releases it deterministically.
with open('datadog.ini', 'r') as config_file:
    dd_props = read_properties(config_file)

options = {'api_key': dd_props['api_key'], 'app_key': dd_props['app_key']}
initialize(**options)

from datadog import ThreadStats

stats = ThreadStats()
stats.start()

# One untagged counter plus the same metric split across three classes.
stats.increment('kwyho.laugh')
stats.increment('kwyho.smile', 1, tags=["class:0"])
stats.increment('kwyho.smile', 1, tags=["class:1"])
stats.increment('kwyho.smile', 1, tags=["class:2"])
# NOTE(review): fragment — `stats`, `options`, `iperf3`, `json`,
# `remote_site`, `test_duration` and `siteabbrv` are defined elsewhere
# in this file.
# NOTE(review): stats.start() runs before initialize(**options); verify
# the Datadog client is configured before the first flush fires.
stats.start()
initialize(**options)

# Iperf Client Settings
client = iperf3.Client()
client.server_hostname = remote_site
client.zerocopy = True
client.verbose = False
client.reverse = True  # reverse mode: server sends, client receives
client.duration = int(test_duration)

# Run test and load into dictionary
data = json.loads(str(client.run()))

# Extract output into integers and convert to Megabits per second
sent_mbps_avg = int(data['end']['sum_sent']['bits_per_second']) / 1000000
received_mbps_avg = int(
    data['end']['sum_received']['bits_per_second']) / 1000000

# Debugging output
print('Sent Average ' + str(sent_mbps_avg) + ' Mbps')
print('Received Average ' + str(received_mbps_avg) + ' Mbps')

# Feed Metrics into DogStatsd
# NOTE(review): each metric name receives both an increment (run count)
# and a gauge (measured Mbps) under the same name — confirm intentional.
print('iperf.' + siteabbrv + '.mbps.avg.ingress')
stats.increment('iperf.' + siteabbrv + '.mbps.avg.ingress')
stats.gauge('iperf.' + siteabbrv + '.mbps.avg.ingress', sent_mbps_avg)
stats.increment('iperf.' + siteabbrv + '.mbps.avg.egress')
stats.gauge('iperf.' + siteabbrv + '.mbps.avg.egress', received_mbps_avg)
class Bot(commands.AutoShardedBot):
    """RPGBot Discord client (webhook-roleplay variant).

    Wires up file logging, resource/config loading, cogs, the embedded HTTP
    server, and Datadog ``ThreadStats`` metrics, then implements the bot's
    event handlers.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args,
                         game=discord.Game(name="rp!help for help!"),
                         **kwargs)
        self.prefixes = {}
        self.owner_id = 122739797646245899
        self.lounge_id = 166349353999532035
        self.uptime = datetime.datetime.utcnow()  # launch time; see get_bot_uptime
        self.commands_used = Counter()    # per-command invocation counts
        self.server_commands = Counter()  # per-guild invocation counts
        self.socket_stats = Counter()     # gateway event counts by event type
        self.shutdowns = []
        self.lotteries = dict()
        # guild id -> (user id -> active character name) for webhook roleplay
        self.in_character = defaultdict(lambda: defaultdict(str))
        self.logger = logging.getLogger('discord')  # Discord Logging
        self.logger.setLevel(logging.INFO)
        self.handler = logging.FileHandler(filename=os.path.join(
            'resources', 'discord.log'), encoding='utf-8', mode='w')
        self.handler.setFormatter(
            logging.Formatter(
                '%(asctime)s:%(levelname)s:%(name)s: %(message)s'))
        self.logger.addHandler(self.handler)
        self.session = aiohttp.ClientSession(loop=self.loop)
        self.shutdowns.append(self.shutdown)
        # _auth is a JSON list: [?, bots.discord.pw token, discordbots.org
        # token, DD api key, DD app key] — inferred from usages below.
        with open("resources/auth") as af:
            self._auth = json.loads(af.read())
        with open("resources/dnditems.json", 'r') as dndf:
            self.dnditems = json.loads(dndf.read())
        # NOTE(review): dndmagic is loaded from dnditems.json — looks like a
        # copy-paste slip (probably meant dndmagic.json); confirm.
        with open("resources/dnditems.json", 'r') as dndf2:
            self.dndmagic = json.loads(dndf2.read())
        with open("resources/pokemonitems.json", 'r') as dndf3:
            self.pokemonitems = json.loads(dndf3.read())
        with open("resources/starwars.json", 'r') as swf:
            self.switems = json.loads(swf.read())
        # Skip the embedded HTTP API when launched with a 'debug' argv flag.
        if 'debug' not in sys.argv:
            self.httpserver = server.API(self)
            self.loop.create_task(self.httpserver.host())
        self.db: db.Database = db.Database(self)
        self.di: data.DataInteraction = data.DataInteraction(self)
        self.default_udata = data.default_user
        self.default_servdata = data.default_server
        # Alphabet for randsample() transaction/lottery codes.
        self.rnd = "1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZ"
        with open("resources/patrons.json") as pj:
            self.patrons = {
                int(k): v
                for k, v in json.loads(pj.read()).items()
            }
        with open("resources/newtranslations.json", 'rb') as trf:
            self.translations = json.loads(trf.read().decode())
        self.languages = ["en", "fr", "de", "ru", "es"]
        with open("resources/blacklist.json") as blf:
            self.blacklist = json.loads(blf.read())
        with open("savedata/prefixes.json") as prf:
            self.prefixes = json.loads(prf.read())
        # Persist prefixes at interpreter exit.
        # NOTE(review): the lambda opens the file without closing it; the
        # handle is only flushed by interpreter teardown — confirm intended.
        atexit.register(lambda: json.dump(self.prefixes,
                                          open("savedata/prefixes.json",
                                               'w')))
        icogs = [
            cogs.admin.Admin(self),
            cogs.team.Team(self),
            cogs.economy.Economy(self),
            cogs.inventory.Inventory(self),
            cogs.settings.Settings(self),
            cogs.misc.Misc(self),
            cogs.characters.Characters(self),
            cogs.pets.Pets(self),
            cogs.groups.Groups(self),
            cogs.user.User(self),
            cogs.salary.Salary(self),
            cogs.map.Mapping(self),
            cogs.backups.Backups(self),
        ]
        for cog in icogs:
            self.add_cog(cog)
        # self.loop.create_task(self.start_serv())
        # Datadog metrics: init_dd configures api/app keys, ThreadStats
        # batches metric submissions on a background flush thread.
        init_dd(self._auth[3], self._auth[4])
        self.stats = ThreadStats()
        self.stats.start()
        self._first = True  # guards update_stats from being spawned twice

    async def on_ready(self):
        """Log identity; start the stats-posting loop exactly once."""
        print('Logged in as')
        print(self.user.name)
        print(self.user.id)
        print('------')
        # on_ready can fire again after reconnects; _first ensures only one
        # update_stats task ever runs.
        if self._first:
            self.loop.create_task(self.update_stats())
            self._first = False

    async def on_message(self, msg):
        """Dispatch commands; relay non-command messages through a character
        webhook for users currently 'in character'."""
        if msg.author.id not in self.blacklist:
            """
            if False and msg.guild:
                prefixes = await self.di.get_cmd_prefixes(msg.guild)
                for cmd, prefix in prefixes.items():
                    if msg.content.startswith(prefix):
                        msg.content = msg.content.replace(prefix, "rp!" + (cmd.replace(".", " ")) + " ", 1)
                        break
            """
            ctx = await self.get_context(msg)
            await self.invoke(ctx)
            # Not a command: maybe relay as a roleplay character webhook.
            if ctx.command is None and ctx.guild:
                if ctx.guild.id in self.in_character:
                    if ctx.author.id in self.in_character[ctx.guild.id]:
                        char = self.in_character[ctx.guild.id][ctx.author.id]
                        hooks = await ctx.guild.webhooks()
                        hook = discord.utils.get(hooks, name=char)
                        if hook is None:
                            # Webhook was deleted externally: drop the
                            # in-character state and bail out.
                            # await ctx.send(await _(ctx, "Webhook missing!"))
                            del self.in_character[ctx.guild.id][ctx.author.id]
                            return
                        content = msg.content
                        files = msg.attachments
                        dfiles = []
                        # Re-upload the user's attachments through the hook.
                        for f in files:
                            dio = BytesIO()
                            await f.save(dio)
                            dfiles.append(
                                discord.File(dio, f.filename,
                                             spoiler=f.is_spoiler()))
                        embeds = msg.embeds
                        # Webhooks are channel-bound: recreate in the current
                        # channel if the user moved.
                        if hook.channel.id != msg.channel.id:
                            await hook.delete()
                            hook = await msg.channel.create_webhook(name=char)
                        await msg.delete()
                        url = (await self.di.get_character(
                            ctx.guild, char)).meta.get("icon")
                        await hook.send(content, avatar_url=url,
                                        files=dfiles, embeds=embeds)

    async def update_stats(self):
        """Post the guild count to bot-list sites every 4 hours."""
        url = "https://bots.discord.pw/api/bots/{}/stats".format(self.user.id)
        while not self.is_closed():
            payload = json.dumps(dict(server_count=len(self.guilds))).encode()
            headers = {
                'authorization': self._auth[1],
                "Content-Type": "application/json"
            }
            async with self.session.post(url, data=payload,
                                         headers=headers) as response:
                await response.read()
            # NOTE(review): `url` is reassigned inside the loop, so from the
            # second iteration onwards BOTH posts target discordbots.org and
            # bots.discord.pw is never updated again — confirm intended.
            url = "https://discordbots.org/api/bots/{}/stats".format(
                self.user.id)
            payload = json.dumps(dict(server_count=len(self.guilds))).encode()
            headers = {
                'authorization': self._auth[2],
                "Content-Type": "application/json"
            }
            async with self.session.post(url, data=payload,
                                         headers=headers) as response:
                await response.read()
            await asyncio.sleep(14400)

    async def on_command(self, ctx):
        """Record metrics, nag non-patron guilds, and award experience."""
        self.stats.increment("RPGBot.commands",
                             tags=["RPGBot:commands"],
                             host="scw-8112e8")  # NOTE(review): hard-coded host tag
        self.stats.increment(
            f"RPGBot.commands.{str(ctx.command).replace(' ', '.')}",
            tags=["RPGBot:commands"],
            host="scw-8112e8")
        self.commands_used[ctx.command] += 1
        # Periodically reseed the global RNG from gateway traffic.
        if self.commands_used[ctx.command] % 100 == 0:
            seed(self.socket_stats["PRESENCE_UPDATE"])
        if isinstance(ctx.author, discord.Member):
            self.server_commands[ctx.guild.id] += 1
            # Every 50th command in a non-patron guild shows the donation ask.
            if ctx.guild.id not in self.patrons:
                if (self.server_commands[ctx.guild.id] % 50) == 0:
                    await ctx.send(
                        discord.Embed(description=await _(
                            ctx,
                            "This bot costs $300/yr to run. If you like the utilities it provides,"
                            " consider buying me a [coffee](https://ko-fi.com/henrys)"
                            " or subscribe as a [Patron](https://www.patreon.com/henry232323)"
                            " We've also recently released a new version of the bot, get it [here](https://discord.com/oauth2/authorize?client_id=673737213959208980&scope=bot&permissions=805596240) "
                        )))
            # Random XP award, weighted by command category.
            if await self.di.get_exp_enabled(ctx.guild):
                add = choice([0, 0, 0, 0, 0, 1, 1, 2, 3])
                fpn = ctx.command.full_parent_name.lower()
                if fpn:
                    values = {
                        "character": 2,
                        "inventory": 1,
                        "economy": 1,
                        "pet": 2,
                        "guild": 2,
                        "team": 1,
                    }
                    add += values.get(fpn, 0)
                if add:
                    await asyncio.sleep(4)
                    r = await self.di.add_exp(ctx.author, add)
                    if r is not None:
                        # Level-up: react with an "up" arrow.
                        await ctx.message.add_reaction("\u23EB")
            # Optional per-guild auto-delete of command invocations.
            time = await self.di.get_delete_time(ctx.guild)
            if time:
                await asyncio.sleep(time)
                await ctx.message.delete()

    async def on_member_join(self, member):
        """Grant the guild's configured starting balance to new members."""
        # Only seed members with a zero balance (e.g. returning members keep
        # their money).
        if await self.di.get_balance(member) != 0:
            return
        amount = await self.di.get_guild_start(member.guild)
        if amount:
            await self.di.set_eco(member, amount)

    async def on_member_remove(self, member):
        """Optionally wipe a member's data when they leave (guild setting)."""
        setting = await self.di.get_leave_setting(member.guild)
        if setting:
            await self.db.update_user_data(member, {})

    async def on_command_error(self, ctx, exception):
        """Report command errors to Datadog and reply with a summary."""
        self.stats.increment("RPGBot.errors",
                             tags=["RPGBot:errors"],
                             host="scw-8112e8")
        logging.info(
            f"Exception in {ctx.command} {ctx.guild}:{ctx.channel} {exception}"
        )
        # Unwrap CommandInvokeError to the underlying cause where present.
        exception = getattr(exception, "original", exception)
        traceback.print_tb(exception.__traceback__)
        print(exception)
        try:
            if isinstance(exception, commands.MissingRequiredArgument):
                await ctx.send(f"```{exception}```")
            elif isinstance(exception, TimeoutError):
                await ctx.send(await _(
                    ctx, "This operation ran out of time! Please try again"))
            elif isinstance(exception, discord.Forbidden):
                await ctx.send(await _(
                    ctx,
                    "Error: This command requires the bot to have permission to send links."
                ))
            else:
                await ctx.send(
                    f"`{exception} If this is unexpected, please report this to the bot creator`"
                )
        except discord.Forbidden:
            # Can't even send the error message; drop it silently.
            pass

    async def on_guild_join(self, guild):
        """Leave blacklisted guilds immediately; count guild joins."""
        if guild.id in self.blacklist:
            await guild.leave()
        self.stats.increment("RPGBot.guilds",
                             tags=["RPGBot:guilds"],
                             host="scw-8112e8")

    async def on_guild_leave(self, guild):
        """Count guild departures (negative increment)."""
        self.stats.increment("RPGBot.guilds", -1,
                             tags=["RPGBot:guilds"],
                             host="scw-8112e8")

    async def on_socket_response(self, msg):
        """Tally raw gateway events by their 't' (event type) field."""
        self.socket_stats[msg.get('t')] += 1

    async def get_bot_uptime(self):
        """Get time between now and when the bot went up"""
        now = datetime.datetime.utcnow()
        delta = now - self.uptime
        hours, remainder = divmod(int(delta.total_seconds()), 3600)
        minutes, seconds = divmod(remainder, 60)
        days, hours = divmod(hours, 24)
        if days:
            fmt = '{d} days, {h} hours, {m} minutes, and {s} seconds'
        else:
            fmt = '{h} hours, {m} minutes, and {s} seconds'
        return fmt.format(d=days, h=hours, m=minutes, s=seconds)

    def randsample(self):
        # 6 distinct characters drawn from self.rnd (random.sample).
        return "".join(sample(self.rnd, 6))

    @staticmethod
    def get_exp(level):
        # Quadratic XP curve: 0.1*level^2 + 5*level + 4, truncated to int.
        return int(0.1 * level**2 + 5 * level + 4)

    @staticmethod
    def get_ram():
        """Get the bot's RAM usage info."""
        mem = psutil.virtual_memory()
        # 0x40_000_000 bytes == 1 GiB.
        return f"{mem.used / 0x40_000_000:.2f}/{mem.total / 0x40_000_000:.2f}GB ({mem.percent}%)"

    @staticmethod
    def format_table(lines, separate_head=True):
        """Prints a formatted table given a 2 dimensional array"""
        # Count the column width
        widths = []
        for line in lines:
            for i, size in enumerate([len(x) for x in line]):
                while i >= len(widths):
                    widths.append(0)
                if size > widths[i]:
                    widths[i] = size
        # Generate the format string to pad the columns
        print_string = ""
        for i, width in enumerate(widths):
            print_string += "{" + str(i) + ":" + str(width) + "} | "
        if not len(print_string):
            return
        print_string = print_string[:-3]
        # Print the actual data
        fin = []
        for i, line in enumerate(lines):
            fin.append(print_string.format(*line))
            if i == 0 and separate_head:
                fin.append("-" * (sum(widths) + 3 * (len(widths) - 1)))
        return "\n".join(fin)

    async def shutdown(self):
        """Persist prefixes and close the aiohttp session before exit."""
        with open("savedata/prefixes.json", 'w') as prf:
            json.dump(self.prefixes, prf)
        await self.session.close()
class Bot(commands.AutoShardedBot):
    """RPGBot Discord client (3-shard variant with a Kyoukai HTTP API).

    Sets up logging, cogs, the database connection, the embedded web API,
    and Datadog ``ThreadStats`` metrics, and implements the event handlers.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args,
                         shard_count=3,
                         game=discord.Game(name="rp!help for help!"),
                         **kwargs)
        self.owner_id = 122739797646245899
        self.lounge_id = 166349353999532035
        self.uptime = datetime.datetime.utcnow()  # launch time; see get_bot_uptime
        self.commands_used = Counter()    # per-command invocation counts
        self.server_commands = Counter()  # per-guild invocation counts
        self.socket_stats = Counter()     # gateway event counts by event type
        self.shutdowns = []
        self.lotteries = dict()
        self.logger = logging.getLogger('discord')  # Discord Logging
        self.logger.setLevel(logging.INFO)
        self.handler = logging.FileHandler(filename=os.path.join(
            'resources', 'discord.log'), encoding='utf-8', mode='w')
        self.handler.setFormatter(
            logging.Formatter(
                '%(asctime)s:%(levelname)s:%(name)s: %(message)s'))
        self.logger.addHandler(self.handler)
        self.session = aiohttp.ClientSession(loop=self.loop)
        self.shutdowns.append(self.shutdown)
        # _auth is a JSON list: [?, bots.discord.pw token, discordbots.org
        # token, DD api key, DD app key] — inferred from usages below.
        with open("resources/auth", 'r') as af:
            self._auth = json.loads(af.read())
        self.db: db.Database = db.Database(self)
        self.di: data.DataInteraction = data.DataInteraction(self)
        self.default_udata = data.default_user
        self.default_servdata = data.default_server
        # Alphabet for randsample() codes.
        self.rnd = "1234567890abcdefghijklmnopqrstuvwxyz"
        icogs = [
            cogs.admin.Admin(self),
            cogs.team.Team(self),
            cogs.economy.Economy(self),
            cogs.inventory.Inventory(self),
            cogs.settings.Settings(self),
            cogs.misc.Misc(self),
            cogs.characters.Characters(self),
            cogs.pokemon.Pokemon(self),
            cogs.groups.Groups(self),
            cogs.user.User(self),
            cogs.salary.Salary(self)
        ]
        for cog in icogs:
            self.add_cog(cog)
        self.loop.create_task(self.start_serv())
        self.loop.create_task(self.db.connect())
        # Datadog metrics: init_dd configures api/app keys, ThreadStats
        # batches submissions on a background flush thread.
        init_dd(self._auth[3], self._auth[4])
        self.stats = ThreadStats()
        self.stats.start()

    async def on_ready(self):
        """Log identity and kick off the bot-list stats posting chain."""
        print('Logged in as')
        print(self.user.name)
        print(self.user.id)
        print('------')
        # NOTE(review): on_ready may fire again after reconnects, scheduling
        # overlapping update_stats chains — confirm intended.
        await self.update_stats()

    async def update_stats(self):
        """Post the guild count to both bot-list sites, then reschedule
        itself to run again in 4 hours via loop.call_later."""
        url = "https://bots.discord.pw/api/bots/{}/stats".format(self.user.id)
        payload = json.dumps(dict(server_count=len(self.guilds))).encode()
        headers = {
            'authorization': self._auth[1],
            "Content-Type": "application/json"
        }
        async with self.session.post(url, data=payload,
                                     headers=headers) as response:
            await response.read()
        url = "https://discordbots.org/api/bots/{}/stats".format(self.user.id)
        payload = json.dumps(dict(server_count=len(self.guilds))).encode()
        headers = {
            'authorization': self._auth[2],
            "Content-Type": "application/json"
        }
        async with self.session.post(url, data=payload,
                                     headers=headers) as response:
            await response.read()
        # Re-arm: 14400 s == 4 hours.
        self.loop.call_later(
            14400, lambda: asyncio.ensure_future(self.update_stats()))

    async def on_message(self, message):
        """Ignore bot authors; otherwise run the command pipeline."""
        if message.author.bot:
            return
        await self.process_commands(message)

    async def on_command(self, ctx):
        """Record metrics, periodically nag for donations, and award XP."""
        self.stats.increment("RPGBot.commands",
                             tags=["RPGBot:commands"],
                             host="scw-8112e8")  # NOTE(review): hard-coded host tag
        self.stats.increment(
            f"RPGBot.commands.{str(ctx.command).replace(' ', '.')}",
            tags=["RPGBot:commands"],
            host="scw-8112e8")
        self.commands_used[ctx.command] += 1
        if isinstance(ctx.author, discord.Member):
            self.server_commands[ctx.guild.id] += 1
            # Every 50th command in a guild shows the donation ask.
            if not (self.server_commands[ctx.guild.id] % 50):
                await ctx.send(
                    "This bot costs $130/yr to run. If you like the utilities it provides,"
                    " consider buying me a coffee <https://ko-fi.com/henrys>"
                    " or subscribe as a Patron <https://www.patreon.com/henry232323>"
                )
            # Random XP award, weighted by command category.
            add = choice([0, 0, 0, 0, 0, 1, 1, 2, 3])
            fpn = ctx.command.full_parent_name
            if fpn:
                values = {
                    "character": 2,
                    "inventory": 1,
                    "economy": 1,
                    "pokemon": 2,
                    "guild": 2,
                    "team": 1,
                }
                add += values.get(fpn, 0)
            if add:
                await asyncio.sleep(3)
                r = await self.di.add_exp(ctx.author, add)
                if r is not None:
                    await ctx.send(f"{ctx.author.mention} is now level {r}!")

    async def on_command_error(self, ctx, exception):
        """Report command errors to Datadog and echo them back to the user."""
        self.stats.increment("RPGBot.errors",
                             tags=["RPGBot:errors"],
                             host="scw-8112e8")
        logging.info(
            f"Exception in {ctx.command} {ctx.guild}:{ctx.channel} {exception}"
        )
        # NOTE(review): both branches send the identical message — the
        # isinstance check is currently redundant; confirm intended.
        if isinstance(exception, commands.MissingRequiredArgument):
            await ctx.send(f"`{exception}`")
        else:
            await ctx.send(f"`{exception}`")

    async def on_guild_join(self, guild):
        """Leave guilds that are >= 75% bots; otherwise count the join."""
        # NOTE(review): division by guild.member_count — verify it can never
        # be 0 here; also blocking farewell send may raise, hence finally.
        if sum(1 for m in guild.members
               if m.bot) / guild.member_count >= 3 / 4:
            try:
                await guild.channels[0].send(
                    "This server has too many bots! I'm just going to leave if thats alright"
                )
            finally:
                await guild.leave()
        else:
            self.stats.increment("RPGBot.guilds",
                                 tags=["RPGBot:guilds"],
                                 host="scw-8112e8")

    async def on_guild_leave(self, guild):
        """Count guild departures (negative increment)."""
        self.stats.increment("RPGBot.guilds", -1,
                             tags=["RPGBot:guilds"],
                             host="scw-8112e8")

    async def on_socket_response(self, msg):
        """Tally raw gateway events by their 't' (event type) field."""
        self.socket_stats[msg.get('t')] += 1

    async def get_bot_uptime(self):
        """Get time between now and when the bot went up"""
        now = datetime.datetime.utcnow()
        delta = now - self.uptime
        hours, remainder = divmod(int(delta.total_seconds()), 3600)
        minutes, seconds = divmod(remainder, 60)
        days, hours = divmod(hours, 24)
        if days:
            fmt = '{d} days, {h} hours, {m} minutes, and {s} seconds'
        else:
            fmt = '{h} hours, {m} minutes, and {s} seconds'
        return fmt.format(d=days, h=hours, m=minutes, s=seconds)

    def randsample(self):
        # 6 distinct characters drawn from self.rnd (random.sample).
        return "".join(sample(self.rnd, 6))

    @staticmethod
    def get_exp(level):
        # Quadratic XP curve: 0.1*level^2 + 5*level + 4, truncated to int.
        return int(0.1 * level**2 + 5 * level + 4)

    @staticmethod
    def get_ram():
        """Get the bot's RAM usage info."""
        mem = psutil.virtual_memory()
        # 0x40_000_000 bytes == 1 GiB.
        return f"{mem.used / 0x40_000_000:.2f}/{mem.total / 0x40_000_000:.2f}GB ({mem.percent}%)"

    @staticmethod
    def format_table(lines, separate_head=True):
        """Prints a formatted table given a 2 dimensional array"""
        # Count the column width
        widths = []
        for line in lines:
            for i, size in enumerate([len(x) for x in line]):
                while i >= len(widths):
                    widths.append(0)
                if size > widths[i]:
                    widths[i] = size
        # Generate the format string to pad the columns
        print_string = ""
        for i, width in enumerate(widths):
            print_string += "{" + str(i) + ":" + str(width) + "} | "
        if not len(print_string):
            return
        print_string = print_string[:-3]
        # Print the actual data
        fin = []
        for i, line in enumerate(lines):
            fin.append(print_string.format(*line))
            if i == 0 and separate_head:
                fin.append("-" * (sum(widths) + 3 * (len(widths) - 1)))
        return "\n".join(fin)

    async def shutdown(self):
        """Close the aiohttp session on exit."""
        # NOTE(review): ClientSession.close() is a coroutine on modern
        # aiohttp; this call is not awaited — confirm against the pinned
        # aiohttp version.
        self.session.close()

    async def start_serv(self):
        """Serve read-only user/guild data over HTTP on port 1441."""
        self.webapp = Kyoukai(__name__)

        @self.webapp.route("/servers/<int:snowflake>/", methods=["GET"])
        async def getservinfo(ctx: HTTPRequestContext, snowflake: int):
            try:
                snowflake = int(snowflake)
                # NOTE(review): SQL built by f-string interpolation; the
                # int() cast above is the only injection guard — consider a
                # parameterized query.
                req = f"""SELECT info FROM servdata WHERE UUID = {snowflake};"""
                async with self.db._conn.acquire() as connection:
                    response = await connection.fetchval(req)
                # Fall back to the default server template when unknown.
                return Response(response if response else json.dumps(
                    self.default_servdata, indent=4),
                                status=200)
            except:
                # NOTE(review): bare except also swallows programming errors.
                return HTTPException(
                    "Invalid snowflake!",
                    Response("Failed to fetch info!", status=400))

        @self.webapp.route("/users/<int:snowflake>/", methods=["GET"])
        async def getuserinfo(ctx: HTTPRequestContext, snowflake: int):
            try:
                snowflake = int(snowflake)
                # NOTE(review): same f-string SQL pattern as getservinfo.
                req = f"""SELECT info FROM userdata WHERE UUID = {snowflake};"""
                async with self.db._conn.acquire() as connection:
                    response = await connection.fetchval(req)
                return Response(response if response else json.dumps(
                    self.default_udata, indent=4),
                                status=200)
            except:
                return HTTPException(
                    "Invalid snowflake!",
                    Response("Failed to fetch info!", status=400))

        await self.webapp.start('0.0.0.0', 1441)
# NOTE(review): this fragment depends on names defined elsewhere in the
# file (api, title, text, pyodbc).
tags = ['version:1', 'application:web']
api.Event.create(title=title, text=text, tags=tags)

# Use statsd, a Python client for DogStatsd.
from datadog import statsd

# Increment a counter.
statsd.increment('page.views')

# Or ThreadStats, an alternative tool to collect and flush metrics
# using the Datadog REST API.
from datadog import ThreadStats

stats = ThreadStats()
stats.start()
stats.increment('page.views')


def load_initial():
    """Fetch the distinct company names from archchk_tbl.

    Returns:
        The list of ``pyodbc.Row`` results (the original fetched the rows
        and silently discarded them).

    The connection is now always closed, even on error — the original did
    ``cnxn = cnxn.close()``, which rebound the variable to None and leaked
    the connection whenever execute/fetch raised.
    """
    server = 'localhost'
    database = 'acmedb'
    # NOTE(review): placeholder credentials hard-coded in source — load
    # these from configuration or environment variables instead.
    username = '******'
    password = '******'
    cnxn = pyodbc.connect('DRIVER={ODBC Driver 17 for SQL Server};SERVER=' +
                          server + ';DATABASE=' + database + ';UID=' +
                          username + ';PWD=' + password)
    try:
        cursor = cnxn.cursor()
        cursor.execute("SELECT DISTINCT company from archchk_tbl")
        # row=cursor.fetchone()
        row = cursor.fetchall()
        return row
    finally:
        cnxn.close()
class Bot(commands.AutoShardedBot):
    """RPGBot Discord client (5-shard variant).

    Sets up logging, resource loading, cogs, the embedded HTTP server, and
    Datadog ``ThreadStats`` metrics, and implements the event handlers.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, shard_count=5,
                         game=discord.Game(name="rp!help for help!"),
                         **kwargs)
        self.owner_id = 122739797646245899
        self.lounge_id = 166349353999532035
        self.uptime = datetime.datetime.utcnow()  # launch time; see get_bot_uptime
        self.commands_used = Counter()    # per-command invocation counts
        self.server_commands = Counter()  # per-guild invocation counts
        self.socket_stats = Counter()     # gateway event counts by event type
        self.shutdowns = []
        self.lotteries = dict()
        self.logger = logging.getLogger('discord')  # Discord Logging
        self.logger.setLevel(logging.INFO)
        self.handler = logging.FileHandler(filename=os.path.join('resources', 'discord.log'),
                                           encoding='utf-8', mode='w')
        self.handler.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s:%(name)s: %(message)s'))
        self.logger.addHandler(self.handler)
        self.session = aiohttp.ClientSession(loop=self.loop)
        self.shutdowns.append(self.shutdown)
        # _auth is a JSON list: [?, bots.discord.pw token, discordbots.org
        # token, DD api key, DD app key] — inferred from usages below.
        with open("resources/auth") as af:
            self._auth = json.loads(af.read())
        with open("resources/dnditems.json", 'r') as dndf:
            self.dnditems = json.loads(dndf.read())
        # NOTE(review): dndmagic is loaded from dnditems.json — looks like a
        # copy-paste slip (probably meant dndmagic.json); confirm.
        with open("resources/dnditems.json", 'r') as dndf2:
            self.dndmagic = json.loads(dndf2.read())
        with open("resources/pokemonitems.json", 'r') as dndf3:
            self.pokemonitems = json.loads(dndf3.read())
        self.httpserver = server.API(self, "RPGBot")
        server.makepaths(self.httpserver)
        self.db: db.Database = db.Database(self)
        self.di: data.DataInteraction = data.DataInteraction(self)
        self.default_udata = data.default_user
        self.default_servdata = data.default_server
        # Alphabet for randsample() codes.
        self.rnd = "1234567890abcdefghijklmnopqrstuvwxyz"
        with open("resources/patrons.json") as pj:
            self.patrons = {int(k): v for k, v in json.loads(pj.read()).items()}
        with open("resources/newtranslations.json") as trf:
            self.translations = json.loads(trf.read())
        self.languages = ["en", "fr", "de", "ru", "es"]
        with open("resources/blacklist.json") as blf:
            self.blacklist = json.loads(blf.read())
        icogs = [
            cogs.admin.Admin(self),
            cogs.team.Team(self),
            cogs.economy.Economy(self),
            cogs.inventory.Inventory(self),
            cogs.settings.Settings(self),
            cogs.misc.Misc(self),
            cogs.characters.Characters(self),
            cogs.pokemon.Pokemon(self),
            cogs.groups.Groups(self),
            cogs.user.User(self),
            cogs.salary.Salary(self),
            cogs.map.Mapping(self),
        ]
        for cog in icogs:
            self.add_cog(cog)
        # self.loop.create_task(self.start_serv())
        self.loop.create_task(self.httpserver.host())
        # Datadog metrics: init_dd configures api/app keys, ThreadStats
        # batches submissions on a background flush thread.
        init_dd(self._auth[3], self._auth[4])
        self.stats = ThreadStats()
        self.stats.start()

    async def on_ready(self):
        """Log identity and start the stats-posting loop."""
        print('Logged in as')
        print(self.user.name)
        print(self.user.id)
        print('------')
        # NOTE(review): unlike other variants there is no one-shot guard, so
        # a reconnect-triggered on_ready spawns another update_stats loop.
        self.loop.create_task(self.update_stats())

    async def on_message(self, msg):
        """Run the command pipeline unless the author is blacklisted."""
        if msg.author.id not in self.blacklist:
            await self.process_commands(msg)

    async def update_stats(self):
        """Post the guild count to bot-list sites every 4 hours."""
        url = "https://bots.discord.pw/api/bots/{}/stats".format(self.user.id)
        while not self.is_closed():
            payload = json.dumps(dict(server_count=len(self.guilds))).encode()
            headers = {'authorization': self._auth[1], "Content-Type": "application/json"}
            async with self.session.post(url, data=payload, headers=headers) as response:
                await response.read()
            # NOTE(review): `url` is reassigned inside the loop, so from the
            # second iteration onwards BOTH posts target discordbots.org —
            # confirm intended.
            url = "https://discordbots.org/api/bots/{}/stats".format(self.user.id)
            payload = json.dumps(dict(server_count=len(self.guilds))).encode()
            headers = {'authorization': self._auth[2], "Content-Type": "application/json"}
            async with self.session.post(url, data=payload, headers=headers) as response:
                await response.read()
            await asyncio.sleep(14400)

    async def on_command(self, ctx):
        """Record metrics, nag non-patron guilds, and award experience."""
        self.stats.increment("RPGBot.commands",
                             tags=["RPGBot:commands"],
                             host="scw-8112e8")  # NOTE(review): hard-coded host tag
        self.stats.increment(f"RPGBot.commands.{str(ctx.command).replace(' ', '.')}",
                             tags=["RPGBot:commands"], host="scw-8112e8")
        self.commands_used[ctx.command] += 1
        if isinstance(ctx.author, discord.Member):
            self.server_commands[ctx.guild.id] += 1
            # Every 50th command in a non-patron guild shows the donation ask.
            if ctx.guild.id not in self.patrons:
                if (self.server_commands[ctx.guild.id] % 50) == 0:
                    await ctx.send(await _(
                        ctx,
                        "This bot costs $300/yr to run. If you like the utilities it provides,"
                        " consider buying me a coffee <https://ko-fi.com/henrys>"
                        " or subscribe as a Patron <https://www.patreon.com/henry232323>"
                        " Also consider upvoting the bot to help us grow <https://discordbots.org/bot/305177429612298242>"
                    ))
            # Random XP award, weighted by command category.
            if await self.di.get_exp_enabled(ctx.guild):
                add = choice([0, 0, 0, 0, 0, 1, 1, 2, 3])
                fpn = ctx.command.full_parent_name.lower()
                if fpn:
                    values = {
                        "character": 2,
                        "inventory": 1,
                        "economy": 1,
                        "pokemon": 2,
                        "guild": 2,
                        "team": 1,
                    }
                    add += values.get(fpn, 0)
                if add:
                    await asyncio.sleep(4)
                    r = await self.di.add_exp(ctx.author, add)
                    if r is not None:
                        # Level-up: react with an "up" arrow.
                        await ctx.message.add_reaction("\u23EB")
            # Optional per-guild auto-delete of command invocations.
            time = await self.di.get_delete_time(ctx.guild)
            if time:
                await asyncio.sleep(time)
                await ctx.message.delete()

    async def on_command_error(self, ctx, exception):
        """Report command errors to Datadog and echo a summary to the user."""
        self.stats.increment("RPGBot.errors",
                             tags=["RPGBot:errors"],
                             host="scw-8112e8")
        logging.info(f"Exception in {ctx.command} {ctx.guild}:{ctx.channel} {exception}")
        if isinstance(exception, commands.MissingRequiredArgument):
            await ctx.send(f"`{exception}`")
        elif isinstance(exception, TimeoutError):
            await ctx.send(await _(ctx, "This operation ran out of time! Please try again"))
        else:
            await ctx.send(f"`{exception}`")

    async def on_guild_join(self, guild):
        """Leave blacklisted guilds immediately; count guild joins."""
        if guild.id in self.blacklist:
            await guild.leave()
        self.stats.increment("RPGBot.guilds",
                             tags=["RPGBot:guilds"],
                             host="scw-8112e8")

    async def on_guild_leave(self, guild):
        """Count guild departures (negative increment)."""
        self.stats.increment("RPGBot.guilds", -1,
                             tags=["RPGBot:guilds"],
                             host="scw-8112e8")

    async def on_socket_response(self, msg):
        """Tally raw gateway events by their 't' (event type) field."""
        self.socket_stats[msg.get('t')] += 1

    async def get_bot_uptime(self):
        """Get time between now and when the bot went up"""
        now = datetime.datetime.utcnow()
        delta = now - self.uptime
        hours, remainder = divmod(int(delta.total_seconds()), 3600)
        minutes, seconds = divmod(remainder, 60)
        days, hours = divmod(hours, 24)
        if days:
            fmt = '{d} days, {h} hours, {m} minutes, and {s} seconds'
        else:
            fmt = '{h} hours, {m} minutes, and {s} seconds'
        return fmt.format(d=days, h=hours, m=minutes, s=seconds)

    def randsample(self):
        # 6 distinct characters drawn from self.rnd (random.sample).
        return "".join(sample(self.rnd, 6))

    @staticmethod
    def get_exp(level):
        # Quadratic XP curve: 0.1*level^2 + 5*level + 4, truncated to int.
        return int(0.1 * level ** 2 + 5 * level + 4)

    @staticmethod
    def get_ram():
        """Get the bot's RAM usage info."""
        mem = psutil.virtual_memory()
        # 0x40_000_000 bytes == 1 GiB.
        return f"{mem.used / 0x40_000_000:.2f}/{mem.total / 0x40_000_000:.2f}GB ({mem.percent}%)"

    @staticmethod
    def format_table(lines, separate_head=True):
        """Prints a formatted table given a 2 dimensional array"""
        # Count the column width
        widths = []
        for line in lines:
            for i, size in enumerate([len(x) for x in line]):
                while i >= len(widths):
                    widths.append(0)
                if size > widths[i]:
                    widths[i] = size
        # Generate the format string to pad the columns
        print_string = ""
        for i, width in enumerate(widths):
            print_string += "{" + str(i) + ":" + str(width) + "} | "
        if not len(print_string):
            return
        print_string = print_string[:-3]
        # Print the actual data
        fin = []
        for i, line in enumerate(lines):
            fin.append(print_string.format(*line))
            if i == 0 and separate_head:
                fin.append("-" * (sum(widths) + 3 * (len(widths) - 1)))
        return "\n".join(fin)

    async def shutdown(self):
        """Close the aiohttp session on exit."""
        # NOTE(review): ClientSession.close() is a coroutine on modern
        # aiohttp; this call is not awaited — confirm against the pinned
        # aiohttp version.
        self.session.close()