Example #1
    def state_changed_listener(event):
        """Listen for new messages on the bus and sends them to Datadog."""
        state = event.data.get("new_state")

        if state is None or state.state == STATE_UNKNOWN:
            return

        states = dict(state.attributes)
        metric = f"{prefix}.{state.domain}"
        tags = [f"entity:{state.entity_id}"]

        for key, value in states.items():
            if isinstance(value, (float, int)):
                attribute = f"{metric}.{key.replace(' ', '_')}"
                statsd.gauge(attribute,
                             value,
                             sample_rate=sample_rate,
                             tags=tags)

                _LOGGER.debug("Sent metric %s: %s (tags: %s)", attribute,
                              value, tags)

        try:
            value = state_helper.state_as_number(state)
        except ValueError:
            _LOGGER.debug("Error sending %s: %s (tags: %s)", metric,
                          state.state, tags)
            return

        statsd.gauge(metric, value, sample_rate=sample_rate, tags=tags)

        _LOGGER.debug("Sent metric %s: %s (tags: %s)", metric, value, tags)
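Most of the snippets on this page assume a module-level statsd client that is already configured. A minimal setup sketch (modeled on the initialize() call in Example #9; the host and port here are placeholder assumptions):

from datadog import initialize, statsd

# agent address is an assumption for local testing
initialize(statsd_host="localhost", statsd_port=8125)
statsd.gauge("example.gauge", 1.0, tags=["env:dev"])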
Example #2
def report_exceptions():
    """Track the number of exceptions found in the log."""
    import subprocess
    import socket
    redis_key = 'track-exceptions-%s-%s' % (os.environ['ENV'], socket.gethostname())
    REDIS_URL = 'kinit-app-prod.qugi0x.ng.0001.use1.cache.amazonaws.com'
    completed_process = subprocess.run("cat /var/log/kinappserver.err.log|grep Traceback|wc -l", shell=True, stdout=subprocess.PIPE)
    try:
        num_exceptions = int(completed_process.stdout)
    except Exception as e:
        log.error('failed to calculate num_exceptions from stdout %s: %s. aborting' % (completed_process.stdout, e))
        return False

    # compare against value in redis
    redis_con = redis.StrictRedis(host=REDIS_URL, port=6379, db=0)
    try:
        previous_value = int(redis_con.get(redis_key))
    except Exception as e:
        log.error("can't get previous value from redis. Exception %s. defaulting to 0" % e)
        previous_value = 0

    # evaluate
    if previous_value < num_exceptions:
        print('detected new exceptions. previous value: %s current: %s' % (previous_value, num_exceptions))
        found_new_exceptions = 2  # datadog threshold set to '1'
    else:
        found_new_exceptions = 0

    # persist to redis
    redis_con.set(redis_key, num_exceptions)

    # also send to dd
    statsd.gauge('track-exceptions', found_new_exceptions, tags=['app:kinit,env:%s' % os.environ['ENV']])
    return True
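The shell pipeline above can also be done in pure Python; a minimal sketch, assuming the same log path is readable by the process:

def count_tracebacks(path='/var/log/kinappserver.err.log'):
    # equivalent of cat | grep Traceback | wc -l
    with open(path) as log_file:
        return sum(1 for line in log_file if 'Traceback' in line)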
Example #3
def report_statsd(data: AwairDict) -> None:
    """ Reports to statsd
    """
    if not get_statsd_reporting():
        return

    display_keys = {
        "aqi": "purple_air",
        "co2": "co2",
        "dew_point": "dew_point",
        "farenheit": "temperature",
        "humid": "humidity",
        "pm10_est": "pm10",
        "pm25": "pm25",
        "score": "awair_score",
        "voc": "voc",
        "voc_h2_raw": "voc_raw",
        "voc_baseline": "voc_baseline",
        "voc_ethanol_raw": "voc_ethanol",
    }

    device_name = str(data["device_uuid"]).replace("awair-", "")
    tags = [f"device:{device_name}"]

    for original_name, new_name in display_keys.items():
        statsd_key = f"awair.{new_name}"
        number = int(data[original_name])

        statsd.gauge(statsd_key, number, tags=tags)
Example #4
 def logProgress(self):
     progress = self.shelve[Config.CONFIG_PROCESSED_COUNT] / self.shelve[
         Config.CONFIG_TOTAL] * 100
     logging.debug('progress {}/{} {}%'.format(
         self.shelve[Config.CONFIG_PROCESSED_COUNT],
         self.shelve[Config.CONFIG_TOTAL], progress))
     statsd.gauge('url.downloader.progress', progress)
Example #5
def bulk_upload_submissions(raw_submissions: List[RawSubmission], api_key: str,
                            job_mode: str) -> None:
    start_time = time.time()
    json_submissions = []
    for raw_submission in raw_submissions:
        json_submissions.append(raw_submission.to_dict())

    headers = {"Content-Type": "application/json", "Api-Token": api_key}

    data_dump = dumps({"data": json_submissions})

    resp = requests.post(url=f"{LARAVEL_HOST}/{LARAVEL_ENDPOINT}",
                         data=data_dump,
                         headers=headers)
    end_time = time.time()

    statsd.gauge(f"laravel.{LARAVEL_ENDPOINT}.duration",
                 end_time - start_time,
                 tags=[f"job_mode:{job_mode}"])
    statsd.gauge(f"laravel.{LARAVEL_ENDPOINT}.request_size",
                 len(pickle.dumps(data_dump)),
                 tags=[f"job_mode:{job_mode}"])
    statsd.increment(f"laravel.{LARAVEL_ENDPOINT}.success",
                     1,
                     tags=[f"job_mode:{job_mode}"])

    if resp.status_code != 200:
        raise ValueError(f"Failed to upload to Laravel with resp: {resp.text}")
Example #6
    def _send_metrics(self):
        temp_store, count = get_and_clear_store()
        all_metrics = []
        start_time = time.time()
        for metric, val in temp_store.items():
            try:
                tags = []
                components = metric.split('.')

                # Customize to match the format of your metric
                datacenter = 'datacenter:' + components.pop(2)
                env = 'env:' + components.pop(2)
                instance = 'instance:' + components.pop(2)
                tenant_id = 'tenant_id:' + components.pop(3)
                tags = [datacenter, env, instance, tenant_id]

                metric = '.'.join(components)
                all_metrics.append({'metric': metric, 'points': val, 'tags': tags})
            except Exception as ex:
                LOGGER.error(ex)
        if all_metrics:
            if SEND_VIA_API:
                api.Metric.send(all_metrics)
            else:
                for metric in all_metrics:
                    statsd.gauge(metric['metric'], metric['points'], tags=metric['tags'])
            LOGGER.info("sent %r metrics with %r unique names in %r seconds\n",
                        count, len(temp_store), time.time() - start_time)
        else:
            LOGGER.info("no metrics received")
        threading.Timer(DELAY, self._send_metrics).start()
Example #7
def gauge_metric(metric_name, value, tags_str=''):
    """increment a counter with the given name and value"""
    # set env to undefined for local tests (which do not emit stats, as there's no agent)
    tags = 'app:kinit,env:%s' % config.DEPLOYMENT_ENV
    if tags_str:
        tags = tags + ',' + tags_str
    statsd.gauge(metric_name, value, tags=[tags])
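Note on the tag format above: a single string containing commas produces the same datagram as separate tag strings, because DogStatsd joins tags with commas on the wire (compare the '|#country:canada,red,country:china,age:45,blue' assertions in the test examples below).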
Example #8
    def test_telemetry_flush_interval_alternate_destination(self):
        statsd = DogStatsd(telemetry_host='foo')
        fake_socket = FakeSocket()
        statsd.socket = fake_socket
        fake_telemetry_socket = FakeSocket()
        statsd.telemetry_socket = fake_telemetry_socket

        assert statsd.telemetry_host is not None
        assert statsd.telemetry_port is not None
        assert statsd._dedicated_telemetry_destination()

        # set the last flush time in the future to be sure we won't flush
        statsd._last_flush_time = time.time() + statsd._telemetry_flush_interval
        statsd.gauge('gauge', 123.4)

        assert_equal('gauge:123.4|g', fake_socket.recv())

        t1 = time.time()
        # setting the last flush time in the past to trigger a telemetry flush
        statsd._last_flush_time = t1 - statsd._telemetry_flush_interval - 1
        statsd.gauge('gauge', 123.4)

        assert_equal('gauge:123.4|g', fake_socket.recv())
        assert_equal_telemetry('',
                               fake_telemetry_socket.recv(),
                               telemetry=telemetry_metrics(metrics=2,
                                                           bytes_sent=13 * 2,
                                                           packets_sent=2))
        # assert that _last_flush_time has been updated
        assert t1 < statsd._last_flush_time
Example #9
def main():
    logging.warning("start logging")
    host = os.environ.get('AGENT_HOST')
    logging.warning(host)
    options = {'statsd_host': host, 'statsd_port': 8125}
    initialize(**options)

    while True:
        l1, l2 = getInfo()
        for s in l1:
            ns, jn = getjn(s)
            for s2 in l2:
                ns2, jn2 = getjn(s2)
                if ns == ns2 and jn == jn2:
                    starttime = s2.split(' ')[1]
                    now = time.time()
                    uptime = now - float(starttime)
                    print(ns + "   " + jn + "   " + str(uptime))
                    statsd.gauge('job_uptime',
                                 uptime,
                                 tags=["namespace:" + ns, "jobname:" + jn])
        # if st!=0 and jn!="":
        #     statsd.gauge('job_uptime', st, tags=["namespace:"+ns,"jobname:"+jn])
        time.sleep(15)
Example #10
def send_queue_metrics(sqs_queue) -> None:
    print("sending queue metrics")
    statsd.gauge(
        "sqs.queue.message_count",
        queue_length(sqs_queue),
        tags=[f"queue:{queue_name(sqs_queue)}"],
    )
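The queue_length and queue_name helpers are not shown; a plausible sketch using the boto3 SQS Queue resource (an assumption about the helpers, not the example's actual code):

def queue_length(sqs_queue) -> int:
    # ApproximateNumberOfMessages is a standard SQS queue attribute
    return int(sqs_queue.attributes["ApproximateNumberOfMessages"])

def queue_name(sqs_queue) -> str:
    # the queue name is the last path segment of the queue URL
    return sqs_queue.url.split("/")[-1]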
Example #11
    def state_changed_listener(event):
        """Listen for new messages on the bus and send them to Datadog."""
        state = event.data.get('new_state')

        if state is None or state.state == STATE_UNKNOWN:
            return

        if state.attributes.get('hidden') is True:
            return

        states = dict(state.attributes)
        metric = "{}.{}".format(prefix, state.domain)
        tags = ["entity:{}".format(state.entity_id)]

        for key, value in states.items():
            if isinstance(value, (float, int)):
                attribute = "{}.{}".format(metric, key.replace(' ', '_'))
                statsd.gauge(attribute,
                             value,
                             sample_rate=sample_rate,
                             tags=tags)

                _LOGGER.debug('Sent metric %s: %s (tags: %s)', attribute,
                              value, tags)

        try:
            value = state_helper.state_as_number(state)
        except ValueError:
            _LOGGER.debug('Error sending %s: %s (tags: %s)', metric,
                          state.state, tags)
            return

        statsd.gauge(metric, value, sample_rate=sample_rate, tags=tags)

        _LOGGER.debug('Sent metric %s: %s (tags: %s)', metric, value, tags)
Example #12
 def test_tags_from_environment_and_constant(self):
     with preserve_environment_variable('DATADOG_TAGS'):
         os.environ['DATADOG_TAGS'] = 'country:china,age:45,blue'
         statsd = DogStatsd(constant_tags=['country:canada', 'red'])
     statsd.socket = FakeSocket()
     statsd.gauge('gt', 123.4)
     t.assert_equal('gt:123.4|g|#country:canada,red,country:china,age:45,blue', statsd.socket.recv())
Example #13
def cli(interval, use_datadog):
    interval = int(os.getenv('DISKMON_INTERVAL', interval))
    statsd = get_statsd(use_datadog)

    device_list = DeviceList()
    while True:
        for dev in device_list.devices:
            assert isinstance(dev, Device)
            dev.update()

            for a in filter(None, dev.attributes):
                assert isinstance(a, Attribute)

                datapoint = {'metric': f'system.disk.smart.{a.num}_{a.name}'}

                if 's' in a.raw:
                    parts = time_re.match(a.raw)
                    time_components = {name: float(param) for name, param in parts.groupdict().items() if param}
                    datapoint['value'] = timedelta(**time_components).total_seconds()
                else:
                    try:
                        datapoint['value'] = int(a.raw)
                    except ValueError:
                        logger.warning('cannot parse SMART raw value: %s', a.raw)
                        continue

                if use_datadog:
                    datapoint['tags'] = ['device:'+dev.name]
                else:
                    datapoint['metric'] += '.' + dev.name
                statsd.gauge(**datapoint)
        sleep(interval)
Example #14
 def _send_metrics(self):
     temp_store = self._aggregate_and_clear_metrics()
     all_metrics = []
     start_time = time.time()
     for metric, val in temp_store.items():
         try:
             metric, tags = _convert_graphite_to_tags(metric)
             all_metrics.append({
                 'metric': metric,
                 'points': val,
                 'tags': tags
             })
         except Exception as ex:
             LOGGER.error(ex)
     if all_metrics:
         for metric in all_metrics:
             statsd.gauge(metric['metric'],
                          metric['points'],
                          tags=metric['tags'])
         LOGGER.info("sent %r unique metric names in %r seconds\n",
                     len(all_metrics),
                     time.time() - start_time)
     else:
         LOGGER.info("no metrics received")
     threading.Timer(DELAY, self._send_metrics).start()
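_convert_graphite_to_tags is not shown; a hypothetical sketch consistent with the position-based parsing in Example #6 (the layout of the dotted metric name is an assumption):

def _convert_graphite_to_tags(metric):
    components = metric.split('.')
    # assumed layout: prefix.name.datacenter.env.instance.rest...
    tags = ['datacenter:' + components.pop(2),
            'env:' + components.pop(2),
            'instance:' + components.pop(2)]
    return '.'.join(components), tags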
Example #15
def cards():
    global submit_count
    form = Cards()
    cards = {}
    print("submit count:", submit_count)
    if form.validate_on_submit():
        submit_count += 1
        statsd.gauge("flask_cards.submit", submit_count)

        words = new_cards()
        if len(words) > 0:
            #    # Shuffle the words
            #    random.shuffle(words)
            j = 0
            for i in range(7):
                c = "card" + str(i)
                d = "discard" + str(i)
                old_word = form[c].data
                discard = form[d].data
                form[d].checked = False
                if len(old_word) == 0 or discard is True:
                    # select new card if needed
                    cards[i] = words[j]
                    j += 1
                else:
                    # use old card otherwise
                    cards[i] = old_word
    else:
        # prepare blank page on first load
        for i in range(7):
            cards[i] = ""
    return render_template('cards.html', title='Cards', form=form, cards=cards)
Example #16
    def state_changed_listener(event):
        """Listen for new messages on the bus and send them to Datadog."""
        state = event.data.get('new_state')

        if state is None or state.state == STATE_UNKNOWN:
            return

        if state.attributes.get('hidden') is True:
            return

        states = dict(state.attributes)
        metric = "{}.{}".format(prefix, state.domain)
        tags = ["entity:{}".format(state.entity_id)]

        for key, value in states.items():
            if isinstance(value, (float, int)):
                attribute = "{}.{}".format(metric, key.replace(' ', '_'))
                statsd.gauge(
                    attribute, value, sample_rate=sample_rate, tags=tags)

                _LOGGER.debug(
                    "Sent metric %s: %s (tags: %s)", attribute, value, tags)

        try:
            value = state_helper.state_as_number(state)
        except ValueError:
            _LOGGER.debug(
                "Error sending %s: %s (tags: %s)", metric, state.state, tags)
            return

        statsd.gauge(metric, value, sample_rate=sample_rate, tags=tags)

        _LOGGER.debug('Sent metric %s: %s (tags: %s)', metric, value, tags)
Example #18
    def test_telemetry_flush_interval(self):
        statsd = DogStatsd()
        fake_socket = FakeSocket()
        statsd.socket = fake_socket

        # set the last flush time in the future to be sure we won't flush
        statsd._last_flush_time = time.time() + statsd._telemetry_flush_interval
        statsd.gauge('gauge', 123.4)

        metric = 'gauge:123.4|g'
        assert_equal(metric, fake_socket.recv())

        t1 = time.time()
        # setting the last flush time in the past to trigger a telemetry flush
        statsd._last_flush_time = t1 - statsd._telemetry_flush_interval - 1
        statsd.gauge('gauge', 123.4)
        assert_equal_telemetry(metric,
                               fake_socket.recv(2),
                               telemetry=telemetry_metrics(metrics=2,
                                                           bytes_sent=2 * len(metric),
                                                           packets_sent=2))

        # assert that _last_flush_time has been updated
        assert t1 < statsd._last_flush_time
Example #19
 def send_members(self):
     if not self.tags:
         return
     members = list(self.bot.get_all_members())
     unique = set(m.id for m in members)
     statsd.gauge('bot.members', len(members), tags=self.tags)
     statsd.gauge('bot.unique_members', len(unique), tags=self.tags)
Example #20
def task_checker(connector_names):
    from datadog import statsd
    try:
        for connector in connector_names:
            r = requests.get('http://{0}/connectors/{1}/status'.format(CONNECT_HOST, connector))
            r.raise_for_status()
            assert (r.status_code == 200), 'connector API does not return 200'
            connector_status = r.json()
            connector_state = connector_status.get('connector').get('state')
            if connector_state != 'RUNNING':
                print("connector {0} is down, restarting the connector".format(connector))
                r = requests.post('http://{0}/connectors/{1}/restart'.format(CONNECT_HOST, connector))
                r.raise_for_status()
                #assert (r.status_code == 200), 'connector API does not return 200'
                statsd.gauge('kafka_connect.num_running_tasks', 0, tags=dd_tags)
            else:
                tasks = connector_status.get('tasks')
                print("{0} has num of running of tasks : {1}".format(connector, len(tasks)))
                statsd.gauge('kafka_connect.num_running_tasks', len(tasks), tags=dd_tags)
                for task in tasks:
                    task_state = task.get('state')
                    if task_state != 'RUNNING':
                        print("task {0} of the connector {1} is down, restarting the task".format(task.get('id'), connector))
                        r = requests.post('http://{0}/connectors/{1}/tasks/{2}/restart'.format(CONNECT_HOST, connector, task.get('id')))
                        r.raise_for_status()
                        #assert (r.status_code == 200), 'connector API does not return 200'
    except requests.exceptions.HTTPError as err:
        print(err)
        return None
    return r.status_code
Example #22
    def test_context_manager(self):
        fake_socket = FakeSocket()
        with DogStatsd() as statsd:
            statsd.socket = fake_socket
            statsd.gauge('page.views', 123)
            statsd.timing('timer', 123)

        t.assert_equal('page.views:123|g\ntimer:123|ms', fake_socket.recv())
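Both metrics arrive newline-joined in a single packet because DogStatsd, used as a context manager, buffers metrics and flushes the batch when the block exits.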
Example #23
def queueMetric(metric, datapoint, tags):
    try:
        ts = datapoint[0]
        value = datapoint[1]
        statsd.gauge(metric, value, tags=tags)
    except Exception:
        logging.exception("Unparsable metric: {0}".format(metric))
        return None, None, None
Example #24
    def _record_connection_acquiring(self, value=0):
        self._connections_acquiring += value

        statsd.gauge(
            f"{self._service_name}.pool.connections_acquiring",
            self._connections_acquiring,
            tags=self._extra_tags,
        )
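A usage note (an inference from the signature, not shown in the source): callers presumably pass value=1 when an acquisition starts and value=-1 when it completes, so the gauge tracks the number of in-flight acquisitions; the default value=0 simply re-emits the current level.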
Example #25
def pulse(metric, gauge=None):
    full_metric = 'launch_sporestack.{}'.format(metric)
    if gauge is None:
        statsd.increment(full_metric)
        debug('Sending stat: {}'.format(full_metric))
    else:
        statsd.gauge(full_metric, gauge)
        debug('Sending stat: {}: {}'.format(full_metric, gauge))
Example #26
 async def on_guild_remove(self, guild):
     servers = len(self.bot.guilds)
     try:
         statsd.gauge('thehammer.guilds.total', servers)
         statsd.decrement('thehammer.guilds.joins',
                          tags=["guild:{}".format(guild.id)])
     except BaseException:
         self.bot.sentry.captureException()
Example #28
 def test_context_manager(self):
     fake_socket = FakeSocket()
     with DogStatsd(telemetry_min_flush_interval=0) as statsd:
         statsd.socket = fake_socket
         statsd.gauge('page.views', 123)
         statsd.timing('timer', 123)
     metric = "page.views:123|g\ntimer:123|ms"
     assert_equal(metric, fake_socket.recv())
     assert_equal(telemetry_metrics(metrics=2, bytes_sent=len(metric)), fake_socket.recv())
Example #29
def getCrashLocationPoints():
    latList = []
    longList = []
    points = CrashDataPoint.query.all()
    for point in points:
        latList.append(point.gfo)
        longList.append(point.rotation)
    statsd.gauge('RideSafe.crashLocationCount.gauge', len(latList), tags=["app:flapi"]) 
    return latList, longList
Example #30
 def test_tags_from_environment(self):
     with preserve_environment_variable('DATADOG_TAGS'):
         os.environ['DATADOG_TAGS'] = 'country:china,age:45,blue'
         statsd = DogStatsd(telemetry_min_flush_interval=0)
     statsd.socket = FakeSocket()
     statsd.gauge('gt', 123.4)
     metric = 'gt:123.4|g|#country:china,age:45,blue'
     assert_equal(metric, statsd.socket.recv())
     assert_equal(telemetry_metrics(tags="country:china,age:45,blue", bytes_sent=len(metric)), statsd.socket.recv())
Example #31
def showFriends():

    #metric to count the web.page_views_friends
    statsd.increment('web.page_views_friends', tags=["page:friends"])

    #metric to count the overall number of page views
    statsd.increment('web.page_views_total')

    #start timer
    start_time = time()
    print(start_time)
    #connection to the DB
    connection = MySQLdb.connect(host="localhost",
                                 user="******",
                                 passwd="cacapipi",
                                 db="bucketlist")

    #prepare a cursor object using cursor() method
    cursor = connection.cursor()

    #execute the SQL query using execute() method
    cursor.execute(
        "select user_name, user_username, user_password from tbl_user_friends "
    )

    #fetch all the rows from the query
    data = cursor.fetchall()

    #print the rows

    #THIS_DIR = os.path.dirname(os.path.abspath(__file__))
    # Create the jinja2 environment
    # Notice the use of trim_blocks, which greatly helps control whitespace.
    #j2_env = Environment(loader=FileSystemLoader(THIS_DIR), trim_blocks=True)
    #print j2_env.get_template('community.html').render(items=data)

    #env = Environment(loader=PackageLoader('app', 'template'))
    #template = env.get_template('community.html')
    #print template.render(items=data)

    for row in data:
        print(row[0], row[1])

    cursor.close()

    #close the connection
    connection.close()

    #return timer
    duration = time() - start_time
    print(duration)
    statsd.histogram('databaseFriends.query.time',
                     duration,
                     tags=["page:friends"])
    statsd.gauge('test3', 300)
    #exit the program
    sys.exit()
Example #32
 def test_entity_tag_from_environment_and_constant(self):
     with preserve_environment_variable('DD_ENTITY_ID'):
         os.environ['DD_ENTITY_ID'] = '04652bb7-19b7-11e9-9cc6-42010a9c016d'
         statsd = DogStatsd(constant_tags=['country:canada', 'red'])
     statsd.socket = FakeSocket()
     statsd.gauge('gt', 123.4)
     t.assert_equal(
         'gt:123.4|g|#country:canada,red,dd.internal.entity_id:04652bb7-19b7-11e9-9cc6-42010a9c016d',
         statsd.socket.recv())
Example #33
def compensate_T(adc_T):
	global t_fine
	v1 = (adc_T / 16384.0 - digT[0] / 1024.0) * digT[1]
	v2 = (adc_T / 131072.0 - digT[0] / 8192.0) * (adc_T / 131072.0 - digT[0] / 8192.0) * digT[2]
	t_fine = v1 + v2
	temperature = t_fine / 5120.0
	# print "temp : %-6.2f ℃" % (temperature) 
	# c.kv.put('pi/temp', str(temperature) )
	statsd.gauge('raspberry_pi.temperature.gauge', temperature, tags=["host:raspberrypi"])
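Note: digT holds the BME280's factory temperature-calibration constants read from the sensor's registers, and t_fine is kept global because the chip's pressure and humidity compensation formulas reuse it.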
Example #34
 def test_entity_tag_from_environment(self):
     with preserve_environment_variable('DD_ENTITY_ID'):
         os.environ['DD_ENTITY_ID'] = '04652bb7-19b7-11e9-9cc6-42010a9c016d'
         statsd = DogStatsd()
     statsd.socket = FakeSocket()
     statsd.gauge('gt', 123.4)
     assert_equal(
         'gt:123.4|g|#dd.internal.entity_id:04652bb7-19b7-11e9-9cc6-42010a9c016d',
         statsd.socket.recv())
Example #35
 def send_channels(self):
     if not self.tags:
         return
     channels = list(self.bot.get_all_channels())
     text_channels = sum(c.type == ChannelType.text for c in channels)
     voice_channels = sum(c.type == ChannelType.voice for c in channels)
     statsd.gauge('bot.channels', voice_channels,
                  tags=[*self.tags, 'channel_type:voice'])
     statsd.gauge('bot.channels', text_channels,
                  tags=[*self.tags, 'channel_type:text'])
Example #36
 def send_players(self):
     if not self.tags:
         return
     avcs = []
     for vc in self.bot.voice_clients:
         if hasattr(vc, 'audio_player') and not vc.audio_player.is_done():
             avcs.append(vc)
     num_avcs = len(avcs)
     audience = sum(self.notbot(vc.channel) for vc in avcs if vc.channel)
     statsd.gauge('bot.voice_playing', num_avcs, tags=self.tags)
     statsd.gauge('bot.voice_audience', audience, tags=self.tags)
Example #37
def showCommunity():

	#metric to count the web.page_views_community
	statsd.increment('web.page_views_community',tags = ["page:community"])
	
	#metric to count the overall number of page views
	statsd.increment('web.page_views_total')

	#start timer
	start_time = time()
	print(start_time)
	#connection to the DB
	connection = MySQLdb.connect(host="localhost", user="******", passwd="cacapipi", db="bucketlist")
	
	#prepare a cursor object using cursor() method
	cursor = connection.cursor()
	
	#execute the SQL query using execute() method
	cursor.execute("select user_name, user_username, user_password from tbl_user ")
	
	#fetch all the rows from the query
	data = cursor.fetchall()
	
	#print the rows
	
	#THIS_DIR = os.path.dirname(os.path.abspath(__file__))
	# Create the jinja2 environment
	# Notice the use of trim_blocks, which greatly helps control whitespace.
	#j2_env = Environment(loader=FileSystemLoader(THIS_DIR), trim_blocks=True)
	#print j2_env.get_template('community.html').render(items=data)
    
	
	#env = Environment(loader=PackageLoader('app', 'template'))
	#template = env.get_template('community.html')
	#print template.render(items=data)
		
	for row in data:	
		print(row[0], row[1])
	
	
	cursor.close()
	
	#close the connection
	connection.close()
	
	#return timer
	duration = time() - start_time
	print(duration)
	statsd.histogram('database.query.time', duration, tags = ["page:community"])
	statsd.gauge('test2',200)
	#exit the program
	sys.exit()
Example #38
def main(args):

	if len(args) < 2:
		print(usage)
	elif args[0] == 'i':
		statsd.increment(args[1])
	elif args[0] == 'd':
		statsd.decrement(args[1])
	elif args[0] == 'g':
		statsd.gauge(args[1], float(args[2]))
	elif args[0] == 'e':
		api.Event.create(title=args[1], text=args[2], tags=args[3])
	else:
		print(usage)
Example #39
def DataDogCmd(cmd):
	args = shlex.split(cmd)
	print(args)
	if len(args) < 2:
		print(usage)
		return False
	elif args[0] == 'i':
		statsd.increment(args[1])
	elif args[0] == 'd':
		statsd.decrement(args[1])
	elif args[0] == 'g':
		statsd.gauge(args[1], float(args[2]))
	elif args[0] == 'e':
		api.Event.create(title=args[1], text=args[2], tags=args[3])
	else:
		print(usage)
		return False

	return True
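For instance, DataDogCmd('g my.metric 42') sends a gauge and DataDogCmd('i my.counter') increments a counter; both return True on success.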
Example #40
 def export(self, category, measure_type, data):
     """
     Export data to DataDog
     :param category: Data meant
     :param measure_type: Type of measure
     :param data: Data to posting
     :raise: DataDogExporterError if data is invalid
     """
     if isinstance(data, (int, float)):
         metric_name = self._suffix
         if category == ExportCategory.files:
             metric_name += "files."
         else:
             metric_name += "mysql."
         if measure_type == ExportMeasureType.backup:
             metric_name += "backup_time"
         else:
             metric_name += "restore_time"
         statsd.gauge(metric_name, data)
     else:
         raise DataDogExporterError("Invalid input data")
Example #41
def dog_counter(difftime):
    # Use Statsd, a Python client for DogStatsd
    from datadog import statsd

    statsd.increment('LatestFileMtime')
    statsd.gauge('timeGap', difftime)
Example #42
def monitor(*args):
    global last_metrics

    while True:
        current_metrics = mod_wsgi.process_metrics()

        if last_metrics is not None:
            cpu_user_time = (current_metrics['cpu_user_time'] -
                    last_metrics['cpu_user_time'])
            cpu_system_time = (current_metrics['cpu_system_time'] -
                    last_metrics['cpu_system_time'])

            request_busy_time = (current_metrics['request_busy_time'] -
                    last_metrics['request_busy_time'])

            request_threads = current_metrics['request_threads']

            timestamp = int(current_metrics['current_time'] * 1000000000)

            statsd.gauge('mod_wsgi.process.cpu_user_time', cpu_user_time)
            statsd.gauge('mod_wsgi.process.cpu_system_time', cpu_system_time)

            statsd.gauge('mod_wsgi.process.cpu_usage', ((cpu_user_time +
                    cpu_system_time) / (current_metrics['current_time'] -
                    last_metrics['current_time'])))

            statsd.gauge('mod_wsgi.process.request_busy_time', request_busy_time)
            statsd.gauge('mod_wsgi.process.request_busy_usage',
                    (request_busy_time / mod_wsgi.threads_per_process))

            statsd.gauge('mod_wsgi.process.threads_per_process',
                    mod_wsgi.threads_per_process)
            statsd.gauge('mod_wsgi.process.request_threads', request_threads)

        last_metrics = current_metrics

        current_time = current_metrics['current_time']
        delay = max(0, (current_time + 1.0) - time.time())
        time.sleep(delay)
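Note that cpu_user_time, cpu_system_time and request_busy_time are cumulative totals, so the loop gauges per-interval deltas; the final delay calculation keeps each iteration aligned to roughly one-second boundaries.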
Example #43
 def send_uptime(self):
     if not self.tags:
         return
     now = datetime.datetime.now()
     uptime = (now - self.bot.uptime).total_seconds()
     statsd.gauge('bot.uptime', uptime, tags=self.tags)
Example #44
 def run(self):
     while True:
         api.ServiceCheck.check(check=os.environ['symbol'] + '.app.ok', host_name=os.environ['symbol'], status=CheckStatus.OK)
         statsd.gauge('matchingEngineOnline', 1, tags=['exchange:aex','application:matching_engine','symbol:'+os.environ['symbol'], 'mode:'+os.environ['mode']])
         time.sleep(59)
Example #45
def event_handler(name, **kwargs):
    if name == 'request_finished':
        statsd.increment('mod_wsgi.request.count')

        application_time = kwargs.get('application_time')

        statsd.histogram('mod_wsgi.request.application_time', application_time)

        statsd.gauge('mod_wsgi.request.input_reads', kwargs.get('input_reads'))
        statsd.gauge('mod_wsgi.request.input_length', kwargs.get('input_length'))
        statsd.gauge('mod_wsgi.request.input_time', kwargs.get('input_time'))

        statsd.gauge('mod_wsgi.request.output_writes', kwargs.get('output_writes'))
        statsd.gauge('mod_wsgi.request.output_length', kwargs.get('output_length'))
        statsd.gauge('mod_wsgi.request.output_time', kwargs.get('output_time'))

        cpu_user_time = kwargs.get('cpu_user_time')
        cpu_system_time = kwargs.get('cpu_system_time')

        statsd.gauge('mod_wsgi.request.cpu_user_time', cpu_user_time)
        statsd.gauge('mod_wsgi.request.cpu_system_time', cpu_system_time)

        if cpu_user_time is not None and cpu_system_time is not None and application_time:
            cpu_burn = (cpu_user_time + cpu_system_time) / application_time
            statsd.gauge('mod_wsgi.request.cpu_burn', cpu_burn)
Example #46
def gauge(metric_name, value):
    return statsd.gauge(DATADOG_METRICS[metric_name], value)
Example #47
    def test_accessing_socket_multiple_times_returns_same_socket(self):
        dogpound = DogStatsd()
        fresh_socket = FakeSocket()
        dogpound.socket = fresh_socket
        t.assert_equal(fresh_socket, dogpound.get_socket())
        t.assert_not_equal(FakeSocket(), dogpound.get_socket())

    def test_tags_from_environment(self):
        with preserve_environment_variable('DATADOG_TAGS'):
            os.environ['DATADOG_TAGS'] = 'country:china,age:45,blue'
            statsd = DogStatsd()
        statsd.socket = FakeSocket()
        statsd.gauge('gt', 123.4)
        t.assert_equal('gt:123.4|g|#country:china,age:45,blue', statsd.socket.recv())

    def test_tags_from_environment_and_constant(self):
        with preserve_environment_variable('DATADOG_TAGS'):
            os.environ['DATADOG_TAGS'] = 'country:china,age:45,blue'
            statsd = DogStatsd(constant_tags=['country:canada', 'red'])
        statsd.socket = FakeSocket()
        statsd.gauge('gt', 123.4)
        t.assert_equal('gt:123.4|g|#country:canada,red,country:china,age:45,blue', statsd.socket.recv())

if __name__ == '__main__':
    while True:
        statsd.gauge('test.gauge', 1)
        statsd.increment('test.count', 2)
        time.sleep(0.05)
    if not response.ok:
        # got an error response from the Instaclustr API - raise an alert in DataDog after 3 consecutive fails
        consecutive_fails += 1
        print "Error retrieving metrics from Instaclustr API: {0} - {1}".format(response.status_code, response.content)
        if consecutive_fails > 3:
            statsd.event("Instaclustr monitoring API error", "Error code is: {0}".format(response.status_code))
        sleep(20)
        continue

    consecutive_fails = 0
    metrics = json.loads(response.content)
    for node in metrics:
        public_ip = node["publicIp"]
        for metric in node["payload"]:
            dd_metric_name = 'instaclustr.{0}.{1}'.format(public_ip,metric["metric"])
            if metric["metric"] == "nodeStatus":
                # node status metric maps to a Datadog service check
                if metric["values"][0]["value"] == "WARN":
                    statsd.service_check(dd_metric_name, 1) # WARN status

                else:
                    statsd.service_check(dd_metric_name, 0) # OK status

            else:
                # all other metrics map to a Datadog gauge
                statsd.gauge(dd_metric_name, metric["values"][0]["value"])

    sleep(20)


Example #49
 def send_voice(self):
     if not self.tags:
         return
     vcs = len(self.bot.voice_clients)
     statsd.gauge('bot.voice_clients', vcs, tags=self.tags)
Example #50
    def _send_gauge(self, metric_name, value, tags=[]):
        all_tags = tags + self.default_tags

        namespaced_metric = "{namespace}.{metric_name}".format(namespace=self._config.metric_namespace,
                                                               metric_name=metric_name)
        statsd.gauge(namespaced_metric, value, tags=all_tags)
Example #51
#from jinja2 import Environment, FileSystemLoader
#jinja2 library to print the MySQL requests in the HTML page 'Community'

import os
#To interact with the operating system

from time import time 

from datadog import statsd
# Use Statsd, a Python client for DogStatsd


#tests
statsd.increment('whatever')
statsd.gauge('foo', 42)
statsd.gauge('test1',100)

##Previous connection attempt for the signing up page
##mysql= MySQL()

app = Flask(__name__)


 
@app.route('/')
def main():

	#metric to count the  web.page_view
	statsd.increment('web.page_views',tags = ["page:home"])
	
Example #52
 def gauge(self, metric, value, tags=[]):
     statsd.gauge(metric=metric, value=value, tags=tags)
     if self.debug:
         print("{0} = {1} :: type={3} :: tags={2}".format(metric, value, tags, 'gauge'))
Example #53
 def send_servers(self):
     if not self.tags:
         return
     servers = len(self.bot.servers)
     statsd.gauge('bot.servers', servers, tags=self.tags)