Example #1
def synchronize_statsd_subscriptions_gauges(full=False):
    """ synchronize all subscription-related gauges on our statsd server. """

    with benchmark('synchronize statsd gauges for Subscription.*'):

        statsd.gauge('subscriptions.counts.total',
                     Subscription.objects.all().count())
Example #2
 def emit(self, stat_name, tags, value):
     # Convert the dict of tags into a list of "key:value" strings
     string_tags = map(lambda (k, v): (self.dd_tag_string.format(key=k, value=v)), tags.iteritems())
     statsd.gauge(self.dd_metric_string.format(
         scope=self.scope,
         stat=stat_name
     ), value, tags=string_tags)
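This variant relies on Python 2-only constructs (a tuple-unpacking lambda and dict.iteritems()). A minimal Python 3 sketch of the same idea, written as a standalone function with hypothetical template names (metric_template, tag_template) standing in for the class attributes dd_metric_string and dd_tag_string, and assuming the datadog (dogstatsd) client:

# Sketch only: Python 3 version of the tag formatting and gauge emission above.
from datadog import statsd  # assumes the datadog client used in the example

def emit_gauge(scope, stat_name, tags, value,
               metric_template='{scope}.{stat}', tag_template='{key}:{value}'):
    # dict.items() plus a list comprehension replace iteritems() and the lambda.
    string_tags = [tag_template.format(key=k, value=v) for k, v in tags.items()]
    statsd.gauge(metric_template.format(scope=scope, stat=stat_name),
                 value, tags=string_tags)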
Example #3
    def _deliver_submission(self, submission):
        payload = {'xqueue_body': submission.xqueue_body,
                   'xqueue_files': submission.s3_urls}

        submission.grader_id = self.worker_url
        submission.push_time = timezone.now()
        start = time.time()
        (grading_success, grader_reply) = _http_post(self.worker_url, json.dumps(payload), settings.GRADING_TIMEOUT)
        statsd.histogram('xqueue.consumer.consumer_callback.grading_time', time.time() - start,
                         tags=['queue:{0}'.format(self.queue_name)])

        job_count = get_queue_length(self.queue_name)
        statsd.gauge('xqueue.consumer.consumer_callback.queue_length', job_count,
                     tags=['queue:{0}'.format(self.queue_name)])

        submission.return_time = timezone.now()

        # TODO: For the time being, a submission in a push interface gets one chance at grading,
        #       with no requeuing logic
        if grading_success:
            submission.grader_reply = grader_reply
            submission.lms_ack = post_grade_to_lms(submission.xqueue_header, grader_reply)
        else:
            log.error("Submission {} to grader {} failure: Reply: {}, ".format(submission.id, self.worker_url, grader_reply))
            submission.num_failures += 1
            submission.lms_ack = post_failure_to_lms(submission.xqueue_header)

        # NOTE: retiring pushed submissions after one shot regardless of grading_success
        submission.retired = True

        submission.save()
Example #4
def basefeed_post_save(instance, **kwargs):
    """ Do whatever useful on Feed.post_save(). """

    if not kwargs.get('created', False):
        return

    feed = instance

    try:
        feed.post_create_pre_refresh()

    except AttributeError:
        pass

    # if feed._db_name != settings.MONGODB_NAME_ARCHIVE:
    # Update the feed immediately after creation.

    # HEADS UP: this task name will be registered later
    # by the register_task_method() call.
    basefeed_refresh_task.delay(feed.id)  # NOQA

    try:
        feed.post_create_post_refresh()

    except AttributeError:
        pass

    statsd.gauge('feeds.counts.total', 1, delta=True)
    statsd.gauge('feeds.counts.open', 1, delta=True)
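The delta=True calls above adjust a gauge up or down instead of setting an absolute value. A minimal, self-contained sketch of that pattern, assuming the pystatsd client (the statsd package) and placeholder connection details:

# Sketch only: delta gauges with pystatsd; host and port are placeholders.
import statsd

client = statsd.StatsClient('localhost', 8125)
client.gauge('feeds.counts.total', 1, delta=True)   # nudge the gauge up by 1
client.gauge('feeds.counts.total', -1, delta=True)  # nudge it back down by 1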
Example #5
def rssatomfeed_post_save(instance, **kwargs):

    if not kwargs.get('created', False):
        return

    statsd.gauge('feeds.counts.total', 1, delta=True)
    statsd.gauge('feeds.counts.rssatom', 1, delta=True)
Example #6
def process_exit_surveys():
    """Exit survey handling.

    * Collect new exit survey results.
    * Save results to our metrics table.
    * Add new emails collected to the exit survey.
    """

    _process_exit_survey_results()

    # Get the email addresses from two days ago and add them to the survey
    # campaign (skip this on stage).
    if settings.STAGE:
        # Only run this on prod, it doesn't need to be running multiple times
        # from different places.
        return

    startdate = date.today() - timedelta(days=2)
    enddate = date.today() - timedelta(days=1)

    for survey in SURVEYS.keys():
        if 'email_collection_survey_id' not in SURVEYS[survey]:
            # Some surveys don't have email collection on the site
            # (the askers survey, for example).
            continue

        emails = get_email_addresses(survey, startdate, enddate)
        for email in emails:
            add_email_to_campaign(survey, email)

        statsd.gauge('survey.{0}'.format(survey), len(emails))
Example #7
def process_exit_surveys():
    """Exit survey handling.

    * Collect new exit survey results.
    * Save results to our metrics table.
    * Add new emails collected to the exit survey.
    """

    _process_exit_survey_results()

    # Get the email addresses from two days ago and add them to the survey
    # campaign (skip this on stage).
    if settings.STAGE:
        # Only run this on prod, it doesn't need to be running multiple times
        # from different places.
        return

    startdate = date.today() - timedelta(days=2)
    enddate = date.today() - timedelta(days=1)

    for survey in SURVEYS.keys():
        if 'email_collection_survey_id' not in SURVEYS[survey]:
            # Some surveys don't have email collection on the site
            # (the askers survey, for example).
            continue

        emails = get_email_addresses(survey, startdate, enddate)
        for email in emails:
            add_email_to_campaign(survey, email)

        statsd.gauge('survey.{0}'.format(survey), len(emails))
Example #8
def rssatomfeed_post_save(instance, **kwargs):

    if not kwargs.get('created', False):
        return

    statsd.gauge('feeds.counts.total', 1, delta=True)
    statsd.gauge('feeds.counts.rssatom', 1, delta=True)
Example #9
def report_stats(host, topology, toporoot, topic):
        state = urllib2.urlopen(
            "http://{}/api/status?toporoot={}&topic={}".format(
                host, toporoot, topic
            )
        ).read()

        data = json.loads(state)

        amount = 0
        for looplord in data:
            if looplord['amount'] is not None:
                statsd.gauge(
                    'razor.kafkamon.topology.partition',
                    looplord['amount'],
                    tags = [
                        "topic:{}".format(topic),
                        "topology:{}".format(topology),
                        "partition:{}".format(looplord['partition'])
                    ]
                )
                amount += looplord['amount']

        print "Got {} for {}".format(amount, topology)

        statsd.gauge(
            'razor.kafkamon.total_delta',
            amount, tags = [
                "topic:{}".format(topic),
                "topology:{}".format(topology)
            ]
        )
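The snippet above fetches the status with urllib2 and reports with a print statement, both Python 2-only. A rough Python 3 sketch of just the status download, reusing the URL format from the example:

# Sketch only: fetch and decode the kafkamon status JSON under Python 3.
import json
from urllib.request import urlopen

def fetch_status(host, toporoot, topic):
    url = "http://{}/api/status?toporoot={}&topic={}".format(host, toporoot, topic)
    with urlopen(url) as response:
        return json.loads(response.read().decode('utf-8'))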
Example #10
    def post_create_task(self):
        """ Method meant to be run from a celery task. """

        if not self.slug:
            self.slug = slugify(self.name)
            self.save()

            statsd.gauge('tags.counts.total', 1, delta=True)
Example #11
    def post_create_task(self):
        """ Method meant to be run from a celery task. """

        if not self.slug:
            self.slug = slugify(self.name)
            self.save()

            statsd.gauge('mongo.tags.counts.total', 1, delta=True)
Example #12
def measure_queue_lag(queued_time):
    """A task that measures the time it was sitting in the queue.

    It saves the data to graphite via statsd.
    """
    lag = datetime.now() - queued_time
    lag = (lag.days * 3600 * 24) + lag.seconds
    statsd.gauge('rabbitmq.lag', max(lag, 0))
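The manual days-to-seconds arithmetic above is equivalent to timedelta.total_seconds() (available since Python 2.7), apart from the ignored microseconds. A minimal sketch:

# Sketch only: the same lag computation via total_seconds().
from datetime import datetime

def queue_lag_seconds(queued_time):
    # total_seconds() folds days, seconds and microseconds into one float.
    return max(int((datetime.now() - queued_time).total_seconds()), 0)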
Example #13
def synchronize_statsd_authors_gauges(full=False):

    with benchmark('synchronize statsd gauges for Author.*'):

        statsd.gauge('authors.counts.total', Author._get_collection().count())

        if full:
            duplicates = Author.objects(duplicate_of__ne=None).no_cache()
            statsd.gauge('authors.counts.duplicates', duplicates.count())
Example #14
def synchronize_statsd_websites_gauges(full=False):

    with benchmark('synchronize statsd gauges for WebSite.*'):

        statsd.gauge('websites.counts.total', WebSite._get_collection().count())

        if full:
            duplicates = WebSite.objects(duplicate_of__ne=None).no_cache()
            statsd.gauge('websites.counts.duplicates', duplicates.count())
Example #15
 def emit(self, stat_name, tags, value):
     # Convert the dict of tags into a list of "key:value" strings
     string_tags = map(
         lambda (k, v): (self.dd_tag_string.format(key=k, value=v)),
         tags.iteritems())
     statsd.gauge(self.dd_metric_string.format(scope=self.scope,
                                               stat=stat_name),
                  value,
                  tags=string_tags)
Example #16
def synchronize_mongodb_statsd_tags_gauges(full=False):
    """ synchronize all tag-related gauges on our statsd server. """

    with benchmark('synchronize statsd gauges for Tag.*'):

        statsd.gauge('mongo.tags.counts.total', Tag._get_collection().count())

        if full:
            duplicates = Tag.objects(duplicate_of__ne=None).no_cache()
            statsd.gauge('mongo.tags.counts.duplicates', duplicates.count())
Example #17
def synchronize_statsd_websites_gauges(full=False):
    """ synchronize all website-related gauges on our statsd server. """

    with benchmark('synchronize statsd gauges for WebSite.*'):

        statsd.gauge('websites.counts.total', WebSite.objects.all().count())

        if full:
            duplicates = WebSite.objects.exclude(duplicate_of=None)
            statsd.gauge('websites.counts.duplicates', duplicates.count())
Example #18
def synchronize_statsd_tags_gauges(full=False):
    """ synchronize all tag-related gauges on our statsd server. """

    with benchmark('synchronize statsd gauges for Tag.*'):

        statsd.gauge('tags.counts.total', Tag.objects.all().count())

        if full:
            duplicates = Tag.objects.exclude(duplicate_of=None)
            statsd.gauge('tags.counts.duplicates', duplicates.count())
Example #19
def synchronize_mongodb_statsd_tags_gauges(full=False):
    """ synchronize all tag-related gauges on our statsd server. """

    with benchmark('synchronize statsd gauges for Tag.*'):

        statsd.gauge('mongo.tags.counts.total', Tag._get_collection().count())

        if full:
            duplicates = Tag.objects(duplicate_of__ne=None).no_cache()
            statsd.gauge('mongo.tags.counts.duplicates', duplicates.count())
Example #20
def synchronize_statsd_authors_gauges(full=False):
    """ synchronize all author-related gauges on our statsd server. """

    with benchmark('synchronize statsd gauges for Author.*'):

        statsd.gauge('authors.counts.total', Author.objects.all().count())

        if full:
            duplicates = Author.objects.exclude(duplicate_of=None)
            statsd.gauge('authors.counts.duplicates', duplicates.count())
Example #21
    def register_duplicate(self, duplicate, force=False):
        """ TODO. """

        # be sure this helper method is called
        # on a document that has the attribute.
        assert hasattr(duplicate, 'duplicate_of')

        _cls_name_ = self.__class__.__name__
        _cls_name_lower_ = _cls_name_.lower()
        # TODO: get this from a class attribute?
        # I'm not sure for MongoEngine models.
        lower_plural = _cls_name_lower_ + u's'

        if duplicate.duplicate_of:
            if duplicate.duplicate_of != self:
                # NOTE: for Article, this situation can't happen IRL
                # (demonstrated with Willian 20130718).
                #
                # Any "second" duplicate *will* resolve to the master via the
                # redirect chain. It will *never* resolve to an intermediate
                # URL in the chain.
                #
                # For other objects it should happen too, because the
                # `get_or_create()` methods should return the `.duplicate_of`
                # attribute if it is not None.

                LOGGER.warning(u'%s %s is already a duplicate of '
                               u'another instance, not %s. Aborting.',
                               _cls_name_, duplicate, duplicate.duplicate_of)
                return

        LOGGER.info(u'Registering %s %s as duplicate of %s…',
                    _cls_name_, duplicate, self)

        # Register the duplication immediately, for other
        # background operations to use ourselves as value.
        duplicate.duplicate_of = self
        duplicate.save()

        statsd.gauge('mongo.%s.counts.duplicates' % lower_plural, 1, delta=True)

        try:
            # Having tasks not as methods because of Celery bugs forces
            # us to do strange things. We have to "guess" and lookup the
            # task name in the current module. OK, not *that* big deal.
            self.nonrel_globals[
                _cls_name_lower_ + '_replace_duplicate_everywhere_task'].delay(
                self.id, duplicate.id)

        except KeyError:
            LOGGER.warning(u'Object %s has no `replace_duplicate_everywhere()` '
                           u'method, or the method has not been registered as '
                           u'a task with `register_task_method()`.', self)
Example #22
    def post_create_task(self):
        """ Method meant to be run from a celery task. """

        if not self.slug:
            if self.name is None:
                proto, host_and_port, remaining = WebSite.split_url(self.url)
                self.name = host_and_port.replace(u'_', u' ').title()

            self.slug = slugify(self.name)

            self.save()

            statsd.gauge('mongo.websites.counts.total', 1, delta=True)
Example #23
    def g729_metrics(self):
        if (self.g729):
            g729_count = yield self.api('g729_count')
            g729_count = int(g729_count)
            statsd.gauge('freeswitch.g729.total', g729_count)
            g729_counts = yield self.api('g729_used')
            g729_enc, g729_dec = [int(e) for e in g729_counts.split(":")]
            statsd.gauge('freeswitch.g729.used.encoder', g729_enc)
            statsd.gauge('freeswitch.g729.used.decoder', g729_dec)
            if (g729_enc > g729_dec):
                statsd.gauge('freeswitch.g729.utilization', g729_enc / g729_count)
            else:
                statsd.gauge('freeswitch.g729.utilization', g729_dec / g729_count)
Example #24
def subscription_pre_delete(instance, **kwargs):
    """ Subscribe the mailfeed's owner if feed is beiing created. """

    subscription = instance

    statsd.gauge('subscriptions.counts.total', -1, delta=True)

    feed = subscription.feed

    if feed.subscriptions.all().count() == 1 \
            and feed.AUTO_CLOSE_WHEN_NO_SUBSCRIPTION_LEFT:
        feed.close(u'No subscription left on this feed (last subscribed '
                   u'user: {0})'.format(subscription.user.username))
Example #25
def mark_tweet_deleted(tweet_id):

        try:
            tweet = Tweet.objects.get(tweet_id=tweet_id)

        except:
            LOGGER.warning(u'Unknown tweet to delete: %s', tweet_id)

        else:
            tweet.is_deleted = True
            tweet.save()

            statsd.gauge('tweets.counts.deleted', 1, delta=True)
            LOGGER.info(u'Tweet %s marked as deleted.', tweet)
Example #26
    def close(self, reason=None, commit=True):
        """ Close the feed with or without a reason. """

        self.is_active = False
        self.date_closed = now()
        self.closed_reason = reason or _(u'NO REASON GIVEN')

        if commit:
            self.save()

        statsd.gauge('feeds.counts.open', -1, delta=True)

        LOGGER.warning(u'%s %s: closed with reason “%s”.',
                       self._meta.verbose_name, self.id, self.closed_reason)
Example #27
def mark_tweet_deleted(tweet_id):

    try:
        tweet = Tweet.objects.get(tweet_id=tweet_id)

    except:
        LOGGER.warning(u'Unknown tweet to delete: %s', tweet_id)

    else:
        tweet.is_deleted = True
        tweet.save()

        statsd.gauge('tweets.counts.deleted', 1, delta=True)
        LOGGER.info(u'Tweet %s marked as deleted.', tweet)
Example #28
 def g729_metrics(self):
     if (self.g729):
         g729_count = yield self.api('g729_count')
         g729_count = int(g729_count)
         statsd.gauge('freeswitch.g729.total', g729_count)
         g729_counts = yield self.api('g729_used')
         g729_enc, g729_dec = [int(e) for e in g729_counts.split(":")]
         statsd.gauge('freeswitch.g729.used.encoder', g729_enc)
         statsd.gauge('freeswitch.g729.used.decoder', g729_dec)
         if (g729_enc > g729_dec):
             statsd.gauge('freeswitch.g729.utilization',
                          g729_enc / g729_count)
         else:
             statsd.gauge('freeswitch.g729.utilization',
                          g729_dec / g729_count)
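One caveat in both g729_metrics variants: under Python 2 the final division is integer division, so the utilization gauge reports 0 whenever usage is below the licence count. A self-contained sketch of the ratio with true division:

# Sketch only: G.729 utilization as a float ratio instead of integer division.
def g729_utilization(encoders_used, decoders_used, total_licenses):
    if not total_licenses:
        return 0.0
    return float(max(encoders_used, decoders_used)) / total_licenses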
Example #29
def _execute():

	statsd.connect('localhost', 8125)

	result = _netfilter()

	for chain, entries in result.iteritems():

		for number, item in entries.iteritems():

			key, bytes = _generate_key(chain, item)
			if key is None or key == '':
				continue

			_println('[info]: send gauge=[', key, '], value=[', str(bytes), ']')
			# statsd.histogram(key, bytes)
			statsd.gauge(key, bytes)
Example #30
    def _deliver_submission(self, submission):
        payload = {
            'xqueue_body': submission.xqueue_body,
            'xqueue_files': submission.urls
        }

        submission.grader_id = self.worker_url
        submission.push_time = timezone.now()
        start = time.time()
        (grading_success, grader_reply) = _http_post(self.worker_url,
                                                     json.dumps(payload),
                                                     settings.GRADING_TIMEOUT)
        grading_time = time.time() - start
        statsd.histogram('xqueue.consumer.consumer_callback.grading_time',
                         grading_time,
                         tags=['queue:{0}'.format(self.queue_name)])

        if grading_time > settings.GRADING_TIMEOUT:
            log.error(
                "Grading time above {} for submission. grading_time: {}s body: {} files: {}"
                .format(settings.GRADING_TIMEOUT, grading_time,
                        submission.xqueue_body, submission.urls))

        job_count = get_queue_length(self.queue_name)
        statsd.gauge('xqueue.consumer.consumer_callback.queue_length',
                     job_count,
                     tags=['queue:{0}'.format(self.queue_name)])

        submission.return_time = timezone.now()

        # TODO: For the time being, a submission in a push interface gets one chance at grading,
        #       with no requeuing logic
        if grading_success:
            submission.grader_reply = grader_reply
            submission.lms_ack = post_grade_to_lms(submission.xqueue_header,
                                                   grader_reply)
        else:
            log.error("Submission {} to grader {} failure: Reply: {}, ".format(
                submission.id, self.worker_url, grader_reply))
            submission.num_failures += 1
            submission.lms_ack = post_failure_to_lms(submission.xqueue_header)

        # NOTE: retiring pushed submissions after one shot regardless of grading_success
        submission.retired = True

        submission.save()
Example #31
    def report(self, metric_type, value, **kwargs):
        if not statsd_installed:
            return

        if not self.stats_connected:
            statsd.connect(self.host, self.port)
            self.stats_connected = True
        key = "spike.test"
        tags = ["%s:%s" % (k, v) for k, v in kwargs.iteritems()]
        if "postfix" in kwargs:
            key = ".".join([key, kwargs["postfix"]])
            del kwargs["postfix"]

        if metric_type == "counter":
            statsd.increment(key, value, tags=tags)
        elif metric_type == "gauge":
            statsd.gauge(key, value, tags=tags)
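Note that report() builds the tag list from kwargs before popping "postfix", so the postfix value also ends up as a tag and the del has no effect on the tags already built. A small illustrative sketch (hypothetical helper, not the original API) that pops first:

# Sketch only: pop the postfix before building tags so it is not duplicated as a tag.
def build_key_and_tags(base_key, **kwargs):
    postfix = kwargs.pop('postfix', None)
    key = '.'.join([base_key, postfix]) if postfix else base_key
    tags = ['%s:%s' % (k, v) for k, v in kwargs.items()]
    return key, tags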
Example #32
    def report(self, metric_type, value, **kwargs):
        if not statsd_installed:
            return

        if not self.stats_connected:
            statsd.connect(self.host, self.port)
            self.stats_connected = True
        key = "spike.test"
        tags = ["%s:%s" % (k, v) for k, v in kwargs.iteritems()]
        if "postfix" in kwargs:
            key = ".".join([key, kwargs["postfix"]])
            del kwargs["postfix"]

        if metric_type == "counter":
            statsd.increment(key, value, tags=tags)
        elif metric_type == "gauge":
            statsd.gauge(key, value, tags=tags)
Example #33
def synchronize_statsd_feeds_gauges(full=False):
    """ synchronize all feed-related gauges on our statsd server. """

    with benchmark('synchronize statsd gauges for BaseFeed.*'):

        all_feeds = BaseFeed.objects.all()

        statsd.gauge('feeds.counts.total', all_feeds.count())
        statsd.gauge('feeds.counts.open', BaseFeed.objects.active().count())

        if full:
            statsd.gauge('feeds.counts.mail', all_feeds.mail().count())
            statsd.gauge('feeds.counts.twitter', all_feeds.twitter().count())

            duplicates = BaseFeed.objects.exclude(duplicate_of=None)
            statsd.gauge('feeds.counts.duplicates', duplicates.count())

        pass
Example #34
def survey_recent_askers():
    """Add question askers to a surveygizmo campaign to get surveyed."""
    if settings.STAGE:
        # Only run this on prod, it doesn't need to be running multiple times
        # from different places.
        return

    # We get the email addresses of all users that asked a question 2 days
    # ago. Then, all we have to do is send the email address to surveygizmo
    # and it does the rest.
    two_days_ago = date.today() - timedelta(days=2)
    yesterday = date.today() - timedelta(days=1)

    emails = (Question.objects.filter(created__gte=two_days_ago,
                                      created__lt=yesterday).values_list(
                                          'creator__email', flat=True))
    for email in emails:
        add_email_to_campaign('askers', email)

    statsd.gauge('survey.askers', len(emails))
Example #35
    def reopen(self, message=None, verbose=True, commit=True):
        """ Reopen the feed, clearing errors, date closed, etc. """

        self.errors = []
        self.is_active = True
        self.date_closed = now()
        self.closed_reason = u'Reopen on %s' % now().isoformat()

        if commit:
            self.save()

        statsd.gauge('feeds.counts.open', 1, delta=True)

        if verbose:
            if message is None:
                LOGGER.info(u'%s %s: %sre-opened.', self._meta.verbose_name,
                            self.id, u'' if commit else u'temporarily ')

            else:
                LOGGER.info(u'%s %s: %s', self._meta.verbose_name, self.id,
                            message)
Example #36
def survey_recent_askers():
    """Add question askers to a surveygizmo campaign to get surveyed."""
    if settings.STAGE:
        # Only run this on prod, it doesn't need to be running multiple times
        # from different places.
        return

    # We get the email addresses of all users that asked a question 2 days
    # ago. Then, all we have to do is send the email address to surveygizmo
    # and it does the rest.
    two_days_ago = date.today() - timedelta(days=2)
    yesterday = date.today() - timedelta(days=1)

    emails = (
        Question.objects
        .filter(created__gte=two_days_ago, created__lt=yesterday)
        .values_list('creator__email', flat=True))
    for email in emails:
        add_email_to_campaign('askers', email)

    statsd.gauge('survey.askers', len(emails))
Example #37
 def on_shutter(self, state):
     if not state.event_count:
         # No new events since last snapshot.
         return
     statsd.incr("celery.tasks.total", state.task_count)
     statsd.gauge("celery.workers.total", len(state.workers))
     statsd.gauge("celery.workers.alive.count",
         sum(1 for _, worker in state.workers.items() if worker.alive))
     statsd.gauge("celery.workers.dead.count",
         sum(1 for _, worker in state.workers.items() if not worker.alive))
     map(self.handle_task, state.tasks.items())
Example #38
def update_stats(provider):
    state_names = {
        vmdatabase.BUILDING: 'building',
        vmdatabase.READY: 'ready',
        vmdatabase.USED: 'used',
        vmdatabase.ERROR: 'error',
        vmdatabase.HOLD: 'hold',
        vmdatabase.DELETE: 'delete',
        }

    for base_image in provider.base_images:
        states = {
            vmdatabase.BUILDING: 0,
            vmdatabase.READY: 0,
            vmdatabase.USED: 0,
            vmdatabase.ERROR: 0,
            vmdatabase.HOLD: 0,
            vmdatabase.DELETE: 0,
            }
        for machine in base_image.machines:
            if machine.state not in states:
                continue
            states[machine.state] += 1
        if statsd:
            for state_id, count in states.items():
                key = 'devstack.pool.%s.%s.%s' % (
                    provider.name,
                    base_image.name,
                    state_names[state_id])
                statsd.gauge(key, count)

            key = 'devstack.pool.%s.%s.min_ready' % (
                provider.name,
                base_image.name)
            statsd.gauge(key, base_image.min_ready)

    if statsd:
        key = 'devstack.pool.%s.max_servers' % provider.name
        statsd.gauge(key, provider.max_servers)
Example #39
    def updateStats(self, session, provider_name):
        if not statsd:
            return
        # This may be called outside of the main thread.
        provider = self.config.providers[provider_name]

        states = {}

        for target in self.config.targets.values():
            for image in target.images.values():
                image_key = 'nodepool.target.%s.%s' % (
                    target.name, image.name)
                key = '%s.min_ready' % image_key
                statsd.gauge(key, image.min_ready)
                for provider in image.providers.values():
                    provider_key = '%s.%s' % (
                        image_key, provider.name)
                    for state in nodedb.STATE_NAMES.values():
                        key = '%s.%s' % (provider_key, state)
                        states[key] = 0

        for node in session.getNodes():
            if node.state not in nodedb.STATE_NAMES:
                continue
            key = 'nodepool.target.%s.%s.%s.%s' % (
                node.target_name, node.image_name,
                node.provider_name, nodedb.STATE_NAMES[node.state])
            if key not in states:
                states[key] = 0
            states[key] += 1

        for key, count in states.items():
            statsd.gauge(key, count)

        for provider in self.config.providers.values():
            key = 'nodepool.provider.%s.max_servers' % provider.name
            statsd.gauge(key, provider.max_servers)
Example #40
def update_stats(provider):
    state_names = {
        vmdatabase.BUILDING: 'building',
        vmdatabase.READY: 'ready',
        vmdatabase.USED: 'used',
        vmdatabase.ERROR: 'error',
        vmdatabase.HOLD: 'hold',
        vmdatabase.DELETE: 'delete',
    }

    for base_image in provider.base_images:
        states = {
            vmdatabase.BUILDING: 0,
            vmdatabase.READY: 0,
            vmdatabase.USED: 0,
            vmdatabase.ERROR: 0,
            vmdatabase.HOLD: 0,
            vmdatabase.DELETE: 0,
        }
        for machine in base_image.machines:
            if machine.state not in states:
                continue
            states[machine.state] += 1
        if statsd:
            for state_id, count in states.items():
                key = 'devstack.pool.%s.%s.%s' % (
                    provider.name, base_image.name, state_names[state_id])
                statsd.gauge(key, count)

            key = 'devstack.pool.%s.%s.min_ready' % (provider.name,
                                                     base_image.name)
            statsd.gauge(key, base_image.min_ready)

    if statsd:
        key = 'devstack.pool.%s.max_servers' % provider.name
        statsd.gauge(key, provider.max_servers)
Example #41
 def run(self):
     """
     This function starts the main loop.
     """
     # Construct an oping argument
     destinations = [ip for ip in self.pool]
     while 1:
         try:
             var = subprocess.check_output(['fping', '-qC5'] + destinations,
                                           stderr=subprocess.STDOUT)
         except KeyboardInterrupt:
             logger.warning('Process was interrupted from keyboard')
             return 0
         except subprocess.CalledProcessError as e:
             # We don't care if fping exited with non zero exit code, but
             # we need its output
             var = e.output
         except OSError:
             logger.exception('Do you have fping installed?')
             raise
         data = (line.split() for line in var.splitlines())
         for record in data:
             try:
                 ip, name = record[0], self.pool[record[0]]
             except KeyError:
                 # Sometimes fping returns additional errors
                 logger.debug('fping message: %s', ' '.join(record))
                 continue
             logger.debug('Preparing data for %s, %s', name, ip)
             try:
                 values = [
                     float(value) if value != '-' else 0
                     for value in record[2:]
                 ]
                 if not values:
                     continue
             except Exception as e:
                 logger.warning(
                     'Exception occurred during value unpack: %s', e)
                 continue
             try:
                 positive_values = filter(lambda x: x > 0, values)
                 minimum = min(positive_values) if positive_values else 0
                 maximum = max(values)
                 average = sum(values) / len(positive_values) \
                     if positive_values else 0
                 jitter = sum(
                     abs(values[i] - values[i - 1])
                     for i in xrange(1, len(values))) / (len(values) - 1)
                 loss = sum(100 / len(values) for x in values if x == 0)
                 logger.debug('Raw values for %s: %s', ip, values)
                 logger.debug(
                     'Parsed values for %s: (min, max, avg, jtr, los)'
                     '%s, %s, %s, %s, %s', ip, minimum, maximum, average,
                     jitter, loss)
             except Exception as e:
                 logger.warning('Exception occurred during calculation: %s',
                                e)
                 continue
             try:
                 statsd.gauge('pingtest.min',
                              minimum,
                              tags=self._tags(ip, name))
                 statsd.gauge('pingtest.max',
                              maximum,
                              tags=self._tags(ip, name))
                 statsd.gauge('pingtest.avg',
                              average,
                              tags=self._tags(ip, name))
                 statsd.gauge('pingtest.jitter',
                              jitter,
                              tags=self._tags(ip, name))
                 statsd.gauge('pingtest.loss',
                              loss,
                              tags=self._tags(ip, name))
             except AttributeError as e:
                 # Dogstatsd sometimes fails to call get_socket()
                 logger.warning('Statsd error: %s', e)
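For reference, the per-host statistics computed in the loop above (min and max over all probes, average over successful probes, jitter, loss percentage) can be isolated into a small standalone helper. This sketch mirrors the example's formulas, assuming lost probes were already mapped to 0.0, with a float loss value and a guard against single-sample division:

# Sketch only: the same ping statistics as the run() loop above, for one host.
def ping_stats(values):
    positive = [v for v in values if v > 0]
    minimum = min(positive) if positive else 0
    maximum = max(values) if values else 0
    average = sum(values) / len(positive) if positive else 0
    jitter = (sum(abs(values[i] - values[i - 1]) for i in range(1, len(values)))
              / (len(values) - 1)) if len(values) > 1 else 0
    loss = sum(100.0 / len(values) for v in values if v == 0) if values else 0
    return minimum, maximum, average, jitter, loss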
Example #42
state = urllib2.urlopen(
    "http://{}/api/status?toporoot={}&topic={}".format(
        host, toporoot, topic
    )
).read()

data = json.loads(state)

amount = 0
for looplord in data:
    if looplord['amount'] is not None:
        statsd.gauge(
            'razor.kafkamon.topology.partition',
            looplord['amount'],
            tags = [
                "topic:{}".format(topic),
                "topology:{}".format(topology),
                "partition:{}".format(looplord['partition'])
            ]
        )
        amount += looplord['amount']

print "Got {} for {}".format(amount, topology)

statsd.gauge(
    'razor.kafkamon.total_delta',
    amount, tags = [
        "topic:{}".format(topic),
        "topology:{}".format(topology)
    ]
)
Example #43
def report_stats(host, topology, toporoot, topic):
    state = urllib2.urlopen("http://{}/api/status?toporoot={}&topic={}".format(
        host, toporoot, topic)).read()

    data = json.loads(state)

    total_delta = 0
    total_kafka_current = 0
    total_spout_current = 0
    for looplord in data:
        if looplord['amount'] is not None:
            partition_tags = [
                "topic:{}".format(topic), "topology:{}".format(topology),
                "partition:{}".format(looplord['partition'])
            ]
            statsd.gauge('razor.kafkamon.topology.partition',
                         looplord['amount'],
                         tags=partition_tags)
            total_delta += looplord['amount']
            statsd.gauge('razor.kafkamon.topology.partition.kafka.current',
                         looplord['current'],
                         tags=partition_tags)
            total_kafka_current += looplord['current']
            statsd.gauge('razor.kafkamon.topology.partition.spout.current',
                         looplord['storm'],
                         tags=partition_tags)
            total_spout_current += looplord['storm']

    print "Got amount={}, kafka current={}, spout current={} for {}".format(
        total_delta, total_kafka_current, total_spout_current, topology)

    total_tags = ["topic:{}".format(topic), "topology:{}".format(topology)]
    statsd.gauge('razor.kafkamon.total_delta', total_delta, tags=total_tags)
    statsd.gauge('razor.kafkamon.total_kafka_current',
                 total_kafka_current,
                 tags=total_tags)
    statsd.gauge('razor.kafkamon.total_spout_current',
                 total_spout_current,
                 tags=total_tags)
Example #44
def rssatomfeed_pre_delete(instance, **kwargs):

    statsd.gauge('feeds.counts.total', -1, delta=True)
    statsd.gauge('feeds.counts.rssatom', -1, delta=True)
Example #45
cgminer = APIClient()
cloud_watch = CloudWatchMetrics()
while True:
    try:
        summary = cgminer.summary()
        devs = cgminer.devs()
        print "[%s] API Query OK" % datetime.now()
    except Exception, e:
        print "Error while querying CGMiner: %s" % e
        sleep(REPORT_INTERVAL)
        continue

    # Global stats
    if ENABLE_DATADOG:
        statsd.gauge("cgminer.work_util", int(float(summary["Work Utility"])))
        statsd.gauge("system.uptime_days", uptime() / 3600 / 24)
    if ENABLE_CLOUDWATCH:
        cloud_watch.report_metric("App/CGMner", "WorkUtility", float(summary["Work Utility"]), { "Host": HOSTNAME})

    # GPU stats (temperature, KHash/s, etc.)
    for name,info in devs.iteritems():
        if not "GPU" in name: continue
        gpu_id = int(name.split("=")[1])
        temp = float(info["Temperature"])
        khash_s = float(info["MHS 5s"]) * 1000
        fan_speed = int(info["Fan Speed"])
        fan_pct = int(info["Fan Percent"])
        hw_errors = int(info["Hardware Errors"])
        if ENABLE_DATADOG:
            gauge = lambda k,v: statsd.gauge(k, v, tags=["gpu:%s" % gpu_id])
Example #46
def website_pre_delete(instance, **kwargs):

    statsd.gauge('websites.counts.total', -1, delta=True)
Example #47
def report_stats(host, topology, toporoot, topic):
        state = urllib2.urlopen(
            "http://{}/api/status?toporoot={}&topic={}".format(
                host, toporoot, topic
            )
        ).read()

        data = json.loads(state)

        total_delta = 0
        total_kafka_current = 0
        total_spout_current = 0
        for looplord in data:
            if looplord['amount'] is not None:
                partition_tags = [
                    "topic:{}".format(topic),
                    "topology:{}".format(topology),
                    "partition:{}".format(looplord['partition'])
                ]
                statsd.gauge(
                    'razor.kafkamon.topology.partition',
                    looplord['amount'],
                    tags = partition_tags
                )
                total_delta += looplord['amount']
                statsd.gauge(
                    'razor.kafkamon.topology.partition.kafka.current',
                    looplord['current'],
                    tags = partition_tags
                )
                total_kafka_current += looplord['current']
                statsd.gauge(
                    'razor.kafkamon.topology.partition.spout.current',
                    looplord['storm'],
                    tags = partition_tags
                )
                total_spout_current += looplord['storm']

        print "Got amount={}, kafka current={}, spout current={} for {}".format(
                total_delta, total_kafka_current, total_spout_current, topology)

        total_tags = [
            "topic:{}".format(topic),
            "topology:{}".format(topology)
        ]
        statsd.gauge(
            'razor.kafkamon.total_delta',
            total_delta, tags = total_tags
        )
        statsd.gauge(
            'razor.kafkamon.total_kafka_current',
            total_kafka_current, tags = total_tags
        )
        statsd.gauge(
            'razor.kafkamon.total_spout_current',
            total_spout_current, tags = total_tags
        )
Example #48
File: tag.py Project: 1flow/1flow
def simpletag_post_save(instance, **kwargs):

    if kwargs.get('created', False):
        statsd.gauge('tags.counts.total', 1, delta=True)
Example #49
def rssatomfeed_pre_delete(instance, **kwargs):

    statsd.gauge('feeds.counts.total', -1, delta=True)
    statsd.gauge('feeds.counts.rssatom', -1, delta=True)
Example #50
def BaseItem_postprocess_feedparser_data_method(self, force=False,
                                                commit=True):
    """ XXX: should disappear when feedparser_data is useless. """

    if self.original_data.feedparser_processed and not force:
        LOGGER.info('feedparser data already post-processed.')
        raise OriginalDataStopProcessingException

    fpod = self.original_data.feedparser_hydrated

    need_save = False

    if fpod:

        if self.date_published is None:
            self.date_published = datetime_from_feedparser_entry(fpod)

            if self.date_published is not None:
                need_save = True

        if self.tags == [] and 'tags' in fpod:
            tags = list(
                Tag.get_tags_set((
                    t['term']
                    # Sometimes, t['term'] can be None.
                    # http://dev.1flow.net/webapps/1flow/group/4082/
                    for t in fpod['tags'] if t['term'] is not None),
                    origin=self))

            self.update_tags(tags, initial=True, need_reload=False)

        if not self.authors.exists():
            # No need to save(), this is an M2M relation.
            Author.get_authors_from_feedparser_article(fpod,
                                                       set_to_article=self)

        if self.language is None:
            language = fpod.get('summary_detail', {}).get('language', None)

            if language is None:
                language = fpod.get('title_detail', {}).get(
                    'language', None)

            if language is not None:
                try:
                    self.language = Language.get_by_code(language)
                    need_save = True

                except:
                    # This happens if the language code of the
                    # feedparser data does not correspond to a
                    # Django setting language we support.
                    LOGGER.exception(u'Cannot set language %s on '
                                     u'article %s.', language, self)

        if self.is_orphaned:
            # We have a chance to get at least *some* content. It will
            # probably be incomplete, but this is better than nothing.

            detail = fpod.get('summary_detail', {})

            if detail:
                detail_type = detail.get('type', None)
                detail_value = detail.get('value', '')

                # We need some *real* data, though
                if len(detail_value) > 20:

                    if detail_type == 'text/plain':
                        self.content = detail_value
                        self.content_type = CONTENT_TYPES.MARKDOWN
                        need_save = True

                        statsd.gauge('articles.counts.markdown',
                                     1, delta=True)

                    elif detail_type == 'text/html':
                        self.content = detail_value
                        self.content_type = CONTENT_TYPES.HTML
                        need_save = True

                        statsd.gauge('articles.counts.html',
                                     1, delta=True)

                        self.convert_to_markdown(commit=False)
                        need_save = True

                    else:
                        LOGGER.warning(u'No usable content-type found '
                                       u'while trying to recover article '
                                       u'%s content: %s => "%s".', self,
                                       detail_type, detail_value)
                else:
                    LOGGER.warning(u'Empty (or nearly) content-type '
                                   u'found while trying to recover '
                                   u'orphaned article %s '
                                   u'content: %s => "%s".', self,
                                   detail_type, detail_value)
            else:
                LOGGER.warning(u'No summary detail found while trying '
                               u'to recover orphaned article %s '
                               u'content.', self)

        if self.comments_feed_url is None:

            comments_feed_url = fpod.get('wfw_commentrss', None)

            if comments_feed_url:
                self.comments_feed_url = comments_feed_url
                need_save = True

        # We don't care anymore, it's already in another database.
        # self.offload_attribute('feedparser_original_data')

    if need_save and commit:
        self.save()

    self.original_data.feedparser_processed = True
    self.original_data.save()
Example #51
def subscription_post_save(instance, **kwargs):
    """ Subscribe the mailfeed's owner if feed is beiing created. """

    if kwargs.get('created', False):
        statsd.gauge('subscriptions.counts.total', 1, delta=True)
Example #52
def synchronize_mongodb_statsd_articles_gauges(full=False):
    """ synchronize all articles-related gauges on our statsd server. """

    with benchmark('synchronize statsd gauges for Article.*'):

        empty               = Article.objects(content_type=0).no_cache()
        # empty_pending       = empty.filter(content_error='', url_error='')
        # empty_content_error = empty.filter(content_error__ne='')
        # empty_url_error     = empty.filter(url_error__ne='')

        parsed             = Article.objects(
            content_type__ne=CONTENT_TYPES.NONE)
        html               = parsed.filter(content_type=CONTENT_TYPES.HTML)
        markdown           = parsed.filter(content_type=CONTENT_TYPES.MARKDOWN)

        absolutes          = Article.objects(url_absolute=True).no_cache()
        duplicates         = Article.objects(duplicate_of__ne=None).no_cache()
        orphaned           = Article.objects(orphaned=True).no_cache()
        content_errors     = Article.objects(content_error__ne='').no_cache()
        url_errors         = Article.objects(url_error__ne='').no_cache()

        statsd.gauge('mongo.articles.counts.total',
                     Article._get_collection().count())
        statsd.gauge('mongo.articles.counts.markdown', markdown.count())
        statsd.gauge('mongo.articles.counts.html', html.count())
        statsd.gauge('mongo.articles.counts.empty', empty.count())
        statsd.gauge('mongo.articles.counts.content_errors',
                     content_errors.count())
        statsd.gauge('mongo.articles.counts.url_errors', url_errors.count())

        if full:
            statsd.gauge('mongo.articles.counts.orphaned', orphaned.count())
            statsd.gauge('mongo.articles.counts.absolutes', absolutes.count())
            statsd.gauge('mongo.articles.counts.duplicates', duplicates.count())
Example #53
 def run(self):
     """
     This function starts the main loop.
     """
     # Construct an oping argument
     destinations = [ip for ip in self.pool]
     while 1:
         try:
             var = subprocess.check_output(['fping', '-qC5'] + destinations,
                 stderr=subprocess.STDOUT)
         except KeyboardInterrupt:
             logger.warning('Process was interrupted from keyboard')
             return 0
         except subprocess.CalledProcessError as e:
             # We don't care if fping exited with non zero exit code, but
             # we need its output
             var = e.output
         except OSError:
             logger.exception('Do you have fping installed?')
             raise
         data = (line.split() for line in var.splitlines())
         for record in data:
             try:
                 ip, name = record[0], self.pool[record[0]]
             except KeyError:
                 # Sometimes fping returns additional errors
                 logger.debug('fping message: %s', ' '.join(record))
                 continue
             logger.debug('Preparing data for %s, %s', name, ip)
             try:
                 values = [float(value) if value != '-' else 0
                     for value in record[2:]]
                 if not values:
                     continue
             except Exception as e:
                 logger.warning('Exception occurred during value unpack: %s',
                     e)
                 continue
             try:
                 positive_values = filter(lambda x: x > 0, values)
                 minimum = min(positive_values) if positive_values else 0
                 maximum = max(values)
                 average = sum(values) / len(positive_values) \
                     if positive_values else 0
                 jitter = sum(abs(values[i] - values[i-1])
                     for i in xrange(1, len(values))) / (len(values) - 1)
                 loss = sum(100/len(values) for x in values if x == 0)
                 logger.debug('Raw values for %s: %s', ip, values)
                 logger.debug('Parsed values for %s: (min, max, avg, jtr, los)'
                     '%s, %s, %s, %s, %s', ip, minimum, maximum, average,
                     jitter, loss)
             except Exception as e:
                 logger.warning('Exception occurred during calculation: %s',
                     e)
                 continue
             try:
                 statsd.gauge('pingtest.min', minimum,
                     tags=self._tags(ip, name))
                 statsd.gauge('pingtest.max', maximum,
                     tags=self._tags(ip, name))
                 statsd.gauge('pingtest.avg', average,
                     tags=self._tags(ip, name))
                 statsd.gauge('pingtest.jitter', jitter,
                     tags=self._tags(ip, name))
                 statsd.gauge('pingtest.loss', loss, tags=self._tags(ip, name))
             except AttributeError as e:
                 # Dogstatsd sometimes fails to call get_socket()
                 logger.warning('Statsd error: %s', e)
Example #54
    def push_translations(self):
        gengo_api = FjordGengo()

        if not gengo_api.is_configured():
            # If Gengo isn't configured, then we drop out here rather
            # than raise a GengoConfig error.
            return

        balance = gengo_api.get_balance()
        threshold = settings.GENGO_ACCOUNT_BALANCE_THRESHOLD

        # statsd the balance so we can track it with graphite
        statsd.gauge('translation.gengo.balance', balance)

        if not self.balance_good_to_continue(balance, threshold):
            # If we don't have enough balance, stop.
            return

        # Create language buckets for the jobs
        jobs = GengoJob.objects.filter(status=STATUS_CREATED)
        lang_buckets = {}
        for job in jobs:
            lang_buckets.setdefault(job.src_lang, []).append(job)

        # For each bucket, assemble and order and post it.
        for lang, jobs in lang_buckets.items():
            batch = []
            for job in jobs:
                batch.append({
                    'id': job.id,
                    'lc_src': job.src_lang,
                    'lc_dst': job.dst_lang,
                    'text': getattr(job.content_object, job.src_field),
                    'unique_id': job.unique_id
                })

            # This will kick up a GengoAPIFailure which has the
            # complete response in the exception message. We want that
            # to propagate that and end processing in cases where
            # something bad happened because then we can learn more
            # about the state things are in. Thus we don't catch
            # exceptions here.
            resp = gengo_api.human_translate_bulk(batch)

            # We should have an order_id at this point, so we create a
            # GengoOrder with it.
            order = GengoOrder(order_id=resp['order_id'])
            order.save()
            order.log('created', metadata={'response': resp})

            # Persist the order on all the jobs and change their
            # status.
            for job in jobs:
                job.assign_to_order(order)

            # Update the balance and see if we're below the threshold.
            balance = balance - float(resp['credits_used'])

            if not self.balance_good_to_continue(balance, threshold):
                # If we don't have enough balance, stop.
                return
Example #55
    def absolutize_url(self, requests_response=None, force=False, commit=True):
        """ Make the current article URL absolute.

        Eg. transform:

        http://feedproxy.google.com/~r/francaistechcrunch/~3/hEIhLwVyEEI/

        into:

        http://techcrunch.com/2013/05/18/hell-no-tumblr-users-wont-go-to-yahoo/ # NOQA
            ?utm_source=feeurner&utm_medium=feed&utm_campaign=Feed%3A+francaistechcrunch+%28TechCrunch+en+Francais%29 # NOQA

        and then remove all these F*G utm_* parameters to get a clean
        final URL for the current article.

        Returns ``True`` if the operation succeeded, ``False`` if the
        absolutization pointed out that the current article is a
        duplicate of another. In this case the caller should stop its
        processing because the current article will be marked for deletion.

        Can also return ``None`` if absolutizing is disabled globally
        in ``constance`` configuration.
        """

        # Another example: http://rss.lefigaro.fr/~r/lefigaro/laune/~3/7jgyrQ-PmBA/story01.htm # NOQA

        if self.absolutize_url_must_abort(force=force, commit=commit):
            return

        if requests_response is None:
            try:
                requests_response = requests.get(self.url)

            except requests.ConnectionError as e:
                statsd.gauge('articles.counts.url_errors', 1, delta=True)
                message = u'Connection error while absolutizing “%s”: %s'
                args = (
                    self.url,
                    str(e),
                )

                self.url_error = message % args
                # Don't waste a version just for that.
                self.save_without_historical_record()

                LOGGER.error(message, *args)
                return

        if not requests_response.ok or requests_response.status_code != 200:

            message = u'HTTP Error %s while absolutizing “%s”: %s'
            args = (requests_response.status_code, requests_response.url,
                    requests_response.reason)

            with statsd.pipeline() as spipe:
                spipe.gauge('articles.counts.url_errors', 1, delta=True)

                if requests_response.status_code in (404, ):
                    self.is_orphaned = True

                    # This is not handled by the post_save()
                    # which acts only at article creation.
                    spipe.gauge('articles.counts.orphaned', 1, delta=True)

            self.url_error = message % args

            # Don't waste a version just for that.
            self.save_without_historical_record()

            LOGGER.error(message, *args)
            return

        #
        # NOTE: we could also get it eventually from r.headers['link'],
        #       which contains '<another_url>'. We need to strip out
        #       the '<>', and re-absolutize this link, because in the
        #       example it's another redirector. Also r.links is a good
        #       candidate but in the example I used, it contains the
        #       shortlink, which must be re-resolved too.
        #
        #       So: as we already are at the final address *now*, no need
        #       bothering re-following another which would lead us to the
        #       the same final place.
        #

        final_url = clean_url(requests_response.url)

        # LOGGER.info(u'\n\nFINAL: %s vs. ORIG: %s\n\n', final_url, self.url)

        if final_url != self.url:

            # Just for displaying purposes, see below.
            old_url = self.url

            if self.url_error:
                statsd.gauge('articles.counts.url_errors', -1, delta=True)

            # Even if we are a duplicate, we came until here and everything
            # went fine. We won't need to lookup again the absolute URL.
            statsd.gauge('articles.counts.absolutes', 1, delta=True)
            self.url_absolute = True
            self.url_error = None

            self.url = final_url

            try:
                if self.name.endswith(old_url):
                    self.name = self.name.replace(old_url, final_url)
            except:
                LOGGER.exception(u'Could not replace URL in name of %s #%s',
                                 self._meta.model.__name__, self.id)

            duplicate = False

            with transaction.atomic():
                # Without the atomic() block, saving the current article
                # (being a duplicate) will trigger the IntegrityError,
                # but will render the current SQL context unusable, unable
                # to register duplicate, potentially leading to massive
                # inconsistencies in the caller's context.
                try:
                    # Don't waste a version just for that.
                    self.save_without_historical_record()

                except IntegrityError:
                    duplicate = True

            if duplicate:
                params = {'%s___url' % self._meta.model.__name__: final_url}
                original = BaseItem.objects.get(**params)

                # Just to display the right “old” one in logs.
                self.url = old_url

                LOGGER.info(
                    u'%s #%s is a duplicate of #%s, '
                    u'registering as such.', self._meta.model.__name__,
                    self.id, original.id)

                original.register_duplicate(self)
                return False

            # Any other exception will raise. This is intentional.
            else:
                LOGGER.info(
                    u'URL of %s (#%s) successfully absolutized '
                    u'from %s to %s.', self._meta.model.__name__, self.id,
                    old_url, final_url)

        else:
            # Don't do the job twice.
            if self.url_error:
                statsd.gauge('articles.counts.url_errors', -1, delta=True)

            statsd.gauge('articles.counts.absolutes', 1, delta=True)
            self.url_absolute = True
            self.url_error = None

            # Don't waste a version just for that.
            self.save_without_historical_record()

        return True
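The docstring of absolutize_url() describes resolving the redirector URL and then stripping the utm_* tracking parameters. As an illustration only (not the project's clean_url() implementation), such a stripper could look like:

# Sketch only: drop utm_* query parameters from a URL (hypothetical helper).
from urllib.parse import parse_qsl, urlencode, urlsplit, urlunsplit

def strip_utm(url):
    parts = urlsplit(url)
    query = [(k, v) for k, v in parse_qsl(parts.query, keep_blank_values=True)
             if not k.lower().startswith('utm_')]
    return urlunsplit(parts._replace(query=urlencode(query)))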
Example #56
def record_queue_size():
    """Records the rabbitmq size in statsd"""
    statsd.gauge('rabbitmq.size', rabbitmq_queue_size())
Example #57
    def test_timed(self):

        @self.statsd.timed('timed.test')
        def func(a, b, c=1, d=1):
            """docstring"""
            time.sleep(0.5)
            return (a, b, c, d)

        t.assert_equal('func', func.__name__)
        t.assert_equal('docstring', func.__doc__)

        result = func(1, 2, d=3)
        # Assert it handles args and kwargs correctly.
        t.assert_equal(result, (1, 2, 1, 3))

        packet = self.recv()
        name_value, type_ = packet.split('|')
        name, value = name_value.split(':')

        t.assert_equal('ms', type_)
        t.assert_equal('timed.test', name)
        self.assert_almost_equal(0.5, float(value), 0.1)


if __name__ == '__main__':
    statsd = statsd
    while True:
        statsd.gauge('test.gauge', 1)
        statsd.increment('test.count', 2)
        time.sleep(0.05)