Example #1
def process_exit_surveys():
    """Exit survey handling.

    * Collect new exit survey results.
    * Save results to our metrics table.
    * Add new emails collected to the exit survey.
    """

    _process_exit_survey_results()

    # Get the email addresses from two days ago and add them to the survey
    # campaign (skip this on stage).
    if settings.STAGE:
        # Only run this on prod; it doesn't need to run multiple times
        # from different places.
        return

    startdate = date.today() - timedelta(days=2)
    enddate = date.today() - timedelta(days=1)

    for survey in SURVEYS.keys():
        if 'email_collection_survey_id' not in SURVEYS[survey]:
            # Some surveys don't have email collection on the site
            # (the askers survey, for example).
            continue

        emails = get_email_addresses(survey, startdate, enddate)
        for email in emails:
            add_email_to_campaign(survey, email)

        statsd.gauge('survey.{0}'.format(survey), len(emails))
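Every snippet on this page talks to a module-level statsd client. A minimal sketch of the setup they assume, using the statsd package directly (host, port, and prefix are placeholders; the Django projects quoted here typically obtain an equivalent client from django_statsd.clients instead):

from statsd import StatsClient

# Placeholder connection settings; real projects read these from config.
statsd = StatsClient(host='localhost', port=8125, prefix=None)
statsd.gauge('survey.general', 42)  # a gauge records an absolute value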
Example #2
    def listen(self):
        """Listens for incoming messages on the Redis queue"""
        while 1:
            val = None
            try:
                val = self.bot_bus.blpop('q', 1)

                # Track q length
                ql = self.bot_bus.llen('q')
                statsd.gauge(".".join(["plugins", "q"]), ql)

                if val:
                    _, val = val
                    LOG.debug('Received: %s', val)
                    line = Line(json.loads(val), self)

                    # Calculate the transport latency between go and the plugins.
                    delta = datetime.utcnow().replace(tzinfo=utc) - line._received
                    statsd.timing(".".join(["plugins", "latency"]),
                                 delta.total_seconds() * 1000)

                    self.dispatch(line)
            except Exception:
                LOG.error("Line Dispatch Failed", exc_info=True, extra={
                    "line": val
                })
Example #3
    def listen(self):
        """Listens for incoming messages on the Redis queue"""
        while 1:
            val = None
            try:
                val = self.bot_bus.blpop('q', 1)

                # Track q length
                ql = self.bot_bus.llen('q')
                statsd.gauge(".".join(["plugins", "q"]), ql)

                if val:
                    _, val = val
                    LOG.debug('Received: %s', val)
                    line = Line(json.loads(val), self)

                    # Calculate the transport latency between go and the plugins.
                    delta = datetime.utcnow().replace(
                        tzinfo=utc) - line._received
                    statsd.timing(".".join(["plugins", "latency"]),
                                  delta.total_seconds() * 1000)

                    if line.is_valid():
                        self.dispatch(line)
            except Exception:
                LOG.error("Line Dispatch Failed",
                          exc_info=True,
                          extra={"line": val})
Example #4
def process_exit_surveys():
    """Exit survey handling.

    * Collect new exit survey results.
    * Save results to our metrics table.
    * Add new emails collected to the exit survey.
    """

    _process_exit_survey_results()

    # Get the email addresses from 4-8 hours ago and add them to the survey
    # campaign (skip this on stage).

    # The cron associated with this process is set to run every hour,
    # with the intent of providing a 4-5 hour wait period between when a
    # visitor enters their email address and when they are sent a follow-up
    # survey.
    # The range here is set between 4 and 8 hours to be sure no emails are
    # missed should a particular cron run be skipped (e.g. during a deployment).
    startdatetime = datetime.now() - timedelta(hours=8)
    enddatetime = datetime.now() - timedelta(hours=4)

    for survey in SURVEYS.keys():
        if (not SURVEYS[survey]['active']
                or 'email_collection_survey_id' not in SURVEYS[survey]):
            # Some surveys don't have email collection on the site
            # (the askers survey, for example).
            continue

        emails = get_email_addresses(survey, startdatetime, enddatetime)
        for email in emails:
            add_email_to_campaign(survey, email)

        statsd.gauge('survey.{0}'.format(survey), len(emails))
Example #5
    def handle(self, **options):
        """
        * Collect new exit survey results.
        * Save results to our metrics table.
        * Add new emails collected to the exit survey.
        """

        utils._process_exit_survey_results()

        # Get the email addresses from 4-8 hours ago and add them to the survey
        # campaign (skip this on stage).

        # The cron associated with this process is set to run every hour,
        # with the intent of providing a 4-5 hour wait period between when a
        # visitor enters their email address and when they are sent a follow-up
        # survey.
        # The range here is set between 4 and 8 hours to be sure no emails are
        # missed should a particular cron run be skipped (e.g. during a deployment).
        startdatetime = datetime.now() - timedelta(hours=8)
        enddatetime = datetime.now() - timedelta(hours=4)

        for survey in SURVEYS.keys():
            if (
                not SURVEYS[survey]["active"] or
                "email_collection_survey_id" not in SURVEYS[survey]
            ):
                # Some surveys don't have email collection on the site
                # (the askers survey, for example).
                continue

            emails = get_email_addresses(survey, startdatetime, enddatetime)
            for email in emails:
                add_email_to_campaign(survey, email)

            statsd.gauge("survey.{0}".format(survey), len(emails))
Example #6
    def _record_time(self, request):
        if hasattr(request, '_start_time'):
            ms = int((time.time() - request._start_time) * 1000)
            data = dict(module=request._view_module,
                        name=request._view_name,
                        method=request.method)
            statsd.gauge(
                'view.response_ms.{module}.{name}.{method}'.format(**data), ms)
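This middleware reports response time through a gauge; statsd clients also provide a dedicated timer metric that lets the backend compute percentiles. A one-line sketch with an illustrative key:

statsd.timing('view.response_ms.flatpages.render.GET', 42)  # value in ms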
Example #7
def measure_queue_lag(queued_time):
    """A task that measures the time it was sitting in the queue.

    It saves the data to graphite via statsd.
    """
    lag = datetime.now() - queued_time
    lag = (lag.days * 3600 * 24) + lag.seconds
    statsd.gauge('celery.lag', max(lag, 0))
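The manual days/seconds arithmetic truncates sub-second lag and predates the common use of timedelta.total_seconds(); a sketch of the equivalent one-call version, reusing the names from the example:

from datetime import datetime

lag = datetime.now() - queued_time
statsd.gauge('celery.lag', max(int(lag.total_seconds()), 0))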
Example #8
def task_sms_send_count_gauge_statsd():

    sms_stats = SMSRecord.objects.filter(
        status__in=(SMSRecord.SMS_COMMIT,
                    SMSRecord.SMS_COMPLETE)).values('task_type').annotate(
                        Count('id')).values_list('task_type', 'id__count')

    for task_type, sms_count in sms_stats:
        statsd.gauge('xiaolumm.sms.send.%s' % task_type, sms_count)
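The chained queryset above is the standard Django per-group count: values() sets the GROUP BY key, annotate(Count(...)) counts rows per group, and values_list() flattens the result to (task_type, count) pairs. The same query, sketched with each step on its own line:

from django.db.models import Count

sms_stats = (SMSRecord.objects
             .filter(status__in=(SMSRecord.SMS_COMMIT, SMSRecord.SMS_COMPLETE))
             .values('task_type')            # GROUP BY task_type
             .annotate(Count('id'))          # COUNT(id) per group
             .values_list('task_type', 'id__count'))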
Example #9
    def obj_create(self, bundle):
        dataset = bundle.data["dataset"]
        input_dims = list(bundle.data["dimensions"])

        dset = DatasetMetaResource().get_via_uri(dataset, bundle.request)
        dset_dims = dset.dimensions.all()

        idim_l = []
        for idim in input_dims:
            idd = DimensionResource().get_via_uri(idim, bundle.request)
            idd.input_value = bundle.data["dimensions"][idim]
            idim_l.append(idd)
        #for d in idim_l:
        #    if d not in dset_dims:
        #        raise Exception("invalid input dim")
        
        #for d in dset_dims:
        #    if d not in idim_l:
        #        raise Exception("not enough input dims")

        session = settings.CASSANDRA_SESSION

        columns = ",".join([str(i.ts_column) for i in idim_l])
        values = ",".join([str(i.input_value) for i in idim_l])
        bucket = dset.get_bucket_list()[-1]

        futures = []
        
        stmt = "insert into tsstore (bucket,dataset, %s) values (%s, %s, %s)" % (columns, bucket, dset.id, values)
        #raise Exception(stmt)
        fut = session.execute_async(stmt)
        futures.append(fut)
        try:
            is_update = bundle.data["update"]
        except KeyError:
            is_update = False
        if False:
            for dim in idim_l:
                if dim.ts_column == "time":
                    if not dset.highest_ts or dset.highest_ts > dim.input_value:
                        dset.highest_ts = dim.input_value.strip("'")
                        dset.save()
                    if not dset.lowest_ts or dset.lowest_ts < dim.input_value:
                        dset.lowest_ts = dim.input_value.strip("'")
                        dset.save()
                    break
        if not is_update:
            dset.datapoint_count += 1
            dset.save()
        #raise Exception(futures)
        for future in futures:
            future.result()
        statsd.gauge("test_signal",random.choice(range(20)))
Example #10
def task_celery_queue_message_statsd():

    from django_statsd.clients import statsd
    resp = requests.get(FLOWER_QUEUE_LENGTH_API,
                        auth=HTTPBasicAuth(FLOWER_USERNAME, FLOWER_PASSWORD))

    # A 502 means the Flower server is restarting.
    if resp.status_code == 502:
        return

    queue_stats = resp.json().get('active_queues', [])
    for stat in queue_stats:
        statsd.gauge('celery.queue.%s' % stat['name'], stat['messages'])
Example #11
def update_fxa_data(current_timestamps):
    """Store the updated timestamps in a local dict, the cache, and SFMC."""
    global UPDATE_COUNT
    UPDATE_COUNT = 0
    total_count = len(current_timestamps)
    log('attempting to update %s fxa timestamps' % total_count)
    pool = ThreadPool(8)
    pool.map(update_fxa_records, chunk_fxa_data(current_timestamps))
    pool.close()
    pool.join()
    log('updated %s fxa timestamps' % UPDATE_COUNT)
    set_in_process_files_done()
    statsd.gauge('process_fxa_data.updates', UPDATE_COUNT)
Example #12
def update_fxa_data(current_timestamps):
    """Store the updated timestamps in a local dict, the cache, and SFMC."""
    global UPDATE_COUNT
    UPDATE_COUNT = 0
    total_count = len(current_timestamps)
    log('attempting to update %s fxa timestamps' % total_count)
    pool = ThreadPool(8)
    pool.map(update_fxa_records, chunk_fxa_data(current_timestamps))
    pool.close()
    pool.join()
    log('updated %s fxa timestamps' % UPDATE_COUNT)
    set_in_process_files_done()
    statsd.gauge('process_fxa_data.updates', UPDATE_COUNT)
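ThreadPool is the thread-backed pool tucked away in the stdlib multiprocessing package; map() blocks until every chunk has been processed, so the close()/join() pair just finalizes the workers. A minimal sketch:

from multiprocessing.pool import ThreadPool

pool = ThreadPool(8)
results = pool.map(len, [[1], [1, 2], [1, 2, 3]])  # -> [1, 2, 3]
pool.close()
pool.join()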
Example #13
def send_smses(send_deferred=False, backend=None, limit=None):
    # Get the lock so only one SMS sender runs at a time.
    if send_deferred:
        lock = FileLock('send_sms_deferred')
    else:
        lock = FileLock('send_sms')
    try:
        lock.acquire(LOCK_WAIT_TIMEOUT)
    except AlreadyLocked:
        logger.info('Could not acquire lock.')
        return
    except LockTimeout:
        logger.info('Lock timed out.')
        return

    successes, failures = 0, 0
    try:
        # Get SMSes that need to be sent (deferred or non-deferred)
        if send_deferred:
            to_send = QueuedSMS.objects.filter(priority=PRIORITY_DEFERRED)
        else:
            to_send = QueuedSMS.objects.exclude(priority=PRIORITY_DEFERRED)

        if isinstance(limit, int):
            to_send = to_send[:limit]

        logger.info("Trying to send %i messages." % to_send.count())

        # Send each SMS
        for sms in to_send:
            if backend:
                sms_using = backend
            else:
                sms_using = None if sms.using == '__none__' else sms.using
            if send(sms.to, sms.content, sms.signature, sms_using, sms.reliable):
                # Successfully sent, remove from queue
                logger.info("SMS to %s sent." % sms.to)
                sms.delete()
                successes += 1
            else:
                # Failed to send, defer SMS
                logger.info("SMS to %s failed." % sms.to)
                sms.defer()
                failures += 1
    finally:
        lock.release()
        attempts = successes + failures
        # Fraction of attempted sends that succeeded (1 when nothing was sent).
        statsd.gauge('smsgateway.success_rate',
                     float(successes) / attempts if attempts else 1)
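FileLock here is the lockfile library: acquire() with a non-positive timeout raises AlreadyLocked at once if the lock is held, while a positive timeout raises LockTimeout when it expires. A minimal sketch of the same single-sender pattern:

from lockfile import AlreadyLocked, FileLock, LockTimeout

lock = FileLock('send_sms')
try:
    lock.acquire(timeout=5)   # wait at most 5 seconds
except (AlreadyLocked, LockTimeout):
    pass                      # another sender is running; skip this run
else:
    try:
        pass                  # ... send the queued messages ...
    finally:
        lock.release()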
Example #14
    def handle(self, **options):
        # We get the email addresses of all users that asked a question 2 days
        # ago. Then, all we have to do is send the email address to surveygizmo
        # and it does the rest.
        two_days_ago = date.today() - timedelta(days=2)
        yesterday = date.today() - timedelta(days=1)

        emails = Question.objects.filter(
            created__gte=two_days_ago, created__lt=yesterday
        ).values_list("creator__email", flat=True)

        for email in emails:
            add_email_to_campaign("askers", email)

        statsd.gauge("survey.askers", len(emails))
Example #15
def survey_recent_askers():
    """Add question askers to a surveygizmo campaign to get surveyed."""
    # We get the email addresses of all users that asked a question 2 days
    # ago. Then, all we have to do is send the email address to surveygizmo
    # and it does the rest.
    two_days_ago = date.today() - timedelta(days=2)
    yesterday = date.today() - timedelta(days=1)

    emails = (
        Question.objects
        .filter(created__gte=two_days_ago, created__lt=yesterday)
        .values_list('creator__email', flat=True))

    for email in emails:
        add_email_to_campaign('askers', email)

    statsd.gauge('survey.askers', len(emails))
Example #16
def send_smses(send_deferred=False, backend=None):
    # Get the lock so only one SMS sender runs at a time.
    if send_deferred:
        lock = FileLock('send_sms_deferred')
    else:
        lock = FileLock('send_sms')
    try:
        lock.acquire(LOCK_WAIT_TIMEOUT)
    except AlreadyLocked:
        logger.info('Could not acquire lock.')
        return
    except LockTimeout:
        logger.info('Lock timed out.')
        return

    successes, failures = 0, 0
    try:
        # Get SMSes that need to be sent (deferred or non-deferred)
        if send_deferred:
            to_send = QueuedSMS.objects.filter(priority=PRIORITY_DEFERRED)
        else:
            to_send = QueuedSMS.objects.exclude(priority=PRIORITY_DEFERRED)

        logger.info("Trying to send %i messages." % to_send.count())

        # Send each SMS
        for sms in to_send:
            if backend:
                sms_using = backend
            else:
                sms_using = None if sms.using == '__none__' else sms.using
            if send(sms.to, sms.content, sms.signature, sms_using, sms.reliable):
                # Successfully sent, remove from queue
                logger.info("SMS to %s sent." % sms.to)
                sms.delete()
                successes += 1
            else:
                # Failed to send, defer SMS
                logger.info("SMS to %s failed." % sms.to)
                sms.defer()
                failures += 1
    finally:
        lock.release()
        attempts = successes + failures
        # Fraction of attempted sends that succeeded (1 when nothing was sent).
        statsd.gauge('smsgateway.success_rate',
                     float(successes) / attempts if attempts else 1)
Example #17
def survey_recent_askers():
    """Add question askers to a surveygizmo campaign to get surveyed."""
    # We get the email addresses of all users that asked a question 2 days
    # ago. Then, all we have to do is send the email address to surveygizmo
    # and it does the rest.
    two_days_ago = date.today() - timedelta(days=2)
    yesterday = date.today() - timedelta(days=1)

    emails = (
        Question.objects
        .filter(created__gte=two_days_ago, created__lt=yesterday)
        .values_list('creator__email', flat=True))

    for email in emails:
        add_email_to_campaign('askers', email)

    statsd.gauge('survey.askers', len(emails))
Example #18
def total_hours_estimated_vs_logged():
    for pid, hours in total_hours_estimated_by_project():
        statsd.gauge("projects.%d.hours_estimated" % pid, int(hours))
    for pid, hours in total_hours_logged_by_project():
        statsd.gauge("projects.%d.hours_logged" % pid, int(hours))

    for mid, hours in total_hours_estimated_by_milestone():
        statsd.gauge("milestones.%d.hours_estimated" % mid, int(hours))
    for mid, hours in total_hours_logged_by_milestone():
        statsd.gauge("milestones.%d.hours_logged" % mid, int(hours))
Example #19
def total_hours_estimated_vs_logged():
    for pid, hours in total_hours_estimated_by_project():
        statsd.gauge("projects.%d.hours_estimated" % pid, int(hours))
    for pid, hours in total_hours_logged_by_project():
        statsd.gauge("projects.%d.hours_logged" % pid, int(hours))

    for mid, hours in total_hours_estimated_by_milestone():
        statsd.gauge("milestones.%d.hours_estimated" % mid, int(hours))
    for mid, hours in total_hours_logged_by_milestone():
        statsd.gauge("milestones.%d.hours_logged" % mid, int(hours))
Example #20
def task_boutique_mama_weekly_active():
    """ 精英妈妈连续七天活跃度 """
    ctr_qs = CouponTransferRecord.objects.filter(status=1)

    dt = datetime.datetime.now()
    df = dt - datetime.timedelta(days=7)

    active_elite_mama_values_list = ctr_qs.filter(
        date_field__range=(df, dt)).values_list('coupon_from_mama_id',
                                                'coupon_to_mama_id')

    active_elite_mama_array = set()
    for mama_list in active_elite_mama_values_list:
        active_elite_mama_array.add(mama_list[0])
        active_elite_mama_array.add(mama_list[1])

    statsd.gauge('xiaolumm.boutique.weekly.active_mama_count',
                 len(active_elite_mama_array))
Example #21
def _send_smses(send_deferred=False, backend=None, limit=None):
    # Get the lock so only one SMS sender runs at a time.
    if send_deferred:
        send_lock_name = 'smsgateway_send_sms_deferred'
    else:
        send_lock_name = 'smsgateway_send_sms'

    with Lock(redis=Redis.from_url(settings.SMSGATEWAY_REDIS_URL),
              name='smsgateway-' + send_lock_name,
              blocking_timeout=0):
        successes, failures = 0, 0
        try:
            # Get SMSes that need to be sent (deferred or non-deferred)
            if send_deferred:
                to_send = QueuedSMS.objects.filter(priority=PRIORITY_DEFERRED)
            else:
                to_send = QueuedSMS.objects.exclude(priority=PRIORITY_DEFERRED)

            if isinstance(limit, int):
                to_send = to_send[:limit]

            # Send each SMS
            for sms in to_send:
                if backend:
                    sms_using = backend
                else:
                    sms_using = None if sms.using == '__none__' else sms.using
                if send(sms.to, sms.content, sms.signature, sms_using,
                        sms.reliable):
                    # Successfully sent, remove from queue
                    sms.delete()
                    successes += 1
                else:
                    # Failed to send, defer SMS
                    sms.defer()
                    failures += 1
        finally:
            attempts = successes + failures
            # Fraction of attempted sends that succeeded (1 when nothing was sent).
            statsd.gauge('smsgateway.success_rate',
                         float(successes) / attempts if attempts else 1)
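The Lock context manager is redis-py's distributed lock; with blocking_timeout=0 the with-block raises redis.exceptions.LockError immediately when another worker holds the lock, instead of waiting behind it. A minimal sketch (connection URL omitted):

from redis import Redis
from redis.exceptions import LockError
from redis.lock import Lock

try:
    with Lock(redis=Redis(), name='smsgateway-send', blocking_timeout=0):
        pass   # ... exclusive work ...
except LockError:
    pass       # the lock is held elsewhere; skip this run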
Example #22
def process_common_voice_batch():
    if not settings.COMMON_VOICE_BATCH_PROCESSING:
        return

    updates = CommonVoiceUpdate.objects.filter(ack=False)[
        : settings.COMMON_VOICE_BATCH_CHUNK_SIZE
    ]
    per_user = {}
    for update in updates:
        # last_active_date is effectively when the update was sent, so we can use it for ordering
        data = update.data
        last_active = isoparse(data["last_active_date"])
        if (
            data["email"] in per_user
            and per_user[data["email"]]["last_active"] > last_active
        ):
            continue

        per_user[data["email"]] = {
            "last_active": last_active,
            "data": data,
        }

    for info in per_user.values():
        record_common_voice_update.delay(info["data"])

    for update in updates:
        # do them one at a time to ensure that we don't ack new ones that have
        # come in since we started
        update.ack = True
        update.save()

    statsd.incr("news.tasks.process_common_voice_batch.all_updates", len(updates))
    # delete ack'd updates more than 24 hours old
    when = now() - timedelta(hours=24)
    deleted, _ = CommonVoiceUpdate.objects.filter(ack=True, when__lte=when).delete()
    statsd.incr("news.tasks.process_common_voice_batch.deleted", deleted)
    statsd.gauge(
        "news.tasks.process_common_voice_batch.queue_volume",
        CommonVoiceUpdate.objects.filter(ack=False).count(),
    )
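isoparse is python-dateutil's strict ISO-8601 parser; unlike datetime.fromisoformat on older Python versions, it accepts the trailing Z that services usually emit:

from dateutil.parser import isoparse

isoparse('2020-01-31T12:30:00Z')   # timezone-aware datetime in UTC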
Example #23
def survey_recent_askers():
    """Add question askers to a surveygizmo campaign to get surveyed."""
    if settings.STAGE:
        # Only run this on prod; it doesn't need to run multiple times
        # from different places.
        return

    # We get the email addresses of all users that asked a question 2 days
    # ago. Then, all we have to do is send the email address to surveygizmo
    # and it does the rest.
    two_days_ago = date.today() - timedelta(days=2)
    yesterday = date.today() - timedelta(days=1)

    emails = (
        Question.objects
        .filter(created__gte=two_days_ago, created__lt=yesterday)
        .values_list('creator__email', flat=True))
    for email in emails:
        add_email_to_campaign('askers', email)

    statsd.gauge('survey.askers', len(emails))
Example #24
    def _call_salesforce(self, method, url, **kwargs):
        if self.session_is_expired():
            self.refresh_session()

        kwargs['timeout'] = settings.SFDC_REQUEST_TIMEOUT
        try:
            statsd.incr('news.backends.sfdc.call_salesforce')
            resp = super(RefreshingSFType, self)._call_salesforce(method, url, **kwargs)
        except sfapi.SalesforceExpiredSession:
            statsd.incr('news.backends.sfdc.call_salesforce')
            statsd.incr('news.backends.sfdc.session_expired')
            self.refresh_session()
            resp = super(RefreshingSFType, self)._call_salesforce(method, url, **kwargs)

        if 'sforce-limit-info' in resp.headers:
            try:
                usage, limit = resp.headers['sforce-limit-info'].split('=')[1].split('/')
            except Exception:
                usage = limit = None

            if usage:
                statsd.gauge('news.backends.sfdc.daily_api_used', int(usage), rate=0.5)
                statsd.gauge('news.backends.sfdc.daily_api_limit', int(limit), rate=0.5)
                percentage = float(usage) / float(limit) * 100
                statsd.gauge('news.backends.sfdc.percent_daily_api_used', percentage, rate=0.5)

        return resp
Example #25
    def _call_salesforce(self, method, url, **kwargs):
        if self.session_is_expired():
            self.refresh_session()

        kwargs["timeout"] = settings.SFDC_REQUEST_TIMEOUT
        try:
            statsd.incr("news.backends.sfdc.call_salesforce")
            resp = super()._call_salesforce(method, url, **kwargs)
        except sfapi.SalesforceExpiredSession:
            statsd.incr("news.backends.sfdc.call_salesforce")
            statsd.incr("news.backends.sfdc.session_expired")
            self.refresh_session()
            resp = super()._call_salesforce(method, url, **kwargs)

        if self.api_usage:
            usage = self.api_usage.get("api-usage")
            if usage:
                statsd.gauge(
                    "news.backends.sfdc.daily_api_used", int(usage.used), rate=0.5,
                )
                statsd.gauge(
                    "news.backends.sfdc.daily_api_limit", int(usage.total), rate=0.5,
                )
                percentage = float(usage.used) / float(usage.total) * 100
                statsd.gauge(
                    "news.backends.sfdc.percent_daily_api_used", percentage, rate=0.5,
                )

        return resp
Example #26
    def _call_salesforce(self, method, url, **kwargs):
        if self.session_is_expired():
            self.refresh_session()

        kwargs['timeout'] = settings.SFDC_REQUEST_TIMEOUT
        try:
            statsd.incr('news.backends.sfdc.call_salesforce')
            resp = super(RefreshingSFType,
                         self)._call_salesforce(method, url, **kwargs)
        except sfapi.SalesforceExpiredSession:
            statsd.incr('news.backends.sfdc.call_salesforce')
            statsd.incr('news.backends.sfdc.session_expired')
            self.refresh_session()
            resp = super(RefreshingSFType,
                         self)._call_salesforce(method, url, **kwargs)

        limit_info = resp.headers.get('sforce-limit-info')
        if limit_info:
            usages = sfapi.Salesforce.parse_api_usage(limit_info)
            usage = usages.get('api-usage')
            if usage:
                statsd.gauge('news.backends.sfdc.daily_api_used',
                             int(usage.used),
                             rate=0.5)
                statsd.gauge('news.backends.sfdc.daily_api_limit',
                             int(usage.total),
                             rate=0.5)
                percentage = float(usage.used) / float(usage.total) * 100
                statsd.gauge('news.backends.sfdc.percent_daily_api_used',
                             percentage,
                             rate=0.5)

        return resp
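The Sforce-Limit-Info header carries API usage as plain text, e.g. 'api-usage=18/5000'; parse_api_usage is simple_salesforce's helper for splitting it into named usage tuples. Done by hand, as Example #24 does, the parse reduces to:

header = 'api-usage=18/5000'   # illustrative value
used, total = header.split('=')[1].split('/')
percentage = float(used) / float(total) * 100   # 0.36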
Example #27
def _save_integration_statsd(integration, start_time):
    ms = int((time.time() - start_time) * 1000)
    statsd.incr('core.sync.cnt')
    statsd.gauge('core.sync.runtime_ms', ms)
    statsd.incr('core.sync.integration.cnt')
    statsd.gauge('core.sync.integration.runtime_ms', ms)
    statsd.incr('core.sync.integration.%s.cnt' % integration.service_id)
    statsd.gauge('core.sync.integration.%s.runtime_ms' % integration.service_id, ms)
Example #28
def _send_smses(send_deferred=False, backend=None, limit=None):
    # Get the lock so only one SMS sender runs at a time.
    if send_deferred:
        send_lock_name = 'smsgateway_send_sms_deferred'
    else:
        send_lock_name = 'smsgateway_send_sms'

    with NonBlockingLock.objects.acquire_lock(lock_name=send_lock_name):
        successes, failures = 0, 0
        try:
            # Get SMSes that need to be sent (deferred or non-deferred)
            if send_deferred:
                to_send = QueuedSMS.objects.filter(priority=PRIORITY_DEFERRED)
            else:
                to_send = QueuedSMS.objects.exclude(priority=PRIORITY_DEFERRED)

            if isinstance(limit, int):
                to_send = to_send[:limit]

            # Send each SMS
            for sms in to_send:
                if backend:
                    sms_using = backend
                else:
                    sms_using = None if sms.using == '__none__' else sms.using
                if send(sms.to, sms.content, sms.signature, sms_using, sms.reliable):
                    # Successfully sent, remove from queue
                    sms.delete()
                    successes += 1
                else:
                    # Failed to send, defer SMS
                    sms.defer()
                    failures += 1
        finally:
            attempts = successes + failures
            # Fraction of attempted sends that succeeded (1 when nothing was sent).
            statsd.gauge('smsgateway.success_rate',
                         float(successes) / attempts if attempts else 1)
Example #29
def _save_integration_statsd(integration, start_time):
    ms = int((time.time() - start_time) * 1000)
    statsd.incr('core.sync.cnt')
    statsd.gauge('core.sync.runtime_ms', ms)
    statsd.incr('core.sync.integration.cnt')
    statsd.gauge('core.sync.integration.runtime_ms', ms)
    statsd.incr('core.sync.integration.%s.cnt' % integration.service_id)
    statsd.gauge(
        'core.sync.integration.%s.runtime_ms' % integration.service_id, ms)
Example #30
def estimates_report():
    start = time.time()
    d = item_counts()
    # item counts
    statsd.gauge('items.open.sm', d['open_sm_count'])

    # hour estimates
    statsd.gauge('estimates.sm', d['estimates_sm'])
    statsd.gauge('estimates.non_sm', d['estimates_non_sm'])

    end = time.time()
    statsd.timing('celery.estimates_report', int((end - start) * 1000))
Example #31
def estimates_report():
    start = time.time()
    d = item_counts()
    # item counts
    statsd.gauge("items.open.sm", d["open_sm_count"])

    # hour estimates
    statsd.gauge("estimates.sm", d["estimates_sm"])
    statsd.gauge("estimates.non_sm", d["estimates_non_sm"])

    end = time.time()
    statsd.timing("celery.estimates_report", int((end - start) * 1000))
Example #32
def estimates_report():
    start = time.time()
    d = item_counts()
    # item counts
    statsd.gauge('items.open.sm', d['open_sm_count'])

    # hour estimates
    statsd.gauge('estimates.sm', d['estimates_sm'])
    statsd.gauge('estimates.non_sm', d['estimates_non_sm'])

    end = time.time()
    statsd.timing('celery.estimates_report', int((end - start) * 1000))
Example #33
    def middleware(request):
        cpu_before = time.clock_gettime(time.CLOCK_PROCESS_CPUTIME_ID)
        mem_before = summed(psutil.Process().memory_maps())
        try:
            return get_response(request)
        finally:
            cpu_after = time.clock_gettime(time.CLOCK_PROCESS_CPUTIME_ID)
            statsd.gauge('cpu.{}'.format(get_view_name()),
                         cpu_after - cpu_before)
            mem_after = summed(psutil.Process().memory_maps())
            mem_key_base = 'memory.{}.{{}}'.format(get_view_name())
            for name, after in mem_after.items():
                diff = after - mem_before[name]
                statsd.gauge(mem_key_base.format(name) + '.total', after)
                statsd.gauge(mem_key_base.format(name) + '.change', diff)
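summed() is not part of this excerpt; a plausible sketch of what it must do, totalling each numeric field of psutil's memory_maps() records across all mappings (the available fields vary by platform):

from collections import defaultdict

import psutil

def summed(maps):
    totals = defaultdict(int)
    for mapping in maps:              # one record per mapped region
        for field in mapping._fields:
            if field == 'path':       # skip the only non-numeric field
                continue
            totals[field] += getattr(mapping, field)
    return totals

mem = summed(psutil.Process().memory_maps())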
Example #34
    def _call_salesforce(self, method, url, **kwargs):
        if self.session_is_expired():
            self.refresh_session()

        kwargs['timeout'] = settings.SFDC_REQUEST_TIMEOUT
        try:
            statsd.incr('news.backends.sfdc.call_salesforce')
            resp = super(RefreshingSFType,
                         self)._call_salesforce(method, url, **kwargs)
        except sfapi.SalesforceExpiredSession:
            statsd.incr('news.backends.sfdc.call_salesforce')
            statsd.incr('news.backends.sfdc.session_expired')
            self.refresh_session()
            resp = super(RefreshingSFType,
                         self)._call_salesforce(method, url, **kwargs)

        if 'sforce-limit-info' in resp.headers:
            try:
                usage, limit = resp.headers['sforce-limit-info'].split(
                    '=')[1].split('/')
            except Exception:
                usage = limit = None

            if usage:
                statsd.gauge('news.backends.sfdc.daily_api_used',
                             int(usage),
                             rate=0.5)
                statsd.gauge('news.backends.sfdc.daily_api_limit',
                             int(limit),
                             rate=0.5)
                percentage = float(usage) / float(limit) * 100
                statsd.gauge('news.backends.sfdc.percent_daily_api_used',
                             percentage,
                             rate=0.5)

        return resp
Example #35
def read_stats(sender, instance, created, **kwargs):
    from django_statsd.clients import statsd
    read_count = XlmmMessageRel.objects.filter(
        message=instance.message).count()
    key = "MamaNotificationMessage.%d" % instance.message.id
    statsd.gauge(key, read_count)
Example #36
    def _log_quota(self, quota):
        statsd.gauge('email.quota', quota)
Example #37
def minutes_video():
    print "minutes_video()"
    statsd.gauge("minutes_video", int(minutes_video_stats()))
Example #38
def hourly_s3_usage_report():
    print "hourly_s3_report()"
    (cnt, total) = s3_stats()
    statsd.gauge("s3.total", total)
    statsd.gauge("s3.cnt", cnt)
Example #39
def hours_logged_report():
    start = time.time()
    statsd.gauge("hours.one_week", hours_logged())
    end = time.time()
    statsd.timing('celery.hours_logged_report', int((end - start) * 1000))
Example #40
    def _save_statsd(self, start_time):
        ms = int((time.time() - start_time) * 1000)
        statsd.incr('core.sync.cnt')
        statsd.gauge('core.sync.runtime_ms', ms)
        statsd.incr('core.sync.user.cnt')
        statsd.gauge('core.sync.user.runtime_ms', ms)
Example #41
def item_stats_report():
    start = time.time()
    d = get_item_counts_by_status()
    statsd.gauge("items.total", d['total'])
    statsd.gauge("items.open", d['open'])
    statsd.gauge("items.inprogress", d['inprogress'])
    statsd.gauge("items.resolved", d['resolved'])
    statsd.gauge("items.closed", d['closed'])
    statsd.gauge("items.verified", d['verified'])
    end = time.time()
    statsd.timing('celery.item_stats_report', int((end - start) * 1000))
Example #42
def hours_logged_report():
    start = time.time()
    statsd.gauge("hours.one_week", hours_logged())
    end = time.time()
    statsd.timing('celery.hours_logged_report', int((end - start) * 1000))
Example #43
    def _log_quota(self, quota):
        statsd.gauge('email.quota', quota)
Example #44
def task_transfer_coupon_order_statsd():
    ctr_qs = CouponTransferRecord.objects.filter(status=1, transfer_status=3)
    coupon_qs = UserCoupon.objects.filter(coupon_type=UserCoupon.TYPE_TRANSFER)

    coupon_sale_detail = coupon_qs.aggregate(coupon_sale_num=Count('id'),
                                             coupon_sale_amount=Sum('value'))
    coupon_sale_num = coupon_sale_detail.get('coupon_sale_num') or 0
    coupon_sale_amount = coupon_sale_detail.get('coupon_sale_amount') or 0

    values = coupon_qs.filter(status=UserCoupon.USED).aggregate(
        coupon_used_num=Count('id'), coupon_used_amount=Sum('value'))
    coupon_used_num = values.get('coupon_used_num') or 0
    coupon_used_amount = values.get('coupon_used_amount') or 0

    transfer_details = ctr_qs.filter(
        transfer_type=CouponTransferRecord.OUT_TRANSFER).aggregate(
            transfer_count=Count('id'),
            transfer_nums=Sum('coupon_num'),
            transfer_amounts=Sum(F('coupon_num') * F('coupon_value'),
                                 output_field=FloatField()),
        )

    refund_return_num = ctr_qs.filter(
        transfer_type=CouponTransferRecord.OUT_CASHOUT,
    ).aggregate(
        transfer_amounts=Sum(F('coupon_num') * F('coupon_value'),
                             output_field=FloatField()),
    ).get('transfer_amounts') or 0

    exchg_order_num = ctr_qs.filter(
        transfer_type=CouponTransferRecord.OUT_EXCHG_SALEORDER,
    ).aggregate(
        exchg_amounts=Sum(F('coupon_num') * F('coupon_value'),
                          output_field=FloatField()),
    ).get('exchg_amounts') or 0

    coupon_chained_detail = UserCoupon.objects.filter(
        coupon_type=UserCoupon.TYPE_TRANSFER, is_chained=True).exclude(status=UserCoupon.CANCEL)\
        .aggregate(chained_num=Count('id'), chained_amount=Sum('value'))

    coin_stats = XiaoluCoinLog.objects.values('subject').annotate(
        Sum('amount'))

    dt_str = datetime.datetime.now().strftime('%Y.%m.%d')
    statsd.gauge('xiaolumm.boutique.coupon.sale_num.%s' % dt_str,
                 coupon_sale_num)
    statsd.gauge('xiaolumm.boutique.coupon.sale_amount.%s' % dt_str,
                 coupon_sale_amount)
    statsd.gauge('xiaolumm.boutique.coupon.used_num.%s' % dt_str,
                 coupon_used_num)
    statsd.gauge('xiaolumm.boutique.coupon.used_amount.%s' % dt_str,
                 coupon_used_amount)
    statsd.gauge('xiaolumm.boutique.coupon.refund_over_amount.%s' % dt_str,
                 refund_return_num)
    statsd.gauge('xiaolumm.boutique.coupon.exchg_order_amount.%s' % dt_str,
                 exchg_order_num)
    statsd.gauge('xiaolumm.boutique.coupon.chained_num.%s' % dt_str,
                 coupon_chained_detail.get('chained_num') or 0)
    statsd.gauge('xiaolumm.boutique.coupon.chained_amount.%s' % dt_str,
                 coupon_chained_detail.get('chained_amount') or 0)
    statsd.gauge('xiaolumm.boutique.coupon.transfer_count.%s' % dt_str,
                 transfer_details.get('transfer_count') or 0)
    statsd.gauge('xiaolumm.boutique.coupon.transfer_nums.%s' % dt_str,
                 transfer_details.get('transfer_nums') or 0)
    statsd.gauge('xiaolumm.boutique.coupon.transfer_amounts.%s' % dt_str,
                 transfer_details.get('transfer_amounts') or 0)

    statsd.gauge('xiaolumm.boutique.coupon.sale_num', coupon_sale_num)
    statsd.gauge('xiaolumm.boutique.coupon.sale_amount', coupon_sale_amount)
    statsd.gauge('xiaolumm.boutique.coupon.used_num', coupon_used_num)
    statsd.gauge('xiaolumm.boutique.coupon.used_amount', coupon_used_amount)
    statsd.gauge('xiaolumm.boutique.coupon.refund_over_amount',
                 refund_return_num)
    statsd.gauge('xiaolumm.boutique.coupon.exchg_order_amount',
                 exchg_order_num)
    statsd.gauge('xiaolumm.boutique.coupon.chained_num',
                 coupon_chained_detail.get('chained_num') or 0)
    statsd.gauge('xiaolumm.boutique.coupon.chained_amount',
                 coupon_chained_detail.get('chained_amount') or 0)
    statsd.gauge('xiaolumm.boutique.coupon.transfer_count',
                 transfer_details.get('transfer_count') or 0)
    statsd.gauge('xiaolumm.boutique.coupon.transfer_nums',
                 transfer_details.get('transfer_nums') or 0)
    statsd.gauge('xiaolumm.boutique.coupon.transfer_amounts',
                 transfer_details.get('transfer_amounts') or 0)

    for coin_stat in coin_stats:
        # Sum('amount') can be NULL; guard before converting from cents.
        amount = (coin_stat['amount__sum'] or 0) / 100
        statsd.gauge(
            'xiaolumm.boutique.coin.%s.%s' % (coin_stat['subject'], dt_str),
            amount)
        statsd.gauge('xiaolumm.boutique.coin.%s' % coin_stat['subject'],
                     amount)
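The recurring Sum(F('coupon_num') * F('coupon_value')) pattern multiplies two columns row by row before summing; Django requires an explicit output_field for mixed expressions like this. The core of the pattern, isolated:

from django.db.models import F, FloatField, Sum

totals = ctr_qs.aggregate(
    amounts=Sum(F('coupon_num') * F('coupon_value'),
                output_field=FloatField()))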
Example #45
def nightly_tahoe_report():
    print "nightly_tahoe_report()"
    (cnt, total) = tahoe_stats()
    statsd.gauge("tahoe.total", total)
    statsd.gauge("tahoe.cnt", cnt)
Example #46
def task_boutique_mama_statsd():
    ctr_qs = CouponTransferRecord.objects.filter(status=1)

    dt = datetime.datetime.now()
    elite_mama_count = ctr_qs.filter(
        transfer_status=CouponTransferRecord.DELIVERED).values(
            'coupon_to_mama_id').distinct().count()

    active_elite_mama_count = ctr_qs.filter(
        date_field=dt.date(),
        transfer_status=CouponTransferRecord.DELIVERED).values(
            'coupon_to_mama_id').distinct().count()

    order_mama_count = OrderCarry.objects.filter(
        date_field=dt.date(),
        status__in=(1, 2, 3),
        carry_type__in=(1, 2),
        mama_id__gt=0).values_list('mama_id', flat=True).distinct().count()

    dt_str = dt.strftime('%Y.%m.%d')
    statsd.gauge('xiaolumm.boutique.mama.elite_count.%s' % dt_str,
                 elite_mama_count)
    statsd.gauge('xiaolumm.boutique.mama.active_count.%s' % dt_str,
                 active_elite_mama_count)
    statsd.gauge('xiaolumm.boutique.mama.ordered_count.%s' % dt_str,
                 order_mama_count)

    statsd.gauge('xiaolumm.boutique.mama.elite_count', elite_mama_count)
    statsd.gauge('xiaolumm.boutique.mama.active_count',
                 active_elite_mama_count)
    statsd.gauge('xiaolumm.boutique.mama.ordered_count', order_mama_count)
Example #47
def item_stats_report():
    start = time.time()
    d = get_item_counts_by_status()
    statsd.gauge("items.total", d["total"])
    statsd.gauge("items.open", d["open"])
    statsd.gauge("items.inprogress", d["inprogress"])
    statsd.gauge("items.resolved", d["resolved"])
    statsd.gauge("items.closed", d["closed"])
    statsd.gauge("items.verified", d["verified"])
    end = time.time()
    statsd.timing("celery.item_stats_report", int((end - start) * 1000))
Example #48
    def _record_time(self, request):
        if hasattr(request, "_start_time"):
            ms = int((time.time() - request._start_time) * 1000)
            data = dict(module=request._view_module, name=request._view_name, method=request.method)
            statsd.gauge("view.response_ms.{module}.{name}.{method}".format(**data), ms)
Example #49
    def handle(self, *args, **kwargs):
        all_women = set([w.slug for w in Woman.objects.all()])
        existing = len(all_women)
        statsd.gauge('women.count', existing)
        gc = gspread.login(settings.GOOGLE_AUTH[0],
                           settings.GOOGLE_AUTH[1])
        wks = gc.open_by_url(settings.SPREADSHEET_URL).sheet1
        d = wks.get_all_values()
        spreadsheet_rows = len(d[2:])
        statsd.gauge('spreadsheet.count', spreadsheet_rows)

        if spreadsheet_rows < existing * 0.9:
            # More than a 10% drop: assume someone messed with
            # the spreadsheet, so bail.
            return
        if spreadsheet_rows > existing * 1.1:
            # Likewise, a more than 10% increase in the last hour
            # probably means trouble.
            return

        seen = set([])
        for row in d[2:]:
            name = row[0]
            if name == '':
                continue
            url = row[1]
            twitter = row[2]
            location = row[3]
            games = row[4]
            bio = row[5]
            skills = row[6]
            talks = row[7]
            key = wkey(twitter, name)
            if key == '':
                continue
            r = Woman.objects.filter(slug=key)
            if r.exists():
                woman = r[0]
                woman.name = name
                woman.url = url
                woman.twitter = twitter
                woman.location = location
                woman.bio = bio
                woman.games = games
                woman.talks = talks
                woman.skills = skills
                woman.save()
                print "UPDATED %s" % key
            else:
                Woman.objects.create(
                    slug=key,
                    name=name, url=url, twitter=twitter,
                    location=location, bio=bio,
                    games=games, talks=talks,
                    skills=skills,
                    )
                print "ADDED %s" % key
            seen.add(key)
        to_delete = all_women - seen
        for slug in to_delete:
            print("DELETING %s" % slug)
            Woman.objects.filter(slug=slug).delete()
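gspread.login(email, password) dates this snippet; it was removed from gspread long ago. Current gspread versions authenticate with a service-account JSON file instead (filename and URL below are placeholders):

import gspread

gc = gspread.service_account(filename='service_account.json')
wks = gc.open_by_url('https://docs.google.com/spreadsheets/d/...').sheet1
rows = wks.get_all_values()   # list of rows, each a list of cell strings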
Example #50
def user_stats():
    active_users = User.objects.filter(status='active', grp=False).count()
    claimed = Claim.objects.all().count()
    statsd.gauge('users.active', active_users)
    statsd.gauge('users.claimed', claimed)
Example #51
def operations_report():
    print "operations_report()"
    r = operation_count_by_status()
    statsd.gauge("operations.failed", r['failed'])
    statsd.gauge("operations.complete", r['complete'])
    statsd.gauge("operations.submitted", r['submitted'])
    statsd.gauge("operations.inprogress", r['in progress'])
    statsd.gauge("operations.enqueued", r['enqueued'])
    statsd.gauge("operations.total", sum(r.values()))
Example #52
    def _save_statsd(self, start_time):
        ms = int((time.time() - start_time) * 1000)
        statsd.incr('core.sync.cnt')
        statsd.gauge('core.sync.runtime_ms', ms)
        statsd.incr('core.sync.user.cnt')
        statsd.gauge('core.sync.user.runtime_ms', ms)
Example #53
    def _set(self, key, value, timeout=None):
        statsd.gauge("cache.l10n.entries", len(self._cache), 0.1)
        super(L10nCache, self)._set(key, value, timeout)
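The third positional argument to gauge() is the statsd sample rate: at 0.1 only about one call in ten actually sends a UDP packet, which keeps this hot cache path from flooding the statsd daemon:

statsd.gauge('cache.l10n.entries', 1234, 0.1)   # sampled at 10%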
Example #54
def item_stats_report():
    start = time.time()
    d = get_item_counts_by_status()
    statsd.gauge("items.total", d['total'])
    statsd.gauge("items.open", d['open'])
    statsd.gauge("items.inprogress", d['inprogress'])
    statsd.gauge("items.resolved", d['resolved'])
    statsd.gauge("items.closed", d['closed'])
    statsd.gauge("items.verified", d['verified'])
    end = time.time()
    statsd.timing('celery.item_stats_report', int((end - start) * 1000))
Example #55
def user_stats():
    active_users = User.objects.filter(status='active', grp=False).count()
    claimed = Claim.objects.all().count()
    statsd.gauge('users.active', active_users)
    statsd.gauge('users.claimed', claimed)
Example #56
                            smtplib.SMTPAuthenticationError,
                            smtplib.SMTPDataError) as err:
                        # Sending failed, defer message
                        message.defer()
                        logger.info('Message deferred due to failure: %s' % err)
                        MessageLog.objects.log(message, RESULT_MAPPING['failure'], log_message=str(err))
                        failures += 1
                    else:
                        # Sending succeeded
                        MessageLog.objects.log(message, RESULT_MAPPING['success'])
                        message.delete()
                        successes += 1
                total += 1
        finally:
            attempts = successes + failures
            # Fraction of processed messages that succeeded (1 when the queue was empty).
            statsd.gauge('mailer.success_rate',
                         float(successes) / attempts if attempts else 1)


def send_loop():
    """
    Loop indefinitely, checking queue at intervals of EMPTY_QUEUE_SLEEP and
    sending messages if any are on queue.
    """

    while True:
        while Message.objects.count() == 0:
            time.sleep(EMPTY_QUEUE_SLEEP)
        send_all()
Example #57
    def _set(self, key, value, timeout=None):
        statsd.gauge('cache.l10n.entries', len(self._cache), 0.1)
        super(L10nCache, self)._set(key, value, timeout)