Code example #1
    def stop(self, cancelled=False):
        statsd.incr("timer.stopped")
        if self.is_stopped():
            raise ValueError(u"The timer has already been stopped")

        last_log = self.log_set.order_by("created").last()  # get last log
        if not last_log and not cancelled:
            raise ValueError(u"You can't stop a timer without a log")

        # stop and update this model
        self.stopped = postgres_now()

        self.cancelled = cancelled
        if last_log:
            self.linked_case = last_log.case

        self.save()
        if self.linked_case:
            # update billable time on case
            cursor = connection.cursor()
            cursor.execute(
                """
                select sum(ceiling(EXTRACT(epoch FROM a.stopped-a.created)))
                    from timer_timer as a
                    where
                    a.cancelled = false and
                    a.stopped is not null and a.linked_case_id = %s""",
                [self.linked_case.id],
            )
            total_billable_time, = cursor.fetchone()
            if total_billable_time:
                self.linked_case.billable_time = total_billable_time
                if total_billable_time:
                    statsd.timing("timer.total_time", total_billable_time * 1000)
                self.linked_case.save(update_fields=["billable_time"])
Code example #2
File: models.py Project: doismellburning/cla_backend
    def stop(self, cancelled=False):
        statsd.incr('timer.stopped')
        if self.is_stopped():
            raise ValueError(u'The timer has already been stopped')

        last_log = self.log_set.order_by('created').last()  # get last log
        if not last_log and not cancelled:
            raise ValueError(u'You can\'t stop a timer without a log')

        # stop and update this model
        self.stopped = timezone.now()  # stop
        self.cancelled = cancelled
        if last_log:
            self.linked_case = last_log.case

        self.save()
        if self.linked_case:
            # update billable time on case
            cursor = connection.cursor()
            cursor.execute('''
                select sum(ceiling(EXTRACT(epoch FROM a.stopped-a.created)))
                    from timer_timer as a
                    where
                    a.cancelled = false and
                    a.stopped is not null and a.linked_case_id = %s''', [self.linked_case.id])
            total_billable_time, = cursor.fetchone()
            if total_billable_time:
                self.linked_case.billable_time = total_billable_time
                if total_billable_time:
                    statsd.timing('timer.total_time', total_billable_time * 1000)
                self.linked_case.save(update_fields=['billable_time'])
Code example #3
File: views.py Project: Witia1/webpay
def trans_start_url(request):
    """
    JSON handler to get the Bango payment URL to start a transaction.
    """
    trans = None
    trans_id = request.session.get('trans_id')
    data = {'url': None, 'status': None}

    if not trans_id:
        log.error('trans_start_url(): no transaction ID in session')
        return http.HttpResponseBadRequest()
    try:
        statsd.incr('purchase.payment_time.retry')
        with statsd.timer('purchase.payment_time.get_transaction'):
            trans = solitude.get_transaction(trans_id)
        data['status'] = trans['status']
        data['provider'] = constants.PROVIDERS_INVERTED[trans['provider']]
    except ObjectDoesNotExist:
        log.error('trans_start_url() transaction does not exist: {t}'
                  .format(t=trans_id))

    if data['status'] == constants.STATUS_PENDING:
        statsd.incr('purchase.payment_time.success')
        payment_start = request.session.get('payment_start', False)
        if payment_start:
            delta = int((time.time() - float(payment_start)) * 1000)
            statsd.timing('purchase.payment_time.duration', delta)
        url = get_payment_url(trans)
        log.info('async call got payment URL {url} for trans {tr}'
                 .format(url=url, tr=trans))
        data['url'] = url
    return data
Code example #4
File: runner.py Project: Hugodby/botbot-web
    def listen(self):
        """Listens for incoming messages on the Redis queue"""
        while 1:
            val = None
            try:
                val = self.bot_bus.blpop('q', 1)

                # Track q length
                ql = self.bot_bus.llen('q')
                statsd.gauge(".".join(["plugins", "q"]), ql)

                if val:
                    _, val = val
                    LOG.debug('Received: %s', val)
                    line = Line(json.loads(val), self)

                    # Calculate the transport latency between go and the plugins.
                    delta = datetime.utcnow().replace(tzinfo=utc) - line._received
                    statsd.timing(".".join(["plugins", "latency"]),
                                 delta.total_seconds() * 1000)

                    self.dispatch(line)
            except Exception:
                LOG.error("Line Dispatch Failed", exc_info=True, extra={
                    "line": val
                })
Code example #5
File: tasks.py Project: akatsoulas/kitsune
def _rebuild_kb_chunk(data):
    """Re-render a chunk of documents.

    Note: Don't use host components when making redirects to wiki pages; those
    redirects won't be auto-pruned when they're 404s.

    """
    log.info('Rebuilding %s documents.' % len(data))

    pin_this_thread()  # Stick to master.

    messages = []
    start = time.time()
    for pk in data:
        message = None
        try:
            document = Document.objects.get(pk=pk)

            # If we know a redirect link to be broken (i.e. if it looks like a
            # link to a document but the document isn't there), log an error:
            url = document.redirect_url()
            if (url and points_to_document_view(url) and
                    not document.redirect_document()):
                log.warn('Invalid redirect document: %d' % pk)

            html = document.parse_and_calculate_links()
            if document.html != html:
                # We are calling update here so we only update the html
                # column instead of all of them. This bypasses post_save
                # signal handlers like the one that triggers reindexing.
                # See bug 797038 and bug 797352.
                Document.objects.filter(pk=pk).update(html=html)
                statsd.incr('wiki.rebuild_chunk.change')
            else:
                statsd.incr('wiki.rebuild_chunk.nochange')
        except Document.DoesNotExist:
            message = 'Missing document: %d' % pk
        except Revision.DoesNotExist:
            message = 'Missing revision for document: %d' % pk
        except ValidationError as e:
            message = 'ValidationError for %d: %s' % (pk, e.messages[0])
        except SlugCollision:
            message = 'SlugCollision: %d' % pk
        except TitleCollision:
            message = 'TitleCollision: %d' % pk

        if message:
            log.debug(message)
            messages.append(message)
    d = time.time() - start
    statsd.timing('wiki.rebuild_chunk', int(round(d * 1000)))

    if messages:
        subject = ('[%s] Exceptions raised in _rebuild_kb_chunk()' %
                   settings.PLATFORM_NAME)
        mail_admins(subject=subject, message='\n'.join(messages))
    if not transaction.get_connection().in_atomic_block:
        transaction.commit()

    unpin_this_thread()  # Not all tasks need to use the master.
Code example #6
File: api.py Project: ccarvalheira/wsep
    def get_detail(self, request, **kwargs):
        """
        Returns a single serialized resource.

        Calls ``cached_obj_get/obj_get`` to provide the data, then handles that result
        set and serializes it.

        Should return a HttpResponse (200 OK).
        """
        basic_bundle = self.build_bundle(request=request)

        try:
            obj = self.cached_obj_get(bundle=basic_bundle, **self.remove_api_resource_names(kwargs))
        except ObjectDoesNotExist:
            return http.HttpNotFound()
        except MultipleObjectsReturned:
            return http.HttpMultipleChoices("More than one resource is found at this URI.")

        bundle = self.build_bundle(obj=obj, request=request)
        bundle = self.full_dehydrate(bundle)
        #inserted here to display the datapoints for this particular dataset
        try:
            bundle.request.GET["no_points"]
        except KeyError:
            bundle = self.dehydrant_detail(bundle)
        ###
        bundle = self.alter_detail_data_to_serialize(request, bundle)
        statsd.timing("dataset_detail_read_time", int((time.time()-bundle.request._start_time)*1000))
        return self.create_response(request, bundle)
Code example #7
File: views.py Project: ShimaYasuhiro/webpay
def trans_start_url(request):
    """
    JSON handler to get the Bango payment URL to start a transaction.
    """
    try:
        statsd.incr('purchase.payment_time.retry')
        with statsd.timer('purchase.payment_time.get_transaction'):
            trans = solitude.get_transaction(request.session['trans_id'])
    except ObjectDoesNotExist:
        log.error('trans_start_url() transaction does not exist: {t}'
                  .format(t=request.session['trans_id']))
        trans = {'status': None}

    data = {'url': None, 'status': trans['status']}
    if trans['status'] == constants.STATUS_PENDING:
        statsd.incr('purchase.payment_time.success')
        payment_start = request.session.get('payment_start', False)
        if payment_start:
            delta = int((time.time() - float(payment_start)) * 1000)
            statsd.timing('purchase.payment_time.duration', delta)
        url = get_payment_url(trans)
        log.info('async call got payment URL {url} for trans {tr}'
                 .format(url=url, tr=trans))
        data['url'] = url
    return data
Code example #8
File: models.py Project: justinpotts/addons-server
    def from_upload(cls, upload, addon, platforms, send_signal=True,
                    source=None, is_beta=False):
        from olympia.addons.models import AddonFeatureCompatibility

        data = utils.parse_addon(upload, addon)
        try:
            license = addon.versions.latest().license_id
        except Version.DoesNotExist:
            license = None
        v = cls.objects.create(
            addon=addon,
            version=data['version'],
            license_id=license,
            source=source
        )
        log.info('New version: %r (%s) from %r' % (v, v.id, upload))

        # Update the add-on e10s compatibility since we're creating a new
        # version that may change that.
        e10s_compatibility = data.get('e10s_compatibility')
        if e10s_compatibility is not None:
            feature_compatibility = (
                AddonFeatureCompatibility.objects.get_or_create(addon=addon)[0]
            )
            feature_compatibility.update(e10s=e10s_compatibility)

        AV = ApplicationsVersions
        for app in data.get('apps', []):
            AV(version=v, min=app.min, max=app.max,
               application=app.id).save()
        if addon.type == amo.ADDON_SEARCH:
            # Search extensions are always for all platforms.
            platforms = [amo.PLATFORM_ALL.id]
        else:
            platforms = cls._make_safe_platform_files(platforms)

        for platform in platforms:
            File.from_upload(upload, v, platform, parse_data=data,
                             is_beta=is_beta)

        v.disable_old_files()
        # After the upload has been copied to all platforms, remove the upload.
        storage.delete(upload.path)
        if send_signal:
            version_uploaded.send(sender=v)

        # Track the time it took from first upload through validation
        # (and whatever else) until a version was created.
        upload_start = utc_millesecs_from_epoch(upload.created)
        now = datetime.datetime.now()
        now_ts = utc_millesecs_from_epoch(now)
        upload_time = now_ts - upload_start

        log.info('Time for version {version} creation from upload: {delta}; '
                 'created={created}; now={now}'
                 .format(delta=upload_time, version=v,
                         created=upload.created, now=now))
        statsd.timing('devhub.version_created_from_upload', upload_time)

        return v
Code example #9
File: models.py Project: diox/solitude
def time_status_change(sender, **kwargs):
    # There's no status change if the transaction was just created.
    if kwargs.get('raw', False) or kwargs.get('created', False):
        return

    obj = kwargs['instance']
    status = constants.STATUSES_INVERTED[obj.status]
    statsd.timing('transaction.status.{0}'.format(status),
                  (obj.modified - obj.created).seconds)
Code example #10
File: views.py Project: AdStack/django-statsd
def process_key(start, key, value):
    if 'timing' in key:
        # Some values will be zero. We want the output of that to
        # be zero relative to start.
        value = max(start, int(value)) - start
        statsd.timing(key, value)
    elif key == 'window.performance.navigation.type':
        statsd.incr('%s.%s' % (key, types[value]))
    elif key == 'window.performance.navigation.redirectCount':
        statsd.incr(key, int(value))
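
The `types` mapping used above is not part of this snippet. Per the W3C Navigation Timing spec, `window.performance.navigation.type` takes the values 0 (navigate), 1 (reload), 2 (back/forward) and 255 (reserved), so a mapping along these lines is assumed (hypothetical; the keys may need to be strings depending on how the reported values are parsed):

# Hypothetical mapping assumed by process_key(); values follow the
# W3C Navigation Timing spec for window.performance.navigation.type.
types = {
    0: 'navigate',
    1: 'reload',
    2: 'back_forward',
    255: 'reserved',
}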
Code example #11
def time_f(fun, metric, *args, **kwargs):
    start = time.time()
    ret = fun(*args, **kwargs)
    lapse = int((time.time() - start) * 1000)
    if STATSD:
        statsd.timing(metric, lapse)
    else:
        log = logging.getLogger(metric)
        log.info('timing: %d', lapse)
    return ret
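
A minimal usage sketch for the helper above; the wrapped function and metric name here are hypothetical, not from the source:

# Hypothetical usage: time an arbitrary callable under a metric name.
def expensive_lookup(user_id):
    return {'user_id': user_id}

result = time_f(expensive_lookup, 'cache.expensive_lookup', 42)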
Code example #12
File: tasks.py Project: coati-00/dmt
def item_stats_report():
    start = time.time()
    d = get_item_counts_by_status()
    statsd.gauge("items.total", d['total'])
    statsd.gauge("items.open", d['open'])
    statsd.gauge("items.inprogress", d['inprogress'])
    statsd.gauge("items.resolved", d['resolved'])
    statsd.gauge("items.closed", d['closed'])
    statsd.gauge("items.verified", d['verified'])
    end = time.time()
    statsd.timing('celery.item_stats_report', int((end - start) * 1000))
Code example #13
File: tasks.py Project: nikolas/dmt
def item_stats_report():
    start = time.time()
    d = get_item_counts_by_status()
    statsd.gauge("items.total", d["total"])
    statsd.gauge("items.open", d["open"])
    statsd.gauge("items.inprogress", d["inprogress"])
    statsd.gauge("items.resolved", d["resolved"])
    statsd.gauge("items.closed", d["closed"])
    statsd.gauge("items.verified", d["verified"])
    end = time.time()
    statsd.timing("celery.item_stats_report", int((end - start) * 1000))
Code example #14
File: views.py Project: KevinBrolly/django-statsd
def _process_summaries(start, keys):
    calculated = {
        "network": keys["window.performance.timing.responseStart"] - start,
        "app": keys["window.performance.timing.domLoading"] - keys["window.performance.timing.responseStart"],
        "dom": keys["window.performance.timing.domComplete"] - keys["window.performance.timing.domLoading"],
        "rendering": keys["window.performance.timing.loadEventEnd"] - keys["window.performance.timing.domComplete"],
    }
    for k, v in calculated.items():
        # If loadEventEnd still does not get populated, we could end up with
        # negative numbers here.
        statsd.timing("window.performance.calculated.%s" % k, max(v, 0))
Code example #15
File: tasks.py Project: mozmar/basket
def snitch(start_time=None):
    if start_time is None:
        snitch.delay(time())
        return

    snitch_id = settings.SNITCH_ID
    totalms = int((time() - start_time) * 1000)
    statsd.timing('news.tasks.snitch.timing', totalms)
    requests.post('https://nosnch.in/{}'.format(snitch_id), data={
        'm': totalms,
    })
Code example #16
File: tasks.py Project: coati-00/dmt
def estimates_report():
    start = time.time()
    d = item_counts()
    # item counts
    statsd.gauge('items.open.sm', d['open_sm_count'])

    # hour estimates
    statsd.gauge('estimates.sm', d['estimates_sm'])
    statsd.gauge('estimates.non_sm', d['estimates_non_sm'])

    end = time.time()
    statsd.timing('celery.estimates_report', int((end - start) * 1000))
Code example #17
File: celery.py Project: Awingu/django-statsd
def on_task_postrun(sender=None, task_id=None, task=None, **kwds):
    """
    Handle Celery ``task_postrun`` signals.
    """
    # Increase statsd counter.
    statsd.incr('celery.%s.done' % task.name)

    # Log duration.
    start_time = _task_start_times.pop(task_id, False)
    if start_time:
        ms = int((time.time() - start_time) * 1000)
        statsd.timing('celery.%s.runtime' % task.name, ms)
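
The `_task_start_times` dict read above would typically be populated by a matching ``task_prerun`` handler. A hedged sketch of such a counterpart follows (the handler name and start counter are assumptions, not taken from the source):

# Hypothetical task_prerun counterpart that records start times by task_id.
import time

_task_start_times = {}

def on_task_prerun(sender=None, task_id=None, task=None, **kwds):
    """
    Handle Celery ``task_prerun`` signals.
    """
    # Increase statsd counter.
    statsd.incr('celery.%s.start' % task.name)

    # Remember when the task started so task_postrun can compute the runtime.
    _task_start_times[task_id] = time.time()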
Code example #18
File: search.py Project: bobsilverberg/zamboni
    def raw(self):
        qs = self._build_query()
        es = get_es()
        try:
            with statsd.timer('search.es.timer') as timer:
                hits = es.search(qs, self.index, self.type._meta.db_table)
        except Exception:
            log.error(qs)
            raise
        statsd.timing('search.es.took', hits['took'])
        log.debug('[%s] [%s] %s' % (hits['took'], timer.ms, qs))
        return hits
Code example #19
File: tasks.py Project: nikolas/dmt
def estimates_report():
    start = time.time()
    d = item_counts()
    # item counts
    statsd.gauge("items.open.sm", d["open_sm_count"])

    # hour estimates
    statsd.gauge("estimates.sm", d["estimates_sm"])
    statsd.gauge("estimates.non_sm", d["estimates_non_sm"])

    end = time.time()
    statsd.timing("celery.estimates_report", int((end - start) * 1000))
Code example #20
File: models.py Project: peterkinalex/olympia
    def from_upload(cls, upload, addon, platforms, send_signal=True,
                    source=None, is_beta=False):
        data = utils.parse_addon(upload, addon)
        try:
            license = addon.versions.latest().license_id
        except Version.DoesNotExist:
            license = None
        max_len = cls._meta.get_field_by_name('_developer_name')[0].max_length
        developer = data.get('developer_name', '')[:max_len]
        v = cls.objects.create(
            addon=addon,
            version=data['version'],
            license_id=license,
            _developer_name=developer,
            source=source
        )
        log.info('New version: %r (%s) from %r' % (v, v.id, upload))

        AV = ApplicationsVersions
        for app in data.get('apps', []):
            AV(version=v, min=app.min, max=app.max,
               application=app.id).save()
        if addon.type == amo.ADDON_SEARCH:
            # Search extensions are always for all platforms.
            platforms = [amo.PLATFORM_ALL.id]
        else:
            platforms = cls._make_safe_platform_files(platforms)

        for platform in platforms:
            File.from_upload(upload, v, platform, parse_data=data,
                             is_beta=is_beta)

        v.disable_old_files()
        # After the upload has been copied to all platforms, remove the upload.
        storage.delete(upload.path)
        if send_signal:
            version_uploaded.send(sender=v)

        # Track the time it took from first upload through validation
        # (and whatever else) until a version was created.
        upload_start = utc_millesecs_from_epoch(upload.created)
        now = datetime.datetime.now()
        now_ts = utc_millesecs_from_epoch(now)
        upload_time = now_ts - upload_start

        log.info('Time for version {version} creation from upload: {delta}; '
                 'created={created}; now={now}'
                 .format(delta=upload_time, version=v,
                         created=upload.created, now=now))
        statsd.timing('devhub.version_created_from_upload', upload_time)

        return v
Code example #21
File: tasks.py Project: albre2252/zamboni
    def on_postrun(self, sender, **kw):
        # sender is the task object. task_id in here.
        pending = self.redis.hincrby(self.pending, sender.name, -1)
        # Clamp pending at 0. Tasks could be coming in before we started
        # tracking.
        if pending < 0:
            self.redis.hset(self.pending, sender.name, 0)
        self.redis.hincrby(self.run, sender.name, 1)

        start = self.redis.hget(self.timer, kw['task_id'])
        if start:
            t = (time.time() - float(start)) * 1000
            statsd.timing('tasks.%s' % sender.name, int(t))
Code example #22
File: views.py Project: AdStack/django-statsd
def _process_summaries(start, keys):
    calculated = {
        'network': keys['window.performance.timing.responseStart'] - start,
        'app': keys['window.performance.timing.domLoading'] -
               keys['window.performance.timing.responseStart'],
        'dom': keys['window.performance.timing.domComplete'] -
               keys['window.performance.timing.domLoading'],
        'rendering': keys['window.performance.timing.loadEventEnd'] -
                     keys['window.performance.timing.domComplete'],
    }
    for k, v in calculated.items():
        # If loadEventEnd still does not get populated, we could end up with
        # negative numbers here.
        statsd.timing('window.performance.calculated.%s' % k, max(v, 0))
Code example #23
File: celery.py Project: peterkinalex/olympia
def track_task_run_time(task_id, task, **kw):
    timer = TaskTimer()
    start_time = cache.get(timer.cache_key(task_id))
    if start_time is None:
        log.info('could not track task run time; id={id}; name={name}; '
                 'current_dt={current_dt}'
                 .format(id=task_id, name=task.name,
                         current_dt=timer.current_datetime))
    else:
        run_time = timer.current_epoch_ms - start_time
        log.info('tracking task run time; id={id}; name={name}; '
                 'run_time={run_time}; current_dt={current_dt}'
                 .format(id=task_id, name=task.name,
                         current_dt=timer.current_datetime,
                         run_time=run_time))
        statsd.timing('tasks.{}'.format(task.name), run_time)
        cache.delete(timer.cache_key(task_id))
Code example #24
File: logic.py Project: vikingco/django-triggers
def process_triggers(use_statsd=False, function_logger=None):
    """
    Process all triggers that are ready for processing.

    :param bool use_statsd: whether to use_statsd
    :return: None
    """
    process_async = getattr(settings, 'DJTRIGGERS_ASYNC_HANDLING', False)

    # Get all triggers that need to be processed
    for model in apps.get_models():
        # Check whether it's a trigger
        if not issubclass(model, Trigger) or getattr(model, 'typed', None) is None or isabstract(model):
            continue

        # Get all triggers of this type that need to be processed
        triggers = model.objects.filter(Q(process_after__isnull=True) | Q(process_after__lt=timezone.now()),
                                        date_processed__isnull=True)

        # Process each trigger
        for trigger in triggers:
            try:
                # Process the trigger, either synchronously or in a Celery task
                if process_async:
                    process_trigger.apply_async((trigger.id, trigger._meta.app_label, trigger.__class__.__name__),
                                                {'use_statsd': use_statsd},
                                                max_retries=getattr(settings, 'DJTRIGGERS_CELERY_TASK_MAX_RETRIES', 0))
                else:
                    trigger.process()

                # Send stats to statsd if necessary
                if use_statsd:
                    from django_statsd.clients import statsd
                    statsd.incr('triggers.{}.processed'.format(trigger.trigger_type))
                    if trigger.date_processed and trigger.process_after:
                        statsd.timing('triggers.{}.process_delay_seconds'.format(trigger.trigger_type),
                                      (trigger.date_processed - trigger.process_after).total_seconds())
            # The trigger didn't need processing yet
            except ProcessLaterError:
                pass
            # The trigger raised an (expected) error while processing
            except ProcessError:
                pass
            # In case a trigger got removed (manually or some process), deal with it
            except Trigger.DoesNotExist as e:
                logger.info(e)
Code example #25
File: models.py Project: eviljeff/olympia
    def get_localepicker(self):
        """
        For a file that is part of a language pack, extract
        the chrome/localepicker.properties file and return as
        a string.
        """
        start = time.time()
        zip = SafeZip(self.file_path, validate=False)

        try:
            is_valid = zip.is_valid()
        except (zipfile.BadZipfile, IOError):
            is_valid = False

        if not is_valid:
            return ''

        try:
            manifest = zip.read('chrome.manifest')
        except KeyError as e:
            log.info('No file named: chrome.manifest in file: %s' % self.pk)
            return ''

        res = self._get_localepicker.search(manifest)
        if not res:
            log.error('Locale browser not in chrome.manifest: %s' % self.pk)
            return ''

        try:
            p = res.groups()[1]
            if 'localepicker.properties' not in p:
                p = os.path.join(p, 'localepicker.properties')
            res = zip.extract_from_manifest(p)
        except (zipfile.BadZipfile, IOError) as e:
            log.error('Error unzipping: %s, %s in file: %s' % (p, e, self.pk))
            return ''
        except (ValueError, KeyError) as e:
            log.error('No file named: %s in file: %s' % (e, self.pk))
            return ''

        end = time.time() - start
        log.info('Extracted localepicker file: %s in %.2fs' %
                 (self.pk, end))
        statsd.timing('files.extract.localepicker', (end * 1000))
        return res
Code example #26
File: views.py Project: Witia1/webpay
def wait_to_start(request):
    """
    Wait until the transaction is in a ready state.

    The transaction was started previously during the buy flow in the
    background from webpay.pay.tasks.

    Serve JS that polls for transaction state.
    When ready, redirect to the Bango payment URL using
    the generated billing configuration ID.
    """
    trans_id = request.session.get('trans_id', None)
    if not trans_id:
        # This seems like a serious problem but maybe there is just a race
        # condition. If we see a lot of these in the logs it means the
        # payment will never complete so we should keep an eye on it.
        log.error('wait_to_start() session trans_id {t} was None'
                  .format(t=trans_id))
    try:
        statsd.incr('purchase.payment_time.retry')
        with statsd.timer('purchase.payment_time.get_transation'):
            trans = solitude.get_transaction(trans_id)
    except ObjectDoesNotExist:
        trans = {'status': None}

    if trans['status'] in constants.STATUS_ENDED:
        statsd.incr('purchase.payment_time.failure')
        log.exception('Attempt to restart finished transaction {0} '
                      'with status {1}'.format(trans_id, trans['status']))
        return system_error(request, code=msg.TRANS_ENDED)

    if trans['status'] == constants.STATUS_PENDING:
        statsd.incr('purchase.payment_time.success')
        payment_start = request.session.get('payment_start', False)
        if payment_start:
            delta = int((time.time() - float(payment_start)) * 1000)
            statsd.timing('purchase.payment_time.duration', delta)
        # Dump any messages so we don't show them later.
        clear_messages(request)
        # The transaction is ready; no need to wait for it.
        url = get_payment_url(trans)
        log.info('immediately redirecting to payment URL {url} '
                 'for trans {tr}'.format(url=url, tr=trans))
        return http.HttpResponseRedirect(url)
    return render(request, 'pay/wait-to-start.html')
Code example #27
    def raw(self):
        build_body = self._build_query()

        es = get_es()
        try:
            with statsd.timer('search.es.timer') as timer:
                hits = es.search(
                    body=build_body,
                    index=self.index,
                    doc_type=self.type._meta.db_table,
                )
        except Exception:
            log.error(build_body)
            raise

        statsd.timing('search.es.took', hits['took'])
        log.debug('[%s] [%s] %s' % (hits['took'], timer.ms, build_body))
        return hits
Code example #28
File: search.py Project: eviljeff/olympia
    def raw(self):
        build_body = self._build_query()

        es = get_es()
        try:
            with statsd.timer('search.es.timer') as timer:
                hits = es.search(
                    body=build_body,
                    index=self.index,
                    doc_type=self.type._meta.db_table,
                )
        except Exception:
            log.error(build_body)
            raise

        statsd.timing('search.es.took', hits['took'])
        log.info('[%s] [%s] %s' % (hits['took'], timer.ms, build_body))
        return hits
Code example #29
File: views.py Project: fgallina/django-statsd
def _process_summaries(start, keys):
    calculated = {
        'network':
        keys['window.performance.timing.responseStart'] - start,
        'app':
        keys['window.performance.timing.domLoading'] -
        keys['window.performance.timing.responseStart'],
        'dom':
        keys['window.performance.timing.domComplete'] -
        keys['window.performance.timing.domLoading'],
        'rendering':
        keys['window.performance.timing.loadEventEnd'] -
        keys['window.performance.timing.domComplete'],
    }
    for k, v in calculated.items():
        # If loadEventEnd still does not get populated, we could end up with
        # negative numbers here.
        statsd.timing('window.performance.calculated.%s' % k, max(v, 0))
Code example #30
    def get_localepicker(self):
        """
        For a file that is part of a language pack, extract
        the chrome/localepicker.properties file and return as
        a string.
        """
        start = time.time()
        zip = SafeZip(self.file_path, validate=False)

        try:
            is_valid = zip.is_valid()
        except (zipfile.BadZipfile, IOError):
            is_valid = False

        if not is_valid:
            return ''

        try:
            manifest = zip.read('chrome.manifest')
        except KeyError as e:
            log.info('No file named: chrome.manifest in file: %s' % self.pk)
            return ''

        res = self._get_localepicker.search(manifest)
        if not res:
            log.error('Locale browser not in chrome.manifest: %s' % self.pk)
            return ''

        try:
            p = res.groups()[1]
            if 'localepicker.properties' not in p:
                p = os.path.join(p, 'localepicker.properties')
            res = zip.extract_from_manifest(p)
        except (zipfile.BadZipfile, IOError) as e:
            log.error('Error unzipping: %s, %s in file: %s' % (p, e, self.pk))
            return ''
        except (ValueError, KeyError) as e:
            log.error('No file named: %s in file: %s' % (e, self.pk))
            return ''

        end = time.time() - start
        log.info('Extracted localepicker file: %s in %.2fs' % (self.pk, end))
        statsd.timing('files.extract.localepicker', (end * 1000))
        return res
Code example #31
    def wrapped(self, *args, **kwargs):
        start_time = kwargs.pop('start_time', None)
        if start_time and not self.request.retries:
            total_time = int((time() - start_time) * 1000)
            statsd.timing(self.name + '.timing', total_time)
        statsd.incr(self.name + '.total')
        statsd.incr('news.tasks.all_total')
        if settings.MAINTENANCE_MODE and self.name not in MAINTENANCE_EXEMPT:
            if not settings.READ_ONLY_MODE:
                # record task for later
                QueuedTask.objects.create(
                    name=self.name,
                    args=args,
                    kwargs=kwargs,
                )
                statsd.incr(self.name + '.queued')
            else:
                statsd.incr(self.name + '.not_queued')

            return

        try:
            return func(*args, **kwargs)
        except (IOError, NewsletterException, requests.RequestException,
                sfapi.SalesforceError, RetryTask) as e:
            # These could all be connection issues, so try again later.
            # IOError covers URLError and SSLError.
            if ignore_error(e):
                return

            try:
                if not (isinstance(e, RetryTask)
                        or ignore_error_post_retry(e)):
                    sentry_client.captureException(tags={'action': 'retried'})

                raise self.retry(countdown=2**(self.request.retries + 1) * 60)
            except self.MaxRetriesExceededError:
                statsd.incr(self.name + '.retry_max')
                statsd.incr('news.tasks.retry_max_total')
                # don't bubble certain errors
                if ignore_error_post_retry(e):
                    return

                sentry_client.captureException()
Code example #32
File: common.py Project: glogiotatidis/basket
        def wrapped(*args, **kwargs):
            starttime = time()
            e = None
            try:
                resp = f(*args, **kwargs)
            except NewsletterException as e:
                pass
            except Exception:
                raise

            totaltime = int((time() - starttime) * 1000)
            statsd.timing(prefix + '.timing', totaltime)
            statsd.timing(prefix + '.{}.timing'.format(f.__name__), totaltime)
            statsd.incr(prefix + '.count')
            statsd.incr(prefix + '.{}.count'.format(f.__name__))
            if e:
                raise
            else:
                return resp
Code example #33
File: celery.py Project: raman934/addons-server
def track_task_run_time(task_id, task, **kw):
    timer = TaskTimer()
    start_time = cache.get(timer.cache_key(task_id))
    if start_time is None:
        log.info('could not track task run time; id={id}; name={name}; '
                 'current_dt={current_dt}'.format(
                     id=task_id,
                     name=task.name,
                     current_dt=timer.current_datetime))
    else:
        run_time = timer.current_epoch_ms - start_time
        log.info('tracking task run time; id={id}; name={name}; '
                 'run_time={run_time}; current_dt={current_dt}'.format(
                     id=task_id,
                     name=task.name,
                     current_dt=timer.current_datetime,
                     run_time=run_time))
        statsd.timing('tasks.{}'.format(task.name), run_time)
        cache.delete(timer.cache_key(task_id))
Code example #34
File: views.py Project: hudikwebb/webpay
def trans_start_url(request):
    """
    JSON handler to get the Bango payment URL to start a transaction.
    """
    try:
        statsd.incr('purchase.payment_time.retry')
        with statsd.timer('purchase.payment_time.get_transation'):
            trans = solitude.get_transaction(request.session['trans_id'])
    except ObjectDoesNotExist:
        trans = {'status': None}
    data = {'url': None, 'status': trans['status']}
    if trans['status'] == constants.STATUS_PENDING:
        statsd.incr('purchase.payment_time.success')
        payment_start = request.session.get('payment_start', False)
        if payment_start:
            delta = int((time.time() - float(payment_start)) * 1000)
            statsd.timing('purchase.payment_time.duration', delta)
        data['url'] = provider.get_start_url(trans['uid_pay'])
    return data
Code example #35
def new_execute_command(self, *args, **options):
    # The key is the name of the command in lowercase
    key = args[0].lower()

    # Start the timer
    start = time.time()

    # Run the command
    ret = self._old_execute_command(*args, **options)

    # Get the time
    ms = int(round((time.time() - start) * 1000))  # delta in ms

    # Log the stats
    statsd.timing('redis.execute', ms, SAMPLE_RATE)
    statsd.timing('redis.execute.' + key, ms, SAMPLE_RATE)

    # Done
    return ret
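
How such a patch gets wired up is not shown in the snippet; a minimal sketch, assuming redis-py's `Redis` client and a module-level `SAMPLE_RATE` constant (both assumptions):

# Hypothetical monkey-patch wiring for new_execute_command (assumes redis-py).
import time
from redis import Redis
from django_statsd.clients import statsd

SAMPLE_RATE = 1.0  # assumed statsd sample rate

# Keep a reference to the original method, then install the timed wrapper.
Redis._old_execute_command = Redis.execute_command
Redis.execute_command = new_execute_command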
Code example #36
File: common.py Project: akatsoulas/basket-1
        def wrapped(*args, **kwargs):
            starttime = time()
            e = None
            try:
                resp = f(*args, **kwargs)
            except NewsletterException as e:
                pass
            except Exception:
                raise

            totaltime = int((time() - starttime) * 1000)
            statsd.timing(prefix + '.timing', totaltime)
            statsd.timing(prefix + '.{}.timing'.format(f.__name__), totaltime)
            statsd.incr(prefix + '.count')
            statsd.incr(prefix + '.{}.count'.format(f.__name__))
            if e:
                raise
            else:
                return resp
Code example #37
File: views.py Project: jincreator/webpay
def wait_to_start(request):
    """
    Wait until the transaction is in a ready state.

    The transaction was started previously during the buy flow in the
    background from webpay.pay.tasks.

    Serve JS that polls for transaction state.
    When ready, redirect to the Bango payment URL using
    the generated billing configuration ID.
    """
    trans_id = request.session.get('trans_id', None)
    if not trans_id:
        # This seems like a serious problem but maybe there is just a race
        # condition. If we see a lot of these in the logs it means the
        # payment will never complete so we should keep an eye on it.
        log.error(
            'wait_to_start() session trans_id {t} was None'.format(t=trans_id))
    try:
        statsd.incr('purchase.payment_time.retry')
        with statsd.timer('purchase.payment_time.get_transation'):
            trans = solitude.get_transaction(trans_id)
    except ObjectDoesNotExist:
        trans = {'status': None}

    if trans['status'] in constants.STATUS_ENDED:
        statsd.incr('purchase.payment_time.failure')
        log.exception('Attempt to restart finished transaction {0} '
                      'with status {1}'.format(trans_id, trans['status']))
        return system_error(request, code=msg.TRANS_ENDED)

    if trans['status'] == constants.STATUS_PENDING:
        statsd.incr('purchase.payment_time.success')
        payment_start = request.session.get('payment_start', False)
        if payment_start:
            delta = int((time.time() - float(payment_start)) * 1000)
            statsd.timing('purchase.payment_time.duration', delta)
        # Dump any messages so we don't show them later.
        clear_messages(request)
        # The transaction is ready; no need to wait for it.
        return http.HttpResponseRedirect(get_payment_url(trans))
    return render(request, 'pay/wait-to-start.html')
Code example #38
File: api.py Project: tempbottle/webpay
def trans_start_url(request):
    """
    JSON handler to get the provider payment URL to start a transaction.
    """
    trans = None
    trans_id = request.session.get('trans_id')
    data = {'url': None, 'status': None, 'provider': None}

    if not trans_id:
        log.error('trans_start_url(): no transaction ID in session')
        return http.HttpResponseBadRequest()
    try:
        statsd.incr('purchase.payment_time.retry')
        with statsd.timer('purchase.payment_time.get_transaction'):
            trans = client.get_transaction(trans_id)
        data['status'] = trans['status']
        data['provider'] = constants.PROVIDERS_INVERTED.get(trans['provider'])
    except ObjectDoesNotExist:
        log.error('trans_start_url() transaction does not exist: {t}'
                  .format(t=trans_id))

    if data['status'] == constants.STATUS_PENDING:
        statsd.incr('purchase.payment_time.success')
        payment_start = request.session.get('payment_start', False)
        if payment_start:
            delta = int((time.time() - float(payment_start)) * 1000)
            statsd.timing('purchase.payment_time.duration', delta)
        url = get_payment_url(trans)
        log.info('async call got payment URL {url} for trans {tr}'
                 .format(url=url, tr=trans))
        data['url'] = url

    if trans and trans['status'] == constants.STATUS_ERRORED:
        statsd.incr('purchase.payment_time.errored')
        log.exception('Purchase configuration failed: {0} with status {1}'
                      .format(trans_id, trans['status']))
        return system_error(
            request,
            code=getattr(msg, trans.get('status_reason', 'UNEXPECTED_ERROR'))
        )

    return data
Code example #39
    def _handle_execution_success(self, use_statsd=False):
        """
        Handle execution success of the trigger
        :param bool use_statsd: whether to use statsd
        :return: None
        """
        if self.date_processed is None:
            now = timezone.now()
            self.date_processed = now

        # Send stats to statsd if necessary
        if use_statsd:
            from django_statsd.clients import statsd
            statsd.incr('triggers.{trigger_type}.processed'.format(trigger_type=self.trigger_type))
            if self.date_processed and self.process_after:
                statsd.timing('triggers.{trigger_type}.process_delay_seconds'.format(trigger_type=self.trigger_type),
                              (self.date_processed - self.process_after).total_seconds())

        self.successful = True
        self.save()
Code example #40
File: views.py Project: jincreator/webpay
def trans_start_url(request):
    """
    JSON handler to get the Bango payment URL to start a transaction.
    """
    try:
        statsd.incr('purchase.payment_time.retry')
        with statsd.timer('purchase.payment_time.get_transaction'):
            trans = solitude.get_transaction(request.session['trans_id'])
    except ObjectDoesNotExist:
        log.error('trans_start_url() transaction does not exist: {t}'.format(
            t=request.session['trans_id']))
        trans = {'status': None}

    data = {'url': None, 'status': trans['status']}
    if trans['status'] == constants.STATUS_PENDING:
        statsd.incr('purchase.payment_time.success')
        payment_start = request.session.get('payment_start', False)
        if payment_start:
            delta = int((time.time() - float(payment_start)) * 1000)
            statsd.timing('purchase.payment_time.duration', delta)
        data['url'] = get_payment_url(trans)
    return data
Code example #41
def handle_upload_validation_result(results, upload_pk, channel, is_mozilla_signed):
    """Annotate a set of validation results and save them to the given
    FileUpload instance.
    """
    upload = FileUpload.objects.get(pk=upload_pk)
    upload.validation = json.dumps(results)
    upload.save()  # We want to hit the custom save().

    # Track the time it took from first upload through validation
    # until the results were processed and saved.
    upload_start = utc_millesecs_from_epoch(upload.created)
    now = datetime.datetime.now()
    now_ts = utc_millesecs_from_epoch(now)
    delta = now_ts - upload_start
    statsd.timing('devhub.validation_results_processed', delta)

    if not storage.exists(upload.path):
        # TODO: actually fix this so we can get stats. It seems that
        # the file maybe gets moved but it needs more investigation.
        log.warning(
            'Scaled upload stats were not tracked. File is '
            'missing: {}'.format(upload.path)
        )
        return

    size = Decimal(storage.size(upload.path))
    megabyte = Decimal(1024 * 1024)

    # Stash separate metrics for small / large files.
    quantifier = 'over' if size > megabyte else 'under'
    statsd.timing(
        'devhub.validation_results_processed_{}_1mb'.format(quantifier), delta
    )

    # Scale the upload / processing time by package size (in MB)
    # so we can normalize large XPIs which naturally take longer to validate.
    scaled_delta = None
    size_in_mb = size / megabyte
    if size > 0:
        # If the package is smaller than 1MB, don't scale it. This should
        # help account for validator setup time.
        unit = size_in_mb if size > megabyte else Decimal(1)
        scaled_delta = Decimal(delta) / unit
        statsd.timing('devhub.validation_results_processed_per_mb', scaled_delta)

    log.info(
        'Time to process and save upload validation; '
        'upload.pk={upload}; processing_time={delta}; '
        'scaled_per_mb={scaled}; upload_size_in_mb={size_in_mb}; '
        'created={created}; now={now}'.format(
            delta=delta,
            upload=upload.pk,
            created=upload.created,
            now=now,
            scaled=scaled_delta,
            size_in_mb=size_in_mb,
        )
    )
Code example #42
    def _record_time(self, request):
        if hasattr(request, '_start_time'):
            ms = int((time.time() - request._start_time) * 1000)
            data = dict(module=request._view_module, name=request._view_name,
                        method=request.method)
            statsd.timing('view.{module}.{name}.{method}'.format(**data), ms)
            if getattr(settings, 'STATSD_VIEW_TIMER_DETAILS', True):
                statsd.timing('view.{module}.{method}'.format(**data), ms)
                statsd.timing('view.{method}'.format(**data), ms)
Code example #43
File: middleware.py Project: sideffect0/socorro
    def _record_time(self, request):
        if hasattr(request, '_start_time'):
            ms = int((time.time() - request._start_time) * 1000)
            data = dict(module=request._view_module,
                        name=request._view_name,
                        method=request.method)
            statsd.timing('view.{module}.{name}.{method}'.format(**data), ms)
            statsd.timing('view.{module}.{method}'.format(**data), ms)
            statsd.timing('view.{method}'.format(**data), ms)
Code example #44
File: middleware.py Project: Dreadchild/zamboni
    def _record_time(self, request):
        pre = 'api' if getattr(request, 'API', False) else 'view'
        if hasattr(request, '_start_time'):
            ms = int((time.time() - request._start_time) * 1000)
            data = {'method': request.method,
                    'module': request._view_module,
                    'name': request._view_name,
                    'pre': pre}
            statsd.timing('{pre}.{module}.{name}.{method}'.format(**data), ms)
            statsd.timing('{pre}.{module}.{method}'.format(**data), ms)
            statsd.timing('{pre}.{method}'.format(**data), ms)
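
The `_start_time`, `_view_module` and `_view_name` attributes read by `_record_time` are typically set earlier in the request cycle, e.g. in a `process_view` hook on the same middleware. A hedged sketch (the attribute names come from the snippets above; everything else is assumed):

# Hypothetical process_view hook that sets the attributes _record_time reads.
import time

def process_view(self, request, view_func, view_args, view_kwargs):
    request._start_time = time.time()
    request._view_module = getattr(view_func, '__module__', 'unknown')
    request._view_name = getattr(view_func, '__name__', 'unknown')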
Code example #45
File: tasks.py Project: nothingisdead/addons-server
def handle_upload_validation_result(results, upload_pk, channel,
                                    is_mozilla_signed):
    """Annotate a set of validation results and save them to the given
    FileUpload instance."""
    upload = FileUpload.objects.get(pk=upload_pk)
    # Restrictions applying to new legacy submissions apply if:
    # - It's the very first upload (there is no addon id yet)
    # - It's the first upload in that channel
    is_new_upload = (
        not upload.addon_id
        or not upload.addon.find_latest_version(channel=channel, exclude=()))

    # Annotate results with potential legacy add-ons restrictions.
    if not is_mozilla_signed:
        results = annotate_legacy_addon_restrictions(
            results=results, is_new_upload=is_new_upload)

    annotate_legacy_langpack_restriction(results=results)

    # Check for API keys in submissions.
    # Make sure it is extension-like, e.g. no LWT or search plugin
    try:
        results = check_for_api_keys_in_file(results=results, upload=upload)
    except (ValidationError, BadZipfile, IOError):
        pass

    # Annotate results with potential webext warnings on new versions.
    if upload.addon_id and upload.version:
        results = annotate_webext_incompatibilities(
            results=results,
            file_=None,
            addon=upload.addon,
            version_string=upload.version,
            channel=channel)

    upload.validation = json.dumps(results)
    upload.save()  # We want to hit the custom save().

    # Track the time it took from first upload through validation
    # until the results were processed and saved.
    upload_start = utc_millesecs_from_epoch(upload.created)
    now = datetime.datetime.now()
    now_ts = utc_millesecs_from_epoch(now)
    delta = now_ts - upload_start
    statsd.timing('devhub.validation_results_processed', delta)

    if not storage.exists(upload.path):
        # TODO: actually fix this so we can get stats. It seems that
        # the file maybe gets moved but it needs more investigation.
        log.warning('Scaled upload stats were not tracked. File is '
                    'missing: {}'.format(upload.path))
        return

    size = Decimal(storage.size(upload.path))
    megabyte = Decimal(1024 * 1024)

    # Stash separate metrics for small / large files.
    quantifier = 'over' if size > megabyte else 'under'
    statsd.timing(
        'devhub.validation_results_processed_{}_1mb'.format(quantifier), delta)

    # Scale the upload / processing time by package size (in MB)
    # so we can normalize large XPIs which naturally take longer to validate.
    scaled_delta = None
    size_in_mb = size / megabyte
    if size > 0:
        # If the package is smaller than 1MB, don't scale it. This should
        # help account for validator setup time.
        unit = size_in_mb if size > megabyte else Decimal(1)
        scaled_delta = Decimal(delta) / unit
        statsd.timing('devhub.validation_results_processed_per_mb',
                      scaled_delta)

    log.info('Time to process and save upload validation; '
             'upload.pk={upload}; processing_time={delta}; '
             'scaled_per_mb={scaled}; upload_size_in_mb={size_in_mb}; '
             'created={created}; now={now}'.format(delta=delta,
                                                   upload=upload.pk,
                                                   created=upload.created,
                                                   now=now,
                                                   scaled=scaled_delta,
                                                   size_in_mb=size_in_mb))
Code example #46
    def from_upload(cls,
                    upload,
                    addon,
                    platforms,
                    channel,
                    send_signal=True,
                    source=None,
                    is_beta=False,
                    parsed_data=None):
        from olympia.addons.models import AddonFeatureCompatibility

        if addon.status == amo.STATUS_DISABLED:
            raise VersionCreateError(
                'Addon is Mozilla Disabled; no new versions are allowed.')

        if parsed_data is None:
            parsed_data = utils.parse_addon(upload, addon)
        license_id = None
        if channel == amo.RELEASE_CHANNEL_LISTED:
            previous_version = addon.find_latest_version(channel=channel,
                                                         exclude=())
            if previous_version and previous_version.license_id:
                license_id = previous_version.license_id
        version = cls.objects.create(
            addon=addon,
            version=parsed_data['version'],
            license_id=license_id,
            source=source,
            channel=channel,
        )
        log.info('New version: %r (%s) from %r' %
                 (version, version.id, upload))
        activity.log_create(amo.LOG.ADD_VERSION, version, addon)
        # Update the add-on e10s compatibility since we're creating a new
        # version that may change that.
        e10s_compatibility = parsed_data.get('e10s_compatibility')
        if e10s_compatibility is not None:
            feature_compatibility = (
                AddonFeatureCompatibility.objects.get_or_create(
                    addon=addon)[0])
            feature_compatibility.update(e10s=e10s_compatibility)

        compatible_apps = {}
        for app in parsed_data.get('apps', []):
            compatible_apps[app.appdata] = ApplicationsVersions(
                version=version, min=app.min, max=app.max, application=app.id)
            compatible_apps[app.appdata].save()

        # See #2828: sometimes when we generate the filename(s) below, in
        # File.from_upload(), cache-machine is confused and has trouble
        # fetching the ApplicationsVersions that were just created. To work
        # around this we pre-generate version.compatible_apps and avoid the
        # queries completely.
        version.compatible_apps = compatible_apps

        if addon.type == amo.ADDON_SEARCH:
            # Search extensions are always for all platforms.
            platforms = [amo.PLATFORM_ALL.id]
        else:
            platforms = cls._make_safe_platform_files(platforms)

        for platform in platforms:
            File.from_upload(upload,
                             version,
                             platform,
                             parsed_data=parsed_data,
                             is_beta=is_beta)

        version.inherit_nomination(from_statuses=[amo.STATUS_AWAITING_REVIEW])
        version.disable_old_files()
        # After the upload has been copied to all platforms, remove the upload.
        storage.delete(upload.path)
        if send_signal:
            version_uploaded.send(sender=version)

        # Track the time it took from first upload through validation
        # (and whatever else) until a version was created.
        upload_start = utc_millesecs_from_epoch(upload.created)
        now = datetime.datetime.now()
        now_ts = utc_millesecs_from_epoch(now)
        upload_time = now_ts - upload_start

        log.info('Time for version {version} creation from upload: {delta}; '
                 'created={created}; now={now}'.format(delta=upload_time,
                                                       version=version,
                                                       created=upload.created,
                                                       now=now))
        statsd.timing('devhub.version_created_from_upload', upload_time)

        return version
Code example #47
    def from_upload(cls,
                    upload,
                    addon,
                    selected_apps,
                    channel,
                    parsed_data=None):
        """
        Create a Version instance and corresponding File(s) from a
        FileUpload, an Addon, a list of compatible app ids, a channel id and
        the parsed_data generated by parse_addon().

        Note that it's the caller's responsibility to ensure the file is valid.
        We can't check for that here because an admin may have overridden the
        validation results.
        """
        from olympia.addons.models import AddonReviewerFlags
        from olympia.addons.utils import RestrictionChecker
        from olympia.git.utils import create_git_extraction_entry

        assert parsed_data is not None

        if addon.status == amo.STATUS_DISABLED:
            raise VersionCreateError(
                'Addon is Mozilla Disabled; no new versions are allowed.')

        if upload.addon and upload.addon != addon:
            raise VersionCreateError(
                'FileUpload was made for a different Addon')

        if not upload.user or not upload.ip_address or not upload.source:
            raise VersionCreateError(
                'FileUpload does not have some required fields')

        if not upload.user.last_login_ip or not upload.user.email:
            raise VersionCreateError(
                'FileUpload user does not have some required fields')

        license_id = None
        if channel == amo.RELEASE_CHANNEL_LISTED:
            previous_version = addon.find_latest_version(channel=channel,
                                                         exclude=())
            if previous_version and previous_version.license_id:
                license_id = previous_version.license_id
        approval_notes = None
        if parsed_data.get('is_mozilla_signed_extension'):
            approval_notes = (
                'This version has been signed with Mozilla internal certificate.'
            )
        version = cls.objects.create(
            addon=addon,
            approval_notes=approval_notes,
            version=parsed_data['version'],
            license_id=license_id,
            channel=channel,
        )
        email = upload.user.email if upload.user and upload.user.email else ''
        with core.override_remote_addr(upload.ip_address):
            # The following log statement is used by foxsec-pipeline.
            # We override the IP because it might be called from a task and we
            # want the original IP from the submitter.
            log.info(
                f'New version: {version!r} ({version.id}) from {upload!r}',
                extra={
                    'email': email,
                    'guid': addon.guid,
                    'upload': upload.uuid.hex,
                    'user_id': upload.user_id,
                    'from_api': upload.source == amo.UPLOAD_SOURCE_API,
                },
            )
            activity.log_create(amo.LOG.ADD_VERSION,
                                version,
                                addon,
                                user=upload.user or get_task_user())

        if addon.type == amo.ADDON_STATICTHEME:
            # We don't let developers select apps for static themes
            selected_apps = [app.id for app in amo.APP_USAGE]

        compatible_apps = {}
        for app in parsed_data.get('apps', []):
            if app.id not in selected_apps:
                # If the user chose to explicitly deselect Firefox for Android
                # we don't create the respective `ApplicationsVersions`,
                # which means this add-on will then be listed only for
                # desktop Firefox specifically.
                continue

            compatible_apps[app.appdata] = ApplicationsVersions(
                version=version, min=app.min, max=app.max, application=app.id)
            compatible_apps[app.appdata].save()

        # Pre-generate _compatible_apps property to avoid accidentally
        # triggering queries with that instance later.
        version._compatible_apps = compatible_apps

        # Create relevant file and update the all_files cached property on the
        # Version, because we might need it afterwards.
        version.all_files = [
            File.from_upload(
                upload=upload,
                version=version,
                parsed_data=parsed_data,
            )
        ]

        version.inherit_nomination(from_statuses=[amo.STATUS_AWAITING_REVIEW])
        version.disable_old_files()

        # After the upload has been copied to its permanent location, delete it
        # from storage. Keep the FileUpload instance (it gets cleaned up by a
        # cron eventually some time after its creation, in amo.cron.gc()),
        # making sure it's associated with the add-on instance.
        storage.delete(upload.path)
        upload.path = ''
        if upload.addon is None:
            upload.addon = addon
        upload.save()

        version_uploaded.send(instance=version, sender=Version)

        if version.is_webextension:
            if (waffle.switch_is_active('enable-yara')
                    or waffle.switch_is_active('enable-customs')
                    or waffle.switch_is_active('enable-wat')):
                ScannerResult.objects.filter(upload_id=upload.id).update(
                    version=version)

        if waffle.switch_is_active('enable-uploads-commit-to-git-storage'):
            # Schedule this version for git extraction.
            transaction.on_commit(
                lambda: create_git_extraction_entry(version=version))

        # Generate a preview and icon for listed static themes
        if (addon.type == amo.ADDON_STATICTHEME
                and channel == amo.RELEASE_CHANNEL_LISTED):
            theme_data = parsed_data.get('theme', {})
            generate_static_theme_preview(theme_data, version.pk)

        # Reset add-on reviewer flags to disable auto-approval and require
        # admin code review if the package has already been signed by mozilla.
        reviewer_flags_defaults = {}
        is_mozilla_signed = parsed_data.get('is_mozilla_signed_extension')
        if upload.validation_timeout:
            reviewer_flags_defaults['needs_admin_code_review'] = True
        if is_mozilla_signed and addon.type != amo.ADDON_LPAPP:
            reviewer_flags_defaults['needs_admin_code_review'] = True
            reviewer_flags_defaults['auto_approval_disabled'] = True

        # Check if the approval should be restricted
        if not RestrictionChecker(upload=upload).is_auto_approval_allowed():
            flag = ('auto_approval_disabled'
                    if channel == amo.RELEASE_CHANNEL_LISTED else
                    'auto_approval_disabled_unlisted')
            reviewer_flags_defaults[flag] = True

        if reviewer_flags_defaults:
            AddonReviewerFlags.objects.update_or_create(
                addon=addon, defaults=reviewer_flags_defaults)

        # Authors need to be notified about auto-approval delay again since
        # they are submitting a new version.
        addon.reset_notified_about_auto_approval_delay()

        # Track the time it took from first upload through validation
        # (and whatever else) until a version was created.
        upload_start = utc_millesecs_from_epoch(upload.created)
        now = datetime.datetime.now()
        now_ts = utc_millesecs_from_epoch(now)
        upload_time = now_ts - upload_start

        log.info('Time for version {version} creation from upload: {delta}; '
                 'created={created}; now={now}'.format(delta=upload_time,
                                                       version=version,
                                                       created=upload.created,
                                                       now=now))
        statsd.timing('devhub.version_created_from_upload', upload_time)

        return version
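The `from_upload` snippets above all end with the same measurement: convert `upload.created` and the current time to milliseconds since the Unix epoch, subtract them, and report the delta through `statsd.timing`. The helper they use, `utc_millesecs_from_epoch`, is not shown here; a minimal sketch of what such a helper has to do (this implementation is an assumption, not the project's code) looks like:

import datetime


def millisecs_from_epoch(for_datetime=None):
    # Convert a naive datetime into integer milliseconds since the Unix
    # epoch, so that two results can be subtracted to get a millisecond
    # delta suitable for statsd.timing.
    for_datetime = for_datetime or datetime.datetime.now()
    epoch = datetime.datetime(1970, 1, 1)
    return int((for_datetime - epoch).total_seconds() * 1000)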
Code example #48
File: models.py  Project: raman934/addons-server
    def from_upload(cls, upload, addon, selected_apps, channel,
                    parsed_data=None):
        """
        Create a Version instance and corresponding File(s) from a
        FileUpload, an Addon, a list of compatible app ids, a channel id and
        the parsed_data generated by parse_addon().

        Note that it's the caller's responsibility to ensure the file is valid.
        We can't check for that here because an admin may have overridden the
        validation results.
        """
        from olympia.git.utils import create_git_extraction_entry

        assert parsed_data is not None

        if addon.status == amo.STATUS_DISABLED:
            raise VersionCreateError(
                'Addon is Mozilla Disabled; no new versions are allowed.')

        license_id = None
        if channel == amo.RELEASE_CHANNEL_LISTED:
            previous_version = addon.find_latest_version(
                channel=channel, exclude=())
            if previous_version and previous_version.license_id:
                license_id = previous_version.license_id
        approval_notes = None
        if parsed_data.get('is_mozilla_signed_extension'):
            approval_notes = (u'This version has been signed with '
                              u'Mozilla internal certificate.')
        version = cls.objects.create(
            addon=addon,
            approval_notes=approval_notes,
            version=parsed_data['version'],
            license_id=license_id,
            channel=channel,
        )
        email = upload.user.email if upload.user and upload.user.email else ''
        with core.override_remote_addr(upload.ip_address):
            log.info(
                'New version: %r (%s) from %r' % (version, version.id, upload),
                extra={
                    'email': email,
                    'guid': addon.guid,
                    'upload': upload.uuid.hex,
                    'user_id': upload.user_id,
                    'from_api': upload.source == amo.UPLOAD_SOURCE_API,
                }
            )
            activity.log_create(
                amo.LOG.ADD_VERSION, version, addon,
                user=upload.user or get_task_user())

        if addon.type == amo.ADDON_STATICTHEME:
            # We don't let developers select apps for static themes
            selected_apps = [app.id for app in amo.APP_USAGE]

        compatible_apps = {}
        for app in parsed_data.get('apps', []):
            if app.id not in selected_apps:
                # If the user chose to explicitly deselect Firefox for Android
                # we don't create the respective `ApplicationsVersions`,
                # which means this add-on will then be listed only for
                # desktop Firefox specifically.
                continue

            compatible_apps[app.appdata] = ApplicationsVersions(
                version=version, min=app.min, max=app.max, application=app.id)
            compatible_apps[app.appdata].save()

        # See #2828: sometimes when we generate the filename(s) below, in
        # File.from_upload(), cache-machine is confused and has trouble
        # fetching the ApplicationsVersions that were just created. To work
        # around this we pre-generate version.compatible_apps and avoid the
        # queries completely.
        version._compatible_apps = compatible_apps

        # For backwards compatibility. We removed specific platform
        # support during submission but we don't handle it any differently
        # beyond that yet. That means we simply set it to `PLATFORM_ALL`
        # and also have the backend create separate files for each
        # platform. Cleaning that up is another step. Given the timing on
        # this, we don't care about updates to legacy add-ons either.
        # Create relevant file and update the all_files cached property on the
        # Version, because we might need it afterwards.
        version.all_files = [File.from_upload(
            upload=upload, version=version, platform=amo.PLATFORM_ALL.id,
            parsed_data=parsed_data
        )]

        version.inherit_nomination(from_statuses=[amo.STATUS_AWAITING_REVIEW])
        version.disable_old_files()

        # After the upload has been copied to all platforms, remove the upload.
        storage.delete(upload.path)
        upload.path = ''
        upload.save()

        version_uploaded.send(instance=version, sender=Version)

        if version.is_webextension:
            if (
                    waffle.switch_is_active('enable-yara') or
                    waffle.switch_is_active('enable-customs') or
                    waffle.switch_is_active('enable-wat')
            ):
                ScannerResult.objects.filter(upload_id=upload.id).update(
                    version=version)

        if waffle.switch_is_active('enable-uploads-commit-to-git-storage'):
            # Schedule this version for git extraction.
            transaction.on_commit(
                lambda: create_git_extraction_entry(version=version)
            )

        # Generate a preview and icon for listed static themes
        if (addon.type == amo.ADDON_STATICTHEME and
                channel == amo.RELEASE_CHANNEL_LISTED):
            theme_data = parsed_data.get('theme', {})
            generate_static_theme_preview(theme_data, version.pk)

        # Authors need to be notified about auto-approval delay again since
        # they are submitting a new version.
        addon.reset_notified_about_auto_approval_delay()

        # Track the time it took from first upload through validation
        # (and whatever else) until a version was created.
        upload_start = utc_millesecs_from_epoch(upload.created)
        now = datetime.datetime.now()
        now_ts = utc_millesecs_from_epoch(now)
        upload_time = now_ts - upload_start

        log.info('Time for version {version} creation from upload: {delta}; '
                 'created={created}; now={now}'
                 .format(delta=upload_time, version=version,
                         created=upload.created, now=now))
        statsd.timing('devhub.version_created_from_upload', upload_time)

        return version
Code example #49
File: tasks.py  Project: Iamshankhadeep/addons-server
def handle_upload_validation_result(
        results, upload_pk, channel, is_mozilla_signed):
    """Annotate a set of validation results and save them to the given
    FileUpload instance."""
    upload = FileUpload.objects.get(pk=upload_pk)

    if waffle.switch_is_active('enable-yara') and results['errors'] == 0:
        # Run Yara. This cannot be asynchronous because we have no way to know
        # whether the task will complete before we attach a `Version` to it
        # later in the submission process... Because we cannot use `chord`
        # reliably right now (requires Celery 4.2+), this task is actually not
        # run as a task, it's a simple function call.
        #
        # TODO: use `run_yara` as a task in the submission chord once it is
        # possible. See: https://github.com/mozilla/addons-server/issues/12216
        run_yara(upload.pk)

    if waffle.switch_is_active('enable-customs') and results['errors'] == 0:
        # Run customs. This cannot be asynchronous because we have no way to
        # know whether the task will complete before we attach a `Version` to
        # it later in the submission process... Because we cannot use `chord`
        # reliably right now (requires Celery 4.2+), this task is actually not
        # run as a task, it's a simple function call.
        #
        # TODO: use `run_customs` as a task in the submission chord once it is
        # possible. See: https://github.com/mozilla/addons-server/issues/12217
        run_customs(upload.pk)

    if waffle.switch_is_active('enable-wat') and results['errors'] == 0:
        # Run wat. This cannot be asynchronous because we have no way to know
        # whether the task will complete before we attach a `Version` to it
        # later in the submission process... Because we cannot use `chord`
        # reliably right now (requires Celery 4.2+), this task is actually not
        # run as a task, it's a simple function call.
        #
        # TODO: use `run_wat` as a task in the submission chord once it is
        # possible. See: https://github.com/mozilla/addons-server/issues/12224
        run_wat(upload.pk)

    # Check for API keys in submissions.
    # Make sure it is extension-like, e.g. no search plugin
    try:
        results = check_for_api_keys_in_file(results=results, upload=upload)
    except (ValidationError, BadZipfile, IOError):
        pass

    # Annotate results with potential webext warnings on new versions.
    if upload.addon_id and upload.version:
        annotations.annotate_webext_incompatibilities(
            results=results, file_=None, addon=upload.addon,
            version_string=upload.version, channel=channel)

    upload.validation = json.dumps(results)
    upload.save()  # We want to hit the custom save().

    # Track the time it took from first upload through validation
    # until the results were processed and saved.
    upload_start = utc_millesecs_from_epoch(upload.created)
    now = datetime.datetime.now()
    now_ts = utc_millesecs_from_epoch(now)
    delta = now_ts - upload_start
    statsd.timing('devhub.validation_results_processed', delta)

    if not storage.exists(upload.path):
        # TODO: actually fix this so we can get stats. It seems that
        # the file maybe gets moved but it needs more investigation.
        log.warning('Scaled upload stats were not tracked. File is '
                    'missing: {}'.format(upload.path))
        return

    size = Decimal(storage.size(upload.path))
    megabyte = Decimal(1024 * 1024)

    # Stash separate metrics for small / large files.
    quantifier = 'over' if size > megabyte else 'under'
    statsd.timing(
        'devhub.validation_results_processed_{}_1mb'.format(quantifier), delta)

    # Scale the upload / processing time by package size (in MB)
    # so we can normalize large XPIs which naturally take longer to validate.
    scaled_delta = None
    size_in_mb = size / megabyte
    if size > 0:
        # If the package is smaller than 1MB, don't scale it. This should
        # help account for validator setup time.
        unit = size_in_mb if size > megabyte else Decimal(1)
        scaled_delta = Decimal(delta) / unit
        statsd.timing('devhub.validation_results_processed_per_mb',
                      scaled_delta)

    log.info('Time to process and save upload validation; '
             'upload.pk={upload}; processing_time={delta}; '
             'scaled_per_mb={scaled}; upload_size_in_mb={size_in_mb}; '
             'created={created}; now={now}'
             .format(delta=delta, upload=upload.pk,
                     created=upload.created, now=now,
                     scaled=scaled_delta, size_in_mb=size_in_mb))
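As an illustration of the scaling above (numbers invented for the example): a 4 MB upload that took 8000 ms to process is reported to `devhub.validation_results_processed_per_mb` as 8000 / 4 = 2000 ms per MB, while any upload under 1 MB is divided by 1 and reported unscaled, which the comment attributes to fixed validator setup time.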
Code example #50
def hours_logged_report():
    start = time.time()
    statsd.gauge("hours.one_week", hours_logged())
    end = time.time()
    statsd.timing('celery.hours_logged_report', int((end - start) * 1000))
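The manual `time.time()` bookkeeping here can also be written with the statsd client's timer context manager, which measures the block and emits milliseconds itself. A small sketch, assuming the same `statsd` client and `hours_logged()` helper as above:

def hours_logged_report():
    # statsd.timer times the enclosed block and sends the elapsed time in
    # milliseconds under the given key.
    with statsd.timer('celery.hours_logged_report'):
        statsd.gauge('hours.one_week', hours_logged())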
Code example #51
    def wrapped(self, *args, **kwargs):
        start_time = kwargs.pop("start_time", None)
        if start_time and not self.request.retries:
            total_time = int((time() - start_time) * 1000)
            statsd.timing(self.name + ".timing", total_time)
        statsd.incr(self.name + ".total")
        statsd.incr("news.tasks.all_total")
        if settings.MAINTENANCE_MODE and self.name not in MAINTENANCE_EXEMPT:
            if not settings.READ_ONLY_MODE:
                # record task for later
                QueuedTask.objects.create(
                    name=self.name,
                    args=args,
                    kwargs=kwargs,
                )
                statsd.incr(self.name + ".queued")
            else:
                statsd.incr(self.name + ".not_queued")

            return

        try:
            return func(*args, **kwargs)
        except (
                IOError,
                NewsletterException,
                requests.RequestException,
                RetryTask,
                sfapi.SalesforceExpiredSession,
                sfapi.SalesforceGeneralError,
                sfapi.SalesforceRefusedRequest,
                sfapi.SalesforceResourceNotFound,
                sfapi.SalesforceAuthenticationFailed,
                sfapi.SalesforceMalformedRequest,
                SilverpopResponseException,
        ) as e:
            # These could all be connection issues, so try again later.
            # IOError covers URLError and SSLError.
            if ignore_error(e):
                with sentry_sdk.push_scope() as scope:
                    scope.set_tag("action", "ignored")
                    sentry_sdk.capture_exception()
                return

            try:
                if not (isinstance(e, RetryTask)
                        or ignore_error_post_retry(e)):
                    with sentry_sdk.push_scope() as scope:
                        scope.set_tag("action", "retried")
                        sentry_sdk.capture_exception()

                # ~68 hr at 11 retries
                statsd.incr(f"{self.name}.retries.{self.request.retries}")
                statsd.incr(f"news.tasks.retries.{self.request.retries}")
                raise self.retry(
                    countdown=exponential_backoff(self.request.retries))
            except self.MaxRetriesExceededError:
                statsd.incr(self.name + ".retry_max")
                statsd.incr("news.tasks.retry_max_total")
                # don't bubble certain errors
                if ignore_error_post_retry(e):
                    return

                sentry_sdk.capture_exception()
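`wrapped` above is the inner function of a task decorator: `func` is the decorated function, and `self` is the bound Celery task instance that provides `self.name`, `self.request.retries`, `self.retry` and `self.MaxRetriesExceededError`. A minimal sketch of the scaffolding such a decorator needs (the decorator and example task names are assumptions, not taken from the source):

from functools import wraps

from celery import shared_task


def instrumented_task(func):
    # Hypothetical decorator: register func as a bound Celery task and route
    # every call through the wrapped body shown above.
    @shared_task(bind=True)
    @wraps(func)
    def wrapped(self, *args, **kwargs):
        ...  # body as in the snippet above, closing over func
    return wrapped


@instrumented_task
def example_task(record_id):
    ...  # the real work; stats and retries are handled by the wrapper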
Code example #52
    def from_upload(cls,
                    upload,
                    addon,
                    selected_apps,
                    channel,
                    parsed_data=None):
        """
        Create a Version instance and corresponding File(s) from a
        FileUpload, an Addon, a list of compatible app ids, a channel id and
        the parsed_data generated by parse_addon().

        Note that it's the caller's responsibility to ensure the file is valid.
        We can't check for that here because an admin may have overridden the
        validation results.
        """
        assert parsed_data is not None

        from olympia.addons.models import AddonFeatureCompatibility

        if addon.status == amo.STATUS_DISABLED:
            raise VersionCreateError(
                'Addon is Mozilla Disabled; no new versions are allowed.')

        license_id = None
        if channel == amo.RELEASE_CHANNEL_LISTED:
            previous_version = addon.find_latest_version(channel=channel,
                                                         exclude=())
            if previous_version and previous_version.license_id:
                license_id = previous_version.license_id
        approvalnotes = None
        if parsed_data.get('is_mozilla_signed_extension'):
            approvalnotes = (u'This version has been signed with '
                             u'Mozilla internal certificate.')
        version = cls.objects.create(
            addon=addon,
            approvalnotes=approvalnotes,
            version=parsed_data['version'],
            license_id=license_id,
            channel=channel,
        )
        log.info('New version: %r (%s) from %r' %
                 (version, version.id, upload))
        activity.log_create(amo.LOG.ADD_VERSION, version, addon)
        # Update the add-on e10s compatibility since we're creating a new
        # version that may change that.
        e10s_compatibility = parsed_data.get('e10s_compatibility')
        if e10s_compatibility is not None:
            feature_compatibility = (
                AddonFeatureCompatibility.objects.get_or_create(
                    addon=addon)[0])
            feature_compatibility.update(e10s=e10s_compatibility)

        compatible_apps = {}
        for app in parsed_data.get('apps', []):
            if app.id not in selected_apps:
                # If the user chose to explicitly deselect Firefox for Android
                # we don't create the respective `ApplicationsVersions`,
                # which means this add-on will then be listed only for
                # desktop Firefox specifically.
                continue

            compatible_apps[app.appdata] = ApplicationsVersions(
                version=version, min=app.min, max=app.max, application=app.id)
            compatible_apps[app.appdata].save()

        # See #2828: sometimes when we generate the filename(s) below, in
        # File.from_upload(), cache-machine is confused and has trouble
        # fetching the ApplicationsVersions that were just created. To work
        # around this we pre-generate version.compatible_apps and avoid the
        # queries completely.
        version._compatible_apps = compatible_apps

        # For backwards compatibility. We removed specific platform
        # support during submission but we don't handle it any differently
        # beyond that yet. That means we simply set it to `PLATFORM_ALL`
        # and also have the backend create separate files for each
        # platform. Cleaning that up is another step. Given the timing on
        # this, we don't care about updates to legacy add-ons either.
        # Create relevant file and update the all_files cached property on the
        # Version, because we might need it afterwards.
        version.all_files = [
            File.from_upload(upload=upload,
                             version=version,
                             platform=amo.PLATFORM_ALL.id,
                             parsed_data=parsed_data)
        ]

        version.inherit_nomination(from_statuses=[amo.STATUS_AWAITING_REVIEW])
        version.disable_old_files()
        # After the upload has been copied to all platforms, remove the upload.
        storage.delete(upload.path)
        version_uploaded.send(sender=version)

        if waffle.switch_is_active('enable-uploads-commit-to-git-storage'):
            # Extract into git repository
            AddonGitRepository.extract_and_commit_from_file_obj(
                file_obj=version.all_files[0],
                channel=channel,
                author=upload.user)

        # Generate a preview and icon for listed static themes
        if (addon.type == amo.ADDON_STATICTHEME
                and channel == amo.RELEASE_CHANNEL_LISTED):
            dst_root = os.path.join(user_media_path('addons'), str(addon.id))
            theme_data = parsed_data.get('theme', {})
            version_root = os.path.join(dst_root, unicode(version.id))

            utils.extract_header_img(version.all_files[0].file_path,
                                     theme_data, version_root)
            generate_static_theme_preview(theme_data, version_root, version.pk)

        # Track the time it took from first upload through validation
        # (and whatever else) until a version was created.
        upload_start = utc_millesecs_from_epoch(upload.created)
        now = datetime.datetime.now()
        now_ts = utc_millesecs_from_epoch(now)
        upload_time = now_ts - upload_start

        log.info('Time for version {version} creation from upload: {delta}; '
                 'created={created}; now={now}'.format(delta=upload_time,
                                                       version=version,
                                                       created=upload.created,
                                                       now=now))
        statsd.timing('devhub.version_created_from_upload', upload_time)

        return version
Code example #53
File: models.py  Project: crdoconnor/olympia
        try:
            p = res.groups()[1]
            if 'localepicker.properties' not in p:
                p = os.path.join(p, 'localepicker.properties')
            res = zip.extract_from_manifest(p)
        except (zipfile.BadZipfile, IOError), e:
            log.error('Error unzipping: %s, %s in file: %s' % (p, e, self.pk))
            return ''
        except (ValueError, KeyError), e:
            log.error('No file named: %s in file: %s' % (e, self.pk))
            return ''

        end = time.time() - start
        log.info('Extracted localepicker file: %s in %.2fs' % (self.pk, end))
        statsd.timing('files.extract.localepicker', (end * 1000))
        return res


@receiver(models.signals.post_save,
          sender=File,
          dispatch_uid='cache_localpicker')
def cache_localepicker(sender, instance, **kw):
    if kw.get('raw') or not kw.get('created'):
        return

    try:
        addon = instance.version.addon
    except models.ObjectDoesNotExist:
        return
Code example #54
    def process_response(self, request, response):
        # Bucket status codes by class (2xx, 3xx, ...) before counting them.
        statsd.incr('django.response_codes.%s' %
                    re.sub(r'\d{2}$', 'xx', str(response.status_code)))
        if hasattr(request, '_start_time'):
            # Report the request duration in milliseconds.
            ms = int((time() - request._start_time) * 1000)
            statsd.timing('django.response', ms)
        return response
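`process_response` only reports a duration when `request._start_time` exists, so the same middleware needs a `process_request` hook that stamps the request on the way in. A minimal sketch under that assumption (the class name is made up; only the attribute name comes from the snippet):

from time import time


class ResponseStatsMiddleware(object):
    # Hypothetical counterpart for the process_response hook above.

    def process_request(self, request):
        # Record when the request started so process_response can compute
        # the elapsed milliseconds.
        request._start_time = time()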
Code example #55
File: models.py  Project: waf/addons-server
    def from_upload(cls,
                    upload,
                    addon,
                    platforms,
                    channel,
                    source=None,
                    parsed_data=None):
        """
        Create a Version instance and corresponding File(s) from a
        FileUpload, an Addon, a list of platform ids, a channel id and the
        parsed_data generated by parse_addon().

        Note that it's the caller's responsibility to ensure the file is valid.
        We can't check for that here because an admin may have overridden the
        validation results.
        """
        assert parsed_data is not None

        from olympia.addons.models import AddonFeatureCompatibility

        if addon.status == amo.STATUS_DISABLED:
            raise VersionCreateError(
                'Addon is Mozilla Disabled; no new versions are allowed.')

        license_id = None
        if channel == amo.RELEASE_CHANNEL_LISTED:
            previous_version = addon.find_latest_version(channel=channel,
                                                         exclude=())
            if previous_version and previous_version.license_id:
                license_id = previous_version.license_id
        version = cls.objects.create(
            addon=addon,
            version=parsed_data['version'],
            license_id=license_id,
            source=source,
            channel=channel,
        )
        log.info('New version: %r (%s) from %r' %
                 (version, version.id, upload))
        activity.log_create(amo.LOG.ADD_VERSION, version, addon)
        # Update the add-on e10s compatibility since we're creating a new
        # version that may change that.
        e10s_compatibility = parsed_data.get('e10s_compatibility')
        if e10s_compatibility is not None:
            feature_compatibility = (
                AddonFeatureCompatibility.objects.get_or_create(
                    addon=addon)[0])
            feature_compatibility.update(e10s=e10s_compatibility)

        compatible_apps = {}
        for app in parsed_data.get('apps', []):
            compatible_apps[app.appdata] = ApplicationsVersions(
                version=version, min=app.min, max=app.max, application=app.id)
            compatible_apps[app.appdata].save()

        # See #2828: sometimes when we generate the filename(s) below, in
        # File.from_upload(), cache-machine is confused and has trouble
        # fetching the ApplicationsVersions that were just created. To work
        # around this we pre-generate version.compatible_apps and avoid the
        # queries completely.
        version._compatible_apps = compatible_apps

        if addon.type in [amo.ADDON_SEARCH, amo.ADDON_STATICTHEME]:
            # Search extensions and static themes are always for all platforms.
            platforms = [amo.PLATFORM_ALL.id]
        else:
            platforms = cls._make_safe_platform_files(platforms)

        # Create as many files as we have platforms. Update the all_files
        # cached property on the Version while we're at it, because we might
        # need it afterwards.
        version.all_files = [
            File.from_upload(upload,
                             version,
                             platform,
                             parsed_data=parsed_data) for platform in platforms
        ]

        version.inherit_nomination(from_statuses=[amo.STATUS_AWAITING_REVIEW])
        version.disable_old_files()
        # After the upload has been copied to all platforms, remove the upload.
        storage.delete(upload.path)
        version_uploaded.send(sender=version)

        # Generate a preview and icon for listed static themes
        if (addon.type == amo.ADDON_STATICTHEME
                and channel == amo.RELEASE_CHANNEL_LISTED):
            dst_root = os.path.join(user_media_path('addons'), str(addon.id))
            theme_data = parsed_data.get('theme', {})
            version_root = os.path.join(dst_root, unicode(version.id))

            utils.extract_header_img(version.all_files[0].file_path,
                                     theme_data, version_root)
            preview = VersionPreview.objects.create(version=version)
            generate_static_theme_preview(theme_data, version_root, preview.pk)

        # Track the time it took from first upload through validation
        # (and whatever else) until a version was created.
        upload_start = utc_millesecs_from_epoch(upload.created)
        now = datetime.datetime.now()
        now_ts = utc_millesecs_from_epoch(now)
        upload_time = now_ts - upload_start

        log.info('Time for version {version} creation from upload: {delta}; '
                 'created={created}; now={now}'.format(delta=upload_time,
                                                       version=version,
                                                       created=upload.created,
                                                       now=now))
        statsd.timing('devhub.version_created_from_upload', upload_time)

        return version
Code example #56
File: models.py  Project: justinpotts/addons-server
    def from_upload(cls,
                    upload,
                    addon,
                    platforms,
                    send_signal=True,
                    source=None,
                    is_beta=False):
        from olympia.addons.models import AddonFeatureCompatibility

        data = utils.parse_addon(upload, addon)
        try:
            license = addon.versions.latest().license_id
        except Version.DoesNotExist:
            license = None
        v = cls.objects.create(addon=addon,
                               version=data['version'],
                               license_id=license,
                               source=source)
        log.info('New version: %r (%s) from %r' % (v, v.id, upload))

        # Update the add-on e10s compatibility since we're creating a new
        # version that may change that.
        e10s_compatibility = data.get('e10s_compatibility')
        if e10s_compatibility is not None:
            feature_compatibility = (
                AddonFeatureCompatibility.objects.get_or_create(
                    addon=addon)[0])
            feature_compatibility.update(e10s=e10s_compatibility)

        AV = ApplicationsVersions
        for app in data.get('apps', []):
            AV(version=v, min=app.min, max=app.max, application=app.id).save()
        if addon.type == amo.ADDON_SEARCH:
            # Search extensions are always for all platforms.
            platforms = [amo.PLATFORM_ALL.id]
        else:
            platforms = cls._make_safe_platform_files(platforms)

        for platform in platforms:
            File.from_upload(upload,
                             v,
                             platform,
                             parse_data=data,
                             is_beta=is_beta)

        v.disable_old_files()
        # After the upload has been copied to all platforms, remove the upload.
        storage.delete(upload.path)
        if send_signal:
            version_uploaded.send(sender=v)

        # Track the time it took from first upload through validation
        # (and whatever else) until a version was created.
        upload_start = utc_millesecs_from_epoch(upload.created)
        now = datetime.datetime.now()
        now_ts = utc_millesecs_from_epoch(now)
        upload_time = now_ts - upload_start

        log.info('Time for version {version} creation from upload: {delta}; '
                 'created={created}; now={now}'.format(delta=upload_time,
                                                       version=v,
                                                       created=upload.created,
                                                       now=now))
        statsd.timing('devhub.version_created_from_upload', upload_time)

        return v
Code example #57
def timing(key, value):
    return statsd.timing(_get_key(key), value)
Code example #58
def timing(key, value):
    # TODO(dcramer): implement timing for tsdb
    return statsd.timing(_get_key(key), value,
                         rate=settings.SENTRY_METRICS_SAMPLE_RATE)
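Neither timing wrapper shows `_get_key`; presumably it just prefixes metric names with a shared namespace. A hypothetical stand-in (the prefix and implementation are assumptions):

METRICS_PREFIX = 'myapp'  # assumed prefix, not taken from the source


def _get_key(key):
    # Namespace every metric under a common prefix.
    return '{}.{}'.format(METRICS_PREFIX, key)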
Code example #59
    def log_interval(self, label):
        now = datetime.datetime.utcnow()
        statsd.timing(self.prefix + label, now - self._timestamp)
        log.info("%s: %s", self.prefix + label, now - self._timestamp)
        self._timestamp = now
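`log_interval` depends on `self.prefix` and `self._timestamp` having been set earlier, so each call reports the time elapsed since the previous one. A minimal sketch of the surrounding class (everything except those two attribute names is an assumption):

import datetime


class IntervalLogger(object):
    # Hypothetical owner of the log_interval method shown above.

    def __init__(self, prefix):
        self.prefix = prefix
        self._timestamp = datetime.datetime.utcnow()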
Code example #60
File: statsd_ping.py  Project: sideffect0/socorro
    def handle(self, *args, **kw):
        statsd.timing(kw.get('key'), time.time())
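`handle` reads a `key` keyword, so the management command presumably declares that option. A sketch of the full command under that assumption (the argument wiring and the statsd import are guesses, not the project's code):

import time

from django.core.management.base import BaseCommand

from django_statsd.clients import statsd  # assumed client import


class Command(BaseCommand):
    help = 'Send the current epoch time to statsd under the given key.'

    def add_arguments(self, parser):
        parser.add_argument('--key', default='statsd.ping',
                            help='metric key to report under')

    def handle(self, *args, **kw):
        # Reports "now" as a timing value; useful as a liveness ping.
        statsd.timing(kw.get('key'), time.time())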