Example #1
File: views.py Project: zalun/FlightDeck
def get_zip(request, hashtag, filename):
    """
    Download zip (it has to be ready)
    """
    if not validator.is_valid("alphanum", hashtag):
        log.warning("[security] Wrong hashtag provided")
        return HttpResponseForbidden("{'error': 'Wrong hashtag'}")
    path = os.path.join(settings.XPI_TARGETDIR, "%s.zip" % hashtag)
    log.info("[zip:%s] Downloading Addon from %s" % (filename, path))

    tend = time.time()
    tkey = _get_zip_cache_key(request, hashtag)
    tqueued = cache.get(tkey)
    if tqueued:
        ttotal = (tend - tqueued) * 1000
        statsd.timing("zip.total", ttotal)
        total = "%dms" % ttotal
    else:
        total = "n/a"

    log.info("[zip:%s] Downloading Add-on (%s)" % (hashtag, total))

    response = serve(request, path, "/", show_indexes=False)
    response["Content-Disposition"] = "attachment; " 'filename="%s.zip"' % filename
    return response
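
A note on the zip.total metric above: it only reports a value if whatever queued the zip job stored a timestamp under the same cache key. That enqueue side is not shown; the following is a minimal sketch of it, assuming a Django cache and a key helper shaped like the _get_zip_cache_key call above (both the helper body and the queue_zip view are hypothetical).

import time

from django.core.cache import cache


def _get_zip_cache_key(request, hashtag):
    # Hypothetical helper: one timing key per session/hashtag pair.
    return 'zip:queued:%s:%s' % (request.session.session_key, hashtag)


def queue_zip(request, hashtag):
    # Record when the job was queued so get_zip() can later report
    # statsd.timing('zip.total', ...) as queue time plus build time.
    cache.set(_get_zip_cache_key(request, hashtag), time.time(), 120)
    # ... enqueue the actual zip-building task here ...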
Example #2
File: tasks.py Project: sguss2024/kitsune
def _rebuild_kb_chunk(data):
    """Re-render a chunk of documents.

    Note: Don't use host components when making redirects to wiki pages; those
    redirects won't be auto-pruned when they're 404s.

    """
    log.info('Rebuilding %s documents.' % len(data))

    pin_this_thread()  # Stick to master.

    messages = []
    start = time.time()
    for pk in data:
        message = None
        try:
            document = Document.objects.get(pk=pk)

            # If we know a redirect link to be broken (i.e. if it looks like a
            # link to a document but the document isn't there), log an error:
            url = document.redirect_url()
            if (url and points_to_document_view(url) and
                    not document.redirect_document()):
                log.warn('Invalid redirect document: %d' % pk)

            html = document.parse_and_calculate_links()
            if document.html != html:
                # We are calling update here so we only update the html
                # column instead of all of them. This bypasses post_save
                # signal handlers like the one that triggers reindexing.
                # See bug 797038 and bug 797352.
                Document.objects.filter(pk=pk).update(html=html)
                statsd.incr('wiki.rebuild_chunk.change')
            else:
                statsd.incr('wiki.rebuild_chunk.nochange')
        except Document.DoesNotExist:
            message = 'Missing document: %d' % pk
        except Revision.DoesNotExist:
            message = 'Missing revision for document: %d' % pk
        except ValidationError as e:
            message = 'ValidationError for %d: %s' % (pk, e.messages[0])
        except SlugCollision:
            message = 'SlugCollision: %d' % pk
        except TitleCollision:
            message = 'TitleCollision: %d' % pk

        if message:
            log.debug(message)
            messages.append(message)
    d = time.time() - start
    statsd.timing('wiki.rebuild_chunk', int(round(d * 1000)))

    if messages:
        subject = ('[%s] Exceptions raised in _rebuild_kb_chunk()' %
                   settings.PLATFORM_NAME)
        mail_admins(subject=subject, message='\n'.join(messages))
    if not transaction.get_connection().in_atomic_block:
        transaction.commit()

    unpin_this_thread()  # Not all tasks need to use the master.
Example #3
def xpi_build_from_model(rev_pk,
                         mod_codes={},
                         att_codes={},
                         hashtag=None,
                         tqueued=None):
    """ Get object and build xpi
    """
    if not hashtag:
        log.critical("No hashtag provided")
        return
    tstart = time.time()
    if tqueued:
        tinqueue = (tstart - tqueued) * 1000
        statsd.timing('xpi.build.queued', tinqueue)
        log.info('[xpi:%s] Addon job picked from queue (%dms)' %
                 (hashtag, tinqueue))
    revision = PackageRevision.objects.get(pk=rev_pk)
    log.debug('[xpi:%s] Building %s' % (hashtag, revision))
    # prepare changed modules and attachments
    modules = []
    attachments = []
    for mod in revision.modules.all():
        if str(mod.pk) in mod_codes:
            mod.code = mod_codes[str(mod.pk)]
            modules.append(mod)
    for att in revision.attachments.all():
        if str(att.pk) in att_codes:
            att.code = att_codes[str(att.pk)]
            attachments.append(att)
    revision.build_xpi(modules=modules,
                       attachments=attachments,
                       hashtag=hashtag,
                       tstart=tstart)
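
How tqueued is populated is not shown in this example. Assuming xpi_build_from_model is dispatched as a Celery task (the queue-timing metric suggests it, but the snippet does not confirm it), the caller would capture the timestamp at submit time, roughly as in this illustrative sketch:

import time


def start_xpi_build(rev_pk, hashtag):
    # Illustrative caller: the queued-at timestamp travels with the task so
    # the worker can emit statsd.timing('xpi.build.queued', ...) for the time
    # the job spent waiting in the queue.
    xpi_build_from_model.delay(rev_pk, hashtag=hashtag, tqueued=time.time())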
Example #4
def get_zip(request, hashtag, filename):
    """
    Download zip (it has to be ready)
    """
    if not validator.is_valid('alphanum', hashtag):
        log.warning('[security] Wrong hashtag provided')
        return HttpResponseForbidden("{'error': 'Wrong hashtag'}")
    path = os.path.join(settings.XPI_TARGETDIR, '%s.zip' % hashtag)
    log.info('[zip:%s] Downloading Addon from %s' % (filename, path))

    tend = time.time()
    tkey = _get_zip_cache_key(request, hashtag)
    tqueued = cache.get(tkey)
    if tqueued:
        ttotal = (tend - tqueued) * 1000
        statsd.timing('zip.total', ttotal)
        total = '%dms' % ttotal
    else:
        total = 'n/a'

    log.info('[zip:%s] Downloading Add-on (%s)' % (hashtag, total))

    response = serve(request, path, '/', show_indexes=False)
    response['Content-Disposition'] = ('attachment; '
                                       'filename="%s.zip"' % filename)
    return response
Example #5
File: views.py Project: KWierso/FlightDeck
def get_download(r, hashtag, filename):
    """
    Download XPI (it has to be ready)
    """
    if not validator.is_valid('alphanum', hashtag):
        log.warning('[security] Wrong hashtag provided')
        return HttpResponseForbidden("{'error': 'Wrong hashtag'}")
    path = os.path.join(settings.XPI_TARGETDIR, '%s.xpi' % hashtag)
    log.info('[xpi:%s] Downloading Addon from %s' % (filename, path))

    tend = time.time()
    tkey = xpi_utils.get_queued_cache_key(hashtag, r)
    tqueued = cache.get(tkey)
    if tqueued:
        ttotal = (tend - tqueued) * 1000
        statsd.timing('xpi.build.total', ttotal)
        total = '%dms' % ttotal
    else:
        total = 'n/a'

    log.info('[xpi:%s] Downloading Add-on (%s)' % (hashtag, total))

    response = serve(r, path, '/', show_indexes=False)
    response['Content-Disposition'] = ('attachment; '
            'filename="%s.xpi"' % filename)
    return response
Example #6
    def handle_submission(self, frame, on_complete):
        """ Handles a submission popped off the dead letter queue.

        Pushes a failure response to XQueue to notify students of the issue.

        """
        submission = frame["submission"]
        submission_id = submission['xqueue_header']['submission_id']
        log.info("Pulled submission #%d off of dead letter queue", submission_id)
        statsd.incr('bux_grader_framework.submissions.dead_lettered')

        # Note time spent in grader
        elapsed_time = int((time.time() - frame["received_time"])*1000.0)
        statsd.timing('bux_grader_framework.total_time_spent', elapsed_time)
        log.info("Submission #%d evaluated in %0.3fms",
                 submission_id, elapsed_time)

        # Check evaluator for extra context to add to fail message.
        hints = ''
        if 'fail_hints' in dir(self.evaluator):
            hints = self.evaluator.fail_hints()

        # Post response to XQueue.
        message = FAIL_RESPONSE.substitute(reason=hints)
        result, success = safe_multi_call(self.xqueue.push_failure,
                                          args=(message, submission),
                                          max_attempts=5,
                                          delay=5)

        # Notifies queue to ack / nack message.
        on_complete(success)
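
FAIL_RESPONSE is used here as a template with a reason placeholder. A minimal stand-in consistent with the substitute(reason=...) call, assuming it is a string.Template (the snippet does not show its definition):

from string import Template

# Hypothetical stand-in; the real template lives elsewhere in the framework.
FAIL_RESPONSE = Template(
    '<div class="error">Your submission could not be graded. $reason</div>'
)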
Example #7
File: tasks.py Project: Jnull/FlightDeck
def xpi_build_from_model(rev_pk, mod_codes={}, att_codes={}, hashtag=None, tqueued=None):
    """ Get object and build xpi
    """
    if not hashtag:
        log.critical("No hashtag provided")
        return
    tstart = time.time()
    if tqueued:
        tinqueue = (tstart - tqueued) * 1000
        statsd.timing('xpi.build.queued', tinqueue)
        log.info('[xpi:%s] Addon job picked from queue (%dms)' % (hashtag, tinqueue))
    revision = PackageRevision.objects.get(pk=rev_pk)
    log.debug('[xpi:%s] Building %s' % (hashtag, revision))
    # prepare changed modules and attachments
    modules = []
    attachments = []
    for mod in revision.modules.all():
        if str(mod.pk) in mod_codes:
            mod.code = mod_codes[str(mod.pk)]
            modules.append(mod)
    for att in revision.attachments.all():
        if str(att.pk) in att_codes:
            att.code = att_codes[str(att.pk)]
            attachments.append(att)
    revision.build_xpi(
            modules=modules,
            attachments=attachments,
            hashtag=hashtag,
            tstart=tstart)
Example #8
    def submit(self, tags):
        for k in list(self.data.keys()):
            statsd.timing(self.prefix + "." + k, self.data.pop(k),
                          tags=tags, sample_rate=sample_rate)

        if settings.DEBUG:
            assert not self.starts, ('Timer(s) %r were started but never '
                                     'stopped' % self.starts)
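
This submit() only makes sense together with the start/stop half of the helper, which is not included above. A plausible sketch of those missing pieces, purely for illustration (the class name and attribute layout are assumptions):

import time


class Timings(object):
    def __init__(self, prefix):
        self.prefix = prefix
        self.starts = {}  # timer name -> start timestamp
        self.data = {}    # timer name -> elapsed milliseconds

    def start(self, name):
        self.starts[name] = time.time()

    def stop(self, name):
        # Move the elapsed time into self.data; submit() pops it from there
        # and sends it with statsd.timing().
        self.data[name] = int((time.time() - self.starts.pop(name)) * 1000)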
Example #9
 def _do_search(self):
     if not self._results_cache:
         qs = self._build_query()
         es = elasticutils.get_es()
         hits = es.search(qs, settings.ES_INDEX, self.type._meta.app_label)
         self._results_cache = results = SearchResults(self.type, hits)
         statsd.timing('search', results.took)
         log.debug('[%s] %s' % (results.took, qs))
     return self._results_cache
Example #10
File: search.py Project: MechanisM/zamboni
 def raw(self):
     qs = self._build_query()
     es = elasticutils.get_es()
     try:
         hits = es.search(qs, settings.ES_INDEX, self.type._meta.db_table)
     except Exception:
         log.error(qs)
         raise
     statsd.timing('search', hits['took'])
     log.debug('[%s] %s' % (hits['took'], qs))
     return hits
Example #11
def zip_source(pk, hashtag, tqueued=None, **kw):
    if not hashtag:
        log.critical("[zip] No hashtag provided")
        return
    tstart = time.time()
    if tqueued:
        tinqueue = (tstart - tqueued) * 1000
        statsd.timing('zip.queued', tinqueue)
        log.info('[zip:%s] Addon job picked from queue (%dms)' % (hashtag, tinqueue))
    log.debug("[zip:%s] Compressing" % pk)
    PackageRevision.objects.get(pk=pk).zip_source(hashtag=hashtag, tstart=tstart)
    log.debug("[zip:%s] Compressed" % pk)
Example #12
File: search.py Project: 21echoes/zamboni
 def raw(self):
     qs = self._build_query()
     es = elasticutils.get_es()
     try:
         with statsd.timer('search.es.timer') as timer:
             hits = es.search(qs, self.index, self.type._meta.db_table)
     except Exception:
         log.error(qs)
         raise
     statsd.timing('search.es.took', hits['took'])
     log.debug('[%s] [%s] %s' % (hits['took'], timer.ms, qs))
     return hits
Example #13
 def raw(self):
     qs = self._build_query()
     es = elasticutils.get_es()
     try:
         with statsd.timer('search.es.timer') as timer:
             hits = es.search(qs, self.index, self.type._meta.db_table)
     except Exception:
         log.error(qs)
         raise
     statsd.timing('search.es.took', hits['took'])
     log.debug('[%s] [%s] %s' % (hits['took'], timer.ms, qs))
     return hits
Example #14
    def on_postrun(self, sender, **kw):
        # sender is the task object. task_id in here.
        pending = self.redis.hincrby(self.pending, sender.name, -1)
        # Clamp pending at 0. Tasks could be coming in before we started
        # tracking.
        if pending < 0:
            self.redis.hset(self.pending, sender.name, 0)
        self.redis.hincrby(self.run, sender.name, 1)

        start = self.redis.hget(self.timer, kw['task_id'])
        if start:
            t = (time.time() - float(start)) * 1000
            statsd.timing('tasks.%s' % sender.name, int(t))
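
on_postrun reads a start time that some earlier hook must have written into the self.timer hash. That counterpart is not shown; assuming it is a task_prerun handler on the same monitor object, it would look roughly like this:

import time


def on_prerun(self, sender, **kw):
    # Intended as a method on the same object: record when this task_id
    # started running so on_postrun can compute the elapsed time and emit
    # statsd.timing('tasks.<task name>', ...).
    self.redis.hset(self.timer, kw['task_id'], time.time())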
Example #15
File: tasks.py Project: bowmasters/kitsune
def migrate_helpfulvotes(start_id, end_id):
    """Transfer helpfulvotes from old to new version."""

    if not waffle.switch_is_active('migrate-helpfulvotes'):
        raise  # Celery emails the failed IDs so we know which to rerun.

    start = time.time()

    pin_this_thread()  # Pin to master

    transaction.enter_transaction_management()
    transaction.managed(True)
    try:
        cursor = connection.cursor()
        cursor.execute("""INSERT INTO `wiki_helpfulvote`
            (revision_id, helpful, created,
            creator_id, anonymous_id, user_agent)
            SELECT COALESCE(
                    (SELECT id FROM `wiki_revision`
                        WHERE `document_id` = wiki_helpfulvoteold.document_id
                            AND `is_approved`=1 AND
                            (`reviewed` <= wiki_helpfulvoteold.created
                                OR `reviewed` IS NULL)
                        ORDER BY CASE WHEN `reviewed`
                            IS NULL THEN 1 ELSE 0 END,
                                  `wiki_revision`.`created` DESC LIMIT 1),
                    (SELECT id FROM `wiki_revision`
                        WHERE `document_id` = wiki_helpfulvoteold.document_id
                            AND (`reviewed` <= wiki_helpfulvoteold.created
                                OR `reviewed` IS NULL)
                        ORDER BY CASE WHEN `reviewed`
                            IS NULL THEN 1 ELSE 0 END,
                                `wiki_revision`.`created`  DESC LIMIT 1),
                    (SELECT id FROM `wiki_revision`
                        WHERE `document_id` = wiki_helpfulvoteold.document_id
                        ORDER BY `created` ASC LIMIT 1)),
                helpful, created, creator_id, anonymous_id, user_agent
            FROM `wiki_helpfulvoteold` WHERE id >= %s AND id < %s""",
            [start_id, end_id])
        transaction.commit()
    except:
        transaction.rollback()
        raise

    transaction.leave_transaction_management()

    unpin_this_thread()

    d = time.time() - start
    statsd.timing('wiki.migrate_helpfulvotes', int(round(d * 1000)))
Example #16
def _rebuild_kb_chunk(data, **kwargs):
    """Re-render a chunk of documents.

    Note: Don't use host components when making redirects to wiki pages; those
    redirects won't be auto-pruned when they're 404s.

    """
    log.info('Rebuilding %s documents.' % len(data))

    pin_this_thread()  # Stick to master.

    messages = []
    start = time.time()
    for pk in data:
        message = None
        try:
            document = Document.objects.get(pk=pk)

            # If we know a redirect link to be broken (i.e. if it looks like a
            # link to a document but the document isn't there), log an error:
            url = document.redirect_url()
            if (url and points_to_document_view(url)
                    and not document.redirect_document()):
                log.error('Invalid redirect document: %d' % pk)

            document.html = document.current_revision.content_parsed
            document.save()
        except Document.DoesNotExist:
            message = 'Missing document: %d' % pk
        except ValidationError as e:
            message = 'ValidationError for %d: %s' % (pk, e.messages[0])
        except SlugCollision:
            message = 'SlugCollision: %d' % pk
        except TitleCollision:
            message = 'TitleCollision: %d' % pk

        if message:
            log.debug(message)
            messages.append(message)
    d = time.time() - start
    statsd.timing('wiki.rebuild_chunk', int(round(d * 1000)))

    if messages:
        subject = ('[%s] Exceptions raised in _rebuild_kb_chunk()' %
                   settings.PLATFORM_NAME)
        mail_admins(subject=subject, message='\n'.join(messages))
    transaction.commit_unless_managed()

    unpin_this_thread()  # Not all tasks need to use the master.
Example #17
File: tasks.py Project: fox2mike/kitsune
def _rebuild_kb_chunk(data, **kwargs):
    """Re-render a chunk of documents.

    Note: Don't use host components when making redirects to wiki pages; those
    redirects won't be auto-pruned when they're 404s.

    """
    log.info('Rebuilding %s documents.' % len(data))

    pin_this_thread()  # Stick to master.

    messages = []
    start = time.time()
    for pk in data:
        message = None
        try:
            document = Document.objects.get(pk=pk)

            # If we know a redirect link to be broken (i.e. if it looks like a
            # link to a document but the document isn't there), delete it:
            url = document.redirect_url()
            if (url and points_to_document_view(url) and
                not document.redirect_document()):
                document.delete()
            else:
                document.html = document.current_revision.content_parsed
                document.save()
        except Document.DoesNotExist:
            message = 'Missing document: %d' % pk
        except ValidationError as e:
            message = 'ValidationError for %d: %s' % (pk, e.messages[0])
        except SlugCollision:
            message = 'SlugCollision: %d' % pk
        except TitleCollision:
            message = 'TitleCollision: %d' % pk

        if message:
            log.debug(message)
            messages.append(message)
    d = time.time() - start
    statsd.timing('wiki.rebuild_chunk', int(round(d * 1000)))

    if messages:
        subject = ('[%s] Exceptions raised in _rebuild_kb_chunk()' %
                   settings.PLATFORM_NAME)
        mail_admins(subject=subject, message='\n'.join(messages))
    transaction.commit_unless_managed()

    unpin_this_thread()  # Not all tasks need to use the master.
Example #18
 def raw(self):
     """
     Builds query and passes to ElasticSearch, then returns the raw format
     returned.
     """
     qs = self._build_query()
     es = get_es()
     try:
         hits = es.search(qs, settings.ES_INDEX, self.type._meta.db_table)
     except Exception:
         log.error(qs)
         raise
     if statsd:
         statsd.timing("search", hits["took"])
     log.debug("[%s] %s" % (hits["took"], qs))
     return hits
Example #19
def check_machine(jenkins, client, machine, error_counts):
    try:
        server = client.servers.get(machine.external_id)
    except:
        print "Unable to get server detail, will retry"
        traceback.print_exc()
        return

    if server.status == 'ACTIVE':
        ip = utils.get_public_ip(server)
        if not ip and 'os-floating-ips' in utils.get_extensions(client):
            utils.add_public_ip(server)
            ip = utils.get_public_ip(server)
        if not ip:
            raise Exception("Unable to find public ip of server")

        machine.ip = ip
        print "Machine %s is running, testing ssh" % machine.id
        if utils.ssh_connect(ip, 'jenkins'):
            if statsd:
                dt = int((time.time() - machine.state_time) * 1000)
                key = 'devstack.launch.%s' % machine.base_image.provider.name
                statsd.timing(key, dt)
                statsd.incr(key)
            print "Adding machine %s to Jenkins" % machine.id
            create_jenkins_node(jenkins, machine)
            print "Machine %s is ready" % machine.id
            machine.state = vmdatabase.READY
            return
    elif not server.status.startswith('BUILD'):
        count = error_counts.get(machine.id, 0)
        count += 1
        error_counts[machine.id] = count
        print "Machine %s is in error %s (%s/5)" % (machine.id,
                                                    server.status,
                                                    count)
        if count >= 5:
            if statsd:
                statsd.incr('devstack.error.%s' %
                            machine.base_image.provider.name)
            raise Exception("Too many errors querying machine %s" % machine.id)
    else:
        if time.time() - machine.state_time >= ABANDON_TIMEOUT:
            if statsd:
                statsd.incr('devstack.timeout.%s' %
                            machine.base_image.provider.name)
            raise Exception("Waited too long for machine %s" % machine.id)
Example #20
def check_machine(jenkins, client, machine, error_counts, credentials_id):
    try:
        server = client.servers.get(machine.external_id)
    except:
        print "Unable to get server detail, will retry"
        traceback.print_exc()
        return

    if server.status == 'ACTIVE':
        ip = utils.get_public_ip(server)
        if not ip and 'os-floating-ips' in utils.get_extensions(client):
            utils.add_public_ip(server)
            ip = utils.get_public_ip(server)
        if not ip:
            raise Exception("Unable to find public ip of server")

        machine.ip = ip
        print "Machine %s is running, testing ssh" % machine.id
        if utils.ssh_connect(ip, 'jenkins'):
            if statsd:
                dt = int((time.time() - machine.state_time) * 1000)
                key = 'devstack.launch.%s' % machine.base_image.provider.name
                statsd.timing(key, dt)
                statsd.incr(key)
            print "Adding machine %s to Jenkins" % machine.id
            create_jenkins_node(jenkins, machine, credentials_id)
            print "Machine %s is ready" % machine.id
            machine.state = vmdatabase.READY
            utils.log.debug("Online ID: %s" % machine.id)
            return
    elif not server.status.startswith('BUILD'):
        count = error_counts.get(machine.id, 0)
        count += 1
        error_counts[machine.id] = count
        print "Machine %s is in error %s (%s/5)" % (machine.id, server.status,
                                                    count)
        if count >= 5:
            if statsd:
                statsd.incr('devstack.error.%s' %
                            machine.base_image.provider.name)
            raise Exception("Too many errors querying machine %s" % machine.id)
    else:
        if time.time() - machine.state_time >= ABANDON_TIMEOUT:
            if statsd:
                statsd.incr('devstack.timeout.%s' %
                            machine.base_image.provider.name)
            raise Exception("Waited too long for machine %s" % machine.id)
Example #21
    def handle_submission(self, frame, on_complete):
        """ Handles a submission popped off the internal work queue.

        Invokes ``self.evaluator.evaluate()`` to generate a response.

        """
        submission = frame["submission"]
        submission_id = submission['xqueue_header']['submission_id']
        success = True
        log.info("Evaluating submission #%d", submission_id)

        with statsd.timer('bux_grader_framework.evaluate'):
            result, success = safe_multi_call(self.evaluator.evaluate,
                                              args=(submission,),
                                              max_attempts=self._eval_max_attempts,
                                              delay=self._eval_retry_delay)

        # Note time spent in grader (between /xqueue/get_submission/ and
        # /xqueue/put_result/)
        elapsed_time = int((time.time() - frame["received_time"])*1000.0)
        statsd.timing('bux_grader_framework.total_time_spent', elapsed_time)
        log.info("Submission #%d evaluated in %0.3fms",
                 submission_id, elapsed_time)

        # Post response to XQueue
        if not success or not result:
            reason = "<pre><code>Submission could not be evaluated in %d attempts. Please try again later.</code></pre>" % (
                     self._eval_max_attempts)
            message = FAIL_RESPONSE.substitute(reason=reason)
            result, success = safe_multi_call(self.xqueue.push_failure,
                                              args=(message, submission),
                                              max_attempts=5,
                                              delay=5)

        else:
            result, success = safe_multi_call(self.xqueue.put_result,
                                              args=(submission, result),
                                              max_attempts=5,
                                              delay=5)

        if success:
            statsd.incr('bux_grader_framework.submissions.success')
        else:
            statsd.incr('bux_grader_framework.submissions.failure')

        # Notifies queue to ack / nack message
        on_complete(success)
Example #22
 def raw(self):
     """
     Builds query and passes to ElasticSearch, then returns the raw format
     returned.
     """
     qs = self._build_query()
     es = get_es()
     index = (settings.ES_INDEXES.get(self.type)
              or settings.ES_INDEXES['default'])
     try:
         hits = es.search(qs, index, self.type._meta.db_table)
     except Exception:
         log.error(qs)
         raise
     if statsd:
         statsd.timing('search', hits['took'])
     log.debug('[%s] %s' % (hits['took'], qs))
     return hits
Example #23
 def raw(self):
     """
     Builds query and passes to ElasticSearch, then returns the raw format
     returned.
     """
     qs = self._build_query()
     es = get_es()
     index = (settings.ES_INDEXES.get(self.type)
              or settings.ES_INDEXES['default'])
     try:
         hits = es.search(qs, index, self.type._meta.db_table)
     except Exception:
         log.error(qs)
         raise
     if statsd:
         statsd.timing('search', hits['took'])
     log.debug('[%s] %s' % (hits['took'], qs))
     return hits
Example #24
def main():
    db = vmdatabase.VMDatabase()

    machine = db.getMachineByJenkinsName(NODE_NAME)
    if machine.state != vmdatabase.HOLD:
        machine.state = vmdatabase.DELETE

    try:
        utils.update_stats(machine.base_image.provider)

        if UPSTREAM_BUILD_URL:
            fd = urllib.urlopen(UPSTREAM_BUILD_URL+'api/json')
            data = json.load(fd)
            result = data['result']
            if statsd and result == 'SUCCESS':
                dt = int(data['duration'])

                key = 'devstack.job.%s' % UPSTREAM_JOB_NAME
                statsd.timing(key+'.runtime', dt)
                statsd.incr(key+'.builds')

                key += '.%s' % UPSTREAM_BRANCH
                statsd.timing(key+'.runtime', dt)
                statsd.incr(key+'.builds')

                key += '.%s' % machine.base_image.provider.name
                statsd.timing(key+'.runtime', dt)
                statsd.incr(key+'.builds')
    except:
        print "Error getting build information"
        traceback.print_exc()
Example #25
파일: middleware.py 프로젝트: jokar/minion
 def _record_time(self, request):
     if hasattr(request, '_start_time'):
         ms = int((time.time() - request._start_time) * 1000)
         data = dict(module=request._view_module, name=request._view_name,
                     method=request.method)
         statsd.timing('view.{module}.{name}.{method}'.format(**data), ms)
         statsd.timing('view.{module}.{method}'.format(**data), ms)
         statsd.timing('view.{method}'.format(**data), ms)
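
_record_time assumes that earlier middleware hooks stashed a start time and view identifiers on the request. A sketch of those hooks, assuming the method lives on an old-style Django middleware class (the hook bodies below are illustrative, not taken from the project):

import time


class TimingMiddleware(object):
    # Hypothetical class; _record_time() above would be a third method on it.

    def process_view(self, request, view_func, view_args, view_kwargs):
        # Stash what _record_time() needs: when the view started and how to
        # name it in the statsd keys.
        request._start_time = time.time()
        request._view_module = view_func.__module__
        request._view_name = view_func.__name__

    def process_response(self, request, response):
        self._record_time(request)
        return response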
Example #26
    def raw(self):
        """
        Builds query and passes to ElasticSearch, then returns the raw format
        returned.
        """
        qs = self._build_query()
        es = get_es()
        index = (settings.ES_INDEXES.get(self.type)
                 or settings.ES_INDEXES['default'])

        retries = getattr(settings, 'ES_RETRY', 0)
        retry_wait = getattr(settings, "ES_RETRY_INTERVAL", 0)

        try:
            args = [qs, index, self.type._meta.db_table]
            hits = self.retry_on_timeout(es.search, args, retries, retry_wait)
        except Exception:
            log.error(qs)
            raise
        if statsd:
            statsd.timing('search', hits['took'])
        log.debug('[%s] %s' % (hits['took'], qs))
        return hits
Example #27
    def deleteNode(self, session, node):
        # Delete a node
        start_time = time.time()
        node.state = nodedb.DELETE
        self.updateStats(session, node.provider_name)
        provider = self.config.providers[node.provider_name]
        target = self.config.targets[node.target_name]
        manager = self.getProviderManager(provider)

        if target.jenkins_url:
            jenkins = self.getJenkinsManager(target)
            jenkins_name = node.nodename
            if jenkins.nodeExists(jenkins_name):
                jenkins.deleteNode(jenkins_name)
            self.log.info("Deleted jenkins node id: %s" % node.id)

        if node.external_id:
            try:
                server = manager.getServer(node.external_id)
                self.log.debug('Deleting server %s for node id: %s' %
                               (node.external_id,
                                node.id))
                manager.cleanupServer(server['id'])
            except provider_manager.NotFound:
                pass

        node.delete()
        self.log.info("Deleted node id: %s" % node.id)

        if statsd:
            dt = int((time.time() - start_time) * 1000)
            key = 'nodepool.delete.%s.%s.%s' % (node.image_name,
                                                node.provider_name,
                                                node.target_name)
            statsd.timing(key, dt)
            statsd.incr(key)
        self.updateStats(session, node.provider_name)
Example #28
def main():
    db = vmdatabase.VMDatabase()

    try:
        machine = db.getMachineByJenkinsName(NODE_NAME)
    except Exception:
        utils.log.debug("Unable to find node: %s" % NODE_NAME)
        return

    if machine.state != vmdatabase.HOLD:
        utils.log.debug("Set deleted ID: %s old state: %s build: %s" % (
                machine.id, machine.state, BUILD_URL))
        machine.state = vmdatabase.DELETE
    else:
        utils.log.debug("Hold ID: %s old state: %s build: %s" % (
                machine.id, machine.state, BUILD_URL))

    try:
        utils.update_stats(machine.base_image.provider)

        if UPSTREAM_BUILD_URL:
            fd = urllib.urlopen(UPSTREAM_BUILD_URL + 'api/json')
            data = json.load(fd)
            result = data['result']
            if statsd and result == 'SUCCESS':
                dt = int(data['duration'])

                key = 'devstack.job.%s' % UPSTREAM_JOB_NAME
                statsd.timing(key + '.runtime', dt)
                statsd.incr(key + '.builds')

                key += '.%s' % UPSTREAM_BRANCH
                statsd.timing(key + '.runtime', dt)
                statsd.incr(key + '.builds')

                key += '.%s' % machine.base_image.provider.name
                statsd.timing(key + '.runtime', dt)
                statsd.incr(key + '.builds')
    except:
        print "Error getting build information"
        traceback.print_exc()
Example #29
File: views.py Project: KWierso/FlightDeck
        if os.path.exists('%s.json' % base):
            with open('%s.json' % base) as error_file:
                error_json = simplejson.loads(error_file.read())
            os.remove('%s.json' % base)
            if error_json['status'] == 'error':
                log.warning('Error creating xpi (%s)'
                        % error_json['message'] )
                return HttpResponseNotFound(error_json['message'])

        log.debug('[xpi:%s] Add-on not yet created: %s' % (hashtag, str(err)))
        return HttpResponse('')

    tend = time.time()
    tread = (tend - tfile) * 1000
    log.info('[xpi:%s] Add-on file found and read (%dms)' % (hashtag, tread))
    statsd.timing('xpi.build.fileread', tread)

    # Clean up
    pkg_json = '%s.json' % base
    if os.path.exists(pkg_json):
        try:
            os.remove(pkg_json)
        except OSError, e:
            log.debug('Error trying to cleanup (%s): %s' % (pkg_json, e))

    tkey = xpi_utils.get_queued_cache_key(hashtag, r)
    tqueued = cache.get(tkey)
    if tqueued:
        ttotal = (tend - tqueued) * 1000
        statsd.timing('xpi.build.total', ttotal)
        total = '%dms' % ttotal
Example #30
 def execute(self):
     with statsd.timer('search.execute'):
         results = super(Search, self).execute()
         statsd.timing('search.took', results.took)
         return results
Example #31
                   hashtag)
        log.critical("[xpi:%s] Failed to copy xpi.\n%s" % (hashtag, str(err)))
        shutil.rmtree(temp_dir)
        return response

    shutil.rmtree(temp_dir)

    ret = [xpi_targetfilename]
    ret.extend(response)

    t3 = time.time()
    copy_xpi_time = (t3 - t2) * 1000
    build_time = (t2 - t1) * 1000
    preparation_time = ((t1 - tstart) * 1000) if tstart else 0

    statsd.timing('xpi.build.prep', preparation_time)
    statsd.timing('xpi.build.build', build_time)
    statsd.timing('xpi.build.copyresult', copy_xpi_time)
    log.info(
        '[xpi:%s] Created xpi: %s (prep time: %dms) (build time: %dms) '
        '(copy xpi time: %dms)' %
        (hashtag, xpi_targetpath, preparation_time, build_time, copy_xpi_time))

    info_write(info_targetpath, 'success', response[0], hashtag)

    return response


def remove(path):
    " clear directory "
    log.debug("Removing directory (%s)" % path)
Example #32
        with socket_timeout(10):
            feeddata = opener.open(request, urlencode(paypal_data)).read()
    except Exception, error:
        paypal_log.error('HTTP Error: %s' % error)
        raise

    response = dict(urlparse.parse_qsl(feeddata))

    if 'error(0).errorId' in response:
        error = errors.get(response['error(0).errorId'], PaypalError)
        paypal_log.error('Paypal Error: %s' % response['error(0).message'])
        raise error(response['error(0).message'])

    end = time.time() - start
    paypal_log.info('Paypal got key: %s (%.2fs)' % (response['payKey'], end))
    statsd.timing('paypal.paykey.retrieval', (end * 1000))
    return response['payKey']


def check_paypal_id(name):
    """
    Use the button API to check if name is a valid Paypal id.

    Returns bool(valid), str(msg).  msg will be None if there wasn't an error.
    """
    d = dict(version=settings.PAYPAL_API_VERSION,
             buttoncode='cleartext',
             buttontype='donate',
             method='BMCreateButton',
             l_buttonvar0='business=%s' % name)
    # TODO(andym): remove this once this is all live and settled down.
Example #33
    results_ = jingo.render(request, template,
        {'num_results': num_results, 'results': results, 'q': cleaned['q'],
         'pages': pages, 'w': cleaned['w'],
         'search_form': search_form, 'lang_name': lang_name, })
    results_['Cache-Control'] = 'max-age=%s' % \
                                (settings.SEARCH_CACHE_PERIOD * 60)
    results_['Expires'] = (datetime.utcnow() +
                           timedelta(minutes=settings.SEARCH_CACHE_PERIOD)) \
                           .strftime(expires_fmt)
    results_.set_cookie(settings.LAST_SEARCH_COOKIE, urlquote(cleaned['q']),
                        max_age=3600, secure=False, httponly=False)

    # Send timing information for each engine. Bug 723930.
    # TODO: Remove this once Sphinx is gone.
    dt = (time.time() - start) * 1000
    statsd.timing('search.elastic.unified.view', int(dt))

    return results_


def search_with_es(request, template=None):
    """ES-specific search view"""

    engine = 'elastic'

    # Time ES and Sphinx separate. See bug 723930.
    # TODO: Remove this once Sphinx is gone.
    start = time.time()

    # JSON-specific variables
    is_json = (request.GET.get('format') == 'json')
Example #34
 def raw(self):
     hits = super(S, self).raw()
     if statsd:
         statsd.timing("search", hits["took"])
     return hits
Example #35
 def raw(self):
     with statsd.timer('search.raw'):
         hits = super(S, self).raw()
         statsd.timing('search.took', hits['took'])
         return hits
Example #36
        try:
            p = res.groups()[1]
            if 'localepicker.properties' not in p:
                p = os.path.join(p, 'localepicker.properties')
            res = zip.extract_from_manifest(p)
        except (zipfile.BadZipfile, IOError), e:
            log.error('Error unzipping: %s, %s in file: %s' % (p, e, self.pk))
            return ''
        except (ValueError, KeyError), e:
            log.error('No file named: %s in file: %s' % (e, self.pk))
            return ''

        end = time.time() - start
        log.info('Extracted localepicker file: %s in %.2fs' % (self.pk, end))
        statsd.timing('files.extract.localepicker', (end * 1000))
        return res

    def watermark_install_rdf(self, user):
        """
        Reads the install_rdf out of an addon and writes the user information
        into it.
        """
        inzip = SafeUnzip(self.file_path)
        inzip.is_valid()

        try:
            install = inzip.extract_path('install.rdf')
            data = RDF(install)
            data.set(user.email, self.version.addon.get_watermark_hash(user))
        except Exception, e:
Example #37
 def raw(self):
     hits = super(S, self).raw()
     if statsd:
         statsd.timing('search', hits['took'])
     return hits
Example #38
File: views.py Project: tgavankar/kitsune
            "results": results,
            "q": cleaned["q"],
            "pages": pages,
            "w": cleaned["w"],
            "search_form": search_form,
            "lang_name": lang_name,
        },
    )
    results_["Cache-Control"] = "max-age=%s" % (settings.SEARCH_CACHE_PERIOD * 60)
    results_["Expires"] = (datetime.utcnow() + timedelta(minutes=settings.SEARCH_CACHE_PERIOD)).strftime(expires_fmt)
    results_.set_cookie(settings.LAST_SEARCH_COOKIE, urlquote(cleaned["q"]), max_age=3600, secure=False, httponly=False)

    # Send timing information for each engine. Bug 723930.
    # TODO: Remove this once Sphinx is gone.
    dt = (time.time() - start) * 1000
    statsd.timing("search.%s.view" % engine, int(dt))

    return results_


def search_with_sphinx(request, template=None):
    """Sphinx-specific search view"""

    # Time ES and Sphinx separate. See bug 723930.
    # TODO: Remove this once Sphinx is gone.
    start = time.time()

    # JSON-specific variables
    is_json = request.GET.get("format") == "json"
    callback = request.GET.get("callback", "").strip()
    mimetype = "application/x-javascript" if callback else "application/json"
Example #39
File: models.py Project: brijmohan/zamboni
        try:
            p = res.groups()[1]
            if 'localepicker.properties' not in p:
                p = os.path.join(p, 'localepicker.properties')
            res = zip.extract_from_manifest(p)
        except (zipfile.BadZipfile, IOError), e:
            log.error('Error unzipping: %s, %s in file: %s' % (p, e, self.pk))
            return ''
        except (ValueError, KeyError), e:
            log.error('No file named: %s in file: %s' % (e, self.pk))
            return ''

        end = time.time() - start
        log.info('Extracted localepicker file: %s in %.2fs' %
                 (self.pk, end))
        statsd.timing('files.extract.localepicker', (end * 1000))
        return res

    def watermark_install_rdf(self, user):
        """
        Reads the install_rdf out of an addon and writes the user information
        into it.
        """
        inzip = SafeUnzip(self.file_path)
        inzip.is_valid()

        try:
            install = inzip.extract_path('install.rdf')
            data = RDF(install)
            data.set(user.email, self.version.addon.get_watermark_hash(user))
        except Exception, e:
Example #40
    except Exception, err:
        if os.path.exists('%s.json' % base):
            with open('%s.json' % base) as error_file:
                error_json = simplejson.loads(error_file.read())
            os.remove('%s.json' % base)
            if error_json['status'] == 'error':
                log.warning('Error creating xpi (%s)' % error_json['message'])
                return HttpResponseNotFound(error_json['message'])

        log.debug('[xpi:%s] Add-on not yet created: %s' % (hashtag, str(err)))
        return HttpResponse('')

    tend = time.time()
    tread = (tend - tfile) * 1000
    log.info('[xpi:%s] Add-on file found and read (%dms)' % (hashtag, tread))
    statsd.timing('xpi.build.fileread', tread)

    # Clean up
    pkg_json = '%s.json' % base
    if os.path.exists(pkg_json):
        try:
            os.remove(pkg_json)
        except OSError, e:
            log.debug('Error trying to cleanup (%s): %s' % (pkg_json, e))

    tkey = xpi_utils.get_queued_cache_key(hashtag, r)
    tqueued = cache.get(tkey)
    if tqueued:
        ttotal = (tend - tqueued) * 1000
        statsd.timing('xpi.build.total', ttotal)
        total = '%dms' % ttotal
Example #41
File: views.py Project: klrmn/kitsune
    results_ = jingo.render(request, template,
        {'num_results': num_results, 'results': results, 'q': cleaned['q'],
         'pages': pages, 'w': cleaned['w'],
         'search_form': search_form, 'lang_name': lang_name, })
    results_['Cache-Control'] = 'max-age=%s' % \
                                (settings.SEARCH_CACHE_PERIOD * 60)
    results_['Expires'] = (datetime.utcnow() +
                           timedelta(minutes=settings.SEARCH_CACHE_PERIOD)) \
                           .strftime(expires_fmt)
    results_.set_cookie(settings.LAST_SEARCH_COOKIE, urlquote(cleaned['q']),
                        max_age=3600, secure=False, httponly=False)

    # TODO: Remove this once elastic search bucketed is gone
    dt = (time.time() - start) * 1000
    statsd.timing('search.elastic.unified.view', int(dt))

    return results_


@mobile_template('search/{mobile/}results.html')
def search(request, template=None):
    """ES-specific search view"""

    if (waffle.flag_is_active(request, 'esunified') or
        request.GET.get('esunified')):
        return search_with_es_unified(request, template)

    start = time.time()

    # JSON-specific variables