Example #1
def report_to_bigquery():
    """Flush all pending events of a certain type to BigQuery."""
    # Schedule multiple flush jobs per minute for some events.
    if request.method == 'GET':
        tasks = []
        for delay in xrange(0, 60, 5):
            tasks.append(taskqueue.Task(method='POST', url=request.path,
                                        countdown=delay,
                                        params={'event_name': 'content_vote_v1'}))
        tasks.append(taskqueue.Task(method='POST', url=request.path))
        taskqueue.Queue(config.BIGQUERY_CRON_QUEUE_NAME).add(tasks)
        return ''
    # Retrieve pending events from pull queue.
    try:
        q = taskqueue.Queue(config.BIGQUERY_QUEUE_NAME)
        tasks = q.lease_tasks_by_tag(config.BIGQUERY_LEASE_TIME.total_seconds(),
                                     config.BIGQUERY_LEASE_AMOUNT,
                                     tag=flask_extras.get_parameter('event_name'))
        logging.debug('Leased %d event(s) from %s', len(tasks), config.BIGQUERY_QUEUE_NAME)
    except taskqueue.TransientError:
        logging.warning('Could not lease events due to transient error')
        return '', 503
    if not tasks:
        return ''
    # Insert the events into BigQuery.
    table_id = tasks[0].tag
    rows = [json.loads(t.payload) for t in tasks]
    bigquery_client.insert_rows(table_id, rows)
    # Delete the tasks now that we're done with them.
    q.delete_tasks(tasks)
    return ''
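The flush handler above leases tasks by tag and decodes each payload as a JSON row, so the producing side presumably buffers events as tagged PULL tasks. A minimal sketch of such a producer, assuming a hypothetical queue_event() helper and the same config constants:

def queue_event(event_name, row):
    """Hypothetical producer: buffer one event for report_to_bigquery().

    Assumes events sit in config.BIGQUERY_QUEUE_NAME as PULL tasks, tagged with
    the event name and carrying the BigQuery row as a JSON payload, which is
    how the flush handler above leases and decodes them.
    """
    taskqueue.Queue(config.BIGQUERY_QUEUE_NAME).add(
        taskqueue.Task(method='PULL', tag=event_name, payload=json.dumps(row)))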
Example #2
 def get(self):
     # Purge entire queue
     q = taskqueue.Queue('updatequeue')
     q.purge()
     q = taskqueue.Queue('default')
     q.purge()
     self.redirect(webapp2.uri_for('home'))
Example #3
  def __notify_feature_subscribers_of_changes(self, is_update):
    """Asynchronously notifies subscribers of new features and of property
       changes to features by posting to a task queue."""
    # Diff values to see what properties have changed.
    changed_props = []
    for prop_name, prop in self.properties().iteritems():
      new_val = getattr(self, prop_name, None)
      old_val = getattr(self, '_old_' + prop_name, None)
      if new_val != old_val:
        changed_props.append({
            'prop_name': prop_name, 'old_val': old_val, 'new_val': new_val})

    payload = json.dumps({
      'changes': changed_props,
      'is_update': is_update,
      'feature': self.format_for_template(version=2)
    })

    # Create task to email subscribers.
    queue = taskqueue.Queue()  # name='emailer'
    task = taskqueue.Task(method="POST", url='/tasks/email-subscribers',
        target='notifier', payload=payload)
    queue.add(task)

    # Create task to send push notifications
    queue = taskqueue.Queue()
    task = taskqueue.Task(method="POST", url='/tasks/send_notifications',
        target='notifier', payload=payload)
    queue.add(task)
Example #4
def prep(search_params=None, **kw):
    if search_params is None:
        return
    if 'filters' not in search_params:
        return
    filters = search_params['filters']
    if len(filters) != 1:
        return
    filter = filters[0]
    if 'name' not in filter:
        return
    if filter['name'] != 'scholar_id':
        return
    if filter['op'] != '==':
        return
    scholar_id = filter['val']
    publication = Publication.query.filter_by(scholar_id=scholar_id).first()
    if (publication is None or publication.retrieved_at is None):
        queue = taskqueue.Queue('publication-fetchers')
        task = taskqueue.Task(url='/publication/crawl',
                              params={'scholar_id': scholar_id})
        queue.add(task)
        raise ProcessingException(description='Try later.', code=202)
    elif (datetime.now() - publication.retrieved_at).days > 365:
        queue = taskqueue.Queue('publication-fetchers')
        task = taskqueue.Task(url='/publication/crawl',
                              params={'scholar_id': scholar_id})
        queue.add(task)
Example #5
    def testAddingTasks(self):
        """Tests for adding tasks."""

        taskqueue.add(url='/run')
        taskqueue.Queue('test').add(taskqueue.Task(url='/foo'))

        self.assertRaises(taskqueue.UnknownQueueError,
                          taskqueue.Queue('unknown').add,
                          taskqueue.Task(url='/foo'))
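The test above assumes the task queue stub is active. A minimal sketch of a fixture that would make it pass, assuming a queue.yaml next to the test that declares the 'default' and 'test' queues (any other queue name makes the stub raise UnknownQueueError):

    def setUp(self):
        """Hypothetical fixture: activate the taskqueue stub for the test above."""
        from google.appengine.ext import testbed
        self.testbed = testbed.Testbed()
        self.testbed.activate()
        # root_path points at the directory containing queue.yaml.
        self.testbed.init_taskqueue_stub(root_path='.')

    def tearDown(self):
        self.testbed.deactivate()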
Example #6
    def post(self):
        """In this task, we should watch an episode entity
            and check the task queue for the newly completed
            episode. Then, create a new task to send the email."""

        # TODO 1: in the first run, we should defer it for later, because if
        # there were no tasks pending, it will be instantly triggered :(

        keyEpisode = self.request.get('keyEpisode')
        submitter = self.request.get('submitter')
        # get the episode from the database

        # check if this episode has videos. (Trick for the earliest version)

        # TODO: check if all of this episode's videos have been added or failed.
        # For now, just check whether the video queue is empty.
        queue = taskqueue.Queue('newVideo')  # the queue holding pending newVideo tasks
        queueStats = queue.fetch_statistics()  # returns QueueStatistics

        if queueStats.tasks == 0:
            logging.debug("There are no pending video tasks")
            # so send an email

            sender = "Capitulizer Mighty Bot <*****@*****.**>"
            to = submitter  # send them to the submitter TODO
            cc = ""  # empty string for now so sending doesn't fail
            #bcc = "*****@*****.**"  # and send a copy to the admin
            bcc = ""  # empty for now; the task params below reference bcc
            subject = "New Episode Available to Watch"
            # TODO 1: create the notification body
            body = """<html><h1>Notification Email Body</h1>
            http://capitulizer.appspot.com/</html>"""

            # New version, creating a new task
            logging.debug("Creating task sendEmail")
            queue = taskqueue.Queue('sendEmail')
            task = taskqueue.Task(url='/tasks/sendEmail',
                                  params={
                                      'sender': sender,
                                      'subject': subject,
                                      'body': body,
                                      'cc': cc,
                                      'bcc': bcc,
                                      'to': to
                                  })
            queue.add(task)

            logging.debug("Creating task newPost")
            queue = taskqueue.Queue('newPost')
            task = taskqueue.Task(url='/tasks/newPost',
                                  params={'keyEpisode': keyEpisode})
            queue.add(task)

        else:
            logging.debug("There are still some video tasks to complete")
            # so retry
            raise Exception("Still new videos waiting. Maybe next time...")
Example #7
 def get(self):
     self.pull = taskqueue.Queue('recordio-queue')
     tag = self.request.get("taskqueue")
     max_tasks_to_lease = MAX_RPC_SIZE / MAX_TASKQUEUE_BATCH_SIZE
     if tag:
         batch = []
         batch_size = 0
         success = True
         while True:
             tasks = self.pull.lease_tasks_by_tag(LEASE_TIME_PER_BATCH,
                                                  max_tasks_to_lease,
                                                  tag=tag)
             for task in tasks:
                 if task.was_deleted:
                     # Should never happen
                     continue
                 next_key_values = marshal.loads(task.payload)
                 next_size = sum(
                     [recordio_chunks.size(x) for x in next_key_values])
                 if next_size + batch_size >= MAX_WRITE_BATCH_SIZE:
                     success = success and self.commit_batch(tag, batch)
                     batch = [(task, next_key_values)]
                     batch_size = next_size
                 else:
                     batch_size += next_size
                     batch.append((task, next_key_values))
             if len(tasks) != max_tasks_to_lease:
                 break
         success = success and self.commit_batch(tag, batch)
         if not success:
             raise Exception("RecordIO not completed")
     else:
         pending_tasks = self.pull.lease_tasks(0, max_tasks_to_lease)
         seen = set([])
         for task in pending_tasks:
             tag = task.tag
             if tag in seen:
                 continue
             seen.add(tag)
             try:
                 taskqueue.Queue('recordio-writer').add(
                     RecordIOWriter.create_task_(tag, in_past=True))
                 self.response.out.write("Scheduled write for: %s<br>" %
                                         tag)
             except (taskqueue.DuplicateTaskNameError,
                     taskqueue.TombstonedTaskError,
                     taskqueue.TaskAlreadyExistsError):
                 self.response.out.write(
                     "Already pending write for: %s<br>" % tag)
         if len(pending_tasks) == max_tasks_to_lease:
             self.response.out.write(
                 "<script type=text/javascript>window.setTimeout(function() {"
                 "document.location.reload();"
                 "}, 5000);</script>")
Example #8
def _insert_tasks(tasks,
                  queue,
                  transactional=False,
                  retry_transient_errors=True,
                  retry_delay=RETRY_SLEEP_SECS):
    """Insert a batch of tasks into the specified queue. If an error occurs
    during insertion, split the batch and retry until they are successfully
    inserted. Return the number of successfully inserted tasks.
    """
    from google.appengine.api import taskqueue

    if not tasks:
        return 0

    try:
        taskqueue.Queue(name=queue).add(tasks, transactional=transactional)
        return len(tasks)
    except (taskqueue.BadTaskStateError, taskqueue.TaskAlreadyExistsError,
            taskqueue.TombstonedTaskError):
        if len(tasks) <= 1:
            # Task has already been inserted, no reason to report an error here.
            return 0

        # If a list of more than one Task is given, a raised exception does
        # not guarantee that no tasks were added to the queue (unless
        # transactional is set to True). To determine which tasks were
        # successfully added when an exception is raised, check the
        # Task.was_enqueued property.
        reinsert = _tasks_to_reinsert(tasks, transactional)
        count = len(reinsert)
        inserted = len(tasks) - count
        inserted += _insert_tasks(reinsert[:count / 2], queue, transactional,
                                  retry_transient_errors, retry_delay)
        inserted += _insert_tasks(reinsert[count / 2:], queue, transactional,
                                  retry_transient_errors, retry_delay)

        return inserted
    except taskqueue.TransientError:
        # Always re-raise for transactional insert, or if specified by
        # options.
        if transactional or not retry_transient_errors:
            raise

        reinsert = _tasks_to_reinsert(tasks, transactional)

        # Retry with a delay, and then let any errors re-raise.
        time.sleep(retry_delay)

        taskqueue.Queue(name=queue).add(reinsert, transactional=transactional)
        return len(tasks)
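_tasks_to_reinsert is not shown in this example; a minimal sketch of what it might look like, assuming it relies on the Task.was_enqueued property mentioned in the comment above:

def _tasks_to_reinsert(tasks, transactional):
    """Hypothetical helper: return the tasks that still need to be added.

    For a transactional add, a raised exception means nothing was enqueued, so
    every task must be retried; otherwise keep only the tasks that were not
    enqueued.
    """
    if transactional:
        return list(tasks)
    return [task for task in tasks if not task.was_enqueued]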
Example #9
    def consume_pull(self):
        if "X-AppEngine-Cron" in self.request.headers:
            try:
                q = taskqueue.Queue(self.queue_name)
                stats = q.fetch_statistics()
                task_in_queue = stats.tasks
                logging.error(str(task_in_queue))
                count = task_in_queue / 20000.0
                while count > 0:
                    url = '/openxc_stats/consume_pull'
                    taskqueue.add(queue_name='general-stats', url=url)
                    count -= 1
            except:
                logging.error(traceback.format_exc())
            return

        lease_seconds = 10
        max_tasks = 1000
        num_merge = 100
        start_time = time.time()
        q = taskqueue.Queue(self.queue_name)
        while True:
            if time.time() - start_time > 50:
                break
            try:
                tasks = q.lease_tasks(lease_seconds, max_tasks)
                if len(tasks) == 0:
                    return
            except:
                logging.error(traceback.format_exc())
                return

            task_sub_lists = [
                tasks[i:i + int(num_merge)]
                for i in range(0, len(tasks), int(num_merge))
            ]
            for sub_list in task_sub_lists:
                entities = []
                for task in sub_list:
                    entities.append(
                        OpenXCStats.get_datastore_entity(
                            json.loads(task.payload)))

                try:
                    db.put(entities)
                    q.delete_tasks(sub_list)
                except:
                    logging.error(traceback.format_exc())
Example #10
    def post(self):
        if not self.request.body:
            self.response.write("Bad request.")
            self.response.status_int = 400
            return

        dna = json.loads(self.request.body)
        if not self.valid_dna(dna.get('dna')):
            self.response.write("Bad request.")
            self.response.status_int = 400
            return

        is_mutant = Human.is_mutant(dna.get('dna'))

        queue = taskqueue.Queue(name='default')
        task = taskqueue.Task(url='/insert_dna/',
                              target='worker',
                              params={
                                  'dna': dna,
                                  'is_mutant': is_mutant
                              })

        queue.add(task)

        if is_mutant:
            self.response.write('It is mutant.')
        else:
            self.response.write("Is human.")
            self.response.status_int = 403
Example #11
 def cancel_tasks(self):
     task_namespace = self._get_task_namespace()
     enqueued_tasks = TaskEnqueued.where(task_namespace=task_namespace)
     if enqueued_tasks:
         tasks = [taskqueue.Task(name=t.task_name) for t in enqueued_tasks]
         taskqueue.Queue().delete_tasks(tasks)
         TaskEnqueued.where(task_namespace=task_namespace).delete()
Example #12
def ProcessHistogramSet(histogram_dicts):
    if not isinstance(histogram_dicts, list):
        raise BadRequestError('HistogramSet JSON must be a list of dicts')
    histograms = histogram_module.HistogramSet()
    histograms.ImportDicts(histogram_dicts)
    histograms.ResolveRelatedHistograms()
    InlineDenseSharedDiagnostics(histograms)

    revision = ComputeRevision(histograms)

    task_list = []

    suite_path = ComputeSuitePath(histograms)

    for diagnostic in histograms.shared_diagnostics:
        # We'll skip the histogram-level sparse diagnostics because we need to
        # handle those with the histograms, below, so that we can properly assign
        # test paths.
        if type(diagnostic) in SUITE_LEVEL_SPARSE_DIAGNOSTIC_TYPES:
            task_list.append(_MakeTask(diagnostic, suite_path, revision))

    for histogram in histograms:
        guid = histogram.guid
        objects = FindHistogramLevelSparseDiagnostics(guid, histograms)
        test_path = ComputeTestPath(guid, histograms)
        # We need to queue the histogram in addition to its dense shared
        # diagnostics.
        objects.append(histogram)
        # TODO(eakuefner): Batch these better than one per task.
        for obj in objects:
            task_list.append(_MakeTask(obj, test_path, revision))

    queue = taskqueue.Queue(TASK_QUEUE_NAME)
    queue.add(task_list)
Example #13
def manual_process_for_venue(venue_id):
    raise DeprecationWarning(
        'this code is now deprecated as it relies on the deprecated InstagramDataProcessor'
    )
    logging.debug("SPICE: socialnetworks/instagram/process_for_venue started")

    if instagram_settings.instagram_auto_process_callback == False:
        instagram_queue = taskqueue.Queue('instagramcallback')
        queue_stats = instagram_queue.fetch_statistics()

        # Only add if there is no task in the queue
        if queue_stats.tasks < 1:
            task = taskqueue.Task(
                url=
                '/socialengine/api/instagram/callback_task?key=%s&venue_id=%s'
                % (spice_settings.static_api_key, venue_id),
                method='GET')
            instagram_queue.add(task)

            logging.debug(
                "SPICE: socialnetworks/instagram/process_for_venue added task for venue_id %s"
                % venue_id)

    logging.debug("SPICE: socialnetworks/instagram/process_for_venue ended")

    return None
Example #14
    def get(self):
        """Indefinitely fetch tasks and update the datastore."""
        queue = taskqueue.Queue('pullq')
        while True:
            try:
                tasks = queue.lease_tasks_by_tag(3600, 1000, deadline=60)
            except (taskqueue.TransientError,
                    apiproxy_errors.DeadlineExceededError) as e:
                logging.exception(e)
                time.sleep(1)
                continue
            if tasks:
                key = tasks[0].tag

                @ndb.transactional
                def update_counter():
                    counter = Counter.get_or_insert(key, count=0)
                    counter.count += len(tasks)
                    counter.put()

                try:
                    update_counter()
                except Exception as e:
                    logging.exception(e)
                else:
                    queue.delete_tasks(tasks)
            time.sleep(1)
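The loop above drains the 'pullq' pull queue and uses each task's tag as the counter key; a minimal sketch of the producing side, assuming increments are buffered as PULL tasks tagged with the counter name:

def enqueue_increment(counter_name):
    """Hypothetical producer: buffer one increment for the worker above."""
    queue = taskqueue.Queue('pullq')
    queue.add(taskqueue.Task(payload='', method='PULL', tag=counter_name))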
Example #15
def cron_run_import():  # pragma: no cover
    """Schedules a push task for each config set imported from Gitiles."""
    conf = admin.GlobalConfig.fetch()

    # Collect the list of config sets to import.
    config_sets = []
    if (conf and conf.services_config_storage_type == GITILES_STORAGE_TYPE
            and conf.services_config_location):
        loc = _resolved_location(conf.services_config_location)
        config_sets += _service_config_sets(loc)
    config_sets += _project_and_ref_config_sets()

    # For each config set, schedule a push task.
    # This assumes that tasks are processed faster than we add them.
    tasks = [
        taskqueue.Task(url='/internal/task/luci-config/gitiles_import/%s' % cs)
        for cs in config_sets
    ]

    # Task Queues try to preserve FIFO semantics. But if something is partially
    # failing (e.g. LUCI Config hitting gitiles quota midway through update), we'd
    # want to make slow progress across all config sets. Shuffle the tasks so we
    # don't accidentally give priority to the lexicographically first ones.
    random.shuffle(tasks)

    q = taskqueue.Queue('gitiles-import')
    pending = tasks
    while pending:
        batch = pending[:100]
        pending = pending[len(batch):]
        q.add(batch)

    logging.info('scheduled %d tasks', len(tasks))
Example #16
def _insert_tasks(tasks, queue, transactional=False):
    """Insert a batch of tasks into the specified queue. If an error occurs
    during insertion, split the batch and retry until they are successfully
    inserted. Return the number of successfully inserted tasks.
    """
    from google.appengine.api import taskqueue

    if not tasks:
        return 0

    try:
        taskqueue.Queue(name=queue).add(tasks, transactional=transactional)
        return len(tasks)
    except (taskqueue.BadTaskStateError,
            taskqueue.TaskAlreadyExistsError,
            taskqueue.TombstonedTaskError,
            taskqueue.TransientError):
        count = len(tasks)
        if count <= 1:
            return 0

        inserted = _insert_tasks(tasks[:count / 2], queue, transactional)
        inserted += _insert_tasks(tasks[count / 2:], queue, transactional)

        return inserted
Example #17
 def post(self):
     _msg = self.request.get('message', '0')
     if _msg is not None:
         sentOn = None
         _type = self.request.get('type', '')
         post_args = self.request.arguments()
         if _type in ['feedback', 'error-feedback']:
             if 'reportsentutc' in post_args:
                 sentOn = self.parseDateTime(
                     self.request.get('reportsentutc', ''))
             _groupId = long(self.request.get('groupid', ''))
             if _groupId and sentOn:
                 if _msg != 'Automatically sent':
                     fb = Feedback(groupId=_groupId,
                                   sendTime=sentOn,
                                   timezone=self.request.get(
                                       'reportsenttz', ''),
                                   type=_type,
                                   message=_msg)
                     fb.put()
                     q = taskqueue.Queue('feedback-export-queue')
                     q.add((taskqueue.Task(payload=fb.getExportLine(),
                                           method='PULL')))
                     Cnt.incr("Feedback_counter")
                 else:
                     logging.warning(
                         'Automatically sent feedback message, discarding.')
                 self.response.out.write("OK")
             else:
                 self.error(400)
         else:
             self.error(400)
     else:
         self.error(400)
Example #18
    def handle_get(self):
        queue = taskqueue.Queue('update')
        if queue.fetch_statistics().tasks > 0:
            self.response.write('update already in progress')
            return

        query = Library.query()
        cursor = None
        more = True
        task_count = 0
        while more:
            keys, cursor, more = query.fetch_page(50,
                                                  keys_only=True,
                                                  start_cursor=cursor)
            for key in keys:
                task_count = task_count + 1
                task_url = util.update_library_task(key.id())
                util.new_task(task_url, target='manage', queue_name='update')

        logging.info('triggered %d library updates', task_count)

        query = Author.query()
        cursor = None
        more = True
        task_count = 0
        while more:
            keys, cursor, more = query.fetch_page(50,
                                                  keys_only=True,
                                                  start_cursor=cursor)
            for key in keys:
                task_count = task_count + 1
                task_url = util.update_author_task(key.id())
                util.new_task(task_url, target='manage', queue_name='update')

        logging.info('triggered %d author updates', task_count)
Example #19
def ProcessHistogramSet(histogram_dicts):
    if not isinstance(histogram_dicts, list):
        raise api_request_handler.BadRequestError(
            'HistogramSet JSON must be a list of dicts')
    histograms = histogram_set.HistogramSet()
    histograms.ImportDicts(histogram_dicts)
    histograms.ResolveRelatedHistograms()
    InlineDenseSharedDiagnostics(histograms)

    revision = ComputeRevision(histograms)

    task_list = []

    suite_key = GetSuiteKey(histograms)

    suite_level_sparse_diagnostic_entities = []
    diagnostic_names_added = {}

    # We'll skip the histogram-level sparse diagnostics because we need to
    # handle those with the histograms, below, so that we can properly assign
    # test paths.
    for hist in histograms:
        for name, diag in hist.diagnostics.iteritems():
            if name in SUITE_LEVEL_SPARSE_DIAGNOSTIC_NAMES:
                if diagnostic_names_added.get(name) is None:
                    diagnostic_names_added[name] = diag.guid

                if diagnostic_names_added.get(name) != diag.guid:
                    raise ValueError(
                        name +
                        ' diagnostics must be the same for all histograms')

            if name in SUITE_LEVEL_SPARSE_DIAGNOSTIC_NAMES:
                suite_level_sparse_diagnostic_entities.append(
                    histogram.SparseDiagnostic(id=diag.guid,
                                               data=diag.AsDict(),
                                               test=suite_key,
                                               start_revision=revision,
                                               end_revision=sys.maxint,
                                               name=name))

    # TODO(eakuefner): Refactor master/bot computation to happen above this line
    # so that we can replace with a DiagnosticRef rather than a full diagnostic.
    new_guids_to_old_diagnostics = DeduplicateAndPut(
        suite_level_sparse_diagnostic_entities, suite_key, revision)
    for new_guid, old_diagnostic in new_guids_to_old_diagnostics.iteritems():
        histograms.ReplaceSharedDiagnostic(
            new_guid, diagnostic.Diagnostic.FromDict(old_diagnostic))

    for hist in histograms:
        guid = hist.guid
        diagnostics = FindHistogramLevelSparseDiagnostics(guid, histograms)
        # TODO(eakuefner): Don't compute full diagnostics, because we need to
        # call GetOrCreate here and in the queue anyway.
        test_path = ComputeTestPath(guid, histograms)
        # TODO(eakuefner): Batch these better than one per task.
        task_list.append(_MakeTask(hist, test_path, revision, diagnostics))

    queue = taskqueue.Queue(TASK_QUEUE_NAME)
    queue.add(task_list)
Example #20
    def processOfRequest(self):

        #  # Check whether this call came from cron
        #		if self.getServerVariables('X-AppEngine-Cron') != 'true':
        #			logging.error('invalid access it does not accessed by any cron.')
        #			return

        #		target = backends.get_backend()
        #		logging.info('backends_target=' + (str(target) if backends is not None else ''))

        # Loop over each tenant. Ideally we would get these from namespace_manager, but for now loop over DomainEntry entities.
        tenant_entrys = sateraito_func.get_all_tenant_entry()
        for tenant_entry in tenant_entrys:
            ###############################################
            # Determine the domain and set the namespace (set it here because queued tasks run in the namespace that was active when they were added).
            tenant = tenant_entry.tenant
            # Do not process disabled tenants.
            if sateraito_func.isTenantDisabled(tenant):
                pass
            else:
                # Create a token
                token = UcfUtil.guid()
                # Save Number of GoogleApps domain user
                params = {'requestor': '', 'type': 'start'}
                # Add everything to the task queue
                import_q = taskqueue.Queue('tenant-set-queue')
                import_t = taskqueue.Task(
                    url='/a/' + tenant + '/openid/' + token +
                    '/regist_tenant_entry',
                    params=params,
                    #target='b1process',  # For the 365 edition, tasks are not run on the GAE side, so this was changed to FrontEnds 2015.03.09
                    target='',
                    countdown=5)
                #logging.info('run task')
                import_q.add(import_t)
Example #21
def queueAggregation(error, instance, backtraceText):
  """Enqueues a task to aggregate the given instance into the given error."""
  payload = {'error': str(error.key()), 'instance': str(instance.key()), 'backtrace': backtraceText}
  taskqueue.Queue('aggregation').add([
    taskqueue.Task(payload = json.dumps(payload), method='PULL')
  ])
  queueAggregationWorker()
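queueAggregationWorker() is not shown here; a minimal sketch of a worker that drains the 'aggregation' pull queue, assuming a hypothetical aggregateInstance() helper that applies a single payload:

def aggregationWorker():
  """Hypothetical worker: lease pending aggregation payloads and apply them."""
  q = taskqueue.Queue('aggregation')
  tasks = q.lease_tasks(60, 100)  # lease up to 100 tasks for 60 seconds
  for task in tasks:
    payload = json.loads(task.payload)
    aggregateInstance(payload['error'], payload['instance'], payload['backtrace'])
  q.delete_tasks(tasks)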
Example #22
def get_queue(queue_name):
    global queue_cache
    try:
        queue = queue_cache[queue_name]
    except KeyError:
        queue = queue_cache[queue_name] = taskqueue.Queue(queue_name)
    return queue
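queue_cache is assumed to be a module-level dict; a minimal usage sketch with a hypothetical 'emails' queue and handler URL:

queue_cache = {}  # maps queue name -> taskqueue.Queue, reused across calls in the same instance

# Repeated calls reuse the cached Queue object instead of constructing a new one.
get_queue('emails').add(taskqueue.Task(url='/tasks/send_email', method='POST'))
get_queue('emails').add(taskqueue.Task(url='/tasks/send_email', method='POST'))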
Example #23
def tick(request):
    queue = taskqueue.Queue('challenges-sendemails')
    task = taskqueue.Task(url='/challenges/send_challenge_emails',
                          method='GET')
    queue.add(task)
    return HttpResponse(json.dumps({'status': 'ok'}),
                        content_type='application/json')
Example #24
 def get(self):
     if self.request.get("method") == "task" and self.request.get("summonername"):
         if self.request.get("flag"):
             sname = self.request.get("summonername").encode()
             taskqueue.add(
                 url="/analyzer",
                 target="analyzerservice",
                 queue_name="summoner-analyzer",
                 params={"summonername": sname,
                         "flag":"true"}
             )
         else:
             sname = self.request.get("summonername").encode()
             taskqueue.add(
                 url = "/analyzer",
                 target = "analyzerservice",
                 queue_name = "summoner-analyzer",
                 params={"summonername": sname}
             )
         self.response.write("%s has been added to analyzer_service"%(self.request.get("summonername")))
     else:
         statsList = taskqueue.QueueStatistics.fetch(taskqueue.Queue("summoner-analyzer"))
         self.response.write(html)
         self.response.write("<p>"+str(statsList)+"</p><hr>")
         apistats = memcache.get(key="riot")
         if apistats:
             self.response.write('<br><p>API calls in the last minute: <div id="int">%d calls</div></p>'%(apistats))
         else:
             self.response.write('<br><p>API calls in the last minute: <div id="int">No data available(check memcache)</div></p>')
         self.response.write(endhtml)
Example #25
    def create_videos_tasks(self, linksInter=[], keyEpisode=""):
        try:
            # create newVideo tasks to add videos to the episode.
            limit = 30  # TODO: eliminate this hard-coded limit; it is a crude hack
            # If we want to set a limit, we should do it in a better way.
            # With the interlinks, get some data.
            for linkInter in linksInter:
                # TODO: rename linkInter to interLink
                limit -= 1
                # build a link if it's not complete
                linkInter = extract.buildLink(linkInter)
                # and don't go past the limit
                if limit > 0:
                    # Call a newVideo task for each interlink
                    queue = taskqueue.Queue('newVideo')
                    task = taskqueue.Task(url='/tasks/newVideo',
                                          params={
                                              'keyEpisode': keyEpisode,
                                              'interLink': linkInter
                                          })
                    queue.add(task)

                else:
                    logging.error("We still have a limit set")
                    break
        except:
            raise  # TODO
Example #26
    def AuthorizedPost(self):
        datastore_hooks.SetPrivilegedRequest()

        with timing.WallTimeLogger('decompress'):
            try:
                data_str = self.request.body
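                # The result is discarded: this only checks that the request
                # body really is zlib-compressed. The compressed bytes are
                # later written to GCS as-is.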
                zlib.decompress(data_str)
                logging.info('Received compressed data.')
            except zlib.error:
                data_str = self.request.get('data')
                data_str = zlib.compress(data_str)
                logging.info('Received uncompressed data.')

        if not data_str:
            raise api_request_handler.BadRequestError(
                'Missing "data" parameter')

        filename = uuid.uuid4()
        params = {'gcs_file_path': '/add-histograms-cache/%s' % filename}

        gcs_file = cloudstorage.open(params['gcs_file_path'],
                                     'w',
                                     content_type='application/octet-stream',
                                     retry_params=_RETRY_PARAMS)
        gcs_file.write(data_str)
        gcs_file.close()

        retry_options = taskqueue.TaskRetryOptions(
            task_retry_limit=_TASK_RETRY_LIMIT)
        queue = taskqueue.Queue('default')
        queue.add(
            taskqueue.Task(url='/add_histograms/process',
                           payload=json.dumps(params),
                           retry_options=retry_options))
Example #27
def cron_run_import():  # pragma: no cover
  """Schedules a push task for each config set imported from Gitiles."""
  conf = admin.GlobalConfig.fetch()

  # Collect the list of config sets to import.
  config_sets = []
  if (conf and conf.services_config_storage_type == GITILES_STORAGE_TYPE and
      conf.services_config_location):
    loc = gitiles.Location.parse_resolve(conf.services_config_location)
    config_sets += _service_config_sets(loc)
  config_sets += _project_and_ref_config_sets()

  # For each config set, schedule a push task.
  # This assumes that tasks are processed faster than we add them.
  tasks = [
    taskqueue.Task(url='/internal/task/luci-config/gitiles_import/%s' % cs)
    for cs in config_sets
  ]

  q = taskqueue.Queue('gitiles-import')
  pending = tasks
  while pending:
    batch = pending[:100]
    pending = pending[len(batch):]
    q.add(batch)

  logging.info('scheduled %d tasks', len(tasks))
Example #28
    def delete(self):
        queue_name = self.request.get('queueName')
        task_id = self.request.get('taskId')

        queue = taskqueue.Queue(queue_name)
        task = taskqueue.Task(name=task_id)
        queue.delete_tasks(task)
Example #29
def EnqueueTasks(tasks, task_tag):
    """Enqueues a list of tasks in the Google Cloud task queue, for consumption
    by Google Compute Engine.
    """
    q = taskqueue.Queue('clovis-queue')
    # Add tasks to the queue by groups.
    # TODO(droger): This supports thousands of tasks, but maybe not millions.
    # Defer the enqueuing if it times out.
    group_size = 100
    callbacks = []
    try:
        for i in range(0, len(tasks), group_size):
            group = tasks[i:i + group_size]
            taskqueue_tasks = [
                taskqueue.Task(payload=task.ToJsonString(),
                               method='PULL',
                               tag=task_tag) for task in group
            ]
            rpc = taskqueue.create_rpc()
            q.add_async(task=taskqueue_tasks, rpc=rpc)
            callbacks.append(rpc)
        for callback in callbacks:
            callback.get_result()
    except Exception as e:
        clovis_logger.error('Exception:' + type(e).__name__ + ' ' +
                            str(e.args))
        return False
    clovis_logger.info('Pushed %i tasks with tag: %s.' %
                       (len(tasks), task_tag))
    return True
Example #30
    def get(self):
        q = taskqueue.Queue('views')
        tasks = q.lease_tasks(360, 100)
        if not tasks:
            logging.info('No views to update')
            return

        stats = {}
        for task in tasks:
            payload = json.loads(task.payload)
            url_id = payload['id']
            if url_id not in stats:
                stats.update({url_id: {'one': 0, 'two': 0}})
            if payload['index'] == 1:
                stats[payload['id']]['one'] += 1
            elif payload['index'] == 2:
                stats[payload['id']]['two'] += 1

        for u_id in stats.iterkeys():
            url = RandomURL.get_by_id(u_id)
            url.views_for_one = stats[u_id]['one']
            url.views_for_two = stats[u_id]['two']
            url.all_views = stats[u_id]['one'] + stats[u_id]['two']
            url.put()
        q.delete_tasks(tasks)
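The handler above leases PULL tasks from the 'views' queue and expects a JSON payload with an 'id' and an 'index'; a minimal sketch of the producing side, under that assumption:

def record_view(url_id, index):
    """Hypothetical producer: buffer one page view for the aggregator above."""
    taskqueue.Queue('views').add(taskqueue.Task(
        method='PULL', payload=json.dumps({'id': url_id, 'index': index})))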