Code example #1
    def get(self):
        f_cursor_param = self.request.get('f_cursor')
        b_cursor_param = self.request.get('b_cursor')

        template_values = {}
        if b_cursor_param:	
            words_query = Word.query(ancestor=create_dictionary_key()).order(Word.date)
            b_cursor = Cursor(urlsafe=self.request.get('b_cursor'))
            template_values['f_cursor'] = b_cursor.urlsafe()
            rev_curs = b_cursor.reversed()	
            words, next_cursor, more = words_query.fetch_page(WORDS_PER_PAGE, start_cursor=rev_curs)
            words.reverse()
            if more and next_cursor:
                template_values['b_cursor'] = next_cursor.reversed().urlsafe()
        else:
            words_query = Word.query(ancestor=create_dictionary_key()).order(-Word.date)
            f_cursor = Cursor(urlsafe=self.request.get('f_cursor'))
            template_values['b_cursor'] = f_cursor.urlsafe() 
            words, next_cursor, more = words_query.fetch_page(WORDS_PER_PAGE, start_cursor=f_cursor)
            if more and next_cursor:
                template_values['f_cursor'] = next_cursor.urlsafe()
        wordDTOs = []
        for word in words:
            wordDTOs.append(WordDTO(word.word, word.description, word.example))
        template_values['words'] = wordDTOs
        self.response.out.write(template.render(get_template_path('view_words.html'), template_values))
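A note on example #1: ndb pages backwards by re-running the query with the opposite sort order and a reversed cursor, then flipping the fetched page for display. The core of the b_cursor branch above, distilled into a standalone helper (Word, create_dictionary_key() and the page size are taken from the example; this is a sketch, not part of the original handler):

from google.appengine.datastore.datastore_query import Cursor

def fetch_previous_page(urlsafe_b_cursor, page_size):
    # The forward listing is ordered by -Word.date, so the backward query flips
    # the sort to Word.date and starts from the reversed cursor.
    query = Word.query(ancestor=create_dictionary_key()).order(Word.date)
    cursor = Cursor(urlsafe=urlsafe_b_cursor)
    words, next_cursor, more = query.fetch_page(
        page_size, start_cursor=cursor.reversed())
    words.reverse()  # restore newest-first order for display
    # Cursor for stepping back one more page, if there is one.
    next_b_cursor = next_cursor.reversed().urlsafe() if more and next_cursor else None
    return words, next_b_cursor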
Code example #2
def list(limit=10, cursor=None):
    if cursor:
        cursor = Cursor(urlsafe=cursor)
    query = Book.query().order(Book.description)
    entities, cursor, more = query.fetch_page(limit, start_cursor=cursor)
    entities = builtin_list(map(from_datastore, entities))
    return entities, cursor.urlsafe() if len(entities) == limit else None
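One caveat with this pattern: fetch_page can return None for the cursor, in which case cursor.urlsafe() raises AttributeError. A slightly more defensive variant (a sketch; Book, builtin_list and from_datastore as in the example above):

from google.appengine.datastore.datastore_query import Cursor

def list_defensive(limit=10, cursor=None):
    if cursor:
        cursor = Cursor(urlsafe=cursor)
    query = Book.query().order(Book.description)
    entities, cursor, more = query.fetch_page(limit, start_cursor=cursor)
    entities = builtin_list(map(from_datastore, entities))
    # Only hand out a token when the datastore says there is another page and
    # actually returned a cursor for it.
    return entities, cursor.urlsafe() if more and cursor else None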
Code example #3
class _ShardParams(object):
  """Parameters for a chain of taskqueue tasks."""
  def __init__(self, payload):
    self.start_time = utils.utcnow()
    self.cursor = None
    self.task_start = self.start_time
    self.task_count = 0
    self.count = 0
    if not payload:
      return
    try:
      params = json.loads(payload)
      if params['cursor']:
        self.cursor = Cursor(urlsafe=params['cursor'])
      self.task_start = datetime.datetime.strptime(
          params['task_start'], utils.DATETIME_FORMAT)
      self.task_count = params['task_count']
      self.count = params['count']
    except (ValueError, KeyError) as e:
      logging.error('_ShardParams: bad JSON: %s: %s', payload, e)
      # Stop the task chain and let the request fail.
      raise

  def json(self):
    return utils.encode_to_json({
        'cursor': self.cursor.urlsafe() if self.cursor else None,
        'task_start': self.task_start,
        'task_count': self.task_count,
        'count': self.count,
    })
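A rough sketch of how one task in the chain might round-trip _ShardParams: decode the incoming payload, advance the cursor by one page, and emit the payload for the next task. MyModel, handle() and the page size are illustrative assumptions, not part of the original code:

def process_one_shard_page(payload, page_size=500):
    params = _ShardParams(payload)  # restores cursor/counters, or starts fresh
    entities, next_cursor, more = MyModel.query().fetch_page(
        page_size, start_cursor=params.cursor)
    for entity in entities:
        handle(entity)  # hypothetical per-entity work
        params.count += 1
    params.cursor = next_cursor
    params.task_count += 1
    # Payload for the next task in the chain, or None when the chain is done.
    return params.json() if more else None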
Code example #4
def xdb_fetch_page(query, limit, offset=None, start_cursor=None):
    """Pagination-ready fetching a some entities."""

    if isinstance(query, ndb.Query):
        if start_cursor:
            if isinstance(start_cursor, basestring):
                start_cursor = Cursor(urlsafe=start_cursor)
            objects, cursor, more_objects = query.fetch_page(limit, start_cursor=start_cursor)
        else:
            objects, cursor, more_objects = query.fetch_page(limit, offset=offset)
    elif isinstance(query, db.Query) or isinstance(query, db.GqlQuery):
        if start_cursor:
            if isinstance(start_cursor, Cursor):
                start_cursor = start_cursor.urlsafe()
            query.with_cursor(start_cursor)
            objects = query.fetch(limit)
            cursor = Cursor(urlsafe=query.cursor())
            more_objects = len(objects) == limit
        else:
            objects = query.fetch(limit, offset=offset)
            _cursor = query.cursor()
            more_objects = query.with_cursor(_cursor).count(1) > 0
            cursor = Cursor(urlsafe=_cursor)
    else:
        raise RuntimeError('unknown query class: %s' % type(query))
    return objects, cursor, more_objects
Code example #5
File: vessel.py Project: hmacread/keelscape
    def get_template_params(self, vessel_key):
        self.vessel_key = vessel_key
        vessel = vessel_key.get()

        wpt_qry = Waypoint.query(ancestor=vessel.key).order(-Waypoint.report_date, -Waypoint.received_date)
        curs = Cursor(urlsafe=self.request.get('cursor'))
        params = {'loginurl': users.create_login_url('/'),
                'vessel': vessel,
                'map' : GoogleMapTrack(vessel)
                }
        if self.request.get('cursor'):
            params['start_url'] = self.get_base_url()
        else:
            params['start_url'] = ''
        params['waypoints'], next_curs, params['older'] = wpt_qry.fetch_page(self.NUM_WAYPOINTS, start_cursor=curs)
        params['this_page_url'] = self.get_base_url() + "?cursor=" + curs.urlsafe()
        if params['older'] and next_curs:
            params['next_page_url'] = self.get_base_url() + "?cursor=" + next_curs.urlsafe()
        else:
            params['older'] = False

        # #Formulate reverse pointer if there is more recent waypoints
        # rev_wpt_qry = Waypoint.query(ancestor=vessel.key).order(Waypoint.report_date, Waypoint.received_date)
        # rev_curs = curs.reversed()
        # _, prev_curs, params['newer'] = wpt_qry.fetch_page(self.NUM_WAYPOINTS, start_cursor=rev_curs)
        # if params['newer'] and prev_curs:
        #      params['prev_page_url'] = self.get_base_url() + "?cursor=" + prev_curs.reversed().urlsafe()
        # else:
        #      params['newer'] = False


        return params
Code example #6
    def get_stats(cls,
                  cursor_key=None,
                  limit=None,
                  year=None,
                  topic=None,
                  sort_by=None,
                  **kw):
        if topic:
            return RoshReviewUserTopicStats.get_stats(topic,
                                                      cursor_key=cursor_key,
                                                      limit=limit,
                                                      year=year,
                                                      **kw)

        limit = limit if limit else 20
        sort_by = sort_by if sort_by else 'performance'
        cursor = Cursor(urlsafe=cursor_key) if cursor_key else None

        q = cls.query()
        if year:
            q = q.filter(cls.year == year)
        q = q.order(-ndb.GenericProperty(sort_by))

        stats, cursor, _ = q.fetch_page(limit, start_cursor=cursor, **kw)
        return stats, (cursor.urlsafe() if cursor else None),
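Pagers with this (items, urlsafe_cursor_or_None) return shape can be drained with a small generic helper; the sketch below is hypothetical and simply feeds each returned cursor back into the next call:

def iter_pages(fetch_page_fn, **kw):
    # Works with any pager shaped like get_stats above, e.g.
    # for stat in iter_pages(SomeStatsModel.get_stats, limit=50): ...
    cursor_key = None
    while True:
        items, cursor_key = fetch_page_fn(cursor_key=cursor_key, **kw)
        if not items:
            break
        for item in items:
            yield item
        if not cursor_key:
            break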
Code example #7
File: models.py Project: gayancliyanage/education
    def get_students(cls, cursor_key=None, limit=None, name=None, years=[], **kw):
        limit = 20 if limit is None else limit

        cursor = Cursor(urlsafe=cursor_key) if cursor_key else None
        q = cls.query()

        if name:
            q = q.filter(cls.names >= name)
            q = q.filter(cls.names < "%s{" % name)

        if years:
            q = q.filter(cls.year.IN(years))
        else:
            q = q.filter(cls.is_active == True)

        if name:
            q = q.order(cls.names, cls.year)
        elif years:
            q = q.order(cls.year, cls.display_name, cls.key)
        else:
            q = q.order(cls.year, cls.display_name)

        if name or limit == 0:
            limit = limit if limit else None
            students, cursor = q.fetch(limit, **kw), None
        else:
            students, cursor, _ = q.fetch_page(limit, start_cursor=cursor, **kw)

        return ([s.details() for s in students], (cursor.urlsafe() if cursor else None))
Code example #8
def list(limit=10, cursor=None):
    if cursor:
        cursor = Cursor(urlsafe=cursor)
    query = Book.query().order(Book.title)
    entities, cursor, more = query.fetch_page(limit, start_cursor=cursor)
    entities = builtin_list(map(from_datastore, entities))
    return entities, cursor.urlsafe() if len(entities) == limit else None
Code example #9
def tag(tag):
    cursor_str = util.param('cursor', str)
    cursor = None
    try:
        cursor = Cursor(urlsafe=cursor_str)
    except TypeError:
        cursor = None  # no usable cursor was supplied

    story_dbs, next_cursor, more = model.Story.query(
        model.Story.tags == tag).filter(model.Story.story_item_count > 0).fetch_page(24,
                                                                                     start_cursor=cursor)

    if len(story_dbs) == 0:
        not_found = exceptions.NotFound()
        raise not_found
    params = {
        'next_cursor': next_cursor.urlsafe() if next_cursor else None,
        'tag': tag,
        'current_cursor': cursor.urlsafe() if cursor else None
    }
    resp_model = {}
    resp_model['html_class'] = 'tag'
    resp_model['canonical_path'] = flask.url_for('tag', tag=tag)
    decorate_page_response_model(resp_model)
    decorate_stories_page_model(resp_model, story_dbs, params)
    return flask.render_template('public/story/story_list.html', model=resp_model)
Code example #10
File: models.py Project: CultureMap/GAE-Bulk-Mailer
  def process (self, cursor=None):

    if cursor:
        cursor = Cursor(urlsafe=cursor)

    total_clicks = 0
    total_opens = 0
    
    self.temp_clicks = {}
    self.temp_opens = {}
    if cursor == None: #skip all cursor continuations, these values are already init'd
      self.tags = {}
      self.urls = {}
      self.clients = {}
      self.clicks = []
      self.opens = []
      self.total_sends = 0
      self.total_clicks = 0
      self.total_opens = 0

      from bulkmail.api.models import Campaign
      c = Campaign.query(Campaign.campaign_id == self.campaign_id, Campaign.list_id == self.list_id).get()
      
      for key in c.send_data:
        sd = key.get()
        self.total_sends += len(sd.data)

    tracks, cursor, more = Track.query(
        Track.list_id == self.list_id,
        Track.campaign_id == self.campaign_id,
        ndb.OR(Track.ttype == 'click', Track.ttype == 'open')
      ).order(Track._key).fetch_page(100, start_cursor=cursor)

    for t in tracks:
      self.process_track(t)
      if t.ttype == 'click':
        total_clicks += 1
      elif t.ttype == 'open':
        total_opens += 1

    #set total_clicks/total_opens
    self.total_clicks = self.total_clicks + total_clicks
    self.total_opens = self.total_opens + total_opens
    #set clicks/opens
    self.sort_data('clicks')
    self.sort_data('opens')

    self.put()

    if more and cursor:
      taskqueue.add(
        url='/api/compile-stats',
        params={
          'list_id': self.list_id,
          'campaign_id': self.campaign_id,
          'key': self.key.urlsafe(),
          'cursor': cursor.urlsafe()
        },
        queue_name='stats'
      )
Code example #11
 def get_staff(cls, cursor_key, limit=20, **kw):
     cursor = Cursor(urlsafe=cursor_key) if cursor_key else None
     is_staff = True
     q = cls.query().filter(cls.is_staff == is_staff)
     q = q.order(cls.display_name)
     staff, cursor, _ = q.fetch_page(limit, start_cursor=cursor, **kw)
     return staff, (cursor.urlsafe() if cursor else None),
Code example #12
    def get_stats(
        cls,
        cursor_key=None,
        limit=None,
        year=None,
        topic=None,
        sort_by=None,
        **kw
    ):
        if topic:
            return RoshReviewUserTopicStats.get_stats(
                topic,
                cursor_key=cursor_key,
                limit=limit,
                year=year,
                **kw
            )

        limit = limit if limit else 20
        sort_by = sort_by if sort_by else 'performance'
        cursor = Cursor(urlsafe=cursor_key) if cursor_key else None

        q = cls.query()
        if year:
            q = q.filter(cls.year == year)
        q = q.order(-ndb.GenericProperty(sort_by))

        stats, cursor, _ = q.fetch_page(limit, start_cursor=cursor, **kw)
        return stats, (cursor.urlsafe() if cursor else None),
Code example #13
File: feed.py Project: egoing/fbgp
 def get(self, post_key):
     from google.appengine.datastore.datastore_query import Cursor
     args = {}
     next_cursor = self.request.get('next_cursor');
     ckey = 'PostHandler.%s.%s' % (post_key,next_cursor)
     cdata = memcache.get(ckey)
     if cdata is not None:
         args = cdata
     else:
         args['tags'] = self.tags()
         post = Feed.query(Feed.key ==  ndb.Key(urlsafe = post_key)).get()
         next_cursor = Cursor(urlsafe=next_cursor)
         entryRef, next_cursor, more = Comment.query(Comment.parent == post.key).order(Comment.created_time).fetch_page(100, start_cursor = next_cursor)
         entries = []
         for _entry in entryRef:
             entry = _entry.to_dict()
             entry['member'] = _entry.member.get().to_dict()
             entries.append(entry)
         post_key = post.key.urlsafe()
         args['post'] = post.to_dict();
         args['post']['message'] = message(args['post']['message'])
         args['post']['member'] = post.member.get().to_dict()
         args['comments'] = {}
         args['comments']['entries'] = entries;
         args['comments']['next_cursor'] = next_cursor.urlsafe() if next_cursor else None
         args['comments']['more'] = more
         if not memcache.add(ckey, args, CACEH_POST_IN_PERMLINK):
             logging.error('Memcache set failed.')
     template = JINJA_ENVIRONMENT.get_template('/view/post.html')
     self.response.write(template.render(args))
Code example #14
File: models.py Project: gayancliyanage/education
 def get_staff(cls, cursor_key, limit=20, **kw):
     cursor = Cursor(urlsafe=cursor_key) if cursor_key else None
     is_staff = True
     q = cls.query().filter(cls.is_staff == is_staff)
     q = q.order(cls.display_name)
     staff, cursor, _ = q.fetch_page(limit, start_cursor=cursor, **kw)
     return staff, (cursor.urlsafe() if cursor else None)
Code example #15
File: feed.py Project: happydeveloper/fbgp
 def get(self, post_key):
     from google.appengine.datastore.datastore_query import Cursor
     args = {}
     next_cursor = self.request.get('next_cursor')
     ckey = 'PostHandler.%s.%s' % (post_key, next_cursor)
     cdata = memcache.get(ckey)
     if cdata is not None:
         args = cdata
     else:
         args['tags'] = self.tags()
         post = Feed.query(Feed.key == ndb.Key(urlsafe=post_key)).get()
         next_cursor = Cursor(urlsafe=next_cursor)
         entryRef, next_cursor, more = Comment.query(
             Comment.parent == post.key).order(
                 Comment.created_time).fetch_page(100,
                                                  start_cursor=next_cursor)
         entries = []
         for _entry in entryRef:
             entry = _entry.to_dict()
             entry['member'] = _entry.member.get().to_dict()
             entries.append(entry)
         post_key = post.key.urlsafe()
         args['post'] = post.to_dict()
         args['post']['message'] = message(args['post']['message'])
         args['post']['member'] = post.member.get().to_dict()
         args['comments'] = {}
         args['comments']['entries'] = entries
         args['comments']['next_cursor'] = next_cursor.urlsafe(
         ) if next_cursor else None
         args['comments']['more'] = more
         if not memcache.add(ckey, args, CACEH_POST_IN_PERMLINK):
             logging.error('Memcache set failed.')
     template = JINJA_ENVIRONMENT.get_template('/view/post.html')
     self.response.write(template.render(args))
Code example #16
File: feed.py Project: happydeveloper/fbgp
 def get(self, post_key):
     from google.appengine.datastore.datastore_query import Cursor
     import json
     next_cursor = Cursor(urlsafe=self.request.get('next_cursor'))
     entries = []
     ckey = 'CommentDataHandler.%s.%s' % (post_key,
                                          self.request.get('next_cursor'))
     cdata = memcache.get(ckey)
     if cdata is not None:
         cache = cdata
     else:
         post = Feed.query(Feed.key == ndb.Key(urlsafe=post_key)).get()
         #syncComment(post);
         entryRef, next_cursor, more = Comment.query(
             Comment.parent == ndb.Key(urlsafe=post_key)).order(
                 Comment.created_time).fetch_page(COMMEMT_PAGE_SCALE,
                                                  start_cursor=next_cursor)
         for _entry in entryRef:
             entry = _entry.to_dict()
             entry['member'] = _entry.member.get().to_dict()
             entries.append(entry)
         cache = {
             'entries': entries,
             'next_cursor': next_cursor.urlsafe() if next_cursor else None,
             'more': more
         }
         if not memcache.add(ckey, cache, CACHE_COMMENT_IN_POST_TIME):
             logging.error('Memcache set failed.')
     self.response.write(json.dumps(cache))
Code example #17
File: dashboard_util.py Project: xinghun61/infra
def GetPagedResults(query,
                    order_properties,
                    cursor=None,
                    direction=NEXT,
                    page_size=PAGE_SIZE):
    """Paging the query results with page_size.

  Args:
    query(ndb.Query): The ndb query to query entities.
    order_properties: A list of tuples containing class attribute of entity
      class to order the entities and ordering of the field.
    cursor (Cursor): A cursor into the current query results, used to
      retrieve the next or previous page of entities.
    direction (str): Either previous or next.
    page_size (int): Number of entities to show per page.

  Returns:
    A tuple of (entities, top_cursor, bottom_cursor).
    entities (list): List of entities to be displayed at the current page.
    top_cursor (str): The urlsafe encoding of the cursor, which is at the
      top position of entities of the current page.
    bottom_cursor (str): The urlsafe encoding of the cursor, which is at the
      bottom position of entities of the current page.
  """
    cursor = Cursor(urlsafe=cursor) if cursor else None

    if direction.lower() == PREVIOUS:
        for order_property, forward_order in order_properties:
            if forward_order == DESC:
                # Forward order is desc meaning backward order should be asc.
                query = query.order(order_property)
            if forward_order == ASC:
                # Forward order is asc meaning backward order should be desc.
                query = query.order(-order_property)
        entities, next_cursor, more = query.fetch_page(
            page_size, start_cursor=cursor.reversed())
        entities.reverse()
    else:
        for order_property, forward_order in order_properties:
            if forward_order == DESC:
                query = query.order(-order_property)
            if forward_order == ASC:
                query = query.order(order_property)
        entities, next_cursor, more = query.fetch_page(page_size,
                                                       start_cursor=cursor)

    next_cursor = next_cursor.urlsafe() if next_cursor else ''
    used_cursor = cursor.urlsafe() if cursor else ''
    if direction.lower() == PREVIOUS:
        top_cursor = next_cursor if more else ''
        bottom_cursor = used_cursor
    else:
        top_cursor = used_cursor
        bottom_cursor = next_cursor if more else ''

    return entities, top_cursor, bottom_cursor
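A hypothetical usage sketch for GetPagedResults: page forward with the bottom cursor and backward with the top cursor. The Analysis model, its status filter and its request_time property are assumptions; NEXT, PREVIOUS and DESC are the module constants referenced above.

query = Analysis.query(Analysis.status == 'completed')
order_props = [(Analysis.request_time, DESC)]

# First page: no cursor yet.
entities, top_cursor, bottom_cursor = GetPagedResults(query, order_props)

# Next page: start from the bottom cursor of the current page.
entities, top_cursor, bottom_cursor = GetPagedResults(
    query, order_props, cursor=bottom_cursor, direction=NEXT)

# Back to the previous page: start from the top cursor of the current page.
entities, top_cursor, bottom_cursor = GetPagedResults(
    query, order_props, cursor=top_cursor, direction=PREVIOUS)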
Code example #18
def list(limit=10, cursor=None):
    """ Returns the moviereview entities from the DataStore converted to a list (limit 10 per list), with each entry containing
        the movie id, title, year, genre, rating, review and reviewer. """
    if cursor:
        cursor = Cursor(urlsafe=cursor)

    query = MovieReview.query().order(MovieReview.movie)
    entities, cursor, more = query.fetch_page(limit, start_cursor=cursor)
    entities = builtin_list(map(from_datastore, entities))
    return entities, cursor.urlsafe() if len(entities) == limit else None
Code example #19
    def get_stats(cls, topic, cursor_key=None, limit=None, year=None, **kw):
        limit = limit if limit else 20
        cursor = Cursor(urlsafe=cursor_key) if cursor_key else None

        q = cls.query().filter(cls.topic == topic)
        if year:
            q = q.filter(cls.year == year)
        q = q.order(-cls.performance)

        stats, cursor, _ = q.fetch_page(limit, start_cursor=cursor, **kw)
        return stats, (cursor.urlsafe() if cursor else None),
Code example #20
 def get(self):
     if self.request.get('p'):
         curs = Cursor(urlsafe=self.request.get('p'))
         articles, curs, more = Article.query().order(
             -Article.when).fetch_page(ITEMS_PER_PAGE, start_cursor=curs)
         
         if more:
             curs = curs.urlsafe()
             
         else:
             curs = None
         
         self.response.out.headers['Content-Type'] = 'application/json'
         self.response.out.write(
             json.dumps(
                 {'articles': [a.as_dict() for a in articles],
                  'next': curs,
                  'more': more}
             )
         )
         
     # If nothing is queried, return initial page.
     # TODO: merge those two conditionals.
     else:
         articles, curs, more =  Article.query().order(
             -Article.when).fetch_page(ITEMS_PER_PAGE)
         
         if more:
             curs = curs.urlsafe()
             
         else:
             curs = None
         
         self.response.out.headers['Content-Type'] = 'application/json'
         self.response.out.write(
             json.dumps(
                 {'articles': [a.as_dict() for a in articles],
                  'next': curs,
                  'more': more}
             )
         )        
Code example #21
File: views.py Project: astagi/magpi-api
def get_news_list_from_db(token):
    curs = Cursor(urlsafe=token)
    newss, curs, _ = News.query().order(-News.date).fetch_page(10, start_cursor=curs)
    if newss:
        newss_list = {}
        if curs:
            newss_list['pagetoken'] = curs.urlsafe()
        newss_list['news'] = []
        for news in newss:
            newss_list['news'].append(news.maximize())
        return newss_list
    return None
Code example #22
  def remove_old_dashboard_data(self):
    """ Removes old statistics from the AppScale dashboard application. """
    last_cursor = None
    last_model = None

    # If we have state information beyond what function to use,
    # load the last seen model and cursor if available.
    if (len(self.groomer_state) > 1 and
      self.groomer_state[0] == self.CLEAN_DASHBOARD_TASK):
      last_model = self.DASHBOARD_DATA_MODELS[int(self.groomer_state[1])]
      if len(self.groomer_state) > 2:
        last_cursor = Cursor(self.groomer_state[2])

    self.register_db_accessor(constants.DASHBOARD_APP_ID)
    timeout = datetime.datetime.utcnow() - \
      datetime.timedelta(seconds=self.DASHBOARD_DATA_TIMEOUT)
    for model_number in range(len(self.DASHBOARD_DATA_MODELS)):
      model_type = self.DASHBOARD_DATA_MODELS[model_number]
      if last_model and model_type != last_model:
        continue
      counter = 0
      while True:
        query = model_type.query().filter(model_type.timestamp < timeout)
        entities, next_cursor, more = query.fetch_page(self.BATCH_SIZE,
          start_cursor=last_cursor)
        for entity in entities:
          entity.key.delete()
          counter += 1
        if time.time() > self.last_logged + self.LOG_PROGRESS_FREQUENCY:
          logging.info('Removed {} {} entities.'
            .format(counter, model_type.__class__.__name__))
          self.last_logged = time.time()
        if more:
          last_cursor = next_cursor
          self.update_groomer_state([self.CLEAN_DASHBOARD_TASK,
            str(model_number), last_cursor.urlsafe()])
        else:
          break
      if model_number != len(self.DASHBOARD_DATA_MODELS) - 1:
        self.update_groomer_state([self.CLEAN_DASHBOARD_TASK,
          str(model_number + 1)])
        last_model = None
        last_cursor = None
      if counter > 0:
        logging.info("Removed {0} {1} dashboard entities".format(counter,
          model_type))

      # Do a scan of all entities and remove any that
      # do not have timestamps for AppScale versions 2.3 and before. 
      # This may take some time on the initial run, but subsequent runs should
      # be quick given a low dashboard data timeout.
      self.remove_deprecated_dashboard_data(model_type)
    return
Code example #23
File: views.py Project: astagi/magpi-api
def get_issues_list_from_db(token):
    curs = Cursor(urlsafe=token)
    issues, curs, _ = Issue.query().order(-Issue.id).fetch_page(10, start_cursor=curs)
    if issues:
        issues_list = {}
        if curs:
            issues_list['pagetoken'] = curs.urlsafe()
        issues_list['issues'] = []
        for issue in issues:
            issues_list['issues'].append(issue.minimize())
        return issues_list
    return None
Code example #24
    def post(self):

        user = self._pre_post()
        if not user:
            return

        # pull together all of the necessary parameters
        parameters = self._post_parameters()
        search, search_column, orders = self._post_aggregate()
        search, search_valid = self._post_search(search, search_column)

        # build the query
        query = models.Hacker.query()
        for order in orders:
            query = query.order(order)
        if search_valid and search:
            search_column = ReviewDataTableHandler.COLUMNS[search_column]
            query = query.filter(search_column == search)

        offset = parameters['offset']
        cursor = None
        data, more = [], False

        if search_valid:
            cursor = Cursor(urlsafe=parameters['cursor'])
            data, cursor, more = query.fetch_page(parameters['limit'], offset=0, start_cursor=cursor)

        # the data that we send back is slightly different
        # than what the serializer would yield; we can't include
        # extra fields either
        serialized_data = [{
            "firstName": registrant.first_name,
            "lastName": registrant.last_name,
            "gender": registrant.registration.gender,
            "school": registrant.registration.school,
            "graduation": registrant.registration.graduation_year,
            "major": registrant.registration.major,
            "initiatives": registrant.registration.initiatives,
            "registered": registrant.registration.created,
            "status": registrant.status,
            "wave": registrant.notification_wave,
            "id": registrant.key.parent().id()
        } for registrant in data if registrant.registration]

        records_total = self._compute_records_total(parameters, more)

        self.write_json(json.dumps({
            "draw": parameters['draw'],
            "data": serialized_data,
            "recordsTotal": records_total,
            "recordsFiltered": records_total,
            "cursor": cursor.urlsafe() if cursor else None
        }))
Code example #25
File: groomer.py Project: caseyoneill/appscale
  def remove_old_dashboard_data(self):
    """ Removes old statistics from the AppScale dashboard application. """
    last_cursor = None
    last_model = None

    # If we have state information beyond what function to use,
    # load the last seen model and cursor if available.
    if (len(self.groomer_state) > 1 and
      self.groomer_state[0] == self.CLEAN_DASHBOARD_TASK):
      last_model = self.DASHBOARD_DATA_MODELS[int(self.groomer_state[1])]
      if len(self.groomer_state) > 2:
        last_cursor = Cursor(self.groomer_state[2])

    self.register_db_accessor(constants.DASHBOARD_APP_ID)
    timeout = datetime.datetime.utcnow() - \
      datetime.timedelta(seconds=self.DASHBOARD_DATA_TIMEOUT)
    for model_number in range(len(self.DASHBOARD_DATA_MODELS)):
      model_type = self.DASHBOARD_DATA_MODELS[model_number]
      if last_model and model_type != last_model:
        continue
      counter = 0
      while True:
        query = model_type.query().filter(model_type.timestamp < timeout)
        entities, next_cursor, more = query.fetch_page(self.BATCH_SIZE,
          start_cursor=last_cursor)
        for entity in entities:
          entity.key.delete()
          counter += 1
        if time.time() > self.last_logged + self.LOG_PROGRESS_FREQUENCY:
          logging.info('Removed {} {} entities.'
            .format(counter, model_type.__class__.__name__))
          self.last_logged = time.time()
        if more:
          last_cursor = next_cursor
          self.update_groomer_state([self.CLEAN_DASHBOARD_TASK,
            str(model_number), last_cursor.urlsafe()])
        else:
          break
      if model_number != len(self.DASHBOARD_DATA_MODELS) - 1:
        self.update_groomer_state([self.CLEAN_DASHBOARD_TASK,
          str(model_number + 1)])
        last_model = None
        last_cursor = None
      if counter > 0:
        logging.info("Removed {0} {1} dashboard entities".format(counter,
          model_type))

      # Do a scan of all entities and remove any that
      # do not have timestamps for AppScale versions 2.3 and before. 
      # This may take some time on the initial run, but subsequent runs should
      # be quick given a low dashboard data timeout.
      self.remove_deprecated_dashboard_data(model_type)
    return 
Code example #26
    def paginate_ndb(self, query, cursor, limit):
        if cursor:
            cursor = Cursor(urlsafe=cursor)

        data, next_cursor, more = query.fetch_page(limit, start_cursor=cursor)

        self.handler.set('paging', {
            'cursor': cursor.urlsafe() if cursor else '',
            'next_cursor': next_cursor.urlsafe() if more else '',
            'limit': limit
        })

        return data
Code example #27
File: pagination.py Project: Tapsa/Ferris
    def paginate_ndb(self, query, cursor, limit):
        if cursor:
            cursor = Cursor(urlsafe=cursor)

        data, next_cursor, more = query.fetch_page(limit, start_cursor=cursor)

        self.handler.set('paging', {
            'cursor': cursor.urlsafe() if cursor else '',
            'next_cursor': next_cursor.urlsafe() if more else '',
            'limit': limit
        })

        return data
Code example #28
    def get_stats(
        cls, topic, cursor_key=None, limit=None, year=None, **kw
    ):
        limit = limit if limit else 20
        cursor = Cursor(urlsafe=cursor_key) if cursor_key else None

        q = cls.query().filter(cls.topic == topic)
        if year:
            q = q.filter(cls.year == year)
        q = q.order(-cls.performance)

        stats, cursor, _ = q.fetch_page(limit, start_cursor=cursor, **kw)
        return stats, (cursor.urlsafe() if cursor else None),
Code example #29
File: webedit.py Project: insiderr/insiderr-server
    def get(self, key=None):
        assert_editor(self)

        cur = Cursor(urlsafe=self.request.get('cur'))
        count = int(self.request.get('count', 500))
        objs, cur, more = self.query(key).fetch_page(count, start_cursor=cur)
        user_key_to_desc = dict((key, key.get().pub_key) for key in edit_users())
        resp = {
            'items': [self.to_dict(obj, user_key_to_desc) for obj in objs],
            'cur': cur.urlsafe() if cur else None,
            'more': more,
        }
        self.respond(resp)
Code example #30
    def post(self):
        user = self._pre_post()
        if not user:
            return

        # pull together all of the necessary parameters
        parameters = self._post_parameters()
        search, search_column, orders = self._post_aggregate()
        search, search_valid = self._post_search(search, search_column)
        model = self._post_model()

        # build the query
        query = model.query()
        for order in orders:
            query = query.order(order)
        if search_valid and search:
            search_column = CheckinDataTableHandler.COLUMNS[search_column]
            query = query.filter(search_column == search)

        offset = parameters['offset']
        cursor = None
        data, more = [], False

        if search_valid:
            cursor = Cursor(urlsafe=parameters['cursor'])
            data, cursor, more = query.fetch_page(parameters['limit'], offset=0, start_cursor=cursor)

        serialized_data = []
        for result in data:
            has_registration = hasattr(result, 'registration') and result.registration
            has_organization = hasattr(result, 'organization') and result.organization

            value = {}
            value['id'] = result.key.parent().id()
            value['firstName'] = result.first_name
            value['lastName'] = result.last_name
            value['school'] = result.registration.school if has_registration else 'N/A'
            value['graduation'] = result.registration.graduation_year if has_registration else 'N/A'
            value['organization'] = result.organization.name if has_organization else 'N/A'
            value['checkIn'] = utils.to_epoch_seconds(result.checked_in) if result.checked_in else None
            serialized_data.append(value)

        records_total = self._compute_records_total(parameters, more)

        self.write_json(json.dumps({
            "draw": parameters['draw'],
            "data": serialized_data,
            "recordsTotal": records_total,
            "recordsFiltered": records_total,
            "cursor": cursor.urlsafe() if cursor else None
        }))
Code example #31
File: models.py Project: smbuben/monaggre-server
 def getpage(cls, monitor, number=20, cursor=None):
     """
     Retrieve a page of events at the given cursor position.
     """
     user = users.get_current_user()
     if not monitor.owner == user:
         raise Exception('Permission error.')
     if not cursor is None:
         cursor = Cursor(urlsafe=cursor)
     query = cls.query(cls.monitor == monitor.key).order(-cls.timestamp)
     events, cursor, more = query.fetch_page(number, start_cursor=cursor)
     if not cursor is None:
         cursor = cursor.urlsafe()
     return (events, cursor, more)
Code example #32
    def post(self):
        in_obj = self.request.POST

        out_obj = {}
        out_obj['draw'] = int(in_obj['draw'])

        self.response.content_type = 'application/json'

        count = 99999999  # FIXME if real number of records exceeds this, increment this!!
        out_obj['recordsTotal'] = count
        out_obj['recordsFiltered'] = count  # OK?

        start = in_obj['start']  # starts at 0
        pagesize = int(in_obj['length'])  # number of rows to return

        cursor = None
        if len(in_obj['next_cursor']):
            cursor = Cursor(urlsafe=in_obj['next_cursor'])
        print("cursor is")
        if cursor:
            print(cursor.urlsafe())
        else:
            print("(none)")

        query = AMILaunch.query(ancestor=get_parent()).order(-AMILaunch.date)
        rows, next_cursor, more = query.fetch_page(pagesize,
                                                   start_cursor=cursor)

        if next_cursor:
            print("urlsafe is")
            print(next_cursor.urlsafe())

            out_obj['next_cursor'] = next_cursor.urlsafe()
        else:
            out_obj['next_cursor'] = None
            print("urlsafe is None")

        data = []

        for row in rows:
            data.append([
                row.ami_id, row.bioc_version, row.ami_name, row.instance_type,
                row.region, row.availability_zone, row.is_bioc_account,
                str(row.date), row.account_hash
            ])

        out_obj['data'] = data
        self.response.write(json.encode(out_obj))
Code example #33
def GetPagedResults(query,
                    order_property,
                    cursor=None,
                    direction=_NEXT,
                    page_size=PAGE_SIZE):
    """Paging the query results with page_size.

  Args:
    query(ndb.Query): The ndb query to query entities.
    order_property (DateTimeProperty of ndb.Model): A class attribute of
      entity class to order the entities.
    cursor (Cursor): A cursor into the current query results, used to
      retrieve the next or previous page of entities.
    direction (str): Either previous or next.
    page_size (int): Number of entities to show per page.

  Returns:
    A tuple of (entities, top_cursor, bottom_cursor).
    entities (list): List of entities to be displayed at the current page.
    top_cursor (str): The urlsafe encoding of the cursor, which is at the
      top position of entities of the current page.
    bottom_cursor (str): The urlsafe encoding of the cursor, which is at the
      bottom position of entities of the current page.
  """
    cursor = Cursor(urlsafe=cursor) if cursor else None

    if direction.lower() == _PREVIOUS:
        query = query.order(order_property)
        entities, next_cursor, more = query.fetch_page(
            page_size, start_cursor=cursor.reversed())
        entities.reverse()
    else:
        query = query.order(-order_property)
        entities, next_cursor, more = query.fetch_page(page_size,
                                                       start_cursor=cursor)

    next_cursor = next_cursor.urlsafe() if next_cursor else ''
    used_cursor = cursor.urlsafe() if cursor else ''
    if direction.lower() == _PREVIOUS:
        top_cursor = next_cursor if more else ''
        bottom_cursor = used_cursor
    else:
        top_cursor = used_cursor
        bottom_cursor = next_cursor if more else ''

    return entities, top_cursor, bottom_cursor
Code example #34
  def remove_old_logs(self, log_timeout):
    """ Removes old logs.

    Args:
      log_timeout: The timeout value in seconds.

    Returns:
      True on success, False otherwise.
    """
    # If we have state information beyond what function to use,
    # load the last seen cursor.
    if (len(self.groomer_state) > 1 and
      self.groomer_state[0] == self.CLEAN_LOGS_TASK):
      last_cursor = Cursor(self.groomer_state[1])
    else:
      last_cursor = None

    self.register_db_accessor(constants.DASHBOARD_APP_ID)
    if log_timeout:
      timeout = (datetime.datetime.utcnow() -
        datetime.timedelta(seconds=log_timeout))
      query = RequestLogLine.query(RequestLogLine.timestamp < timeout)
      logging.debug("The timeout time is {0}".format(timeout))
    else:
      query = RequestLogLine.query()
    counter = 0
    logging.debug("The current time is {0}".format(datetime.datetime.utcnow()))

    while True:
      entities, next_cursor, more = query.fetch_page(self.BATCH_SIZE,
        start_cursor=last_cursor)
      for entity in entities:
        logging.debug("Removing {0}".format(entity))
        entity.key.delete()
        counter += 1
      if time.time() > self.last_logged + self.LOG_PROGRESS_FREQUENCY:
        logging.info('Removed {} log entries.'.format(counter))
        self.last_logged = time.time()
      if more:
        last_cursor = next_cursor
        self.update_groomer_state([self.CLEAN_LOGS_TASK,
          last_cursor.urlsafe()])
      else:
        break
    logging.info("Removed {0} log entries.".format(counter))
    return True
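The loop above deletes entities one key at a time; the same resumable idea can be written more compactly by fetching keys only and batching the deletes. A generic sketch (not the project's code), assuming any ndb model with a timestamp property such as RequestLogLine:

from google.appengine.ext import ndb

def delete_older_than(model_type, cutoff, batch_size=1000, start_cursor=None):
    removed = 0
    cursor = start_cursor
    while True:
        # keys_only avoids loading full entities just to delete them.
        keys, cursor, more = model_type.query(
            model_type.timestamp < cutoff).fetch_page(
                batch_size, start_cursor=cursor, keys_only=True)
        ndb.delete_multi(keys)
        removed += len(keys)
        if not more:
            return removed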
Code example #35
    def post(self):
        in_obj = self.request.POST

        out_obj = {}
        out_obj['draw'] = int(in_obj['draw'])

        self.response.content_type = 'application/json'

        count =  99999999 # FIXME if real number of records exceeds this, increment this!!
        out_obj['recordsTotal'] = count
        out_obj['recordsFiltered'] = count # OK?

        start = in_obj['start'] # starts at 0
        pagesize = int(in_obj['length']) # number of rows to return

        cursor = None
        if len(in_obj['next_cursor']):
            cursor = Cursor(urlsafe=in_obj['next_cursor'])
        print("cursor is")
        if cursor:
            print(cursor.urlsafe())
        else:
            print("(none)")


        query = AMILaunch.query(ancestor=get_parent()).order(-AMILaunch.date)
        rows, next_cursor, more = query.fetch_page(pagesize, start_cursor=cursor)

        if next_cursor:
            print("urlsafe is")
            print(next_cursor.urlsafe())

            out_obj['next_cursor'] = next_cursor.urlsafe()
        else:
            out_obj['next_cursor'] = None
            print("urlsafe is None")

        data = []

        for row in rows:
            data.append([row.ami_id, row.bioc_version, row.ami_name,
                         row.instance_type, row.region, row.availability_zone,
                         row.is_bioc_account, str(row.date), row.account_hash])

        out_obj['data'] = data
        self.response.write(json.encode(out_obj))
Code example #36
    def get_movies(self, request):
        query = movie.Movie.query()

        if request.order == GetMoviesRequest.Order.TITLE:
            query = query.order(movie.Movie.title)

        if request.direction == request.direction.NEXT:
            if request.next_cursor:  # usual_case
                cursor = Cursor(urlsafe=request.next_cursor)
                current_cursor = request.next_cursor
            else:  # No Cursors. First batch
                cursor = Cursor()
                current_cursor = cursor.urlsafe()

            movies, next_cursor, more = query.fetch_page(
                request.how_many_on_page, start_cursor=cursor)
            if next_cursor:
                next_cursor = next_cursor.urlsafe()

        elif request.direction == request.direction.PREVIOUS:
            if not (
                    request.current_cursor and request.next_cursor
            ):  # We are at first batch cursor- there is no data to get in this direction.
                raise NoPageLeftInThatDirection(
                    "Cannot get more in that direction.")

            cursor = Cursor(urlsafe=request.current_cursor)
            cursor = cursor.reversed()  # reversed() returns a new Cursor; it does not reverse in place

            movies, next_cursor, more = query.fetch_page(
                request.how_many_on_page, start_cursor=cursor)

            current_cursor = next_cursor.reversed().urlsafe()
            next_cursor = cursor.reversed().urlsafe()
        else:
            raise UnknownDirection(
                "Direction %s is unknown." % request.direction)

        map_to_movie = lambda item: Movie(
            title=item.title, ID=item.key.id(), description=item.description)

        movies = map(map_to_movie, movies)
        return GetMoviesRespond(movies=movies,
                                next_cursor=next_cursor,
                                current_cursor=current_cursor,
                                more_to_get=more)
Code example #37
    def get(self):
        parser = reqparse.RequestParser()
        parser.add_argument('cursor')
        parser.add_argument('user_id')
        parser.add_argument('sort')

        args = parser.parse_args()
        curs = Cursor(urlsafe=args['cursor'])

        if args['user_id'] is not None:
            q = PaletteModel.query(PaletteModel.added_by_id == args['user_id'])
        else:
            q = PaletteModel.query()

        if args['sort'] is not None and args['sort'] == 'likes':
            q_forward = q.order(-PaletteModel.like_count)
            q_reverse = q.order(PaletteModel.like_count)
        else:
            q_forward = q.order(-PaletteModel.timestamp)
            q_reverse = q.order(PaletteModel.timestamp)            
            
        entries, next_curs, more = q_forward.fetch_page(10, start_cursor=curs)

        out = []
        for palette in entries:
            out.append(PaletteModel.format(palette))

        nextCurs = ""
        if more:
            nextCurs = next_curs.urlsafe()

        prevCurs = ""
        if next_curs is not None:
            rev_cursor = next_curs.reversed()
            old_entries, prev_cursor, fewer = q_reverse.fetch_page(10, start_cursor=rev_cursor, offset=len(out))
            if prev_cursor is not None:
                prevCurs = prev_cursor.urlsafe()

        return {
            'meta': {
                'prev_curs': prevCurs,
                'curs': curs.urlsafe(), 
                'next_curs': nextCurs
            },
            'entries': out
        }
Code example #38
File: groomer.py Project: caseyoneill/appscale
  def remove_old_logs(self, log_timeout):
    """ Removes old logs.

    Args:
      log_timeout: The timeout value in seconds.

    Returns:
      True on success, False otherwise.
    """
    # If we have state information beyond what function to use,
    # load the last seen cursor.
    if (len(self.groomer_state) > 1 and
      self.groomer_state[0] == self.CLEAN_LOGS_TASK):
      last_cursor = Cursor(self.groomer_state[1])
    else:
      last_cursor = None

    self.register_db_accessor(constants.DASHBOARD_APP_ID)
    if log_timeout:
      timeout = (datetime.datetime.utcnow() -
        datetime.timedelta(seconds=log_timeout))
      query = RequestLogLine.query(RequestLogLine.timestamp < timeout)
      logging.debug("The timeout time is {0}".format(timeout))
    else:
      query = RequestLogLine.query()
    counter = 0
    logging.debug("The current time is {0}".format(datetime.datetime.utcnow()))

    while True:
      entities, next_cursor, more = query.fetch_page(self.BATCH_SIZE,
        start_cursor=last_cursor)
      for entity in entities:
        logging.debug("Removing {0}".format(entity))
        entity.key.delete()
        counter += 1
      if time.time() > self.last_logged + self.LOG_PROGRESS_FREQUENCY:
        logging.info('Removed {} log entries.'.format(counter))
        self.last_logged = time.time()
      if more:
        last_cursor = next_cursor
        self.update_groomer_state([self.CLEAN_LOGS_TASK,
          last_cursor.urlsafe()])
      else:
        break
    logging.info("Removed {0} log entries.".format(counter))
    return True
Code example #39
File: views.py Project: georgefs/htmlbuffer
    def get(self, group=None):
        cursor = self.request.get('cursor')

        template = template_loader.get_template('index.html')
        query = {}
        if cursor:
            query['start_cursor'] = Cursor(urlsafe=cursor)

        if group:
            temp_query = TempFile.query(TempFile.group==group)
        else:
            temp_query = TempFile.query()

        
        items, cursor, more = temp_query.order(-TempFile.updated).fetch_page(100, **query)
        cursor = cursor and cursor.urlsafe()

        self.response.write(template.render({"items":items, "cursor":cursor, "more":more}))
Code example #40
File: firstaid.py Project: gayancliyanage/education
    def get_stats(cls,
                  topic_id,
                  year=None,
                  sort_by=None,
                  is_active=True,
                  cursor_key=None,
                  limit=None,
                  **kw):
        limit = limit if limit else 20
        cursor = Cursor(urlsafe=cursor_key) if cursor_key else None

        q = cls.query().filter(cls.is_active == is_active)
        q = q.filter(cls.topic_id == topic_id)
        if year:
            q = q.filter(cls.year == year)
        q = q.order(-ndb.GenericProperty(sort_by))

        stats, cursor, _ = q.fetch_page(limit, start_cursor=cursor, **kw)
        return stats, (cursor.urlsafe() if cursor else None)
Code example #41
File: LikeController.py Project: rsyvarth/Schemify
    def get(self):
        parser = reqparse.RequestParser()
        parser.add_argument('cursor')
        parser.add_argument('palette_id')

        args = parser.parse_args()
        curs = Cursor(urlsafe=args['cursor'])

        if args['palette_id'] is None:
            return {'status' : 404, 'message' : 'palette_id required'}, 404

        q = LikeModel.query(LikeModel.palette_id == args['palette_id'])

        q_forward = q.order(-LikeModel.timestamp)
        q_reverse = q.order(LikeModel.timestamp)

        entries, next_curs, more = q_forward.fetch_page(10, start_cursor=curs)


        out = []
        for like in entries:
            out.append(LikeModel.format(like))

        nextCurs = ""
        if more:
            nextCurs = next_curs.urlsafe()

        prevCurs = ""
        if next_curs is not None:
            rev_cursor = next_curs.reversed()
            old_entries, prev_cursor, fewer = q_reverse.fetch_page(10, start_cursor=rev_cursor, offset=len(out))
            if prev_cursor is not None:
                prevCurs = prev_cursor.urlsafe()

        return {
            'meta': {
                'prev_curs': prevCurs,
                'curs': curs.urlsafe(), 
                'next_curs': nextCurs
            },
            'likes': out
        }
Code example #42
File: pagination.py Project: ksdtech/gae-conferences
    def auto_paginate(self, query=None, cursor=None, limit=None):
        """
        Paginates a query and sets up the appropriate template variables.

        Uses ``handler.paginate_limit`` to determine how many items per page, or defaults to 10 if omitted.

        Sets the ``paging`` template variable to a dictionary like::

            {
                "cursor": "abc...",
                "next_cursor": "nzb...",
                "limit": 10
            }

        Returns the data, and if ``query_or_var_name`` is a string, sets that template variable.
        """

        cursor, limit = self.get_pagination_params(cursor, limit)
        query = self._get_query(query)

        if not query:
            logging.info('Couldn\'t auto paginate, no valid query found')
            return

        if cursor and not isinstance(cursor, Cursor):
            cursor = Cursor(urlsafe=cursor)

        data, next_cursor, more = query.fetch_page(limit, start_cursor=cursor)

        if hasattr(self.controller, 'scaffold'):
            self.controller.context[self.controller.scaffold.plural] = data
        else:
            logging.info('Could not set data')

        self.set_pagination_info(
            current_cursor=cursor.urlsafe() if cursor else False,
            next_cursor=next_cursor.urlsafe() if more else False,
            limit=limit,
            count=len(data)
        )

        return data
Code example #43
File: user.py Project: tylertreat/Kaput
def sync_users(cursor=None):
    """Insert tasks to sync Users with GitHub.

    Args:
        cursor: urlsafe cursor to begin fetching users at.
    """

    query = User.query()
    if cursor:
        cursor = Cursor(urlsafe=cursor)

    keys, cursor, more = query.fetch_page(100, start_cursor=cursor,
                                          keys_only=True)

    with context.new() as ctx:
        logging.debug('Inserting task to sync %s user accounts' % len(keys))
        ctx.add(target=_sync_users, args=([key.id() for key in keys],))

        if more:
            ctx.add(target=sync_users, kwargs={'cursor': cursor.urlsafe()})
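The example chains itself through what appears to be a furious-style async context; the same page-and-requeue pattern can be sketched with the stock deferred library instead (a sketch under that assumption, reusing User and _sync_users from above):

from google.appengine.ext import deferred
from google.appengine.datastore.datastore_query import Cursor

def sync_users_deferred(cursor=None, batch_size=100):
    start = Cursor(urlsafe=cursor) if cursor else None
    keys, next_cursor, more = User.query().fetch_page(
        batch_size, start_cursor=start, keys_only=True)
    # Hand one page of user ids to the worker, then requeue ourselves with the
    # urlsafe cursor of the next page.
    deferred.defer(_sync_users, [key.id() for key in keys])
    if more and next_cursor:
        deferred.defer(sync_users_deferred, cursor=next_cursor.urlsafe())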
Code example #44
File: pagination.py Project: ksdtech/gae-conferences
    def auto_paginate(self, query=None, cursor=None, limit=None):
        """
        Paginates a query and sets up the appropriate template variables.

        Uses ``handler.paginate_limit`` to determine how many items per page, or defaults to 10 if omitted.

        Sets the ``paging`` template variable to a dictionary like::

            {
                "cursor": "abc...",
                "next_cursor": "nzb...",
                "limit": 10
            }

        Returns the data, and if ``query_or_var_name`` is a string, sets that template variable.
        """

        cursor, limit = self.get_pagination_params(cursor, limit)
        query = self._get_query(query)

        if not query:
            logging.info('Couldn\'t auto paginate, no valid query found')
            return

        if cursor and not isinstance(cursor, Cursor):
            cursor = Cursor(urlsafe=cursor)

        data, next_cursor, more = query.fetch_page(limit, start_cursor=cursor)

        if hasattr(self.controller, 'scaffold'):
            self.controller.context[self.controller.scaffold.plural] = data
        else:
            logging.info('Could not set data')

        self.set_pagination_info(
            current_cursor=cursor.urlsafe() if cursor else False,
            next_cursor=next_cursor.urlsafe() if more else False,
            limit=limit,
            count=len(data))

        return data
Code example #45
File: firstaid.py Project: gayancliyanage/education
    def get_stats(
        cls,
        topic_id,
        year=None,
        sort_by=None,
        is_active=True,
        cursor_key=None,
        limit=None,
        **kw
    ):
        limit = limit if limit else 20
        cursor = Cursor(urlsafe=cursor_key) if cursor_key else None

        q = cls.query().filter(cls.is_active == is_active)
        q = q.filter(cls.topic_id == topic_id)
        if year:
            q = q.filter(cls.year == year)
        q = q.order(-ndb.GenericProperty(sort_by))

        stats, cursor, _ = q.fetch_page(limit, start_cursor=cursor, **kw)
        return stats, (cursor.urlsafe() if cursor else None)
Code example #46
File: feed.py Project: egoing/fbgp
 def get(self, post_key):
     from google.appengine.datastore.datastore_query import Cursor
     import json
     next_cursor = Cursor(urlsafe=self.request.get('next_cursor'))
     entries = []
     ckey = 'CommentDataHandler.%s.%s' % (post_key,self.request.get('next_cursor'))
     cdata = memcache.get(ckey)
     if cdata is not None:
         cache = cdata
     else:
         post = Feed.query(Feed.key ==  ndb.Key(urlsafe = post_key)).get()
         #syncComment(post);
         entryRef, next_cursor, more = Comment.query(Comment.parent == ndb.Key(urlsafe = post_key)).order(Comment.created_time).fetch_page(COMMEMT_PAGE_SCALE, start_cursor = next_cursor)
         for _entry in entryRef:
             entry = _entry.to_dict()
             entry['member'] = _entry.member.get().to_dict()
             entries.append(entry)
         cache = {'entries':entries, 'next_cursor':next_cursor.urlsafe() if next_cursor else None, 'more':more};
         if not memcache.add(ckey, cache, CACHE_COMMENT_IN_POST_TIME):
             logging.error('Memcache set failed.')
     self.response.write(json.dumps(cache))
Code example #47
File: firstaid.py Project: gayancliyanage/education
    def get_stats(
        cls,
        cursor_key=None,
        limit=None,
        year=None,
        topic_id=None,
        sort_by=None,
        is_active=True,
        **kw
    ):
        """Return stats by page.

        TODO: add tests

        """
        if sort_by and sort_by not in ('question_taken', 'performance'):
            raise ValueError("Cannot sort by %s" % sort_by)

        if topic_id:
            return FirstAidUserTopicStats.get_stats(
                topic_id,
                cursor_key=cursor_key,
                limit=limit,
                year=year,
                sort_by=sort_by,
                is_active=True,
                **kw
            )

        limit = limit if limit else 20
        cursor = Cursor(urlsafe=cursor_key) if cursor_key else None

        q = cls.query().filter(cls.is_active == is_active)
        if year:
            q = q.filter(cls.year == year)
        q = q.order(-ndb.GenericProperty(sort_by))

        stats, cursor, _ = q.fetch_page(limit, start_cursor=cursor, **kw)
        return stats, (cursor.urlsafe() if cursor else None),
Code example #48
File: pagination.py Project: cwen0708/order-plus
    def paginate(self, query=None, cursor=None, limit=None):
        """
        Paginates a ``ndb.Query`` and sets up the appropriate template variables.

        Uses ``Controller.Meta.pagination_limit`` to determine how many items per page
        or defaults to 10 if omitted.

        Returns the data, and if ``query`` is a string, sets that template variable.

        If ``query`` is omitted it'll attempt to find the dataset using the scaffold variable names.
        """

        cursor, limit = self.get_pagination_params(cursor, limit)
        query = self._get_query(query)

        if not query:
            logging.debug('Couldn\'t auto paginate, no valid query found')
            return

        if cursor and not isinstance(cursor, Cursor):
            cursor = Cursor(urlsafe=cursor)

        if cursor is u"":
            cursor = None
        data, next_cursor, more = query.fetch_page(limit, start_cursor=cursor)

        if hasattr(self.controller, 'scaffold'):
            self.controller.context[self.controller.scaffold.plural] = data
        else:
            logging.debug('Could not set data')

        self.set_pagination_info(
            current_cursor=cursor.urlsafe() if cursor else False,
            next_cursor=next_cursor.urlsafe() if more else False,
            limit=limit,
            count=len(data)
        )

        return data
Code example #49
File: cron.py Project: miraclestyle/miraclestyle
 def run(self, context):
     if not isinstance(self.cfg, dict):
         self.cfg = {}
     limit = self.cfg.get('page', 10)
     Domain = context.models['6']
     cursor = None
     if context._cronconfig.data.get('more'):
         cursor = Cursor(urlsafe=context._cronconfig.data.get('cursor'))
     entities, cursor, more = Domain.query().order(
         Domain.created).fetch_page(limit,
                                    start_cursor=cursor,
                                    keys_only=True)
     if cursor:
         cursor = cursor.urlsafe()
     context._cronconfig.data['cursor'] = cursor
     context._cronconfig.data['more'] = more
     for key in entities:
         data = {
             'action_id': 'cron',
             'action_model': '35',
             'domain': key.urlsafe()
         }
         context._callbacks.append(('callback', data))
Code example #50
0
 def get(self, id=None):
     if id:
         user = User.get_by_id(int(id))
         self.response.write(respond_json(user.to_object()))
         return
     else:
         query = User.query()
         n = 25
         if self.request.get('n'):
             n = int(self.request.get('n'))
         if self.request.get('cursor'):
             cursor = Cursor(urlsafe=self.request.get('cursor'))
             users, cursor, more = query.fetch_page(n, start_cursor=cursor)
         else:
             users, cursor, more = query.fetch_page(n)
         resp = {}
         resp['data'] = []
         if more:
             resp['cursor'] = cursor.urlsafe()
         for user in users:
             resp['data'].append(user.to_object())
         self.response.write(respond_json(resp))
         return
Code example #51
0
def _xdb_fetch_page(query, limit, offset=None, start_cursor=None):
    """Pagination-ready fetching a some entities.

    Returns:
        (objects, cursor, more_objects)
    """

    if isinstance(query, ndb.Query):
        if start_cursor:
            if isinstance(start_cursor, basestring):
                start_cursor = Cursor(urlsafe=start_cursor)
            objects, cursor, more_objects = query.fetch_page(
                limit, start_cursor=start_cursor)
        else:
            objects, cursor, more_objects = query.fetch_page(limit,
                                                             offset=offset)
    elif isinstance(query, db.Query) or isinstance(query, db.GqlQuery):
        if start_cursor:
            if isinstance(start_cursor, Cursor):
                start_cursor = start_cursor.urlsafe()
            query.with_cursor(start_cursor)
            objects = query.fetch(limit)
            cursor = Cursor(urlsafe=query.cursor())
            more_objects = len(objects) == limit
        else:
            objects = query.fetch(limit, offset=offset)
            # MultiQuery does not support cursors
            if len(getattr(query, '_Query__query_sets', [])) < 2:
                _cursor = query.cursor()
                more_objects = query.with_cursor(_cursor).count(1) > 0
                cursor = Cursor(urlsafe=_cursor)
            else:
                more_objects = len(objects) == limit
                cursor = None
    else:
        raise RuntimeError('unknown query class: %s' % type(query))
    return objects, cursor, more_objects
Code example #53
0
    def get_students(
        cls, cursor_key=None, limit=None, name=None, years=[], **kw
    ):
        limit = 20 if limit is None else limit

        cursor = Cursor(urlsafe=cursor_key) if cursor_key else None
        q = cls.query()

        if name:
            q = q.filter(cls.names >= name)
            q = q.filter(cls.names < '%s{' % name)

        if years:
            q = q.filter(cls.year.IN(years))
        else:
            q = q.filter(cls.is_active == True)

        if name:
            q = q.order(cls.names, cls.year)
        elif years:
            q = q.order(cls.year, cls.display_name, cls.key)
        else:
            q = q.order(cls.year, cls.display_name)

        if name or limit == 0:
            limit = limit if limit else None
            students, cursor = q.fetch(limit, **kw), None
        else:
            students, cursor, _ = q.fetch_page(
                limit, start_cursor=cursor, **kw
            )

        return (
            [s.details() for s in students],
            (cursor.urlsafe() if cursor else None),
        )
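
The name filter in get_students relies on the common datastore prefix-match idiom: two inequality filters bounding the range [name, name + '{'). A minimal standalone sketch of the idiom, on a hypothetical Student model with a scalar name property (not the repeated names field used above):

from google.appengine.ext import ndb


class Student(ndb.Model):  # hypothetical model, for illustration only
    name = ndb.StringProperty()


def students_with_name_prefix(prefix, limit=20):
    """Prefix match via an inequality range: prefix <= name < prefix + '{'."""
    # '{' is the first ASCII character after 'z', so every name that starts
    # with `prefix` and continues with letters or digits falls in this range.
    q = Student.query()
    q = q.filter(Student.name >= prefix)
    q = q.filter(Student.name < prefix + '{')
    return q.fetch(limit)
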
Code example #54
0
File: vessel.py Project: hmacread/keelscape
    def get_template_params(self, vessel_key):
        self.vessel_key = vessel_key
        vessel = vessel_key.get()

        wpt_qry = Waypoint.query(ancestor=vessel.key).order(
            -Waypoint.report_date, -Waypoint.received_date)
        curs = Cursor(urlsafe=self.request.get('cursor'))
        params = {
            'loginurl': users.create_login_url('/'),
            'vessel': vessel,
            'map': GoogleMapTrack(vessel)
        }
        if self.request.get('cursor'):
            params['start_url'] = self.get_base_url()
        else:
            params['start_url'] = ''
        params['waypoints'], next_curs, params['older'] = wpt_qry.fetch_page(
            self.NUM_WAYPOINTS, start_cursor=curs)
        params['this_page_url'] = self.get_base_url(
        ) + "?cursor=" + curs.urlsafe()
        if params['older'] and next_curs:
            params['next_page_url'] = self.get_base_url(
            ) + "?cursor=" + next_curs.urlsafe()
        else:
            params['older'] = False

        # #Formulate reverse pointer if there is more recent waypoints
        # rev_wpt_qry = Waypoint.query(ancestor=vessel.key).order(Waypoint.report_date, Waypoint.received_date)
        # rev_curs = curs.reversed()
        # _, prev_curs, params['newer'] = wpt_qry.fetch_page(self.NUM_WAYPOINTS, start_cursor=rev_curs)
        # if params['newer'] and prev_curs:
        #      params['prev_page_url'] = self.get_base_url() + "?cursor=" + prev_curs.reversed().urlsafe()
        # else:
        #      params['newer'] = False

        return params
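
The commented-out block above sketches a "newer waypoints" link built from Cursor.reversed(). A working version of that pattern might look like the following sketch (the Waypoint fields are the ones queried above; the function name and page handling are illustrative):

from google.appengine.datastore.datastore_query import Cursor
from google.appengine.ext import ndb


class Waypoint(ndb.Model):  # field subset, for illustration only
    report_date = ndb.DateTimeProperty()
    received_date = ndb.DateTimeProperty()


def newer_page_token(vessel_key, current_token, page_size):
    """Return a token for the page of newer waypoints, or None if there is none."""
    # Same query as above but with every sort order flipped.
    rev_qry = Waypoint.query(ancestor=vessel_key).order(
        Waypoint.report_date, Waypoint.received_date)
    rev_cursor = Cursor(urlsafe=current_token).reversed()
    _, prev_cursor, newer = rev_qry.fetch_page(page_size,
                                               start_cursor=rev_cursor)
    if newer and prev_cursor:
        # Reverse again so the token is valid for the original descending query.
        return prev_cursor.reversed().urlsafe()
    return None
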
Code example #55
0
File: tasks.py Project: hkarriche/iogrowCRM
    def list(cls, user_from_email, request):
        curs = Cursor(urlsafe=request.pageToken)
        if request.limit:
            limit = int(request.limit)
        else:
            limit = 10
        items = list()
        date_to_string = lambda x: x.strftime("%Y-%m-%d") if x else ""
        date_time_to_string = lambda x: x.strftime("%Y-%m-%dT%H:%M:00.000"
                                                   ) if x else ""
        you_can_loop = True
        count = 0
        while you_can_loop:
            if request.order:
                ascending = True
                if request.order.startswith('-'):
                    order_by = request.order[1:]
                    ascending = False
                else:
                    order_by = request.order
                attr = cls._properties.get(order_by)
                if attr is None:
                    raise AttributeError('Order attribute %s not defined.' %
                                         (order_by, ))
                if ascending:
                    tasks, next_curs, more = cls.query().filter(
                        cls.organization ==
                        user_from_email.organization).order(+attr).fetch_page(
                            limit, start_cursor=curs)
                else:
                    tasks, next_curs, more = cls.query().filter(
                        cls.organization ==
                        user_from_email.organization).order(-attr).fetch_page(
                            limit, start_cursor=curs)
            else:
                tasks, next_curs, more = cls.query().filter(
                    cls.organization ==
                    user_from_email.organization).fetch_page(limit,
                                                             start_cursor=curs)
            for task in tasks:
                if len(items) < limit:
                    is_filtered = True
                    if task.access == 'private' and task.owner != user_from_email.google_user_id:
                        end_node_set = [user_from_email.key]
                        if not Edge.find(start_node=task.key,
                                         kind='permissions',
                                         end_node_set=end_node_set,
                                         operation='AND'):
                            is_filtered = False
                    if request.status and task.status != request.status and is_filtered:
                        is_filtered = False
                    if request.tags and is_filtered:
                        end_node_set = [
                            ndb.Key(urlsafe=tag_key)
                            for tag_key in request.tags
                        ]
                        if not Edge.find(start_node=task.key,
                                         kind='tags',
                                         end_node_set=end_node_set,
                                         operation='AND'):
                            is_filtered = False
                    if request.assignee and is_filtered:
                        user_assigned = model.User.get_by_gid(request.assignee)
                        end_node_set = [user_assigned.key]
                        if not Edge.find(start_node=task.key,
                                         kind='assignees',
                                         end_node_set=end_node_set,
                                         operation='AND'):
                            is_filtered = False
                    if request.owner and task.owner != request.owner and is_filtered:
                        is_filtered = False
                    if request.completed_by and task.completed_by != request.completed_by and is_filtered:
                        is_filtered = False
                    if request.about and is_filtered:
                        end_node_set = [ndb.Key(urlsafe=request.about)]
                        if not Edge.find(start_node=task.key,
                                         kind='related_to',
                                         end_node_set=end_node_set,
                                         operation='AND'):
                            is_filtered = False
                    if request.urgent and is_filtered:
                        if task.due is None:
                            is_filtered = False
                        else:
                            now = datetime.datetime.now()
                            diff = task.due - now
                            if diff.days > 2:
                                is_filtered = False
                        if task.status == 'closed':
                            is_filtered = False

                    if is_filtered:
                        count += 1
                        # list of tags related to this task
                        tag_list = Tag.list_by_parent(parent_key=task.key)
                        about = None
                        edge_list = Edge.list(start_node=task.key,
                                              kind='related_to')
                        for edge in edge_list['items']:
                            about_kind = edge.end_node.kind()
                            parent = edge.end_node.get()
                            if parent:
                                if about_kind == 'Contact' or about_kind == 'Lead':
                                    about_name = parent.firstname + ' ' + parent.lastname
                                else:
                                    about_name = parent.name
                                about = DiscussionAboutSchema(
                                    kind=about_kind,
                                    id=str(parent.key.id()),
                                    name=about_name)
                        # list of tags related to this task
                        edge_list = Edge.list(start_node=task.key,
                                              kind='assignees')
                        assignee_list = list()
                        for edge in edge_list['items']:
                            if edge.end_node.get():
                                assignee_list.append(
                                    AuthorSchema(
                                        edgeKey=edge.key.urlsafe(),
                                        google_user_id=edge.end_node.get(
                                        ).google_user_id,
                                        google_public_profile_url=edge.end_node
                                        .get().google_public_profile_url,
                                        photo=edge.end_node.get(
                                        ).google_public_profile_photo_url))

                        status_color = 'green'
                        status_label = ''
                        if task.due:
                            now = datetime.datetime.now()
                            diff = task.due - now
                            if 0 <= diff.days <= 2:
                                status_color = 'orange'
                                status_label = 'soon: due in ' + str(
                                    diff.days) + ' days'
                            elif diff.days < 0:
                                status_color = 'red'
                                status_label = 'overdue'
                            else:
                                status_label = 'due in ' + str(
                                    diff.days) + ' days'
                        if task.status == 'closed':
                            status_color = 'white'
                            status_label = 'closed'
                        author_schema = None
                        if task.author:
                            author_schema = AuthorSchema(
                                google_user_id=task.author.google_user_id,
                                display_name=task.author.display_name,
                                google_public_profile_url=task.author.
                                google_public_profile_url,
                                photo=task.author.photo)
                        task_schema = TaskSchema(
                            id=str(task.key.id()),
                            entityKey=task.key.urlsafe(),
                            title=task.title,
                            status=task.status,
                            status_color=status_color,
                            status_label=status_label,
                            comments=0,
                            about=about,
                            created_by=author_schema,
                            completed_by=AuthorSchema(),
                            tags=tag_list,
                            assignees=assignee_list,
                            created_at=date_time_to_string(task.created_at),
                            updated_at=date_time_to_string(task.updated_at))
                        if task.due:
                            task_schema.due = date_to_string(task.due)
                        items.append(task_schema)
            if len(items) >= limit:
                you_can_loop = False
            if next_curs:
                if count >= limit:
                    next_curs_url_safe = next_curs.urlsafe()
                else:
                    next_curs_url_safe = curs.urlsafe()
            if next_curs:
                curs = next_curs
            else:
                you_can_loop = False
                next_curs_url_safe = None
        return TaskListResponse(items=items, nextPageToken=next_curs_url_safe)
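
The loop above keeps fetching datastore pages and filtering them in Python until a full page of matching items has been collected, then decides which cursor the client should resume from. A stripped-down sketch of that fetch-filter-refill pattern (names are illustrative; `cursor` is expected to be a Cursor object or None):

def fetch_filtered_page(query, predicate, limit, cursor=None):
    """Collect up to `limit` entities passing `predicate`, paging as needed."""
    items = []
    more = True
    while more and len(items) < limit:
        batch, next_cursor, more = query.fetch_page(limit, start_cursor=cursor)
        for entity in batch:
            if len(items) < limit and predicate(entity):
                items.append(entity)
        if more:
            cursor = next_cursor
    # Resuming happens at a page boundary, so matches that appeared after the
    # page filled up are not revisited -- the same trade-off as the handler above.
    next_token = cursor.urlsafe() if (more and cursor) else None
    return items, next_token
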
Code example #56
0
 def get_users(cls, cursor_key, limit=20, **kw):
     cursor = Cursor(urlsafe=cursor_key) if cursor_key else None
     q = cls.query().order(cls.display_name)
     users, cursor, _ = q.fetch_page(limit, start_cursor=cursor, **kw)
     return users, (cursor.urlsafe() if cursor else None),
Code example #57
0
    def post(self):

        s = "Version: %s\n" % __version__
        s += "Arguments from POST:"
        for arg in self.request.arguments():
            s += '\n%s:%s' % (arg, self.request.get(arg))
        logging.info(s)

        # Try to get period from the request in case GetEvents was called directly
        try:
            self.period = self.request.get("period").lower()
            s = "Version: %s\n" % __version__
            s += "Period %s determined from request: %s" % (self.period,
                                                            self.request)
            logging.info(s)
        except Exception:
            pass

        # If real period not in request, try to get parameters from StatsRun entity
        # in case GetEvents was called from a previous task.
        if self.period is None or len(self.period) == 0:
            run_key = ndb.Key("StatsRun", 5759180434571264)
            run_entity = run_key.get()
            self.period = run_entity.period

        if self.period is None or len(self.period) == 0:
            self.error(400)
            resp = {
                "status": "error",
                "message": "Period parameter was not provided."
            }
            s = "Version: %s\n" % __version__
            s += "%s" % resp
            logging.error(s)
            self.response.write(json.dumps(resp) + "\n")
            return

        # If Period not already stored, halt
        period_key = ndb.Key("Period", self.period)
        period_entity = period_key.get()
        if not period_entity:
            self.error(400)
            resp = {
                "status": "error",
                "message": "Provided period does not exist in datastore",
                "data": {
                    "period": self.period
                }
            }
            logging.error(resp)
            self.response.write(json.dumps(resp) + "\n")
            return

        self.github_store = period_entity.github_store
        self.github_issue = period_entity.github_issue

        # Start the loop, until deadline
        try:

            # Prepare query for all Reports to process
            query = ReportToProcess.query()
            query = query.order(ReportToProcess.gbifdatasetid)
            s = "Version: %s\n" % __version__
            s += "ReportToProcess queried"
            logging.info(s)

            # Get cursor from request, if any
            cursor_str = self.request.get('cursor', None)
            cursor = None
            if cursor_str:
                cursor = Cursor(urlsafe=cursor_str)
            s = "Version: %s\n" % __version__
            s += "Cursor built: %s" % cursor
            logging.info(s)

            # Initialize loop
            more = True

            # Repeat while there are reports to process
            while more is True:

                # Get the next (or first) round of elements
                logging.info("Fetching %d entities" % PAGE_SIZE)
                results, new_cursor, more = query.fetch_page(
                    PAGE_SIZE, start_cursor=cursor)
                s = "Version: %s\n" % __version__
                s += "Got %d results" % len(results)
                logging.info(s)

                # Process and store transactionally
                self.process_and_store(results)

                # Restart with new cursor (if any)
                if more is True:
                    cursor = new_cursor
                    s = "Version: %s\n" % __version__
                    s += "New cursor: %s" % cursor.urlsafe()
                    logging.info(s)

            s = "Version: %s\n" % __version__
            s += "Finished processing reports"
            logging.info(s)

            period_entity = ndb.Key("Period", self.period).get()

            resp = {
                "status": "success",
                "message": "Successfully finished processing all reports",
                "data": {
                    "processed_searches": period_entity.processed_searches,
                    "processed_downloads": period_entity.processed_downloads
                }
            }

            # Launch process to store reports on GitHub, if applicable
            if self.github_store is True:
                resp['message'] += ". Launching GitHub storing process"
                taskqueue.add(url=URI_GITHUB_STORE, queue_name=QUEUENAME)

            # Launch process to create issues on GitHub, if applicable
            elif self.github_issue is True:
                resp['message'] += ". Launching GitHub issue process"
                taskqueue.add(url=URI_GITHUB_ISSUE, queue_name=QUEUENAME)

            # Otherwise, consider finished
            else:
                resp['message'] += ". No GitHub process launched"
                period_entity.status = "done"
                mail.send_mail(sender=EMAIL_SENDER,
                               to=EMAIL_RECIPIENT,
                               subject="Usage reports for period %s" %
                               self.period,
                               body="""
Hey there!

Just a brief note to let you know the extraction of %s stats has 
successfully finished, with no GitHub processes launched.

Congrats!
""" % self.period)

            # In any case, store the status, show message and finish
            period_entity.put()
            logging.info(resp)
            self.response.write(json.dumps(resp) + "\n")

            return

        # When timeout arrives...
        except DeadlineExceededError:
            # Launch new instance with current (failed) cursor
            # Guard against a deadline hitting before any page was fetched,
            # in which case there is no cursor to resume from yet.
            taskqueue.add(url=URI_PROCESS_EVENTS,
                          params={"cursor": cursor.urlsafe() if cursor else ""},
                          queue_name=QUEUENAME)
            s = "Version: %s\n" % __version__
            s += "Caught a DeadlineExceededError. Relaunching"
            logging.warning(s)

            resp = {
                "status": "in progress",
                "message": "Caught a DeadlineExceededError."
                " Relaunching with new cursor",
                "data": {
                    "period": self.period,
                    "cursor": cursor.urlsafe()
                }
            }
            logging.info(resp)
            self.response.write(json.dumps(resp) + "\n")

        return
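
The handler above checkpoints its progress by catching DeadlineExceededError and re-enqueueing itself with the current urlsafe cursor. A condensed sketch of that checkpoint-and-resume pattern (the task URL, queue name, and page size are placeholders, not values from the handler):

import logging

from google.appengine.api import taskqueue
from google.appengine.datastore.datastore_query import Cursor
from google.appengine.runtime import DeadlineExceededError

PAGE_SIZE = 100  # illustrative batch size


def process_in_batches(query, process_batch, cursor_token=None,
                       task_url='/tasks/resume', queue_name='default'):
    """Page through `query`; if the deadline hits, re-enqueue with the last cursor."""
    cursor = Cursor(urlsafe=cursor_token) if cursor_token else None
    try:
        more = True
        while more:
            results, next_cursor, more = query.fetch_page(
                PAGE_SIZE, start_cursor=cursor)
            process_batch(results)
            # Advance the checkpoint only after the batch is safely processed,
            # so a deadline mid-batch retries that batch instead of skipping it.
            if more:
                cursor = next_cursor
    except DeadlineExceededError:
        taskqueue.add(url=task_url,
                      params={'cursor': cursor.urlsafe() if cursor else ''},
                      queue_name=queue_name)
        logging.warning('Deadline exceeded; re-enqueued to resume from cursor.')
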
Code example #58
0
    def get(self, linkKeyStr, proposalId):

        if conf.isDev:
            logging.debug('TopReasons.get() linkKeyStr=' + str(linkKeyStr) +
                          ' proposalId=' + str(proposalId))

        # Collect inputs
        preview = self.request.get('preview', None) is not None
        cursorPro = self.request.get('cursorPro', None)
        cursorPro = Cursor(urlsafe=cursorPro) if cursorPro else None
        cursorCon = self.request.get('cursorCon', None)
        cursorCon = Cursor(urlsafe=cursorCon) if cursorCon else None
        httpRequestId = os.environ.get(conf.REQUEST_LOG_ID)
        responseData = {'success': False, 'httpRequestId': httpRequestId}
        cookieData = httpServer.validate(self.request,
                                         self.request.GET,
                                         responseData,
                                         self.response,
                                         idRequired=False)
        userId = cookieData.id()

        # Retrieve top-level records
        (linkKeyRecord, proposalRecord,
         requestRecord) = retrieveRequestAndProposal(linkKeyStr, proposalId)
        if not linkKeyRecord: return  # Bad link
        proposalId = str(proposalRecord.key.id())

        # Retrieve reasons and vote, in parallel
        voteRecordsFuture = reasonVote.ReasonVote.query(
            reasonVote.ReasonVote.proposalId == proposalId,
            reasonVote.ReasonVote.userId
            == userId).fetch_async() if userId else None
        maxReasonsPerType = (conf.MAX_TOP_REASONS / 2) if preview else 10
        proRecordsFuture, conRecordsFuture = reason.retrieveTopReasonsAsync(
            proposalId,
            maxReasonsPerType,
            cursorPro=cursorPro,
            cursorCon=cursorCon)
        proRecords, cursorPro, morePros = proRecordsFuture.get_result()
        conRecords, cursorCon, moreCons = conRecordsFuture.get_result()
        cursorPro = cursorPro.urlsafe() if cursorPro else None
        cursorCon = cursorCon.urlsafe() if cursorCon else None
        voteRecords = voteRecordsFuture.get_result(
        ) if voteRecordsFuture else None

        # Filter/transform fields for display
        linkKeyDisplay = httpServer.linkKeyToDisplay(linkKeyRecord)
        proposalDisp = httpServer.proposalToDisplay(
            proposalRecord, userId, requestRecord=requestRecord)
        reasonDisps = [
            httpServer.reasonToDisplay(r, userId)
            for r in (proRecords + conRecords)
        ]
        mergeReasonVotes(voteRecords, reasonDisps)

        # Store request/proposal to user's recent (cookie)
        user.storeRecentLinkKey(linkKeyStr, cookieData)

        # Display proposal data
        responseData = {
            'success': True,
            'linkKey': linkKeyDisplay,
            'proposal': proposalDisp,
            'reasons': reasonDisps,
            'cursorPro': cursorPro,
            'cursorCon': cursorCon
        }
        httpServer.outputJson(cookieData, responseData, self.response)
Code example #59
0
    def get(self, linkKeyStr):
        logging.debug('linkKeyStr=' + linkKeyStr)

        # Collect inputs
        cursor = self.request.get('cursor', None)
        cursor = Cursor(urlsafe=cursor) if cursor else None
        getReasons = (self.request.get('getReasons', 'true') == 'true')
        logging.debug('getReasons=' + str(getReasons))

        httpRequestId = os.environ.get(conf.REQUEST_LOG_ID)
        responseData = {'success': False, 'httpRequestId': httpRequestId}

        cookieData = httpServer.validate(self.request,
                                         self.request.GET,
                                         responseData,
                                         self.response,
                                         idRequired=False)
        userId = cookieData.id()

        # Retrieve requestId from linkKey.  destinationType must be RequestForProposals.
        linkKeyRecord = linkKey.LinkKey.get_by_id(linkKeyStr)
        logging.debug('GetRequestData.get() linkKeyRecord=' +
                      str(linkKeyRecord))

        if (linkKeyRecord is None) or (linkKeyRecord.destinationType !=
                                       conf.REQUEST_CLASS_NAME):
            return httpServer.outputJson(cookieData,
                                         responseData,
                                         self.response,
                                         errorMessage=conf.BAD_LINK)

        # Retrieve RequestForProposal by id, filter/transform fields for display.
        requestId = linkKeyRecord.destinationId
        requestRecordFuture = requestForProposals.RequestForProposals.get_by_id_async(
            int(requestId))

        requestRecord = requestRecordFuture.get_result(
        ) if requestRecordFuture else None

        logging.debug('GetRequestData.get() userId=' + str(userId) +
                      ' requestRecord.creator=' + str(requestRecord.creator))

        # If userId exists... async-retrieve user's ReasonVotes by KeyProperty requestId x userId.
        voteRecordsFuture = None
        if getReasons and userId:
            voteRecordsFuture = reasonVote.ReasonVote.query(
                reasonVote.ReasonVote.requestId == requestId,
                reasonVote.ReasonVote.userId == userId).fetch_async()

        # Retrieve Proposals by KeyProperty requestId
        # Get all data up to current page maximum length.  + Refreshes earlier proposal data.
        maxProposals = const.INITIAL_MAX_PROPOSALS
        proposalRecords, cursor, hasMore = proposal.retrieveTopProposals(
            requestId, maxProposals, cursor=cursor)
        cursor = cursor.urlsafe() if cursor else None

        # Async-retrieve top N reasons per proposal, equal number of pro/con reasons
        reasonRecordsFutures = []
        if getReasons:
            for proposalRec in proposalRecords:
                maxReasonsPerType = conf.MAX_TOP_REASONS / 2
                proRecordsFuture, conRecordsFuture = reason.retrieveTopReasonsAsync(
                    proposalRec.key.id(), maxReasonsPerType)
                reasonRecordsFutures.append(proRecordsFuture)
                reasonRecordsFutures.append(conRecordsFuture)

        # Wait for parallel retrievals
        logging.debug('GetRequestData.get() requestRecord=' +
                      str(requestRecord))

        reasonRecords = []
        for reasonRecordsFuture in reasonRecordsFutures:
            # Use throwaway names for the per-reason cursor and more-flag so the
            # proposals cursor computed above is not clobbered before it is
            # written into responseData.
            reasonRecordsForProp, _reasonCursor, _reasonMore = (
                reasonRecordsFuture.get_result())
            logging.debug('GetRequestData.get() reasonRecordsForProp=' +
                          str(reasonRecordsForProp))
            reasonRecords.extend(reasonRecordsForProp)
        reasonRecords = sorted(reasonRecords, key=lambda r: -r.score)
        logging.debug('GetRequestData.get() reasonRecords=' +
                      str(reasonRecords))

        voteRecords = voteRecordsFuture.get_result(
        ) if voteRecordsFuture else []
        logging.debug('GetRequestData.get() voteRecords=' + str(voteRecords))

        # Transform records for display.
        linkKeyDisp = httpServer.linkKeyToDisplay(linkKeyRecord)
        logging.debug('GetRequestData.get() linkKeyDisp=' + str(linkKeyDisp))

        requestDisp = httpServer.requestToDisplay(requestRecord, userId)
        logging.debug('GetRequestData.get() requestDisp=' + str(requestDisp))

        proposalDisps = [
            httpServer.proposalToDisplay(p,
                                         userId,
                                         requestRecord=requestRecord)
            for p in proposalRecords
        ]
        logging.debug('GetRequestData.get() proposalDisps=' +
                      str(proposalDisps))

        reasonDisps = [
            httpServer.reasonToDisplay(r, userId) for r in reasonRecords
        ]
        logging.debug('GetRequestData.get() reasonDisps=' + str(reasonDisps))

        # For each reason... collect user vote in reason.myVote
        reasonToVoteRec = {v.reasonId: v
                           for v in voteRecords} if voteRecords else {}
        logging.debug('GetRequestData.get() reasonToVoteRec=' +
                      str(reasonToVoteRec))

        for r in reasonDisps:
            voteRec = reasonToVoteRec.get(r['id'])
            r['myVote'] = voteRec.voteUp if voteRec else False

        # Store request to user's recent requests (cookie).
        user.storeRecentLinkKey(linkKeyStr, cookieData)

        # Display request data.
        responseData = {
            'success': True,
            'linkKey': linkKeyDisp,
            'request': requestDisp,
            'proposals': proposalDisps,
            'reasons': reasonDisps,
            'maxProposals': maxProposals,
            'cursor': cursor,
        }
        logging.debug(
            'GetRequestData.get() responseData=' +
            json.dumps(responseData, indent=4, separators=(', ', ':')))
        httpServer.outputJson(cookieData, responseData, self.response)