Example #1
def _bucket_latest_entries_batch(bucket, limit=DEFAULT_BATCH_SIZE, startkey=None, startkey_docid=None):
    limit = min(limit, MAX_BATCH_SIZE) # clamp client-supplied sizes to the server maximum
    query = dict(
        limit=limit + 1, # request 1 more than limit to see if there's a next batch
        startkey=[bucket.id, {}], # initial batch; overridden for subsequent batches below
        endkey=[bucket.id],
        include_docs=True,
        descending=True,
        )
    if startkey is not None: # subsequent batches
        assert startkey_docid is not None, 'startkey given with no startkey_docid'
        query.update(startkey=startkey, startkey_docid=startkey_docid)

    rows = list(view_entries_by_timestamp(ctx.db, **query))
    if len(rows) > limit: # there's another batch after this one
        lastrow = rows.pop() # drop the probe row; its key and docid seed the next batch
        next_url = url_for('bucket_latest_entries',
            bucket=bucket,
            startkey=json_sleep(lastrow.key),
            startkey_docid=lastrow.id,
            )
    else:
        next_url = None

    # each row's doc is a NewsItemRef; resolve its item_id to the full NewsItem, then tidy
    entryids = [r.doc['item_id'] for r in rows]
    entries = [tidy_entry(i) for i in NewsItem.get_by_ids(entryids, ctx)]
    return (entries, next_url)
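
For illustration, a minimal sketch of a caller that drains every batch by threading the probe row's key and docid into the next query. The iterator name _iter_latest_entries is hypothetical, and it leans on the same module-level view_entries_by_timestamp, ctx, and size constants the example assumes:

def _iter_latest_entries(bucket, batch_size=DEFAULT_BATCH_SIZE):
    batch_size = min(batch_size, MAX_BATCH_SIZE)
    startkey, startkey_docid = [bucket.id, {}], None
    while True:
        query = dict(
            limit=batch_size + 1, # probe for one extra row, as above
            startkey=startkey,
            endkey=[bucket.id],
            include_docs=True,
            descending=True,
            )
        if startkey_docid is not None: # set from the second batch onward
            query['startkey_docid'] = startkey_docid
        rows = list(view_entries_by_timestamp(ctx.db, **query))
        has_more = len(rows) > batch_size
        if has_more:
            lastrow = rows.pop() # the probe row becomes the next batch's start
        for row in rows:
            yield row.doc['item_id'] # real callers would hydrate per batch via NewsItem.get_by_ids
        if not has_more:
            break
        startkey, startkey_docid = lastrow.key, lastrow.id

Passing startkey_docid alongside startkey is what keeps the cursor stable when several entries share a timestamp: CouchDB resumes at that exact row rather than at the first row carrying the key.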
Example #2
def _bucket_latest_entries_batch(bucket, limit=DEFAULT_FEED_SIZE):
    limit = min(limit, MAX_FEED_SIZE) # clamp client-supplied sizes to the feed maximum
    query = dict(
        limit=limit,
        startkey=[bucket.id, {}], # {} collates after any timestamp, so this bounds the bucket's newest key
        endkey=[bucket.id],
        include_docs=True,
        descending=True, # newest first
        )
    entryids = [r.doc['item_id'] for r in view_entries_by_timestamp(ctx.db, **query)]
    return [tidy_entry(i) for i in NewsItem.get_by_ids(entryids, ctx)]
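
Both variants assume view_entries_by_timestamp emits [bucket_id, timestamp] keys from the NewsItemRef docs that include_docs=True then returns. A sketch of that map function's shape, written as a couchdb-python Python view so the section stays in one language; the function and every field name here are assumptions, not the project's actual view:

def entries_by_timestamp_map(doc):
    # hypothetical map function; assumes the ref doc carries bucket_id,
    # timestamp, and the item_id the batch functions read via include_docs
    if doc.get('doc_type') == 'NewsItemRef':
        yield [doc['bucket_id'], doc['timestamp']], None

Emitting None as the value keeps the index small; include_docs=True pulls the full ref doc at query time instead.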