Code Example #1
File: bucketreader.py  Project: jab/beereader
def _bucket_latest_entries_batch(bucket, limit=DEFAULT_BATCH_SIZE, startkey=None, startkey_docid=None):
    limit = min(limit, MAX_BATCH_SIZE)
    query = dict(
        limit=limit + 1, # request 1 more than limit to see if there's a next batch
        startkey=[bucket.id, {}], # initial batch; overridden for subsequent batches below
        endkey=[bucket.id],
        include_docs=True,
        descending=True,
        )
    if startkey is not None: # subsequent batches
        assert startkey_docid is not None, 'startkey given with no startkey_docid'
        query.update(startkey=startkey, startkey_docid=startkey_docid)

    rows = list(view_entries_by_timestamp(ctx.db, **query))
    if len(rows) > limit: # there's another batch after this one
        lastrow = rows.pop() # the extra row's key and id seed the next batch's query
        next_url = url_for('bucket_latest_entries',
            bucket=bucket,
            startkey=json_sleep(lastrow.key),
            startkey_docid=lastrow.id,
            )
    else:
        next_url = None

    #entries = [tidy_entry(NewsItemRef.from_doc(r.doc, ctx)) for r in rows]
    entryids = [r.doc['item_id'] for r in rows]
    entries = [tidy_entry(i) for i in NewsItem.get_by_ids(entryids, ctx)]
    return (entries, next_url)
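
The limit + 1 over-fetch in this function is a standard cursor-pagination idiom: request one row beyond the page size, and if it arrives, pop it and hand its key and doc id back as the next page's startkey/startkey_docid. A minimal sketch of the same mechanics over a plain sorted list (latest_batch and its names are illustrative, not project code):

def latest_batch(sorted_rows, limit, startkey=None):
    if startkey is not None:
        # resume at the cursor row (inclusive), like startkey/startkey_docid
        sorted_rows = [r for r in sorted_rows if r <= startkey]
    rows = sorted_rows[:limit + 1]  # request one more row than the page size
    if len(rows) > limit:           # the extra row means another batch exists
        next_key = rows.pop()       # drop it; its key is the next batch's cursor
    else:
        next_key = None
    return rows, next_key

data = sorted(range(10), reverse=True)     # descending, like the view query
page, cursor = latest_batch(data, limit=4)                   # [9, 8, 7, 6], cursor 5
page, cursor = latest_batch(data, limit=4, startkey=cursor)  # [5, 4, 3, 2], cursor 1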
Code Example #2
File: bucketfeeder.py  Project: jab/beereader
def _bucket_latest_entries_batch(bucket, limit=DEFAULT_FEED_SIZE):
    limit = min(limit, MAX_FEED_SIZE)
    query = dict(
        limit=limit,
        startkey=[bucket.id, {}],
        endkey=[bucket.id],
        include_docs=True,
        descending=True,
        )
    # ref docs store only the item id; fetch the full NewsItem docs before tidying
    entryids = [r.doc['item_id'] for r in view_entries_by_timestamp(ctx.db, **query)]
    return [tidy_entry(i) for i in NewsItem.get_by_ids(entryids, ctx)]
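
Both reader functions rely on CouchDB's collation order for their key range: a JSON object ({}) sorts after every other value, so [bucket.id, {}] is an upper bound over all [bucket_id, timestamp] keys for the bucket, and with descending=True it serves as the startkey while the bare [bucket.id] prefix is the endkey. A rough stand-in for that ordering using tuples, with float("inf") playing the role of {} (illustrative only):

# inf sorts after every timestamp in the second key slot, as {} does in CouchDB
keys = sorted([("b1", ts) for ts in (1, 2, 3, 4, 5)] + [("b2", 1)])
startkey, endkey = ("b1", float("inf")), ("b1",)  # high and low ends of the range
rows = [k for k in reversed(keys) if endkey <= k <= startkey]  # descending scan
assert rows == [("b1", ts) for ts in (5, 4, 3, 2, 1)]  # only b1's keys, newest first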
Code Example #3
File: composite.py  Project: jab/melkman
    def init_subscription(self, bucket_id):
        sub_info = self.subscriptions.get(bucket_id, None)
        
        if sub_info is None:
            return 0 # not subscribed.
        
        # pull up to 50 of the bucket's entries from the last day, newest first
        stop_date = datetime.utcnow() - timedelta(days=1)
        query = {
            'startkey': [bucket_id, {}],
            'endkey': [bucket_id, DateTimeField()._to_json(stop_date)],
            'limit': 50,
            'descending': True,
            'include_docs': True,
        }
        initial_items = [NewsItemRef.from_doc(r.doc, self._context) for r in 
                         view_entries_by_timestamp(self._context.db, **query)]

        if len(initial_items) > 0:
            return self.filtered_update(initial_items)
        else:
            return 0
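
With descending=True the endkey is the lower bound of the scan: the view walks from the bucket's newest entry down to stop_date, so only refs from the last day are considered, capped at 50; DateTimeField()._to_json appears to serialize the cutoff into the same form the view emits for its timestamp keys. A tiny sketch of the resulting window (values made up for illustration):

from datetime import datetime, timedelta

now = datetime(2010, 6, 15, 12, 0)  # fixed instead of utcnow() for a stable demo
stop_date = now - timedelta(days=1)
timestamps = [now - timedelta(hours=h) for h in range(0, 48, 6)]
window = [ts for ts in sorted(timestamps, reverse=True) if ts >= stop_date][:50]
assert window[0] == now and window[-1] == stop_date  # newest down to the cutoff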
Code Example #4
File: test_remotefeed.py  Project: jab/melkman
def test_view_bucket_entries_by_timestamp(ctx):
    from datetime import datetime, timedelta
    from melkman.db import NewsBucket, NewsItemRef
    from melkman.db.bucket import view_entries_by_timestamp
    from random import shuffle
    
    bucket_id = 'test_bucket'
    bucket = NewsBucket.create(ctx, bucket_id)

    first_date = datetime.today()
    items = [(random_id(), first_date - timedelta(days=i)) for i in range(100)]
    jumbled_items = list(items)
    shuffle(jumbled_items)

    for iid, timestamp in jumbled_items:
        bucket.add_news_item({'item_id': iid, 'timestamp': timestamp})

    bucket.save()
    
    # make sure they're all in there
    for iid, timestamp in items:
        assert bucket.has_news_item(iid)

    # ensure they come out in the right order in the index
    query = {
        'startkey': [bucket_id, {}],
        'endkey': [bucket_id],
        'limit': 200,
        'descending': True, 
        'include_docs': True
    }
    sorted_items = [NewsItemRef.from_doc(r.doc, ctx) for r in 
                    view_entries_by_timestamp(ctx.db, **query)]

    assert len(sorted_items) == 100
    for i, item in enumerate(sorted_items):
        assert item.item_id == items[i][0]
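
Shuffling the insertion order before saving is the point of this test: if the 100 refs still come back newest-first, the ordering must come from the view's [bucket_id, timestamp] key rather than from insertion order. Setting limit to 200, well above the 100 items added, also lets the length assertion catch any duplicate or stray rows in the index.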