Example #1
    def test_encode_scanvec(self):
        # The value is a vbucket's sequence number,
        # and guard is a vbucket's UUID.

        q = N1QLQuery('SELECT * FROM default')
        ms = MutationState()
        ms._add_scanvec((42, 3004, 3, 'default'))
        q.consistent_with(ms)

        dval = json.loads(q.encoded)
        sv_exp = {'default': {'42': [3, '3004']}}

        self.assertEqual('at_plus', dval['scan_consistency'])
        self.assertEqual(sv_exp, dval['scan_vectors'])

        # Ensure the vb field gets updated. No duplicates!
        ms._add_scanvec((42, 3004, 4, 'default'))
        sv_exp['default']['42'] = [4, '3004']
        dval = json.loads(q.encoded)
        self.assertEqual(sv_exp, dval['scan_vectors'])

        ms._add_scanvec((91, 7779, 23, 'default'))
        dval = json.loads(q.encoded)
        sv_exp['default']['91'] = [23, '7779']
        self.assertEqual(sv_exp, dval['scan_vectors'])

        # Try with a second bucket
        sv_exp['other'] = {'666': [99, '5551212']}
        ms._add_scanvec((666, 5551212, 99, 'other'))
        dval = json.loads(q.encoded)
        self.assertEqual(sv_exp, dval['scan_vectors'])
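The test above implies that the tuple passed to _add_scanvec is (vbucket_id, vbucket_uuid, sequence_number, bucket_name) and that the encoded N1QL scan vector takes the shape {bucket: {str(vbucket_id): [sequence_number, str(vbucket_uuid)]}}. A minimal standalone sketch of the same flow, assuming the couchbase 2.x imports these tests use:

import json

from couchbase.n1ql import N1QLQuery, MutationState

ms = MutationState()
# Tuple order mirrors the test: (vbucket_id, vbucket_uuid, seqno, bucket_name).
ms._add_scanvec((42, 3004, 3, 'default'))

q = N1QLQuery('SELECT * FROM default')
q.consistent_with(ms)

body = json.loads(q.encoded)
# Expect 'at_plus' consistency and {'default': {'42': [3, '3004']}} vectors,
# exactly as asserted above.
print(body['scan_consistency'], body['scan_vectors'])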

    def test_consistency(self):
        uuid = str('10000')
        vb = 42
        seq = 101
        ixname = 'ix'

        mutinfo = (vb, uuid, seq, 'dummy-bucket-name')
        ms = MutationState()
        ms._add_scanvec(mutinfo)

        params = cbft.Params()
        params.consistent_with(ms)
        got = cbft.make_search_body('ix', cbft.MatchNoneQuery(), params)
        exp = {
            'indexName': ixname,
            'query': {
                'match_none': None
            },
            'ctl': {
                'consistency': {
                    'level': 'at_plus',
                    'vectors': {
                        ixname: {
                            '{0}/{1}'.format(vb, uuid): seq
                        }
                    }
                }
            }
        }
        self.assertEqual(exp, got)

    def test_mutation_state(self):
        cb = self.cb
        key = self.gen_key('mutationState')
        rv = cb.upsert(key, 'value')

        d1 = json.loads(MutationState(rv).encode())
        ms = MutationState()
        ms.add_results(rv)
        d2 = json.loads(ms.encode())
        self.assertEqual(d1, d2)   # Ensure it's the same
        self.assertTrue(d1[cb.bucket])  # Ensure it's not empty

        vb, uuid, seq, _ = rv._mutinfo
        mt_got = d1[cb.bucket][str(vb)]
        self.assertEqual(seq, mt_got[0])
        self.assertEqual(str(uuid), mt_got[1])
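test_mutation_state above implies MutationState(rv).encode() yields JSON keyed by bucket name, then by vbucket id, with [sequence_number, 'uuid'] values. A hedged sketch of the same check outside the test harness (the connection string and key are placeholders):

import json

from couchbase.bucket import Bucket
from couchbase.n1ql import MutationState

# fetch_mutation_tokens=true makes the upsert result carry a mutation token.
cb = Bucket('couchbase://localhost/default?fetch_mutation_tokens=true')
rv = cb.upsert('mutationState-demo', 'value')

state = json.loads(MutationState(rv).encode())
vb, uuid, seq, _ = rv._mutinfo  # same private field the test reads
assert state[cb.bucket][str(vb)] == [seq, str(uuid)]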
Example #7
#!/usr/bin/env python
import time

from couchbase.bucket import Bucket
from couchbase.n1ql import N1QLQuery, MutationState

TIMESTAMP = str(time.time())

cb = Bucket('couchbase://localhost/default?fetch_mutation_tokens=true')
rv = cb.upsert('ndoc', {'timestamp': TIMESTAMP})

ms = MutationState()
ms.add_results(rv)

query = N1QLQuery('SELECT * from default WHERE timestamp=$1', TIMESTAMP)
query.consistent_with(ms)
print(query.encoded)

for row in cb.n1ql_query(query):
    print(row)
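A note on the script above: the fetch_mutation_tokens=true option in the connection string is what lets the upsert result carry the mutation token that MutationState.add_results() consumes, so the printed query.encoded should contain scan_consistency 'at_plus' and a scan vector in the {bucket: {vbid: [seqno, 'uuid']}} shape exercised by test_encode_scanvec above. A small sanity check one could append (assuming import json is added at the top):

encoded = json.loads(query.encoded)
assert encoded['scan_consistency'] == 'at_plus'
assert cb.bucket in encoded['scan_vectors']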
Example #8
File: wgen.py Project: woodysign/spring
    def do_batch(self):

        if self.ws.n1ql_op == 'read':
            curr_items_spot = \
                self.curr_items.value - self.ws.creates * self.ws.workers
            deleted_spot = \
                self.deleted_items.value + self.ws.deletes * self.ws.workers
            for _ in xrange(self.BATCH_SIZE):
                key = self.existing_keys.next(curr_items_spot, deleted_spot)
                doc = self.docs.next(key)
                doc['key'] = key
                doc['bucket'] = self.ts.bucket
                ddoc_name, view_name, query = self.new_queries.next(doc)
                self.cb.query(ddoc_name, view_name, query=query)
            return

        curr_items_tmp = curr_items_spot = self.curr_items.value
        if self.ws.n1ql_op == 'create':
            with self.lock:
                self.curr_items.value += self.BATCH_SIZE
                curr_items_tmp = self.curr_items.value - self.BATCH_SIZE
            curr_items_spot = (curr_items_tmp -
                               self.BATCH_SIZE * self.total_workers)

        deleted_items_tmp = deleted_spot = 0
        if self.ws.n1ql_op == 'delete':
            with self.lock:
                self.deleted_items.value += self.BATCH_SIZE
                deleted_items_tmp = self.deleted_items.value - self.BATCH_SIZE
            deleted_spot = (deleted_items_tmp +
                            self.BATCH_SIZE * self.total_workers)

        deleted_capped_items_tmp = deleted_capped_spot = 0
        if self.ws.n1ql_op == 'rangedelete':
            with self.lock:
                self.deleted_capped_items.value += self.BATCH_SIZE
                deleted_capped_items_tmp = self.deleted_capped_items.value - self.BATCH_SIZE
            deleted_capped_spot = (deleted_capped_items_tmp +
                                   self.BATCH_SIZE * self.total_workers)

        casupdated_items_tmp = casupdated_spot = 0
        if self.ws.n1ql_op == 'update':
            with self.lock:
                self.casupdated_items.value += self.BATCH_SIZE
                casupdated_items_tmp = self.casupdated_items.value - self.BATCH_SIZE
            casupdated_spot = (casupdated_items_tmp +
                               self.BATCH_SIZE * self.total_workers)

        if self.ws.n1ql_op == 'create':
            for _ in xrange(self.BATCH_SIZE):
                curr_items_tmp += 1
                key, ttl = self.new_keys.next(curr_items_tmp)
                doc = self.docs.next(key)
                doc['key'] = key
                doc['bucket'] = self.ts.bucket
                ddoc_name, view_name, query = self.new_queries.next(doc)
                self.cb.query(ddoc_name, view_name, query=query)

        elif self.ws.n1ql_op == 'delete':
            for _ in xrange(self.BATCH_SIZE):
                deleted_items_tmp += 1
                key = self.keys_for_removal.next(deleted_items_tmp)
                doc = self.docs.next(key)
                doc['key'] = key
                doc['bucket'] = self.ts.bucket
                ddoc_name, view_name, query = self.new_queries.next(doc)
                self.cb.query(ddoc_name, view_name, query=query)

        elif self.ws.n1ql_op == 'update' or self.ws.n1ql_op == 'lookupupdate':
            for _ in xrange(self.BATCH_SIZE):
                key = self.keys_for_casupdate.next(self.sid, curr_items_spot, deleted_spot)
                doc = self.docs.next(key)
                doc['key'] = key
                doc['bucket'] = self.ts.bucket
                ddoc_name, view_name, query = self.new_queries.next(doc)
                self.cb.query(ddoc_name, view_name, query=query)

        elif self.ws.n1ql_op == 'ryow':
            for _ in xrange(self.BATCH_SIZE):
                query = self.ws.n1ql_queries[0]['statement'][1:-1]
                if self.ws.n1ql_queries[0]['prepared'] == "singleton_unique_lookup":
                    by_key = 'email'
                elif self.ws.n1ql_queries[0]['prepared'] == "range_scan":
                    by_key = 'capped_small'
                else:
                    logger.error('n1ql_queries {} not defined'.format(self.ws.n1ql_queries))
                    # Skip this iteration; otherwise by_key is undefined below.
                    continue
                key1 = self.keys_for_casupdate.next(self.sid, curr_items_spot, deleted_spot)
                doc1 = self.docs.next(key1)
                key2 = self.keys_for_casupdate.next(self.sid, curr_items_spot, deleted_spot)
                doc2 = self.docs.next(key2)
                rvs = self.cb.client.upsert_multi({key1: doc2, key2: doc1})
                # Per the workload requirements, each N1QL worker sleeps
                # between the mutation and the consistent query.
                time.sleep(float(self.ws.n1ql_queries[0]['time_sleep']))
                ms = MutationState()
                ms.add_results(*rvs.values())
                nq = N1QLQuery(query.format(doc2[by_key]))
                nq.consistent_with(ms)
                len(list(self.cb.client.n1ql_query(nq)))

        elif self.ws.n1ql_op == 'rangeupdate':
            for _ in xrange(self.BATCH_SIZE):
                key = self.keys_for_casupdate.next(self.sid, curr_items_spot, deleted_spot)
                doc = self.docs.next(key)
                doc['key'] = key
                doc['bucket'] = self.ts.bucket
                ddoc_name, view_name, query = self.new_queries.next(doc)
                self.cb.query(ddoc_name, view_name, query=query)

        elif self.ws.n1ql_op == 'rangedelete':
            for _ in xrange(self.BATCH_SIZE):
                doc = {}
                doc['capped_small'] = "n1ql-_100_" + str(deleted_capped_items_tmp)
                ddoc_name, view_name, query = self.new_queries.next(doc)
                self.cb.query(ddoc_name, view_name, query=query)
                deleted_capped_items_tmp += 1

        elif self.ws.n1ql_op == 'merge':  # run a SELECT * workload for merge
            for _ in xrange(self.BATCH_SIZE):
                key = self.existing_keys.next(curr_items_spot, deleted_spot)
                doc = self.docs.next(key)
                doc['key'] = key
                doc['bucket'] = self.ts.bucket
                ddoc_name, view_name, query = self.new_queries.next(doc)
                query['statement'] = "SELECT * FROM `bucket-1` USE KEYS[$1];"
                query['args'] = "[\"{key}\"]".format(**doc)
                del query['prepared']
                self.cb.query(ddoc_name, view_name, query=query)