Code example #1
File: cbas_infer_schema.py Project: umang-cb/Jython
    def verify_infer_schema_on_same_type_missing_fields(self):
        self.log.info('Create unique documents')
        client = SDKClient(hosts=[self.master.ip], bucket=self.cb_bucket_name, password=self.master.rest_password)
        documents = [
            '{ "array": [1,2,3,4,5,6], "integer": 10, "float":10.12, "boolean":true, "null":null, "object":{"array": [1,2,3,4,5,6], "integer": 10, "float":10.12, "boolean":true, "null":null}}',
            '{ "array": null, "integer": 10, "float":10.12, "boolean":true, "null":null, "object":{"array": [1,2,3,4,5,6], "integer": 10, "float":10.12, "boolean":true, "null":"null value"}}',
            '{ "array": [1,2,3,4,5,6], "integer": 10, "float":10.12, "boolean":true, "null":null, "object":{"array": null, "integer": null, "float":null, "boolean":null, "null":null}}'
            ]
        for index, document in enumerate(documents):
            client.insert_document(str(index), document)

        self.log.info('Create primary index on %s' % self.cb_bucket_name)
        self.rest.query_tool('CREATE PRIMARY INDEX idx on %s' % self.cb_bucket_name)

        self.log.info('Create dataset')
        self.cbas_util.createConn(self.cb_bucket_name)
        self.cbas_util.create_dataset_on_bucket(self.cb_bucket_name, self.cbas_dataset_name)

        self.log.info('Connect link')
        self.cbas_util.connect_link()

        self.log.info('Verify dataset count')
        self.cbas_util.validate_cbas_dataset_items_count(self.cbas_dataset_name, len(documents))

        self.validate_infer_schema_response(len(documents))
Code example #2
    def test_add_concurrent(self):
        DOCID = 'subdoc_doc_id'
        SERVER_IP = self.servers[0].ip
        ITERATIONS = 200
        THREADS = 20

        main_bucket = SDKClient(scheme="couchbase", hosts=[self.servers[0].ip],
                  bucket='default').cb
        main_bucket.upsert(DOCID, {'recs':[]})

        class Runner(Thread):
            def run(self, *args, **kw):
                cb = SDKClient(scheme="couchbase", hosts=[SERVER_IP],
                  bucket='default').cb
                for x in range(ITERATIONS):
                    cb.mutate_in(DOCID, SD.array_append('recs', 1))

        thrs = [Runner() for x in range(THREADS)]
        [t.start() for t in thrs]
        [t.join() for t in thrs]

        obj = main_bucket.get(DOCID)

        array_entry_count = len(obj.value['recs'])

        self.assertTrue(array_entry_count == ITERATIONS * THREADS,
                         'Incorrect number of array entries. Expected {0} actual {1}'.format(ITERATIONS * THREADS,
                                                                           array_entry_count))
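
The test above drives everything through the harness's SDKClient wrapper and its `.cb` handle. As a point of comparison, a minimal standalone sketch of the same concurrent sub-document append pattern, assuming the Couchbase Python SDK 2.x (`couchbase` package) and an unauthenticated `default` bucket on localhost, could look like this; the host, document id, and counts are illustrative:

    # Sketch only: concurrent sub-document array appends with the Couchbase
    # Python SDK 2.x. Host, bucket, and counts are illustrative assumptions.
    from threading import Thread
    from couchbase.bucket import Bucket
    import couchbase.subdocument as SD

    DOCID = 'subdoc_doc_id'
    ITERATIONS = 200
    THREADS = 20

    main_bucket = Bucket('couchbase://127.0.0.1/default')
    main_bucket.upsert(DOCID, {'recs': []})

    def append_many():
        # Each thread opens its own connection; a Bucket object is not
        # safe to share across threads with the default lock mode.
        cb = Bucket('couchbase://127.0.0.1/default')
        for _ in range(ITERATIONS):
            cb.mutate_in(DOCID, SD.array_append('recs', 1))

    threads = [Thread(target=append_many) for _ in range(THREADS)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()

    assert len(main_bucket.get(DOCID).value['recs']) == ITERATIONS * THREADS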
Code example #3
File: cbas_infer_schema.py Project: umang-cb/Jython
    def verify_infer_schema_on_documents_with_overlapping_types_same_keys(self):
        self.log.info('Create unique documents')
        client = SDKClient(hosts=[self.master.ip], bucket=self.cb_bucket_name, password=self.master.rest_password)
        documents = [
            '{ "array": 20, "integer": 300.34345, "float":false, "boolean":["a", "b", "c"], "null":null, "string":"300"}'
            '{ "array": 300, "integer": ["a", "b", "c"], "float":1012.56756, "boolean":300, "null":null, "string":10345665}'
            '{ "array": [1, 2, 3], "integer": 10345665, "float":"Steve", "boolean":[[1, 2], [3, 4], []], "null":null, "string":20.343}'
            '{ "array": 20.343, "integer": [[1, 2], [3, 4], []], "float":0.00011, "boolean":[1, 1.1, 1.0, 0], "null":null, "string":[1, 1.1, 1.0, 0]}'
            '{ "array": 10345665, "integer": 1012.56756, "float":1012.56756, "boolean":false, "null":null, "string":[1, "hello", ["a", 1], 2.22]}'
            '{ "array": "Steve", "integer": 1, "float":[1, 2, 3], "boolean":false, "null":null, "string":1}'
            '{ "array": true, "integer": 1.1, "float":20.343, "boolean":["a", "b", "c"], "null":null, "string":300}'
            '{ "array": true, "integer": "Alex", "float":10345665, "boolean":1.1, "null":null, "string":0.00011}'
            '{ "array": 20.343, "integer": 20.343, "float":300, "boolean":true, "null":null, "string":true}'
            '{ "array": 1, "integer": 10345665, "float":300.34345, "boolean":1, "null":null, "string":true}'
        ]
        for index, document in enumerate(documents):
            client.insert_document(str(index), document)

        self.log.info('Create primary index on %s' % self.cb_bucket_name)
        self.rest.query_tool('CREATE PRIMARY INDEX idx on %s' % self.cb_bucket_name)

        self.log.info('Create dataset')
        self.cbas_util.createConn(self.cb_bucket_name)
        self.cbas_util.create_dataset_on_bucket(self.cb_bucket_name, self.cbas_dataset_name)

        self.log.info('Connect link')
        self.cbas_util.connect_link()

        self.log.info('Verify dataset count')
        self.cbas_util.validate_cbas_dataset_items_count(self.cbas_dataset_name, len(documents))

        self.validate_infer_schema_response(len(documents))
Code example #4
    def test_cbas_ingestion_with_documents_containing_multilingual_data(self):
        """
        1. Create reference to SDK client
        2. Add multilingual json documents to default bucket
        3. Verify ingestion on dataset with and without secondary index
        """
        multilingual_strings = [
            'De flesta sagorna här är från Hans Hörner svenska översättning',
            'Il était une fois une maman cochon qui avait trois petits cochons',
            '森林里住着一只小兔子,它叫“丑丑”。它的眼睛红红的,像一对红宝石',
            '外治オヒル回条フ聞定ッ加官言岸ムモヱツ求碁込ヌトホヒ舞高メ旅位',
            'ان عدة الشهور عند الله اثنا عشر شهرا في',
        ]

        self.log.info("Fetch test case arguments")
        self.fetch_test_case_arguments()

        self.log.info("Create reference to SDK client")
        client = SDKClient(scheme="couchbase", hosts=[self.master.ip], bucket=self.cb_bucket_name,
                           password=self.master.rest_password)

        self.log.info("Add multilingual documents to the default bucket")
        client.insert_custom_json_documents("custom-key-", multilingual_strings)

        self.log.info("Create connections, datasets and indexes")
        self.cbas_dataset_setup()

        self.log.info("Wait for ingestion to complete and verify count")
        self.cbas_util.wait_for_ingestion_complete([self.dataset_name], len(multilingual_strings))
        self.assertTrue(self.cbas_util.validate_cbas_dataset_items_count(self.dataset_name, len(multilingual_strings)))
Code example #5
File: cbas_infer_schema.py Project: umang-cb/Jython
    def verify_infer_schema_on_unique_nested_documents(self):
        self.log.info('Create unique documents')
        client = SDKClient(hosts=[self.master.ip], bucket=self.cb_bucket_name, password=self.master.rest_password)
        documents = [
            '{"from": "user_512", "to": "user_768", "text": "Hey, that Beer you recommended is pretty fab, thx!", "sent_timestamp":476560}',
            '{"user_id": 512, "name": "Bob Likington", "email": "*****@*****.**", "sign_up_timestamp": 1224612317, "last_login_timestamp": 1245613101}',
            '{"user_id": 768, "name": "Simon Neal", "email": "*****@*****.**", "sign_up_timestamp": 1225554317, "last_login_timestamp": 1234166701, "country": "Scotland", "pro_account": true, "friends": [512, 666, 742, 1111]}',
            '{"photo_id": "ccbcdeadbeefacee", "size": { "w": 500, "h": 320, "unit": "px" }, "exposure": "1/1082", "aperture": "f/2.4", "flash": false, "camera": { "name": "iPhone 4S", "manufacturer": {"Company":"Apple", "Location": {"City":"California", "Country":"USA"} } }, "user_id": 512, "timestamp": [2011, 12, 13, 16, 31, 7]}'
            ]
        for index, document in enumerate(documents):
            client.insert_document(str(index), document)

        self.log.info('Create primary index on %s' % self.cb_bucket_name)
        self.rest.query_tool('CREATE PRIMARY INDEX idx on %s' % self.cb_bucket_name)

        self.log.info('Create dataset')
        self.cbas_util.createConn(self.cb_bucket_name)
        self.cbas_util.create_dataset_on_bucket(self.cb_bucket_name, self.cbas_dataset_name)

        self.log.info('Connect link')
        self.cbas_util.connect_link()

        self.log.info('Verify dataset count')
        self.cbas_util.validate_cbas_dataset_items_count(self.cbas_dataset_name, len(documents))

        self.validate_infer_schema_response(len(documents), compare_with_n1ql=False)
Code example #6
    def test_add_concurrent(self):
        DOCID = 'subdoc_doc_id'
        SERVER_IP = self.servers[0].ip
        ITERATIONS = 200
        THREADS = 20

        main_bucket = SDKClient(scheme="couchbase", hosts=[self.servers[0].ip],
                  bucket='default').cb
        main_bucket.upsert(DOCID, {'recs':[]})

        class Runner(Thread):
            def run(self, *args, **kw):
                cb = SDKClient(scheme="couchbase", hosts=[SERVER_IP],
                  bucket='default').cb
                for x in range(ITERATIONS):
                    cb.mutate_in(DOCID, SD.array_append('recs', 1))

        thrs = [Runner() for x in range(THREADS)]
        [t.start() for t in thrs]
        [t.join() for t in thrs]

        obj = main_bucket.get(DOCID)

        array_entry_count = len(obj.value['recs'])

        self.assertTrue(array_entry_count == ITERATIONS * THREADS,
                         'Incorrect number of array entries. Expected {0} actual {1}'.format(ITERATIONS * THREADS,
                                                                           array_entry_count))
Code example #7
    def test_swap_rebalance_cb_cbas_together(self):

        self.log.info("Creates cbas buckets and dataset")
        wait_for_rebalance = self.input.param("wait_for_rebalance", True)
        dataset_count_query = "select count(*) from {0};".format(
            self.cbas_dataset_name)
        self.setup_for_test()

        self.log.info("Add KV node and don't rebalance")
        self.add_node(node=self.rebalanceServers[1], rebalance=False)

        self.log.info("Add cbas node and don't rebalance")
        self.add_node(node=self.rebalanceServers[3], rebalance=False)

        otpnodes = []
        nodes = self.rest.node_statuses()
        for node in nodes:
            if node.ip == self.rebalanceServers[
                    0].ip or node.ip == self.rebalanceServers[2].ip:
                otpnodes.append(node)

        self.log.info("Remove master node")
        self.remove_node(otpnode=otpnodes,
                         wait_for_rebalance=wait_for_rebalance)
        self.master = self.rebalanceServers[1]

        self.log.info("Create instances pointing to new master nodes")
        c_utils = cbas_utils(self.rebalanceServers[1],
                             self.rebalanceServers[3])
        c_utils.createConn(self.cb_bucket_name)

        self.log.info("Create reference to SDK client")
        client = SDKClient(scheme="couchbase",
                           hosts=[self.rebalanceServers[1].ip],
                           bucket=self.cb_bucket_name,
                           password=self.rebalanceServers[1].rest_password)

        self.log.info("Add more document to default bucket")
        documents = ['{"name":"value"}'] * (self.num_items // 10)
        document_id_prefix = "custom-id-"
        client.insert_custom_json_documents(document_id_prefix, documents)

        self.log.info(
            "Run queries as rebalance is in progress : Rebalance state:%s" %
            self.rest._rebalance_progress_status())
        handles = c_utils._run_concurrent_queries(
            dataset_count_query,
            "immediate",
            2000,
            batch_size=self.concurrent_batch_size)

        self.log.info("Log concurrent query status")
        self.cbas_util.log_concurrent_query_outcome(self.master, handles)

        if not c_utils.validate_cbas_dataset_items_count(
                self.cbas_dataset_name, self.num_items +
            (self.num_items // 10), 0):
            self.fail(
                "No. of items in CBAS dataset do not match that in the CB bucket"
            )
Code example #8
    def load_docs(self, node, num_docs, bucket = 'default', password = '',
                  exp = 0, flags = 0):
        host = node.ip+":"+node.port
        client = SDKClient(bucket = "default", hosts = [host], scheme = "http")

        for i in range(num_docs):
            key = "key%s"%i
            rc = client.upsert(key, "value")
Code example #9
    def test_MB_32114(self):
        try:
            from sdk_client import SDKClient
        except ImportError:
            from sdk_client3 import SDKClient
        import couchbase.subdocument as SD

        rest = RestConnection(self.master)
        client = VBucketAwareMemcached(rest, 'default')
        if self.maxttl:
            self._expiry_pager(self.master)
        sdk_client = SDKClient(scheme='couchbase',
                               hosts=[self.master.ip],
                               bucket='default')
        KEY_NAME = 'key1'

        for i in range(1000):
            mcd = client.memcached(KEY_NAME + str(i))
            rc = mcd.set(KEY_NAME + str(i), 0, 0,
                         json.dumps({'value': 'value2'}))
            sdk_client.mutate_in(
                KEY_NAME + str(i),
                SD.upsert("subdoc_key",
                          "subdoc_val",
                          xattr=True,
                          create_parents=True))
            # wait for it to persist
            persisted = 0
            while persisted == 0:
                opaque, rep_time, persist_time, persisted, cas = client.observe(
                    KEY_NAME + str(i))

        start_time = time.time()
        self._load_doc_data_all_buckets(batch_size=1000)
        end_time = time.time()

        for i in range(1000):
            try:
                mcd = client.memcached(KEY_NAME + str(i))
                _, flags, exp, seqno, cas = client.memcached(
                    KEY_NAME + str(i)).getMeta(KEY_NAME + str(i))
                rc = mcd.del_with_meta(KEY_NAME + str(i), 0, 0, 2, cas + 1)
            except MemcachedError as exp:
                self.fail("Exception with del_with meta - {0}".format(exp))
        self.cluster.compact_bucket(self.master, "default")
        if self.maxttl:
            time_to_sleep = (self.maxttl - (end_time - start_time)) + 20
            self.sleep(int(time_to_sleep))
        else:
            self.sleep(60)
        active_bucket_items = rest.get_active_key_count("default")
        replica_bucket_items = rest.get_replica_key_count("default")
        print('active_bucket_items ', active_bucket_items)
        print('replica_bucket_items ', replica_bucket_items)
        if active_bucket_items * self.num_replicas != replica_bucket_items:
            self.fail("Mismatch in data !!!")
Code example #10
 def async_load_data_till_upgrade_completes():
     self.log.info("Started doc operations on KV")
     client = SDKClient(hosts=[self.master.ip],
                        bucket=self.cb_bucket_name,
                        password=self.master.rest_password)
     i = 0
     while not self._STOP_INGESTION:
         client.insert_document(
             "key-id" + str(i),
             '{"name":"James_' + str(i) + '", "profession":"Pilot"}')
         i += 1
Code example #11
    def test_ephemeral_bucket_NRU_eviction_access_in_the_delete_range(self):
        """
        generate_load = BlobGenerator(EvictionKV.KEY_ROOT, 'param2', self.value_size, start=0, end=self.num_items)
        self._load_all_ephemeral_buckets_until_no_more_memory(self.servers[0], generate_load, "create", 0, self.num_items)


        # figure out how many items were loaded and load 10% more
        rest = RestConnection(self.servers[0])
        itemCount = rest.get_bucket(self.buckets[0]).stats.itemCount

        self.log.info( 'Reached OOM, the number of items is {0}'.format( itemCount))


        """

        # select some keys which we expect to be adjacent to the kvs which will be deleted
        # and how many KVs should we select, maybe that is a parameter

        itemCount = 50000
        max_delete_value = itemCount / 10
        NUM_OF_ACCESSES = 50
        keys_to_access = set()
        for i in range(NUM_OF_ACCESSES):
            keys_to_access.add(random.randint(0, max_delete_value))

        # and then do accesses on the key set
        client = SDKClient(hosts=[self.master.ip], bucket=self.buckets[0])
        for i in keys_to_access:
            # and we may want to parameterize the get at some point
            rc = client.get(EvictionKV.KEY_ROOT + str(i), no_format=True)

        # and then do puts to delete out stuff
        PERCENTAGE_TO_ADD = 10
        incremental_kv_population = BlobGenerator(EvictionKV.KEY_ROOT,
                                                  'param2',
                                                  self.value_size,
                                                  start=itemCount,
                                                  end=itemCount *
                                                  PERCENTAGE_TO_ADD / 100)
        self._load_bucket(self.buckets[0],
                          self.master,
                          incremental_kv_population,
                          "create",
                          exp=0,
                          kv_store=1)

        # and verify that the touched kvs are still there
        for i in keys_to_access:
            # and we may want to parameterize the get at some point
            rc = client.get(EvictionKV.KEY_ROOT + str(i), no_format=True)
            self.assertFalse(
                rc is None,
                'Key {0} was incorrectly deleted'.format(EvictionKV.KEY_ROOT +
                                                         str(i)))
Code example #12
 def async_load_data():
     self.log.info(
         "Performing doc operations on KV until upgrade is in progress")
     client = SDKClient(hosts=[self.master.ip],
                        bucket=self.cb_bucket_name,
                        password=self.master.rest_password)
     i = 0
     while not _STOP_INGESTION:
         client.insert_document(
             "key-id" + str(i),
             '{"name":"James_' + str(i) + '", "profession":"Pilot"}')
         i += 1
Code example #13
    def load_docs(self,
                  node,
                  num_docs,
                  bucket='default',
                  password='',
                  exp=0,
                  flags=0):
        host = node.ip + ":" + node.port
        client = SDKClient(bucket="default", hosts=[host], scheme="http")

        for i in range(num_docs):
            key = "key%s" % i
            rc = client.upsert(key, "value")
Code example #14
    def test_xattr_compression(self):
        # MB-32669
        # subdoc.subdoc_simple_dataset.SubdocSimpleDataset.test_xattr_compression,compression=active
        mc = MemcachedClient(self.master.ip, 11210)
        mc.sasl_auth_plain(self.master.rest_username,
                           self.master.rest_password)
        mc.bucket_select('default')

        self.key = "test_xattr_compression"
        self.nesting_level = 5
        array = {'i_add': 0, 'i_sub': 1, 'a_i_a': [0, 1], 'ai_sub': [0, 1]}
        base_json = self.generate_json_for_nesting()
        nested_json = self.generate_nested(base_json, array,
                                           self.nesting_level)
        jsonDump = json.dumps(nested_json)
        stats = mc.stats()
        self.assertEquals(stats['ep_compression_mode'], 'active')

        scheme = "http"
        host = "{0}:{1}".format(self.master.ip, self.master.port)
        self.sdk_client = SDKClient(scheme=scheme,
                                    hosts=[host],
                                    bucket="default")

        self.sdk_client.set(self.key, value=jsonDump, ttl=60)
        rv = self.sdk_client.cb.mutate_in(self.key,
                                          SD.upsert('my.attr',
                                                    "value",
                                                    xattr=True,
                                                    create_parents=True),
                                          ttl=60)
        self.assertTrue(rv.success)

        # wait for it to persist and then evict the key
        persisted = 0
        while persisted == 0:
            opaque, rep_time, persist_time, persisted, cas = mc.observe(
                self.key)

        mc.evict_key(self.key)
        time.sleep(65)
        try:
            self.client.get(self.key)
            self.fail("the key should get expired")
        except mc_bin_client.MemcachedError as error:
            self.assertEquals(error.status, 1)

        stats = mc.stats()
        self.assertEquals(int(stats['curr_items']), 0)
        self.assertEquals(int(stats['curr_temp_items']), 0)
Code example #15
    def test_ingestion_impact_for_documents_containing_xattr_meta_information(self):

        self.log.info("Fetch test case arguments")
        self.fetch_test_case_arguments()

        self.log.info("Create reference to SDK client")
        client = SDKClient(scheme="couchbase", hosts=[self.master.ip], bucket="default",
                           password=self.master.rest_password)

        self.log.info("Insert custom data into default bucket")
        documents = ['{"name":"value"}'] * self.num_of_documents
        document_id_prefix = "id-"
        client.insert_custom_json_documents(document_id_prefix, documents)

        self.log.info("Create connections, datasets, indexes")
        self.cbas_dataset_setup()

        self.log.info("Wait for ingestion to complete and verify count")
        self.cbas_util.wait_for_ingestion_complete([self.dataset_name], self.num_of_documents)
        self.assertTrue(self.cbas_util.validate_cbas_dataset_items_count(self.dataset_name, self.num_of_documents))

        self.log.info("Insert xattr attribute for all the documents and assert document count on dataset")
        for i in range(self.num_of_documents):
            client.insert_xattr_attribute(document_id=document_id_prefix + str(i), path="a", value="{'xattr-value': 1}",
                                          xattr=True, create_parents=True)
        self.assertTrue(self.cbas_util.validate_cbas_dataset_items_count(self.dataset_name, self.num_of_documents))

        self.log.info("Update xattr attribute and assert document count on dataset")
        for i in range(self.num_of_documents):
            client.update_xattr_attribute(document_id=document_id_prefix + str(i), path="a",
                                          value="{'xattr-value': 11}", xattr=True, create_parents=True)
        self.assertTrue(self.cbas_util.validate_cbas_dataset_items_count(self.dataset_name, self.num_of_documents))
Code example #16
File: cbas_limit_pushdown.py Project: ritalrw/Jython
    def setUp(self):
        super(CBASLimitPushdown, self).setUp()

        self.log.info("Create a reference to SDK client")
        client = SDKClient(hosts=[self.master.ip],
                           bucket=self.cb_bucket_name,
                           password=self.master.rest_password)

        self.log.info("Insert documents in KV bucket")
        documents = [
            '{"name":"dave","age":19,"gender":"Male","salary":50.0, "married":false, "employed":""}',
            '{"name":"evan","age":25,"gender":"Female","salary":100.15, "married":true}',
            '{"name":"john","age":44,"gender":"Male","salary":150.55, "married":null}',
            '{"name": "sara", "age": 20, "gender": "Female", "salary": 200.34, "married":false}',
            '{"name":"tom","age":31,"gender":"Male","salary":250.99, "married":true}'
        ]
        client.insert_json_documents("id-", documents)

        self.log.info("Create connection")
        self.cbas_util.createConn(self.cb_bucket_name)

        self.log.info("Create primary index on KV bucket")
        self.rest.query_tool("create primary index pri_idx on default")

        self.log.info("Create dataset")
        self.cbas_util.create_dataset_on_bucket(self.cb_bucket_name,
                                                self.cbas_dataset_name)

        self.log.info("Connect to Local link")
        self.cbas_util.connect_link()

        self.log.info("Wait for ingestion to complete")
        self.total_documents = self.rest.query_tool(
            CBASLimitQueries.BUCKET_COUNT_QUERY)['results'][0]['$1']
        self.cbas_util.wait_for_ingestion_complete([self.cbas_dataset_name],
                                                   self.total_documents)

        self.log.info("Fetch partitions")
        shell = RemoteMachineShellConnection(self.cbas_node)
        response = self.cbas_util.fetch_analytics_cluster_response(shell)
        if 'partitions' in response:
            self.partitions = len(response['partitions'])
            self.log.info("Number of data partitions on cluster %d" %
                          self.partitions)
        else:
            self.fail(
                msg=
                "Partitions not found. Failing early to avoid unexpected results"
            )
Code example #17
File: cbas_stats.py Project: umang-cb/Jython
    def setUp(self):
        super(CbasStats, self).setUp()

        self.log.info("Add Json documents to default bucket")
        self.perform_doc_ops_in_all_cb_buckets(self.num_items, "create", 0,
                                               self.num_items)

        self.log.info("Create reference to SDK client")
        client = SDKClient(scheme="couchbase",
                           hosts=[self.master.ip],
                           bucket=self.cb_bucket_name,
                           password=self.master.rest_password)

        self.log.info("Insert binary data into default bucket")
        keys = ["%s" % (uuid.uuid4()) for i in range(0, self.num_items)]
        client.insert_binary_document(keys)

        self.log.info("Create connection")
        self.cbas_util.createConn(self.cb_bucket_name)

        self.log.info("Create bucket on CBAS")
        self.assertTrue(
            self.cbas_util.create_bucket_on_cbas(
                cbas_bucket_name=self.cbas_bucket_name,
                cb_bucket_name=self.cb_bucket_name,
                cb_server_ip=self.cb_server_ip),
            "bucket creation failed on cbas")

        self.log.info("Create dataset on the CBAS bucket")
        self.cbas_util.create_dataset_on_bucket(
            cbas_bucket_name=self.cb_bucket_name,
            cbas_dataset_name=self.cbas_dataset_name)

        self.log.info("Connect to Bucket")
        self.cbas_util.connect_to_bucket(
            cbas_bucket_name=self.cbas_bucket_name,
            cb_bucket_password=self.cb_bucket_password)

        statement = "select count(*) from {0} where mutated=0;".format(
            self.cbas_dataset_name)
        status, metrics, errors, results, response_handle = self.cbas_util.execute_statement_on_cbas_util(
            statement, mode=self.mode, timeout=75, analytics_timeout=120)

        if not self.cbas_util.validate_cbas_dataset_items_count(
                self.cbas_dataset_name, self.num_items):
            self.fail(
                "No. of items in CBAS dataset do not match that in the CB bucket"
            )
Code example #18
    def test_ephemeral_bucket_NRU_eviction(self):

        generate_load = BlobGenerator(EvictionKV.KEY_ROOT,
                                      'param2',
                                      self.value_size,
                                      start=0,
                                      end=self.num_items)
        self._load_all_ephemeral_buckets_until_no_more_memory(
            self.servers[0], generate_load, "create", 0, self.num_items)

        # figure out how many items were loaded and load 10% more
        rest = RestConnection(self.servers[0])
        itemCount = rest.get_bucket(self.buckets[0]).stats.itemCount

        self.log.info(
            'Reached OOM, the number of items is {0}'.format(itemCount))

        incremental_kv_population = BlobGenerator(EvictionKV.KEY_ROOT,
                                                  'param2',
                                                  self.value_size,
                                                  start=itemCount,
                                                  end=itemCount * 1.1)
        self._load_bucket(self.buckets[0],
                          self.master,
                          incremental_kv_population,
                          "create",
                          exp=0,
                          kv_store=1)

        # and then probe the keys that are left. For now print out a distribution but later apply some heuristic
        client = SDKClient(hosts=[self.master.ip], bucket=self.buckets[0])

        NUMBER_OF_CHUNKS = 11
        items_in_chunk = int(1.1 * itemCount / NUMBER_OF_CHUNKS)
        for i in range(NUMBER_OF_CHUNKS):
            keys_still_present = 0
            for j in range(items_in_chunk):
                rc = client.get(EvictionKV.KEY_ROOT +
                                str(i * items_in_chunk + j),
                                no_format=True)

                if rc[2] is not None:
                    keys_still_present = keys_still_present + 1

            self.log.info(
                'Chunk {0} has {1:.2f} percent items still present'.format(
                    i, 100 * keys_still_present /
                    (itemCount * 1.1 / NUMBER_OF_CHUNKS)))
Code example #19
    def test_logical_clock_ticks(self):

        self.log.info('starting test_logical_clock_ticks')

        payload = "name={0}&roles=admin&password=password".format(
            self.buckets[0].name)
        self.rest.add_set_builtin_user(self.buckets[0].name, payload)
        sdk_client = SDKClient(scheme='couchbase',
                               hosts=[self.servers[0].ip],
                               bucket=self.buckets[0].name)
        mc_client = MemcachedClientHelper.direct_client(
            self.servers[0], self.buckets[0])
        shell = RemoteMachineShellConnection(self.servers[0])

        # do a bunch of mutations to set the max cas
        gen_load = BlobGenerator('key-for-cas-test-logical-ticks',
                                 'value-for-cas-test-',
                                 self.value_size,
                                 end=10000)
        self._load_all_buckets(self.master, gen_load, "create", 0)

        vbucket_stats = mc_client.stats('vbucket-details')
        base_total_logical_clock_ticks = 0
        for i in range(self.vbuckets):
            #print vbucket_stats['vb_' + str(i) + ':logical_clock_ticks']
            base_total_logical_clock_ticks = base_total_logical_clock_ticks + int(
                vbucket_stats['vb_' + str(i) + ':logical_clock_ticks'])
        self.log.info('The base total logical clock ticks is {0}'.format(
            base_total_logical_clock_ticks))

        # move the system clock back so the logical counter part of HLC is used and the logical clock ticks
        # stat is incremented
        self.assertTrue(
            shell.change_system_time(-LWWStatsTests.ONE_HOUR_IN_SECONDS),
            'Failed to advance the clock')

        # do more mutations
        NUMBER_OF_MUTATIONS = 10000
        gen_load = BlobGenerator('key-for-cas-test-logical-ticks',
                                 'value-for-cas-test-',
                                 self.value_size,
                                 end=NUMBER_OF_MUTATIONS)
        self._load_all_buckets(self.master, gen_load, "create", 0)

        vbucket_stats = mc_client.stats('vbucket-details')
        time.sleep(30)
        total_logical_clock_ticks = 0
        for i in range(self.vbuckets):
            total_logical_clock_ticks = total_logical_clock_ticks + int(
                vbucket_stats['vb_' + str(i) + ':logical_clock_ticks'])

        self.log.info('The total logical clock ticks is {0}'.format(
            total_logical_clock_ticks))

        self.assertTrue(
            total_logical_clock_ticks -
            base_total_logical_clock_ticks == NUMBER_OF_MUTATIONS,
            'Expected clock tick {0} actual {1}'.format(
                NUMBER_OF_MUTATIONS,
                total_logical_clock_ticks - base_total_logical_clock_ticks))
Code example #20
    def test_no_eviction_impact_on_cbas(self):

        self.log.info("Create dataset")
        self.cbas_util.create_dataset_on_bucket(self.cb_bucket_name,
                                                self.cbas_dataset_name)

        self.log.info("Connect to Local link")
        self.cbas_util.connect_link()

        self.log.info("Add documents until ram percentage")
        self.load_document_until_ram_percentage()

        self.log.info("Fetch current document count")
        bucket_helper = BucketHelper(self.master)
        item_count = bucket_helper.get_bucket(
            self.cb_bucket_name).stats.itemCount
        self.log.info("Completed base load with %s items" % item_count)

        self.log.info("Load more until we are out of memory")
        client = SDKClient(hosts=[self.master.ip],
                           bucket=self.cb_bucket_name,
                           password=self.master.rest_password)
        i = item_count
        insert_success = True
        while insert_success:
            insert_success = client.insert_document("key-id" + str(i),
                                                    '{"name":"dave"}')
            i += 1

        self.log.info('Memory is full at {0} items'.format(i))
        self.log.info("As a result added more %s items" % (i - item_count))

        self.log.info("Fetch item count")
        stats = bucket_helper.get_bucket(self.cb_bucket_name).stats
        itemCountWhenOOM = stats.itemCount
        memoryWhenOOM = stats.memUsed
        self.log.info('Item count when OOM {0} and memory used {1}'.format(
            itemCountWhenOOM, memoryWhenOOM))

        self.log.info("Validate document count on CBAS")
        count_n1ql = self.rest.query_tool(
            'select count(*) from %s' %
            (self.cb_bucket_name))['results'][0]['$1']
        self.assertTrue(self.cbas_util.validate_cbas_dataset_items_count(
            self.cbas_dataset_name, count_n1ql),
                        msg="Count mismatch on CBAS")
Code example #21
    def __load_chain(self, start_num=0):
        for i, cluster in enumerate(self.get_cb_clusters()):
            if self._rdirection == REPLICATION_DIRECTION.BIDIRECTION:
                if i > len(self.get_cb_clusters()) - 1:
                    break
            else:
                if i >= len(self.get_cb_clusters()) - 1:
                    break
            if not self._dgm_run:
                for bucket in cluster.get_buckets():
                    client = SDKClient(scheme="couchbase", hosts=[cluster.get_master_node().ip],
                                            bucket=bucket.name).cb
                    for i in range(start_num, start_num + self._num_items):
                        key = 'k_%s_%s' % (i, str(cluster).replace(' ', '_').
                                           replace('.', '_').replace(',', '_').replace(':', '_'))
                        value = {'xattr_%s' % i:'value%s' % i}
                        client.upsert(key, value)
                        client.mutate_in(key, SD.upsert('xattr_%s' % i, 'value%s' % i,
                                                             xattr=True,
                                                             create_parents=True))
                        partition = bucket.kvs[1].acquire_partition(key)#["partition"]
                        if self.only_store_hash:
                            value = str(crc32.crc32_hash(value))
                        res = client.get(key)
                        partition.set(key, json.dumps(value), 0, res.flags)
                        bucket.kvs[1].release_partition(key)

            else:
                cluster.load_all_buckets_till_dgm(
                    active_resident_threshold=self._active_resident_threshold,
                    items=self._num_items)
Code example #22
    def test_error_response_type_mismatch_object(self):
        self.log.info("Create a reference to SDK client")
        client = SDKClient(hosts=[self.master.ip],
                           bucket=self.cb_bucket_name,
                           password=self.master.rest_password)

        self.log.info("Insert documents in KV bucket")
        documents = ['{"address":{"city":"NY"}}']
        client.insert_json_documents("id-", documents)

        self.log.info("Create dataset and connect link")
        self.create_dataset_connect_link()

        status, _, errors, _, _ = self.cbas_util.execute_statement_on_cbas_util(
            self.error_response["query"])
        self.validate_error_response(status, errors,
                                     self.error_response["msg"],
                                     self.error_response["code"])
Code example #23
 def _direct_client(self, server, bucket, timeout=30):
     # CREATE SDK CLIENT
     if self.use_sdk_client:
         try:
             from sdk_client import SDKClient
             scheme = "couchbase"
             host = self.master.ip
             if self.master.ip == "127.0.0.1":
                 scheme = "http"
                 host="{0}:{1}".format(self.master.ip, self.master.port)
             return SDKClient(scheme=scheme, hosts = [host], bucket = bucket)
         except ImportError:
             from sdk_client3 import SDKClient
             return SDKClient(RestConnection(self.master), bucket = bucket)
         except Exception as ex:
             self.log.error("cannot load sdk client due to error {0}".format(str(ex)))
     # USE MC BIN CLIENT WHEN NOT USING SDK CLIENT
     return self.direct_mc_bin_client(server, bucket, timeout= timeout)
Code example #24
    def test_MB_36087(self):
        try:
            from sdk_client import SDKClient
        except ImportError:
            from sdk_client3 import SDKClient
        import couchbase.subdocument as SD

        g_key = "test_doc"
        bucket_name = "default"
        sdk_client = SDKClient(scheme='couchbase',
                               hosts=[self.master.ip],
                               bucket=bucket_name)
        rest = RestConnection(self.master)
        client = VBucketAwareMemcached(rest, bucket_name)
        for i in range(self.num_items):
            key = g_key + str(i)
            mcd = client.memcached(key)
            rc = mcd.set(key, 0, 0, json.dumps({'value': 'value2'}))
            sdk_client.mutate_in(
                key,
                SD.upsert("subdoc_key",
                          "subdoc_val",
                          xattr=True,
                          create_parents=True))
            # Wait for key to persist
            persisted = 0
            while persisted == 0:
                opaque, rep_time, persist_time, persisted, cas = \
                    client.observe(key)

            time.sleep(10)
            # Evict the key
            try:
                rc = mcd.evict_key(key)
            except MemcachedError as exp:
                self.fail("Exception with evict meta - %s" % exp)

            # Perform del_with_meta
            try:
                mcd = client.memcached(key)
                _, flags, exp, seqno, cas = client.memcached(key).getMeta(key)
                rc = mcd.del_with_meta(key, 0, 0, 2, cas + 1)
            except MemcachedError as exp:
                self.fail("Exception with del_with meta - {0}".format(exp))
Code example #25
File: xdcr_xattr_sdk.py Project: arod1987/testrunner
    def __load_chain(self, start_num=0):
        for i, cluster in enumerate(self.get_cb_clusters()):
            if self._rdirection == REPLICATION_DIRECTION.BIDIRECTION:
                if i > len(self.get_cb_clusters()) - 1:
                    break
            else:
                if i >= len(self.get_cb_clusters()) - 1:
                    break
            if not self._dgm_run:
                for bucket in cluster.get_buckets():
                    client = SDKClient(scheme="couchbase", hosts=[cluster.get_master_node().ip],
                                            bucket=bucket.name).cb
                    for i in xrange(start_num, start_num + self._num_items):
                        key = 'k_%s_%s' % (i, str(cluster).replace(' ','_').
                                           replace('.','_').replace(',','_').replace(':','_'))
                        value = {'xattr_%s' % i:'value%s' % i}
                        client.upsert(key, value)
                        client.mutate_in(key, SD.upsert('xattr_%s' % i, 'value%s' % i,
                                                             xattr=True,
                                                             create_parents=True))
                        partition = bucket.kvs[1].acquire_partition(key)#["partition"]
                        if self.only_store_hash:
                            value = str(crc32.crc32_hash(value))
                        res = client.get(key)
                        partition.set(key, json.dumps(value), 0, res.flags)
                        bucket.kvs[1].release_partition(key)

            else:
                cluster.load_all_buckets_till_dgm(
                    active_resident_threshold=self._active_resident_threshold,
                    items=self._num_items)
Code example #26
 def _direct_client(self, server, bucket, timeout=30):
     # CREATE SDK CLIENT
     if self.use_sdk_client:
         try:
             from sdk_client import SDKClient
             scheme = "couchbase"
             host = self.master.ip
             if self.master.ip == "127.0.0.1":
                 scheme = "http"
                 host="{0}:{1}".format(self.master.ip,self.master.port)
             return SDKClient(scheme=scheme,hosts = [host], bucket = bucket, password=self.master.rest_password)
         except Exception, ex:
             self.log.error("cannot load sdk client due to error {0}".format(str(ex)))
Code example #27
    def direct_client(self, server, bucket, timeout=30):
        # CREATE SDK CLIENT
        if self.use_sdk_client:
            self.log.info("--> Using SDK client")
            self.is_secure = TestInputSingleton.input.param("is_secure", False)
            try:
                from sdk_client import SDKClient
                self.log.info("--> Try to use SDK2 client")
                if not self.is_secure:
                    scheme = "couchbase"
                else:
                    scheme = "couchbases"
                host = self.master.ip
                if self.master.ip == "127.0.0.1":
                    if not self.is_secure:
                        scheme = "http"
                    else:
                        scheme = "https"
                    host = "{0}:{1}".format(self.master.ip, self.master.port)
                return SDKClient(scheme=scheme,
                                 hosts=[host],
                                 bucket=bucket.name,
                                 username=server.rest_username,
                                 password=server.rest_password)
            except ImportError:
                self.log.info("--> Using SDK3 client")
                from sdk_client3 import SDKClient
                return SDKClient(RestConnection(self.master),
                                 bucket=bucket.name)
            except Exception as ex:
                self.log.error(
                    "cannot load sdk client due to error {0}".format(str(ex)))

        # USE MC BIN CLIENT WHEN NOT USING SDK CLIENT
        self.log.info("--> Using Direct Memcached client")
        return self.direct_mc_bin_client(server, bucket, timeout=timeout)
Code example #28
    def key_not_exists_test(self):
        client = SDKClient(hosts = [self.master.ip], bucket = "default")
        KEY_NAME = 'key'

        for i in range(1500):
            client.set(KEY_NAME, "x")
            #For some reason could not get delete to work
            client.remove(KEY_NAME)
            rc = client.get(KEY_NAME)
            #.get is automatically set to quiet for the sdk_client, therefore I look for
            #none to indicate an error, otherwise the sdk_client spends 10 seconds trying
            #to retry the commands and is very slow
            if rc[2] == None:
                pass
            else:
                assert False
            #cas errors do not sleep the test for 10 seconds, plus we need to check that the correct
            #error is being thrown
            try:
                #For some reason replace instead of cas would not reproduce the bug
                client.cas(KEY_NAME, "value", cas = 10)
            except NotFoundError:
                pass
        assert True 
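
key_not_exists_test above leans on the wrapper's quiet get and its cas helper. A minimal sketch of the equivalent behaviour against the raw Couchbase Python SDK 2.x, assuming a local `default` bucket and an illustrative key name, might be:

    # Sketch only: behaviour of get/replace on a removed key with the raw
    # Couchbase Python SDK 2.x. Key name is illustrative.
    from couchbase.bucket import Bucket
    from couchbase.exceptions import NotFoundError

    cb = Bucket('couchbase://127.0.0.1/default')
    KEY_NAME = 'key'

    cb.upsert(KEY_NAME, 'x')
    cb.remove(KEY_NAME)

    # quiet=True turns "key not found" into rv.success == False
    # instead of raising NotFoundError.
    rv = cb.get(KEY_NAME, quiet=True)
    assert not rv.success

    try:
        # A CAS-guarded replace on a missing key raises NotFoundError,
        # whatever CAS value is supplied.
        cb.replace(KEY_NAME, 'value', cas=10)
    except NotFoundError:
        pass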
Code example #29
    def test_nru_eviction_impact_on_cbas(self):

        self.log.info("Create dataset")
        self.cbas_util.create_dataset_on_bucket(self.cb_bucket_name,
                                                self.cbas_dataset_name)

        self.log.info("Connect to Local link")
        self.cbas_util.connect_link()

        self.log.info("Add documents until ram percentage")
        self.load_document_until_ram_percentage()

        self.log.info("Fetch current document count")
        bucket_helper = BucketHelper(self.master)
        item_count = bucket_helper.get_bucket(
            self.cb_bucket_name).stats.itemCount
        self.log.info("Completed base load with %s items" % item_count)

        self.log.info(
            "Fetch initial inserted 100 documents, so they are not removed")
        client = SDKClient(hosts=[self.master.ip],
                           bucket=self.cb_bucket_name,
                           password=self.master.rest_password)
        for i in range(100):
            client.get("test_docs-" + str(i))

        self.log.info("Add 20% more items to trigger NRU")
        for i in range(item_count, int(item_count * 1.2)):
            client.insert_document("key-id" + str(i), '{"name":"dave"}')

        self.log.info("Validate document count on CBAS")
        count_n1ql = self.rest.query_tool(
            'select count(*) from %s' %
            (self.cb_bucket_name))['results'][0]['$1']
        if self.cbas_util.validate_cbas_dataset_items_count(
                self.cbas_dataset_name, count_n1ql):
            pass
        else:
            self.log.info(
                "Document count mismatch might be due to ejection of documents on KV. Retry again"
            )
            count_n1ql = self.rest.query_tool(
                'select count(*) from %s' %
                (self.cb_bucket_name))['results'][0]['$1']
            self.assertTrue(self.cbas_util.validate_cbas_dataset_items_count(
                self.cbas_dataset_name, count_n1ql),
                            msg="Count mismatch on CBAS")
Code example #30
 def test_sdk_client(self):
     """
         Test SDK Client Calls
     """
     scheme = "couchbase"
     host = self.master.ip
     if self.master.ip == "127.0.0.1":
         scheme = "http"
         host = "{0}:{1}".format(self.master.ip, self.master.port)
     client = SDKClient(scheme=scheme, hosts=[host], bucket="default")
     client.remove("1", quiet=True)
     client.insert("1", "{1:2}")
     flag, cas, val = client.get("1")
     self.log.info("val={0},flag={1},cas={2}".format(val, flag, cas))
     client.upsert("1", "{1:3}")
     client.touch("1", ttl=100)
     flag, cas, val = client.get("1")
     self.assertTrue(val == "{1:3}", val)
     self.log.info("val={0},flag={1},cas={2}".format(val, flag, cas))
     client.remove("1", cas=cas)
     client.incr("key", delta=20, initial=5)
     flag, cas, val = client.get("key")
     self.assertTrue(val == 5)
     self.log.info("val={0},flag={1},cas={2}".format(val, flag, cas))
     client.incr("key", delta=20, initial=5)
     flag, cas, val = client.get("key")
     self.assertTrue(val == 25)
     self.log.info("val={0},flag={1},cas={2}".format(val, flag, cas))
     client.decr("key", delta=20, initial=5)
     flag, cas, val = client.get("key")
     self.assertTrue(val == 5)
     print(flag, cas, val)
     client.upsert("key1", "document1")
     client.upsert("key2", "document2")
     client.upsert("key3", "document3")
     set = client.get_multi(["key1", "key2"])
     self.log.info(set)
     client.upsert_multi({"key1": "{1:2}", "key2": "{3:2}"})
     set = client.get_multi(["key1", "key2"])
     self.log.info(set)
     client.touch_multi(["key1", "key2"], ttl=200)
     set = client.get_multi(["key1", "key2"])
     self.log.info(set)
     data = client.observe("key1")
     self.log.info(data)
     data = client.observe_multi(["key1", "key2"])
     self.log.info(data)
     stats = client.stats(["key1"])
     self.log.info(stats)
     client.n1ql_request(
         client.n1ql_query('create primary index on default')).execute()
     query = client.n1ql_query('select * from default')
     request = client.n1ql_request(query)
     obj = request.get_single_result()._jsobj
     self.log.info(obj)
     client.close()
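
The incr/decr assertions above (first call yields 5, the next 25, the decrement 5 again) match the usual Couchbase counter semantics: when the key is missing the counter is seeded with `initial`, otherwise `delta` is applied. A small sketch of the same sequence with the raw SDK 2.x `counter` call, under the same local-bucket assumption, is:

    # Sketch only: counter semantics with the Couchbase Python SDK 2.x.
    from couchbase.bucket import Bucket

    cb = Bucket('couchbase://127.0.0.1/default')
    cb.remove('counter_key', quiet=True)

    # The key does not exist yet, so it is seeded with the initial value (5).
    rv = cb.counter('counter_key', delta=20, initial=5)
    assert rv.value == 5

    # The key exists now, so the delta is applied: 5 + 20 = 25.
    rv = cb.counter('counter_key', delta=20, initial=5)
    assert rv.value == 25

    # A negative delta decrements: 25 - 20 = 5.
    rv = cb.counter('counter_key', delta=-20, initial=5)
    assert rv.value == 5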
Code example #31
    def test_sdk_subddoc(self):
        """
            Test SDK Client Calls
        """
        scheme = "couchbase"
        host = self.master.ip
        if self.master.ip == "127.0.0.1":
            scheme = "http"
            host = "{0}:{1}".format(self.master.ip, self.master.port)

        client = SDKClient(scheme=scheme, hosts=[host], bucket="default")
        json_document = {"1": 1, "2": 2, "array": [1]}
        document_key = "1"
        client.insert("1", json_document)
        client.insert_in(document_key, "3", 3)
        client.upsert_in(document_key, "4", 4)
        client.upsert_in(document_key, "4", "change_4")
        client.replace_in(document_key, "4", "crap_4")
        client.arrayprepend_in(document_key, "array", "0")
        client.arrayappend_in(document_key, "array", "2")
        client.arrayinsert_in(document_key, "array[1]",
                              "INSERT_VALUE_AT_INDEX_1")
        client.arrayaddunique_in(document_key, "array", "INSERT_UNIQUE_VALUE")
        print(json.dumps(client.get(document_key)))
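
insert_in, upsert_in, replace_in and the array helpers used above appear to be wrapper methods on the harness's SDKClient; the Couchbase Python SDK 2.x exposes the equivalent operations through `mutate_in` with `couchbase.subdocument` specs. A rough sketch of the same sequence against the raw SDK, assuming a local `default` bucket, could be:

    # Sketch only: the raw SDK 2.x sub-document calls that roughly correspond
    # to the wrapper methods used above. Document key and paths are illustrative.
    from couchbase.bucket import Bucket
    import couchbase.subdocument as SD

    cb = Bucket('couchbase://127.0.0.1/default')
    cb.upsert('1', {'1': 1, '2': 2, 'array': [1]})

    cb.mutate_in('1', SD.insert('3', 3))                      # insert_in
    cb.mutate_in('1', SD.upsert('4', 4))                      # upsert_in
    cb.mutate_in('1', SD.upsert('4', 'change_4'))
    cb.mutate_in('1', SD.replace('4', 'crap_4'))              # replace_in
    cb.mutate_in('1', SD.array_prepend('array', '0'))         # arrayprepend_in
    cb.mutate_in('1', SD.array_append('array', '2'))          # arrayappend_in
    cb.mutate_in('1', SD.array_insert('array[1]', 'INSERT_VALUE_AT_INDEX_1'))
    cb.mutate_in('1', SD.array_addunique('array', 'INSERT_UNIQUE_VALUE'))

    print(cb.get('1').value)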
Code example #32
File: sdk_client_tests.py Project: lichia/testrunner
 def test_sdk_client(self):
     """
         Test SDK Client Calls
     """
     scheme = "couchbase"
     host=self.master.ip
     if self.master.ip == "127.0.0.1":
         scheme = "http"
         host="{0}:{1}".format(self.master.ip,self.master.port)
     client = SDKClient(scheme=scheme,hosts = [host], bucket = "default")
     client.remove("1",quiet=True)
     client.insert("1","{1:2}")
     flag, cas, val = client.get("1")
     self.log.info("val={0},flag={1},cas={2}".format(val, flag, cas))
     client.upsert("1","{1:3}")
     client.touch("1",ttl=100)
     flag, cas, val = client.get("1")
     self.assertTrue(val == "{1:3}", val)
     self.log.info("val={0},flag={1},cas={2}".format(val, flag, cas))
     client.remove("1",cas = cas)
     client.incr("key", delta=20, initial=5)
     flag, cas, val = client.get("key")
     self.assertTrue(val == 5)
     self.log.info("val={0},flag={1},cas={2}".format(val, flag, cas))
     client.incr("key", delta=20, initial=5)
     flag, cas, val = client.get("key")
     self.assertTrue(val == 25)
     self.log.info("val={0},flag={1},cas={2}".format(val, flag, cas))
     client.decr("key", delta=20, initial=5)
     flag, cas, val = client.get("key")
     self.assertTrue(val == 5)
     print  flag, cas, val
     client.upsert("key1","document1")
     client.upsert("key2","document2")
     client.upsert("key3","document3")
     set = client.get_multi(["key1","key2"])
     self.log.info(set)
     client.upsert_multi({"key1":"{1:2}","key2":"{3:2}"})
     set = client.get_multi(["key1","key2"])
     self.log.info(set)
     client.touch_multi(["key1","key2"],ttl=200)
     set = client.get_multi(["key1","key2"])
     self.log.info(set)
     data = client.observe("key1")
     self.log.info(data)
     data = client.observe_multi(["key1","key2"])
     self.log.info(data)
     stats = client.stats(["key1"])
     self.log.info(stats)
     client.n1ql_request(client.n1ql_query('create primary index on default')).execute()
     query = client.n1ql_query('select * from default')
     request = client.n1ql_request(query)
     obj = request.get_single_result()._jsobj
     self.log.info(obj)
     client.close()
Code example #33
File: lww_stats.py Project: membase/testrunner
    def test_poisoned_cas(self):

        self.log.info("starting test_poisoned_cas")

        """
        - set the clock ahead
        - do lots of sets and get some CASs
        - do a set and get the CAS (flag, CAS, value) and save it
        - set the clock back
        - verify the CAS is still big on new sets
        - reset the CAS
        - do the vbucket max cas and verify
        - do a new mutation and verify the CAS is smaller


        """

        sdk_client = SDKClient(scheme="couchbase", hosts=[self.servers[0].ip], bucket=self.buckets[0].name)
        mc_client = MemcachedClientHelper.direct_client(self.servers[0], self.buckets[0])
        shell = RemoteMachineShellConnection(self.servers[0])

        # move the system clock ahead to poison the CAS
        shell = RemoteMachineShellConnection(self.servers[0])
        self.assertTrue(shell.change_system_time(LWWStatsTests.ONE_HOUR_IN_SECONDS), "Failed to advance the clock")

        output, error = shell.execute_command("date")
        self.log.info("Date after is set forward {0}".format(output))

        rc = sdk_client.set("key1", "val1")
        rc = mc_client.get("key1")
        poisoned_cas = rc[1]
        self.log.info("The poisoned CAS is {0}".format(poisoned_cas))

        # do lots of mutations to set the max CAS for all vbuckets

        gen_load = BlobGenerator("key-for-cas-test", "value-for-cas-test-", self.value_size, end=10000)
        self._load_all_buckets(self.master, gen_load, "create", 0)

        # move the clock back again and verify the CAS stays large
        self.assertTrue(shell.change_system_time(-LWWStatsTests.ONE_HOUR_IN_SECONDS), "Failed to change the clock")
        output, error = shell.execute_command("date")
        self.log.info("Date after is set backwards {0}".format(output))

        use_mc_bin_client = self.input.param("use_mc_bin_client", False)

        if use_mc_bin_client:
            rc = mc_client.set("key2", 0, 0, "val2")
            second_poisoned_cas = rc[1]
        else:
            rc = sdk_client.set("key2", "val2")
            second_poisoned_cas = rc.cas
        self.log.info("The second_poisoned CAS is {0}".format(second_poisoned_cas))
        self.assertTrue(
            second_poisoned_cas > poisoned_cas,
            "Second poisoned CAS {0} is not larger than the first poisoned cas".format(
                second_poisoned_cas, poisoned_cas
            ),
        )

        # reset the CAS for all vbuckets. This needs to be done in conjunction with a clock change. If the clock is not
        # changed then the CAS will immediately continue with the clock. I see two scenarios:
        # 1. Set the clock back 1 hours and the CAS back 30 minutes, the CAS should be used
        # 2. Set the clock back 1 hour, set the CAS back 2 hours, the clock should be use

        # do case 1, set the CAS back 30 minutes.  Calculation below assumes the CAS is in nanoseconds
        earlier_max_cas = poisoned_cas - 30 * 60 * 1000000000
        for i in range(self.vbuckets):
            output, error = shell.execute_cbepctl(
                self.buckets[0], "", "set_vbucket_param", "max_cas ", str(i) + " " + str(earlier_max_cas)
            )
            if len(error) > 0:
                self.fail("Failed to set the max cas")

        # verify the max CAS

        for i in range(self.vbuckets):
            max_cas = int(mc_client.stats("vbucket-details")["vb_" + str(i) + ":max_cas"])
            self.assertTrue(
                max_cas == earlier_max_cas,
                "Max CAS not properly set for vbucket {0} set as {1} and observed {2}".format(
                    i, earlier_max_cas, max_cas
                ),
            )
            self.log.info("Per cbstats the max cas for bucket {0} is {1}".format(i, max_cas))

        rc1 = sdk_client.set("key-after-resetting cas", "val1")
        rc2 = mc_client.get("key-after-resetting cas")
        set_cas_after_reset_max_cas = rc2[1]
        self.log.info("The later CAS is {0}".format(set_cas_after_reset_max_cas))
        self.assertTrue(
            set_cas_after_reset_max_cas < poisoned_cas,
            "For {0} CAS has not decreased. Current CAS {1} poisoned CAS {2}".format(
                "key-after-resetting cas", set_cas_after_reset_max_cas, poisoned_cas
            ),
        )

        # do a bunch of sets and verify the CAS is small - this is really only one set, need to do more

        gen_load = BlobGenerator(
            "key-for-cas-test-after-cas-is-reset", "value-for-cas-test-", self.value_size, end=1000
        )
        self._load_all_buckets(self.master, gen_load, "create", 0)

        gen_load.reset()
        while gen_load.has_next():
            key, value = gen_load.next()
            try:
                rc = mc_client.get(key)
                # rc = sdk_client.get(key)
                cas = rc[1]
                self.assertTrue(
                    cas < poisoned_cas,
                    "For key {0} CAS has not decreased. Current CAS {1} poisoned CAS {2}".format(
                        key, cas, poisoned_cas
                    ),
                )
            except Exception as e:
                self.log.info("get error with key {0}: {1}".format(key, e))

        rc = sdk_client.set("key3", "val1")
        better_cas = rc.cas

        self.log.info("The better CAS is {0}".format(better_cas))

        self.assertTrue(better_cas < poisoned_cas, "The CAS was not improved")
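
        # Illustrative sketch (not in the original test) of case 2 from the comment
        # above: with the clock still one hour behind, push max_cas back two hours;
        # a new mutation's CAS should then be driven by the clock, not by max_cas.
        two_hours_earlier_max_cas = poisoned_cas - 2 * 60 * 60 * 1000000000
        for i in range(self.vbuckets):
            output, error = shell.execute_cbepctl(
                self.buckets[0], "", "set_vbucket_param", "max_cas ", str(i) + " " + str(two_hours_earlier_max_cas)
            )
            if len(error) > 0:
                self.fail("Failed to set the max cas")
        rc = sdk_client.set("key-case-2", "val1")
        self.assertTrue(
            rc.cas > two_hours_earlier_max_cas,
            "Case 2: expected the clock to drive the CAS, got {0}".format(rc.cas),
        )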

        # set the clock way ahead - remote_util_OS.py (new)
        # do a bunch of mutations - not really needed
        # do the fix command - cbepctl, the existing way (remote util)

        # do some mutations, verify they conform to the new CAS - build on the CAS code,
        #     where to iterate over the keys and get the CAS?
        """
Code example #34
0
    def test_sdk_subddoc(self):
        """
            Test SDK Client Calls
        """
        scheme = "couchbase"
        host=self.master.ip
        if self.master.ip == "127.0.0.1":
            scheme = "http"
            host="{0}:{1}".format(self.master.ip,self.master.port)

        client = SDKClient(scheme=scheme,hosts = [host], bucket = "default")
        json_document = {"1":1, "2":2, "array": [1]}
        document_key = "1"
        client.insert("1",json_document)
        client.insert_in(document_key, "3", 3)
        client.upsert_in(document_key, "4", 4)
        client.upsert_in(document_key, "4", "change_4")
        client.replace_in(document_key, "4", "crap_4")
        client.arrayprepend_in(document_key, "array", "0")
        client.arrayappend_in(document_key, "array", "2")
        client.arrayinsert_in(document_key, "array[1]", "INSERT_VALUE_AT_INDEX_1")
        client.arrayaddunique_in(document_key, "array", "INSERT_UNIQUE_VALUE")
        print json.dumps(client.get(document_key))
Code example #35
0
 def run(self, *args, **kw):
     cb = SDKClient(scheme="couchbase", hosts=[SERVER_IP],
       bucket='default').cb
     for x in range(ITERATIONS):
         cb.mutate_in(DOCID, SD.array_append('recs', 1))
Code example #36
0
File: lww_stats.py Project: membase/testrunner
    def test_drift_stats(self):

        # An exercise in filling out the matrix with the right amount of code:
        # we want to test (ahead, behind), (setWithMeta, deleteWithMeta) and (active, replica).
        # So for now let's do the set/del in sequence.

        self.log.info("starting test_drift_stats")

        check_ahead_threshold = self.input.param("check_ahead_threshold", True)

        self.log.info("Checking the ahead threshold? {0}".format(check_ahead_threshold))

        sdk_client = SDKClient(scheme="couchbase", hosts=[self.servers[0].ip], bucket=self.buckets[0].name)
        mc_client = MemcachedClientHelper.direct_client(self.servers[0], self.buckets[0])
        shell = RemoteMachineShellConnection(self.servers[0])

        # get the current time
        rc = sdk_client.set("key1", "val1")
        current_time_cas = rc.cas

        test_key = "test-set-with-metaxxxx"
        vbId = (((zlib.crc32(test_key)) >> 16) & 0x7FFF) & (self.vbuckets - 1)

        # verifying the case where we are within the threshold, do a set and del, neither should trigger
        rc = mc_client.setWithMeta(test_key, "test-value", 0, 0, 0, current_time_cas)
        # rc = mc_client.setWithMetaLWW(test_key, 'test-value', 0, 0, current_time_cas)
        rc = mc_client.delWithMetaLWW(test_key, 0, 0, current_time_cas + 1)

        vbucket_stats = mc_client.stats("vbucket-details")
        ahead_exceeded = int(vbucket_stats["vb_" + str(vbId) + ":drift_ahead_threshold_exceeded"])
        self.assertTrue(ahead_exceeded == 0, "Ahead exceeded expected is 0 but is {0}".format(ahead_exceeded))

        behind_exceeded = int(vbucket_stats["vb_" + str(vbId) + ":drift_behind_threshold_exceeded"])
        self.assertTrue(behind_exceeded == 0, "Behind exceeded expected is 0 but is {0}".format(behind_exceeded))

        # out of curiosity, log the total counts
        self.log.info(
            "Total stats: total abs drift {0} and total abs drift count {1}".format(
                vbucket_stats["vb_" + str(vbId) + ":total_abs_drift"],
                vbucket_stats["vb_" + str(vbId) + ":total_abs_drift_count"],
            )
        )

        # do the ahead set with meta case - verify: ahead threshold exceeded, total_abs_drift count and abs_drift
        if check_ahead_threshold:
            stat_descriptor = "ahead"
            cas = current_time_cas + 5000 * LWWStatsTests.DEFAULT_THRESHOLD

        else:
            stat_descriptor = "behind"
            cas = current_time_cas - 5000 * LWWStatsTests.DEFAULT_THRESHOLD

        rc = mc_client.setWithMeta(test_key, "test-value", 0, 0, 0, cas)
        rc = mc_client.delWithMetaLWW(test_key, 0, 0, cas + 1)

        # verify the vbucket stats
        vbucket_stats = mc_client.stats("vbucket-details")
        drift_counter_stat = "vb_" + str(vbId) + ":drift_" + stat_descriptor + "_threshold_exceeded"
        threshold_exceeded = int(mc_client.stats("vbucket-details")[drift_counter_stat])
        # MB-21450 self.assertTrue( ahead_exceeded == 2, '{0} exceeded expected is 1 but is {1}'.
        # format( stat_descriptor, threshold_exceeded))

        self.log.info(
            "Total stats: total abs drift {0} and total abs drift count {1}".format(
                vbucket_stats["vb_" + str(vbId) + ":total_abs_drift"],
                vbucket_stats["vb_" + str(vbId) + ":total_abs_drift_count"],
            )
        )

        # and verify the bucket stats: ep_active_hlc_drift_count, ep_clock_cas_drift_threshold_exceeded,
        # ep_active_hlc_drift
        bucket_stats = mc_client.stats()
        ep_active_hlc_drift_count = int(bucket_stats["ep_active_hlc_drift_count"])
        ep_clock_cas_drift_threshold_exceeded = int(bucket_stats["ep_clock_cas_drift_threshold_exceeded"])
        ep_active_hlc_drift = int(bucket_stats["ep_active_hlc_drift"])

        # Drift count appears to be the number of mutations
        self.assertTrue(ep_active_hlc_drift_count > 0, "ep_active_hlc_drift_count is 0, expected a positive value")

        # drift itself is the sum of the absolute values of all drifts, so check that it is greater than 0
        self.assertTrue(ep_active_hlc_drift > 0, "ep_active_hlc_drift is 0, expected a positive value")

        # the actual drift count is a little more granular
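        # a setWithMeta and a delWithMetaLWW were both issued with a CAS beyond the
        # threshold above, so two exceed events are expected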
        expected_drift_threshold_exceed_count = 2
        self.assertTrue(
            expected_drift_threshold_exceed_count == ep_clock_cas_drift_threshold_exceeded,
            "ep_clock_cas_drift_threshold_exceeded is incorrect. Expected {0}, actual {1}".format(
                expected_drift_threshold_exceed_count, ep_clock_cas_drift_threshold_exceeded
            ),
        )
Code example #37
0
 def run(self, *args, **kw):
     cb = SDKClient(scheme="couchbase", hosts=[SERVER_IP],
       bucket='default').cb
     for x in range(ITERATIONS):
         cb.mutate_in(DOCID, SD.array_append('recs', 1))
Code example #38
0
File: lww_stats.py Project: arod1987/testrunner
    def test_drift_stats(self):
        '''
        @note: An exercise in filling out the matrix with the right amount of code:
               we want to test (ahead, behind), (setWithMeta, deleteWithMeta)
               and (active, replica).
               So for now let's do the set/del in sequence.
        '''
        self.log.info('starting test_drift_stats')
        # Create a user with the bucket's name and admin access
        payload = "name={0}&roles=admin&password=password".format(
            self.buckets[0].name)
        self.rest.add_set_builtin_user(self.buckets[0].name, payload)
        check_ahead_threshold = self.input.param("check_ahead_threshold",
                                                 True)

        self.log.info('Checking the ahead threshold? {0}'.format(
            check_ahead_threshold))

        sdk_client = SDKClient(scheme='couchbase',
                               hosts = [self.servers[0].ip],
                               bucket = self.buckets[0].name)
        mc_client = MemcachedClientHelper.direct_client(self.servers[0],
                                                        self.buckets[0])
        shell = RemoteMachineShellConnection(self.servers[0])

        # get the current time
        rc = sdk_client.set('key1', 'val1')
        current_time_cas = rc.cas

        test_key = 'test-set-with-metaxxxx'
        vbId = (((zlib.crc32(test_key)) >> 16) & 0x7fff) & (self.vbuckets - 1)

        #import pdb;pdb.set_trace()
        # verifying the case where we are within the threshold, do a set and del, neither should trigger
        #mc_active.setWithMeta(key, '123456789', 0, 0, 123, cas)
        rc = mc_client.setWithMeta(test_key, 'test-value',
                                   0, 0, 1, current_time_cas)
        #rc = mc_client.setWithMetaLWW(test_key, 'test-value', 0, 0, current_time_cas)
        #rc = mc_client.delWithMetaLWW(test_key, 0, 0, current_time_cas+1)

        vbucket_stats = mc_client.stats('vbucket-details')
        ahead_exceeded = int(vbucket_stats['vb_' + str(vbId) + ':drift_ahead_threshold_exceeded'])
        self.assertTrue(ahead_exceeded == 0,
                        'Ahead exceeded expected is 0 but is {0}'.format(ahead_exceeded))
        behind_exceeded = int(vbucket_stats['vb_' + str(vbId) + ':drift_behind_threshold_exceeded'])
        self.assertTrue(behind_exceeded == 0, 'Behind exceeded expected is 0 but is {0}'.format(behind_exceeded))
        # out of curiosity, log the total counts
        self.log.info('Total stats: total abs drift {0} and total abs drift count {1}'.
                      format(vbucket_stats['vb_' + str(vbId) + ':total_abs_drift'],
                             vbucket_stats['vb_' + str(vbId) + ':total_abs_drift_count']))

        # do the ahead set with meta case - verify: ahead threshold exceeded, total_abs_drift count and abs_drift
        if check_ahead_threshold:
            stat_descriptor = 'ahead'
            cas = current_time_cas + 5000 * LWWStatsTests.DEFAULT_THRESHOLD

        else:
            stat_descriptor = 'behind'
            cas = current_time_cas -(5000 * LWWStatsTests.DEFAULT_THRESHOLD)
        rc = mc_client.setWithMeta(test_key, 'test-value', 0, 0, 0, cas)
        #rc = mc_client.delWithMetaLWW(test_key, 0, 0, cas+1)
        # verify the vbucket stats
        vbucket_stats = mc_client.stats('vbucket-details')
        drift_counter_stat = 'vb_' + str(vbId) + ':drift_' + stat_descriptor + '_threshold_exceeded'
        threshold_exceeded  = int( mc_client.stats('vbucket-details')[drift_counter_stat] )
        # MB-21450 self.assertTrue( ahead_exceeded == 2, '{0} exceeded expected is 1 but is {1}'.
        # format( stat_descriptor, threshold_exceeded))

        self.log.info('Total stats: total abs drift {0} and total abs drift count {1}'.
                      format(vbucket_stats['vb_' + str(vbId) + ':total_abs_drift'],
                             vbucket_stats['vb_' + str(vbId) + ':total_abs_drift_count']))

        # and verify the bucket stats: ep_active_hlc_drift_count, ep_clock_cas_drift_threshold_exceeded,
        # ep_active_hlc_drift
        bucket_stats = mc_client.stats()
        ep_active_hlc_drift_count = int(bucket_stats['ep_active_hlc_drift_count'])
        ep_clock_cas_drift_threshold_exceeded = int(bucket_stats['ep_clock_cas_drift_threshold_exceeded'])
        ep_active_hlc_drift = int(bucket_stats['ep_active_hlc_drift'])

        # Drift count appears to be the number of mutations
        self.assertTrue( ep_active_hlc_drift_count > 0, 'ep_active_hlc_drift_count is 0, expected a positive value')

        # drift itself is the sum of the absolute values of all drifts, so check that it is greater than 0
        self.assertTrue( ep_active_hlc_drift > 0, 'ep_active_hlc_drift is 0, expected a positive value')

        # the actual drift count is a little more granular
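        # only the setWithMeta above crossed the threshold (the delWithMetaLWW call
        # is commented out), so a single exceed event is expected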
        expected_drift_threshold_exceed_count = 1
        self.assertTrue( expected_drift_threshold_exceed_count == ep_clock_cas_drift_threshold_exceeded,
                         'ep_clock_cas_drift_threshold_exceeded is incorrect. Expected {0}, actual {1}'.
                             format(expected_drift_threshold_exceed_count,
                                    ep_clock_cas_drift_threshold_exceeded) )
Code example #39
0
 def test_sdk_client(self):
     """
         Test SDK Client Calls
     """
     client = SDKClient(scheme="couchbase",hosts = [self.master], bucket = "default")
     client.remove("1",quiet=True)
     client.insert("1","{1:2}")
     val, flag, cas = client.get("1")
     self.log.info("val={0},flag={1},cas={2}".format(val, flag, cas))
     client.upsert("1","{1:3}")
     client.touch("1",ttl=100)
     val, flag, cas = client.get("1")
     self.assertTrue(val == "{1:3}")
     self.log.info("val={0},flag={1},cas={2}".format(val, flag, cas))
     client.remove("1",cas = cas)
     client.incr("key", delta=20, initial=5)
     val, flag, cas = client.get("key")
     self.assertTrue(val == 5)
     self.log.info("val={0},flag={1},cas={2}".format(val, flag, cas))
     client.incr("key", delta=20, initial=5)
     val, flag, cas = client.get("key")
     self.assertTrue(val == 25)
     self.log.info("val={0},flag={1},cas={2}".format(val, flag, cas))
     client.decr("key", delta=20, initial=5)
     val, flag, cas = client.get("key")
     self.assertTrue(val == 5)
     print val, flag, cas
     client.upsert("key1","document1")
     client.upsert("key2","document2")
     client.upsert("key3","document3")
     set = client.get_multi(["key1","key2"])
     self.log.info(set)
     client.upsert_multi({"key1":"{1:2}","key2":"{3:2}"})
     set = client.get_multi(["key1","key2","key3"])
     self.log.info(set)
     client.touch_multi(["key1","key2","key3"],ttl=200)
     set = client.get_multi(["key1","key2","key3"])
     self.log.info(set)
     set = client.get_multi(["key1","key2","key3"],replica=True)
     self.log.info(set)
     data = client.observe("key1")
     self.log.info(data)
     data = client.observe_multi(["key1","key2"])
     self.log.info(data)
     stats = client.stats(["key1"])
     self.log.info(stats)
     client.close()
Code example #40
0
    def test_cbas_bucket_connect_with_more_than_eight_active_datasets(self):
        """
        1. Create a cb bucket
        2. Create a cbas bucket
        3. Create 9 datasets
        4. Connecting to the cbas bucket must fail with the error - Maximum number of active writable datasets (8) exceeded
        5. Delete 1 dataset, so that the count drops to 8
        6. Re-connect the cbas bucket; this time the connection must succeed
        7. Verify the count in each dataset after connecting
        """
        self.log.info("Fetch test case arguments")
        self.fetch_test_case_arguments()

        self.log.info("Load data in the default bucket")
        self.perform_doc_ops_in_all_cb_buckets(self.num_items, "create", 0,
                                               self.num_items)

        self.log.info("Create reference to SDK client")
        client = SDKClient(scheme="couchbase",
                           hosts=[self.master.ip],
                           bucket=self.cb_bucket_name,
                           password=self.master.rest_password)

        self.log.info("Insert binary data into default bucket")
        keys = ["%s" % (uuid.uuid4()) for i in range(0, self.num_items)]
        client.insert_binary_document(keys)

        self.log.info("Insert Non-Json string data into default bucket")
        keys = ["%s" % (uuid.uuid4()) for i in range(0, self.num_items)]
        client.insert_string_document(keys)

        self.log.info("Create connection")
        self.cbas_util.createConn(self.cb_bucket_name)

        self.log.info("Create a CBAS bucket")
        self.assertTrue(self.cbas_util.create_bucket_on_cbas(
            cbas_bucket_name=self.cbas_bucket_name,
            cb_bucket_name=self.cb_bucket_name),
                        msg="Failed to create CBAS bucket")

        self.log.info("Create datasets")
        for i in range(1, self.num_of_dataset + 1):
            self.assertTrue(
                self.cbas_util.create_dataset_on_bucket(
                    cbas_bucket_name=self.cb_bucket_name,
                    cbas_dataset_name=self.dataset_prefix + str(i)),
                msg="Failed to create dataset {0}".format(self.dataset_prefix +
                                                          str(i)))

        self.log.info("Verify connect to CBAS bucket must fail")
        self.assertTrue(
            self.cbas_util.connect_to_bucket(
                cbas_bucket_name=self.cbas_bucket_name,
                cb_bucket_password=self.cb_bucket_password,
                validate_error_msg=True,
                expected_error=BucketOperations.CBAS_BUCKET_CONNECT_ERROR_MSG),
            msg="Incorrect error msg while connecting to cbas bucket")

        self.log.info("Drop the last dataset created")
        self.assertTrue(
            self.cbas_util.drop_dataset(cbas_dataset_name=self.dataset_prefix +
                                        str(self.num_of_dataset)),
            msg="Failed to drop dataset {0}".format(self.dataset_prefix +
                                                    str(self.num_of_dataset)))

        self.log.info("Connect to CBAS bucket")
        self.assertTrue(self.cbas_util.connect_to_bucket(
            cbas_bucket_name=self.cbas_bucket_name),
                        msg="Failed to connect to cbas bucket")

        self.log.info("Wait for ingestion to complete and validate count")
        for i in range(1, self.num_of_dataset):
            self.cbas_util.wait_for_ingestion_complete(
                [self.dataset_prefix + str(i)], self.num_items)
            self.assertTrue(
                self.cbas_util.validate_cbas_dataset_items_count(
                    self.dataset_prefix + str(i), self.num_items))
Code example #41
0
File: xdcr_xattr_sdk.py Project: arod1987/testrunner
    def verify_results(self, skip_verify_data=[], skip_verify_revid=[], sg_run=False):
        """Verify data between each couchbase and remote clusters.
        Run below steps for each source and destination cluster..
            1. Run expiry pager.
            2. Wait for disk queue size to 0 on each nodes.
            3. Wait for Outbound mutations to 0.
            4. Wait for Items counts equal to kv_store size of buckets.
            5. Verify items value on each bucket.
            6. Verify Revision id of each item.
        """
        skip_key_validation = self._input.param("skip_key_validation", False)
        self.__merge_all_buckets()
        for cb_cluster in self.get_cb_clusters():
            for remote_cluster_ref in cb_cluster.get_remote_clusters():
                try:
                    src_cluster = remote_cluster_ref.get_src_cluster()
                    dest_cluster = remote_cluster_ref.get_dest_cluster()

                    if self._evict_with_compactor:
                        for b in src_cluster.get_buckets():
                            # only need to do compaction on the source cluster, evictions are propagated to the remote
                            # cluster
                            src_cluster.get_cluster().compact_bucket(src_cluster.get_master_node(), b)

                    else:
                        src_cluster.run_expiry_pager()
                        dest_cluster.run_expiry_pager()

                    src_cluster.wait_for_flusher_empty()
                    dest_cluster.wait_for_flusher_empty()

                    src_dcp_queue_drained = src_cluster.wait_for_dcp_queue_drain()
                    dest_dcp_queue_drained = dest_cluster.wait_for_dcp_queue_drain()

                    src_cluster.wait_for_outbound_mutations()
                    dest_cluster.wait_for_outbound_mutations()
                except Exception as e:
                    # just log any exception thrown, do not fail test
                    self.log.error(e)
                if not skip_key_validation:
                    try:
                        if not sg_run:
                            src_active_passed, src_replica_passed = \
                                src_cluster.verify_items_count(timeout=self._item_count_timeout)
                            dest_active_passed, dest_replica_passed = \
                                dest_cluster.verify_items_count(timeout=self._item_count_timeout)

                        src_cluster.verify_data(max_verify=self._max_verify, skip=skip_verify_data,
                                                only_store_hash=self.only_store_hash)
                        dest_cluster.verify_data(max_verify=self._max_verify, skip=skip_verify_data,
                                                 only_store_hash=self.only_store_hash)
                        for _, cluster in enumerate(self.get_cb_clusters()):
                            for bucket in cluster.get_buckets():
                                h = httplib2.Http(".cache")
                                resp, content = h.request(
                                    "http://{0}:4984/db/_all_docs".format(cluster.get_master_node().ip))
                                self.assertEqual(json.loads(content)['total_rows'], self._num_items)
                                client = SDKClient(scheme="couchbase", hosts=[cluster.get_master_node().ip],
                                                        bucket=bucket.name).cb
                                for i in xrange(self._num_items):
                                    key = 'k_%s_%s' % (i, str(cluster).replace(' ', '_').
                                                      replace('.', '_').replace(',', '_').replace(':', '_'))
                                    res = client.get(key)
                                    for xk, xv in res.value.iteritems():
                                        rv = client.mutate_in(key, SD.get(xk, xattr=True))
                                        self.assertTrue(rv.exists(xk))
                                        self.assertEqual(xv, rv[xk])
                                    if sg_run:
                                        resp, content = h.request("http://{0}:4984/db/{1}".format(cluster.get_master_node().ip, key))
                                        self.assertEqual(json.loads(content)['_id'], key)
                                        self.assertEqual(json.loads(content)[xk], xv)
                                        self.assertTrue('2-' in json.loads(content)['_rev'])
                    except Exception as e:
                        self.log.error(e)
                    finally:
                        if not sg_run:
                            rev_err_count = self.verify_rev_ids(remote_cluster_ref.get_replications(),
                                                            skip=skip_verify_revid)
                            # we're done with the test, now report specific errors
                            if (not (src_active_passed and dest_active_passed)) and \
                                    (not (src_dcp_queue_drained and dest_dcp_queue_drained)):
                                self.fail("Incomplete replication: Keys stuck in dcp queue")
                            if not (src_active_passed and dest_active_passed):
                                self.fail("Incomplete replication: Active key count is incorrect")
                            if not (src_replica_passed and dest_replica_passed):
                                self.fail("Incomplete intra-cluster replication: "
                                          "replica count did not match active count")
                            if rev_err_count > 0:
                                self.fail("RevID verification failed for remote-cluster: {0}".
                                          format(remote_cluster_ref))

        # treat errors in self.__report_error_list as failures
        if len(self.get_report_error_list()) > 0:
            error_logger = self.check_errors_in_goxdcr_logs()
            if error_logger:
                self.fail("Errors found in logs : {0}".format(error_logger))