Code example #1
File: sdk_client_tests.py Project: lichia/testrunner
 def test_sdk_client(self):
     """
         Test SDK Client Calls
     """
     scheme = "couchbase"
     host = self.master.ip
     if self.master.ip == "127.0.0.1":
         scheme = "http"
         host = "{0}:{1}".format(self.master.ip, self.master.port)
     client = SDKClient(scheme=scheme, hosts=[host], bucket="default")
     client.remove("1", quiet=True)
     client.insert("1", "{1:2}")
     flag, cas, val = client.get("1")
     self.log.info("val={0},flag={1},cas={2}".format(val, flag, cas))
     client.upsert("1", "{1:3}")
     client.touch("1", ttl=100)
     flag, cas, val = client.get("1")
     self.assertTrue(val == "{1:3}", val)
     self.log.info("val={0},flag={1},cas={2}".format(val, flag, cas))
     client.remove("1", cas=cas)
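     # incr/decr create the counter at `initial` when the key is absent;
     # subsequent calls adjust the existing value by `delta`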
     client.incr("key", delta=20, initial=5)
     flag, cas, val = client.get("key")
     self.assertTrue(val == 5)
     self.log.info("val={0},flag={1},cas={2}".format(val, flag, cas))
     client.incr("key", delta=20, initial=5)
     flag, cas, val = client.get("key")
     self.assertTrue(val == 25)
     self.log.info("val={0},flag={1},cas={2}".format(val, flag, cas))
     client.decr("key", delta=20, initial=5)
     flag, cas, val = client.get("key")
     self.assertTrue(val == 5)
     print(flag, cas, val)
     client.upsert("key1", "document1")
     client.upsert("key2", "document2")
     client.upsert("key3", "document3")
     set = client.get_multi(["key1", "key2"])
     self.log.info(set)
     client.upsert_multi({"key1": "{1:2}", "key2": "{3:2}"})
     set = client.get_multi(["key1", "key2"])
     self.log.info(set)
     client.touch_multi(["key1", "key2"], ttl=200)
     set = client.get_multi(["key1", "key2"])
     self.log.info(set)
     data = client.observe("key1")
     self.log.info(data)
     data = client.observe_multi(["key1", "key2"])
     self.log.info(data)
     stats = client.stats(["key1"])
     self.log.info(stats)
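     # a primary index must exist before the bucket can be queried with N1QL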
     client.n1ql_request(
         client.n1ql_query('create primary index on default')).execute()
     query = client.n1ql_query('select * from default')
     request = client.n1ql_request(query)
     obj = request.get_single_result()._jsobj
     self.log.info(obj)
     client.close()
Code example #2
    def test_ephemeral_bucket_NRU_eviction_access_in_the_delete_range(self):
        """
        generate_load = BlobGenerator(EvictionKV.KEY_ROOT, 'param2', self.value_size, start=0, end=self.num_items)
        self._load_all_ephemeral_buckets_until_no_more_memory(self.servers[0], generate_load, "create", 0, self.num_items)


        # figure out how many items were loaded and load 10% more
        rest = RestConnection(self.servers[0])
        itemCount = rest.get_bucket(self.buckets[0]).stats.itemCount

        self.log.info( 'Reached OOM, the number of items is {0}'.format( itemCount))


        """

        # select some keys which we expect to be adjacent to the kvs which will be deleted
        # and how many KVs should we select, maybe that is a parameter

        itemCount = 50000
        max_delete_value = itemCount // 10
        NUM_OF_ACCESSES = 50
        keys_to_access = set()
        for i in range(NUM_OF_ACCESSES):
            keys_to_access.add(random.randint(0, max_delete_value))
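        # reading these keys marks them "recently used", so NRU eviction
        # should pass over them when the incremental load forces deletions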

        # and then do accesses on the key set
        client = SDKClient(hosts=[self.master.ip], bucket=self.buckets[0])
        for i in keys_to_access:
            # and we may want to parameterize the get at some point
            rc = client.get(EvictionKV.KEY_ROOT + str(i), no_format=True)

        # and then do puts to delete out stuff
        PERCENTAGE_TO_ADD = 10
        incremental_kv_population = BlobGenerator(EvictionKV.KEY_ROOT,
                                                  'param2',
                                                  self.value_size,
                                                  start=itemCount,
                                                  end=itemCount + itemCount *
                                                  PERCENTAGE_TO_ADD // 100)
        self._load_bucket(self.buckets[0],
                          self.master,
                          incremental_kv_population,
                          "create",
                          exp=0,
                          kv_store=1)

        # and verify that the touched kvs are still there
        for i in keys_to_access:
            # and we may want to parameterize the get at some point
            rc = client.get(EvictionKV.KEY_ROOT + str(i), no_format=True)
            self.assertFalse(
                rc is None,
                'Key {0} was incorrectly deleted'.format(EvictionKV.KEY_ROOT +
                                                         str(i)))
Code example #3
    def test_add_concurrent(self):
        DOCID = 'subdoc_doc_id'
        SERVER_IP = self.servers[0].ip
        ITERATIONS = 200
        THREADS = 20

        main_bucket = SDKClient(scheme="couchbase", hosts=[self.servers[0].ip],
                  bucket='default').cb
        main_bucket.upsert(DOCID, {'recs':[]})

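        # sub-document array_append mutations are atomic on the server, so the
        # concurrent Runner threads below need no client-side locking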
        class Runner(Thread):
            def run(self, *args, **kw):
                cb = SDKClient(scheme="couchbase", hosts=[SERVER_IP],
                  bucket='default').cb
                for x in range(ITERATIONS):
                    cb.mutate_in(DOCID, SD.array_append('recs', 1))

        thrs = [Runner() for x in range(THREADS)]
        [t.start() for t in thrs]
        [t.join() for t in thrs]

        obj = main_bucket.get(DOCID)

        array_entry_count = len(obj.value['recs'])

        self.assertTrue(array_entry_count == ITERATIONS * THREADS,
                         'Incorrect number of array entries. Expected {0} actual {1}'.format(ITERATIONS * THREADS,
                                                                           array_entry_count))
Code example #4
File: xdcr_xattr_sdk.py Project: arod1987/testrunner
    def __load_chain(self, start_num=0):
        for i, cluster in enumerate(self.get_cb_clusters()):
            if self._rdirection == REPLICATION_DIRECTION.BIDIRECTION:
                if i > len(self.get_cb_clusters()) - 1:
                    break
            else:
                if i >= len(self.get_cb_clusters()) - 1:
                    break
            if not self._dgm_run:
                for bucket in cluster.get_buckets():
                    client = SDKClient(scheme="couchbase", hosts=[cluster.get_master_node().ip],
                                            bucket=bucket.name).cb
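                    # write each document body, then mirror the field into an
                    # extended attribute (xattr); create_parents builds the
                    # xattr path if it is missing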
                    for i in range(start_num, start_num + self._num_items):
                        key = 'k_%s_%s' % (i, str(cluster).replace(' ', '_').
                                           replace('.', '_').replace(',', '_').replace(':', '_'))
                        value = {'xattr_%s' % i: 'value%s' % i}
                        client.upsert(key, value)
                        client.mutate_in(key, SD.upsert('xattr_%s' % i, 'value%s' % i,
                                                             xattr=True,
                                                             create_parents=True))
                        partition = bucket.kvs[1].acquire_partition(key)  # ["partition"]
                        if self.only_store_hash:
                            value = str(crc32.crc32_hash(value))
                        res = client.get(key)
                        partition.set(key, json.dumps(value), 0, res.flags)
                        bucket.kvs[1].release_partition(key)

            else:
                cluster.load_all_buckets_till_dgm(
                    active_resident_threshold=self._active_resident_threshold,
                    items=self._num_items)
Code example #5
    def test_nru_eviction_impact_on_cbas(self):

        self.log.info("Create dataset")
        self.cbas_util.create_dataset_on_bucket(self.cb_bucket_name,
                                                self.cbas_dataset_name)

        self.log.info("Connect to Local link")
        self.cbas_util.connect_link()

        self.log.info("Add documents until ram percentage")
        self.load_document_until_ram_percentage()

        self.log.info("Fetch current document count")
        bucket_helper = BucketHelper(self.master)
        item_count = bucket_helper.get_bucket(
            self.cb_bucket_name).stats.itemCount
        self.log.info("Completed base load with %s items" % item_count)

        self.log.info(
            "Fetch initial inserted 100 documents, so they are not removed")
        client = SDKClient(hosts=[self.master.ip],
                           bucket=self.cb_bucket_name,
                           password=self.master.rest_password)
        for i in range(100):
            client.get("test_docs-" + str(i))

        self.log.info("Add 20% more items to trigger NRU")
        for i in range(item_count, int(item_count * 1.2)):
            client.insert_document("key-id" + str(i), '{"name":"dave"}')

        self.log.info("Validate document count on CBAS")
        count_n1ql = self.rest.query_tool(
            'select count(*) from %s' %
            (self.cb_bucket_name))['results'][0]['$1']
        if self.cbas_util.validate_cbas_dataset_items_count(
                self.cbas_dataset_name, count_n1ql):
            pass
        else:
            self.log.info(
                "Document count mismatch might be due to ejection of "
                "documents on KV. Retrying")
            count_n1ql = self.rest.query_tool(
                'select count(*) from %s' %
                (self.cb_bucket_name))['results'][0]['$1']
            self.assertTrue(self.cbas_util.validate_cbas_dataset_items_count(
                self.cbas_dataset_name, count_n1ql),
                            msg="Count mismatch on CBAS")
Code example #6
 def test_sdk_client(self):
     """
         Test SDK Client Calls
     """
     client = SDKClient(scheme="couchbase", hosts=[self.master], bucket="default")
     client.remove("1", quiet=True)
     client.insert("1", "{1:2}")
     val, flag, cas = client.get("1")
     self.log.info("val={0},flag={1},cas={2}".format(val, flag, cas))
     client.upsert("1", "{1:3}")
     client.touch("1", ttl=100)
     val, flag, cas = client.get("1")
     self.assertTrue(val == "{1:3}")
     self.log.info("val={0},flag={1},cas={2}".format(val, flag, cas))
     client.remove("1", cas=cas)
     client.incr("key", delta=20, initial=5)
     val, flag, cas = client.get("key")
     self.assertTrue(val == 5)
     self.log.info("val={0},flag={1},cas={2}".format(val, flag, cas))
     client.incr("key", delta=20, initial=5)
     val, flag, cas = client.get("key")
     self.assertTrue(val == 25)
     self.log.info("val={0},flag={1},cas={2}".format(val, flag, cas))
     client.decr("key", delta=20, initial=5)
     val, flag, cas = client.get("key")
     self.assertTrue(val == 5)
     print(val, flag, cas)
     client.upsert("key1", "document1")
     client.upsert("key2", "document2")
     client.upsert("key3", "document3")
     set = client.get_multi(["key1", "key2"])
     self.log.info(set)
     client.upsert_multi({"key1": "{1:2}", "key2": "{3:2}"})
     set = client.get_multi(["key1", "key2", "key3"])
     self.log.info(set)
     client.touch_multi(["key1", "key2", "key3"], ttl=200)
     set = client.get_multi(["key1", "key2", "key3"])
     self.log.info(set)
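     # replica=True reads the documents from replica vbuckets instead of the
     # active ones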
     set = client.get_multi(["key1", "key2", "key3"], replica=True)
     self.log.info(set)
     data = client.observe("key1")
     self.log.info(data)
     data = client.observe_multi(["key1", "key2"])
     self.log.info(data)
     stats = client.stats(["key1"])
     self.log.info(stats)
     client.close()
Code example #7
    def test_ephemeral_bucket_NRU_eviction(self):

        generate_load = BlobGenerator(EvictionKV.KEY_ROOT,
                                      'param2',
                                      self.value_size,
                                      start=0,
                                      end=self.num_items)
        self._load_all_ephemeral_buckets_until_no_more_memory(
            self.servers[0], generate_load, "create", 0, self.num_items)

        # figure out how many items were loaded and load 10% more
        rest = RestConnection(self.servers[0])
        itemCount = rest.get_bucket(self.buckets[0]).stats.itemCount

        self.log.info(
            'Reached OOM, the number of items is {0}'.format(itemCount))

        incremental_kv_population = BlobGenerator(EvictionKV.KEY_ROOT,
                                                  'param2',
                                                  self.value_size,
                                                  start=itemCount,
                                                  end=itemCount * 1.1)
        self._load_bucket(self.buckets[0],
                          self.master,
                          incremental_kv_population,
                          "create",
                          exp=0,
                          kv_store=1)

        # and then probe the keys that are left. For now print out a distribution but later apply some heuristic
        client = SDKClient(hosts=[self.master.ip], bucket=self.buckets[0])

        NUMBER_OF_CHUNKS = 11
        items_in_chunk = int(1.1 * itemCount / NUMBER_OF_CHUNKS)
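        # probe each chunk of the key space and report what fraction of its
        # keys survived NRU eviction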
        for i in range(NUMBER_OF_CHUNKS):
            keys_still_present = 0
            for j in range(items_in_chunk):
                rc = client.get(EvictionKV.KEY_ROOT +
                                str(i * items_in_chunk + j),
                                no_format=True)

                if rc[2] is not None:
                    keys_still_present = keys_still_present + 1

            self.log.info(
                'Chunk {0} has {1:.2f} percent items still present'.format(
                    i, 100 * keys_still_present /
                    (itemCount * 1.1 / NUMBER_OF_CHUNKS)))
Code example #8
    def test_sdk_subddoc(self):
        """
            Test SDK Client Calls
        """
        scheme = "couchbase"
        host = self.master.ip
        if self.master.ip == "127.0.0.1":
            scheme = "http"
            host = "{0}:{1}".format(self.master.ip, self.master.port)

        client = SDKClient(scheme=scheme, hosts=[host], bucket="default")
        json_document = {"1": 1, "2": 2, "array": [1]}
        document_key = "1"
        client.insert("1", json_document)
        client.insert_in(document_key, "3", 3)
        client.upsert_in(document_key, "4", 4)
        client.upsert_in(document_key, "4", "change_4")
        client.replace_in(document_key, "4", "crap_4")
        client.arrayprepend_in(document_key, "array", "0")
        client.arrayappend_in(document_key, "array", "2")
        client.arrayinsert_in(document_key, "array[1]",
                              "INSERT_VALUE_AT_INDEX_1")
        client.arrayaddunique_in(document_key, "array", "INSERT_UNIQUE_VALUE")
        print(json.dumps(client.get(document_key)))
Code example #9
    def key_not_exists_test(self):
        client = SDKClient(hosts=[self.master.ip], bucket="default")
        KEY_NAME = 'key'

        for i in range(1500):
            client.set(KEY_NAME, "x")
            # for some reason, could not get delete to work
            client.remove(KEY_NAME)
            rc = client.get(KEY_NAME)
            # .get is automatically set to quiet for the sdk_client, so a None
            # value (rather than an exception) indicates the key was not
            # found; otherwise the sdk_client spends 10 seconds retrying the
            # command and is very slow
            if rc[2] is None:
                pass
            else:
                assert False
            # cas errors do not sleep the test for 10 seconds, plus we need to
            # check that the correct error is being thrown
            try:
                # for some reason, replace instead of cas would not reproduce
                # the bug
                client.cas(KEY_NAME, "value", cas=10)
            except NotFoundError:
                pass
        assert True
Code example #10
File: xdcr_xattr_sdk.py Project: arod1987/testrunner
    def verify_results(self,
                       skip_verify_data=[],
                       skip_verify_revid=[],
                       sg_run=False):
        """Verify data between each couchbase and remote clusters.
        Run below steps for each source and destination cluster..
            1. Run expiry pager.
            2. Wait for disk queue size to 0 on each nodes.
            3. Wait for Outbound mutations to 0.
            4. Wait for Items counts equal to kv_store size of buckets.
            5. Verify items value on each bucket.
            6. Verify Revision id of each item.
        """
        skip_key_validation = self._input.param("skip_key_validation", False)
        self.__merge_all_buckets()
        for cb_cluster in self.get_cb_clusters():
            for remote_cluster_ref in cb_cluster.get_remote_clusters():
                try:
                    src_cluster = remote_cluster_ref.get_src_cluster()
                    dest_cluster = remote_cluster_ref.get_dest_cluster()

                    if self._evict_with_compactor:
                        for b in src_cluster.get_buckets():
                            # only need to do compaction on the source cluster, evictions are propagated to the remote
                            # cluster
                            src_cluster.get_cluster().compact_bucket(
                                src_cluster.get_master_node(), b)

                    else:
                        src_cluster.run_expiry_pager()
                        dest_cluster.run_expiry_pager()

                    src_cluster.wait_for_flusher_empty()
                    dest_cluster.wait_for_flusher_empty()

                    src_dcp_queue_drained = src_cluster.wait_for_dcp_queue_drain()
                    dest_dcp_queue_drained = dest_cluster.wait_for_dcp_queue_drain()

                    src_cluster.wait_for_outbound_mutations()
                    dest_cluster.wait_for_outbound_mutations()
                except Exception as e:
                    # just log any exception thrown, do not fail test
                    self.log.error(e)
                if not skip_key_validation:
                    try:
                        if not sg_run:
                            src_active_passed, src_replica_passed = \
                                src_cluster.verify_items_count(timeout=self._item_count_timeout)
                            dest_active_passed, dest_replica_passed = \
                                dest_cluster.verify_items_count(timeout=self._item_count_timeout)

                        src_cluster.verify_data(
                            max_verify=self._max_verify,
                            skip=skip_verify_data,
                            only_store_hash=self.only_store_hash)
                        dest_cluster.verify_data(
                            max_verify=self._max_verify,
                            skip=skip_verify_data,
                            only_store_hash=self.only_store_hash)
                        for _, cluster in enumerate(self.get_cb_clusters()):
                            for bucket in cluster.get_buckets():
                                h = httplib2.Http(".cache")
                                resp, content = h.request(
                                    "http://{0}:4984/db/_all_docs".format(
                                        cluster.get_master_node().ip))
                                self.assertEqual(
                                    json.loads(content)['total_rows'],
                                    self._num_items)
                                client = SDKClient(
                                    scheme="couchbase",
                                    hosts=[cluster.get_master_node().ip],
                                    bucket=bucket.name).cb
                                for i in range(self._num_items):
                                    key = 'k_%s_%s' % (i, str(cluster).replace(
                                        ' ', '_').replace('.', '_').replace(
                                            ',', '_').replace(':', '_'))
                                    res = client.get(key)
                                    for xk, xv in res.value.items():
                                        rv = client.mutate_in(
                                            key, SD.get(xk, xattr=True))
                                        self.assertTrue(rv.exists(xk))
                                        self.assertEqual(xv, rv[xk])
                                    if sg_run:
                                        resp, content = h.request(
                                            "http://{0}:4984/db/{1}".format(
                                                cluster.get_master_node().ip,
                                                key))
                                        self.assertEqual(
                                            json.loads(content)['_id'], key)
                                        self.assertEqual(
                                            json.loads(content)[xk], xv)
                                        self.assertTrue('2-' in json.loads(
                                            content)['_rev'])
                    except Exception as e:
                        self.log.error(e)
                    finally:
                        if not sg_run:
                            rev_err_count = self.verify_rev_ids(
                                remote_cluster_ref.get_replications(),
                                skip=skip_verify_revid)
                            # we're done with the test, now report specific errors
                            if (not (src_active_passed and dest_active_passed)) and \
                                    (not (src_dcp_queue_drained and dest_dcp_queue_drained)):
                                self.fail(
                                    "Incomplete replication: Keys stuck in dcp queue"
                                )
                            if not (src_active_passed and dest_active_passed):
                                self.fail(
                                    "Incomplete replication: Active key count is incorrect"
                                )
                            if not (src_replica_passed
                                    and dest_replica_passed):
                                self.fail(
                                    "Incomplete intra-cluster replication: "
                                    "replica count did not match active count")
                            if rev_err_count > 0:
                                self.fail(
                                    "RevID verification failed for remote-cluster: {0}"
                                    .format(remote_cluster_ref))

        # treat errors in self.__report_error_list as failures
        if len(self.get_report_error_list()) > 0:
            error_logger = self.check_errors_in_goxdcr_logs()
            if error_logger:
                self.fail("Errors found in logs : {0}".format(error_logger))