Example #1
    def __load_chain(self, start_num=0):
        for i, cluster in enumerate(self.get_cb_clusters()):
            if self._rdirection == REPLICATION_DIRECTION.BIDIRECTION:
                # Bidirectional replication: load every cluster in the chain
                # (the enumerate index never exceeds len - 1, so this never breaks).
                if i > len(self.get_cb_clusters()) - 1:
                    break
            else:
                # Unidirectional chain: skip the last cluster, which only
                # receives replicated data.
                if i >= len(self.get_cb_clusters()) - 1:
                    break
            if not self._dgm_run:
                for bucket in cluster.get_buckets():
                    client = SDKClient(scheme="couchbase", hosts=[cluster.get_master_node().ip],
                                       bucket=bucket.name).cb
                    for i in range(start_num, start_num + self._num_items):
                        # Derive a key that is unique per item and per cluster.
                        key = 'k_%s_%s' % (i, str(cluster).replace(' ', '_').
                                           replace('.', '_').replace(',', '_').replace(':', '_'))
                        value = {'xattr_%s' % i: 'value%s' % i}
                        client.upsert(key, value)
                        # Write the same pair again as a hidden extended attribute (xattr).
                        client.mutate_in(key, SD.upsert('xattr_%s' % i, 'value%s' % i,
                                                        xattr=True,
                                                        create_parents=True))
                        # Mirror the mutation into the local kv_store so later
                        # verification steps can compare against it.
                        partition = bucket.kvs[1].acquire_partition(key)
                        if self.only_store_hash:
                            value = str(crc32.crc32_hash(value))
                        res = client.get(key)
                        partition.set(key, json.dumps(value), 0, res.flags)
                        bucket.kvs[1].release_partition(key)

            else:
                cluster.load_all_buckets_till_dgm(
                    active_resident_threshold=self._active_resident_threshold,
                    items=self._num_items)
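All of the examples on this page revolve around the same sub-document xattr pattern: write a regular document body, then attach a hidden extended attribute with mutate_in and SD.upsert(..., xattr=True, create_parents=True). A minimal standalone sketch of that pattern, assuming the legacy Python SDK 2.x Bucket API with a placeholder host and bucket:

from couchbase.bucket import Bucket
import couchbase.subdocument as SD

cb = Bucket('couchbase://127.0.0.1/default')

cb.upsert('doc1', {'field': 'value'})  # regular document body
cb.mutate_in('doc1', SD.upsert('my_xattr', 'x_value',  # hidden extended attribute
                               xattr=True, create_parents=True))

# Read the xattr back; rv[0] is the value of the first lookup spec.
rv = cb.lookup_in('doc1', SD.get('my_xattr', xattr=True))
print(rv[0])  # -> 'x_value'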
Example #3
    def test_MB_32114(self):
        try:
            from sdk_client import SDKClient
        except ImportError:
            from sdk_client3 import SDKClient
        import couchbase.subdocument as SD

        rest = RestConnection(self.master)
        client = VBucketAwareMemcached(rest, 'default')
        if self.maxttl:
            self._expiry_pager(self.master)
        sdk_client = SDKClient(scheme='couchbase',
                               hosts=[self.master.ip],
                               bucket='default')
        KEY_NAME = 'key1'

        for i in range(1000):
            mcd = client.memcached(KEY_NAME + str(i))
            rc = mcd.set(KEY_NAME + str(i), 0, 0,
                         json.dumps({'value': 'value2'}))
            sdk_client.mutate_in(
                KEY_NAME + str(i),
                SD.upsert("subdoc_key",
                          "subdoc_val",
                          xattr=True,
                          create_parents=True))
            # wait for it to persist
            persisted = 0
            while persisted == 0:
                opaque, rep_time, persist_time, persisted, cas = client.observe(
                    KEY_NAME + str(i))

        start_time = time.time()
        self._load_doc_data_all_buckets(batch_size=1000)
        end_time = time.time()

        for i in range(1000):
            try:
                mcd = client.memcached(KEY_NAME + str(i))
                _, flags, exp, seqno, cas = mcd.getMeta(KEY_NAME + str(i))
                rc = mcd.del_with_meta(KEY_NAME + str(i), 0, 0, 2, cas + 1)
            except MemcachedError as error:
                self.fail("Exception with del_with_meta - {0}".format(error))
        self.cluster.compact_bucket(self.master, "default")
        if self.maxttl:
            time_to_sleep = (self.maxttl - (end_time - start_time)) + 20
            self.sleep(int(time_to_sleep))
        else:
            self.sleep(60)
        active_bucket_items = rest.get_active_key_count("default")
        replica_bucket_items = rest.get_replica_key_count("default")
        print('active_bucket_items ', active_bucket_items)
        print('replica_bucket_items ', replica_bucket_items)
        if active_bucket_items * self.num_replicas != replica_bucket_items:
            self.fail("Mismatch in data !!!")
Example #4
    def test_MB_36087(self):
        try:
            from sdk_client import SDKClient
        except ImportError:
            from sdk_client3 import SDKClient
        import couchbase.subdocument as SD

        g_key = "test_doc"
        bucket_name = "default"
        sdk_client = SDKClient(scheme='couchbase',
                               hosts=[self.master.ip],
                               bucket=bucket_name)
        rest = RestConnection(self.master)
        client = VBucketAwareMemcached(rest, bucket_name)
        for i in range(self.num_items):
            key = g_key + str(i)
            mcd = client.memcached(key)
            rc = mcd.set(key, 0, 0, json.dumps({'value': 'value2'}))
            sdk_client.mutate_in(
                key,
                SD.upsert("subdoc_key",
                          "subdoc_val",
                          xattr=True,
                          create_parents=True))
            # Wait for key to persist
            persisted = 0
            while persisted == 0:
                opaque, rep_time, persist_time, persisted, cas = \
                    client.observe(key)

            time.sleep(10)
            # Evict the key
            try:
                rc = mcd.evict_key(key)
            except MemcachedError as error:
                self.fail("Exception with evict_key - %s" % error)

            # Perform del_with_meta
            try:
                mcd = client.memcached(key)
                _, flags, exp, seqno, cas = mcd.getMeta(key)
                rc = mcd.del_with_meta(key, 0, 0, 2, cas + 1)
            except MemcachedError as error:
                self.fail("Exception with del_with_meta - {0}".format(error))
Example #5
    def run(self, *args, **kw):
        # SERVER_IP, ITERATIONS, and DOCID are constants defined elsewhere
        # in the original script.
        cb = SDKClient(scheme="couchbase", hosts=[SERVER_IP],
                       bucket='default').cb
        for x in range(ITERATIONS):
            cb.mutate_in(DOCID, SD.array_append('recs', 1))
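Note that SD.array_append fails with a path-not-found error when the target array does not exist yet, so the document either has to be seeded with the array up front or the append has to pass create_parents=True. A minimal sketch of both options, assuming SDK 2.x and placeholder names:

from couchbase.bucket import Bucket
import couchbase.subdocument as SD

cb = Bucket('couchbase://127.0.0.1/default')

# Option 1: seed the array when creating the document.
cb.upsert('docid', {'recs': []})
cb.mutate_in('docid', SD.array_append('recs', 1))

# Option 2: let the SDK create the array on first append.
cb.mutate_in('docid', SD.array_append('recs2', 1, create_parents=True))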
Example #6
    def verify_results(self,
                       skip_verify_data=[],
                       skip_verify_revid=[],
                       sg_run=False):
        """Verify data between each couchbase and remote clusters.
        Run below steps for each source and destination cluster..
            1. Run expiry pager.
            2. Wait for disk queue size to 0 on each nodes.
            3. Wait for Outbound mutations to 0.
            4. Wait for Items counts equal to kv_store size of buckets.
            5. Verify items value on each bucket.
            6. Verify Revision id of each item.
        """
        skip_key_validation = self._input.param("skip_key_validation", False)
        self.__merge_all_buckets()
        for cb_cluster in self.get_cb_clusters():
            for remote_cluster_ref in cb_cluster.get_remote_clusters():
                try:
                    src_cluster = remote_cluster_ref.get_src_cluster()
                    dest_cluster = remote_cluster_ref.get_dest_cluster()

                    if self._evict_with_compactor:
                        for b in src_cluster.get_buckets():
                            # Compaction is only needed on the source cluster;
                            # evictions are propagated to the remote cluster.
                            src_cluster.get_cluster().compact_bucket(
                                src_cluster.get_master_node(), b)

                    else:
                        src_cluster.run_expiry_pager()
                        dest_cluster.run_expiry_pager()

                    src_cluster.wait_for_flusher_empty()
                    dest_cluster.wait_for_flusher_empty()

                    src_dcp_queue_drained = src_cluster.wait_for_dcp_queue_drain()
                    dest_dcp_queue_drained = dest_cluster.wait_for_dcp_queue_drain()

                    src_cluster.wait_for_outbound_mutations()
                    dest_cluster.wait_for_outbound_mutations()
                except Exception as e:
                    # just log any exception thrown, do not fail test
                    self.log.error(e)
                if not skip_key_validation:
                    try:
                        if not sg_run:
                            src_active_passed, src_replica_passed = \
                                src_cluster.verify_items_count(timeout=self._item_count_timeout)
                            dest_active_passed, dest_replica_passed = \
                                dest_cluster.verify_items_count(timeout=self._item_count_timeout)

                        src_cluster.verify_data(
                            max_verify=self._max_verify,
                            skip=skip_verify_data,
                            only_store_hash=self.only_store_hash)
                        dest_cluster.verify_data(
                            max_verify=self._max_verify,
                            skip=skip_verify_data,
                            only_store_hash=self.only_store_hash)
                        for cluster in self.get_cb_clusters():
                            for bucket in cluster.get_buckets():
                                h = httplib2.Http(".cache")
                                resp, content = h.request(
                                    "http://{0}:4984/db/_all_docs".format(
                                        cluster.get_master_node().ip))
                                self.assertEqual(
                                    json.loads(content)['total_rows'],
                                    self._num_items)
                                client = SDKClient(
                                    scheme="couchbase",
                                    hosts=[cluster.get_master_node().ip],
                                    bucket=bucket.name).cb
                                for i in range(self._num_items):
                                    key = 'k_%s_%s' % (i, str(cluster).replace(
                                        ' ', '_').replace('.', '_').replace(
                                            ',', '_').replace(':', '_'))
                                    res = client.get(key)
                                    for xk, xv in res.value.items():
                                        rv = client.mutate_in(
                                            key, SD.get(xk, xattr=True))
                                        self.assertTrue(rv.exists(xk))
                                        self.assertEqual(xv, rv[xk])
                                    if sg_run:
                                        resp, content = h.request(
                                            "http://{0}:4984/db/{1}".format(
                                                cluster.get_master_node().ip,
                                                key))
                                        self.assertEqual(
                                            json.loads(content)['_id'], key)
                                        self.assertEqual(
                                            json.loads(content)[xk], xv)
                                        self.assertTrue('2-' in json.loads(
                                            content)['_rev'])
                    except Exception as e:
                        self.log.error(e)
                    finally:
                        if not sg_run:
                            rev_err_count = self.verify_rev_ids(
                                remote_cluster_ref.get_replications(),
                                skip=skip_verify_revid)
                            # we're done with the test, now report specific errors
                            if (not (src_active_passed and dest_active_passed)) and \
                                    (not (src_dcp_queue_drained and dest_dcp_queue_drained)):
                                self.fail(
                                    "Incomplete replication: Keys stuck in dcp queue"
                                )
                            if not (src_active_passed and dest_active_passed):
                                self.fail(
                                    "Incomplete replication: Active key count is incorrect"
                                )
                            if not (src_replica_passed
                                    and dest_replica_passed):
                                self.fail(
                                    "Incomplete intra-cluster replication: "
                                    "replica count did not match active count")
                            if rev_err_count > 0:
                                self.fail(
                                    "RevID verification failed for remote-cluster: {0}"
                                    .format(remote_cluster_ref))

        # treat errors in self.__report_error_list as failures
        if len(self.get_report_error_list()) > 0:
            error_logger = self.check_errors_in_goxdcr_logs()
            if error_logger:
                self.fail("Errors found in logs : {0}".format(error_logger))