Example #1
    def key_not_exists_test(self):
        client = SDKClient([self.cluster.master], self.bucket)
        collections = BucketUtils.get_random_collections([self.bucket], 1, 1,
                                                         1)
        scope_dict = collections[self.bucket.name]["scopes"]
        scope_name = scope_dict.keys()[0]
        collection_name = scope_dict[scope_name]["collections"].keys()[0]
        client.select_collection(scope_name, collection_name)
        self.log.info("CAS test on collection %s: %s" %
                      (scope_name, collection_name))

        load_gen = doc_generator(self.key, 0, self.num_items, doc_size=256)
        key, val = load_gen.next()

        for _ in range(1500):
            result = client.crud("create",
                                 key,
                                 val,
                                 durability=self.durability_level,
                                 timeout=self.sdk_timeout)
            if result["status"] is False:
                self.log_failure("Create failed: %s" % result)
            create_cas = result["cas"]

            # Delete and verify get fails
            result = client.crud("delete",
                                 key,
                                 durability=self.durability_level,
                                 timeout=self.sdk_timeout)
            if result["status"] is False:
                self.log_failure("Delete failed: %s" % result)
            elif result["cas"] <= create_cas:
                self.log_failure("Delete returned invalid cas: %s" % result)

            result = client.crud("read", key, timeout=self.sdk_timeout)
            if result["status"] is True:
                self.log_failure("Read succeeded after delete: %s" % result)
            elif SDKException.DocumentNotFoundException \
                    not in str(result["error"]):
                self.log_failure("Invalid exception during read "
                                 "for non-exists key: %s" % result)

            # A CAS error should fail fast instead of stalling the test for
            # 10 seconds, and the correct error must be thrown for the attempt
            result = client.crud("replace",
                                 key,
                                 val,
                                 exp=60,
                                 timeout=self.sdk_timeout,
                                 cas=create_cas)
            if result["status"] is True:
                self.log_failure("Replace succeeded after delete: %s" % result)
            elif SDKException.DocumentNotFoundException \
                    not in str(result["error"]):
                self.log_failure("Invalid exception during replace "
                                 "of a non-existent key: %s" % result)

            # Validate doc count as per bucket collections
            self.bucket_util.validate_docs_per_collections_all_buckets()
            self.validate_test_failure()
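
Note: every CRUD in these examples is checked through the same pattern on the
dict returned by client.crud() (a "status" flag, the "cas" value and an "error"
string, as used above). A minimal sketch of that pattern factored into a helper;
the name check_crud_result is hypothetical and not part of the framework:

def check_crud_result(test, op_name, result, expected_error=None):
    """Validate a single client.crud() result dict.

    test           - test instance exposing log_failure()
    op_name        - label used in failure messages, e.g. "replace"
    expected_error - SDKException name expected in result["error"],
                     or None when the operation should succeed
    """
    if expected_error is None:
        if result["status"] is False:
            test.log_failure("%s failed: %s" % (op_name, result))
    elif result["status"] is True:
        test.log_failure("%s unexpectedly succeeded: %s" % (op_name, result))
    elif expected_error not in str(result["error"]):
        test.log_failure("Invalid exception during %s: %s" % (op_name, result))

With such a helper, the replace-after-delete check above would read
check_crud_result(self, "replace", result,
                  SDKException.DocumentNotFoundException).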
Example #2
    def test_doc_key_size(self):
        """
        Insert document key with min and max key size on each available
        collection and validate
        :return:
        """
        min_doc_gen = doc_generator("test_min_key_size",
                                    0,
                                    self.num_items,
                                    key_size=1,
                                    doc_size=self.doc_size,
                                    mix_key_size=False,
                                    randomize_doc_size=False)
        max_doc_gen = doc_generator("test_max_key_size",
                                    0,
                                    self.num_items,
                                    key_size=245,
                                    doc_size=self.doc_size,
                                    mix_key_size=False,
                                    randomize_doc_size=False)
        # Set to keep track of all inserted CAS values
        known_cas = set()

        # Client to insert docs under different collections
        client = SDKClient([self.cluster.master],
                           self.bucket,
                           compression_settings=self.sdk_compression)

        for doc_gen in [min_doc_gen, max_doc_gen]:
            while doc_gen.has_next():
                key, value = doc_gen.next()
                for _, scope in self.bucket.scopes.items():
                    for _, collection in scope.collections.items():
                        client.select_collection(scope.name, collection.name)
                        result = client.crud("create",
                                             key,
                                             value,
                                             self.maxttl,
                                             durability=self.durability_level,
                                             timeout=self.sdk_timeout,
                                             time_unit="seconds")
                        if result["status"] is False:
                            self.log_failure(
                                "Doc create failed for key '%s' "
                                "collection::scope %s::%s - %s" %
                                (key, scope.name, collection.name, result))
                        else:
                            if result["cas"] in known_cas:
                                self.log_failure(
                                    "Same CAS exists under different "
                                    "collection: %s" % result)
                            collection.num_items += 1
                            known_cas.add(result["cas"])

        # Close SDK connection
        client.close()

        # Validate doc count as per bucket collections
        self.bucket_util.validate_docs_per_collections_all_buckets()
        self.validate_test_failure()
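
Note: this example tracks CAS uniqueness with a plain set, so a clash only
reports the second location. Example #3 below switches to a dict keyed by CAS; a
small variant of that idea which also remembers where a CAS was first seen is
sketched here (the helper name record_cas is hypothetical):

def record_cas(known_cas, cas, location):
    """Record a CAS value and report clashes.

    known_cas - dict mapping CAS -> "scope::collection" where it was first seen
    Returns the original location on a clash, else None.
    """
    if cas in known_cas:
        return known_cas[cas]
    known_cas[cas] = location
    return None

In the loop above this would be called as
record_cas(known_cas, result["cas"], "%s::%s" % (scope.name, collection.name)),
with a log_failure() naming both collections when a non-None clash is returned.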
Example #3
    def test_doc_size(self):
        """
        Insert document with empty content and max size on each available
        collection and validate
        :return:
        """
        # Empty docs
        min_doc_size_gen = doc_generator("test_min_doc_size",
                                         0, self.num_items,
                                         key_size=self.key_size,
                                         doc_size=0,
                                         mix_key_size=False,
                                         randomize_doc_size=False)
        # 20 MB docs
        max_doc_size_gen = doc_generator("test_max_doc_size",
                                         0, self.num_items,
                                         key_size=self.key_size,
                                         doc_size=1024 * 1024 * 20,
                                         mix_key_size=False,
                                         randomize_doc_size=False)
        # Dict to keep track of all inserted CAS values
        # Format: known_cas[CAS] = list of vbuckets the CAS was seen in
        known_cas = dict()

        # Client to insert docs under different collections
        client = SDKClient([self.cluster.master], self.bucket,
                           compression_settings=self.sdk_compression)

        for doc_gen in [min_doc_size_gen, max_doc_size_gen]:
            while doc_gen.has_next():
                key, value = doc_gen.next()
                for _, scope in self.bucket.scopes.items():
                    for _, collection in scope.collections.items():
                        client.select_collection(scope.name, collection.name)
                        result = client.crud("create", key, value, self.maxttl,
                                             durability=self.durability_level,
                                             timeout=self.sdk_timeout,
                                             time_unit="seconds")
                        if result["status"] is False:
                            self.log_failure("Doc create failed for key '%s' "
                                             "collection::scope %s::%s - %s"
                                             % (key,
                                                scope.name,
                                                collection.name,
                                                result))
                        else:
                            self.__validate_cas_for_key(key, result, known_cas)
                            collection.num_items += 1

        # Close SDK connection
        client.close()

        # Validate doc count as per bucket collections
        self.bucket_util.validate_docs_per_collections_all_buckets()
        self.validate_test_failure()
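
Note: the helper __validate_cas_for_key() called above is not shown in this
example. Going by the known_cas comment ("known_cas[CAS] = list of vbuckets")
and the get_vbucket_num_for_key() call used in Example #6, a plausible sketch of
what it checks is the following; this is an assumption, not the framework's
actual implementation:

    def __validate_cas_for_key(self, key, result, known_cas):
        """Fail the test if a CAS value repeats within a single vbucket."""
        cas = result["cas"]
        vb_num = self.bucket_util.get_vbucket_num_for_key(key)
        if cas in known_cas and vb_num in known_cas[cas]:
            self.log_failure("Same CAS %s reused within vbucket %s for key %s"
                             % (cas, vb_num, key))
        known_cas.setdefault(cas, list()).append(vb_num)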
Example #4
    def touch_test(self):
        self.log.info("Loading bucket into DGM")
        load_gen = doc_generator(self.key,
                                 0,
                                 self.num_items,
                                 doc_size=self.doc_size)
        dgm_gen = doc_generator(self.key, self.num_items, self.num_items + 1)
        dgm_task = self.task.async_load_gen_docs(
            self.cluster,
            self.bucket_util.buckets[0],
            dgm_gen,
            "create",
            0,
            persist_to=self.persist_to,
            replicate_to=self.replicate_to,
            durability=self.durability_level,
            timeout_secs=self.sdk_timeout,
            batch_size=10,
            process_concurrency=4,
            active_resident_threshold=self.active_resident_threshold)
        self.task_manager.get_task_result(dgm_task)

        self.log.info("Touch intial self.num_items docs which are "
                      "residing on disk due to DGM")
        client = SDKClient([self.cluster.master], self.bucket_util.buckets[0])
        collections = BucketUtils.get_random_collections(
            self.bucket_util.buckets, 2, 2, 1)
        for self.bucket_name, scope_dict in collections.items():
            bucket = BucketUtils.get_bucket_obj(self.bucket_util.buckets,
                                                self.bucket_name)
            scope_dict = scope_dict["scopes"]
            for scope_name, collection_dict in scope_dict.items():
                collection_dict = collection_dict["collections"]
                for c_name, c_data in collection_dict.items():
                    self.log.info("CAS test on collection %s: %s" %
                                  (scope_name, c_name))
                    client.select_collection(scope_name, c_name)
                    while load_gen.has_next():
                        key, _ = load_gen.next()
                        result = client.crud("touch",
                                             key,
                                             durability=self.durability_level,
                                             timeout=self.sdk_timeout)
                        if result["status"] is not True:
                            self.log_failure("Touch on %s failed: %s" %
                                             (key, result))
        client.close()
        self.bucket_util._wait_for_stats_all_buckets()
        # Validate doc count as per bucket collections
        self.bucket_util.validate_docs_per_collections_all_buckets()
        self.validate_test_failure()
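
Note: load_gen is consumed by the first collection the inner while loop touches,
so any further collections returned by get_random_collections() receive no touch
operations. If the intent is to touch the docs in every selected collection, the
generator can be reset per collection, the way Example #7 resets its generators
between passes. A minimal sketch of the inner loop under that assumption
(doc_generator is assumed to expose the reset() method used in Example #7):

                for c_name, c_data in collection_dict.items():
                    self.log.info("Touch test on scope::collection %s::%s" %
                                  (scope_name, c_name))
                    client.select_collection(scope_name, c_name)
                    load_gen.reset()  # start again from the first key
                    while load_gen.has_next():
                        key, _ = load_gen.next()
                        result = client.crud("touch",
                                             key,
                                             durability=self.durability_level,
                                             timeout=self.sdk_timeout)
                        if result["status"] is not True:
                            self.log_failure("Touch on %s failed: %s" %
                                             (key, result))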
Example #5
    def test_sync_write_in_progress(self):
        doc_ops = self.input.param("doc_ops", "create;create").split(';')
        shell_conn = dict()
        cbstat_obj = dict()
        error_sim = dict()
        vb_info = dict()
        active_vbs = dict()
        replica_vbs = dict()

        # Override durability level and error to simulate based on d_level
        self.__get_d_level_and_error_to_simulate()

        # SDK client for performing doc_ops locally
        client = SDKClient([self.cluster.master], self.bucket)

        target_nodes = DurabilityHelper.getTargetNodes(self.cluster,
                                                       self.nodes_init,
                                                       self.num_nodes_affected)
        for node in target_nodes:
            shell_conn[node.ip] = RemoteMachineShellConnection(node)
            cbstat_obj[node.ip] = Cbstats(node)
            vb_info["init"] = dict()
            vb_info["init"][node.ip] = cbstat_obj[node.ip].vbucket_seqno(
                self.bucket.name)
            error_sim[node.ip] = CouchbaseError(self.log, shell_conn[node.ip])
            # Fetch affected nodes' active and replica vbucket lists
            active_vbs[node.ip] = cbstat_obj[node.ip].vbucket_list(
                self.bucket.name, vbucket_type="active")
            replica_vbs[node.ip] = cbstat_obj[node.ip].vbucket_list(
                self.bucket.name, vbucket_type="replica")

        if self.durability_level \
                == Bucket.DurabilityLevel.MAJORITY_AND_PERSIST_TO_ACTIVE:
            target_vbs = active_vbs
            target_vbuckets = list()
            for target_node in target_nodes:
                target_vbuckets += target_vbs[target_node.ip]
        else:
            target_vbuckets = replica_vbs[target_nodes[0].ip]
            for extra_node in target_nodes[1:]:
                target_vbuckets = list(
                    set(target_vbuckets).intersection(
                        set(replica_vbs[extra_node.ip])))

        doc_load_spec = dict()
        doc_load_spec["doc_crud"] = dict()
        doc_load_spec["doc_crud"][MetaCrudParams.DocCrud.COMMON_DOC_KEY] \
            = "test_collections"
        doc_load_spec[MetaCrudParams.TARGET_VBUCKETS] = target_vbuckets
        doc_load_spec[MetaCrudParams.COLLECTIONS_CONSIDERED_FOR_CRUD] = 5
        doc_load_spec[MetaCrudParams.SCOPES_CONSIDERED_FOR_CRUD] = "all"
        doc_load_spec[MetaCrudParams.DURABILITY_LEVEL] = self.durability_level
        doc_load_spec[MetaCrudParams.SDK_TIMEOUT] = 60

        if doc_ops[0] == DocLoading.Bucket.DocOps.CREATE:
            doc_load_spec["doc_crud"][
                MetaCrudParams.DocCrud.CREATE_PERCENTAGE_PER_COLLECTION] = 1
        elif doc_ops[0] == DocLoading.Bucket.DocOps.UPDATE:
            doc_load_spec["doc_crud"][
                MetaCrudParams.DocCrud.UPDATE_PERCENTAGE_PER_COLLECTION] = 1
        elif doc_ops[0] == DocLoading.Bucket.DocOps.REPLACE:
            doc_load_spec["doc_crud"][
                MetaCrudParams.DocCrud.REPLACE_PERCENTAGE_PER_COLLECTION] = 1
        elif doc_ops[0] == DocLoading.Bucket.DocOps.DELETE:
            doc_load_spec["doc_crud"][
                MetaCrudParams.DocCrud.DELETE_PERCENTAGE_PER_COLLECTION] = 1

        # Induce error condition for testing
        for node in target_nodes:
            error_sim[node.ip].create(self.simulate_error,
                                      bucket_name=self.bucket.name)
            self.sleep(3, "Wait for error simulation to take effect")

        doc_loading_task = \
            self.bucket_util.run_scenario_from_spec(
                self.task,
                self.cluster,
                self.cluster.buckets,
                doc_load_spec,
                async_load=True)

        self.sleep(5, "Wait for doc ops to reach server")

        for bucket, s_dict in doc_loading_task.loader_spec.items():
            for s_name, c_dict in s_dict["scopes"].items():
                for c_name, c_meta in c_dict["collections"].items():
                    client.select_collection(s_name, c_name)
                    for op_type in c_meta:
                        key, value = c_meta[op_type]["doc_gen"].next()
                        if self.with_non_sync_writes:
                            fail = client.crud(doc_ops[1],
                                               key,
                                               value,
                                               exp=0,
                                               timeout=2,
                                               time_unit="seconds")
                        else:
                            fail = client.crud(
                                doc_ops[1],
                                key,
                                value,
                                exp=0,
                                durability=self.durability_level,
                                timeout=2,
                                time_unit="seconds")

                        expected_exception = \
                            SDKException.AmbiguousTimeoutException
                        retry_reason = \
                            SDKException.RetryReason.KV_SYNC_WRITE_IN_PROGRESS
                        if doc_ops[0] == DocLoading.Bucket.DocOps.CREATE \
                                and doc_ops[1] in \
                                [DocLoading.Bucket.DocOps.DELETE,
                                 DocLoading.Bucket.DocOps.REPLACE]:
                            expected_exception = \
                                SDKException.DocumentNotFoundException
                            retry_reason = None

                        # Validate the returned error from the SDK
                        if expected_exception not in str(fail["error"]):
                            self.log_failure("Invalid exception for %s: %s" %
                                             (key, fail["error"]))
                        if retry_reason \
                                and retry_reason not in str(fail["error"]):
                            self.log_failure(
                                "Invalid retry reason for %s: %s" %
                                (key, fail["error"]))

                        # Try reading the value in SyncWrite state
                        fail = client.crud("read", key)
                        if doc_ops[0] == "create":
                            # Expected KeyNotFound in case of CREATE op
                            if fail["status"] is True:
                                self.log_failure(
                                    "%s returned value during SyncWrite %s" %
                                    (key, fail))
                        else:
                            # Expects prev val in case of other operations
                            if fail["status"] is False:
                                self.log_failure(
                                    "Key %s read failed for prev value: %s" %
                                    (key, fail))

        # Revert the introduced error condition
        for node in target_nodes:
            error_sim[node.ip].revert(self.simulate_error,
                                      bucket_name=self.bucket.name)

        # Wait for doc_loading to complete
        self.task_manager.get_task_result(doc_loading_task)
        self.bucket_util.validate_doc_loading_results(doc_loading_task)
        if doc_loading_task.result is False:
            self.log_failure("Doc CRUDs failed")

        # Close the SDK client
        client.close()
        self.validate_test_failure()
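
Note: the expected-error selection in the loop above is effectively a small
decision table. A blocked follow-up op normally surfaces as an
AmbiguousTimeoutException with the KV_SYNC_WRITE_IN_PROGRESS retry reason, but
when the in-flight SyncWrite is a CREATE and the follow-up op is DELETE or
REPLACE the document is not visible yet, so DocumentNotFoundException is
expected instead. A minimal sketch of that mapping, assuming the DocLoading and
SDKException constants imported by the test (the helper name
expected_sync_write_error is hypothetical):

def expected_sync_write_error(first_op, second_op):
    """Return (expected_exception, retry_reason) for the blocked follow-up op."""
    if first_op == DocLoading.Bucket.DocOps.CREATE \
            and second_op in [DocLoading.Bucket.DocOps.DELETE,
                              DocLoading.Bucket.DocOps.REPLACE]:
        # The pending CREATE is not committed, so the key looks absent
        return SDKException.DocumentNotFoundException, None
    # Otherwise the op times out while the SyncWrite is still in progress
    return (SDKException.AmbiguousTimeoutException,
            SDKException.RetryReason.KV_SYNC_WRITE_IN_PROGRESS)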
Example #6
    def verify_cas(self, ops, generator, scope, collection):
        """
        Verify CAS value manipulation.

        For update, we use the latest CAS value returned by the mutation
        to mutate the item again and verify that no exception is raised;
        we should be able to mutate the item with its latest CAS value.
        For delete(), after it is called, we try to mutate the item with
        the pre-delete CAS value. We should see an SDK error, otherwise
        the test should fail.
        For expire, we verify that the item can no longer be mutated with
        its old CAS value because it has already expired.
        """

        for bucket in self.bucket_util.buckets:
            client = SDKClient([self.cluster.master], bucket)
            client.select_collection(scope, collection)
            self.log.info("CAS test on collection %s: %s" %
                          (scope, collection))
            gen = generator
            while gen.has_next():
                key, value = gen.next()
                vb_of_key = self.bucket_util.get_vbucket_num_for_key(key)
                active_node_ip = None
                for node_ip in self.shell_conn.keys():
                    if vb_of_key in self.vb_details[node_ip]["active"]:
                        active_node_ip = node_ip
                        break
                self.log.info("Performing %s on key %s" % (ops, key))
                if ops in ["update", "touch"]:
                    for x in range(self.mutate_times):
                        old_cas = client.crud("read", key, timeout=10)["cas"]
                        if ops == 'update':
                            result = client.crud(
                                "replace",
                                key,
                                value,
                                durability=self.durability_level,
                                cas=old_cas)
                        else:
                            prev_exp = 0
                            for exp in [0, 60, 0, 0]:
                                result = client.touch(
                                    key,
                                    exp,
                                    durability=self.durability_level,
                                    timeout=self.sdk_timeout)
                                if exp == prev_exp:
                                    if result["cas"] != old_cas:
                                        self.log_failure(
                                            "CAS updated for "
                                            "touch with same exp: %s" % result)
                                else:
                                    if result["cas"] == old_cas:
                                        self.log_failure(
                                            "CAS not updated %s == %s" %
                                            (old_cas, result["cas"]))
                                    old_cas = result["cas"]
                                prev_exp = exp

                        if result["status"] is False:
                            client.close()
                            self.log_failure("Touch / replace with cas failed")
                            return

                        new_cas = result["cas"]
                        if ops == 'update':
                            if old_cas == new_cas:
                                self.log_failure("CAS old (%s) == new (%s)" %
                                                 (old_cas, new_cas))

                            if result["value"] != value:
                                self.log_failure("Value mismatch. "
                                                 "%s != %s" %
                                                 (result["value"], value))
                            else:
                                self.log.debug(
                                    "Mutate %s with CAS %s successfully! "
                                    "Current CAS: %s" %
                                    (key, old_cas, new_cas))

                        active_read = client.crud("read",
                                                  key,
                                                  timeout=self.sdk_timeout)
                        active_cas = active_read["cas"]
                        replica_cas = -1
                        cas_in_active_node = \
                            self.cb_stat[active_node_ip].vbucket_details(
                                bucket.name)[str(vb_of_key)]["max_cas"]
                        if str(cas_in_active_node) != str(new_cas):
                            self.log_failure("CbStats CAS mismatch. %s != %s" %
                                             (cas_in_active_node, new_cas))

                        poll_count = 0
                        max_retry = 5
                        while poll_count < max_retry:
                            replica_read = client.getFromAllReplica(key)[0]
                            replica_cas = replica_read["cas"]
                            if active_cas == replica_cas \
                                    or self.durability_level:
                                break
                            poll_count = poll_count + 1
                            self.sleep(1, "Retry read CAS from replica..")

                        if active_cas != replica_cas:
                            self.log_failure("Replica CAS mismatch. %s != %s" %
                                             (active_cas, replica_cas))
                elif ops == "delete":
                    old_cas = client.crud("read", key, timeout=10)["cas"]
                    result = client.crud("delete",
                                         key,
                                         durability=self.durability_level,
                                         timeout=self.sdk_timeout)
                    self.log.info("CAS after delete of key %s: %s" %
                                  (key, result["cas"]))
                    result = client.crud("replace",
                                         key,
                                         "test",
                                         durability=self.durability_level,
                                         timeout=self.sdk_timeout,
                                         cas=old_cas)
                    if result["status"] is True:
                        self.log_failure("The item should already be deleted")
                    if SDKException.DocumentNotFoundException \
                            not in str(result["error"]):
                        self.log_failure("Invalid Exception: %s" % result)
                    if result["cas"] != 0:
                        self.log_failure("Delete returned invalid cas: %s, "
                                         "Expected 0" % result["cas"])
                    if result["cas"] == old_cas:
                        self.log_failure("Deleted doc returned old cas: %s " %
                                         old_cas)
                elif ops == "expire":
                    old_cas = client.crud("read", key, timeout=10)["cas"]
                    result = client.crud("touch", key, exp=self.expire_time)
                    if result["status"] is True:
                        if result["cas"] == old_cas:
                            self.log_failure("Touch failed to update CAS")
                    else:
                        self.log_failure("Touch operation failed")

                    self.sleep(self.expire_time + 1, "Wait for item to expire")
                    result = client.crud("replace",
                                         key,
                                         "test",
                                         durability=self.durability_level,
                                         timeout=self.sdk_timeout,
                                         cas=old_cas)
                    if result["status"] is True:
                        self.log_failure("Able to mutate %s with old cas: %s" %
                                         (key, old_cas))
                    if SDKException.DocumentNotFoundException \
                            not in str(result["error"]):
                        self.log_failure("Invalid error after expiry: %s" %
                                         result)
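
Note: for the delete path the example asserts four things about the stale-CAS
replace: it must not succeed, the error must contain DocumentNotFoundException,
the returned CAS must be 0, and it must not echo the pre-delete CAS. A minimal
sketch that bundles those checks (the helper name check_mutation_after_delete
is hypothetical):

def check_mutation_after_delete(result, old_cas, log_failure):
    """Validate a replace-with-stale-CAS attempt on a deleted document."""
    if result["status"] is True:
        log_failure("The item should already be deleted")
    if SDKException.DocumentNotFoundException not in str(result["error"]):
        log_failure("Invalid exception: %s" % result)
    if result["cas"] != 0:
        log_failure("Failed replace returned invalid cas: %s, expected 0"
                    % result["cas"])
    if result["cas"] == old_cas:
        log_failure("Deleted doc returned old cas: %s" % old_cas)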
Example #7
class SDKCompression(CollectionBase):
    def setUp(self):
        super(SDKCompression, self).setUp()

        self.key = self.input.param("key", "test-compression")
        self.bucket = self.bucket_util.buckets[0]

        self.diff_client_for_validation = \
            self.input.param("diff_client_for_validation", False)
        non_snappy_client = self.input.param("non_snappy_client", False)
        compression_settings = self.sdk_compression
        if non_snappy_client:
            compression_settings = None

        # Create required clients
        self.snappy_client = SDKClient(
            [self.cluster.master],
            self.bucket,
            compression_settings=self.sdk_compression)
        self.second_client = SDKClient(
            [self.cluster.master],
            self.bucket,
            compression_settings=compression_settings)

        # Create required doc_generators
        self.create_gen = doc_generator(self.key,
                                        0,
                                        self.num_items,
                                        key_size=self.key_size,
                                        doc_size=self.doc_size,
                                        randomize_doc_size=True,
                                        target_vbucket=self.target_vbucket,
                                        mutation_type="ADD")
        self.update_gen = doc_generator(self.key,
                                        0,
                                        self.num_items,
                                        key_size=self.key_size,
                                        doc_size=self.doc_size,
                                        randomize_doc_size=True,
                                        target_vbucket=self.target_vbucket,
                                        mutation_type="SET",
                                        mutate=1)

    def tearDown(self):
        # Close all SDK client connections before base-test teardown
        self.snappy_client.close()
        self.second_client.close()

        super(SDKCompression, self).tearDown()

    def test_compression_insert_validate(self):
        """
        1. Insert docs into multiple collections using the snappy client.
           All inserted docs will have random doc_size/content.
        2. Read back the docs from the same/different client and validate.
           The validating client can be either a snappy or non-snappy client.
        """
        random_clients = self.input.param("random_clients", False)
        s_name = None
        c_name = None
        bucket_dict = BucketUtils.get_random_collections(
            self.bucket_util.buckets,
            req_num=1,
            consider_scopes="all",
            consider_buckets="all")
        for bucket_name, scope_dict in bucket_dict.items():
            for scope_name, col_dict in scope_dict["scopes"].items():
                for collection_name, _ in col_dict["collections"].items():
                    s_name = scope_name
                    c_name = collection_name

        # Select clients for doc_ops based on user input params
        create_client = self.snappy_client
        update_client = self.snappy_client
        read_client = self.snappy_client
        if random_clients:
            sdk_clients = [self.snappy_client, self.second_client]
            create_client = sample(sdk_clients, 1)[0]
            update_client = sample(sdk_clients, 1)[0]
            read_client = sample(sdk_clients, 1)[0]
        elif self.diff_client_for_validation:
            read_client = self.second_client

        # Log each client's compression settings for debugging
        self.log.info("Create client's compression: %s" %
                      create_client.compression)
        self.log.info("Update client's compression: %s" %
                      update_client.compression)
        self.log.info("Read client's compression: %s" %
                      read_client.compression)

        self.log.info("Performing doc loading in bucket %s" % self.bucket)
        for _, scope in self.bucket.scopes.items():
            self.log.info("Mutating docs under scope: %s" % scope.name)
            for _, collection in scope.collections.items():
                # Select the target collection on the clients used for CRUDs
                create_client.select_collection(scope.name, collection.name)
                update_client.select_collection(scope.name, collection.name)
                while self.create_gen.has_next():
                    # Add new doc using the selected create client
                    key, value = self.create_gen.next()
                    result = create_client.crud(
                        "create",
                        key,
                        value,
                        exp=self.maxttl,
                        durability=self.durability_level)
                    if result["status"] is False:
                        self.log_failure("Key '%s' insert failed for %s: %s" %
                                         (key, collection.name, result))

                    # Mutate the same doc using the selected update client
                    key, value = self.update_gen.next()
                    result = update_client.crud(
                        "update",
                        key,
                        value,
                        exp=self.maxttl,
                        durability=self.durability_level)
                    if result["status"] is False:
                        self.log_failure("Key '%s' update failed for %s: %s" %
                                         (key, collection.name, result))

                # Reset doc_gens to be utilized by subsequent loaders
                self.create_gen.reset()
                self.update_gen.reset()
                # Validate and report fast failures per collection
                self.validate_test_failure()

        self.log.info("Validating docs in bucket %s" % self.bucket)
        for _, scope in self.bucket.scopes.items():
            self.log.info("Reading docs under scope: %s" % scope.name)
            for _, collection in scope.collections.items():
                read_client.select_collection(scope.name, collection.name)
                while self.update_gen.has_next():
                    key, value = self.update_gen.next()
                    result = read_client.crud("read", key)
                    if str(result["value"]) != str(value):
                        self.log_failure(
                            "Value mismatch for %s in collection %s: %s" %
                            (key, collection.name, result))

        self.validate_test_failure()
        self.bucket.scopes[s_name].collections[
            c_name].num_items += self.num_items
        self.bucket_util.validate_doc_count_as_per_collections(self.bucket)

    def test_compression_with_parallel_mutations_on_same_collection(self):
        """
        1. Insert docs into a single collection using both snappy/non-snappy
           clients in parallel (includes overlapping CRUDs on the same docs)
        2. Validate the results and stability of mutated docs
        """

        tasks = list()
        # Used for the doc_loading tasks' SDK client creation
        scope = None
        collection = None
        self.batch_size = 30

        bucket_dict = BucketUtils.get_random_collections(
            self.bucket_util.buckets,
            req_num=1,
            consider_scopes="all",
            consider_buckets="all")
        for bucket_name, scope_dict in bucket_dict.items():
            for scope_name, col_dict in scope_dict["scopes"].items():
                for collection_name, _ in col_dict["collections"].items():
                    scope = scope_name
                    collection = collection_name

        self.log.info("Creating doc generators")
        create_gen_1 = self.create_gen
        create_gen_2 = doc_generator(self.key,
                                     self.num_items,
                                     self.num_items * 2,
                                     key_size=self.key_size,
                                     doc_size=self.doc_size,
                                     randomize_doc_size=True,
                                     target_vbucket=self.target_vbucket,
                                     mutation_type="ADD")
        update_gen_1 = self.update_gen
        update_gen_2 = doc_generator(self.key,
                                     0,
                                     self.num_items,
                                     key_size=self.key_size,
                                     doc_size=self.doc_size,
                                     randomize_doc_size=True,
                                     target_vbucket=self.target_vbucket,
                                     mutation_type="SET",
                                     mutate=2)

        self.log.info("Loading initial docs into collection %s::%s" %
                      (scope, collection))
        tasks.append(
            self.task.async_load_gen_docs(self.cluster,
                                          self.bucket,
                                          create_gen_1,
                                          "create",
                                          self.maxttl,
                                          batch_size=self.batch_size,
                                          process_concurrency=3,
                                          replicate_to=self.replicate_to,
                                          persist_to=self.persist_to,
                                          durability=self.durability_level,
                                          compression=self.sdk_compression,
                                          timeout_secs=self.sdk_timeout,
                                          scope=scope,
                                          collection=collection))
        tasks.append(
            self.task.async_load_gen_docs(self.cluster,
                                          self.bucket,
                                          create_gen_2,
                                          "create",
                                          self.maxttl,
                                          batch_size=self.batch_size,
                                          process_concurrency=3,
                                          replicate_to=self.replicate_to,
                                          persist_to=self.persist_to,
                                          durability=self.durability_level,
                                          compression=None,
                                          timeout_secs=self.sdk_timeout,
                                          scope=scope,
                                          collection=collection))

        for task in tasks:
            self.task_manager.get_task_result(task)
            if task.fail.keys():
                self.log_failure("Failures during initial doc loading "
                                 "for keys: %s" % task.fail.keys())

        self.bucket.scopes[scope].collections[collection].num_items \
            += (self.num_items * 2)
        self.bucket_util.validate_doc_count_as_per_collections(self.bucket)

        self.log.info("Performing overlapping mutations")
        tasks = list()
        tasks.append(
            self.task.async_load_gen_docs(self.cluster,
                                          self.bucket,
                                          update_gen_1,
                                          "update",
                                          self.maxttl,
                                          batch_size=self.batch_size,
                                          process_concurrency=3,
                                          replicate_to=self.replicate_to,
                                          persist_to=self.persist_to,
                                          durability=self.durability_level,
                                          compression=self.sdk_compression,
                                          timeout_secs=self.sdk_timeout,
                                          task_identifier="update_1",
                                          scope=scope,
                                          collection=collection))
        tasks.append(
            self.task.async_load_gen_docs(self.cluster,
                                          self.bucket,
                                          update_gen_2,
                                          "update",
                                          self.maxttl,
                                          batch_size=self.batch_size,
                                          process_concurrency=3,
                                          replicate_to=self.replicate_to,
                                          persist_to=self.persist_to,
                                          durability=self.durability_level,
                                          compression=None,
                                          timeout_secs=self.sdk_timeout,
                                          task_identifier="update_2",
                                          scope=scope,
                                          collection=collection))
        for task in tasks:
            self.task_manager.get_task_result(task)
            if task.fail.keys():
                self.log_failure("Failures during %s updates for keys: %s" %
                                 (task.thread_name, task.fail.keys()))

        # Validate docs using a snappy/non-snappy client chosen at random
        task = self.task.async_validate_docs(
            self.cluster,
            self.bucket,
            create_gen_2,
            "create",
            compression=sample([self.sdk_compression, None], 1)[0],
            batch_size=self.batch_size,
            process_concurrency=3,
            scope=scope,
            collection=collection)
        self.task.jython_task_manager.get_task_result(task)

        # Intermediate collection-doc validation
        self.bucket_util.validate_doc_count_as_per_collections(self.bucket)

        self.log.info("Performing parallel deletes")
        tasks = list()
        tasks.append(
            self.task.async_load_gen_docs(self.cluster,
                                          self.bucket,
                                          create_gen_1,
                                          "delete",
                                          self.maxttl,
                                          batch_size=self.batch_size,
                                          process_concurrency=3,
                                          replicate_to=self.replicate_to,
                                          persist_to=self.persist_to,
                                          durability=self.durability_level,
                                          compression=self.sdk_compression,
                                          timeout_secs=self.sdk_timeout,
                                          scope=scope,
                                          collection=collection))
        tasks.append(
            self.task.async_load_gen_docs(self.cluster,
                                          self.bucket,
                                          create_gen_2,
                                          "delete",
                                          self.maxttl,
                                          batch_size=self.batch_size,
                                          process_concurrency=3,
                                          replicate_to=self.replicate_to,
                                          persist_to=self.persist_to,
                                          durability=self.durability_level,
                                          compression=self.sdk_compression,
                                          timeout_secs=self.sdk_timeout,
                                          scope=scope,
                                          collection=collection))

        for task in tasks:
            self.task_manager.get_task_result(task)
            if task.fail.keys():
                self.log_failure("Failures during initial doc loading "
                                 "for keys: %s" % task.fail.keys())

        # Doc validation
        self.bucket.scopes[scope].collections[collection].num_items \
            -= (self.num_items * 2)
        self.bucket_util.validate_doc_count_as_per_collections(self.bucket)
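
Note: the property both compression tests rely on is that Snappy compression is
transparent to readers; a value written through the snappy client must read back
identically through a non-snappy client and vice versa. A minimal standalone
sketch of that check, assuming the SDKClient constructor and crud() calls used
throughout these examples (the helper name read_back_matches is hypothetical):

def read_back_matches(cluster, bucket, scope, collection, key, expected_value,
                      compression_settings):
    """Read 'key' through a client with the given compression settings and
    compare the value against what was originally written."""
    client = SDKClient([cluster.master], bucket,
                       compression_settings=compression_settings)
    client.select_collection(scope, collection)
    result = client.crud("read", key)
    client.close()
    return str(result["value"]) == str(expected_value)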