Example #1
    def create_required_buckets(self):
        self.log.info("Get the available memory quota")
        self.info = self.rest.get_nodes_self()
        threshold_memory = 100
        # threshold_memory_vagrant = 100
        total_memory_in_mb = self.info.mcdMemoryReserved
        total_available_memory_in_mb = total_memory_in_mb
        active_service = self.info.services

        # If a service is already present on the node,
        # subtract its quota from the available memory
        if "index" in active_service:
            total_available_memory_in_mb -= self.info.indexMemoryQuota
        if "fts" in active_service:
            total_available_memory_in_mb -= self.info.ftsMemoryQuota
        if "cbas" in active_service:
            total_available_memory_in_mb -= self.info.cbasMemoryQuota
        if "eventing" in active_service:
            total_available_memory_in_mb -= self.info.eventingMemoryQuota

        available_memory = total_available_memory_in_mb - threshold_memory
        # available_memory =  total_available_memory_in_mb - threshold_memory_vagrant
        self.rest.set_service_memoryQuota(service='memoryQuota',
                                          memoryQuota=available_memory)

        # Creating buckets for data loading purpose
        self.log.info("Create CB buckets")
        duration = self.input.param("bucket_expiry", 0)
        eviction_policy = self.input.param(
            "eviction_policy", Bucket.EvictionPolicy.VALUE_ONLY)
        self.bucket_type = self.input.param(
            "bucket_type", Bucket.Type.MEMBASE)  # or Bucket.Type.EPHEMERAL
        compression_mode = self.input.param(
            "compression_mode",
            Bucket.CompressionMode.PASSIVE)  # or Bucket.CompressionMode.ACTIVE
        ramQuota = self.input.param("ramQuota", available_memory)
        bucket_names = self.input.param("bucket_names", "GleamBookUsers")
        if bucket_names:
            bucket_names = bucket_names.split(';')
        if self.bucket_type:
            self.bucket_type = self.bucket_type.split(';')
        if compression_mode:
            compression_mode = compression_mode.split(';')
        if eviction_policy:
            eviction_policy = eviction_policy.split(';')
        if self.num_buckets == 1:
            bucket = Bucket({"name": "GleamBookUsers",
                             "ramQuotaMB": ramQuota,
                             "maxTTL": duration,
                             "replicaNumber": self.num_replicas,
                             "evictionPolicy": eviction_policy[0],
                             "bucketType": self.bucket_type[0],
                             "compressionMode": compression_mode[0]})
            self.bucket_util.create_bucket(bucket)
        elif self.num_buckets > 1 and self.num_buckets == len(bucket_names):
            for i in range(self.num_buckets):
                bucket = Bucket({"name": bucket_names[i],
                                 "ramQuotaMB": ramQuota / self.num_buckets,
                                 "maxTTL": duration,
                                 "replicaNumber": self.num_replicas,
                                 "evictionPolicy": eviction_policy[i],
                                 "bucketType": self.bucket_type[i],
                                 "compressionMode": compression_mode[i]})
                self.bucket_util.create_bucket(bucket)
        else:
            self.fail("Number of buckets and bucket names do not match")

        # rebalance the new buckets across all nodes.
        self.log.info("Rebalance Starts")
        self.nodes = self.rest.node_statuses()
        self.rest.rebalance(otpNodes=[node.id for node in self.nodes],
                            ejectedNodes=[])
        self.rest.monitorRebalance()
        return bucket
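A quick sketch of the quota arithmetic above, with invented numbers; the helper name and figures are illustrative only, not part of TAF:

    def available_kv_quota(total_mb, service_quotas_mb, threshold_mb=100):
        # Mirror create_required_buckets: subtract the quota of every
        # service already on the node, then keep a safety threshold
        return total_mb - sum(service_quotas_mb) - threshold_mb

    # A 4096 MB node running index (512 MB) and fts (256 MB) services:
    # 4096 - 512 - 256 - 100 = 3228 MB left for the KV memoryQuota
    print(available_kv_quota(4096, [512, 256]))  # 3228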
Example #2
 def __populate_cluster_buckets(self, cluster):
     self.log.debug("Fetching bucket details from cluster %s" % cluster.id)
     buckets = json.loads(
         CapellaAPI.get_all_buckets(cluster).content)["buckets"]["data"]
     for bucket in buckets:
         bucket = bucket["data"]
         bucket_obj = Bucket({
             Bucket.name: bucket["name"],
             Bucket.ramQuotaMB: bucket["memoryAllocationInMb"],
             Bucket.replicaNumber: bucket["replicas"],
             Bucket.conflictResolutionType: bucket["bucketConflictResolution"],
             Bucket.flushEnabled: bucket["flush"],
             Bucket.durabilityMinLevel: bucket["durabilityLevel"],
             Bucket.maxTTL: bucket["timeToLive"],
         })
         bucket_obj.uuid = bucket["id"]
         bucket_obj.stats.itemCount = bucket["stats"]["itemCount"]
         bucket_obj.stats.memUsed = bucket["stats"]["memoryUsedInMib"]
         cluster.buckets.append(bucket_obj)
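For reference, a hypothetical payload of the shape __populate_cluster_buckets expects; the field names come from the parsing code above, while the values are invented:

    sample_response = {
        "buckets": {"data": [
            {"data": {
                "id": "bucket-uuid-1",
                "name": "GleamBookUsers",
                "memoryAllocationInMb": 256,
                "replicas": 1,
                "bucketConflictResolution": "seqno",
                "flush": False,
                "durabilityLevel": "none",
                "timeToLive": 0,
                "stats": {"itemCount": 10000, "memoryUsedInMib": 54},
            }},
        ]}
    }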
Example #3
 def setUp(self):
     super(EventingSanity, self).setUp()
     self.rest.set_service_mem_quota({CbServer.Settings.KV_MEM_QUOTA: 700})
     if self.create_functions_buckets:
         self.bucket_size = 200
         self.log.info("Bucket size: %s" % self.bucket_size)
         bucket_params_src = Bucket({
             "name": self.src_bucket_name,
             "replicaNumber": self.num_replicas
         })
         src_bucket = self.bucket_util.create_bucket(
             self.cluster, bucket_params_src)
         self.src_bucket = self.bucket_util.get_all_buckets(self.cluster)[0]
         bucket_params_dst = Bucket({
             "name": self.dst_bucket_name,
             "replicaNumber": self.num_replicas
         })
         bucket_params_meta = Bucket({
             "name": self.metadata_bucket_name,
             "replicaNumber": self.num_replicas
         })
         bucket_dst = self.bucket_util.create_bucket(
             self.cluster, bucket_params_dst)
         bucket_meta = self.bucket_util.create_bucket(
             self.cluster, bucket_params_meta)
         self.buckets = self.bucket_util.get_all_buckets(self.cluster)
     self.gens_load = self.generate_docs(self.docs_per_day)
     self.expiry = 3
Example #4
    def initialise(self):
        # Create the collections for this task
        for collection in self.collections:
            BucketHelper(self.node).create_collection(
                Bucket({'name': self.bucket}), self.scope.name, {"name": collection})

        # A client for each collection
        self.clients = {}
        for collection in self.collections:
            self.clients[collection] = \
                SDKClient([self.node], Bucket(
                    {'name': self.bucket}), scope=self.scope.name, collection=collection)
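Once initialise() has run, each collection can be exercised through its own client. A hedged usage sketch (collection and key names are illustrative; crud() follows the SDKClient calls seen in the other examples):

    client = self.clients["collection_1"]
    client.crud("create", "doc_key_1", '{"field": "value"}')
    client.close()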
Example #5
 def test_default_moxi(self):
     name = "default"
     replicas = [0, 1, 2, 3]
     rest = RestConnection(self.cluster.master)
     remote = RemoteMachineShellConnection(self.cluster.master)
     for replicaNumber in replicas:
         bucket = Bucket({"name": name, "replicaNumber": replicaNumber})
         self.bucket_util.create_bucket(bucket)
         msg = 'create_bucket succeeded but bucket {0} does not exist'.format(
             name)
         self.assertTrue(self.bucket_util.wait_for_bucket_creation(bucket),
                         msg)
         self.bucket_util.delete_bucket(self.cluster.master, bucket.name)
         msg = 'bucket "{0}" was not deleted even after waiting for two minutes'.format(
             name)
         self.assertTrue(self.bucket_util.wait_for_bucket_deletion(bucket),
                         msg)
         msg = 'bucket {0} data files are not deleted after bucket deleted from membase'.format(
             name)
         self.assertTrue(self.wait_for_data_files_deletion(
             name,
             remote_connection=remote,
             rest=rest,
             timeout_in_seconds=20),
                         msg=msg)
Example #6
    def get_stats_memc(self, bucket_name, stat_name="", key=None):
        """
        Fetches stats using cbstat and greps for specific line.
        Uses command:
          cbstats localhost:port 'stat_name' | grep 'field_to_grep'

        Note: Function calling this API should take care of validating
        the outputs and handling the errors/warnings from execution.

        Arguments:
        :bucket_name   - Name of the bucket to get the stats
        :stat_name     - Any valid stat_command accepted by cbstats
        :field_to_grep - Target stat name string to grep.
                         Default=None, means fetch all data

        Returns:
        :output - Output for the cbstats command
        :error  - Buffer containing warnings/errors from the execution
        """
        # result = dict()
        if stat_name == "all":
            stat_name = ""
        client = MemcachedClientHelper.direct_client(
            self.server, Bucket({"name": bucket_name}), 30, self.username,
            self.password)
        output = client.stats(stat_name)
        client.close()
        return output if key is None else output[key]
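A hedged usage sketch (the helper instance name is illustrative):

    all_stats = cbstats.get_stats_memc("default")
    # Pick a single counter, e.g. the active item count
    curr_items = cbstats.get_stats_memc("default", "all", key="curr_items")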
Example #7
 def test_default_moxi(self):
     bucket_name = 'default'
     rest = RestConnection(self.cluster.master)
     replicaNumber = 1
     bucket = Bucket({"name": bucket_name, "replicaNumber": replicaNumber})
     self.bucket_util.create_bucket(bucket)
     msg = 'create_bucket succeeded but bucket {0} does not exist'.format(
         bucket_name)
     self.assertTrue(self.bucket_util.wait_for_bucket_creation(bucket), msg)
     self.sleep(5)
     #self.assertTrue(self.bucket_util.wait_for_memcached(self.cluster.master, bucket), "Wait_for_memcached failed")
     try:
         self._generate_load(bucket)
     except Exception as e:
         self.fail("unable to insert any key to memcached: %s" % e)
     try:
         self._validate_load(bucket)
     except Exception as e:
         self.fail("Not all values were stored successfully: %s" % e)
     self.bucket_util.delete_bucket(self.cluster.master, bucket)
     msg = 'bucket "{0}" was not deleted even after waiting for two minutes'.format(
         bucket_name)
     self.assertTrue(self.bucket_util.wait_for_bucket_deletion(bucket), msg)
     self.bucket_util.create_bucket(bucket)
     msg = 'create_bucket succeeded but bucket {0} does not exist'.format(
         bucket_name)
     self.assertTrue(self.bucket_util.wait_for_bucket_creation(bucket), msg)
     self.sleep(5)
Example #8
    def test_durability_with_bucket_level_none(self):
        """
        Create Buckets with NONE durability level.
        Attempts sync_write with different durability_levels and validate
        CRUDs are honored with respective durability_levels set from clients
        """

        create_desc = "Creating %s bucket with level 'None'" % self.bucket_type

        b_durability = Bucket.DurabilityLevel.NONE
        verification_dict = self.get_cb_stat_verification_dict()
        bucket_dict = self.get_bucket_dict(self.bucket_type, b_durability)

        self.log.info(create_desc)
        # Object to support performing CRUDs and create Bucket
        bucket_obj = Bucket(bucket_dict)
        self.bucket_util.create_bucket(self.cluster, bucket_obj,
                                       wait_for_warmup=True)
        self.get_vbucket_type_mapping(bucket_obj.name)
        self.summary.add_step(create_desc)

        # Index for doc_gen to avoid creating/deleting same docs across d_level
        index = 0
        for d_level in self.get_supported_durability_for_bucket():
            self.validate_durability_with_crud(bucket_obj, b_durability,
                                               verification_dict,
                                               doc_durability=d_level,
                                               doc_start_index=index)
            self.summary.add_step("CRUD with doc_durability %s" % d_level)

            # Cbstats vbucket-details validation
            self.cb_stat_verify(verification_dict)
            index += 10
Example #9
    def test_ops_only_with_bucket_level_durability(self):
        """
        Create Buckets with durability_levels set and perform
        CRUDs from client without explicitly setting the durability and
        validate the ops to make sure respective durability is honored
        """
        for d_level in self.get_supported_durability_for_bucket():
            # Avoid creating bucket with durability=None
            if d_level == Bucket.DurabilityLevel.NONE:
                continue

            step_desc = "Creating %s bucket with level '%s'" \
                        % (self.bucket_type, d_level)
            verification_dict = self.get_cb_stat_verification_dict()

            self.log.info(step_desc)
            # Object to support performing CRUDs and create Bucket
            bucket_dict = self.get_bucket_dict(self.bucket_type, d_level)
            bucket_obj = Bucket(bucket_dict)
            self.bucket_util.create_bucket(self.cluster, bucket_obj,
                                           wait_for_warmup=True)
            self.get_vbucket_type_mapping(bucket_obj.name)
            self.summary.add_step(step_desc)

            self.validate_durability_with_crud(bucket_obj, d_level,
                                               verification_dict)
            self.summary.add_step("Async write with bucket durability %s"
                                  % d_level)

            # Cbstats vbucket-details validation
            self.cb_stat_verify(verification_dict)

            # Delete the bucket on server
            self.bucket_util.delete_bucket(self.cluster, bucket_obj)
            self.summary.add_step("Delete %s bucket" % self.bucket_type)
Example #10
    def get_vbucket_stats(self,
                          bucket_name,
                          stat_name,
                          vbucket_num,
                          field_to_grep=None):
        """
        Fetches failovers stats for specified vbucket
        and greps for specific stat.
        Uses command:
          cbstats localhost:port failovers '[vbucket_num]' | \
            grep '[field_to_grep]'

        Note: Function calling this API should take care of validating
        the outputs and handling the errors/warnings from execution.

        Arguments:
        :bucket_name   - Name of the bucket to get the stats
        :stat_name     - Any valid stat_command accepted by cbstats
        :vbucket_num   - Target vbucket number to fetch the stats
        :field_to_grep - Target stat name string to grep.
                         Default=None, means to fetch all stats related to
                         the selected vbucket stat

        Returns:
        :output - Output for the cbstats command
        :error  - Buffer containing warnings/errors from the execution
        """
        client = MemcachedClientHelper.direct_client(
            self.server, Bucket({"name": bucket_name}), 30,
            self.username, self.password)
        output = client.stats("{} {}".format(stat_name, vbucket_num))
        client.close()
        return output
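A hedged usage sketch (instance name illustrative): fetch the failovers stats for vbucket 512 and filter on the caller's side:

    failover_stats = cbstats.get_vbucket_stats("default", "failovers", 512)
    entries = failover_stats.get("vb_512:num_entries")  # key format assumed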
Example #11
 def test_two_replica(self):
     bucket_name = 'default'
     rest = RestConnection(self.cluster.master)
     replicaNumber = 2
     bucket = Bucket({"name": bucket_name, "replicaNumber": replicaNumber})
     self.bucket_util.create_bucket(bucket)
     msg = 'create_bucket succeeded but bucket {0} does not exist'.format(bucket_name)
     self.assertTrue(self.bucket_util.wait_for_bucket_creation(bucket), msg)
Example #12
 def test_two_replica(self):
     name = 'default'
     replica_number = 2
     bucket = Bucket({"name": name, "replicaNumber": replica_number})
     self.bucket_util.create_bucket(self.cluster, bucket)
     msg = 'create_bucket succeeded but bucket %s does not exist' % name
     self.assertTrue(
         self.bucket_util.wait_for_bucket_creation(self.cluster, bucket),
         msg)
Example #13
    def test_backup_restore_txn_sanity(self):
        """
        1. Create default bucket on the cluster
        2. Load bucket with docs (5 docs per txn)
        3. Perform updates or deletes and run backup
        4. Perform restores and verify docs
        """
        self.bk_cluster = self.get_cb_cluster_by_name("C1")
        self.rs_cluster = self.get_cb_cluster_by_name("C2")

        self.bucket_name = 'default'
        bk_rest = RestConnection(self.bk_cluster.master)
        bucket = Bucket({"name": self.bucket_name, "replicaNumber": self.num_replicas})
        self.bk_cluster.bucket_util.create_bucket(bucket)
        msg = 'create_bucket succeeded but bucket {0} does not exist'.format(self.bucket_name)
        self.assertTrue(self.bk_cluster.bucket_util.wait_for_bucket_creation(bucket), msg)
        self.bk_cluster.bucket_util.add_rbac_user()
        self.backup_create()
        self.sleep(5)
        self.log.info("*** start to load items to all buckets")
        self.load_buckets(self.bk_cluster, self.bk_cluster.bucket_util.buckets, "create", self.commit)
        if self.ops_type != "create":
            self.load_buckets(self.bk_cluster,
                              self.bk_cluster.bucket_util.buckets,
                              self.ops_type, self.commit)
        self.log.info("*** done to load txn items to all buckets")
        self.expected_error = self.input.param("expected_error", None)

        self.backup_cluster()

        self.log.info("*** start to restore cluster")
        start = randrange(1, self.backupset.number_of_backups + 1)
        if start == self.backupset.number_of_backups:
            end = start
        else:
            end = randrange(start, self.backupset.number_of_backups + 1)

        if self.reset_restore_cluster:
            self.log.info("*** start to reset cluster")
            self.backup_reset_clusters(self.rs_cluster_utils)
            if self.same_cluster:
                self._initialize_nodes(Cluster(), self.servers[:self.nodes_init])
            else:
                self._initialize_nodes(Cluster(), self.input.clusters[0][:self.nodes_init])
            self.log.info("Done reset cluster")

        """ Add built-in user cbadminbucket to restore cluster """
        self.rs_cluster.bucket_util.add_rbac_user()
        rs_rest = RestConnection(self.rs_cluster.master)
        self.rs_cluster.bucket_util.create_bucket(bucket)
        self.assertTrue(self.rs_cluster.bucket_util.wait_for_bucket_creation(bucket), msg)
        self.backupset.start = start
        self.backupset.end = end
        self.backup_restore()

        self.verify_txn_in_bkrs_bucket()
Example #14
    def test_create_bucket_using_cli(self):
        """
        Create Bucket with all possible durability_levels and make sure
        durability levels are honored for document CRUDs
        - Will test for all bucket types (Couchbase, Ephemeral, Memcached)
        - With all possible d_levels for bucket_durability
        - Perform doc insert for each bucket to validate the sync_writes
        """
        # Create cb_cli session object
        shell = self.vbs_in_node[self.cluster.master]["shell"]
        cb_cli = CbCli(shell)

        for d_level in self.bucket_util.get_supported_durability_levels():
            create_failed = False
            test_step = "Creating %s bucket with level %s" \
                        % (self.bucket_type, d_level)

            bucket_dict = self.get_bucket_dict(self.bucket_type, d_level)
            # Remove unsupported replica param in case of Memcached bucket
            if self.bucket_type == Bucket.Type.MEMCACHED:
                del bucket_dict[Bucket.replicaNumber]

            # Object to support performing CRUDs
            bucket_obj = Bucket(bucket_dict)

            output = cb_cli.create_bucket(bucket_dict, wait=True)
            self.get_vbucket_type_mapping(bucket_obj.name)
            if "SUCCESS: Bucket created" not in str(output):
                create_failed = True
                if d_level in self.possible_d_levels[self.bucket_type]:
                    self.log_failure("Create failed for %s bucket "
                                     "with min_durability_level %s"
                                     % (self.bucket_type, d_level))

            self.bucket_util.buckets = [bucket_obj]
            self.bucket_util.print_bucket_stats()
            self.summary.add_step(test_step)

            # Perform CRUDs to validate bucket_creation with durability
            if not create_failed:
                verification_dict = self.get_cb_stat_verification_dict()
                self.validate_durability_with_crud(bucket_obj, d_level,
                                                   verification_dict)
                self.summary.add_step("Validate_CRUD_operation")

                # Cbstats vbucket-details validation
                self.cb_stat_verify(verification_dict)

            output = cb_cli.delete_bucket(bucket_obj.name)
            if create_failed:
                if "ERROR: Bucket not found" not in str(output):
                    self.log_failure("Mismatch in bucket-delete output")
            elif "SUCCESS: Bucket deleted" not in str(output):
                self.log_failure("Mismatch in bucket-delete output")
            self.summary.add_step("Delete bucket")
Example #15
    def test_higher_durability_level_from_client(self):
        """
        Create bucket with durability_levels set and perform CRUDs using
        durability_level > the bucket's durability_level and validate
        """
        d_level_order_len = len(self.d_level_order)
        supported_d_levels = self.get_supported_durability_for_bucket()
        for d_level in supported_d_levels:
            create_desc = "Creating %s bucket with level '%s'" \
                          % (self.bucket_type, d_level)
            verification_dict = self.get_cb_stat_verification_dict()

            self.log.info(create_desc)
            bucket_dict = self.get_bucket_dict(self.bucket_type, d_level)
            # Object to support performing CRUDs and create Bucket
            bucket_obj = Bucket(bucket_dict)
            self.bucket_util.create_bucket(self.cluster,
                                           bucket_obj,
                                           wait_for_warmup=True)
            self.get_vbucket_type_mapping(bucket_obj.name)
            self.summary.add_step(create_desc)

            # Perform doc_ops using all possible higher durability levels
            index = 0
            op_type = "create"
            durability_index = self.d_level_order.index(d_level) + 1

            while durability_index < d_level_order_len:
                # Ephemeral case
                if self.d_level_order[durability_index] \
                        not in supported_d_levels:
                    durability_index += 1
                    continue
                self.validate_durability_with_crud(
                    bucket_obj,
                    d_level,
                    verification_dict,
                    op_type=op_type,
                    doc_durability=self.d_level_order[durability_index],
                    doc_start_index=index)

                self.summary.add_step(
                    "%s with doc_level_durability %s" %
                    (op_type, self.d_level_order[durability_index]))
                durability_index += 1
                index += 10

            # Cbstats vbucket-details validation
            self.cb_stat_verify(verification_dict)

            # Delete the bucket on server
            self.bucket_util.delete_bucket(self.cluster, bucket_obj)
            self.summary.add_step("Delete %s bucket" % self.bucket_type)
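The walk above relies on self.d_level_order being sorted from the weakest to the strongest durability level. A plausible sketch of that ordering, assuming the standard Couchbase levels (exact member names are an assumption):

    d_level_order = [
        Bucket.DurabilityLevel.NONE,
        Bucket.DurabilityLevel.MAJORITY,
        Bucket.DurabilityLevel.MAJORITY_AND_PERSIST_TO_ACTIVE,
        Bucket.DurabilityLevel.PERSIST_TO_MAJORITY,
    ]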
Example #16
    def create_required_buckets(self):
        self.log.info("Get the available memory quota")
        self.info = self.rest.get_nodes_self()
        threshold_memory = 100
        # threshold_memory_vagrant = 100
        total_memory_in_mb = self.info.mcdMemoryReserved
        total_available_memory_in_mb = total_memory_in_mb

        # If a service is already present on the node,
        # subtract its quota from the available memory
        if "index" in self.info.services:
            total_available_memory_in_mb -= self.info.indexMemoryQuota
        if "fts" in self.info.services:
            total_available_memory_in_mb -= self.info.ftsMemoryQuota
        if "cbas" in self.info.services:
            total_available_memory_in_mb -= self.info.cbasMemoryQuota
        if "eventing" in self.info.services:
            total_available_memory_in_mb -= self.info.eventingMemoryQuota

        available_memory = total_available_memory_in_mb - threshold_memory

        self.rest.set_service_memoryQuota(service='memoryQuota',
                                          memoryQuota=available_memory)

        # Creating buckets for data loading purpose
        self.log.info("Create CB buckets")
        self.bucket_expiry = self.input.param("bucket_expiry", 0)
        ramQuota = self.input.param("ramQuota", available_memory)
        buckets = self.input.param("bucket_names", "GleamBookUsers").split(';')
        self.bucket_type = self.bucket_type.split(';')
        self.compression_mode = self.compression_mode.split(';')
        for i in range(self.num_buckets):
            bucket = Bucket({
                Bucket.name: buckets[i],
                Bucket.ramQuotaMB: ramQuota / self.num_buckets,
                Bucket.maxTTL: self.bucket_expiry,
                Bucket.replicaNumber: self.num_replicas,
                Bucket.storageBackend: self.bucket_storage,
                Bucket.evictionPolicy: self.bucket_eviction_policy,
                Bucket.bucketType: self.bucket_type[i],
                Bucket.compressionMode: self.compression_mode[i]
            })
            self.bucket_util.create_bucket(bucket)

        # rebalance the new buckets across all nodes.
        self.log.info("Rebalance Starts")
        self.nodes = self.rest.node_statuses()
        self.rest.rebalance(otpNodes=[node.id for node in self.nodes],
                            ejectedNodes=[])
        self.rest.monitorRebalance()
        return bucket
Example #17
 def test_valid_length(self):
     # 100 chars is the maximum valid bucket-name length
     name_len = self.input.param('name_length', 100)
     name = 'a' * name_len
     rest = RestConnection(self.cluster.master)
     replicaNumber = 1
     bucket = Bucket({"name": name, "replicaNumber": replicaNumber})
     try:
         self.bucket_util.create_bucket(bucket)
         msg = 'create_bucket succeeded but bucket {0} does not exist'.format(name)
         self.assertTrue(self.bucket_util.wait_for_bucket_creation(bucket), msg)
     except BucketCreationException as ex:
         self.log.error(ex)
         self.fail('could not create bucket with valid length')
Example #18
 def test_valid_length(self):
     name_len = self.input.param('name_length', 100)
     name = 'a' * name_len
     replica_number = 1
     bucket = Bucket({"name": name, "replicaNumber": replica_number})
     msg = 'create_bucket succeeded but bucket %s does not exist' % name
     try:
         self.bucket_util.create_bucket(self.cluster, bucket)
         self.assertTrue(
             self.bucket_util.wait_for_bucket_creation(
                 self.cluster, bucket), msg)
     except BucketCreationException as ex:
         self.log.error(ex)
         self.fail('could not create bucket with valid length')
Example #19
    def get_collections(self, bucket):
        """
        Fetches list of collections from the server
        Uses command:
          cbstats localhost:port collections

        Arguments:
        :bucket - Bucket object to fetch the name

        Returns:
        :collection_data - Dict containing the collections stat values
        """
        collection_data = dict()

        client = MemcachedClientHelper.direct_client(
            self.server, Bucket({"name": bucket.name}), 30, self.username,
            self.password)
        client.collections_supported = True
        collection_details = json.loads(client.get_collections()[2])
        collection_stats = client.stats("collections")
        client.close()

        collection_data["count"] = 0
        collection_data["manifest_uid"] = collection_stats["manifest_uid"]

        for scope_details in collection_details["scopes"]:
            s_name = scope_details["name"]
            s_id = scope_details["uid"]
            collection_data[s_name] = dict()
            for col_details in scope_details["collections"]:
                c_name = col_details["name"]
                c_id = col_details["uid"]

                collection_data[s_name][c_name] = dict()
                scope_col_id = "0x%s:0x%s:" % (s_id, c_id)

                for stat, value in collection_stats.items():
                    if stat.startswith(scope_col_id):
                        stat = stat.split(':')[2]
                        # Convert to number if possible
                        try:
                            value = int(value)
                        except ValueError:
                            pass
                        collection_data[s_name][c_name][stat] = value
                collection_data["count"] += 1
        return collection_data
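Illustrative shape of the dict returned by get_collections (scope/collection names and values are invented; manifest_uid stays a string here because the code above does not convert it):

    collection_data_example = {
        "count": 2,
        "manifest_uid": "2",
        "_default": {"_default": {"items": 1000}},
        "my_scope": {"my_collection": {"items": 42}},
    }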
Example #20
    def test_backup_with_txn_pending(self):
        """
        1. Create default bucket on the backup cluster
        2. Load 100K docs with num_threads = 100
        3. While loading, run backup. Backup will skip docs whose txn
           is still pending
        4. Verify docs in backup repo are fewer than 100K
        """
        self.bk_cluster = self.get_cb_cluster_by_name("C1")
        self.rs_cluster = self.get_cb_cluster_by_name("C2")

        self.bucket_name = 'default'
        bk_rest = RestConnection(self.bk_cluster.master)
        bucket = Bucket({
            "name": self.bucket_name,
            "replicaNumber": self.num_replicas
        })
        self.bk_cluster.bucket_util.create_bucket(self.bk_cluster, bucket)
        msg = 'create_bucket succeeded but bucket {0} does not exist'.format(
            self.bucket_name)
        self.assertTrue(
            self.bk_cluster.bucket_util.wait_for_bucket_creation(
                self.bk_cluster, bucket), msg)
        self.bk_cluster.bucket_util.add_rbac_user(self.bk_cluster.master)
        self.backup_create()

        self.kv_gen = self.key_generators(self.ops_type)
        bk_threads = []
        load_thread = Thread(target=self.load_buckets,
                             args=(self.bk_cluster,
                                   self.bk_cluster.bucket_util.buckets,
                                   "create", self.commit))
        bk_threads.append(load_thread)
        load_thread.start()
        bk_thread = Thread(target=self.backup_cluster)
        bk_threads.append(bk_thread)
        bk_thread.start()
        for bk_thread in bk_threads:
            bk_thread.join()

        _, output, _ = self.backup_list()
        backup_docs = 0
        for x in output:
            if "data" in x:
                backup_docs = int(x.strip().split(" ")[1])
                break
        if backup_docs >= int(self.num_items):
            self.fail("pending txn should not backup")
Example #21
    def test_durability_impossible(self):
        """
        Create bucket with replica > num_kv_nodes.
        Make sure the bucket creation fails due to durability constraints.
        """

        for d_level in self.possible_d_levels[self.bucket_type]:
            if d_level == Bucket.DurabilityLevel.NONE:
                continue

            bucket_dict = self.get_bucket_dict(self.bucket_type, d_level)
            # Object to support performing CRUDs
            bucket_obj = Bucket(bucket_dict)
            task = self.bucket_util.async_create_bucket(self.cluster,
                                                        bucket_obj)
            self.task_manager.get_task_result(task)
            if task.result is True:
                self.fail("Bucket created with replica=%s, durability=%s")
Example #22
    def test_create_bucket_using_rest(self):
        log_failure_msg = "Bucket creation succeeded for replica=3"
        for d_level in self.bucket_util.get_supported_durability_levels():
            create_failed = False
            test_step = "Creating %s bucket with level %s" \
                        % (self.bucket_type, d_level)

            bucket_dict = self.get_bucket_dict(self.bucket_type, d_level)
            # Object to support performing CRUDs
            bucket_obj = Bucket(bucket_dict)

            try:
                self.bucket_util.create_bucket(self.cluster,
                                               bucket_obj,
                                               wait_for_warmup=True)
                self.get_vbucket_type_mapping(bucket_obj.name)
                if self.num_replicas == Bucket.ReplicaNum.THREE:
                    if d_level != Bucket.DurabilityLevel.NONE:
                        self.log_failure(log_failure_msg)
                elif d_level not in self.possible_d_levels[self.bucket_type]:
                    self.log_failure("Create succeeded for %s bucket for "
                                     "unsupported durability %s" %
                                     (self.bucket_type, d_level))
            except Exception as rest_exception:
                create_failed = True
                self.log.debug(rest_exception)

            self.bucket_util.print_bucket_stats(self.cluster)
            self.summary.add_step(test_step)

            # Perform CRUDs to validate bucket_creation with durability
            if not create_failed:
                verification_dict = self.get_cb_stat_verification_dict()

                self.validate_durability_with_crud(bucket_obj, d_level,
                                                   verification_dict)
                self.summary.add_step("Validate CRUD operation")

                # Cbstats vbucket-details validation
                self.cb_stat_verify(verification_dict)

            self.bucket_util.delete_bucket(self.cluster, bucket_obj)
            self.summary.add_step("Bucket deletion")
Example #23
File: opd.py Project: couchbaselabs/TAF
    def create_required_buckets(self, cluster):
        if self.cluster.cloud_cluster:
            return
        self.log.info("Get the available memory quota")
        rest = RestConnection(cluster.master)
        self.info = rest.get_nodes_self()

        # threshold_memory_vagrant = 100
        kv_memory = self.info.memoryQuota - 100

        # Creating buckets for data loading purpose
        self.log.info("Create CB buckets")
        self.bucket_expiry = self.input.param("bucket_expiry", 0)
        ramQuota = self.input.param("ramQuota", kv_memory)
        buckets = ["GleamBookUsers"] * self.num_buckets
        bucket_type = self.bucket_type.split(';') * self.num_buckets
        compression_mode = self.compression_mode.split(';') * self.num_buckets
        for i in range(self.num_buckets):
            bucket = Bucket({
                Bucket.name: buckets[i] + str(i),
                Bucket.ramQuotaMB: ramQuota / self.num_buckets,
                Bucket.maxTTL: self.bucket_expiry,
                Bucket.replicaNumber: self.num_replicas,
                Bucket.storageBackend: self.bucket_storage,
                Bucket.evictionPolicy: self.bucket_eviction_policy,
                Bucket.bucketType: bucket_type[i],
                Bucket.flushEnabled: Bucket.FlushBucket.ENABLED,
                Bucket.compressionMode: compression_mode[i],
                Bucket.fragmentationPercentage: self.fragmentation
            })
            self.bucket_util.create_bucket(cluster, bucket)

        # rebalance the new buckets across all nodes.
        self.log.info("Rebalance Starts")
        self.nodes = rest.node_statuses()
        rest.rebalance(otpNodes=[node.id for node in self.nodes],
                       ejectedNodes=[])
        rest.monitorRebalance()
Example #24
    def test_durability_impossible(self):
        """
        Create bucket with replica > num_kv_nodes.
        Perform doc insert to make sure we get TimeoutException due to
        durability_impossible from the server.
        """

        verification_dict = self.get_cb_stat_verification_dict()

        key, value = doc_generator("test_key", 0, 1).next()
        for d_level in self.possible_d_levels[self.bucket_type]:
            if d_level == Bucket.DurabilityLevel.NONE:
                continue

            bucket_dict = self.get_bucket_dict(self.bucket_type, d_level)
            # Object to support performing CRUDs
            bucket_obj = Bucket(bucket_dict)
            self.bucket_util.create_bucket(self.cluster,
                                           bucket_obj,
                                           wait_for_warmup=True)
            self.summary.add_step("Create bucket with durability %s" % d_level)

            client = SDKClient([self.cluster.master], bucket_obj)
            result = client.crud("create", key, value, timeout=3)
            if result["status"] is True \
                    or SDKException.DurabilityImpossibleException \
                    not in result["error"]:
                self.log_failure("Indirect sync_write succeeded "
                                 "without enough nodes")
            client.close()

            # Cbstats vbucket-details validation
            self.cb_stat_verify(verification_dict)

            # Delete the created bucket
            self.bucket_util.delete_bucket(self.cluster, bucket_obj)
            self.summary.add_step("Delete bucket with d_level %s" % d_level)
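For context, client.crud returns a result dict; the keys below are the ones the validation above inspects (values invented):

    result_example = {
        "status": False,  # the create is expected to fail here
        "error": "...DurabilityImpossibleException...",
        "cas": 0,
    }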
Example #25
    def get_scopes(self, bucket):
        """
        Fetches list of scopes for the particular bucket
        Uses command:
          cbstats localhost:port scopes

        Arguments:
        :bucket - Bucket object to fetch the name

        Returns:
        :scope_data - Dict containing the scopes stat values
        """
        scope_data = dict()

        client = MemcachedClientHelper.direct_client(
            self.server, Bucket({"name": bucket.name}), 30, self.username,
            self.password)
        client.collections_supported = True
        collection_details = json.loads(client.get_collections()[2])
        collection_stats = client.stats("collections")
        client.close()
        scope_data["manifest_uid"] = int(collection_stats["manifest_uid"])
        scope_data["count"] = 0
        for s_details in collection_details["scopes"]:
            s_name = s_details["name"]
            s_id = s_details["uid"]
            scope_data["count"] += 1
            scope_data[s_name] = dict()
            scope_data[s_name]["collections"] = len(s_details["collections"])
            scope_data[s_name]["num_items"] = 0
            for col_details in s_details["collections"]:
                c_id = col_details["uid"]
                i_key = "0x%s:0x%s:items" % (s_id, c_id)
                scope_data[s_name]["num_items"] += int(collection_stats[i_key])

        return scope_data
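Illustrative shape of the returned scope_data (names and counts invented):

    scope_data_example = {
        "manifest_uid": 2,
        "count": 2,
        "_default": {"collections": 1, "num_items": 1000},
        "my_scope": {"collections": 3, "num_items": 42},
    }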
Example #26
    def setUp(self):
        super(MultiDurabilityTests, self).setUp()

        replica_list = self.input.param("replica_list", list())
        bucket_type_list = self.input.param("bucket_type_list", list())
        self.bucket_dict = dict()
        tasks = dict()

        # Create cluster
        nodes_init = self.cluster.servers[1:self.nodes_init] \
            if self.nodes_init != 1 else []
        self.task.rebalance(self.cluster, nodes_init, [])
        self.bucket_util.add_rbac_user(self.cluster.master)

        rest = RestConnection(self.cluster.master)
        info = rest.get_nodes_self()
        if info.memoryQuota < 450.0:
            self.fail("At least 450MB of memory required")
        else:
            available_ram = info.memoryQuota * (2.0 / 3.0)
            if available_ram / self.standard_buckets > 100:
                bucket_ram_quota = int(available_ram / self.standard_buckets)
            else:
                bucket_ram_quota = 100

        if type(replica_list) is str:
            replica_list = replica_list.split(";")
        if type(bucket_type_list) is str:
            bucket_type_list = bucket_type_list.split(";")

        for index in range(self.standard_buckets):
            self.bucket_dict[index] = dict()

            # If replica not provided, set replica value to '0'
            if len(replica_list) - 1 < index:
                self.bucket_dict[index]["replica"] = 0
            else:
                self.bucket_dict[index]["replica"] = int(replica_list[index])

            # If bucket_type not provided, set type to 'MEMBASE'
            if len(bucket_type_list) - 1 < index:
                self.bucket_dict[index]["type"] = Bucket.Type.MEMBASE
            else:
                self.bucket_dict[index]["type"] = bucket_type_list[index]

            # create bucket object for creation
            bucket = Bucket({
                Bucket.name: "bucket_{0}".format(index),
                Bucket.bucketType: self.bucket_dict[index]["type"],
                Bucket.ramQuotaMB: bucket_ram_quota,
                Bucket.replicaNumber: self.bucket_dict[index]["replica"],
                Bucket.compressionMode: "off",
                Bucket.maxTTL: 0
            })
            tasks[bucket] = self.bucket_util.async_create_bucket(
                self.cluster, bucket)

            # Append bucket object into the bucket_info dict
            self.bucket_dict[index]["object"] = bucket

        raise_exception = None
        for bucket, task in tasks.items():
            self.task_manager.get_task_result(task)
            if task.result:
                self.sleep(2)
                warmed_up = self.bucket_util._wait_warmup_completed(
                    self.cluster_util.get_kv_nodes(self.cluster),
                    bucket,
                    wait_time=60)
                if not warmed_up:
                    task.result = False
                    raise_exception = "Bucket %s not warmed up" % bucket.name

            if task.result:
                self.cluster.buckets.append(bucket)
            self.task_manager.stop_task(task)

        if raise_exception:
            raise Exception("Create bucket failed: %s" % raise_exception)
        self.cluster_util.print_cluster_stats(self.cluster)
        self.bucket_util.print_bucket_stats(self.cluster)
        self.log.info("=== MultiDurabilityTests base setup done ===")
Example #27
 def __init__(self, bucket, scope, user, node):
     super(NsServerNumCollections, self).__init__(bucket, scope, user, node)
     self.bucket_obj = Bucket({'name': bucket})
     self.bucket_hlp = BucketHelper(self.node)
Example #28
    def test_observe_scenario(self):
        """
        Creates bucket with bucket level durability.
        Perform CRUD operations and make sure all the operations are
        done as sync_write in server.
        Note: Passing persistTo/replicateTo will test the observe scenarios
        """

        def perform_crud_ops():
            old_cas = 0
            client = SDKClient([self.cluster.master], bucket_obj)

            for op_type in ["create", "update", "read", "replace", "delete"]:
                crud_desc = "Key %s, doc_op: %s" % (key, op_type)
                self.log.info(crud_desc)
                result = client.crud(op_type, key, value,
                                     replicate_to=self.replicate_to,
                                     persist_to=self.persist_to)

                if op_type != "read":
                    if op_type != "replace":
                        dict_key = "ops_%s" % op_type
                    else:
                        dict_key = "ops_update"

                    verification_dict[dict_key] += 1
                    verification_dict["sync_write_committed_count"] += 1
                    if result["cas"] == old_cas:
                        self.log_failure("CAS didn't get updated: %s"
                                         % result["cas"])
                elif op_type == "read":
                    if result["cas"] != old_cas:
                        self.log_failure("CAS updated for read operation: %s"
                                         % result["cas"])

                self.summary.add_step(crud_desc)
                old_cas = result["cas"]
            client.close()

        doc_gen = doc_generator("test_key", 0, 1, mutate=0)
        key, value = doc_gen.next()

        for d_level in self.possible_d_levels[self.bucket_type]:
            if d_level == Bucket.DurabilityLevel.NONE:
                continue

            create_desc = "Create bucket with durability %s" % d_level
            self.log.info(create_desc)

            bucket_dict = self.get_bucket_dict(self.bucket_type, d_level)
            # Object to support performing CRUDs
            bucket_obj = Bucket(bucket_dict)
            self.bucket_util.create_bucket(self.cluster, bucket_obj,
                                           wait_for_warmup=True)
            self.summary.add_step(create_desc)

            verification_dict = self.get_cb_stat_verification_dict()

            # Test CRUD operations
            perform_crud_ops()

            # Validate doc_count
            self.bucket_util._wait_for_stats_all_buckets(self.cluster,
                                                         self.cluster.buckets)
            self.bucket_util.verify_stats_all_buckets(self.cluster, 0)

            # Cbstats vbucket-details validation
            self.cb_stat_verify(verification_dict)

            # Delete the created bucket
            self.bucket_util.delete_bucket(self.cluster, bucket_obj)
            self.summary.add_step("Delete bucket with d_level %s" % d_level)
Example #29
    def test_sync_write_in_progress(self):
        """
        Test to simulate sync_write_in_progress error and validate the behavior
        This will validate failure in majority of nodes, where durability will
        surely fail for all CRUDs

        1. Select nodes to simulate the error which will affect the durability
        2. Enable the specified error_scenario on the selected nodes
        3. Perform individual CRUDs and verify sync_write_in_progress errors
        4. Validate the end results
        """

        def test_scenario(bucket, doc_ops,
                          with_sync_write_val=None):
            # Set crud_batch_size
            crud_batch_size = 4
            simulate_error = CouchbaseError.STOP_MEMCACHED

            # Fetch target_vbs for CRUDs
            node_vb_info = self.vbs_in_node
            target_vbuckets = node_vb_info[target_nodes[0]]["replica"]
            if len(target_nodes) > 1:
                index = 1
                while index < len(target_nodes):
                    target_vbuckets = list(
                        set(target_vbuckets).intersection(
                            set(node_vb_info[target_nodes[index]]["replica"]))
                    )
                    index += 1

            # Variable to hold one of the doc_generator objects
            gen_loader_1 = None
            gen_loader_2 = None

            # Initialize doc_generators to use for testing
            self.log.info("Creating doc_generators")
            gen_create = doc_generator(
                self.key, self.num_items, crud_batch_size,
                vbuckets=self.cluster.vbuckets,
                target_vbucket=target_vbuckets)
            gen_update = doc_generator(
                self.key, 0, crud_batch_size,
                vbuckets=self.cluster.vbuckets,
                target_vbucket=target_vbuckets, mutate=1)
            gen_delete = doc_generator(
                self.key, 0, crud_batch_size,
                vbuckets=self.cluster.vbuckets,
                target_vbucket=target_vbuckets)
            self.log.info("Done creating doc_generators")

            # Start CRUD operation based on the given 'doc_op' type
            if doc_ops[0] == "create":
                self.num_items += crud_batch_size
                gen_loader_1 = gen_create
            elif doc_ops[0] in ["update", "replace", "touch"]:
                gen_loader_1 = gen_update
            elif doc_ops[0] == "delete":
                gen_loader_1 = gen_delete
                self.num_items -= crud_batch_size

            if doc_ops[1] == "create":
                gen_loader_2 = gen_create
            elif doc_ops[1] in ["update", "replace", "touch"]:
                gen_loader_2 = gen_update
            elif doc_ops[1] == "delete":
                gen_loader_2 = gen_delete

            # Load initial docs for doc_op_1 in case its op type != "create"
            if doc_ops[2] == "load_initial_docs":
                doc_loading_task = self.task.async_load_gen_docs(
                    self.cluster, bucket, gen_loader_1, "create", 0,
                    batch_size=crud_batch_size, process_concurrency=1,
                    timeout_secs=10,
                    print_ops_rate=False,
                    sdk_client_pool=self.sdk_client_pool)
                self.task_manager.get_task_result(doc_loading_task)
                if doc_loading_task.fail:
                    self.log_failure("Failure while loading initial docs")
                self.summary.add_step("Create docs for %s" % doc_op[0])
                verification_dict["ops_create"] += crud_batch_size
                verification_dict["sync_write_committed_count"] \
                    += crud_batch_size

            # Initialize tasks and store the task objects
            doc_loader_task = self.task.async_load_gen_docs(
                self.cluster, bucket, gen_loader_1, doc_ops[0], 0,
                batch_size=crud_batch_size, process_concurrency=8,
                timeout_secs=60,
                print_ops_rate=False,
                start_task=False,
                sdk_client_pool=self.sdk_client_pool)

            # SDK client for performing individual ops
            client = SDKClient([self.cluster.master], bucket)

            # Perform specified action
            for node in target_nodes:
                error_sim = CouchbaseError(self.log,
                                           self.vbs_in_node[node]["shell"])
                error_sim.create(simulate_error,
                                 bucket_name=bucket.name)
            self.sleep(5, "Wait for error simulation to take effect")

            self.task_manager.add_new_task(doc_loader_task)
            self.sleep(5, "Wait for task_1 CRUDs to reach server")

            # Perform specified CRUD operation on sync_write docs
            tem_gen = deepcopy(gen_loader_2)
            while tem_gen.has_next():
                key, value = tem_gen.next()
                for retry_strategy in [
                        SDKConstants.RetryStrategy.FAIL_FAST,
                        SDKConstants.RetryStrategy.BEST_EFFORT]:
                    if with_sync_write_val:
                        fail = client.crud(doc_ops[1], key, value=value,
                                           exp=0,
                                           durability=with_sync_write_val,
                                           timeout=3, time_unit="seconds",
                                           sdk_retry_strategy=retry_strategy)
                    else:
                        fail = client.crud(doc_ops[1], key, value=value,
                                           exp=0,
                                           timeout=3, time_unit="seconds",
                                           sdk_retry_strategy=retry_strategy)

                    expected_exception = SDKException.AmbiguousTimeoutException
                    retry_reason = \
                        SDKException.RetryReason.KV_SYNC_WRITE_IN_PROGRESS
                    if retry_strategy == SDKConstants.RetryStrategy.FAIL_FAST:
                        expected_exception = \
                            SDKException.RequestCanceledException
                        retry_reason = \
                            SDKException.RetryReason \
                            .KV_SYNC_WRITE_IN_PROGRESS_NO_MORE_RETRIES

                    # Validate the returned error from the SDK
                    if expected_exception not in str(fail["error"]):
                        self.log_failure("Invalid exception for {0}: {1}"
                                         .format(key, fail["error"]))
                    if retry_reason not in str(fail["error"]):
                        self.log_failure("Invalid retry reason for {0}: {1}"
                                         .format(key, fail["error"]))

                    # Try reading the value in SyncWrite in-progress state
                    fail = client.crud("read", key)
                    if doc_ops[0] == "create":
                        # Expected KeyNotFound in case of CREATE operation
                        if fail["status"] is True:
                            self.log_failure(
                                "%s returned value during SyncWrite state: %s"
                                % (key, fail))
                    else:
                        # Expects prev value in case of other operations
                        if fail["status"] is False:
                            self.log_failure(
                                "Key %s read failed for previous value: %s"
                                % (key, fail))

            # Revert the introduced error condition
            for node in target_nodes:
                error_sim = CouchbaseError(self.log,
                                           self.vbs_in_node[node]["shell"])
                error_sim.revert(simulate_error,
                                 bucket_name=bucket.name)

            # Wait for doc_loader_task to complete
            self.task.jython_task_manager.get_task_result(doc_loader_task)

            verification_dict["ops_%s" % doc_op[0]] += crud_batch_size
            verification_dict["sync_write_committed_count"] \
                += crud_batch_size

            # Disconnect the client
            client.close()

        crud_variations = [
            ["create", "create", ""],

            ["update", "update", "load_initial_docs"],
            ["update", "delete", ""],
            ["update", "touch", ""],
            ["update", "replace", ""],

            ["delete", "delete", ""],
            ["delete", "update", "load_initial_docs"],
            ["delete", "touch", "load_initial_docs"],
            ["delete", "replace", "load_initial_docs"]
        ]

        # Select nodes to affect and open required shell_connections
        target_nodes = self.getTargetNodes()

        for b_d_level in self.possible_d_levels[self.bucket_type]:
            # Skip bucket durability level 'None'
            if b_d_level == Bucket.DurabilityLevel.NONE:
                continue

            verification_dict = self.get_cb_stat_verification_dict()

            create_desc = "Creating %s bucket with level '%s'" \
                          % (self.bucket_type, b_d_level)
            self.log.info(create_desc)
            bucket_dict = self.get_bucket_dict(self.bucket_type, b_d_level)

            # Object to support performing CRUDs and create Bucket
            bucket_obj = Bucket(bucket_dict)
            self.bucket_util.create_bucket(self.cluster, bucket_obj,
                                           wait_for_warmup=True)
            self.get_vbucket_type_mapping(bucket_obj.name)
            self.summary.add_step(create_desc)

            for doc_op in crud_variations:
                test_scenario(bucket_obj, doc_op)
                self.summary.add_step("SyncWriteInProgress for [%s, %s]"
                                      % (doc_op[0], doc_op[1]))

            # Cbstats vbucket-details validation
            self.cb_stat_verify(verification_dict)

            # Bucket deletion
            self.bucket_util.delete_bucket(self.cluster, bucket_obj)
            self.summary.add_step("Delete %s bucket" % self.bucket_type)
Example #30
    def test_update_durability_between_doc_op(self):
        """
        1. Create Bucket with durability level set.
        2. Bring down a node such that durability CRUD will wait
        3. Perform doc_op and update bucket_level_durability
        4. Revert scenario induced in step#2, such that doc_op will complete
        5. Make sure doc_ops in step#3 went through using prev. d-level
        """
        # Start from the max durability level so that we can iterate
        # over all lower levels for doc_ops with a level update
        supported_d_levels = deepcopy(self.d_level_order)
        if self.bucket_type == Bucket.Type.EPHEMERAL:
            supported_d_levels = supported_d_levels[0:2]

        supported_d_levels.reverse()
        supported_d_levels += [supported_d_levels[0]]

        create_desc = "Creating %s bucket with level '%s'" \
                      % (self.bucket_type, supported_d_levels[0])

        self.log.info(create_desc)
        bucket_dict = self.get_bucket_dict(self.bucket_type,
                                           supported_d_levels[0])
        # Object to support performing CRUDs and create Bucket
        bucket_obj = Bucket(bucket_dict)
        self.bucket_util.create_bucket(self.cluster, bucket_obj,
                                       wait_for_warmup=True)
        self.get_vbucket_type_mapping(bucket_obj.name)
        self.summary.add_step(create_desc)

        self.bucket_util.print_bucket_stats(self.cluster)

        # Loop to update all other durability levels
        prev_d_level = supported_d_levels[0]
        for bucket_durability in supported_d_levels[1:]:
            target_vb_type, simulate_error = \
                self.durability_helper.get_vb_and_error_type(bucket_durability)

            # Pick a random node to perform error sim and load
            random_node = choice(self.vbs_in_node.keys())
            error_sim = CouchbaseError(
                self.log,
                self.vbs_in_node[random_node]["shell"])

            target_vbs = self.vbs_in_node[random_node][target_vb_type]
            doc_gen = doc_generator(self.key, 0, 1,
                                    target_vbucket=target_vbs)

            doc_load_task = self.task.async_load_gen_docs(
                self.cluster, bucket_obj, doc_gen, "update",
                durability=Bucket.DurabilityLevel.NONE,
                timeout_secs=60,
                start_task=False,
                sdk_client_pool=self.sdk_client_pool)

            # Simulate target error condition
            error_sim.create(simulate_error)
            self.sleep(5, "Wait before starting doc_op")
            self.task_manager.add_new_task(doc_load_task)

            new_d_level = BucketDurability[bucket_durability]
            self.sleep(5, "Wait before updating bucket level "
                          "durability=%s" % new_d_level)

            self.bucket_util.update_bucket_property(
                self.cluster.master,
                bucket_obj,
                bucket_durability=new_d_level)
            self.bucket_util.print_bucket_stats(self.cluster)

            buckets = self.bucket_util.get_all_buckets(self.cluster)
            if buckets[0].durability_level != new_d_level:
                self.log_failure("Failed to update bucket_d_level to %s"
                                 % new_d_level)
            self.summary.add_step("Set bucket-durability=%s" % new_d_level)

            if prev_d_level == Bucket.DurabilityLevel.NONE:
                if not doc_load_task.completed:
                    self.log_failure("Doc-op still pending for d_level 'NONE'")
            elif doc_load_task.completed:
                self.log_failure("Doc-op completed before reverting the "
                                 "error condition: %s" % simulate_error)

            # Revert the induced error condition
            error_sim.revert(simulate_error)

            self.task_manager.get_task_result(doc_load_task)
            if doc_load_task.fail:
                self.log_failure("Doc_op failed")
            self.summary.add_step("Doc_op with previous d_level %s"
                                  % prev_d_level)
            prev_d_level = bucket_durability

        # Delete the bucket on server
        self.bucket_util.delete_bucket(self.cluster, bucket_obj)
        self.summary.add_step("Delete %s bucket" % self.bucket_type)
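BucketDurability (used above) maps the doc-level durability constants to the names the bucket REST API expects. A plausible sketch assuming the documented REST values; treat the exact mapping as an assumption:

    BucketDurability = {
        Bucket.DurabilityLevel.NONE: "none",
        Bucket.DurabilityLevel.MAJORITY: "majority",
        Bucket.DurabilityLevel.MAJORITY_AND_PERSIST_TO_ACTIVE:
            "majorityAndPersistActive",
        Bucket.DurabilityLevel.PERSIST_TO_MAJORITY: "persistToMajority",
    }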