Example #1
    def key_not_exists_test(self):
        client = SDKClient([self.cluster.master], self.bucket)
        collections = BucketUtils.get_random_collections([self.bucket], 1, 1,
                                                         1)
        scope_dict = collections[self.bucket.name]["scopes"]
        scope_name = scope_dict.keys()[0]
        collection_name = scope_dict[scope_name]["collections"].keys()[0]
        client.select_collection(scope_name, collection_name)
        self.log.info("CAS test on collection %s: %s" %
                      (scope_name, collection_name))

        load_gen = doc_generator(self.key, 0, self.num_items, doc_size=256)
        key, val = load_gen.next()

        for _ in range(1500):
            result = client.crud("create",
                                 key,
                                 val,
                                 durability=self.durability_level,
                                 timeout=self.sdk_timeout)
            if result["status"] is False:
                self.log_failure("Create failed: %s" % result)
            create_cas = result["cas"]

            # Delete and verify get fails
            result = client.crud("delete",
                                 key,
                                 durability=self.durability_level,
                                 timeout=self.sdk_timeout)
            if result["status"] is False:
                self.log_failure("Delete failed: %s" % result)
            elif result["cas"] <= create_cas:
                self.log_failure("Delete returned invalid cas: %s" % result)

            result = client.crud("read", key, timeout=self.sdk_timeout)
            if result["status"] is True:
                self.log_failure("Read succeeded after delete: %s" % result)
            elif SDKException.DocumentNotFoundException \
                    not in str(result["error"]):
                self.log_failure("Invalid exception during read "
                                 "for non-exists key: %s" % result)

            # CAS errors do not sleep the test for 10 seconds; we also
            # need to check that the correct error is being thrown
            result = client.crud("replace",
                                 key,
                                 val,
                                 exp=60,
                                 timeout=self.sdk_timeout,
                                 cas=create_cas)
            if result["status"] is True:
                self.log_failure("Replace succeeded after delete: %s" % result)
            if SDKException.DocumentNotFoundException \
                    not in str(result["error"]):
                self.log_failure("Invalid exception during read "
                                 "for non-exists key: %s" % result)

            # Validate doc count as per bucket collections
            self.bucket_util.validate_docs_per_collections_all_buckets()
            self.validate_test_failure()
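Note: every example on this page checks the same result-dict contract returned by SDKClient.crud() ({"status": bool, "cas": long, "error": str}). A minimal self-contained helper, assuming only that dict shape (the helper name is illustrative, not part of the TAF API), would collapse the repeated read-after-delete and replace-after-delete checks above into one call each:

    def assert_failed_with(result, expected_exception, log_failure):
        # Fail unless the CRUD op failed with the expected exception text
        if result["status"] is True:
            log_failure("Operation unexpectedly succeeded: %s" % result)
        elif expected_exception not in str(result["error"]):
            log_failure("Expected '%s' in error, got: %s"
                        % (expected_exception, result["error"]))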
Example #2

    def test_load_duplicate_key_within_same_collection(self):
        client = SDKClient([self.cluster.master], self.bucket,
                           scope=CbServer.default_scope,
                           collection=CbServer.default_collection,
                           compression_settings=self.sdk_compression)
        result = client.crud("create", "test_key-1", "TestValue")
        if result["status"] is True:
            self.log.info("CRUD succeeded first time")
        result = client.crud("create", "test_key-1", "TestValue")
        if result["status"] is True:
            self.fail("CRUD succeeded second time when it should not have")
        elif result["status"] is False:
            self.log.info("CRUD didn't succeed for duplicate key as expected")
Example #3

    def key_not_exists_test(self):
        client = SDKClient(self.rest, self.bucket)
        load_gen = doc_generator(self.key, 0, 1, doc_size=256)
        key, val = load_gen.next()

        for _ in range(1500):
            result = client.crud("create",
                                 key,
                                 val,
                                 durability=self.durability_level,
                                 timeout=self.sdk_timeout)
            if result["status"] is False:
                self.log_failure("Create failed: %s" % result)
            create_cas = result["cas"]

            # Delete and verify get fails
            result = client.crud("delete",
                                 key,
                                 durability=self.durability_level,
                                 timeout=self.sdk_timeout)
            if result["status"] is False:
                self.log_failure("Delete failed: %s" % result)
            elif result["cas"] <= create_cas:
                self.log_failure("Delete returned invalid cas: %s" % result)

            result = client.crud("read", key, timeout=self.sdk_timeout)
            if result["status"] is True:
                self.log_failure("Read succeeded after delete: %s" % result)
            elif DurableExceptions.KeyNotFoundException \
                    not in str(result["error"]):
                self.log_failure("Invalid exception during read "
                                 "for non-exists key: %s" % result)

            # CAS errors do not sleep the test for 10 seconds; we also
            # need to check that the correct error is being thrown
            result = client.crud("replace",
                                 key,
                                 val,
                                 exp=60,
                                 timeout=self.sdk_timeout,
                                 cas=create_cas)
            if result["status"] is True:
                self.log_failure("Replace succeeded after delete: %s" % result)
            if DurableExceptions.KeyNotFoundException \
                    not in str(result["error"]):
                self.log_failure("Invalid exception during read "
                                 "for non-exists key: %s" % result)

            self.validate_test_failure()
Example #4
    def touch_test(self):
        self.log.info("1. Loading initial set of documents")
        load_gen = doc_generator(self.key, 0, self.num_items,
                                 doc_size=self.doc_size)
        self._load_all_buckets(load_gen, "create")
        self.bucket_util._wait_for_stats_all_buckets()
        self.bucket_util.verify_stats_all_buckets(self.num_items)

        self.log.info("2. Loading bucket into DGM")
        dgm_gen = doc_generator(
            self.key, self.num_items, self.num_items+1)
        dgm_task = self.task.async_load_gen_docs(
            self.cluster, self.bucket_util.buckets[0], dgm_gen, "create", 0,
            persist_to=self.persist_to,
            replicate_to=self.replicate_to,
            durability=self.durability_level,
            timeout_secs=self.sdk_timeout,
            batch_size=10,
            process_concurrency=4,
            active_resident_threshold=self.active_resident_threshold)
        self.task_manager.get_task_result(dgm_task)

        self.log.info("3. Touch intial self.num_items docs which are "
                      "residing on disk due to DGM")
        client = SDKClient([self.cluster.master],
                           self.bucket_util.buckets[0])
        while load_gen.has_next():
            key, _ = load_gen.next()
            result = client.crud("touch", key,
                                 durability=self.durability_level,
                                 timeout=self.sdk_timeout)
            if result["status"] is not True:
                self.log_failure("Touch on %s failed: %s" % (key, result))
        client.close()
        self.validate_test_failure()
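The active_resident_threshold argument above drives the loader into DGM (disk-greater-than-memory). A hedged, pure-Python sketch of the stop condition it plausibly applies internally (an assumption, not the actual TAF loader code):

    def reached_dgm(resident_items, total_items, threshold_pct):
        # Keep loading until the active resident ratio drops below target
        return (resident_items * 100.0 / total_items) < threshold_pct

    assert reached_dgm(80, 100, 90)       # 80% resident: DGM reached
    assert not reached_dgm(95, 100, 90)   # still above the target ratio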
Example #5

        def perform_crud_ops():
            old_cas = 0
            client = SDKClient([self.cluster.master], bucket_obj)

            for op_type in ["create", "update", "read", "replace", "delete"]:
                crud_desc = "Key %s, doc_op: %s" % (key, op_type)
                self.log.info(crud_desc)
                result = client.crud(op_type, key, value,
                                     replicate_to=self.replicate_to,
                                     persist_to=self.persist_to)

                if op_type != "read":
                    if op_type != "replace":
                        dict_key = "ops_%s" % op_type
                    else:
                        dict_key = "ops_update"

                    verification_dict[dict_key] += 1
                    verification_dict["sync_write_committed_count"] += 1
                    if result["cas"] == old_cas:
                        self.log_failure("CAS didn't get updated: %s"
                                         % result["cas"])
                elif op_type == "read":
                    if result["cas"] != old_cas:
                        self.log_failure("CAS updated for read operation: %s"
                                         % result["cas"])

                self.summary.add_step(crud_desc)
                old_cas = result["cas"]
            client.close()
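The loop in perform_crud_ops() encodes the core CAS rule: every mutation must return a fresh CAS, while a plain read echoes the stored CAS unchanged. Isolated as a self-contained predicate (illustrative values, not TAF code):

    def cas_ok(op_type, old_cas, new_cas):
        if op_type == "read":
            return new_cas == old_cas   # reads never bump CAS
        return new_cas != old_cas       # mutations must always bump CAS

    assert cas_ok("update", 100, 200)
    assert cas_ok("read", 200, 200)
    assert not cas_ok("replace", 200, 200)   # CAS failed to move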
Example #6
    def test_doc_key_size(self):
        """
        Insert document key with min and max key size on each available
        collection and validate
        :return:
        """
        min_doc_gen = doc_generator("test_min_key_size",
                                    0,
                                    self.num_items,
                                    key_size=1,
                                    doc_size=self.doc_size,
                                    mix_key_size=False,
                                    randomize_doc_size=False)
        max_doc_gen = doc_generator("test_max_key_size",
                                    0,
                                    self.num_items,
                                    key_size=245,
                                    doc_size=self.doc_size,
                                    mix_key_size=False,
                                    randomize_doc_size=False)
        # Set to keep track of all inserted CAS values
        known_cas = set()

        # Client to insert docs under different collections
        client = SDKClient([self.cluster.master],
                           self.bucket,
                           compression_settings=self.sdk_compression)

        for doc_gen in [min_doc_gen, max_doc_gen]:
            while doc_gen.has_next():
                key, value = doc_gen.next()
                for _, scope in self.bucket.scopes.items():
                    for _, collection in scope.collections.items():
                        client.select_collection(scope.name, collection.name)
                        result = client.crud("create",
                                             key,
                                             value,
                                             self.maxttl,
                                             durability=self.durability_level,
                                             timeout=self.sdk_timeout,
                                             time_unit="seconds")
                        if result["status"] is False:
                            self.log_failure(
                                "Doc create failed for key '%s' "
                                "collection::scope %s::%s - %s" %
                                (key, scope.name, collection.name, result))
                        else:
                            if result["cas"] in known_cas:
                                self.log_failure(
                                    "Same CAS exists under different "
                                    "collection: %s" % result)
                            collection.num_items += 1
                            known_cas.add(result["cas"])

        # Close SDK connection
        client.close()

        # Validate doc count as per bucket collections
        self.bucket_util.validate_docs_per_collections_all_buckets()
        self.validate_test_failure()
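Couchbase caps document keys at 250 bytes, which is why the max generator above stops at key_size=245 (presumably leaving room for the generator's own counter suffix; that reading of the parameter is an assumption, not the actual doc_generator implementation). A minimal sketch of fixed-size key generation:

    def make_key(prefix, index, key_size):
        # Pad with zeros (or trim) to an exact byte length
        key = "%s-%d" % (prefix, index)
        return (key + "0" * key_size)[:key_size]

    assert len(make_key("test_max_key_size", 0, 245)) == 245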
Example #7

    def test_nru_eviction_impact_on_cbas(self):
        self.log.info("Create dataset")
        self.cbas_util.create_dataset_on_bucket(self.cb_bucket_name,
                                                self.cbas_dataset_name)

        self.log.info("Connect to Local link")
        self.cbas_util.connect_link()

        self.log.info("Add documents until ram percentage")
        self.load_document_until_ram_percentage()

        self.log.info("Fetch current document count")
        target_bucket = None
        self.bucket_util.get_all_buckets()
        for tem_bucket in self.bucket_util.buckets:
            if tem_bucket.name == self.cb_bucket_name:
                target_bucket = tem_bucket
                break

        item_count = target_bucket.stats.itemCount
        self.log.info("Completed base load with %s items" % item_count)

        self.log.info("Get initial inserted 100 docs, so they aren't removed")
        client = SDKClient([self.cluster.master], target_bucket)
        for doc_index in range(100):
            doc_key = "test_docs-" + str(doc_index)
            client.read(doc_key)

        self.log.info("Add 20% more items to trigger NRU")
        for doc_index in range(item_count, int(item_count * 1.2)):
            doc_key = "key_id-" + str(doc_index)
            op_result = client.crud("create",
                                    doc_key,
                                    '{"name":"dave"}',
                                    durability=self.durability_level)
            if op_result["status"] is False:
                self.log.warning("Insert failed for %s: %s" %
                                 (doc_key, op_result))

        # Disconnect the SDK client
        client.close()

        self.log.info("Validate document count on CBAS")
        count_n1ql = self.rest.query_tool(
            'select count(*) from %s' %
            self.cb_bucket_name)['results'][0]['$1']
        if self.cbas_util.validate_cbas_dataset_items_count(
                self.cbas_dataset_name, count_n1ql):
            pass
        else:
            self.log.info("Document count mismatch might be due to ejection "
                          "of documents on KV. Retry again")
            count_n1ql = self.rest.query_tool(
                'select count(*) from %s' %
                self.cb_bucket_name)['results'][0]['$1']
            self.assertTrue(self.cbas_util.validate_cbas_dataset_items_count(
                self.cbas_dataset_name, count_n1ql),
                            msg="Count mismatch on CBAS")
Example #8

    def test_no_eviction_impact_on_cbas(self):

        self.log.info("Add documents until ram percentage")
        self.load_document_until_ram_percentage()

        self.log.info("Fetch current document count")
        target_bucket = None
        buckets = self.bucket_util.get_all_buckets(self.cluster)
        for tem_bucket in buckets:
            if tem_bucket.name == self.bucket_name:
                target_bucket = tem_bucket
                break
        item_count = target_bucket.stats.itemCount
        self.log.info("Completed base load with %s items" % item_count)

        self.log.info("Load more until we are out of memory")
        client = SDKClient([self.cluster.master], target_bucket)
        i = item_count
        op_result = {"status": True}
        while op_result["status"] is True:
            op_result = client.crud("create",
                                    "key-id" + str(i),
                                    '{"name":"dave"}',
                                    durability=self.durability_level)
            i += 1

        if SDKException.AmbiguousTimeoutException not in op_result["error"] \
                or SDKException.RetryReason.KV_TEMPORARY_FAILURE \
                not in op_result["error"]:
            client.close()
            self.fail("Invalid exception for OOM insert: %s" % op_result)

        self.log.info('Memory is full at {0} items'.format(i))
        self.log.info("As a result added more %s items" % (i - item_count))

        self.log.info("Fetch item count")
        target_bucket = None
        buckets = self.bucket_util.get_all_buckets(self.cluster)
        for tem_bucket in buckets:
            if tem_bucket.name == self.bucket_name:
                target_bucket = tem_bucket
                break
        item_count_when_oom = target_bucket.stats.itemCount
        mem_when_oom = target_bucket.stats.memUsed
        self.log.info('Item count when OOM {0} and memory used {1}'.format(
            item_count_when_oom, mem_when_oom))

        self.log.info("Validate document count on CBAS")
        count_n1ql = self.rest.query_tool('select count(*) from %s' %
                                          self.bucket_name)['results'][0]['$1']

        dataset = self.cbas_util.list_all_dataset_objs()[0]
        if not self.cbas_util.validate_cbas_dataset_items_count(
                self.cluster, dataset.full_name, count_n1ql):
            self.fail("No. of items in CBAS dataset do not match "
                      "that in the KV bucket")
Example #9
        def perform_create_deletes():
            index = 0
            client = SDKClient([self.cluster.master],
                               self.cluster.buckets[0])
            self.log.info("Starting ops to create tomb_stones")
            while not self.stop_thread:
                key = "temp_key--%s" % index
                result = client.crud("create", key, "")
                if result["status"] is False:
                    self.log_failure("Key %s create failed: %s" %
                                     (key, result))
                    break
                result = client.crud("delete", key)
                if result["status"] is False:
                    self.log_failure("Key %s delete failed: %s" %
                                     (key, result))
                    break
                index += 1
            client.close()
            self.log.info("Total keys deleted: %s" % index)
Example #10
    def test_doc_size(self):
        """
        Insert document with empty content and max size on each available
        collection and validate
        :return:
        """
        # Empty docs
        min_doc_size_gen = doc_generator("test_min_doc_size",
                                         0, self.num_items,
                                         key_size=self.key_size,
                                         doc_size=0,
                                         mix_key_size=False,
                                         randomize_doc_size=False)
        # 20 MB docs
        max_doc_size_gen = doc_generator("test_max_doc_size",
                                         0, self.num_items,
                                         key_size=self.key_size,
                                         doc_size=1024 * 1024 * 20,
                                         mix_key_size=False,
                                         randomize_doc_size=False)
        # Dict to keep track of all inserted CAS values
        # Format: known_cas[CAS] = list(vb_nums)
        known_cas = dict()

        # Client to insert docs under different collections
        client = SDKClient([self.cluster.master], self.bucket,
                           compression_settings=self.sdk_compression)

        for doc_gen in [min_doc_size_gen, max_doc_size_gen]:
            while doc_gen.has_next():
                key, value = doc_gen.next()
                for _, scope in self.bucket.scopes.items():
                    for _, collection in scope.collections.items():
                        client.select_collection(scope.name, collection.name)
                        result = client.crud("create", key, value, self.maxttl,
                                             durability=self.durability_level,
                                             timeout=self.sdk_timeout,
                                             time_unit="seconds")
                        if result["status"] is False:
                            self.log_failure("Doc create failed for key '%s' "
                                             "collection::scope %s::%s - %s"
                                             % (key,
                                                scope.name,
                                                collection.name,
                                                result))
                        else:
                            self.__validate_cas_for_key(key, result, known_cas)
                            collection.num_items += 1

        # Close SDK connection
        client.close()

        # Validate doc count as per bucket collections
        self.bucket_util.validate_docs_per_collections_all_buckets()
        self.validate_test_failure()
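The __validate_cas_for_key helper called above is not part of this excerpt. Given the "known_cas[CAS] = list(vb_nums)" comment, a plausible reconstruction (an assumption, not the actual TAF code; get_vbucket_num_for_key does appear in Example #17):

    def __validate_cas_for_key(self, key, result, known_cas):
        # A CAS value may legally repeat across vbuckets (each vbucket
        # runs its own hybrid logical clock), but not within one
        cas = result["cas"]
        vb_num = self.bucket_util.get_vbucket_num_for_key(key)
        if vb_num in known_cas.get(cas, []):
            self.log_failure("Duplicate CAS %s within vbucket %s"
                             % (cas, vb_num))
        known_cas.setdefault(cas, []).append(vb_num)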
Example #11
    def MB36948(self):
        node_to_stop = self.servers[0]
        self.log.info("Adding index/query node")
        self.task.rebalance([self.cluster.master], [self.servers[2]], [],
                            services=["n1ql,index"])
        self.log.info("Creating SDK client connection")
        client = SDKClient([self.cluster.master],
                           self.bucket_util.buckets[0],
                           compression_settings=self.sdk_compression)

        self.log.info("Stopping memcached on: %s" % node_to_stop)
        ssh_conn = RemoteMachineShellConnection(node_to_stop)
        err_sim = CouchbaseError(self.log, ssh_conn)
        err_sim.create(CouchbaseError.STOP_MEMCACHED)

        result = client.crud("create", "abort1", "abort1_val")
        if not result["status"]:
            self.log_failure("Async SET failed")

        result = client.crud("update",
                             "abort1",
                             "abort1_val",
                             durability=self.durability_level,
                             timeout=3,
                             time_unit="seconds")
        if result["status"]:
            self.log_failure("Sync write succeeded")
        if SDKException.DurabilityAmbiguousException not in result["error"]:
            self.log_failure("Invalid exception for sync_write: %s" % result)

        self.log.info("Resuming memcached on: %s" % node_to_stop)
        err_sim.revert(CouchbaseError.STOP_MEMCACHED)

        self.bucket_util._wait_for_stats_all_buckets()
        self.bucket_util.verify_stats_all_buckets(1)

        self.log.info("Closing ssh & SDK connections")
        ssh_conn.disconnect()
        client.close()

        self.validate_test_failure()
Example #12

    def test_ops_on_same_key(self):
        """
        1. Set key
        2. Delete a key
        3. Set the same key
        4. Validate the rev_id for the key is maintained

        Ref: MB-48179
        """
        if self.durability_level in ["", Bucket.DurabilityLevel.NONE]:
            self.fail("Test supported only for sync_write scenarios")

        crud_pattern = self.input.param("crud_pattern", "async:sync:async")
        crud_pattern = crud_pattern.split(":")
        rev_ids = dict()

        client = SDKClient([self.cluster.master], self.cluster.buckets[0])

        # Async create of keys
        for i in range(self.num_items):
            key = self.key + str(i)
            durability = ""
            if crud_pattern[0] == "sync":
                durability = self.durability_level
            client.crud(DocLoading.Bucket.DocOps.CREATE,
                        key, {},
                        durability=durability)

        # Sync delete of keys
        for i in range(self.num_items):
            key = self.key + str(i)
            durability = ""
            if crud_pattern[1] == "sync":
                durability = self.durability_level
            client.crud(DocLoading.Bucket.DocOps.DELETE,
                        key,
                        durability=durability)

        # Async create of keys
        for i in range(self.num_items):
            key = self.key + str(i)
            durability = ""
            if crud_pattern[2] == "sync":
                durability = self.durability_level
            client.crud(DocLoading.Bucket.DocOps.CREATE,
                        key, {},
                        durability=durability)
            result = client.collection.lookupIn(
                key,
                Collections.singletonList(
                    LookupInSpec.get(LookupInMacro.REV_ID).xattr()))
            rev_ids[key] = int(result.contentAs(0, String))
        client.close()

        # Rev_id validation
        for i in range(self.num_items):
            key = self.key + str(i)
            if rev_ids[key] != 3:
                self.fail("Rev id mismatch for key '%s'. RevId: %s" %
                          (key, rev_ids[key]))
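Why the expected rev_id is 3: every mutation on a key increments its revision sequence number, and the delete counts too, since it leaves a tombstone carrying the bumped rev. A one-line sanity check of the arithmetic:

    mutations = ["create", "delete", "create"]   # ops performed per key above
    assert len(mutations) == 3                   # hence rev_id == 3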
Example #13
    def touch_test(self):
        self.log.info("Loading bucket into DGM")
        load_gen = doc_generator(self.key,
                                 0,
                                 self.num_items,
                                 doc_size=self.doc_size)
        dgm_gen = doc_generator(self.key, self.num_items, self.num_items + 1)
        dgm_task = self.task.async_load_gen_docs(
            self.cluster,
            self.bucket_util.buckets[0],
            dgm_gen,
            "create",
            0,
            persist_to=self.persist_to,
            replicate_to=self.replicate_to,
            durability=self.durability_level,
            timeout_secs=self.sdk_timeout,
            batch_size=10,
            process_concurrency=4,
            active_resident_threshold=self.active_resident_threshold)
        self.task_manager.get_task_result(dgm_task)

        self.log.info("Touch intial self.num_items docs which are "
                      "residing on disk due to DGM")
        client = SDKClient([self.cluster.master], self.bucket_util.buckets[0])
        collections = BucketUtils.get_random_collections(
            self.bucket_util.buckets, 2, 2, 1)
        for self.bucket_name, scope_dict in collections.items():
            bucket = BucketUtils.get_bucket_obj(self.bucket_util.buckets,
                                                self.bucket_name)
            scope_dict = scope_dict["scopes"]
            for scope_name, collection_dict in scope_dict.items():
                collection_dict = collection_dict["collections"]
                for c_name, c_data in collection_dict.items():
                    self.log.info("CAS test on collection %s: %s" %
                                  (scope_name, c_name))
                    client.select_collection(scope_name, c_name)
                    while load_gen.has_next():
                        key, _ = load_gen.next()
                        result = client.crud("touch",
                                             key,
                                             durability=self.durability_level,
                                             timeout=self.sdk_timeout)
                        if result["status"] is not True:
                            self.log_failure("Touch on %s failed: %s" %
                                             (key, result))
        client.close()
        self.bucket_util._wait_for_stats_all_buckets()
        # Validate doc count as per bucket collections
        self.bucket_util.validate_docs_per_collections_all_buckets()
        self.validate_test_failure()
Example #14

    def test_durability_impossible(self):
        """
        Create bucket with replica > num_kv_nodes.
        Perform doc insert to make sure we get TimeoutException due to
        durability_impossible from the server.
        """

        verification_dict = self.get_cb_stat_verification_dict()

        key, value = doc_generator("test_key", 0, 1).next()
        for d_level in self.possible_d_levels[self.bucket_type]:
            if d_level == Bucket.DurabilityLevel.NONE:
                continue

            bucket_dict = self.get_bucket_dict(self.bucket_type, d_level)
            # Object to support performing CRUDs
            bucket_obj = Bucket(bucket_dict)
            self.bucket_util.create_bucket(self.cluster,
                                           bucket_obj,
                                           wait_for_warmup=True)
            self.summary.add_step("Create bucket with durability %s" % d_level)

            client = SDKClient([self.cluster.master], bucket_obj)
            result = client.crud("create", key, value, timeout=3)
            if result["status"] is True \
                    or SDKException.DurabilityImpossibleException \
                    not in result["error"]:
                self.log_failure("Indirect sync_write succeeded "
                                 "without enough nodes")
            client.close()

            # Cbstats vbucket-details validation
            self.cb_stat_verify(verification_dict)

            # Delete the created bucket
            self.bucket_util.delete_bucket(self.cluster, bucket_obj)
            self.summary.add_step("Delete bucket with d_level %s" % d_level)
Example #15
    def test_stop_process(self):
        """
        1. Starting loading docs into the default bucket
        2. Stop the requested process, which will impact the
           memcached operations
        3. Wait for load bucket task to complete
        4. Validate the docs for durability
        """
        error_to_simulate = self.input.param("simulate_error", None)
        def_bucket = self.bucket_util.buckets[0]
        target_node = self.getTargetNode()
        remote = RemoteMachineShellConnection(target_node)
        error_sim = CouchbaseError(self.log, remote)
        target_vbuckets = self.getVbucketNumbers(remote, def_bucket.name,
                                                 self.target_node)
        if len(target_vbuckets) == 0:
            self.log.error("No target vbucket list generated to load data")
            remote.disconnect()
            return

        # Create doc_generator targeting only the active/replica vbuckets
        # present in the target_node
        gen_load = doc_generator(self.key,
                                 self.num_items,
                                 self.new_docs_to_add,
                                 key_size=self.key_size,
                                 doc_size=self.doc_size,
                                 doc_type=self.doc_type,
                                 target_vbucket=target_vbuckets,
                                 vbuckets=self.cluster_util.vbuckets)

        if self.atomicity:
            task = self.task.async_load_gen_docs_atomicity(
                self.cluster,
                self.bucket_util.buckets,
                gen_load,
                "create",
                exp=0,
                batch_size=10,
                process_concurrency=self.process_concurrency,
                replicate_to=self.replicate_to,
                persist_to=self.persist_to,
                durability=self.durability_level,
                timeout_secs=self.sdk_timeout,
                update_count=self.update_count,
                transaction_timeout=self.transaction_timeout,
                commit=True,
                sync=self.sync)
        else:
            task = self.task.async_load_gen_docs(
                self.cluster,
                def_bucket,
                gen_load,
                "create",
                exp=0,
                batch_size=1,
                process_concurrency=8,
                replicate_to=self.replicate_to,
                persist_to=self.persist_to,
                durability=self.durability_level,
                timeout_secs=self.sdk_timeout,
                skip_read_on_error=True)

        # Induce the error condition
        error_sim.create(error_to_simulate)

        self.sleep(20, "Wait before reverting the error condition")
        # Revert the simulated error condition and close the ssh session
        error_sim.revert(error_to_simulate)
        remote.disconnect()

        # Wait for doc loading task to complete
        self.task.jython_task_manager.get_task_result(task)
        if not self.atomicity:
            if len(task.fail.keys()) != 0:
                if self.target_node == "active" or self.num_replicas in [2, 3]:
                    self.log_failure("Unwanted failures for keys: %s" %
                                     task.fail.keys())

            validate_passed = \
                self.durability_helper.validate_durability_exception(
                    task.fail,
                    SDKException.DurabilityAmbiguousException)
            if not validate_passed:
                self.log_failure("Unwanted exception seen during validation")

            # Create SDK connection for CRUD retries
            sdk_client = SDKClient([self.cluster.master], def_bucket)
            for doc_key, crud_result in task.fail.items():
                result = sdk_client.crud("create",
                                         doc_key,
                                         crud_result["value"],
                                         replicate_to=self.replicate_to,
                                         persist_to=self.persist_to,
                                         durability=self.durability_level,
                                         timeout=self.sdk_timeout)
                if result["status"] is False:
                    self.log_failure("Retry of doc_key %s failed: %s" %
                                     (doc_key, result["error"]))
            # Close the SDK connection
            sdk_client.close()

        # Update self.num_items
        self.num_items += self.new_docs_to_add

        if not self.atomicity:
            # Validate doc count
            self.bucket_util._wait_for_stats_all_buckets()
            self.bucket_util.verify_stats_all_buckets(self.num_items)

        self.validate_test_failure()
Example #16
    def test_create_scopes(self):
        """
        1. Load data into '_default' collection (if required by test)
        2. Create scope(s) under the bucket
        3. Validate the scopes are created properly
        4. Validate '_default' collection is intact
        """
        num_scopes = self.input.param("num_scopes", 1)
        if self.action_phase == "before_default_load":
            BucketUtils.create_scopes(self.cluster, self.bucket, num_scopes)

        create_gen = doc_generator("scope_create_key",
                                   0,
                                   self.num_items,
                                   doc_size=self.doc_size,
                                   doc_type=self.doc_type,
                                   target_vbucket=self.target_vbucket,
                                   mutation_type="ADD",
                                   mutate=1,
                                   key_size=self.key_size)
        update_gen = doc_generator("scope_create_key",
                                   0,
                                   self.num_items,
                                   doc_size=self.doc_size,
                                   doc_type=self.doc_type,
                                   target_vbucket=self.target_vbucket,
                                   mutation_type="SET",
                                   mutate=2,
                                   key_size=self.key_size)
        self.log.info("Loading %d docs into '_default' collection" %
                      self.num_items)
        client = SDKClient([self.cluster.master],
                           self.bucket,
                           compression_settings=self.sdk_compression)
        while create_gen.has_next():
            key, val = create_gen.next()
            result = client.crud("create",
                                 key,
                                 val,
                                 exp=self.maxttl,
                                 durability=self.durability_level,
                                 timeout=self.sdk_timeout)
            if result["status"] is False:
                self.log_failure("Doc create failed for '_default' collection")
                break
        client.close()
        # Update num_items for default collection
        self.bucket.scopes[CbServer.default_scope] \
            .collections[CbServer.default_collection] \
            .num_items += self.num_items

        # Doc count validation
        self.bucket_util._wait_for_stats_all_buckets()
        # Prints bucket stats after doc_ops
        self.bucket_util.print_bucket_stats()
        self.bucket_util.validate_doc_count_as_per_collections(self.bucket)

        # Perform update mutation
        task = self.task.async_load_gen_docs(
            self.cluster,
            self.bucket,
            update_gen,
            "update",
            self.maxttl,
            batch_size=10,
            process_concurrency=8,
            replicate_to=self.replicate_to,
            persist_to=self.persist_to,
            durability=self.durability_level,
            compression=self.sdk_compression,
            timeout_secs=self.sdk_timeout,
            scope=CbServer.default_scope,
            collection=CbServer.default_collection)

        # Create scope(s) while CRUDs are running in background
        if self.action_phase == "during_default_load":
            BucketUtils.create_scopes(self.cluster, self.bucket, num_scopes)

        # Validate the scope/collection count using cbstats
        for node in self.cluster_util.get_kv_nodes():
            shell_conn = RemoteMachineShellConnection(node)
            cbstats = Cbstats(shell_conn)
            c_data = cbstats.get_collections(self.bucket)
            if c_data["count"] != 1:
                self.log_failure("%s - Expected scope count is '1'."
                                 "Actual: %s" % (node.ip, c_data["count"]))
            if "_default" not in c_data:
                self.log_failure("%s: _default collection missing in cbstats" %
                                 node.ip)

        # Wait for doc_loading to complete
        self.task_manager.get_task_result(task)
        self.validate_test_failure()
Example #17
    def verify_cas(self, ops, generator):
        """
        Verify CAS value manipulation.

        For update, we use the latest CAS value returned by set()
        to do the mutation again and check for exceptions.
        We should be able to mutate that item with the latest CAS value.
        For delete(), after it is called, we try to mutate that item with the
        CAS value returned by delete(). We should see an SDK error;
        otherwise the test should fail.
        For expire, we verify that the latest CAS value of the item can not
        be used to mutate it, because the item has already expired.
        """

        for bucket in self.bucket_util.buckets:
            client = SDKClient(RestConnection(self.cluster.master), bucket)
            gen = generator
            while gen.has_next():
                key, value = gen.next()
                vb_of_key = self.bucket_util.get_vbucket_num_for_key(key)
                active_node_ip = None
                for node_ip in self.shell_conn.keys():
                    if vb_of_key in self.vb_details[node_ip]["active"]:
                        active_node_ip = node_ip
                        break
                self.log.info("Performing %s on key %s" % (ops, key))
                if ops in ["update", "touch"]:
                    for x in range(self.mutate_times):
                        old_cas = client.crud("read", key, timeout=10)["cas"]
                        # value = {"val": "mysql-new-value-%s" % x}
                        if ops == 'update':
                            result = client.crud(
                                "replace",
                                key,
                                value,
                                durability=self.durability_level,
                                cas=old_cas)
                        else:
                            result = client.touch(
                                key,
                                0,
                                durability=self.durability_level,
                                timeout=self.sdk_timeout)

                        if result["status"] is False:
                            client.close()
                            self.log_failure("Touch / replace with cas failed")
                            return

                        new_cas = result["cas"]
                        if old_cas == new_cas:
                            self.log_failure("CAS old (%s) == new (%s)" %
                                             (old_cas, new_cas))

                        if ops == 'update':
                            if json.loads(str(result["value"])) \
                                    != json.loads(value):
                                self.log_failure("Value mismatch. "
                                                 "%s != %s" %
                                                 (result["value"], value))
                            else:
                                self.log.debug(
                                    "Mutate %s with CAS %s successfully! "
                                    "Current CAS: %s" %
                                    (key, old_cas, new_cas))

                        active_read = client.crud("read",
                                                  key,
                                                  timeout=self.sdk_timeout)
                        active_cas = active_read["cas"]
                        replica_cas = -1
                        cas_in_active_node = \
                            self.cb_stat[active_node_ip].vbucket_details(
                                bucket.name)[str(vb_of_key)]["max_cas"]
                        if str(cas_in_active_node) != str(new_cas):
                            self.log_failure("CbStats CAS mismatch. %s != %s" %
                                             (cas_in_active_node, new_cas))

                        poll_count = 0
                        max_retry = 5
                        while poll_count < max_retry:
                            replica_read = client.getFromReplica(
                                key, ReplicaMode.FIRST)[0]
                            replica_cas = replica_read["cas"]
                            if active_cas == replica_cas \
                                    or self.durability_level:
                                break
                            poll_count = poll_count + 1
                            self.sleep(1, "Retry read CAS from replica..")

                        if active_cas != replica_cas:
                            self.log_failure("Replica cas mismatch. %s != %s" %
                                             (new_cas, replica_cas))
                elif ops == "delete":
                    old_cas = client.crud("read", key, timeout=10)["cas"]
                    result = client.crud("delete",
                                         key,
                                         durability=self.durability_level,
                                         timeout=self.sdk_timeout)
                    self.log.info("CAS after delete of key %s: %s" %
                                  (key, result["cas"]))
                    result = client.crud("replace",
                                         key,
                                         "test",
                                         durability=self.durability_level,
                                         timeout=self.sdk_timeout,
                                         cas=old_cas)
                    if result["status"] is True:
                        self.log_failure("The item should already be deleted")
                    if DurableExceptions.KeyNotFoundException \
                            not in result["error"]:
                        self.log_failure("Invalid Excepetion: %s" % result)
                    if result["cas"] != 0:
                        self.log_failure("Delete returned invalid cas: %s, "
                                         "Expected 0" % result["cas"])
                    if result["cas"] == old_cas:
                        self.log_failure("Deleted doc returned old cas: %s " %
                                         old_cas)
                elif ops == "expire":
                    old_cas = client.crud("read", key, timeout=10)["cas"]
                    result = client.crud("touch", key, exp=self.expire_time)
                    if result["status"] is True:
                        if result["cas"] == old_cas:
                            self.log_failure("Touch failed to update CAS")
                    else:
                        self.log_failure("Touch operation failed")

                    self.sleep(self.expire_time + 1, "Wait for item to expire")
                    result = client.crud("replace",
                                         key,
                                         "test",
                                         durability=self.durability_level,
                                         timeout=self.sdk_timeout,
                                         cas=old_cas)
                    if result["status"] is True:
                        self.log_failure("Able to mutate %s with old cas: %s" %
                                         (key, old_cas))
                    if ClientException.KeyNotFoundException \
                            not in result["error"]:
                        self.log_failure("Invalid error after expiry: %s" %
                                         result)
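The replica-poll loop in verify_cas() (poll_count/max_retry) exists because non-durable replication is asynchronous: the replica may briefly lag the active vbucket's CAS. Isolated as a self-contained sketch:

    import time

    def wait_for_replica_cas(read_replica_cas, active_cas,
                             retries=5, delay=1):
        # Retry the replica read before declaring a CAS mismatch
        for _ in range(retries):
            if read_replica_cas() == active_cas:
                return True
            time.sleep(delay)
        return False

    assert wait_for_replica_cas(lambda: 42, 42)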
Example #18

    def test_sync_write_in_progress(self):
        doc_ops = self.input.param("doc_ops", "create;create").split(';')
        shell_conn = dict()
        cbstat_obj = dict()
        error_sim = dict()
        vb_info = dict()
        active_vbs = dict()
        replica_vbs = dict()

        # Override d_level, error_simulation type based on d_level
        self.__get_d_level_and_error_to_simulate()

        # Acquire SDK client from the pool for performing doc_ops locally
        client = SDKClient([self.cluster.master], self.bucket)

        target_nodes = DurabilityHelper.getTargetNodes(self.cluster,
                                                       self.nodes_init,
                                                       self.num_nodes_affected)
        for node in target_nodes:
            shell_conn[node.ip] = RemoteMachineShellConnection(node)
            cbstat_obj[node.ip] = Cbstats(node)
            vb_info["init"] = dict()
            vb_info["init"][node.ip] = cbstat_obj[node.ip].vbucket_seqno(
                self.bucket.name)
            error_sim[node.ip] = CouchbaseError(self.log, shell_conn[node.ip])
            # Fetch affected nodes' active and replica vbucket lists
            active_vbs[node.ip] = cbstat_obj[node.ip].vbucket_list(
                self.bucket.name, vbucket_type="active")
            replica_vbs[node.ip] = cbstat_obj[node.ip].vbucket_list(
                self.bucket.name, vbucket_type="replica")

        if self.durability_level \
                == Bucket.DurabilityLevel.MAJORITY_AND_PERSIST_TO_ACTIVE:
            target_vbs = active_vbs
            target_vbuckets = list()
            for target_node in target_nodes:
                target_vbuckets += target_vbs[target_node.ip]
        else:
            target_vbuckets = replica_vbs[target_nodes[0].ip]
            if len(target_nodes) > 1:
                index = 1
                while index < len(target_nodes):
                    target_vbuckets = list(
                        set(target_vbuckets).intersection(
                            set(replica_vbs[target_nodes[index].ip])))
                    index += 1

        doc_load_spec = dict()
        doc_load_spec["doc_crud"] = dict()
        doc_load_spec["doc_crud"][MetaCrudParams.DocCrud.COMMON_DOC_KEY] \
            = "test_collections"
        doc_load_spec[MetaCrudParams.TARGET_VBUCKETS] = target_vbuckets
        doc_load_spec[MetaCrudParams.COLLECTIONS_CONSIDERED_FOR_CRUD] = 5
        doc_load_spec[MetaCrudParams.SCOPES_CONSIDERED_FOR_CRUD] = "all"
        doc_load_spec[MetaCrudParams.DURABILITY_LEVEL] = self.durability_level
        doc_load_spec[MetaCrudParams.SDK_TIMEOUT] = 60

        if doc_ops[0] == DocLoading.Bucket.DocOps.CREATE:
            doc_load_spec["doc_crud"][
                MetaCrudParams.DocCrud.CREATE_PERCENTAGE_PER_COLLECTION] = 1
        elif doc_ops[0] == DocLoading.Bucket.DocOps.UPDATE:
            doc_load_spec["doc_crud"][
                MetaCrudParams.DocCrud.UPDATE_PERCENTAGE_PER_COLLECTION] = 1
        elif doc_ops[0] == DocLoading.Bucket.DocOps.REPLACE:
            doc_load_spec["doc_crud"][
                MetaCrudParams.DocCrud.REPLACE_PERCENTAGE_PER_COLLECTION] = 1
        elif doc_ops[0] == DocLoading.Bucket.DocOps.DELETE:
            doc_load_spec["doc_crud"][
                MetaCrudParams.DocCrud.DELETE_PERCENTAGE_PER_COLLECTION] = 1

        # Induce error condition for testing
        for node in target_nodes:
            error_sim[node.ip].create(self.simulate_error,
                                      bucket_name=self.bucket.name)
            self.sleep(3, "Wait for error simulation to take effect")

        doc_loading_task = \
            self.bucket_util.run_scenario_from_spec(
                self.task,
                self.cluster,
                self.cluster.buckets,
                doc_load_spec,
                async_load=True)

        self.sleep(5, "Wait for doc ops to reach server")

        for bucket, s_dict in doc_loading_task.loader_spec.items():
            for s_name, c_dict in s_dict["scopes"].items():
                for c_name, c_meta in c_dict["collections"].items():
                    client.select_collection(s_name, c_name)
                    for op_type in c_meta:
                        key, value = c_meta[op_type]["doc_gen"].next()
                        if self.with_non_sync_writes:
                            fail = client.crud(doc_ops[1],
                                               key,
                                               value,
                                               exp=0,
                                               timeout=2,
                                               time_unit="seconds")
                        else:
                            fail = client.crud(
                                doc_ops[1],
                                key,
                                value,
                                exp=0,
                                durability=self.durability_level,
                                timeout=2,
                                time_unit="seconds")

                        expected_exception = \
                            SDKException.AmbiguousTimeoutException
                        retry_reason = \
                            SDKException.RetryReason.KV_SYNC_WRITE_IN_PROGRESS
                        if doc_ops[0] == DocLoading.Bucket.DocOps.CREATE \
                                and doc_ops[1] in \
                                [DocLoading.Bucket.DocOps.DELETE,
                                 DocLoading.Bucket.DocOps.REPLACE]:
                            expected_exception = \
                                SDKException.DocumentNotFoundException
                            retry_reason = None

                        # Validate the returned error from the SDK
                        if expected_exception not in str(fail["error"]):
                            self.log_failure("Invalid exception for %s: %s" %
                                             (key, fail["error"]))
                        if retry_reason \
                                and retry_reason not in str(fail["error"]):
                            self.log_failure(
                                "Invalid retry reason for %s: %s" %
                                (key, fail["error"]))

                        # Try reading the value in SyncWrite state
                        fail = client.crud("read", key)
                        if doc_ops[0] == "create":
                            # Expected KeyNotFound in case of CREATE op
                            if fail["status"] is True:
                                self.log_failure(
                                    "%s returned value during SyncWrite %s" %
                                    (key, fail))
                        else:
                            # Expects prev val in case of other operations
                            if fail["status"] is False:
                                self.log_failure(
                                    "Key %s read failed for prev value: %s" %
                                    (key, fail))

        # Revert the introduced error condition
        for node in target_nodes:
            error_sim[node.ip].revert(self.simulate_error,
                                      bucket_name=self.bucket.name)

        # Wait for doc_loading to complete
        self.task_manager.get_task_result(doc_loading_task)
        self.bucket_util.validate_doc_loading_results(doc_loading_task)
        if doc_loading_task.result is False:
            self.log_failure("Doc CRUDs failed")

        # Release the acquired SDK client
        client.close()
        self.validate_test_failure()
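The expectation matrix encoded above, as a standalone predicate: a second write against a key with an in-flight SyncWrite normally times out with the KV_SYNC_WRITE_IN_PROGRESS retry reason, but shifts to DocumentNotFound when the pending op is a create and the follow-up requires an existing document:

    def expected_error(pending_op, second_op):
        if pending_op == "create" and second_op in ("delete", "replace"):
            return "DocumentNotFoundException", None
        return "AmbiguousTimeoutException", "KV_SYNC_WRITE_IN_PROGRESS"

    assert expected_error("create", "replace")[0] \
        == "DocumentNotFoundException"
    assert expected_error("update", "update")[1] \
        == "KV_SYNC_WRITE_IN_PROGRESS"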
Example #19
class basic_ops(BaseTestCase):
    def setUp(self):
        super(basic_ops, self).setUp()
        self.test_log = logging.getLogger("test")
        self.key = 'test_docs'.rjust(self.key_size, '0')

        nodes_init = self.cluster.servers[1:self.nodes_init] if self.nodes_init != 1 else []
        self.task.rebalance([self.cluster.master], nodes_init, [])
        self.cluster.nodes_in_cluster.extend([self.cluster.master] + nodes_init)
        self.bucket_util.add_rbac_user()
        
        if self.default_bucket:
            self.bucket_util.create_default_bucket(
                replica=self.num_replicas,
                compression_mode=self.compression_mode,
                ram_quota=100)
           
        time.sleep(10)
        self.def_bucket = self.bucket_util.get_all_buckets()
        self.client = VBucketAwareMemcached(RestConnection(self.cluster.master), self.def_bucket[0])
        self.__durability_level()
        self.create_Transaction()
        self._stop = threading.Event()
        self.log.info("==========Finished Basic_ops base setup========")

    def tearDown(self):
        self.client.close()
        super(basic_ops, self).tearDown()
        
    def __durability_level(self):
        if self.durability_level == "MAJORITY":
            self.durability = 1
        elif self.durability_level == "MAJORITY_AND_PERSIST_ON_MASTER":
            self.durability = 2
        elif self.durability_level == "PERSIST_TO_MAJORITY":
            self.durability = 3
        elif self.durability_level == "ONLY_NONE":
            self.durability = 4
        else:
            self.durability = 0

    def get_doc_generator(self, start, end):
        age = range(5)
        first = ['james', 'sharon']
        body = [''.rjust(self.doc_size - 10, 'a')]
        template = '{{ "age": {0}, "first_name": "{1}", "body": "{2}"}}'
        generator = DocumentGenerator(self.key, template, age, first, body, start=start,
                                      end=end)
        return generator
    
    def set_exception(self, exception):
        self.exception = exception
        raise BaseException("Got an exception {}".format(self.exception))
        
    def __chunks(self, l, n):
        """Yield successive n-sized chunks from l."""
        for i in range(0, len(l), n):
            yield l[i:i + n]
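    # Illustrative usage note (not in the original source): __chunks() feeds
    # each transaction thread an n-sized slice of the key list, e.g.
    #   list(self.__chunks([1, 2, 3, 4, 5], 2)) == [[1, 2], [3, 4], [5]]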
            
    def create_Transaction(self, client=None):
        if not client:
            client = self.client
        transaction_config = Transaction().createTransactionConfig(self.transaction_timeout, self.durability)
        try:
            self.transaction = Transaction().createTansaction(client.cluster, transaction_config)
        except Exception as e:
            self.set_exception(e)
        
    def __thread_to_transaction(self, transaction, op_type, doc, txn_commit, update_count=1, sync=True, set_exception=True, client=None):
        if not client:
           client = self.client 
        if op_type == "create":
            exception = Transaction().RunTransaction(transaction, [client.collection], doc, [], [], txn_commit, sync, update_count)
        elif op_type == "update":
            self.test_log.info("updating all the keys through threads")
            exception = Transaction().RunTransaction(transaction, [client.collection], [], doc, [], txn_commit, sync, update_count)
        elif op_type == "delete":
            exception = Transaction().RunTransaction(transaction, [client.collection], [], [], doc, txn_commit, sync, update_count)
        if set_exception:
            if exception:
                self.set_exception("Failed")
 
        
    def doc_gen(self, num_items, start=0, value=None):
        # avoid a shared mutable default argument
        if value is None:
            value = {'value': 'value1'}
        self.docs = []
        self.keys = []
        self.content = self.client.translate_to_json_object(value)
        for i in range(start, num_items):
            key = "test_docs-" + str(i)
            doc = Tuples.of(key, self.content)
            self.keys.append(key)
            self.docs.append(doc)
            
    def verify_doc(self, num_items, client):
        for i in range(num_items):
            key = "test_docs-" + str(i)
            result = client.read(key)
            actual_val = self.client.translate_to_json_object(result['value'])
            self.assertEquals(self.content, actual_val)
        
        
    def test_MultiThreadTxnLoad(self):
        # Atomicity.basic_retry.basic_ops.test_MultiThreadTxnLoad,num_items=1000
        ''' Load data through a transaction, then update half the items and
        delete the other half through different threads. If update_retry is
        set, update and delete the same keys in two different transactions
        and make sure the update fails '''
        
        self.num_txn = self.input.param("num_txn", 9)
        self.update_retry = self.input.param("update_retry", False)
        
        self.doc_gen(self.num_items)
        threads = []
         
        # create the docs   
        exception = Transaction().RunTransaction(self.transaction, [self.client.collection], self.docs, [], [], self.transaction_commit, True, self.update_count)
        if exception:
            self.set_exception("Failed")
            
        if self.update_retry:
            threads.append(threading.Thread(target=self.__thread_to_transaction, args=(self.transaction, "delete", self.keys, self.transaction_commit, self.update_count)))
            threads.append(threading.Thread(target=self.__thread_to_transaction, args=(self.transaction, "update", self.keys, self.transaction_commit, self.update_count)))    
        
        else:
            update_docs = self.__chunks(self.keys[:self.num_items/2], self.num_txn)    
            delete_docs = self.__chunks(self.keys[self.num_items/2:], self.num_txn)
                
            for keys in update_docs:
                threads.append(threading.Thread(target=self.__thread_to_transaction, args=(self.transaction, "update", keys, self.transaction_commit, self.update_count)))
            
            for keys in delete_docs:
                threads.append(threading.Thread(target=self.__thread_to_transaction, args=(self.transaction, "delete", keys, self.transaction_commit, self.update_count)))
        
        for thread in threads:
            thread.start()
            
        for thread in threads:
            thread.join() 
        
        self.sleep(60)
        if self.update_retry: 
            for key in self.keys:
                result = self.client.read(key)
                self.assertEquals(result['status'], False)
                
        else:   
            self.value = {'mutated':1, 'value':'value1'}
            self.content = self.client.translate_to_json_object(self.value)
                        
            self.verify_doc(self.num_items/2, self.client)
                
            for key in self.keys[self.num_items/2:]:
                result = self.client.read(key)
                self.assertEquals(result['status'], False)
                       
 
    def test_basic_retry(self):
        ''' Load a set of data into the cluster, update it through two
        different threads, and make sure the transactions maintain the order
        of updates '''
        self.write_conflict = self.input.param("write_conflict", 2)
        
        self.test_log.info("going to create and execute the task")
        self.gen_create = self.get_doc_generator(0, self.num_items)
        task = self.task.async_load_gen_docs_atomicity(
            self.cluster, self.def_bucket, self.gen_create, "create", exp=0,
            batch_size=10, process_concurrency=8,
            replicate_to=self.replicate_to, persist_to=self.persist_to,
            timeout_secs=self.sdk_timeout, retries=self.sdk_retries,
            update_count=self.update_count,
            transaction_timeout=self.transaction_timeout,
            commit=True, durability=self.durability_level, sync=self.sync)
    
        self.task.jython_task_manager.get_task_result(task)
        
        self.test_log.info("get all the keys in the cluster")
        
        self.doc_gen(self.num_items)
                
        threads = []
        for update_count in [2, 4, 6]:
            threads.append(threading.Thread(target=self.__thread_to_transaction, args=(self.transaction, "update", self.keys, self.transaction_commit, update_count)))
        # Expected final mutation count depends on whether the txns commit
        if self.transaction_commit:
            self.update_count = 6
        else:
            self.update_count = 0
            
        for thread in threads:
            thread.start()
            self.sleep(2)
         
        for thread in threads:
            thread.join()
        
        self.sleep(10)    
            
        task = self.task.async_load_gen_docs_atomicity(
            self.cluster, self.def_bucket, self.gen_create, "verify", exp=0,
            batch_size=10, process_concurrency=8,
            replicate_to=self.replicate_to, persist_to=self.persist_to,
            timeout_secs=self.sdk_timeout, retries=self.sdk_retries,
            update_count=self.update_count,
            transaction_timeout=self.transaction_timeout,
            commit=True, durability=self.durability_level)
    
        self.task.jython_task_manager.get_task_result(task)
        
    def test_basic_retry_async(self):
        self.test_log.info("going to create and execute the task")
        self.gen_create = self.get_doc_generator(0, self.num_items)
        task = self.task.async_load_gen_docs_atomicity(
            self.cluster, self.def_bucket, self.gen_create, "create", exp=0,
            batch_size=10, process_concurrency=1,
            replicate_to=self.replicate_to, persist_to=self.persist_to,
            timeout_secs=self.sdk_timeout, retries=self.sdk_retries,
            update_count=self.update_count,
            transaction_timeout=self.transaction_timeout,
            commit=True, durability=self.durability_level, sync=True,
            num_threads=1)
    
        self.task.jython_task_manager.get_task_result(task)
        
        
        self.test_log.info("get all the keys in the cluster")
        keys = ["test_docs-0"]*20
        
        exception = Transaction().RunTransaction(self.transaction, [self.client.collection], [], keys, [], self.transaction_commit, False, 0)
        if exception:
            self.set_exception(Exception(exception)) 

        
    def basic_concurrency(self):
        self.crash = self.input.param("crash", False)
        
        self.doc_gen(self.num_items)

        # run transaction
        thread = threading.Thread(target=self.__thread_to_transaction, args=(self.transaction, "create", self.docs, self.transaction_commit, self.update_count, True, False))
        thread.start()
        self.sleep(1)
        
        if self.crash:
            self.client.cluster.shutdown() 
            self.transaction.close()
            print "going to create a new transaction"
            self.client1 = VBucketAwareMemcached(RestConnection(self.cluster.master), self.def_bucket[0])
            self.create_Transaction(self.client1)
            self.sleep(self.transaction_timeout+60)
            exception = Transaction().RunTransaction(self.transaction, [self.client1.collection], self.docs, [], [], self.transaction_commit, self.sync, self.update_count)
            if exception:
                time.sleep(60)
                
            self.verify_doc(self.num_items, self.client1)
            self.client1.close() 

        else:
            key = "test_docs-0"
            # insert will fail
            result = self.client.insert(key, "value")
            self.assertEqual(result["status"], False)
            
            # Update should pass
            result = self.client.upsert(key,"value")
            self.assertEqual(result["status"], True) 
            
            # delete should pass
            result = self.client.delete(key)
            self.assertEqual(result["status"], True) 
        
        thread.join()
            
    def test_stop_loading(self):
        ''' Load through transactions and close the transaction abruptly; then
        create a new transaction, sleep for 60 seconds, and perform create on
        the same set of docs '''
        self.num_txn = self.input.param("num_txn", 9)
        self.doc_gen(self.num_items)
        threads = []
        
        docs = list(self.__chunks(self.docs, len(self.docs)/self.num_txn))
        
        for doc in docs: 
            threads.append(threading.Thread(target=self.__thread_to_transaction, args=(self.transaction, "create", doc, self.transaction_commit, self.update_count, True, False)))
          
        for thread in threads:
            thread.start()
        
        self.client.cluster.shutdown()       
        self.transaction.close()
          
        self.client1 = VBucketAwareMemcached(RestConnection(self.cluster.master), self.def_bucket[0])
        self.create_Transaction(self.client1)
        self.sleep(self.transaction_timeout + 60)  # wait transaction_timeout + 60s so that transaction cleanup can happen
        
        self.test_log.info("going to start the load")  
        for doc in docs:
            exception = Transaction().RunTransaction(self.transaction, [self.client1.collection], doc, [], [], self.transaction_commit, self.sync, self.update_count)
            if exception:
                time.sleep(60)

        self.verify_doc(self.num_items, self.client1)  
        self.client1.close()   
            
    def __insert_sub_doc_and_validate(self, doc_id, op_type, key, value):
        _, failed_items = self.client.crud(
            op_type,
            doc_id,
            [key, value],
            durability=self.durability_level,
            timeout=self.sdk_timeout,
            time_unit="seconds",
            create_path=True,
            xattr=True)
        self.assertFalse(failed_items, "Subdoc Xattr insert failed")
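    # Illustrative call (key and xattr names are examples only):
    #   self.__insert_sub_doc_and_validate("test_docs-0", "subdoc_insert",
    #                                      "my.attr", "v" * 10)
    # writes the extended attribute "my.attr" on the doc, creating the
    # intermediate path if needed (create_path=True, xattr=True).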
    
    def __read_doc_and_validate(self, doc_id, expected_val, subdoc_key=None):
        if subdoc_key:
            success, failed_items = self.client.crud("subdoc_read",
                                                     doc_id,
                                                     subdoc_key,
                                                     xattr=True)
            self.assertFalse(failed_items, "Xattr read failed")
            self.assertEqual(expected_val,
                             str(success[doc_id]["value"][0]),
                             "Sub_doc value mismatch: %s != %s"
                             % (success[doc_id]["value"][0],
                                expected_val))  
                  
    def test_TxnWithXattr(self):
        self.system_xattr = self.input.param("system_xattr", False)
        
        if self.system_xattr:
            xattr_key = "my._attr"
        else:
            xattr_key = "my.attr"
        
        val = "v" * self.doc_size
        self.doc_gen(self.num_items)
            
        thread = threading.Thread(target=self.__thread_to_transaction,
                                  args=(self.transaction, "create", self.docs,
                                        self.transaction_commit, self.update_count))

        thread.start()
        self.sleep(1)

        self.__insert_sub_doc_and_validate("test_docs-0", "subdoc_insert",
                                           xattr_key, val)

        thread.join()
        
        if self.transaction_commit:
            self.__read_doc_and_validate("test_docs-0", val, xattr_key)
            
        self.sleep(60)
        self.verify_doc(self.num_items, self.client) 
        
    def test_TxnWithMultipleXattr(self):
        xattrs_to_insert = [["my.attr", "value"],
                            ["new_my.attr", "new_value"]]

        self.doc_gen(self.num_items)
        thread = threading.Thread(target=self.__thread_to_transaction,
                                  args=(self.transaction, "create", self.docs,
                                        self.transaction_commit, self.update_count))

        thread.start()
        self.sleep(1)

        for key, val in xattrs_to_insert:
            self.__insert_sub_doc_and_validate("test_docs-0", "subdoc_insert",
                                               key, val)

        thread.join()
        
        if self.transaction_commit:
            for key, val in xattrs_to_insert:
                self.__read_doc_and_validate("test_docs-0", val, key)
        
        self.sleep(60)
        self.verify_doc(self.num_items, self.client) 
Exemple #20
0
class OpsChangeCasTests(CasBaseTest):
    def setUp(self):
        super(OpsChangeCasTests, self).setUp()
        self.key = "test_cas"
        self.expire_time = self.input.param("expire_time", 35)
        self.item_flag = self.input.param("item_flag", 0)
        self.load_gen = doc_generator(self.key, 0, self.num_items,
                                      doc_size=self.doc_size)
        self.node_data = dict()
        for node in self.cluster_util.get_kv_nodes():
            shell = RemoteMachineShellConnection(node)
            cb_stat = Cbstats(shell)
            self.node_data[node.ip] = dict()
            self.node_data[node.ip]["shell"] = shell
            self.node_data[node.ip]["cb_stat"] = Cbstats(shell)
            self.node_data[node.ip]["active"] = cb_stat.vbucket_list(
                self.bucket,
                "active")
            self.node_data[node.ip]["replica"] = cb_stat.vbucket_list(
                self.bucket,
                "replica")
        if self.sdk_client_pool:
            self.client = self.sdk_client_pool.get_client_for_bucket(
                self.bucket)
        else:
            self.client = SDKClient([self.cluster.master], self.bucket)

    def tearDown(self):
        # Close opened shell connections
        for node_ip in self.node_data.keys():
            self.node_data[node_ip]["shell"].disconnect()

        # Close SDK client connection
        self.client.close()

        super(OpsChangeCasTests, self).tearDown()

    def test_meta_rebalance_out(self):
        KEY_NAME = 'key1'

        for i in range(10):
            # set a key
            value = 'value' + str(i)
            self.client.memcached(KEY_NAME).set(KEY_NAME, 0, 0, json.dumps({'value':value}))
            vbucket_id = self.client._get_vBucket_id(KEY_NAME)
            mc_active = self.client.memcached(KEY_NAME)
            mc_replica = self.client.memcached_for_replica_vbucket(vbucket_id)
            cas_active = mc_active.getMeta(KEY_NAME)[4]

        max_cas = int(mc_active.stats('vbucket-details')['vb_' + str(self.client._get_vBucket_id(KEY_NAME)) + ':max_cas'])
        self.assertTrue(cas_active == max_cas, '[ERROR] Active CAS {0} does not match max_cas {1}'.format(cas_active, max_cas))
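        # The per-vbucket stat key has the form 'vb_<id>:max_cas', e.g.
        # 'vb_512:max_cas'; max_cas is the highest CAS the vbucket has issued,
        # so a freshly written key's CAS should equal it.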

        # remove that node
        self.log.info('Remove the node with active data')
        rebalance = self.cluster.async_rebalance(self.servers[-1:],
                                                 [],
                                                 [self.master])
        rebalance.result()
        replica_cas = mc_replica.getMeta(KEY_NAME)[4]
        get_meta_resp = mc_replica.getMeta(KEY_NAME,
                                           request_extended_meta_data=False)
        # Add the node back
        self.log.info('Add the node back, the max_cas should be healed')
        rebalance = self.cluster.async_rebalance(self.servers[-1:],
                                                 [self.master], [])
        if rebalance.result() is False:
            self.log_failure("Node add-back rebalance failed")

        # verify the CAS is good
        mc_active = self.client.memcached(KEY_NAME)
        active_cas = mc_active.getMeta(KEY_NAME)[4]

        if replica_cas != active_cas:
            self.log_failure("CAS mismatch. Active: %s, Replica: %s"
                             % (active_cas, replica_cas))
        self.validate_test_failure()

    def test_meta_failover(self):
        KEY_NAME = 'key2'

        for i in range(10):
            # set a key
            value = 'value' + str(i)
            self.client.memcached(KEY_NAME).set(KEY_NAME, 0, 0, json.dumps({'value': value}))
            vbucket_id = self.client._get_vBucket_id(KEY_NAME)
            mc_active = self.client.memcached(KEY_NAME)
            mc_master = self.client.memcached_for_vbucket(vbucket_id)
            mc_replica = self.client.memcached_for_replica_vbucket(vbucket_id)

            cas_active = mc_active.getMeta(KEY_NAME)[4]

        max_cas = int(mc_active.stats('vbucket-details')['vb_' + str(self.client._get_vBucket_id(KEY_NAME)) + ':max_cas'])

        self.assertTrue(cas_active == max_cas, '[ERROR] Active CAS {0} does not match max_cas {1}'.format(cas_active, max_cas))

        # failover that node
        self.log.info('Failing over node with active data {0}'.format(self.master))
        self.cluster.failover(self.servers, [self.master])

        self.log.info('Remove the node with active data {0}'.format(self.master))

        rebalance = self.cluster.async_rebalance(self.servers[:], [] ,[self.master])

        rebalance.result()
        self.sleep(60)

        replica_CAS = mc_replica.getMeta(KEY_NAME)[4]

        # add the node back
        self.log.info('Add the node back, the max_cas should be healed')
        rebalance = self.cluster.async_rebalance(self.servers[-1:], [self.master], [])

        rebalance.result()

        # verify the CAS is good
        mc_active = self.client.memcached(KEY_NAME)
        active_CAS = mc_active.getMeta(KEY_NAME)[4]

        get_meta_resp = mc_active.getMeta(KEY_NAME, request_extended_meta_data=False)

        self.assertTrue(replica_CAS == active_CAS, 'CAS mismatch active: {0} replica: {1}'.format(active_CAS, replica_CAS))
        self.assertTrue(get_meta_resp[5] == 1, msg='Metadata indicates conflict resolution is not set')

    def test_meta_soft_restart(self):
        KEY_NAME = 'key2'

        for i in range(10):
            # set a key
            value = 'value' + str(i)
            self.client.memcached(KEY_NAME).set(KEY_NAME, 0, 0, json.dumps({'value': value}))
            vbucket_id = self.client._get_vBucket_id(KEY_NAME)
            mc_active = self.client.memcached(KEY_NAME)
            mc_master = self.client.memcached_for_vbucket(vbucket_id)
            mc_replica = self.client.memcached_for_replica_vbucket(vbucket_id)

            cas_pre = mc_active.getMeta(KEY_NAME)[4]

        max_cas = int(mc_active.stats('vbucket-details')['vb_' + str(self.client._get_vBucket_id(KEY_NAME)) + ':max_cas'])

        self.assertTrue(cas_pre == max_cas, '[ERROR] Pre-restart CAS {0} does not match max_cas {1}'.format(cas_pre, max_cas))

        # restart nodes
        self._restart_server(self.servers[:])

        # verify the CAS is good
        mc_active = self.client.memcached(KEY_NAME)
        cas_post = mc_active.getMeta(KEY_NAME)[4]

        get_meta_resp = mc_active.getMeta(KEY_NAME, request_extended_meta_data=False)

        self.assertTrue(cas_pre == cas_post, 'CAS mismatch pre-restart: {0} post-restart: {1}'.format(cas_pre, cas_post))
        # extended meta is not supported:
        # self.assertTrue(get_meta_resp[5] == 1, msg='Metadata indicates conflict resolution is not set')

    def test_meta_hard_restart(self):
        KEY_NAME = 'key2'

        for i in range(10):
            # set a key
            value = 'value' + str(i)
            self.client.memcached(KEY_NAME).set(KEY_NAME, 0, 0, json.dumps({'value': value}))
            vbucket_id = self.client._get_vBucket_id(KEY_NAME)
            mc_active = self.client.memcached(KEY_NAME)
            mc_master = self.client.memcached_for_vbucket(vbucket_id)
            mc_replica = self.client.memcached_for_replica_vbucket(vbucket_id)

            cas_pre = mc_active.getMeta(KEY_NAME)[4]

        max_cas = int(mc_active.stats('vbucket-details')['vb_' + str(self.client._get_vBucket_id(KEY_NAME)) + ':max_cas'])

        self.assertTrue(cas_pre == max_cas, '[ERROR] Pre-reboot CAS {0} does not match max_cas {1}'.format(cas_pre, max_cas))

        # reboot nodes
        self._reboot_server()

        self.sleep(60)
        # verify the CAS is good
        mc_active = self.client.memcached(KEY_NAME)
        cas_post = mc_active.getMeta(KEY_NAME)[4]

        get_meta_resp = mc_active.getMeta(KEY_NAME, request_extended_meta_data=False)

        self.assertTrue(cas_pre == cas_post, 'CAS mismatch pre-reboot: {0} post-reboot: {1}'.format(cas_pre, cas_post))
        # extended meta is not supported:
        # self.assertTrue(get_meta_resp[5] == 1, msg='Metadata indicates conflict resolution is not set')

    def test_cas_set(self):
        """
        Test Incremental sets on cas and max cas values for keys
        """
        self._load_ops(self.load_gen, 'update', mutations=20)
        self._check_cas(self.load_gen, check_conflict_resolution=False)
        self.validate_test_failure()

    def test_cas_updates(self):
        """
        Test Incremental updates on cas and max cas values for keys
        """
        self._load_ops(self.load_gen, 'update', mutations=20)
        self._load_ops(self.load_gen, 'replace', mutations=20)
        self._check_cas(self.load_gen, check_conflict_resolution=False)
        self.validate_test_failure()

    def test_cas_deletes(self):
        """
        Test Incremental deletes on cas and max cas values for keys
        """
        self._load_ops(self.load_gen, 'create')
        self._load_ops(self.load_gen, 'replace', mutations=20)
        self._check_cas(self.load_gen, check_conflict_resolution=False)
        self._load_ops(self.load_gen, 'delete')
        self._check_cas(self.load_gen, check_conflict_resolution=False,
                        docs_deleted=True)
        self.validate_test_failure()

    ''' Test expiry on cas and max cas values for keys
    '''
    def test_cas_expiry(self):
        self._load_ops(self.load_gen, 'create')
        self._load_ops(self.load_gen, 'expiry')
        self._check_cas(self.load_gen, check_conflict_resolution=False)
        self._check_expiry(self.load_gen)
        self.validate_test_failure()

    def test_cas_touch(self):
        """
        Test touch on cas and max cas values for keys
        """
        self.log.info('Starting test-touch')
        self._load_ops(self.load_gen, 'update', mutations=20)
        self._load_ops(self.load_gen, 'touch')
        self._check_cas(self.load_gen, check_conflict_resolution=False)
        self.validate_test_failure()

    def test_cas_getMeta(self):
        """
        Test getMeta on cas and max cas values for keys
        """
        self.log.info('Starting test-getMeta')
        self._load_ops(ops='set', mutations=20)
        self._check_cas(check_conflict_resolution=False)
        #self._load_ops(ops='add')
        self._load_ops(ops='replace',mutations=20)
        self._check_cas(check_conflict_resolution=False)

        self._load_ops(ops='delete')
        self._check_cas(check_conflict_resolution=False)

    def test_cas_setMeta_lower(self):
        self.log.info('Starting test_cas_setMeta_lower')

        # set some kv
        self._load_ops(ops='set', mutations=1)
        #self._check_cas(check_conflict_resolution=False)

        k = 0
        while k < 10:

            key = "{0}{1}".format(self.prefix, k)
            k += 1

            vbucket_id = self.client._get_vBucket_id(key)
            self.log.info('For key {0} the vbucket is {1}'.format( key,vbucket_id ))
            #print 'vbucket_id is {0}'.format(vbucket_id)
            mc_active = self.client.memcached(key)
            mc_master = self.client.memcached_for_vbucket( vbucket_id )
            #mc_replica = self.client.memcached_for_replica_vbucket(vbucket_id)

            TEST_SEQNO = 123
            TEST_CAS = k

            rc = mc_active.getMeta(key)
            cas = rc[4] + 1

            self.log.info('Key {0} retrieved CAS is {1} and will set CAS to {2}'.format(key, rc[4], cas))
            rev_seqno = rc[3]

            # do a set meta based on the existing CAS
            set_with_meta_resp = mc_active.setWithMeta(key, '123456789', 0, 0, TEST_SEQNO, cas)

            # check what getMeta says
            rc = mc_active.getMeta(key)
            cas_post_meta = rc[4]
            self.log.info('getMeta CAS is {0}'.format(cas_post_meta))
            self.assertTrue(cas_post_meta == cas, 'Meta expected {0} actual {1}'.format(cas, cas_post_meta))

            # and what stats say
            max_cas = int(mc_active.stats('vbucket-details')['vb_' + str(self.client._get_vBucket_id(key)) + ':max_cas'])
            self.log.info('Max CAS for key {0} vbucket is {1}'.format(key, max_cas))
            self.assertTrue(cas_post_meta >= max_cas, '[ERROR] CAS after setWithMeta {0} is lower than max_cas {1}'.format(cas_post_meta, max_cas))
            self.assertTrue(max_cas == cas, '[ERROR] max_cas {0} is not equal to the CAS we set {1}'.format(max_cas, cas))


            # do another mutation and compare
            mc_active.set(key, 0, 0,json.dumps({'value':'value3'}))
            cas = mc_active.getMeta(key)[4]
            max_cas = int( mc_active.stats('vbucket-details')['vb_' + str(self.client._get_vBucket_id(key)) + ':max_cas'] )
            self.assertTrue(max_cas == cas, '[ERROR]Max cas  is not equal to cas {0}'.format(cas))

            # and then mix in a set with meta
            set_with_meta_resp = mc_active.setWithMeta(key, '123456789', 0, 0, 225, max_cas + 1)
            cas_post_meta = mc_active.getMeta(key)[4]

            max_cas = int(mc_active.stats('vbucket-details')['vb_' + str(self.client._get_vBucket_id(key)) + ':max_cas'])
            self.assertTrue(cas_post_meta == max_cas, '[ERROR] CAS after setWithMeta {0} does not match max_cas {1}'.format(cas_post_meta, max_cas))


            # and one more mutation for good measure
            mc_active.set(key, 0, 0,json.dumps({'value':'value3'}))
            cas = mc_active.getMeta(key)[4]
            max_cas = int( mc_active.stats('vbucket-details')['vb_' + str(self.client._get_vBucket_id(key)) + ':max_cas'] )
            self.assertTrue(max_cas == cas, '[ERROR]Max cas  is not equal to cas {0}'.format(cas))

    def test_cas_setMeta_higher(self):
        self.log.info('Starting test_cas_setMeta_higher')
        self._load_ops(ops='set', mutations=20)
        self._check_cas(check_conflict_resolution=False)

        k = 0
        while k < 10:

            key = "{0}{1}".format(self.prefix, k)
            k += 1

            vbucket_id = self.client._get_vBucket_id(key)
            mc_active = self.client.memcached(key)
            mc_master = self.client.memcached_for_vbucket(vbucket_id)
            get_meta_1 = mc_active.getMeta(key, request_extended_meta_data=False)
            TEST_SEQNO = 123
            TEST_CAS = 9966180844186042368

            cas = mc_active.getMeta(key)[4]
            set_with_meta_resp = mc_active.setWithMeta(key, '123456789', 0, 0, TEST_SEQNO, TEST_CAS)

            cas_post_meta = mc_active.getMeta(key)[4]
            get_meta_2 = mc_active.getMeta(key, request_extended_meta_data=False)

            max_cas = int(mc_active.stats('vbucket-details')['vb_' + str(self.client._get_vBucket_id(key)) + ':max_cas'])
            self.assertTrue(cas_post_meta == max_cas, '[ERROR] CAS after setWithMeta {0} does not match max_cas {1}'.format(cas_post_meta, max_cas))
            self.assertTrue(max_cas > cas, '[ERROR] Max cas is not higher than the original cas {0}'.format(cas))

            mc_active.set(key, 0, 0,json.dumps({'value':'value3'}))
            cas = mc_active.getMeta(key)[4]
            max_cas = int( mc_active.stats('vbucket-details')['vb_' + str(self.client._get_vBucket_id(key)) + ':max_cas'] )
            self.assertTrue(max_cas == cas, '[ERROR]Max cas  is not equal to cas {0}'.format(cas))

            set_with_meta_resp = mc_active.setWithMeta(key, '123456789', 0, 0, TEST_SEQNO, max_cas + 1)

            cas_post_meta = mc_active.getMeta(key)[4]
            get_meta_3 = mc_active.getMeta(key, request_extended_meta_data=False)

            max_cas = int(mc_active.stats('vbucket-details')['vb_' + str(self.client._get_vBucket_id(key)) + ':max_cas'])

            self.assertTrue(cas_post_meta == max_cas, '[ERROR] CAS after setWithMeta {0} does not match max_cas {1}'.format(cas_post_meta, max_cas))
            #self.assertTrue(max_cas == cas, '[ERROR]Max cas  is not equal to original cas {0}'.format(cas))

            mc_active.set(key, 0, 0,json.dumps({'value':'value3'}))
            cas = mc_active.getMeta(key)[4]
            max_cas = int( mc_active.stats('vbucket-details')['vb_' + str(self.client._get_vBucket_id(key)) + ':max_cas'] )
            self.assertTrue(max_cas == cas, '[ERROR]Max cas  is not equal to cas {0}'.format(cas))


    ''' Test deleteMeta on cas and max cas values for keys
    '''
    def test_cas_deleteMeta(self):
        self.log.info('Starting test_cas_deleteMeta')

        # load 20 kvs and check the CAS
        self._load_ops(ops='set', mutations=20)
        self.sleep(60)
        self._check_cas(check_conflict_resolution=False)

        k = 0
        test_cas = 456

        while k < 1:

            key = "{0}{1}".format(self.prefix, k)
            k += 1

            vbucket_id = self.client._get_vBucket_id(key)
            mc_active = self.client.memcached(key)
            mc_master = self.client.memcached_for_vbucket( vbucket_id )
            mc_replica = self.client.memcached_for_replica_vbucket(vbucket_id)

            TEST_SEQNO = 123
            test_cas = test_cas + 1

            # get the meta data and set a CAS one higher
            cas = mc_active.getMeta(key)[4] + 1

            set_with_meta_resp = mc_active.setWithMeta(key, '123456789', 0, 0, TEST_SEQNO, cas)

            cas_post_meta = mc_active.getMeta(key)[4]

            # verify the observed CAS is as set
            max_cas = int(mc_active.stats('vbucket-details')['vb_' + str(self.client._get_vBucket_id(key)) + ':max_cas'])

            self.assertTrue(max_cas == cas, '[ERROR] Max cas {0} is not equal to original cas {1}'.format(max_cas, cas))


            mc_active.set(key, 0, 0,json.dumps({'value':'value3'}))
            cas = mc_active.getMeta(key)[4]
            max_cas = int( mc_active.stats('vbucket-details')['vb_' + str(self.client._get_vBucket_id(key)) + ':max_cas'] )
            self.assertTrue(max_cas == cas, '[ERROR]Max cas  is not equal to cas {0}'.format(cas))

            # what is test cas for? Commenting out for now
            """
            set_with_meta_resp = mc_active.setWithMeta(key, '123456789', 0, 0, TEST_SEQNO, test_cas)
            cas_post_meta = mc_active.getMeta(key)[4]

            max_cas = int( mc_active.stats('vbucket-details')['vb_' + str(self.client._get_vBucket_id(key)) + ':max_cas'] )
            self.assertTrue(cas_post_meta < max_cas, '[ERROR]Max cas  is not higher it is lower than {0}'.format(cas_post_meta))
            self.assertTrue(max_cas == cas, '[ERROR]Max cas  is not equal to original cas {0}'.format(cas))
            """

            # test the delete
            mc_active.set(key, 0, 0, json.dumps({'value': 'value3'}))
            cas = mc_active.getMeta(key)[4]
            max_cas = int(mc_active.stats('vbucket-details')['vb_' + str(self.client._get_vBucket_id(key)) + ':max_cas'])
            self.assertTrue(max_cas == cas, '[ERROR] Max cas is not equal to cas {0}'.format(cas))

            self.log.info('Doing delete with meta, using a lower CAS value')
            get_meta_pre = mc_active.getMeta(key)[4]
            del_with_meta_resp = mc_active.del_with_meta(key, 0, 0, TEST_SEQNO, test_cas, test_cas + 1)
            get_meta_post = mc_active.getMeta(key)[4]
            max_cas = int(mc_active.stats('vbucket-details')['vb_' + str(self.client._get_vBucket_id(key)) + ':max_cas'])
            self.assertTrue(max_cas > test_cas + 1, '[ERROR] Max cas {0} is not greater than delete cas {1}'.format(max_cas, test_cas + 1))
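            # del_with_meta is assumed here to take (key, exp, flags, seqno,
            # old_cas, new_cas); the delete carries its own CAS, so the vbucket
            # max_cas is expected to advance past the CAS supplied with it.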




    ''' Testing skipping conflict resolution, whereby the last write wins, and it does neither cas CR nor rev id CR
    '''
    def test_cas_skip_conflict_resolution(self):

        self.log.info(' Starting test_cas_skip_conflict_resolution ..')
        self._load_ops(ops='set', mutations=20)
        self._check_cas(check_conflict_resolution=False)

        k=0

        #Check for first 20 keys
        while k<20:

            key = "{0}{1}".format(self.prefix, k)
            k += 1

            vbucket_id = self.client._get_vBucket_id(key)
            mc_active = self.client.memcached(key)
            mc_master = self.client.memcached_for_vbucket( vbucket_id )
            mc_replica = self.client.memcached_for_replica_vbucket(vbucket_id)

            low_seq=12

            cas = mc_active.getMeta(key)[4]
            pre_seq = mc_active.getMeta(key)[3]
            all = mc_active.getMeta(key)
            self.log.info('all meta data before set_meta_force {0}'.format(all))

            self.log.info('Forcing conflict_resolution to allow insertion of a lower seq number')
            lower_cas = int(cas) - 1
            set_with_meta_resp = mc_active.setWithMeta(key, '123456789', 0, 0, low_seq, lower_cas, 3)
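            # The trailing argument (3) is assumed to be the options flag that
            # makes setWithMeta skip conflict resolution (force-accept the
            # incoming mutation even with a lower seqno/CAS).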
            cas_post_meta = mc_active.getMeta(key)[4]
            all_post_meta = mc_active.getMeta(key)
            post_seq = mc_active.getMeta(key)[3]
            max_cas = int( mc_active.stats('vbucket-details')['vb_' + str(self.client._get_vBucket_id(key)) + ':max_cas'] )
            self.log.info('Expect No conflict_resolution to occur, and the last updated mutation to be the winner..')

            #print 'cas meta data after set_meta_force {0}'.format(cas_post_meta)
            #print 'all meta data after set_meta_force {0}'.format(all_post_meta)
            self.log.info('all meta data after set_meta_force {0}'.format(all_post_meta))

            self.assertTrue(max_cas == cas, '[ERROR]Max cas {0} is not equal to original cas {1}'.format(max_cas, cas))
            self.assertTrue(pre_seq > post_seq, '[ERROR]Pre rev id {0} is not greater than post rev id {1}'.format(pre_seq, post_seq))

    ''' Testing revid based conflict resolution with timeSync enabled, where cas on either mutations match and it does rev id CR
        '''
    def test_revid_conflict_resolution(self):

        self.log.info(' Starting test_revid_conflict_resolution ..')
        self._load_ops(ops='set', mutations=20)
        self._check_cas(check_conflict_resolution=False)

        k=0

        #Check for first 20 keys
        while k<20:

            key = "{0}{1}".format(self.prefix, k)
            k += 1

            vbucket_id = self.client._get_vBucket_id(key)
            mc_active = self.client.memcached(key)
            mc_master = self.client.memcached_for_vbucket( vbucket_id )
            mc_replica = self.client.memcached_for_replica_vbucket(vbucket_id)

            new_seq=121

            cas = mc_active.getMeta(key)[4]
            pre_seq = mc_active.getMeta(key)[3]
            all = mc_active.getMeta(key)
            self.log.info('all meta data before set_meta_force {0}'.format(all))

            self.log.info('Forcing conflict_resolution to rev-id by matching inserting cas ')
            set_with_meta_resp = mc_active.set_with_meta(key, 0, 0, new_seq, cas, '123456789',vbucket_id,
                                add_extended_meta_data=True, conflict_resolution_mode=1)
            cas_post_meta = mc_active.getMeta(key)[4]
            all_post_meta = mc_active.getMeta(key)
            post_seq = mc_active.getMeta(key)[3]
            get_meta_2 = mc_active.getMeta(key, request_extended_meta_data=False)
            max_cas = int(mc_active.stats('vbucket-details')['vb_' + str(self.client._get_vBucket_id(key)) + ':max_cas'])
            self.log.info('Expect rev-id conflict_resolution to occur; the mutation with the higher rev id wins..')
            self.log.info('all meta data after set_meta_force {0}'.format(all_post_meta))

            self.assertTrue(max_cas == cas, '[ERROR] Max cas {0} is not equal to original cas {1}'.format(max_cas, cas))
            self.assertTrue(pre_seq < post_seq, '[ERROR] Pre rev id {0} is not less than post rev id {1}'.format(pre_seq, post_seq))



    ''' Testing conflict resolution, where timeSync is enabled and cas is lower but higher revid, expect Higher Cas to Win
        '''
    def test_cas_conflict_resolution(self):

        self.log.info(' Starting test_cas_conflict_resolution ..')
        self._load_ops(ops='set', mutations=20)
        self._check_cas(check_conflict_resolution=False)

        k=0

        #Check for first 20 keys
        while k<20:

            key = "{0}{1}".format(self.prefix, k)
            k += 1

            vbucket_id = self.client._get_vBucket_id(key)
            mc_active = self.client.memcached(key)
            mc_master = self.client.memcached_for_vbucket( vbucket_id )
            mc_replica = self.client.memcached_for_replica_vbucket(vbucket_id)

            new_seq=121

            cas = mc_active.getMeta(key)[4]
            pre_seq = mc_active.getMeta(key)[3]
            all = mc_active.getMeta(key)
            self.log.info('all meta data before set_meta_force {0}'.format(all))

            lower_cas = int(cas)-100
            self.log.info('Forcing lower rev-id to win with higher CAS value, instead of higher rev-id with Lower Cas ')
            #set_with_meta_resp = mc_active.set_with_meta(key, 0, 0, new_seq, lower_cas, '123456789',vbucket_id)
            try:
                set_with_meta_resp = mc_active.setWithMeta(key, '123456789', 0, 0, new_seq, lower_cas)
            except mc_bin_client.MemcachedError as e:
                # this is expected
                pass

            cas_post_meta = mc_active.getMeta(key)[4]
            all_post_meta = mc_active.getMeta(key)
            post_seq = mc_active.getMeta(key)[3]
            max_cas = int( mc_active.stats('vbucket-details')['vb_' + str(self.client._get_vBucket_id(key)) + ':max_cas'] )
            self.log.info('Expect CAS conflict_resolution to occur, and the first mutation to be the winner..')

            self.log.info('all meta data after set_meta_force {0}'.format(all_post_meta))
            self.assertTrue(max_cas == cas, '[ERROR]Max cas {0} is not equal to original cas {1}'.format(max_cas, cas))
            #self.assertTrue(pre_seq < post_seq, '[ERROR]Pre rev id {0} is not greater than post rev id {1}'.format(pre_seq, post_seq))

    ''' Testing revid based conflict resolution with timeSync enabled, where cas on either mutations match and it does rev id CR
        and retains it after a restart server'''
    def test_restart_revid_conflict_resolution(self):

        self.log.info(' Starting test_restart_revid_conflict_resolution ..')
        self._load_ops(ops='set', mutations=20)

        k = 0
        key = "{0}{1}".format(self.prefix, k)

        vbucket_id = self.client._get_vBucket_id(key)
        mc_active = self.client.memcached(key)
        mc_master = self.client.memcached_for_vbucket(vbucket_id)
        mc_replica = self.client.memcached_for_replica_vbucket(vbucket_id)

        # set a key
        value = 'value0'
        self.client.memcached(key).set(key, 0, 0, json.dumps({'value': value}))

        new_seq=121

        pre_cas = mc_active.getMeta(key)[4]
        pre_seq = mc_active.getMeta(key)[3]
        pre_max_cas = int( mc_active.stats('vbucket-details')['vb_' + str(self.client._get_vBucket_id(key)) + ':max_cas'] )
        all = mc_active.getMeta(key)
        get_meta_1 = mc_active.getMeta(key, request_extended_meta_data=False)
        self.log.info('all meta data before set_meta_force {0}'.format(all))
        self.log.info('max_cas before set_meta_force {0}'.format(pre_max_cas))

        self.log.info('Forcing conflict_resolution to rev-id by matching inserting cas ')
        try:
            set_with_meta_resp = mc_active.set_with_meta(key, 0, 0, new_seq, pre_cas, '123456789',vbucket_id)
        except mc_bin_client.MemcachedError as e:
            # this is expected
            pass
        cas_post = mc_active.getMeta(key)[4]
        all_post_meta = mc_active.getMeta(key)
        post_seq = mc_active.getMeta(key)[3]
        get_meta_2 = mc_active.getMeta(key, request_extended_meta_data=False)
        max_cas_post = int( mc_active.stats('vbucket-details')['vb_' + str(self.client._get_vBucket_id(key)) + ':max_cas'] )
        self.log.info('Expect RevId conflict_resolution to occur, and the last updated mutation to be the winner..')
        self.log.info('all meta data after set_meta_force {0}'.format(all_post_meta))

        #self.assertTrue(max_cas_post == pre_cas, '[ERROR]Max cas {0} is not equal to original cas {1}'.format(max_cas_post, pre_cas))
        #self.assertTrue(pre_seq < post_seq, '[ERROR]Pre rev id {0} is not greater than post rev id {1}'.format(pre_seq, post_seq))


        # Restart Nodes
        self._restart_server(self.servers[:])

        # verify the CAS is good
        mc_active = self.client.memcached(key)
        cas_restart = mc_active.getMeta(key)[4]

        get_meta_resp = mc_active.getMeta(key, request_extended_meta_data=False)

        self.assertTrue(pre_cas == cas_restart, 'CAS mismatch pre-restart: {0} post-restart: {1}'.format(pre_cas, cas_restart))
        #self.assertTrue( get_meta_resp[5] == 1, msg='Metadata indicate conflict resolution is not set')

    ''' Testing revid based conflict resolution with timeSync enabled, where cas on either mutations match and it does rev id CR
        and retains it after a rebalance server'''
    def test_rebalance_revid_conflict_resolution(self):

        self.log.info(' Starting test_rebalance_revid_conflict_resolution ..')
        self._load_ops(ops='set', mutations=20)
        key = 'key1'

        value = 'value'
        self.client.memcached(key).set(key, 0, 0, json.dumps({'value': value}))
        vbucket_id = self.client._get_vBucket_id(key)
        mc_active = self.client.memcached(key)
        mc_master = self.client.memcached_for_vbucket(vbucket_id)
        mc_replica = self.client.memcached_for_replica_vbucket(vbucket_id)

        new_seq=121

        pre_cas = mc_active.getMeta(key)[4]
        pre_seq = mc_active.getMeta(key)[3]
        pre_max_cas = int( mc_active.stats('vbucket-details')['vb_' + str(self.client._get_vBucket_id(key)) + ':max_cas'] )
        all = mc_active.getMeta(key)
        get_meta_1 = mc_active.getMeta(key, request_extended_meta_data=False)
        self.log.info('all meta data before set_meta_force {0}'.format(all))
        self.log.info('max_cas before set_meta_force {0}'.format(pre_max_cas))

        self.log.info('Forcing conflict_resolution to rev-id by matching inserting cas ')
        #set_with_meta_resp = mc_active.set_with_meta(key, 0, 0, new_seq, pre_cas, '123456789',vbucket_id)
        set_with_meta_resp = mc_active.setWithMeta(key, '123456789', 0, 0, new_seq, pre_cas)

        cas_post = mc_active.getMeta(key)[4]
        all_post_meta = mc_active.getMeta(key)
        post_seq = mc_active.getMeta(key)[3]
        get_meta_2 = mc_active.getMeta(key, request_extended_meta_data=False)
        max_cas_post = int( mc_active.stats('vbucket-details')['vb_' + str(self.client._get_vBucket_id(key)) + ':max_cas'] )
        self.log.info('Expect RevId conflict_resolution to occur, and the last updated mutation to be the winner..')
        self.log.info('all meta data after set_meta_force {0}'.format(all_post_meta))

        self.assertTrue(max_cas_post == pre_cas, '[ERROR]Max cas {0} is not equal to original cas {1}'.format(max_cas_post, pre_cas))
        self.assertTrue(pre_seq < post_seq, '[ERROR]Pre rev id {0} is not greater than post rev id {1}'.format(pre_seq, post_seq))

        # remove that node
        self.log.info('Remove the node with active data')

        rebalance = self.cluster.async_rebalance(self.servers[-1:], [] ,[self.master])
        rebalance.result()
        self.sleep(120)
        replica_CAS = mc_replica.getMeta(key)[4]
        get_meta_resp = mc_replica.getMeta(key, request_extended_meta_data=False)

        # add the node back
        self.log.info('Add the node back, the max_cas should be healed')
        rebalance = self.cluster.async_rebalance(self.servers[-1:], [self.master], [])

        rebalance.result()

        # verify the CAS is good
        mc_active = self.client.memcached(key)
        active_CAS = mc_active.getMeta(key)[4]
        self.log.info('Active CAS {0}'.format(active_CAS))

        self.assertTrue(replica_CAS == active_CAS, 'cas mismatch active: {0} replica {1}'.format(active_CAS,replica_CAS))
        #self.assertTrue( get_meta_resp[5] == 1, msg='Metadata indicate conflict resolution is not set')

    ''' Testing revid based conflict resolution with timeSync enabled, where cas on either mutations match and it does rev id CR
        and retains it after a failover server'''
    def test_failover_revid_conflict_resolution(self):

        self.log.info(' Starting test_failover_revid_conflict_resolution ..')
        self._load_ops(ops='set', mutations=20)
        key = 'key1'

        value = 'value'
        self.client.memcached(key).set(key, 0, 0,json.dumps({'value':value}))
        vbucket_id = self.client._get_vBucket_id(key)
        mc_active = self.client.memcached(key)
        mc_master = self.client.memcached_for_vbucket(vbucket_id)
        mc_replica = self.client.memcached_for_replica_vbucket(vbucket_id)

        new_seq=121

        pre_cas = mc_active.getMeta(key)[4]
        pre_seq = mc_active.getMeta(key)[3]
        pre_max_cas = int( mc_active.stats('vbucket-details')['vb_' + str(self.client._get_vBucket_id(key)) + ':max_cas'] )
        all = mc_active.getMeta(key)
        get_meta_1 = mc_active.getMeta(key, request_extended_meta_data=False)
        self.log.info('all meta data before set_meta_force {0}'.format(all))
        self.log.info('max_cas before set_meta_force {0}'.format(pre_max_cas))

        self.log.info('Forcing conflict_resolution to rev-id by matching inserting cas ')
        #set_with_meta_resp = mc_active.set_with_meta(key, 0, 0, new_seq, pre_cas, '123456789',vbucket_id)
        set_with_meta_resp = mc_active.setWithMeta(key, '123456789', 0, 0, new_seq, pre_cas)
        cas_post = mc_active.getMeta(key)[4]
        all_post_meta = mc_active.getMeta(key)
        post_seq = mc_active.getMeta(key)[3]
        get_meta_2 = mc_active.getMeta(key, request_extended_meta_data=False)
        max_cas_post = int( mc_active.stats('vbucket-details')['vb_' + str(self.client._get_vBucket_id(key)) + ':max_cas'] )
        self.log.info('Expect RevId conflict_resolution to occur, and the last updated mutation to be the winner..')
        self.log.info('all meta data after set_meta_force {0}'.format(all_post_meta))

        self.assertTrue(max_cas_post == pre_cas, '[ERROR]Max cas {0} is not equal to original cas {1}'.format(max_cas_post, pre_cas))
        self.assertTrue(pre_seq < post_seq, '[ERROR]Pre rev id {0} is not greater than post rev id {1}'.format(pre_seq, post_seq))

        # failover that node
        self.log.info('Failing over node with active data {0}'.format(self.master))
        self.cluster.failover(self.servers, [self.master])

        self.log.info('Remove the node with active data {0}'.format(self.master))

        rebalance = self.cluster.async_rebalance(self.servers[:], [] ,[self.master])

        rebalance.result()
        self.sleep(120)
        replica_CAS = mc_replica.getMeta(key)[4]
        get_meta_resp = mc_replica.getMeta(key, request_extended_meta_data=False)

        # add the node back
        self.log.info('Add the node back, the max_cas should be healed')
        rebalance = self.cluster.async_rebalance(self.servers[-1:], [self.master], [])

        rebalance.result()

        # verify the CAS is good
        mc_active = self.client.memcached(key)
        active_CAS = mc_active.getMeta(key)[4]

        self.assertTrue(replica_CAS == active_CAS, 'cas mismatch active: {0} replica {1}'.format(active_CAS,replica_CAS))
        #self.assertTrue( get_meta_resp[5] == 1, msg='Metadata indicate conflict resolution is not set')

    ''' Test getMeta on cas and max cas values for empty vbucket
    '''
    def test_cas_getMeta_empty_vBucket(self):
        self.log.info(' Starting test-getMeta')
        self._load_ops(ops='set', mutations=20)

        k=0
        all_keys = []
        while k<10:
            k+=1
            key = "{0}{1}".format(self.prefix, k)
            all_keys.append(key)

        vbucket_ids = self.client._get_vBucket_ids(all_keys)
        self.log.info('vbucket_ids in use: {0}'.format(vbucket_ids))

        # nominate a vbucket id that none of the loaded keys map to
        vb_non_existing = None
        for i in range(1024):
            if i not in vbucket_ids:
                vb_non_existing = i
                break
        if vb_non_existing is None:
            self.log.info('ERROR generating empty vbucket id')
        self.log.info('Nominated vb_non_existing is {0}'.format(vb_non_existing))

        mc_active = self.client.memcached(all_keys[0])  # temp connection to the mc
        #max_cas = int( mc_active.stats('vbucket-details')['vb_' + str(vb_non_existing) + ':max_cas'] )
        max_cas = int(mc_active.stats('vbucket-details')['vb_' + str(self.client._get_vBucket_id(all_keys[0])) + ':max_cas'])
        self.assertTrue(max_cas != 0, msg='[ERROR] Max cas is zero')


    def test_meta_backup(self):
        """
        Test addMeta on cas and max cas values for keys
        """
        self.log.info('Starting test-getMeta')
        self._load_ops(ops='set', mutations=20)

        # Do the backup on the bucket
        self.shell = RemoteMachineShellConnection(self.cluster.master)
        self.buckets = self.bucket_util.buckets
        self.couchbase_login_info = "%s:%s" % (self.input.membase_settings.rest_username,
                                               self.input.membase_settings.rest_password)
        self.backup_location = "/tmp/backup"
        self.command_options = self.input.param("command_options", '')
        try:
            self.shell.execute_cluster_backup(
                self.couchbase_login_info,
                self.backup_location,
                self.command_options)
            self.sleep(5, "Wait before restore backup")
            self.shell.restore_backupFile(self.couchbase_login_info,
                                          self.backup_location,
                                          [bucket.name for bucket in self.buckets])
            self.log.info("Done with restore")
        finally:
            self._check_cas(check_conflict_resolution=False)

    def _check_cas(self, load_gen, check_conflict_resolution=False,
                   time_sync=None, docs_deleted=False):
        """
        Common function to verify the expected values on cas
        """
        load_gen = copy.deepcopy(load_gen)
        self.log.info('Verifying CAS and max-cas for the keys')
        while load_gen.has_next():
            key, value = load_gen.next()
            cas = self.client.crud("read", key)["cas"]
            if docs_deleted:
                if cas != 0:
                    self.log_failure("Max CAS mismatch. %s != 0" % cas)
            else:
                max_cas = None
                vb_for_key = self.bucket_util.get_vbucket_num_for_key(key)
                for _, data in self.node_data.items():
                    if vb_for_key in data["active"]:
                        vb_stat = data["cb_stat"].vbucket_details(
                            self.bucket.name)
                        max_cas = long(vb_stat[str(vb_for_key)]["max_cas"])
                        break
                if cas != max_cas:
                    self.log_failure("Max CAS mismatch. %s != %s"
                                     % (cas, max_cas))
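                # For a live doc the SDK-visible CAS must equal the owning
                # vbucket's max_cas from 'vbucket-details'; CAS is a 64-bit
                # value, hence the long() conversion under Python 2.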

            if check_conflict_resolution:
                # NOTE: this branch assumes a direct memcached connection
                # (mc_active) for the key; callers in this class always pass
                # check_conflict_resolution=False
                get_meta_resp = mc_active.getMeta(
                    key,
                    request_extended_meta_data=False)
                if time_sync == 'enabledWithoutDrift':
                    if get_meta_resp[5] != 1:
                        self.log_failure("Metadata indicates "
                                         "conflict resolution is not set")
                elif time_sync == 'disabled':
                    if get_meta_resp[5] != 0:
                        self.log_failure("Metadata indicates "
                                         "conflict resolution is set")

    def _load_ops(self, load_gen, op_type, mutations=1):
        """
        Common function to run create/update/delete/touch ops on the bucket
        """
        self.log.info("Performing %s for %s times" % (op_type, mutations))
        exp = 0
        if op_type == "expiry":
            op_type = "touch"
            exp = self.expire_time

        for _ in range(mutations):
            tasks_info = self.bucket_util.sync_load_all_buckets(
                self.cluster, load_gen, op_type, exp=exp,
                persist_to=self.persist_to, replicate_to=self.replicate_to,
                durability=self.durability_level,
                timeout_secs=self.sdk_timeout,
                batch_size=self.num_items,
                sdk_client_pool=self.sdk_client_pool)
            for task, _ in tasks_info.items():
                if task.fail:
                    self.log_failure("Failures observed during %s" % op_type)

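    # Usage sketch: self._load_ops(self.load_gen, 'update', mutations=20)
    # re-mutates every key 20 times; op_type 'expiry' is mapped to 'touch'
    # with exp=self.expire_time so the docs expire for _check_expiry() below.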
    def _check_expiry(self, load_gen):
        """
        Check if num_items are expired as expected
        """
        self.sleep(self.expire_time, "Wait for docs to expire")

        while load_gen.has_next():
            key, value = load_gen.next()
            vb_for_key = self.bucket_util.get_vbucket_num_for_key(key)
            cas = None
            for _, data in self.node_data.items():
                if vb_for_key in data["active"]:
                    vb_stat = data["cb_stat"].vbucket_details(self.bucket.name)
                    cas = long(vb_stat[str(vb_for_key)]["max_cas"])
                    break

            replace_result = self.client.crud(
                "replace", key, value,
                replicate_to=self.replicate_to,
                persist_to=self.persist_to,
                durability=self.durability_level,
                timeout=self.sdk_timeout,
                cas=cas)
            if replace_result["status"] is True:
                self.log_failure("Replace on %s succeeded using old CAS %s"
                                 % (key, cas))
            if SDKException.DocumentNotFoundException \
                    not in str(replace_result["error"]):
                self.log_failure("Invalid exception for %s: %s"
                                 % (key, replace_result))
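The helper above relies on the server rejecting any mutation that carries a CAS older than the document's current one. A minimal standalone sketch of that contract, assuming a hypothetical `client` exposing the same dict-returning `crud` API used throughout these examples:

    # Hedged sketch: `client` and its crud() result dict follow the
    # conventions of these tests; this is illustrative, not a real SDK API.
    result = client.crud("create", "stale_cas_key", {"v": 1})
    old_cas = result["cas"]

    # A successful mutation bumps the document's CAS ...
    client.crud("update", "stale_cas_key", {"v": 2})

    # ... so replaying the old CAS must be rejected by the server.
    result = client.crud("replace", "stale_cas_key", {"v": 3}, cas=old_cas)
    assert result["status"] is False, "Replace with a stale CAS must fail"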
Example #21
    def test_bucket_durability_upgrade(self):
        update_task = None
        self.sdk_timeout = 60
        create_batch_size = 10000
        if self.atomicity:
            create_batch_size = 10

        # Check whether sync_write is supported by the initial cluster version
        sync_write_support = True
        if float(self.initial_version[0:3]) < 6.5:
            sync_write_support = False

        if sync_write_support:
            self.verification_dict["rollback_item_count"] = 0
            self.verification_dict["sync_write_aborted_count"] = 0

        if self.upgrade_with_data_load:
            self.log.info("Starting async doc updates")
            update_task = self.task.async_continuous_doc_ops(
                self.cluster, self.bucket, self.gen_load,
                op_type=DocLoading.Bucket.DocOps.UPDATE,
                process_concurrency=1,
                persist_to=1,
                replicate_to=1,
                timeout_secs=30)

        self.log.info("Upgrading cluster nodes to target version")
        node_to_upgrade = self.fetch_node_to_upgrade()
        while node_to_upgrade is not None:
            self.log.info("Selected node for upgrade: %s"
                          % node_to_upgrade.ip)
            self.upgrade_function[self.upgrade_type](node_to_upgrade,
                                                     self.upgrade_version)
            try:
                self.cluster.update_master_using_diag_eval(
                    self.cluster.servers[0])
            except Exception:
                self.cluster.update_master_using_diag_eval(
                    self.cluster.servers[self.nodes_init-1])

            create_gen = doc_generator(self.key, self.num_items,
                                       self.num_items+create_batch_size)
            # Validate sync_write results after upgrade
            if self.atomicity:
                sync_write_task = self.task.async_load_gen_docs_atomicity(
                    self.cluster, self.bucket_util.buckets,
                    create_gen, DocLoading.Bucket.DocOps.CREATE,
                    process_concurrency=1,
                    transaction_timeout=self.transaction_timeout,
                    record_fail=True)
            else:
                sync_write_task = self.task.async_load_gen_docs(
                    self.cluster, self.bucket, create_gen,
                    DocLoading.Bucket.DocOps.CREATE,
                    timeout_secs=self.sdk_timeout,
                    process_concurrency=4,
                    sdk_client_pool=self.sdk_client_pool,
                    skip_read_on_error=True,
                    suppress_error_table=True)
            self.task_manager.get_task_result(sync_write_task)
            self.num_items += create_batch_size

            retry_index = 0
            while retry_index < 5:
                self.sleep(3, "Wait for num_items to match")
                current_items = self.bucket_util.get_bucket_current_item_count(
                    self.cluster, self.bucket)
                if current_items == self.num_items:
                    break
                self.log.debug("Num_items mismatch. Expected: %s, Actual: %s"
                               % (self.num_items, current_items))
                retry_index += 1
            # Doc count validation
            self.cluster_util.print_cluster_stats()

            self.verification_dict["ops_create"] += create_batch_size
            self.summary.add_step("Upgrade %s" % node_to_upgrade.ip)

            # Halt further upgrade if test has failed during current upgrade
            if self.test_failure:
                break

            node_to_upgrade = self.fetch_node_to_upgrade()

        if self.upgrade_with_data_load:
            # Wait for update_task to complete
            update_task.end_task()
            self.task_manager.get_task_result(update_task)
        else:
            self.verification_dict["ops_update"] = 0

        # Cb_stats vb-details validation
        failed = self.durability_helper.verify_vbucket_details_stats(
            self.bucket_util.buckets[0],
            self.cluster_util.get_kv_nodes(),
            vbuckets=self.cluster_util.vbuckets,
            expected_val=self.verification_dict)
        if failed:
            self.log_failure("Cbstat vbucket-details validation failed")
        self.summary.add_step("Cbstats vb-details verification")

        self.validate_test_failure()

        possible_d_levels = dict()
        possible_d_levels[Bucket.Type.MEMBASE] = \
            self.bucket_util.get_supported_durability_levels()
        possible_d_levels[Bucket.Type.EPHEMERAL] = [
            Bucket.DurabilityLevel.NONE,
            Bucket.DurabilityLevel.MAJORITY]
        len_possible_d_levels = len(possible_d_levels[self.bucket_type]) - 1

        if not sync_write_support:
            self.verification_dict["rollback_item_count"] = 0
            self.verification_dict["sync_write_aborted_count"] = 0

        # Perform bucket_durability update
        key, value = doc_generator("b_durability_doc", 0, 1).next()
        client = SDKClient([self.cluster.master], self.bucket_util.buckets[0])
        for index, d_level in enumerate(possible_d_levels[self.bucket_type]):
            self.log.info("Updating bucket_durability=%s" % d_level)
            self.bucket_util.update_bucket_property(
                self.bucket_util.buckets[0],
                bucket_durability=BucketDurability[d_level])
            self.bucket_util.print_bucket_stats()

            buckets = self.bucket_util.get_all_buckets()
            if buckets[0].durability_level != BucketDurability[d_level]:
                self.log_failure("New bucket_durability not taken")

            self.summary.add_step("Update bucket_durability=%s" % d_level)

            self.sleep(10, "MB-39678: Bucket_d_level change to take effect")

            if index == 0:
                op_type = DocLoading.Bucket.DocOps.CREATE
                self.verification_dict["ops_create"] += 1
            elif index == len_possible_d_levels:
                op_type = DocLoading.Bucket.DocOps.DELETE
                self.verification_dict["ops_delete"] += 1
            else:
                op_type = DocLoading.Bucket.DocOps.UPDATE
                if "ops_update" in self.verification_dict:
                    self.verification_dict["ops_update"] += 1

            result = client.crud(op_type, key, value,
                                 timeout=self.sdk_timeout)
            if result["status"] is False:
                self.log_failure("Doc_op %s failed on key %s: %s"
                                 % (op_type, key, result["error"]))
            self.summary.add_step("Doc_op %s" % op_type)
        client.close()

        # Cb_stats vb-details validation
        failed = self.durability_helper.verify_vbucket_details_stats(
            self.bucket_util.buckets[0],
            self.cluster_util.get_kv_nodes(),
            vbuckets=self.cluster_util.vbuckets,
            expected_val=self.verification_dict)
        if failed:
            self.log_failure("Cbstat vbucket-details validation failed")
        self.summary.add_step("Cbstats vb-details verification")
        self.validate_test_failure()
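One fragile spot in the test above is the version gate: `float(self.initial_version[0:3])` reads '6.5.1' correctly but would misparse a hypothetical '6.10.0' as 6.1. A tuple comparison, sketched here under the assumption that versions are dot-separated integers, avoids that:

    def supports_sync_write(version_str):
        # Compare (major, minor) tuples instead of float-slicing the string.
        # Assumes "6.5.1"-style versions with any build suffix stripped.
        major, minor = [int(p) for p in version_str.split(".")[:2]]
        return (major, minor) >= (6, 5)

    assert supports_sync_write("6.5.0") is True
    assert supports_sync_write("6.0.3") is False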
Example #22
    def test_delete_default_collection(self):
        """
        Test to delete '_default' collection under '_default' scope.

        Params:
        client_type: Drives collection deletion through the REST / SDK client.
        load_data: Controls when data is loaded into the default collection.
                   Supports 'disabled / before_drop / during_drop'
        """
        task = None
        client_type = self.input.param("client_type", "sdk").lower()
        data_load = self.input.param("load_data", "disabled")
        load_gen = doc_generator('test_drop_default',
                                 0,
                                 self.num_items,
                                 mutate=0,
                                 target_vbucket=self.target_vbucket)

        if data_load in ["before_drop", "during_drop"]:
            self.log.info("Loading %s docs into '%s::%s' collection" %
                          (self.num_items, CbServer.default_scope,
                           CbServer.default_collection))
            task = self.task.async_load_gen_docs(
                self.cluster,
                self.bucket,
                load_gen,
                "create",
                self.maxttl,
                batch_size=10,
                process_concurrency=8,
                replicate_to=self.replicate_to,
                persist_to=self.persist_to,
                durability=self.durability_level,
                compression=self.sdk_compression,
                timeout_secs=self.sdk_timeout,
                scope=CbServer.default_scope,
                collection=CbServer.default_collection,
                suppress_error_table=True)
            self.bucket.scopes[CbServer.default_scope] \
                .collections[CbServer.default_collection] \
                .num_items += self.num_items

        # To make sure data_loading done before collection drop
        if data_load == "before_drop":
            self.task_manager.get_task_result(task)
            if task.fail:
                self.log_failure("Doc loading failed for keys: %s" %
                                 task.fail.keys())

        # Data validation
        self.bucket_util._wait_for_stats_all_buckets()
        # Prints bucket stats after doc_ops
        self.bucket_util.print_bucket_stats()
        self.bucket_util.validate_doc_count_as_per_collections(self.bucket)

        # Drop collection phase
        self.log.info("Deleting collection '%s::%s'" %
                      (CbServer.default_scope, CbServer.default_collection))
        if client_type == "sdk":
            client = SDKClient([self.cluster.master],
                               self.bucket,
                               compression_settings=self.sdk_compression)
            client.drop_collection(CbServer.default_scope,
                                   CbServer.default_collection)
            client.close()
            BucketUtils.mark_collection_as_dropped(self.bucket,
                                                   CbServer.default_scope,
                                                   CbServer.default_collection)
        elif client_type == "rest":
            self.bucket_util.drop_collection(self.cluster.master, self.bucket,
                                             CbServer.default_scope,
                                             CbServer.default_collection)
        else:
            self.log_failure("Invalid client_type '%s'" % client_type)

        self.sleep(60, "Wait for the dropped collection to sync across nodes")

        # Wait for doc_loading task to complete
        if data_load == "during_drop":
            self.task_manager.get_task_result(task)
            if task.fail:
                self.log.info("Doc loading failed for keys: %s" % task.fail)

        self.bucket_util._wait_for_stats_all_buckets()
        # Prints bucket stats after doc_ops
        self.bucket_util.print_bucket_stats()
        # Validate drop collection using cbstats
        for node in self.cluster_util.get_kv_nodes():
            shell_conn = RemoteMachineShellConnection(node)
            cbstats = Cbstats(shell_conn)
            c_data = cbstats.get_collections(self.bucket)
            expected_collection_count = \
                len(self.bucket_util.get_active_collections(
                    self.bucket,
                    CbServer.default_scope,
                    only_names=True))
            if c_data["count"] != expected_collection_count:
                self.log_failure(
                    "%s - Expected collection count is '%s'. "
                    "Actual: %s" %
                    (node.ip, expected_collection_count, c_data["count"]))
            if "_default" in c_data:
                self.log_failure("%s: _default collection exists in cbstats" %
                                 node.ip)

        # SDK connection to default(dropped) collection to validate failure
        try:
            client = SDKClient([self.cluster.master],
                               self.bucket,
                               scope=CbServer.default_scope,
                               collection=CbServer.default_collection,
                               compression_settings=self.sdk_compression)
            result = client.crud("create", "test_key-1", "TestValue")
            if result["status"] is True:
                self.log_failure("CRUD succeeded on deleted collection")
            elif SDKException.RetryReason.KV_COLLECTION_OUTDATED \
                    not in result["error"]:
                self.log_failure("Invalid error '%s'" % result["error"])
            client.close()
        except Exception as e:
            self.log.info(e)

        # Validate the bucket doc count is '0' after drop collection
        self.bucket_util._wait_for_stats_all_buckets()
        # Prints bucket stats after doc_ops
        self.bucket_util.print_bucket_stats()
        self.bucket_util.validate_doc_count_as_per_collections(self.bucket)
        self.validate_test_failure()
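The fixed 60-second sleep after the drop is a blunt wait. A polling loop converges faster and fails loudly; this is a sketch only, assuming the same `get_active_collections` helper used in the test reflects the current collection manifest:

    import time

    def wait_for_collection_drop(bucket_util, bucket, scope, collection,
                                 timeout=60):
        # Poll the active-collection list instead of sleeping a fixed time.
        deadline = time.time() + timeout
        while time.time() < deadline:
            names = bucket_util.get_active_collections(bucket, scope,
                                                       only_names=True)
            if collection not in names:
                return True
            time.sleep(2)
        return False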
Example #23
    def create_delete_collections(self):
        """
        1. Create Scope-Collection
        2. Validate '_default' collection values are intact
        3. Load documents into created collection
        4. Validate documents are loaded in new collection
        5. Delete the collection and validate the '_default' collection
           is unaffected
        """
        use_scope_name_for_collection = \
            self.input.param("use_scope_name_for_collection", False)
        scope_name = BucketUtils.get_random_name()
        collection_name = scope_name
        if not use_scope_name_for_collection:
            collection_name = BucketUtils.get_random_name()

        gen_add = doc_generator(self.key, 0, self.num_items)
        gen_set = doc_generator(self.key,
                                0,
                                self.num_items,
                                mutate=1,
                                mutation_type='SET')

        self.log.info("Creating scope::collection '%s::%s'" %
                      (scope_name, collection_name))
        self.bucket_util.create_scope(self.cluster.master, self.bucket,
                                      scope_name)
        self.bucket_util.create_collection(self.cluster.master, self.bucket,
                                           scope_name, collection_name)

        self.bucket_util.create_collection(self.cluster.master, self.bucket,
                                           scope_name, "my_collection_2")

        shell_conn = RemoteMachineShellConnection(self.cluster.master)
        cbstats = Cbstats(shell_conn)
        cbstats.get_collections(self.bucket_util.buckets[0])

        self.log.info("Validating the documents in default collection")
        self.bucket_util._wait_for_stats_all_buckets()
        # Prints bucket stats after doc_ops
        self.bucket_util.print_bucket_stats()
        self.bucket_util.verify_stats_all_buckets(self.num_items)

        self.log.info("Load documents into the created collection")
        sdk_client = SDKClient([self.cluster.master],
                               self.bucket,
                               scope=scope_name,
                               collection=collection_name,
                               compression_settings=self.sdk_compression)
        while gen_add.has_next():
            key, value = gen_add.next()
            result = sdk_client.crud("create",
                                     key,
                                     value,
                                     replicate_to=self.replicate_to,
                                     persist_to=self.persist_to,
                                     durability=self.durability_level,
                                     timeout=self.sdk_timeout)
            if result["status"] is False:
                self.log_failure("Doc create failed for collection: %s" %
                                 result)
                break
        sdk_client.close()
        self.validate_test_failure()
        self.bucket.scopes[scope_name] \
            .collections[collection_name] \
            .num_items += self.num_items

        self.bucket_util._wait_for_stats_all_buckets()
        # Prints bucket stats after doc_ops
        self.bucket_util.print_bucket_stats()
        self.bucket_util.verify_stats_all_buckets(self.num_items * 2)

        task = self.task.async_load_gen_docs(self.cluster,
                                             self.bucket,
                                             gen_set,
                                             "update",
                                             0,
                                             batch_size=10,
                                             process_concurrency=8,
                                             replicate_to=self.replicate_to,
                                             persist_to=self.persist_to,
                                             durability=self.durability_level,
                                             compression=self.sdk_compression,
                                             timeout_secs=self.sdk_timeout,
                                             scope=scope_name,
                                             collection=collection_name)
        self.task_manager.get_task_result(task)

        self.bucket_util._wait_for_stats_all_buckets()
        # Prints bucket stats after doc_ops
        self.bucket_util.print_bucket_stats()
        self.bucket_util.verify_stats_all_buckets(self.num_items * 2)
        self.validate_test_failure()
    def test_sub_doc_op_with_bucket_level_durability(self):
        """
        Create Buckets with durability_levels set and perform
        Sub_doc CRUDs from client without durability settings and
        validate the ops to make sure respective durability is honored
        """
        key, value = doc_generator("test_key", 0, 1).next()
        sub_doc_key = "sub_doc_key"
        sub_doc_vals = ["val_1", "val_2", "val_3", "val_4", "val_5"]
        for d_level in self.get_supported_durability_for_bucket():
            # Avoid creating bucket with durability=None
            if d_level == Bucket.DurabilityLevel.NONE:
                continue

            step_desc = "Creating %s bucket with level '%s'" \
                        % (self.bucket_type, d_level)
            verification_dict = self.get_cb_stat_verification_dict()

            self.log.info(step_desc)
            # Object to support performing CRUDs and create Bucket
            bucket_dict = self.get_bucket_dict(self.bucket_type, d_level)
            bucket_obj = Bucket(bucket_dict)
            self.bucket_util.create_bucket(self.cluster, bucket_obj,
                                           wait_for_warmup=True)
            self.summary.add_step(step_desc)

            # SDK client to perform sub_doc ops
            client = SDKClient([self.cluster.master], bucket_obj)

            result = client.crud("create", key, value)
            verification_dict["ops_create"] += 1
            verification_dict["sync_write_committed_count"] += 1
            if result["status"] is False:
                self.log_failure("Doc insert failed for key: %s" % key)

            # Perform sub_doc CRUD
            for sub_doc_op in ["subdoc_insert", "subdoc_upsert",
                               "subdoc_replace"]:
                sub_doc_val = choice(sub_doc_vals)
                _, fail = client.crud(sub_doc_op, key,
                                      [sub_doc_key, sub_doc_val])
                if fail:
                    self.log_failure("%s failure. Key %s, sub_doc (%s, %s): %s"
                                     % (sub_doc_op, key,
                                        sub_doc_key, sub_doc_val, result))
                else:
                    verification_dict["ops_update"] += 1
                    verification_dict["sync_write_committed_count"] += 1

                success, fail = client.crud("subdoc_read", key, sub_doc_key)
                if fail or str(success[key]["value"].get(0)) != sub_doc_val:
                    self.log_failure("%s failed. Expected: %s, Actual: %s"
                                     % (sub_doc_op, sub_doc_val,
                                        success[key]["value"].get(0)))
                self.summary.add_step("%s for key %s" % (sub_doc_op, key))

            # Subdoc_delete and verify
            sub_doc_op = "subdoc_delete"
            _, fail = client.crud(sub_doc_op, key, sub_doc_key)
            if fail:
                self.log_failure("%s failure. Key %s, sub_doc (%s, %s): %s"
                                 % (sub_doc_op, key,
                                    sub_doc_key, sub_doc_val, result))
            verification_dict["ops_update"] += 1
            verification_dict["sync_write_committed_count"] += 1

            _, fail = client.crud(sub_doc_op, key, sub_doc_key)
            if SDKException.PathNotFoundException \
                    not in str(fail[key]["error"]):
                self.log_failure("Invalid error after sub_doc_delete")

            self.summary.add_step("%s for key %s" % (sub_doc_op, key))

            # Validate doc_count
            self.bucket_util._wait_for_stats_all_buckets(self.cluster,
                                                         self.cluster.buckets)
            self.bucket_util.verify_stats_all_buckets(self.cluster, 1)

            # Cbstats vbucket-details validation
            self.cb_stat_verify(verification_dict)

            # Close SDK client
            client.close()

            # Delete the bucket on server
            self.bucket_util.delete_bucket(self.cluster, bucket_obj)
            self.summary.add_step("Delete %s bucket" % self.bucket_type)
        def test_scenario(bucket, doc_ops,
                          with_sync_write_val=None):
            # Set crud_batch_size
            crud_batch_size = 4
            simulate_error = CouchbaseError.STOP_MEMCACHED

            # Fetch target_vbs for CRUDs
            node_vb_info = self.vbs_in_node
            target_vbuckets = node_vb_info[target_nodes[0]]["replica"]
            if len(target_nodes) > 1:
                index = 1
                while index < len(target_nodes):
                    target_vbuckets = list(
                        set(target_vbuckets).intersection(
                            set(node_vb_info[target_nodes[index]]["replica"]))
                    )
                    index += 1

            # Variable to hold one of the doc_generator objects
            gen_loader_1 = None
            gen_loader_2 = None

            # Initialize doc_generators to use for testing
            self.log.info("Creating doc_generators")
            gen_create = doc_generator(
                self.key, self.num_items, crud_batch_size,
                vbuckets=self.cluster.vbuckets,
                target_vbucket=target_vbuckets)
            gen_update = doc_generator(
                self.key, 0, crud_batch_size,
                vbuckets=self.cluster.vbuckets,
                target_vbucket=target_vbuckets, mutate=1)
            gen_delete = doc_generator(
                self.key, 0, crud_batch_size,
                vbuckets=self.cluster.vbuckets,
                target_vbucket=target_vbuckets)
            self.log.info("Done creating doc_generators")

            # Start CRUD operation based on the given 'doc_op' type
            if doc_ops[0] == "create":
                self.num_items += crud_batch_size
                gen_loader_1 = gen_create
            elif doc_ops[0] in ["update", "replace", "touch"]:
                gen_loader_1 = gen_update
            elif doc_ops[0] == "delete":
                gen_loader_1 = gen_delete
                self.num_items -= crud_batch_size

            if doc_ops[1] == "create":
                gen_loader_2 = gen_create
            elif doc_ops[1] in ["update", "replace", "touch"]:
                gen_loader_2 = gen_update
            elif doc_ops[1] == "delete":
                gen_loader_2 = gen_delete

            # Load required docs for doc_op_1 in case of type != create
            if doc_op[2] == "load_initial_docs":
                doc_loading_task = self.task.async_load_gen_docs(
                    self.cluster, bucket, gen_loader_1, "create", 0,
                    batch_size=crud_batch_size, process_concurrency=1,
                    timeout_secs=10,
                    print_ops_rate=False,
                    sdk_client_pool=self.sdk_client_pool)
                self.task_manager.get_task_result(doc_loading_task)
                if doc_loading_task.fail:
                    self.log_failure("Failure while loading initial docs")
                self.summary.add_step("Create docs for %s" % doc_op[0])
                verification_dict["ops_create"] += crud_batch_size
                verification_dict["sync_write_committed_count"] \
                    += crud_batch_size

            # Initialize tasks and store the task objects
            doc_loader_task = self.task.async_load_gen_docs(
                self.cluster, bucket, gen_loader_1, doc_ops[0], 0,
                batch_size=crud_batch_size, process_concurrency=8,
                timeout_secs=60,
                print_ops_rate=False,
                start_task=False,
                sdk_client_pool=self.sdk_client_pool)

            # SDK client for performing individual ops
            client = SDKClient([self.cluster.master], bucket)

            # Perform specified action
            for node in target_nodes:
                error_sim = CouchbaseError(self.log,
                                           self.vbs_in_node[node]["shell"])
                error_sim.create(simulate_error,
                                 bucket_name=bucket.name)
            self.sleep(5, "Wait for error simulation to take effect")

            self.task_manager.add_new_task(doc_loader_task)
            self.sleep(5, "Wait for task_1 CRUDs to reach server")

            # Perform specified CRUD operation on sync_write docs
            tem_gen = deepcopy(gen_loader_2)
            while tem_gen.has_next():
                key, value = tem_gen.next()
                for retry_strategy in [
                        SDKConstants.RetryStrategy.FAIL_FAST,
                        SDKConstants.RetryStrategy.BEST_EFFORT]:
                    if with_sync_write_val:
                        fail = client.crud(doc_ops[1], key, value=value,
                                           exp=0,
                                           durability=with_sync_write_val,
                                           timeout=3, time_unit="seconds",
                                           sdk_retry_strategy=retry_strategy)
                    else:
                        fail = client.crud(doc_ops[1], key, value=value,
                                           exp=0,
                                           timeout=3, time_unit="seconds",
                                           sdk_retry_strategy=retry_strategy)

                    expected_exception = SDKException.AmbiguousTimeoutException
                    retry_reason = \
                        SDKException.RetryReason.KV_SYNC_WRITE_IN_PROGRESS
                    if retry_strategy == SDKConstants.RetryStrategy.FAIL_FAST:
                        expected_exception = \
                            SDKException.RequestCanceledException
                        retry_reason = \
                            SDKException.RetryReason \
                            .KV_SYNC_WRITE_IN_PROGRESS_NO_MORE_RETRIES

                    # Validate the returned error from the SDK
                    if expected_exception not in str(fail["error"]):
                        self.log_failure("Invalid exception for {0}: {1}"
                                         .format(key, fail["error"]))
                    if retry_reason not in str(fail["error"]):
                        self.log_failure("Invalid retry reason for {0}: {1}"
                                         .format(key, fail["error"]))

                    # Try reading the value in SyncWrite in-progress state
                    read_result = client.crud("read", key)
                    if doc_ops[0] == "create":
                        # Expect KeyNotFound in case of CREATE operation
                        if read_result["status"] is True:
                            self.log_failure(
                                "%s returned value during SyncWrite state: %s"
                                % (key, read_result))
                    else:
                        # Expect the previous value for other operations
                        if read_result["status"] is False:
                            self.log_failure(
                                "Key %s read failed for previous value: %s"
                                % (key, read_result))

            # Revert the introduced error condition
            for node in target_nodes:
                error_sim = CouchbaseError(self.log,
                                           self.vbs_in_node[node]["shell"])
                error_sim.revert(simulate_error,
                                 bucket_name=bucket.name)

            # Wait for doc_loader_task to complete
            self.task.jython_task_manager.get_task_result(doc_loader_task)

            verification_dict["ops_%s" % doc_op[0]] += crud_batch_size
            verification_dict["sync_write_committed_count"] \
                += crud_batch_size

            # Disconnect the client
            client.close()
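The retry-strategy branch in the loop above pairs each SDK retry strategy with a distinct expected failure while a SyncWrite on the same key is pending. That mapping can be stated in one place; names mirror the constants already used by the test:

    # Expected (exception, retry_reason) per retry strategy while a
    # SyncWrite on the same key is still in progress.
    EXPECTED_SYNC_WRITE_ERRORS = {
        SDKConstants.RetryStrategy.BEST_EFFORT: (
            SDKException.AmbiguousTimeoutException,
            SDKException.RetryReason.KV_SYNC_WRITE_IN_PROGRESS),
        SDKConstants.RetryStrategy.FAIL_FAST: (
            SDKException.RequestCanceledException,
            SDKException.RetryReason
            .KV_SYNC_WRITE_IN_PROGRESS_NO_MORE_RETRIES),
    }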
Example #26
    def test_expired_sys_xattr_consumed_by_dcp(self):
        """
        1. Create a empty doc with exp=5sec
        2. Insert xattr to the same doc (preserve_expiry=True)
        3. Wait for ep_queue_size to become zero
        4. Insert few more docs to the bucket
        5. Create a new secondary index to start DCP streaming from memory
        6. Expect no crash after step#5
        """

        doc_ttl = 10
        key = "test_xattr_doc-1"
        index_retry = 10
        index_created = False
        index_name = "test_index"
        bucket = self.cluster.buckets[0]
        vb_for_key = self.bucket_util.get_vbucket_num_for_key(key)

        # Open SDK client
        client = SDKClient([self.cluster.master], bucket)

        # Create Sync_write doc with xattr + doc_ttl=10s
        client.crud(DocLoading.Bucket.DocOps.CREATE, key, {},
                    durability=self.durability_level)
        client.crud("subdoc_insert", key, ["_sdkey", "abc123"],
                    xattr=True, durability=self.durability_level)
        client.crud(DocLoading.Bucket.DocOps.UPDATE, key, {},
                    durability=self.durability_level, exp=doc_ttl)

        # Wait for all items to get persist
        self.log.info("Waiting for ep_queue_size to become zero")
        self.bucket_util._wait_for_stats_all_buckets(self.cluster,
                                                     self.cluster.buckets)

        self.sleep(doc_ttl, "Wait for doc to expire")

        self.log.info("Loading more docs into the targeted vb: %s"
                      % vb_for_key)
        doc_gen = doc_generator(self.key, self.num_items, 1000,
                                target_vbucket=[vb_for_key])
        doc_load_task = self.task.async_load_gen_docs(
            self.cluster, bucket, doc_gen, DocLoading.Bucket.DocOps.UPDATE,
            durability=self.durability_level)
        self.task_manager.get_task_result(doc_load_task)

        rest = RestConnection(self.cluster.master)
        rest.set_indexer_storage_mode()
        self.log.info("Creating 2i on the bucket")
        client.cluster.query("CREATE PRIMARY INDEX %s ON %s"
                             % (index_name, bucket.name))
        self.sleep(2, "Wait for primary index to be created")
        while not index_created and index_retry != 0:
            state = client.cluster \
                .query("SELECT state FROM system:indexes "
                       "WHERE name='%s'" % index_name) \
                .rowsAsObject()[0].get("state")
            if state == "online":
                index_created = True
            else:
                index_retry -= 1
                self.sleep(1, "Retrying.. Index not yet online")

        client.close()
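Both this test and the next poll `system:indexes` by hand. A reusable sketch of the wait loop, assuming the `client.cluster.query(...).rowsAsObject()` result shape used above:

    import time

    def wait_for_index_online(client, index_name, timeout=30):
        # Poll system:indexes until the named index reports state "online".
        stmt = "SELECT state FROM system:indexes WHERE name='%s'" % index_name
        deadline = time.time() + timeout
        while time.time() < deadline:
            rows = client.cluster.query(stmt).rowsAsObject()
            if rows and rows[0].get("state") == "online":
                return True
            time.sleep(1)
        return False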
Example #27
    def test_MB_41944(self):
        num_index = self.input.param("num_index", 1)
        # Create doc_gen for loading
        doc_gen = doc_generator(self.key, 0, 1)

        # Get key for delete op and reset the gen
        key, v = doc_gen.next()
        doc_gen.reset()

        # Open SDK client connection
        client = SDKClient([self.cluster.master], self.bucket_util.buckets[0])

        query = list()
        query.append("CREATE PRIMARY INDEX index_0 on %s USING GSI" %
                     self.bucket_util.buckets[0].name)
        if num_index == 2:
            query.append("CREATE INDEX index_1 on %s(name,age) "
                         "WHERE mutated=0 USING GSI" %
                         self.bucket_util.buckets[0].name)
        # Create primary index on the bucket
        for q in query:
            client.cluster.query(q)
        # Wait for the indexes to become online
        for index_num, _ in enumerate(query):
            state_query = "SELECT state FROM system:indexes " \
                          "WHERE name='index_%s'" % index_num
            retry_count = 0
            state = None
            while retry_count < 30:
                state = client.cluster.query(state_query) \
                    .rowsAsObject()[0].get("state")
                if state == "online":
                    break
                retry_count += 1
                self.sleep(1)

            if state != "online":
                self.log_failure("Index 'index_%s' not yet online" % index_num)

        # Start transaction to create the doc
        trans_task = self.task.async_load_gen_docs_atomicity(
            self.cluster, self.bucket_util.buckets, doc_gen,
            DocLoading.Bucket.DocOps.CREATE)
        self.task_manager.get_task_result(trans_task)

        # Perform sub_doc operation on same key
        _, fail = client.crud(DocLoading.Bucket.SubDocOps.INSERT,
                              key=key,
                              value=["_sysxattr", "sysxattr-payload"],
                              xattr=True)
        if fail:
            self.log_failure("Subdoc insert failed: %s" % fail)
        else:
            self.log.info("Subdoc insert success")

        # Delete the created doc
        result = client.crud(DocLoading.Bucket.DocOps.DELETE, key)
        if result["status"] is False:
            self.log_failure("Doc delete failed: %s" % result["error"])
        else:
            self.log.info("Document deleted")
            # Re-insert same doc through transaction
            trans_task = self.task.async_load_gen_docs_atomicity(
                self.cluster, self.bucket_util.buckets, doc_gen,
                DocLoading.Bucket.DocOps.CREATE)
            self.task_manager.get_task_result(trans_task)

        # Close SDK Client connection
        client.close()
        self.validate_test_failure()
Example #28
    def test_staged_doc_read(self):
        self.verify = self.input.param("verify", True)

        bucket = self.cluster.buckets[0]
        expected_exception = SDKException.DocumentNotFoundException

        # Create SDK client for transactions
        client = SDKClient([self.cluster.master], bucket)

        if self.doc_op in ["update", "delete"]:
            for doc in self.docs:
                result = client.crud("create",
                                     doc.getT1(),
                                     doc.getT2(),
                                     durability=self.durability_level,
                                     timeout=60)
                if result["status"] is False:
                    self.log_failure("Key %s create failed: %s" %
                                     (doc.getT1(), result))
                    break
            expected_exception = None

        read_thread = Thread(
            target=self.__perform_read_on_doc_keys,
            args=(bucket, self.keys),
            kwargs=dict(expected_exception=expected_exception))
        read_thread.start()

        # Transaction load
        exception = self.__run_mock_test(client, self.doc_op)
        if SDKException.TransactionExpired not in str(exception):
            self.log_failure("Expected exception not found")

        self.log.info("Terminating reader thread")
        self.stop_thread = True
        read_thread.join()

        self.transaction_fail_count = 2
        exception = self.__run_mock_test(client, self.doc_op)
        if exception:
            self.log_failure(exception)

        # verify the values
        for key in self.keys:
            result = client.read(key)
            if "Remove" in self.operation \
                    or self.transaction_commit is False \
                    or self.verify is False:
                if result['status']:
                    actual_val = client.translate_to_json_object(
                        result['value'])
                    self.log.info("Actual value for key %s is %s" %
                                  (key, actual_val))
                    self.log_failure(
                        "Key '%s' should be deleted but present in the bucket"
                        % key)
            else:
                actual_val = client.translate_to_json_object(result['value'])
                if self.doc_op == "update":
                    self.content.put("mutated", 1)
                elif self.doc_op == "delete":
                    self.content.removeKey("value")

                if self.content != actual_val:
                    self.log.info("Key %s Actual: %s, Expected: %s" %
                                  (key, actual_val, self.content))
                    self.log_failure("Mismatch in doc content")

        # Close SDK client
        client.close()

        if self.read_failed[self.cluster.buckets[0]] is True:
            self.log_failure("Failure in read thread for bucket: %s" %
                             self.cluster.buckets[0].name)

        self.validate_test_failure()
Example #29
    def test_staged_doc_query_from_index(self):
        self.verify = self.input.param("verify", True)

        expected_val = dict()
        bucket = self.cluster.buckets[0]

        # Create SDK client for transactions
        client = SDKClient([self.cluster.master], bucket)

        if self.doc_op in ["update", "delete"]:
            for doc in self.docs:
                result = client.crud("create",
                                     doc.getT1(),
                                     doc.getT2(),
                                     durability=self.durability_level,
                                     timeout=60)
                if result["status"] is False:
                    self.log_failure("Key %s create failed: %s" %
                                     (doc.getT1(), result))
                    break
                expected_val[doc.getT1()] = json.loads(str(doc.getT2()))

        # Create primary Index on all buckets
        for t_bucket in self.cluster.buckets:
            q_result = client.cluster.query("CREATE PRIMARY INDEX ON `%s`" %
                                            t_bucket.name)
            if q_result.metaData().status().toString() != "SUCCESS":
                client.close()
                self.fail("Create primary index failed for bucket %s" %
                          t_bucket.name)
        self.sleep(10, "Wait for primary indexes to get warmed up")

        query_thread = Thread(target=self.__perform_query_on_doc_keys,
                              args=(bucket, self.keys, expected_val))
        query_thread.start()

        # Transaction load
        exception = self.__run_mock_test(client, self.doc_op)
        if SDKException.TransactionExpired not in str(exception):
            self.log_failure("Expected exception not found")

        self.log.info("Terminating query thread")
        self.stop_thread = True
        query_thread.join()

        self.transaction_fail_count = 2
        exception = self.__run_mock_test(client, self.doc_op)
        if exception:
            self.log_failure(exception)

        # verify the values
        for key in self.keys:
            result = client.read(key)
            if "Remove" in self.operation \
                    or self.transaction_commit is False \
                    or self.verify is False:
                if result['status']:
                    actual_val = client.translate_to_json_object(
                        result['value'])
                    self.log.info("Actual value for key %s is %s" %
                                  (key, actual_val))
                    self.log_failure(
                        "Key '%s' should be deleted but present in the bucket"
                        % key)
            else:
                actual_val = client.translate_to_json_object(result['value'])

                if self.doc_op == "update":
                    self.content.put("mutated", 1)
                elif self.doc_op == "delete":
                    self.content.removeKey("value")

                if self.content != actual_val:
                    self.log.info("Key %s Actual: %s, Expected: %s" %
                                  (key, actual_val, self.content))
                    self.log_failure("Mismatch in doc content")

        # Close SDK client
        client.close()

        if self.read_failed[self.cluster.buckets[0]] is True:
            self.log_failure("Failure in query thread for bucket: %s" %
                             self.cluster.buckets[0].name)
        self.validate_test_failure()
Example #30
class basic_ops(ClusterSetup):
    def setUp(self):
        super(basic_ops, self).setUp()
        if self.default_bucket:
            # Over-ride bucket ram quota=100MB
            self.bucket_size = 100
            self.create_bucket(self.cluster)

        self.sleep(10, "Wait for bucket to become ready for ops")

        self.def_bucket = self.bucket_util.get_all_buckets(self.cluster)
        self.client = SDKClient([self.cluster.master], self.def_bucket[0])
        self.__durability_level()
        self.create_Transaction()
        self._stop = threading.Event()
        self.log.info("==========Finished Basic_ops base setup========")

    def tearDown(self):
        self.client.close()
        super(basic_ops, self).tearDown()

    def __durability_level(self):
        if self.durability_level == Bucket.DurabilityLevel.MAJORITY:
            self.durability = 1
        elif self.durability_level \
                == Bucket.DurabilityLevel.MAJORITY_AND_PERSIST_TO_ACTIVE:
            self.durability = 2
        elif self.durability_level \
                == Bucket.DurabilityLevel.PERSIST_TO_MAJORITY:
            self.durability = 3
        elif self.durability_level == "ONLY_NONE":
            self.durability = 4
        else:
            self.durability = 0

    def get_doc_generator(self, start, end):
        age = range(5)
        name = ['james', 'sharon']
        body = [''.rjust(self.doc_size - 10, 'a')]
        template = JsonObject.create()
        template.put("age", age)
        template.put("first_name", name)
        template.put("body", body)
        generator = DocumentGenerator(self.key,
                                      template,
                                      start=start,
                                      end=end,
                                      key_size=self.key_size,
                                      doc_size=self.doc_size,
                                      doc_type=self.doc_type,
                                      randomize=True,
                                      age=age,
                                      first_name=name)
        return generator

    def set_exception(self, exception):
        self.exception = exception
        raise Exception("Got an exception: %s" % self.exception)

    def __chunks(self, l, n):
        """Yield successive n-sized chunks from l."""
        for i in range(0, len(l), n):
            yield l[i:i + n]

    def create_Transaction(self, client=None):
        if not client:
            client = self.client
        transaction_config = Transaction().createTransactionConfig(
            self.transaction_timeout, self.durability)
        try:
            self.transaction = Transaction().createTansaction(
                client.cluster, transaction_config)
        except Exception as e:
            self.set_exception(e)

    def __thread_to_transaction(self,
                                transaction,
                                op_type,
                                doc,
                                txn_commit,
                                update_count=1,
                                sync=True,
                                set_exception=True,
                                client=None):
        exception = None
        if client is None:
            client = self.client
        if op_type == "create":
            exception = Transaction().RunTransaction(client.cluster,
                                                     transaction,
                                                     [client.collection], doc,
                                                     [], [], txn_commit, sync,
                                                     update_count)
        elif op_type == "update":
            self.log.info("updating all the keys through threads")
            exception = Transaction().RunTransaction(client.cluster,
                                                     transaction,
                                                     [client.collection], [],
                                                     doc, [], txn_commit, sync,
                                                     update_count)
        elif op_type == "delete":
            exception = Transaction().RunTransaction(client.cluster,
                                                     transaction,
                                                     [client.collection], [],
                                                     [], doc, txn_commit, sync,
                                                     update_count)
        if set_exception and exception:
            self.set_exception("Failed")

    def doc_gen(self,
                num_items,
                start=0,
                value={'value': 'value1'},
                op_type="create"):
        self.docs = []
        self.keys = []
        self.content = self.client.translate_to_json_object(value)
        for i in range(start, num_items):
            key = "test_docs-" + str(i)
            if op_type == "create":
                doc = Tuples.of(key, self.content)
                self.keys.append(key)
                self.docs.append(doc)
            else:
                self.docs.append(key)

    def verify_doc(self, num_items, client):
        for i in range(num_items):
            key = "test_docs-" + str(i)
            result = client.read(key)
            actual_val = self.client.translate_to_json_object(result['value'])
            self.assertEqual(self.content, actual_val)

    def test_MultiThreadTxnLoad(self):
        """
        Load data through txn, update half the items through different threads
        and delete half the items through different threads. if update_retry
        then update and delete the same key in two different transaction
        and make sure update fails
        """

        self.num_txn = self.input.param("num_txn", 9)
        self.update_retry = self.input.param("update_retry", False)

        self.doc_gen(self.num_items)
        threads = []

        # create the docs
        exception = Transaction().RunTransaction(self.client.cluster,
                                                 self.transaction,
                                                 [self.client.collection],
                                                 self.docs, [], [],
                                                 self.transaction_commit, True,
                                                 self.update_count)
        if exception:
            self.set_exception("Failed")

        if self.update_retry:
            threads.append(
                threading.Thread(target=self.__thread_to_transaction,
                                 args=(self.transaction, "delete", self.keys,
                                       self.transaction_commit,
                                       self.update_count)))
            threads.append(
                threading.Thread(target=self.__thread_to_transaction,
                                 args=(self.transaction, "update", self.keys,
                                       10, self.update_count)))

        else:
            update_docs = self.__chunks(self.keys[:self.num_items / 2],
                                        self.num_txn)
            delete_docs = self.__chunks(self.keys[self.num_items / 2:],
                                        self.num_txn)

            for keys in update_docs:
                threads.append(
                    threading.Thread(target=self.__thread_to_transaction,
                                     args=(self.transaction, "update", keys,
                                           self.transaction_commit,
                                           self.update_count)))

            for keys in delete_docs:
                threads.append(
                    threading.Thread(target=self.__thread_to_transaction,
                                     args=(self.transaction, "delete", keys,
                                           self.transaction_commit,
                                           self.update_count)))

        for thread in threads:
            thread.start()

        for thread in threads:
            thread.join()

        self.sleep(60, "Wait for transactions to complete")
        if self.update_retry:
            for key in self.keys:
                result = self.client.read(key)
                self.assertEqual(result['status'], False)

        else:
            self.value = {'mutated': 1, 'value': 'value1'}
            self.content = self.client.translate_to_json_object(self.value)

            self.verify_doc(self.num_items / 2, self.client)

            for key in self.keys[self.num_items / 2:]:
                result = self.client.read(key)
                self.assertEqual(result['status'], False)

    def test_basic_retry(self):
        """
        Load set of data to the cluster, update through 2 different threads,
        make sure transaction maintains the order of update
        :return:
        """
        self.write_conflict = self.input.param("write_conflict", 2)

        self.log.info("going to create and execute the task")
        self.gen_create = self.get_doc_generator(0, self.num_items)
        task = self.task.async_load_gen_docs_atomicity(
            self.cluster,
            self.def_bucket,
            self.gen_create,
            "create",
            exp=0,
            batch_size=10,
            process_concurrency=8,
            replicate_to=self.replicate_to,
            persist_to=self.persist_to,
            timeout_secs=self.sdk_timeout,
            retries=self.sdk_retries,
            update_count=self.update_count,
            transaction_timeout=self.transaction_timeout,
            commit=True,
            durability=self.durability_level,
            sync=self.sync)
        self.task.jython_task_manager.get_task_result(task)
        self.log.info("Get all the keys in the cluster")
        self.doc_gen(self.num_items)

        threads = []
        for update_count in [2, 4, 6]:
            threads.append(
                threading.Thread(target=self.__thread_to_transaction,
                                 args=(self.transaction, "update", self.keys,
                                       self.transaction_commit, update_count)))
        # Set the expected update_count for later verification
        if self.transaction_commit:
            self.update_count = 6
        else:
            self.update_count = 0

        for thread in threads:
            thread.start()
            self.sleep(2, "Wait for transaction thread to start")

        for thread in threads:
            thread.join()

    def test_basic_retry_async(self):
        self.log.info("going to create and execute the task")
        self.gen_create = self.get_doc_generator(0, self.num_items)
        task = self.task.async_load_gen_docs_atomicity(
            self.cluster,
            self.def_bucket,
            self.gen_create,
            "create",
            exp=0,
            batch_size=10,
            process_concurrency=1,
            replicate_to=self.replicate_to,
            persist_to=self.persist_to,
            timeout_secs=self.sdk_timeout,
            retries=self.sdk_retries,
            update_count=self.update_count,
            transaction_timeout=self.transaction_timeout,
            commit=True,
            durability=self.durability_level,
            sync=True,
            num_threads=1)
        self.task.jython_task_manager.get_task_result(task)
        self.log.info("get all the keys in the cluster")
        keys = ["test_docs-0"] * 2

        exception = Transaction().RunTransaction(
            self.client.cluster, self.transaction, [self.client.collection],
            [], keys, [], self.transaction_commit, False, 0)
        if exception:
            self.set_exception(Exception(exception))

    def basic_concurrency(self):
        self.crash = self.input.param("crash", False)

        self.doc_gen(self.num_items)

        # run transaction
        thread = threading.Thread(target=self.__thread_to_transaction,
                                  args=(self.transaction, "create", self.docs,
                                        self.transaction_commit,
                                        self.update_count, True, False))
        thread.start()
        self.sleep(1, "Wait for transaction thread to start")

        if self.crash:
            self.client.cluster.disconnect()
            self.transaction.close()
            self.client1 = SDKClient([self.cluster.master], self.def_bucket[0])
            self.create_Transaction(self.client1)
            self.sleep(self.transaction_timeout + 60,
                       "Wait for transaction cleanup to complete")
            exception = Transaction().RunTransaction(
                self.client1.cluster, self.transaction,
                [self.client1.collection], self.docs, [], [],
                self.transaction_commit, self.sync, self.update_count)
            if exception:
                self.sleep(60, "Wait for transaction cleanup to happen")

            self.verify_doc(self.num_items, self.client1)
            self.client1.close()

        else:
            key = "test_docs-0"
            # Insert will succeed due to the doc_isolation feature
            result = self.client.insert(key, "value")
            self.assertEqual(result["status"], True)

            # Update should pass
            result = self.client.upsert(key, "value")
            self.assertEqual(result["status"], True)

            # delete should pass
            result = self.client.delete(key)
            self.assertEqual(result["status"], True)

        thread.join()

    def test_stop_loading(self):
        """
        Load through transactions and close the transaction abruptly,
        create a new transaction sleep for 60 seconds and
        perform create on the same set of docs
        """
        self.num_txn = self.input.param("num_txn", 9)
        self.doc_gen(self.num_items)
        threads = []

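        # Split the docs into num_txn roughly equal chunks,
        # one chunk per transaction thread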
        docs = list(self.__chunks(self.docs, len(self.docs) // self.num_txn))

        for doc in docs:
            threads.append(
                threading.Thread(target=self.__thread_to_transaction,
                                 args=(self.transaction, "create", doc,
                                       self.transaction_commit,
                                       self.update_count, True, False)))

        for thread in threads:
            thread.start()

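        # Abruptly tear down the SDK client and transaction object
        # while the load threads are still running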
        self.client.cluster.disconnect()
        self.transaction.close()

        self.client1 = SDKClient([self.cluster.master], self.def_bucket[0])
        self.create_Transaction(self.client1)
        self.sleep(self.transaction_timeout + 60,
                   "Wait for transaction cleanup to happen")

        self.log.info("going to start the load")
        for doc in docs:
            exception = Transaction().RunTransaction(
                self.client1.cluster, self.transaction,
                [self.client1.collection], doc, [], [],
                self.transaction_commit, self.sync, self.update_count)
            if exception:
                self.sleep(60, "Wait for transaction cleanup to happen")

        self.verify_doc(self.num_items, self.client1)
        self.client1.close()

    def __insert_sub_doc_and_validate(self, doc_id, op_type, key, value):
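        # Sub-doc mutation against the doc's extended attributes (xattr=True);
        # create_path builds any missing intermediate path components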
        _, failed_items = self.client.crud(op_type,
                                           doc_id, [key, value],
                                           durability=self.durability_level,
                                           timeout=self.sdk_timeout,
                                           time_unit="seconds",
                                           create_path=True,
                                           xattr=True)
        self.assertFalse(failed_items, "Subdoc Xattr insert failed")

    def __read_doc_and_validate(self, doc_id, expected_val, subdoc_key=None):
        if subdoc_key:
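            # Read the value back through the sub-doc API and compare
            # against the expected xattr value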
            success, failed_items = self.client.crud("subdoc_read",
                                                     doc_id,
                                                     subdoc_key,
                                                     xattr=True)
            self.assertFalse(failed_items, "Xattr read failed")
            self.assertEqual(
                expected_val, str(success[doc_id]["value"][0]),
                "Sub_doc value mismatch: %s != %s" %
                (success[doc_id]["value"][0], expected_val))

    def test_TxnWithXattr(self):
        self.system_xattr = self.input.param("system_xattr", False)
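        # System xattrs are underscore-prefixed; here only the leaf
        # component of the dotted path carries the underscore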
        if self.system_xattr:
            xattr_key = "my._attr"
        else:
            xattr_key = "my.attr"
        val = "v" * self.doc_size

        self.doc_gen(self.num_items)
        thread = threading.Thread(target=self.__thread_to_transaction,
                                  args=(self.transaction, "create", self.docs,
                                        self.transaction_commit,
                                        self.update_count))
        thread.start()
        thread.join()

        self.doc_gen(self.num_items,
                     op_type="update",
                     value={
                         "mutated": 1,
                         "value": "value1"
                     })
        thread = threading.Thread(target=self.__thread_to_transaction,
                                  args=(self.transaction, "update", self.docs,
                                        self.transaction_commit,
                                        self.update_count))
        thread.start()
        self.sleep(1, "Wait for transaction thread to start")
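        # Race a sub-doc xattr insert against the in-flight
        # transactional update on the same document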
        self.__insert_sub_doc_and_validate("test_docs-0", "subdoc_insert",
                                           xattr_key, val)

        thread.join()

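        # On commit, the xattr written mid-transaction should remain readable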
        if self.transaction_commit:
            self.__read_doc_and_validate("test_docs-0", val, xattr_key)
        self.sleep(60, "Wait for transaction to complete")
        self.verify_doc(self.num_items, self.client)

    def test_TxnWithMultipleXattr(self):
        xattrs_to_insert = [["my.attr", "value"], ["new_my.attr", "new_value"]]

        self.doc_gen(self.num_items)
        thread = threading.Thread(target=self.__thread_to_transaction,
                                  args=(self.transaction, "create", self.docs,
                                        self.transaction_commit,
                                        self.update_count))
        thread.start()
        thread.join()

        self.doc_gen(self.num_items,
                     op_type="update",
                     value={
                         "mutated": 1,
                         "value": "value1"
                     })
        thread = threading.Thread(target=self.__thread_to_transaction,
                                  args=(self.transaction, "update", self.docs,
                                        self.transaction_commit,
                                        self.update_count))

        thread.start()
        self.sleep(1, "Wait for transx-thread to start")
        for key, val in xattrs_to_insert:
            self.__insert_sub_doc_and_validate("test_docs-0", "subdoc_insert",
                                               key, val)
        thread.join()

        if self.transaction_commit:
            for key, val in xattrs_to_insert:
                self.__read_doc_and_validate("test_docs-0", val, key)
        self.sleep(60, "Wait for transaction to complete")
        self.verify_doc(self.num_items, self.client)