Example #1
 def test_create_delete_recreate_collection(self):
     collections = BucketUtils.get_random_collections(
         self.bucket_util.buckets, 10, 10, 1)
     # delete collection
     for bucket_name, scope_dict in collections.items():
         bucket = BucketUtils.get_bucket_obj(self.bucket_util.buckets,
                                             bucket_name)
         scope_dict = scope_dict["scopes"]
         for scope_name, collection_dict in scope_dict.items():
             collection_dict = collection_dict["collections"]
             for c_name, _ in collection_dict.items():
                 BucketUtils.drop_collection(self.cluster.master, bucket,
                                             scope_name, c_name)
     # recreate collection
     for bucket_name, scope_dict in collections.items():
         bucket = BucketUtils.get_bucket_obj(self.bucket_util.buckets,
                                             bucket_name)
         scope_dict = scope_dict["scopes"]
         for scope_name, collection_dict in scope_dict.items():
             collection_dict = collection_dict["collections"]
             for c_name, _ in collection_dict.items():
                 # Cannot create a _default collection
                 if c_name == CbServer.default_collection:
                     continue
                 col_obj = \
                     bucket.scopes[scope_name].collections[c_name]
                 BucketUtils.create_collection(self.cluster.master, bucket,
                                               scope_name,
                                               col_obj.get_dict_object())
     # Validate doc count as per bucket collections
     self.bucket_util.validate_docs_per_collections_all_buckets()
     self.validate_test_failure()
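Note: the nested traversal above recurs throughout these examples. Below is a
minimal sketch of the dict shape that BucketUtils.get_random_collections
appears to return, inferred from the loops above; the per-collection payload
and the iter_collections helper are assumptions for illustration, not TAF API.

# Assumed shape (sketch): bucket -> "scopes" -> scope -> "collections" -> name
sample = {
    "bucket_1": {
        "scopes": {
            "scope_1": {
                "collections": {
                    "collection_1": {}  # per-collection data; contents unknown
                }
            }
        }
    }
}

# Hypothetical helper mirroring the nested loops used in these tests
def iter_collections(collections):
    for bucket_name, bucket_data in collections.items():
        for scope_name, scope_data in bucket_data["scopes"].items():
            for c_name, c_data in scope_data["collections"].items():
                yield bucket_name, scope_name, c_name, c_data

for b_name, s_name, c_name, _ in iter_collections(sample):
    print(b_name, s_name, c_name)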
Example #2
File: basic_ops.py Project: ritalrw/TAF
    def test_create_delete_recreate_scope(self):
        bucket_dict = BucketUtils.get_random_scopes(self.bucket_util.buckets,
                                                    "all", 1)
        # Delete scopes
        for bucket_name, scope_dict in bucket_dict.items():
            bucket = BucketUtils.get_bucket_obj(self.bucket_util.buckets,
                                                bucket_name)
            for scope_name, _ in scope_dict["scopes"].items():
                BucketUtils.drop_scope(self.cluster.master, bucket, scope_name)

        # Recreate scopes
        for bucket_name, scope_dict in bucket_dict.items():
            bucket = BucketUtils.get_bucket_obj(self.bucket_util.buckets,
                                                bucket_name)
            for scope_name, _ in scope_dict["scopes"].items():
                BucketUtils.create_scope(self.cluster.master, bucket,
                                         scope_name)
        # Validate doc count as per bucket collections
        self.bucket_util.validate_docs_per_collections_all_buckets()
        self.validate_test_failure()
Example #3
 def process_value_for_verification(self,
                                    bucket_col,
                                    doc_gen_list,
                                    results,
                                    buckets=None):
     """
     1. get the collection
     2. get its doc_gen
     3. first validate deleted docs
     4. then check updated docs
     5. validate inserted docs
     """
     for collection in bucket_col:
         self.log.info("validation started for collection %s" % collection)
         gen_load = doc_gen_list[collection]
         self.validate_dict = {}
         self.deleted_key = []
         doc_gen = copy.deepcopy(gen_load)
         while doc_gen.has_next():
             key, val = next(doc_gen)
             self.validate_dict[key] = val
         for res in results:
             for savepoint in res[1]:
                 if collection in res[0][savepoint].keys():
                     for key in set(
                             res[0][savepoint][collection]["DELETE"]):
                         self.deleted_key.append(key)
                     for key, val in res[0][savepoint][collection][
                             "INSERT"].items():
                         self.validate_dict[key] = val
                     for key, val in res[0][savepoint][collection][
                             "UPDATE"].items():
                         mutated = key.split("=")
                         for t_id in val:
                             try:
                                 self.validate_dict[t_id][mutated[0]] = \
                                     mutated[1]
                             except Exception:
                                 # value may not support item assignment
                                 # (e.g. a Java Map); fall back to put()
                                 self.validate_dict[t_id].put(
                                     mutated[0], mutated[1])
         # 'collection' is a "bucket.scope.collection" string
         bucket_collection = collection.split('.')
         if buckets:
             self.buckets = buckets
         else:
             self.buckets = self.bucket_util.buckets
         bucket = BucketUtils.get_bucket_obj(self.buckets,
                                             bucket_collection[0])
         client = \
             DocLoaderUtils.sdk_client_pool.get_client_for_bucket(
                 bucket, bucket_collection[1], bucket_collection[2])
         self.validate_keys(client, self.validate_dict, self.deleted_key)
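The UPDATE entries consumed above are "field=value" strings mapped to the list
of document keys that received that mutation. Below is a self-contained sketch
of applying one collection's savepoint result to the expected-state dict,
mirroring the loop above; apply_savepoint is a hypothetical name, and the
result layout is inferred from the indexing in the test.

# Hypothetical helper (sketch): fold one collection's savepoint result into
# the expected state, mirroring the DELETE/INSERT/UPDATE handling above
def apply_savepoint(validate_dict, deleted_keys, col_result):
    for key in set(col_result["DELETE"]):           # deletions are tracked,
        deleted_keys.append(key)                    # not removed from the dict
    for key, val in col_result["INSERT"].items():   # inserts become expected docs
        validate_dict[key] = val
    for mutation, doc_keys in col_result["UPDATE"].items():
        field, value = mutation.split("=", 1)       # "field=value" string
        for doc_key in doc_keys:
            validate_dict[doc_key][field] = value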
Example #4
    def touch_test(self):
        self.log.info("Loading bucket into DGM")
        load_gen = doc_generator(self.key,
                                 0,
                                 self.num_items,
                                 doc_size=self.doc_size)
        dgm_gen = doc_generator(self.key, self.num_items, self.num_items + 1)
        dgm_task = self.task.async_load_gen_docs(
            self.cluster,
            self.bucket_util.buckets[0],
            dgm_gen,
            "create",
            0,
            persist_to=self.persist_to,
            replicate_to=self.replicate_to,
            durability=self.durability_level,
            timeout_secs=self.sdk_timeout,
            batch_size=10,
            process_concurrency=4,
            active_resident_threshold=self.active_resident_threshold)
        self.task_manager.get_task_result(dgm_task)

        self.log.info("Touch intial self.num_items docs which are "
                      "residing on disk due to DGM")
        client = SDKClient([self.cluster.master], self.bucket_util.buckets[0])
        collections = BucketUtils.get_random_collections(
            self.bucket_util.buckets, 2, 2, 1)
        for self.bucket_name, scope_dict in collections.items():
            bucket = BucketUtils.get_bucket_obj(self.bucket_util.buckets,
                                                self.bucket_name)
            scope_dict = scope_dict["scopes"]
            for scope_name, collection_dict in scope_dict.items():
                collection_dict = collection_dict["collections"]
                for c_name, c_data in collection_dict.items():
                    self.log.info("CAS test on collection %s: %s" %
                                  (scope_name, c_name))
                    client.select_collection(scope_name, c_name)
                    while load_gen.has_next():
                        key, _ = next(load_gen)
                        result = client.crud("touch",
                                             key,
                                             durability=self.durability_level,
                                             timeout=self.sdk_timeout)
                        if result["status"] is not True:
                            self.log_failure("Touch on %s failed: %s" %
                                             (key, result))
        client.close()
        self.bucket_util._wait_for_stats_all_buckets()
        # Validate doc count as per bucket collections
        self.bucket_util.validate_docs_per_collections_all_buckets()
        self.validate_test_failure()
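Note: load_gen above is fully consumed by the first collection's while loop,
so later collections in the random selection are effectively skipped. If the
intent is to touch the initial docs in every selected collection, a fresh copy
of the generator per collection (as Example #3 does with copy.deepcopy) would
be needed. A sketch of the inner loop under that assumption; this is a
fragment of the method above, not standalone code:

                import copy
                for c_name, c_data in collection_dict.items():
                    client.select_collection(scope_name, c_name)
                    # fresh generator per collection, assuming doc_generator
                    # objects support copy.deepcopy as in Example #3
                    doc_gen = copy.deepcopy(load_gen)
                    while doc_gen.has_next():
                        key, _ = next(doc_gen)
                        client.crud("touch", key, timeout=self.sdk_timeout)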
Example #5
    def test_create_delete_recreate_scope(self):
        scope_drop_fails = False
        bucket_dict = BucketUtils.get_random_scopes(self.bucket_util.buckets,
                                                    "all", 1)
        # Delete scopes
        for bucket_name, scope_dict in bucket_dict.items():
            bucket = BucketUtils.get_bucket_obj(self.bucket_util.buckets,
                                                bucket_name)
            for scope_name, _ in scope_dict["scopes"].items():
                if scope_name == CbServer.default_scope:
                    scope_drop_fails = True
                try:
                    BucketUtils.drop_scope(self.cluster.master, bucket,
                                           scope_name)
                    if scope_drop_fails:
                        raise Exception("default scope deleted")
                except Exception as drop_exception:
                    if scope_drop_fails \
                            and "delete_scope failed" in str(drop_exception):
                        pass
                    else:
                        raise drop_exception

        # Recreate scopes
        for bucket_name, scope_dict in bucket_dict.items():
            bucket = BucketUtils.get_bucket_obj(self.bucket_util.buckets,
                                                bucket_name)
            for scope_name, _ in scope_dict["scopes"].items():
                # Cannot create a _default scope
                if scope_name == CbServer.default_scope:
                    continue
                BucketUtils.create_scope(self.cluster.master, bucket,
                                         {"name": scope_name})
        # Validate doc count as per bucket collections
        self.bucket_util.validate_docs_per_collections_all_buckets()
        self.validate_test_failure()
Example #6
 def test_drop_collection_compaction(self):
     collections = BucketUtils.get_random_collections(
         self.bucket_util.buckets, 10, 10, 1)
     # Delete collection
     for self.bucket_name, scope_dict in collections.items():
         bucket = BucketUtils.get_bucket_obj(self.bucket_util.buckets,
                                             self.bucket_name)
         scope_dict = scope_dict["scopes"]
         for scope_name, collection_dict in scope_dict.items():
             collection_dict = collection_dict["collections"]
             for c_name, c_data in collection_dict.items():
                 BucketUtils.drop_collection(self.cluster.master, bucket,
                                             scope_name, c_name)
     # Wait for compaction to complete
     remote_client = RemoteMachineShellConnection(self.cluster.master)
     _ = remote_client.wait_till_compaction_end(
         RestConnection(self.cluster.master),
         self.bucket_name,
         timeout_in_seconds=(self.wait_timeout * 10))
     remote_client.disconnect()
     # Validate doc count as per bucket collections
     self.bucket_util.validate_docs_per_collections_all_buckets()
     self.validate_test_failure()
Example #7
    def test_crash_process(self):
        """
        1. Start loading docs into the default bucket
        2. Crash the requested process, which will not impact the
           memcached operations
        3. Wait for load bucket task to complete
        4. Validate the docs for durability
        """
        def_bucket = self.cluster.buckets[0]
        target_node = self.getTargetNode()
        remote = RemoteMachineShellConnection(target_node)
        target_vbuckets = range(0, self.cluster_util.vbuckets)
        retry_exceptions = list()
        self.transaction_load_task = None
        self.doc_loading_task = None
        self.N1ql_load_task = None

        # If Memcached is killed, we should not perform KV ops on that
        # particular node. Otherwise, we can target all nodes for KV ops.
        if self.process_name == "memcached":
            target_vbuckets = CrashTest.getVbucketNumbers(
                remote, def_bucket.name, self.target_node)
            if self.target_node == "active":
                retry_exceptions = [SDKException.TimeoutException]
        if len(target_vbuckets) == 0:
            self.log.error("No target vbucket list generated to load data")
            remote.disconnect()
            return

        bucket_dict = BucketUtils.get_random_collections(
            self.cluster.buckets,
            req_num=1,
            consider_scopes="all",
            consider_buckets="all")

        bucket = BucketUtils.get_bucket_obj(self.cluster.buckets,
                                            list(bucket_dict.keys())[0])
        scope_name = list(bucket_dict[bucket.name]["scopes"].keys())[0]
        collection_name = list(bucket_dict[bucket.name][
            "scopes"][scope_name]["collections"].keys())[0]
        scope = BucketUtils.get_scope_obj(
            bucket, scope_name)
        collection = BucketUtils.get_collection_obj(
            scope, collection_name)

        self.start_doc_loading_tasks(target_vbuckets, scope_name, collection)

        task_info = dict()
        task_info[self.doc_loading_task] = \
            self.bucket_util.get_doc_op_info_dict(
                def_bucket, DocLoading.Bucket.DocOps.CREATE, 0,
                replicate_to=self.replicate_to, persist_to=self.persist_to,
                durability=self.durability_level,
                timeout=self.sdk_timeout, time_unit="seconds",
                retry_exceptions=retry_exceptions)

        self.sleep(10, "Wait for doc_ops to start")
        self.log.info("Killing {0}:{1} on node {2}"
                      .format(self.process_name, self.service_name,
                              target_node.ip))
        remote.kill_process(self.process_name, self.service_name,
                            signum=signum[self.sig_type])
        remote.disconnect()
        # Wait for tasks completion and validate failures
        if self.transaction_load_task:
            self.task.jython_task_manager.get_task_result(
                self.transaction_load_task)
        if self.N1qltxn:
            self.task.jython_task_manager.get_task_result(
                self.N1ql_load_task)
        self.task_manager.get_task_result(self.doc_loading_task)
        self.bucket_util.verify_doc_op_task_exceptions(task_info,
                                                       self.cluster)
        self.bucket_util.log_doc_ops_task_failures(task_info)

        # Verification stats
        verification_dict = dict()
        verification_dict["ops_create"] = 2*self.num_items
        verification_dict["sync_write_aborted_count"] = 0
        verification_dict["rollback_item_count"] = 0
        verification_dict["pending_writes"] = 0
        if self.durability_level:
            verification_dict["sync_write_committed_count"] = 2*self.num_items

        if self.bucket_type == Bucket.Type.EPHEMERAL \
                and self.process_name == "memcached":
            result = self.task.rebalance(self.servers[:self.nodes_init],
                                         [], [])
            self.assertTrue(result, "Rebalance failed")

        # Validate doc count
        if self.process_name != "memcached":
            stats_failed = \
                self.durability_helper.verify_vbucket_details_stats(
                    def_bucket, self.cluster_util.get_kv_nodes(),
                    vbuckets=self.cluster_util.vbuckets,
                    expected_val=verification_dict)
            if stats_failed:
                self.fail("Cbstats verification failed")

        # Doc count validation per collection
        if not self.N1qltxn and self.atomicity is False:
            self.bucket_util.validate_docs_per_collections_all_buckets(
                self.cluster)
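The signum lookup passed to remote.kill_process above is presumably a
module-level map from signal name to signal number; its exact contents are
not shown here. A minimal sketch of what such a table might look like
(Unix-only signals):

import signal

# Sketch (assumption): name -> signal number map, indexed as
# signum[self.sig_type] in the test above
signum = {"sigkill": int(signal.SIGKILL),
          "sigstop": int(signal.SIGSTOP),
          "sigterm": int(signal.SIGTERM)}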
Example #8
    def test_stop_process(self):
        """
        1. Start loading docs into the default bucket
        2. Stop the requested process, which will impact the
           memcached operations
        3. Wait for load bucket task to complete
        4. Validate the docs for durability
        """
        error_to_simulate = self.input.param("simulate_error", None)
        target_node = self.getTargetNode()
        remote = RemoteMachineShellConnection(target_node)
        error_sim = CouchbaseError(self.log, remote)
        target_vbuckets = CrashTest.getVbucketNumbers(
            remote, self.bucket.name, self.target_node)

        bucket_dict = BucketUtils.get_random_collections(
            self.cluster.buckets,
            req_num=1,
            consider_scopes="all",
            consider_buckets="all")

        bucket = BucketUtils.get_bucket_obj(self.cluster.buckets,
                                            list(bucket_dict.keys())[0])
        scope_name = list(bucket_dict[bucket.name]["scopes"].keys())[0]
        collection_name = list(bucket_dict[bucket.name][
            "scopes"][scope_name]["collections"].keys())[0]
        scope = BucketUtils.get_scope_obj(
            bucket, scope_name)
        collection = BucketUtils.get_collection_obj(scope, collection_name)

        if len(target_vbuckets) == 0:
            self.log.error("No target vbucket list generated to load data")
            remote.disconnect()
            return

        self.start_doc_loading_tasks(target_vbuckets, scope_name, collection)

        # Induce the error condition
        error_sim.create(error_to_simulate)

        self.sleep(20, "Wait before reverting the error condition")
        # Revert the simulated error condition and close the ssh session
        error_sim.revert(error_to_simulate)
        remote.disconnect()

        # Wait for doc loading task to complete
        self.task.jython_task_manager.get_task_result(self.doc_loading_task)
        if self.atomicity:
            self.task.jython_task_manager.get_task_result(
                self.transaction_load_task)
        elif self.N1qltxn:
            self.task.jython_task_manager.get_task_result(
                self.N1ql_load_task)

        if self.doc_loading_task.fail:
            if self.target_node == "active" or self.num_replicas in [2, 3]:
                self.log_failure("Unwanted failures for keys: %s"
                                 % self.doc_loading_task.fail.keys())

        validate_passed = \
            self.durability_helper.validate_durability_exception(
                self.doc_loading_task.fail,
                SDKException.DurabilityAmbiguousException)
        if not validate_passed:
            self.log_failure("Unwanted exception seen during validation")

        # Get SDK client for CRUD retries
        sdk_client = self.sdk_client_pool.get_client_for_bucket(self.bucket)
        for doc_key, crud_result in self.doc_loading_task.fail.items():
            result = sdk_client.crud(DocLoading.Bucket.DocOps.CREATE,
                                     doc_key,
                                     crud_result["value"],
                                     replicate_to=self.replicate_to,
                                     persist_to=self.persist_to,
                                     durability=self.durability_level,
                                     timeout=self.sdk_timeout)
            if result["status"] is False:
                self.log_failure("Retry of doc_key %s failed: %s"
                                 % (doc_key, result["error"]))
        # Release the SDK client back to the pool
        self.sdk_client_pool.release_client(sdk_client)

        self.validate_test_failure()

        self.bucket_util._wait_for_stats_all_buckets(self.cluster.buckets)
        # Validate docs per collection
        if not self.N1qltxn and self.atomicity is False:
            self.bucket_util.validate_docs_per_collections_all_buckets(
                self.cluster)
Example #9
    def ops_change_cas(self):
        """
        CAS value manipulation by update, delete, expire test.
        We load a certain number of items. For half of them, we use the
        MemcachedClient cas() method to mutate those item values in order
        to change the CAS value of those items.
        We use MemcachedClient set() to set a quarter of the items as expired,
        and MemcachedClient delete() to delete another quarter of the items.
        """
        gen_update = doc_generator(self.key,
                                   0,
                                   self.num_items // 2,
                                   doc_size=self.doc_size)
        gen_delete = doc_generator(self.key,
                                   self.num_items // 2,
                                   (self.num_items * 3 // 4),
                                   doc_size=self.doc_size)
        gen_expire = doc_generator(self.key, (self.num_items * 3 // 4),
                                   self.num_items,
                                   doc_size=self.doc_size)

        # Create cbstat objects
        self.shell_conn = dict()
        self.cb_stat = dict()
        self.vb_details = dict()
        for node in self.cluster_util.get_kv_nodes():
            self.vb_details[node.ip] = dict()
            self.vb_details[node.ip]["active"] = list()
            self.vb_details[node.ip]["replica"] = list()

            self.shell_conn[node.ip] = RemoteMachineShellConnection(node)
            self.cb_stat[node.ip] = Cbstats(self.shell_conn[node.ip])
            self.vb_details[node.ip]["active"] = \
                self.cb_stat[node.ip].vbucket_list(self.bucket.name, "active")
            self.vb_details[node.ip]["replica"] = \
                self.cb_stat[node.ip].vbucket_list(self.bucket.name, "replica")

        collections = BucketUtils.get_random_collections(
            self.bucket_util.buckets, 2, 2, 1)
        for self.bucket_name, scope_dict in collections.items():
            bucket = BucketUtils.get_bucket_obj(self.bucket_util.buckets,
                                                self.bucket_name)
            scope_dict = scope_dict["scopes"]
            for scope_name, collection_dict in scope_dict.items():
                collection_dict = collection_dict["collections"]
                for c_name, c_data in collection_dict.items():
                    if self.doc_ops is not None:
                        if "update" in self.doc_ops:
                            self.verify_cas("update", gen_update, scope_name,
                                            c_name)
                        if "touch" in self.doc_ops:
                            self.verify_cas("touch", gen_update, scope_name,
                                            c_name)
                        if "delete" in self.doc_ops:
                            self.verify_cas("delete", gen_delete, scope_name,
                                            c_name)
                        if "expire" in self.doc_ops:
                            self.verify_cas("expire", gen_expire, scope_name,
                                            c_name)

        # Validate test failure
        self.validate_test_failure()
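The three generators at the top of this example partition the initial
keyspace: the first half is mutated (update/touch), the next quarter deleted,
and the last quarter expired. A quick sketch of the range arithmetic with
floor division:

# Sketch: keyspace partitioning used by the three generators above
num_items = 1000                                     # example value
update_range = (0, num_items // 2)                   # first half: update/touch
delete_range = (num_items // 2, num_items * 3 // 4)  # next quarter: delete
expire_range = (num_items * 3 // 4, num_items)       # last quarter: expire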