Code example #1
File: basic_ops.py  Project: ritalrw/TAF
    def test_delete_collection_during_load(self):
        """ Get a random collection/scope ,
            delete collection/scope while loading"""
        delete_scope = self.input.param("delete_scope", False)
        retry = 5
        scope_dict = dict()
        scope_name = ""

        while retry > 0:
            bucket_dict = BucketUtils.get_random_collections([self.bucket], 1,
                                                             "all", "all")
            scope_dict = bucket_dict[self.bucket.name]["scopes"]
            scope_name = scope_dict.keys()[0]
            # Check to prevent default scope deletion, which is not allowed
            if (scope_name != CbServer.default_scope) or not delete_scope:
                break
            retry -= 1
        collection_name = scope_dict[scope_name]["collections"].keys()[0]

        self.num_items = \
            self.bucket \
                .scopes[scope_name] \
                .collections[collection_name] \
                .num_items
        load_gen = doc_generator(self.key, self.num_items, self.num_items * 20)

        self.log.info("Delete collection while load %s: %s" %
                      (scope_name, collection_name))
        task = self.task.async_load_gen_docs(self.cluster,
                                             self.bucket,
                                             load_gen,
                                             "create",
                                             exp=self.maxttl,
                                             batch_size=200,
                                             process_concurrency=1,
                                             scope=scope_name,
                                             compression=self.sdk_compression,
                                             collection=collection_name,
                                             print_ops_rate=True,
                                             retries=0)

        self.sleep(5)
        self.bucket_util.print_bucket_stats()

        if delete_scope:
            self.bucket_util.drop_scope(self.cluster.master, self.bucket,
                                        scope_name)
            del self.bucket.scopes[scope_name]

        else:
            self.bucket_util.drop_collection(self.cluster.master, self.bucket,
                                             scope_name, collection_name)
            del self.bucket.scopes[scope_name].collections[collection_name]

        # Stop the load task; ops against the dropped collection/scope
        # are expected to fail
        self.task_manager.stop_task(task)

        # Validate doc count as per bucket collections
        self.bucket_util.validate_docs_per_collections_all_buckets()
        self.validate_test_failure()
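
All of these examples walk the same nested dictionary returned by BucketUtils.get_random_collections. A minimal sketch of the shape implied by the lookups above (bucket/scope/collection names are placeholders, and any keys beyond the ones the tests index into are an assumption):

    # Shape implied by bucket_dict[bucket.name]["scopes"][s]["collections"]
    bucket_dict = {
        "default": {                         # bucket name (placeholder)
            "scopes": {
                "scope_1": {                 # randomly picked scope
                    "collections": {
                        "collection_1": {},  # randomly picked collection(s)
                    }
                }
            }
        }
    }

    # The samples run on Python 2 / Jython, hence `scope_dict.keys()[0]`.
    # An equivalent lookup that also works on Python 3:
    scope_dict = bucket_dict["default"]["scopes"]
    scope_name = next(iter(scope_dict))
    collection_name = next(iter(scope_dict[scope_name]["collections"]))
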
Code example #2
    def key_not_exists_test(self):
        client = SDKClient([self.cluster.master], self.bucket)
        collections = BucketUtils.get_random_collections([self.bucket], 1, 1,
                                                         1)
        scope_dict = collections[self.bucket.name]["scopes"]
        scope_name = scope_dict.keys()[0]
        collection_name = scope_dict[scope_name]["collections"].keys()[0]
        client.select_collection(scope_name, collection_name)
        self.log.info("CAS test on collection %s: %s" %
                      (scope_name, collection_name))

        load_gen = doc_generator(self.key, 0, self.num_items, doc_size=256)
        key, val = load_gen.next()

        for _ in range(1500):
            result = client.crud("create",
                                 key,
                                 val,
                                 durability=self.durability_level,
                                 timeout=self.sdk_timeout)
            if result["status"] is False:
                self.log_failure("Create failed: %s" % result)
            create_cas = result["cas"]

            # Delete and verify get fails
            result = client.crud("delete",
                                 key,
                                 durability=self.durability_level,
                                 timeout=self.sdk_timeout)
            if result["status"] is False:
                self.log_failure("Delete failed: %s" % result)
            elif result["cas"] <= create_cas:
                self.log_failure("Delete returned invalid cas: %s" % result)

            result = client.crud("read", key, timeout=self.sdk_timeout)
            if result["status"] is True:
                self.log_failure("Read succeeded after delete: %s" % result)
            elif SDKException.DocumentNotFoundException \
                    not in str(result["error"]):
                self.log_failure("Invalid exception during read "
                                 "for non-exists key: %s" % result)

            # CAS errors should fail fast (not stall the test for 10 seconds),
            # and we also need to verify the correct exception is raised
            result = client.crud("replace",
                                 key,
                                 val,
                                 exp=60,
                                 timeout=self.sdk_timeout,
                                 cas=create_cas)
            if result["status"] is True:
                self.log_failure("Replace succeeded after delete: %s" % result)
            elif SDKException.DocumentNotFoundException \
                    not in str(result["error"]):
                self.log_failure("Invalid exception during replace "
                                 "for non-exists key: %s" % result)

        # Validate doc count as per bucket collections
        self.bucket_util.validate_docs_per_collections_all_buckets()
        self.validate_test_failure()
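
The loop above checks two invariants: the CAS returned by the delete must be strictly greater than the CAS returned by the create, and once the key is deleted both the read and the stale-CAS replace must surface DocumentNotFoundException. A rough sketch of the result dicts those assertions imply (field values are illustrative, not real SDK output):

    # Hypothetical result dicts, inferred from the fields the test reads
    create_result = {"status": True, "cas": 1700000000000001, "error": None}
    delete_result = {"status": True, "cas": 1700000000000042, "error": None}
    read_result = {"status": False, "cas": 0,
                   "error": "DocumentNotFoundException: key not found"}

    # Invariants exercised on every iteration
    assert delete_result["cas"] > create_result["cas"]
    assert "DocumentNotFoundException" in str(read_result["error"])
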
Code example #3
File: basic_ops.py  Project: AnithaKuberan/TAF
    def test_create_delete_recreate_collection(self):
        collections = BucketUtils.get_random_collections(
            self.bucket_util.buckets, 10, 10, 1)
        # delete collection
        for bucket_name, scope_dict in collections.iteritems():
            bucket = BucketUtils.get_bucket_obj(self.bucket_util.buckets,
                                                bucket_name)
            scope_dict = scope_dict["scopes"]
            for scope_name, collection_dict in scope_dict.items():
                collection_dict = collection_dict["collections"]
                for c_name, _ in collection_dict.items():
                    BucketUtils.drop_collection(self.cluster.master, bucket,
                                                scope_name, c_name)
        # recreate collection
        for bucket_name, scope_dict in collections.iteritems():
            bucket = BucketUtils.get_bucket_obj(self.bucket_util.buckets,
                                                bucket_name)
            scope_dict = scope_dict["scopes"]
            for scope_name, collection_dict in scope_dict.items():
                collection_dict = collection_dict["collections"]
                for c_name, _ in collection_dict.items():
                    # Cannot create a _default collection
                    if c_name == CbServer.default_collection:
                        continue
                    col_obj = \
                        bucket.scopes[scope_name].collections[c_name]
                    BucketUtils.create_collection(self.cluster.master, bucket,
                                                  scope_name,
                                                  col_obj.get_dict_object())
        # Validate doc count as per bucket collections
        self.bucket_util.validate_docs_per_collections_all_buckets()
        self.validate_test_failure()
Code example #4
    def get_collection_for_atrcollection(self):
        collections = BucketUtils.get_random_collections(
                self.buckets, 1, "all", self.num_buckets)
        for bucket, scope_dict in collections.items():
            for s_name, c_dict in scope_dict["scopes"].items():
                for c_name, c_data in c_dict["collections"].items():
                    if random.choice([True, False]):
                        atrcollection = ("`%s`.`%s`.`%s`"
                                         % (bucket, s_name, c_name))
                    else:
                        atrcollection = ("`%s`.`%s`.`%s`"
                                         % (bucket,
                                            CbServer.default_scope,
                                            CbServer.default_collection))
        return atrcollection
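
The helper simply builds a fully-qualified, backtick-quoted keyspace path; note that the nested loops overwrite atrcollection on every iteration, so only the path for the last collection visited (or its _default fallback) is returned. With illustrative names the returned string looks like this:

    bucket, s_name, c_name = "default", "scope_1", "collection_1"  # placeholders
    atrcollection = "`%s`.`%s`.`%s`" % (bucket, s_name, c_name)
    # -> "`default`.`scope_1`.`collection_1`"
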
Code example #5
    def touch_test(self):
        self.log.info("Loading bucket into DGM")
        load_gen = doc_generator(self.key,
                                 0,
                                 self.num_items,
                                 doc_size=self.doc_size)
        dgm_gen = doc_generator(self.key, self.num_items, self.num_items + 1)
        dgm_task = self.task.async_load_gen_docs(
            self.cluster,
            self.bucket_util.buckets[0],
            dgm_gen,
            "create",
            0,
            persist_to=self.persist_to,
            replicate_to=self.replicate_to,
            durability=self.durability_level,
            timeout_secs=self.sdk_timeout,
            batch_size=10,
            process_concurrency=4,
            active_resident_threshold=self.active_resident_threshold)
        self.task_manager.get_task_result(dgm_task)

        self.log.info("Touch intial self.num_items docs which are "
                      "residing on disk due to DGM")
        client = SDKClient([self.cluster.master], self.bucket_util.buckets[0])
        collections = BucketUtils.get_random_collections(
            self.bucket_util.buckets, 2, 2, 1)
        for self.bucket_name, scope_dict in collections.iteritems():
            bucket = BucketUtils.get_bucket_obj(self.bucket_util.buckets,
                                                self.bucket_name)
            scope_dict = scope_dict["scopes"]
            for scope_name, collection_dict in scope_dict.items():
                collection_dict = collection_dict["collections"]
                for c_name, c_data in collection_dict.items():
                    self.log.info("CAS test on collection %s: %s" %
                                  (scope_name, c_name))
                    client.select_collection(scope_name, c_name)
                    while load_gen.has_next():
                        key, _ = load_gen.next()
                        result = client.crud("touch",
                                             key,
                                             durability=self.durability_level,
                                             timeout=self.sdk_timeout)
                        if result["status"] is not True:
                            self.log_failure("Touch on %s failed: %s" %
                                             (key, result))
        client.close()
        self.bucket_util._wait_for_stats_all_buckets()
        # Validate doc count as per bucket collections
        self.bucket_util.validate_docs_per_collections_all_buckets()
        self.validate_test_failure()
Code example #6
    def touch_test(self):
        self.log.info("Loading bucket %s into %s%% DGM" %
                      (self.bucket.name, self.active_resident_threshold))
        load_gen = doc_generator(self.key,
                                 0,
                                 self.num_items,
                                 doc_size=self.doc_size)
        dgm_gen = doc_generator(self.key, self.num_items, self.num_items + 1)
        dgm_task = self.task.async_load_gen_docs(
            self.cluster,
            self.bucket,
            dgm_gen,
            DocLoading.Bucket.DocOps.CREATE,
            0,
            persist_to=self.persist_to,
            replicate_to=self.replicate_to,
            durability=self.durability_level,
            timeout_secs=self.sdk_timeout,
            batch_size=10,
            process_concurrency=4,
            active_resident_threshold=self.active_resident_threshold,
            sdk_client_pool=self.sdk_client_pool)
        self.task_manager.get_task_result(dgm_task)

        self.log.info("Touch initial self.num_items docs which are "
                      "residing on disk due to DGM")
        client = self.sdk_client_pool.get_client_for_bucket(self.bucket)
        collections = BucketUtils.get_random_collections([self.bucket], 2, 2,
                                                         1)
        for bucket_name, scope_dict in collections.iteritems():
            for scope_name, collection_dict in scope_dict["scopes"].items():
                for c_name, c_data in collection_dict["collections"].items():
                    self.log.info("CAS test on collection %s: %s" %
                                  (scope_name, c_name))
                    client.select_collection(scope_name, c_name)
                    while load_gen.has_next():
                        key, _ = load_gen.next()
                        result = client.crud(DocLoading.Bucket.DocOps.TOUCH,
                                             key,
                                             durability=self.durability_level,
                                             timeout=self.sdk_timeout)
                        if result["status"] is not True:
                            self.log_failure("Touch on %s failed: %s" %
                                             (key, result))
        # Reset the client's scope/collection names back to _default, since
        # select_collection() changed them while iterating the collections
        client.scope_name = CbServer.default_scope
        client.collection_name = CbServer.default_collection
        self.sdk_client_pool.release_client(client)
        self.validate_test_failure()
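
Compared with code example #5, this pooled variant has one extra obligation: a borrowed client must be reset to the _default scope/collection before it is released, otherwise the next borrower inherits the last select_collection() call. The checkout/restore/release pattern in isolation (the try/finally is an addition for robustness, not part of the original test):

    client = self.sdk_client_pool.get_client_for_bucket(self.bucket)
    try:
        client.select_collection("scope_1", "collection_1")  # illustrative names
        # ... CRUDs against the selected collection ...
    finally:
        # Restore defaults before handing the client back to the pool
        client.scope_name = CbServer.default_scope
        client.collection_name = CbServer.default_collection
        self.sdk_client_pool.release_client(client)
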
Code example #7
File: basic_ops.py  Project: AnithaKuberan/TAF
    def test_drop_collection_compaction(self):
        collections = BucketUtils.get_random_collections(
            self.bucket_util.buckets, 10, 10, 1)
        # Delete collection
        for self.bucket_name, scope_dict in collections.iteritems():
            bucket = BucketUtils.get_bucket_obj(self.bucket_util.buckets,
                                                self.bucket_name)
            scope_dict = scope_dict["scopes"]
            for scope_name, collection_dict in scope_dict.items():
                collection_dict = collection_dict["collections"]
                for c_name, c_data in collection_dict.items():
                    BucketUtils.drop_collection(self.cluster.master, bucket,
                                                scope_name, c_name)
        # Trigger compaction
        remote_client = RemoteMachineShellConnection(self.cluster.master)
        _ = remote_client.wait_till_compaction_end(
            RestConnection(self.cluster.master),
            self.bucket_name,
            timeout_in_seconds=(self.wait_timeout * 10))
        remote_client.disconnect()
        # Validate doc count as per bucket collections
        self.bucket_util.validate_docs_per_collections_all_buckets()
        self.validate_test_failure()
Code example #8
File: process_crash.py  Project: bkumaran/TAF
    def test_crash_process(self):
        """
        1. Start loading docs into the default bucket
        2. Crash the requested process, which will not impact the
           memcached operations
        3. Wait for load bucket task to complete
        4. Validate the docs for durability
        """
        def_bucket = self.cluster.buckets[0]
        target_node = self.getTargetNode()
        remote = RemoteMachineShellConnection(target_node)
        target_vbuckets = range(0, self.cluster_util.vbuckets)
        retry_exceptions = list()
        self.transaction_load_task = None
        self.doc_loading_task = None
        self.N1ql_load_task = None

        # If memcached is going to be killed, target only the vbuckets
        # hosted on that particular node. Otherwise all vbuckets/nodes
        # can be targeted for the KV operations.
        if self.process_name == "memcached":
            target_vbuckets = CrashTest.getVbucketNumbers(
                remote, def_bucket.name, self.target_node)
            if self.target_node == "active":
                retry_exceptions = [SDKException.TimeoutException]
        if len(target_vbuckets) == 0:
            self.log.error("No target vbucket list generated to load data")
            remote.disconnect()
            return

        bucket_dict = BucketUtils.get_random_collections(
            self.cluster.buckets,
            req_num=1,
            consider_scopes="all",
            consider_buckets="all")

        bucket = BucketUtils.get_bucket_obj(self.cluster.buckets,
                                            bucket_dict.keys()[0])
        scope_name = bucket_dict[bucket.name]["scopes"].keys()[0]
        collection_name = bucket_dict[bucket.name][
            "scopes"][scope_name]["collections"].keys()[0]
        scope = BucketUtils.get_scope_obj(
            bucket, scope_name)
        collection = BucketUtils.get_collection_obj(
            scope, collection_name)

        self.start_doc_loading_tasks(target_vbuckets, scope_name, collection)

        task_info = dict()
        task_info[self.doc_loading_task] = \
            self.bucket_util.get_doc_op_info_dict(
                def_bucket, DocLoading.Bucket.DocOps.CREATE, 0,
                replicate_to=self.replicate_to, persist_to=self.persist_to,
                durability=self.durability_level,
                timeout=self.sdk_timeout, time_unit="seconds",
                retry_exceptions=retry_exceptions)

        self.sleep(10, "Wait for doc_ops to start")
        self.log.info("Killing {0}:{1} on node {2}"
                      .format(self.process_name, self.service_name,
                              target_node.ip))
        remote.kill_process(self.process_name, self.service_name,
                            signum=signum[self.sig_type])
        remote.disconnect()
        # Wait for tasks completion and validate failures
        if self.transaction_load_task:
            self.task.jython_task_manager.get_task_result(
                self.transaction_load_task)
        if self.N1qltxn:
            self.task.jython_task_manager.get_task_result(
                self.N1ql_load_task)
        self.task_manager.get_task_result(self.doc_loading_task)
        self.bucket_util.verify_doc_op_task_exceptions(task_info,
                                                       self.cluster)
        self.bucket_util.log_doc_ops_task_failures(task_info)

        # Verification stats
        verification_dict = dict()
        verification_dict["ops_create"] = 2*self.num_items
        verification_dict["sync_write_aborted_count"] = 0
        verification_dict["rollback_item_count"] = 0
        verification_dict["pending_writes"] = 0
        if self.durability_level:
            verification_dict["sync_write_committed_count"] = 2*self.num_items

        if self.bucket_type == Bucket.Type.EPHEMERAL \
                and self.process_name == "memcached":
            result = self.task.rebalance(self.servers[:self.nodes_init],
                                         [], [])
            self.assertTrue(result, "Rebalance failed")

        # Validate doc count
        if self.process_name != "memcached":
            stats_failed = \
                self.durability_helper.verify_vbucket_details_stats(
                    def_bucket, self.cluster_util.get_kv_nodes(),
                    vbuckets=self.cluster_util.vbuckets,
                    expected_val=verification_dict)
            if stats_failed:
                self.fail("Cbstats verification failed")

        # Doc count validation per collection
        if not self.N1qltxn and self.atomicity is False:
            self.bucket_util.validate_docs_per_collections_all_buckets(
                self.cluster)
Code example #9
File: process_crash.py  Project: bkumaran/TAF
    def test_stop_process(self):
        """
        1. Start loading docs into the default bucket
        2. Stop the requested process, which will impact the
           memcached operations
        3. Wait for load bucket task to complete
        4. Validate the docs for durability
        """
        error_to_simulate = self.input.param("simulate_error", None)
        target_node = self.getTargetNode()
        remote = RemoteMachineShellConnection(target_node)
        error_sim = CouchbaseError(self.log, remote)
        target_vbuckets = CrashTest.getVbucketNumbers(
            remote, self.bucket.name, self.target_node)

        bucket_dict = BucketUtils.get_random_collections(
            self.cluster.buckets,
            req_num=1,
            consider_scopes="all",
            consider_buckets="all")

        bucket = BucketUtils.get_bucket_obj(self.cluster.buckets,
                                            bucket_dict.keys()[0])
        scope_name = bucket_dict[bucket.name]["scopes"].keys()[0]
        collection_name = bucket_dict[bucket.name][
            "scopes"][scope_name]["collections"].keys()[0]
        scope = BucketUtils.get_scope_obj(
            bucket, scope_name)
        collection = BucketUtils.get_collection_obj(scope, collection_name)

        if len(target_vbuckets) == 0:
            self.log.error("No target vbucket list generated to load data")
            remote.disconnect()
            return

        self.start_doc_loading_tasks(target_vbuckets, scope_name, collection)

        # Induce the error condition
        error_sim.create(error_to_simulate)

        self.sleep(20, "Wait before reverting the error condition")
        # Revert the simulated error condition and close the ssh session
        error_sim.revert(error_to_simulate)
        remote.disconnect()

        # Wait for doc loading task to complete
        self.task.jython_task_manager.get_task_result(self.doc_loading_task)
        if self.atomicity:
            self.task.jython_task_manager.get_task_result(
                self.transaction_load_task)
        elif self.N1qltxn:
            self.task.jython_task_manager.get_task_result(
                self.N1ql_load_task)

        if len(self.doc_loading_task.fail.keys()) != 0:
            if self.target_node == "active" or self.num_replicas in [2, 3]:
                self.log_failure("Unwanted failures for keys: %s"
                                 % self.doc_loading_task.fail.keys())

        validate_passed = \
            self.durability_helper.validate_durability_exception(
                self.doc_loading_task.fail,
                SDKException.DurabilityAmbiguousException)
        if not validate_passed:
            self.log_failure("Unwanted exception seen during validation")

        # Get SDK client for CRUD retries
        sdk_client = self.sdk_client_pool.get_client_for_bucket(self.bucket)
        for doc_key, crud_result in self.doc_loading_task.fail.items():
            result = sdk_client.crud(DocLoading.Bucket.DocOps.CREATE,
                                     doc_key,
                                     crud_result["value"],
                                     replicate_to=self.replicate_to,
                                     persist_to=self.persist_to,
                                     durability=self.durability_level,
                                     timeout=self.sdk_timeout)
            if result["status"] is False:
                self.log_failure("Retry of doc_key %s failed: %s"
                                 % (doc_key, result["error"]))
        # Close the SDK connection
        self.sdk_client_pool.release_client(sdk_client)

        self.validate_test_failure()

        self.bucket_util._wait_for_stats_all_buckets(self.cluster.buckets)
        # Update self.num_items and validate docs per collection
        if not self.N1qltxn and self.atomicity is False:
            self.bucket_util.validate_docs_per_collections_all_buckets(
                self.cluster)
Code example #10
    def test_key_not_exists(self):
        def run_test(bucket, scope, collection):
            self.log.info("CAS test on %s:%s" % (scope, collection))

            client = self.sdk_client_pool.get_client_for_bucket(
                bucket, scope, collection)
            for _ in range(1500):
                result = client.crud(DocLoading.Bucket.DocOps.CREATE,
                                     key,
                                     val,
                                     durability=self.durability_level,
                                     timeout=self.sdk_timeout)
                if result["status"] is False:
                    self.log_failure("Create failed: %s" % result)
                create_cas = result["cas"]

                # Delete and verify get fails
                result = client.crud(DocLoading.Bucket.DocOps.DELETE,
                                     key,
                                     durability=self.durability_level,
                                     timeout=self.sdk_timeout)
                if result["status"] is False:
                    self.log_failure("Delete failed: %s" % result)
                elif result["cas"] <= create_cas:
                    self.log_failure("Invalid cas on delete: %s" % result)

                result = client.crud(DocLoading.Bucket.DocOps.READ,
                                     key,
                                     timeout=self.sdk_timeout)
                if result["status"] is True:
                    self.log_failure("Read okay after delete: %s" % result)
                elif SDKException.DocumentNotFoundException \
                        not in str(result["error"]):
                    self.log_failure("Invalid exception during read "
                                     "for non-exists key: %s" % result)

                # CAS errors should fail fast (not stall the test for 10
                # seconds), and we also need to verify the correct exception
                # is raised
                result = client.crud(DocLoading.Bucket.DocOps.REPLACE,
                                     key,
                                     val,
                                     exp=60,
                                     timeout=self.sdk_timeout,
                                     cas=create_cas)
                if result["status"] is True:
                    self.log_failure("Replace okay after delete: %s" % result)
                elif SDKException.DocumentNotFoundException \
                        not in str(result["error"]):
                    self.log_failure("Invalid exception during replace "
                                     "for non-exists key: %s" % result)
            self.sdk_client_pool.release_client(client)

        self.key = "test_key_not_exists"
        load_gen = doc_generator(self.key, 0, 1, doc_size=256)
        key, val = load_gen.next()

        collections = BucketUtils.get_random_collections([self.bucket], 2, 2,
                                                         1)
        threads = list()
        for bucket_name, scope_dict in collections.iteritems():
            bucket_obj = self.bucket_util.get_bucket_obj(
                self.bucket_util.buckets, bucket_name)
            for scope_name, collection_dict in scope_dict["scopes"].items():
                for c_name, c_data in collection_dict["collections"].items():
                    thread = Thread(target=run_test,
                                    args=[bucket_obj, scope_name, c_name])
                    thread.start()
                    threads.append(thread)
        for thread in threads:
            thread.join()

        self.validate_test_failure()
        # Validate doc count as per bucket collections
        self.bucket_util.validate_docs_per_collections_all_buckets()
Code example #11
    def ops_change_cas(self):
        """
        CAS value manipulation via update, delete and expire operations.
        We load a certain number of items. For half of them we use the
        MemcachedClient cas() method to mutate the values, which changes
        the CAS of those items. We use MemcachedClient set() to expire a
        quarter of the items and MemcachedClient delete() to delete the
        remaining quarter.
        """
        gen_update = doc_generator(self.key,
                                   0,
                                   self.num_items / 2,
                                   doc_size=self.doc_size)
        gen_delete = doc_generator(self.key,
                                   self.num_items / 2,
                                   (self.num_items * 3 / 4),
                                   doc_size=self.doc_size)
        gen_expire = doc_generator(self.key, (self.num_items * 3 / 4),
                                   self.num_items,
                                   doc_size=self.doc_size)

        # Create cbstat objects
        self.shell_conn = dict()
        self.cb_stat = dict()
        self.vb_details = dict()
        for node in self.cluster_util.get_kv_nodes():
            self.vb_details[node.ip] = dict()
            self.vb_details[node.ip]["active"] = list()
            self.vb_details[node.ip]["replica"] = list()

            self.shell_conn[node.ip] = RemoteMachineShellConnection(node)
            self.cb_stat[node.ip] = Cbstats(self.shell_conn[node.ip])
            self.vb_details[node.ip]["active"] = \
                self.cb_stat[node.ip].vbucket_list(self.bucket.name, "active")
            self.vb_details[node.ip]["replica"] = \
                self.cb_stat[node.ip].vbucket_list(self.bucket.name, "replica")

        collections = BucketUtils.get_random_collections(
            self.bucket_util.buckets, 2, 2, 1)
        for bucket_name, scope_dict in collections.iteritems():
            bucket = self.bucket_util.get_bucket_obj(self.bucket_util.buckets,
                                                     bucket_name)
            for scope_name, collection_dict in scope_dict["scopes"].items():
                for c_name, c_data in collection_dict["collections"].items():
                    threads = list()
                    if self.doc_ops is not None:
                        if DocLoading.Bucket.DocOps.UPDATE in self.doc_ops:
                            thread = Thread(
                                target=self.verify_cas,
                                args=[
                                    DocLoading.Bucket.DocOps.UPDATE,
                                    gen_update, bucket, scope_name, c_name
                                ])
                            thread.start()
                            threads.append(thread)
                        if DocLoading.Bucket.DocOps.TOUCH in self.doc_ops:
                            thread = Thread(target=self.verify_cas,
                                            args=[
                                                DocLoading.Bucket.DocOps.TOUCH,
                                                gen_update, bucket, scope_name,
                                                c_name
                                            ])
                            thread.start()
                            threads.append(thread)
                        if DocLoading.Bucket.DocOps.DELETE in self.doc_ops:
                            thread = Thread(
                                target=self.verify_cas,
                                args=[
                                    DocLoading.Bucket.DocOps.DELETE,
                                    gen_delete, bucket, scope_name, c_name
                                ])
                            thread.start()
                            threads.append(thread)
                        if "expire" in self.doc_ops:
                            thread = Thread(target=self.verify_cas,
                                            args=[
                                                "expire", gen_expire, bucket,
                                                scope_name, c_name
                                            ])
                            thread.start()
                            threads.append(thread)

                    # Wait for all threads to complete
                    for thread in threads:
                        thread.join()

        # Validate test failure
        self.validate_test_failure()
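
For reference, the three generators partition the key space exactly as the docstring describes. With num_items = 1000 (and Python 2 integer division, as in the tests) the ranges work out to:

    num_items = 1000
    update_range = (0, num_items / 2)                  # keys [0, 500)   -> update/touch
    delete_range = (num_items / 2, num_items * 3 / 4)  # keys [500, 750) -> delete
    expire_range = (num_items * 3 / 4, num_items)      # keys [750, 1000) -> expire
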
Code example #12
    def ops_change_cas(self):
        """
        CAS value manipulation via update, delete and expire operations.
        We load a certain number of items. For half of them we use the
        MemcachedClient cas() method to mutate the values, which changes
        the CAS of those items. We use MemcachedClient set() to expire a
        quarter of the items and MemcachedClient delete() to delete the
        remaining quarter.
        """
        gen_update = doc_generator(self.key,
                                   0,
                                   self.num_items / 2,
                                   doc_size=self.doc_size)
        gen_delete = doc_generator(self.key,
                                   self.num_items / 2,
                                   (self.num_items * 3 / 4),
                                   doc_size=self.doc_size)
        gen_expire = doc_generator(self.key, (self.num_items * 3 / 4),
                                   self.num_items,
                                   doc_size=self.doc_size)

        # Create cbstat objects
        self.shell_conn = dict()
        self.cb_stat = dict()
        self.vb_details = dict()
        for node in self.cluster_util.get_kv_nodes():
            self.vb_details[node.ip] = dict()
            self.vb_details[node.ip]["active"] = list()
            self.vb_details[node.ip]["replica"] = list()

            self.shell_conn[node.ip] = RemoteMachineShellConnection(node)
            self.cb_stat[node.ip] = Cbstats(self.shell_conn[node.ip])
            self.vb_details[node.ip]["active"] = \
                self.cb_stat[node.ip].vbucket_list(self.bucket.name, "active")
            self.vb_details[node.ip]["replica"] = \
                self.cb_stat[node.ip].vbucket_list(self.bucket.name, "replica")

        collections = BucketUtils.get_random_collections(
            self.bucket_util.buckets, 2, 2, 1)
        for self.bucket_name, scope_dict in collections.iteritems():
            bucket = BucketUtils.get_bucket_obj(self.bucket_util.buckets,
                                                self.bucket_name)
            scope_dict = scope_dict["scopes"]
            for scope_name, collection_dict in scope_dict.items():
                collection_dict = collection_dict["collections"]
                for c_name, c_data in collection_dict.items():
                    if self.doc_ops is not None:
                        if "update" in self.doc_ops:
                            self.verify_cas("update", gen_update, scope_name,
                                            c_name)
                        if "touch" in self.doc_ops:
                            self.verify_cas("touch", gen_update, scope_name,
                                            c_name)
                        if "delete" in self.doc_ops:
                            self.verify_cas("delete", gen_delete, scope_name,
                                            c_name)
                        if "expire" in self.doc_ops:
                            self.verify_cas("expire", gen_expire, scope_name,
                                            c_name)

        # Validate test failure
        self.validate_test_failure()
Code example #13
File: basic_ops.py  Project: sreebhargava143/TAF
    def test_delete_collection_during_load(self):
        """ Get a random collection/scope ,
            delete collection/scope while loading"""
        delete_scope = self.input.param("delete_scope", False)
        exclude_dict = dict()
        if delete_scope:
            exclude_dict = {
                self.bucket.name:
                {"scopes": {
                    CbServer.default_scope: {
                        "collections": {}}}}}
        bucket_dict = BucketUtils.get_random_collections(
            [self.bucket], 1, "all", "all", exclude_from=exclude_dict)
        scope_name = collection_name = None
        scope_dict = bucket_dict[self.bucket.name]["scopes"]
        for t_scope, scope_data in scope_dict.items():
            print(scope_data["collections"])
            if scope_data["collections"]:
                scope_name = t_scope
                collection_name = scope_data["collections"].keys()[0]
                break

        self.num_items = \
            self.bucket \
                .scopes[scope_name] \
                .collections[collection_name] \
                .num_items
        load_gen = doc_generator(self.key, self.num_items, self.num_items * 20)

        self.log.info("Delete collection while load %s: %s"
                      % (scope_name, collection_name))
        task = self.task.async_load_gen_docs(
            self.cluster, self.bucket, load_gen, "create",
            exp=self.maxttl,
            batch_size=200, process_concurrency=1,
            scope=scope_name,
            collection=collection_name,
            sdk_client_pool=self.sdk_client_pool,
            suppress_error_table=True,
            compression=self.sdk_compression,
            print_ops_rate=True, retries=0)

        self.sleep(5)
        self.bucket_util.print_bucket_stats()

        if delete_scope:
            # Attempt deleting scope only if it is NOT default scope
            if scope_name != CbServer.default_scope:
                self.bucket_util.drop_scope(self.cluster.master,
                                            self.bucket,
                                            scope_name)

        else:
            self.bucket_util.drop_collection(self.cluster.master,
                                             self.bucket,
                                             scope_name,
                                             collection_name)

        # Wait for the load task to complete; failures are expected
        # after the drop
        self.task_manager.get_task_result(task)

        # Validate doc count as per bucket collections
        self.bucket_util.validate_docs_per_collections_all_buckets()
        self.validate_test_failure()
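
This newer variant replaces the retry loop of code example #1 with the exclude_from argument, so the _default scope can never be picked when delete_scope is set. For a bucket named "default" (hypothetical) the exclusion dict expands to:

    exclude_dict = {
        "default": {
            "scopes": {
                "_default": {            # CbServer.default_scope
                    "collections": {}
                }
            }
        }
    }
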
Code example #14
    def test_compression_insert_validate(self):
        """
        1. Insert docs into multiple collections using snappy client.
           All inserted docs will have random doc_size/content
        2. Read back the docs from the same/different client and validate.
           The validating client can be either a snappy or non-snappy client.
        """
        random_clients = self.input.param("random_clients", False)
        s_name = None
        c_name = None
        bucket_dict = BucketUtils.get_random_collections(
            self.bucket_util.buckets,
            req_num=1,
            consider_scopes="all",
            consider_buckets="all")
        for bucket_name, scope_dict in bucket_dict.items():
            for scope_name, col_dict in scope_dict["scopes"].items():
                for collection_name, _ in col_dict["collections"].items():
                    s_name = scope_name
                    c_name = collection_name

        # Select clients for doc_ops based on user input params
        create_client = self.snappy_client
        update_client = self.snappy_client
        read_client = self.snappy_client
        if random_clients:
            sdk_clients = [self.snappy_client, self.second_client]
            create_client = sample(sdk_clients, 1)[0]
            update_client = sample(sdk_clients, 1)[0]
            read_client = sample(sdk_clients, 1)[0]
        elif self.diff_client_for_validation:
            read_client = self.second_client

        # Log client's compression info for debug purpose
        self.log.info("Create client's compression: %s" %
                      create_client.compression)
        self.log.info("Update client's compression: %s" %
                      update_client.compression)
        self.log.info("Read client's compression: %s" %
                      read_client.compression)

        self.log.info("Performing doc loading in bucket %s" % self.bucket)
        for _, scope in self.bucket.scopes.items():
            self.log.info("Mutating docs under scope: %s" % scope.name)
            for _, collection in scope.collections.items():
                # Select the target collection on the clients performing
                # the CRUDs (they may differ from snappy_client when
                # random_clients is set)
                create_client.select_collection(scope.name, collection.name)
                update_client.select_collection(scope.name, collection.name)
                while self.create_gen.has_next():
                    # Add new doc using snappy client
                    key, value = self.create_gen.next()
                    result = create_client.crud(
                        "create",
                        key,
                        value,
                        exp=self.maxttl,
                        durability=self.durability_level)
                    if result["status"] is False:
                        self.log_failure("Key '%s' insert failed for %s: %s" %
                                         (key, collection.name, result))

                    # Mutate same doc using the same client
                    key, value = self.update_gen.next()
                    result = update_client.crud(
                        "update",
                        key,
                        value,
                        exp=self.maxttl,
                        durability=self.durability_level)
                    if result["status"] is False:
                        self.log_failure("Key '%s' update failed for %s: %s" %
                                         (key, collection.name, result))

                # Reset doc_gens to be utilized by subsequent loaders
                self.create_gen.reset()
                self.update_gen.reset()
                # Validate and report fast failures per collection
                self.validate_test_failure()

        self.log.info("Validating docs in bucket %s" % self.bucket)
        for _, scope in self.bucket.scopes.items():
            self.log.info("Reading docs under scope: %s" % scope.name)
            for _, collection in scope.collections.items():
                read_client.select_collection(scope.name, collection.name)
                while self.update_gen.has_next():
                    key, value = self.update_gen.next()
                    result = read_client.crud("read", key)
                    if str(result["value"]) != str(value):
                        self.log_failure(
                            "Value mismatch for %s in collection %s: %s" %
                            (key, collection.name, result))

        self.validate_test_failure()
        self.bucket.scopes[s_name].collections[
            c_name].num_items += self.num_items
        self.bucket_util.validate_doc_count_as_per_collections(self.bucket)
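
For clarity, the client-selection logic at the top of the test boils down to the following; snappy_client and second_client are the test's compressed and validation clients, and sample() is random.sample:

    from random import sample

    snappy_client, second_client = "snappy_client", "second_client"  # placeholders
    sdk_clients = [snappy_client, second_client]
    # random_clients=True              -> each role picks a client at random
    # diff_client_for_validation=True  -> writes use snappy_client, reads use second_client
    # neither flag                     -> all three roles use snappy_client
    create_client = sample(sdk_clients, 1)[0]  # same effect as random.choice(sdk_clients)
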
Code example #15
    def test_compression_with_parallel_mutations_on_same_collection(self):
        """
        1. Insert docs into a single collection using both snappy/non-snappy
           clients in parallel (Includes overlapping CRUDs on same docs)
        2. Validate the results and stability of mutated docs
        """

        tasks = list()
        # Used for the doc_loading tasks' SDK client creation
        scope = None
        collection = None
        self.batch_size = 30

        bucket_dict = BucketUtils.get_random_collections(
            self.bucket_util.buckets,
            req_num=1,
            consider_scopes="all",
            consider_buckets="all")
        for bucket_name, scope_dict in bucket_dict.items():
            for scope_name, col_dict in scope_dict["scopes"].items():
                for collection_name, _ in col_dict["collections"].items():
                    scope = scope_name
                    collection = collection_name

        self.log.info("Creating doc generators")
        create_gen_1 = self.create_gen
        create_gen_2 = doc_generator(self.key,
                                     self.num_items,
                                     self.num_items * 2,
                                     key_size=self.key_size,
                                     doc_size=self.doc_size,
                                     randomize_doc_size=True,
                                     target_vbucket=self.target_vbucket,
                                     mutation_type="ADD")
        update_gen_1 = self.update_gen
        update_gen_2 = doc_generator(self.key,
                                     0,
                                     self.num_items,
                                     key_size=self.key_size,
                                     doc_size=self.doc_size,
                                     randomize_doc_size=True,
                                     target_vbucket=self.target_vbucket,
                                     mutation_type="SET",
                                     mutate=2)

        self.log.info("Loading initial docs into collection %s::%s" %
                      (scope, collection))
        tasks.append(
            self.task.async_load_gen_docs(self.cluster,
                                          self.bucket,
                                          create_gen_1,
                                          "create",
                                          self.maxttl,
                                          batch_size=self.batch_size,
                                          process_concurrency=3,
                                          replicate_to=self.replicate_to,
                                          persist_to=self.persist_to,
                                          durability=self.durability_level,
                                          compression=self.sdk_compression,
                                          timeout_secs=self.sdk_timeout,
                                          scope=scope,
                                          collection=collection))
        tasks.append(
            self.task.async_load_gen_docs(self.cluster,
                                          self.bucket,
                                          create_gen_2,
                                          "create",
                                          self.maxttl,
                                          batch_size=self.batch_size,
                                          process_concurrency=3,
                                          replicate_to=self.replicate_to,
                                          persist_to=self.persist_to,
                                          durability=self.durability_level,
                                          compression=None,
                                          timeout_secs=self.sdk_timeout,
                                          scope=scope,
                                          collection=collection))

        for task in tasks:
            self.task_manager.get_task_result(task)
            if task.fail.keys():
                self.log_failure("Failures during initial doc loading "
                                 "for keys: %s" % task.fail.keys())

        self.bucket.scopes[scope].collections[collection].num_items \
            += (self.num_items * 2)
        self.bucket_util.validate_doc_count_as_per_collections(self.bucket)

        self.log.info("Performing overlapping mutations")
        tasks = list()
        tasks.append(
            self.task.async_load_gen_docs(self.cluster,
                                          self.bucket,
                                          update_gen_1,
                                          "update",
                                          self.maxttl,
                                          batch_size=self.batch_size,
                                          process_concurrency=3,
                                          replicate_to=self.replicate_to,
                                          persist_to=self.persist_to,
                                          durability=self.durability_level,
                                          compression=self.sdk_compression,
                                          timeout_secs=self.sdk_timeout,
                                          task_identifier="update_1",
                                          scope=scope,
                                          collection=collection))
        tasks.append(
            self.task.async_load_gen_docs(self.cluster,
                                          self.bucket,
                                          update_gen_2,
                                          "update",
                                          self.maxttl,
                                          batch_size=self.batch_size,
                                          process_concurrency=3,
                                          replicate_to=self.replicate_to,
                                          persist_to=self.persist_to,
                                          durability=self.durability_level,
                                          compression=None,
                                          timeout_secs=self.sdk_timeout,
                                          task_identifier="update_2",
                                          scope=scope,
                                          collection=collection))
        for task in tasks:
            self.task_manager.get_task_result(task)
            if task.fail.keys():
                self.log_failure("Failures during %s updates for keys: %s" %
                                 (task.thread_name, task.fail.keys()))

        # Validate docs using a randomly chosen snappy/non-snappy client
        task = self.task.async_validate_docs(
            self.cluster,
            self.bucket,
            create_gen_2,
            "create",
            compression=sample([self.sdk_compression, None], 1)[0],
            batch_size=self.batch_size,
            process_concurrency=3,
            scope=scope,
            collection=collection)
        self.task.jython_task_manager.get_task_result(task)

        # Intermediate collection-doc validation
        self.bucket_util.validate_doc_count_as_per_collections(self.bucket)

        self.log.info("Performing parallel deletes")
        tasks = list()
        tasks.append(
            self.task.async_load_gen_docs(self.cluster,
                                          self.bucket,
                                          create_gen_1,
                                          "delete",
                                          self.maxttl,
                                          batch_size=self.batch_size,
                                          process_concurrency=3,
                                          replicate_to=self.replicate_to,
                                          persist_to=self.persist_to,
                                          durability=self.durability_level,
                                          compression=self.sdk_compression,
                                          timeout_secs=self.sdk_timeout,
                                          scope=scope,
                                          collection=collection))
        tasks.append(
            self.task.async_load_gen_docs(self.cluster,
                                          self.bucket,
                                          create_gen_2,
                                          "delete",
                                          self.maxttl,
                                          batch_size=self.batch_size,
                                          process_concurrency=3,
                                          replicate_to=self.replicate_to,
                                          persist_to=self.persist_to,
                                          durability=self.durability_level,
                                          compression=self.sdk_compression,
                                          timeout_secs=self.sdk_timeout,
                                          scope=scope,
                                          collection=collection))

        for task in tasks:
            self.task_manager.get_task_result(task)
            if task.fail.keys():
                self.log_failure("Failures during initial doc loading "
                                 "for keys: %s" % task.fail.keys())

        # Doc validation
        self.bucket.scopes[scope].collections[collection].num_items \
            -= (self.num_items * 2)
        self.bucket_util.validate_doc_count_as_per_collections(self.bucket)
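
As a quick reference, the four generators used above cover the following key ranges, which is what produces the overlapping CRUDs on the same documents that the test description calls for (the num_items value is illustrative):

    num_items = 10000                                # illustrative
    create_gen_1_range = (0, num_items)              # self.create_gen
    create_gen_2_range = (num_items, num_items * 2)  # new keys, mutation_type="ADD"
    update_gen_1_range = (0, num_items)              # self.update_gen
    update_gen_2_range = (0, num_items)              # same keys again, mutate=2
    # Both update generators hit the same keys as create_gen_1, so the two
    # parallel update tasks race on identical documents.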