Example #1
    def test_doc_key_size(self):
        """
        Insert document keys with min and max key sizes into each available
        collection and validate
        :return:
        """
        min_doc_gen = doc_generator("test_min_key_size",
                                    0,
                                    self.num_items,
                                    key_size=1,
                                    doc_size=self.doc_size,
                                    mix_key_size=False,
                                    randomize_doc_size=False)
        max_doc_gen = doc_generator("test_max_key_size",
                                    0,
                                    self.num_items,
                                    key_size=245,
                                    doc_size=self.doc_size,
                                    mix_key_size=False,
                                    randomize_doc_size=False)
        # Set to keep track of all inserted CAS values
        known_cas = set()

        # Client to insert docs under different collections
        client = SDKClient([self.cluster.master],
                           self.bucket,
                           compression_settings=self.sdk_compression)

        for doc_gen in [min_doc_gen, max_doc_gen]:
            while doc_gen.has_next():
                key, value = doc_gen.next()
                for _, scope in self.bucket.scopes.items():
                    for _, collection in scope.collections.items():
                        client.select_collection(scope.name, collection.name)
                        result = client.crud("create",
                                             key,
                                             value,
                                             self.maxttl,
                                             durability=self.durability_level,
                                             timeout=self.sdk_timeout,
                                             time_unit="seconds")
                        if result["status"] is False:
                            self.log_failure(
                                "Doc create failed for key '%s' "
                                "collection::scope %s::%s - %s" %
                                (key, scope.name, collection.name, result))
                        else:
                            if result["cas"] in known_cas:
                                self.log_failure(
                                    "Same CAS exists under different "
                                    "collection: %s" % result)
                            collection.num_items += 1
                            known_cas.add(result["cas"])

        # Close SDK connection
        client.close()

        # Validate doc count as per bucket collections
        self.bucket_util.validate_docs_per_collections_all_buckets()
        self.validate_test_failure()
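
A note on the sizes used above: Couchbase caps document keys at 250 bytes, and key_size=245 presumably leaves headroom for the numeric suffix doc_generator appends to the key prefix. A minimal sketch of that kind of fixed-width key generation (make_fixed_width_key is an illustrative helper, not part of the framework):

def make_fixed_width_key(prefix, index, key_size):
    # Pad "<prefix>-<index>" with trailing zeros so every key is exactly
    # key_size bytes, mimicking what doc_generator's key_size argument is
    # assumed to do
    key = "%s-%d" % (prefix, index)
    if len(key) > key_size:
        raise ValueError("prefix and index do not fit in key_size")
    return key.ljust(key_size, "0")

# 245-byte keys stay under the server's 250-byte key limit
assert len(make_fixed_width_key("test_max_key_size", 0, 245)) == 245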
Example #2
    def _dockey_data_ops(self, dockey="dockey"):
        if self.target_vbucket is None:
            gen_load = doc_generator(dockey,
                                     0,
                                     self.num_items,
                                     doc_type="json")
        else:
            gen_load = doc_generator(dockey,
                                     0,
                                     self.num_items,
                                     doc_type="json",
                                     target_vbucket=[self.target_vbucket])

        bucket = self.bucket_util.get_all_buckets()[0]
        for op_type in ["create", "update", "delete"]:
            task = self.task.async_load_gen_docs(
                self.cluster,
                bucket,
                gen_load,
                op_type,
                0,
                batch_size=20,
                persist_to=self.persist_to,
                replicate_to=self.replicate_to,
                durability=self.durability_level,
                pause_secs=5,
                timeout_secs=self.sdk_timeout,
                retries=self.sdk_retries)
            self.task.jython_task_manager.get_task_result(task)
            if op_type == "delete":
                self.num_items = 0
            self._persist_and_verify()
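
The if/else above duplicates the doc_generator call. Assuming the generator treats target_vbucket=None the same as omitting the argument, which is how Example 15 below passes it, the branch collapses to a hedged refactor sketch inside the same method:

        target_vb = None
        if self.target_vbucket is not None:
            target_vb = [self.target_vbucket]
        gen_load = doc_generator(dockey, 0, self.num_items,
                                 doc_type="json", target_vbucket=target_vb)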
Example #3
    def touch_test(self):
        self.log.info("1. Loading initial set of documents")
        load_gen = doc_generator(self.key, 0, self.num_items,
                                 doc_size=self.doc_size)
        self._load_all_buckets(load_gen, "create")
        self.bucket_util.verify_stats_all_buckets(self.num_items)
        self.bucket_util._wait_for_stats_all_buckets()

        self.log.info("2. Loading bucket into DGM")
        dgm_gen = doc_generator(
            self.key, self.num_items, self.num_items+1)
        dgm_task = self.task.async_load_gen_docs(
            self.cluster, self.bucket_util.buckets[0], dgm_gen, "create", 0,
            persist_to=self.persist_to,
            replicate_to=self.replicate_to,
            durability=self.durability_level,
            timeout_secs=self.sdk_timeout,
            batch_size=10,
            process_concurrency=4,
            active_resident_threshold=self.active_resident_threshold)
        self.task_manager.get_task_result(dgm_task)

        self.log.info("3. Touch intial self.num_items docs which are "
                      "residing on disk due to DGM")
        client = SDKClient([self.cluster.master],
                           self.bucket_util.buckets[0])
        while load_gen.has_next():
            key, _ = load_gen.next()
            result = client.crud("touch", key,
                                 durability=self.durability_level,
                                 timeout=self.sdk_timeout)
            if result["status"] is not True:
                self.log_failure("Touch on %s failed: %s" % (key, result))
        client.close()
        self.validate_test_failure()
Example #4
    def ops_change_cas(self):
        """
        CAS value manipulation by update, delete, expire test.

        We load a certain number of items. Then, for half of them, we use
        the MemcachedClient cas() method to mutate those item values in
        order to change the CAS value of those items.
        We use MemcachedClient set() to expire a quarter of the items.
        We also use MemcachedClient delete() to delete a quarter of the items.
        """

        gen_load = doc_generator('nosql',
                                 0,
                                 self.num_items,
                                 doc_size=self.doc_size)
        gen_update = doc_generator('nosql',
                                   0,
                                   self.num_items / 2,
                                   doc_size=self.doc_size)
        gen_delete = doc_generator('nosql',
                                   self.num_items / 2,
                                   (self.num_items * 3 / 4),
                                   doc_size=self.doc_size)
        gen_expire = doc_generator('nosql', (self.num_items * 3 / 4),
                                   self.num_items,
                                   doc_size=self.doc_size)
        self._load_all_buckets(gen_load, "create")
        self.bucket_util.verify_stats_all_buckets(self.cluster, self.num_items)
        self.bucket_util._wait_for_stats_all_buckets(self.cluster,
                                                     self.cluster.buckets)

        # Create cbstat objects
        self.shell_conn = dict()
        self.cb_stat = dict()
        self.vb_details = dict()
        for node in self.cluster_util.get_kv_nodes(self.cluster):
            self.vb_details[node.ip] = dict()
            self.vb_details[node.ip]["active"] = list()
            self.vb_details[node.ip]["replica"] = list()

            self.shell_conn[node.ip] = RemoteMachineShellConnection(node)
            self.cb_stat[node.ip] = Cbstats(self.shell_conn[node.ip])
            self.vb_details[node.ip]["active"] = \
                self.cb_stat[node.ip].vbucket_list(self.bucket.name, "active")
            self.vb_details[node.ip]["replica"] = \
                self.cb_stat[node.ip].vbucket_list(self.bucket.name, "replica")

        if self.doc_ops is not None:
            if "update" in self.doc_ops:
                self.verify_cas("update", gen_update)
            if "touch" in self.doc_ops:
                self.verify_cas("touch", gen_update)
            if "delete" in self.doc_ops:
                self.verify_cas("delete", gen_delete)
            if "expire" in self.doc_ops:
                self.verify_cas("expire", gen_expire)

        self.bucket_util._wait_for_stats_all_buckets(self.cluster,
                                                     self.cluster.buckets)
        self.validate_test_failure()
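
verify_cas itself is framework code not shown here, but the invariant it checks can be sketched with the same client.crud API used in Examples 1 and 3: every accepted mutation assigns the document a new CAS. A minimal sketch, assuming an SDKClient named client and a previously created key/value pair:

        old_cas = client.crud("read", key)["cas"]
        result = client.crud("update", key, value)
        if result["status"] is True and result["cas"] == old_cas:
            self.log_failure("CAS unchanged after update of %s: %s"
                             % (key, result))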
Example #5
 def start_doc_loading_tasks(self, target_vbuckets, scope_name,
                             collection_obj):
     # Create doc_generator targeting only the active/replica vbuckets
     # present in the target_node
     transaction_gen_load = doc_generator(
         "transaction_key",
         self.num_items,
         self.new_docs_to_add,
         key_size=self.key_size,
         doc_size=self.doc_size,
         doc_type=self.doc_type,
         target_vbucket=target_vbuckets,
         vbuckets=self.cluster_util.vbuckets)
     gen_load = doc_generator(self.key,
                              self.num_items,
                              self.new_docs_to_add,
                              key_size=self.key_size,
                              doc_size=self.doc_size,
                              doc_type=self.doc_type,
                              target_vbucket=target_vbuckets,
                              vbuckets=self.cluster_util.vbuckets)
     if self.atomicity:
         self.transaction_load_task = \
             self.task.async_load_gen_docs_atomicity(
                 self.cluster, self.bucket_util.buckets,
                 transaction_gen_load, DocLoading.Bucket.DocOps.CREATE,
                 exp=0,
                 batch_size=10,
                 process_concurrency=self.process_concurrency,
                 replicate_to=self.replicate_to,
                 persist_to=self.persist_to,
                 durability=self.durability_level,
                 timeout_secs=self.sdk_timeout,
                 update_count=self.update_count,
                 transaction_timeout=self.transaction_timeout,
                 commit=True,
                 sync=self.sync)
         collection_obj.num_items += self.new_docs_to_add
     elif self.N1qltxn:
         self.N1ql_load_task = self.task.async_n1qlTxn_query(
             self.stmts,
             n1ql_helper=self.n1ql_helper,
             commit=True,
             scan_consistency="REQUEST_PLUS")
     self.doc_loading_task = self.task.async_load_gen_docs(
         self.cluster,
         self.bucket,
         gen_load,
         DocLoading.Bucket.DocOps.CREATE,
         exp=0,
         batch_size=10,
         process_concurrency=8,
         replicate_to=self.replicate_to,
         persist_to=self.persist_to,
         durability=self.durability_level,
         timeout_secs=self.sdk_timeout,
         scope=scope_name,
         collection=collection_obj.name,
         skip_read_on_error=True)
     collection_obj.num_items += self.new_docs_to_add
Example #6
    def test_non_overlapping_parallel_cruds(self):
        """
        Test to run non-overlapping durability CRUDs on a single bucket
        and make sure all CRUD operations succeed

        1. Run single task_1 with durability operation
        2. Create parallel task to run either SyncWrite / Non-SyncWrite
           operation based on the config param and run that over the docs
           such that it will not overlap with the other tasks
        3. Make sure all CRUDs succeeded without any unexpected exceptions
        """

        doc_ops = self.input.param("doc_ops", "create;delete;update;read")
        doc_ops = doc_ops.split(";")
        half_of_num_items = int(self.num_items/2)
        doc_gen = dict()
        tasks = list()

        # Create required doc_generators for CRUD ops
        doc_gen["create"] = doc_generator(self.key, self.num_items,
                                          self.num_items * 2)
        doc_gen["update"] = doc_generator(self.key, half_of_num_items,
                                          self.num_items)
        doc_gen["delete"] = doc_generator(self.key, 0, half_of_num_items)
        doc_gen["read"] = doc_gen["update"]

        for index in range(0, 4):
            op_type = doc_ops[index]
            curr_doc_gen = doc_gen[op_type]

            if index < 2:
                # Durability doc_loader for first two ops specified in doc_ops
                tasks.append(self.task.async_load_gen_docs(
                    self.cluster, self.bucket, curr_doc_gen, op_type, 0,
                    batch_size=10, process_concurrency=1,
                    durability=self.durability_level,
                    timeout_secs=self.sdk_timeout,
                    sdk_client_pool=self.sdk_client_pool))
            else:
                # Non-SyncWrites for last two ops specified in doc_ops
                tasks.append(self.task.async_load_gen_docs(
                    self.cluster, self.bucket, curr_doc_gen, op_type, 0,
                    batch_size=10, process_concurrency=1,
                    replicate_to=self.replicate_to, persist_to=self.persist_to,
                    timeout_secs=self.sdk_timeout,
                    sdk_client_pool=self.sdk_client_pool))

        # Update num_items according to the CRUD operations
        self.num_items += self.num_items - half_of_num_items

        # Wait for all task to complete
        for task in tasks:
            # TODO: Receive failed docs and make sure only expected exceptions
            #       are generated
            self.task.jython_task_manager.get_task_result(task)

        # Verify doc count and other stats
        self.bucket_util._wait_for_stats_all_buckets()
        self.bucket_util.verify_stats_all_buckets(self.num_items)
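
The item-count bookkeeping above follows from the key ranges: the create task adds num_items new keys ([num_items, num_items * 2)) while the delete task removes the first half_of_num_items keys ([0, half_of_num_items)), so the expected total is num_items + num_items - half_of_num_items, which is exactly what the line "self.num_items += self.num_items - half_of_num_items" computes.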
Example #7
    def test_rollback_and_persistence_race_condition(self):
        cluster = self.cluster
        gen_load = doc_generator(self.key, 0, self.num_items)
        for bucket in self.bucket_util.buckets:
            task = self.task.async_load_gen_docs(
                self.cluster, bucket, gen_load, "create", 0,
                batch_size=10, process_concurrency=8,
                replicate_to=self.replicate_to, persist_to=self.persist_to,
                timeout_secs=self.sdk_timeout, retries=self.sdk_retries)
            self.task.jython_task_manager.get_task_result(task)

        # Stop persistence
        for server in cluster.servers[:self.nodes_init]:
            # Create cbepctl command object
            node_shell_conn = RemoteMachineShellConnection(server)
            cbepctl_obj = Cbepctl(node_shell_conn)

            for bucket in self.bucket_util.buckets:
                cbepctl_obj.persistence(bucket.name, "stop")

            # Disconnect the shell_connection
            node_shell_conn.disconnect()

        self.sleep(10, "Wait after stop_persistence")

        # more (non-intersecting) load
        gen_load = doc_generator(self.key, 0, self.num_items, doc_size=64)
        for bucket in self.bucket_util.buckets:
            task = self.task.async_load_gen_docs(
                self.cluster, bucket, gen_load, "create", 0,
                batch_size=10, process_concurrency=8,
                replicate_to=self.replicate_to, persist_to=self.persist_to,
                timeout_secs=self.sdk_timeout, retries=self.sdk_retries)
            self.task.jython_task_manager.get_task_result(task)

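        # With persistence stopped, the second batch of docs exists only in
        # memory; killing memcached on node 1 should discard it there, and
        # the item counts compared below should still converge across nodes
        # (the replica is expected to roll back the un-persisted mutations)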
        shell = RemoteMachineShellConnection(cluster.servers[0])
        shell.kill_memcached()

        self.sleep(10, "Wait after kill memcached")

        node1_shell_conn = RemoteMachineShellConnection(cluster.servers[0])
        node2_shell_conn = RemoteMachineShellConnection(cluster.servers[1])
        node1_cb_stat_obj = Cbstats(node1_shell_conn)
        node2_cb_stat_obj = Cbstats(node2_shell_conn)

        node1_items = node1_cb_stat_obj.all_stats(bucket, "curr_items_tot")
        node2_items = node2_cb_stat_obj.all_stats(bucket, "curr_items_tot")

        # Disconnect the opened connections
        node1_shell_conn.disconnect()
        node2_shell_conn.disconnect()

        self.assertTrue(node1_items == node2_items,
                        'Node items not equal. Node 1:{0}, node 2:{1}'
                        .format(node1_items, node2_items))
Example #8
    def test_doc_size(self):
        """
        Insert documents with empty content and max size into each available
        collection and validate
        :return:
        """
        # Empty docs
        min_doc_size_gen = doc_generator("test_min_doc_size",
                                         0, self.num_items,
                                         key_size=self.key_size,
                                         doc_size=0,
                                         mix_key_size=False,
                                         randomize_doc_size=False)
        # 20 MB docs (Couchbase's maximum document value size)
        max_doc_size_gen = doc_generator("test_max_doc_size",
                                         0, self.num_items,
                                         key_size=self.key_size,
                                         doc_size=1024 * 1024 * 20,
                                         mix_key_size=False,
                                         randomize_doc_size=False)
        # Dict to keep track of all inserted CAS values
        # Format: known_cas[CAS] = list(vb_lists)
        known_cas = dict()

        # Client to insert docs under different collections
        client = SDKClient([self.cluster.master], self.bucket,
                           compression_settings=self.sdk_compression)

        for doc_gen in [min_doc_size_gen, max_doc_size_gen]:
            while doc_gen.has_next():
                key, value = doc_gen.next()
                for _, scope in self.bucket.scopes.items():
                    for _, collection in scope.collections.items():
                        client.select_collection(scope.name, collection.name)
                        result = client.crud("create", key, value, self.maxttl,
                                             durability=self.durability_level,
                                             timeout=self.sdk_timeout,
                                             time_unit="seconds")
                        if result["status"] is False:
                            self.log_failure("Doc create failed for key '%s' "
                                             "collection::scope %s::%s - %s"
                                             % (key,
                                                scope.name,
                                                collection.name,
                                                result))
                        else:
                            self.__validate_cas_for_key(key, result, known_cas)
                            collection.num_items += 1

        # Close SDK connection
        client.close()

        # Validate doc count as per bucket collections
        self.bucket_util.validate_docs_per_collections_all_buckets()
        self.validate_test_failure()
Example #9
    def test_dgm_to_non_dgm(self):
        # Prepare DGM scenario
        bucket = self.bucket_util.get_all_buckets()[0]
        num_items = self.task.load_bucket_into_dgm(
            self.cluster,
            bucket,
            self.key,
            self.num_items,
            self.active_resident_threshold,
            batch_size=10,
            process_concurrency=8,
            persist_to=self.persist_to,
            replicate_to=self.replicate_to)

        gen_create = doc_generator(self.key, num_items,
                                   num_items + self.num_items)
        gen_update = doc_generator(self.key, 0, self.num_items)
        gen_delete = doc_generator(self.key, self.num_items, num_items)

        # Perform continuous updates while bucket moves from DGM->non-DGM state
        tasks = list()
        tasks.append(
            self.task.async_load_gen_docs(self.cluster,
                                          bucket,
                                          gen_update,
                                          "update",
                                          0,
                                          persist_to=self.persist_to,
                                          replicate_to=self.replicate_to,
                                          batch_size=10,
                                          process_concurrency=2))
        tasks.append(
            self.task.async_load_gen_docs(self.cluster,
                                          bucket,
                                          gen_delete,
                                          "delete",
                                          0,
                                          persist_to=self.persist_to,
                                          replicate_to=self.replicate_to,
                                          batch_size=10,
                                          process_concurrency=2))
        tasks.append(
            self.task.async_load_gen_docs(self.cluster,
                                          bucket,
                                          gen_create,
                                          "create",
                                          0,
                                          persist_to=self.persist_to,
                                          replicate_to=self.replicate_to,
                                          batch_size=10,
                                          process_concurrency=2))
        for task in tasks:
            self.task.jython_task_manager.get_task_result(task)
Example #10
    def touch_test(self):
        self.log.info("Loading bucket into DGM")
        load_gen = doc_generator(self.key,
                                 0,
                                 self.num_items,
                                 doc_size=self.doc_size)
        dgm_gen = doc_generator(self.key, self.num_items, self.num_items + 1)
        dgm_task = self.task.async_load_gen_docs(
            self.cluster,
            self.bucket_util.buckets[0],
            dgm_gen,
            "create",
            0,
            persist_to=self.persist_to,
            replicate_to=self.replicate_to,
            durability=self.durability_level,
            timeout_secs=self.sdk_timeout,
            batch_size=10,
            process_concurrency=4,
            active_resident_threshold=self.active_resident_threshold)
        self.task_manager.get_task_result(dgm_task)

        self.log.info("Touch intial self.num_items docs which are "
                      "residing on disk due to DGM")
        client = SDKClient([self.cluster.master], self.bucket_util.buckets[0])
        collections = BucketUtils.get_random_collections(
            self.bucket_util.buckets, 2, 2, 1)
        for self.bucket_name, scope_dict in collections.iteritems():
            bucket = BucketUtils.get_bucket_obj(self.bucket_util.buckets,
                                                self.bucket_name)
            scope_dict = scope_dict["scopes"]
            for scope_name, collection_dict in scope_dict.items():
                collection_dict = collection_dict["collections"]
                for c_name, c_data in collection_dict.items():
                    self.log.info("CAS test on collection %s: %s" %
                                  (scope_name, c_name))
                    client.select_collection(scope_name, c_name)
                    while load_gen.has_next():
                        key, _ = load_gen.next()
                        result = client.crud("touch",
                                             key,
                                             durability=self.durability_level,
                                             timeout=self.sdk_timeout)
                        if result["status"] is not True:
                            self.log_failure("Touch on %s failed: %s" %
                                             (key, result))
        client.close()
        self.bucket_util._wait_for_stats_all_buckets()
        # Validate doc count as per bucket collections
        self.bucket_util.validate_docs_per_collections_all_buckets()
        self.validate_test_failure()
Example #11
    def touch_test(self):
        self.log.info("Loading bucket %s into %s%% DGM" %
                      (self.bucket.name, self.active_resident_threshold))
        load_gen = doc_generator(self.key,
                                 0,
                                 self.num_items,
                                 doc_size=self.doc_size)
        dgm_gen = doc_generator(self.key, self.num_items, self.num_items + 1)
        dgm_task = self.task.async_load_gen_docs(
            self.cluster,
            self.bucket,
            dgm_gen,
            DocLoading.Bucket.DocOps.CREATE,
            0,
            persist_to=self.persist_to,
            replicate_to=self.replicate_to,
            durability=self.durability_level,
            timeout_secs=self.sdk_timeout,
            batch_size=10,
            process_concurrency=4,
            active_resident_threshold=self.active_resident_threshold,
            sdk_client_pool=self.sdk_client_pool)
        self.task_manager.get_task_result(dgm_task)

        self.log.info("Touch initial self.num_items docs which are "
                      "residing on disk due to DGM")
        client = self.sdk_client_pool.get_client_for_bucket(self.bucket)
        collections = BucketUtils.get_random_collections([self.bucket], 2, 2,
                                                         1)
        for bucket_name, scope_dict in collections.iteritems():
            for scope_name, collection_dict in scope_dict["scopes"].items():
                for c_name, c_data in collection_dict["collections"].items():
                    self.log.info("CAS test on collection %s: %s" %
                                  (scope_name, c_name))
                    client.select_collection(scope_name, c_name)
                    while load_gen.has_next():
                        key, _ = load_gen.next()
                        result = client.crud(DocLoading.Bucket.DocOps.TOUCH,
                                             key,
                                             durability=self.durability_level,
                                             timeout=self.sdk_timeout)
                        if result["status"] is not True:
                            self.log_failure("Touch on %s failed: %s" %
                                             (key, result))
        # Change client's scope and collection back to _default,
        # since select_collection above switched them per collection
        client.scope_name = CbServer.default_scope
        client.collection_name = CbServer.default_collection
        self.sdk_client_pool.release_client(client)
        self.validate_test_failure()
Example #12
    def rebalance_out_with_ops(self):
        gen_create = doc_generator(self.key, self.num_items,
                                   self.num_items * 2)
        gen_delete = doc_generator(self.key, self.num_items / 2,
                                   self.num_items)
        servs_out = [
            self.cluster.servers[self.num_servers - i - 1]
            for i in range(self.nodes_out)
        ]
        tasks = list()
        rebalance_task = self.task.async_rebalance(
            self.cluster.servers[:self.nodes_init], [], servs_out)
        tasks_info = self.start_parallel_cruds(gen_create, gen_delete)
        self.task.jython_task_manager.get_task_result(rebalance_task)

        self.bucket_util.verify_doc_op_task_exceptions(tasks_info,
                                                       self.cluster)
        self.bucket_util.log_doc_ops_task_failures(tasks_info)
        self.cluster.nodes_in_cluster = list(
            set(self.cluster.nodes_in_cluster) - set(servs_out))
        for bucket in self.bucket_util.buckets:
            if self.doc_ops is not None:
                if "update" in self.doc_ops:
                    tasks.append(
                        self.task.async_validate_docs(self.cluster,
                                                      bucket,
                                                      self.gen_update,
                                                      "update",
                                                      0,
                                                      batch_size=10))
                if "create" in self.doc_ops:
                    tasks.append(
                        self.task.async_validate_docs(self.cluster,
                                                      bucket,
                                                      gen_create,
                                                      "create",
                                                      0,
                                                      batch_size=10,
                                                      process_concurrency=8))
                if "delete" in self.doc_ops:
                    tasks.append(
                        self.task.async_validate_docs(self.cluster,
                                                      bucket,
                                                      gen_delete,
                                                      "delete",
                                                      0,
                                                      batch_size=10))
        for task in tasks:
            self.task.jython_task_manager.get_task_result(task)
        self.bucket_util.verify_stats_all_buckets(self.num_items)
Example #13
 def _load_json(self, bucket, num_items, exp=0, op_type="create"):
     self.log.info("Creating doc_generator..")
     doc_create = doc_generator(self.key,
                                0,
                                num_items,
                                doc_size=self.doc_size,
                                doc_type="json",
                                target_vbucket=self.target_vbucket,
                                vbuckets=self.cluster.vbuckets)
     self.log.info("doc_generator created")
     task = self.task.async_load_gen_docs(
         self.cluster,
         bucket,
         doc_create,
         op_type,
         exp,
         batch_size=10,
         process_concurrency=8,
         replicate_to=self.replicate_to,
         persist_to=self.persist_to,
         durability=self.durability_level,
         timeout_secs=self.sdk_timeout,
         compression=self.sdk_compression,
         sdk_client_pool=self.sdk_client_pool)
     self.task.jython_task_manager.get_task_result(task)
     self.bucket_util._wait_for_stats_all_buckets(self.cluster,
                                                  self.cluster.buckets)
     self.bucket_util.verify_stats_all_buckets(self.cluster, self.num_items)
     return
Example #14
    def setUp(self):
        super(Bucket_param_test, self).setUp()
        self.transaction_timeout = self.input.param("transaction_timeout", 100)
        self.transaction_commit = self.input.param("transaction_commit", True)
        self.op_type = self.input.param("op_type", 'create')
        self.start_doc_for_insert = 0
        self.key = 'test-doc'.rjust(self.key_size, '0')
        nodes_init = self.cluster.servers[1:self.nodes_init] \
            if self.nodes_init != 1 else []
        self.task.rebalance([self.cluster.master], nodes_init, [])
        self.cluster.nodes_in_cluster.extend(
            [self.cluster.master] + nodes_init)
        self.bucket_util.create_default_bucket(
            replica=self.num_replicas, compression_mode=self.compression_mode)
        self.bucket_util.add_rbac_user()
        self.src_bucket = self.bucket_util.get_all_buckets()
        # Reset active_resident_threshold to avoid further data load as DGM
        self.active_resident_threshold = 0

        doc_create = doc_generator('test-d', 0, self.num_items,
                                   doc_size=self.doc_size,
                                   doc_type="json",
                                   vbuckets=self.vbuckets)
        for bucket in self.bucket_util.buckets:
            task = self.task.async_load_gen_docs(
                self.cluster, bucket, doc_create, "create", 0,
                persist_to=self.persist_to, replicate_to=self.replicate_to,
                batch_size=10, process_concurrency=8)
            self.task.jython_task_manager.get_task_result(task)
            # Allow item counts to settle before any stat verification
            time.sleep(20)
        self.log.info("==========Finished Bucket_param_test setup========")
Example #15
    def _dockey_data_ops(self, dockey="dockey"):
        target_vb = None
        if self.target_vbucket is not None:
            target_vb = [self.target_vbucket]
        gen_load = doc_generator(dockey,
                                 0,
                                 self.num_items,
                                 key_size=self.key_size,
                                 doc_size=self.doc_size,
                                 doc_type=self.doc_type,
                                 vbuckets=self.cluster.vbuckets,
                                 target_vbucket=target_vb)

        bucket = self.bucket_util.get_all_buckets(self.cluster)[0]
        for op_type in ["create", "update", "delete"]:
            task = self.task.async_load_gen_docs(
                self.cluster,
                bucket,
                gen_load,
                op_type,
                0,
                batch_size=20,
                persist_to=self.persist_to,
                replicate_to=self.replicate_to,
                durability=self.durability_level,
                timeout_secs=self.sdk_timeout,
                sdk_client_pool=self.sdk_client_pool)
            self.task.jython_task_manager.get_task_result(task)
            if op_type == "delete":
                self.num_items = 0
            self._persist_and_verify()
Example #16
 def setUp(self):
     super(OpsChangeCasTests, self).setUp()
     self.key = "test_cas"
     self.expire_time = self.input.param("expire_time", 35)
     self.item_flag = self.input.param("item_flag", 0)
     self.load_gen = doc_generator(self.key, 0, self.num_items,
                                   doc_size=self.doc_size)
     self.node_data = dict()
     for node in self.cluster_util.get_kv_nodes():
         shell = RemoteMachineShellConnection(node)
         cb_stat = Cbstats(shell)
         self.node_data[node.ip] = dict()
         self.node_data[node.ip]["shell"] = shell
         self.node_data[node.ip]["cb_stat"] = Cbstats(shell)
         self.node_data[node.ip]["active"] = cb_stat.vbucket_list(
             self.bucket,
             "active")
         self.node_data[node.ip]["replica"] = cb_stat.vbucket_list(
             self.bucket,
             "replica")
     if self.sdk_client_pool:
         self.client = self.sdk_client_pool.get_client_for_bucket(
             self.bucket)
     else:
         self.client = SDKClient([self.cluster.master], self.bucket)
Example #17
 def setUp(self):
     super(CollectionsRebalance, self).setUp()
     self.bucket_util._expiry_pager()
     self.load_gen = doc_generator(self.key, 0, self.num_items)
     self.bucket = self.bucket_util.buckets[0]
     self.rest = RestConnection(self.cluster.master)
     self.data_load_spec = self.input.param("data_load_spec",
                                            "volume_test_load")
     self.data_load_stage = self.input.param("data_load_stage", "before")
     self.data_load_type = self.input.param("data_load_type", "async")
     self.nodes_swap = self.input.param("nodes_swap", 1)
     self.nodes_failover = self.input.param("nodes_failover", 1)
     self.failover_ops = [
         "graceful_failover_rebalance_out", "hard_failover_rebalance_out",
         "graceful_failover_recovery", "hard_failover_recovery"
     ]
     self.step_count = self.input.param("step_count", -1)
     self.replicas_for_failover = self.input.param("replicas_for_failover",
                                                   3)
     self.recovery_type = self.input.param("recovery_type", "full")
     self.compaction = self.input.param("compaction", False)
     self.warmup = self.input.param("warmup", False)
     self.update_replica = self.input.param(
         "update_replica", False)  # for replica + rebalance tests
     self.num_replicas = self.input.param(
         "num_replicas", 1)  # for replica + rebalance tests
      if self.compaction:
         self.compaction_tasks = list()
Example #18
 def setUp(self):
     super(CollectionsRebalance, self).setUp()
     self.bucket_util._expiry_pager()
     self.load_gen = doc_generator(self.key, 0, self.num_items)
     self.bucket = self.bucket_util.buckets[0]
     self.rest = RestConnection(self.cluster.master)
     self.data_load_spec = self.input.param("data_load_spec",
                                            "volume_test_load")
     self.data_load_stage = self.input.param("data_load_stage", "before")
     self.data_load_type = self.input.param("data_load_type", "async")
     self.nodes_swap = self.input.param("nodes_swap", 1)
     self.nodes_failover = self.input.param("nodes_failover", 1)
     self.failover_ops = [
         "graceful_failover_rebalance_out", "hard_failover_rebalance_out",
         "graceful_failover_recovery", "hard_failover_recovery"
     ]
     self.step_count = self.input.param("step_count", -1)
     self.recovery_type = self.input.param("recovery_type", "full")
     self.compaction = self.input.param("compaction", False)
     if self.compaction:
         self.disable_auto_compaction()
     self.warmup = self.input.param("warmup", False)
     self.update_replica = self.input.param(
         "update_replica", False)  # for replica + rebalance tests
     self.updated_num_replicas = self.input.param(
         "updated_num_replicas",
         1)  # for replica + rebalance tests, forced hard failover
     self.forced_hard_failover = self.input.param(
         "forced_hard_failover", False)  # for forced hard failover tests
     self.change_ram_quota_cluster = self.input.param(
         "change_ram_quota_cluster", False)  # To change during rebalance
     self.skip_validations = self.input.param("skip_validations", True)
     if self.compaction:
         self.compaction_tasks = list()
     self.dgm_test = self.input.param("dgm_test", False)
Example #19
    def setUp(self):
        super(Bucket_DGM_Tests, self).setUp()
        self.key = 'test_docs'.rjust(self.key_size, '0')
        nodes_init = self.cluster.servers[1:self.nodes_init] \
            if self.nodes_init != 1 else []
        self.task.rebalance([self.cluster.master], nodes_init, [])
        self.cluster.nodes_in_cluster.extend(
            [self.cluster.master] + nodes_init)
        self.bucket_util.create_default_bucket(
            ram_quota=self.bucket_size, replica=self.num_replicas,
            maxTTL=self.maxttl, compression_mode=self.compression_mode)
        self.bucket_util.add_rbac_user()

        self.cluster_util.print_cluster_stats()
        doc_create = doc_generator(
            self.key, 0, self.num_items, doc_size=self.doc_size,
            doc_type=self.doc_type, vbuckets=self.vbuckets)
        for bucket in self.bucket_util.buckets:
            task = self.task.async_load_gen_docs(
                self.cluster, bucket, doc_create, "create", 0,
                persist_to=self.persist_to,
                replicate_to=self.replicate_to,
                durability=self.durability_level,
                timeout_secs=self.sdk_timeout,
                batch_size=10,
                process_concurrency=8)
            self.task.jython_task_manager.get_task_result(task)
            # Verify initial doc load count
            self.bucket_util._wait_for_stats_all_buckets()
            self.bucket_util.verify_stats_all_buckets(self.num_items)
        self.log.info("========= Finished Bucket_DGM_Tests setup =======")
Example #20
 def setUp(self):
     super(RebalanceStartStopTests, self).setUp()
     extra_nodes_in = self.input.param("extra_nodes_in", 0)
     extra_nodes_out = self.input.param("extra_nodes_out", 0)
     self.servs_init = self.servers[:self.nodes_init]
     self.servs_in = [
         self.servers[i + self.nodes_init] for i in range(self.nodes_in)
     ]
     self.servs_out = [
         self.servers[self.nodes_init - i - 1]
         for i in range(self.nodes_out)
     ]
     self.extra_servs_in = [
         self.servers[i + self.nodes_init + self.nodes_in]
         for i in range(extra_nodes_in)
     ]
     self.extra_servs_out = [
         self.servers[self.nodes_init - i - 1 - self.nodes_out]
         for i in range(extra_nodes_out)
     ]
     self.withMutationOps = self.input.param("withMutationOps", True)
     self.sleep_before_rebalance = self.input.param(
         "sleep_before_rebalance", 0)
     if self.spec_name is not None:
         self.num_items = 20000
         self.items = 20000
         # We need to use "test_collections" key for update,
         # since doc_loading was done from spec
         self.gen_update = doc_generator("test_collections",
                                         0, (self.items / 2),
                                         mutation_type="SET")
Example #21
    def test_MB_34947(self):
        # Update already Created docs with async_writes
        load_gen = doc_generator(self.key, 0, self.num_items,
                                 key_size=self.key_size,
                                 doc_size=self.doc_size,
                                 doc_type=self.doc_type,
                                 vbuckets=self.cluster.vbuckets)
        task = self.task.async_load_gen_docs(
            self.cluster, self.def_bucket, load_gen, "update", 0,
            persist_to=self.persist_to, replicate_to=self.replicate_to,
            timeout_secs=self.sdk_timeout,
            batch_size=10, process_concurrency=8)
        self.task.jython_task_manager.get_task_result(task)

        # Update bucket replica to new value
        bucket_helper = BucketHelper(self.cluster.master)
        bucket_helper.change_bucket_props(
            self.def_bucket, replicaNumber=self.new_replica)
        self.bucket_util.print_bucket_stats(self.cluster)

        # Start rebalance task
        rebalance = self.task.async_rebalance(self.cluster.servers, [], [])
        self.sleep(10, "Wait for rebalance to start")

        # Wait for rebalance task to complete
        self.task.jython_task_manager.get_task_result(rebalance)

        # Assert if rebalance failed
        self.assertTrue(rebalance.result,
                        "Rebalance failed after replica update")
Example #22
 def test_max_key_size(self):
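      # Key sizes below are one byte over the assumed server limits
      # (250 bytes for keys in the default collection, 246 in named
      # collections, where the collection-ID prefix consumes the
      # difference), so the create is expected to fail either way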
     if self.use_default_collection:
         self.key_size = 251
         self.collection_name = CbServer.default_collection
     else:
         self.key_size = 247
         self.collection_name = "collection-1"
         BucketUtils.create_collection(
             self.cluster.master,
             self.bucket,
             scope_name=CbServer.default_scope,
             collection_spec={"name": self.collection_name})
     gen_load = doc_generator("test-max-key-size",
                              0,
                              1,
                              key_size=self.key_size,
                              vbuckets=self.cluster.vbuckets)
     task = self.task.async_load_gen_docs(self.cluster,
                                          self.bucket,
                                          gen_load,
                                          "create",
                                          self.maxttl,
                                          batch_size=20,
                                          persist_to=self.persist_to,
                                          replicate_to=self.replicate_to,
                                          durability=self.durability_level,
                                          timeout_secs=self.sdk_timeout,
                                          retries=self.sdk_retries,
                                          collection=self.collection_name)
     self.task.jython_task_manager.get_task_result(task)
     if task.fail:
         self.log.info("Inserting doc key > max size failed as expected")
     else:
         self.fail("Inserting doc key greater than max key size "
                   "succeeded when it should have failed")
Example #23
    def load_docs_in_cb_bucket_before_and_after_cbas_connect(self):
        self.setup_for_test()

        # Load more docs in Couchbase bucket.
        self.perform_doc_ops_in_all_cb_buckets("create", self.num_items,
                                               self.num_items * 2)
        self.bucket_util.verify_stats_all_buckets(self.num_items * 2)

        if self.test_abort_snapshot:
            self.log.info("Creating sync_write aborts after dataset connect")
            for server in self.cluster_util.get_kv_nodes():
                ssh_shell = RemoteMachineShellConnection(server)
                cbstats = Cbstats(ssh_shell)
                replica_vbs = cbstats.vbucket_list(
                    self.bucket_util.buckets[0].name, "replica")
                load_gen = doc_generator("test_abort_key",
                                         self.num_items,
                                         self.num_items,
                                         target_vbucket=replica_vbs)
                success = self.bucket_util.load_durable_aborts(
                    ssh_shell, [load_gen], self.bucket_util.buckets[0],
                    self.durability_level, "update", "all_aborts")
                if not success:
                    self.log_failure("Simulating aborts failed")
                ssh_shell.disconnect()

            self.validate_test_failure()

        # Validate no. of items in CBAS dataset
        if not self.cbas_util.validate_cbas_dataset_items_count(
                self.cbas_dataset_name, self.num_items * 2):
            self.fail("No. of items in CBAS dataset do not match "
                      "that in the CB bucket")
Example #24
 def setUp(self):
     super(RebalanceStartStopTests, self).setUp()
     extra_nodes_in = self.input.param("extra_nodes_in", 0)
     extra_nodes_out = self.input.param("extra_nodes_out", 0)
     self.servs_init = self.servers[:self.nodes_init]
     self.servs_in = [self.servers[i + self.nodes_init]
                      for i in range(self.nodes_in)]
     self.servs_out = [self.servers[self.nodes_init - i - 1]
                       for i in range(self.nodes_out)]
      self.extra_servs_in = [
          self.servers[i + self.nodes_init + self.nodes_in]
          for i in range(extra_nodes_in)
      ]
      self.extra_servs_out = [
          self.servers[self.nodes_init - i - 1 - self.nodes_out]
          for i in range(extra_nodes_out)
      ]
      self.withMutationOps = self.input.param("withMutationOps", True)
      self.sleep_before_rebalance = self.input.param(
          "sleep_before_rebalance", 0)
     if self.spec_name is not None:
         self.num_items = 20000
         self.items = 20000
         init_doc_load_spec = \
             self.bucket_util.get_crud_template_from_package("initial_load")
         # Using the same key as defined in the loading spec
         self.gen_update = doc_generator(
             init_doc_load_spec["doc_crud"][
                 MetaCrudParams.DocCrud.COMMON_DOC_KEY],
             0, (self.items / 2),
             mutation_type="SET")
     shell = RemoteMachineShellConnection(self.cluster.master)
     shell.enable_diag_eval_on_non_local_hosts()
     shell.disconnect()
Example #25
    def key_not_exists_test(self):
        client = SDKClient([self.cluster.master], self.bucket)
        collections = BucketUtils.get_random_collections([self.bucket], 1, 1,
                                                         1)
        scope_dict = collections[self.bucket.name]["scopes"]
        scope_name = scope_dict.keys()[0]
        collection_name = scope_dict[scope_name]["collections"].keys()[0]
        client.select_collection(scope_name, collection_name)
        self.log.info("CAS test on collection %s: %s" %
                      (scope_name, collection_name))

        load_gen = doc_generator(self.key, 0, self.num_items, doc_size=256)
        key, val = load_gen.next()

        for _ in range(1500):
            result = client.crud("create",
                                 key,
                                 val,
                                 durability=self.durability_level,
                                 timeout=self.sdk_timeout)
            if result["status"] is False:
                self.log_failure("Create failed: %s" % result)
            create_cas = result["cas"]

            # Delete and verify get fails
            result = client.crud("delete",
                                 key,
                                 durability=self.durability_level,
                                 timeout=self.sdk_timeout)
            if result["status"] is False:
                self.log_failure("Delete failed: %s" % result)
            elif result["cas"] <= create_cas:
                self.log_failure("Delete returned invalid cas: %s" % result)

            result = client.crud("read", key, timeout=self.sdk_timeout)
            if result["status"] is True:
                self.log_failure("Read succeeded after delete: %s" % result)
            elif SDKException.DocumentNotFoundException \
                    not in str(result["error"]):
                self.log_failure("Invalid exception during read "
                                 "for non-exists key: %s" % result)

            # A replace using the stale create-time CAS must fail without
            # stalling the test; verify the correct error is thrown
            result = client.crud("replace",
                                 key,
                                 val,
                                 exp=60,
                                 timeout=self.sdk_timeout,
                                 cas=create_cas)
            if result["status"] is True:
                self.log_failure("Replace succeeded after delete: %s" % result)
            if SDKException.DocumentNotFoundException \
                    not in str(result["error"]):
                self.log_failure("Invalid exception during read "
                                 "for non-exists key: %s" % result)

        client.close()

        # Validate doc count as per bucket collections
        self.bucket_util.validate_docs_per_collections_all_buckets()
        self.validate_test_failure()
Example #26
    def _dockey_tap(self, dockey="dockey"):
        gen_load = doc_generator(dockey, 0, self.num_items, doc_type="json")
        bucket = self.bucket_util.get_all_buckets()[0]
        task = self.task.async_load_gen_docs(self.cluster, bucket,
                                             gen_load, "create", 0,
                                             batch_size=20,
                                             persist_to=self.persist_to,
                                             replicate_to=self.replicate_to,
                                             pause_secs=5,
                                             timeout_secs=self.sdk_timeout,
                                             retries=self.sdk_retries)
        self.task.jython_task_manager.get_task_result(task)
        self._persist_and_verify()

        # assert if there are not enough nodes to failover
        rest = RestConnection(self.cluster.master)
        num_nodes = len(rest.node_statuses())
        self.assertTrue(num_nodes > 1,
                        "ERROR: Not enough nodes to do failover")

        # failover 1 node(we have 1 replica) and verify the keys
        self.cluster.failover(self.servers[:num_nodes],
                              self.servers[(num_nodes - 1):num_nodes])

        self.nodes_init -= 1
        self._persist_and_verify()
Example #27
 def custom_load(self, maxttl=0):
     self.key = "test_collections"
     start = self.bucket.scopes[CbServer.default_scope] \
         .collections[CbServer.default_collection] \
         .num_items
     load_gen = doc_generator(self.key, start, start + 1)
     tasks = []
     tasks.append(
         self.task.async_load_gen_docs(
             self.cluster,
             self.bucket,
             load_gen,
             "create",
             maxttl,
             batch_size=1000,
             process_concurrency=8,
             replicate_to=self.replicate_to,
             persist_to=self.persist_to,
             durability=self.durability_level,
             active_resident_threshold=self.dgm,
             compression=self.sdk_compression,
             timeout_secs=self.sdk_timeout,
             scope=CbServer.default_scope,
             collection=CbServer.default_collection))
     for task in tasks:
         self.task.jython_task_manager.get_task_result(task)
         if task.fail:
             self.fail("preload dgm failed")
     self.bucket_util.print_bucket_stats()
Example #28
 def load_bucket_into_dgm(self,
                          cluster,
                          bucket,
                          key,
                          num_items,
                          active_resident_threshold,
                          load_batch_size=20000,
                          batch_size=10,
                          process_concurrency=4,
                          persist_to=None,
                          replicate_to=None):
     rest = BucketHelper(cluster.master)
     bucket_stat = rest.get_bucket_stats_for_node(bucket.name,
                                                  cluster.master)
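      # Keep loading fresh key ranges until the active resident ratio
      # drops below the requested threshold, i.e. the bucket is in DGM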
     while bucket_stat["vb_active_resident_items_ratio"] > \
             active_resident_threshold:
         gen_load = doc_generator(key,
                                  num_items,
                                  num_items + load_batch_size,
                                  doc_type="binary")
         num_items += load_batch_size
         task = self.async_load_gen_docs(
             cluster,
             bucket,
             gen_load,
             "create",
             0,
             batch_size=batch_size,
             process_concurrency=process_concurrency,
             persist_to=persist_to,
             replicate_to=replicate_to)
         self.jython_task_manager.get_task_result(task)
         bucket_stat = rest.get_bucket_stats_for_node(
             bucket.name, cluster.master)
     return num_items
Example #29
    def rebalance_out_with_warming_up(self):
        master_restart = self.input.param("master_restart", False)
        if master_restart:
            warmup_node = self.cluster.master
        else:
            warmup_node = self.cluster.servers[
                len(self.cluster.servers) - self.nodes_out - 1]
        servs_out = self.cluster.servers[
            len(self.cluster.servers) - self.nodes_out:]

        if self.test_abort_snapshot:
            self.log.info("Creating sync_write abort scenario for replica vbs")
            for server in self.cluster_util.get_kv_nodes(self.cluster):
                ssh_shell = RemoteMachineShellConnection(server)
                cbstats = Cbstats(ssh_shell)
                replica_vbs = cbstats.vbucket_list(
                    self.cluster.buckets[0].name, "replica")
                load_gen = doc_generator(self.key, 0, 5000,
                                         target_vbucket=replica_vbs)
                success = self.bucket_util.load_durable_aborts(
                    ssh_shell, [load_gen],
                    self.cluster.buckets[0],
                    self.durability_level,
                    "update", "all_aborts")
                if not success:
                    self.log_failure("Simulating aborts failed")
                ssh_shell.disconnect()

            self.validate_test_failure()

        shell = RemoteMachineShellConnection(warmup_node)
        shell.stop_couchbase()
        self.sleep(20)
        shell.start_couchbase()
        shell.disconnect()

        # Workaround for Eph case (MB-44682 - Not a bug)
        if self.bucket_type == Bucket.Type.EPHEMERAL:
            self.sleep(15, "Wait for couchbase server to start")

        rebalance = self.task.async_rebalance(
            self.cluster.servers, [], servs_out)
        self.task.jython_task_manager.get_task_result(rebalance)
        self.cluster.nodes_in_cluster = list(
            set(self.cluster.nodes_in_cluster) - set(servs_out))
        if rebalance.result is False:
            self.log.info("Rebalance was failed as expected")
            self.assertTrue(self.bucket_util._wait_warmup_completed(
                self.cluster_util.get_kv_nodes(self.cluster),
                self.cluster.buckets[0],
                wait_time=self.wait_timeout * 10))

            self.log.info("Second attempt to rebalance")
            rebalance = self.task.async_rebalance(
                self.cluster.servers, [], servs_out)
            self.task.jython_task_manager.get_task_result(rebalance)
            self.assertTrue(rebalance.result, "Rebalance attempt failed again")
            self.cluster.nodes_in_cluster = list(
                set(self.cluster.nodes_in_cluster) - set(servs_out))
        if not self.atomicity:
            self.bucket_util.verify_cluster_stats(self.cluster, self.num_items,
                                                  timeout=self.wait_timeout)
            self.bucket_util.verify_unacked_bytes_all_buckets(self.cluster)
Example #30
    def setUp(self):
        super(MagmaFailures, self).setUp()

        self.gen_create = doc_generator(
            self.key,
            0,
            self.num_items,
            doc_size=self.doc_size,
            doc_type=self.doc_type,
            target_vbucket=self.target_vbucket,
            vbuckets=self.cluster_util.vbuckets,
            key_size=self.key_size,
            randomize_doc_size=self.randomize_doc_size,
            randomize_value=self.randomize_value,
            mix_key_size=self.mix_key_size,
            deep_copy=self.deep_copy)

        self.result_task = self._load_all_buckets(self.cluster,
                                                  self.gen_create,
                                                  "create",
                                                  0,
                                                  batch_size=self.batch_size,
                                                  dgm_batch=self.dgm_batch)

        if self.active_resident_threshold != 100:
            for task in self.result_task.keys():
                self.num_items = task.doc_index

        self.log.info("Verifying num_items counts after doc_ops")
        self.bucket_util._wait_for_stats_all_buckets()
        self.bucket_util.verify_stats_all_buckets(self.num_items)

        self.cluster_util.print_cluster_stats()
        self.bucket_util.print_bucket_stats()