Example #1
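Verifies that a document insert fails when the key length exceeds the maximum key size, both in the default collection and in a named collection (where the collection prefix reduces the usable key length).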
 def test_max_key_size(self):
     if self.use_default_collection:
         self.key_size = 251
         self.collection_name = CbServer.default_collection
     else:
         self.key_size = 247
         self.collection_name = "collection-1"
         BucketUtils.create_collection(
             self.cluster.master,
             self.bucket,
             scope_name=CbServer.default_scope,
             collection_spec={"name": self.collection_name})
     gen_load = doc_generator("test-max-key-size",
                              0,
                              1,
                              key_size=self.key_size,
                              vbuckets=self.cluster.vbuckets)
     task = self.task.async_load_gen_docs(self.cluster,
                                          self.bucket,
                                          gen_load,
                                          "create",
                                          self.maxttl,
                                          batch_size=20,
                                          persist_to=self.persist_to,
                                          replicate_to=self.replicate_to,
                                          durability=self.durability_level,
                                          timeout_secs=self.sdk_timeout,
                                          retries=self.sdk_retries,
                                          collection=self.collection_name)
     self.task.jython_task_manager.get_task_result(task)
     if task.fail:
         self.log.info("Inserting doc key > max size failed as expected")
     else:
         self.fail("Inserting doc key greater than max key size "
                   "succeeded when it should have failed")
Example #2
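Picks a random set of collections across the buckets, drops each of them, recreates all but _default (which cannot be recreated once dropped), and validates the per-collection document counts.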
 def test_create_delete_recreate_collection(self):
     collections = BucketUtils.get_random_collections(
         self.bucket_util.buckets, 10, 10, 1)
     # delete collection
     for bucket_name, scope_dict in collections.items():
         bucket = BucketUtils.get_bucket_obj(self.bucket_util.buckets,
                                             bucket_name)
         scope_dict = scope_dict["scopes"]
         for scope_name, collection_dict in scope_dict.items():
             collection_dict = collection_dict["collections"]
             for c_name, _ in collection_dict.items():
                 BucketUtils.drop_collection(self.cluster.master, bucket,
                                             scope_name, c_name)
     # recreate collection
     for bucket_name, scope_dict in collections.items():
         bucket = BucketUtils.get_bucket_obj(self.bucket_util.buckets,
                                             bucket_name)
         scope_dict = scope_dict["scopes"]
         for scope_name, collection_dict in scope_dict.items():
             collection_dict = collection_dict["collections"]
             for c_name, _ in collection_dict.items():
                 # Cannot create a _default collection
                 if c_name == CbServer.default_collection:
                     continue
                 col_obj = \
                     bucket.scopes[scope_name].collections[c_name]
                 BucketUtils.create_collection(self.cluster.master, bucket,
                                               scope_name,
                                               col_obj.get_dict_object())
     # Validate doc count as per bucket collections
     self.bucket_util.validate_docs_per_collections_all_buckets()
     self.validate_test_failure()
Example #3
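Attempts to create a collection under scope1 for each invalid name in the test input and expects every attempt to raise an exception.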
 def test_illegal_collection_name(self):
     BucketUtils.create_scope(self.cluster.master, self.bucket,
                              {"name": "scope1"})
     for name in self.invalid:
         try:
             BucketUtils.create_collection(self.cluster.master, self.bucket,
                                           "scope1", {"name": name})
         except Exception:
             self.log.info("Illegal collection name as expected")
         else:
             self.fail("Illegal collection name did not fail")
Example #4
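Creates collection1 under scope1, then verifies that creating a second collection with the same name in the same scope fails.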
 def test_create_collection_with_existing_name(self):
     BucketUtils.create_scope(self.cluster.master, self.bucket,
                              {"name": "scope1"})
     BucketUtils.create_collection(self.cluster.master, self.bucket,
                                   "scope1", {"name": "collection1"})
     try:
         BucketUtils.create_collection(self.cluster.master, self.bucket,
                                       "scope1", {"name": "collection1"})
     except Exception:
         self.log.info("Collection creation failed as expected "
                       "as there was collection1 already")
     else:
         self.fail("Collection creation did not fail "
                   "even when given duplicate")
Example #5
A helper that creates a scope and a collection in each bucket and returns the resulting manifest uid, scope id (sid), and collection id (cid) per bucket.
 def populate_uids(self, base_name="pre_qf"):
     """
     Creates a scope and a collection in each bucket and
     returns a dict like:
     {bucket_name:{"uid":uid, "cid":cid, "sid":sid}, ..}
     """
     uids = dict()
     for bucket in self.cluster.buckets:
         scope_name = "custom_scope-" + base_name
         collection_name = "custom_collection" + base_name
         BucketUtils.create_scope(self.cluster.master, bucket,
                                  {"name": scope_name})
         BucketUtils.create_collection(self.cluster.master, bucket,
                                       scope_name,
                                       {"name": collection_name})
         uids[bucket.name] = dict()
         uids[bucket.name]["sid"] = BucketHelper(self.cluster.master). \
             get_scope_id(bucket.name, scope_name)
         uids[bucket.name]["cid"] = BucketHelper(self.cluster.master). \
             get_collection_id(bucket.name, scope_name, collection_name)
         uids[bucket.name]["uid"] = BucketHelper(self.cluster.master). \
             get_bucket_manifest_uid(bucket.name)
     return uids
Example #6
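A basic-ops setUp: rebalances in the initial nodes, creates the default bucket, and, depending on test params, creates a randomly named scope and/or collection before setting up the SDK client pool and durability helper.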
    def setUp(self):
        super(basic_ops, self).setUp()

        # Scope/collection names: use the defaults or create random ones to test
        self.scope_name = self.input.param("scope", CbServer.default_scope)
        self.collection_name = self.input.param("collection",
                                                CbServer.default_collection)

        nodes_init = self.cluster.servers[1:self.nodes_init] \
            if self.nodes_init != 1 else []
        self.task.rebalance([self.cluster.master], nodes_init, [])
        self.cluster.nodes_in_cluster.extend([self.cluster.master] +
                                             nodes_init)
        self.bucket_util.create_default_bucket(
            replica=self.num_replicas,
            compression_mode=self.compression_mode,
            bucket_type=self.bucket_type,
            storage=self.bucket_storage,
            eviction_policy=self.bucket_eviction_policy)
        self.bucket_util.add_rbac_user()

        # Create Scope/Collection with random names if not equal to default
        if self.scope_name != CbServer.default_scope:
            self.scope_name = BucketUtils.get_random_name()
            BucketUtils.create_scope(self.cluster.master,
                                     self.bucket_util.buckets[0],
                                     {"name": self.scope_name})
        if self.collection_name != CbServer.default_collection:
            self.collection_name = BucketUtils.get_random_name()
            BucketUtils.create_collection(self.cluster.master,
                                          self.bucket_util.buckets[0],
                                          self.scope_name, {
                                              "name": self.collection_name,
                                              "num_items": self.num_items
                                          })
            self.log.info("Using scope::collection - '%s::%s'" %
                          (self.scope_name, self.collection_name))

        # Update required num_items under the chosen scope::collection
        self.bucket_util.buckets[0] \
            .scopes[self.scope_name] \
            .collections[self.collection_name] \
            .num_items = self.num_items

        if self.sdk_client_pool:
            self.log.info("Creating SDK client pool")
            self.sdk_client_pool.create_clients(
                self.bucket_util.buckets[0],
                self.cluster.nodes_in_cluster,
                req_clients=self.task_manager.number_of_threads,
                compression_settings=self.sdk_compression)

        self.durability_helper = DurabilityHelper(
            self.log,
            len(self.cluster.nodes_in_cluster),
            durability=self.durability_level,
            replicate_to=self.replicate_to,
            persist_to=self.persist_to)
        # Reset active_resident_threshold to avoid further data load as DGM
        self.active_resident_threshold = 0
        self.cluster_util.print_cluster_stats()
        self.bucket_util.print_bucket_stats()
        self.log.info("==========Finished Basic_ops base setup========")
Example #7
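A RebalanceBaseTest setUp that either builds buckets and collections from a bucket_spec (delegating to collection_setup) or creates buckets, scopes, and collections from individual test inputs and runs the initial data load.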
    def setUp(self):
        super(RebalanceBaseTest, self).setUp()
        self.rest = RestConnection(self.cluster.master)
        self.doc_ops = self.input.param("doc_ops", "create")
        self.key_size = self.input.param("key_size", 0)
        self.zone = self.input.param("zone", 1)
        self.replica_to_update = self.input.param("new_replica", None)
        self.default_view_name = "default_view"
        self.default_map_func = "function (doc) {\n  emit(doc._id, doc);\n}"
        self.default_view = View(self.default_view_name,
                                 self.default_map_func, None)
        self.max_verify = self.input.param("max_verify", None)
        self.std_vbucket_dist = self.input.param("std_vbucket_dist", None)
        self.flusher_total_batch_limit = self.input.param(
            "flusher_total_batch_limit", None)
        self.test_abort_snapshot = self.input.param("test_abort_snapshot",
                                                    False)
        self.items = self.num_items
        self.logs_folder = self.input.param("logs_folder")
        node_ram_ratio = self.bucket_util.base_bucket_ratio(
            self.cluster.servers)
        info = self.rest.get_nodes_self()
        self.rest.init_cluster(username=self.cluster.master.rest_username,
                               password=self.cluster.master.rest_password)
        self.rest.init_cluster_memoryQuota(
            memoryQuota=int(info.mcdMemoryReserved * node_ram_ratio))
        self.check_temporary_failure_exception = False
        nodes_init = self.cluster.servers[1:self.nodes_init] \
            if self.nodes_init != 1 else []

        services = None
        if self.services_init:
            services = list()
            for service in self.services_init.split("-"):
                services.append(service.replace(":", ","))
            services = services[1:] if len(services) > 1 else None

        if nodes_init:
            result = self.task.rebalance([self.cluster.master],
                                         nodes_init, [],
                                         services=services)
            self.assertTrue(result, "Initial rebalance failed")
        self.cluster.nodes_in_cluster.extend([self.cluster.master] +
                                             nodes_init)
        self.check_replica = self.input.param("check_replica", False)
        self.spec_name = self.input.param("bucket_spec", None)

        self.bucket_util.add_rbac_user()
        # Bucket creation and initial data load are driven by bucket_spec
        if self.spec_name is not None:
            try:
                self.collection_setup()
            except Java_base_exception as exception:
                self.handle_setup_exception(exception)
            except Exception as exception:
                self.handle_setup_exception(exception)
        else:
            if self.standard_buckets > 10:
                self.bucket_util.change_max_buckets(self.standard_buckets)
            self.create_buckets(self.bucket_size)

            # Create Scope/Collection based on inputs given
            for bucket in self.bucket_util.buckets:
                if self.scope_name != CbServer.default_scope:
                    self.scope_name = BucketUtils.get_random_name()
                    BucketUtils.create_scope(self.cluster.master, bucket,
                                             {"name": self.scope_name})
                if self.collection_name != CbServer.default_collection:
                    self.collection_name = BucketUtils.get_random_name()
                    BucketUtils.create_collection(
                        self.cluster.master, bucket, self.scope_name, {
                            "name": self.collection_name,
                            "num_items": self.num_items
                        })
                    self.log.info(
                        "Bucket %s using scope::collection - '%s::%s'" %
                        (bucket.name, self.scope_name, self.collection_name))

                # Update required num_items under the chosen scope::collection
                bucket.scopes[self.scope_name] \
                    .collections[self.collection_name] \
                    .num_items = self.num_items

            if self.flusher_total_batch_limit:
                self.bucket_util.set_flusher_total_batch_limit(
                    self.cluster.master, self.flusher_total_batch_limit,
                    self.bucket_util.buckets)

            self.gen_create = self.get_doc_generator(0, self.num_items)
            if self.active_resident_threshold < 100:
                self.check_temporary_failure_exception = True
                # Reset num_items=0 since the num_items will be populated
                # by the DGM load task
                for bucket in self.bucket_util.buckets:
                    bucket.scopes[self.scope_name] \
                        .collections[self.collection_name] \
                        .num_items = 0

            # Create clients in SDK client pool
            if self.sdk_client_pool:
                self.log.info("Creating SDK clients for client_pool")
                for bucket in self.bucket_util.buckets:
                    self.sdk_client_pool.create_clients(
                        bucket, [self.cluster.master],
                        self.sdk_pool_capacity,
                        compression_settings=self.sdk_compression)

            if not self.atomicity:
                _ = self._load_all_buckets(self.cluster,
                                           self.gen_create,
                                           "create",
                                           0,
                                           batch_size=self.batch_size)
                self.log.info("Verifying num_items counts after doc_ops")
                self.bucket_util._wait_for_stats_all_buckets()
                self.bucket_util.validate_docs_per_collections_all_buckets(
                    timeout=self.wait_timeout)
            else:
                self.transaction_commit = True
                self._load_all_buckets_atomicty(self.gen_create, "create")
                self.transaction_commit = self.input.param(
                    "transaction_commit", True)

            # Initialize doc_generators
            self.active_resident_threshold = 100
            self.gen_create = None
            self.gen_delete = None
            self.gen_update = self.get_doc_generator(0, (self.items / 2))
            self.durability_helper = DurabilityHelper(
                self.log,
                len(self.cluster.nodes_in_cluster),
                durability=self.durability_level,
                replicate_to=self.replicate_to,
                persist_to=self.persist_to)
            self.cluster_util.print_cluster_stats()
            self.bucket_util.print_bucket_stats()
        self.log_setup_status("RebalanceBase", "complete")
Example #8
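A variant of the same RebalanceBaseTest.setUp that inlines the spec-driven bucket creation and initial load instead of delegating to a collection_setup helper.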
    def setUp(self):
        super(RebalanceBaseTest, self).setUp()
        self.rest = RestConnection(self.cluster.master)
        self.doc_ops = self.input.param("doc_ops", "create")
        self.key_size = self.input.param("key_size", 0)
        self.zone = self.input.param("zone", 1)
        self.replica_to_update = self.input.param("new_replica", None)
        self.default_view_name = "default_view"
        self.default_map_func = "function (doc) {\n  emit(doc._id, doc);\n}"
        self.default_view = View(self.default_view_name,
                                 self.default_map_func, None)
        self.max_verify = self.input.param("max_verify", None)
        self.std_vbucket_dist = self.input.param("std_vbucket_dist", None)
        self.flusher_total_batch_limit = self.input.param(
            "flusher_total_batch_limit", None)
        self.test_abort_snapshot = self.input.param("test_abort_snapshot",
                                                    False)
        self.items = self.num_items
        node_ram_ratio = self.bucket_util.base_bucket_ratio(
            self.cluster.servers)
        info = self.rest.get_nodes_self()
        self.rest.init_cluster(username=self.cluster.master.rest_username,
                               password=self.cluster.master.rest_password)
        self.rest.init_cluster_memoryQuota(
            memoryQuota=int(info.mcdMemoryReserved * node_ram_ratio))
        self.check_temporary_failure_exception = False
        nodes_init = self.cluster.servers[1:self.nodes_init] \
            if self.nodes_init != 1 else []
        if nodes_init:
            result = self.task.rebalance([self.cluster.master], nodes_init, [])
            self.assertTrue(result, "Initial rebalance failed")
        self.cluster.nodes_in_cluster.extend([self.cluster.master] +
                                             nodes_init)
        self.check_replica = self.input.param("check_replica", False)
        self.spec_name = self.input.param("bucket_spec", None)

        # If bucket creation and initial data load are driven by bucket_spec
        if self.spec_name is not None:
            self.log.info("Creating buckets from spec")
            # Create bucket(s) and add rbac user
            buckets_spec = self.bucket_util.get_bucket_template_from_package(
                self.spec_name)
            doc_loading_spec = \
                self.bucket_util.get_crud_template_from_package("initial_load")

            self.bucket_util.create_buckets_using_json_data(buckets_spec)
            self.bucket_util.wait_for_collection_creation_to_complete()
            # Create clients in SDK client pool
            if self.sdk_client_pool:
                self.log.info("Creating required SDK clients for client_pool")
                bucket_count = len(self.bucket_util.buckets)
                max_clients = self.task_manager.number_of_threads
                # Use float division so ceil rounds up; plain int division
                # would truncate before ceil runs under Python 2
                clients_per_bucket = \
                    int(ceil(float(max_clients) / bucket_count))
                for bucket in self.bucket_util.buckets:
                    self.sdk_client_pool.create_clients(
                        bucket,
                        [self.cluster.master],
                        clients_per_bucket,
                        compression_settings=self.sdk_compression)

            self.bucket_util.run_scenario_from_spec(self.task,
                                                    self.cluster,
                                                    self.bucket_util.buckets,
                                                    doc_loading_spec,
                                                    mutation_num=0)
            self.bucket_util.add_rbac_user()

            self.cluster_util.print_cluster_stats()

            # Verify initial doc load count
            self.bucket_util._wait_for_stats_all_buckets()
            self.bucket_util.validate_docs_per_collections_all_buckets()

            self.cluster_util.print_cluster_stats()
            self.bucket_util.print_bucket_stats()
            self.bucket_helper_obj = BucketHelper(self.cluster.master)
            self.log.info("==========Finished rebalance base setup========")
        else:
            self.bucket_util.add_rbac_user()
            if self.standard_buckets > 10:
                self.bucket_util.change_max_buckets(self.standard_buckets)
            self.create_buckets(self.bucket_size)

            # Create Scope/Collection based on inputs given
            for bucket in self.bucket_util.buckets:
                if self.scope_name != CbServer.default_scope:
                    self.scope_name = BucketUtils.get_random_name()
                    BucketUtils.create_scope(self.cluster.master,
                                             bucket,
                                             {"name": self.scope_name})
                if self.collection_name != CbServer.default_collection:
                    self.collection_name = BucketUtils.get_random_name()
                    BucketUtils.create_collection(self.cluster.master,
                                                  bucket,
                                                  self.scope_name,
                                                  {"name": self.collection_name,
                                                   "num_items": self.num_items})
                    self.log.info("Bucket %s using scope::collection - '%s::%s'"
                                  % (bucket.name,
                                     self.scope_name,
                                     self.collection_name))

                # Update required num_items under the chosen scope::collection
                bucket.scopes[self.scope_name] \
                    .collections[self.collection_name] \
                    .num_items = self.num_items

            if self.flusher_total_batch_limit:
                self.bucket_util.set_flusher_total_batch_limit(
                    self.cluster.master,
                    self.flusher_total_batch_limit,
                    self.bucket_util.buckets)

            self.gen_create = self.get_doc_generator(0, self.num_items)
            if self.active_resident_threshold < 100:
                self.check_temporary_failure_exception = True
            if not self.atomicity:
                _ = self._load_all_buckets(self.cluster, self.gen_create,
                                           "create", 0,
                                           batch_size=self.batch_size)
                self.log.info("Verifying num_items counts after doc_ops")
                self.bucket_util._wait_for_stats_all_buckets()
                self.bucket_util.validate_docs_per_collections_all_buckets(
                    timeout=120)
            else:
                self.transaction_commit = True
                self._load_all_buckets_atomicty(self.gen_create, "create")
                self.transaction_commit = self.input.param("transaction_commit",
                                                           True)

            # Initialize doc_generators
            self.active_resident_threshold = 100
            self.gen_create = None
            self.gen_delete = None
            self.gen_update = self.get_doc_generator(0, (self.items / 2))
            self.durability_helper = DurabilityHelper(
                self.log, len(self.cluster.nodes_in_cluster),
                durability=self.durability_level,
                replicate_to=self.replicate_to, persist_to=self.persist_to)
            self.cluster_util.print_cluster_stats()
            self.bucket_util.print_bucket_stats()
            self.log.info("==========Finished rebalance base setup========")
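Taken together, the examples reduce to a small BucketUtils call pattern. The sketch below distills the create/drop/recreate flow; it is a minimal sketch, not the framework's own helper: the import paths are assumptions based on the TAF framework layout, and master and bucket are hypothetical stand-ins for the fixtures a concrete test class provides.

# A minimal sketch, assuming the TAF-style BucketUtils API shown above.
# Import paths are assumed; `master` and `bucket` are hypothetical
# stand-ins for the fixtures a real test class provides.
from Cb_constants import CbServer
from bucket_utils.bucket_ready_functions import BucketUtils


def recreate_collection(master, bucket, scope_name, collection_name):
    # Create the enclosing scope first, unless targeting _default
    if scope_name != CbServer.default_scope:
        BucketUtils.create_scope(master, bucket, {"name": scope_name})
    # Create, drop, then recreate the collection
    BucketUtils.create_collection(master, bucket, scope_name,
                                  {"name": collection_name})
    BucketUtils.drop_collection(master, bucket, scope_name, collection_name)
    if collection_name != CbServer.default_collection:
        # _default cannot be recreated once dropped (see Example #2)
        BucketUtils.create_collection(master, bucket, scope_name,
                                      {"name": collection_name})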