Example #1
 def test_delete_nonexistant_scope(self):
     try:
         BucketUtils.drop_scope(self.cluster.master, self.bucket, "sumedh")
     except Exception as e:
         self.log.info("Non existant collection deletion failed as expected")
     else:
         self.fail("deletion of non existing scope did not fail")
Example #2
 def test_delete_default_scope(self):
     try:
         BucketUtils.drop_scope(self.cluster.master, self.bucket, "_default")
     except Exception as e:
         self.log.info("Deafult Scope deletion failed as expected")
     else:
         self.fail("Default scope deletion did not fail")
Example #3
 def test_max_key_size(self):
     if self.use_default_collection:
         # Max key size is 250 bytes, so a 251-byte key must be rejected
         self.key_size = 251
         self.collection_name = CbServer.default_collection
     else:
         # Named collections reserve key bytes for the leb128-encoded
         # collection ID, so the usable limit is lower; 247 exceeds it
         self.key_size = 247
         self.collection_name = "collection-1"
         BucketUtils.create_collection(
             self.cluster.master,
             self.bucket,
             scope_name=CbServer.default_scope,
             collection_spec={"name": self.collection_name})
     gen_load = doc_generator("test-max-key-size",
                              0,
                              1,
                              key_size=self.key_size,
                              vbuckets=self.cluster.vbuckets)
     task = self.task.async_load_gen_docs(self.cluster,
                                          self.bucket,
                                          gen_load,
                                          "create",
                                          self.maxttl,
                                          batch_size=20,
                                          persist_to=self.persist_to,
                                          replicate_to=self.replicate_to,
                                          durability=self.durability_level,
                                          timeout_secs=self.sdk_timeout,
                                          retries=self.sdk_retries,
                                          collection=self.collection_name)
     self.task.jython_task_manager.get_task_result(task)
     if task.fail:
         self.log.info("Inserting doc key > max size failed as expected")
     else:
         self.fail("Inserting doc key greater than max key size "
                   "succeeded when it should have failed")
Example #4
    def test_MB_41383(self):
        """
        1. Introduce a network split between the orchestrator (node1) and the last node.
        2. Create collections on node1
        3. Create collections on the last node.
        4. Perform data validation
        """
        self.involve_orchestrator = True
        self.node1 = self.cluster.servers[0]
        self.node2 = self.cluster.servers[self.nodes_init - 1]
        self.split_brain(self.node1, self.node2)
        self.split_brain(self.node2, self.node1)
        self.sleep(120, "wait for the network split to take effect")

        BucketUtils.create_collections(
            self.cluster,
            self.bucket_util.buckets[0],
            5,
            CbServer.default_scope,
            collection_name="collection_from_first_node")

        self.cluster.master = self.master = self.node2
        BucketUtils.create_collections(
            self.cluster,
            self.bucket_util.buckets[0],
            5,
            CbServer.default_scope,
            collection_name="collection_from_last_node")
        self.remove_network_split()
        self.sleep(30, "wait for iptables rules to take effect")
        self.data_validation_collection()
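
The split_brain/remove_network_split helpers used above are defined elsewhere in the suite. For context, a minimal sketch of how such a one-way partition is typically induced over SSH with iptables (hypothetical helpers; assumes the RemoteMachineShellConnection API used in Example #29 and root access on the nodes):

    def split_brain(self, from_node, to_node):
        # Drop every inbound packet on from_node that originates from
        # to_node, producing a one-way network partition between them.
        shell = RemoteMachineShellConnection(from_node)
        shell.execute_command("iptables -A INPUT -s %s -j DROP" % to_node.ip)
        shell.disconnect()

    def remove_network_split(self):
        # Flush the rules on both nodes to heal the partition.
        for node in (self.node1, self.node2):
            shell = RemoteMachineShellConnection(node)
            shell.execute_command("iptables -F")
            shell.disconnect()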
Example #5
    def test_invalid_name_collection(self):
        for _ in range(1000):
            scope_name = BucketUtils.get_random_name(invalid_name=True)
            try:
                status, content = BucketHelper(
                    self.cluster.master).create_scope(self.bucket, scope_name)
                if status is True:
                    self.log_failure("Scope '%s::%s' creation not failed: %s" %
                                     (self.bucket, scope_name, content))
            except Exception as e:
                self.log.debug(e)

        for _ in range(1000):
            collection_name = BucketUtils.get_random_name(invalid_name=True)
            try:
                status, content = BucketHelper(self.cluster.master) \
                    .create_collection(self.bucket, CbServer.default_scope,
                                       collection_name)
                if status is True:
                    self.log_failure(
                        "Collection '%s::%s::%s' creation did not fail: %s" %
                        (self.bucket, CbServer.default_scope, collection_name,
                         content))
            except Exception as e:
                self.log.debug(e)

        # Validate doc count as per bucket collections
        self.bucket_util.validate_docs_per_collections_all_buckets()
        self.validate_test_failure()
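
BucketUtils.get_random_name(invalid_name=True) is not shown here. For reference, a sketch of the naming rule the server enforces for scopes and collections, per the Couchbase documentation (1-251 characters from [A-Za-z0-9_%-]; names starting with '_' or '%' are reserved for system use):

    import re

    # Sketch of the server-side keyspace naming rule (Couchbase 7.x)
    VALID_NAME = re.compile(r"^[A-Za-z0-9_%\-]{1,251}$")

    def is_valid_keyspace_name(name):
        # Reject reserved prefixes as well as illegal characters/lengths
        return bool(VALID_NAME.match(name)) and name[0] not in "_%"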
Example #6
 def test_more_than_max_collections_multiple_scopes(self):
     try:
         BucketUtils.create_scopes(self.cluster, self.bucket, 10,
                                   collection_count=200)
     except Exception as e:
         self.log.info("Creating more than max collections failed as expected")
     else:
         self.fail("Creating more than max collections did not fail")
Example #7
 def test_delete_nonexistant_scope(self):
     try:
         BucketUtils.drop_scope(self.cluster.master, self.bucket,
                                "non_existent_col_123")
     except Exception:
         self.log.info("Drop scope failed as expected")
     else:
         self.fail("deletion of non existing scope did not fail")
Example #8
    def test_load_default_collection(self):
        self.delete_default_collection = \
                    self.input.param("delete_default_collection", False)
        self.perform_ops = self.input.param("perform_ops", False)
        load_gen = doc_generator('test_drop_default',
                                 0,
                                 self.num_items,
                                 mutate=0,
                                 target_vbucket=self.target_vbucket)

        self.log.info("Loading %s docs into '%s::%s' collection" %
                      (self.num_items, CbServer.default_scope,
                       CbServer.default_collection))
        task = self.task.async_load_gen_docs(
            self.cluster,
            self.bucket,
            load_gen,
            "create",
            self.maxttl,
            batch_size=10,
            process_concurrency=2,
            replicate_to=self.replicate_to,
            persist_to=self.persist_to,
            durability=self.durability_level,
            compression=self.sdk_compression,
            timeout_secs=self.sdk_timeout,
            scope=CbServer.default_scope,
            collection=CbServer.default_collection,
            suppress_error_table=True)

        # perform some collection operation
        if self.perform_ops:
            doc_loading_spec = \
                self.bucket_util.get_crud_template_from_package("initial_load")
            self.bucket_util.run_scenario_from_spec(self.task,
                                                    self.cluster,
                                                    self.bucket_util.buckets,
                                                    doc_loading_spec,
                                                    mutation_num=0)
        self.task_manager.get_task_result(task)
        # Data validation
        self.bucket_util._wait_for_stats_all_buckets()
        task = self.task.async_validate_docs(self.cluster,
                                             self.bucket,
                                             load_gen,
                                             "create",
                                             self.maxttl,
                                             batch_size=10,
                                             process_concurrency=2)
        self.task_manager.get_task_result(task)

        if self.delete_default_collection:
            for bucket in self.bucket_util.buckets:
                BucketUtils.drop_collection(self.cluster.master, bucket)

        # Validate doc count as per bucket collections
        self.bucket_util.validate_docs_per_collections_all_buckets()
        self.validate_test_failure()
Example #9
 def test_illegal_scope_name(self):
     for name in self.invalid:
         try:
             BucketUtils.create_scope(self.cluster.master, self.bucket,
                                      {"name": name})
         except Exception:
             self.log.info("Illegal scope name as expected")
         else:
             self.fail("Illegal scope name did not fail")
Example #10
 def test_more_than_max_collections_multiple_scopes(self):
     # create max collections across 10 scopes
     BucketUtils.create_scopes(self.cluster, self.bucket, 10,
                               collection_count=120)
     try:
         # create one more collection under a new scope
         BucketUtils.create_scopes(self.cluster, self.bucket, 1,
                                   collection_count=1)
     except Exception as e:
         self.log.info("Creating more than max collections failed as expected")
     else:
         self.fail("Creating more than max collections did not fail")
Example #11
 def test_more_than_max_scopes(self):
     # Max scope count, after accounting for the default scope created in setup
     max_scopes = 1000
     BucketUtils.create_scopes(self.cluster, self.bucket, max_scopes - 1)
     try:
         BucketUtils.create_scopes(self.cluster, self.bucket, 500)
     except Exception as e:
         self.log.info("Creating more than max scopes failed as expected")
     else:
         self.fail("Creating more than max scopes did not fail")
Example #12
 def test_create_scope_with_existing_name(self):
     BucketUtils.create_scope(self.cluster.master, self.bucket,
                              {"name": "scope1"})
     try:
         BucketUtils.create_scope(self.cluster.master, self.bucket,
                                  {"name": "scope1"})
     except Exception as e:
         self.log.info("Scope creation failed as expected as there was scope1 already")
     else:
         self.fail("Scope creation did not fail even when given duplicate")
Example #13
 def test_illegal_collection_name(self):
     BucketUtils.create_scope(self.cluster.master, self.bucket,
                              {"name": "scope1"})
     for name in self.invalid:
         try:
             BucketUtils.create_collection(self.cluster.master, self.bucket,
                                           "scope1", {"name": name})
         except Exception as e:
             self.log.info("Illegal collection name as expected")
         else:
             self.fail("Illegal collection name did not fail")
Example #14
 def test_more_than_max_scopes(self):
     # create max scopes
     scopes_dict = BucketUtils.create_scopes(self.cluster, self.bucket,
                                             self.MAX_SCOPES)
     actual_count = len(scopes_dict)
     if actual_count != self.MAX_SCOPES:
         self.fail("failed to create max number of scopes")
     try:
         # create one more than the max allowed
         BucketUtils.create_scopes(self.cluster, self.bucket, 1)
     except Exception as e:
         self.log.info("Creating more than max scopes failed as expected")
     else:
         self.fail("Creating more than max scopes did not fail")
Example #15
    def touch_test(self):
        self.log.info("Loading bucket into DGM")
        load_gen = doc_generator(self.key,
                                 0,
                                 self.num_items,
                                 doc_size=self.doc_size)
        dgm_gen = doc_generator(self.key, self.num_items, self.num_items + 1)
        dgm_task = self.task.async_load_gen_docs(
            self.cluster,
            self.bucket_util.buckets[0],
            dgm_gen,
            "create",
            0,
            persist_to=self.persist_to,
            replicate_to=self.replicate_to,
            durability=self.durability_level,
            timeout_secs=self.sdk_timeout,
            batch_size=10,
            process_concurrency=4,
            active_resident_threshold=self.active_resident_threshold)
        self.task_manager.get_task_result(dgm_task)

        self.log.info("Touch intial self.num_items docs which are "
                      "residing on disk due to DGM")
        client = SDKClient([self.cluster.master], self.bucket_util.buckets[0])
        collections = BucketUtils.get_random_collections(
            self.bucket_util.buckets, 2, 2, 1)
        for self.bucket_name, scope_dict in collections.iteritems():
            bucket = BucketUtils.get_bucket_obj(self.bucket_util.buckets,
                                                self.bucket_name)
            scope_dict = scope_dict["scopes"]
            for scope_name, collection_dict in scope_dict.items():
                collection_dict = collection_dict["collections"]
                for c_name, c_data in collection_dict.items():
                    self.log.info("CAS test on collection %s: %s" %
                                  (scope_name, c_name))
                    client.select_collection(scope_name, c_name)
                    while load_gen.has_next():
                        key, _ = load_gen.next()
                        result = client.crud("touch",
                                             key,
                                             durability=self.durability_level,
                                             timeout=self.sdk_timeout)
                        if result["status"] is not True:
                            self.log_failure("Touch on %s failed: %s" %
                                             (key, result))
        client.close()
        self.bucket_util._wait_for_stats_all_buckets()
        # Validate doc count as per bucket collections
        self.bucket_util.validate_docs_per_collections_all_buckets()
        self.validate_test_failure()
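
active_resident_threshold tells the loader to keep inserting documents until the bucket's active resident ratio falls below the given percentage, i.e. until the bucket is in DGM (disk greater than memory). A sketch of how that ratio can be read from the bucket's memcached stats (stat name as reported by cbstats):

    def active_resident_ratio(stats):
        # stats: dict of per-bucket memcached stats.
        # vb_active_perc_mem_resident is the percentage of active
        # vBucket items currently resident in memory.
        return int(stats.get("vb_active_perc_mem_resident", 100))

    # e.g. the loader keeps writing while
    # active_resident_ratio(stats) > self.active_resident_threshold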
Example #16
 def test_more_than_max_collections_single_scope(self):
     BucketUtils.create_scope(self.cluster.master, self.bucket,
                              {"name": "scope1"})
     # create max collections under single scope
     collects_dict = BucketUtils.create_collections(
         self.cluster, self.bucket, self.MAX_COLLECTIONS, "scope1")
     actual_count = len(collects_dict)
     if actual_count != self.MAX_COLLECTIONS:
         self.fail("failed to create max number of collections")
     try:
         # create one more than the max allowed
         BucketUtils.create_collections(self.cluster, self.bucket, 1, "scope1")
     except Exception as e:
         self.log.info("Creating more than max collections failed as expected")
     else:
         self.fail("Creating more than max collections did not fail")
Example #17
    def key_not_exists_test(self):
        client = SDKClient([self.cluster.master], self.bucket)
        collections = BucketUtils.get_random_collections([self.bucket], 1, 1,
                                                         1)
        scope_dict = collections[self.bucket.name]["scopes"]
        scope_name = scope_dict.keys()[0]
        collection_name = scope_dict[scope_name]["collections"].keys()[0]
        client.select_collection(scope_name, collection_name)
        self.log.info("CAS test on collection %s: %s" %
                      (scope_name, collection_name))

        load_gen = doc_generator(self.key, 0, self.num_items, doc_size=256)
        key, val = load_gen.next()

        for _ in range(1500):
            result = client.crud("create",
                                 key,
                                 val,
                                 durability=self.durability_level,
                                 timeout=self.sdk_timeout)
            if result["status"] is False:
                self.log_failure("Create failed: %s" % result)
            create_cas = result["cas"]

            # Delete and verify get fails
            result = client.crud("delete",
                                 key,
                                 durability=self.durability_level,
                                 timeout=self.sdk_timeout)
            if result["status"] is False:
                self.log_failure("Delete failed: %s" % result)
            elif result["cas"] <= create_cas:
                self.log_failure("Delete returned invalid cas: %s" % result)

            result = client.crud("read", key, timeout=self.sdk_timeout)
            if result["status"] is True:
                self.log_failure("Read succeeded after delete: %s" % result)
            elif SDKException.DocumentNotFoundException \
                    not in str(result["error"]):
                self.log_failure("Invalid exception during read "
                                 "for non-exists key: %s" % result)

            # A replace with a stale CAS must fail fast (no retry sleep);
            # also verify that the correct error is thrown
            result = client.crud("replace",
                                 key,
                                 val,
                                 exp=60,
                                 timeout=self.sdk_timeout,
                                 cas=create_cas)
            if result["status"] is True:
                self.log_failure("Replace succeeded after delete: %s" % result)
            elif SDKException.DocumentNotFoundException \
                    not in str(result["error"]):
                self.log_failure("Invalid exception during replace "
                                 "for non-existent key: %s" % result)

        # Validate doc count as per bucket collections
        self.bucket_util.validate_docs_per_collections_all_buckets()
        self.validate_test_failure()
Example #18
def key_for_node(node, bucket_name):
    vbuckets = vbuckets_on_node(node, bucket_name)
    for i in range(10000):
        key = "key{}".format(i)
        if BucketUtils.get_vbucket_num_for_key(key) in vbuckets:
            return key
    raise RuntimeError('A key that belongs to this node could not be found.')
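
BucketUtils.get_vbucket_num_for_key implements Couchbase's standard key-to-vBucket mapping: CRC32 the key, keep bits 16-30 of the checksum, and reduce modulo the vBucket count. A standalone sketch:

    import zlib

    def vbucket_for_key(key, num_vbuckets=1024):
        # CRC32 the key (masked to an unsigned 32-bit value), shift off
        # the low 16 bits, keep 15 bits, then map into the vBucket range
        checksum = zlib.crc32(key.encode("utf-8")) & 0xFFFFFFFF
        return (checksum >> 16 & 0x7FFF) % num_vbuckets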
Example #19
    def test_delete_collection_during_load(self):
        """ Get a random collection/scope ,
            delete collection/scope while loading"""
        delete_scope = self.input.param("delete_scope", False)
        retry = 5
        scope_dict = dict()
        scope_name = ""

        while retry > 0:
            bucket_dict = BucketUtils.get_random_collections([self.bucket], 1,
                                                             "all", "all")
            scope_dict = bucket_dict[self.bucket.name]["scopes"]
            scope_name = scope_dict.keys()[0]
            # Check to prevent default scope deletion, which is not allowed
            if (scope_name != CbServer.default_scope) or not delete_scope:
                break
            retry -= 1
        collection_name = scope_dict[scope_name]["collections"].keys()[0]

        self.num_items = \
            self.bucket \
                .scopes[scope_name] \
                .collections[collection_name] \
                .num_items
        load_gen = doc_generator(self.key, self.num_items, self.num_items * 20)

        self.log.info("Delete collection while load %s: %s" %
                      (scope_name, collection_name))
        task = self.task.async_load_gen_docs(self.cluster,
                                             self.bucket,
                                             load_gen,
                                             "create",
                                             exp=self.maxttl,
                                             batch_size=200,
                                             process_concurrency=1,
                                             scope=scope_name,
                                             compression=self.sdk_compression,
                                             collection=collection_name,
                                             print_ops_rate=True,
                                             retries=0)

        self.sleep(5)
        self.bucket_util.print_bucket_stats()

        if delete_scope:
            self.bucket_util.drop_scope(self.cluster.master, self.bucket,
                                        scope_name)
            del self.bucket.scopes[scope_name]

        else:
            self.bucket_util.drop_collection(self.cluster.master, self.bucket,
                                             scope_name, collection_name)
            del self.bucket.scopes[scope_name].collections[collection_name]

        # validate task failure
        self.task_manager.stop_task(task)

        # Validate doc count as per bucket collections
        self.bucket_util.validate_docs_per_collections_all_buckets()
        self.validate_test_failure()
Example #20
 def __get_random_doc_ttl_and_durability_level():
     # Max doc_ttl value=2147483648. Reference:
     # docs.couchbase.com/server/6.5/learn/buckets-memory-and-storage/expiration.html
     doc_ttl = sample([0, 30000, 2147483648], 1)[0]
     durability_level = sample(
         BucketUtils.get_supported_durability_levels() + [""], 1)[0]
     return doc_ttl, durability_level
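
For context on why 2147483648 is a legal doc_ttl: the server treats TTL values up to 30 days (2592000 seconds) as offsets relative to "now" and anything larger as an absolute Unix timestamp, so 2147483648 (early 2038) is simply a far-future absolute expiry. A sketch of that rule:

    import time

    THIRTY_DAYS = 30 * 24 * 60 * 60  # 2592000 seconds

    def effective_expiry(doc_ttl):
        # 0 means "never expires"; TTLs up to 30 days are relative to
        # now; larger values are absolute Unix timestamps
        if doc_ttl == 0 or doc_ttl > THIRTY_DAYS:
            return doc_ttl
        return int(time.time()) + doc_ttl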
Example #21
 def __get_random_doc_ttl_and_durability_level():
     # Although the max TTL that can be set is 2147483648, we cannot set it to more than 1576800000
     # using the Java SDK's "Duration". Refer:
     # https://docs.couchbase.com/java-sdk/current/howtos/kv-operations.html#document-expiration
     doc_ttl = sample([0, 30000, 1576800000], 1)[0]
     durability_level = sample(
         BucketUtils.get_supported_durability_levels() + [""], 1)[0]
     return doc_ttl, durability_level
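
The 1576800000-second cap mentioned above works out to exactly 50 non-leap years; the arithmetic:

    # 50 years x 365 days x 24 h x 60 min x 60 s
    assert 50 * 365 * 24 * 60 * 60 == 1576800000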
Example #22
 def test_create_delete_recreate_collection(self):
     collections = BucketUtils.get_random_collections(
         self.bucket_util.buckets, 10, 10, 1)
     # delete collection
     for bucket_name, scope_dict in collections.iteritems():
         bucket = BucketUtils.get_bucket_obj(self.bucket_util.buckets,
                                             bucket_name)
         scope_dict = scope_dict["scopes"]
         for scope_name, collection_dict in scope_dict.items():
             collection_dict = collection_dict["collections"]
             for c_name, _ in collection_dict.items():
                 BucketUtils.drop_collection(self.cluster.master, bucket,
                                             scope_name, c_name)
     # recreate collection
     for bucket_name, scope_dict in collections.iteritems():
         bucket = BucketUtils.get_bucket_obj(self.bucket_util.buckets,
                                             bucket_name)
         scope_dict = scope_dict["scopes"]
         for scope_name, collection_dict in scope_dict.items():
             collection_dict = collection_dict["collections"]
             for c_name, _ in collection_dict.items():
                 # Cannot create a _default collection
                 if c_name == CbServer.default_collection:
                     continue
                 col_obj = \
                     bucket.scopes[scope_name].collections[c_name]
                 BucketUtils.create_collection(self.cluster.master, bucket,
                                               scope_name,
                                               col_obj.get_dict_object())
     # Validate doc count as per bucket collections
     self.bucket_util.validate_docs_per_collections_all_buckets()
     self.validate_test_failure()
Example #23
 def __init__(self, cb_clusters, task, taskmgr):
     self.__cb_clusters = cb_clusters
     self.task = task
     self.task_manager = taskmgr
     for cluster in self.__cb_clusters:
         cluster.cluster_util = ClusterUtils(cluster, self.task_manager)
         cluster.bucket_util = BucketUtils(cluster.cluster_util, self.task)
     self.input = TestInputSingleton.input
     self.init_parameters()
     self.create_buckets()
     self.log = logger.get("test")
Example #24
 def get_collection_for_atrcollection(self):
     collections = BucketUtils.get_random_collections(
             self.buckets, 1, "all", self.num_buckets)
     for bucket, scope_dict in collections.items():
         for s_name, c_dict in scope_dict["scopes"].items():
             for c_name, c_data in c_dict["collections"].items():
                 if random.choice([True, False]):
                     atrcollection = "`%s`.`%s`.`%s`" % (bucket, s_name,
                                                         c_name)
                 else:
                     atrcollection = "`%s`.`%s`.`%s`" % \
                         (bucket, CbServer.default_scope,
                          CbServer.default_collection)
     return atrcollection
Example #25
 def get_cb_cluster_by_name(self, name):
     """Return couchbase cluster object for given name.
     @return: CBCluster object
     """
     for cluster in self.clusters:
         if cluster.name == name:
             cluster.cluster_util = ClusterUtils(cluster, self.task_manager)
             cluster.bucket_util = BucketUtils(cluster,
                                               cluster.cluster_util,
                                               self.task)
             return cluster
     raise Exception(
         "Couchbase Cluster with name: {0} not exist".format(name))
Example #26
 def process_value_for_verification(self,
                                    bucket_col,
                                    doc_gen_list,
                                    results,
                                    buckets=None):
     """
     1. get the collection
     2. get its doc_gen
     3. first validate deleted docs
     4. then check updated docs
     5. validate inserted docs
     """
     for collection in bucket_col:
         self.log.info("validation started for collection %s" % collection)
         gen_load = doc_gen_list[collection]
         self.validate_dict = {}
         self.deleted_key = []
         doc_gen = copy.deepcopy(gen_load)
         while doc_gen.has_next():
             key, val = next(doc_gen)
             self.validate_dict[key] = val
         for res in results:
             for savepoint in res[1]:
                 if collection in res[0][savepoint].keys():
                     for key in set(
                             res[0][savepoint][collection]["DELETE"]):
                         self.deleted_key.append(key)
                     for key, val in res[0][savepoint][collection][
                             "INSERT"].items():
                         self.validate_dict[key] = val
                     for key, val in res[0][savepoint][collection][
                             "UPDATE"].items():
                         mutated = key.split("=")
                         for t_id in val:
                             try:
                                 self.validate_dict[t_id][mutated[0]] = \
                                     mutated[1]
                             except:
                                 self.validate_dict[t_id].put(
                                     mutated[0], mutated[1])
         bucket_collection = collection.split('.')
         if buckets:
             self.buckets = buckets
         else:
             self.buckets = self.bucket_util.buckets
         bucket = BucketUtils.get_bucket_obj(self.buckets,
                                             bucket_collection[0])
         client = \
             DocLoaderUtils.sdk_client_pool.get_client_for_bucket(
                 bucket, bucket_collection[1], bucket_collection[2])
         self.validate_keys(client, self.validate_dict, self.deleted_key)
Example #27
 def populate_uids(self, base_name="pre_qf"):
     """
     Creates a scope, collection in each bucket and
     returns a dict like:
     {bucket_name:{"uid":uid, "cid":cid, "sid":sid}, ..}
     """
     uids = dict()
     for bucket in self.cluster.buckets:
         scope_name = "custom_scope-" + base_name
         collection_name = "custom_collection" + base_name
         BucketUtils.create_scope(self.cluster.master, bucket,
                                  {"name": scope_name})
         BucketUtils.create_collection(self.cluster.master, bucket,
                                       scope_name,
                                       {"name": collection_name})
         uids[bucket.name] = dict()
         uids[bucket.name]["sid"] = BucketHelper(self.cluster.master). \
             get_scope_id(bucket.name, scope_name)
         uids[bucket.name]["cid"] = BucketHelper(self.cluster.master). \
             get_collection_id(bucket.name, scope_name, collection_name)
         uids[bucket.name]["uid"] = BucketHelper(self.cluster.master). \
             get_bucket_manifest_uid(bucket.name)
     return uids
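
A hypothetical use of the returned structure (a bucket named "default" is assumed), capturing the ids before an operation so they can be compared afterwards:

    uids_before = self.populate_uids(base_name="pre_qf")
    manifest_uid = uids_before["default"]["uid"]   # bucket manifest uid
    scope_id = uids_before["default"]["sid"]       # scope id
    collection_id = uids_before["default"]["cid"]  # collection id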
Example #28
    def touch_test(self):
        self.log.info("Loading bucket %s into %s%% DGM" %
                      (self.bucket.name, self.active_resident_threshold))
        load_gen = doc_generator(self.key,
                                 0,
                                 self.num_items,
                                 doc_size=self.doc_size)
        dgm_gen = doc_generator(self.key, self.num_items, self.num_items + 1)
        dgm_task = self.task.async_load_gen_docs(
            self.cluster,
            self.bucket,
            dgm_gen,
            DocLoading.Bucket.DocOps.CREATE,
            0,
            persist_to=self.persist_to,
            replicate_to=self.replicate_to,
            durability=self.durability_level,
            timeout_secs=self.sdk_timeout,
            batch_size=10,
            process_concurrency=4,
            active_resident_threshold=self.active_resident_threshold,
            sdk_client_pool=self.sdk_client_pool)
        self.task_manager.get_task_result(dgm_task)

        self.log.info("Touch initial self.num_items docs which are "
                      "residing on disk due to DGM")
        client = self.sdk_client_pool.get_client_for_bucket(self.bucket)
        collections = BucketUtils.get_random_collections([self.bucket], 2, 2,
                                                         1)
        for bucket_name, scope_dict in collections.iteritems():
            for scope_name, collection_dict in scope_dict["scopes"].items():
                for c_name, c_data in collection_dict["collections"].items():
                    self.log.info("CAS test on collection %s: %s" %
                                  (scope_name, c_name))
                    client.select_collection(scope_name, c_name)
                    while load_gen.has_next():
                        key, _ = load_gen.next()
                        result = client.crud(DocLoading.Bucket.DocOps.TOUCH,
                                             key,
                                             durability=self.durability_level,
                                             timeout=self.sdk_timeout)
                        if result["status"] is not True:
                            self.log_failure("Touch on %s failed: %s" %
                                             (key, result))
        # Reset the client's scope and collection names back to _default,
        # since the loop above changed them to select other collections
        client.scope_name = CbServer.default_scope
        client.collection_name = CbServer.default_collection
        self.sdk_client_pool.release_client(client)
        self.validate_test_failure()
Example #29
 def test_drop_collection_compaction(self):
     collections = BucketUtils.get_random_collections(
         self.bucket_util.buckets, 10, 10, 1)
     # Delete collection
     for self.bucket_name, scope_dict in collections.iteritems():
         bucket = BucketUtils.get_bucket_obj(self.bucket_util.buckets,
                                             self.bucket_name)
         scope_dict = scope_dict["scopes"]
         for scope_name, collection_dict in scope_dict.items():
             collection_dict = collection_dict["collections"]
             for c_name, c_data in collection_dict.items():
                 BucketUtils.drop_collection(self.cluster.master, bucket,
                                             scope_name, c_name)
     # Trigger compaction
     remote_client = RemoteMachineShellConnection(self.cluster.master)
     _ = remote_client.wait_till_compaction_end(
         RestConnection(self.cluster.master),
         self.bucket_name,
         timeout_in_seconds=(self.wait_timeout * 10))
     remote_client.disconnect()
     # Validate doc count as per bucket collections
     self.bucket_util.validate_docs_per_collections_all_buckets()
     self.validate_test_failure()
Example #30
 def data_load(self):
     cycles = 0
     start_time = time.time()
     while self.data_load_flag:
         doc_loading_spec = self.spec_for_drop_recreate()
         try:
             _ = BucketUtils.perform_tasks_from_spec(
                 self.cluster, self.cluster.buckets, doc_loading_spec)
         except Exception as e:
             self.data_load_exception = e
             raise
         cycles = cycles + 1
     end_time = time.time()
     elapsed_time = end_time - start_time
     self.print_spec_details(self.spec_for_drop_recreate(), cycles,
                             elapsed_time)
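
data_load spins until data_load_flag is cleared, so it is meant to run on its own thread while the main test drives other operations. A hypothetical driver (thread handling via the standard library; self.sleep as used in the examples above):

    import threading

    self.data_load_flag = True
    loader = threading.Thread(target=self.data_load)
    loader.start()
    self.sleep(300, "let drop/recreate cycles run on the loader thread")
    self.data_load_flag = False
    loader.join()
    # Assumes data_load_exception was initialised to None in setUp
    if self.data_load_exception:
        self.fail("Background data load failed: %s"
                  % self.data_load_exception)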