Пример #1
0
    def test_invalid_name_collection(self):
        """Try creating scopes/collections using invalid names and
        make sure the server rejects every single attempt."""
        # Scope creation with 1000 randomly generated invalid names
        for _ in range(1000):
            scope_name = BucketUtils.get_random_name(invalid_name=True)
            try:
                status, content = BucketHelper(self.cluster.master) \
                    .create_scope(self.bucket, scope_name)
                if status is True:
                    self.log_failure(
                        "Scope '%s::%s' creation not failed: %s"
                        % (self.bucket, scope_name, content))
            except Exception as e:
                self.log.debug(e)

        # Collection creation under the default scope with invalid names
        for _ in range(1000):
            collection_name = BucketUtils.get_random_name(invalid_name=True)
            try:
                status, content = BucketHelper(
                    self.cluster.master).create_collection(
                    self.bucket, CbServer.default_scope, collection_name)
                if status is True:
                    self.log_failure(
                        "Collection '%s::%s::%s' creation not failed: %s"
                        % (self.bucket, CbServer.default_scope,
                           collection_name, content))
            except Exception as e:
                self.log.debug(e)

        # Make sure doc counts still match the expected per-collection values
        self.bucket_util.validate_docs_per_collections_all_buckets()
        self.validate_test_failure()
Пример #2
0
    def setUp(self):
        """ExpiryMaxTTL test-bed setup: create the default bucket,
        prepare doc keys / target vbuckets and (optionally) warm up
        an SDK client pool."""
        super(ExpiryMaxTTL, self).setUp()

        # Create default bucket
        self.create_bucket(self.cluster)

        # Pad the key to key_size so every doc key has a fixed length
        self.key = 'test_ttl_docs'.rjust(self.key_size, '0')

        # Doc generators expect a list of vbuckets, so wrap a scalar value
        if self.target_vbucket and type(self.target_vbucket) is not list:
            self.target_vbucket = [self.target_vbucket]

        self.bucket_util.get_all_buckets(self.cluster)
        self.bucket_helper_obj = BucketHelper(self.cluster.master)
        self.cluster_util.print_cluster_stats(self.cluster)
        self.bucket_util.print_bucket_stats(self.cluster)

        # Create sdk_clients for pool (clients target the first bucket only)
        if self.sdk_client_pool:
            self.log.info("Creating SDK client pool")
            self.sdk_client_pool.create_clients(
                self.cluster.buckets[0],
                self.cluster.nodes_in_cluster,
                req_clients=self.sdk_pool_capacity,
                compression_settings=self.sdk_compression)

        self.log.info("==========Finished ExpiryMaxTTL base setup========")
Пример #3
0
    def test_replica_update(self):
        """Exercise replica increment followed by decrement on the bucket,
        delegating doc validation to self.generic_replica_update()."""
        max_replicas = 3 if self.atomicity else 4
        if self.nodes_init < 2:
            self.log.error("Test not supported for < 2 node cluster")
            return

        crud_ops = self.input.param("doc_ops", "")
        b_helper = BucketHelper(self.cluster.master)

        items = self.num_items
        insert_start = self.num_items
        upper_bound = min(max_replicas, self.nodes_init)

        # Phase 1: bump the replica count upwards one step at a time
        items, insert_start = self.generic_replica_update(
            items, crud_ops, b_helper,
            range(1, upper_bound),
            insert_start)

        # Phase 2: walk the replica count back down to zero
        _, _ = self.generic_replica_update(
            items, crud_ops, b_helper,
            range(upper_bound - 2, -1, -1),
            insert_start)
Пример #4
0
    def test_bucket_flush_while_index_are_created(self):
        """Flush the CB bucket while a CBAS secondary index is being
        created asynchronously, then verify the dataset drains to 0 docs.
        """
        self.log.info("Add documents, create CBAS buckets, "
                      "dataset and validate count")
        self.setup_for_test()

        self.log.info('Disconnect CBAS bucket')
        self.cbas_util.disconnect_from_bucket(self.cbas_bucket_name)

        self.log.info('Create secondary index in Async')
        index_fields = self.input.param("index_fields", None)
        if index_fields is None:
            # Fail fast with a clear message instead of the AttributeError
            # that None.replace() below would raise
            self.fail("Test requires the 'index_fields' input param")
        # Param uses '-' as the field separator; the DDL needs ','
        index_fields = index_fields.replace('-', ',')
        query = "create index {0} on {1}({2});" \
            .format("sec_idx", self.cbas_dataset_name, index_fields)
        create_index_task = self.task.async_cbas_query_execute(
            self.cluster.master, self.cbas_util, None, query, 'default')

        self.log.info('Flush bucket while index are getting created')
        # Flush the CB bucket
        BucketHelper(self.cluster.master).flush_bucket(self.cb_bucket_name)

        self.log.info('Get result on index creation')
        self.task_manager.get_task_result(create_index_task)

        self.log.info('Connect back cbas bucket')
        self.cbas_util.connect_to_bucket(self.cbas_bucket_name)

        self.log.info('Validate no. of items in CBAS dataset')
        # The bucket was flushed, so the dataset must end up with 0 docs
        if not self.cbas_util.validate_cbas_dataset_items_count(
                self.cbas_dataset_name, 0):
            self.fail("No. of items in CBAS dataset do not match "
                      "that in the CB bucket")
Пример #5
0
    def setUp(self):
        """ExpiryMaxTTL base setup: rebalance-in the initial nodes,
        create the default bucket with maxTTL and prepare SDK clients."""
        super(ExpiryMaxTTL, self).setUp()
        # Pad the key to key_size so every doc key has a fixed length
        self.key = 'test_ttl_docs'.rjust(self.key_size, '0')

        # Doc generators expect a list of vbuckets, so wrap a scalar value
        if self.target_vbucket and type(self.target_vbucket) is not list:
            self.target_vbucket = [self.target_vbucket]

        # Rebalance-in the remaining nodes (empty list for a 1-node cluster)
        nodes_init = self.cluster.servers[1:self.nodes_init] \
            if self.nodes_init != 1 else []
        self.task.rebalance([self.cluster.master], nodes_init, [])
        self.cluster.nodes_in_cluster.extend([self.cluster.master] +
                                             nodes_init)
        # Default bucket carries the maxTTL under test
        self.bucket_util.create_default_bucket(
            bucket_type=self.bucket_type,
            maxTTL=self.maxttl,
            storage=self.bucket_storage,
            eviction_policy=self.bucket_eviction_policy,
            replica=self.num_replicas,
            compression_mode=self.compression_mode)
        self.bucket_util.add_rbac_user()
        self.bucket_util.get_all_buckets()
        self.bucket_helper_obj = BucketHelper(self.cluster.master)
        self.cluster_util.print_cluster_stats()
        self.bucket_util.print_bucket_stats()

        # Create sdk_clients for pool (clients target the first bucket only)
        if self.sdk_client_pool:
            self.log.info("Creating SDK client pool")
            self.sdk_client_pool.create_clients(
                self.bucket_util.buckets[0],
                self.cluster.nodes_in_cluster,
                req_clients=self.sdk_pool_capacity,
                compression_settings=self.sdk_compression)

        self.log.info("==========Finished ExpiryMaxTTL base setup========")
Пример #6
0
    def test_MB_34947(self):
        """MB-34947: update existing docs, bump the bucket replica count
        and verify that the subsequent rebalance succeeds."""
        # Update already created docs using async writes
        doc_gen = doc_generator(self.key, 0, self.num_items,
                                key_size=self.key_size,
                                doc_size=self.doc_size,
                                doc_type=self.doc_type,
                                vbuckets=self.cluster.vbuckets)
        update_task = self.task.async_load_gen_docs(
            self.cluster, self.def_bucket, doc_gen, "update", 0,
            persist_to=self.persist_to, replicate_to=self.replicate_to,
            timeout_secs=self.sdk_timeout,
            batch_size=10, process_concurrency=8)
        self.task.jython_task_manager.get_task_result(update_task)

        # Update the bucket replica count to the new value
        BucketHelper(self.cluster.master).change_bucket_props(
            self.def_bucket, replicaNumber=self.new_replica)
        self.bucket_util.print_bucket_stats(self.cluster)

        # Trigger a rebalance so the new replica count takes effect
        rebalance_task = self.task.async_rebalance(self.cluster.servers,
                                                   [], [])
        self.sleep(10, "Wait for rebalance to start")

        # Wait for the rebalance task to complete
        self.task.jython_task_manager.get_task_result(rebalance_task)

        # Assert if rebalance failed
        self.assertTrue(rebalance_task.result,
                        "Rebalance failed after replica update")
Пример #7
0
 def get_bucket_dgm(self, bucket):
     """Fetch and log the bucket's latest active resident ratio (DGM %)."""
     self.rest_client = BucketHelper(self.cluster.master)
     stats = self.rest_client.fetch_bucket_stats(bucket.name)
     # Last sample is the most recent residency reading
     dgm = stats["op"]["samples"]["vb_active_resident_items_ratio"][-1]
     self.log.info("Active Resident Threshold of {0} is {1}".format(
         bucket.name, dgm))
     return dgm
Пример #8
0
    def durability_succeeds(self,
                            bucket_name,
                            master,
                            induced_error=None,
                            failed_nodes=None):
        """
        Determines whether the durability will fail/work based on
        the type of error_induced during the test and number of nodes the
        error is induced on.

        :param bucket_name:   Name of the bucket used for fetching
                              the replica value (str)
        :param master:        Master node from the cluster
        :param induced_error: Error induced during the test execution (str)
        :param failed_nodes:  Nodes failed due to the induced_error
                              (list of nodes, or a plain int count)

        :return durability_succeeds: Durability status for the bucket (bool)
        """
        # None default avoids the shared mutable-default pitfall; accept
        # either a list of failed nodes or an int count (the original body
        # mixed both forms between its two branches)
        if failed_nodes is None:
            failed_nodes = []
        failed_count = failed_nodes if isinstance(failed_nodes, int) \
            else len(failed_nodes)

        durability_succeeds = True
        bucket = BucketHelper(master).get_bucket_json(bucket_name)
        # Majority is computed over (replicas + 1) copies of the data
        min_nodes_req = bucket["replicaNumber"] + 1
        majority_value = floor(min_nodes_req / 2) + 1

        if induced_error is None:
            if (self.cluster_len - failed_count) < majority_value:
                durability_succeeds = False
        else:
            if (self.durability == Bucket.DurabilityLevel.MAJORITY
                    and induced_error in self.disk_error_types):
                durability_succeeds = True
            elif (self.cluster_len - failed_count) < majority_value:
                durability_succeeds = False

        return durability_succeeds
Пример #9
0
    def load_document_until_ram_percentage(self):
        """Keep creating docs in fixed-size batches until the bucket's
        memory usage crosses the configured document_ram_percentage."""
        batch = 5000
        self.start = 0
        self.end = batch
        helper = BucketHelper(self.cluster.master)
        ram_limit = (self.document_ram_percentage * self.bucket_ram * 1000000)
        while True:
            self.log.info("Add documents to bucket")
            self.perform_doc_ops_in_all_cb_buckets(
                "create",
                self.start,
                self.end,
                durability=self.durability_level)

            self.log.info("Calculate available free memory")
            bucket_json = helper.get_bucket_json(self.cb_bucket_name)
            mem_used = sum(node_stat["interestingStats"]["mem_used"]
                           for node_stat in bucket_json["nodes"])

            # Stop once memory consumption reaches the cap
            if mem_used >= ram_limit:
                break
            self.log.info("Memory used: %s < %s" % (mem_used, ram_limit))
            self.start = self.end
            self.end += batch
            self.num_items = self.end
Пример #10
0
 def load_bucket_into_dgm(self,
                          cluster,
                          bucket,
                          key,
                          num_items,
                          active_resident_threshold,
                          load_batch_size=20000,
                          batch_size=10,
                          process_concurrency=4,
                          persist_to=None,
                          replicate_to=None):
     """Keep loading docs until the bucket's active resident ratio drops
     below 'active_resident_threshold', i.e. the bucket enters DGM.

     :param cluster: Cluster object; its master node serves the REST calls
     :param bucket: Target bucket object
     :param key: Doc key prefix for the generated documents
     :param num_items: Docs already present (start offset for the generator)
     :param active_resident_threshold: Target resident-ratio percentage
     :param load_batch_size: Docs created per load iteration
     :param batch_size: SDK batch size used by the loader task
     :param process_concurrency: Loader task concurrency
     :param persist_to: Optional persist_to durability requirement
     :param replicate_to: Optional replicate_to durability requirement
     :return: Total item count once the bucket has reached DGM
     """
     rest = BucketHelper(cluster.master)
     bucket_stat = rest.get_bucket_stats_for_node(bucket.name,
                                                  cluster.master)
     # Load 'load_batch_size' docs per pass until the residency stat
     # falls to (or below) the requested threshold
     while bucket_stat["vb_active_resident_items_ratio"] > \
             active_resident_threshold:
         gen_load = doc_generator(key,
                                  num_items,
                                  num_items + load_batch_size,
                                  doc_type="binary")
         num_items += load_batch_size
         task = self.async_load_gen_docs(
             cluster,
             bucket,
             gen_load,
             "create",
             0,
             batch_size=batch_size,
             process_concurrency=process_concurrency,
             persist_to=persist_to,
             replicate_to=replicate_to)
         self.jython_task_manager.get_task_result(task)
         # Re-read the stats to decide whether more load is required
         bucket_stat = rest.get_bucket_stats_for_node(
             bucket.name, cluster.master)
     return num_items
Пример #11
0
    def setUp(self):
        """CollectionBase setup: read the test input params, disable
        auto-failover and run the bucket/collection setup for the test."""
        super(CollectionBase, self).setUp()
        self.log_setup_status("CollectionBase", "started")

        # Pad the key to key_size so every doc key has a fixed length
        self.key = 'test_collection'.rjust(self.key_size, '0')
        self.simulate_error = self.input.param("simulate_error", None)
        self.error_type = self.input.param("error_type", "memory")
        self.doc_ops = self.input.param("doc_ops", None)
        # If True, creates bucket/scope/collections with simpler names
        self.use_simple_names = self.input.param("use_simple_names", True)
        self.spec_name = self.input.param("bucket_spec",
                                          "single_bucket.default")
        self.data_spec_name = self.input.param("data_spec_name",
                                               "initial_load")
        self.remove_default_collection = \
            self.input.param("remove_default_collection", False)

        self.action_phase = self.input.param("action_phase",
                                             "before_default_load")
        self.skip_collections_cleanup = \
            self.input.param("skip_collections_cleanup", False)
        self.validate_docs_count_during_teardown = \
            self.input.param("validate_docs_count_during_teardown", False)
        self.batch_size = self.input.param("batch_size", 200)
        self.process_concurrency = self.input.param("process_concurrency", 1)
        self.retry_get_process_num = \
            self.input.param("retry_get_process_num", 200)
        self.change_magma_quota = self.input.param("change_magma_quota", False)
        self.crud_batch_size = 100
        # With more than one replica, induced errors affect two nodes
        self.num_nodes_affected = 1
        if self.num_replicas > 1:
            self.num_nodes_affected = 2

        # doc_ops arrives as a ';'-separated string; split into a list
        if self.doc_ops:
            self.doc_ops = self.doc_ops.split(';')

        self.durability_helper = DurabilityHelper(
            self.log, len(self.cluster.nodes_in_cluster),
            self.durability_level)

        # Disable auto-failover to avoid failover of nodes
        status = RestConnection(self.cluster.master) \
            .update_autofailover_settings(False, 120)
        self.assertTrue(status, msg="Failure during disabling auto-failover")
        self.bucket_helper_obj = BucketHelper(self.cluster.master)
        self.disk_optimized_thread_settings = self.input.param("disk_optimized_thread_settings", False)
        if self.disk_optimized_thread_settings:
            self.set_num_writer_and_reader_threads(num_writer_threads="disk_io_optimized",
                                                   num_reader_threads="disk_io_optimized")

        # Both infra (Java) and generic exceptions are routed to the
        # common setup-exception handler
        try:
            self.collection_setup()
        except Java_base_exception as exception:
            self.handle_setup_exception(exception)
        except Exception as exception:
            self.handle_setup_exception(exception)
        self.supported_d_levels = \
            self.bucket_util.get_supported_durability_levels()
        self.log_setup_status("CollectionBase", "complete")
Пример #12
0
 def set_num_writer_and_reader_threads(self,
                                       num_writer_threads="default",
                                       num_reader_threads="default"):
     """Apply memcached writer/reader thread counts on every KV node."""
     kv_nodes = self.cluster_util.get_kv_nodes()
     for kv_node in kv_nodes:
         BucketHelper(kv_node).update_memcached_settings(
             num_writer_threads=num_writer_threads,
             num_reader_threads=num_reader_threads)
Пример #13
0
 def set_num_writer_and_reader_threads(self,
                                       num_writer_threads="default",
                                       num_reader_threads="default",
                                       num_storage_threads="default"):
     """Push memcached thread-pool settings through the master node."""
     helper = BucketHelper(self.cluster.master)
     helper.update_memcached_settings(
         num_writer_threads=num_writer_threads,
         num_reader_threads=num_reader_threads,
         num_storage_threads=num_storage_threads)
Пример #14
0
 def _record_vbuckets(self, master, servers):
     """Return {bucket_name: vbucket placement} for every known bucket.

     :param master:  Node used for the REST queries
     :param servers: Servers whose vbucket placement is recorded
     :return: dict mapping bucket name to its vbucket distribution
     """
     # 'vb_map' instead of 'map' — never shadow the builtin
     vb_map = dict()
     for bucket in self.buckets:
         self.log.info("Record vbucket for the bucket {0}".format(
             bucket.name))
         vb_map[bucket.name] = BucketHelper(master)\
             ._get_vbuckets(servers, bucket_name=bucket.name)
     return vb_map
Пример #15
0
    def setUp(self):
        """CollectionBase setup (variant): read test input params,
        disable auto-failover and run the collection setup."""
        super(CollectionBase, self).setUp()
        self.log_setup_status("CollectionBase", "started")

        # Server-side caps on scope/collection counts
        self.MAX_SCOPES = CbServer.max_scopes
        self.MAX_COLLECTIONS = CbServer.max_collections
        # Pad the key to key_size so every doc key has a fixed length
        self.key = 'test_collection'.rjust(self.key_size, '0')
        self.simulate_error = self.input.param("simulate_error", None)
        self.error_type = self.input.param("error_type", "memory")
        self.doc_ops = self.input.param("doc_ops", None)
        self.spec_name = self.input.param("bucket_spec",
                                          "single_bucket.default")
        self.data_spec_name = self.input.param("data_spec_name",
                                               "initial_load")
        self.remove_default_collection = \
            self.input.param("remove_default_collection", False)

        self.action_phase = self.input.param("action_phase",
                                             "before_default_load")
        self.skip_collections_cleanup = \
            self.input.param("skip_collections_cleanup", False)
        self.validate_docs_count_during_teardown = \
            self.input.param("validate_docs_count_during_teardown", False)
        self.batch_size = self.input.param("batch_size", 200)
        self.vbuckets = self.input.param("vbuckets",
                                         self.cluster_util.vbuckets)
        self.retry_get_process_num = self.input.param("retry_get_process_num",
                                                      25)

        self.crud_batch_size = 100
        # With more than one replica, induced errors affect two nodes
        self.num_nodes_affected = 1
        if self.num_replicas > 1:
            self.num_nodes_affected = 2

        # doc_ops arrives as a ';'-separated string; split into a list
        if self.doc_ops:
            self.doc_ops = self.doc_ops.split(';')

        self.durability_helper = DurabilityHelper(
            self.log, len(self.cluster.nodes_in_cluster),
            self.durability_level)

        # Disable auto-failover to avoid failover of nodes
        status = RestConnection(self.cluster.master) \
            .update_autofailover_settings(False, 120, False)
        self.assertTrue(status, msg="Failure during disabling auto-failover")
        self.bucket_helper_obj = BucketHelper(self.cluster.master)

        # Both infra (Java) and generic exceptions are routed to the
        # common setup-exception handler
        try:
            self.collection_setup()
        except Java_base_exception as exception:
            self.handle_setup_exception(exception)
        except Exception as exception:
            self.handle_setup_exception(exception)
        self.supported_d_levels = \
            self.bucket_util.get_supported_durability_levels()
        self.log_setup_status("CollectionBase", "complete")
Пример #16
0
    def load_document_until_ram_percentage(self):
        """Load generated JSON docs in batches until the bucket's memory
        usage crosses the configured document_ram_percentage threshold.

        Updates self.start / self.end / self.num_items as it progresses.
        """
        self.start = 0
        doc_batch_size = 5000
        self.end = doc_batch_size
        bucket_helper = BucketHelper(self.cluster.master)
        # Memory cap in bytes (bucket_ram is presumably MB — TODO confirm)
        mem_cap = (self.document_ram_percentage * self.bucket_ram * 1000000)

        # Field pools used by the doc generator below
        first = ['james', 'sharon', 'dave', 'bill', 'mike', 'steve']
        profession = ['doctor', 'lawyer']

        # Template for every generated document
        template_obj = JsonObject.create()
        template_obj.put("number", 0)
        template_obj.put("first_name", "")
        template_obj.put("profession", "")
        template_obj.put("mutated", 0)
        template_obj.put("mutation_type", "ADD")

        while True:
            self.log.info("Add documents to bucket")

            doc_gen = DocumentGenerator("test_docs",
                                        template_obj,
                                        start=self.start,
                                        end=self.end,
                                        randomize=False,
                                        first_name=first,
                                        profession=profession,
                                        number=range(70))

            try:
                self.bucket_util.sync_load_all_buckets(
                    self.cluster,
                    doc_gen,
                    "create",
                    0,
                    batch_size=doc_batch_size,
                    durability=self.durability_level,
                    suppress_error_table=True)
            except Exception as e:
                self.fail("Following error occurred while loading bucket - {"
                          "0}".format(str(e)))

            self.log.info("Calculate available free memory")
            # Sum mem_used across all nodes hosting the bucket
            bucket_json = bucket_helper.get_bucket_json(self.bucket_name)
            mem_used = 0
            for node_stat in bucket_json["nodes"]:
                mem_used += node_stat["interestingStats"]["mem_used"]

            # Advance the doc range while below the cap; stop otherwise
            if mem_used < mem_cap:
                self.log.info("Memory used: %s < %s" % (mem_used, mem_cap))
                self.start = self.end
                self.end = self.end + doc_batch_size
                self.num_items = self.end
            else:
                break
Пример #17
0
    def execute(self):
        """Trigger bucket compaction and advance the task to CHECKING.

        On BucketCompactionException the task is marked FINISHED with
        a False result.
        """
        try:
            # Compaction status is validated during the CHECKING phase,
            # so the call's return value is intentionally not captured
            BucketHelper(self.server).compact_bucket(self.bucket)
            self.state = CHECKING
            self.call()
        except BucketCompactionException as e:
            self.test_log.error("Bucket compaction failed: {0}".format(e))
            self.set_unexpected_exception(e)
            self.state = FINISHED
            self.set_result(False)
Пример #18
0
    def compact_cb_bucket_with_cbas_connected(self):
        """Compact the CB bucket while CBAS is connected and verify the
        dataset still reports the full item count."""
        self.setup_for_test()

        # Compact (not flush) the CB bucket: a flush would drop all docs,
        # while the validation below expects self.num_items to survive
        BucketHelper(self.cluster.master).compact_bucket(self.cb_bucket_name)

        # Validate no. of items in CBAS dataset
        if not self.cbas_util.validate_cbas_dataset_items_count(
                self.cbas_dataset_name, self.num_items):
            self.fail("No. of items in CBAS dataset do not match "
                      "that in the CB bucket")
Пример #19
0
    def execute(self):
        """Trigger bucket compaction; on failure record the exception
        and finish the task with a False result."""
        try:
            # Compaction status is verified in the CHECKING phase,
            # so the return value is intentionally not captured
            BucketHelper(self.server).compact_bucket(self.bucket)
            self.state = CHECKING
            self.call()
        except BucketCompactionException as e:
            # Log the actual error instead of a generic "unknown reason"
            log.error("Bucket compaction failed: {0}".format(e))
            self.set_unexpected_exception(e)
            self.state = FINISHED
            self.set_result(False)
Пример #20
0
    def create_scope_collections(self):
        """Create the scopes/collections described by 'collection_spec'
        on the bucket and mirror them as local scope/collection objects.

        NOTE(review): 'collection_spec' is not defined in this method nor
        referenced via self — presumably a module/outer-scope variable;
        confirm it is in scope before calling.
        """
        self.__print_step("Creating required scope/collections")
        # Push the whole manifest in one REST call (JSON needs double
        # quotes, hence the replace); the server bumps its manifest uid,
        # so the local stats copy is incremented to stay in sync
        BucketHelper(self.cluster.master).import_collection_using_manifest(
            self.bucket.name,
            str(collection_spec).replace("'", '"'))
        self.bucket.stats.increment_manifest_uid()

        # Mirror the spec into in-memory scope/collection objects
        for scope in collection_spec["scopes"]:
            self.bucket_util.create_scope_object(self.bucket, scope)
            for collection in scope["collections"]:
                self.bucket_util.create_collection_object(
                    self.bucket, scope["name"], collection)
Пример #21
0
    def initialise(self):
        """Create this task's collections and open one SDK client per
        collection."""
        # Create the collections for this task
        for collection_name in self.collections:
            BucketHelper(self.node).create_collection(
                Bucket({'name': self.bucket}), self.scope.name,
                {"name": collection_name})

        # A client for each collection
        self.clients = dict()
        for collection_name in self.collections:
            client = SDKClient([self.node],
                               Bucket({'name': self.bucket}),
                               scope=self.scope.name,
                               collection=collection_name)
            self.clients[collection_name] = client
Пример #22
0
    def execute(self):
        """Create the bucket via REST.

        Two paths: nodes whose port falls in 9091-9991 skip the
        version-based priority handling; otherwise bucket_priority is
        mapped onto threadsNumber for server versions >= 3.0. Any
        failure leaves the task in the FINISHED state.
        """
        try:
            rest = RestConnection(self.server)
        except ServerUnavailableException as error:
            self.state = FINISHED
            self.set_unexpected_exception(error)
            return
        info = rest.get_nodes_self()
        # NOTE(review): 9091-9991 looks like a special deployment port
        # range (cluster_run/dev?) — confirm what it denotes
        if int(info.port) in xrange(9091, 9991):
            self.port = info.port

        # Default the RAM quota to 2/3 of the node quota when unset
        if self.bucket.ramQuotaMB <= 0:
            self.size = info.memoryQuota * 2 / 3

        if int(info.port) in xrange(9091, 9991):
            try:
                BucketHelper(self.server).create_bucket(self.bucket.__dict__)
                self.state = CHECKING
                self.call()
            except Exception as e:
                self.test_log.error(str(e))
                self.state = FINISHED
                #self. set_unexpected_exception(e)
            return
        version = rest.get_nodes_self().version
        try:
            # Bucket priority (threadsNumber) is only supported on >= 3.0
            if float(version[:2]) >= 3.0 and self.bucket_priority is not None:
                self.bucket.threadsNumber = self.bucket_priority
            BucketHelper(self.server).create_bucket(self.bucket.__dict__)
            self.state = CHECKING
            self.call()

        except BucketCreationException as e:
            self.state = FINISHED
            self.test_log.debug(str(e))
            #self. set_unexpected_exception(e)
        # catch and set all unexpected exceptions
        except Exception as e:
            self.test_log.error(str(e))
            self.state = FINISHED
Пример #23
0
 def update_bucket_replica(self):
     """Set replicaNumber on every bucket to self.replicas_for_failover
     and rebalance so the new replica count takes effect."""
     self.log.info("Updating all the bucket replicas to {0}".format(
         self.replicas_for_failover))
     # One helper is enough — every call targets the master node;
     # iterate the buckets directly instead of range(len(...)) indexing
     bucket_helper = BucketHelper(self.cluster.master)
     for bucket in self.bucket_util.buckets:
         bucket_helper.change_bucket_props(
             bucket, replicaNumber=self.replicas_for_failover)
     # A rebalance is required for the replica change to be applied
     task = self.task.async_rebalance(
         self.cluster.servers[:self.nodes_init], [], [])
     self.task.jython_task_manager.get_task_result(task)
     self.log.info("Bucket stats before failover")
     self.bucket_util.print_bucket_stats()
Пример #24
0
    def collection_setup(self):
        """Create buckets/scopes/collections from the bucket spec, run
        the initial doc load and validate the resulting doc counts."""
        self.log.info("Creating buckets from spec")
        # Create bucket(s) and add rbac user
        buckets_spec = self.bucket_util.get_bucket_template_from_package(
            self.spec_name)
        # NOTE(review): load spec is hard-coded to "initial_load" rather
        # than using self.data_spec_name — confirm this is intentional
        doc_loading_spec = \
            self.bucket_util.get_crud_template_from_package("initial_load")

        self.bucket_util.create_buckets_using_json_data(self.cluster,
                                                        buckets_spec)
        self.bucket_util.wait_for_collection_creation_to_complete(self.cluster)

        # Init sdk_client_pool if not initialized before
        if self.sdk_client_pool is None:
            self.init_sdk_pool_object()

        # Create clients in SDK client pool, splitting the available
        # threads evenly across all buckets
        if self.sdk_client_pool:
            self.log.info("Creating required SDK clients for client_pool")
            bucket_count = len(self.cluster.buckets)
            max_clients = self.task_manager.number_of_threads
            clients_per_bucket = int(ceil(max_clients / bucket_count))
            for bucket in self.cluster.buckets:
                self.sdk_client_pool.create_clients(
                    bucket,
                    [self.cluster.master],
                    clients_per_bucket,
                    compression_settings=self.sdk_compression)

        # Run the initial load scenario across all buckets
        doc_loading_task = \
            self.bucket_util.run_scenario_from_spec(
                self.task,
                self.cluster,
                self.cluster.buckets,
                doc_loading_spec,
                mutation_num=0)
        if doc_loading_task.result is False:
            self.fail("Initial doc_loading failed")

        self.cluster_util.print_cluster_stats(self.cluster)

        # Verify initial doc load count
        self.bucket_util._wait_for_stats_all_buckets(self.cluster,
                                                     self.cluster.buckets)
        self.bucket_util.validate_docs_per_collections_all_buckets(
            self.cluster,
            timeout=self.wait_timeout)

        self.cluster_util.print_cluster_stats(self.cluster)
        self.bucket_util.print_bucket_stats(self.cluster)
        self.bucket_helper_obj = BucketHelper(self.cluster.master)
Пример #25
0
 def get_bucket_dgm(self, bucket):
     """Fetch the bucket's active resident ratio, retrying up to 5 times.

     Returns 100 (fully resident) when every attempt fails.
     """
     self.rest_client = BucketHelper(self.cluster.master)
     dgm = 100
     for _ in range(5):
         try:
             samples = self.rest_client.fetch_bucket_stats(
                 bucket.name)["op"]["samples"]
             dgm = samples["vb_active_resident_items_ratio"][-1]
             self.log.info("Active Resident Threshold of {0} is {1}".format(
                 bucket.name, dgm))
             return dgm
         except Exception as e:
             # Stats endpoint may lag right after bucket ops; retry
             self.sleep(5, e)
     return dgm
Пример #26
0
    def test_nru_eviction_impact_on_cbas(self):
        """Fill the bucket until NRU eviction triggers, then verify the
        CBAS dataset count stays consistent with the KV/N1QL count."""

        self.log.info("Create dataset")
        self.cbas_util.create_dataset_on_bucket(self.cb_bucket_name,
                                                self.cbas_dataset_name)

        self.log.info("Connect to Local link")
        self.cbas_util.connect_link()

        self.log.info("Add documents until ram percentage")
        self.load_document_until_ram_percentage()

        self.log.info("Fetch current document count")
        bucket_helper = BucketHelper(self.master)
        item_count = bucket_helper.get_bucket(
            self.cb_bucket_name).stats.itemCount
        self.log.info("Completed base load with %s items" % item_count)

        # Touch the first 100 docs so NRU treats them as recently used
        self.log.info(
            "Fetch initial inserted 100 documents, so they are not removed")
        client = SDKClient(hosts=[self.master.ip],
                           bucket=self.cb_bucket_name,
                           password=self.master.rest_password)
        for i in range(100):
            client.get("test_docs-" + str(i))

        self.log.info("Add 20% more items to trigger NRU")
        for i in range(item_count, int(item_count * 1.2)):
            client.insert_document("key-id" + str(i), '{"name":"dave"}')

        self.log.info("Validate document count on CBAS")
        count_n1ql = self.rest.query_tool(
            'select count(*) from %s' %
            (self.cb_bucket_name))['results'][0]['$1']
        if self.cbas_util.validate_cbas_dataset_items_count(
                self.cbas_dataset_name, count_n1ql):
            pass
        else:
            # Counts can transiently diverge while KV ejects docs;
            # re-query once before asserting
            self.log.info(
                "Document count mismatch might be due to ejection of documents on KV. Retry again"
            )
            count_n1ql = self.rest.query_tool(
                'select count(*) from %s' %
                (self.cb_bucket_name))['results'][0]['$1']
            self.assertTrue(self.cbas_util.validate_cbas_dataset_items_count(
                self.cbas_dataset_name, count_n1ql),
                            msg="Count mismatch on CBAS")
Пример #27
0
 def verify_data(server,
                 keys,
                 value_equal_to_key,
                 verify_flags,
                 test,
                 debug=False,
                 bucket="default"):
     """Verify that every key in 'keys' is readable via memcached and,
     optionally, that its value/flags match expectations.

     :param server: Server to connect the memcached client to
     :param keys: Iterable of doc keys to verify
     :param value_equal_to_key: If True, assert value == key
     :param verify_flags: If True, assert stored flag == adler32(value)
     :param test: TestCase instance used for assertions
     :param debug: If True, log each verified key and fetch errors
     :param bucket: Bucket name (default "default")
     :return: True when all keys verified, False otherwise
     """
     log_error_count = 0
     # verify all the keys
     log = logger.get("infra")
     client = MemcachedClientHelper.direct_client(server, bucket)
     vbucket_count = len(BucketHelper(server).get_vbuckets(bucket))
     # populate key
     index = 0
     all_verified = True
     keys_failed = []
     for key in keys:
         try:
             index += 1
             # Route the get() to the vbucket owning this key
             # (assumes vbucket_count is a power of two — TODO confirm)
             vbucketId = crc32.crc32_hash(key) & (vbucket_count - 1)
             client.vbucketId = vbucketId
             flag, keyx, value = client.get(key=key)
             if value_equal_to_key:
                 test.assertEquals(value, key, msg='values dont match')
             if verify_flags:
                 # Flags travel over the wire in network byte order
                 actual_flag = socket.ntohl(flag)
                 expected_flag = ctypes.c_uint32(zlib.adler32(value)).value
                 test.assertEquals(actual_flag,
                                   expected_flag,
                                   msg='flags dont match')
             if debug:
                 log.info("verified key #{0} : {1}".format(index, key))
         except mc_bin_client.MemcachedError as error:
             if debug:
                 log_error_count += 1
                 # Cap error logging at 100 entries to avoid log spam
                 if log_error_count < 100:
                     log.error(error)
                     # {1} (was a duplicated {0}) so the key is printed too
                     log.error(
                         "memcachedError : {0} - unable to get a pre-inserted key : {1}"
                         .format(error.status, key))
             keys_failed.append(key)
             all_verified = False
     client.close()
     if len(keys_failed) > 0:
         log.error('unable to verify #{0} keys'.format(len(keys_failed)))
     return all_verified
Пример #28
0
    def set_limits_for_all_users(self, resource_name, resource_limit):
        """ Sets limits for all users and makes them take effect.

        Builds a LimitConfig from the given resource plus any extra
        resources configured on the test, applies the user-level limits to
        every user and the scope-level limits to every user's scope, then
        validates both were persisted before enforcing them.

        :param resource_name: name of the primary resource to limit
        :param resource_limit: limit value for the primary resource
        """
        # Configure limits from the test params
        limit_config = LimitConfig()
        limit_config.set_limit(resource_name, resource_limit)

        # Configure extra limits.
        # NOTE: loop variables renamed so they no longer shadow the
        # function parameters (resource_name / resource_limit).
        for extra_name, extra_limit in self.extra_resources.items():
            limit_config.set_limit(extra_name, int(extra_limit))

        # Set user limits
        for user in self.users:
            self.rest.add_set_builtin_user(
                user.username, user.params(limit_config.get_user_config()))

        # A pattern for matching UUID-4
        pattern = re.compile(
            r"^[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$"
        )

        # Ensure the user limits were set
        if limit_config.get_user_config():
            for user in self.users:
                content = self.rest.get_builtin_user(user.username)
                # Ensure the user's uuid is a UUID-4
                self.assertTrue(pattern.match(content['uuid']),
                                "The user's uuid does not match a UUID-4")
                self.assertEqual(content['limits'],
                                 limit_config.get_user_config())

        # Set scope limits
        for user in self.users:
            self.rest.set_scope_limit(self.bucket_name, user.scope.name,
                                      limit_config.get_scope_config())

        # Ensure the scope limits were set
        if limit_config.get_scope_config():
            for user in self.users:
                _, content = BucketHelper(
                    self.cluster.master).list_collections(self.bucket_name)
                scope = next(scope for scope in json.loads(content)['scopes']
                             if scope['name'] == user.scope.name)
                self.assertEqual(scope['limits'],
                                 limit_config.get_scope_config())

        # Make the configured limits take effect
        self.enforce()
Пример #29
0
    def test_no_eviction_impact_on_cbas(self):
        """Fill the KV bucket until memory is exhausted and confirm the
        CBAS dataset item count still matches the N1QL count."""
        self.log.info("Create dataset")
        self.cbas_util.create_dataset_on_bucket(self.cb_bucket_name,
                                                self.cbas_dataset_name)

        self.log.info("Connect to Local link")
        self.cbas_util.connect_link()

        self.log.info("Add documents until ram percentage")
        self.load_document_until_ram_percentage()

        self.log.info("Fetch current document count")
        kv_bucket_helper = BucketHelper(self.master)
        base_item_count = kv_bucket_helper.get_bucket(
            self.cb_bucket_name).stats.itemCount
        self.log.info("Completed base load with %s items" % base_item_count)

        self.log.info("Load more until we are out of memory")
        sdk_client = SDKClient(hosts=[self.master.ip],
                               bucket=self.cb_bucket_name,
                               password=self.master.rest_password)
        doc_idx = base_item_count
        # Keep inserting until an insert fails; the counter is bumped even
        # on the failing attempt, mirroring the original accounting.
        while True:
            inserted = sdk_client.insert_document("key-id" + str(doc_idx),
                                                  '{"name":"dave"}')
            doc_idx += 1
            if not inserted:
                break

        self.log.info('Memory is full at {0} items'.format(doc_idx))
        self.log.info("As a result added more %s items"
                      % (doc_idx - base_item_count))

        self.log.info("Fetch item count")
        oom_stats = kv_bucket_helper.get_bucket(self.cb_bucket_name).stats
        items_at_oom = oom_stats.itemCount
        memory_at_oom = oom_stats.memUsed
        self.log.info('Item count when OOM {0} and memory used {1}'.format(
            items_at_oom, memory_at_oom))

        self.log.info("Validate document count on CBAS")
        n1ql_count = self.rest.query_tool(
            'select count(*) from %s' %
            (self.cb_bucket_name))['results'][0]['$1']
        self.assertTrue(self.cbas_util.validate_cbas_dataset_items_count(
            self.cbas_dataset_name, n1ql_count),
                        msg="Count mismatch on CBAS")
Пример #30
0
    def execute(self):
        """Trigger the bucket flush and advance the task state machine.

        On a successful flush request the task moves to CHECKING and is
        rescheduled; on failure or any exception it moves to FINISHED.
        """
        try:
            bucket_helper = BucketHelper(self.server)
            if not bucket_helper.flush_bucket(self.bucket):
                # Flush request rejected: finish with a negative result
                self.state = FINISHED
                self.set_result(False)
            else:
                # Flush accepted: re-queue ourselves to verify completion
                self.state = CHECKING
                self.task_manager.schedule(self)

        except BucketFlushFailed as e:
            self.state = FINISHED
            self.set_unexpected_exception(e)

        except Exception as e:
            # Any other error also terminates the task
            self.state = FINISHED
            self.set_unexpected_exception(e)