Example No. 1
    def test_invalid_name_collection(self):
        for _ in range(1000):
            scope_name = BucketUtils.get_random_name(invalid_name=True)
            try:
                status, content = BucketHelper(
                    self.cluster.master).create_scope(self.bucket, scope_name)
                if status is True:
                    self.log_failure("Scope '%s::%s' creation not failed: %s" %
                                     (self.bucket, scope_name, content))
            except Exception as e:
                self.log.debug(e)

        for _ in range(1000):
            collection_name = BucketUtils.get_random_name(invalid_name=True)
            try:
                status, content = BucketHelper(self.cluster.master) \
                    .create_collection(self.bucket, CbServer.default_scope,
                                       collection_name)
                if status is True:
                    self.log_failure(
                        "Collection '%s::%s::%s' creation not failed: %s" %
                        (self.bucket, CbServer.default_scope, collection_name,
                         content))
            except Exception as e:
                self.log.debug(e)

        # Validate doc count as per bucket collections
        self.bucket_util.validate_docs_per_collections_all_buckets()
        self.validate_test_failure()
Example No. 2
    def setUp(self):
        super(ExpiryMaxTTL, self).setUp()

        # Create default bucket
        self.create_bucket(self.cluster)

        self.key = 'test_ttl_docs'.rjust(self.key_size, '0')

        if self.target_vbucket and type(self.target_vbucket) is not list:
            self.target_vbucket = [self.target_vbucket]

        self.bucket_util.get_all_buckets(self.cluster)
        self.bucket_helper_obj = BucketHelper(self.cluster.master)
        self.cluster_util.print_cluster_stats(self.cluster)
        self.bucket_util.print_bucket_stats(self.cluster)

        # Create sdk_clients for pool
        if self.sdk_client_pool:
            self.log.info("Creating SDK client pool")
            self.sdk_client_pool.create_clients(
                self.cluster.buckets[0],
                self.cluster.nodes_in_cluster,
                req_clients=self.sdk_pool_capacity,
                compression_settings=self.sdk_compression)

        self.log.info("==========Finished ExpiryMaxTTL base setup========")
Example No. 3
 def load_bucket_into_dgm(self,
                          cluster,
                          bucket,
                          key,
                          num_items,
                          active_resident_threshold,
                          load_batch_size=20000,
                          batch_size=10,
                          process_concurrency=4,
                          persist_to=None,
                          replicate_to=None):
     rest = BucketHelper(cluster.master)
     bucket_stat = rest.get_bucket_stats_for_node(bucket.name,
                                                  cluster.master)
     while bucket_stat["vb_active_resident_items_ratio"] > \
             active_resident_threshold:
         gen_load = doc_generator(key,
                                  num_items,
                                  num_items + load_batch_size,
                                  doc_type="binary")
         num_items += load_batch_size
         task = self.async_load_gen_docs(
             cluster,
             bucket,
             gen_load,
             "create",
             0,
             batch_size=batch_size,
             process_concurrency=process_concurrency,
             persist_to=persist_to,
             replicate_to=replicate_to)
         self.jython_task_manager.get_task_result(task)
         bucket_stat = rest.get_bucket_stats_for_node(
             bucket.name, cluster.master)
     return num_items
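The loop keeps loading batches of load_batch_size docs until the reported vb_active_resident_items_ratio drops below active_resident_threshold, then returns the updated item count. A minimal usage sketch, assuming the helper is exposed on bucket_util and that a 90% residency target is wanted (both are assumptions, not shown above):

    # Hypothetical call: drive the first bucket into DGM before an
    # eviction-sensitive test. The 90% threshold is an illustrative value.
    self.num_items = self.bucket_util.load_bucket_into_dgm(
        self.cluster,
        self.cluster.buckets[0],
        self.key,
        self.num_items,
        active_resident_threshold=90)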
Example No. 4
    def load_document_until_ram_percentage(self):
        self.start = 0
        doc_batch_size = 5000
        self.end = doc_batch_size
        bucket_helper = BucketHelper(self.cluster.master)
        mem_cap = (self.document_ram_percentage * self.bucket_ram * 1000000)
        while True:
            self.log.info("Add documents to bucket")
            self.perform_doc_ops_in_all_cb_buckets(
                "create",
                self.start,
                self.end,
                durability=self.durability_level)

            self.log.info("Calculate available free memory")
            bucket_json = bucket_helper.get_bucket_json(self.cb_bucket_name)
            mem_used = 0
            for node_stat in bucket_json["nodes"]:
                mem_used += node_stat["interestingStats"]["mem_used"]

            if mem_used < mem_cap:
                self.log.info("Memory used: %s < %s" % (mem_used, mem_cap))
                self.start = self.end
                self.end = self.end + doc_batch_size
                self.num_items = self.end
            else:
                break
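The cap is derived from the bucket quota: assuming document_ram_percentage is a fraction (e.g. 0.8) and bucket_ram is the quota in MB, the *1000000 factor converts it to bytes. A worked example under those assumptions:

    # Illustrative numbers only (not taken from any test config):
    #   bucket_ram              = 1024   # bucket quota in MB
    #   document_ram_percentage = 0.8
    #   mem_cap = 0.8 * 1024 * 1000000 = 819,200,000 bytes (~819 MB)
    # 5000-doc batches are created until the summed
    # interestingStats["mem_used"] of all nodes reaches this cap.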
Example No. 5
    def setUp(self):
        super(ExpiryMaxTTL, self).setUp()
        self.key = 'test_ttl_docs'.rjust(self.key_size, '0')

        if self.target_vbucket and type(self.target_vbucket) is not list:
            self.target_vbucket = [self.target_vbucket]

        nodes_init = self.cluster.servers[1:self.nodes_init] \
            if self.nodes_init != 1 else []
        self.task.rebalance([self.cluster.master], nodes_init, [])
        self.cluster.nodes_in_cluster.extend([self.cluster.master] +
                                             nodes_init)
        self.bucket_util.create_default_bucket(
            bucket_type=self.bucket_type,
            maxTTL=self.maxttl,
            storage=self.bucket_storage,
            eviction_policy=self.bucket_eviction_policy,
            replica=self.num_replicas,
            compression_mode=self.compression_mode)
        self.bucket_util.add_rbac_user()
        self.bucket_util.get_all_buckets()
        self.bucket_helper_obj = BucketHelper(self.cluster.master)
        self.cluster_util.print_cluster_stats()
        self.bucket_util.print_bucket_stats()

        # Create sdk_clients for pool
        if self.sdk_client_pool:
            self.log.info("Creating SDK client pool")
            self.sdk_client_pool.create_clients(
                self.bucket_util.buckets[0],
                self.cluster.nodes_in_cluster,
                req_clients=self.sdk_pool_capacity,
                compression_settings=self.sdk_compression)

        self.log.info("==========Finished ExpiryMaxTTL base setup========")
Example No. 6
 def get_bucket_dgm(self, bucket):
     self.rest_client = BucketHelper(self.cluster.master)
     dgm = self.rest_client.fetch_bucket_stats(
         bucket.name)["op"]["samples"]["vb_active_resident_items_ratio"][-1]
     self.log.info("Active Resident Threshold of {0} is {1}".format(
         bucket.name, dgm))
     return dgm
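The helper simply returns the latest vb_active_resident_items_ratio sample, so a test can assert on it directly. A small sketch, assuming the method is reachable from the test class (as in the retrying variant in Example No. 12) and using 90% as an assumed DGM threshold:

    # Sketch: confirm the bucket actually dropped into DGM after loading.
    dgm = self.get_bucket_dgm(self.cluster.buckets[0])
    if dgm > 90:
        self.fail("Bucket still %s%% resident, DGM not reached" % dgm)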
Example No. 7
    def test_MB_34947(self):
        # Update already Created docs with async_writes
        load_gen = doc_generator(self.key, 0, self.num_items,
                                 key_size=self.key_size,
                                 doc_size=self.doc_size,
                                 doc_type=self.doc_type,
                                 vbuckets=self.cluster.vbuckets)
        task = self.task.async_load_gen_docs(
            self.cluster, self.def_bucket, load_gen, "update", 0,
            persist_to=self.persist_to, replicate_to=self.replicate_to,
            timeout_secs=self.sdk_timeout,
            batch_size=10, process_concurrency=8)
        self.task.jython_task_manager.get_task_result(task)

        # Update bucket replica to new value
        bucket_helper = BucketHelper(self.cluster.master)
        bucket_helper.change_bucket_props(
            self.def_bucket, replicaNumber=self.new_replica)
        self.bucket_util.print_bucket_stats(self.cluster)

        # Start rebalance task
        rebalance = self.task.async_rebalance(self.cluster.servers, [], [])
        self.sleep(10, "Wait for rebalance to start")

        # Wait for rebalance task to complete
        self.task.jython_task_manager.get_task_result(rebalance)

        # Assert if rebalance failed
        self.assertTrue(rebalance.result,
                        "Rebalance failed after replica update")
Example No. 8
 def set_num_writer_and_reader_threads(self,
                                       num_writer_threads="default",
                                       num_reader_threads="default"):
     for node in self.cluster_util.get_kv_nodes():
         bucket_helper = BucketHelper(node)
         bucket_helper.update_memcached_settings(
             num_writer_threads=num_writer_threads,
             num_reader_threads=num_reader_threads)
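Example No. 17 below invokes this helper when disk_optimized_thread_settings is enabled; the call it makes is simply:

    # Usage from CollectionBase.setUp (Example No. 17): switch every KV
    # node to the disk-optimised reader/writer thread preset.
    self.set_num_writer_and_reader_threads(
        num_writer_threads="disk_io_optimized",
        num_reader_threads="disk_io_optimized")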
Example No. 9
 def set_num_writer_and_reader_threads(self,
                                       num_writer_threads="default",
                                       num_reader_threads="default",
                                       num_storage_threads="default"):
     bucket_helper = BucketHelper(self.cluster.master)
     bucket_helper.update_memcached_settings(
         num_writer_threads=num_writer_threads,
         num_reader_threads=num_reader_threads,
         num_storage_threads=num_storage_threads)
Example No. 10
    def load_document_until_ram_percentage(self):
        self.start = 0
        doc_batch_size = 5000
        self.end = doc_batch_size
        bucket_helper = BucketHelper(self.cluster.master)
        mem_cap = (self.document_ram_percentage * self.bucket_ram * 1000000)

        first = ['james', 'sharon', 'dave', 'bill', 'mike', 'steve']
        profession = ['doctor', 'lawyer']

        template_obj = JsonObject.create()
        template_obj.put("number", 0)
        template_obj.put("first_name", "")
        template_obj.put("profession", "")
        template_obj.put("mutated", 0)
        template_obj.put("mutation_type", "ADD")

        while True:
            self.log.info("Add documents to bucket")

            doc_gen = DocumentGenerator("test_docs",
                                        template_obj,
                                        start=self.start,
                                        end=self.end,
                                        randomize=False,
                                        first_name=first,
                                        profession=profession,
                                        number=range(70))

            try:
                self.bucket_util.sync_load_all_buckets(
                    self.cluster,
                    doc_gen,
                    "create",
                    0,
                    batch_size=doc_batch_size,
                    durability=self.durability_level,
                    suppress_error_table=True)
            except Exception as e:
                self.fail("Following error occurred while loading bucket - {"
                          "0}".format(str(e)))

            self.log.info("Calculate available free memory")
            bucket_json = bucket_helper.get_bucket_json(self.bucket_name)
            mem_used = 0
            for node_stat in bucket_json["nodes"]:
                mem_used += node_stat["interestingStats"]["mem_used"]

            if mem_used < mem_cap:
                self.log.info("Memory used: %s < %s" % (mem_used, mem_cap))
                self.start = self.end
                self.end = self.end + doc_batch_size
                self.num_items = self.end
            else:
                break
Example No. 11
 def update_bucket_replica(self):
     self.log.info("Updating all the bucket replicas to {0}".format(
         self.replicas_for_failover))
     for i in range(len(self.bucket_util.buckets)):
         bucket_helper = BucketHelper(self.cluster.master)
         bucket_helper.change_bucket_props(
             self.bucket_util.buckets[i],
             replicaNumber=self.replicas_for_failover)
     task = self.task.async_rebalance(
         self.cluster.servers[:self.nodes_init], [], [])
     self.task.jython_task_manager.get_task_result(task)
     self.log.info("Bucket stats before failover")
     self.bucket_util.print_bucket_stats()
Example No. 12
 def get_bucket_dgm(self, bucket):
     self.rest_client = BucketHelper(self.cluster.master)
     count = 0
     dgm = 100
     while count < 5:
         try:
             dgm = self.rest_client.fetch_bucket_stats(
                 bucket.name
             )["op"]["samples"]["vb_active_resident_items_ratio"][-1]
             self.log.info("Active Resident Threshold of {0} is {1}".format(
                 bucket.name, dgm))
             return dgm
         except Exception as e:
             self.sleep(5, e)
         count += 1
     return dgm
Example No. 13
    def durability_succeeds(self,
                            bucket_name,
                            master,
                            induced_error=None,
                            failed_nodes=[]):
        """
        Determines whether the durability will fail/work based on
        the type of error_induced during the test and number of nodes the
        error is induced on.

        :param bucket_name:   Name of the bucket used for fetching
                              the replica value (str)
        :param master:        Master node from the cluster
        :param induced_error: Error induced during the test execution (str)
        :param failed_nodes:  Nodes that failed due to the induced_error (list)

        :return durability_succeeds: Durability status for the bucket (bool)
        """
        durability_succeeds = True
        bucket = BucketHelper(master).get_bucket_json(bucket_name)
        min_nodes_req = bucket["replicaNumber"] + 1
        majority_value = floor(min_nodes_req / 2) + 1

        if induced_error is None:
            if (self.cluster_len - len(failed_nodes)) < majority_value:
                durability_succeeds = False
        else:
            if (self.durability == Bucket.DurabilityLevel.MAJORITY
                    and induced_error in self.disk_error_types):
                durability_succeeds = True
            elif (self.cluster_len - len(failed_nodes)) < majority_value:
                durability_succeeds = False

        return durability_succeeds
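Reading the formula with concrete numbers makes the cut-off clear; the values below are illustrative only:

    # Illustrative walk-through of the majority check:
    #   bucket["replicaNumber"] = 2  ->  min_nodes_req  = 3
    #   majority_value = floor(3 / 2) + 1 = 2
    #   cluster_len = 4, len(failed_nodes) = 2, induced_error = None
    #   4 - 2 = 2, which is not < 2  ->  durability_succeeds stays True
    #   a third failed node (4 - 3 = 1 < 2) would flip it to False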
Example No. 14
    def test_bucket_flush_while_index_are_created(self):
        self.log.info("Add documents, create CBAS buckets, "
                      "dataset and validate count")
        self.setup_for_test()

        self.log.info('Disconnect CBAS bucket')
        self.cbas_util.disconnect_from_bucket(self.cbas_bucket_name)

        self.log.info('Create secondary index in Async')
        index_fields = self.input.param("index_fields", None)
        index_fields = index_fields.replace('-', ',')
        query = "create index {0} on {1}({2});" \
            .format("sec_idx", self.cbas_dataset_name, index_fields)
        create_index_task = self.task.async_cbas_query_execute(
            self.cluster.master, self.cbas_util, None, query, 'default')

        self.log.info('Flush bucket while index are getting created')
        # Flush the CB bucket
        BucketHelper(self.cluster.master).flush_bucket(self.cb_bucket_name)

        self.log.info('Get result on index creation')
        self.task_manager.get_task_result(create_index_task)

        self.log.info('Connect back cbas bucket')
        self.cbas_util.connect_to_bucket(self.cbas_bucket_name)

        self.log.info('Validate no. of items in CBAS dataset')
        if not self.cbas_util.validate_cbas_dataset_items_count(
                self.cbas_dataset_name, 0):
            self.fail("No. of items in CBAS dataset do not match "
                      "that in the CB bucket")
Example No. 15
    def test_replica_update(self):
        if self.atomicity:
            replica_count = 3
        else:
            replica_count = 4
        if self.nodes_init < 2:
            self.log.error("Test not supported for < 2 node cluster")
            return

        doc_ops = self.input.param("doc_ops", "")
        bucket_helper = BucketHelper(self.cluster.master)

        doc_count = self.num_items
        start_doc_for_insert = self.num_items

        # Replica increment tests
        doc_count, start_doc_for_insert = self.generic_replica_update(
            doc_count, doc_ops, bucket_helper,
            range(1, min(replica_count, self.nodes_init)),
            start_doc_for_insert)

        # Replica decrement tests
        _, _ = self.generic_replica_update(
            doc_count, doc_ops, bucket_helper,
            range(min(replica_count, self.nodes_init) - 2, -1, -1),
            start_doc_for_insert)
Example No. 16
    def test_nru_eviction_impact_on_cbas(self):

        self.log.info("Create dataset")
        self.cbas_util.create_dataset_on_bucket(self.cb_bucket_name,
                                                self.cbas_dataset_name)

        self.log.info("Connect to Local link")
        self.cbas_util.connect_link()

        self.log.info("Add documents until ram percentage")
        self.load_document_until_ram_percentage()

        self.log.info("Fetch current document count")
        bucket_helper = BucketHelper(self.master)
        item_count = bucket_helper.get_bucket(
            self.cb_bucket_name).stats.itemCount
        self.log.info("Completed base load with %s items" % item_count)

        self.log.info(
            "Fetch initial inserted 100 documents, so they are not removed")
        client = SDKClient(hosts=[self.master.ip],
                           bucket=self.cb_bucket_name,
                           password=self.master.rest_password)
        for i in range(100):
            client.get("test_docs-" + str(i))

        self.log.info("Add 20% more items to trigger NRU")
        for i in range(item_count, int(item_count * 1.2)):
            client.insert_document("key-id" + str(i), '{"name":"dave"}')

        self.log.info("Validate document count on CBAS")
        count_n1ql = self.rest.query_tool(
            'select count(*) from %s' %
            (self.cb_bucket_name))['results'][0]['$1']
        if not self.cbas_util.validate_cbas_dataset_items_count(
                self.cbas_dataset_name, count_n1ql):
            self.log.info(
                "Document count mismatch might be due to ejection of "
                "documents on KV. Retry again")
            count_n1ql = self.rest.query_tool(
                'select count(*) from %s' %
                (self.cb_bucket_name))['results'][0]['$1']
            self.assertTrue(self.cbas_util.validate_cbas_dataset_items_count(
                self.cbas_dataset_name, count_n1ql),
                            msg="Count mismatch on CBAS")
Example No. 17
    def setUp(self):
        super(CollectionBase, self).setUp()
        self.log_setup_status("CollectionBase", "started")

        self.key = 'test_collection'.rjust(self.key_size, '0')
        self.simulate_error = self.input.param("simulate_error", None)
        self.error_type = self.input.param("error_type", "memory")
        self.doc_ops = self.input.param("doc_ops", None)
        # If True, creates bucket/scope/collections with simpler names
        self.use_simple_names = self.input.param("use_simple_names", True)
        self.spec_name = self.input.param("bucket_spec",
                                          "single_bucket.default")
        self.data_spec_name = self.input.param("data_spec_name",
                                               "initial_load")
        self.remove_default_collection = \
            self.input.param("remove_default_collection", False)

        self.action_phase = self.input.param("action_phase",
                                             "before_default_load")
        self.skip_collections_cleanup = \
            self.input.param("skip_collections_cleanup", False)
        self.validate_docs_count_during_teardown = \
            self.input.param("validate_docs_count_during_teardown", False)
        self.batch_size = self.input.param("batch_size", 200)
        self.process_concurrency = self.input.param("process_concurrency", 1)
        self.retry_get_process_num = \
            self.input.param("retry_get_process_num", 200)
        self.change_magma_quota = self.input.param("change_magma_quota", False)
        self.crud_batch_size = 100
        self.num_nodes_affected = 1
        if self.num_replicas > 1:
            self.num_nodes_affected = 2

        if self.doc_ops:
            self.doc_ops = self.doc_ops.split(';')

        self.durability_helper = DurabilityHelper(
            self.log, len(self.cluster.nodes_in_cluster),
            self.durability_level)

        # Disable auto-failover to avoid failover of nodes
        status = RestConnection(self.cluster.master) \
            .update_autofailover_settings(False, 120)
        self.assertTrue(status, msg="Failure during disabling auto-failover")
        self.bucket_helper_obj = BucketHelper(self.cluster.master)
        self.disk_optimized_thread_settings = self.input.param(
            "disk_optimized_thread_settings", False)
        if self.disk_optimized_thread_settings:
            self.set_num_writer_and_reader_threads(num_writer_threads="disk_io_optimized",
                                                   num_reader_threads="disk_io_optimized")

        try:
            self.collection_setup()
        except Java_base_exception as exception:
            self.handle_setup_exception(exception)
        except Exception as exception:
            self.handle_setup_exception(exception)
        self.supported_d_levels = \
            self.bucket_util.get_supported_durability_levels()
        self.log_setup_status("CollectionBase", "complete")
Example No. 18
    def execute(self):
        try:
            rest = BucketHelper(self.server)
            if rest.flush_bucket(self.bucket):
                self.state = CHECKING
                self.task_manager.schedule(self)
            else:
                self.state = FINISHED
                self.set_result(False)

        except BucketFlushFailed as e:
            self.state = FINISHED
            self.set_unexpected_exception(e)

        except Exception as e:
            self.state = FINISHED
            self.set_unexpected_exception(e)
Example No. 19
    def test_no_eviction_impact_on_cbas(self):

        self.log.info("Create dataset")
        self.cbas_util.create_dataset_on_bucket(self.cb_bucket_name,
                                                self.cbas_dataset_name)

        self.log.info("Connect to Local link")
        self.cbas_util.connect_link()

        self.log.info("Add documents until ram percentage")
        self.load_document_until_ram_percentage()

        self.log.info("Fetch current document count")
        bucket_helper = BucketHelper(self.master)
        item_count = bucket_helper.get_bucket(
            self.cb_bucket_name).stats.itemCount
        self.log.info("Completed base load with %s items" % item_count)

        self.log.info("Load more until we are out of memory")
        client = SDKClient(hosts=[self.master.ip],
                           bucket=self.cb_bucket_name,
                           password=self.master.rest_password)
        i = item_count
        insert_success = True
        while insert_success:
            insert_success = client.insert_document("key-id" + str(i),
                                                    '{"name":"dave"}')
            i += 1

        self.log.info('Memory is full at {0} items'.format(i))
        self.log.info("As a result added more %s items" % (i - item_count))

        self.log.info("Fetch item count")
        stats = bucket_helper.get_bucket(self.cb_bucket_name).stats
        itemCountWhenOOM = stats.itemCount
        memoryWhenOOM = stats.memUsed
        self.log.info('Item count when OOM {0} and memory used {1}'.format(
            itemCountWhenOOM, memoryWhenOOM))

        self.log.info("Validate document count on CBAS")
        count_n1ql = self.rest.query_tool(
            'select count(*) from %s' %
            (self.cb_bucket_name))['results'][0]['$1']
        self.assertTrue(self.cbas_util.validate_cbas_dataset_items_count(
            self.cbas_dataset_name, count_n1ql),
                        msg="Count mismatch on CBAS")
Example No. 20
 def _record_vbuckets(self, master, servers):
     map = dict()
     for bucket in self.buckets:
         self.log.info("Record vbucket for the bucket {0}".format(
             bucket.name))
         map[bucket.name] = BucketHelper(master)\
             ._get_vbuckets(servers, bucket_name=bucket.name)
     #self.log.info("Map: {0}".format(map))
     return map
Example No. 21
    def setUp(self):
        super(DCPSeqItr, self).setUp()

        self.vbuckets = range(1024)
        self.start_seq_no_list = self.input.param("start",
                                                  [0] * len(self.vbuckets))
        self.end_seq_no = self.input.param("end", 0xffffffffffffffff)
        self.vb_uuid_list = self.input.param("vb_uuid_list",
                                             ['0'] * len(self.vbuckets))
        self.vb_retry = self.input.param("retry_limit", 10)
        self.filter_file = self.input.param("filter", None)
        self.bucket_helper_obj = BucketHelper(self.cluster.master)
        self.items = copy.deepcopy(self.init_num_items)
        self.bucket = self.cluster.buckets[0]
        self.dcp_util = DCPUtils(self.cluster.master, self.cluster.buckets[0],
                                 self.start_seq_no_list, self.end_seq_no,
                                 self.vb_uuid_list)
        self.dcp_util.initialise_cluster_connections()
Example No. 22
    def setUp(self):
        super(CollectionBase, self).setUp()
        self.log_setup_status("CollectionBase", "started")

        self.MAX_SCOPES = CbServer.max_scopes
        self.MAX_COLLECTIONS = CbServer.max_collections
        self.key = 'test_collection'.rjust(self.key_size, '0')
        self.simulate_error = self.input.param("simulate_error", None)
        self.error_type = self.input.param("error_type", "memory")
        self.doc_ops = self.input.param("doc_ops", None)
        self.spec_name = self.input.param("bucket_spec",
                                          "single_bucket.default")
        self.data_spec_name = self.input.param("data_spec_name",
                                               "initial_load")
        self.remove_default_collection = \
            self.input.param("remove_default_collection", False)

        self.action_phase = self.input.param("action_phase",
                                             "before_default_load")
        self.skip_collections_cleanup = \
            self.input.param("skip_collections_cleanup", False)
        self.validate_docs_count_during_teardown = \
            self.input.param("validate_docs_count_during_teardown", False)
        self.batch_size = self.input.param("batch_size", 200)
        self.vbuckets = self.input.param("vbuckets",
                                         self.cluster_util.vbuckets)
        self.retry_get_process_num = self.input.param("retry_get_process_num",
                                                      25)

        self.crud_batch_size = 100
        self.num_nodes_affected = 1
        if self.num_replicas > 1:
            self.num_nodes_affected = 2

        if self.doc_ops:
            self.doc_ops = self.doc_ops.split(';')

        self.durability_helper = DurabilityHelper(
            self.log, len(self.cluster.nodes_in_cluster),
            self.durability_level)

        # Disable auto-failover to avoid failover of nodes
        status = RestConnection(self.cluster.master) \
            .update_autofailover_settings(False, 120, False)
        self.assertTrue(status, msg="Failure during disabling auto-failover")
        self.bucket_helper_obj = BucketHelper(self.cluster.master)

        try:
            self.collection_setup()
        except Java_base_exception as exception:
            self.handle_setup_exception(exception)
        except Exception as exception:
            self.handle_setup_exception(exception)
        self.supported_d_levels = \
            self.bucket_util.get_supported_durability_levels()
        self.log_setup_status("CollectionBase", "complete")
Example No. 23
    def execute(self):

        try:
            status = BucketHelper(self.server).compact_bucket(self.bucket)
            self.state = CHECKING
            self.call()
        except BucketCompactionException as e:
            self.test_log.error("Bucket compaction failed: {0}".format(e))
            self.set_unexpected_exception(e)
            self.state = FINISHED
            self.set_result(False)
Example No. 24
    def execute(self):

        try:
            status = BucketHelper(self.server).compact_bucket(self.bucket)
            self.state = CHECKING
            self.call()
        except BucketCompactionException as e:
            log.error("Bucket compaction failed for unknown reason")
            self.set_unexpected_exception(e)
            self.state = FINISHED
            self.set_result(False)
Example No. 25
    def compact_cb_bucket_with_cbas_connected(self):
        self.setup_for_test()

        # Compact the CB bucket
        BucketHelper(self.cluster.master).compact_bucket(self.cb_bucket_name)

        # Validate no. of items in CBAS dataset
        if not self.cbas_util.validate_cbas_dataset_items_count(
                self.cbas_dataset_name, self.num_items):
            self.fail("No. of items in CBAS dataset do not match "
                      "that in the CB bucket")
Example No. 26
    def setUp(self):
        super(ExpiryMaxTTL, self).setUp()
        self.key = 'test_ttl_docs'.rjust(self.key_size, '0')

        if self.target_vbucket and type(self.target_vbucket) is not list:
            self.target_vbucket = [self.target_vbucket]

        nodes_init = self.cluster.servers[1:self.nodes_init] \
            if self.nodes_init != 1 else []
        self.task.rebalance([self.cluster.master], nodes_init, [])
        self.cluster.nodes_in_cluster.extend([self.cluster.master] +
                                             nodes_init)
        self.bucket_util.create_default_bucket(
            bucket_type=self.bucket_type,
            maxTTL=self.maxttl,
            replica=self.num_replicas,
            compression_mode=self.compression_mode)
        self.bucket_util.add_rbac_user()
        self.bucket_util.get_all_buckets()
        self.bucket_helper_obj = BucketHelper(self.cluster.master)
        self.log.info("==========Finished ExpiryMaxTTL base setup========")
Example No. 27
    def create_scope_collections(self):
        self.__print_step("Creating required scope/collections")
        BucketHelper(self.cluster.master).import_collection_using_manifest(
            self.bucket.name,
            str(collection_spec).replace("'", '"'))
        self.bucket.stats.increment_manifest_uid()

        for scope in collection_spec["scopes"]:
            self.bucket_util.create_scope_object(self.bucket, scope)
            for collection in scope["collections"]:
                self.bucket_util.create_collection_object(
                    self.bucket, scope["name"], collection)
Example No. 28
    def initialise(self):
        # Create the collections for this task
        for collection in self.collections:
            BucketHelper(self.node).create_collection(
                Bucket({'name': self.bucket}), self.scope.name, {"name": collection})

        # A client for each collection
        self.clients = {}
        for collection in self.collections:
            self.clients[collection] = \
                SDKClient([self.node], Bucket(
                    {'name': self.bucket}), scope=self.scope.name, collection=collection)
Example No. 29
 def __init__(self,
              master,
              eventing_nodes,
              src_bucket_name='src_bucket',
              dst_bucket_name='dst_bucket',
              metadata_bucket_name='metadata',
              dst_bucket_name1='dst_bucket_name1',
              eventing_log_level='INFO',
              use_memory_manager=True,
              timer_storage_chan_size=10000,
              dcp_gen_chan_size=10000,
              is_sbm=False,
              is_curl=False,
              hostname='https://postman-echo.com/',
              auth_type='no-auth',
              curl_username=None,
              curl_password=None,
              cookies=False,
              print_eventing_handler_code_in_logs=True):
     self.log = logging.getLogger("test")
     self.eventing_nodes = eventing_nodes
     self.master = master
     self.eventing_helper = EventingHelper(self.eventing_nodes[0])
     self.src_bucket_name = src_bucket_name
     self.dst_bucket_name = dst_bucket_name
     self.metadata_bucket_name = metadata_bucket_name
     self.dst_bucket_name1 = dst_bucket_name1
     self.eventing_log_level = eventing_log_level
     self.use_memory_manager = use_memory_manager
     self.timer_storage_chan_size = timer_storage_chan_size
     self.dcp_gen_chan_size = dcp_gen_chan_size
     self.is_sbm = is_sbm
     self.is_curl = is_curl
     self.hostname = hostname
     self.auth_type = auth_type
     self.curl_username = curl_username
     self.curl_password = curl_password
     self.cookies = cookies
     self.bucket_helper = BucketHelper(self.master)
     self.print_eventing_handler_code_in_logs = print_eventing_handler_code_in_logs
Example No. 30
    def execute(self):
        try:
            rest = RestConnection(self.server)
        except ServerUnavailableException as error:
            self.state = FINISHED
            self.set_unexpected_exception(error)
            return
        info = rest.get_nodes_self()
        if int(info.port) in xrange(9091, 9991):
            self.port = info.port

        if self.bucket.ramQuotaMB <= 0:
            self.size = info.memoryQuota * 2 / 3

        if int(info.port) in xrange(9091, 9991):
            try:
                BucketHelper(self.server).create_bucket(self.bucket.__dict__)
                self.state = CHECKING
                self.call()
            except Exception as e:
                self.test_log.error(str(e))
                self.state = FINISHED
                #self. set_unexpected_exception(e)
            return
        version = rest.get_nodes_self().version
        try:
            if float(version[:2]) >= 3.0 and self.bucket_priority is not None:
                self.bucket.threadsNumber = self.bucket_priority
            BucketHelper(self.server).create_bucket(self.bucket.__dict__)
            self.state = CHECKING
            self.call()

        except BucketCreationException as e:
            self.state = FINISHED
            self.test_log.debug(str(e))
            #self. set_unexpected_exception(e)
        # catch and set all unexpected exceptions
        except Exception as e:
            self.test_log.error(str(e))
            self.state = FINISHED