def setUp(self):
     super(AutoFailoverBaseTest, self).setUp()
     self._get_params()
     self.rest = RestConnection(self.orchestrator)
     self.initial_load_gen = self.get_doc_generator(0, self.num_items)
     self.update_load_gen = self.get_doc_generator(0, self.update_items)
     self.delete_load_gen = self.get_doc_generator(self.update_items,
                                                   self.delete_items)
     self.set_up_cluster()
     self.load_all_buckets(self.initial_load_gen, "create", 0)
     self.server_index_to_fail = self.input.param("server_index_to_fail",
                                                  None)
     self.new_replica = self.input.param("new_replica", None)
     self.replica_update_during = self.input.param("replica_update_during",
                                                   None)
     if self.server_index_to_fail is None:
         self.server_to_fail = self._servers_to_fail()
     else:
         self.server_to_fail = [
             self.cluster.servers[self.server_index_to_fail]
         ]
     self.servers_to_add = self.cluster.servers[
         self.nodes_init:self.nodes_init + self.nodes_in]
     self.servers_to_remove = self.cluster.servers[
         self.nodes_init - self.nodes_out:self.nodes_init]
     self.durability_helper = DurabilityHelper(self.log,
                                               len(self.cluster.servers),
                                               self.durability_level)
     self.active_vb_in_failover_nodes = list()
     self.replica_vb_in_failover_nodes = list()
     self.get_vbucket_info_from_failover_nodes()
     self.cluster_util.print_cluster_stats()
     self.bucket_util.print_bucket_stats()
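
Across every setUp in this collection, the common thread is constructing a DurabilityHelper from the logger, the cluster's node count, and the durability settings. The real class ships with the Couchbase TAF framework; the standalone sketch below (class and method names are hypothetical) only illustrates the majority arithmetic that SyncWrite durability validation rests on.

import logging

class DurabilityHelperSketch(object):
    """Stand-in for TAF's DurabilityHelper; illustrates the majority math."""
    def __init__(self, log, num_nodes, durability="MAJORITY",
                 replicate_to=0, persist_to=0):
        self.log = log
        self.num_nodes = num_nodes          # KV nodes in the cluster
        self.durability = durability        # SyncWrite level under test
        self.replicate_to = replicate_to    # legacy observe-based option
        self.persist_to = persist_to        # legacy observe-based option

    def majority_value(self, num_replicas):
        # Majority over the replica set (active + replicas), capped by
        # the number of nodes actually available to host copies.
        replica_set_size = min(num_replicas + 1, self.num_nodes)
        return replica_set_size // 2 + 1

helper = DurabilityHelperSketch(logging.getLogger("test"), num_nodes=4)
assert helper.majority_value(num_replicas=1) == 2  # 2 copies: both required
assert helper.majority_value(num_replicas=2) == 2  # 3 copies: any 2 suffice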
Example 2
 def setUp(self):
     super(UpgradeTests, self).setUp()
     self.durability_helper = DurabilityHelper(
         self.log, len(self.cluster.nodes_in_cluster))
     self.verification_dict = dict()
     self.verification_dict["ops_create"] = self.num_items
     self.verification_dict["ops_delete"] = 0
Example 3
    def setUp(self):
        super(basic_ops, self).setUp()

        self.key = 'test_docs'.rjust(self.key_size, '0')

        nodes_init = self.cluster.servers[1:self.nodes_init] \
            if self.nodes_init != 1 else []
        self.task.rebalance([self.cluster.master], nodes_init, [])
        self.cluster.nodes_in_cluster.extend([self.cluster.master] +
                                             nodes_init)
        self.bucket_util.create_default_bucket(
            replica=self.num_replicas,
            compression_mode=self.compression_mode,
            bucket_type=self.bucket_type)
        self.bucket_util.add_rbac_user()

        self.src_bucket = self.bucket_util.get_all_buckets()
        self.durability_helper = DurabilityHelper(
            self.log,
            len(self.cluster.nodes_in_cluster),
            durability=self.durability_level,
            replicate_to=self.replicate_to,
            persist_to=self.persist_to)
        # Reset active_resident_threshold to avoid further data load as DGM
        self.active_resident_threshold = 0
        self.cluster_util.print_cluster_stats()
        self.bucket_util.print_bucket_stats()
        self.log.info("==========Finished Basic_ops base setup========")
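
The key template above left-pads the literal 'test_docs' with zeros up to key_size, so every generated key has a fixed width; when key_size is smaller than the literal, str.rjust simply returns the string unchanged:

print('test_docs'.rjust(15, '0'))  # '000000test_docs'
print('test_docs'.rjust(0, '0'))   # 'test_docs' (rjust never truncates)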
Example 4
    def setUp(self):
        super(CollectionBase, self).setUp()
        self.log_setup_status("CollectionBase", "started")

        self.key = 'test_collection'.rjust(self.key_size, '0')
        self.simulate_error = self.input.param("simulate_error", None)
        self.error_type = self.input.param("error_type", "memory")
        self.doc_ops = self.input.param("doc_ops", None)
        # If True, creates bucket/scope/collections with simpler names
        self.use_simple_names = self.input.param("use_simple_names", True)
        self.spec_name = self.input.param("bucket_spec",
                                          "single_bucket.default")
        self.data_spec_name = self.input.param("data_spec_name",
                                               "initial_load")
        self.remove_default_collection = \
            self.input.param("remove_default_collection", False)

        self.action_phase = self.input.param("action_phase",
                                             "before_default_load")
        self.skip_collections_cleanup = \
            self.input.param("skip_collections_cleanup", False)
        self.validate_docs_count_during_teardown = \
            self.input.param("validate_docs_count_during_teardown", False)
        self.batch_size = self.input.param("batch_size", 200)
        self.process_concurrency = self.input.param("process_concurrency", 1)
        self.retry_get_process_num = \
            self.input.param("retry_get_process_num", 200)
        self.change_magma_quota = self.input.param("change_magma_quota", False)
        self.crud_batch_size = 100
        self.num_nodes_affected = 1
        if self.num_replicas > 1:
            self.num_nodes_affected = 2

        if self.doc_ops:
            self.doc_ops = self.doc_ops.split(';')

        self.durability_helper = DurabilityHelper(
            self.log, len(self.cluster.nodes_in_cluster),
            self.durability_level)

        # Disable auto-failover to avoid failover of nodes
        status = RestConnection(self.cluster.master) \
            .update_autofailover_settings(False, 120)
        self.assertTrue(status, msg="Failure during disabling auto-failover")
        self.bucket_helper_obj = BucketHelper(self.cluster.master)
        self.disk_optimized_thread_settings = self.input.param(
            "disk_optimized_thread_settings", False)
        if self.disk_optimized_thread_settings:
            self.set_num_writer_and_reader_threads(
                num_writer_threads="disk_io_optimized",
                num_reader_threads="disk_io_optimized")

        try:
            self.collection_setup()
        except Java_base_exception as exception:
            self.handle_setup_exception(exception)
        except Exception as exception:
            self.handle_setup_exception(exception)
        self.supported_d_levels = \
            self.bucket_util.get_supported_durability_levels()
        self.log_setup_status("CollectionBase", "complete")
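
The two seemingly redundant except clauses around collection_setup() are deliberate: these suites run under Jython, where exceptions thrown by Java libraries do not derive from Python's Exception, so a plain `except Exception` would miss them. A sketch of the pattern, with the Java import guarded so it also runs under CPython:

try:
    from java.lang import Exception as Java_base_exception  # Jython only
except ImportError:
    Java_base_exception = RuntimeError  # placeholder so CPython can run this

def handle_setup_exception(exc):
    print("Setup failed, tearing down: %s" % exc)

try:
    raise ValueError("simulated collection_setup failure")
except Java_base_exception as exception:  # catches Java-side errors (Jython)
    handle_setup_exception(exception)
except Exception as exception:            # catches Python-side errors
    handle_setup_exception(exception)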
Example 5
    def setUp(self):
        super(BucketDurabilityBase, self).setUp()

        if len(self.cluster.servers) < self.nodes_init:
            self.fail("Not enough nodes for rebalance")

        # Disable auto-failover to avoid failover of nodes
        status = RestConnection(self.cluster.master) \
            .update_autofailover_settings(False, 120, False)
        self.assertTrue(status, msg="Failure during disabling auto-failover")

        self.durability_helper = DurabilityHelper(
            self.log, len(self.cluster.nodes_in_cluster))
        self.kv_nodes = self.cluster_util.get_kv_nodes(self.cluster)

        self.num_nodes_affected = 1
        if self.num_replicas > 1:
            self.num_nodes_affected = 2

        # Bucket create options representation
        self.bucket_template = dict()
        self.bucket_template[Bucket.name] = "default"
        self.bucket_template[Bucket.ramQuotaMB] = 100
        self.bucket_template[Bucket.replicaNumber] = self.num_replicas
        if self.bucket_type == Bucket.Type.MEMBASE:
            self.bucket_template[Bucket.storageBackend] = self.bucket_storage

        # These two params will be set during each iteration
        self.bucket_template[Bucket.bucketType] = None
        self.bucket_template[Bucket.durabilityMinLevel] = None

        self.bucket_types_to_test = [
            Bucket.Type.MEMBASE, Bucket.Type.EPHEMERAL, Bucket.Type.MEMCACHED
        ]

        self.d_level_order = [
            Bucket.DurabilityLevel.NONE, Bucket.DurabilityLevel.MAJORITY,
            Bucket.DurabilityLevel.MAJORITY_AND_PERSIST_TO_ACTIVE,
            Bucket.DurabilityLevel.PERSIST_TO_MAJORITY
        ]

        # Dict representing the possible levels supported by each bucket type
        self.possible_d_levels = dict()
        self.possible_d_levels[Bucket.Type.MEMBASE] = \
            self.bucket_util.get_supported_durability_levels()
        self.possible_d_levels[Bucket.Type.EPHEMERAL] = [
            Bucket.DurabilityLevel.NONE, Bucket.DurabilityLevel.MAJORITY
        ]
        self.possible_d_levels[Bucket.Type.MEMCACHED] = [
            Bucket.DurabilityLevel.NONE
        ]

        # Dict to store the list of active/replica VBs in each node
        self.vbs_in_node = dict()
        for node in self.cluster_util.get_kv_nodes(self.cluster):
            shell = RemoteMachineShellConnection(node)
            self.vbs_in_node[node] = dict()
            self.vbs_in_node[node]["shell"] = shell
        self.log.info("===== BucketDurabilityBase setup complete =====")
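
possible_d_levels encodes which durability levels each bucket type can support: Couchbase (membase) buckets support all four, ephemeral buckets have no persistence so only NONE and MAJORITY apply, and legacy memcached buckets support no durability at all. A self-contained sketch of that support matrix (string constants stand in for the Bucket.DurabilityLevel enums):

POSSIBLE_D_LEVELS = {
    "membase": ["none", "majority", "majorityAndPersistActive",
                "persistToMajority"],
    "ephemeral": ["none", "majority"],  # no disk, so no persist-based levels
    "memcached": ["none"],              # legacy type: durability unsupported
}

def is_supported(bucket_type, level):
    return level in POSSIBLE_D_LEVELS.get(bucket_type, [])

assert is_supported("ephemeral", "majority")
assert not is_supported("ephemeral", "persistToMajority")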
Example 6
 def setUp(self):
     super(RebalanceBaseTest, self).setUp()
     self.doc_ops = self.input.param("doc_ops", "create")
     self.doc_size = self.input.param("doc_size", 10)
     self.key_size = self.input.param("key_size", 0)
     self.zone = self.input.param("zone", 1)
     self.new_replica = self.input.param("new_replica", None)
     self.default_view_name = "default_view"
     self.default_map_func = "function (doc) {\n  emit(doc._id, doc);\n}"
     self.default_view = View(self.default_view_name, self.default_map_func,
                              None)
     self.max_verify = self.input.param("max_verify", None)
     self.std_vbucket_dist = self.input.param("std_vbucket_dist", None)
     self.key = 'test_docs'.rjust(self.key_size, '0')
     nodes_init = self.cluster.servers[
         1:self.nodes_init] if self.nodes_init != 1 else []
     self.task.rebalance([self.cluster.master], nodes_init, [])
     self.cluster.nodes_in_cluster.extend([self.cluster.master] +
                                          nodes_init)
     self.bucket_util.create_default_bucket(replica=self.num_replicas)
     self.bucket_util.add_rbac_user()
     self.sleep(10)
     gen_create = self.get_doc_generator(0, self.num_items)
     self.print_cluster_stat_task = \
         self.cluster_util.async_print_cluster_stats()
     for bucket in self.bucket_util.buckets:
         task = self.task.async_load_gen_docs(
             self.cluster,
             bucket,
             gen_create,
             "create",
             0,
             persist_to=self.persist_to,
             replicate_to=self.replicate_to,
             batch_size=10,
             timeout_secs=self.sdk_timeout,
             process_concurrency=8,
             retries=self.sdk_retries,
             durability=self.durability_level)
         self.task.jython_task_manager.get_task_result(task)
         self.sleep(20)
         current_item = self.bucket_util.get_bucket_current_item_count(
             self.cluster, bucket)
         self.num_items = current_item
         self.log.info("Inserted {} number of items after loadgen".format(
             self.num_items))
     self.gen_load = self.get_doc_generator(0, self.num_items)
     # gen_update is used for doing mutation for 1/2th of uploaded data
     self.gen_update = self.get_doc_generator(0, (self.num_items / 2))
     self.durability_helper = DurabilityHelper(
         self.log,
         len(self.cluster.nodes_in_cluster),
         durability=self.durability_level,
         replicate_to=self.replicate_to,
         persist_to=self.persist_to)
     self.log.info("==========Finished rebalance base setup========")
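
As the comment on gen_update notes, mutations are aimed at the first half of the loaded key space, so the create and update ranges overlap by construction. The integer division is intentional; these suites run on Python 2 / Jython. The same range logic, made Python-3 safe with //:

num_items = 10000
create_range = (0, num_items)        # keys created by the initial load
update_range = (0, num_items // 2)   # first half gets mutated afterwards
print("updates touch keys %d..%d" % (update_range[0], update_range[1] - 1))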
Example 7
    def setUp(self):
        super(CollectionBase, self).setUp()
        self.log_setup_status("CollectionBase", "started")

        self.MAX_SCOPES = CbServer.max_scopes
        self.MAX_COLLECTIONS = CbServer.max_collections
        self.key = 'test_collection'.rjust(self.key_size, '0')
        self.simulate_error = self.input.param("simulate_error", None)
        self.error_type = self.input.param("error_type", "memory")
        self.doc_ops = self.input.param("doc_ops", None)
        self.spec_name = self.input.param("bucket_spec",
                                          "single_bucket.default")
        self.data_spec_name = self.input.param("data_spec_name",
                                               "initial_load")
        self.remove_default_collection = \
            self.input.param("remove_default_collection", False)

        self.action_phase = self.input.param("action_phase",
                                             "before_default_load")
        self.skip_collections_cleanup = \
            self.input.param("skip_collections_cleanup", False)
        self.validate_docs_count_during_teardown = \
            self.input.param("validate_docs_count_during_teardown", False)
        self.batch_size = self.input.param("batch_size", 200)
        self.vbuckets = self.input.param("vbuckets",
                                         self.cluster_util.vbuckets)
        self.retry_get_process_num = self.input.param("retry_get_process_num",
                                                      25)

        self.crud_batch_size = 100
        self.num_nodes_affected = 1
        if self.num_replicas > 1:
            self.num_nodes_affected = 2

        if self.doc_ops:
            self.doc_ops = self.doc_ops.split(';')

        self.durability_helper = DurabilityHelper(
            self.log, len(self.cluster.nodes_in_cluster),
            self.durability_level)

        # Disable auto-failover to avoid failover of nodes
        status = RestConnection(self.cluster.master) \
            .update_autofailover_settings(False, 120, False)
        self.assertTrue(status, msg="Failure during disabling auto-failover")
        self.bucket_helper_obj = BucketHelper(self.cluster.master)

        try:
            self.collection_setup()
        except Java_base_exception as exception:
            self.handle_setup_exception(exception)
        except Exception as exception:
            self.handle_setup_exception(exception)
        self.supported_d_levels = \
            self.bucket_util.get_supported_durability_levels()
        self.log_setup_status("CollectionBase", "complete")
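
update_autofailover_settings(False, 120) disables the cluster's auto-failover so that deliberately injected errors don't trigger a real failover mid-test. Under the hood this maps to Couchbase Server's POST /settings/autoFailover REST endpoint; a rough equivalent using requests (host and credentials are placeholders):

import requests

def disable_auto_failover(host="127.0.0.1", user="Administrator",
                          password="password", timeout=120):
    # 'timeout' is the auto-failover trigger window in seconds, not an
    # HTTP timeout; enabled=false turns the feature off entirely.
    resp = requests.post(
        "http://%s:8091/settings/autoFailover" % host,
        auth=(user, password),
        data={"enabled": "false", "timeout": timeout})
    return resp.status_code == 200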
Example 8
    def setUp(self):
        super(RebalanceBaseTest, self).setUp()
        self.rest = RestConnection(self.cluster.master)
        self.doc_ops = self.input.param("doc_ops", "create")
        self.key_size = self.input.param("key_size", 0)
        self.zone = self.input.param("zone", 1)
        self.replica_to_update = self.input.param("new_replica", None)
        self.default_view_name = "default_view"
        self.default_map_func = "function (doc) {\n  emit(doc._id, doc);\n}"
        self.default_view = View(self.default_view_name,
                                 self.default_map_func, None)
        self.max_verify = self.input.param("max_verify", None)
        self.std_vbucket_dist = self.input.param("std_vbucket_dist", None)
        self.flusher_total_batch_limit = self.input.param(
            "flusher_total_batch_limit", None)
        self.test_abort_snapshot = self.input.param("test_abort_snapshot",
                                                    False)
        self.items = self.num_items
        self.logs_folder = self.input.param("logs_folder")
        node_ram_ratio = self.bucket_util.base_bucket_ratio(
            self.cluster.servers)
        info = self.rest.get_nodes_self()
        self.rest.init_cluster(username=self.cluster.master.rest_username,
                               password=self.cluster.master.rest_password)
        self.rest.init_cluster_memoryQuota(
            memoryQuota=int(info.mcdMemoryReserved * node_ram_ratio))
        self.check_temporary_failure_exception = False
        nodes_init = self.cluster.servers[1:self.nodes_init] \
            if self.nodes_init != 1 else []

        services = None
        if self.services_init:
            services = list()
            for service in self.services_init.split("-"):
                services.append(service.replace(":", ","))
            services = services[1:] if len(services) > 1 else None

        if nodes_init:
            result = self.task.rebalance([self.cluster.master],
                                         nodes_init, [],
                                         services=services)
            self.assertTrue(result, "Initial rebalance failed")
        self.cluster.nodes_in_cluster.extend([self.cluster.master] +
                                             nodes_init)
        self.check_replica = self.input.param("check_replica", False)
        self.spec_name = self.input.param("bucket_spec", None)

        self.bucket_util.add_rbac_user()
        # Buckets creation and initial data load done by bucket_spec
        if self.spec_name is not None:
            try:
                self.collection_setup()
            except Java_base_exception as exception:
                self.handle_setup_exception(exception)
            except Exception as exception:
                self.handle_setup_exception(exception)
        else:
            if self.standard_buckets > 10:
                self.bucket_util.change_max_buckets(self.standard_buckets)
            self.create_buckets(self.bucket_size)

            # Create Scope/Collection based on inputs given
            for bucket in self.bucket_util.buckets:
                if self.scope_name != CbServer.default_scope:
                    self.scope_name = BucketUtils.get_random_name()
                    BucketUtils.create_scope(self.cluster.master, bucket,
                                             {"name": self.scope_name})
                if self.collection_name != CbServer.default_collection:
                    self.collection_name = BucketUtils.get_random_name()
                    BucketUtils.create_collection(
                        self.cluster.master, bucket, self.scope_name, {
                            "name": self.collection_name,
                            "num_items": self.num_items
                        })
                    self.log.info(
                        "Bucket %s using scope::collection - '%s::%s'" %
                        (bucket.name, self.scope_name, self.collection_name))

                # Update required num_items under default collection
                bucket.scopes[self.scope_name] \
                    .collections[self.collection_name] \
                    .num_items = self.num_items

            if self.flusher_total_batch_limit:
                self.bucket_util.set_flusher_total_batch_limit(
                    self.cluster.master, self.flusher_total_batch_limit,
                    self.bucket_util.buckets)

            self.gen_create = self.get_doc_generator(0, self.num_items)
            if self.active_resident_threshold < 100:
                self.check_temporary_failure_exception = True
                # Reset num_items=0 since the num_items will be populated
                # by the DGM load task
                for bucket in self.bucket_util.buckets:
                    bucket.scopes[self.scope_name] \
                        .collections[self.collection_name] \
                        .num_items = 0

            # Create clients in SDK client pool
            if self.sdk_client_pool:
                self.log.info("Creating SDK clients for client_pool")
                for bucket in self.bucket_util.buckets:
                    self.sdk_client_pool.create_clients(
                        bucket, [self.cluster.master],
                        self.sdk_pool_capacity,
                        compression_settings=self.sdk_compression)

            if not self.atomicity:
                _ = self._load_all_buckets(self.cluster,
                                           self.gen_create,
                                           "create",
                                           0,
                                           batch_size=self.batch_size)
                self.log.info("Verifying num_items counts after doc_ops")
                self.bucket_util._wait_for_stats_all_buckets()
                self.bucket_util.validate_docs_per_collections_all_buckets(
                    timeout=self.wait_timeout)
            else:
                self.transaction_commit = True
                self._load_all_buckets_atomicty(self.gen_create, "create")
                self.transaction_commit = self.input.param(
                    "transaction_commit", True)

            # Initialize doc_generators
            self.active_resident_threshold = 100
            self.gen_create = None
            self.gen_delete = None
            self.gen_update = self.get_doc_generator(0, (self.items / 2))
            self.durability_helper = DurabilityHelper(
                self.log,
                len(self.cluster.nodes_in_cluster),
                durability=self.durability_level,
                replicate_to=self.replicate_to,
                persist_to=self.persist_to)
            self.cluster_util.print_cluster_stats()
            self.bucket_util.print_bucket_stats()
        self.log_setup_status("RebalanceBase", "complete")
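
The services_init parsing above packs one entry per node, with '-' separating nodes and ':' separating multiple services on one node; the first entry (the master, already initialized) is dropped. In isolation:

services_init = "kv-kv:index-n1ql"  # example spec for a 3-node cluster
services = [s.replace(":", ",") for s in services_init.split("-")]
services = services[1:] if len(services) > 1 else None
print(services)  # ['kv,index', 'n1ql'] -> one entry per node being added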
Example 9
    def setUp(self):
        super(CrashTest, self).setUp()

        self.doc_ops = self.input.param("doc_ops", None)
        self.process_name = self.input.param("process", None)
        self.service_name = self.input.param("service", "data")
        self.sig_type = self.input.param("sig_type", "SIGKILL").upper()
        self.target_node = self.input.param("target_node", "active")
        self.client_type = self.input.param("client_type", "sdk").lower()
        self.N1qltxn = self.input.param("N1qltxn", False)

        self.pre_warmup_stats = dict()
        self.timeout = 120
        self.new_docs_to_add = 10000

        if self.doc_ops is not None:
            self.doc_ops = self.doc_ops.split(";")

        if not self.atomicity:
            self.durability_helper = DurabilityHelper(
                self.log, self.nodes_init,
                durability=self.durability_level,
                replicate_to=self.replicate_to,
                persist_to=self.persist_to)

        verification_dict = dict()
        verification_dict["ops_create"] = \
            self.cluster.buckets[0].scopes[
                CbServer.default_scope].collections[
                CbServer.default_collection].num_items
        verification_dict["sync_write_aborted_count"] = 0
        verification_dict["rollback_item_count"] = 0
        verification_dict["pending_writes"] = 0
        if self.durability_level:
            verification_dict["sync_write_committed_count"] = \
                verification_dict["ops_create"]

        # Load initial documents into the buckets
        transaction_gen_create = doc_generator(
            "transaction_key", 0, self.num_items,
            key_size=self.key_size,
            doc_size=self.doc_size,
            doc_type=self.doc_type,
            target_vbucket=self.target_vbucket,
            vbuckets=self.cluster_util.vbuckets)
        gen_create = doc_generator(
            self.key, 0, self.num_items,
            key_size=self.key_size,
            doc_size=self.doc_size,
            doc_type=self.doc_type,
            target_vbucket=self.target_vbucket,
            vbuckets=self.cluster_util.vbuckets)
        if self.atomicity:
            transaction_task = self.task.async_load_gen_docs_atomicity(
                self.cluster, self.cluster.buckets,
                transaction_gen_create, DocLoading.Bucket.DocOps.CREATE,
                exp=0,
                batch_size=10,
                process_concurrency=self.process_concurrency,
                replicate_to=self.replicate_to,
                persist_to=self.persist_to,
                durability=self.durability_level,
                timeout_secs=self.sdk_timeout,
                update_count=self.update_count,
                transaction_timeout=self.transaction_timeout,
                commit=True,
                sync=self.sync)
            self.task.jython_task_manager.get_task_result(transaction_task)
        for bucket in self.cluster.buckets:
            task = self.task.async_load_gen_docs(
                self.cluster, bucket, gen_create,
                DocLoading.Bucket.DocOps.CREATE, self.maxttl,
                persist_to=self.persist_to,
                replicate_to=self.replicate_to,
                durability=self.durability_level,
                batch_size=10, process_concurrency=8)
            self.task.jython_task_manager.get_task_result(task)
            self.bucket_util._wait_for_stats_all_buckets(self.cluster.buckets)

            self.cluster.buckets[0].scopes[
                CbServer.default_scope].collections[
                CbServer.default_collection].num_items += self.num_items
            verification_dict["ops_create"] += self.num_items
            if self.durability_level:
                verification_dict["sync_write_committed_count"] += \
                    self.num_items
            if self.atomicity is False:
                # Verify cbstats vbucket-details
                # (durability_helper is created only when atomicity is off,
                #  so the verification must stay inside this guard)
                stats_failed = \
                    self.durability_helper.verify_vbucket_details_stats(
                        bucket, self.cluster_util.get_kv_nodes(),
                        vbuckets=self.cluster_util.vbuckets,
                        expected_val=verification_dict)
                if stats_failed:
                    self.fail("Cbstats verification failed")
                self.bucket_util.verify_stats_all_buckets(
                    self.cluster,
                    self.cluster.buckets[0].scopes[
                        CbServer.default_scope].collections[
                        CbServer.default_collection].num_items)
        self.bucket = self.cluster.buckets[0]
        if self.N1qltxn:
            self.n1ql_server = self.cluster_util.get_nodes_from_services_map(
                                service_type="n1ql",
                                get_all_nodes=True)
            self.n1ql_helper = N1QLHelper(server=self.n1ql_server,
                                          use_rest=True,
                                          buckets=self.cluster.buckets,
                                          log=self.log,
                                          scan_consistency='REQUEST_PLUS',
                                          num_collection=3,
                                          num_buckets=1,
                                          num_savepoints=1,
                                          override_savepoint=False,
                                          num_stmt=10,
                                          load_spec=self.data_spec_name)
            self.bucket_col = self.n1ql_helper.get_collections()
            self.stmts = self.n1ql_helper.get_stmt(self.bucket_col)
            self.stmts = self.n1ql_helper.create_full_stmts(self.stmts)
        self.log.info("==========Finished CrashTest setup========")
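
The sig_type/process/target_node parameters drive the actual crash later in the test: a named process on the chosen node is sent SIGKILL (or another signal) while CRUDs are in flight. Stripped of the SSH plumbing the framework uses, the core operation is just (pid discovery is a placeholder here):

import os
import signal

def crash_process(pid, sig_name="SIGKILL"):
    # Resolve the signal by name, mirroring the sig_type test parameter.
    os.kill(pid, getattr(signal, sig_name))

# crash_process(memcached_pid)  # pid would be discovered on the server node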
Example 10
    def setUp(self):
        super(CrashTest, self).setUp()

        self.doc_ops = self.input.param("doc_ops", None)
        self.process_name = self.input.param("process", None)
        self.service_name = self.input.param("service", "data")
        self.sig_type = self.input.param("sig_type", "SIGKILL").upper()
        self.target_node = self.input.param("target_node", "active")

        self.pre_warmup_stats = {}
        self.timeout = 120
        self.new_docs_to_add = 10000

        if self.doc_ops is not None:
            self.doc_ops = self.doc_ops.split(";")

        nodes_init = self.cluster.servers[1:self.nodes_init] \
            if self.nodes_init != 1 else []
        self.task.rebalance([self.cluster.master], nodes_init, [])
        self.cluster.nodes_in_cluster.extend([self.cluster.master] +
                                             nodes_init)
        if not self.atomicity:
            self.durability_helper = DurabilityHelper(
                self.log,
                self.nodes_init,
                durability=self.durability_level,
                replicate_to=self.replicate_to,
                persist_to=self.persist_to)
        self.bucket_util.create_default_bucket(
            bucket_type=self.bucket_type,
            ram_quota=self.bucket_size,
            replica=self.num_replicas,
            compression_mode="off",
            storage=self.bucket_storage,
            eviction_policy=self.bucket_eviction_policy)
        self.bucket_util.add_rbac_user()

        if self.sdk_client_pool:
            self.log.info("Creating SDK clients for client_pool")
            for bucket in self.bucket_util.buckets:
                self.sdk_client_pool.create_clients(
                    bucket, [self.cluster.master],
                    self.sdk_pool_capacity,
                    compression_settings=self.sdk_compression)

        verification_dict = dict()
        verification_dict["ops_create"] = self.num_items
        verification_dict["sync_write_aborted_count"] = 0
        verification_dict["rollback_item_count"] = 0
        verification_dict["pending_writes"] = 0
        if self.durability_level:
            verification_dict["sync_write_committed_count"] = self.num_items

        # Load initial documents into the buckets
        self.log.info("Loading initial documents")
        gen_create = doc_generator(self.key,
                                   0,
                                   self.num_items,
                                   key_size=self.key_size,
                                   doc_size=self.doc_size,
                                   doc_type=self.doc_type,
                                   target_vbucket=self.target_vbucket,
                                   vbuckets=self.cluster_util.vbuckets)
        if self.atomicity:
            task = self.task.async_load_gen_docs_atomicity(
                self.cluster,
                self.bucket_util.buckets,
                gen_create,
                "create",
                exp=0,
                batch_size=10,
                process_concurrency=self.process_concurrency,
                replicate_to=self.replicate_to,
                persist_to=self.persist_to,
                durability=self.durability_level,
                timeout_secs=self.sdk_timeout,
                update_count=self.update_count,
                transaction_timeout=self.transaction_timeout,
                commit=True,
                sync=self.sync)
            self.task.jython_task_manager.get_task_result(task)
        else:
            for bucket in self.bucket_util.buckets:
                task = self.task.async_load_gen_docs(
                    self.cluster,
                    bucket,
                    gen_create,
                    DocLoading.Bucket.DocOps.CREATE,
                    self.maxttl,
                    persist_to=self.persist_to,
                    replicate_to=self.replicate_to,
                    durability=self.durability_level,
                    batch_size=10,
                    process_concurrency=8,
                    sdk_client_pool=self.sdk_client_pool)
                self.task.jython_task_manager.get_task_result(task)

                self.bucket_util._wait_for_stats_all_buckets()
                # Verify cbstats vbucket-details
                stats_failed = \
                    self.durability_helper.verify_vbucket_details_stats(
                        bucket, self.cluster_util.get_kv_nodes(),
                        vbuckets=self.cluster_util.vbuckets,
                        expected_val=verification_dict)

                if stats_failed:
                    self.fail("Cbstats verification failed")

            self.bucket_util.verify_stats_all_buckets(self.num_items)
        self.cluster_util.print_cluster_stats()
        self.bucket_util.print_bucket_stats()
        self.log.info("==========Finished CrashTest setup========")
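
doc_generator is the workhorse of all these loaders: it yields (key, document) pairs over a half-open range, with keys padded out to key_size. A minimal stand-in that captures just the range and padding behavior (the real generator also supports doc_type, target_vbucket targeting and more):

def simple_doc_generator(key_prefix, start, end, key_size=15, doc_size=64):
    # Yields (key, doc) pairs for keys in [start, end).
    for i in range(start, end):
        key = ("%s-%d" % (key_prefix, i)).rjust(key_size, '0')
        yield key, {"body": "x" * doc_size}

for key, doc in simple_doc_generator("test_docs", 0, 3):
    print(key)  # 0000test_docs-0, 0000test_docs-1, 0000test_docs-2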
Example 11
    def setUp(self):
        super(DurabilityTestsBase, self).setUp()

        self.simulate_error = self.input.param("simulate_error", None)
        self.error_type = self.input.param("error_type", "memory")
        self.doc_ops = self.input.param("doc_ops", None)
        self.with_non_sync_writes = self.input.param("with_non_sync_writes",
                                                     False)
        self.skip_init_load = self.input.param("skip_init_load", False)
        self.crud_batch_size = 100
        self.num_nodes_affected = 1
        if self.num_replicas > 1:
            self.num_nodes_affected = 2

        if self.doc_ops:
            self.doc_ops = self.doc_ops.split(';')

        self.durability_helper = DurabilityHelper(
            self.log, len(self.cluster.nodes_in_cluster),
            self.durability_level)

        # Initialize cluster using given nodes
        nodes_init = self.cluster.servers[1:self.nodes_init] \
            if self.nodes_init != 1 else []
        self.task.rebalance([self.cluster.master], nodes_init, [])
        self.cluster.nodes_in_cluster.extend([self.cluster.master]+nodes_init)

        # Disable auto-failover to avoid failover of nodes
        status = RestConnection(self.cluster.master) \
            .update_autofailover_settings(False, 120, False)
        self.assertTrue(status, msg="Failure during disabling auto-failover")

        # Create default bucket and add rbac user
        self.bucket_util.create_default_bucket(
            replica=self.num_replicas, compression_mode=self.compression_mode,
            bucket_type=self.bucket_type, storage=self.bucket_storage,
            eviction_policy=self.bucket_eviction_policy)
        self.bucket_util.add_rbac_user()

        self.cluster_util.print_cluster_stats()
        self.bucket = self.bucket_util.buckets[0]

        # Create sdk_clients for pool
        if self.sdk_client_pool:
            self.log.info("Creating SDK client pool")
            self.sdk_client_pool.create_clients(
                self.bucket,
                self.cluster.nodes_in_cluster,
                req_clients=self.sdk_pool_capacity,
                compression_settings=self.sdk_compression)

        if not self.skip_init_load:
            if self.target_vbucket and type(self.target_vbucket) is not list:
                self.target_vbucket = [self.target_vbucket]

            self.log.info("Creating doc_generator..")
            doc_create = doc_generator(
                self.key,
                0,
                self.num_items,
                key_size=self.key_size,
                doc_size=self.doc_size,
                doc_type=self.doc_type,
                target_vbucket=self.target_vbucket,
                vbuckets=self.cluster_util.vbuckets)

            self.log.info("Loading {0} items into bucket"
                          .format(self.num_items))
            task = self.task.async_load_gen_docs(
                self.cluster, self.bucket, doc_create, "create", 0,
                batch_size=10, process_concurrency=8,
                replicate_to=self.replicate_to, persist_to=self.persist_to,
                durability=self.durability_level,
                timeout_secs=self.sdk_timeout,
                sdk_client_pool=self.sdk_client_pool)
            self.task.jython_task_manager.get_task_result(task)

            # Verify initial doc load count
            self.bucket_util._wait_for_stats_all_buckets()
            self.bucket_util.verify_stats_all_buckets(self.num_items)

        self.bucket_util.print_bucket_stats()
        self.log.info("=== DurabilityBaseTests setup complete ===")
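
One small detail worth noting from this setUp: target_vbucket may arrive from the config as a single integer, and the guard converts it to a list so downstream generators can always iterate it. The same normalization in isolation (isinstance is the more idiomatic spelling of the type check):

target_vbucket = 512
if target_vbucket is not None and not isinstance(target_vbucket, list):
    target_vbucket = [target_vbucket]
print(target_vbucket)  # [512]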
Example 12
    def test_collection_not_exists(self):
        """
        1. Load docs into required collection
        2. Validate docs based on the targeted collection
        3. Create non-default scope/collection for CRUDs to happen
        4. Perform doc_ops again and perform CRUDs
        5. Drop the target collection and validate the CollectionNotExists
           exception from client side
        6. Recreate non-default collection and re-create the docs and validate
        """
        def validate_vb_detail_stats():
            failed = durability_helper.verify_vbucket_details_stats(
                self.bucket,
                self.cluster_util.get_kv_nodes(),
                vbuckets=self.cluster_util.vbuckets,
                expected_val=verification_dict)
            if failed:
                self.log_failure("vBucket_details validation failed")
            self.bucket_util.validate_docs_per_collections_all_buckets()

        num_cols_in_bucket = 0
        for _, scope in self.bucket.scopes.items():
            for _, _ in scope.collections.items():
                num_cols_in_bucket += 1

        verification_dict = dict()
        verification_dict["ops_create"] = num_cols_in_bucket * self.num_items
        verification_dict["ops_update"] = 0
        verification_dict["ops_delete"] = 0
        verification_dict["rollback_item_count"] = 0
        verification_dict["sync_write_aborted_count"] = 0
        verification_dict["sync_write_committed_count"] = 0

        durability_helper = DurabilityHelper(self.log,
                                             len(self.cluster.kv_nodes),
                                             durability=self.durability_level)

        drop_scope = self.input.param("drop_scope", False)
        if self.scope_name != CbServer.default_scope:
            self.scope_name = self.bucket_util.get_random_name()
        if self.collection_name != CbServer.default_collection:
            self.collection_name = self.bucket_util.get_random_name()

        # Doc generator used for mutations
        doc_gen = doc_generator("test_col_not_exists", 0, 10)

        # Acquire SDK client for mutations
        client = self.sdk_client_pool.get_client_for_bucket(
            self.bucket, self.scope_name, self.collection_name)

        doc_ttl, _ = \
            SDKExceptionTests.__get_random_doc_ttl_and_durability_level()
        self.log.info(
            "Creating docs with doc_ttl %s into %s:%s:%s" %
            (doc_ttl, self.bucket.name, self.scope_name, self.collection_name))

        while doc_gen.has_next():
            key, value = doc_gen.next()
            result = client.crud("create",
                                 key,
                                 value,
                                 exp=doc_ttl,
                                 durability=self.durability_level,
                                 timeout=30)
            if self.collection_name == CbServer.default_collection:
                if result["status"] is False:
                    self.log_failure("Create doc failed for key: %s" % key)
                else:
                    verification_dict["ops_create"] += 1
                    if self.durability_level:
                        verification_dict["sync_write_committed_count"] += 1
                    self.bucket.scopes[self.scope_name].collections[
                        self.collection_name].num_items += 1
            elif result["status"] is True:
                self.log_failure("Create didn't fail as expected for key: %s" %
                                 key)
            elif SDKException.AmbiguousTimeoutException \
                    not in str(result["error"]) \
                    or SDKException.RetryReason.COLLECTION_NOT_FOUND \
                    not in str(result["error"]):
                self.log_failure("Invalid exception for key %s: %s" %
                                 (key, result["error"]))

        validate_vb_detail_stats()
        # Create required scope/collection for successful CRUD operation
        self.create_scope_collection()

        # Reset doc_gen itr value for retry purpose
        doc_gen.reset()
        doc_ttl, _ = \
            SDKExceptionTests.__get_random_doc_ttl_and_durability_level()
        self.log.info(
            "Creating docs with doc_ttl %s into %s:%s:%s" %
            (doc_ttl, self.bucket.name, self.scope_name, self.collection_name))
        op_type = "create"
        if self.collection_name == CbServer.default_collection:
            op_type = "update"

        while doc_gen.has_next():
            key, value = doc_gen.next()
            result = client.crud(op_type,
                                 key,
                                 value,
                                 exp=doc_ttl,
                                 durability=self.durability_level)
            if result["status"] is False:
                self.log_failure("Create fail for key %s: %s" % (key, result))
            else:
                if op_type == "create":
                    verification_dict["ops_create"] += 1
                    self.bucket.scopes[self.scope_name].collections[
                        self.collection_name].num_items += 1
                else:
                    verification_dict["ops_update"] += 1

                if self.durability_level:
                    verification_dict["sync_write_committed_count"] += 1
        validate_vb_detail_stats()
        self.validate_test_failure()

        if drop_scope:
            self.log.info("Dropping scope %s" % self.scope_name)
            self.bucket_util.drop_scope(self.cluster.master, self.bucket,
                                        self.scope_name)
        else:
            self.log.info("Dropping collection %s:%s" %
                          (self.scope_name, self.collection_name))
            self.bucket_util.drop_collection(self.cluster.master, self.bucket,
                                             self.scope_name,
                                             self.collection_name)
        validate_vb_detail_stats()
        self.validate_test_failure()

        # Reset doc_gen itr value for retry purpose
        doc_gen.reset()
        while doc_gen.has_next():
            key, value = doc_gen.next()
            result = client.crud("create",
                                 key,
                                 value,
                                 exp=doc_ttl,
                                 durability=self.durability_level)
            if result["status"] is True:
                self.log_failure("Create doc succeeded for dropped collection")
        validate_vb_detail_stats()
        self.validate_test_failure()

        # Re-create the dropped collection
        self.create_scope_collection(create_scope=drop_scope)

        if self.collection_name != CbServer.default_collection:
            doc_gen.reset()
            while doc_gen.has_next():
                key, value = doc_gen.next()
                result = client.crud("create",
                                     key,
                                     value,
                                     exp=doc_ttl,
                                     durability=self.durability_level)
                if result["status"] is False:
                    self.log_failure("Create failed after collection recreate "
                                     "for key %s: %s" % (key, result["error"]))
                else:
                    verification_dict["ops_create"] += 1
                    if self.durability_level:
                        verification_dict["sync_write_committed_count"] += 1
                    self.bucket.scopes[self.scope_name].collections[
                        self.collection_name].num_items += 1
            validate_vb_detail_stats()

        # Release the acquired client
        self.sdk_client_pool.release_client(client)
        self.validate_test_failure()
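
The exception checks in this test encode the expected client behavior: a create aimed at a dropped or missing collection should fail with an ambiguous timeout carrying a COLLECTION_NOT_FOUND retry reason, while a clean success or any other error is a test failure. The classification logic, isolated (the marker strings mirror the checks above and are assumptions about the SDK's error text):

def classify(result):
    err = str(result.get("error", ""))
    if result["status"]:
        return "unexpected-success"   # collection is gone; create must fail
    if "AmbiguousTimeoutException" in err and "COLLECTION_NOT_FOUND" in err:
        return "expected-failure"
    return "invalid-exception"

sample = {"status": False,
          "error": "AmbiguousTimeoutException: retry on COLLECTION_NOT_FOUND"}
print(classify(sample))  # expected-failure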
Example 13
    def setUp(self):
        super(basic_ops, self).setUp()

        self.doc_ops = self.input.param("doc_ops", "").split(";")
        self.observe_test = self.input.param("observe_test", False)
        # Scope/collection name can be default or create a random one to test
        self.scope_name = self.input.param("scope", CbServer.default_scope)
        self.collection_name = self.input.param("collection",
                                                CbServer.default_collection)

        nodes_init = self.cluster.servers[1:self.nodes_init] \
            if self.nodes_init != 1 else []
        self.task.rebalance([self.cluster.master], nodes_init, [])
        self.cluster.nodes_in_cluster.extend([self.cluster.master] +
                                             nodes_init)
        self.bucket_util.create_default_bucket(
            replica=self.num_replicas,
            compression_mode=self.compression_mode,
            bucket_type=self.bucket_type,
            storage=self.bucket_storage,
            eviction_policy=self.bucket_eviction_policy)
        self.bucket_util.add_rbac_user()

        # Create Scope/Collection with random names if not equal to default
        if self.scope_name != CbServer.default_scope:
            self.scope_name = self.bucket_util.get_random_name()
            self.bucket_util.create_scope(self.cluster.master,
                                          self.bucket_util.buckets[0],
                                          {"name": self.scope_name})
        if self.collection_name != CbServer.default_collection:
            self.collection_name = self.bucket_util.get_random_name()
            self.bucket_util.create_collection(
                self.cluster.master, self.bucket_util.buckets[0],
                self.scope_name, {
                    "name": self.collection_name,
                    "num_items": self.num_items
                })
            self.log.info("Using scope::collection - '%s::%s'" %
                          (self.scope_name, self.collection_name))

        # Update required num_items under default collection
        self.bucket_util.buckets[0] \
            .scopes[self.scope_name] \
            .collections[self.collection_name] \
            .num_items = self.num_items

        self.durability_helper = DurabilityHelper(
            self.log,
            len(self.cluster.nodes_in_cluster),
            durability=self.durability_level,
            replicate_to=self.replicate_to,
            persist_to=self.persist_to)

        # Create sdk_clients for pool
        if self.sdk_client_pool:
            self.log.info("Creating SDK client pool")
            self.sdk_client_pool.create_clients(
                self.bucket_util.buckets[0],
                self.cluster.nodes_in_cluster,
                req_clients=self.sdk_pool_capacity,
                compression_settings=self.sdk_compression)

        # Reset active_resident_threshold to avoid further data load as DGM
        self.active_resident_threshold = 0
        self.cluster_util.print_cluster_stats()
        self.bucket_util.print_bucket_stats()
        self.log.info("==========Finished Basic_ops base setup========")
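
The num_items assignment near the end keeps the framework's client-side bookkeeping in sync: each Bucket object tracks the expected item count per scope::collection so later validation can compare it with server stats. Plain dicts standing in for the Bucket/Scope/Collection objects:

bucket = {"scopes": {"_default": {
    "collections": {"_default": {"num_items": 0}}}}}
scope_name, collection_name, num_items = "_default", "_default", 10000
bucket["scopes"][scope_name]["collections"][collection_name]["num_items"] = \
    num_items
print(bucket["scopes"][scope_name]["collections"][collection_name])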
Example 14
    def test_index_with_aborts(self):
        """
        1. Create index (2i/view) on default bucket
        2. Load multiple docs such that all sync_writes will be aborted
        3. Verify nothing went into indexing
        4. Load sync_write docs such that they are successful
        5. Validate the mutated docs are taken into indexing
        :return:
        """

        crud_batch_size = 50
        def_bucket = self.cluster.buckets[0]
        kv_nodes = self.cluster_util.get_kv_nodes(self.cluster)
        replica_vbs = dict()
        verification_dict = dict()
        index_item_count = dict()
        expected_num_indexed = dict()
        load_gen = dict()
        load_gen["ADD"] = dict()
        load_gen["SET"] = dict()
        partial_aborts = ["initial_aborts", "aborts_at_end"]

        durability_helper = DurabilityHelper(
            self.log,
            len(self.cluster.nodes_in_cluster),
            durability=self.durability_level,
            replicate_to=self.replicate_to,
            persist_to=self.persist_to)

        if self.create_index_during == "before_doc_ops":
            self.create_gsi_indexes(def_bucket)

        curr_items = self.bucket_util.get_bucket_current_item_count(
            self.cluster, def_bucket)
        if self.sync_write_abort_pattern in ["all_aborts", "initial_aborts"]:
            self.bucket_util.flush_bucket(self.cluster, def_bucket)
            self.num_items = 0
        else:
            self.num_items = curr_items

        self.log.info("Disabling auto_failover to avoid node failures")
        status = RestConnection(self.cluster.master) \
            .update_autofailover_settings(False, 120)
        self.assertTrue(status, msg="Failure during disabling auto-failover")

        # Validate vbucket stats
        verification_dict["ops_create"] = self.num_items
        verification_dict["ops_update"] = 0
        # verification_dict["ops_delete"] = 0
        verification_dict["rollback_item_count"] = 0
        verification_dict["sync_write_aborted_count"] = 0
        verification_dict["sync_write_committed_count"] = 0

        index_item_count["#primary"] = self.num_items
        index_item_count["durable_add_aborts"] = 0
        index_item_count["durable_set_aborts"] = 0
        expected_num_indexed["#primary"] = curr_items
        expected_num_indexed["durable_add_aborts"] = 0
        expected_num_indexed["durable_set_aborts"] = 0

        if self.create_index_during == "before_doc_ops":
            self.validate_indexed_doc_count(def_bucket, index_item_count)

        self.log.info("Loading docs such that all sync_writes will be aborted")
        for server in kv_nodes:
            ssh_shell = RemoteMachineShellConnection(server)
            cbstats = Cbstats(server)
            replica_vbs[server] = cbstats.vbucket_list(def_bucket.name,
                                                       "replica")
            load_gen["ADD"][server] = list()
            load_gen["ADD"][server].append(
                doc_generator(self.key,
                              0,
                              crud_batch_size,
                              target_vbucket=replica_vbs[server],
                              mutation_type="ADD"))
            if self.sync_write_abort_pattern in partial_aborts:
                load_gen["ADD"][server].append(
                    doc_generator(self.key,
                                  10000,
                                  crud_batch_size,
                                  target_vbucket=replica_vbs[server],
                                  mutation_type="ADD"))
                verification_dict["ops_create"] += crud_batch_size
                verification_dict["sync_write_committed_count"] += \
                    crud_batch_size
                index_item_count["#primary"] += crud_batch_size
                index_item_count["durable_add_aborts"] += crud_batch_size
                expected_num_indexed["#primary"] += crud_batch_size
                expected_num_indexed["durable_add_aborts"] += crud_batch_size

            task_success = self.bucket_util.load_durable_aborts(
                ssh_shell, load_gen["ADD"][server], self.cluster, def_bucket,
                self.durability_level, DocLoading.Bucket.DocOps.CREATE,
                self.sync_write_abort_pattern)
            if not task_success:
                self.log_failure("Failure during load_abort task")

            verification_dict["sync_write_aborted_count"] += \
                crud_batch_size
            if self.create_index_during == "before_doc_ops":
                self.validate_indexed_doc_count(def_bucket, index_item_count)

            load_gen["SET"][server] = list()
            load_gen["SET"][server].append(
                doc_generator(self.key,
                              0,
                              crud_batch_size,
                              target_vbucket=replica_vbs[server],
                              mutation_type="SET"))
            if self.sync_write_abort_pattern in partial_aborts:
                load_gen["SET"][server].append(
                    doc_generator(self.key,
                                  10000,
                                  crud_batch_size,
                                  target_vbucket=replica_vbs[server],
                                  mutation_type="SET"))
                verification_dict["ops_update"] += crud_batch_size
                verification_dict["sync_write_committed_count"] += \
                    crud_batch_size
                index_item_count["durable_add_aborts"] -= crud_batch_size
                index_item_count["durable_set_aborts"] += crud_batch_size
                expected_num_indexed["#primary"] += crud_batch_size
                expected_num_indexed["durable_add_aborts"] += crud_batch_size
                expected_num_indexed["durable_set_aborts"] += crud_batch_size

            verification_dict["sync_write_aborted_count"] += \
                crud_batch_size
            task_success = self.bucket_util.load_durable_aborts(
                ssh_shell, load_gen["SET"][server], self.cluster, def_bucket,
                self.durability_level, DocLoading.Bucket.DocOps.UPDATE,
                self.sync_write_abort_pattern)
            if not task_success:
                self.log_failure("Failure during load_abort task")

            ssh_shell.disconnect()

            if self.create_index_during == "before_doc_ops":
                self.validate_indexed_doc_count(def_bucket, index_item_count)
        failed = durability_helper.verify_vbucket_details_stats(
            def_bucket,
            kv_nodes,
            vbuckets=self.cluster.vbuckets,
            expected_val=verification_dict)
        if failed:
            self.log_failure("Cbstat vbucket-details verification failed")
        self.validate_test_failure()

        if self.create_index_during == "after_doc_ops":
            self.create_gsi_indexes(def_bucket)
            self.validate_indexed_doc_count(def_bucket, index_item_count)

        self.log.info("Verify aborts are not indexed")
        self.validate_indexed_count_from_stats(def_bucket,
                                               expected_num_indexed,
                                               index_item_count)

        if not self.use_gsi_for_primary:
            self.log.info("Wait of any indexing_activity to complete")
            index_monitor_task = self.cluster_util.async_monitor_active_task(
                self.cluster.master,
                "indexer",
                "_design/ddl_#primary",
                num_iteration=20,
                wait_task=True)[0]
            self.task_manager.get_task_result(index_monitor_task)
            self.assertTrue(index_monitor_task.result,
                            "Indexer task still running on server")

        for server in kv_nodes:
            if self.sync_write_abort_pattern == "initial_aborts":
                load_gen["ADD"][server] = load_gen["ADD"][server][:1]
                load_gen["SET"][server] = load_gen["SET"][server][:1]
            elif self.sync_write_abort_pattern == "aborts_at_end":
                load_gen["ADD"][server] = load_gen["ADD"][server][-1:]
                load_gen["SET"][server] = load_gen["SET"][server][-1:]

        self.log.info("Load sync_write docs such that they are successful")
        for server in kv_nodes:
            for gen_load in load_gen["ADD"][server]:
                task = self.task.async_load_gen_docs(
                    self.cluster,
                    def_bucket,
                    gen_load,
                    "create",
                    0,
                    batch_size=50,
                    process_concurrency=8,
                    replicate_to=self.replicate_to,
                    persist_to=self.persist_to,
                    durability=self.durability_level,
                    timeout_secs=self.sdk_timeout)
                self.task.jython_task_manager.get_task_result(task)

                if len(task.fail.keys()) != 0:
                    self.log_failure("Some failures seen during doc_ops")

                index_item_count["#primary"] += crud_batch_size
                index_item_count["durable_add_aborts"] += crud_batch_size
                expected_num_indexed["#primary"] += crud_batch_size
                expected_num_indexed["durable_add_aborts"] += crud_batch_size
                self.validate_indexed_doc_count(def_bucket, index_item_count)

            for gen_load in load_gen["SET"][server]:
                task = self.task.async_load_gen_docs(
                    self.cluster,
                    def_bucket,
                    gen_load,
                    "update",
                    0,
                    batch_size=50,
                    process_concurrency=8,
                    replicate_to=self.replicate_to,
                    persist_to=self.persist_to,
                    durability=self.durability_level,
                    timeout_secs=self.sdk_timeout)
                self.task.jython_task_manager.get_task_result(task)

                if len(task.fail.keys()) != 0:
                    self.log_failure("Some failures seen during doc_ops")

                index_item_count["durable_add_aborts"] -= crud_batch_size
                index_item_count["durable_set_aborts"] += crud_batch_size
                expected_num_indexed["#primary"] += crud_batch_size
                expected_num_indexed["durable_add_aborts"] += crud_batch_size
                expected_num_indexed["durable_set_aborts"] += crud_batch_size
                self.validate_indexed_doc_count(def_bucket, index_item_count)

        self.log.info("Validate the mutated docs are taken into indexing")
        self.validate_indexed_count_from_stats(def_bucket,
                                               expected_num_indexed,
                                               index_item_count)
        self.validate_test_failure()
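A minimal sketch of the abort-driving pattern used above, with import paths assumed from TAF's usual layout (they are not shown in this listing): the generator is pinned to the vbuckets a node holds only as replicas, so durable writes against it can be made to abort on that node.

    # Assumed import paths; adjust to the actual tree if they differ
    from remote.remote_util import RemoteMachineShellConnection
    from cb_tools.cbstats import Cbstats
    from couchbase_helper.documentgenerator import doc_generator

    def replica_pinned_gen(server, bucket_name, key_prefix, num_docs):
        """Build a doc generator targeting only server's replica vbuckets."""
        shell = RemoteMachineShellConnection(server)
        try:
            replica_vbs = Cbstats(shell).vbucket_list(bucket_name, "replica")
        finally:
            shell.disconnect()
        return doc_generator(key_prefix, 0, num_docs,
                             target_vbucket=replica_vbs,
                             mutation_type="ADD")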
Example 15
    def setUp(self):
        super(DurabilitySuccessTests, self).setUp()
        self.durability_helper = DurabilityHelper(
            self.log, len(self.cluster.nodes_in_cluster),
            self.durability_level)
        self.log.info("=== DurabilitySuccessTests setup complete ===")
Example 16
    def setUp(self):
        super(RebalanceBaseTest, self).setUp()
        self.rest = RestConnection(self.cluster.master)
        self.doc_ops = self.input.param("doc_ops", "create")
        self.key_size = self.input.param("key_size", 0)
        self.zone = self.input.param("zone", 1)
        self.replica_to_update = self.input.param("new_replica", None)
        self.default_view_name = "default_view"
        self.default_map_func = "function (doc) {\n  emit(doc._id, doc);\n}"
        self.default_view = View(self.default_view_name, self.default_map_func, None)
        self.max_verify = self.input.param("max_verify", None)
        self.std_vbucket_dist = self.input.param("std_vbucket_dist", None)
        self.key = 'test_docs'.rjust(self.key_size, '0')
        self.flusher_batch_split_trigger = self.input.param("flusher_batch_split_trigger", None)
        self.items = self.num_items
        node_ram_ratio = self.bucket_util.base_bucket_ratio(self.cluster.servers)
        info = self.rest.get_nodes_self()
        self.rest.init_cluster(username=self.cluster.master.rest_username,
                               password=self.cluster.master.rest_password)
        self.rest.init_cluster_memoryQuota(memoryQuota=int(info.mcdMemoryReserved*node_ram_ratio))
        self.check_temporary_failure_exception = False
        nodes_init = self.cluster.servers[1:self.nodes_init] if self.nodes_init != 1 else []
        if nodes_init:
            result = self.task.rebalance([self.cluster.master], nodes_init, [])
            self.assertTrue(result, "Initial rebalance failed")
        self.cluster.nodes_in_cluster.extend([self.cluster.master] + nodes_init)

        self.bucket_util.add_rbac_user()
        if self.standard_buckets > 10:
            self.bucket_util.change_max_buckets(self.standard_buckets)
        self.create_buckets()
        self.sleep(20)

        if self.flusher_batch_split_trigger:
            self.bucket_util.set_flusher_batch_split_trigger(self.cluster.master,
                                                             self.flusher_batch_split_trigger,
                                                             self.bucket_util.buckets)

        self.gen_create = self.get_doc_generator(0, self.num_items)
        if self.active_resident_threshold < 100:
            self.check_temporary_failure_exception = True
        if not self.atomicity:
            tasks_info = self._load_all_buckets(self.cluster, self.gen_create, "create", 0)
            self.log.info("Verifying num_items counts after doc_ops")
            self.bucket_util._wait_for_stats_all_buckets()
            self.bucket_util.verify_stats_all_buckets(self.num_items)
        else:
            self.transaction_commit = True
            self._load_all_buckets_atomicty(self.gen_create, "create")
            self.transaction_commit = self.input.param("transaction_commit", True)

        # Initialize doc_generators
        self.active_resident_threshold = 100
        self.gen_create = None
        self.gen_delete = None
        self.gen_update = self.get_doc_generator(0, (self.items / 2))
        self.durability_helper = DurabilityHelper(
            self.log, len(self.cluster.nodes_in_cluster),
            durability=self.durability_level,
            replicate_to=self.replicate_to, persist_to=self.persist_to)
        self.cluster_util.print_cluster_stats()
        self.bucket_util.print_bucket_stats()
        self.log.info("==========Finished rebalance base setup========")
Example 17
    def setUp(self):
        super(CollectionBase, self).setUp()
        self.log_setup_status("CollectionBase", "started")
        self.key = 'test_collection'.rjust(self.key_size, '0')
        self.simulate_error = self.input.param("simulate_error", None)
        self.error_type = self.input.param("error_type", "memory")
        self.doc_ops = self.input.param("doc_ops", None)
        self.spec_name = self.input.param("bucket_spec",
                                          "single_bucket.default")
        self.over_ride_spec_params = \
            self.input.param("override_spec_params", "").split(";")

        self.action_phase = self.input.param("action_phase",
                                             "before_default_load")
        self.crud_batch_size = 100
        self.num_nodes_affected = 1
        if self.num_replicas > 1:
            self.num_nodes_affected = 2

        if self.doc_ops:
            self.doc_ops = self.doc_ops.split(';')

        self.durability_helper = DurabilityHelper(
            self.log, len(self.cluster.nodes_in_cluster),
            self.durability_level)

        # Initialize cluster using given nodes
        nodes_init = self.cluster.servers[1:self.nodes_init] \
            if self.nodes_init != 1 else []
        self.task.rebalance([self.cluster.master], nodes_init, [])
        self.cluster.nodes_in_cluster.extend([self.cluster.master] +
                                             nodes_init)

        # Disable auto-failover to avoid failover of nodes
        status = RestConnection(self.cluster.master) \
            .update_autofailover_settings(False, 120, False)
        self.assertTrue(status, msg="Failure during disabling auto-failover")

        # Create bucket(s) and add rbac user
        self.bucket_util.add_rbac_user()
        buckets_spec = self.bucket_util.get_bucket_template_from_package(
            self.spec_name)
        doc_loading_spec = \
            self.bucket_util.get_crud_template_from_package("initial_load")

        # Process params to over_ride values if required
        self.over_ride_template_params(buckets_spec)
        self.over_ride_template_params(doc_loading_spec)

        # MB-38438, adding CollectionNotFoundException in retry exception
        doc_loading_spec[MetaCrudParams.RETRY_EXCEPTIONS].append(
            SDKException.CollectionNotFoundException)

        self.bucket_util.create_buckets_using_json_data(buckets_spec)
        self.bucket_util.wait_for_collection_creation_to_complete()

        # Init sdk_client_pool if not initialized before
        if self.sdk_client_pool is None:
            self.init_sdk_pool_object()

        # Create clients in SDK client pool
        self.log.info("Creating required SDK clients for client_pool")
        bucket_count = len(self.bucket_util.buckets)
        max_clients = self.task_manager.number_of_threads
        clients_per_bucket = int(ceil(float(max_clients) / bucket_count))
        for bucket in self.bucket_util.buckets:
            self.sdk_client_pool.create_clients(
                bucket, [self.cluster.master],
                clients_per_bucket,
                compression_settings=self.sdk_compression)

        # TODO: remove this once the bug is fixed
        self.sleep(60, "MB-38497")

        doc_loading_task = \
            self.bucket_util.run_scenario_from_spec(
                self.task,
                self.cluster,
                self.bucket_util.buckets,
                doc_loading_spec,
                mutation_num=0)
        if doc_loading_task.result is False:
            self.fail("Initial doc_loading failed")

        self.cluster_util.print_cluster_stats()

        # Verify initial doc load count
        self.bucket_util._wait_for_stats_all_buckets()
        self.bucket_util.validate_docs_per_collections_all_buckets()

        self.bucket_util.print_bucket_stats()
        self.bucket_helper_obj = BucketHelper(self.cluster.master)
        self.log_setup_status("CollectionBase", "complete")
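One sizing detail worth calling out in the client-pool code above: on the Python 2/Jython runtime this suite runs under, `/` between two ints floors before ceil ever sees the value, so the dividend must be a float for the round-up to take effect. A self-contained illustration:

    from math import ceil

    max_clients, bucket_count = 10, 4
    int(ceil(max_clients / bucket_count))         # Python 2: 10 / 4 == 2, so this stays 2
    int(ceil(float(max_clients) / bucket_count))  # 2.5 rounds up to 3, as intended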
Example 18
    def setUp(self):
        super(RebalanceBaseTest, self).setUp()
        self.rest = RestConnection(self.cluster.master)
        self.doc_ops = self.input.param("doc_ops", "create")
        self.key_size = self.input.param("key_size", 0)
        self.zone = self.input.param("zone", 1)
        self.replica_to_update = self.input.param("new_replica", None)
        self.default_view_name = "default_view"
        self.default_map_func = "function (doc) {\n  emit(doc._id, doc);\n}"
        self.default_view = View(self.default_view_name, self.default_map_func,
                                 None)
        self.max_verify = self.input.param("max_verify", None)
        self.std_vbucket_dist = self.input.param("std_vbucket_dist", None)
        self.flusher_total_batch_limit = self.input.param("flusher_total_batch_limit", None)
        self.test_abort_snapshot = self.input.param("test_abort_snapshot",
                                                    False)
        self.items = self.num_items
        node_ram_ratio = self.bucket_util.base_bucket_ratio(self.cluster.servers)
        info = self.rest.get_nodes_self()
        self.rest.init_cluster(username=self.cluster.master.rest_username,
                               password=self.cluster.master.rest_password)
        self.rest.init_cluster_memoryQuota(memoryQuota=int(info.mcdMemoryReserved*node_ram_ratio))
        self.check_temporary_failure_exception = False
        nodes_init = self.cluster.servers[1:self.nodes_init] if self.nodes_init != 1 else []
        if nodes_init:
            result = self.task.rebalance([self.cluster.master], nodes_init, [])
            self.assertTrue(result, "Initial rebalance failed")
        self.cluster.nodes_in_cluster.extend([self.cluster.master] + nodes_init)
        self.check_replica = self.input.param("check_replica", False)
        self.spec_name = self.input.param("bucket_spec", None)

        # If buckets creation and initial data load is to be done by bucket_spec
        if self.spec_name is not None:
            self.log.info("Creating buckets from spec")
            # Create bucket(s) and add rbac user
            buckets_spec = self.bucket_util.get_bucket_template_from_package(
                self.spec_name)
            doc_loading_spec = \
                self.bucket_util.get_crud_template_from_package("initial_load")

            self.bucket_util.create_buckets_using_json_data(buckets_spec)
            self.bucket_util.wait_for_collection_creation_to_complete()
            # Create clients in SDK client pool
            if self.sdk_client_pool:
                self.log.info("Creating required SDK clients for client_pool")
                bucket_count = len(self.bucket_util.buckets)
                max_clients = self.task_manager.number_of_threads
                clients_per_bucket = int(ceil(float(max_clients) / bucket_count))
                for bucket in self.bucket_util.buckets:
                    self.sdk_client_pool.create_clients(
                        bucket,
                        [self.cluster.master],
                        clients_per_bucket,
                        compression_settings=self.sdk_compression)

            self.bucket_util.run_scenario_from_spec(self.task,
                                                    self.cluster,
                                                    self.bucket_util.buckets,
                                                    doc_loading_spec,
                                                    mutation_num=0)
            self.bucket_util.add_rbac_user()

            self.cluster_util.print_cluster_stats()

            # Verify initial doc load count
            self.bucket_util._wait_for_stats_all_buckets()
            self.bucket_util.validate_docs_per_collections_all_buckets()

            self.cluster_util.print_cluster_stats()
            self.bucket_util.print_bucket_stats()
            self.bucket_helper_obj = BucketHelper(self.cluster.master)
            self.log.info("==========Finished rebalance base setup========")
        else:
            self.bucket_util.add_rbac_user()
            if self.standard_buckets > 10:
                self.bucket_util.change_max_buckets(self.standard_buckets)
            self.create_buckets(self.bucket_size)

            # Create Scope/Collection based on inputs given
            for bucket in self.bucket_util.buckets:
                if self.scope_name != CbServer.default_scope:
                    self.scope_name = BucketUtils.get_random_name()
                    BucketUtils.create_scope(self.cluster.master,
                                             bucket,
                                             {"name": self.scope_name})
                if self.collection_name != CbServer.default_collection:
                    self.collection_name = BucketUtils.get_random_name()
                    BucketUtils.create_collection(self.cluster.master,
                                                  bucket,
                                                  self.scope_name,
                                                  {"name": self.collection_name,
                                                   "num_items": self.num_items})
                    self.log.info("Bucket %s using scope::collection - '%s::%s'"
                                  % (bucket.name,
                                     self.scope_name,
                                     self.collection_name))

                # Update required num_items under default collection
                bucket.scopes[self.scope_name] \
                    .collections[self.collection_name] \
                    .num_items = self.num_items

            if self.flusher_total_batch_limit:
                self.bucket_util.set_flusher_total_batch_limit(
                    self.cluster.master,
                    self.flusher_total_batch_limit,
                    self.bucket_util.buckets)

            self.gen_create = self.get_doc_generator(0, self.num_items)
            if self.active_resident_threshold < 100:
                self.check_temporary_failure_exception = True
            if not self.atomicity:
                _ = self._load_all_buckets(self.cluster, self.gen_create,
                                           "create", 0, batch_size=self.batch_size)
                self.log.info("Verifying num_items counts after doc_ops")
                self.bucket_util._wait_for_stats_all_buckets()
                self.bucket_util.validate_docs_per_collections_all_buckets(
                    timeout=120)
            else:
                self.transaction_commit = True
                self._load_all_buckets_atomicty(self.gen_create, "create")
                self.transaction_commit = self.input.param("transaction_commit",
                                                           True)

            # Initialize doc_generators
            self.active_resident_threshold = 100
            self.gen_create = None
            self.gen_delete = None
            self.gen_update = self.get_doc_generator(0, (self.items / 2))
            self.durability_helper = DurabilityHelper(
                self.log, len(self.cluster.nodes_in_cluster),
                durability=self.durability_level,
                replicate_to=self.replicate_to, persist_to=self.persist_to)
            self.cluster_util.print_cluster_stats()
            self.bucket_util.print_bucket_stats()
            self.log.info("==========Finished rebalance base setup========")
Example 19
    def test_fts_index_with_aborts(self):
        """
        1. Create FTS index on default bucket
        2. Load multiple docs such that all sync_writes will be aborted
        3. Verify nothing went into indexing
        4. Load sync_write docs such that they are successful
        5. Validate the mutated docs are taken into indexing
        :return:
        """
        self.key = "test_query_doc"
        self.index_name = "fts_test_index"
        self.sync_write_abort_pattern = self.input.param(
            "sync_write_abort_pattern", "all_aborts")
        self.create_index_during = self.input.param("create_index_during",
                                                    "before_doc_ops")
        self.restServer = self.cluster_util.get_nodes_from_services_map(
            cluster=self.cluster, service_type=CbServer.Services.FTS)
        self.rest = RestConnection(self.restServer)
        crud_batch_size = 1000
        def_bucket = self.cluster.buckets[0]
        kv_nodes = self.cluster_util.get_kv_nodes(self.cluster)
        replica_vbs = dict()
        verification_dict = dict()
        index_item_count = dict()
        expected_num_indexed = dict()
        load_gen = dict()
        load_gen["ADD"] = dict()
        load_gen["SET"] = dict()
        partial_aborts = ["initial_aborts", "aborts_at_end"]

        durability_helper = DurabilityHelper(
            self.log,
            len(self.cluster.nodes_in_cluster),
            durability=self.durability_level,
            replicate_to=self.replicate_to,
            persist_to=self.persist_to)

        if self.create_index_during == "before_doc_ops":
            self.create_fts_indexes(def_bucket.name, self.index_name)

        curr_items = self.bucket_util.get_bucket_current_item_count(
            self.cluster, def_bucket)
        if self.sync_write_abort_pattern in ["all_aborts", "initial_aborts"]:
            self.bucket_util.flush_bucket(self.cluster, def_bucket)
            self.num_items = 0
        else:
            self.num_items = curr_items

        self.log.info("Disabling auto_failover to avoid node failures")
        status = RestConnection(self.cluster.master) \
            .update_autofailover_settings(False, 120, False)
        self.assertTrue(status, msg="Failure during disabling auto-failover")

        # Validate vbucket stats
        verification_dict["ops_create"] = self.num_items
        verification_dict["ops_update"] = 0
        # verification_dict["ops_delete"] = 0
        verification_dict["rollback_item_count"] = 0
        verification_dict["sync_write_aborted_count"] = 0
        verification_dict["sync_write_committed_count"] = 0

        if self.create_index_during == "before_doc_ops":
            self.validate_indexed_doc_count(self.index_name,
                                            verification_dict["ops_create"])

        self.log.info("Loading docs such that all sync_writes will be aborted")
        for server in kv_nodes:
            ssh_shell = RemoteMachineShellConnection(server)
            cbstats = Cbstats(ssh_shell)
            replica_vbs[server] = cbstats.vbucket_list(def_bucket.name,
                                                       "replica")
            load_gen["ADD"][server] = list()
            load_gen["ADD"][server].append(
                doc_generator(self.key,
                              0,
                              crud_batch_size,
                              target_vbucket=replica_vbs[server],
                              mutation_type="ADD"))
            if self.sync_write_abort_pattern in partial_aborts:
                load_gen["ADD"][server].append(
                    doc_generator(self.key,
                                  10000,
                                  crud_batch_size,
                                  target_vbucket=replica_vbs[server],
                                  mutation_type="ADD"))
                verification_dict["ops_create"] += crud_batch_size
                verification_dict["sync_write_committed_count"] += \
                    crud_batch_size

            task_success = self.bucket_util.load_durable_aborts(
                ssh_shell, load_gen["ADD"][server], def_bucket,
                self.durability_level, "create", self.sync_write_abort_pattern)
            if not task_success:
                self.log_failure("Failure during load_abort task")

            verification_dict["sync_write_aborted_count"] += \
                crud_batch_size
            if self.create_index_during == "before_doc_ops":
                self.validate_indexed_doc_count(
                    self.index_name, verification_dict["ops_create"])

            load_gen["SET"][server] = list()
            load_gen["SET"][server].append(
                doc_generator(self.key,
                              0,
                              crud_batch_size,
                              target_vbucket=replica_vbs[server],
                              mutation_type="SET"))
            if self.sync_write_abort_pattern in partial_aborts:
                load_gen["SET"][server].append(
                    doc_generator(self.key,
                                  10000,
                                  crud_batch_size,
                                  target_vbucket=replica_vbs[server],
                                  mutation_type="SET"))
                verification_dict["ops_update"] += crud_batch_size
                verification_dict["sync_write_committed_count"] += \
                    crud_batch_size

            verification_dict["sync_write_aborted_count"] += \
                crud_batch_size
            task_success = self.bucket_util.load_durable_aborts(
                ssh_shell, load_gen["SET"][server], def_bucket,
                self.durability_level, "update", self.sync_write_abort_pattern)
            if not task_success:
                self.log_failure("Failure during load_abort task")

            ssh_shell.disconnect()

            if self.create_index_during == "before_doc_ops":
                self.validate_indexed_doc_count(
                    self.index_name, verification_dict["ops_create"])
        failed = durability_helper.verify_vbucket_details_stats(
            def_bucket,
            kv_nodes,
            vbuckets=self.cluster.vbuckets,
            expected_val=verification_dict)
        if failed:
            self.log_failure("Cbstat vbucket-details verification failed")
        self.validate_test_failure()

        if self.create_index_during == "after_doc_ops":
            self.create_fts_indexes(def_bucket.name, self.index_name)
            self.validate_indexed_doc_count(self.index_name,
                                            verification_dict["ops_create"])

        self.log.info("Verify aborts are not indexed")
        self.validate_indexed_doc_count(self.index_name,
                                        verification_dict["ops_create"])

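        # Trim to the batch whose SyncWrites were aborted (first batch for
        # "initial_aborts", last for "aborts_at_end") so it can now be
        # re-written durably and picked up by the index.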
        for server in kv_nodes:
            if self.sync_write_abort_pattern == "initial_aborts":
                load_gen["ADD"][server] = load_gen["ADD"][server][:1]
                load_gen["SET"][server] = load_gen["SET"][server][:1]
            elif self.sync_write_abort_pattern == "aborts_at_end":
                load_gen["ADD"][server] = load_gen["ADD"][server][-1:]
                load_gen["SET"][server] = load_gen["SET"][server][-1:]

        self.log.info("Load sync_write docs such that they are successful")
        for server in kv_nodes:
            for gen_load in load_gen["ADD"][server]:
                task = self.task.async_load_gen_docs(
                    self.cluster,
                    def_bucket,
                    gen_load,
                    "create",
                    0,
                    batch_size=50,
                    process_concurrency=8,
                    replicate_to=self.replicate_to,
                    persist_to=self.persist_to,
                    durability=self.durability_level,
                    timeout_secs=self.sdk_timeout)
                self.task.jython_task_manager.get_task_result(task)
                if len(task.fail.keys()) != 0:
                    self.log_failure("Some failures seen during doc_ops")
                verification_dict["ops_create"] += crud_batch_size
                self.validate_indexed_doc_count(
                    self.index_name, verification_dict["ops_create"])

            for gen_load in load_gen["SET"][server]:
                task = self.task.async_load_gen_docs(
                    self.cluster,
                    def_bucket,
                    gen_load,
                    "update",
                    0,
                    batch_size=50,
                    process_concurrency=8,
                    replicate_to=self.replicate_to,
                    persist_to=self.persist_to,
                    durability=self.durability_level,
                    timeout_secs=self.sdk_timeout)
                self.task.jython_task_manager.get_task_result(task)
                if len(task.fail.keys()) != 0:
                    self.log_failure("Some failures seen during doc_ops")
                verification_dict["ops_update"] += crud_batch_size
                self.validate_indexed_doc_count(
                    self.index_name, verification_dict["ops_create"])

        self.log.info("Validate the mutated docs are taken into indexing")
        self.validate_indexed_doc_count(self.index_name,
                                        verification_dict["ops_create"])
        self.validate_test_failure()
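For quick reuse, a condensed sketch of the abort-then-verify step above, written as it would sit inside this test method and using only helpers that already appear in it; treat it as illustrative rather than canonical:

    # Force-abort durable creates against one KV node, then assert the
    # FTS index saw none of them
    shell = RemoteMachineShellConnection(server)
    replica_vbs = Cbstats(shell).vbucket_list(def_bucket.name, "replica")
    gens = [doc_generator(self.key, 0, crud_batch_size,
                          target_vbucket=replica_vbs,
                          mutation_type="ADD")]
    task_success = self.bucket_util.load_durable_aborts(
        shell, gens, def_bucket,
        self.durability_level, "create", "all_aborts")
    shell.disconnect()
    if not task_success:
        self.log_failure("Failure during load_abort task")
    # Aborted SyncWrites must never surface in the FTS index
    self.validate_indexed_doc_count(self.index_name,
                                    verification_dict["ops_create"])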