def test_insert_x_delete_y_docs_destroy_cluster(self):
    """Insert docs into a full cluster, delete a subset while the
    cluster is torn down to a single node, then verify the spatial
    view no longer returns the deleted docs."""
    num_docs = self.helper.input.param("num-docs", 100000)
    num_deleted_docs = self.helper.input.param("num-deleted-docs", 10000)
    description = ("description : have a cluster, insert {0} docs, delete "
                   "{1} docs while destroying the cluster into a single node "
                   "and query it")
    self.log.info(description.format(num_docs, num_deleted_docs))
    design_name = "dev_test_delete_{0}_docs_destroy_cluster".format(
        num_deleted_docs)
    doc_prefix = str(uuid.uuid4())[:7]

    # Start from a fully clustered state
    ClusterOperationHelper.add_and_rebalance(self.helper.servers)

    self.helper.create_index_fun(design_name, doc_prefix)
    keys_inserted = self.helper.insert_docs(num_docs, doc_prefix)

    # Tear the cluster down to a single node; don't block on the
    # rebalance so the deletions overlap with it
    ClusterOperationHelper.cleanup_cluster(self.helper.servers, False)
    keys_deleted = self.helper.delete_docs(num_deleted_docs, doc_prefix)
    self._wait_for_rebalance()

    # Deleted docs must no longer be part of the spatial view results
    view_results = self.helper.get_results(design_name, num_docs)
    keys_from_view = self.helper.get_keys(view_results)
    self.assertEqual(len(keys_from_view), num_docs - len(keys_deleted))
    self.helper.verify_result(keys_inserted, keys_deleted + keys_from_view)
def test_insert_x_docs_during_rebalance(self):
    """Start from a single node, then repeatedly add one node, kick
    off a rebalance, insert docs while it runs, and finally verify
    every inserted doc is returned by the index."""
    num_docs = self.helper.input.param("num-docs", 100000)
    description = ("description : have a single node, insert {0} docs, "
                   "query it, add another node, start rebalancing, insert {0} "
                   "docs, finish rebalancing, keep on adding nodes...")
    self.log.info(description.format(num_docs))
    design_name = "dev_test_insert_{0}_docs_during_rebalance".format(
        num_docs)
    doc_prefix = str(uuid.uuid4())[:7]

    # Begin fully de-clustered
    ClusterOperationHelper.cleanup_cluster(self.helper.servers)

    self.helper.create_index_fun(design_name)
    keys_inserted = self.helper.insert_docs(num_docs, doc_prefix)

    # Grow the cluster one node at a time, starting each rebalance
    # without waiting for it to finish
    for node in self.helper.servers[1:]:
        ClusterOperationHelper.add_and_rebalance(
            [self.helper.master, node], False)
        # A fresh prefix ensures new docs are created instead of
        # overwriting the previous batch
        doc_prefix = str(uuid.uuid4())[:7]
        keys_inserted.extend(self.helper.insert_docs(
            num_docs, doc_prefix, wait_for_persistence=False))
        self._wait_for_rebalance()

    # Ensure everything is on disk before querying
    self.helper.wait_for_persistence()

    # Every inserted document must show up in the index
    self.helper.query_index_for_verification(design_name, keys_inserted)
def test_insert_x_docs_during_rebalance(self):
    """Single-node start; per added node: begin a rebalance, insert a
    fresh batch of docs while it runs, wait for it, and finally verify
    all inserted docs are indexed."""
    num_docs = self.helper.input.param("num-docs", 100000)
    description = ("description : have a single node, insert {0} docs, "
                   "query it, add another node, start rebalancing, insert {0} "
                   "docs, finish rebalancing, keep on adding nodes...")
    self.log.info(description.format(num_docs))
    design_name = "dev_test_insert_{0}_docs_during_rebalance".format(
        num_docs)
    doc_prefix = str(uuid.uuid4())[:7]

    # Begin fully de-clustered
    ClusterOperationHelper.remove_and_rebalance(self.helper.servers)

    self.helper.create_index_fun(design_name)
    keys_inserted = self.helper.insert_docs(num_docs, doc_prefix)

    # Add the remaining servers one by one, starting each rebalance
    # without waiting for completion
    for node in self.helper.servers[1:]:
        ClusterOperationHelper.add_and_rebalance(
            [self.helper.master, node], False)
        # New prefix so this batch creates docs rather than
        # overwriting the previous one
        doc_prefix = str(uuid.uuid4())[:7]
        keys_inserted.extend(self.helper.insert_docs(
            num_docs, doc_prefix, wait_for_persistence=False))
        self._wait_for_rebalance()

    # Make sure the data is persisted before querying
    self.helper.wait_for_persistence()

    # Verify that all documents got inserted
    self.helper.query_index_for_verification(design_name, keys_inserted)
def test_insert_x_delete_y_docs_destroy_cluster(self):
    """Populate a full cluster, delete a subset of docs while the
    cluster is collapsed to one node, then confirm the spatial view
    reflects exactly the surviving docs."""
    num_docs = self.helper.input.param("num-docs", 100000)
    num_deleted_docs = self.helper.input.param("num-deleted-docs", 10000)
    description = ("description : have a cluster, insert {0} docs, delete "
                   "{1} docs while destroying the cluster into a single node "
                   "and query it")
    self.log.info(description.format(num_docs, num_deleted_docs))
    design_name = "dev_test_delete_{0}_docs_destroy_cluster".format(
        num_deleted_docs)
    doc_prefix = str(uuid.uuid4())[:7]

    # Start from a fully clustered state
    ClusterOperationHelper.add_and_rebalance(self.helper.servers)

    self.helper.create_index_fun(design_name, doc_prefix)
    keys_inserted = self.helper.insert_docs(num_docs, doc_prefix)

    # Collapse the cluster to a single node without waiting for the
    # rebalance, so deletions run concurrently with it
    ClusterOperationHelper.remove_and_rebalance(self.helper.servers, False)
    keys_deleted = self.helper.delete_docs(num_deleted_docs, doc_prefix)
    self._wait_for_rebalance()

    # Deleted docs must be gone from the spatial view results
    view_results = self.helper.get_results(design_name, num_docs)
    keys_from_view = self.helper.get_keys(view_results)
    self.assertEqual(len(keys_from_view), num_docs - len(keys_deleted))
    self.helper.verify_result(keys_inserted, keys_deleted + keys_from_view)
def _cluster_setup(self):
    """Initialise the cluster and create/load the test bucket(s).

    Reads test parameters (replicas, keys-count, num-buckets), sets
    the node memory quota, rebalances all servers in, creates either
    the single "default" bucket or several buckets, waits for
    memcached readiness, and loads `keys_count` keys into each bucket.
    """
    replicas = self.input.param("replicas", 1)
    keys_count = self.input.param("keys-count", 0)
    num_buckets = self.input.param("num-buckets", 1)
    bucket_name = "default"
    rest = RestConnection(self.master)
    info = rest.get_nodes_self()
    rest.init_cluster(username=self.master.rest_username,
                      password=self.master.rest_password)
    rest.init_cluster_memoryQuota(memoryQuota=info.mcdMemoryReserved)
    rest.reset_autofailover()
    ClusterOperationHelper.add_and_rebalance(self.servers, True)
    if num_buckets == 1:
        # Use floor division: ramQuotaMB must be an integer, and on
        # Python 3 the previous `* 2 / 3` produced a float.
        bucket_ram = info.memoryQuota * 2 // 3
        rest.create_bucket(bucket=bucket_name,
                           ramQuotaMB=bucket_ram,
                           replicaNumber=replicas,
                           proxyPort=info.moxi)
    else:
        created = BucketOperationHelper.create_multiple_buckets(
            self.master, replicas, howmany=num_buckets)
        self.assertTrue(created, "unable to create multiple buckets")
    buckets = rest.get_buckets()
    # Buckets must be serving memcached traffic before loading data
    for bucket in buckets:
        ready = BucketOperationHelper.wait_for_memcached(self.master,
                                                         bucket.name)
        self.assertTrue(ready, msg="wait_for_memcached failed")
    for bucket in buckets:
        inserted_keys_cnt = self.load_data(self.master, bucket.name,
                                           keys_count)
        log.info('inserted {0} keys'.format(inserted_keys_cnt))
def test_rebalance_in(self):
    """Rebalance a server in and verify bucket settings survive it."""
    try:
        self.log.info("Rebalancing 1 of the servers ..")
        ClusterOperationHelper.add_and_rebalance(self.servers)
        self.log.info("Verifying bucket settings after rebalance ..")
        self._check_config()
    except Exception as error:
        # Any failure during rebalance or verification fails the test
        self.fail('[ERROR]Rebalance failed .. , {0}'.format(error))
def _cluster_setup(self):
    """Initialise the cluster and create/load the test bucket(s).

    Reads test parameters (replicas, keys-count, num-buckets,
    bucket_storage), sets the node memory quota, rebalances all
    servers in, creates either the single "default" bucket or several
    buckets with the requested storage backend, waits for memcached
    readiness, and loads `keys_count` keys into each bucket.

    Change: dropped the unused locals `master` and `credentials`.
    """
    replicas = self.input.param("replicas", 1)
    keys_count = self.input.param("keys-count", 0)
    num_buckets = self.input.param("num-buckets", 1)
    bucket_storage = self.input.param("bucket_storage", 'couchstore')
    bucket_name = "default"
    rest = RestConnection(self.master)
    info = rest.get_nodes_self()
    rest.init_cluster(username=self.master.rest_username,
                      password=self.master.rest_password)
    rest.init_cluster_memoryQuota(memoryQuota=info.mcdMemoryReserved)
    rest.reset_autofailover()
    ClusterOperationHelper.add_and_rebalance(self.servers, True)
    if num_buckets == 1:
        # Two thirds of the quota, floor-divided so ramQuotaMB is int
        bucket_ram = info.memoryQuota * 2 // 3
        rest.create_bucket(bucket=bucket_name,
                           ramQuotaMB=bucket_ram,
                           replicaNumber=replicas,
                           proxyPort=info.moxi,
                           storageBackend=bucket_storage)
    else:
        created = BucketOperationHelper.create_multiple_buckets(
            self.master, replicas, howmany=num_buckets,
            bucket_ram_ratio=(1.0 / 4.0),
            bucket_storage=bucket_storage)
        self.assertTrue(created, "unable to create multiple buckets")
    buckets = rest.get_buckets()
    # Buckets must be serving memcached traffic before loading data
    for bucket in buckets:
        ready = BucketOperationHelper.wait_for_memcached(
            self.master, bucket.name)
        self.assertTrue(ready, msg="wait_for_memcached failed")
    for bucket in buckets:
        inserted_keys_cnt = self.load_data(self.master, bucket.name,
                                           keys_count)
        log.info('inserted {0} keys'.format(inserted_keys_cnt))
def setup_cluster(self, do_rebalance=True):
    """Reset the cluster to a clean state and create the default bucket.

    Sets the memory quota from the master node's reserved memory,
    ejects every node, waits for ns_server on the master, optionally
    rebalances the full server list back in, then creates the bucket.
    """
    ram_ratio = BucketOperationHelper.base_bucket_ratio(self.servers)
    quota = int(self.rest.get_nodes_self().mcdMemoryReserved * ram_ratio)
    self.rest.init_cluster(self.master.rest_username,
                           self.master.rest_password)
    self.rest.init_cluster_memoryQuota(self.master.rest_username,
                                       self.master.rest_password,
                                       memoryQuota=quota)
    # Eject every node so we start from a known single-node state
    for node in self.servers:
        ClusterOperationHelper.cleanup_cluster([node])
    ClusterOperationHelper.wait_for_ns_servers_or_assert([self.master],
                                                         self.testcase)
    if do_rebalance:
        ok = ClusterOperationHelper.add_and_rebalance(self.servers)
        self.testcase.assertTrue(ok, "cluster is not rebalanced")
    self._create_default_bucket()
def setup_cluster(self, do_rebalance=True):
    """Reset the cluster to a clean state and create the default bucket.

    Sets the memory quota from the master node's reserved memory,
    ejects every node, waits for ns_server on the master, rebalances
    the full server list back in (skippable via `do_rebalance`), then
    creates the bucket.

    The new `do_rebalance` keyword defaults to True, so existing
    callers are unaffected; it matches the sibling implementation of
    this method elsewhere in the codebase.
    """
    node_ram_ratio = BucketOperationHelper.base_bucket_ratio(self.servers)
    mem_quota = int(self.rest.get_nodes_self().mcdMemoryReserved *
                    node_ram_ratio)
    self.rest.init_cluster(self.master.rest_username,
                           self.master.rest_password)
    self.rest.init_cluster_memoryQuota(self.master.rest_username,
                                       self.master.rest_password,
                                       memoryQuota=mem_quota)
    # Eject every node so we start from a known single-node state
    for server in self.servers:
        ClusterOperationHelper.cleanup_cluster([server])
    ClusterOperationHelper.wait_for_ns_servers_or_assert([self.master],
                                                         self.testcase)
    if do_rebalance:
        rebalanced = ClusterOperationHelper.add_and_rebalance(self.servers)
        self.testcase.assertTrue(rebalanced, "cluster is not rebalanced")
    self._create_default_bucket()
def setUp(self):
    """Prepare the BucketConfig fixture.

    Reads test parameters (lww, drift, skip_rebalance), computes the
    memory quota, and — unless skip_rebalance is set — resets the
    cluster (init, eject all nodes, rebalance back in) before creating
    the test bucket.

    Bug fix: `unittest.TestCase.fail` accepts a single message
    argument; the previous `self.fail(e, 'cluster is not rebalanced')`
    raised TypeError inside the handler instead of failing the test.
    Also removed the commented-out `time_synchronization` line and the
    unused `rebalanced` binding.
    """
    super(BucketConfig, self).setUp()
    self.testcase = '2'
    self.log = logger.Logger.get_logger()
    self.input = TestInputSingleton.input
    self.servers = self.input.servers
    self.lww = self.input.param("lww", True)
    self.drift = self.input.param("drift", False)
    self.bucket = 'bucket-1'
    self.master = self.servers[0]
    self.rest = RestConnection(self.master)
    self.cluster = Cluster()
    self.skip_rebalance = self.input.param("skip_rebalance", False)

    node_ram_ratio = BucketOperationHelper.base_bucket_ratio(self.servers)
    mem_quota = int(self.rest.get_nodes_self().mcdMemoryReserved *
                    node_ram_ratio)

    if not self.skip_rebalance:
        self.rest.init_cluster(self.master.rest_username,
                               self.master.rest_password)
        self.rest.init_cluster_memoryQuota(self.master.rest_username,
                                           self.master.rest_password,
                                           memoryQuota=mem_quota)
        # Eject every node, then rebalance the full list back in
        for server in self.servers:
            ClusterOperationHelper.cleanup_cluster([server])
        ClusterOperationHelper.wait_for_ns_servers_or_assert(
            [self.master], self.testcase)
        try:
            ClusterOperationHelper.add_and_rebalance(self.servers)
        except Exception as e:
            # fail() takes one message; fold the exception into it
            self.fail('cluster is not rebalanced: {0}'.format(e))
    self._create_bucket(self.lww, self.drift)
def _setup_cluster(self):
    """Eject every node, then rebalance the full server list back in."""
    for node in self.servers:
        ClusterOperationHelper.cleanup_cluster([node])
    ok = ClusterOperationHelper.add_and_rebalance(self.servers)
    self.assertTrue(ok, msg="cluster is not rebalanced")