def test_default_moxi(self):
    name = 'default'
    for serverInfo in self.servers:
        rest = RestConnection(serverInfo)
        replicaNumber = 1
        proxyPort = rest.get_nodes_self().moxi
        rest.create_bucket(bucket=name, ramQuotaMB=200,
                           replicaNumber=replicaNumber, proxyPort=proxyPort)
        msg = 'create_bucket succeeded but bucket {0} does not exist'.format(name)
        self.assertTrue(BucketOperationHelper.wait_for_bucket_creation(name, rest), msg=msg)
        ready = BucketOperationHelper.wait_for_memcached(serverInfo, name)
        self.assertTrue(ready, "wait_for_memcached failed")
        inserted_keys = BucketOperationHelper.load_some_data(serverInfo, 1, name)
        self.assertTrue(inserted_keys, 'unable to insert any key to memcached')
        # verify keys
        verified = BucketOperationHelper.verify_data(serverInfo, inserted_keys, True, False, self, bucket=name)
        self.assertTrue(verified, msg='unable to verify the keys that were stored')
        rest.delete_bucket(name)
        msg = 'bucket "{0}" was not deleted even after waiting for one minute'.format(name)
        self.assertTrue(BucketOperationHelper.wait_for_bucket_deletion(name, rest, timeout_in_seconds=60), msg=msg)
        # now let's recreate the bucket
        rest.create_bucket(bucket=name, ramQuotaMB=200,
                           replicaNumber=replicaNumber, proxyPort=proxyPort)
        msg = 'create_bucket succeeded but bucket {0} does not exist'.format(name)
        self.assertTrue(BucketOperationHelper.wait_for_bucket_creation(name, rest), msg=msg)
        BucketOperationHelper.wait_for_memcached(serverInfo, name)
        self.log.info('recreated the default bucket...')
        # loop over the keys and make sure they don't exist anymore
        self.assertTrue(BucketOperationHelper.keys_dont_exist(serverInfo, inserted_keys, name),
                        msg='at least one key found in the bucket')
def _test_cluster_topology_change_body(self):
    bucket = "default"
    BucketOperationHelper.create_bucket(serverInfo=self.master, test_case=self)
    ready = BucketOperationHelper.wait_for_memcached(self.master, bucket)
    self.assertTrue(ready, "wait_for_memcached failed")
    self.add_nodes_and_rebalance()
    distribution = {10: 0.2, 20: 0.5, 30: 0.25, 40: 0.05}
    inserted_keys, rejected_keys = MemcachedClientHelper.load_bucket_and_return_the_keys(
        servers=[self.master],
        ram_load_ratio=1,
        value_size_distribution=distribution,
        moxi=True,
        write_only=True,
        number_of_threads=2)
    self.log.info("Sleep after data load")
    ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket, 'ep_queue_size', 0)
    self.assertTrue(ready, "wait_for ep_queue_size == 0 failed")
    ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket, 'ep_flusher_todo', 0)
    self.assertTrue(ready, "wait_for ep_flusher_todo == 0 failed")
    # let's create a unique folder in the remote location
    for server in self.servers:
        shell = RemoteMachineShellConnection(server)
        output, error = shell.execute_command(self.perm_command)
        shell.log_command_output(output, error)
        node = RestConnection(server).get_nodes_self()
        BackupHelper(server, self).backup(bucket, node, self.remote_tmp_folder)
        shell.disconnect()
    ClusterOperationHelper.cleanup_cluster(self.servers)
    BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
    servers = []
    for i in range(0, len(self.servers) - 1):
        servers.append(self.servers[i])
    self.add_node_and_rebalance(servers[0], servers)
    BucketOperationHelper.delete_bucket_or_assert(self.master, bucket, self)
    BucketOperationHelper.create_bucket(serverInfo=self.master, test_case=self)
    ready = BucketOperationHelper.wait_for_memcached(self.master, bucket)
    self.assertTrue(ready, "wait_for_memcached failed")
    for server in self.servers:
        BackupHelper(server, self).restore(self.remote_tmp_folder)
        time.sleep(10)
    BucketOperationHelper.verify_data(self.master, inserted_keys, False, False, 11210, self)
def _test_backup_add_restore_bucket_with_expiration_key(self, replica):
    bucket = "default"
    rest = RestConnection(self.master)
    info = rest.get_nodes_self()
    size = int(info.memoryQuota * 2.0 / 3.0)
    rest.create_bucket(bucket, ramQuotaMB=size, proxyPort=info.moxi, replicaNumber=replica)
    BucketOperationHelper.wait_for_memcached(self.master, bucket)
    client = MemcachedClientHelper.direct_client(self.master, bucket)
    expiry = 60
    test_uuid = uuid.uuid4()
    keys = ["key_%s_%d" % (test_uuid, i) for i in range(5000)]
    self.log.info("pushing keys with expiry set to {0}".format(expiry))
    for key in keys:
        try:
            client.set(key, expiry, 0, key)
        except mc_bin_client.MemcachedError as error:
            msg = "unable to push key : {0} to bucket : {1} error : {2}"
            self.log.error(msg.format(key, client.vbucketId, error.status))
            self.fail(msg.format(key, client.vbucketId, error.status))
    client.close()
    self.log.info("inserted {0} keys with expiry set to {1}".format(len(keys), expiry))
    ready = RebalanceHelper.wait_for_persistence(self.master, bucket, bucket_type=self.bucket_type)
    self.assertTrue(ready, "not all items persisted. see logs")
    node = RestConnection(self.master).get_nodes_self()
    output, error = self.shell.execute_command(self.perm_command)
    self.shell.log_command_output(output, error)
    backupHelper = BackupHelper(self.master, self)
    backupHelper.backup(bucket, node, self.remote_tmp_folder)
    BucketOperationHelper.delete_bucket_or_assert(self.master, bucket, self)
    rest.create_bucket(bucket, ramQuotaMB=size, proxyPort=info.moxi)
    BucketOperationHelper.wait_for_memcached(self.master, bucket)
    backupHelper.restore(self.remote_tmp_folder)
    time.sleep(60)
    client = MemcachedClientHelper.direct_client(self.master, bucket)
    self.log.info('verifying that all those keys have expired...')
    for key in keys:
        try:
            client.get(key=key)
            msg = "expiry was set to {0} but key: {1} did not expire after waiting for {2}+ seconds"
            self.fail(msg.format(expiry, key, expiry))
        except mc_bin_client.MemcachedError as error:
            self.assertEquals(error.status, 1,
                              msg="expected error code {0} but saw error code {1}".format(1, error.status))
    client.close()
    self.log.info("verified that those keys inserted with expiry set to {0} have expired".format(expiry))
def common_test_body(self, replica, load_ratio, timeout=10):
    log = logger.Logger.get_logger()
    start_time = time.time()
    log.info("replica : {0}".format(replica))
    log.info("load_ratio : {0}".format(load_ratio))
    master = self._servers[0]
    log.info('picking server : {0} as the master'.format(master))
    rest = RestConnection(master)
    info = rest.get_nodes_self()
    rest.init_cluster(username=master.rest_username, password=master.rest_password)
    rest.init_cluster_memoryQuota(memoryQuota=info.mcdMemoryReserved)
    bucket_ram = info.mcdMemoryReserved * 2 / 3
    rest.create_bucket(bucket='default', ramQuotaMB=bucket_ram,
                       replicaNumber=replica, proxyPort=11211)
    json_bucket = {'name': 'default', 'port': 11211, 'password': ''}
    BucketOperationHelper.wait_for_memcached(master, json_bucket)
    log.info("inserting some items in the master before adding any nodes")
    distribution = {1024: 0.4, 2 * 1024: 0.5, 512: 0.1}
    threads = MemcachedClientHelper.create_threads(servers=[master],
                                                   value_size_distribution=distribution,
                                                   number_of_threads=len(self._servers),
                                                   number_of_items=400000000,
                                                   moxi=False,
                                                   write_only=True,
                                                   async_write=True)
    for thread in threads:
        thread.terminate_in_minutes = 24 * 60
        thread.start()
    while time.time() < (start_time + 60 * timeout):
        # rebalance nodes in and out in steps while the load threads keep adding items
        nodes = rest.node_statuses()
        delta = len(self._servers) - len(nodes)
        if delta > 0:
            if delta > 1:
                how_many_add = Random().randint(1, delta)
            else:
                how_many_add = 1
            self.log.info("going to add {0} nodes".format(how_many_add))
            self.rebalance_in(how_many=how_many_add)
        else:
            self.log.info("all nodes have already joined the cluster")
            time.sleep(240)
        RestHelper(rest).wait_for_replication(600)
        # don't rebalance out if there are not enough nodes in the cluster
        if len(nodes) >= (3.0 / 4.0 * len(self._servers)):
            nodes = rest.node_statuses()
            how_many_out = Random().randint(1, len(nodes) - 1)
            self.log.info("going to remove {0} nodes".format(how_many_out))
            self.rebalance_out(how_many=how_many_out)
    for t in threads:
        t.aborted = True
        t.join()
def test_default_dedicated(self):
    name = 'recreate-non-default-{0}'.format(uuid.uuid4())
    serverInfo = self.servers[0]
    if serverInfo.ip != "":
        rest = RestConnection(serverInfo)
        replicaNumber = 1
        proxyPort = rest.get_nodes_self().memcached + 2000
        rest.create_bucket(bucket=name, ramQuotaMB=200,
                           replicaNumber=replicaNumber, proxyPort=proxyPort)
        msg = 'create_bucket succeeded but bucket {0} does not exist'.format(name)
        self.assertTrue(BucketOperationHelper.wait_for_bucket_creation(name, rest), msg=msg)
        ready = BucketOperationHelper.wait_for_memcached(serverInfo, name)
        self.assertTrue(ready, "wait_for_memcached failed")
        inserted_keys = BucketOperationHelper.load_some_data(serverInfo, 1, name)
        self.assertTrue(inserted_keys, 'unable to insert any key to memcached')
        # verify keys
        verified = BucketOperationHelper.verify_data(serverInfo, inserted_keys, True, False, self, bucket=name)
        self.assertTrue(verified, msg='unable to verify the keys that were stored')
        rest.delete_bucket(name)
        msg = 'bucket "{0}" was not deleted even after waiting for one minute'.format(name)
        self.assertTrue(BucketOperationHelper.wait_for_bucket_deletion(name, rest, timeout_in_seconds=60), msg=msg)
        # now let's recreate the bucket
        rest.create_bucket(bucket=name, ramQuotaMB=200,
                           replicaNumber=replicaNumber, proxyPort=proxyPort)
        msg = 'create_bucket succeeded but bucket {0} does not exist'.format(name)
        self.assertTrue(BucketOperationHelper.wait_for_bucket_creation(name, rest), msg=msg)
        ready = BucketOperationHelper.wait_for_memcached(serverInfo, name)
        self.assertTrue(ready, "wait_for_memcached failed")
        self.log.info('recreated the bucket...')
        # loop over the keys and make sure they don't exist anymore
        self.assertTrue(BucketOperationHelper.keys_dont_exist(serverInfo, inserted_keys, name),
                        msg='at least one key found in the bucket')
def _test_backup_and_restore_bucket_overwriting_body(self, overwrite_flag=True):
    bucket = "default"
    BucketOperationHelper.create_bucket(serverInfo=self.master, test_case=self)
    BucketOperationHelper.wait_for_memcached(self.master, bucket)
    self.add_nodes_and_rebalance()
    client = MemcachedClientHelper.direct_client(self.master, "default")
    expiry = 2400
    test_uuid = uuid.uuid4()
    keys = ["key_%s_%d" % (test_uuid, i) for i in range(500)]
    self.log.info("pushing keys with expiry set to {0}".format(expiry))
    for key in keys:
        try:
            client.set(key, expiry, 0, "1")
        except mc_bin_client.MemcachedError as error:
            msg = "unable to push key : {0} to bucket : {1} error : {2}"
            self.log.error(msg.format(key, client.vbucketId, error.status))
            self.fail(msg.format(key, client.vbucketId, error.status))
    self.log.info("inserted {0} keys with expiry set to {1}".format(len(keys), expiry))
    ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket, 'ep_queue_size', 0)
    self.assertTrue(ready, "wait_for ep_queue_size == 0 failed")
    ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket, 'ep_flusher_todo', 0)
    self.assertTrue(ready, "wait_for ep_flusher_todo == 0 failed")
    for server in self.servers:
        shell = RemoteMachineShellConnection(server)
        output, error = shell.execute_command(self.perm_command)
        shell.log_command_output(output, error)
        node = RestConnection(server).get_nodes_self()
        BackupHelper(server, self).backup(bucket, node, self.remote_tmp_folder)
        shell.disconnect()
    for key in keys:
        try:
            client.replace(key, expiry, 0, "2")
        except mc_bin_client.MemcachedError as error:
            msg = "unable to replace key : {0} in bucket : {1} error : {2}"
            self.log.error(msg.format(key, client.vbucketId, error.status))
            self.fail(msg.format(key, client.vbucketId, error.status))
    self.log.info("replaced {0} keys with expiry set to {1}".format(len(keys), expiry))
    for server in self.servers:
        BackupHelper(server, self).restore(self.remote_tmp_folder, overwrite_flag)
        time.sleep(10)
    self.log.info('verifying the values of all those keys...')
    for key in keys:
        if overwrite_flag:
            self.assertEqual("2", client.get(key=key), key + " should have value = 2")
        else:
            self.assertNotEqual("2", client.get(key=key), key + " should not have value = 2")
    self.log.info("verified the restored keys with overwrite_flag = {0}".format(overwrite_flag))
def _cluster_setup(self):
    log = logger.Logger.get_logger()
    replicas = self._input.param("replicas", 1)
    keys_count = self._input.param("keys-count", 0)
    num_buckets = self._input.param("num-buckets", 1)
    bucket_name = "default"
    master = self._servers[0]
    credentials = self._input.membase_settings
    rest = RestConnection(self.master)
    info = rest.get_nodes_self()
    rest.init_cluster(username=self.master.rest_username, password=self.master.rest_password)
    rest.init_cluster_memoryQuota(memoryQuota=info.mcdMemoryReserved)
    rest.reset_autofailover()
    ClusterOperationHelper.add_all_nodes_or_assert(self.master, self._servers, credentials, self)
    bucket_ram = info.memoryQuota * 2 / 3
    if num_buckets == 1:
        rest.create_bucket(bucket=bucket_name, ramQuotaMB=bucket_ram,
                           replicaNumber=replicas, proxyPort=info.moxi)
        ready = BucketOperationHelper.wait_for_memcached(self.master, bucket_name)
        nodes = rest.node_statuses()
        rest.rebalance(otpNodes=[node.id for node in nodes], ejectedNodes=[])
        buckets = rest.get_buckets()
    else:
        created = BucketOperationHelper.create_multiple_buckets(self.master, replicas, howmany=num_buckets)
        self.assertTrue(created, "unable to create multiple buckets")
        buckets = rest.get_buckets()
        for bucket in buckets:
            ready = BucketOperationHelper.wait_for_memcached(self.master, bucket.name)
            self.assertTrue(ready, msg="wait_for_memcached failed")
        nodes = rest.node_statuses()
        rest.rebalance(otpNodes=[node.id for node in nodes], ejectedNodes=[])
    for bucket in buckets:
        inserted_keys_cnt = self.load_data(self.master, bucket.name, keys_count)
        log.info('inserted {0} keys'.format(inserted_keys_cnt))
    msg = "rebalance failed after adding these nodes {0}".format(nodes)
    self.assertTrue(rest.monitorRebalance(), msg=msg)
    self.assertTrue(ready, "wait_for_memcached failed")
def _create_bucket(self, number_of_replicas=1, bucket_name='default'):
    self.bucket_name = bucket_name
    ip_rest = RestConnection(self.servers[0])
    info = ip_rest.get_nodes_self()
    bucket_ram = info.memoryQuota * 2 / 3
    self.log.info('creating bucket : {0}'.format(self.bucket_name))
    ip_rest.create_bucket(bucket=self.bucket_name, ramQuotaMB=bucket_ram,
                          replicaNumber=number_of_replicas, proxyPort=11220)
    msg = 'create_bucket succeeded but bucket {0} does not exist'.format(self.bucket_name)
    self.assertTrue(BucketOperationHelper.wait_for_bucket_creation(self.bucket_name, ip_rest), msg=msg)
    BucketOperationHelper.wait_for_memcached(self.servers[0], self.bucket_name)
def _test_backup_and_restore_from_to_different_buckets(self):
    bucket_before_backup = "bucket_before_backup"
    bucket_after_backup = "bucket_after_backup"
    BucketOperationHelper.create_bucket(serverInfo=self.master, name=bucket_before_backup,
                                        port=11212, test_case=self)
    ready = BucketOperationHelper.wait_for_memcached(self.master, bucket_before_backup)
    self.assertTrue(ready, "wait_for_memcached failed")
    self.add_nodes_and_rebalance()
    distribution = {10: 0.2, 20: 0.5, 30: 0.25, 40: 0.05}
    inserted_keys, rejected_keys = MemcachedClientHelper.load_bucket_and_return_the_keys(
        servers=[self.master],
        name=bucket_before_backup,
        ram_load_ratio=20,
        value_size_distribution=distribution,
        write_only=True,
        moxi=True,
        number_of_threads=2)
    self.log.info("Sleep after data load")
    ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket_before_backup, 'ep_queue_size', 0)
    self.assertTrue(ready, "wait_for ep_queue_size == 0 failed")
    ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket_before_backup, 'ep_flusher_todo', 0)
    self.assertTrue(ready, "wait_for ep_flusher_todo == 0 failed")
    for server in self.servers:
        shell = RemoteMachineShellConnection(server)
        output, error = shell.execute_command(self.perm_command)
        shell.log_command_output(output, error)
        node = RestConnection(server).get_nodes_self()
        BackupHelper(server, self).backup(bucket_before_backup, node, self.remote_tmp_folder)
        shell.disconnect()
    BucketOperationHelper.delete_bucket_or_assert(self.master, bucket_before_backup, self)
    BucketOperationHelper.create_bucket(serverInfo=self.master, name=bucket_after_backup,
                                        port=11212, test_case=self)
    ready = BucketOperationHelper.wait_for_memcached(self.master, bucket_after_backup)
    self.assertTrue(ready, "wait_for_memcached failed")
    for server in self.servers:
        BackupHelper(server, self).restore(self.remote_tmp_folder, moxi_port=11212)
        time.sleep(10)
    ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket_after_backup, 'ep_queue_size', 0)
    self.assertTrue(ready, "wait_for ep_queue_size == 0 failed")
    ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket_after_backup, 'ep_flusher_todo', 0)
    self.assertTrue(ready, "wait_for ep_flusher_todo == 0 failed")
    self.assertTrue(BucketOperationHelper.verify_data(self.master, inserted_keys, False, False, 11212,
                                                      debug=False, bucket=bucket_after_backup),
                    "Missing keys")
def test_database_fragmentation(self):
    percent_threshold = self.autocompaction_value
    bucket_name = "default"
    MAX_RUN = 100
    item_size = 1024
    update_item_size = item_size * ((float(97 - percent_threshold)) / 100)
    serverInfo = self.servers[0]
    self.log.info(serverInfo)
    rest = RestConnection(serverInfo)
    remote_client = RemoteMachineShellConnection(serverInfo)
    output, rq_content, header = rest.set_auto_compaction("false",
                                                          dbFragmentThresholdPercentage=percent_threshold,
                                                          viewFragmntThresholdPercentage=100)
    if not output and (percent_threshold <= MIN_COMPACTION_THRESHOLD
                       or percent_threshold >= MAX_COMPACTION_THRESHOLD):
        self.assertFalse(output, "it should be impossible to set compaction value = {0}%".format(percent_threshold))
        import json
        self.assertTrue("errors" in json.loads(rq_content), "Error is not present in response")
        self.assertTrue(json.loads(rq_content)["errors"].find("Allowed range is 2 - 100") > -1,
                        "Error 'Allowed range is 2 - 100' expected, but was '{0}'".format(
                            json.loads(rq_content)["errors"]))
        self.log.info("Response contains error = '%(errors)s' as expected" % json.loads(rq_content))
    elif output and MIN_COMPACTION_THRESHOLD <= percent_threshold <= MAX_RUN:
        node_ram_ratio = BucketOperationHelper.base_bucket_ratio(TestInputSingleton.input.servers)
        info = rest.get_nodes_self()
        available_ram = info.memoryQuota * node_ram_ratio / 2
        items = (int(available_ram * 1000) // 2) // item_size
        rest.create_bucket(bucket=bucket_name, ramQuotaMB=int(available_ram),
                           authType='sasl', saslPassword='******',
                           replicaNumber=1, proxyPort=11211)
        BucketOperationHelper.wait_for_memcached(serverInfo, bucket_name)
        BucketOperationHelper.wait_for_vbuckets_ready_state(serverInfo, bucket_name)
        self.log.info("start to load {0}K keys with {1} bytes/key".format(items, item_size))
        self.insert_key(serverInfo, bucket_name, items, item_size)
        self.log.info("sleep 10 seconds before the next run")
        time.sleep(10)
        self.log.info("start to update {0}K keys with smaller value {1} bytes/key".format(
            items, int(update_item_size)))
        self.insert_key(serverInfo, bucket_name, items, int(update_item_size))
        compact_run = remote_client.wait_till_compaction_end(rest, bucket_name, timeout_in_seconds=180)
        if compact_run:
            self.log.info("auto compaction ran successfully")
        else:
            self.log.error("auto compaction did not run")
def _test_backup_add_restore_bucket_with_expiration_key(self, replica):
    bucket = "default"
    rest = RestConnection(self.master)
    info = rest.get_nodes_self()
    size = int(info.memoryQuota * 2.0 / 3.0)
    rest.create_bucket(bucket, ramQuotaMB=size, proxyPort=info.moxi, replicaNumber=replica)
    BucketOperationHelper.wait_for_memcached(self.master, bucket)
    client = MemcachedClientHelper.direct_client(self.master, bucket)
    expiry = 60
    test_uuid = uuid.uuid4()
    keys = ["key_%s_%d" % (test_uuid, i) for i in range(5000)]
    self.log.info("pushing keys with expiry set to {0}".format(expiry))
    for key in keys:
        try:
            client.set(key, expiry, 0, key)
        except mc_bin_client.MemcachedError as error:
            msg = "unable to push key : {0} to bucket : {1} error : {2}"
            self.log.error(msg.format(key, client.vbucketId, error.status))
            self.fail(msg.format(key, client.vbucketId, error.status))
    client.close()
    self.log.info("inserted {0} keys with expiry set to {1}".format(len(keys), expiry))
    ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket, 'ep_queue_size', 0)
    self.assertTrue(ready, "wait_for ep_queue_size == 0 failed")
    ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket, 'ep_flusher_todo', 0)
    self.assertTrue(ready, "wait_for ep_flusher_todo == 0 failed")
    node = RestConnection(self.master).get_nodes_self()
    output, error = self.shell.execute_command(self.perm_command)
    self.shell.log_command_output(output, error)
    backupHelper = BackupHelper(self.master, self)
    backupHelper.backup(bucket, node, self.remote_tmp_folder)
    BucketOperationHelper.delete_bucket_or_assert(self.master, bucket, self)
    rest.create_bucket(bucket, ramQuotaMB=size, proxyPort=info.moxi)
    BucketOperationHelper.wait_for_memcached(self.master, bucket)
    backupHelper.restore(self.remote_tmp_folder)
    time.sleep(60)
    client = MemcachedClientHelper.direct_client(self.master, bucket)
    self.log.info('verifying that all those keys have expired...')
    for key in keys:
        try:
            client.get(key=key)
            msg = "expiry was set to {0} but key: {1} did not expire after waiting for {2}+ seconds"
            self.fail(msg.format(expiry, key, expiry))
        except mc_bin_client.MemcachedError as error:
            self.assertEquals(error.status, 1,
                              msg="expected error code {0} but saw error code {1}".format(1, error.status))
    client.close()
    self.log.info("verified that those keys inserted with expiry set to {0} have expired".format(expiry))
def setUp(self):
    self.log = logger.Logger.get_logger()
    self.master = TestInputSingleton.input.servers[0]
    ClusterOperationHelper.cleanup_cluster([self.master])
    BucketOperationHelper.delete_all_buckets_or_assert([self.master], self)
    self._bucket_name = 'default'
    serverInfo = self.master
    rest = RestConnection(serverInfo)
    info = rest.get_nodes_self()
    self._bucket_port = info.moxi
    rest.init_cluster(username=serverInfo.rest_username, password=serverInfo.rest_password)
    rest.init_cluster_memoryQuota(memoryQuota=info.mcdMemoryReserved)
    bucket_ram = info.memoryQuota * 2 / 3
    rest.create_bucket(bucket=self._bucket_name, ramQuotaMB=bucket_ram, proxyPort=info.memcached)
    msg = 'create_bucket succeeded but bucket "default" does not exist'
    self.assertTrue(BucketOperationHelper.wait_for_bucket_creation(self._bucket_name, rest), msg=msg)
    ready = BucketOperationHelper.wait_for_memcached(serverInfo, self._bucket_name)
    self.assertTrue(ready, "wait_for_memcached failed")
    self._log_start()
def setUp(self):
    super(AutoFailoverAbortsRebalance, self).setUp()
    self.master = self.servers[0]
    self._get_params()
    self.rest = RestConnection(self.orchestrator)
    node_ram_ratio = BucketOperationHelper.base_bucket_ratio(self.servers)
    self.num_buckets = self.num_buckets - 1  # this is done as default is created by base class
    if self.num_buckets:
        BucketOperationHelper.create_multiple_buckets(self.master, self.num_replicas,
                                                      node_ram_ratio * (2.0 / 3.0),
                                                      howmany=self.num_buckets,
                                                      bucket_storage=self.bucket_storage)
    self.buckets = self.rest.get_buckets()
    for bucket in self.buckets:
        ready = BucketOperationHelper.wait_for_memcached(self.master, bucket.name)
        self.assertTrue(ready, "wait_for_memcached failed")
    self.initial_load_gen = BlobGenerator('auto-failover', 'auto-failover-', self.value_size,
                                          end=self.num_items)
    self.update_load_gen = BlobGenerator('auto-failover', 'auto-failover-', self.value_size,
                                         end=self.update_items)
    self.delete_load_gen = BlobGenerator('auto-failover', 'auto-failover-', self.value_size,
                                         start=self.update_items, end=self.delete_items)
    self._load_all_buckets(self.servers[0], self.initial_load_gen, "create", 0)
    self._async_load_all_buckets(self.orchestrator, self.update_load_gen, "update", 0)
    self._async_load_all_buckets(self.orchestrator, self.delete_load_gen, "delete", 0)
def common_setup(input, testcase, bucket_ram_ratio=(2.8 / 3.0), replica=0):
    log = logger.Logger.get_logger()
    servers = input.servers
    BucketOperationHelper.delete_all_buckets_or_assert(servers, testcase)
    ClusterOperationHelper.cleanup_cluster(servers)
    ClusterHelper.wait_for_ns_servers_or_assert(servers, testcase)
    serverInfo = servers[0]
    log.info('picking server : {0} as the master'.format(serverInfo))
    # if all nodes are on the same machine let's have the bucket_ram_ratio as bucket_ram_ratio * 1/len(servers)
    node_ram_ratio = BucketOperationHelper.base_bucket_ratio(servers)
    rest = RestConnection(serverInfo)
    info = rest.get_nodes_self()
    rest.init_cluster(username=serverInfo.rest_username, password=serverInfo.rest_password)
    rest.init_cluster_memoryQuota(memoryQuota=int(info.mcdMemoryReserved * node_ram_ratio))
    if "ascii" in TestInputSingleton.input.test_params \
            and TestInputSingleton.input.test_params["ascii"].lower() == "true":
        BucketOperationHelper.create_multiple_buckets(serverInfo, replica,
                                                      node_ram_ratio * bucket_ram_ratio,
                                                      howmany=1, sasl=False)
    else:
        BucketOperationHelper.create_multiple_buckets(serverInfo, replica,
                                                      node_ram_ratio * bucket_ram_ratio,
                                                      howmany=1, sasl=True)
    buckets = rest.get_buckets()
    for bucket in buckets:
        ready = BucketOperationHelper.wait_for_memcached(serverInfo, bucket.name)
        testcase.assertTrue(ready, "wait_for_memcached failed")
def setUp(self):
    self.log = logger.Logger.get_logger()
    self.master = TestInputSingleton.input.servers[0]
    ClusterOperationHelper.cleanup_cluster([self.master])
    BucketOperationHelper.delete_all_buckets_or_assert([self.master], self)
    self._bucket_name = 'default'
    serverInfo = self.master
    rest = RestConnection(serverInfo)
    info = rest.get_nodes_self()
    self._bucket_port = info.moxi
    rest.init_cluster(username=serverInfo.rest_username, password=serverInfo.rest_password)
    rest.init_cluster_memoryQuota(memoryQuota=info.mcdMemoryReserved)
    bucket_ram = info.memoryQuota * 2 / 3
    # Add built-in user
    testuser = [{'id': 'cbadminbucket', 'name': 'cbadminbucket', 'password': '******'}]
    RbacBase().create_user_source(testuser, 'builtin', self.master)
    time.sleep(10)
    # Assign user to role
    role_list = [{'id': 'cbadminbucket', 'name': 'cbadminbucket', 'roles': 'admin'}]
    RbacBase().add_user_role(role_list, RestConnection(self.master), 'builtin')
    time.sleep(10)
    rest.create_bucket(bucket=self._bucket_name, ramQuotaMB=bucket_ram, proxyPort=info.memcached)
    msg = 'create_bucket succeeded but bucket "default" does not exist'
    self.assertTrue(BucketOperationHelper.wait_for_bucket_creation(self._bucket_name, rest), msg=msg)
    ready = BucketOperationHelper.wait_for_memcached(serverInfo, self._bucket_name)
    self.assertTrue(ready, "wait_for_memcached failed")
    self._log_start()
def test_default_moxi_sasl(self):
    name = 'new-bucket-{0}'.format(uuid.uuid4())
    for serverInfo in self.servers:
        rest = RestConnection(serverInfo)
        replicaNumber = 1
        proxyPort = rest.get_nodes_self().moxi
        rest.create_bucket(bucket=name, ramQuotaMB=200, replicaNumber=replicaNumber,
                           proxyPort=proxyPort, authType="sasl", saslPassword='******')
        msg = 'create_bucket succeeded but bucket {0} does not exist'.format(name)
        self.assertTrue(BucketOperationHelper.wait_for_bucket_creation(name, rest), msg=msg)
        ready = BucketOperationHelper.wait_for_memcached(serverInfo, name)
        self.assertTrue(ready, "wait_for_memcached failed")
        rest.delete_bucket(name)
        msg = 'bucket "{0}" was not deleted even after waiting for 30 seconds'.format(name)
        self.assertTrue(BucketOperationHelper.wait_for_bucket_deletion(name, rest, timeout_in_seconds=30), msg=msg)
def _cluster_setup(self):
    replicas = self.input.param("replicas", 1)
    keys_count = self.input.param("keys-count", 0)
    num_buckets = self.input.param("num-buckets", 1)
    bucket_name = "default"
    master = self.servers[0]
    credentials = self.input.membase_settings
    rest = RestConnection(self.master)
    info = rest.get_nodes_self()
    rest.init_cluster(username=self.master.rest_username, password=self.master.rest_password)
    rest.init_cluster_memoryQuota(memoryQuota=info.mcdMemoryReserved)
    rest.reset_autofailover()
    ClusterOperationHelper.add_and_rebalance(self.servers, True)
    if num_buckets == 1:
        bucket_ram = info.memoryQuota * 2 / 3
        rest.create_bucket(bucket=bucket_name, ramQuotaMB=bucket_ram,
                           replicaNumber=replicas, proxyPort=info.moxi)
    else:
        created = BucketOperationHelper.create_multiple_buckets(self.master, replicas, howmany=num_buckets)
        self.assertTrue(created, "unable to create multiple buckets")
    buckets = rest.get_buckets()
    for bucket in buckets:
        ready = BucketOperationHelper.wait_for_memcached(self.master, bucket.name)
        self.assertTrue(ready, msg="wait_for_memcached failed")
    for bucket in buckets:
        inserted_keys_cnt = self.load_data(self.master, bucket.name, keys_count)
        log.info('inserted {0} keys'.format(inserted_keys_cnt))
def setUp(self):
    self.log = logger.Logger().get_logger()
    self.input = TestInputSingleton.input
    self.servers = self.input.servers
    self.master = self.servers[0]
    self.ip = self.master.ip
    self.finished = False
    self.keys = []
    self.keycount = 0
    self.failure_string = ""
    self.cleanup()
    rest = RestConnection(self.master)
    info = rest.get_nodes_self()
    self.port = info.moxi + 1
    rest.init_cluster(username=self.master.rest_username, password=self.master.rest_password)
    rest.init_cluster_memoryQuota(memoryQuota=info.mcdMemoryReserved)
    created = BucketOperationHelper.create_multiple_buckets(self.master, replica=1,
                                                            bucket_ram_ratio=(2.0 / 3.0),
                                                            howmany=10, sasl=False)
    self.assertTrue(created, "bucket creation failed")
    ready = BucketOperationHelper.wait_for_memcached(self.master, "bucket-0")
    self.assertTrue(ready, "wait_for_memcached failed")
def common_setup(self, replica):
    self._input = TestInputSingleton.input
    self._servers = self._input.servers
    first = self._servers[0]
    self.log = logger.Logger().get_logger()
    self.log.info(self._input)
    rest = RestConnection(first)
    for server in self._servers:
        RestHelper(RestConnection(server)).is_ns_server_running()
    ClusterOperationHelper.cleanup_cluster(self._servers)
    BucketOperationHelper.delete_all_buckets_or_assert(self._servers, self)
    ClusterOperationHelper.add_all_nodes_or_assert(self._servers[0], self._servers,
                                                   self._input.membase_settings, self)
    nodes = rest.node_statuses()
    otpNodeIds = []
    for node in nodes:
        otpNodeIds.append(node.id)
    info = rest.get_nodes_self()
    bucket_ram = info.mcdMemoryReserved * 3 / 4
    rest.create_bucket(bucket="default", ramQuotaMB=int(bucket_ram), replicaNumber=replica,
                       proxyPort=rest.get_nodes_self().moxi)
    msg = "wait_for_memcached failed"
    ready = BucketOperationHelper.wait_for_memcached(first, "default")
    self.assertTrue(ready, msg)
    rebalanceStarted = rest.rebalance(otpNodeIds, [])
    self.assertTrue(rebalanceStarted, "unable to start rebalance on master node {0}".format(first.ip))
    self.log.info('started rebalance operation on master node {0}'.format(first.ip))
    rebalanceSucceeded = rest.monitorRebalance()
    # without a bucket this seems to fail
    self.assertTrue(rebalanceSucceeded,
                    "rebalance operation for nodes: {0} was not successful".format(otpNodeIds))
    self.awareness = VBucketAwareMemcached(rest, "default")
def test_max_buckets(self):
    log = logger.Logger.get_logger()
    serverInfo = self.servers[0]
    log.info('picking server : {0} as the master'.format(serverInfo))
    rest = RestConnection(serverInfo)
    proxyPort = rest.get_nodes_self().moxi
    info = rest.get_nodes_self()
    rest.init_cluster(username=serverInfo.rest_username, password=serverInfo.rest_password)
    rest.init_cluster_memoryQuota(memoryQuota=info.mcdMemoryReserved)
    bucket_ram = 100
    bucket_count = info.mcdMemoryReserved // bucket_ram
    for i in range(bucket_count):
        bucket_name = 'max_buckets-{0}'.format(uuid.uuid4())
        rest.create_bucket(bucket=bucket_name, ramQuotaMB=bucket_ram,
                           authType='sasl', proxyPort=proxyPort)
        ready = BucketOperationHelper.wait_for_memcached(serverInfo, bucket_name)
        self.assertTrue(ready, "wait_for_memcached failed")
    buckets = []
    try:
        buckets = rest.get_buckets()
    except Exception:
        log.info('15 seconds sleep before calling get_buckets again...')
        time.sleep(15)
        buckets = rest.get_buckets()
    if len(buckets) != bucket_count:
        msg = 'tried to create {0} buckets, only created {1}'.format(bucket_count, len(buckets))
        log.error(msg)
        self.fail(msg=msg)
def setUp(self):
    self.log = logger.Logger.get_logger()
    self.master = TestInputSingleton.input.servers[0]
    ClusterOperationHelper.cleanup_cluster([self.master])
    BucketOperationHelper.delete_all_buckets_or_assert([self.master], self)
    self._bucket_name = 'default'
    serverInfo = self.master
    rest = RestConnection(serverInfo)
    info = rest.get_nodes_self()
    self._bucket_port = info.moxi
    rest.init_cluster(username=serverInfo.rest_username, password=serverInfo.rest_password)
    rest.init_cluster_memoryQuota(memoryQuota=info.mcdMemoryReserved)
    bucket_ram = info.memoryQuota * 2 / 3
    # Add built-in user
    testuser = [{'id': 'cbadminbucket', 'name': 'cbadminbucket', 'password': '******'}]
    RbacBase().create_user_source(testuser, 'builtin', self.master)
    # Assign user to role
    role_list = [{'id': 'cbadminbucket', 'name': 'cbadminbucket', 'roles': 'admin'}]
    RbacBase().add_user_role(role_list, RestConnection(self.master), 'builtin')
    rest.create_bucket(bucket=self._bucket_name, ramQuotaMB=bucket_ram, proxyPort=info.memcached)
    msg = 'create_bucket succeeded but bucket "default" does not exist'
    if (testconstants.TESTRUNNER_CLIENT in os.environ.keys()) and \
            os.environ[testconstants.TESTRUNNER_CLIENT] == testconstants.PYTHON_SDK:
        self.client = SDKSmartClient(serverInfo, self._bucket_name,
                                     compression=TestInputSingleton.input.param("sdk_compression", True))
    else:
        self.client = MemcachedClientHelper.direct_client(serverInfo, self._bucket_name)
    self.assertTrue(BucketOperationHelper.wait_for_bucket_creation(self._bucket_name, rest), msg=msg)
    ready = BucketOperationHelper.wait_for_memcached(serverInfo, self._bucket_name)
    self.assertTrue(ready, "wait_for_memcached failed")
    self._log_start()
def _cluster_setup(self):
    log = logger.Logger.get_logger()
    replicas = self._input.param("replicas", 1)
    keys_count = self._input.param("keys-count", 0)
    num_buckets = self._input.param("num-buckets", 1)
    bucket_name = "default"
    master = self._servers[0]
    credentials = self._input.membase_settings
    rest = RestConnection(master)
    info = rest.get_nodes_self()
    rest.init_cluster(username=master.rest_username, password=master.rest_password)
    rest.init_cluster_memoryQuota(memoryQuota=info.mcdMemoryReserved)
    rest.reset_autofailover()
    ClusterOperationHelper.add_all_nodes_or_assert(master, self._servers, credentials, self)
    bucket_ram = info.memoryQuota * 2 / 3
    if num_buckets == 1:
        rest.create_bucket(bucket=bucket_name, ramQuotaMB=bucket_ram,
                           replicaNumber=replicas, proxyPort=info.moxi)
        ready = BucketOperationHelper.wait_for_memcached(master, bucket_name)
        nodes = rest.node_statuses()
        rest.rebalance(otpNodes=[node.id for node in nodes], ejectedNodes=[])
        buckets = rest.get_buckets()
    else:
        created = BucketOperationHelper.create_multiple_buckets(master, replicas, howmany=num_buckets)
        self.assertTrue(created, "unable to create multiple buckets")
        buckets = rest.get_buckets()
        for bucket in buckets:
            ready = BucketOperationHelper.wait_for_memcached(master, bucket.name)
            self.assertTrue(ready, msg="wait_for_memcached failed")
        nodes = rest.node_statuses()
        rest.rebalance(otpNodes=[node.id for node in nodes], ejectedNodes=[])
    # self.load_data(master, bucket_name, keys_count)
    for bucket in buckets:
        inserted_keys_cnt = self.load_data(master, bucket.name, keys_count)
        log.info('inserted {0} keys'.format(inserted_keys_cnt))
    msg = "rebalance failed after adding these nodes {0}".format(nodes)
    self.assertTrue(rest.monitorRebalance(), msg=msg)
    self.assertTrue(ready, "wait_for_memcached failed")
def set_get_test(self, value_size, number_of_items):
    fixed_value = MemcachedClientHelper.create_value("S", value_size)
    specs = [("default", 0),
             ("set-get-bucket-replica-1", 1),
             ("set-get-bucket-replica-2", 2),
             ("set-get-bucket-replica-3", 3)]
    serverInfo = self.master
    rest = RestConnection(serverInfo)
    bucket_ram = int(rest.get_nodes_self().memoryQuota / 4)
    mcport = rest.get_nodes_self().memcached
    for name, replica in specs:
        rest.create_bucket(name, bucket_ram, "sasl", "password", replica, mcport)
    bucket_data = {}
    buckets = RestConnection(serverInfo).get_buckets()
    for bucket in buckets:
        bucket_data[bucket.name] = {}
        ready = BucketOperationHelper.wait_for_memcached(serverInfo, bucket.name)
        self.test.assertTrue(ready, "wait_for_memcached failed")
        client = MemcachedClientHelper.direct_client(serverInfo, bucket.name)
        inserted = []
        rejected = []
        while len(inserted) <= number_of_items and len(rejected) <= number_of_items:
            try:
                key = str(uuid.uuid4())
                client.set(key, 0, 0, fixed_value)
                inserted.append(key)
            except mc_bin_client.MemcachedError:
                pass
        retry = 0
        remaining_items = []
        remaining_items.extend(inserted)
        msg = "memcachedError : {0} - unable to get a pre-inserted key : {1}"
        while retry < 10 and len(remaining_items) > 0:
            verified_keys = []
            for key in remaining_items:
                try:
                    flag, keyx, value = client.get(key=key)
                    if not value == fixed_value:
                        self.test.fail("value mismatch for key {0}".format(key))
                    verified_keys.append(key)
                except mc_bin_client.MemcachedError as error:
                    self.log.error(msg.format(error.status, key))
            retry += 1
            [remaining_items.remove(x) for x in verified_keys]
        print_count = 0
        for key in remaining_items:
            if print_count > 100:
                break
            print_count += 1
            self.log.error("unable to verify key : {0}".format(key))
        if remaining_items:
            self.test.fail("unable to verify {0} keys".format(len(remaining_items)))
def _create_bucket(self, bucketname):
    self.rest.create_bucket(bucket=bucketname, ramQuotaMB=100,
                            authType="sasl", saslPassword="******")
    ready = BucketOperationHelper.wait_for_memcached(self.master, bucketname)
    self.assertTrue(ready, msg="wait_for_memcached failed")
def _cluster_setup(self):
    keys_count = self.input.param("keys-count", 0)
    num_buckets = self.input.param("num-buckets", 1)
    bucketType = self.input.param("bucketType", "ephemeral")
    evictionPolicy = self.input.param("evictionPolicy", "noEviction")  # or fullEviction
    self.bucket_storage = self.input.param("bucket_storage", 'couchstore')
    # master = self.servers[0]
    # credentials = self.input.membase_settings
    rest = RestConnection(self.master)
    info = rest.get_nodes_self()
    rest.init_cluster(username=self.master.rest_username, password=self.master.rest_password)
    memory = min(info.mcdMemoryReserved, self.input.param("kv_memory", 1000))
    rest.init_cluster_memoryQuota(memoryQuota=memory)
    rest.reset_autoreprovision()
    self._add_and_rebalance(self.servers, True)
    if num_buckets == 1:
        bucket_name = "default"
        bucket_ram = info.memoryQuota * 2 // 3
        rest.create_bucket(bucket=bucket_name, ramQuotaMB=bucket_ram,
                           replicaNumber=self.replicas, proxyPort=info.moxi,
                           bucketType=bucketType, evictionPolicy=evictionPolicy,
                           storageBackend=self.bucket_storage)
    else:
        created = BucketOperationHelper.create_multiple_buckets(self.master, self.replicas,
                                                                howmany=num_buckets,
                                                                bucketType=bucketType,
                                                                evictionPolicy=evictionPolicy,
                                                                storageBackend=self.bucket_storage)
        self.assertTrue(created, "unable to create multiple buckets")
    buckets = rest.get_buckets()
    for bucket in buckets:
        ready = BucketOperationHelper.wait_for_memcached(self.master, bucket.name)
        self.assertTrue(ready, msg="wait_for_memcached failed")
    for bucket in buckets:
        distribution = {10: 0.2, 20: 0.5, 30: 0.25, 40: 0.05}
        inserted_count, rejected_count = self.load_bucket_and_return_the_keys(
            servers=[self.master],
            name=bucket.name,
            # ram_load_ratio=0.02,
            value_size_distribution=distribution,
            write_only=True,
            moxi=True,
            number_of_threads=2,
            number_of_items=keys_count)
        self.loaded_items[bucket.name] = inserted_count
def create_buckets(servers, testcase, howmany=1, replica=1, bucket_ram_ratio=(2.0 / 3.0)):
    node_ram_ratio = BucketOperationHelper.base_bucket_ratio(servers)
    master = servers[0]
    BucketOperationHelper.create_multiple_buckets(master, replica,
                                                  node_ram_ratio * bucket_ram_ratio,
                                                  howmany=howmany)
    rest = RestConnection(master)
    buckets = rest.get_buckets()
    for bucket in buckets:
        ready = BucketOperationHelper.wait_for_memcached(master, bucket.name)
        testcase.assertTrue(ready, "wait_for_memcached failed")
def test_max_buckets(self):
    log = logger.Logger.get_logger()
    serverInfo = self.servers[0]
    log.info('picking server : {0} as the master'.format(serverInfo))
    rest = RestConnection(serverInfo)
    proxyPort = rest.get_nodes_self().moxi
    info = rest.get_nodes_self()
    rest.init_cluster(username=serverInfo.rest_username, password=serverInfo.rest_password)
    rest.init_cluster_memoryQuota(memoryQuota=info.mcdMemoryReserved)
    bucket_num = rest.get_internalSettings("maxBucketCount")
    bucket_ram = 100
    testuser = [{'id': 'cbadminbucket', 'name': 'cbadminbucket', 'password': '******'}]
    rolelist = [{'id': 'cbadminbucket', 'name': 'cbadminbucket', 'roles': 'admin'}]
    RbacBase().create_user_source(testuser, 'builtin', self.master)
    RbacBase().add_user_role(rolelist, RestConnection(self.master), 'builtin')
    for i in range(bucket_num):
        bucket_name = 'max_buckets-{0}'.format(uuid.uuid4())
        rest.create_bucket(bucket=bucket_name, ramQuotaMB=bucket_ram,
                           authType='sasl', proxyPort=proxyPort)
        ready = BucketOperationHelper.wait_for_memcached(serverInfo, bucket_name)
        self.assertTrue(ready, "wait_for_memcached failed")
    buckets = rest.get_buckets()
    if len(buckets) != bucket_num:
        msg = 'tried to create {0} buckets, only created {1}'.format(bucket_num, len(buckets))
        self.fail(msg)
    try:
        rest.create_bucket(bucket=bucket_name, ramQuotaMB=bucket_ram,
                           authType='sasl', proxyPort=proxyPort)
        msg = 'bucket creation did not fail even though system was overcommitted'
        self.fail(msg)
    except BucketCreationException as ex:
        self.log.info('BucketCreationException was thrown as expected when we try to create {0} buckets'
                      .format(bucket_num + 1))
    buckets = rest.get_buckets()
    if len(buckets) != bucket_num:
        msg = 'tried to create {0} buckets, only created {1}'.format(bucket_num + 1, len(buckets))
        self.fail(msg)
def _test_delete_key_and_backup_and_restore_body(self):
    bucket = "default"
    BucketOperationHelper.create_bucket(serverInfo=self.master, name=bucket, test_case=self)
    ready = BucketOperationHelper.wait_for_memcached(self.master, bucket)
    self.assertTrue(ready, "wait_for_memcached failed")
    self.add_nodes_and_rebalance()
    client = MemcachedClientHelper.direct_client(self.master, "default")
    expiry = 2400
    test_uuid = uuid.uuid4()
    keys = ["key_%s_%d" % (test_uuid, i) for i in range(500)]
    self.log.info("pushing keys with expiry set to {0}".format(expiry))
    for key in keys:
        try:
            client.set(key, expiry, 0, "1")
        except mc_bin_client.MemcachedError as error:
            msg = "unable to push key : {0} to bucket : {1} error : {2}"
            self.log.error(msg.format(key, client.vbucketId, error.status))
            self.fail(msg.format(key, client.vbucketId, error.status))
    self.log.info("inserted {0} keys with expiry set to {1}".format(len(keys), expiry))
    client.delete(keys[0])
    ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket, 'ep_queue_size', 0)
    self.assertTrue(ready, "wait_for ep_queue_size == 0 failed")
    ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket, 'ep_flusher_todo', 0)
    self.assertTrue(ready, "wait_for ep_flusher_todo == 0 failed")
    # let's create a unique folder in the remote location
    for server in self.servers:
        shell = RemoteMachineShellConnection(server)
        output, error = shell.execute_command(self.perm_command)
        shell.log_command_output(output, error)
        node = RestConnection(server).get_nodes_self()
        BackupHelper(server, self).backup(bucket, node, self.remote_tmp_folder)
        shell.disconnect()
    for server in self.servers:
        BackupHelper(server, self).restore(self.remote_tmp_folder)
        time.sleep(10)
    self.log.info('verifying that the deleted key is missing and the remaining keys were restored...')
    missing_keys = []
    verify_keys = []
    for key in keys:
        vBucketId = crc32.crc32_hash(key) & 1023  # or & 0x3FF
        client.vbucketId = vBucketId
        if key == keys[0]:
            missing_keys.append(key)
        else:
            verify_keys.append(key)
    self.assertTrue(BucketOperationHelper.keys_dont_exist(self.master, missing_keys, self),
                    "deleted key was restored")
    self.assertTrue(BucketOperationHelper.verify_data(self.master, verify_keys, False, False, 11210, self),
                    "Missing keys")
def test_max_buckets(self): log = logger.Logger.get_logger() serverInfo = self.servers[0] log.info('picking server : {0} as the master'.format(serverInfo)) rest = RestConnection(serverInfo) info = rest.get_nodes_self() rest.init_cluster(username=serverInfo.rest_username, password=serverInfo.rest_password) bucket_num = rest.get_internalSettings("maxBucketCount") log.info("max # buckets allowed in cluster: {0}".format(bucket_num)) bucket_ram = 100 cluster_ram = info.memoryQuota max_buckets = cluster_ram // bucket_ram log.info("RAM setting for this cluster: {0}".format(cluster_ram)) testuser = [{ 'id': 'cbadminbucket', 'name': 'cbadminbucket', 'password': '******' }] rolelist = [{ 'id': 'cbadminbucket', 'name': 'cbadminbucket', 'roles': 'admin' }] RbacBase().create_user_source(testuser, 'builtin', self.master) RbacBase().add_user_role(rolelist, RestConnection(self.master), 'builtin') for i in range(max_buckets): bucket_name = 'max_buckets-{0}'.format(uuid.uuid4()) rest.create_bucket(bucket=bucket_name, ramQuotaMB=bucket_ram) ready = BucketOperationHelper.wait_for_memcached( serverInfo, bucket_name) cluster_ram -= bucket_ram log.info("kv RAM left in cluster: {0}".format(cluster_ram)) self.assertTrue(ready, "wait_for_memcached failed") buckets = rest.get_buckets() if len(buckets) != max_buckets: msg = 'tried to create {0} buckets, only created {1}'\ .format(max_buckets, len(buckets)) self.fail(msg) try: bucket_name = 'max_buckets-{0}'.format(uuid.uuid4()) rest.create_bucket(bucket=bucket_name, ramQuotaMB=bucket_ram) msg = 'bucket creation did not fail even though system was overcommitted' self.fail(msg) except BucketCreationException as ex: log.info('BucketCreationException was thrown as expected when we tried to create {0} buckets'.format(max_buckets + 1)) buckets = rest.get_buckets() if len(buckets) != max_buckets: msg = 'tried to create {0} buckets, only created {1}'\ .format(max_buckets + 1, len(buckets)) self.fail(msg)
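# Illustrative sketch with made-up numbers: how the test above sizes its creation loop.
# Capacity is the cluster KV quota divided by the fixed per-bucket quota; floor division
# keeps the result an int so it can feed range().
cluster_ram = 2000                       # hypothetical cluster KV quota in MB
bucket_ram = 100                         # per-bucket quota used by the test
max_buckets = cluster_ram // bucket_ram  # 20
for _ in range(max_buckets):
    cluster_ram -= bucket_ram            # RAM left after each successful create
print(cluster_ram)                       # 0 - the next create should be rejected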
def _create_multiple_buckets(self, replica=1): master = self.servers[0] created = BucketOperationHelper.create_multiple_buckets(master, replica, howmany=self.num_buckets) self.assertTrue(created, "unable to create multiple buckets") rest = RestConnection(master) buckets = rest.get_buckets() for bucket in buckets: ready = BucketOperationHelper.wait_for_memcached(master, bucket.name) self.assertTrue(ready, msg="wait_for_memcached failed")
def common_setup(self): self.log = logger.Logger.get_logger() self.input = TestInputSingleton.input self.servers = self.input.servers master = self.servers[0] rest = RestConnection(master) # Cleanup previous state self.task_manager = None rest.stop_rebalance() RebalanceBaseTest.reset(self) # Initialize test params self.replica = self.input.param("replica", 1) # By default we use keys-count for LoadTask # Use keys-count=-1 to use load-ratio self.keys_count = self.input.param("keys-count", 30000) self.load_ratio = self.input.param("load-ratio", 6) self.expiry_ratio = self.input.param("expiry-ratio", 0.1) self.delete_ratio = self.input.param("delete-ratio", 0.1) self.access_ratio = self.input.param("access-ratio", 0.8) self.num_buckets = self.input.param("num-buckets", 1) self.num_rebalance = self.input.param("num-rebalance", 1) self.do_ascii = self.input.param("ascii", False) self.do_verify = self.input.param("do-verify", True) self.repeat = self.input.param("repeat", 1) self.max_ops_per_second = self.input.param("max_ops_per_second", 500) self.min_item_size = self.input.param("min_item_size", 128) self.do_stop = self.input.param("do-stop", False) self.skip_cleanup = self.input.param("skip-cleanup", False) self.checkResidentRatio = self.input.param("checkResidentRatio", False) self.activeRatio = self.input.param("activeRatio", 50) self.replicaRatio = self.input.param("replicaRatio", 50) self.case_number = self.input.param("case_number", 0) self.log.info('picking server : {0} as the master'.format(master)) node_ram_ratio = BucketOperationHelper.base_bucket_ratio(self.servers) info = rest.get_nodes_self() rest.init_cluster(username=master.rest_username, password=master.rest_password) rest.init_cluster_memoryQuota(memoryQuota=int(info.mcdMemoryReserved * node_ram_ratio)) BucketOperationHelper.create_multiple_buckets(master, self.replica, node_ram_ratio * (2.0 / 3.0), howmany=self.num_buckets, sasl=not self.do_ascii) buckets = rest.get_buckets() for bucket in buckets: ready = BucketOperationHelper.wait_for_memcached(master, bucket.name) self.assertTrue(ready, "wait_for_memcached failed") # Initialize and start the taskManager self.task_manager = taskmanager.TaskManager() self.task_manager.start()
def _create_default_bucket(self): helper = RestHelper(self.rest) if not helper.bucket_exists(self.bucket): node_ram_ratio = BucketOperationHelper.base_bucket_ratio(self.servers) info = self.rest.get_nodes_self() available_ram = int(info.memoryQuota * node_ram_ratio) if available_ram < 256: available_ram = 256 self.rest.create_bucket(bucket=self.bucket, ramQuotaMB=available_ram) ready = BucketOperationHelper.wait_for_memcached(self.master, self.bucket) self.testcase.assertTrue(ready, "wait_for_memcached failed") self.testcase.assertTrue(helper.bucket_exists(self.bucket), "unable to create {0} bucket".format(self.bucket))
def _create_default_bucket(self): name = "default" master = self.master rest = RestConnection(master) node_ram_ratio = BucketOperationHelper.base_bucket_ratio(TestInputSingleton.input.servers) info = rest.get_nodes_self() available_ram = info.memoryQuota * node_ram_ratio rest.create_bucket(bucket=name, ramQuotaMB=int(available_ram)) ready = BucketOperationHelper.wait_for_memcached(master, name) self.assertTrue(ready, msg="wait_for_memcached failed") self.load_thread = None self.shutdown_load_data = False
def check_bucket_ready(self, task_manager): try: if BucketOperationHelper.wait_for_memcached(self.server, self.bucket): self.set_result({"status": "success", "value": None}) self.state = "finished" return else: self.log.info("vbucket map not ready after try {0}".format(self.retries)) except Exception: self.log.info("vbucket map not ready after try {0}".format(self.retries)) self.retries = self.retries + 1 task_manager.schedule(self)
def _create_default_bucket(self, replica=1): name = "default" master = self.servers[0] rest = RestConnection(master) helper = RestHelper(RestConnection(master)) if not helper.bucket_exists(name): node_ram_ratio = BucketOperationHelper.base_bucket_ratio(self.servers) info = rest.get_nodes_self() available_ram = info.memoryQuota * node_ram_ratio rest.create_bucket(bucket=name, ramQuotaMB=int(available_ram), replicaNumber=replica) ready = BucketOperationHelper.wait_for_memcached(master, name) self.assertTrue(ready, msg="wait_for_memcached failed") self.assertTrue(helper.bucket_exists(name), msg="unable to create {0} bucket".format(name))
def check(self, task_manager): try: if BucketOperationHelper.wait_for_memcached(self.server, self.bucket): self.log.info("bucket '{0}' was created with per node RAM quota: {1}".format(self.bucket, self.size)) self.set_result(True) self.state = FINISHED return else: self.log.info("vbucket map not ready after try {0}".format(self.retries)) except Exception: self.log.info("vbucket map not ready after try {0}".format(self.retries)) self.retries = self.retries + 1 task_manager.schedule(self)
def _create_default_bucket(self): name = "default" master = self.master rest = RestConnection(master) helper = RestHelper(RestConnection(master)) if not helper.bucket_exists(name): node_ram_ratio = BucketOperationHelper.base_bucket_ratio(TestInputSingleton.input.servers) info = rest.get_nodes_self() available_ram = info.memoryQuota * node_ram_ratio rest.create_bucket(bucket=name, ramQuotaMB=int(available_ram)) ready = BucketOperationHelper.wait_for_memcached(master, name) self.assertTrue(ready, msg="wait_for_memcached failed") self.assertTrue(helper.bucket_exists(name), msg="unable to create {0} bucket".format(name))
def _create_default_bucket(self, unittest): name = "default" master = self.master rest = RestConnection(master) helper = RestHelper(RestConnection(master)) if not helper.bucket_exists(name): node_ram_ratio = BucketOperationHelper.base_bucket_ratio(TestInputSingleton.input.servers) info = rest.get_nodes_self() available_ram = info.memoryQuota * node_ram_ratio rest.create_bucket(bucket=name, ramQuotaMB=int(available_ram)) ready = BucketOperationHelper.wait_for_memcached(master, name) BucketOperationHelper.wait_for_vbuckets_ready_state(master, name) unittest.assertTrue(ready, msg="wait_for_memcached failed") unittest.assertTrue(helper.bucket_exists(name), msg="unable to create {0} bucket".format(name))
def _create_default_bucket(self, replica=1): name = "default" master = self.servers[0] rest = RestConnection(master) helper = RestHelper(RestConnection(master)) if not helper.bucket_exists(name): node_ram_ratio = BucketOperationHelper.base_bucket_ratio(self.servers) info = rest.get_nodes_self() available_ram = info.memoryQuota * node_ram_ratio rest.create_bucket(bucket=name, ramQuotaMB=int(available_ram), replicaNumber=replica, storageBackend=self.bucket_storage) ready = BucketOperationHelper.wait_for_memcached(master, name) self.assertTrue(ready, msg="wait_for_memcached failed") self.assertTrue(helper.bucket_exists(name), msg="unable to create {0} bucket".format(name))
def _cluster_setup(self): bucket_name = "default" master = self._servers[0] rest = RestConnection(master) info = rest.get_nodes_self() rest.init_cluster(username=master.rest_username, password=master.rest_password) rest.init_cluster_memoryQuota(memoryQuota=info.mcdMemoryReserved) rest.reset_autofailover() bucket_ram = info.memoryQuota * 2 / 3 rest.create_bucket(bucket=bucket_name, ramQuotaMB=bucket_ram, proxyPort=info.moxi) ready = BucketOperationHelper.wait_for_memcached(master, bucket_name) self.assertTrue(ready, "wait_for_memcached failed")
def check_bucket_ready(self, task_manager): try: if BucketOperationHelper.wait_for_memcached( self.server, self.bucket): self.set_result({"status": "success", "value": None}) self.state = "finished" return else: self.log.info("vbucket map not ready after try {0}".format( self.retries)) except Exception: self.log.info("vbucket map not ready after try {0}".format( self.retries)) self.retries = self.retries + 1 task_manager.schedule(self)
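# Illustrative sketch (hypothetical stand-ins, not the suite's taskmanager module):
# the check_bucket_ready / check tasks above implement a self-rescheduling poll - run
# the readiness probe, and if the bucket is not ready yet, bump the retry counter and
# put the task back on the queue. MiniTaskManager and WaitUntilReady below only
# demonstrate that pattern.
import time

class MiniTaskManager(object):
    def __init__(self):
        self.queue = []

    def schedule(self, task, delay=0.1):
        self.queue.append((task, delay))

    def run(self):
        # Drain the queue; tasks that are not finished re-schedule themselves.
        while self.queue:
            task, delay = self.queue.pop(0)
            time.sleep(delay)
            task.check(self)

class WaitUntilReady(object):
    def __init__(self, probe, max_retries=10):
        self.probe = probe            # callable returning True once the bucket is ready
        self.max_retries = max_retries
        self.retries = 0
        self.result = None

    def check(self, task_manager):
        if self.probe():
            self.result = {"status": "success", "value": None}
            return
        self.retries += 1
        if self.retries < self.max_retries:
            task_manager.schedule(self)   # try again on the next pass
        else:
            self.result = {"status": "error", "value": "bucket never became ready"}

# Usage: pretend the bucket becomes ready on the third poll.
attempts = {"count": 0}
def probe():
    attempts["count"] += 1
    return attempts["count"] >= 3

tm = MiniTaskManager()
task = WaitUntilReady(probe)
tm.schedule(task)
tm.run()
print(task.result)  # {'status': 'success', 'value': None}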
def _cluster_setup(self): keys_count = self.input.param("keys-count", 0) num_buckets = self.input.param("num-buckets", 1) bucketType = self.input.param("bucketType", "ephemeral") evictionPolicy = self.input.param("evictionPolicy", "noEviction") # fullEviction # master = self.servers[0] # credentials = self.input.membase_settings rest = RestConnection(self.master) info = rest.get_nodes_self() rest.init_cluster(username=self.master.rest_username, password=self.master.rest_password) memory = min(info.mcdMemoryReserved, self.input.param("kv_memory", 1000)) rest.init_cluster_memoryQuota(memoryQuota=memory) rest.reset_autoreprovision() self._add_and_rebalance(self.servers, True) if num_buckets == 1: bucket_name = "default" bucket_ram = info.memoryQuota * 2 / 3 rest.create_bucket(bucket=bucket_name, ramQuotaMB=bucket_ram, replicaNumber=self.replicas, proxyPort=info.moxi, bucketType=bucketType, evictionPolicy=evictionPolicy) else: created = BucketOperationHelper.create_multiple_buckets( self.master, self.replicas, howmany=num_buckets, bucketType=bucketType, evictionPolicy=evictionPolicy) self.assertTrue(created, "unable to create multiple buckets") buckets = rest.get_buckets() for bucket in buckets: ready = BucketOperationHelper.wait_for_memcached(self.master, bucket.name) self.assertTrue(ready, msg="wait_for_memcached failed") for bucket in buckets: distribution = {10: 0.2, 20: 0.5, 30: 0.25, 40: 0.05} inserted_keys, rejected_keys = self.load_bucket_and_return_the_keys(servers=[self.master], name=bucket.name, # ram_load_ratio=0.02, value_size_distribution=distribution, write_only=True, moxi=True, number_of_threads=2, number_of_items=keys_count) self.loaded_items[bucket.name] = inserted_keys
def _create_default_bucket(self): rest = RestConnection(self.master) helper = RestHelper(RestConnection(self.master)) if not helper.bucket_exists(self.bucket): node_ram_ratio = BucketOperationHelper.base_bucket_ratio([self.master]) info = rest.get_nodes_self() available_ram = info.memoryQuota * node_ram_ratio serverInfo = self.master rest.init_cluster(username=serverInfo.rest_username, password=serverInfo.rest_password) rest.init_cluster_memoryQuota(memoryQuota=int(info.mcdMemoryReserved * node_ram_ratio)) rest.create_bucket(bucket=self.bucket, ramQuotaMB=int(available_ram)) ready = BucketOperationHelper.wait_for_memcached(self.master, self.bucket) self.assertTrue(ready, msg="wait_for_memcached failed") self.assertTrue(helper.bucket_exists(self.bucket), msg="unable to create {0} bucket".format(self.bucket))
def _create_default_bucket(self): name = "default" master = self.servers[0] rest = RestConnection(master) helper = RestHelper(RestConnection(master)) if not helper.bucket_exists(name): node_ram_ratio = BucketOperationHelper.base_bucket_ratio(self.servers) info = rest.get_nodes_self() available_ram = info.mcdMemoryReserved * node_ram_ratio rest.create_bucket(bucket=name, ramQuotaMB=int(available_ram)) ready = BucketOperationHelper.wait_for_memcached(master, name) self.assertTrue(ready, msg="wait_for_memcached failed") self.assertTrue(helper.bucket_exists(name), msg="unable to create {0} bucket".format(name)) self.load_thread = None self.shutdown_load_data = False
def setUp(self): self.log = logger.Logger.get_logger() self.master = TestInputSingleton.input.servers[0] ClusterOperationHelper.cleanup_cluster([self.master]) BucketOperationHelper.delete_all_buckets_or_assert([self.master], self) self._bucket_name = 'default' serverInfo = self.master rest = RestConnection(serverInfo) info = rest.get_nodes_self() self._bucket_port = info.moxi rest.init_cluster(username=serverInfo.rest_username, password=serverInfo.rest_password) rest.init_cluster_memoryQuota(memoryQuota=info.mcdMemoryReserved) bucket_ram = info.memoryQuota * 2 / 3 # Add built-in user testuser = [{ 'id': 'cbadminbucket', 'name': 'cbadminbucket', 'password': '******' }] RbacBase().create_user_source(testuser, 'builtin', self.master) time.sleep(10) # Assign user to role role_list = [{ 'id': 'cbadminbucket', 'name': 'cbadminbucket', 'roles': 'admin' }] RbacBase().add_user_role(role_list, RestConnection(self.master), 'builtin') time.sleep(10) rest.create_bucket(bucket=self._bucket_name, ramQuotaMB=bucket_ram, proxyPort=info.memcached) msg = 'create_bucket succeeded but bucket "default" does not exist' self.assertTrue(BucketOperationHelper.wait_for_bucket_creation( self._bucket_name, rest), msg=msg) ready = BucketOperationHelper.wait_for_memcached( serverInfo, self._bucket_name) self.assertTrue(ready, "wait_for_memcached failed") self._log_start()
def _create_default_bucket(self): helper = RestHelper(self.rest) if not helper.bucket_exists(self.bucket): node_ram_ratio = BucketOperationHelper.base_bucket_ratio( self.servers) info = self.rest.get_nodes_self() available_ram = int(info.memoryQuota * node_ram_ratio) if available_ram < 256: available_ram = 256 self.rest.create_bucket(bucket=self.bucket, ramQuotaMB=available_ram) ready = BucketOperationHelper.wait_for_memcached(self.master, self.bucket) self.testcase.assertTrue(ready, "wait_for_memcached failed") self.testcase.assertTrue( helper.bucket_exists(self.bucket), "unable to create {0} bucket".format(self.bucket))
def _create_default_bucket(self): name = "default" master = self.servers[0] rest = RestConnection(master) helper = RestHelper(RestConnection(master)) if not helper.bucket_exists(name): node_ram_ratio = BucketOperationHelper.base_bucket_ratio( self.servers) info = rest.get_nodes_self() available_ram = info.mcdMemoryReserved * node_ram_ratio rest.create_bucket(bucket=name, ramQuotaMB=int(available_ram)) ready = BucketOperationHelper.wait_for_memcached(master, name) self.assertTrue(ready, msg="wait_for_memcached failed") self.assertTrue(helper.bucket_exists(name), msg="unable to create {0} bucket".format(name)) self.load_thread = None self.shutdown_load_data = False