def _test_cluster_topology_change_body(self):
    """Load data, back up every node, shrink the cluster by one node, then restore and verify."""
    bucket = "default"
    BucketOperationHelper.create_bucket(serverInfo=self.master, test_case=self)
    ready = BucketOperationHelper.wait_for_memcached(self.master, bucket)
    self.assertTrue(ready, "wait_for_memcached failed")
    self.add_nodes_and_rebalance()
    distribution = {10: 0.2, 20: 0.5, 30: 0.25, 40: 0.05}
    inserted_keys, rejected_keys = MemcachedClientHelper.load_bucket_and_return_the_keys(
        servers=[self.master],
        ram_load_ratio=1,
        value_size_distribution=distribution,
        moxi=True,
        write_only=True,
        number_of_threads=2)
    self.log.info("waiting for ep_queue_size and ep_flusher_todo to drain after data load")
    ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket, 'ep_queue_size', 0)
    self.assertTrue(ready, "wait_for ep_queue_size == 0 failed")
    ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket, 'ep_flusher_todo', 0)
    self.assertTrue(ready, "wait_for ep_flusher_todo == 0 failed")
    # let's create a unique folder in the remote location and back up each node
    for server in self.servers:
        shell = RemoteMachineShellConnection(server)
        output, error = shell.execute_command(self.perm_command)
        shell.log_command_output(output, error)
        node = RestConnection(server).get_nodes_self()
        BackupHelper(server, self).backup(bucket, node, self.remote_tmp_folder)
        shell.disconnect()
    ClusterOperationHelper.cleanup_cluster(self.servers)
    BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
    # rebuild the cluster with one node fewer, then restore into a fresh bucket
    servers = self.servers[:-1]
    self.add_node_and_rebalance(servers[0], servers)
    BucketOperationHelper.delete_bucket_or_assert(self.master, bucket, self)
    BucketOperationHelper.create_bucket(serverInfo=self.master, test_case=self)
    ready = BucketOperationHelper.wait_for_memcached(self.master, bucket)
    self.assertTrue(ready, "wait_for_memcached failed")
    for server in self.servers:
        BackupHelper(server, self).restore(self.remote_tmp_folder)
        time.sleep(10)
    BucketOperationHelper.verify_data(self.master, inserted_keys, False, False, 11210, self)
def test_default_moxi(self):
    name = 'default'
    for serverInfo in self.servers:
        rest = RestConnection(serverInfo)
        replicaNumber = 1
        proxyPort = rest.get_nodes_self().moxi
        rest.create_bucket(bucket=name,
                           ramQuotaMB=200,
                           replicaNumber=replicaNumber,
                           proxyPort=proxyPort)
        msg = 'create_bucket succeeded but bucket {0} does not exist'.format(name)
        self.assertTrue(BucketOperationHelper.wait_for_bucket_creation(name, rest), msg=msg)
        ready = BucketOperationHelper.wait_for_memcached(serverInfo, name)
        self.assertTrue(ready, "wait_for_memcached failed")
        inserted_keys = BucketOperationHelper.load_some_data(serverInfo, 1, name)
        self.assertTrue(inserted_keys, 'unable to insert any key to memcached')
        # verify keys
        verified = BucketOperationHelper.verify_data(serverInfo, inserted_keys, True, False, self, bucket=name)
        self.assertTrue(verified, msg='unable to verify the keys that were stored')
        rest.delete_bucket(name)
        msg = 'bucket "{0}" was not deleted even after waiting for one minute'.format(name)
        self.assertTrue(BucketOperationHelper.wait_for_bucket_deletion(name, rest, timeout_in_seconds=60), msg=msg)
        # now let's recreate the bucket
        rest.create_bucket(bucket=name,
                           ramQuotaMB=200,
                           replicaNumber=replicaNumber,
                           proxyPort=proxyPort)
        msg = 'create_bucket succeeded but bucket {0} does not exist'.format(name)
        self.assertTrue(BucketOperationHelper.wait_for_bucket_creation(name, rest), msg=msg)
        ready = BucketOperationHelper.wait_for_memcached(serverInfo, name)
        self.assertTrue(ready, "wait_for_memcached failed")
        self.log.info('recreated the default bucket...')
        # loop over the keys and make sure they don't exist in the recreated bucket
        self.assertTrue(BucketOperationHelper.keys_dont_exist(serverInfo, inserted_keys, name),
                        msg='at least one key found in the bucket')
def _test_delete_key_and_backup_and_restore_body(self):
    """Delete one key before backup and verify that after restore only that key is missing."""
    bucket = "default"
    BucketOperationHelper.create_bucket(serverInfo=self.master, name=bucket, test_case=self)
    ready = BucketOperationHelper.wait_for_memcached(self.master, bucket)
    self.assertTrue(ready, "wait_for_memcached failed")
    self.add_nodes_and_rebalance()
    client = MemcachedClientHelper.direct_client(self.master, "default")
    expiry = 2400
    test_uuid = uuid.uuid4()
    keys = ["key_%s_%d" % (test_uuid, i) for i in range(500)]
    self.log.info("pushing keys with expiry set to {0}".format(expiry))
    for key in keys:
        try:
            client.set(key, expiry, 0, "1")
        except mc_bin_client.MemcachedError as error:
            msg = "unable to push key : {0} to vbucket : {1} error : {2}"
            self.log.error(msg.format(key, client.vbucketId, error.status))
            self.fail(msg.format(key, client.vbucketId, error.status))
    self.log.info("inserted {0} keys with expiry set to {1}".format(len(keys), expiry))
    client.delete(keys[0])
    ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket, 'ep_queue_size', 0)
    self.assertTrue(ready, "wait_for ep_queue_size == 0 failed")
    ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket, 'ep_flusher_todo', 0)
    self.assertTrue(ready, "wait_for ep_flusher_todo == 0 failed")
    # let's create a unique folder in the remote location and back up each node
    for server in self.servers:
        shell = RemoteMachineShellConnection(server)
        output, error = shell.execute_command(self.perm_command)
        shell.log_command_output(output, error)
        node = RestConnection(server).get_nodes_self()
        BackupHelper(server, self).backup(bucket, node, self.remote_tmp_folder)
        shell.disconnect()
    for server in self.servers:
        BackupHelper(server, self).restore(self.remote_tmp_folder)
        time.sleep(10)
    self.log.info('verifying that the deleted key is absent and the rest were restored...')
    # the first key was deleted before the backup; every other key should survive the restore
    missing_keys = [keys[0]]
    verify_keys = keys[1:]
    self.assertTrue(BucketOperationHelper.keys_dont_exist(self.master, missing_keys, self),
                    "deleted key still exists after restore")
    self.assertTrue(BucketOperationHelper.verify_data(self.master, verify_keys, False, False, 11210, self),
                    "Missing keys")
def test_default_dedicated(self):
    name = 'recreate-non-default-{0}'.format(uuid.uuid4())
    serverInfo = self.servers[0]
    if serverInfo.ip != "":
        rest = RestConnection(serverInfo)
        replicaNumber = 1
        proxyPort = rest.get_nodes_self().memcached + 2000
        rest.create_bucket(bucket=name,
                           ramQuotaMB=200,
                           replicaNumber=replicaNumber,
                           proxyPort=proxyPort)
        msg = 'create_bucket succeeded but bucket {0} does not exist'.format(name)
        self.assertTrue(BucketOperationHelper.wait_for_bucket_creation(name, rest), msg=msg)
        ready = BucketOperationHelper.wait_for_memcached(serverInfo, name)
        self.assertTrue(ready, "wait_for_memcached failed")
        inserted_keys = BucketOperationHelper.load_some_data(serverInfo, 1, name)
        self.assertTrue(inserted_keys, 'unable to insert any key to memcached')
        # verify keys
        verified = BucketOperationHelper.verify_data(serverInfo, inserted_keys, True, False, self, bucket=name)
        self.assertTrue(verified, msg='unable to verify the keys that were stored')
        rest.delete_bucket(name)
        msg = 'bucket "{0}" was not deleted even after waiting for one minute'.format(name)
        self.assertTrue(BucketOperationHelper.wait_for_bucket_deletion(name, rest, timeout_in_seconds=60), msg=msg)
        # now let's recreate the bucket
        rest.create_bucket(bucket=name,
                           ramQuotaMB=200,
                           replicaNumber=replicaNumber,
                           proxyPort=proxyPort)
        msg = 'create_bucket succeeded but bucket {0} does not exist'.format(name)
        self.assertTrue(BucketOperationHelper.wait_for_bucket_creation(name, rest), msg=msg)
        ready = BucketOperationHelper.wait_for_memcached(serverInfo, name)
        self.assertTrue(ready, "wait_for_memcached failed")
        self.log.info('recreated the bucket...')
        # loop over the keys and make sure they don't exist in the recreated bucket
        self.assertTrue(BucketOperationHelper.keys_dont_exist(serverInfo, inserted_keys, name),
                        msg='at least one key found in the bucket')
def _test_backup_add_restore_bucket_body(self, bucket="default", port_no=11211,
                                         delay_after_data_load=0, startup_flag=True):
    """Back up a bucket, recreate it, restore, and verify; optionally with membase stopped
    during the backup and restore steps."""
    self.remote_tmp_folder = "/tmp/{0}-{1}".format("mbbackuptestdefaultbucket", uuid.uuid4())
    master = self.servers[0]
    node = RestConnection(master).get_nodes_self()
    BucketOperationHelper.delete_bucket_or_assert(master, bucket, self)
    BucketOperationHelper.create_bucket(serverInfo=master, name=bucket, replica=1,
                                        port=port_no, test_case=self)
    keys = BucketOperationHelper.load_some_data(master, bucket_name=bucket, test=self)
    if not startup_flag:
        self.shell.stop_membase()
    else:
        self.log.info("Sleep {0} seconds after data load".format(delay_after_data_load))
        time.sleep(delay_after_data_load)
    # let's create a unique folder in the remote location
    output, error = self.shell.execute_command("mkdir -p {0}".format(self.remote_tmp_folder))
    self.shell.log_command_output(output, error)
    # now let's back up
    BackupHelper(master, self).backup(bucket, node, self.remote_tmp_folder)
    if not startup_flag:
        self.shell.start_membase()
    BucketOperationHelper.delete_bucket_or_assert(master, bucket, self)
    BucketOperationHelper.create_bucket(serverInfo=master, name=bucket, replica=1,
                                        port=port_no, test_case=self)
    if not startup_flag:
        self.shell.stop_membase()
    BackupHelper(master, self).restore(self.remote_tmp_folder)
    if not startup_flag:
        self.shell.start_membase()
    # verify against the server object, matching verify_data usage elsewhere in this suite
    BucketOperationHelper.verify_data(master, keys, False, False, port_no, self)
def _test_backup_and_restore_from_to_different_buckets(self):
    """Back up from one bucket and restore into a differently named bucket, then verify the keys."""
    bucket_before_backup = "bucket_before_backup"
    bucket_after_backup = "bucket_after_backup"
    BucketOperationHelper.create_bucket(serverInfo=self.master, name=bucket_before_backup,
                                        port=11212, test_case=self)
    ready = BucketOperationHelper.wait_for_memcached(self.master, bucket_before_backup)
    self.assertTrue(ready, "wait_for_memcached failed")
    self.add_nodes_and_rebalance()
    distribution = {10: 0.2, 20: 0.5, 30: 0.25, 40: 0.05}
    inserted_keys, rejected_keys = MemcachedClientHelper.load_bucket_and_return_the_keys(
        servers=[self.master],
        name=bucket_before_backup,
        ram_load_ratio=20,
        value_size_distribution=distribution,
        write_only=True,
        moxi=True,
        number_of_threads=2)
    self.log.info("waiting for ep_queue_size and ep_flusher_todo to drain after data load")
    ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket_before_backup, 'ep_queue_size', 0)
    self.assertTrue(ready, "wait_for ep_queue_size == 0 failed")
    ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket_before_backup, 'ep_flusher_todo', 0)
    self.assertTrue(ready, "wait_for ep_flusher_todo == 0 failed")
    # back up every node into the remote folder
    for server in self.servers:
        shell = RemoteMachineShellConnection(server)
        output, error = shell.execute_command(self.perm_command)
        shell.log_command_output(output, error)
        node = RestConnection(server).get_nodes_self()
        BackupHelper(server, self).backup(bucket_before_backup, node, self.remote_tmp_folder)
        shell.disconnect()
    BucketOperationHelper.delete_bucket_or_assert(self.master, bucket_before_backup, self)
    BucketOperationHelper.create_bucket(serverInfo=self.master, name=bucket_after_backup,
                                        port=11212, test_case=self)
    ready = BucketOperationHelper.wait_for_memcached(self.master, bucket_after_backup)
    self.assertTrue(ready, "wait_for_memcached failed")
    # restore into the new bucket and wait for persistence to settle
    for server in self.servers:
        BackupHelper(server, self).restore(self.remote_tmp_folder, moxi_port=11212)
        time.sleep(10)
    ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket_after_backup, 'ep_queue_size', 0)
    self.assertTrue(ready, "wait_for ep_queue_size == 0 failed")
    ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket_after_backup, 'ep_flusher_todo', 0)
    self.assertTrue(ready, "wait_for ep_flusher_todo == 0 failed")
    self.assertTrue(BucketOperationHelper.verify_data(self.master, inserted_keys, False, False, 11212,
                                                      debug=False, bucket=bucket_after_backup),
                    "Missing keys")
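# The _test_*_body helpers above are parameterized and, following the usual convention
# in this suite, are driven by thin public test methods. The wrappers below are a minimal
# sketch of that pattern, assuming the body signatures defined above; the wrapper names
# and parameter values are illustrative, not taken from the original source.
def test_backup_add_restore_default_bucket(self):
    # online path: membase stays up, short settle delay after the data load
    self._test_backup_add_restore_bucket_body(bucket="default", port_no=11211,
                                              delay_after_data_load=10,
                                              startup_flag=True)

def test_backup_add_restore_bucket_membase_stopped(self):
    # offline path: membase is stopped around the backup and restore steps
    self._test_backup_add_restore_bucket_body(bucket="default", port_no=11211,
                                              startup_flag=False)

def test_backup_and_restore_from_to_different_buckets(self):
    self._test_backup_and_restore_from_to_different_buckets()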