Code example #1
File: backuptests.py Project: saigon/testrunner
    def _test_cluster_topology_change_body(self):
        bucket = "default"
        BucketOperationHelper.create_bucket(serverInfo=self.master,
                                            test_case=self)
        ready = BucketOperationHelper.wait_for_memcached(self.master, bucket)
        self.assertTrue(ready, "wait_for_memcached failed")
        self.add_nodes_and_rebalance()

        distribution = {10: 0.2, 20: 0.5, 30: 0.25, 40: 0.05}

        inserted_keys, rejected_keys = MemcachedClientHelper.load_bucket_and_return_the_keys(
            servers=[self.master],
            ram_load_ratio=1,
            value_size_distribution=distribution,
            moxi=True,
            write_only=True,
            number_of_threads=2)

        self.log.info("Sleep after data load")
        ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket,
                                                      'ep_queue_size', 0)
        self.assertTrue(ready, "wait_for ep_queue_size == 0 failed")
        ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket,
                                                      'ep_flusher_todo', 0)
        self.assertTrue(ready, "wait_for ep_flusher_todo == 0 failed")

        #let's create a unique folder in the remote location
        for server in self.servers:
            shell = RemoteMachineShellConnection(server)
            output, error = shell.execute_command(self.perm_command)
            shell.log_command_output(output, error)
            node = RestConnection(server).get_nodes_self()
            BackupHelper(server, self).backup(bucket, node,
                                              self.remote_tmp_folder)
            shell.disconnect()

        ClusterOperationHelper.cleanup_cluster(self.servers)
        BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)

        servers = self.servers[:-1]

        self.add_node_and_rebalance(servers[0], servers)

        BucketOperationHelper.delete_bucket_or_assert(self.master, bucket,
                                                      self)
        BucketOperationHelper.create_bucket(serverInfo=self.master,
                                            test_case=self)

        ready = BucketOperationHelper.wait_for_memcached(self.master, bucket)
        self.assertTrue(ready, "wait_for_memcached failed")

        for server in self.servers:
            BackupHelper(server, self).restore(self.remote_tmp_folder)
            time.sleep(10)

        BucketOperationHelper.verify_data(self.master, inserted_keys, False,
                                          False, 11210, self)
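
In the load step above, `value_size_distribution` appears to map a value size to the fraction of items loaded at that size (the fractions sum to 1.0). A minimal standalone sketch of sampling such a distribution, under that assumption; `sample_value_size` is a hypothetical helper, not part of testrunner:

    import random

    # Assumed interpretation: keys are value sizes in bytes, values are load fractions.
    def sample_value_size(distribution):
        sizes = list(distribution)
        weights = list(distribution.values())
        return random.choices(sizes, weights=weights, k=1)[0]

    distribution = {10: 0.2, 20: 0.5, 30: 0.25, 40: 0.05}
    print(sample_value_size(distribution))  # most often 20, per the 0.5 weight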
Code example #2
File: backuptests.py Project: jchris/testrunner
    def _test_delete_key_and_backup_and_restore_body(self):
        bucket = "default"
        BucketOperationHelper.create_bucket(serverInfo=self.master, name=bucket, test_case=self)
        ready = BucketOperationHelper.wait_for_memcached(self.master, bucket)
        self.assertTrue(ready, "wait_for_memcached failed")

        self.add_nodes_and_rebalance()

        client = MemcachedClientHelper.direct_client(self.master, "default")
        expiry = 2400
        test_uuid = uuid.uuid4()
        keys = ["key_%s_%d" % (test_uuid, i) for i in range(500)]
        self.log.info("pushing keys with expiry set to {0}".format(expiry))
        for key in keys:
            try:
                client.set(key, expiry, 0, "1")
            except mc_bin_client.MemcachedError as error:
                msg = "unable to push key : {0} to bucket : {1} error : {2}"
                self.log.error(msg.format(key, client.vbucketId, error.status))
                self.fail(msg.format(key, client.vbucketId, error.status))
        self.log.info("inserted {0} keys with expiry set to {1}".format(len(keys), expiry))

        client.delete(keys[0])

        ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket, 'ep_queue_size', 0)
        self.assertTrue(ready, "wait_for ep_queue_size == 0 failed")
        ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket, 'ep_flusher_todo', 0)
        self.assertTrue(ready, "wait_for ep_flusher_todo == 0 failed")

        #let's create a unique folder in the remote location
        for server in self.servers:
            shell = RemoteMachineShellConnection(server)
            output, error = shell.execute_command(self.perm_command)
            shell.log_command_output(output, error)
            node = RestConnection(server).get_nodes_self()
            BackupHelper(server, self).backup(bucket, node, self.remote_tmp_folder)
            shell.disconnect()

        for server in self.servers:
            BackupHelper(server, self).restore(self.remote_tmp_folder)
            time.sleep(10)

        self.log.info('verifying that the deleted key is gone and the remaining keys were restored')
        missing_keys = []
        verify_keys = []
        for key in keys:
            vBucketId = crc32.crc32_hash(key) & 1023  # mask to the default 1024 vBuckets (1023 == 0x3FF)
            client.vbucketId = vBucketId
            if key == keys[0]:
                missing_keys.append(key)
            else:
                verify_keys.append(key)

        self.assertTrue(BucketOperationHelper.keys_dont_exist(self.master, missing_keys, self),
                        "deleted key still exists")
        self.assertTrue(BucketOperationHelper.verify_data(self.master, verify_keys, False, False, 11210, self),
                        "Missing keys")
Code example #3
File: backuptests.py Project: jchris/testrunner
    def _test_backup_and_restore_bucket_overwriting_body(self, overwrite_flag=True):
        bucket = "default"
        BucketOperationHelper.create_bucket(serverInfo=self.master, test_case=self)
        BucketOperationHelper.wait_for_memcached(self.master, bucket)
        self.add_nodes_and_rebalance()

        client = MemcachedClientHelper.direct_client(self.master, "default")
        expiry = 2400
        test_uuid = uuid.uuid4()
        keys = ["key_%s_%d" % (test_uuid, i) for i in range(500)]
        self.log.info("pushing keys with expiry set to {0}".format(expiry))
        for key in keys:
            try:
                client.set(key, expiry, 0, "1")
            except mc_bin_client.MemcachedError as error:
                msg = "unable to push key : {0} to bucket : {1} error : {2}"
                self.log.error(msg.format(key, client.vbucketId, error.status))
                self.fail(msg.format(key, client.vbucketId, error.status))
        self.log.info("inserted {0} keys with expiry set to {1}".format(len(keys), expiry))

        ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket, 'ep_queue_size', 0)
        self.assertTrue(ready, "wait_for ep_queue_size == 0 failed")
        ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket, 'ep_flusher_todo', 0)
        self.assertTrue(ready, "wait_for ep_flusher_todo == 0 failed")

        for server in self.servers:
            shell = RemoteMachineShellConnection(server)

            output, error = shell.execute_command(self.perm_command)
            shell.log_command_output(output, error)
            node = RestConnection(server).get_nodes_self()
            BackupHelper(server, self).backup(bucket, node, self.remote_tmp_folder)
            shell.disconnect()

        for key in keys:
            try:
                client.replace(key, expiry, 0, "2")
            except mc_bin_client.MemcachedError as error:
                msg = "unable to replace key : {0} in bucket : {1} error : {2}"
                self.log.error(msg.format(key, client.vbucketId, error.status))
                self.fail(msg.format(key, client.vbucketId, error.status))
        self.log.info("replaced {0} keys with expiry set to {1}".format(len(keys), expiry))

        for server in self.servers:
            BackupHelper(server, self).restore(self.remote_tmp_folder, overwrite_flag)
            time.sleep(10)

        self.log.info('verifying the restored values for all keys')
        for key in keys:
            if overwrite_flag:
                self.assertEqual("2", client.get(key=key), key + " should have value 2")
            else:
                self.assertNotEqual("2", client.get(key=key), key + " should not have value 2")
        self.log.info("verified restore behavior for {0} keys with overwrite_flag = {1}".format(len(keys), overwrite_flag))
Code example #4
File: backuptests.py Project: steveyen/testrunner
    def _test_cluster_topology_change_body(self):
        bucket = "default"
        BucketOperationHelper.create_bucket(serverInfo=self.master, test_case=self)
        ready = BucketOperationHelper.wait_for_memcached(self.master, bucket)
        self.assertTrue(ready, "wait_for_memcached failed")
        self.add_nodes_and_rebalance()

        rest = RestConnection(self.master)

        distribution = {10: 0.2, 20: 0.5, 30: 0.25, 40: 0.05}

        inserted_keys, rejected_keys = MemcachedClientHelper.load_bucket_and_return_the_keys(servers=[self.master],
                                                                                             ram_load_ratio=1,
                                                                                             value_size_distribution=distribution,
                                                                                             moxi=True,
                                                                                             write_only=True,
                                                                                             number_of_threads=2)

        self.log.info("Sleep after data load")
        ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket, 'ep_queue_size', 0)
        self.assertTrue(ready, "wait_for ep_queue_size == 0 failed")
        ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket, 'ep_flusher_todo', 0)
        self.assertTrue(ready, "wait_for ep_flusher_todo == 0 failed")

        #let's create a unique folder in the remote location
        for server in self.servers:
            shell = RemoteMachineShellConnection(server)
            output, error = shell.execute_command(self.perm_command)
            shell.log_command_output(output, error)
            node = RestConnection(server).get_nodes_self()
            BackupHelper(server, self).backup(bucket, node, self.remote_tmp_folder)
            shell.disconnect()

        ClusterOperationHelper.cleanup_cluster(self.servers)
        BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)

        servers = self.servers[:-1]

        self.add_node_and_rebalance(servers[0], servers)

        BucketOperationHelper.delete_bucket_or_assert(self.master, bucket, self)
        BucketOperationHelper.create_bucket(serverInfo=self.master, test_case=self)

        ready = BucketOperationHelper.wait_for_memcached(self.master, bucket)
        self.assertTrue(ready, "wait_for_memcached failed")

        for server in self.servers:
            BackupHelper(server, self).restore(self.remote_tmp_folder)
            time.sleep(10)

        BucketOperationHelper.verify_data(self.master, inserted_keys, False, False, 11210, self)
Code example #5
File: mbbackuptests.py Project: jchris/testrunner
 def test_default_bucket(self):
     master = self.servers[0]
     BucketOperationHelper.create_bucket(serverInfo=master, test_case=self)
     #let's create a unique folder in the remote location
     shell = RemoteMachineShellConnection(master)
     self.remote_tmp_folder = "/tmp/{0}-{1}".format("mbbackuptestdefaultbucket", uuid.uuid4())
     output, error = shell.execute_command("mkdir -p {0}".format(self.remote_tmp_folder))
     shell.log_command_output(output, error)
     #now let's back up
     BackupHelper(master).backup('default', self.remote_tmp_folder)
     backup_files = BackupHelper(master).download_backups(self.remote_tmp_folder)
     self.log.info('backup returned')
     for backup_file in backup_files:
         self.log.info(backup_file)
Code example #6
File: backuptests.py Project: jchris/testrunner
    def _test_backup_and_restore_from_to_different_buckets(self):
        bucket_before_backup = "bucket_before_backup"
        bucket_after_backup = "bucket_after_backup"
        BucketOperationHelper.create_bucket(serverInfo=self.master, name=bucket_before_backup, port=11212,
                                            test_case=self)
        ready = BucketOperationHelper.wait_for_memcached(self.master, bucket_before_backup)
        self.assertTrue(ready, "wait_for_memcached failed")

        self.add_nodes_and_rebalance()

        distribution = {10: 0.2, 20: 0.5, 30: 0.25, 40: 0.05}
        inserted_keys, rejected_keys = MemcachedClientHelper.load_bucket_and_return_the_keys(servers=[self.master],
                                                                                             name=bucket_before_backup,
                                                                                             ram_load_ratio=20,
                                                                                             value_size_distribution=distribution,
                                                                                             write_only=True,
                                                                                             moxi=True,
                                                                                             number_of_threads=2)

        self.log.info("Sleep after data load")
        ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket_before_backup, 'ep_queue_size', 0)
        self.assertTrue(ready, "wait_for ep_queue_size == 0 failed")
        ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket_before_backup, 'ep_flusher_todo', 0)
        self.assertTrue(ready, "wait_for ep_flusher_todo == 0 failed")

        for server in self.servers:
            shell = RemoteMachineShellConnection(server)
            output, error = shell.execute_command(self.perm_command)
            shell.log_command_output(output, error)
            node = RestConnection(server).get_nodes_self()
            BackupHelper(server, self).backup(bucket_before_backup, node, self.remote_tmp_folder)
            shell.disconnect()

        BucketOperationHelper.delete_bucket_or_assert(self.master, bucket_before_backup, self)
        BucketOperationHelper.create_bucket(serverInfo=self.master, name=bucket_after_backup, port=11212,
                                            test_case=self)
        ready = BucketOperationHelper.wait_for_memcached(self.master, bucket_after_backup)
        self.assertTrue(ready, "wait_for_memcached failed")

        for server in self.servers:
            BackupHelper(server, self).restore(self.remote_tmp_folder, moxi_port=11212)
            time.sleep(10)

        ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket_after_backup, 'ep_queue_size', 0)
        self.assertTrue(ready, "wait_for ep_queue_size == 0 failed")
        ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket_after_backup, 'ep_flusher_todo', 0)
        self.assertTrue(ready, "wait_for ep_flusher_todo == 0 failed")
        self.assertTrue(BucketOperationHelper.verify_data(self.master, inserted_keys, False, False, 11212, debug=False,
                                                          bucket=bucket_after_backup), "Missing keys")
Code example #7
 def test_merge_backup_from_old_and_new_bucket_bwc(self):
     """
         1. Create a bucket A
         2. Load docs with key 1
         3. Do backup
         4. Delete bucket A
         5. Re-create bucket A
         6. Load docs with key 2
         7. Do backup
         8. Do merge backup.  Verify the merged backup contains only docs with key 2
     """
     gen = BlobGenerator("ent-backup1_", "ent-backup-", self.value_size, end=self.num_items)
     self._load_all_buckets(self.master, gen, "create", 0)
     self.log.info("Start doing backup")
     self.backup_create()
     self.backup_cluster()
     if self.bucket_delete:
         self.log.info("Start to delete bucket")
         BucketOperationHelper.delete_all_buckets_or_assert([self.master], self)
         BucketOperationHelper.create_bucket(serverInfo=self.master, test_case=self)
     elif self.bucket_flush:
         self.log.info("Start to flush bucket")
         self._all_buckets_flush()
     gen = BlobGenerator("ent-backup2_", "ent-backup-", self.value_size, end=self.num_items)
     self.log.info("Start to load bucket again with different key")
     self._load_all_buckets(self.master, gen, "create", 0)
     self.backup_cluster()
     self.backupset.number_of_backups += 1
     status, output, message = self.backup_list()
     if not status:
         self.fail(message)
     self.log.info("Start to merge backup")
     self.backupset.start = randrange(1, self.backupset.number_of_backups)
     self.backupset.end = randrange(self.backupset.start,
                                    self.backupset.number_of_backups + 1)
     self.merged = True
     result, output, _ = self.backup_merge()
     self.backupset.end -= 1
     status, output, message = self.backup_list()
     if not status:
         self.fail(message)
     current_vseqno = self.get_vbucket_seqnos(self.cluster_to_backup, self.buckets,
                                              self.skip_consistency, self.per_node)
     self.log.info("*** Start to validate data in merge backup ")
     self.validate_backup_data(self.backupset.backup_host, [self.master],
                               "ent-backup", False, False, "memory",
                               self.num_items, "ent-backup1")
     self.backup_cluster_validate(skip_backup=True)
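
The merge window above is picked with `randrange`: `randrange(1, n)` returns a value in [1, n-1], so `start` always leaves at least one later backup, and `end` falls in [start, n]. A small standalone illustration of those bounds, with `number_of_backups` fixed to 3 just for the example:

    from random import randrange

    number_of_backups = 3
    start = randrange(1, number_of_backups)        # 1 or 2
    end = randrange(start, number_of_backups + 1)  # start .. 3
    print(start, end)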
Code example #8
    def _test_backup_add_restore_bucket_body(self, bucket="default", port_no=11211, delay_after_data_load=0, startup_flag=True):

        self.remote_tmp_folder = "/tmp/{0}-{1}".format("mbbackuptestdefaultbucket", uuid.uuid4())
        master = self.servers[0]

        node = RestConnection(master).get_nodes_self()
        BucketOperationHelper.delete_bucket_or_assert(master, bucket, self)
        BucketOperationHelper.create_bucket(serverInfo=master, name=bucket, replica=1, port=port_no, test_case=self)
        keys = BucketOperationHelper.load_some_data(master, bucket_name=bucket, test=self)

        if not startup_flag:
            self.shell.stop_membase()
        else:
            self.log.info("Sleep {0} seconds after data load".format(delay_after_data_load))
            time.sleep(delay_after_data_load)

        #let's create a unique folder in the remote location
        output, error = self.shell.execute_command("mkdir -p {0}".format(self.remote_tmp_folder))
        self.shell.log_command_output(output, error)

        #now let's back up
        BackupHelper(master, self).backup(bucket, node, self.remote_tmp_folder)

        if not startup_flag:
            self.shell.start_membase()

        BucketOperationHelper.delete_bucket_or_assert(master, bucket, self)
        BucketOperationHelper.create_bucket(serverInfo=master, name=bucket, replica=1, port=port_no, test_case=self)

        if not startup_flag:
            self.shell.stop_membase()

        BackupHelper(master, self).restore(self.remote_tmp_folder)

        if not startup_flag:
            self.shell.start_membase()

        BucketOperationHelper.verify_data(master.ip, keys, False, False, port_no, self)
Code example #9
 def test_negative_auto_retry_of_failed_rebalance_where_rebalance_will_not_be_cancelled(
         self):
     during_rebalance_failure = self.input.param("during_rebalance_failure",
                                                 "stop_server")
     post_failure_operation = self.input.param("post_failure_operation",
                                               "create_delete_buckets")
     zone_name = "Group_{0}_{1}".format(random.randint(1, 1000000000),
                                        self._testMethodName)
     zone_name = zone_name[0:60]
     default_zone = "Group 1"
     moved_node = []
     moved_node.append(self.servers[1].ip)
     try:
         operation = self._rebalance_operation(self.rebalance_operation)
         self.sleep(self.sleep_time)
         # induce the failure during the rebalance
         self._induce_error(during_rebalance_failure)
         operation.result()
     except Exception as e:
         self.log.info("Rebalance failed with : {0}".format(str(e)))
         # Recover from the error
         self._recover_from_error(during_rebalance_failure)
         result = json.loads(self.rest.get_pending_rebalance_info())
         self.log.info(result)
         retry_rebalance = result["retry_rebalance"]
         if retry_rebalance != "pending":
             self.fail("Auto-retry of failed rebalance is not triggered")
         if post_failure_operation == "create_delete_buckets":
             # delete buckets and create new one
             BucketOperationHelper.delete_all_buckets_or_assert(
                 servers=self.servers, test_case=self)
             self.sleep(self.sleep_time)
             BucketOperationHelper.create_bucket(self.master,
                                                 test_case=self)
         elif post_failure_operation == "change_replica_count":
             # change replica count
             self.log.info("Changing replica count of buckets")
             for bucket in self.buckets:
                 self.rest.change_bucket_props(bucket, replicaNumber=2)
         elif post_failure_operation == "change_server_group":
             # change server group
             self.log.info("Creating new zone " + zone_name)
             self.rest.add_zone(zone_name)
             self.log.info("Moving {0} to new zone {1}".format(
                 moved_node, zone_name))
             status = self.rest.shuffle_nodes_in_zones(
                 moved_node, default_zone, zone_name)
         else:
             self.fail("Invalid post_failure_operation option")
          # In these failure scenarios, while the retry is pending, the retry will be attempted but will fail
         try:
             self.check_retry_rebalance_succeeded()
         except Exception as e:
             self.log.info(e)
             if "Retrying of rebalance still did not help. All the retries exhausted" not in str(
                     e):
                 self.fail(
                     "Auto retry of failed rebalance succeeded when it was expected to fail"
                 )
     else:
         self.fail(
             "Rebalance did not fail as expected. Hence could not validate auto-retry feature.."
         )
     finally:
         if post_failure_operation == "change_server_group":
             status = self.rest.shuffle_nodes_in_zones(
                 moved_node, zone_name, default_zone)
              self.log.info(
                  "Shuffled the node back to the default group. Status : {0}".
                  format(status))
             self.sleep(self.sleep_time)
             self.log.info("Deleting new zone " + zone_name)
             try:
                 self.rest.delete_zone(zone_name)
              except Exception:
                 self.log.info("Errors in deleting zone")
         if self.disable_auto_failover:
             self.rest.update_autofailover_settings(True, 120)
         self.start_server(self.servers[1])
         self.stop_firewall_on_node(self.servers[1])
Code example #10
File: memcapable.py Project: rayleyva/testrunner
    def test_getr(self):
        item_count = self.input.param("item_count", 10000)
        replica_count = self.input.param("replica_count", 1)
        expiration = self.input.param("expiration", 0)
        delay = float(self.input.param("delay", 0))
        eject = self.input.param("eject", 0)
        delete = self.input.param("delete", 0)
        mutate = self.input.param("mutate", 0)
        warmup = self.input.param("warmup", 0)
        skipload = self.input.param("skipload", 0)
        rebalance = self.input.param("rebalance", 0)

        negative_test = False
        if delay > expiration:
            negative_test = True
        if delete and not mutate:
            negative_test = True
        if skipload and not mutate:
            negative_test = True

        prefix = str(uuid.uuid4())[:7]

        BucketOperationHelper.delete_all_buckets_or_assert([self.master], self)
        BucketOperationHelper.create_bucket(self.master, name=self.default_bucket_name, replica=replica_count, port=11210, test_case=self, bucket_ram=-1, password="")

        if rebalance == GetrTests.DURING_REBALANCE or rebalance == GetrTests.AFTER_REBALANCE:
            # leave 1 node unclustered for rebalance in
            ClusterOperationHelper.begin_rebalance_out(self.master, self.servers[-1:])
            ClusterOperationHelper.end_rebalance(self.master)
            ClusterOperationHelper.begin_rebalance_in(self.master, self.servers[:-1])
            ClusterOperationHelper.end_rebalance(self.master)
        else:
            ClusterOperationHelper.begin_rebalance_in(self.master, self.servers)
            ClusterOperationHelper.end_rebalance(self.master)

        vprefix = ""
        if not skipload:
            self._load_items(item_count=item_count, expiration=expiration, prefix=prefix, vprefix=vprefix)
            if not expiration:
                RebalanceHelper.wait_for_stats_int_value(self.master, self.default_bucket_name, "curr_items_tot", item_count * (replica_count + 1), "<=", 600, True)

        if delete:
            self._delete_items(item_count=item_count, prefix=prefix)

        if mutate:
            vprefix = "mutated"
            self._load_items(item_count=item_count, expiration=expiration, prefix=prefix, vprefix=vprefix)

        self.assertTrue(RebalanceHelper.wait_for_replication(self.rest.get_nodes(), timeout=180),
                            msg="replication did not complete")

        if eject:
            self._eject_items(item_count=item_count, prefix=prefix)

        if delay:
            self.sleep(delay)

        if rebalance == GetrTests.DURING_REBALANCE:
            ClusterOperationHelper.begin_rebalance_in(self.master, self.servers)
        if rebalance == GetrTests.AFTER_REBALANCE:
            ClusterOperationHelper.end_rebalance(self.master)
        if warmup:
            self.log.info("restarting memcached")
            command = "rpc:multicall(erlang, apply, [fun () -> try ns_server_testrunner_api:restart_memcached(20000) catch _:_ -> ns_port_sup:restart_port_by_name(memcached) end end, []], 20000)."
            memcached_restarted, content = self.rest.diag_eval(command)
            #wait until memcached starts
            self.assertTrue(memcached_restarted, "unable to restart memcached process through diag/eval")
            RebalanceHelper.wait_for_stats(self.master, self.default_bucket_name, "curr_items_tot", item_count * (replica_count + 1), 600)

        count = self._getr_items(item_count=item_count, replica_count=replica_count, prefix=prefix, vprefix=vprefix)

        if negative_test:
            self.assertTrue(count == 0, "found {0} items, expected none".format(count))
        else:
            self.assertTrue(count == replica_count * item_count, "expected {0} items, got {1} items".format(replica_count * item_count, count))
        if rebalance == GetrTests.DURING_REBALANCE:
            ClusterOperationHelper.end_rebalance(self.master)