Example #1
    def _common_test_body(self, moxi=False):
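        # Incrementally add each server to the cluster, write keys into every
        # bucket, rebalance, and read the inserted keys back in parallel.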
        master = self.servers[0]
        rest = RestConnection(master)
        creds = self.input.membase_settings
        bucket_data = RebalanceBaseTest.bucket_data_init(rest)

        for server in self.servers[1:]:
            self.log.info("current nodes : {0}".format(RebalanceHelper.getOtpNodeIds(master)))
            self.log.info("adding node {0}:{1} and rebalance afterwards".format(server.ip, server.port))
            otpNode = rest.add_node(creds.rest_username, creds.rest_password, server.ip, server.port)
            msg = "unable to add node {0} to the cluster {1}"
            self.assertTrue(otpNode, msg.format(server.ip, master.ip))
            for name in bucket_data:
                inserted_keys, rejected_keys = \
                    MemcachedClientHelper.load_bucket_and_return_the_keys(servers=[self.servers[0]],
                                                                          name=name,
                                                                          ram_load_ratio=-1,
                                                                          number_of_items=self.keys_count,
                                                                          number_of_threads=1,
                                                                          write_only=True)
                rest.rebalance(otpNodes=[node.id for node in rest.node_statuses()], ejectedNodes=[])
                self.assertTrue(rest.monitorRebalance(),
                    msg="rebalance operation failed after adding node {0}".format(server.ip))
                self.log.info("completed rebalancing in server {0}".format(server))
                IncrementalRebalanceWithParallelReadTests._reader_thread(self, inserted_keys, bucket_data, moxi=moxi)
                self.assertTrue(rest.monitorRebalance(),
                    msg="rebalance operation failed after adding node {0}".format(server.ip))
                break
Example #2
    def _test_cluster_topology_change_body(self):
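        # Load keys through moxi, wait for the disk queues to drain, back up
        # every node, rebuild a smaller cluster, restore, and verify the keys.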
        bucket = "default"
        BucketOperationHelper.create_bucket(serverInfo=self.master,
                                            test_case=self)
        ready = BucketOperationHelper.wait_for_memcached(self.master, bucket)
        self.assertTrue(ready, "wait_for_memcached failed")
        self.add_nodes_and_rebalance()

        distribution = {10: 0.2, 20: 0.5, 30: 0.25, 40: 0.05}

        inserted_keys, rejected_keys = MemcachedClientHelper.load_bucket_and_return_the_keys(
            servers=[self.master],
            ram_load_ratio=1,
            value_size_distribution=distribution,
            moxi=True,
            write_only=True,
            number_of_threads=2)

        self.log.info("Sleep after data load")
        ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket,
                                                      'ep_queue_size', 0)
        self.assertTrue(ready, "wait_for ep_queue_size == 0 failed")
        ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket,
                                                      'ep_flusher_todo', 0)
        self.assertTrue(ready, "wait_for ep_queue_size == 0 failed")

        #let's create a unique folder in the remote location
        for server in self.servers:
            shell = RemoteMachineShellConnection(server)
            output, error = shell.execute_command(self.perm_command)
            shell.log_command_output(output, error)
            node = RestConnection(server).get_nodes_self()
            BackupHelper(server, self).backup(bucket, node,
                                              self.remote_tmp_folder)
            shell.disconnect()

        ClusterOperationHelper.cleanup_cluster(self.servers)
        BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)

        servers = self.servers[:-1]

        self.add_node_and_rebalance(servers[0], servers)

        BucketOperationHelper.delete_bucket_or_assert(self.master, bucket,
                                                      self)
        BucketOperationHelper.create_bucket(serverInfo=self.master,
                                            test_case=self)

        ready = BucketOperationHelper.wait_for_memcached(self.master, bucket)
        self.assertTrue(ready, "wait_for_memcached failed")

        for server in self.servers:
            BackupHelper(server, self).restore(self.remote_tmp_folder)
            time.sleep(10)

        BucketOperationHelper.verify_data(self.master, inserted_keys, False,
                                          False, 11210, self)
Example #3
    def _test_cluster_topology_change_body(self):
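        # Same topology-change flow as the previous example: load, persist,
        # back up each node, rebuild the cluster, restore, and verify.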
        bucket = "default"
        BucketOperationHelper.create_bucket(serverInfo=self.master, test_case=self)
        ready = BucketOperationHelper.wait_for_memcached(self.master, bucket)
        self.assertTrue(ready, "wait_for_memcached failed")
        self.add_nodes_and_rebalance()

        rest = RestConnection(self.master)

        distribution = {10: 0.2, 20: 0.5, 30: 0.25, 40: 0.05}

        inserted_keys, rejected_keys = MemcachedClientHelper.load_bucket_and_return_the_keys(servers=[self.master],
                                                                                             ram_load_ratio=1,
                                                                                             value_size_distribution=distribution,
                                                                                             moxi=True,
                                                                                             write_only=True,
                                                                                             number_of_threads=2)

        self.log.info("Sleep after data load")
        ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket, 'ep_queue_size', 0)
        self.assertTrue(ready, "wait_for ep_queue_size == 0 failed")
        ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket, 'ep_flusher_todo', 0)
        self.assertTrue(ready, "wait_for ep_queue_size == 0 failed")

        #let's create a unique folder in the remote location
        for server in self.servers:
            shell = RemoteMachineShellConnection(server)
            output, error = shell.execute_command(self.perm_command)
            shell.log_command_output(output, error)
            node = RestConnection(server).get_nodes_self()
            BackupHelper(server, self).backup(bucket, node, self.remote_tmp_folder)
            shell.disconnect()

        ClusterOperationHelper.cleanup_cluster(self.servers)
        BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)

        servers = self.servers[:-1]

        self.add_node_and_rebalance(servers[0], servers)

        BucketOperationHelper.delete_bucket_or_assert(self.master, bucket, self)
        BucketOperationHelper.create_bucket(serverInfo=self.master, test_case=self)

        ready = BucketOperationHelper.wait_for_memcached(self.master, bucket)
        self.assertTrue(ready, "wait_for_memcached failed")

        for server in self.servers:
            BackupHelper(server, self).restore(self.remote_tmp_folder)
            time.sleep(10)

        BucketOperationHelper.verify_data(self.master, inserted_keys, False, False, 11210, self)
Example #4
 def load_data(master, bucket, keys_count=-1, load_ratio=-1):
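     # Write keys_count items (or a ram_load_ratio-driven load) into the
     # bucket and block until the disk write queue drains.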
     log = logger.Logger.get_logger()
     inserted_keys, rejected_keys =\
     MemcachedClientHelper.load_bucket_and_return_the_keys(servers=[master],
                                                           name=bucket,
                                                           ram_load_ratio=load_ratio,
                                                           number_of_items=keys_count,
                                                           number_of_threads=2,
                                                           write_only=True)
     log.info("wait until data is completely persisted on the disk")
     RebalanceHelper.wait_for_stats_on_all(master, bucket, 'ep_queue_size', 0)
     RebalanceHelper.wait_for_stats_on_all(master, bucket, 'ep_flusher_todo', 0)
     return inserted_keys
Example #5
    def _test_backup_and_restore_from_to_different_buckets(self):
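        # Back up bucket_before_backup from every node, restore the data into
        # a freshly created bucket_after_backup, and verify all inserted keys.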
        bucket_before_backup = "bucket_before_backup"
        bucket_after_backup = "bucket_after_backup"
        BucketOperationHelper.create_bucket(serverInfo=self.master, name=bucket_before_backup, port=11212,
                                            test_case=self)
        ready = BucketOperationHelper.wait_for_memcached(self.master, bucket_before_backup)
        self.assertTrue(ready, "wait_for_memcached failed")

        self.add_nodes_and_rebalance()

        distribution = {10: 0.2, 20: 0.5, 30: 0.25, 40: 0.05}
        inserted_keys, rejected_keys = MemcachedClientHelper.load_bucket_and_return_the_keys(servers=[self.master],
                                                                                             name=bucket_before_backup,
                                                                                             ram_load_ratio=20,
                                                                                             value_size_distribution=distribution,
                                                                                             write_only=True,
                                                                                             moxi=True,
                                                                                             number_of_threads=2)

        self.log.info("Sleep after data load")
        ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket_before_backup, 'ep_queue_size', 0)
        self.assertTrue(ready, "wait_for ep_queue_size == 0 failed")
        ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket_before_backup, 'ep_flusher_todo', 0)
        self.assertTrue(ready, "wait_for ep_queue_size == 0 failed")

        for server in self.servers:
            shell = RemoteMachineShellConnection(server)
            output, error = shell.execute_command(self.perm_command)
            shell.log_command_output(output, error)
            node = RestConnection(server).get_nodes_self()
            BackupHelper(server, self).backup(bucket_before_backup, node, self.remote_tmp_folder)
            shell.disconnect()

        BucketOperationHelper.delete_bucket_or_assert(self.master, bucket_before_backup, self)
        BucketOperationHelper.create_bucket(serverInfo=self.master, name=bucket_after_backup, port=11212,
                                            test_case=self)
        ready = BucketOperationHelper.wait_for_memcached(self.master, bucket_after_backup)
        self.assertTrue(ready, "wait_for_memcached failed")

        for server in self.servers:
            BackupHelper(server, self).restore(self.remote_tmp_folder, moxi_port=11212)
            time.sleep(10)

        ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket_after_backup, 'ep_queue_size', 0)
        self.assertTrue(ready, "wait_for ep_queue_size == 0 failed")
        ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket_after_backup, 'ep_flusher_todo', 0)
        self.assertTrue(ready, "wait_for ep_queue_size == 0 failed")
        self.assertTrue(BucketOperationHelper.verify_data(self.master, inserted_keys, False, False, 11212, debug=False,
                                                          bucket=bucket_after_backup), "Missing keys")
Example #6
    def _test_backup_and_restore_from_to_different_buckets(self):
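        # Identical backup/restore-to-a-different-bucket flow as the previous
        # example.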
        bucket_before_backup = "bucket_before_backup"
        bucket_after_backup = "bucket_after_backup"
        BucketOperationHelper.create_bucket(serverInfo=self.master, name=bucket_before_backup, port=11212,
                                            test_case=self)
        ready = BucketOperationHelper.wait_for_memcached(self.master, bucket_before_backup)
        self.assertTrue(ready, "wait_for_memcached failed")

        self.add_nodes_and_rebalance()

        distribution = {10: 0.2, 20: 0.5, 30: 0.25, 40: 0.05}
        inserted_keys, rejected_keys = MemcachedClientHelper.load_bucket_and_return_the_keys(servers=[self.master],
                                                                                             name=bucket_before_backup,
                                                                                             ram_load_ratio=20,
                                                                                             value_size_distribution=distribution,
                                                                                             write_only=True,
                                                                                             moxi=True,
                                                                                             number_of_threads=2)

        self.log.info("Sleep after data load")
        ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket_before_backup, 'ep_queue_size', 0)
        self.assertTrue(ready, "wait_for ep_queue_size == 0 failed")
        ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket_before_backup, 'ep_flusher_todo', 0)
        self.assertTrue(ready, "wait_for ep_flusher_todo == 0 failed")

        for server in self.servers:
            shell = RemoteMachineShellConnection(server)
            output, error = shell.execute_command(self.perm_command)
            shell.log_command_output(output, error)
            node = RestConnection(server).get_nodes_self()
            BackupHelper(server, self).backup(bucket_before_backup, node, self.remote_tmp_folder)
            shell.disconnect()

        BucketOperationHelper.delete_bucket_or_assert(self.master, bucket_before_backup, self)
        BucketOperationHelper.create_bucket(serverInfo=self.master, name=bucket_after_backup, port=11212,
                                            test_case=self)
        ready = BucketOperationHelper.wait_for_memcached(self.master, bucket_after_backup)
        self.assertTrue(ready, "wait_for_memcached failed")

        for server in self.servers:
            BackupHelper(server, self).restore(self.remote_tmp_folder, moxi_port=11212)
            time.sleep(10)

        ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket_after_backup, 'ep_queue_size', 0)
        self.assertTrue(ready, "wait_for ep_queue_size == 0 failed")
        ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket_after_backup, 'ep_flusher_todo', 0)
        self.assertTrue(ready, "wait_for ep_flusher_todo == 0 failed")
        self.assertTrue(BucketOperationHelper.verify_data(self.master, inserted_keys, False, False, 11212, debug=False,
                                                          bucket=bucket_after_backup), "Missing keys")
Example #7
 def _load_data_for_buckets(self):
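     # Write number_of_items 128-byte values into each existing bucket and
     # record the inserted/rejected keys per bucket name.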
     rest = RestConnection(self.master)
     buckets = rest.get_buckets()
     distribution = {128: 1.0}
     self.bucket_data = {}
     for bucket in buckets:
         name = bucket.name.encode("ascii", "ignore")
         self.bucket_data[name] = {}
         self.bucket_data[name]["inserted_keys"], self.bucket_data[name]["rejected_keys"] = \
     MemcachedClientHelper.load_bucket_and_return_the_keys(name=name,
                                                               servers=[self.master],
                                                               value_size_distribution=distribution,
                                                               number_of_threads=1,
                                                               number_of_items=self.number_of_items,
                                                               write_only=True)
Example #8
 def load_data(master, bucket, keys_count=-1, load_ratio=-1):
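     # Single-bucket loader: write the requested items and wait for
     # persistence before returning the inserted keys.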
     log = logger.Logger.get_logger()
     inserted_keys, rejected_keys = \
     MemcachedClientHelper.load_bucket_and_return_the_keys(servers=[master],
                                                           name=bucket,
                                                           ram_load_ratio=load_ratio,
                                                           number_of_items=keys_count,
                                                           number_of_threads=2,
                                                           write_only=True)
     log.info("wait until data is completely persisted on the disk")
     RebalanceHelper.wait_for_stats_on_all(master, bucket, 'ep_queue_size',
                                           0)
     RebalanceHelper.wait_for_stats_on_all(master, bucket,
                                           'ep_flusher_todo', 0)
     return inserted_keys
Example #9
 def _load_data_for_buckets(self):
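     # Same per-bucket loader as Example #7, routed through moxi.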
     rest = RestConnection(self.master)
     buckets = rest.get_buckets()
     distribution = {128: 1.0}
     self.bucket_data = {}
     for bucket in buckets:
         name = bucket.name.encode("ascii", "ignore")
         self.bucket_data[name] = {}
         self.bucket_data[name]["inserted_keys"], self.bucket_data[name]["rejected_keys"] = \
     MemcachedClientHelper.load_bucket_and_return_the_keys(name=name,
                                                               servers=[self.master],
                                                               value_size_distribution=distribution,
                                                               number_of_threads=1,
                                                               number_of_items=self.number_of_items,
                                                               write_only=True,
                                                               moxi=True)
Example #10
 def _load_data(self, master, load_ratio):
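     # Load the bucket to the given ram_load_ratio with a 50/50 mix of 1 KB
     # and 20-byte values, then wait for the disk queues to drain.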
     log = logger.Logger.get_logger()
     if load_ratio == -1:
         # default to a light load (ram_load_ratio of 0.1)
         load_ratio = 0.1
     distribution = {1024: 0.5, 20: 0.5}
     #TODO: with write_only = False, sometimes the load hangs, debug this
     inserted_keys, rejected_keys =\
     MemcachedClientHelper.load_bucket_and_return_the_keys(servers=[master],
                                                           ram_load_ratio=load_ratio,
                                                           number_of_threads=1,
                                                           value_size_distribution=distribution,
                                                           write_only=True)
     log.info("wait until data is completely persisted on the disk")
     RebalanceHelper.wait_for_stats(master, "default", 'ep_queue_size', 0)
     RebalanceHelper.wait_for_stats(master, "default", 'ep_flusher_todo', 0)
     return inserted_keys
Example #11
 def _load_data(self, master, load_ratio):
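     # Same RAM-ratio loader as the previous example.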
     log = logger.Logger.get_logger()
     if load_ratio == -1:
         # default to a light load (ram_load_ratio of 0.1)
         load_ratio = 0.1
     distribution = {1024: 0.5, 20: 0.5}
     #TODO: with write_only = False, sometimes the load hangs, debug this
     inserted_keys, rejected_keys =\
     MemcachedClientHelper.load_bucket_and_return_the_keys(servers=[master],
                                                           ram_load_ratio=load_ratio,
                                                           number_of_threads=1,
                                                           value_size_distribution=distribution,
                                                           write_only=True)
     log.info("wait until data is completely persisted on the disk")
     RebalanceHelper.wait_for_stats(master, "default", 'ep_queue_size', 0)
     RebalanceHelper.wait_for_stats(master, "default", 'ep_flusher_todo', 0)
     return inserted_keys
Example #12
 def _test_body(self, fill_ram_percentage, number_of_replicas):
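     # Fill the replicated bucket to fill_ram_percentage, mutate every key
     # twice, and check replication convergence between the mutations.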
     master = self.servers[0]
     self._verify_minimum_requirement(number_of_replicas)
     self._cleanup_cluster()
     self.log.info('cluster is setup')
     bucket_name = \
     'replica-{0}-ram-{1}-{2}'.format(number_of_replicas,
                                      fill_ram_percentage,
                                      uuid.uuid4())
     self._create_bucket(number_of_replicas=number_of_replicas,
                         bucket_name=bucket_name)
     self.log.info('created the bucket')
     distribution = RebalanceBaseTest.get_distribution(fill_ram_percentage)
     self.add_nodes_and_rebalance()
     self.log.info('loading more data into the bucket')
     inserted_keys, rejected_keys = \
     MemcachedClientHelper.load_bucket_and_return_the_keys(servers=[master],
                                                           name=self.bucket_name,
                                                           ram_load_ratio=fill_ram_percentage,
                                                           value_size_distribution=distribution,
                                                           number_of_threads=2,
                                                           write_only=True,
                                                           moxi=False)
     self.keys = inserted_keys
     self.log.info('updating all keys by appending _20 to each value')
     self._update_keys('20')
     self.log.info('verifying keys now...._20')
     self._verify_data('20')
     rest = RestConnection(self.servers[0])
     self.assertTrue(RebalanceHelper.wait_for_replication(rest.get_nodes(),
                                                          timeout=180),
                     msg="replication did not complete after 3 minutes")
     replicated = RebalanceHelper.wait_till_total_numbers_match(
         master, self.bucket_name, 300)
     self.assertTrue(
         replicated,
         msg=
         "replication was completed but sum(curr_items) dont match the curr_items_total"
     )
     self.log.info('updating all keys by appending _30 to each value')
     self._update_keys('30')
      self.log.info('verifying keys now...._30')
     self._verify_data('30')
      # flush the bucket before cleanup
     MemcachedClientHelper.flush_bucket(self.servers[0], self.bucket_name)
Example #13
 def load_data(master, bucket, keys_count=-1, load_ratio=-1, delete_ratio=0, expiry_ratio=0, test=None):
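     # Loader variant that mixes deletes and expirations into the write-only
     # workload and asserts the disk queues drain within two minutes.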
     log = logger.Logger.get_logger()
     inserted_keys, rejected_keys = \
     MemcachedClientHelper.load_bucket_and_return_the_keys(servers=[master],
         name=bucket,
         ram_load_ratio=load_ratio,
         number_of_items=keys_count,
         number_of_threads=2,
         write_only=True,
         delete_ratio=delete_ratio,
         expiry_ratio=expiry_ratio,
         moxi=True)
     log.info("wait until data is completely persisted on the disk")
     ready = RebalanceHelper.wait_for_stats_on_all(master, bucket, 'ep_queue_size', 0, timeout_in_seconds=120)
     test.assertTrue(ready, "wait_for ep_queue_size == 0 failed")
     ready = RebalanceHelper.wait_for_stats_on_all(master, bucket, 'ep_flusher_todo', 0, timeout_in_seconds=120)
     test.assertTrue(ready, "wait_for ep_flusher_todo == 0 failed")
     return inserted_keys
Example #14
def _create_load_multiple_bucket(self, server, bucket_data, howmany=2):
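    # Create `howmany` buckets, load each through moxi with a mix of 2 KB and
    # 20-byte values, and wait for per-bucket persistence.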
    created = BucketOperationHelper.create_multiple_buckets(server, 1, howmany=howmany)
    self.assertTrue(created, "unable to create multiple buckets")
    rest = RestConnection(server)
    buckets = rest.get_buckets()
    for bucket in buckets:
        bucket_data[bucket.name] = {}
        ready = BucketOperationHelper.wait_for_memcached(server, bucket.name)
        self.assertTrue(ready, "wait_for_memcached failed")
        #let's insert some data
        distribution = {2 * 1024: 0.5, 20: 0.5}
        bucket_data[bucket.name]["inserted_keys"], bucket_data[bucket.name]["reject_keys"] =\
        MemcachedClientHelper.load_bucket_and_return_the_keys(servers=[server], name=bucket.name,
                                                              ram_load_ratio=2.0,
                                                              number_of_threads=2,
                                                              value_size_distribution=distribution,
                                                              write_only=True,
                                                              moxi=True)
        RebalanceHelper.wait_for_stats(server, bucket.name, 'ep_queue_size', 0)
        RebalanceHelper.wait_for_stats(server, bucket.name, 'ep_flusher_todo', 0)
Example #15
def _create_load_multiple_bucket(self, server, bucket_data, howmany=2):
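    # Same multi-bucket create-and-load helper as the previous example.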
    created = BucketOperationHelper.create_multiple_buckets(server,
        1, howmany=howmany)
    self.assertTrue(created, "unable to create multiple buckets")
    rest = RestConnection(server)
    buckets = rest.get_buckets()
    for bucket in buckets:
        bucket_data[bucket.name] = {}
        ready = BucketOperationHelper.wait_for_memcached(server, bucket.name)
        self.assertTrue(ready, "wait_for_memcached failed")
        #let's insert some data
        distribution = {2 * 1024: 0.5, 20: 0.5}
        bucket_data[bucket.name]["inserted_keys"], \
            bucket_data[bucket.name]["reject_keys"] = \
        MemcachedClientHelper.load_bucket_and_return_the_keys(servers=[server],
            name=bucket.name, ram_load_ratio=2.0, number_of_threads=2,
            value_size_distribution=distribution, write_only=True,
            moxi=True)
        RebalanceHelper.wait_for_stats(server, bucket.name, 'ep_queue_size', 0)
        RebalanceHelper.wait_for_stats(server, bucket.name, 'ep_flusher_todo',
            0)
Example #16
 def _test_body(self, fill_ram_percentage, number_of_replicas):
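     # Same replica test as Example #12, using RestHelper to wait for
     # replication.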
     master = self.servers[0]
     self._verify_minimum_requirement(number_of_replicas)
     self._cleanup_cluster()
     self.log.info('cluster is setup')
     bucket_name =\
     'replica-{0}-ram-{1}-{2}'.format(number_of_replicas,
                                      fill_ram_percentage,
                                      uuid.uuid4())
     self._create_bucket(number_of_replicas=number_of_replicas, bucket_name=bucket_name)
     self.log.info('created the bucket')
     distribution = RebalanceBaseTest.get_distribution(fill_ram_percentage)
     self.add_nodes_and_rebalance()
     self.log.info('loading more data into the bucket')
     inserted_keys, rejected_keys =\
     MemcachedClientHelper.load_bucket_and_return_the_keys(servers=[master],
                                                           name=self.bucket_name,
                                                           ram_load_ratio=fill_ram_percentage,
                                                           value_size_distribution=distribution,
                                                           number_of_threads=2,
                                                           write_only=True,
                                                           moxi=False)
     self.keys = inserted_keys
     self.log.info('updating all keys by appending _20 to each value')
     self._update_keys('20')
     self.log.info('verifying keys now...._20')
     self._verify_data('20')
     rest = RestConnection(self.servers[0])
     self.assertTrue(RestHelper(rest).wait_for_replication(180),
                     msg="replication did not complete")
     replicated = RebalanceHelper.wait_till_total_numbers_match(master, self.bucket_name, 300)
      self.assertTrue(replicated, msg="replication completed but sum(curr_items) does not match curr_items_total")
     self.log.info('updating all keys by appending _30 to each value')
     self._update_keys('30')
      self.log.info('verifying keys now...._30')
     self._verify_data('30')
      # flush the bucket before cleanup
     MemcachedClientHelper.flush_bucket(self.servers[0], self.bucket_name)
Example #17
    def test_backup_upgrade_restore_default(self):
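        # Back up on initial_version via cbbackup from a worker node, upgrade
        # every server to final_version, cbrestore, and verify the keys.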
        if len(self.servers) < 2:
            self.log.error("At least 2 servers required for this test ..")
            return
        original_set = copy.copy(self.servers)
        worker = self.servers[-1]
        self.servers = self.servers[:-1]
        shell = RemoteMachineShellConnection(self.master)
        o, r = shell.execute_command("cat /opt/couchbase/VERSION.txt")
        fin = o[0]
        shell.disconnect()
        initial_version = self.input.param("initial_version", fin)
        final_version = self.input.param("final_version", fin)
        if initial_version == final_version:
            self.log.error("Same initial and final versions ..")
            return
        if not final_version.startswith('2.0'):
            self.log.error("Upgrade test not set to run from 1.8.1 -> 2.0 ..")
            return
        builds, changes = BuildQuery().get_all_builds()
        product = 'couchbase-server-enterprise'
        # Case: the worker is not yet running 2.0+
        worker_flag = 0
        shell = RemoteMachineShellConnection(worker)
        o, r = shell.execute_command("cat /opt/couchbase/VERSION.txt")
        temp = o[0]
        if not temp.startswith('2.0'):
            worker_flag = 1
        if worker_flag == 1:
            self.log.info(
                "Loading version {0} on worker.. ".format(final_version))
            remote = RemoteMachineShellConnection(worker)
            info = remote.extract_remote_info()
            older_build = BuildQuery().find_build(builds, product,
                                                  info.deliverable_type,
                                                  info.architecture_type,
                                                  final_version)
            remote.stop_couchbase()
            remote.couchbase_uninstall()
            remote.download_build(older_build)
            remote.install_server(older_build)
            remote.disconnect()

        remote_tmp = "{1}/{0}".format("backup", "/root")
        perm_comm = "mkdir -p {0}".format(remote_tmp)
        if initial_version != fin:
            for server in self.servers:
                remote = RemoteMachineShellConnection(server)
                info = remote.extract_remote_info()
                self.log.info(
                    "Loading version ..  {0}".format(initial_version))
                older_build = BuildQuery().find_build(builds, product,
                                                      info.deliverable_type,
                                                      info.architecture_type,
                                                      initial_version)
                remote.stop_couchbase()
                remote.couchbase_uninstall()
                remote.download_build(older_build)
                remote.install_server(older_build)
                rest = RestConnection(server)
                RestHelper(rest).is_ns_server_running(
                    testconstants.NS_SERVER_TIMEOUT)
                rest.init_cluster(server.rest_username, server.rest_password)
                rest.init_cluster_memoryQuota(
                    memoryQuota=rest.get_nodes_self().mcdMemoryReserved)
                remote.disconnect()

        self.common_setUp()
        bucket = "default"
        if len(self.servers) > 1:
            self.add_nodes_and_rebalance()
        rest = RestConnection(self.master)
        info = rest.get_nodes_self()
        size = int(info.memoryQuota * 2.0 / 3.0)
        rest.create_bucket(bucket, ramQuotaMB=size)
        ready = BucketOperationHelper.wait_for_memcached(self.master, bucket)
        self.assertTrue(ready, "wait_for_memcached_failed")
        distribution = {10: 0.2, 20: 0.5, 30: 0.25, 40: 0.05}
        inserted_keys, rejected_keys = MemcachedClientHelper.load_bucket_and_return_the_keys(
            servers=[self.master],
            name=bucket,
            ram_load_ratio=0.5,
            value_size_distribution=distribution,
            moxi=True,
            write_only=True,
            delete_ratio=0.1,
            number_of_threads=2)
        if len(self.servers) > 1:
            rest = RestConnection(self.master)
            self.assertTrue(RebalanceHelper.wait_for_replication(
                rest.get_nodes(), timeout=180),
                            msg="replication did not complete")

        ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket,
                                                      'ep_queue_size', 0)
        self.assertTrue(ready, "wait_for ep_queue_size == 0 failed")
        ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket,
                                                      'ep_flusher_todo', 0)
        self.assertTrue(ready, "wait_for ep_queue_size == 0 failed")
        node = RestConnection(self.master).get_nodes_self()
        shell = RemoteMachineShellConnection(worker)
        o, r = shell.execute_command(perm_comm)
        shell.log_command_output(o, r)
        shell.disconnect()

        #Backup
        #BackupHelper(self.master, self).backup(bucket, node, remote_tmp)
        shell = RemoteMachineShellConnection(worker)
        shell.execute_command(
            "/opt/couchbase/bin/cbbackup http://{0}:{1} {2}".format(
                self.master.ip, self.master.port, remote_tmp))
        shell.disconnect()
        BucketOperationHelper.delete_bucket_or_assert(self.master, bucket,
                                                      self)
        time.sleep(30)

        #Upgrade
        for server in self.servers:
            self.log.info(
                "Upgrading to current version {0}".format(final_version))
            remote = RemoteMachineShellConnection(server)
            info = remote.extract_remote_info()
            new_build = BuildQuery().find_build(builds, product,
                                                info.deliverable_type,
                                                info.architecture_type,
                                                final_version)
            remote.stop_couchbase()
            remote.couchbase_uninstall()
            remote.download_build(new_build)
            remote.install_server(new_build)
            rest = RestConnection(server)
            RestHelper(rest).is_ns_server_running(
                testconstants.NS_SERVER_TIMEOUT)
            rest.init_cluster(server.rest_username, server.rest_password)
            rest.init_cluster_memoryQuota(
                memoryQuota=rest.get_nodes_self().mcdMemoryReserved)
            remote.disconnect()
        time.sleep(30)

        #Restore
        rest = RestConnection(self.master)
        info = rest.get_nodes_self()
        size = int(info.memoryQuota * 2.0 / 3.0)
        rest.create_bucket(bucket, ramQuotaMB=size)
        ready = BucketOperationHelper.wait_for_memcached(self.master, bucket)
        self.assertTrue(ready, "wait_for_memcached_failed")
        #BackupHelper(self.master, self).restore(backup_location=remote_tmp, moxi_port=info.moxi)
        shell = RemoteMachineShellConnection(worker)
        shell.execute_command(
            "/opt/couchbase/bin/cbrestore {2} http://{0}:{1} -b {3}".format(
                self.master.ip, self.master.port, remote_tmp, bucket))
        shell.disconnect()
        time.sleep(60)
        keys_exist = BucketOperationHelper.keys_exist_or_assert_in_parallel(
            inserted_keys, self.master, bucket, self, concurrency=4)
        self.assertTrue(keys_exist, msg="unable to verify keys after restore")
        time.sleep(30)
        BucketOperationHelper.delete_bucket_or_assert(self.master, bucket,
                                                      self)
        rest = RestConnection(self.master)
        helper = RestHelper(rest)
        nodes = rest.node_statuses()
        master_id = rest.get_nodes_self().id
        if len(self.servers) > 1:
            removed = helper.remove_nodes(
                knownNodes=[node.id for node in nodes],
                ejectedNodes=[
                    node.id for node in nodes if node.id != master_id
                ],
                wait_for_rebalance=True)

        shell = RemoteMachineShellConnection(worker)
        shell.remove_directory(remote_tmp)
        shell.disconnect()

        self.servers = copy.copy(original_set)
        if initial_version == fin:
            builds, changes = BuildQuery().get_all_builds()
            for server in self.servers:
                remote = RemoteMachineShellConnection(server)
                info = remote.extract_remote_info()
                self.log.info(
                    "Loading version ..  {0}".format(initial_version))
                older_build = BuildQuery().find_build(builds, product,
                                                      info.deliverable_type,
                                                      info.architecture_type,
                                                      initial_version)
                remote.stop_couchbase()
                remote.couchbase_uninstall()
                remote.download_build(older_build)
                remote.install_server(older_build)
                rest = RestConnection(server)
                RestHelper(rest).is_ns_server_running(
                    testconstants.NS_SERVER_TIMEOUT)
                rest.init_cluster(server.rest_username, server.rest_password)
                rest.init_cluster_memoryQuota(
                    memoryQuota=rest.get_nodes_self().mcdMemoryReserved)
                remote.disconnect()
Example #18
    def test_backup_upgrade_restore_default(self):
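        # Same upgrade/backup/restore flow, with the build query scoped to the
        # requested version.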
        if len(self.servers) < 2:
            self.log.error("At least 2 servers required for this test ..")
            return
        original_set = copy.copy(self.servers)
        worker = self.servers[-1]
        self.servers = self.servers[:-1]
        shell = RemoteMachineShellConnection(self.master)
        o, r = shell.execute_command("cat /opt/couchbase/VERSION.txt")
        fin = o[0]
        shell.disconnect()
        initial_version = self.input.param("initial_version", fin)
        final_version = self.input.param("final_version", fin)
        if initial_version == final_version:
            self.log.error("Same initial and final versions ..")
            return
        if not final_version.startswith('2.0'):
            self.log.error("Upgrade test not set to run from 1.8.1 -> 2.0 ..")
            return
        builds, changes = BuildQuery().get_all_builds(version=final_version)
        product = 'couchbase-server-enterprise'
        # Case: the worker is not yet running 2.0+
        worker_flag = 0
        shell = RemoteMachineShellConnection(worker)
        o, r = shell.execute_command("cat /opt/couchbase/VERSION.txt")
        temp = o[0]
        if not temp.startswith('2.0'):
            worker_flag = 1
        if worker_flag == 1:
            self.log.info("Loading version {0} on worker.. ".format(final_version))
            remote = RemoteMachineShellConnection(worker)
            info = remote.extract_remote_info()
            older_build = BuildQuery().find_build(builds, product, info.deliverable_type,
                                                  info.architecture_type, final_version)
            remote.stop_couchbase()
            remote.couchbase_uninstall()
            remote.download_build(older_build)
            remote.install_server(older_build)
            remote.disconnect()

        remote_tmp = "{1}/{0}".format("backup", "/root")
        perm_comm = "mkdir -p {0}".format(remote_tmp)
        if initial_version != fin:
            for server in self.servers:
                remote = RemoteMachineShellConnection(server)
                info = remote.extract_remote_info()
                self.log.info("Loading version ..  {0}".format(initial_version))
                older_build = BuildQuery().find_build(builds, product, info.deliverable_type,
                                                      info.architecture_type, initial_version)
                remote.stop_couchbase()
                remote.couchbase_uninstall()
                remote.download_build(older_build)
                remote.install_server(older_build)
                rest = RestConnection(server)
                RestHelper(rest).is_ns_server_running(testconstants.NS_SERVER_TIMEOUT)
                rest.init_cluster(server.rest_username, server.rest_password)
                rest.init_cluster_memoryQuota(memoryQuota=rest.get_nodes_self().mcdMemoryReserved)
                remote.disconnect()

        self.common_setUp()
        bucket = "default"
        if len(self.servers) > 1:
            self.add_nodes_and_rebalance()
        rest = RestConnection(self.master)
        info = rest.get_nodes_self()
        size = int(info.memoryQuota * 2.0 / 3.0)
        rest.create_bucket(bucket, ramQuotaMB=size)
        ready = BucketOperationHelper.wait_for_memcached(self.master, bucket)
        self.assertTrue(ready, "wait_for_memcached_failed")
        distribution = {10: 0.2, 20: 0.5, 30: 0.25, 40: 0.05}
        inserted_keys, rejected_keys = MemcachedClientHelper.load_bucket_and_return_the_keys(servers=[self.master],
                                                                                             name=bucket,
                                                                                             ram_load_ratio=0.5,
                                                                                             value_size_distribution=distribution,
                                                                                             moxi=True,
                                                                                             write_only=True,
                                                                                             delete_ratio=0.1,
                                                                                             number_of_threads=2)
        if len(self.servers) > 1:
            rest = RestConnection(self.master)
            self.assertTrue(RebalanceHelper.wait_for_replication(rest.get_nodes(), timeout=180),
                            msg="replication did not complete")

        ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket, 'ep_queue_size', 0)
        self.assertTrue(ready, "wait_for ep_queue_size == 0 failed")
        ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket, 'ep_flusher_todo', 0)
        self.assertTrue(ready, "wait_for ep_flusher_todo == 0 failed")
        node = RestConnection(self.master).get_nodes_self()
        shell = RemoteMachineShellConnection(worker)
        o, r = shell.execute_command(perm_comm)
        shell.log_command_output(o, r)
        shell.disconnect()

        #Backup
        #BackupHelper(self.master, self).backup(bucket, node, remote_tmp)
        shell = RemoteMachineShellConnection(worker)
        shell.execute_command("/opt/couchbase/bin/cbbackup http://{0}:{1} {2}".format(
                                                            self.master.ip, self.master.port, remote_tmp))
        shell.disconnect()
        BucketOperationHelper.delete_bucket_or_assert(self.master, bucket, self)
        time.sleep(30)

        #Upgrade
        for server in self.servers:
            self.log.info("Upgrading to current version {0}".format(final_version))
            remote = RemoteMachineShellConnection(server)
            info = remote.extract_remote_info()
            new_build = BuildQuery().find_build(builds, product, info.deliverable_type,
                                                info.architecture_type, final_version)
            remote.stop_couchbase()
            remote.couchbase_uninstall()
            remote.download_build(new_build)
            remote.install_server(new_build)
            rest = RestConnection(server)
            RestHelper(rest).is_ns_server_running(testconstants.NS_SERVER_TIMEOUT)
            rest.init_cluster(server.rest_username, server.rest_password)
            rest.init_cluster_memoryQuota(memoryQuota=rest.get_nodes_self().mcdMemoryReserved)
            remote.disconnect()
        time.sleep(30)

        #Restore
        rest = RestConnection(self.master)
        info = rest.get_nodes_self()
        size = int(info.memoryQuota * 2.0 / 3.0)
        rest.create_bucket(bucket, ramQuotaMB=size)
        ready = BucketOperationHelper.wait_for_memcached(self.master, bucket)
        self.assertTrue(ready, "wait_for_memcached_failed")
        #BackupHelper(self.master, self).restore(backup_location=remote_tmp, moxi_port=info.moxi)
        shell = RemoteMachineShellConnection(worker)
        shell.execute_command("/opt/couchbase/bin/cbrestore {2} http://{0}:{1} -b {3}".format(
                                                            self.master.ip, self.master.port, remote_tmp, bucket))
        shell.disconnect()
        time.sleep(60)
        keys_exist = BucketOperationHelper.keys_exist_or_assert_in_parallel(inserted_keys, self.master, bucket, self, concurrency=4)
        self.assertTrue(keys_exist, msg="unable to verify keys after restore")
        time.sleep(30)
        BucketOperationHelper.delete_bucket_or_assert(self.master, bucket, self)
        rest = RestConnection(self.master)
        helper = RestHelper(rest)
        nodes = rest.node_statuses()
        master_id = rest.get_nodes_self().id
        if len(self.servers) > 1:
            removed = helper.remove_nodes(knownNodes=[node.id for node in nodes],
                                          ejectedNodes=[node.id for node in nodes if node.id != master_id],
                                          wait_for_rebalance=True)

        shell = RemoteMachineShellConnection(worker)
        shell.remove_directory(remote_tmp)
        shell.disconnect()

        self.servers = copy.copy(original_set)
        if initial_version == fin:
            builds, changes = BuildQuery().get_all_builds(version=initial_version)
            for server in self.servers:
                remote = RemoteMachineShellConnection(server)
                info = remote.extract_remote_info()
                self.log.info("Loading version ..  {0}".format(initial_version))
                older_build = BuildQuery().find_build(builds, product, info.deliverable_type,
                                                      info.architecture_type, initial_version)
                remote.stop_couchbase()
                remote.couchbase_uninstall()
                remote.download_build(older_build)
                remote.install_server(older_build)
                rest = RestConnection(server)
                RestHelper(rest).is_ns_server_running(testconstants.NS_SERVER_TIMEOUT)
                rest.init_cluster(server.rest_username, server.rest_password)
                rest.init_cluster_memoryQuota(memoryQuota=rest.get_nodes_self().mcdMemoryReserved)
                remote.disconnect()
Example #19
    def _test_backup_add_restore_bucket_body(self, bucket,
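        # Back up a default or SASL bucket (optionally with all servers shut
        # down), recreate the bucket, restore, and verify keys in parallel.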
                                             delay_after_data_load,
                                             startup_flag, single_node):
        server = self.master
        rest = RestConnection(server)
        info = rest.get_nodes_self()
        size = int(info.memoryQuota * 2.0 / 3.0)
        if bucket == "default":
            rest.create_bucket(bucket, ramQuotaMB=size, proxyPort=info.moxi)
        else:
            proxyPort = info.moxi + 500
            rest.create_bucket(bucket,
                               ramQuotaMB=size,
                               proxyPort=proxyPort,
                               authType="sasl",
                               saslPassword="******")

        ready = BucketOperationHelper.wait_for_memcached(server, bucket)
        self.assertTrue(ready, "wait_for_memcached failed")
        if not single_node:
            self.add_nodes_and_rebalance()
        distribution = {10: 0.2, 20: 0.5, 30: 0.25, 40: 0.05}
        inserted_keys, rejected_keys = MemcachedClientHelper.load_bucket_and_return_the_keys(
            servers=[self.master],
            name=bucket,
            ram_load_ratio=1,
            value_size_distribution=distribution,
            moxi=True,
            write_only=True,
            number_of_threads=2)

        if not single_node:
            rest = RestConnection(self.master)
            self.assertTrue(RebalanceHelper.wait_for_replication(
                rest.get_nodes(), timeout=180),
                            msg="replication did not complete")

        self.log.info(
            "Sleep {0} seconds after data load".format(delay_after_data_load))
        ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket,
                                                      'ep_queue_size', 0)
        self.assertTrue(ready, "wait_for ep_queue_size == 0 failed")
        ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket,
                                                      'ep_flusher_todo', 0)
        self.assertTrue(ready, "wait_for ep_flusher_todo == 0 failed")
        node = RestConnection(self.master).get_nodes_self()
        if not startup_flag:
            for server in self.servers:
                shell = RemoteMachineShellConnection(server)
                shell.stop_membase()
                shell.stop_couchbase()
                shell.disconnect()

        output, error = self.shell.execute_command(self.perm_command)
        self.shell.log_command_output(output, error)

        #now let's back up
        BackupHelper(self.master, self).backup(bucket, node,
                                               self.remote_tmp_folder)

        if not startup_flag:
            for server in self.servers:
                shell = RemoteMachineShellConnection(server)
                shell.start_membase()
                shell.start_couchbase()
                RestHelper(RestConnection(server)).is_ns_server_running()
                shell.disconnect()

        BucketOperationHelper.delete_bucket_or_assert(self.master, bucket,
                                                      self)

        if bucket == "default":
            rest.create_bucket(bucket, ramQuotaMB=size, proxyPort=info.moxi)
        else:
            proxyPort = info.moxi + 500
            rest.create_bucket(bucket,
                               ramQuotaMB=size,
                               proxyPort=proxyPort,
                               authType="sasl",
                               saslPassword="******")
        BucketOperationHelper.wait_for_memcached(self.master, bucket)

        if bucket == "default":
            BackupHelper(self.master,
                         self).restore(backup_location=self.remote_tmp_folder,
                                       moxi_port=info.moxi)
        else:
            BackupHelper(self.master,
                         self).restore(backup_location=self.remote_tmp_folder,
                                       moxi_port=info.moxi,
                                       username=bucket,
                                       password='******')

        keys_exist = BucketOperationHelper.keys_exist_or_assert_in_parallel(
            inserted_keys, self.master, bucket, self, concurrency=4)
        self.assertTrue(keys_exist, msg="unable to verify keys after restore")
Example #20
    def _test_backup_add_restore_bucket_body(self,
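        # Same backup/add/restore flow as above, using RestHelper to wait for
        # replication.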
                                             bucket,
                                             delay_after_data_load,
                                             startup_flag,
                                             single_node):
        server = self.master
        rest = RestConnection(server)
        info = rest.get_nodes_self()
        size = int(info.memoryQuota * 2.0 / 3.0)
        if bucket == "default":
            rest.create_bucket(bucket, ramQuotaMB=size, proxyPort=info.moxi)
        else:
            proxyPort = info.moxi + 500
            rest.create_bucket(bucket, ramQuotaMB=size, proxyPort=proxyPort,
                               authType="sasl", saslPassword="******")

        ready = BucketOperationHelper.wait_for_memcached(server, bucket)
        self.assertTrue(ready, "wait_for_memcached failed")
        if not single_node:
            self.add_nodes_and_rebalance()
        distribution = {10: 0.2, 20: 0.5, 30: 0.25, 40: 0.05}
        inserted_keys, rejected_keys = MemcachedClientHelper.load_bucket_and_return_the_keys(servers=[self.master],
                                                                                             name=bucket,
                                                                                             ram_load_ratio=1,
                                                                                             value_size_distribution=distribution,
                                                                                             moxi=True,
                                                                                             write_only=True,
                                                                                             number_of_threads=2)

        if not single_node:
            rest = RestConnection(self.master)
            self.assertTrue(RestHelper(rest).wait_for_replication(180), msg="replication did not complete")

        self.log.info("Sleep {0} seconds after data load".format(delay_after_data_load))
        ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket, 'ep_queue_size', 0)
        self.assertTrue(ready, "wait_for ep_queue_size == 0 failed")
        ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket, 'ep_flusher_todo', 0)
        self.assertTrue(ready, "wait_for ep_flusher_todo == 0 failed")
        node = RestConnection(self.master).get_nodes_self()
        if not startup_flag:
            for server in self.servers:
                shell = RemoteMachineShellConnection(server)
                shell.stop_membase()
                shell.stop_couchbase()
                shell.disconnect()

        output, error = self.shell.execute_command(self.perm_command)
        self.shell.log_command_output(output, error)

        #now let's back up
        BackupHelper(self.master, self).backup(bucket, node, self.remote_tmp_folder)

        if not startup_flag:
            for server in self.servers:
                shell = RemoteMachineShellConnection(server)
                shell.start_membase()
                shell.start_couchbase()
                RestHelper(RestConnection(server)).is_ns_server_running()
                shell.disconnect()

        BucketOperationHelper.delete_bucket_or_assert(self.master, bucket, self)

        if bucket == "default":
            rest.create_bucket(bucket, ramQuotaMB=size, proxyPort=info.moxi)
        else:
            proxyPort = info.moxi + 500
            rest.create_bucket(bucket, ramQuotaMB=size, proxyPort=proxyPort,
                               authType="sasl", saslPassword="******")
        BucketOperationHelper.wait_for_memcached(self.master, bucket)

        if bucket == "default":
            BackupHelper(self.master, self).restore(backup_location=self.remote_tmp_folder, moxi_port=info.moxi)
        else:
            BackupHelper(self.master, self).restore(backup_location=self.remote_tmp_folder, moxi_port=info.moxi, username=bucket, password='******')

        keys_exist = BucketOperationHelper.keys_exist_or_assert_in_parallel(inserted_keys, self.master, bucket, self, concurrency=4)
        self.assertTrue(keys_exist, msg="unable to verify keys after restore")