Example #1
    def test_upgrade(self):
        self._install([self.master])
        self.operations([self.master])
        for upgrade_version in self.upgrade_versions:
            self.sleep(self.sleep_time,
                       "Pre-setup of old version is done. Wait for upgrade to {0} version".format(upgrade_version))
            upgrade_threads = self._async_update(upgrade_version, [self.master])
            # wait for the upgrade threads to finish and report their statuses
            for upgrade_thread in upgrade_threads:
                upgrade_thread.join()
            success_upgrade = True
            while not self.queue.empty():
                success_upgrade &= self.queue.get()
            if not success_upgrade:
                self.fail("Upgrade failed!")

            self.sleep(self.expire_time)
#            if not self.is_linux:
#                self.wait_node_restarted(self.master, wait_time=1200, wait_if_warmup=True, check_service=True)
            remote = RemoteMachineShellConnection(self.master)
            for bucket in self.buckets:
                remote.execute_cbepctl(bucket, "", "set flush_param", "exp_pager_stime", 5)
            remote.disconnect()
            self.sleep(30)
            self.verification([self.master])
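A note on the pattern above: the upgrade threads are joined first, then a shared queue is drained and the boolean statuses are AND-ed together. A minimal standalone sketch of that join-then-drain idiom, using only the standard library (the version strings and the worker body are placeholders):

import threading
import queue

def upgrade_worker(version, results):
    # Stand-in for the real async upgrade; report success/failure via the queue.
    results.put(version != "bad-version")

results = queue.Queue()
threads = [threading.Thread(target=upgrade_worker, args=(v, results))
           for v in ["2.0.0-1870-rel", "2.5.0-1059-rel"]]
for t in threads:
    t.start()
for t in threads:
    t.join()  # wait for every upgrade thread to finish

success = True
while not results.empty():  # drain the queue and AND all reported statuses
    success &= results.get()
assert success, "Upgrade failed!"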
Example #2
    def rebalance_in_out_at_once_persistence_stopped(self):
        num_nodes_with_stopped_persistence = self.input.param("num_nodes_with_stopped_persistence", 1)
        servs_init = self.servers[:self.nodes_init]
        servs_in = [self.servers[i + self.nodes_init] for i in range(self.nodes_in)]
        servs_out = [self.servers[self.nodes_init - i - 1] for i in range(self.nodes_out)]
        rest = RestConnection(self.master)
        self._wait_for_stats_all_buckets(servs_init)
        for server in servs_init[:min(num_nodes_with_stopped_persistence, self.nodes_init)]:
            shell = RemoteMachineShellConnection(server)
            for bucket in self.buckets:
                shell.execute_cbepctl(bucket, "stop", "", "", "")
        self.sleep(5)
        self.num_items_without_persistence = self.input.param("num_items_without_persistence", 100000)
        gen_extra = BlobGenerator('mike', 'mike-', self.value_size, start=self.num_items // 2,
                                  end=self.num_items // 2 + self.num_items_without_persistence)
        self.log.info("current nodes : {0}".format([node.id for node in rest.node_statuses()]))
        self.log.info("adding nodes {0} to cluster".format(servs_in))
        self.log.info("removing nodes {0} from cluster".format(servs_out))
        tasks = self._async_load_all_buckets(self.master, gen_extra, "create", 0, batch_size=1000)
        result_nodes = set(servs_init + servs_in) - set(servs_out)
        # wait up to wait_timeout * 60 because rebalance can get stuck (MB-7386)
        self.cluster.rebalance(servs_init[:self.nodes_init], servs_in, servs_out, timeout=self.wait_timeout * 60)
        for task in tasks:
            task.result()

        self._wait_for_stats_all_buckets(servs_init[:self.nodes_init - self.nodes_out],
                                         ep_queue_size=self.num_items_without_persistence * 0.9, ep_queue_size_cond='>')
        self._wait_for_stats_all_buckets(servs_in)
        self._verify_all_buckets(self.master, timeout=None)
        self._verify_stats_all_buckets(result_nodes)
        # verify that curr_items_tot corresponds to sum of curr_items from all nodes
        verified = True
        for bucket in self.buckets:
            verified &= RebalanceHelper.wait_till_total_numbers_match(self.master, bucket)
        self.assertTrue(verified, "Lost items!!! Replication was completed but sum(curr_items) doesn't match curr_items_total")
Example #3
    def rebalance_in_out_at_once_persistence_stopped(self):
        num_nodes_with_stopped_persistence = self.input.param(
            "num_nodes_with_stopped_persistence", 1)
        servs_init = self.servers[:self.nodes_init]
        servs_in = [
            self.servers[i + self.nodes_init] for i in range(self.nodes_in)
        ]
        servs_out = [
            self.servers[self.nodes_init - i - 1]
            for i in range(self.nodes_out)
        ]
        rest = RestConnection(self.master)
        self._wait_for_stats_all_buckets(servs_init)
        for server in servs_init[:min(num_nodes_with_stopped_persistence,
                                      self.nodes_init)]:
            shell = RemoteMachineShellConnection(server)
            for bucket in self.buckets:
                shell.execute_cbepctl(bucket, "stop", "", "", "")
        self.sleep(5)
        self.num_items_without_persistence = self.input.param(
            "num_items_without_persistence", 100000)
        gen_extra = BlobGenerator('mike', 'mike-', self.value_size,
                                  start=self.num_items // 2,
                                  end=self.num_items // 2 + self.num_items_without_persistence)
        self.log.info("current nodes : {0}".format(
            [node.id for node in rest.node_statuses()]))
        self.log.info("adding nodes {0} to cluster".format(servs_in))
        self.log.info("removing nodes {0} from cluster".format(servs_out))
        tasks = self._async_load_all_buckets(self.master,
                                             gen_extra,
                                             "create",
                                             0,
                                             batch_size=1000)
        result_nodes = set(servs_init + servs_in) - set(servs_out)
        # wait up to wait_timeout * 60 because rebalance can get stuck (MB-7386)
        self.cluster.rebalance(servs_init[:self.nodes_init],
                               servs_in,
                               servs_out,
                               timeout=self.wait_timeout * 60)
        for task in tasks:
            task.result()

        self._wait_for_stats_all_buckets(servs_init[:self.nodes_init - self.nodes_out],
                                         ep_queue_size=self.num_items_without_persistence * 0.9,
                                         ep_queue_size_cond='>')
        self._wait_for_stats_all_buckets(servs_in)
        self._verify_all_buckets(self.master, timeout=None)
        self._verify_stats_all_buckets(result_nodes)
        # verify that curr_items_tot corresponds to sum of curr_items from all nodes
        verified = True
        for bucket in self.buckets:
            verified &= RebalanceHelper.wait_till_total_numbers_match(
                self.master, bucket)
        self.assertTrue(
            verified,
            "Lost items!!! Replication was completed but sum(curr_items) doesn't match curr_items_total"
        )
Example #4
    def recover_to_cbserver(self):
        """Recover data with 2.0 couchstore files to a 2.0 online server

        We load a number of items to one node first and then do some mutations on these items.
        Later we use cbtransfer to transfer the couchstore files we have on this
        node to a new node. We verify the data by comparing the items in KVStore
        with the items in the new node."""

        self.load_data()

        kvs_before = {}
        bucket_names = []
        for bucket in self.buckets:
            kvs_before[bucket.name] = bucket.kvs[1]
            bucket_names.append(bucket.name)

        if self.default_bucket:
            self.cluster.create_default_bucket(self.server_recovery,
                                               self.bucket_size,
                                               self.num_replicas)
            self.buckets.append(
                Bucket(name="default",
                       authType="sasl",
                       saslPassword="",
                       num_replicas=self.num_replicas,
                       bucket_size=self.bucket_size))
        self._create_sasl_buckets(self.server_recovery, self.sasl_buckets)
        self._create_standard_buckets(self.server_recovery,
                                      self.standard_buckets)

        for bucket in self.buckets:
            bucket.kvs[1] = kvs_before[bucket.name]
            transfer_source = "couchstore-files://%s" % (COUCHBASE_DATA_PATH)
            transfer_destination = "http://%s@%s:%s -b %s -B %s -v -v -v" % (
                self.couchbase_login_info, self.server_recovery.ip,
                self.server_recovery.port, bucket.name, bucket.name)
            self.shell.execute_cbtransfer(transfer_source,
                                          transfer_destination)
        del kvs_before
        time.sleep(self.expire_time + 1)
        shell_server_recovery = RemoteMachineShellConnection(
            self.server_recovery)
        for bucket in self.buckets:
            shell_server_recovery.execute_cbepctl(bucket, "",
                                                  "set flush_param",
                                                  "exp_pager_stime", 5)
        shell_server_recovery.disconnect()
        time.sleep(30)

        self._wait_for_stats_all_buckets([self.server_recovery])
        self._verify_all_buckets(self.server_recovery, 1,
                                 self.wait_timeout * 50, self.max_verify, True,
                                 1)
        self._verify_stats_all_buckets([self.server_recovery])
Example #5
    def test_rebalance_in_out_at_once_persistence_stopped(self):
        """
        PERFORMANCE: Rebalance in/out at once with stopped persistence.

        This test begins by loading a given number of items into the cluster with
        self.nodes_init nodes in it. Then we stop persistence on some nodes.
        The test starts to update some data and load new data into the cluster.
        At that time we add servs_in nodes, remove servs_out nodes and start rebalance.
        After rebalance and data ops are completed we start the verification phase:
        wait for the disk queues to drain, verify the number of items that were (or were not)
        persisted against expected values, and verify that there has been no data loss,
        i.e. sum(curr_items) matches curr_items_total. Once all checks pass, the test is finished.
        Available parameters with their defaults:
        nodes_init=1, nodes_in=1, nodes_out=1, num_nodes_with_stopped_persistence=1,
        num_items_without_persistence=100000
        """
        num_nodes_with_stopped_persistence = self.input.param("num_nodes_with_stopped_persistence", 1)
        servs_init = self.servers[:self.nodes_init]
        servs_in = [self.servers[i + self.nodes_init] for i in range(self.nodes_in)]
        servs_out = [self.servers[self.nodes_init - i - 1] for i in range(self.nodes_out)]
        rest = RestConnection(self.master)
        self._wait_for_stats_all_buckets(servs_init)
        for server in servs_init[:min(num_nodes_with_stopped_persistence, self.nodes_init)]:
            shell = RemoteMachineShellConnection(server)
            for bucket in self.buckets:
                shell.execute_cbepctl(bucket, "stop", "", "", "")
        self.sleep(5)
        self.num_items_without_persistence = self.input.param("num_items_without_persistence", 100000)
        gen_extra = BlobGenerator('mike', 'mike-', self.value_size, start=self.num_items // 2,
                                  end=self.num_items // 2 + self.num_items_without_persistence)
        self.log.info("current nodes : {0}".format([node.id for node in rest.node_statuses()]))
        self.log.info("adding nodes {0} to cluster".format(servs_in))
        self.log.info("removing nodes {0} from cluster".format(servs_out))
        tasks = self._async_load_all_buckets(self.master, gen_extra, "create", 0, batch_size=1000)
        result_nodes = set(servs_init + servs_in) - set(servs_out)
        # wait up to wait_timeout * 60 because rebalance can get stuck (MB-7386)
        self.cluster.rebalance(servs_init[:self.nodes_init], servs_in, servs_out, timeout=self.wait_timeout * 60)
        for task in tasks:
            task.result()

        self._wait_for_stats_all_buckets(servs_init[:self.nodes_init - self.nodes_out],
                                         ep_queue_size=self.num_items_without_persistence * 0.9, ep_queue_size_cond='>')
        self._wait_for_stats_all_buckets(servs_in)
        self._verify_all_buckets(self.master, timeout=None)
        self._verify_stats_all_buckets(result_nodes)
        # verify that curr_items_tot corresponds to sum of curr_items from all nodes
        verified = True
        for bucket in self.buckets:
            verified &= RebalanceHelper.wait_till_total_numbers_match(self.master, bucket)
        self.assertTrue(verified,
                        "Lost items!!! Replication was completed but sum(curr_items) doesn't match curr_items_total")
        self.verify_unacked_bytes_all_buckets()
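The servs_in/servs_out selection in this test is plain index arithmetic over the flat self.servers list: incoming nodes are taken from just past the initial set, and outgoing nodes from the tail of the initial set. A standalone sketch with placeholder hostnames:

servers = ["node%d" % i for i in range(6)]  # placeholder hostnames
nodes_init, nodes_in, nodes_out = 3, 2, 1

servs_init = servers[:nodes_init]                                    # node0..node2
servs_in = [servers[i + nodes_init] for i in range(nodes_in)]        # node3, node4
servs_out = [servers[nodes_init - i - 1] for i in range(nodes_out)]  # node2, the last initial node
result_nodes = set(servs_init + servs_in) - set(servs_out)

print(sorted(result_nodes))  # ['node0', 'node1', 'node3', 'node4']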
Example #6
    def recover_to_cbserver(self):
        """Recover data with 2.0 couchstore files to a 2.0 online server

        We load a number of items to one node first and then do some mutations on these items.
        Later we use cbtransfer to transfer the couchstore files we have on this
        node to a new node. We verify the data by comparing the items in KVStore
        with the items in the new node."""

        self.load_data()

        kvs_before = {}
        bucket_names = []
        for bucket in self.buckets:
            kvs_before[bucket.name] = bucket.kvs[1]
            bucket_names.append(bucket.name)

        del self.buckets
        self.buckets = []
        if self.default_bucket:
            bucket_params = self._create_bucket_params(server=self.server_recovery, size=self.bucket_size,
                                                              replicas=self.num_replicas)
            self.cluster.create_default_bucket(bucket_params)
            self.buckets.append(Bucket(name="default", authType="sasl", saslPassword="", num_replicas=self.num_replicas, bucket_size=self.bucket_size))
        self._create_sasl_buckets(self.server_recovery, self.sasl_buckets)
        self._create_standard_buckets(self.server_recovery, self.standard_buckets)

        transfer_source = "couchstore-files://%s" % (COUCHBASE_DATA_PATH)
        if self.os == 'windows':
            output, error = self.shell.execute_command("taskkill /F /T /IM cbtransfer.exe")
            self.shell.log_command_output(output, error)
            self.shell.delete_files("/cygdrive/c%s" % self.win_data_location)
            self.shell.execute_command("mkdir /cygdrive/c%s" % self.win_data_location)
            self.shell.execute_command("cp -rf %s /cygdrive/c/tmp/" % (WIN_COUCHBASE_DATA_PATH))
            transfer_source = "couchstore-files://C:%s" % (self.win_data_location)
        for bucket in self.buckets:
            bucket.kvs[1] = kvs_before[bucket.name]
            transfer_destination = "http://%s@%s:%s" % (self.couchbase_login_info,
                                                        self.server_recovery.ip,
                                                        self.server_recovery.port)
            self.shell.execute_cbtransfer(transfer_source, transfer_destination, "-b %s -B %s" % (bucket.name, bucket.name))
        del kvs_before

        time.sleep(self.expire_time + 1)
        shell_server_recovery = RemoteMachineShellConnection(self.server_recovery)
        for bucket in self.buckets:
            shell_server_recovery.execute_cbepctl(bucket, "", "set flush_param", "exp_pager_stime", 5)
        shell_server_recovery.disconnect()
        time.sleep(30)

        self._wait_for_stats_all_buckets([self.server_recovery])
        self._verify_all_buckets(self.server_recovery, 1, self.wait_timeout * 50, self.max_verify, True, 1)
        self._verify_stats_all_buckets([self.server_recovery])
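Note how this version fixes Example #4: the -b/-B bucket options are passed to execute_cbtransfer as a separate argument instead of being glued onto the destination URL. The string construction in isolation (credentials, host, and the data path are placeholder values; COUCHBASE_DATA_PATH is assumed to point at the server's data directory):

COUCHBASE_DATA_PATH = "/opt/couchbase/var/lib/couchbase/data/"  # assumed default
login, ip, port, bucket_name = "Administrator:password", "10.0.0.2", "8091", "default"

transfer_source = "couchstore-files://%s" % COUCHBASE_DATA_PATH
transfer_destination = "http://%s@%s:%s" % (login, ip, port)
options = "-b %s -B %s" % (bucket_name, bucket_name)  # source bucket / destination bucket

print(transfer_source)       # couchstore-files:///opt/couchbase/var/lib/couchbase/data/
print(transfer_destination)  # http://Administrator:password@10.0.0.2:8091
print(options)               # -b default -B default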
Example #7
    def test_upgrade(self):
        self._install([self.master])
        self.operations([self.master])
        self.sleep(self.sleep_time, "Pre-setup of old version is done. Wait for upgrade")
        for upgrade_version in self.upgrade_versions:
            remote = RemoteMachineShellConnection(self.master)
            self._upgrade(upgrade_version, self.master)
            self.sleep(self.expire_time)
            for bucket in self.buckets:
                remote.execute_cbepctl(bucket, "", "set flush_param", "exp_pager_stime", 5)
            self.sleep(30)
            remote.disconnect()
            self.verification([self.master])
Example #8
    def test_upgrade(self):
        self._install([self.master])
        self.operations([self.master])
        self.sleep(self.sleep_time, "Pre-setup of old version is done. Wait for upgrade")
        for upgrade_version in self.upgrade_versions:
            self._upgrade(upgrade_version, self.master)
            self.sleep(self.expire_time)
#            if not self.is_linux:
#                self.wait_node_restarted(self.master, wait_time=1200, wait_if_warmup=True, check_service=True)
            remote = RemoteMachineShellConnection(self.master)
            for bucket in self.buckets:
                remote.execute_cbepctl(bucket, "", "set flush_param", "exp_pager_stime", 5)
            remote.disconnect()
            self.sleep(30)
            self.verification([self.master])
Example #9
    def test_time_sync_threshold_setting(self):

        self.log.info('starting test_time_sync_threshold_setting')

        # bucket is created with lww in base test case using the LWW parameter

        # get the stats
        client = MemcachedClientHelper.direct_client(self.servers[0],
                                                     self.buckets[0])
        ahead_threshold = int(
            client.stats()["ep_hlc_drift_ahead_threshold_us"])
        self.assertTrue(
            ahead_threshold == LWWStatsTests.DEFAULT_THRESHOLD,
            'Ahead threshold mismatch expected: {0} actual {1}'.format(
                LWWStatsTests.DEFAULT_THRESHOLD, ahead_threshold))
        # change the setting and verify the stat reflects the new value - this may or may not be supported

        shell = RemoteMachineShellConnection(self.servers[0])
        output, error = shell.execute_cbepctl(
            self.buckets[0], "", "set vbucket_param",
            "hlc_drift_ahead_threshold_us ",
            str(LWWStatsTests.DEFAULT_THRESHOLD // 2) +
            LWWStatsTests.DUMMY_VBUCKET)
        if len(error) > 0:
            self.fail(
                'Failed to set the drift counter threshold, please check the logs.'
            )

        ahead_threshold = int(
            client.stats()["ep_hlc_drift_ahead_threshold_us"])
        self.assertTrue(
            ahead_threshold == LWWStatsTests.DEFAULT_THRESHOLD // 2,
            'Ahead threshold mismatch expected: {0} actual {1}'.format(
                LWWStatsTests.DEFAULT_THRESHOLD // 2, ahead_threshold))
Example #10
    def test_upgrade(self):
        self._install([self.master])
        self.operations()
        upgrade_versions = self.input.param("upgrade_version", "2.0.0-1870-rel")
        upgrade_versions = upgrade_versions.split(";")
        self.log.info("Installation of old version is done. Wait for %s sec for upgrade" % (self.sleep_time))
        time.sleep(self.sleep_time)
        for upgrade_version in upgrade_versions:
            remote = RemoteMachineShellConnection(self.master)
            self._upgrade(upgrade_version, self.master, remote)
            time.sleep(self.expire_time)
            for bucket in self.buckets:
                remote.execute_cbepctl(bucket, "", "set flush_param", "exp_pager_stime", 5)
            time.sleep(30)
            remote.disconnect()
            self.verification()
Example #11
    def test_time_sync_threshold_setting(self):

        self.log.info("starting test_time_sync_threshold_setting")

        # bucket is created with lww in base test case using the LWW parameter

        # get the stats
        client = MemcachedClientHelper.direct_client(self.servers[0], self.buckets[0])
        ahead_threshold = int(client.stats()["ep_hlc_drift_ahead_threshold_us"])
        self.assertTrue(
            ahead_threshold == LWWStatsTests.DEFAULT_THRESHOLD,
            "Ahead threshold mismatch expected: {0} actual {1}".format(
                LWWStatsTests.DEFAULT_THRESHOLD, ahead_threshold
            ),
        )
        # change the setting and verify the stat reflects the new value - this may or may not be supported

        shell = RemoteMachineShellConnection(self.servers[0])
        output, error = shell.execute_cbepctl(
            self.buckets[0],
            "",
            "set vbucket_param",
            "hlc_drift_ahead_threshold_us ",
            str(LWWStatsTests.DEFAULT_THRESHOLD // 2) + LWWStatsTests.DUMMY_VBUCKET,
        )
        if len(error) > 0:
            self.fail("Failed to set the drift counter threshold, please check the logs.")

        ahead_threshold = int(client.stats()["ep_hlc_drift_ahead_threshold_us"])
        self.assertTrue(
            ahead_threshold == LWWStatsTests.DEFAULT_THRESHOLD // 2,
            "Ahead threshold mismatch expected: {0} actual {1}".format(
                LWWStatsTests.DEFAULT_THRESHOLD // 2, ahead_threshold
            ),
        )
Example #12
    def _load_dgm(self):
        generate_load = BlobGenerator('nosql', 'nosql-', self.value_size, end=self.num_items)
        self._load_all_buckets(self.master, generate_load, "create", 0, 1, 0, True, batch_size=20000, pause_secs=5, timeout_secs=180)
        self.load_gen_list.append(generate_load)

        stats_all_buckets = {}
        for bucket in self.buckets:
            stats_all_buckets[bucket.name] = StatsCommon()

        for bucket in self.buckets:
            threshold_reached = False
            while not threshold_reached:
                for server in self.servers:
                    active_resident = stats_all_buckets[bucket.name].get_stats([server], bucket, '', 'vb_active_perc_mem_resident')[server]
                    if int(active_resident) > self.active_resident_threshold:
                        self.log.info("resident ratio is %s greater than %s for %s in bucket %s. Continue loading to the cluster" %
                                      (active_resident, self.active_resident_threshold, server.ip, bucket.name))
                        random_key = self.key_generator()
                        generate_load = BlobGenerator(random_key, '%s-' % random_key, self.value_size, end=self.num_items)
                        tasks = self._async_load_all_buckets(self.master, generate_load, "create", 0, 1, 0, True, batch_size=20000, pause_secs=5, timeout_secs=180)
                        for task in tasks:
                            task.result()
                        self.load_gen_list.append(generate_load)
                    else:
                        threshold_reached = True
                        self.log.info("DGM state achieved for %s in bucket %s!" % (server.ip, bucket.name))
                        break

        if self.doc_ops is not None:
            if("update" in self.doc_ops):
                for gen in self.load_gen_list[:int(len(self.load_gen_list) * 0.5)]:
                    self._load_all_buckets(self.master, gen, "update", 0, 1, 0, True, batch_size=20000, pause_secs=5, timeout_secs=180)
            if("delete" in self.doc_ops):
                for gen in self.load_gen_list[int(len(self.load_gen_list) * 0.5):]:
                    self._load_all_buckets(self.master, gen, "delete", 0, 1, 0, True, batch_size=20000, pause_secs=5, timeout_secs=180)
            if("expire" in self.doc_ops):
                for gen in self.load_gen_list[:int(len(self.load_gen_list) * 0.8)]:
                    self._load_all_buckets(self.master, gen, "update", self.expire_time, 1, 0, True, batch_size=20000, pause_secs=5, timeout_secs=180)
                time.sleep(self.expire_time * 2)

                for server in self.servers:
                    shell = RemoteMachineShellConnection(server)
                    for bucket in self.buckets:
                        shell.execute_cbepctl(bucket, "", "set flush_param", "exp_pager_stime", 5)
                    shell.disconnect()
                time.sleep(30)
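The control flow of the DGM loop above: keep loading batches while the active resident ratio reported by the node stays above the threshold, and declare DGM reached once it drops below. A simplified single-server sketch with a stubbed stats source (get_resident_ratio is hypothetical; the real code reads vb_active_perc_mem_resident through StatsCommon):

import random

def get_resident_ratio(server):
    # Hypothetical stub; a real implementation would query cbstats for
    # vb_active_perc_mem_resident on the given server.
    return random.randint(30, 100)

ACTIVE_RESIDENT_THRESHOLD = 50
server = "10.0.0.1"

while get_resident_ratio(server) > ACTIVE_RESIDENT_THRESHOLD:
    # In the real test a fresh BlobGenerator batch is loaded here.
    print("resident ratio above %d%% on %s, loading another batch"
          % (ACTIVE_RESIDENT_THRESHOLD, server))
print("DGM state achieved for %s" % server)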
Example #13
    def test_upgrade(self):
        self._install([self.master])
        self.operations()
        upgrade_versions = self.input.param('upgrade_version', '2.0.0-1870-rel')
        upgrade_versions = upgrade_versions.split(";")
        self.log.info("Installation of old version is done. Wait for %s sec for upgrade" % (self.sleep_time))
        time.sleep(self.sleep_time)
        for upgrade_version in upgrade_versions:
            remote = RemoteMachineShellConnection(self.master)
            self._upgrade(upgrade_version, self.master, remote)
            time.sleep(self.expire_time)
            for bucket in self.buckets:
                remote.execute_cbepctl(bucket, "", "set flush_param", "exp_pager_stime", 5)
            time.sleep(30)
            remote.disconnect()
            self.verification()
Example #14
    def _additional_ops(self):
        generate_update = BlobGenerator('nosql', 'nosql-', self.value_size, end=self.num_items * 3)
        self._load_all_buckets(self.master, generate_update, "create", 0, 1, 0, True, batch_size=20000, pause_secs=5, timeout_secs=180)
        generate_delete = BlobGenerator('nosql', 'nosql-', self.value_size, end=self.num_items * 3)
        self._load_all_buckets(self.master, generate_delete, "create", 0, 1, 0, True, batch_size=20000, pause_secs=5, timeout_secs=180)
        generate_expire = BlobGenerator('nosql', 'nosql-', self.value_size, end=self.num_items * 3)
        self._load_all_buckets(self.master, generate_expire, "create", 0, 1, 0, True, batch_size=20000, pause_secs=5, timeout_secs=180)

        if self.doc_ops is not None:
            if "update" in self.doc_ops:
                self._load_all_buckets(self.master, generate_update, "update", 0, 1, 0, True, batch_size=20000, pause_secs=5, timeout_secs=180)
            if("delete" in self.doc_ops):
                self._load_all_buckets(self.master, generate_delete, "delete", 0, 1, 0, True, batch_size=20000, pause_secs=5, timeout_secs=180)
            if("expire" in self.doc_ops):
                self._load_all_buckets(self.master, generate_expire, "update", self.expire_time, 1, 0, True, batch_size=20000, pause_secs=5, timeout_secs=180)
                self.sleep(self.expire_time + 10)

                for server in self.servers:
                    shell = RemoteMachineShellConnection(server)
                    for bucket in self.buckets:
                        shell.execute_cbepctl(bucket, "", "set flush_param", "exp_pager_stime", 5)
                    shell.disconnect()
                self.sleep(30)
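A recurring idiom in these examples is forcing the expiry pager to run shortly after documents expire, by setting exp_pager_stime to 5 seconds through cbepctl. Roughly, the call maps onto a cbepctl command line; a hypothetical sketch of the string it corresponds to (binary path, host, and flag ordering are assumptions, and the real RemoteMachineShellConnection.execute_cbepctl also handles authentication and per-OS paths):

# Hypothetical mirror of what execute_cbepctl runs remotely.
def cbepctl_command(host, port, bucket, subcommand, name, value):
    return ("/opt/couchbase/bin/cbepctl %s:%s -b %s %s %s %s"
            % (host, port, bucket, subcommand, name, value))

print(cbepctl_command("10.0.0.1", 11210, "default",
                      "set flush_param", "exp_pager_stime", 5))
# /opt/couchbase/bin/cbepctl 10.0.0.1:11210 -b default set flush_param exp_pager_stime 5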
Example #15
    def test_poisoned_cas(self):
        """
        @note:  - set the clock ahead
                - do lots of sets and get some CASs
                - do a set and get the CAS (flag, CAS, value) and save it
                - set the clock back
                - verify the CAS is still big on new sets
                - reset the CAS
                - do the vbucket max cas and verify
                - do a new mutation and verify the CAS is smaller
        """
        # create a built-in user named after the bucket
        self.log.info('starting test_poisoned_cas')
        payload = "name={0}&roles=admin&password=password".format(
            self.buckets[0].name)
        self.rest.add_set_builtin_user(self.buckets[0].name, payload)
        sdk_client = SDKClient(scheme='couchbase', hosts=[self.servers[0].ip], bucket=self.buckets[0].name)
        mc_client = MemcachedClientHelper.direct_client(self.servers[0], self.buckets[0])
        # move the system clock ahead to poison the CAS
        shell = RemoteMachineShellConnection(self.servers[0])
        self.assertTrue(shell.change_system_time(LWWStatsTests.ONE_HOUR_IN_SECONDS), 'Failed to advance the clock')

        output, error = shell.execute_command('date')
        self.log.info('Date after the clock is set forward: {0}'.format(output))
        rc = sdk_client.set('key1', 'val1')
        rc = mc_client.get('key1')
        poisoned_cas = rc[1]
        self.log.info('The poisoned CAS is {0}'.format(poisoned_cas))
        # do lots of mutations to set the max CAS for all vbuckets
        gen_load = BlobGenerator('key-for-cas-test', 'value-for-cas-test-', self.value_size, end=10000)
        self._load_all_buckets(self.master, gen_load, "create", 0)
        # move the clock back again and verify the CAS stays large
        self.assertTrue(shell.change_system_time(-LWWStatsTests.ONE_HOUR_IN_SECONDS), 'Failed to change the clock')
        output, error = shell.execute_command('date')
        self.log.info('Date after the clock is set backwards: {0}'.format(output))
        use_mc_bin_client = self.input.param("use_mc_bin_client", True)

        if use_mc_bin_client:
            rc = mc_client.set('key2', 0, 0, 'val2')
            second_poisoned_cas = rc[1]
        else:
            rc = sdk_client.set('key2', 'val2')
            second_poisoned_cas = rc.cas
        self.log.info('The second_poisoned CAS is {0}'.format(second_poisoned_cas))
        self.assertTrue(second_poisoned_cas > poisoned_cas,
                'Second poisoned CAS {0} is not larger than the first poisoned CAS {1}'.format(second_poisoned_cas, poisoned_cas))
        # reset the CAS for all vbuckets. This needs to be done in conjunction with a clock change. If the clock is not
        # changed then the CAS will immediately continue with the clock. I see two scenarios:
        # 1. Set the clock back 1 hour and the CAS back 30 minutes; the CAS should be used
        # 2. Set the clock back 1 hour and the CAS back 2 hours; the clock should be used
        # do case 1, set the CAS back 30 minutes. Calculation below assumes the CAS is in nanoseconds
        earlier_max_cas = poisoned_cas - 30 * 60 * 1000000000
        for i in range(self.vbuckets):
            output, error = shell.execute_cbepctl(self.buckets[0], "", "set_vbucket_param",
                              "max_cas ", str(i) + ' ' + str(earlier_max_cas))
            if len(error) > 0:
                self.fail('Failed to set the max cas')
        # verify the max CAS
        for i in range(self.vbuckets):
            max_cas = int(mc_client.stats('vbucket-details')['vb_' + str(i) + ':max_cas'])
            self.assertTrue(max_cas == earlier_max_cas,
                    'Max CAS not properly set for vbucket {0}: set as {1} but observed {2}'.format(i, earlier_max_cas, max_cas))
            self.log.info('Per cbstats the max cas for vbucket {0} is {1}'.format(i, max_cas))

        rc1 = sdk_client.set('key-after-resetting cas', 'val1')
        rc2 = mc_client.get('key-after-resetting cas')
        set_cas_after_reset_max_cas = rc2[1]
        self.log.info('The later CAS is {0}'.format(set_cas_after_reset_max_cas))
        self.assertTrue(set_cas_after_reset_max_cas < poisoned_cas,
             'For {0} CAS has not decreased. Current CAS {1} poisoned CAS {2}'.format('key-after-resetting cas', set_cas_after_reset_max_cas, poisoned_cas))
        # do a bunch of sets and verify the CAS is small - this is really only one set, need to do more
        gen_load = BlobGenerator('key-for-cas-test-after-cas-is-reset', 'value-for-cas-test-', self.value_size, end=1000)
        self._load_all_buckets(self.master, gen_load, "create", 0)
        gen_load.reset()
        while gen_load.has_next():
            key, value = next(gen_load)
            try:
                rc = mc_client.get(key)
                # rc = sdk_client.get(key)
                cas = rc[1]
                self.assertTrue(cas < poisoned_cas, 'For key {0} CAS has not decreased. Current CAS {1} poisoned CAS {2}'.format(key, cas, poisoned_cas))
            except Exception:
                self.log.info('get error with {0}'.format(key))

        rc = sdk_client.set('key3', 'val1')
        better_cas = rc.cas
        self.log.info('The better CAS is {0}'.format(better_cas))
        self.assertTrue(better_cas < poisoned_cas, 'The CAS was not improved')
        # set the clock way ahead - remote_util_OS.py (new)
        # do a bunch of mutations - not really needed
        # do the fix command - cbepctl, the existing way (remote util)
        # do some mutations, verify they conform to the new CAS - build on the CAS code,
        #     where to iterate over the keys and get the CAS?
        """
Example #16
class RecoveryUseTransferTests(TransferBaseTest):
    def setUp(self):
        self.times_teardown_called = 1
        super(RecoveryUseTransferTests, self).setUp()
        self.server_origin = self.servers[0]
        self.server_recovery = self.servers[1]
        self.shell = RemoteMachineShellConnection(self.server_origin)
        info = self.shell.extract_remote_info()
        self.os = info.type.lower()

    def tearDown(self):
        super(RecoveryUseTransferTests, self).tearDown()
        if not self.input.param("skip_cleanup", True):
            if self.times_teardown_called > 1:
                if self.os == 'windows':
                    self.shell.delete_files("/cygdrive/c%s" %
                                            (self.backup_location))
                else:
                    self.shell.delete_files(self.backup_location)
                self.shell.disconnect()
                del self.buckets
        if self.input.param("skip_cleanup", True):
            if self.case_number > 1 or self.times_teardown_called > 1:
                if self.os == 'windows':
                    self.shell.delete_files("/cygdrive/c%s" %
                                            (self.backup_location))
                else:
                    self.shell.delete_files(self.backup_location)
                self.shell.disconnect()
                del self.buckets
        self.times_teardown_called += 1

    def recover_to_cbserver(self):
        """Recover data with 2.0 couchstore files to a 2.0 online server

        We load a number of items to one node first and then do some mutations on these items.
        Later we use cbtransfer to transfer the couchstore files we have on this
        node to a new node. We verify the data by comparing the items in KVStore
        with the items in the new node."""

        self.load_data()

        kvs_before = {}
        bucket_names = []
        for bucket in self.buckets:
            kvs_before[bucket.name] = bucket.kvs[1]
            bucket_names.append(bucket.name)

        if self.default_bucket:
            self.cluster.create_default_bucket(self.server_recovery,
                                               self.bucket_size,
                                               self.num_replicas)
            self.buckets.append(
                Bucket(name="default",
                       authType="sasl",
                       saslPassword="",
                       num_replicas=self.num_replicas,
                       bucket_size=self.bucket_size))
        self._create_sasl_buckets(self.server_recovery, self.sasl_buckets)
        self._create_standard_buckets(self.server_recovery,
                                      self.standard_buckets)

        for bucket in self.buckets:
            bucket.kvs[1] = kvs_before[bucket.name]
            transfer_source = "couchstore-files://%s" % (COUCHBASE_DATA_PATH)
            transfer_destination = "http://%s@%s:%s -b %s -B %s -v -v -v" % (
                self.couchbase_login_info, self.server_recovery.ip,
                self.server_recovery.port, bucket.name, bucket.name)
            self.shell.execute_cbtransfer(transfer_source,
                                          transfer_destination)
        del kvs_before
        time.sleep(self.expire_time + 1)
        shell_server_recovery = RemoteMachineShellConnection(
            self.server_recovery)
        for bucket in self.buckets:
            shell_server_recovery.execute_cbepctl(bucket, "",
                                                  "set flush_param",
                                                  "exp_pager_stime", 5)
        shell_server_recovery.disconnect()
        time.sleep(30)

        self._wait_for_stats_all_buckets([self.server_recovery])
        self._verify_all_buckets(self.server_recovery, 1,
                                 self.wait_timeout * 50, None, True)
        self._verify_stats_all_buckets([self.server_recovery])

    def recover_to_backupdir(self):
        """Recover data with 2.0 couchstore files to a 2.0 backup diretory

        We load a number of items to a node first and then do some mutataion on these items.
        Later we use cbtransfer to transfer the couchstore files we have on this node to
        a backup directory. We use cbrestore to restore these backup files to the same node
        for verification."""

        self.load_data()

        kvs_before = {}
        bucket_names = []

        self.shell.delete_files(self.backup_location)
        self.shell.create_directory(self.backup_location)

        for bucket in self.buckets:
            kvs_before[bucket.name] = bucket.kvs[1]
            bucket_names.append(bucket.name)
            transfer_source = "-v -v -v couchstore-files://%s" % (
                COUCHBASE_DATA_PATH)
            transfer_destination = self.backup_location
            self.shell.execute_cbtransfer(transfer_source,
                                          transfer_destination)

        self._all_buckets_delete(self.server_origin)
        if self.default_bucket:
            self.cluster.create_default_bucket(self.server_origin,
                                               self.bucket_size,
                                               self.num_replicas)
            self.buckets.append(
                Bucket(name="default",
                       authType="sasl",
                       saslPassword="",
                       num_replicas=self.num_replicas,
                       bucket_size=self.bucket_size))
        self._create_sasl_buckets(self.server_origin, self.sasl_buckets)
        self._create_standard_buckets(self.server_origin,
                                      self.standard_buckets)

        for bucket in self.buckets:
            bucket.kvs[1] = kvs_before[bucket.name]
        del kvs_before
        self.shell.restore_backupFile(self.couchbase_login_info,
                                      self.backup_location, bucket_names)
        time.sleep(self.expire_time + 1)
        for bucket in self.buckets:
            self.shell.execute_cbepctl(bucket, "", "set flush_param",
                                       "exp_pager_stime", 5)
        time.sleep(30)

        self._wait_for_stats_all_buckets([self.server_origin])
        self._verify_all_buckets(self.server_origin, 1, self.wait_timeout * 50,
                                 None, True)
        self._verify_stats_all_buckets([self.server_origin])

    def load_data(self):
        gen_load = BlobGenerator('nosql',
                                 'nosql-',
                                 self.value_size,
                                 end=self.num_items)
        gen_update = BlobGenerator('nosql',
                                   'nosql-',
                                   self.value_size,
                                   end=(self.num_items // 2 - 1))
        gen_expire = BlobGenerator('nosql',
                                   'nosql-',
                                   self.value_size,
                                   start=self.num_items // 2,
                                   end=(self.num_items * 3 // 4 - 1))
        gen_delete = BlobGenerator('nosql',
                                   'nosql-',
                                   self.value_size,
                                   start=self.num_items * 3 // 4,
                                   end=self.num_items)
        self._load_all_buckets(self.server_origin,
                               gen_load,
                               "create",
                               0,
                               1,
                               self.item_flag,
                               True,
                               batch_size=20000,
                               pause_secs=5,
                               timeout_secs=180)

        if self.doc_ops is not None:
            if "update" in self.doc_ops:
                self._load_all_buckets(self.server_origin,
                                       gen_update,
                                       "update",
                                       0,
                                       1,
                                       self.item_flag,
                                       True,
                                       batch_size=20000,
                                       pause_secs=5,
                                       timeout_secs=180)
            if ("delete" in self.doc_ops):
                self._load_all_buckets(self.server_origin,
                                       gen_delete,
                                       "delete",
                                       0,
                                       1,
                                       self.item_flag,
                                       True,
                                       batch_size=20000,
                                       pause_secs=5,
                                       timeout_secs=180)
            if ("expire" in self.doc_ops):
                self._load_all_buckets(self.server_origin,
                                       gen_expire,
                                       "update",
                                       self.expire_time,
                                       1,
                                       self.item_flag,
                                       True,
                                       batch_size=20000,
                                       pause_secs=5,
                                       timeout_secs=180)
        self._wait_for_stats_all_buckets([self.server_origin])
        time.sleep(30)
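load_data partitions the key space by index: every key is created, the first half is then updated, the third quarter is expired, and the last quarter is deleted. The range arithmetic in isolation (integer division is used so the ranges stay integers on Python 3):

num_items = 1000

create_range = (0, num_items)                             # all keys
update_range = (0, num_items // 2 - 1)                    # first half
expire_range = (num_items // 2, num_items * 3 // 4 - 1)   # third quarter
delete_range = (num_items * 3 // 4, num_items)            # last quarter

print(create_range, update_range, expire_range, delete_range)
# (0, 1000) (0, 499) (500, 749) (750, 1000)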
Example #17
    def _load_dgm(self):
        generate_load = BlobGenerator('nosql',
                                      'nosql-',
                                      self.value_size,
                                      end=self.num_items)
        self._load_all_buckets(self.master,
                               generate_load,
                               "create",
                               0,
                               1,
                               0,
                               True,
                               batch_size=20000,
                               pause_secs=5,
                               timeout_secs=180)
        self.load_gen_list.append(generate_load)

        stats_all_buckets = {}
        for bucket in self.buckets:
            stats_all_buckets[bucket.name] = StatsCommon()

        for bucket in self.buckets:
            threshold_reached = False
            while not threshold_reached:
                for server in self.servers:
                    active_resident = stats_all_buckets[bucket.name].get_stats(
                        [server], bucket, '',
                        'vb_active_perc_mem_resident')[server]
                    if int(active_resident) > self.active_resident_threshold:
                        self.log.info(
                            "resident ratio %s is greater than %s for %s in bucket %s. Continue loading the cluster"
                            % (active_resident, self.active_resident_threshold,
                               server.ip, bucket.name))
                        random_key = self.key_generator()
                        generate_load = BlobGenerator(random_key,
                                                      '%s-' % random_key,
                                                      self.value_size,
                                                      end=self.num_items)
                        self._load_all_buckets(self.master,
                                               generate_load,
                                               "create",
                                               0,
                                               1,
                                               0,
                                               True,
                                               batch_size=20000,
                                               pause_secs=5,
                                               timeout_secs=180)
                        self.load_gen_list.append(generate_load)
                    else:
                        threshold_reached = True
                        self.log.info(
                            "DGM state achieved for %s in bucket %s!" %
                            (server.ip, bucket.name))
                        break

        if self.doc_ops is not None:
            if "update" in self.doc_ops:
                for gen in self.load_gen_list[:int(
                        len(self.load_gen_list) * 0.5)]:
                    self._load_all_buckets(self.master,
                                           gen,
                                           "update",
                                           0,
                                           1,
                                           0,
                                           True,
                                           batch_size=20000,
                                           pause_secs=5,
                                           timeout_secs=180)
            if ("delete" in self.doc_ops):
                for gen in self.load_gen_list[
                        int(len(self.load_gen_list) * 0.5):]:
                    self._load_all_buckets(self.master,
                                           gen,
                                           "delete",
                                           0,
                                           1,
                                           0,
                                           True,
                                           batch_size=20000,
                                           pause_secs=5,
                                           timeout_secs=180)
            if ("expire" in self.doc_ops):
                for gen in self.load_gen_list[:int(
                        len(self.load_gen_list) * 0.8)]:
                    self._load_all_buckets(self.master,
                                           gen,
                                           "update",
                                           self.expire_time,
                                           1,
                                           0,
                                           True,
                                           batch_size=20000,
                                           pause_secs=5,
                                           timeout_secs=180)
                time.sleep(self.expire_time * 2)

                for server in self.servers:
                    shell = RemoteMachineShellConnection(server)
                    for bucket in self.buckets:
                        shell.execute_cbepctl(bucket, "", "set flush_param",
                                              "exp_pager_stime", 5)
                    shell.disconnect()
                time.sleep(30)
Example #18
    def _start_replication(self, server, bucket):
        shell = RemoteMachineShellConnection(server)
        shell.execute_cbepctl(self.bucket, "start", "", "", 0)
        shell.execute_cbepctl(self.bucket, "", "set tap_param",
                              "tap_throttle_queue_cap", 1000000)
        shell.disconnect()
Example #19
class RecoveryUseTransferTests(TransferBaseTest):

    def setUp(self):
        self.times_teardown_called = 1
        super(RecoveryUseTransferTests, self).setUp()
        self.server_origin = self.servers[0]
        self.server_recovery = self.servers[1]
        self.shell = RemoteMachineShellConnection(self.server_origin)
        info = self.shell.extract_remote_info()
        self.os = info.type.lower()

    def tearDown(self):
        if not self.input.param("skip_cleanup", True):
            if self.times_teardown_called > 1:
                if self.os == 'windows':
                    self.shell.delete_files("/cygdrive/c%s" % (self.backup_location))
                else:
                    self.shell.delete_files(self.backup_location)
                self.shell.disconnect()
                del self.buckets
        if self.input.param("skip_cleanup", True):
            if self.case_number > 1 or self.times_teardown_called > 1:
                if self.os == 'windows':
                    self.shell.delete_files("/cygdrive/c%s" % (self.backup_location))
                else:
                    self.shell.delete_files(self.backup_location)
                self.shell.disconnect()
                del self.buckets
        self.times_teardown_called += 1
        super(RecoveryUseTransferTests, self).tearDown()

    def recover_to_cbserver(self):
        """Recover data with 2.0 couchstore files to a 2.0 online server

        We load a number of items to one node first and then do some mutations on these items.
        Later we use cbtransfer to transfer the couchstore files we have on this
        node to a new node. We verify the data by comparing the items in KVStore
        with the items in the new node."""

        self.load_data()

        kvs_before = {}
        bucket_names = []
        for bucket in self.buckets:
            kvs_before[bucket.name] = bucket.kvs[1]
            bucket_names.append(bucket.name)

        if self.default_bucket:
            self.cluster.create_default_bucket(self.server_recovery, self.bucket_size, self.num_replicas)
            self.buckets.append(Bucket(name="default", authType="sasl", saslPassword="", num_replicas=self.num_replicas, bucket_size=self.bucket_size))
        self._create_sasl_buckets(self.server_recovery, self.sasl_buckets)
        self._create_standard_buckets(self.server_recovery, self.standard_buckets)

        for bucket in self.buckets:
            bucket.kvs[1] = kvs_before[bucket.name]
            transfer_source = "couchstore-files://%s" % (COUCHBASE_DATA_PATH)
            transfer_destination = "http://%s@%s:%s -b %s -B %s -v -v -v" % (self.couchbase_login_info,
                                                                             self.server_recovery.ip,
                                                                             self.server_recovery.port,
                                                                             bucket.name, bucket.name)
            self.shell.execute_cbtransfer(transfer_source, transfer_destination)
        del kvs_before
        time.sleep(self.expire_time + 1)
        shell_server_recovery = RemoteMachineShellConnection(self.server_recovery)
        for bucket in self.buckets:
            shell_server_recovery.execute_cbepctl(bucket, "", "set flush_param", "exp_pager_stime", 5)
        shell_server_recovery.disconnect()
        time.sleep(30)

        self._wait_for_stats_all_buckets([self.server_recovery])
        self._verify_all_buckets(self.server_recovery, 1, self.wait_timeout * 50, self.max_verify, True, 1)
        self._verify_stats_all_buckets([self.server_recovery])

    def recover_to_backupdir(self):
        """Recover data with 2.0 couchstore files to a 2.0 backup diretory

        We load a number of items to a node first and then do some mutataion on these items.
        Later we use cbtransfer to transfer the couchstore files we have on this node to
        a backup directory. We use cbrestore to restore these backup files to the same node
        for verification."""

        self.load_data()

        kvs_before = {}
        bucket_names = []

        self.shell.delete_files(self.backup_location)
        self.shell.create_directory(self.backup_location)

        for bucket in self.buckets:
            kvs_before[bucket.name] = bucket.kvs[1]
            bucket_names.append(bucket.name)
            transfer_source = "-v -v -v couchstore-files://%s" % (COUCHBASE_DATA_PATH)
            transfer_destination = self.backup_location
            self.shell.execute_cbtransfer(transfer_source, transfer_destination)

        self._all_buckets_delete(self.server_origin)
        if self.default_bucket:
            self.cluster.create_default_bucket(self.server_origin, self.bucket_size, self.num_replicas)
            self.buckets.append(Bucket(name="default", authType="sasl", saslPassword="", num_replicas=self.num_replicas, bucket_size=self.bucket_size))
        self._create_sasl_buckets(self.server_origin, self.sasl_buckets)
        self._create_standard_buckets(self.server_origin, self.standard_buckets)

        for bucket in self.buckets:
            bucket.kvs[1] = kvs_before[bucket.name]
        del kvs_before
        self.shell.restore_backupFile(self.couchbase_login_info, self.backup_location, bucket_names)
        time.sleep(self.expire_time + 1)
        for bucket in self.buckets:
            self.shell.execute_cbepctl(bucket, "", "set flush_param", "exp_pager_stime", 5)
        time.sleep(30)

        self._wait_for_stats_all_buckets([self.server_origin])
        self._verify_all_buckets(self.server_origin, 1, self.wait_timeout * 50, self.max_verify, True, 1)
        self._verify_stats_all_buckets([self.server_origin])

    def load_data(self):
        gen_load = BlobGenerator('nosql', 'nosql-', self.value_size, end=self.num_items)
        gen_update = BlobGenerator('nosql', 'nosql-', self.value_size, end=(self.num_items // 2 - 1))
        gen_expire = BlobGenerator('nosql', 'nosql-', self.value_size, start=self.num_items // 2, end=(self.num_items * 3 // 4 - 1))
        gen_delete = BlobGenerator('nosql', 'nosql-', self.value_size, start=self.num_items * 3 // 4, end=self.num_items)
        self._load_all_buckets(self.server_origin, gen_load, "create", 0, 1, self.item_flag, True, batch_size=20000, pause_secs=5, timeout_secs=180)

        if self.doc_ops is not None:
            if "update" in self.doc_ops:
                self._load_all_buckets(self.server_origin, gen_update, "update", 0, 1, self.item_flag, True, batch_size=20000, pause_secs=5, timeout_secs=180)
            if "delete" in self.doc_ops:
                self._load_all_buckets(self.server_origin, gen_delete, "delete", 0, 1, self.item_flag, True, batch_size=20000, pause_secs=5, timeout_secs=180)
            if "expire" in self.doc_ops:
                self._load_all_buckets(self.server_origin, gen_expire, "update", self.expire_time, 1, self.item_flag, True, batch_size=20000, pause_secs=5, timeout_secs=180)
        self._wait_for_stats_all_buckets([self.server_origin])
        time.sleep(30)
Example #20
    def test_rebalance_in_out_at_once_persistence_stopped(self):
        """
        PERFORMANCE: Rebalance in/out at once with stopped persistence.

        This test begins by loading a given number of items into the cluster
        with self.nodes_init nodes in it. Then we stop persistence on some
        nodes. The test starts to update some data and load new data into the
        cluster. At that time we add servs_in nodes, remove servs_out nodes
        and start rebalance. After rebalance and data ops are completed we
        start the verification phase: wait for the disk queues to drain,
        verify the number of items that were (or were not) persisted
        against expected values, and verify that there has been no data loss,
        i.e. sum(curr_items) matches curr_items_total. Once all checks pass,
        the test is finished.
        Available parameters with their defaults:
        nodes_init=1, nodes_in=1,
        nodes_out=1, num_nodes_with_stopped_persistence=1,
        num_items_without_persistence=100000
        """
        num_nodes_with_stopped_persistence = self.input.param(
            "num_nodes_with_stopped_persistence", 1)
        servs_init = self.servers[:self.nodes_init]
        servs_in = [
            self.servers[i + self.nodes_init] for i in range(self.nodes_in)
        ]
        servs_out = [
            self.servers[self.nodes_init - i - 1]
            for i in range(self.nodes_out)
        ]
        rest = RestConnection(self.master)
        self._wait_for_stats_all_buckets(servs_init)
        for server in servs_init[:min(num_nodes_with_stopped_persistence,
                                      self.nodes_init)]:
            shell = RemoteMachineShellConnection(server)
            for bucket in self.buckets:
                shell.execute_cbepctl(bucket, "stop", "", "", "")
        self.sleep(5)
        self.num_items_without_persistence = self.input.param(
            "num_items_without_persistence", 100000)
        gen_extra = BlobGenerator('mike',
                                  'mike-',
                                  self.value_size,
                                  start=self.num_items // 2,
                                  end=self.num_items // 2 +
                                  self.num_items_without_persistence)
        self.log.info("current nodes : {0}".format(
            [node.id for node in rest.node_statuses()]))
        self.log.info("adding nodes {0} to cluster".format(servs_in))
        self.log.info("removing nodes {0} from cluster".format(servs_out))
        tasks = self._async_load_all_buckets(self.master,
                                             gen_extra,
                                             "create",
                                             0,
                                             batch_size=1000)
        result_nodes = set(servs_init + servs_in) - set(servs_out)
        # use a 60-minute rebalance timeout because of MB-7386 (rebalance can get stuck)
        self.cluster.rebalance(
            servs_init[:self.nodes_init],
            servs_in,
            servs_out,
            timeout=self.wait_timeout * 60,
            sleep_before_rebalance=self.sleep_before_rebalance)
        for task in tasks:
            task.result()

        # Validate seq_no snap_start/stop values after rebalance
        self.check_snap_start_corruption()

        self._wait_for_stats_all_buckets(
            servs_init[:self.nodes_init - self.nodes_out],
            ep_queue_size=self.num_items_without_persistence * 0.9,
            ep_queue_size_cond='>')
        self._wait_for_stats_all_buckets(servs_in)
        self._verify_all_buckets(self.master, timeout=None)
        self._verify_stats_all_buckets(result_nodes)
        # verify that curr_items_tot corresponds to sum of curr_items from all nodes
        verified = True
        for bucket in self.buckets:
            verified &= RebalanceHelper.wait_till_total_numbers_match(
                self.master, bucket)
        self.assertTrue(
            verified,
            "Lost items!!! Replication completed but sum(curr_items) doesn't match curr_items_total"
        )
        self.verify_unacked_bytes_all_buckets()
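    # Hedged sketch (not from the original suite): the stop-persistence loop
    # above generalizes to a small helper that toggles the flusher on a set of
    # nodes via cbepctl. `action` is "stop" or "start"; the empty-string
    # arguments mirror the execute_cbepctl signature used throughout this file.
    def _toggle_persistence(self, servers, action):
        for server in servers:
            shell = RemoteMachineShellConnection(server)
            for bucket in self.buckets:
                shell.execute_cbepctl(bucket, action, "", "", "")
            shell.disconnect()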
Example #21
    def test_poisoned_cas(self):
        """
        - set the clock ahead
        - do lots of sets and get some CASs
        - do a set, get the CAS (flag, CAS, value) and save it
        - set the clock back
        - verify the CAS is still big on new sets
        - reset the CAS
        - check the vbucket max CAS and verify it
        - do a new mutation and verify the CAS is smaller
        """

        self.log.info("starting test_poisoned_cas")

        sdk_client = SDKClient(scheme="couchbase", hosts=[self.servers[0].ip], bucket=self.buckets[0].name)
        mc_client = MemcachedClientHelper.direct_client(self.servers[0], self.buckets[0])
        shell = RemoteMachineShellConnection(self.servers[0])

        # move the system clock ahead to poison the CAS
        self.assertTrue(shell.change_system_time(LWWStatsTests.ONE_HOUR_IN_SECONDS), "Failed to advance the clock")

        output, error = shell.execute_command("date")
        self.log.info("Date after is set forward {0}".format(output))

        rc = sdk_client.set("key1", "val1")
        rc = mc_client.get("key1")
        poisoned_cas = rc[1]
        self.log.info("The poisoned CAS is {0}".format(poisoned_cas))

        # do lots of mutations to set the max CAS for all vbuckets

        gen_load = BlobGenerator("key-for-cas-test", "value-for-cas-test-", self.value_size, end=10000)
        self._load_all_buckets(self.master, gen_load, "create", 0)

        # move the clock back again and verify the CAS stays large
        self.assertTrue(shell.change_system_time(-LWWStatsTests.ONE_HOUR_IN_SECONDS), "Failed to change the clock")
        output, error = shell.execute_command("date")
        self.log.info("Date after is set backwards {0}".format(output))

        use_mc_bin_client = self.input.param("use_mc_bin_client", False)

        if use_mc_bin_client:
            rc = mc_client.set("key2", 0, 0, "val2")
            second_poisoned_cas = rc[1]
        else:
            rc = sdk_client.set("key2", "val2")
            second_poisoned_cas = rc.cas
        self.log.info("The second_poisoned CAS is {0}".format(second_poisoned_cas))
        self.assertTrue(
            second_poisoned_cas > poisoned_cas,
            "Second poisoned CAS {0} is not larger than the first poisoned CAS {1}".format(
                second_poisoned_cas, poisoned_cas
            ),
        )

        # reset the CAS for all vbuckets. This needs to be done in conjunction with a clock change; if the clock
        # is not changed, the CAS will immediately continue tracking the clock. Two scenarios:
        # 1. Set the clock back 1 hour and the CAS back 30 minutes: the CAS should be used
        # 2. Set the clock back 1 hour and the CAS back 2 hours: the clock should be used

        # do case 1: set the CAS back 30 minutes. The calculation below assumes the CAS is in nanoseconds
        earlier_max_cas = poisoned_cas - 30 * 60 * 1000000000
        for i in range(self.vbuckets):
            output, error = shell.execute_cbepctl(
                self.buckets[0], "", "set_vbucket_param", "max_cas ", str(i) + " " + str(earlier_max_cas)
            )
            if len(error) > 0:
                self.fail("Failed to set the max cas")

        # verify the max CAS

        for i in range(self.vbuckets):
            max_cas = int(mc_client.stats("vbucket-details")["vb_" + str(i) + ":max_cas"])
            self.assertTrue(
                max_cas == earlier_max_cas,
                "Max CAS not properly set for vbucket {0} set as {1} and observed {2}".format(
                    i, earlier_max_cas, max_cas
                ),
            )
            self.log.info("Per cbstats the max cas for bucket {0} is {1}".format(i, max_cas))

        rc1 = sdk_client.set("key-after-resetting cas", "val1")
        rc2 = mc_client.get("key-after-resetting cas")
        set_cas_after_reset_max_cas = rc2[1]
        self.log.info("The later CAS is {0}".format(set_cas_after_reset_max_cas))
        self.assertTrue(
            set_cas_after_reset_max_cas < poisoned_cas,
            "For {0} CAS has not decreased. Current CAS {1} poisoned CAS {2}".format(
                "key-after-resetting cas", set_cas_after_reset_max_cas, poisoned_cas
            ),
        )

        # do a bunch of sets and verify each resulting CAS is small

        gen_load = BlobGenerator(
            "key-for-cas-test-after-cas-is-reset", "value-for-cas-test-", self.value_size, end=1000
        )
        self._load_all_buckets(self.master, gen_load, "create", 0)

        gen_load.reset()
        while gen_load.has_next():
            key, value = gen_load.next()
            try:
                rc = mc_client.get(key)
                # rc = sdk_client.get(key)
            except Exception:
                self.log.info("get error with {0}".format(key))
                continue
            cas = rc[1]
            self.assertTrue(
                cas < poisoned_cas,
                "For key {0} CAS has not decreased. Current CAS {1} poisoned CAS {2}".format(
                    key, cas, poisoned_cas
                ),
            )

        rc = sdk_client.set("key3", "val1")
        better_cas = rc.cas

        self.log.info("The better CAS is {0}".format(better_cas))

        self.assertTrue(better_cas < poisoned_cas, "The CAS was not improved")

        # TODO: set the clock way ahead - remote_util_OS.py (new)
        # TODO: do a bunch of mutations - not really needed
        # TODO: do the fix command - cbepctl, the existing way (remote util)
        # TODO: do some mutations, verify they conform to the new CAS - build on the CAS code;
        #       where to iterate over the keys and get the CAS?
Example #22
    def convert_sqlite_to_couchstore(self):
        """Convert data with 181 sqlite files to a 2.0+ online server

        We load a number of items to one 181 node first and then do some mutation on these items.
        Later we use cbtranfer to transfer the sqlite files we have on this
        node to a new node. We verify the data by comparison between the items in KVStore
        and items in the new node."""

        self._install([self.server_origin])

        if self.default_bucket:
            bucket_params = self._create_bucket_params(
                server=self.server_origin,
                size=self.bucket_size,
                replicas=self.num_replicas)
            self.cluster.create_default_bucket(bucket_params)
            self.buckets.append(
                Bucket(name="default",
                       num_replicas=self.num_replicas,
                       bucket_size=self.bucket_size))
        self._create_standard_buckets(self.server_origin,
                                      self.standard_buckets)

        self.load_data()

        if self.os == 'windows':
            output, error = self.shell.execute_command(
                "taskkill /F /T /IM cbtransfer.exe")
            self.shell.log_command_output(output, error)
            self.shell.delete_files("/cygdrive/c%s" % self.win_data_location)
            self.shell.execute_command("mkdir /cygdrive/c%s" %
                                       self.win_data_location)
            self.shell.execute_command("cp -rf %s /cygdrive/c/tmp/" %
                                       (WIN_COUCHBASE_DATA_PATH))
        else:
            self.shell.delete_files(self.backup_location)
            self.shell.execute_command("mkdir %s" % self.backup_location)
            self.shell.execute_command(
                "cp -rf %s %s" % (COUCHBASE_DATA_PATH, self.backup_location))

        self._install([self.server_origin], version=self.latest_version)
        self._install([self.server_recovery], version=self.latest_version)

        kvs_before = {}
        bucket_names = []
        for bucket in self.buckets:
            kvs_before[bucket.name] = bucket.kvs[1]
            bucket_names.append(bucket.name)

        del self.buckets
        self.buckets = []
        if self.default_bucket:
            bucket_params = self._create_bucket_params(
                server=self.server_recovery,
                size=self.bucket_size,
                replicas=self.num_replicas)
            self.cluster.create_default_bucket(bucket_params)
            self.buckets.append(
                Bucket(name="default",
                       num_replicas=self.num_replicas,
                       bucket_size=self.bucket_size))
        self._create_standard_buckets(self.server_recovery,
                                      self.standard_buckets)

        for bucket in self.buckets:
            bucket.kvs[1] = kvs_before[bucket.name]
            transfer_source = "%s/data/%s-data/%s" % (self.backup_location,
                                                      bucket.name, bucket.name)
            if self.os == 'windows':
                transfer_source = "C:%s/%s-data/%s" % (
                    self.win_data_location, bucket.name, bucket.name)
            transfer_destination = "http://%s@%s:%s" % (
                self.couchbase_login_info, self.server_recovery.ip,
                self.server_recovery.port)
            self.shell.execute_cbtransfer(
                transfer_source, transfer_destination, "-b %s -B %s %s" %
                (bucket.name, bucket.name, self.command_options))
        del kvs_before

        time.sleep(self.expire_time + 1)
        shell_server_recovery = RemoteMachineShellConnection(
            self.server_recovery)
        for bucket in self.buckets:
            shell_server_recovery.execute_cbepctl(bucket, "",
                                                  "set flush_param",
                                                  "exp_pager_stime", 5)
        shell_server_recovery.disconnect()
        time.sleep(self.wait_timeout)

        self._wait_for_stats_all_buckets([self.server_recovery])
        self._verify_stats_all_buckets([self.server_recovery])
        self._verify_all_buckets(self.server_recovery, 1,
                                 self.wait_timeout * 50, self.max_verify, True,
                                 1)
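    # Hedged sketch (not from the original test): roughly the cbtransfer
    # invocation that execute_cbtransfer wraps - copying an on-disk data
    # directory into a live cluster. Host, credentials and bucket names below
    # are placeholders:
    #
    #   cbtransfer /backup/data/default-data/default \
    #       http://Administrator:password@10.1.2.3:8091 \
    #       -b default -B default
    #
    # where -b selects the source bucket and -B the destination bucket.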
Example #23
    def _start_replication(self, server, bucket):
        shell = RemoteMachineShellConnection(server)
        shell.execute_cbepctl(bucket, "start", "", "", 0)
        shell.execute_cbepctl(bucket, "", "set tap_param", "tap_throttle_queue_cap", 1000000)
        shell.disconnect()
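    # Hedged counterpart (assumed, not in the source): the inverse helper that
    # _start_replication implies - stop the flusher and throttle the TAP queue.
    # The queue-cap value is illustrative only.
    def _stop_replication(self, server, bucket):
        shell = RemoteMachineShellConnection(server)
        shell.execute_cbepctl(bucket, "stop", "", "", 0)
        shell.execute_cbepctl(bucket, "", "set tap_param", "tap_throttle_queue_cap", 10)
        shell.disconnect()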