Example #1
 def test_not_my_vbucket_config(self):
     self.gen_load = BlobGenerator('cccp', 'cccp-', self.value_size, end=self.num_items)
     self._load_all_buckets(self.master, self.gen_load, "create", 0)
     self.cluster.rebalance(self.servers[:self.nodes_init],
                            self.servers[self.nodes_init:self.nodes_init + 1], [])
     self.nodes_init = self.nodes_init + 1
     for bucket in self.buckets:
         while self.gen_load.has_next():
             key, _ = self.gen_load.next()
             try:
                 self.clients[bucket.name].get(key)
             except Exception as ex:
                 self.log.info("Config in exception is correct. Bucket %s, key %s" % (bucket.name, key))
                 # the exception text embeds the cluster config as JSON; slice it out
                 config = str(ex)[str(ex).find("Not my vbucket':") + len("Not my vbucket':"):str(ex).find("for vbucket")]
                 config = json.loads(config)
                 self.verify_config(config, bucket)
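The core trick in this example is that the client reports a wrong-node access by embedding the current cluster config in the error text. A minimal standalone sketch of that extraction, assuming the message carries the JSON between "Not my vbucket':" and "for vbucket" exactly as sliced above:

    import json

    def extract_not_my_vbucket_config(exc_text):
        # Slice the JSON cluster config out of the error message and parse it;
        # raises ValueError if the markers are missing or the payload is malformed.
        marker = "Not my vbucket':"
        start = exc_text.find(marker) + len(marker)
        end = exc_text.find("for vbucket")
        return json.loads(exc_text[start:end])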
Example #2
    def test_nwusage_with_hard_failover_and_bwthrottle_enabled_later(self):
        self.setup_xdcr()
        self.sleep(60)
        self._set_doc_size_num()

        self.src_cluster.pause_all_replications()

        gen_create = BlobGenerator('nwOne',
                                   'nwOne',
                                   self._value_size,
                                   end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(kv_gen=gen_create)

        self.src_cluster.resume_all_replications()

        self.sleep(15)

        self.src_cluster.failover_and_rebalance_nodes()

        self.sleep(15)

        nw_limit = self._input.param("nw_limit", self._get_nwusage_limit())
        self._set_nwusage_limit(self.src_cluster,
                                nw_limit * self.num_src_nodes)
        bw_enable_time = self._get_current_time(
            self.src_cluster.get_master_node())
        self.log.info(
            "Bandwidth throttler enabled at {0}".format(bw_enable_time))

        self.sleep(60)

        self.src_cluster.rebalance_in()
        node_back_time = self._get_current_time(
            self.src_cluster.get_master_node())
        self.log.info("Node added back at {0}".format(node_back_time))

        self._wait_for_replication_to_catchup(timeout=600)

        self.verify_results()
        self._verify_bandwidth_usage(node=self.src_cluster.get_master_node(),
                                     event_time=bw_enable_time,
                                     end_time=node_back_time,
                                     no_of_nodes=self.num_src_nodes - 1)
        self._verify_bandwidth_usage(node=self.src_cluster.get_master_node(),
                                     no_of_nodes=self.num_src_nodes,
                                     event_time=node_back_time)
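The two _verify_bandwidth_usage calls split the timeline at the moment the failed-over node was added back: the throttle window is checked against one node fewer, the post-rebalance window against the full node count. A sketch of such a windowed check, assuming usage samples arrive as (timestamp, usage) pairs and the configured limit scales linearly with node count (hypothetical helper; the real verification lives in the XDCR test framework):

    def bandwidth_within_limit(samples, limit, no_of_nodes, event_time, end_time=None):
        # Check every sample inside the [event_time, end_time] window against the
        # aggregate limit for the given node count (limit and samples are assumed
        # to be expressed in the same unit).
        total_limit = limit * no_of_nodes
        for ts, usage in samples:
            if ts >= event_time and (end_time is None or ts <= end_time):
                if usage > total_limit:
                    return False
        return True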
Example #3
    def test_nwusage_with_unidirection(self):
        self.setup_xdcr()
        self.sleep(60)
        nw_limit = self._input.param("nw_limit", 1)
        self._set_nwusage_limit(self.src_cluster, nw_limit)

        gen_create = BlobGenerator('nwOne',
                                   'nwOne',
                                   self._value_size,
                                   end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(kv_gen=gen_create)

        self.perform_update_delete()

        self.verify_results()
        self._verify_bandwidth_usage(node=self.src_cluster.get_master_node(),
                                     nw_limit=nw_limit)
Example #4
    def test_compression_with_disabling_later(self):
        self.setup_xdcr()
        self.sleep(60)
        compression_type = self._input.param("compression_type", "Snappy")
        self._set_compression_type(self.src_cluster, "default", compression_type)

        gen_create = BlobGenerator('comprOne-', 'comprOne-', self._value_size, end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(kv_gen=gen_create)

        self.async_perform_update_delete()
        self.sleep(10)

        self._set_compression_type(self.src_cluster, "default", "None")

        self._wait_for_replication_to_catchup()

        self.verify_results()
Example #5
 def test_cluster_rebalance_in_env_var_services(self):
     gen_load = BlobGenerator('buckettest',
                              'buckettest-',
                              self.value_size,
                              start=0,
                              end=self.num_items)
     self._load_all_buckets(self.master, gen_load, "create", 0)
     self.find_nodes_in_list()
     servers_in = self.servers[1:]
     for servers in servers_in:
         self.secretmgmt_base_obj.setup_pass_node(servers, self.password)
     rebalance = self.cluster.async_rebalance(
         self.servers[:self.nodes_init],
         self.nodes_in_list, [],
         services=self.services_in)
     self.assertTrue(rebalance.result(),
                     "Issue with rebalance-in with different services")
Example #6
 def incremental_rebalance_out_with_mutation_and_deletion(self):
     gen_2 = BlobGenerator('rebalance-del', 'rebalance-del-', self.value_size,
                           start=self.num_items // 2 + 2000,
                           end=self.num_items)
     batch_size = 1000
     for i in reversed(range(self.num_servers)[1:]):
         # don't use batch for rebalance out 2-1 nodes
         for bucket in self.buckets:
             bucket.kvs[2] = KVStore()
         tasks = [self.cluster.async_rebalance(self.servers[:i], [], [self.servers[i]])]
         tasks += self._async_load_all_buckets(self.master, self.gen_update, "update", 0, kv_store=1, batch_size=batch_size, timeout_secs=60)
         tasks += self._async_load_all_buckets(self.master, gen_2, "delete", 0, kv_store=2, batch_size=batch_size, timeout_secs=60)
         for task in tasks:
             task.result()
         self.sleep(5)
         self._load_all_buckets(self.master, gen_2, "create", 0)
         self.verify_cluster_stats(self.servers[:i])
     self.verify_unacked_bytes_all_buckets()
    def test_nwusage_with_auto_failover_and_bwthrottle_enabled_later(self):
        self.setup_xdcr()
        self.sleep(60)
        self._set_doc_size_num()

        self.src_cluster.rebalance_in()

        self.src_cluster.pause_all_replications()

        gen_create = BlobGenerator('nwOne', 'nwOne', self._value_size, end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(kv_gen=gen_create)

        self.src_cluster.resume_all_replications()

        self.sleep(15)

        shell = RemoteMachineShellConnection(self._input.servers[1])
        shell.stop_couchbase()
        self.sleep(45)
        task = self.cluster.async_rebalance(self.src_cluster.get_nodes(), [], [])
        task.result()
        FloatingServers._serverlist.append(self._input.servers[1])

        self.sleep(15)

        nw_limit = self._input.param("nw_limit", self._get_nwusage_limit())
        self._set_nwusage_limit(self.src_cluster, nw_limit * self.num_src_nodes)
        bw_enable_time = self._get_current_time(self.src_cluster.get_master_node())
        self.log.info("Bandwidth throttler enabled at {0}".format(bw_enable_time))

        self.sleep(60)

        shell.start_couchbase()
        shell.disable_firewall()
        self.sleep(30)
        self.src_cluster.rebalance_in()
        node_back_time = self._get_current_time(self.src_cluster.get_master_node())
        self.log.info("Node added back at {0}".format(node_back_time))

        self._wait_for_replication_to_catchup(timeout=600)

        self.verify_results()
        self._verify_bandwidth_usage(node=self.src_cluster.get_master_node(), event_time=bw_enable_time,
                                     end_time=node_back_time, no_of_nodes=self.num_src_nodes)
        self._verify_bandwidth_usage(node=self.src_cluster.get_master_node(), event_time=node_back_time,
                                     no_of_nodes=self.num_src_nodes + 1)
Example #8
 def checks_tap_connections_tests(self):
     servs_init = self.servers[:self.nodes_init]
     servs_in = [
         self.servers[i + self.nodes_init] for i in range(self.nodes_in)
     ]
     servs_out = [
         self.servers[self.nodes_init - i - 1]
         for i in range(self.nodes_out)
     ]
     rest = RestConnection(self.master)
     buckets_stats_before = {}
     for bucket in self.buckets:
         _, result = rest.get_bucket_stats_json(bucket)
         buckets_stats_before[
             bucket.name] = result["op"]["samples"]["ep_tap_user_count"]
     self.log.info("current nodes : {0}".format(
         [node.id for node in rest.node_statuses()]))
     self.log.info("adding nodes {0} to cluster".format(servs_in))
     self.log.info("removing nodes {0} from cluster".format(servs_out))
     result_nodes = set(servs_init + servs_in) - set(servs_out)
     self.cluster.rebalance(servs_init[:self.nodes_init], servs_in,
                            servs_out)
     gen = BlobGenerator('mike2',
                         'mike2-',
                         self.value_size,
                         end=self.num_items)
     self._load_all_buckets(self.master, gen, "create", 0)
     self.verify_cluster_stats(result_nodes)
     buckets_stats_after = {}
     for bucket in self.buckets:
         _, result = rest.get_bucket_stats_json(bucket)
         buckets_stats_after[
             bucket.name] = result["op"]["samples"]["ep_tap_user_count"]
         for stat in buckets_stats_after[
                 bucket.name][len(buckets_stats_before[bucket.name]) - 1:]:
             if stat != 0:
                 self.log.error(
                     "'ep_tap_user_count' for bucket '{0}' before test:{1}".
                     format(bucket.name, buckets_stats_before[bucket.name]))
                 self.log.error(
                     "'ep_tap_user_count' for bucket '{0}' after test:{1}".
                     format(bucket.name, buckets_stats_after[bucket.name]))
                 self.log.error("expected 'ep_tap_user_count' to be 0")
                 break
         else:
             self.log.info(
                 "'ep_tap_user_count' for bucket '{0}' = 0 for the entire test".
                 format(bucket.name))
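The slice [len(buckets_stats_before[bucket.name]) - 1:] restricts the check to stat samples recorded since the pre-rebalance snapshot. The same idea as a standalone predicate (a sketch, assuming the stats endpoint returns an append-only list of samples, as the slicing implies):

    def new_samples_are_zero(samples_before, samples_after):
        # Only samples recorded after the snapshot matter; the overlap with the
        # pre-test window is skipped using the length of the earlier list.
        return all(s == 0 for s in samples_after[len(samples_before) - 1:])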
Example #9
    def rebalance_in_with_failover_full_addback_recovery(self):
        gen_update = BlobGenerator('mike', 'mike-', self.value_size, end=self.num_items)
        tasks = []
        tasks += self._async_load_all_buckets(self.master, gen_update, "update", 0)
        for task in tasks:
            task.result()
        servs_in = [self.servers[i + self.nodes_init] for i in range(self.nodes_in)]
        self._verify_stats_all_buckets(self.servers[:self.nodes_init], timeout=120)
        self._wait_for_stats_all_buckets(self.servers[:self.nodes_init])
        self.sleep(20)
        prev_failover_stats = self.get_failovers_logs(self.servers[:self.nodes_init], self.buckets)
        prev_vbucket_stats = self.get_vbucket_seqnos(self.servers[:self.nodes_init], self.buckets)
        disk_replica_dataset, disk_active_dataset = self.get_and_compare_active_replica_data_set_all(self.servers[:self.nodes_init], self.buckets, path=None)
        self.rest = RestConnection(self.master)
        self.nodes = self.get_nodes(self.master)

        chosen = RebalanceHelper.pick_nodes(self.master, howmany=1)
        # Mark Node for failover
        success_failed_over = self.rest.fail_over(chosen[0].id, graceful=False)

        # Perform doc-mutation after node failover
        tasks = self._async_load_all_buckets(
            self.master, gen_update, "update", 0)
        for task in tasks:
            task.result()

        # Mark Node for full recovery
        if success_failed_over:
            self.rest.set_recovery_type(otpNode=chosen[0].id, recoveryType="full")

        rebalance = self.cluster.async_rebalance(
            self.servers[:self.nodes_init], servs_in, [],
            sleep_before_rebalance=self.sleep_before_rebalance)
        rebalance.result()

        # Validate seq_no snap_start/stop values after rebalance
        self.check_snap_start_corruption()

        self._verify_stats_all_buckets(self.servers[:self.nodes_in + self.nodes_init], timeout=120)
        self.verify_cluster_stats(self.servers[:self.nodes_in + self.nodes_init], check_ep_items_remaining=True)
        self.compare_failovers_logs(prev_failover_stats, self.servers[:self.nodes_in + self.nodes_init], self.buckets)
        self.sleep(30)
        self.data_analysis_active_replica_all(disk_active_dataset, disk_replica_dataset, self.servers[:self.nodes_in + self.nodes_init], self.buckets, path=None)
        self.verify_unacked_bytes_all_buckets()
        nodes = self.get_nodes_in_cluster(self.master)
        self.vb_distribution_analysis(servers=nodes, buckets=self.buckets, std=1.0, total_vbuckets=self.total_vbuckets)
Example #10
    def test_failover_swap_rebalance(self):
        """ add and failover node then perform swap rebalance """

        assert len(self.servers) > 2, "not enough servers"
        nodeA = self.servers[0]
        nodeB = self.servers[1]
        nodeC = self.servers[2]

        gen_create = BlobGenerator('dcp', 'dcp-', 64, start=0, end=self.num_items)
        self._load_all_buckets(nodeA, gen_create, "create", 0)

        vbucket = 0
        vb_uuid, seqno, high_seqno = self.vb_info(nodeA, vbucket)

        # rebalance in nodeB
        assert self.cluster.rebalance([nodeA], [nodeB], [])

        # add nodeC
        rest = RestConnection(nodeB)
        rest.add_node(user=nodeC.rest_username,
                      password=nodeC.rest_password,
                      remoteIp=nodeC.ip,
                      port=nodeC.port)

        # stop and failover nodeA
        assert self.stop_node(0)
        self.stopped_nodes.append(0)

        assert self.cluster.failover([nodeB], [nodeA])
        assert self.cluster.rebalance([nodeB], [], [])
        # verify seqnos and stream mutations
        rest = RestConnection(nodeB)
        vbuckets = rest.get_vbuckets()
        total_mutations = 0

        for vb in vbuckets:
            mcd_client = self.mcd_client(nodeB)
            stats = mcd_client.stats(VBSEQNO_STAT)
            vbucket = vb.id
            key = 'vb_{0}:high_seqno'.format(vbucket)
            total_mutations += int(stats[key])

        assert total_mutations == self.num_items
        task = self.cluster.async_rebalance([nodeB], [], [nodeC])
        task.result()
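The mutation count above is just the sum of every vbucket's high seqno from the vbucket-seqno stats. Factored out as a helper (a sketch, assuming the stats dict uses the vb_<id>:high_seqno key pattern shown in the loop):

    def total_high_seqno(stats, vbucket_ids):
        # Sum the high seqno reported for each vbucket in the vbucket-seqno stats.
        return sum(int(stats['vb_{0}:high_seqno'.format(vb_id)])
                   for vb_id in vbucket_ids)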
Example #11
    def test_online_swap_rebalance_upgrade(self):
        """ Online swap rebalance upgrade test

        The old nodes are removed and the new nodes are added followed by a rebalance.
        """
        # Installs the `self.initial_version` of Couchbase on the first two servers
        self.product = 'couchbase-server'
        self._install(self.input.servers[:2])

        # Check Couchbase is running post installation
        for server in self.input.servers:
            self.assertTrue(RestHelper(RestConnection(server)).is_ns_server_running(60), f"ns_server is not running on {server}")

        # Install the `self.upgrade_versions` on the last 2 nodes
        self.initial_version = self.upgrade_versions[0]
        self._install(self.input.servers[2:])

        # Swap out the first two nodes and swap in the upgraded nodes via rebalance
        self.cluster.rebalance(self.servers, self.servers[2:], self.servers[:2], services=["kv", "kv"])

        # Replace the services of the last node with kv and backup
        self.replace_services(self.servers[2:], self.servers[-1], ["kv,backup"])

        # Add the built in user for memcached authentication
        self.add_built_in_server_user(node=self.servers[2])

        # Create the default bucket and update the list of buckets
        rest_conn = RestConnection(self.servers[2])
        rest_conn.create_bucket(bucket='default', ramQuotaMB=512, compressionMode=self.compression_mode)
        self.buckets = rest_conn.get_buckets()

        # Populate the buckets with data
        self._load_all_buckets(self.servers[2], BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items), "create", 0)

        try:
            backup_service_hook = BackupServiceHook(self.servers[-1], self.servers, self.backupset, None)

            # Wait for the data to be persisted to disk
            for bucket in self.buckets:
                if not RebalanceHelper.wait_for_stats_on_all(backup_service_hook.backup_service.master, bucket.name, 'ep_queue_size', 0, timeout_in_seconds=200):
                    self.fail("Timeout reached while waiting for 'eq_queue_size' to reach 0")

            backup_service_hook.run_test()
        finally:
            backup_service_hook.cleanup()
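wait_for_stats_on_all polls ep_queue_size down to 0 to confirm all mutations are persisted before the backup runs. A generic form of that poll loop (a sketch; get_stat is a hypothetical callable, not a testrunner API):

    import time

    def wait_for_stat(get_stat, expected, timeout_secs=200, interval_secs=2):
        # Poll a stat accessor until it returns the expected value or we time out.
        deadline = time.time() + timeout_secs
        while time.time() < deadline:
            if get_stat() == expected:
                return True
            time.sleep(interval_secs)
        return False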
Example #12
    def test_compression_with_failover(self):
        self.setup_xdcr()
        self.sleep(60)
        compression_type = self._input.param("compression_type", "Snappy")
        self._set_compression_type(self.src_cluster, "standard_bucket_1",
                                   compression_type)

        self.src_cluster.pause_all_replications()

        gen_create = BlobGenerator('comprOne-',
                                   'comprOne-',
                                   self._value_size,
                                   end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(kv_gen=gen_create)

        self.src_cluster.resume_all_replications()

        self.async_perform_update_delete()

        src_conn = RestConnection(self.src_cluster.get_master_node())
        graceful = self._input.param("graceful", False)
        self.recoveryType = self._input.param("recoveryType", None)
        self.src_cluster.failover(graceful=graceful)

        self.sleep(30)

        if self.recoveryType:
            server_nodes = src_conn.node_statuses()
            for node in server_nodes:
                if node.ip == self._input.servers[1].ip:
                    src_conn.set_recovery_type(otpNode=node.id,
                                               recoveryType=self.recoveryType)
                    self.sleep(30)
                    src_conn.add_back_node(otpNode=node.id)
            rebalance = self.cluster.async_rebalance(
                self.src_cluster.get_nodes(), [], [])
            rebalance.result()

        self._wait_for_replication_to_catchup()

        self._verify_compression(cluster=self.src_cluster,
                                 compr_bucket_name="standard_bucket_1",
                                 uncompr_bucket_name="standard_bucket_2",
                                 compression_type=compression_type)
        self.verify_results()
Example #13
 def __load_data(self):
     self.gen_load = BlobGenerator('couch',
                                   'cb-',
                                   self.value_size,
                                   end=self.num_items)
     bucket_to_load = None
     for bucket in self.buckets:
         if bucket.name == 'default':
             bucket_to_load = bucket
             break
     self.assertNotEqual(
         bucket_to_load,
         None,
         msg="Could not find default bucket on node {0}".format(
             self.master.ip))
     self.cluster.load_gen_docs(self.master, bucket_to_load.name,
                                self.gen_load, bucket_to_load.kvs[1],
                                'create')
Example #14
 def setUp(self):
     super(NodeServiceTests, self).setUp()
     self.helper = ServerHelper(self)
     num_buckets = self.input.param("num_buckets", 1)
     compression = self.input.param("sdk_compression", True)
     for i in range(num_buckets):
         RestConnection(self.servers[0]).create_bucket(bucket='bucket%s' % i, ramQuotaMB=100, proxyPort=STANDARD_BUCKET_PORT + i + 1)
         gen_load = BlobGenerator('ui', 'ui-', 256, start=0, end=10)
         cluster = Cluster()
         try:
             gen = copy.deepcopy(gen_load)
             task = cluster.async_load_gen_docs(self.servers[0], 'bucket%s' % i, gen,
                                                Bucket().kvs[1], 'create',
                                                0, 0, True, 1, 1, 30, compression=compression)
             task.result()
         finally:
             cluster.shutdown()
     BaseHelper(self).login()
Example #15
    def test_nwusage_with_unidirection_in_parallel(self):
        self.setup_xdcr()

        gen_create = BlobGenerator('nwOne', 'nwOne', self._value_size, end=self._num_items)
        tasks = self.src_cluster.async_load_all_buckets_from_generator(kv_gen=gen_create)

        self._set_doc_size_num()
        nw_limit = self._input.param("nw_limit", self._get_nwusage_limit())
        self._set_nwusage_limit(self.src_cluster, nw_limit * self.num_src_nodes)

        for task in tasks:
            task.result()

        self._wait_for_replication_to_catchup()

        self.verify_results()
        self._verify_bandwidth_usage(node=self.src_cluster.get_master_node(), nw_limit=nw_limit,
                                     no_of_nodes=self.num_src_nodes)
Example #16
 def test_Transfer(self):
      shell = RemoteMachineShellConnection(self.master)
      gen_update = BlobGenerator('testdata', 'testdata-', self.value_size, end=100)
      self._load_all_buckets(self.master, gen_update, "create", 0, 1, 0, True, batch_size=20000,
                                                                     pause_secs=5, timeout_secs=180)
      self._wait_for_stats_all_buckets(self.servers[:self.num_servers])
      source = "http://" + self.master.ip + ":8091"
      info = shell.extract_remote_info()
      path = '/tmp/backup'
      #if info.type.lower() == "windows":
      #    path = '/cygdrive/c' + path
      shell.delete_files(path)
      create_dir = "mkdir " + path
      shell.execute_command(create_dir)
      options = "-b default " + " -u " + self.master.rest_username + " -p " + self.master.rest_password
      shell.execute_cbtransfer(source, path, options)
      expectedResults = {"peername":self.master.ip, "sockname":self.master.ip + ":11210", "source":"memcached", "user":"******", 'bucket':'default'}
      self.checkConfig(self.eventID, self.master, expectedResults)
Example #17
    def test_nwusage_reset_to_zero(self):
        self.setup_xdcr()
        nw_limit = self._input.param("nw_limit", 1)
        self._set_nwusage_limit(self.src_cluster, nw_limit)

        gen_create = BlobGenerator('nwOne', 'nwOne', self._value_size, end=self._num_items)
        tasks = self.src_cluster.async_load_all_buckets_from_generator(kv_gen=gen_create)

        self._set_nwusage_limit(self.src_cluster, 0)
        event_time = self._get_current_time(self.src_cluster.get_master_node())
        self.log.info("Network limit reset to 0 at {0}".format(event_time))

        for task in tasks:
            task.result()

        self.verify_results()
        self._verify_bandwidth_usage(node=self.src_cluster.get_master_node(), nw_limit=nw_limit, end_time=event_time)
        self._verify_bandwidth_usage(node=self.src_cluster.get_master_node(), nw_limit=0, no_of_nodes=2, event_time=event_time, nw_usage="0")
Example #18
    def test_ephemeral_buckets(self):
        eviction_policy = self.input.param("eviction_policy", 'noEviction')
        shared_params = self._create_bucket_params(server=self.server, size=100,
                                                   replicas=self.num_replicas, bucket_type='ephemeral',
                                                   eviction_policy=eviction_policy)
        # just do sasl for now, pending decision on support of non-sasl buckets in 5.0
        self.cluster.create_sasl_bucket(name=self.bucket_name, password=self.sasl_password, bucket_params=shared_params)
        self.buckets.append(Bucket(name=self.bucket_name, authType="sasl", saslPassword=self.sasl_password,
                                           num_replicas=self.num_replicas,
                                           bucket_size=self.bucket_size, master_id=self.server))

        self.assertTrue(BucketOperationHelper.wait_for_bucket_creation(self.bucket_name, self.rest),
                            msg='failed to start up bucket with name "{0}"'.format(self.bucket_name))
        gen_load = BlobGenerator('buckettest', 'buckettest-', self.value_size, start=0, end=self.num_items)
        self._load_all_buckets(self.server, gen_load, "create", 0)
        self.cluster.bucket_delete(self.server, self.bucket_name)
        self.assertTrue(BucketOperationHelper.wait_for_bucket_deletion(self.bucket_name, self.rest, timeout_in_seconds=60),
                            msg='bucket "{0}" was not deleted even after waiting for 60 seconds'.format(self.bucket_name))
Example #19
 def setUp(self):
     super(MoxiTests, self).setUp()
     self.gen_load = BlobGenerator('moxi',
                                   'moxi-',
                                   self.value_size,
                                   end=self.num_items)
     self.moxi_port = self.input.param('moxi_port', 51500)
     self.ops = self.input.param('doc_ops', 'create')
     self.cluster_ops = self.input.param("ops", [])
     if self.cluster_ops:
         self.cluster_ops = self.cluster_ops.split(';')
     try:
         self.assertTrue(self.master != self.moxi_server,
                         'There are not enough vms!')
         self._stop_moxi()
     except Exception:
         self.tearDown()
         raise
Example #20
    def test_nwusage_with_rebalance_out(self):
        self.setup_xdcr()
        self.sleep(60)
        no_of_nodes = self.num_src_nodes - 1
        self._set_doc_size_num()
        nw_limit = self._input.param("nw_limit", self._get_nwusage_limit())
        self._set_nwusage_limit(self.src_cluster, nw_limit * no_of_nodes)

        gen_create = BlobGenerator('nwOne', 'nwOne', self._value_size, end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(kv_gen=gen_create)

        self.src_cluster.rebalance_out()

        self.perform_update_delete()

        self.verify_results()
        self._verify_bandwidth_usage(node=self.src_cluster.get_master_node(), nw_limit=nw_limit,
                                     no_of_nodes=no_of_nodes)
Example #21
    def incremental_rebalance_out_in_with_mutation(self):
        init_num_nodes = self.input.param("init_num_nodes", 1)

        self.cluster.rebalance(self.servers[:self.num_servers],
                               self.servers[1:init_num_nodes], [])
        gen = BlobGenerator('mike', 'mike-', self.value_size, end=self.num_items)
        self._load_all_buckets(self.master, gen, "create", 0)
        for i in range(self.num_servers):
            tasks = self._async_load_all_buckets(self.master, gen, "update", 0, batch_size=10, timeout_secs=60)

            self.cluster.rebalance(self.servers[:self.num_servers], self.servers[init_num_nodes:init_num_nodes + i + 1], [])
            self.sleep(10)
            self.cluster.rebalance(self.servers[:self.num_servers],
                                   [], self.servers[init_num_nodes:init_num_nodes + i + 1])
            for task in tasks:
                task.result(self.wait_timeout * 30)
            self.verify_cluster_stats(self.servers[:init_num_nodes])
        self.verify_unacked_bytes_all_buckets()
Example #22
 def test_cluster_rebalance_out_prompt(self):
     gen_load = BlobGenerator('buckettest',
                              'buckettest-',
                              self.value_size,
                              start=0,
                              end=self.num_items)
     self._load_all_buckets(self.master, gen_load, "create", 0)
     servers_in = self.servers[1:]
     for servers in servers_in:
         self.secretmgmt_base_obj.setup_pass_node(servers,
                                                  self.password,
                                                  startup_type='prompt')
     self.cluster.rebalance(self.servers, servers_in, [])
     servers_out = self.servers[2:]
     temp_result = self.cluster.rebalance(self.servers, [], servers_out)
     self.assertTrue(
         temp_result,
         'Rebalance-out did not complete with password node setup')
Example #23
 def setUp(self):
     super(CompactionViewTests, self).setUp()
     self.value_size = self.input.param("value_size", 256)
     self.fragmentation_value = self.input.param("fragmentation_value", 80)
     self.ddocs_num = self.input.param("ddocs_num", 1)
     self.view_per_ddoc = self.input.param("view_per_ddoc", 2)
     self.use_dev_views = self.input.param("use_dev_views", False)
     self.default_map_func = "function (doc) {\n  emit(doc._id, doc);\n}"
     self.default_view_name = "default_view"
     self.default_view = View(self.default_view_name, self.default_map_func,
                              None)
     self.ddocs = []
     self.gen_load = BlobGenerator('test_view_compaction',
                                   'test_view_compaction-',
                                   self.value_size,
                                   end=self.num_items)
     self.thread_crashed = Event()
     self.thread_stopped = Event()
Example #24
 def test_data_distribution(self):
     """
         Test to check for data distribution at vbucket level
     """
     self.std = self.input.param("std", 1.0)
     self.gen_create = BlobGenerator('loadOne',
                                     'loadOne_',
                                     self.value_size,
                                     end=self.num_items)
     self._load_all_buckets(self.master,
                            self.gen_create,
                            "create",
                            0,
                            batch_size=10000,
                            pause_secs=10,
                            timeout_secs=60)
     self._wait_for_stats_all_buckets(self.servers)
     self.data_distribution_analysis(self.num_items, self.std)
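data_distribution_analysis asserts that items are spread evenly across vbuckets within the configured std. A sketch of one plausible form of that check over per-vbucket item counts (hypothetical helper; the real analysis lives in the test framework):

    def distribution_is_even(per_vb_counts, allowed_std):
        # Compare the standard deviation of per-vbucket item counts against the
        # allowed threshold, here interpreted as a fraction of the mean count.
        mean = float(sum(per_vb_counts)) / len(per_vb_counts)
        variance = sum((c - mean) ** 2 for c in per_vb_counts) / len(per_vb_counts)
        return variance ** 0.5 <= allowed_std * mean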
Example #25
 def test_data_analysis_active_replica_comparison_all(self):
     """
          Method to compare active vs replica data using the cbtransfer functionality.
          The comparison is done at cluster level.
     """
     self.gen_create = BlobGenerator('loadOne',
                                     'loadOne_',
                                     self.value_size,
                                     end=self.num_items)
     self._load_all_buckets(self.master,
                            self.gen_create,
                            "create",
                            0,
                            batch_size=10000,
                            pause_secs=10,
                            timeout_secs=60)
     self._wait_for_stats_all_buckets(self.servers)
     self.data_analysis_all_replica_active()
Example #26
    def test_load_collection(self):
        #epengine.basic_collections.basic_collections.test_load_collection,value_size=200,num_items=100,collection=True
        self.value_size = 200
        self.enable_bloom_filter = False
        self.buckets = RestConnection(self.master).get_buckets()
        self.active_resident_threshold = float(
            self.input.param("active_resident_threshold", 100))

        gen_create = BlobGenerator('eviction',
                                   'eviction-',
                                   self.value_size,
                                   end=self.num_items)

        self._load_all_buckets(self.master, gen_create, "create", 0)

        self._wait_for_stats_all_buckets(self.servers[:self.nodes_init])

        self._verify_all_buckets(self.master)
        self._verify_stats_all_buckets(self.servers[:self.nodes_init])
Example #27
    def incremental_rebalance_in_out_with_mutation_and_deletion(self):
        self.cluster.rebalance(self.servers[:self.num_servers],
                               self.servers[1:self.num_servers], [])
        gen_delete = BlobGenerator('mike', 'mike-', self.value_size,
                                   start=self.num_items // 2 + 2000,
                                   end=self.num_items)
        for i in reversed(range(self.num_servers)[self.num_servers // 2:]):
            tasks = self._async_load_all_buckets(self.master, self.gen_update, "update", 0,
                                                 pause_secs=5, batch_size=1, timeout_secs=60)
            tasks.extend(self._async_load_all_buckets(self.master, gen_delete, "delete", 0,
                                                      pause_secs=5, batch_size=1, timeout_secs=60))

            self.cluster.rebalance(self.servers[:i], [], self.servers[i:self.num_servers])
            self.sleep(60)
            self.cluster.rebalance(self.servers[:self.num_servers],
                                   self.servers[i:self.num_servers], [])
            for task in tasks:
                task.result(self.wait_timeout * 30)
            self._load_all_buckets(self.master, gen_delete, "create", 0)
            self.verify_cluster_stats(self.servers[:self.num_servers])
Example #28
 def test_bucket_create_password(self,
                                 bucket_name='secretsbucket',
                                 num_replicas=1,
                                 bucket_size=100):
     for servers in self.servers:
         self.secretmgmt_base_obj.setup_pass_node(servers, self.password)
     bucket_type = self.input.param("bucket_type", 'couchbase')
     tasks = []
     if bucket_type == 'couchbase':
         # self.cluster.create_sasl_bucket(self.master, bucket_name, self.password, num_replicas)
         rest = RestConnection(self.master)
         rest.create_bucket(bucket_name, ramQuotaMB=100)
     elif bucket_type == 'standard':
         self.cluster.create_standard_bucket(self.master, bucket_name,
                                             STANDARD_BUCKET_PORT + 1,
                                             bucket_size)
     elif bucket_type == "memcached":
         tasks.append(
             self.cluster.async_create_memcached_bucket(
                 self.master, bucket_name, STANDARD_BUCKET_PORT + 1,
                 bucket_size))
         for task in tasks:
             self.assertTrue(task.result(), "Issue with bucket creation")
     else:
         self.log.error('Bucket type not specified')
         return
     self.assertTrue(
         BucketOperationHelper.wait_for_bucket_creation(
             bucket_name, RestConnection(self.master)),
         msg='failed to start up bucket with name "{0}"'.format(bucket_name))
     gen_load = BlobGenerator('buckettest',
                              'buckettest-',
                              self.value_size,
                              start=0,
                              end=self.num_items)
     self._load_all_buckets(self.master, gen_load, "create", 0)
     install_path = self.secretmgmt_base_obj._get_install_path(self.master)
     temp_result = self.secretmgmt_base_obj.check_config_files(
         self.master, install_path, '/config/config.dat', self.password)
     self.assertTrue(temp_result, "Password found in config.dat")
     temp_result = self.secretmgmt_base_obj.check_config_files(
         self.master, install_path, 'isasl.pw', self.password)
     self.assertTrue(temp_result, "Password found in isasl.pw")
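The check_config_files assertions boil down to: the plaintext password must not appear in the on-disk config files. A sketch of such a scan (hypothetical standalone helper; the real check runs over a remote shell in the test framework):

    def file_leaks_secret(path, secret):
        # Read the file as bytes and look for the plaintext secret in it.
        with open(path, 'rb') as f:
            return secret.encode('utf-8') in f.read()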
Example #29
    def test_stream_eviction(self):
        # eviction.evictionkv.EvictionDCP.test_stream_eviction,dgm_run=True,eviction_policy=fullEviction

        vbuckets = self.rest.get_vbuckets()

        doc_gen = BlobGenerator('dcpdata', 'dcpdata-', self.value_size, end=self.num_items)
        self._load_all_buckets(self.master, doc_gen, "create", 10)
        # sleep for 10 seconds
        time.sleep(10)
        # get the item count
        item_count = self.rest.get_bucket(self.buckets[0]).stats.itemCount
        self.assertEqual(item_count, self.num_items)

        expired_keys = []
        # check if all the keys expired
        keys = []
        for i in range(1000):
            keys.append("dcpdata" + str(i))
        time.sleep(10)
        for key in keys:
            try:
                self.client.get(key=key)
                msg = "expiry was set to {0} but key: {1} did not expire after waiting for {2}+ seconds"
                self.log.info(msg.format(10, key, 10))
            except mc_bin_client.MemcachedError as error:
                # status 1 == KEY_ENOENT, i.e. the key expired as expected
                self.assertEqual(error.status, 1)

        for vb in vbuckets[0:self.vbuckets]:
            vbucket = vb.id
            vb_uuid, _, high_seqno = self.vb_info(self.servers[0], vbucket, bucket=self.buckets[0])
            stream = self.dcp_client.stream_req(vbucket, 0, 0, high_seqno, vb_uuid)
            self.dcp_client.general_control("enable_expiry_opcode", "true")

            responses = stream.run()
            for i in responses:
                if i['opcode'] == constants.CMD_EXPIRATION:
                    expired_keys.append(i['key'])
        item_count = self.rest.get_bucket(self.buckets[0]).stats.itemCount
        self.assertEqual(item_count, 0)
        check_values = set(keys).intersection(expired_keys)  # every key should appear among the expirations
        self.assertEqual(len(check_values), self.num_items)
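The final assertion says that every generated key must have surfaced in the DCP stream as an expiration event. The same check as set arithmetic (a sketch, assuming num_items equals the number of generated keys):

    missing = set(keys) - set(expired_keys)
    assert not missing, "keys never seen as expirations: {0}".format(sorted(missing)[:5])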
Example #30
 def test_data_vb_num_distribution(self):
     """
         Test to check vbucket distribution for active and replica items
     """
     self.std = self.input.param("std", 1.0)
     self.total_vbuckets = self.input.param("total_vbuckets", 1.0)
     self.gen_create = BlobGenerator('loadOne',
                                     'loadOne_',
                                     self.value_size,
                                     end=self.num_items)
     self._load_all_buckets(self.master,
                            self.gen_create,
                            "create",
                            0,
                            batch_size=10000,
                            pause_secs=10,
                            timeout_secs=60)
     self._wait_for_stats_all_buckets(self.servers)
     self.vb_distribution_check(self.total_vbuckets, self.std)
Example #31
 def test_cluster_rebalance_out_diff_type_var_services(self):
     extra_pass = self.input.param("extra_pass", 'p@ssw0rd01')
     gen_load = BlobGenerator('buckettest', 'buckettest-', self.value_size, start=0, end=self.num_items)
     self._load_all_buckets(self.master, gen_load, "create", 0)
     self.find_nodes_in_list()
     servers_in = self.servers[1:]
     server_env_var = servers_in[0]
     server_prompt = servers_in[1]
     server_plain = servers_in[2]
     self.secretmgmt_base_obj.setup_pass_node(server_env_var, self.password)
     self.secretmgmt_base_obj.setup_pass_node(server_prompt, extra_pass, startup_type='prompt')
     self.secretmgmt_base_obj.setup_pass_node(server_plain, startup_type='simple')
     rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], self.nodes_in_list, [],
                                              services=self.services_in)
     rebalance.result()
     servers_out = self.servers[1:]
     rebalance = self.cluster.async_rebalance(self.servers, [], servers_out)
     result = rebalance.result()
     self.assertTrue(result, "Rebalance in and out with different servers failed")
Example #32
class CCCP(BaseTestCase):

    def setUp(self):
        super(CCCP, self).setUp()
        self.map_fn = 'function (doc){emit([doc.join_yr, doc.join_mo],doc.name);}'
        self.ddoc_name = "cccp_ddoc"
        self.view_name = "cccp_view"
        self.default_view = View(self.view_name, self.map_fn, None, False)
        self.ops = self.input.param("ops", None)
        self.clients = {}
        try:
            for bucket in self.buckets:
                self.clients[bucket.name] =\
                  MemcachedClientHelper.direct_client(self.master, bucket.name)
        except Exception:
            self.tearDown()

    def tearDown(self):
        super(CCCP, self).tearDown()

    def test_get_config_client(self):
        tasks = self.run_ops()
        for task in tasks:
            if self.ops != 'failover':
                task.result()
        for bucket in self.buckets:
            _, _, config = self.clients[bucket.name].get_config()
            self.verify_config(json.loads(config), bucket)

    def test_get_config_rest(self):
        tasks = self.run_ops()
        for task in tasks:
            if not task:
                self.fail("no task to run")
            task.result()
        for bucket in self.buckets:
            config = RestConnection(self.master).get_bucket_CCCP(bucket)
            self.verify_config(config, bucket)

    def test_set_config(self):
        tasks = self.run_ops()
        config_expected = 'abcabc'
        for task in tasks:
            task.result()
        for bucket in self.buckets:
            self.clients[bucket.name].set_config(config_expected)
            _, _, config = self.clients[bucket.name].get_config()
            self.assertEqual(config_expected, config, "Expected config: %s, actual %s" % (
                                                      config_expected, config))
            self.log.info("Config was set correctly. Bucket %s" % bucket.name)

    def test_not_my_vbucket_config(self):
        self.gen_load = BlobGenerator('cccp', 'cccp-', self.value_size, end=self.num_items)
        self._load_all_buckets(self.master, self.gen_load, "create", 0)
        self.cluster.rebalance(self.servers[:self.nodes_init],
                               self.servers[self.nodes_init:self.nodes_init + 1], [])
        self.nodes_init = self.nodes_init + 1
        not_my_vbucket = False
        for bucket in self.buckets:
            while self.gen_load.has_next() and not not_my_vbucket:
                key, _ = self.gen_load.next()
                try:
                    self.clients[bucket.name].get(key)
                except Exception as ex:
                    self.log.info("Config in exception is correct. Bucket %s, key %s"
                                  % (bucket.name, key))
                    config = str(ex)[str(ex).find("Not my vbucket':")
                                     + len("Not my vbucket':"):str(ex).find("for vbucket")]
                    config = json.loads(config)
                    self.verify_config(config, bucket)
                    # from watson, only the first error contains bucket details
                    not_my_vbucket = True
Example #33
    def test_poisoned_cas(self):

        self.log.info("starting test_poisoned_cas")

        """
        - set the clock ahead
        - do lots of sets and get some CASs
        - do a set and get the CAS (flag, CAS, value) and save it
        - set the clock back
        - verify the CAS is still big on new sets
        - reset the CAS
        - do the vbucket max cas and verify
        - do a new mutation and verify the CAS is smaller
        """

        sdk_client = SDKClient(scheme="couchbase", hosts=[self.servers[0].ip], bucket=self.buckets[0].name)
        mc_client = MemcachedClientHelper.direct_client(self.servers[0], self.buckets[0])
        shell = RemoteMachineShellConnection(self.servers[0])

        # move the system clock ahead to poison the CAS
        self.assertTrue(shell.change_system_time(LWWStatsTests.ONE_HOUR_IN_SECONDS), "Failed to advance the clock")

        output, error = shell.execute_command("date")
        self.log.info("Date after is set forward {0}".format(output))

        rc = sdk_client.set("key1", "val1")
        rc = mc_client.get("key1")
        poisoned_cas = rc[1]
        self.log.info("The poisoned CAS is {0}".format(poisoned_cas))

        # do lots of mutations to set the max CAS for all vbuckets

        gen_load = BlobGenerator("key-for-cas-test", "value-for-cas-test-", self.value_size, end=10000)
        self._load_all_buckets(self.master, gen_load, "create", 0)

        # move the clock back again and verify the CAS stays large
        self.assertTrue(shell.change_system_time(-LWWStatsTests.ONE_HOUR_IN_SECONDS), "Failed to change the clock")
        output, error = shell.execute_command("date")
        self.log.info("Date after is set backwards {0}".format(output))

        use_mc_bin_client = self.input.param("use_mc_bin_client", False)

        if use_mc_bin_client:
            rc = mc_client.set("key2", 0, 0, "val2")
            second_poisoned_cas = rc[1]
        else:
            rc = sdk_client.set("key2", "val2")
            second_poisoned_cas = rc.cas
        self.log.info("The second_poisoned CAS is {0}".format(second_poisoned_cas))
        self.assertTrue(
            second_poisoned_cas > poisoned_cas,
            "Second poisoned CAS {0} is not larger than the first poisoned cas".format(
                second_poisoned_cas, poisoned_cas
            ),
        )

        # reset the CAS for all vbuckets. This needs to be done in conjunction with a clock change. If the clock is not
        # changed then the CAS will immediately continue with the clock. I see two scenarios:
        # 1. Set the clock back 1 hour and the CAS back 30 minutes, the CAS should be used
        # 2. Set the clock back 1 hour, set the CAS back 2 hours, the clock should be used

        # do case 1, set the CAS back 30 minutes.  Calculation below assumes the CAS is in nanoseconds
        earlier_max_cas = poisoned_cas - 30 * 60 * 1000000000
        for i in range(self.vbuckets):
            output, error = shell.execute_cbepctl(
                self.buckets[0], "", "set_vbucket_param", "max_cas ", str(i) + " " + str(earlier_max_cas)
            )
            if len(error) > 0:
                self.fail("Failed to set the max cas")

        # verify the max CAS

        for i in range(self.vbuckets):
            max_cas = int(mc_client.stats("vbucket-details")["vb_" + str(i) + ":max_cas"])
            self.assertTrue(
                max_cas == earlier_max_cas,
                "Max CAS not properly set for vbucket {0} set as {1} and observed {2}".format(
                    i, earlier_max_cas, max_cas
                ),
            )
            self.log.info("Per cbstats the max cas for bucket {0} is {1}".format(i, max_cas))

        rc1 = sdk_client.set("key-after-resetting cas", "val1")
        rc2 = mc_client.get("key-after-resetting cas")
        set_cas_after_reset_max_cas = rc2[1]
        self.log.info("The later CAS is {0}".format(set_cas_after_reset_max_cas))
        self.assertTrue(
            set_cas_after_reset_max_cas < poisoned_cas,
            "For {0} CAS has not decreased. Current CAS {1} poisoned CAS {2}".format(
                "key-after-resetting cas", set_cas_after_reset_max_cas, poisoned_cas
            ),
        )

        # do a bunch of sets and verify the CAS is small - this is really only one set, need to do more

        gen_load = BlobGenerator(
            "key-for-cas-test-after-cas-is-reset", "value-for-cas-test-", self.value_size, end=1000
        )
        self._load_all_buckets(self.master, gen_load, "create", 0)

        gen_load.reset()
        while gen_load.has_next():
            key, value = gen_load.next()
            try:
                rc = mc_client.get(key)
                # rc = sdk_client.get(key)
                cas = rc[1]
                self.assertTrue(
                    cas < poisoned_cas,
                    "For key {0} CAS has not decreased. Current CAS {1} poisoned CAS {2}".format(
                        key, cas, poisoned_cas
                    ),
                )
            except Exception:
                self.log.info("get error with {0}".format(key))

        rc = sdk_client.set("key3", "val1")
        better_cas = rc.cas

        self.log.info("The better CAS is {0}".format(better_cas))

        self.assertTrue(better_cas < poisoned_cas, "The CAS was not improved")

        # set the clock way ahead - remote_util_OS.py (new)
        # do a bunch of mutations - not really needed
        # do the fix command - cbepctl, the existing way (remote util)

        # do some mutations, verify they conform to the new CAS - build on the CAS code,
        #     where to iterate over the keys and get the CAS?
        """