def load_docs_in_cb_bucket_before_and_after_cbas_connect(self):
        self.setup_for_test()

        # Load more docs in Couchbase bucket.
        self.perform_doc_ops_in_all_cb_buckets("create", self.num_items,
                                               self.num_items * 2)
        self.bucket_util.verify_stats_all_buckets(self.num_items * 2)

        if self.test_abort_snapshot:
            self.log.info("Creating sync_write aborts after dataset connect")
            for server in self.cluster_util.get_kv_nodes():
                ssh_shell = RemoteMachineShellConnection(server)
                cbstats = Cbstats(ssh_shell)
                replica_vbs = cbstats.vbucket_list(
                    self.bucket_util.buckets[0].name, "replica")
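                # Target replica vbuckets of this node so the durable aborts
                # create abort snapshots on the replica side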
                load_gen = doc_generator("test_abort_key",
                                         self.num_items,
                                         self.num_items,
                                         target_vbucket=replica_vbs)
                success = self.bucket_util.load_durable_aborts(
                    ssh_shell, [load_gen], self.bucket_util.buckets[0],
                    self.durability_level, "update", "all_aborts")
                if not success:
                    self.log_failure("Simulating aborts failed")
                ssh_shell.disconnect()

            self.validate_test_failure()

        # Validate no. of items in CBAS dataset
        if not self.cbas_util.validate_cbas_dataset_items_count(
                self.cbas_dataset_name, self.num_items * 2):
            self.fail("No. of items in CBAS dataset do not match "
                      "that in the CB bucket")
Example #2
    def test_crash_while_streaming(self):
        bucket = self.bucket_util.buckets[0]
        vbucket = randint(0, self.vbuckets)
        nodeA = self.servers[0]
        self.load_docs(bucket, vbucket, 0, self.num_items, "create")

        shell_conn = RemoteMachineShellConnection(nodeA)
        cb_stat_obj = Cbstats(shell_conn)

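        # Open a producer stream whose end_seqno (2x num_items) is beyond the
        # current high_seqno, so the stream stays open across the node restart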
        dcp_client = self.dcp_client(nodeA, dcp.constants.PRODUCER)
        _ = dcp_client.stream_req(vbucket, 0, 0, 2 * self.num_items, 0)
        self.load_docs(nodeA, vbucket, self.num_items)
        self.assertTrue(self.stop_node(0), msg="Failed during stop_node")
        self.sleep(2, "Sleep after stop_node")
        self.assertTrue(self.start_node(0), msg="Failed during start_node")
        rest = RestHelper(RestConnection(nodeA))
        self.assertTrue(rest.is_ns_server_running(),
                        msg="Failed while is_ns_server_running check")
        self.sleep(30, "Sleep to wait for ns_server to run")

        vb_info = cb_stat_obj.vbucket_seqno(bucket.name)
        dcp_client = self.dcp_client(nodeA, dcp.constants.PRODUCER)
        stream = dcp_client.stream_req(vbucket, 0, 0,
                                       vb_info[vbucket]["high_seqno"], 0)
        stream.run()
        self.assertTrue(stream.last_by_seqno == vb_info[vbucket]["high_seqno"],
                        msg="Mismatch in high_seqno. {0} == {1}".format(
                            vb_info[vbucket]["high_seqno"],
                            stream.last_by_seqno))

        # Disconnect shell Connection for the node
        shell_conn.disconnect()
Example #3
 def setUp(self):
     super(OpsChangeCasTests, self).setUp()
     self.key = "test_cas"
     self.expire_time = self.input.param("expire_time", 35)
     self.item_flag = self.input.param("item_flag", 0)
     self.load_gen = doc_generator(self.key, 0, self.num_items,
                                   doc_size=self.doc_size)
     self.node_data = dict()
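     # Record each KV node's shell, cbstats handle and vbucket ownership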
     for node in self.cluster_util.get_kv_nodes():
         shell = RemoteMachineShellConnection(node)
         cb_stat = Cbstats(shell)
         self.node_data[node.ip] = dict()
         self.node_data[node.ip]["shell"] = shell
         self.node_data[node.ip]["cb_stat"] = Cbstats(shell)
         self.node_data[node.ip]["active"] = cb_stat.vbucket_list(
             self.bucket,
             "active")
         self.node_data[node.ip]["replica"] = cb_stat.vbucket_list(
             self.bucket,
             "replica")
     if self.sdk_client_pool:
         self.client = self.sdk_client_pool.get_client_for_bucket(
             self.bucket)
     else:
         self.client = SDKClient([self.cluster.master], self.bucket)
Example #4
    def collect_vbucket_num_stats(self, servers, buckets):
        """
            Method to extract the failovers stats given by cbstats tool

            Paramters:
              buckets: bucket informaiton
              servers: server information

            Returns:
              Failover stats as follows:
              if not collecting per node :: {bucket : [{key:value}]}
              if collecting per node :: {bucket : {node:[{key:value}]}}
        """
        active_bucketMap = {}
        replica_bucketMap = {}
        for bucket in buckets:
            active_map_data = {}
            replica_map_data = {}
            for server in servers:
                #client = MemcachedClientHelper.direct_client(server, bucket)
                #stats = client.stats('')
                cbstat = Cbstats(server)
                stats = cbstat.vbucket_list(bucket.name)
                active_map_data[server.ip] = len(stats)
                stats = cbstat.vbucket_list(bucket.name,
                                            vbucket_type="replica")
                replica_map_data[server.ip] = len(stats)
                # for key in stats.keys():
                #     if key == 'vb_active_num':
                #         active_map_data[server.ip] = int(stats[key])
                #     if key == 'vb_replica_num':
                #         replica_map_data[server.ip] = int(stats[key])
            active_bucketMap[bucket.name] = active_map_data
            replica_bucketMap[bucket.name] = replica_map_data
        return active_bucketMap, replica_bucketMap
Example #5
    def test_mutations_during_rebalance(self):
        # start rebalance
        task = self.cluster.async_rebalance([self.master], self.servers[1:],
                                            [])
        # load some data
        vbucket = 0
        self.load_docs(self.master, vbucket, self.num_items)
        shell_conn = RemoteMachineShellConnection(self.cluster.master)
        cb_stat_obj = Cbstats(shell_conn)
        # Fetch vbucket seqno stats
        vb_stat = cb_stat_obj.vbucket_seqno(self.bucket_util.buckets[0].name)
        # stream
        self.log.info("Streaming vb {0} to seqno {1}"
                      .format(vbucket, vb_stat[vbucket]["high_seqno"]))
        self.assertEqual(vb_stat[vbucket]["high_seqno"], self.num_items)

        dcp_client = self.dcp_client(self.master, PRODUCER, vbucket)
        stream = dcp_client.stream_req(vbucket, 0, 0,
                                       vb_stat[vbucket]["high_seqno"],
                                       vb_stat[vbucket]["uuid"])

        stream.run()
        last_seqno = stream.last_by_seqno
        assert last_seqno == vb_stat[vbucket]["high_seqno"], last_seqno

        # verify rebalance
        assert task.result()

        # Close the cbstats shell connection
        shell_conn.disconnect()
Example #6
    def test_create_delete_collection_same_order(self):
        # Create collection in increasing order
        shell_conn = RemoteMachineShellConnection(self.cluster.master)
        cb_stat = Cbstats(shell_conn)
        collection_count = 1
        while collection_count < 1000:
            doc_loading_spec = \
                self.bucket_util.get_crud_template_from_package(
                    "def_add_collection")
            self.bucket_util.run_scenario_from_spec(self.task,
                                                    self.cluster,
                                                    self.bucket_util.buckets,
                                                    doc_loading_spec,
                                                    mutation_num=0,
                                                    batch_size=self.batch_size)
            collection_count = cb_stat.get_collections(self.bucket)["count"]
        self.bucket_util.validate_docs_per_collections_all_buckets()

        # Delete collections
        while collection_count > 1:
            doc_loading_spec = \
                self.bucket_util.get_crud_template_from_package(
                    "def_drop_collection")
            self.bucket_util.run_scenario_from_spec(self.task,
                                                    self.cluster,
                                                    self.bucket_util.buckets,
                                                    doc_loading_spec,
                                                    mutation_num=0,
                                                    batch_size=self.batch_size)
            collection_count = cb_stat.get_collections(self.bucket)["count"]

        # Validate doc count as per bucket collections
        self.bucket_util.validate_docs_per_collections_all_buckets()
        shell_conn.disconnect()
        self.validate_test_failure()
Example #7
 def get_vbucket_type_mapping(self, bucket_name):
     for node in self.vbs_in_node.keys():
         cb_stat = Cbstats(self.vbs_in_node[node]["shell"])
         self.vbs_in_node[node]["active"] = \
             cb_stat.vbucket_list(bucket_name, "active")
         self.vbs_in_node[node]["replica"] = \
             cb_stat.vbucket_list(bucket_name, "replica")
Example #8
    def rebalance_out_with_warming_up(self):
        master_restart = self.input.param("master_restart", False)
        if master_restart:
            warmup_node = self.cluster.master
        else:
            warmup_node = self.cluster.servers[len(self.cluster.servers) - self.nodes_out - 1]
        servs_out = self.cluster.servers[len(self.cluster.servers) - self.nodes_out:]

        if self.test_abort_snapshot:
            self.log.info("Creating sync_write abort scenario for replica vbs")
            for server in self.cluster_util.get_kv_nodes(self.cluster):
                ssh_shell = RemoteMachineShellConnection(server)
                cbstats = Cbstats(ssh_shell)
                replica_vbs = cbstats.vbucket_list(
                    self.cluster.buckets[0].name, "replica")
                load_gen = doc_generator(self.key, 0, 5000,
                                         target_vbucket=replica_vbs)
                success = self.bucket_util.load_durable_aborts(
                    ssh_shell, [load_gen],
                    self.cluster.buckets[0],
                    self.durability_level,
                    "update", "all_aborts")
                if not success:
                    self.log_failure("Simulating aborts failed")
                ssh_shell.disconnect()

            self.validate_test_failure()

        shell = RemoteMachineShellConnection(warmup_node)
        shell.stop_couchbase()
        self.sleep(20)
        shell.start_couchbase()
        shell.disconnect()

        # Workaround for Eph case (MB-44682 - Not a bug)
        if self.bucket_type == Bucket.Type.EPHEMERAL:
            self.sleep(15, "Wait for couchbase server to start")

        rebalance = self.task.async_rebalance(
            self.cluster.servers, [], servs_out)
        self.task.jython_task_manager.get_task_result(rebalance)
        self.cluster.nodes_in_cluster = list(set(self.cluster.nodes_in_cluster) - set(servs_out))
        if rebalance.result is False:
            self.log.info("Rebalance failed as expected against the "
                          "warming-up node")
            self.assertTrue(self.bucket_util._wait_warmup_completed(
                self.cluster_util.get_kv_nodes(self.cluster),
                self.cluster.buckets[0],
                wait_time=self.wait_timeout * 10))

            self.log.info("Second attempt to rebalance")
            rebalance = self.task.async_rebalance(
                self.cluster.servers, [], servs_out)
            self.task.jython_task_manager.get_task_result(rebalance)
            self.assertTrue(rebalance.result, "Rebalance attempt failed again")
            self.cluster.nodes_in_cluster = list(set(self.cluster.nodes_in_cluster) - set(servs_out))
        if not self.atomicity:
            self.bucket_util.verify_cluster_stats(self.cluster, self.num_items,
                                                  timeout=self.wait_timeout)
            self.bucket_util.verify_unacked_bytes_all_buckets(self.cluster)
Example #9
    def setUp(self):
        super(OutOfOrderReturns, self).setUp()

        self.ooo_order = 0
        self.test_lock = Lock()
        self.doc_ops = self.input.param("doc_ops", "update;update").split(";")

        # Initialize cluster using given nodes
        nodes_init = self.cluster.servers[1:self.nodes_init] \
            if self.nodes_init != 1 else []
        self.task.rebalance([self.cluster.master], nodes_init, [])
        self.cluster.nodes_in_cluster.extend([self.cluster.master] +
                                             nodes_init)

        # Disable auto-failover to avoid failover of nodes
        status = RestConnection(self.cluster.master) \
            .update_autofailover_settings(False, 120, False)
        self.assertTrue(status, msg="Failure during disabling auto-failover")

        # Create default bucket and add rbac user
        self.bucket_util.create_default_bucket(
            bucket_type=self.bucket_type,
            storage=self.bucket_storage,
            ram_quota=self.bucket_size,
            replica=self.num_replicas,
            compression_mode=self.compression_mode,
            eviction_policy=self.bucket_eviction_policy)

        self.bucket = self.bucket_util.buckets[0]
        # Create sdk_clients for pool
        if self.sdk_client_pool:
            self.log.info("Creating SDK client pool")
            self.sdk_client_pool.create_clients(
                self.bucket,
                self.cluster.nodes_in_cluster,
                req_clients=self.sdk_pool_capacity,
                compression_settings=self.sdk_compression)

        # Create shell connection to each kv_node for cbstat object
        self.kv_nodes = self.cluster_util.get_kv_nodes()
        self.node_data = dict()
        for node in self.kv_nodes:
            shell = RemoteMachineShellConnection(node)
            cb_stat = Cbstats(shell)
            self.node_data[node] = dict()
            self.node_data[node]["shell"] = shell
            self.node_data[node]["cb_stat"] = cb_stat
            self.node_data[node]["active_vbs"] = \
                cb_stat.vbucket_list(self.bucket.name, vbucket_type="active")
            self.node_data[node]["replica_vbs"] = \
                cb_stat.vbucket_list(self.bucket.name, vbucket_type="replica")

        # Print cluster & bucket stats
        self.cluster_util.print_cluster_stats()
        self.bucket_util.print_bucket_stats()
Example #10
    def test_failover_swap_rebalance(self):
        """ add and failover node then perform swap rebalance """

        assert len(self.servers) > 2, "not enough servers"
        nodeA = self.servers[0]
        nodeB = self.servers[1]
        nodeC = self.servers[2]

        gen_create = doc_generator('dcp', 0, self.num_items, doc_size=64)
        self._load_all_buckets(nodeA, gen_create, "create", 0)

        vbucket = 0

        # rebalance in nodeB
        assert self.cluster.rebalance([nodeA], [nodeB], [])

        # add nodeC
        rest = RestConnection(nodeB)
        rest.add_node(user=nodeC.rest_username,
                      password=nodeC.rest_password,
                      remoteIp=nodeC.ip,
                      port=nodeC.port)

        # stop and failover nodeA
        assert self.stop_node(0)
        self.stopped_nodes.append(0)
        self.master = nodeB

        assert self.cluster.failover([nodeB], [nodeA])
        try:
            assert self.cluster.rebalance([nodeB], [], [])
        except Exception:
            pass
        self.add_built_in_server_user()
        # verify seqnos and stream mutations
        rest = RestConnection(nodeB)
        total_mutations = 0

        # Create connection for CbStats
        shell_conn = RemoteMachineShellConnection(self.cluster.master)
        cb_stat_obj = Cbstats(shell_conn)
        vb_info = cb_stat_obj.vbucket_seqno(self.bucket_util.buckets[0].name)

        for vb in range(0, self.vbuckets):
            total_mutations += int(vb_info[vb]["high_seqno"])

        # Disconnect the Cbstats shell_conn
        shell_conn.disconnect()

        # No division by 2 needed: high_seqno is summed across all vbuckets,
        # which covers the items split between the two servers
        self.assertTrue(total_mutations == self.num_items,
                        msg="Number mismatch. {0} != {1}".format(
                            total_mutations, self.num_items))

        task = self.cluster.async_rebalance([nodeB], [], [nodeC])
        task.result()
Example #11
 def get_collection_id(self, bucket_name, scope_name, collection_name=None):
     shell = RemoteMachineShellConnection(self.cluster.master)
     cbstats = Cbstats(shell)
     if collection_name:
         field = scope_name + ':' + collection_name + ':' + 'id:'
         cid_stat = cbstats.get_stats(bucket_name, "collections",
                                      field)[0][0]
     else:
         field = scope_name + ':id:'
         cid_stat = cbstats.get_stats(bucket_name, "scopes", field)[0][0]
     shell.disconnect()
     return cid_stat.split('0x')[-1].strip()
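A minimal usage sketch for the helper above (hypothetical caller; the scope and collection names are assumptions, not from the source):

     # Fetch the hex id of a collection, then of its parent scope
     c_id = self.get_collection_id(self.bucket.name, "scope_1", "collection_1")
     s_id = self.get_collection_id(self.bucket.name, "scope_1")
     self.log.info("collection id: %s, scope id: %s" % (c_id, s_id))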
Example #12
 def get_magma_stats(self, bucket, servers=None):
     magma_stats_for_all_servers = dict()
     servers = servers or self.cluster.nodes_in_cluster
     if type(servers) is not list:
         servers = [servers]
     for server in servers:
         cbstat_obj = Cbstats(server)
         result = cbstat_obj.magma_stats(bucket.name)
         magma_stats_for_all_servers[server.ip] = result
     return magma_stats_for_all_servers
Example #13
    def verify_vbucket_details_stats(self,
                                     bucket,
                                     kv_servers,
                                     vbuckets=1024,
                                     expected_val=dict()):
        """

        :param bucket: Bucket object
        :param kv_servers: List of kv_nodes currently present in the cluster
        :param vbuckets: Total vbucket count for the bucket. Default 1024
        :param expected_val: dict() containing expected key,value pairs
        :return verification_failed: Bool value denoting verification
                                     failed or not
        """
        verification_failed = False
        vb_details_stats = dict()
        # Initialise all tracked op counters to zero
        ops_val = dict.fromkeys(
            ["ops_create", "ops_delete", "ops_update", "ops_reject",
             "ops_get", "rollback_item_count", "sync_write_aborted_count",
             "sync_write_committed_count", "pending_writes"], 0)

        # Fetch stats for all available vbuckets into 'vb_details_stats'
        for server in kv_servers:
            shell = RemoteMachineShellConnection(server)
            cbstat_obj = Cbstats(shell)
            vb_details_stats.update(cbstat_obj.vbucket_details(bucket.name))
            shell.disconnect()

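        # Sum each op-type counter across all vbuckets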
        for vb_num in range(0, vbuckets):
            vb_num = str(vb_num)
            for op_type in ops_val.keys():
                ops_val[op_type] += int(vb_details_stats[vb_num][op_type])

        # Verification block
        for op_type in ops_val.keys():
            self.log.debug("%s for %s: %s" %
                           (op_type, bucket.name, ops_val[op_type]))

            if op_type in expected_val \
                    and not DurabilityHelper.__compare(ops_val[op_type],
                                                       expected_val[op_type],
                                                       DurabilityHelper.EQUAL):
                verification_failed = True
                self.log.error(
                    "Mismatch in %s stats. %s != %s" %
                    (op_type, ops_val[op_type], expected_val[op_type]))
        return verification_failed
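A hedged usage sketch of the verifier above (the caller attributes and expected counts are assumptions, not from the source):

        # After a pure-create workload of `num_items` docs with no rollbacks,
        # every other tracked counter should still be zero
        failed = self.durability_helper.verify_vbucket_details_stats(
            self.bucket, self.cluster_util.get_kv_nodes(),
            vbuckets=self.cluster_util.vbuckets,
            expected_val={"ops_create": num_items, "rollback_item_count": 0})
        self.assertFalse(failed, "vbucket_details stats mismatch")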
Example #14
    def test_failover_expired_items_in_vB(self):
        self.maxttl = 120
        self.doc_ops = "expiry"
        self.expiry_perc = self.input.param("expiry_perc", 100)

        shell_conn = RemoteMachineShellConnection(
            self.cluster.nodes_in_cluster[-1])
        cbstats = Cbstats(shell_conn)
        self.target_vbucket = cbstats.vbucket_list(
            self.bucket_util.buckets[0].name)
        shell_conn.disconnect()

        self.generate_docs(target_vbucket=self.target_vbucket)

        _ = self.loadgen_docs(self.retry_exceptions,
                              self.ignore_exceptions,
                              _sync=True)
        self.bucket_util._wait_for_stats_all_buckets()

        # exp_pager_stime
        self.bucket_util._expiry_pager(self.exp_pager_stime)
        self.sleep(self.exp_pager_stime,
                   "Wait until exp_pager_stime for kv_purger to kick off")
        self.sleep(self.exp_pager_stime * 10,
                   "Wait for KV purger to scan expired docs and "
                   "add tombstones")

        self.task.async_failover(self.cluster.nodes_in_cluster,
                                 self.cluster.nodes_in_cluster[-1],
                                 graceful=True)

        self.nodes = self.rest.node_statuses()
        self.task.rebalance(self.cluster.nodes_in_cluster,
                            to_add=[],
                            to_remove=[self.cluster.nodes_in_cluster[-1]])

        # Metadata Purge Interval
        self.meta_purge_interval = 60
        self.bucket_util.cbepctl_set_metadata_purge_interval(
            value=self.meta_purge_interval, buckets=self.buckets)
        self.sleep(self.meta_purge_interval * 2,
                   "Wait for metadata purge interval to drop "
                   "tombstones from storage")

        self.log.info("Starting compaction for each bucket")
        self.run_compaction()

        # All docs and tomb-stone should be dropped from the storage
        ts = self.get_tombstone_count_key(self.cluster.nodes_in_cluster)
        self.log.info("Tombstones after full compaction: {}".format(ts))
Example #15
 def get_magma_stats(self, bucket, servers=None, field_to_grep=None):
     magma_stats_for_all_servers = dict()
     servers = servers or self.cluster.nodes_in_cluster
     if type(servers) is not list:
         servers = [servers]
     for server in servers:
         shell = RemoteMachineShellConnection(server)
         cbstat_obj = Cbstats(shell)
         result = cbstat_obj.magma_stats(bucket.name,
                                         field_to_grep=field_to_grep)
         shell.disconnect()
         magma_stats_for_all_servers[server.ip] = result
     return magma_stats_for_all_servers
Example #16
    def collect_compare_dcp_stats(self,
                                  buckets,
                                  servers,
                                  perNode=True,
                                  stat_name='unacked_bytes',
                                  compare_value=0,
                                  flow_control_buffer_size=20971520,
                                  filter_list=[]):
        """
            Method to extract the failovers stats given by cbstats tool

            Paramters:
              buckets: bucket informaiton
              servers: server information
              stat_name: stat we are searching to compare
              compare_value: the comparison value to be satisfied

            Returns:
              map of bucket informing if stat matching was
              satisfied / not satisfied

            example:: unacked_bytes in dcp
        """
        bucketMap = dict()
        for bucket in buckets:
            bucketMap[bucket.name] = True
        for bucket in buckets:
            for server in servers:
                # client = MemcachedClientHelper.direct_client(server, bucket)
                # stats = client.stats('dcp')
                shell = RemoteMachineShellConnection(server)
                cbstat = Cbstats(shell)
                stats = cbstat.dcp_stats(bucket)
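                # Skip stats matching filter_list; mapreduce_view streams may
                # hold unacked bytes up to the flow-control buffer size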
                for key in stats.keys():
                    do_filter = False
                    if stat_name in key:
                        for filter_key in filter_list:
                            if filter_key in key:
                                do_filter = True
                        value = int(stats[key])
                        if not do_filter:
                            if value != compare_value:
                                if "eq_dcpq:mapreduce_view" in key:
                                    if value >= flow_control_buffer_size:
                                        bucketMap[bucket] = False
                                else:
                                    bucketMap[bucket] = False
                shell.disconnect()
        return bucketMap
Example #17
    def map_collection_data(self):
        cb_stat_objects = list()
        collection_data = None

        for node in self.cluster_util.get_kv_nodes():
            shell = RemoteMachineShellConnection(node)
            cb_stat_objects.append(Cbstats(shell))

        for cb_stat in cb_stat_objects:
            tem_collection_data = cb_stat.get_collections(self.bucket)
            if collection_data is None:
                collection_data = tem_collection_data
            else:
                for key, value in tem_collection_data.items():
                    if type(value) is dict:
                        for col_name, c_data in value.items():
                            collection_data[key][col_name]['items'] \
                                += c_data['items']

        for s_name, s_data in collection_data.items():
            if type(s_data) is not dict:
                continue
            self.bucket_util.create_scope_object(self.bucket,
                                                 {"name": s_name})
            for c_name, c_data in s_data.items():
                if type(c_data) is not dict:
                    continue
                self.bucket_util.create_collection_object(
                    self.bucket, s_name,
                    {"name": c_name, "num_items": c_data["items"],
                     "maxTTL": c_data.get("maxTTL", 0)})

        # Close shell connections
        for cb_stat in cb_stat_objects:
            cb_stat.shellConn.disconnect()
Example #18
    def check(self):
        # check bucket compaction status across all nodes
        nodes = self.rest.get_nodes()
        current_compaction_count = {}

        for node in nodes:
            current_compaction_count[node.ip] = 0
            s = TestInputServer()
            s.ip = node.ip
            s.ssh_username = self.server.ssh_username
            s.ssh_password = self.server.ssh_password
            shell = RemoteMachineShellConnection(s)
            res = Cbstats(shell).get_kvtimings()
            shell.disconnect()
            for i in res[0]:
                # check for lines that look like
                #    rw_0:compact_131072,262144:        8
                if 'compact' in i:
                    current_compaction_count[node.ip] += int(i.split(':')[2])

        if cmp(current_compaction_count, self.compaction_count) == 1:
            # compaction count has increased
            self.set_result(True)
            self.state = FINISHED

        else:
            if self.retries > 0:
                # retry
                self.retries = self.retries - 1
                self.task_manager.schedule(self, 10)
            else:
                # never detected a compaction task running
                self.set_result(False)
                self.state = FINISHED
Example #19
    def setUp(self):
        super(RollbackTests, self).setUp()
        self.num_rollbacks = self.input.param("num_rollbacks", 2)
        self.collection_ops_type = self.input.param("collection_ops", None)
        self.rollback_with_multiple_mutation = self.input.param(
            "rollback_with_multiple_mutation", False)
        self.bucket = self.bucket_util.buckets[0]

        # Disable auto-fail_over to avoid fail_over of nodes
        status = RestConnection(self.cluster.master) \
            .update_autofailover_settings(False, 120, False)
        self.assertTrue(status, msg="Failure during disabling auto-failover")

        # Used to calculate expected queue size of validation before rollback
        self.total_rollback_items = 0
        self.kv_nodes = self.cluster_util.get_kv_nodes()

        self.sync_write_enabled = self.durability_helper.is_sync_write_enabled(
            self.bucket_durability_level, self.durability_level)

        # Open shell connections to kv nodes and create cbstat objects
        self.node_shells = dict()
        for node in self.kv_nodes:
            shell_conn = RemoteMachineShellConnection(node)
            self.node_shells[node] = dict()
            self.node_shells[node]["shell"] = shell_conn
            self.node_shells[node]["cbstat"] = Cbstats(shell_conn)
Example #20
    def ops_change_cas(self):
        """
        CAS value manipulation by update, delete, expire test.

        We load a certain number of items. Then for half of them, we use
        MemcachedClient cas() method to mutate those item values in order
        to change CAS value of those items.
        We use MemcachedClient set() with an expiry to expire a quarter of
        the items, and MemcachedClient delete() to delete another quarter.
        """

        gen_load = doc_generator('nosql',
                                 0,
                                 self.num_items,
                                 doc_size=self.doc_size)
        gen_update = doc_generator('nosql',
                                   0,
                                   self.num_items / 2,
                                   doc_size=self.doc_size)
        gen_delete = doc_generator('nosql',
                                   self.num_items / 2,
                                   (self.num_items * 3 / 4),
                                   doc_size=self.doc_size)
        gen_expire = doc_generator('nosql', (self.num_items * 3 / 4),
                                   self.num_items,
                                   doc_size=self.doc_size)
        self._load_all_buckets(gen_load, "create")
        self.bucket_util.verify_stats_all_buckets(self.cluster, self.num_items)
        self.bucket_util._wait_for_stats_all_buckets(self.cluster,
                                                     self.cluster.buckets)

        # Create cbstat objects
        self.shell_conn = dict()
        self.cb_stat = dict()
        self.vb_details = dict()
        for node in self.cluster_util.get_kv_nodes(self.cluster):
            self.vb_details[node.ip] = dict()
            self.vb_details[node.ip]["active"] = list()
            self.vb_details[node.ip]["replica"] = list()

            self.shell_conn[node.ip] = RemoteMachineShellConnection(node)
            self.cb_stat[node.ip] = Cbstats(self.shell_conn[node.ip])
            self.vb_details[node.ip]["active"] = \
                self.cb_stat[node.ip].vbucket_list(self.bucket.name, "active")
            self.vb_details[node.ip]["replica"] = \
                self.cb_stat[node.ip].vbucket_list(self.bucket.name, "replica")

        if self.doc_ops is not None:
            if "update" in self.doc_ops:
                self.verify_cas("update", gen_update)
            if "touch" in self.doc_ops:
                self.verify_cas("touch", gen_update)
            if "delete" in self.doc_ops:
                self.verify_cas("delete", gen_delete)
            if "expire" in self.doc_ops:
                self.verify_cas("expire", gen_expire)

        self.bucket_util._wait_for_stats_all_buckets(self.cluster,
                                                     self.cluster.buckets)
        self.validate_test_failure()
Example #21
    def test_stream_all_buckets(self):
        doc_gen = doc_generator(self.key, 0, self.num_items)
        self._load_all_buckets(self.master, doc_gen, "create", 0)

        user_name = self.input.param("user_name", None)
        password = self.input.param("password", None)
        nodeA = self.servers[0]

        vbuckets = list(range(self.vbuckets))
        shell_conn = RemoteMachineShellConnection(nodeA)
        cb_stat_obj = Cbstats(shell_conn)

        for bucket in self.bucket_util.buckets:
            if user_name is not None:
                self.add_built_in_server_user([{
                    'id': user_name,
                    'name': user_name,
                    'password': password
                }], [{
                    'id': user_name,
                    'name': user_name,
                    'roles': 'data_dcp_reader[default]'
                }], self.master)
                dcp_client = self.dcp_client(nodeA,
                                             PRODUCER,
                                             bucket_name=bucket.name,
                                             auth_user=user_name,
                                             auth_password=password)
            else:
                dcp_client = self.dcp_client(nodeA,
                                             PRODUCER,
                                             bucket_name=bucket.name)

            vb_info = cb_stat_obj.vbucket_seqno(bucket.name)
            for vb in vbuckets[0:16]:
                stream = dcp_client.stream_req(vb, 0, 0,
                                               vb_info[vb]["high_seqno"],
                                               vb_info[vb]["uuid"])
                _ = stream.run()
                self.assertTrue(
                    vb_info[vb]["high_seqno"] == stream.last_by_seqno,
                    msg="Mismatch in high_seqno. {0} != {1}".format(
                        vb_info[vb]["high_seqno"], stream.last_by_seqno))

        # Disconnect the shell_conn
        shell_conn.disconnect()
Example #22
 def tearDown(self):
     shell = RemoteMachineShellConnection(self.cluster.master)
     cbstat_obj = Cbstats(shell)
     for bucket in self.bucket_util.buckets:
         result = cbstat_obj.all_stats(bucket.name)
         self.log.info("Bucket: %s, Active Resident ratio(DGM): %s%%"
                       % (bucket.name,
                          result["vb_active_perc_mem_resident"]))
         self.log.info("Bucket: %s, Replica Resident ratio(DGM): %s%%"
                       % (bucket.name,
                          result["vb_replica_perc_mem_resident"]))
         if not self.skip_collections_cleanup:
             self.bucket_util.remove_scope_collections_for_bucket(bucket)
     shell.disconnect()
     if self.validate_docs_count_during_teardown:
         self.bucket_util.validate_docs_per_collections_all_buckets()
     super(CollectionBase, self).tearDown()
Example #23
    def get_vbucket_info_from_failover_nodes(self):
        """
        Fetch active/replica vbucket list from the
        nodes which are going to be failed over
        """
        bucket = self.bucket_util.buckets[0]
        # Reset the values
        self.active_vb_in_failover_nodes = list()
        self.replica_vb_in_failover_nodes = list()

        # Fetch new vbucket list
        for node in self.server_to_fail:
            shell_conn = RemoteMachineShellConnection(node)
            cbstat = Cbstats(shell_conn)
            self.active_vb_in_failover_nodes += cbstat.vbucket_list(
                bucket.name, "active")
            self.replica_vb_in_failover_nodes += cbstat.vbucket_list(
                bucket.name, "replica")
Example #24
    def setUp(self):
        super(OutOfOrderReturns, self).setUp()

        # Create default bucket
        self.create_bucket(self.cluster)

        self.ooo_order = 0
        self.test_lock = Lock()
        self.doc_ops = self.input.param("doc_ops", "update;update").split(";")

        # Disable auto-failover to avoid failover of nodes
        status = RestConnection(self.cluster.master) \
            .update_autofailover_settings(False, 120, False)
        self.assertTrue(status, msg="Failure during disabling auto-failover")

        self.cluster.nodes_in_cluster.extend([self.cluster.master])
        self.bucket = self.cluster.buckets[0]

        # Create sdk_clients for pool
        if self.sdk_client_pool:
            self.log.info("Creating SDK client pool")
            self.sdk_client_pool.create_clients(
                self.bucket,
                self.cluster.nodes_in_cluster,
                req_clients=self.sdk_pool_capacity,
                compression_settings=self.sdk_compression)

        # Create shell connection to each kv_node for cbstat object
        self.kv_nodes = self.cluster_util.get_kv_nodes(self.cluster)
        self.node_data = dict()
        for node in self.kv_nodes:
            shell = RemoteMachineShellConnection(node)
            cb_stat = Cbstats(shell)
            self.node_data[node] = dict()
            self.node_data[node]["shell"] = shell
            self.node_data[node]["cb_stat"] = cb_stat
            self.node_data[node]["active_vbs"] = \
                cb_stat.vbucket_list(self.bucket.name, vbucket_type="active")
            self.node_data[node]["replica_vbs"] = \
                cb_stat.vbucket_list(self.bucket.name, vbucket_type="replica")

        # Print cluster & bucket stats
        self.cluster_util.print_cluster_stats(self.cluster)
        self.bucket_util.print_bucket_stats(self.cluster)
Example #25
    def test_stream_after_n_crashes(self):
        crashes = self.input.param("crash_num", 5)
        vbucket = randint(0, self.vbuckets)
        bucket = self.bucket_util.buckets[0]

        self.log.info("Chosen vbucket {0} for {1} crashes".format(
            vbucket, crashes))
        start = 0
        end = self.num_items

        nodeA = self.cluster.servers[0]
        shell_conn = RemoteMachineShellConnection(nodeA)
        cb_stat_obj = Cbstats(shell_conn)
        rest = RestHelper(RestConnection(nodeA))

        for _ in xrange(crashes):
            # Load data into the selected vbucket
            self.load_docs(bucket, vbucket, start, end, "create")
            self.assertTrue(self.stop_node(0), msg="Failed during stop_node")
            self.sleep(5, "Sleep after stop_node")
            self.assertTrue(self.start_node(0), msg="Failed during start_node")
            self.assertTrue(rest.is_ns_server_running(),
                            msg="Failed while is_ns_server_running check")
            self.sleep(5, "Waiting after ns_server started")

            # Fetch vbucket seqno stats
            vb_stat = cb_stat_obj.vbucket_seqno(bucket.name)
            dcp_client = self.dcp_client(nodeA, dcp.constants.PRODUCER)
            stream = dcp_client.stream_req(vbucket, 0, 0,
                                           vb_stat[vbucket]["high_seqno"],
                                           vb_stat[vbucket]["uuid"])
            stream.run()

            self.assertTrue(
                stream.last_by_seqno == vb_stat[vbucket]["high_seqno"],
                msg="Mismatch in high_seqno. {0} == {1}".format(
                    vb_stat[vbucket]["high_seqno"], stream.last_by_seqno))

            # Update start/end values for next loop
            start = end
            end += self.num_items

        # Disconnect shell Connection for the node
        shell_conn.disconnect()
Example #26
    def test_crash_entire_cluster(self):
        self.cluster.rebalance([self.master], self.servers[1:], [])

        bucket = self.bucket_util.buckets[0]
        vbucket = randint(0, self.vbuckets)
        nodeA = self.servers[0]
        self.load_docs(bucket, vbucket, 0, self.num_items, "create")

        shell_conn = RemoteMachineShellConnection(nodeA)
        cb_stat_obj = Cbstats(shell_conn)

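        # Open a producer stream with end_seqno (2x num_items) beyond the
        # current high_seqno so it stays open across the full-cluster restart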
        dcp_client = self.dcp_client(nodeA, dcp.constants.PRODUCER)
        _ = dcp_client.stream_req(vbucket, 0, 0, 2 * self.num_items, 0)
        self.load_docs(nodeA, vbucket, self.num_items)

        # stop all nodes
        node_range = range(len(self.servers))
        for i in node_range:
            self.assertTrue(self.stop_node(i), msg="Failed during stop_node")
        self.sleep(2, "Wait after stop_node")

        # start all nodes in reverse order
        node_range.reverse()
        for i in node_range:
            self.assertTrue(self.start_node(i), msg="Failed during start_node")

        rest = RestHelper(RestConnection(nodeA))
        self.assertTrue(rest.is_ns_server_running(),
                        msg="Failed while is_ns_server_running check")

        vb_info = cb_stat_obj.vbucket_seqno(bucket.name)
        dcp_client = self.dcp_client(nodeA, dcp.constants.PRODUCER)
        stream = dcp_client.stream_req(vbucket, 0, 0,
                                       vb_info[vbucket]["high_seqno"], 0)
        stream.run()
        self.assertTrue(stream.last_by_seqno == vb_info[vbucket]["high_seqno"],
                        msg="Seq-no mismatch. {0} != {1}".format(
                            stream.last_by_seqno,
                            vb_info[vbucket]["high_seqno"]))

        # Disconnect shell Connection for the node
        shell_conn.disconnect()
Example #27
    def test_buffer_ack_during_dcp_commit(self):
        """
        MB-46482
        - Create bucket with min_ram
        - Perform huge number of sync_writes
        - Validate 'dcp unacked_bytes' stats are all ZERO
        """

        if self.durability_level == "" \
                or self.durability_level.upper() == "NONE":
            self.fail("Test requires valid durability level for sync_writes")

        doc_gen = doc_generator(self.key,
                                self.num_items,
                                self.num_items * 3,
                                key_size=10,
                                doc_size=5)
        self.log.info("Loading %s keys into the bucket" % (self.num_items * 2))
        load_task = self.task.async_load_gen_docs(
            self.cluster,
            self.bucket,
            doc_gen,
            DocLoading.Bucket.DocOps.UPDATE,
            durability=self.durability_level,
            print_ops_rate=False)
        self.task_manager.get_task_result(load_task)

        self.bucket_util._wait_for_stats_all_buckets(self.cluster,
                                                     self.cluster.buckets)
        self.sleep(5, "Wait for dcp")
        for node in self.cluster_util.get_kv_nodes(self.cluster):
            cb_stat = Cbstats(node)
            dcp_stats = cb_stat.dcp_stats(self.bucket.name)
            for stat_name, val in dcp_stats.items():
                if stat_name.split(":")[-1] == "unacked_bytes":
                    self.log.debug("%s: %s" % (stat_name, val))
                    if int(val) != 0:
                        self.log_failure("%s: %s != 0" % (stat_name, val))

        self.validate_test_failure()
Example #28
    def online_swap(self, node_to_upgrade, version,
                    install_on_spare_node=True):
        vb_details = dict()
        vb_verification = dict()
        vb_types = ["active", "replica"]

        # Fetch active services on node_to_upgrade
        rest = self.__get_rest_node(node_to_upgrade)
        services = rest.get_nodes_services()
        services_on_target_node = services[(node_to_upgrade.ip + ":"
                                            + node_to_upgrade.port)]

        # Record vbuckets in swap_node
        if CbServer.Services.KV in services_on_target_node:
            cbstats = Cbstats(node_to_upgrade)
            for vb_type in vb_types:
                vb_details[vb_type] = \
                    cbstats.vbucket_list(self.bucket.name, vb_type)

        if install_on_spare_node:
            # Install target version on spare node
            self.install_version_on_node([self.spare_node], version)

        # Perform swap rebalance for node_to_upgrade <-> spare_node
        rebalance_passed = self.task.rebalance(
            self.cluster_util.get_nodes(self.cluster.master),
            to_add=[self.spare_node],
            to_remove=[node_to_upgrade],
            check_vbucket_shuffling=False,
            services=[",".join(services_on_target_node)])
        if not rebalance_passed:
            self.log_failure("Swap rebalance failed during upgrade of {0}"
                             .format(node_to_upgrade))

        # VBuckets shuffling verification
        if CbServer.Services.KV in services_on_target_node:
            # Fetch vbucket stats after swap rebalance for verification
            cbstats = Cbstats(self.spare_node)
            for vb_type in vb_types:
                vb_verification[vb_type] = \
                    cbstats.vbucket_list(self.bucket.name, vb_type)

            # Check vbuckets are shuffled or not
            for vb_type in vb_types:
                if sorted(vb_details[vb_type]) \
                        != sorted(vb_verification[vb_type]):
                    self.log_failure("%s vbuckets shuffled post swap_rebalance"
                                     % vb_type)
                    self.log.error("%s vbuckets before vs after: %s != %s"
                                   % (vb_type,
                                      vb_details[vb_type],
                                      vb_verification[vb_type]))

        # Update master node
        self.cluster.master = self.spare_node
        self.cluster.nodes_in_cluster.append(self.spare_node)

        # Update spare_node to rebalanced-out node
        self.spare_node = node_to_upgrade
        self.cluster.nodes_in_cluster.remove(node_to_upgrade)
Example #29
 def tearDown(self):
     cbstat_obj = Cbstats(self.cluster.master)
     for bucket in self.cluster.buckets:
         if bucket.bucketType != Bucket.Type.MEMCACHED:
             result = cbstat_obj.all_stats(bucket.name)
             self.log.info("Bucket: %s, Active Resident ratio(DGM): %s%%"
                           % (bucket.name,
                              result["vb_active_perc_mem_resident"]))
             self.log.info("Bucket: %s, Replica Resident ratio(DGM): %s%%"
                           % (bucket.name,
                              result["vb_replica_perc_mem_resident"]))
         if not self.skip_collections_cleanup \
                 and bucket.bucketType != Bucket.Type.MEMCACHED:
             self.bucket_util.remove_scope_collections_for_bucket(
                 self.cluster, bucket)
     if self.validate_docs_count_during_teardown:
         self.bucket_util.validate_docs_per_collections_all_buckets(
             self.cluster)
     if self.disk_optimized_thread_settings:
         self.set_num_writer_and_reader_threads(num_writer_threads="default",
                                                num_reader_threads="default")
     super(CollectionBase, self).tearDown()
Example #30
    def test_magma_rollback_to_0(self):
        items = self.num_items
        mem_only_items = self.input.param("rollback_items", 10000)
        if self.nodes_init < 2 or self.num_replicas < 1:
            self.fail("Not enough nodes/replicas in the cluster/bucket "
                      "to test rollback")

        self.num_rollbacks = self.input.param("num_rollbacks", 10)
        shell = RemoteMachineShellConnection(self.cluster_util.cluster.master)
        self.target_vbucket = Cbstats(shell).vbucket_list(
            self.bucket_util.buckets[0].name)
        start = self.num_items

        # Stopping persistence on NodeA
        mem_client = MemcachedClientHelper.direct_client(
            self.input.servers[0], self.bucket_util.buckets[0])
        mem_client.stop_persistence()

        for i in xrange(1, self.num_rollbacks + 1):
            self.gen_create = doc_generator(
                self.key,
                start,
                mem_only_items,
                doc_size=self.doc_size,
                doc_type=self.doc_type,
                target_vbucket=self.target_vbucket,
                vbuckets=self.cluster_util.vbuckets,
                randomize_doc_size=self.randomize_doc_size,
                randomize_value=self.randomize_value)

            self.loadgen_docs(_sync=True)

            start = self.gen_create.key_counter
            stat_map = {self.cluster.nodes_in_cluster[0]: mem_only_items * i}
            for node in self.cluster.nodes_in_cluster[1:]:
                stat_map.update({node: 0})

            for bucket in self.bucket_util.buckets:
                self.bucket_util._wait_for_stat(bucket, stat_map)
            self.sleep(60)

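        # Killing memcached discards the memory-only mutations, forcing a
        # rollback to the last persisted seqno on warmup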
        shell.kill_memcached()
        self.assertTrue(
            self.bucket_util._wait_warmup_completed(
                [self.cluster_util.cluster.master],
                self.bucket_util.buckets[0],
                wait_time=self.wait_timeout * 10))
        self.bucket_util.verify_stats_all_buckets(items)
        shell.disconnect()