Code example #1
    def test_crash_while_streaming(self):
        bucket = self.bucket_util.buckets[0]
        vbucket = randint(0, self.vbuckets - 1)  # randint is inclusive at both ends
        nodeA = self.servers[0]
        self.load_docs(bucket, vbucket, 0, self.num_items, "create")

        shell_conn = RemoteMachineShellConnection(nodeA)
        cb_stat_obj = Cbstats(shell_conn)

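        # Open a DCP producer stream that covers future mutations, then load
        # more docs and restart the node while the stream is open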
        dcp_client = self.dcp_client(nodeA, dcp.constants.PRODUCER)
        _ = dcp_client.stream_req(vbucket, 0, 0, 2 * self.num_items, 0)
        self.load_docs(nodeA, vbucket, self.num_items)
        self.assertTrue(self.stop_node(0), msg="Failed during stop_node")
        self.sleep(2, "Sleep after stop_node")
        self.assertTrue(self.start_node(0), msg="Failed during start_node")
        rest = RestHelper(RestConnection(nodeA))
        self.assertTrue(rest.is_ns_server_running(),
                        msg="Failed while is_ns_server_running check")
        self.sleep(30, "Sleep to wait for ns_server to run")

        vb_info = cb_stat_obj.vbucket_seqno(bucket.name)
        dcp_client = self.dcp_client(nodeA, dcp.constants.PRODUCER)
        stream = dcp_client.stream_req(vbucket, 0, 0,
                                       vb_info[vbucket]["high_seqno"], 0)
        stream.run()
        self.assertTrue(stream.last_by_seqno == vb_info[vbucket]["high_seqno"],
                        msg="Mismatch in high_seqno. {0} == {1}".format(
                            vb_info[vbucket]["high_seqno"],
                            stream.last_by_seqno))

        # Disconnect shell Connection for the node
        shell_conn.disconnect()
Code example #2
File: rebalance.py  Project: ritalrw/TAF
    def test_mutations_during_rebalance(self):
        # start rebalance
        task = self.cluster.async_rebalance([self.master], self.servers[1:],
                                            [])
        # load some data
        vbucket = 0
        self.load_docs(self.master, vbucket, self.num_items)
        shell_conn = RemoteMachineShellConnection(self.cluster.master)
        cb_stat_obj = Cbstats(self.log, shell_conn)
        # Fetch vbucket seqno stats
        vb_stat = cb_stat_obj.vbucket_seqno(self.bucket_util.buckets[0].name)
        # stream
        self.log.info("Streaming vb {0} to seqno {1}"
                      .format(vbucket, vb_stat[vbucket]["high_seqno"]))
        self.assertEquals(vb_stat[vbucket]["high_seqno"], self.num_items)

        dcp_client = self.dcp_client(self.master, PRODUCER, vbucket)
        stream = dcp_client.stream_req(vbucket, 0, 0,
                                       vb_stat[vbucket]["high_seqno"],
                                       vb_stat[vbucket]["uuid"])

        stream.run()
        last_seqno = stream.last_by_seqno
        assert last_seqno == vb_stat[vbucket]["high_seqno"], last_seqno

        # verify rebalance
        assert task.result()
Code example #3
    def test_failover_swap_rebalance(self):
        """ add and failover node then perform swap rebalance """

        assert len(self.servers) > 2, "not enough servers"
        nodeA = self.servers[0]
        nodeB = self.servers[1]
        nodeC = self.servers[2]

        gen_create = doc_generator('dcp', 0, self.num_items, doc_size=64)
        self._load_all_buckets(nodeA, gen_create, "create", 0)

        vbucket = 0

        # rebalance in nodeB
        assert self.cluster.rebalance([nodeA], [nodeB], [])

        # add nodeC
        rest = RestConnection(nodeB)
        rest.add_node(user=nodeC.rest_username,
                      password=nodeC.rest_password,
                      remoteIp=nodeC.ip,
                      port=nodeC.port)

        # stop and failover nodeA
        assert self.stop_node(0)
        self.stopped_nodes.append(0)
        self.master = nodeB

        assert self.cluster.failover([nodeB], [nodeA])
        try:
            assert self.cluster.rebalance([nodeB], [], [])
        except Exception:
            pass
        self.add_built_in_server_user()
        # verify seqnos and stream mutations
        rest = RestConnection(nodeB)
        total_mutations = 0

        # Create connection for CbStats
        shell_conn = RemoteMachineShellConnection(self.cluster.master)
        cb_stat_obj = Cbstats(shell_conn)
        vb_info = cb_stat_obj.vbucket_seqno(self.bucket_util.buckets[0].name)

        for vb in range(0, self.vbuckets):
            total_mutations += int(vb_info[vb]["high_seqno"])

        # Disconnect the Cbstats shell_conn
        shell_conn.disconnect()

        # / 2   # divide by 2 because the items are split between 2 servers
        self.assertTrue(total_mutations == self.num_items,
                        msg="Number mismatch. {0} != {1}".format(
                            total_mutations, self.num_items))

        task = self.cluster.async_rebalance([nodeB], [], [nodeC])
        task.result()
Code example #4
File: multibucket.py  Project: programmatix/TAF
    def test_stream_all_buckets(self):
        doc_gen = doc_generator(self.key, 0, self.num_items)
        self._load_all_buckets(self.master, doc_gen, "create", 0)

        user_name = self.input.param("user_name", None)
        password = self.input.param("password", None)
        nodeA = self.servers[0]

        vbuckets = [vb for vb in range(self.vbuckets)]
        shell_conn = RemoteMachineShellConnection(nodeA)
        cb_stat_obj = Cbstats(shell_conn)

        for bucket in self.bucket_util.buckets:
            if user_name is not None:
                self.add_built_in_server_user([{
                    'id': user_name,
                    'name': user_name,
                    'password': password
                }], [{
                    'id': user_name,
                    'name': user_name,
                    'roles': 'data_dcp_reader[default]'
                }], self.master)
                dcp_client = self.dcp_client(nodeA,
                                             PRODUCER,
                                             bucket_name=bucket.name,
                                             auth_user=user_name,
                                             auth_password=password)
            else:
                dcp_client = self.dcp_client(nodeA,
                                             PRODUCER,
                                             bucket_name=bucket.name)

            vb_info = cb_stat_obj.vbucket_seqno(bucket.name)
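            # Stream the first 16 vbuckets up to their current high_seqno and
            # verify each stream's last received seqno matches the stat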
            for vb in vbuckets[0:16]:
                vbucket = vb  # vbuckets is a list of ints; vb is already the vbucket number
                stream = dcp_client.stream_req(vbucket, 0, 0,
                                               vb_info[vb]["high_seqno"],
                                               vb_info[vb]["uuid"])
                _ = stream.run()
                self.assertTrue(
                    vb_info[vb]["high_seqno"] == stream.last_by_seqno,
                    msg="Mismatch in high_seqno. {0} == {1}".format(
                        vb_info[vb]["high_seqno"], stream.last_by_seqno))

        # Disconnect the shell_conn
        shell_conn.disconnect()
Code example #5
    def test_stream_after_n_crashes(self):
        crashes = self.input.param("crash_num", 5)
        vbucket = randint(0, self.vbuckets - 1)  # randint is inclusive at both ends
        bucket = self.bucket_util.buckets[0]

        self.log.info("Chosen vbucket {0} for {1} crashes".format(
            vbucket, crashes))
        start = 0
        end = self.num_items

        nodeA = self.cluster.servers[0]
        shell_conn = RemoteMachineShellConnection(nodeA)
        cb_stat_obj = Cbstats(shell_conn)
        rest = RestHelper(RestConnection(nodeA))

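        # Repeatedly load docs into the chosen vbucket, restart the node,
        # then verify a DCP stream catches up to the vbucket's high_seqno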
        for _ in xrange(crashes):
            # Load data into the selected vbucket
            self.load_docs(bucket, vbucket, start, end, "create")
            self.assertTrue(self.stop_node(0), msg="Failed during stop_node")
            self.sleep(5, "Sleep after stop_node")
            self.assertTrue(self.start_node(0), msg="Failed during start_node")
            self.assertTrue(rest.is_ns_server_running(),
                            msg="Failed while is_ns_server_running check")
            self.sleep(5, "Waiting after ns_server started")

            # Fetch vbucket seqno stats
            vb_stat = cb_stat_obj.vbucket_seqno(bucket.name)
            dcp_client = self.dcp_client(nodeA, dcp.constants.PRODUCER)
            stream = dcp_client.stream_req(vbucket, 0, 0,
                                           vb_stat[vbucket]["high_seqno"],
                                           vb_stat[vbucket]["uuid"])
            stream.run()

            self.assertTrue(
                stream.last_by_seqno == vb_stat[vbucket]["high_seqno"],
                msg="Mismatch in high_seqno. {0} == {1}".format(
                    vb_stat[vbucket]["high_seqno"], stream.last_by_seqno))

            # Update start/end values for next loop
            start = end
            end += self.num_items

        # Disconnect shell Connection for the node
        shell_conn.disconnect()
Code example #6
    def test_crash_entire_cluster(self):
        self.cluster.rebalance([self.master], self.servers[1:], [])

        bucket = self.bucket_util.buckets[0]
        vbucket = randint(0, self.vbuckets - 1)  # randint is inclusive at both ends
        nodeA = self.servers[0]
        self.load_docs(bucket, vbucket, 0, self.num_items, "create")

        shell_conn = RemoteMachineShellConnection(nodeA)
        cb_stat_obj = Cbstats(shell_conn)

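        # Open a DCP producer stream and load more docs, then stop every node
        # in the cluster and restart them in reverse order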
        dcp_client = self.dcp_client(nodeA, dcp.constants.PRODUCER)
        _ = dcp_client.stream_req(vbucket, 0, 0, 2 * self.num_items, 0)
        self.load_docs(nodeA, vbucket, self.num_items)

        # stop all nodes
        node_range = range(len(self.servers))
        for i in node_range:
            self.assertTrue(self.stop_node(i), msg="Failed during stop_node")
        self.sleep(2, "Wait after stop_node")

        # start all nodes in reverse order
        node_range.reverse()
        for i in node_range:
            self.assertTrue(self.start_node(i), msg="Failed during start_node")

        rest = RestHelper(RestConnection(nodeA))
        self.assertTrue(rest.is_ns_server_running(),
                        msg="Failed while is_ns_server_running check")

        vb_info = cb_stat_obj.vbucket_seqno(bucket.name)
        dcp_client = self.dcp_client(nodeA, dcp.constants.PRODUCER)
        stream = dcp_client.stream_req(vbucket, 0, 0,
                                       vb_info[vbucket]["high_seqno"], 0)
        stream.run()
        self.assertTrue(stream.last_by_seqno == vb_info[vbucket]["high_seqno"],
                        msg="Seq-no mismatch. {0} != {1}".format(
                            stream.last_by_seqno,
                            vb_info[vbucket]["high_seqno"]))

        # Disconnect shell Connection for the node
        shell_conn.disconnect()
Code example #7
    def collect_vbucket_stats(self,
                              buckets,
                              servers,
                              collect_vbucket=True,
                              collect_vbucket_seqno=True,
                              collect_vbucket_details=True,
                              perNode=True):
        """
            Method to extract the vbucket stats reported by the cbstats tool

            Parameters:
              buckets: bucket information
              servers: server information
              collect_vbucket: take vbucket type stats
              collect_vbucket_seqno: take vbucket-seqno type stats
              collect_vbucket_details: take vbucket-details type stats
              perNode: if True collects data per node else takes a union
                       across nodes

            Returns:
              The output can be in two formats

              If perNode is True (per-node data collection):
              Vbucket Information :: {bucket: {node: [vbucket_seqno {key: value} U vbucket_details {key: value} U vbucket {key: value}]}}

              If perNode is False:
              Vbucket Information :: {bucket: [vbucket_seqno {key: value} U vbucket_details {key: value} U vbucket {key: value}]}
        """
        bucketMap = dict()
        for bucket in buckets:
            if bucket.bucketType == Bucket.Type.MEMCACHED:
                continue
            dataMap = dict()
            for server in servers:
                map_data = dict()
                cbstat = Cbstats(server)

                if collect_vbucket:
                    result = dict()
                    for vb_type in ["active", "replica"]:
                        vb_list = cbstat.vbucket_list(bucket.name, vb_type)
                        for vb_num in vb_list:
                            result['vb_%s' % vb_num] = dict()
                            result['vb_%s' % vb_num]["state"] = vb_type
                    map_data.update(result)
                    # vbucket = client.stats('vbucket')
                    # self.createMapVbucket(vbucket, map_data)
                if collect_vbucket_seqno:
                    result = cbstat.vbucket_seqno(bucket.name)
                    for key in result.keys():
                        result['vb_' + key] = result.pop(key)
                    map_data.update(result)
                    # vbucket_seqno = client.stats('vbucket-seqno')
                    # self.createMapVbucket(vbucket_seqno, map_data)
                if collect_vbucket_details:
                    result = cbstat.vbucket_details(bucket.name)
                    for key in result.keys():
                        result['vb_' + key] = result.pop(key)
                    map_data.update(result)
                    # vbucket_details = client.stats('vbucket-details')
                    # self.createMapVbucket(vbucket_details, map_data)
                if perNode:
                    dataMap[server.ip] = map_data
                else:
                    dataMap.update(map_data)
            bucketMap[bucket.name] = dataMap
        return bucketMap
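A minimal usage sketch for collect_vbucket_stats, assuming the same self.bucket_util.buckets, self.cluster.servers and self.log attributes used in the other examples; the call site and the logging line are illustrative only and not part of the original project:

        # Hypothetical call: gather vbucket and vbucket-seqno stats per node
        # for every bucket (illustrative only)
        stats_map = self.collect_vbucket_stats(
            self.bucket_util.buckets,
            self.cluster.servers,
            collect_vbucket=True,
            collect_vbucket_seqno=True,
            collect_vbucket_details=False,
            perNode=True)

        # With perNode=True the result is nested as
        # {bucket_name: {node_ip: {"vb_<num>": {...}, ...}}}
        for bucket_name, per_node in stats_map.items():
            for node_ip, vb_stats in per_node.items():
                self.log.info("{0} on {1}: {2} vbucket stat entries".format(
                    bucket_name, node_ip, len(vb_stats)))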
Code example #8
File: rebalance.py  Project: ritalrw/TAF
    def test_failover_log_table_updated(self):
        """
        Verifies failover table entries are updated when vbucket
        ownership changes
        """

        # rebalance in nodeB
        nodeA = self.servers[0]
        nodeB = self.servers[1]

        # load nodeA only
        rest = RestConnection(nodeA)
        vbuckets = rest.get_vbuckets()
        for vb_info in vbuckets[0:4]:
            vbucket = vb_info.id
            self.load_docs(nodeA, vbucket, self.num_items)

        # add nodeB
        self.cluster.rebalance([nodeA], [nodeB], [])

        # stop nodeA and failover
        assert self.stop_node(0)
        self.stopped_nodes.append(0)
        self.master = nodeB
        assert self.cluster.failover([nodeB], [nodeA])
        assert self.cluster.rebalance([nodeB], [], [])

        # load nodeB only
        rest = RestConnection(nodeB)
        vbuckets = rest.get_vbuckets()
        for vb_info in vbuckets[0:4]:
            vbucket = vb_info.id
            self.load_docs(nodeB, vbucket, self.num_items)

        # add nodeA back
        assert self.start_node(0)
        del self.stopped_nodes[0]
        rest = RestHelper(RestConnection(nodeA))
        assert rest.is_ns_server_running()
        time.sleep(10)
        self.cluster.rebalance([nodeB], [nodeA], [])

        # stop nodeB and failover
        assert self.stop_node(1)
        self.master = nodeA
        self.stopped_nodes.append(1)
        assert self.cluster.failover([nodeA], [nodeB])
        assert self.cluster.rebalance([nodeA], [], [])

        # load nodeA only
        rest = RestConnection(nodeA)
        vbuckets = rest.get_vbuckets()
        for vb_info in vbuckets[0:4]:
            vbucket = vb_info.id
            self.load_docs(nodeA, vbucket, self.num_items)

        # Create connection for CbStats
        shell_conn = RemoteMachineShellConnection(self.cluster.master)
        cb_stat_obj = Cbstats(shell_conn)

        # Fetch bucket's failover stats
        bucket = self.bucket_util.buckets[0]
        stats = cb_stat_obj.failover_stats(bucket.name)

        # Fetch vbucket seqno stats (while the shell connection is still open)
        vb_stat = cb_stat_obj.vbucket_seqno(bucket.name)

        # Disconnect the Cbstats shell_conn
        shell_conn.disconnect()
        # Check failover table entries
        for vb_info in vbuckets[0:4]:
            vb = vb_info.id
            assert long(stats['vb_'+str(vb)+':num_entries']) == 2

            dcp_client = self.dcp_client(nodeA, PRODUCER)
            stream = dcp_client.stream_req(vb, 0, 0, self.num_items*3,
                                           vb_stat[vb]["uuid"])

            _ = stream.run()
            assert stream.last_by_seqno == self.num_items*3, \
                stream.last_by_seqno