Example 1
    def move_single_node_test(self):
        """ Test moving a node in a single-node cluster (#4200) """
        cluster = self.cluster

        # Create an unbalanced ring
        cluster.populate(1, tokens=[0]).start()
        node1 = cluster.nodelist()[0]
        time.sleep(0.2)

        cursor = self.patient_cql_connection(node1)
        self.create_ks(cursor, 'ks', 1)
        self.create_cf(cursor, 'cf', columns={'c1': 'text', 'c2': 'text'})

        for n in xrange(0, 10000):
            insert_c1c2(cursor, n, ConsistencyLevel.ONE)

        cluster.flush()

        node1.move(2**25)
        time.sleep(1)

        cluster.cleanup()

        # Check we can get all the keys
        for n in xrange(0, 10000):
            query_c1c2(cursor, n, ConsistencyLevel.ONE)
Example 2
    def readrepair_test(self):
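        """ With hinted handoff disabled and RF=2, write at CL.ONE while node2 is
        down, then read everything at CL.QUORUM to trigger read repair and check
        that node2 alone can serve every key at CL.ONE. """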
        cluster = self.cluster
        cluster.set_configuration_options(values={'hinted_handoff_enabled': False})

        if DISABLE_VNODES:
            cluster.populate(2).start()
        else:
            tokens = cluster.balanced_tokens(2)
            cluster.populate(2, tokens=tokens).start()
        [node1, node2] = cluster.nodelist()

        cursor = self.patient_cql_connection(node1)
        self.create_ks(cursor, 'ks', 2)
        create_c1c2_table(self, cursor, read_repair=1.0)

        node2.stop(wait_other_notice=True)

        for n in xrange(0, 10000):
            insert_c1c2(cursor, n, ConsistencyLevel.ONE)

        node2.start(wait_other_notice=True)

        # query everything to cause RR
        for n in xrange(0, 10000):
            query_c1c2(cursor, n, ConsistencyLevel.QUORUM)

        node1.stop(wait_other_notice=True)

        # Check node2 for all the keys that should have been repaired
        cursor = self.patient_cql_connection(node2, keyspace='ks')
        for n in xrange(0, 10000):
            query_c1c2(cursor, n, ConsistencyLevel.ONE)
Example 3
    def hintedhandoff_test(self):
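        """ With RF=2, write at CL.ONE while node2 is down so hints accumulate on
        node1, wait for hint delivery once node2 restarts, then check that node2
        alone can serve every key at CL.ONE. """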
        cluster = self.cluster

        if DISABLE_VNODES:
            cluster.populate(2).start()
        else:
            tokens = cluster.balanced_tokens(2)
            cluster.populate(2, tokens=tokens).start()
        [node1, node2] = cluster.nodelist()

        cursor = self.patient_cql_connection(node1)
        self.create_ks(cursor, 'ks', 2)
        create_c1c2_table(self, cursor)

        node2.stop(wait_other_notice=True)

        for n in xrange(0, 100):
            insert_c1c2(cursor, n, ConsistencyLevel.ONE)

        log_mark = node1.mark_log()
        node2.start()
        node1.watch_log_for(["Finished hinted"],
                            from_mark=log_mark,
                            timeout=120)

        node1.stop(wait_other_notice=True)

        # Check node2 for all the keys that should have been delivered via HH
        cursor = self.patient_cql_connection(node2, keyspace='ks')
        for n in xrange(0, 100):
            query_c1c2(cursor, n, ConsistencyLevel.ONE)
Example 4
    def hintedhandoff_test(self):
        cluster = self.cluster

        if DISABLE_VNODES:
            cluster.populate(2).start()
        else:
            tokens = cluster.balanced_tokens(2)
            cluster.populate(2, tokens=tokens).start()
        [node1, node2] = cluster.nodelist()

        cursor = self.patient_cql_connection(node1)
        self.create_ks(cursor, 'ks', 2)
        create_c1c2_table(self, cursor)

        node2.stop(wait_other_notice=True)

        for n in xrange(0, 100):
            insert_c1c2(cursor, n, ConsistencyLevel.ONE)

        log_mark = node1.mark_log()
        node2.start()
        node1.watch_log_for(["Finished hinted"], from_mark=log_mark, timeout=90)

        node1.stop(wait_other_notice=True)

        # Check node2 for all the keys that should have been delivered via HH
        cursor = self.patient_cql_connection(node2, keyspace='ks')
        for n in xrange(0, 100):
            query_c1c2(cursor, n, ConsistencyLevel.ONE)
Example 5
    def quorum_available_during_failure_test(self):
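        """ With RF=3, data written at CL.QUORUM should remain readable at
        CL.QUORUM after a single node is taken down. """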
        CL = ConsistencyLevel.QUORUM
        RF = 3

        debug("Creating a ring")
        cluster = self.cluster
        if DISABLE_VNODES:
            cluster.populate(3).start()
        else:
            tokens = cluster.balanced_tokens(3)
            cluster.populate(3, tokens=tokens).start()
        [node1, node2, node3] = cluster.nodelist()
        cluster.start()

        debug("Set to talk to node 2")
        cursor = self.patient_cql_connection(node2)
        self.create_ks(cursor, 'ks', RF)
        create_c1c2_table(self, cursor)

        debug("Generating some data")
        for n in xrange(100):
            insert_c1c2(cursor, n, CL)

        debug("Taking down node1")
        node1.stop(wait_other_notice=True)

        debug("Reading back data.")
        for n in xrange(100):
            query_c1c2(cursor, n, CL)
Example 6
    def check_rows_on_node(self,
                           node_to_check,
                           rows,
                           found=[],
                           missings=[],
                           restart=True):
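        """ Stop every other running node, then check that node_to_check holds
        exactly `rows` rows, that every key in `found` is readable at CL.ONE and
        that every key in `missings` returns no data; restart the stopped nodes
        afterwards unless restart is False. """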
        stopped_nodes = []
        for node in self.cluster.nodes.values():
            if node.is_running() and node is not node_to_check:
                stopped_nodes.append(node)
                node.stop(wait_other_notice=True)

        cursor = self.patient_cql_connection(node_to_check, 'ks')
        result = cursor.execute("SELECT * FROM cf LIMIT %d" % (rows * 2))
        assert len(result) == rows, len(result)

        for k in found:
            query_c1c2(cursor, k, ConsistencyLevel.ONE)

        for k in missings:
            query = SimpleStatement("SELECT c1, c2 FROM cf WHERE key='k%d'" %
                                    k,
                                    consistency_level=ConsistencyLevel.ONE)
            res = cursor.execute(query)
            assert len(filter(lambda x: len(x) != 0, res)) == 0, res

        if restart:
            for node in stopped_nodes:
                node.start(wait_other_notice=True)
Example 7
    def move_single_node_test(self):
        """ Test moving a node in a single-node cluster (#4200) """
        cluster = self.cluster

        # Create an unbalanced ring
        cluster.populate(1, tokens=[0]).start()
        node1 = cluster.nodelist()[0]
        time.sleep(0.2)

        cursor = self.patient_cql_connection(node1)
        self.create_ks(cursor, 'ks', 1)
        self.create_cf(cursor, 'cf', columns={'c1': 'text', 'c2': 'text'})

        for n in xrange(0, 10000):
            insert_c1c2(cursor, n, ConsistencyLevel.ONE)

        cluster.flush()

        node1.move(2**25)
        time.sleep(1)

        cluster.cleanup()

        # Check we can get all the keys
        for n in xrange(0, 10000):
            query_c1c2(cursor, n, ConsistencyLevel.ONE)
Example 8
    def readrepair_test(self):
        cluster = self.cluster
        cluster.set_configuration_options(
            values={'hinted_handoff_enabled': False})

        if DISABLE_VNODES:
            cluster.populate(2).start()
        else:
            tokens = cluster.balanced_tokens(2)
            cluster.populate(2, tokens=tokens).start()
        [node1, node2] = cluster.nodelist()

        cursor = self.patient_cql_connection(node1)
        self.create_ks(cursor, 'ks', 2)
        create_c1c2_table(self, cursor, read_repair=1.0)

        node2.stop(wait_other_notice=True)

        for n in xrange(0, 10000):
            insert_c1c2(cursor, n, ConsistencyLevel.ONE)

        node2.start(wait_other_notice=True)

        # query everything to cause RR
        for n in xrange(0, 10000):
            query_c1c2(cursor, n, ConsistencyLevel.QUORUM)

        node1.stop(wait_other_notice=True)

        # Check node2 for all the keys that should have been repaired
        cursor = self.patient_cql_connection(node2, keyspace='ks')
        for n in xrange(0, 10000):
            query_c1c2(cursor, n, ConsistencyLevel.ONE)
Example 9
    def quorum_available_during_failure_test(self):
        CL = ConsistencyLevel.QUORUM
        RF = 3

        debug("Creating a ring")
        cluster = self.cluster
        if DISABLE_VNODES:
            cluster.populate(3).start()
        else:
            tokens = cluster.balanced_tokens(3)
            cluster.populate(3, tokens=tokens).start()
        [node1, node2, node3] = cluster.nodelist()
        cluster.start()

        debug("Set to talk to node 2")
        cursor = self.patient_cql_connection(node2)
        self.create_ks(cursor, 'ks', RF)
        create_c1c2_table(self, cursor)

        debug("Generating some data")
        for n in xrange(100):
            insert_c1c2(cursor, n, CL)

        debug("Taking down node1")
        node1.stop(wait_other_notice=True)

        debug("Reading back data.")
        for n in xrange(100):
            query_c1c2(cursor, n, CL)
Example 10
    def quorum_quorum_test(self):
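        """ QUORUM writes and reads (3 nodes, RF=3) keep working with one node
        down, and writes become unavailable once a second node is stopped. """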
        session, session2 = self.cl_cl_prepare(ConsistencyLevel.QUORUM, ConsistencyLevel.QUORUM)

        #Stop a node and retest
        self.cluster.nodelist()[2].stop()
        for n in xrange(0, 100):
            insert_c1c2(session, n, ConsistencyLevel.QUORUM)
            query_c1c2(session2, n, ConsistencyLevel.QUORUM)

        self.cluster.nodelist()[1].stop()
        assert_unavailable(insert_c1c2, session, 100, ConsistencyLevel.QUORUM)
Example 11
    def quorum_quorum_test(self):
        session, session2 = self.cl_cl_prepare(ConsistencyLevel.QUORUM,
                                               ConsistencyLevel.QUORUM)

        #Stop a node and retest
        self.cluster.nodelist()[2].stop()
        for n in xrange(0, 100):
            insert_c1c2(session, n, ConsistencyLevel.QUORUM)
            query_c1c2(session2, n, ConsistencyLevel.QUORUM)

        self.cluster.nodelist()[1].stop()
        assert_unavailable(insert_c1c2, session, 100, ConsistencyLevel.QUORUM)
Example 12
    def all_one_test(self):
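        """ With RF=3, CL.ALL writes become unavailable as soon as any node is
        down, while CL.ONE reads keep succeeding with up to two nodes stopped. """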
        session, session2 = self.cl_cl_prepare(ConsistencyLevel.ALL, ConsistencyLevel.ONE)

        #Stop a node and retest
        self.cluster.nodelist()[2].stop()
        assert_unavailable(insert_c1c2, session, 100, ConsistencyLevel.ALL)
        for n in xrange(0, 100):
            query_c1c2(session2, n, ConsistencyLevel.ONE)

        #Stop a node and retest
        self.cluster.nodelist()[1].stop()
        assert_unavailable(insert_c1c2, session, 100, ConsistencyLevel.ALL)
        for n in xrange(0, 100):
            query_c1c2(session2, n, ConsistencyLevel.ONE)
Example 13
    def one_one_test(self):
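        """ With RF=3, CL.ONE writes and reads keep succeeding with up to two of
        the three nodes stopped. """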
        session, session2 = self.cl_cl_prepare(ConsistencyLevel.ONE, ConsistencyLevel.ONE)

        #Stop a node and retest
        self.cluster.nodelist()[2].stop()
        for n in xrange(0, 100):
            insert_c1c2(session, n, ConsistencyLevel.ONE)
            query_c1c2(session2, n, ConsistencyLevel.ONE)


        #Stop a node and retest
        self.cluster.nodelist()[1].stop()
        for n in xrange(0, 100):
            insert_c1c2(session, n, ConsistencyLevel.ONE)
            query_c1c2(session2, n, ConsistencyLevel.ONE)
Example 14
    def one_one_test(self):
        session, session2 = self.cl_cl_prepare(ConsistencyLevel.ONE,
                                               ConsistencyLevel.ONE)

        #Stop a node and retest
        self.cluster.nodelist()[2].stop()
        for n in xrange(0, 100):
            insert_c1c2(session, n, ConsistencyLevel.ONE)
            query_c1c2(session2, n, ConsistencyLevel.ONE)

        #Stop a node and retest
        self.cluster.nodelist()[1].stop()
        for n in xrange(0, 100):
            insert_c1c2(session, n, ConsistencyLevel.ONE)
            query_c1c2(session2, n, ConsistencyLevel.ONE)
Example 15
    def all_one_test(self):
        session, session2 = self.cl_cl_prepare(ConsistencyLevel.ALL,
                                               ConsistencyLevel.ONE)

        #Stop a node and retest
        self.cluster.nodelist()[2].stop()
        assert_unavailable(insert_c1c2, session, 100, ConsistencyLevel.ALL)
        for n in xrange(0, 100):
            query_c1c2(session2, n, ConsistencyLevel.ONE)

        #Stop a node and retest
        self.cluster.nodelist()[1].stop()
        assert_unavailable(insert_c1c2, session, 100, ConsistencyLevel.ALL)
        for n in xrange(0, 100):
            query_c1c2(session2, n, ConsistencyLevel.ONE)
Example 16
    def non_local_read_test(self):
        """ This test reads from a coordinator we know has no copy of the data """
        cluster = self.cluster

        cluster.populate(3).start()
        [node1, node2, node3] = cluster.nodelist()

        cursor = self.patient_cql_connection(node1)
        self.create_ks(cursor, 'ks', 2)
        create_c1c2_table(self, cursor)

        # insert and get at CL.QUORUM (since RF=2, node1 won't have all keys locally)
        for n in xrange(0, 1000):
            tools.insert_c1c2(cursor, n, ConsistencyLevel.QUORUM)
            tools.query_c1c2(cursor, n, ConsistencyLevel.QUORUM)
Example 17
    def cl_cl_prepare(self, write_cl, read_cl):
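        """ Start a 3-node cluster with RF=3, write 100 rows through node1 at
        write_cl and read them back through node2 at read_cl, then return both
        sessions for the caller to reuse. """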
        cluster = self.cluster

        cluster.populate(3).start()
        node1, node2, node3 = cluster.nodelist()

        session = self.patient_cql_connection(node1)
        self.create_ks(session, 'ks', 3)
        create_c1c2_table(self, session)

        session2 = self.patient_cql_connection(node2, 'ks')

        # insert at write_cl and read back at read_cl
        for n in xrange(0, 100):
            insert_c1c2(session, n, write_cl)
            query_c1c2(session2, n, read_cl)

        return session, session2
Example 18
    def cl_cl_prepare(self, write_cl, read_cl):
        cluster = self.cluster

        cluster.populate(3).start()
        node1, node2, node3 = cluster.nodelist()

        session = self.patient_cql_connection(node1)
        self.create_ks(session, 'ks', 3)
        create_c1c2_table(self, session)

        session2 = self.patient_cql_connection(node2, 'ks')

        # insert at write_cl and read back at read_cl
        for n in xrange(0, 100):
            insert_c1c2(session, n, write_cl)
            query_c1c2(session2, n, read_cl)

        return session, session2
Example 19
    def consistent_reads_after_bootstrap_test(self):
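        """ Write some keys at CL.ALL, then more keys at CL.ONE while node1 is
        down and hinted handoff is disabled, bootstrap node3 and check that
        CL.ALL reads through it find every key, i.e. no data was lost. """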
        debug("Creating a ring")
        cluster = self.cluster
        cluster.set_configuration_options(values={'hinted_handoff_enabled': False,
                                                  'write_request_timeout_in_ms': 60000,
                                                  'read_request_timeout_in_ms': 60000,
                                                  'dynamic_snitch_badness_threshold': 0.0},
                                          batch_commitlog=True)

        cluster.populate(2).start()
        [node1, node2] = cluster.nodelist()
        cluster.start()

        debug("Set to talk to node 2")
        n2cursor = self.patient_cql_connection(node2)
        self.create_ks(n2cursor, 'ks', 2)
        create_c1c2_table(self, n2cursor)

        debug("Generating some data for all nodes")
        for n in xrange(10,20):
            insert_c1c2(n2cursor, n, ConsistencyLevel.ALL)

        node1.flush()
        debug("Taking down node1")
        node1.stop(wait_other_notice=True)

        debug("Writing data to only node2")
        for n in xrange(30,1000):
            insert_c1c2(n2cursor, n, ConsistencyLevel.ONE)
        node2.flush()

        debug("Restart node1")
        node1.start(wait_other_notice=True)

        debug("Boostraping node3")
        node3 = new_node(cluster)
        node3.start()

        n3cursor = self.patient_cql_connection(node3)
        n3cursor.execute("USE ks");
        debug("Checking that no data was lost")
        for n in xrange(10,20):
            query_c1c2(n3cursor, n, ConsistencyLevel.ALL)

        for n in xrange(30,1000):
            query_c1c2(n3cursor, n, ConsistencyLevel.ALL)
Example 20
    def movement_test(self):
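        """ Start an unbalanced 3-node ring, move the nodes onto balanced tokens,
        then check that every key is still readable and that the data load ends
        up roughly even across the nodes. """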
        cluster = self.cluster

        # Create an unbalanced ring
        cluster.populate(3, tokens=[0, 2**48, 2**62]).start()
        [node1, node2, node3] = cluster.nodelist()

        cursor = self.patient_cql_connection(node1)
        self.create_ks(cursor, 'ks', 1)
        self.create_cf(cursor, 'cf', columns={'c1': 'text', 'c2': 'text'})

        for n in xrange(0, 10000):
            insert_c1c2(cursor, n, ConsistencyLevel.ONE)

        cluster.flush()

        # Move nodes to balance the cluster
        balancing_tokens = cluster.balanced_tokens(3)
        escformat = '\\%s'
        if cluster.version() >= '2.1':
            escformat = '%s'
        node1.move(escformat %
                   balancing_tokens[0])  # can't assume 0 is balanced with m3p
        node2.move(escformat % balancing_tokens[1])
        node3.move(escformat % balancing_tokens[2])
        time.sleep(1)

        cluster.cleanup()

        # Check we can get all the keys
        for n in xrange(0, 10000):
            query_c1c2(cursor, n, ConsistencyLevel.ONE)

        # Now the load should be basically even
        sizes = [node.data_size() for node in [node1, node2, node3]]

        assert_almost_equal(sizes[0], sizes[1])
        assert_almost_equal(sizes[0], sizes[2])
        assert_almost_equal(sizes[1], sizes[2])
Example 21
    def check_rows_on_node(self, node_to_check, rows, found=[], missings=[], restart=True):
        stopped_nodes = []
        for node in self.cluster.nodes.values():
            if node.is_running() and node is not node_to_check:
                stopped_nodes.append(node)
                node.stop(wait_other_notice=True)

        cursor = self.patient_cql_connection(node_to_check, 'ks')
        result = cursor.execute("SELECT * FROM cf LIMIT %d" % (rows * 2))
        assert len(result) == rows, len(result)

        for k in found:
            query_c1c2(cursor, k, ConsistencyLevel.ONE)

        for k in missings:
            query = SimpleStatement("SELECT c1, c2 FROM cf WHERE key='k%d'" % k, consistency_level=ConsistencyLevel.ONE)
            res = cursor.execute(query)
            assert len(filter(lambda x: len(x) != 0, res)) == 0, res

        if restart:
            for node in stopped_nodes:
                node.start(wait_other_notice=True)
Example 22
    def movement_test(self):
        cluster = self.cluster

        # Create an unbalanced ring
        cluster.populate(3, tokens=[0, 2**48, 2**62]).start()
        [node1, node2, node3] = cluster.nodelist()

        cursor = self.patient_cql_connection(node1)
        self.create_ks(cursor, 'ks', 1)
        self.create_cf(cursor, 'cf', columns={'c1': 'text', 'c2': 'text'})

        for n in xrange(0, 10000):
            insert_c1c2(cursor, n, ConsistencyLevel.ONE)

        cluster.flush()

        # Move nodes to balance the cluster
        balancing_tokens = cluster.balanced_tokens(3)
        escformat = '\\%s'
        if cluster.version() >= '2.1':
            escformat = '%s'
        node1.move(escformat % balancing_tokens[0]) # can't assume 0 is balanced with m3p
        node2.move(escformat % balancing_tokens[1])
        node3.move(escformat % balancing_tokens[2])
        time.sleep(1)

        cluster.cleanup()

        # Check we can get all the keys
        for n in xrange(0, 10000):
            query_c1c2(cursor, n, ConsistencyLevel.ONE)

        # Now the load should be basically even
        sizes = [ node.data_size() for node in [node1, node2, node3] ]

        assert_almost_equal(sizes[0], sizes[1])
        assert_almost_equal(sizes[0], sizes[2])
        assert_almost_equal(sizes[1], sizes[2])
Example 23
    def simple_bootstrap_test(self):
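        """ Bootstrap a second node into a single-node cluster while reads keep
        running, then check that no read fails and that the data ends up split
        roughly evenly between the two nodes. """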
        cluster = self.cluster
        tokens = cluster.balanced_tokens(2)

        keys = 10000

        # Create a single node cluster
        cluster.populate(1, tokens=[tokens[0]]).start(wait_other_notice=True)
        node1 = cluster.nodes["node1"]

        session = self.patient_cql_connection(node1)
        self.create_ks(session, 'ks', 1)
        self.create_cf(session, 'cf', columns={'c1': 'text', 'c2': 'text'})

        for n in xrange(0, keys):
            insert_c1c2(session, n, ConsistencyLevel.ONE)

        node1.flush()
        initial_size = node1.data_size()

        # Read the inserted data throughout the bootstrap process. We shouldn't
        # get any errors
        reader = self.go(lambda _: query_c1c2(
            session, random.randint(0, keys - 1), ConsistencyLevel.ONE))

        # Bootstrapping a new node
        node2 = new_node(cluster, token=tokens[1])
        node2.start()
        node2.watch_log_for("Listening for thrift clients...")

        reader.check()
        node1.cleanup()
        time.sleep(.5)
        reader.check()

        size1 = node1.data_size()
        size2 = node2.data_size()
        assert_almost_equal(size1, size2, error=0.3)
        assert_almost_equal(initial_size, 2 * size1)
Example 24
    def simple_bootstrap_test(self):
        cluster = self.cluster
        tokens = cluster.balanced_tokens(2)

        keys = 10000

        # Create a single node cluster
        cluster.populate(1, tokens=[tokens[0]]).start(wait_other_notice=True)
        node1 = cluster.nodes["node1"]

        session = self.patient_cql_connection(node1)
        self.create_ks(session, 'ks', 1)
        self.create_cf(session, 'cf', columns={ 'c1' : 'text', 'c2' : 'text' })

        for n in xrange(0, keys):
            insert_c1c2(session, n, ConsistencyLevel.ONE)

        node1.flush()
        initial_size = node1.data_size()

        # Read the inserted data throughout the bootstrap process. We shouldn't
        # get any errors
        reader = self.go(lambda _: query_c1c2(session, random.randint(0, keys-1), ConsistencyLevel.ONE))

        # Bootstrapping a new node
        node2 = new_node(cluster, token=tokens[1])
        node2.start()
        node2.watch_log_for("Listening for thrift clients...")

        reader.check()
        node1.cleanup()
        time.sleep(.5)
        reader.check()

        size1 = node1.data_size()
        size2 = node2.data_size()
        assert_almost_equal(size1, size2, error=0.3)
        assert_almost_equal(initial_size, 2 * size1)
Example 25
    def decomission_test(self):
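        """ Decommission a node from a balanced 4-node ring (RF=2), check that
        every key is still readable at CL.QUORUM and that the data load is
        redistributed as expected across the remaining nodes. """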
        cluster = self.cluster

        tokens = cluster.balanced_tokens(4)
        cluster.populate(4, tokens=tokens).start()
        [node1, node2, node3, node4] = cluster.nodelist()

        cursor = self.patient_cql_connection(node1)
        self.create_ks(cursor, 'ks', 2)
        self.create_cf(cursor, 'cf', columns={'c1': 'text', 'c2': 'text'})

        for n in xrange(0, 10000):
            insert_c1c2(cursor, n, ConsistencyLevel.QUORUM)

        cluster.flush()
        sizes = [ node.data_size() for node in cluster.nodelist() if node.is_running()]
        init_size = sizes[0]
        assert_almost_equal(*sizes)

        time.sleep(.5)
        node4.decommission()
        node4.stop()
        cluster.cleanup()
        time.sleep(.5)

        # Check we can get all the keys
        for n in xrange(0, 10000):
            query_c1c2(cursor, n, ConsistencyLevel.QUORUM)

        sizes = [ node.data_size() for node in cluster.nodelist() if node.is_running() ]
        three_node_sizes = sizes
        assert_almost_equal(sizes[0], sizes[1])
        assert_almost_equal((2.0/3.0) * sizes[0], sizes[2])
        assert_almost_equal(sizes[2], init_size)

        if cluster.version() <= '1.2':
            node3.stop(wait_other_notice=True)
            node1.removeToken(tokens[2])
            time.sleep(.5)
            cluster.cleanup()
            time.sleep(.5)

            # Check we can get all the keys
            for n in xrange(0, 10000):
                query_c1c2(cursor, n, ConsistencyLevel.QUORUM)

            sizes = [ node.data_size() for node in cluster.nodelist() if node.is_running() ]
            assert_almost_equal(*sizes)
            assert_almost_equal(sizes[0], 2 * init_size)

            node5 = new_node(cluster, token=(tokens[2]+1)).start()
            time.sleep(.5)
            cluster.cleanup()
            time.sleep(.5)
            cluster.compact()
            time.sleep(.5)

            # Check we can get all the keys
            for n in xrange(0, 10000):
                query_c1c2(cursor, n, ConsistencyLevel.QUORUM)

            sizes = [ node.data_size() for node in cluster.nodelist() if node.is_running() ]
            # We should be back to the earlier 3-node situation
            for i in xrange(0, len(sizes)):
                assert_almost_equal(sizes[i], three_node_sizes[i])
Example 26
    def decomission_test(self):
        cluster = self.cluster

        tokens = cluster.balanced_tokens(4)
        cluster.populate(4, tokens=tokens).start()
        [node1, node2, node3, node4] = cluster.nodelist()

        cursor = self.patient_cql_connection(node1)
        self.create_ks(cursor, 'ks', 2)
        self.create_cf(cursor, 'cf', columns={'c1': 'text', 'c2': 'text'})

        for n in xrange(0, 10000):
            insert_c1c2(cursor, n, ConsistencyLevel.QUORUM)

        cluster.flush()
        sizes = [
            node.data_size() for node in cluster.nodelist()
            if node.is_running()
        ]
        init_size = sizes[0]
        assert_almost_equal(*sizes)

        time.sleep(.5)
        node4.decommission()
        node4.stop()
        cluster.cleanup()
        time.sleep(.5)

        # Check we can get all the keys
        for n in xrange(0, 10000):
            query_c1c2(cursor, n, ConsistencyLevel.QUORUM)

        sizes = [
            node.data_size() for node in cluster.nodelist()
            if node.is_running()
        ]
        three_node_sizes = sizes
        assert_almost_equal(sizes[0], sizes[1])
        assert_almost_equal((2.0 / 3.0) * sizes[0], sizes[2])
        assert_almost_equal(sizes[2], init_size)

        if cluster.version() <= '1.2':
            node3.stop(wait_other_notice=True)
            node1.removeToken(tokens[2])
            time.sleep(.5)
            cluster.cleanup()
            time.sleep(.5)

            # Check we can get all the keys
            for n in xrange(0, 10000):
                query_c1c2(cursor, n, ConsistencyLevel.QUORUM)

            sizes = [
                node.data_size() for node in cluster.nodelist()
                if node.is_running()
            ]
            assert_almost_equal(*sizes)
            assert_almost_equal(sizes[0], 2 * init_size)

            node5 = new_node(cluster, token=(tokens[2] + 1)).start()
            time.sleep(.5)
            cluster.cleanup()
            time.sleep(.5)
            cluster.compact()
            time.sleep(.5)

            # Check we can get all the keys
            for n in xrange(0, 10000):
                query_c1c2(cursor, n, ConsistencyLevel.QUORUM)

            sizes = [
                node.data_size() for node in cluster.nodelist()
                if node.is_running()
            ]
            # We should be back to the earlier 3-node situation
            for i in xrange(0, len(sizes)):
                assert_almost_equal(sizes[i], three_node_sizes[i])