def quorum_available_during_failure_test(self):
    """
    Verify that with RF=3, QUORUM writes and reads keep succeeding
    after one node of a three-node ring is taken down.
    """
    CL = ConsistencyLevel.QUORUM
    RF = 3

    debug("Creating a ring")
    cluster = self.cluster
    if DISABLE_VNODES:
        cluster.populate(3).start()
    else:
        tokens = cluster.balanced_tokens(3)
        cluster.populate(3, tokens=tokens).start()
    node1, node2, node3 = cluster.nodelist()
    # populate(...).start() above already started every node; the
    # redundant second cluster.start() call has been removed.

    debug("Set to talk to node 2")
    session = self.patient_cql_connection(node2)
    self.create_ks(session, "ks", RF)
    create_c1c2_table(self, session)

    debug("Generating some data")
    insert_c1c2(session, n=100, consistency=CL)

    debug("Taking down node1")
    node1.stop(wait_other_notice=True)

    debug("Reading back data.")
    # QUORUM (2 of 3) must still be satisfiable with node1 gone.
    for n in xrange(100):
        query_c1c2(session, n, CL)
def quorum_available_during_failure_test(self):
    """
    Verify that with RF=3, QUORUM writes and reads keep succeeding
    after one node of a three-node ring is taken down.
    """
    CL = ConsistencyLevel.QUORUM
    RF = 3

    debug("Creating a ring")
    cluster = self.cluster
    if DISABLE_VNODES:
        cluster.populate(3).start()
    else:
        tokens = cluster.balanced_tokens(3)
        cluster.populate(3, tokens=tokens).start()
    node1, node2, node3 = cluster.nodelist()
    # populate(...).start() above already started every node; the
    # redundant second cluster.start() call has been removed.

    debug("Set to talk to node 2")
    session = self.patient_cql_connection(node2)
    self.create_ks(session, 'ks', RF)
    create_c1c2_table(self, session)

    debug("Generating some data")
    insert_c1c2(session, n=100, consistency=CL)

    debug("Taking down node1")
    node1.stop(wait_other_notice=True)

    debug("Reading back data.")
    # QUORUM (2 of 3) must still be satisfiable with node1 gone.
    for n in xrange(100):
        query_c1c2(session, n, CL)
def readrepair_test(self):
    """
    Write with one replica down (hints disabled), read everything at
    QUORUM so read repair (chance=1.0) fires, then verify the
    formerly-down replica can serve every key on its own.
    """
    cluster = self.cluster
    cluster.set_configuration_options(values={"hinted_handoff_enabled": False})

    # Two-node ring; balanced tokens unless vnodes are disabled.
    if DISABLE_VNODES:
        cluster.populate(2).start()
    else:
        cluster.populate(2, tokens=cluster.balanced_tokens(2)).start()
    node1, node2 = cluster.nodelist()

    crs = self.patient_cql_connection(node1).cursor()
    self.create_ks(crs, "ks", 2)
    create_c1c2_table(self, crs, read_repair=1.0)

    # Write 10k rows at CL.ONE while node2 is down; with hints off,
    # node2 misses all of them.
    node2.stop(wait_other_notice=True)
    for key in xrange(10000):
        insert_c1c2(crs, key, "ONE")
    node2.start(wait_other_notice=True)
    time.sleep(5)

    # query everything to cause RR
    for key in xrange(10000):
        query_c1c2(crs, key, "QUORUM")
    node1.stop(wait_other_notice=True)

    # Check node2 for all the keys that should have been repaired
    crs = self.patient_cql_connection(node2, keyspace="ks").cursor()
    for key in xrange(10000):
        query_c1c2(crs, key, "ONE")
def readrepair_test(self):
    """
    Write with one replica down (hints disabled), read everything at
    QUORUM so read repair (chance=1.0) fires, then verify the
    formerly-down replica can serve every key on its own.
    """
    cluster = self.cluster
    cluster.set_configuration_options(
        values={'hinted_handoff_enabled': False})

    # Two-node ring; balanced tokens unless vnodes are disabled.
    if DISABLE_VNODES:
        cluster.populate(2).start()
    else:
        cluster.populate(2, tokens=cluster.balanced_tokens(2)).start()
    node1, node2 = cluster.nodelist()

    session = self.patient_cql_connection(node1)
    self.create_ks(session, 'ks', 2)
    create_c1c2_table(self, session, read_repair=1.0)

    # With node2 down and hints off, these writes land on node1 only.
    node2.stop(wait_other_notice=True)
    insert_c1c2(session, n=10000, consistency=ConsistencyLevel.ONE)
    node2.start(wait_other_notice=True)

    # query everything to cause RR
    for key in xrange(10000):
        query_c1c2(session, key, ConsistencyLevel.QUORUM)
    node1.stop(wait_other_notice=True)

    # Check node2 for all the keys that should have been repaired
    session = self.patient_cql_connection(node2, keyspace='ks')
    for key in xrange(10000):
        query_c1c2(session, key, ConsistencyLevel.ONE)
def _do_hinted_handoff(self, node1, node2, enabled):
    """
    Shared driver for the hinted-handoff tests.

    Stops node2, writes 100 rows at CL.ONE through node1, restarts
    node2 and then checks node2's copy of the data directly:
    when `enabled` is True the rows must have arrived via hint
    delivery; when False they must be absent.

    node1 -- node that stays up and (if enabled) stores/delivers hints
    node2 -- node that is stopped during the writes
    enabled -- whether hinted handoff is configured on the cluster
    """
    session = self.patient_exclusive_cql_connection(node1)
    self.create_ks(session, 'ks', 2)
    create_c1c2_table(self, session)

    # Write while node2 is down so node1 accumulates hints for it
    # (only if hinted handoff is enabled on the cluster).
    node2.stop(wait_other_notice=True)
    insert_c1c2(session, n=100, consistency=ConsistencyLevel.ONE)
    log_mark = node1.mark_log()
    node2.start(wait_other_notice=True)
    if enabled:
        # Wait until node1 logs that it finished delivering hints.
        node1.watch_log_for(["Finished hinted"], from_mark=log_mark, timeout=120)

    # Stop node1 so the queries below can only be served by node2.
    node1.stop(wait_other_notice=True)
    # Check node2: every key must be present when handoff was enabled,
    # and must be missing when it was disabled.
    session = self.patient_exclusive_cql_connection(node2, keyspace='ks')
    for n in xrange(0, 100):
        if enabled:
            query_c1c2(session, n, ConsistencyLevel.ONE)
        else:
            query_c1c2(session, n, ConsistencyLevel.ONE, tolerate_missing=True, must_be_missing=True)
def quorum_quorum_test(self):
    """
    QUORUM writes/reads on RF=3 keep working with one node down and
    raise Unavailable once a second node is stopped.
    """
    cluster = self.cluster
    cluster.populate(3).start()
    node1, node2, node3 = cluster.nodelist()

    write_cursor = self.patient_cql_connection(node1).cursor()
    self.create_ks(write_cursor, "ks", 3)
    create_c1c2_table(self, write_cursor)
    read_cursor = self.patient_cql_connection(node2, "ks").cursor()

    # Insert and read back at CL.QUORUM with the full ring up.
    for key in xrange(100):
        insert_c1c2(write_cursor, key, "QUORUM")
        query_c1c2(read_cursor, key, "QUORUM")

    # One node down still leaves a quorum (2 of 3).
    node3.stop(wait_other_notice=True)
    for key in xrange(100, 200):
        insert_c1c2(write_cursor, key, "QUORUM")
        query_c1c2(read_cursor, key, "QUORUM")

    # A second node down breaks quorum: expect an unavailable exception.
    node2.stop(wait_other_notice=True)
    assert_unavailable(insert_c1c2, write_cursor, 200, "QUORUM")
def readrepair_test(self):
    """
    Read repair check: write 10k rows with node2 down (hints off),
    read them all at QUORUM to trigger repair, then confirm node2
    alone can serve every key.
    """
    cluster = self.cluster
    cluster.set_configuration_options(values={"hinted_handoff_enabled": False})
    if DISABLE_VNODES:
        cluster.populate(2).start()
    else:
        cluster.populate(2, tokens=cluster.balanced_tokens(2)).start()
    node1, node2 = cluster.nodelist()

    session = self.patient_cql_connection(node1)
    self.create_ks(session, "ks", 2)
    create_c1c2_table(self, session, read_repair=1.0)

    # Writes land only on node1 while node2 is down.
    node2.stop(wait_other_notice=True)
    insert_c1c2(session, n=10000, consistency=ConsistencyLevel.ONE)
    node2.start(wait_other_notice=True)

    # query everything to cause RR
    for key in xrange(10000):
        query_c1c2(session, key, ConsistencyLevel.QUORUM)
    node1.stop(wait_other_notice=True)

    # Check node2 for all the keys that should have been repaired
    session = self.patient_cql_connection(node2, keyspace="ks")
    for key in xrange(10000):
        query_c1c2(session, key, ConsistencyLevel.ONE)
def blacklisted_directory_test(self):
    """
    Data must stay readable after one of three data directories is
    marked unwritable over JMX and sstables are relocated off it.
    """
    cluster = self.cluster
    cluster.set_datadir_count(3)
    cluster.populate(1)
    node, = cluster.nodelist()
    remove_perf_disable_shared_mem(node)
    cluster.start(wait_for_binary_proto=True)

    session = self.patient_cql_connection(node)
    self.create_ks(session, 'ks', 1)
    create_c1c2_table(self, session)
    insert_c1c2(session, n=10000)
    node.flush()
    for key in xrange(10000):
        query_c1c2(session, key)
    node.compact()

    # Blacklist the first data directory through the
    # BlacklistedDirectories mbean, then re-check every key.
    blacklist_mbean = make_mbean('db', type='BlacklistedDirectories')
    with JolokiaAgent(node) as agent:
        agent.execute_method(blacklist_mbean, 'markUnwritable',
                             [os.path.join(node.get_path(), 'data0')])
    for key in xrange(10000):
        query_c1c2(session, key)

    # Move sstables off the blacklisted directory and check once more.
    node.nodetool('relocatesstables')
    for key in xrange(10000):
        query_c1c2(session, key)
def quorum_available_during_failure_test(self):
    """
    Verify that with RF=3, QUORUM writes and reads keep succeeding
    after one node of a three-node ring is taken down.
    """
    CL = 'QUORUM'
    RF = 3

    debug("Creating a ring")
    cluster = self.cluster
    # NOTE(review): this older variant keys off ENABLE_VNODES while the
    # sibling tests use DISABLE_VNODES — confirm which flag this
    # branch's framework actually defines.
    if ENABLE_VNODES:
        tokens = cluster.balanced_tokens(3)
        cluster.populate(3, tokens=tokens).start()
    else:
        cluster.populate(3).start()
    node1, node2, node3 = cluster.nodelist()
    # populate(...).start() above already started every node; the
    # redundant second cluster.start() call has been removed.

    debug("Set to talk to node 2")
    cursor = self.cql_connection(node2).cursor()
    self.create_ks(cursor, 'ks', RF)
    create_c1c2_table(self, cursor)

    debug("Generating some data")
    for n in xrange(100):
        insert_c1c2(cursor, n, CL)

    debug("Taking down node1")
    node1.stop(wait_other_notice=True)

    debug("Reading back data.")
    # QUORUM (2 of 3) must still be satisfiable with node1 gone.
    for n in xrange(100):
        query_c1c2(cursor, n, CL)
def one_one_test(self):
    """
    CL.ONE writes and reads keep working on an RF=3 ring as nodes are
    shut down one at a time (reads are retried since replication to
    the read coordinator is asynchronous at ONE).
    """
    cluster = self.cluster
    cluster.populate(3).start()
    node1, node2, node3 = cluster.nodelist()

    cursor1 = self.patient_cql_connection(node1).cursor()
    self.create_ks(cursor1, "ks", 3)
    create_c1c2_table(self, cursor1)
    cursor2 = self.patient_cql_connection(node2, "ks").cursor()

    # insert and get at CL.ONE
    for key in xrange(100):
        insert_c1c2(cursor1, key, "ONE")
        retry_till_success(query_c1c2, cursor2, key, "ONE", timeout=5)

    # One node down: still fine at ONE.
    node3.stop(wait_other_notice=True)
    for key in xrange(100, 200):
        insert_c1c2(cursor1, key, "ONE")
        retry_till_success(query_c1c2, cursor2, key, "ONE", timeout=5)

    # Two nodes down: read back through the only remaining node.
    node2.stop(wait_other_notice=True)
    for key in xrange(200, 300):
        insert_c1c2(cursor1, key, "ONE")
        retry_till_success(query_c1c2, cursor1, key, "ONE", timeout=5)
def hintedhandoff_test(self):
    """
    Write with node2 down so node1 stores hints, restart node2, wait
    for hint delivery, then verify node2 alone serves every key.
    """
    cluster = self.cluster
    if DISABLE_VNODES:
        cluster.populate(2).start()
    else:
        cluster.populate(2, tokens=cluster.balanced_tokens(2)).start()
    node1, node2 = cluster.nodelist()

    cursor = self.patient_cql_connection(node1).cursor()
    self.create_ks(cursor, "ks", 2)
    create_c1c2_table(self, cursor)

    # Writes at ONE while node2 is down generate hints on node1.
    node2.stop(wait_other_notice=True)
    for key in xrange(100):
        insert_c1c2(cursor, key, "ONE")

    # Restart node2 and wait for node1 to log that hint delivery finished.
    log_mark = node1.mark_log()
    node2.start()
    node1.watch_log_for(["Finished hinted"], from_mark=log_mark, timeout=90)
    node1.stop(wait_other_notice=True)

    # Check node2 for all the keys that should have been delivered via HH
    cursor = self.patient_cql_connection(node2, keyspace="ks").cursor()
    for key in xrange(100):
        query_c1c2(cursor, key, "ONE")
def consistent_reads_after_bootstrap_test(self):
    """
    Write rows while node1 is down (hints off), bootstrap a third
    node, and verify CL.ALL reads through the new node see every row.
    """
    debug("Creating a ring")
    cluster = self.cluster
    cluster.set_configuration_options(values={
        'hinted_handoff_enabled': False,
        'write_request_timeout_in_ms': 60000,
        'read_request_timeout_in_ms': 60000,
        'dynamic_snitch_badness_threshold': 0.0
    }, batch_commitlog=True)
    # Start the cluster exactly once, with the waits we actually need.
    # (Previously populate(2).start() ran first, so the later
    # cluster.start(wait_for_binary_proto=..., wait_other_notice=...)
    # was a no-op and its waits never applied to the real startup.)
    cluster.populate(2)
    node1, node2 = cluster.nodelist()
    cluster.start(wait_for_binary_proto=True, wait_other_notice=True)

    debug("Set to talk to node 2")
    n2session = self.patient_cql_connection(node2)
    self.create_ks(n2session, 'ks', 2)
    create_c1c2_table(self, n2session)

    debug("Generating some data for all nodes")
    insert_c1c2(n2session, keys=range(10, 20), consistency=ConsistencyLevel.ALL)
    node1.flush()

    debug("Taking down node1")
    node1.stop(wait_other_notice=True)

    debug("Writing data to only node2")
    insert_c1c2(n2session, keys=range(30, 1000), consistency=ConsistencyLevel.ONE)
    node2.flush()

    debug("Restart node1")
    node1.start(wait_other_notice=True)

    debug("Bootstraping node3")
    node3 = new_node(cluster)
    node3.start(wait_for_binary_proto=True)
    n3session = self.patient_cql_connection(node3)
    n3session.execute("USE ks")

    debug("Checking that no data was lost")
    # CL.ALL through the bootstrapped node must see both batches.
    for n in xrange(10, 20):
        query_c1c2(n3session, n, ConsistencyLevel.ALL)
    for n in xrange(30, 1000):
        query_c1c2(n3session, n, ConsistencyLevel.ALL)
def consistent_reads_after_move_test(self):
    """
    Write rows while node1 is down (hints off), move node3's token,
    and verify CL.ALL reads still see every row afterwards.
    """
    debug("Creating a ring")
    cluster = self.cluster
    cluster.set_configuration_options(values={
        'hinted_handoff_enabled': False,
        'write_request_timeout_in_ms': 60000,
        'read_request_timeout_in_ms': 60000,
        'dynamic_snitch_badness_threshold': 0.0
    }, batch_commitlog=True)
    cluster.populate(3, tokens=[0, 2**48, 2**62]).start()
    node1, node2, node3 = cluster.nodelist()
    # populate(...).start() above already started every node; the
    # redundant second cluster.start() call has been removed.

    debug("Set to talk to node 2")
    n2cursor = self.patient_cql_connection(node2).cursor()
    self.create_ks(n2cursor, 'ks', 2)
    create_c1c2_table(self, n2cursor)

    debug("Generating some data for all nodes")
    for n in xrange(10, 20):
        insert_c1c2(n2cursor, n, 'ALL')
    node1.flush()

    debug("Taking down node1")
    node1.stop(wait_other_notice=True)

    debug("Writing data to node2")
    # Hints are disabled, so these rows exist only on node2 until repair.
    for n in xrange(30, 1000):
        insert_c1c2(n2cursor, n, 'ONE')
    node2.flush()

    debug("Restart node1")
    node1.start(wait_other_notice=True)

    debug("Move token on node3")
    node3.move(2)

    debug("Checking that no data was lost")
    for n in xrange(10, 20):
        query_c1c2(n2cursor, n, 'ALL')
    for n in xrange(30, 1000):
        query_c1c2(n2cursor, n, 'ALL')
def non_local_read_test(self):
    """ This test reads from a coordinator we know has no copy of the data """
    cluster = self.cluster
    cluster.populate(3).start()
    node1, node2, node3 = cluster.nodelist()

    coordinator = self.patient_cql_connection(node1)
    self.create_ks(coordinator, 'ks', 2)
    create_c1c2_table(self, coordinator)

    # RF=2 on a 3-node ring: node1 cannot own every key, so QUORUM
    # operations through it necessarily involve remote replicas.
    for key in xrange(1000):
        tools.insert_c1c2(coordinator, key, ConsistencyLevel.QUORUM)
        tools.query_c1c2(coordinator, key, ConsistencyLevel.QUORUM)
def hintedhandoff_decom_test(self):
    """
    Hints accumulated for node4 must still be delivered even while the
    other nodes decommission themselves.
    """
    self.cluster.populate(4).start(wait_for_binary_proto=True)
    node1, node2, node3, node4 = self.cluster.nodelist()

    session = self.patient_cql_connection(node1)
    self.create_ks(session, 'ks', 2)
    create_c1c2_table(self, session)

    # Write with node4 down so hints are stored for it.
    node4.stop(wait_other_notice=True)
    insert_c1c2(session, n=100, consistency=ConsistencyLevel.ONE)

    # Shrink the ring around node4 while it comes back up.
    node1.decommission()
    node4.start(wait_for_binary_proto=True)
    node2.decommission()
    node3.decommission()
    time.sleep(5)

    # Every key must have reached node4 via hint delivery.
    for key in xrange(100):
        query_c1c2(session, key, ConsistencyLevel.ONE)
def non_local_read_test(self):
    """ This test reads from a coordinator we know has no copy of the data """
    cluster = self.cluster
    cluster.populate(3).start()
    node1, node2, node3 = cluster.nodelist()

    coordinator = self.patient_cql_connection(node1).cursor()
    self.create_ks(coordinator, 'ks', 2)
    create_c1c2_table(self, coordinator)

    # RF=2 on a 3-node ring: node1 cannot own every key, so QUORUM
    # operations through it necessarily involve remote replicas.
    for key in xrange(1000):
        tools.insert_c1c2(coordinator, key, "QUORUM")
        tools.query_c1c2(coordinator, key, "QUORUM")
def non_local_read_test(self):
    """ This test reads from a coordinator we know has no copy of the data """
    cluster = self.cluster
    cluster.populate(3).start()
    node1, node2, node3 = cluster.nodelist()

    coordinator = self.patient_cql_connection(node1)
    self.create_ks(coordinator, 'ks', 2)
    create_c1c2_table(self, coordinator)

    # RF=2 on a 3-node ring: node1 cannot own every key, so QUORUM
    # reads through it are guaranteed to touch remote replicas.
    tools.insert_c1c2(coordinator, n=1000, consistency=ConsistencyLevel.QUORUM)
    for key in xrange(1000):
        tools.query_c1c2(coordinator, key, ConsistencyLevel.QUORUM)
def cl_cl_prepare(self, write_cl, read_cl, tolerate_missing=False):
    """
    Common setup for the CL-combination tests: build a 3-node RF=3
    ring, write keys 0..99 at write_cl and read each back at read_cl.

    Returns the (writer, reader) sessions for the caller to reuse.
    """
    cluster = self.cluster
    cluster.populate(3).start()
    node1, node2, node3 = cluster.nodelist()

    writer = self.patient_cql_connection(node1)
    self.create_ks(writer, 'ks', 3)
    create_c1c2_table(self, writer)
    reader = self.patient_cql_connection(node2, 'ks')

    # Round-trip each key through the requested consistency levels.
    for key in xrange(100):
        insert_c1c2(writer, key, write_cl)
        query_c1c2(reader, key, read_cl, tolerate_missing)
    return writer, reader
def consistent_reads_after_bootstrap_test(self):
    """
    Write rows while node1 is down (hints off), bootstrap a third
    node, and verify CL.ALL reads through the new node see every row.
    """
    debug("Creating a ring")
    cluster = self.cluster
    cluster.set_configuration_options(values={'hinted_handoff_enabled': False,
                                              'write_request_timeout_in_ms': 60000,
                                              'read_request_timeout_in_ms': 60000,
                                              'dynamic_snitch_badness_threshold': 0.0},
                                      batch_commitlog=True)
    # Start the cluster exactly once, with the waits we actually need.
    # (Previously populate(2).start() ran first, so the later
    # cluster.start(wait_for_binary_proto=..., wait_other_notice=...)
    # was a no-op and its waits never applied to the real startup.)
    cluster.populate(2)
    node1, node2 = cluster.nodelist()
    cluster.start(wait_for_binary_proto=True, wait_other_notice=True)

    debug("Set to talk to node 2")
    n2session = self.patient_cql_connection(node2)
    self.create_ks(n2session, 'ks', 2)
    create_c1c2_table(self, n2session)

    debug("Generating some data for all nodes")
    for n in xrange(10, 20):
        insert_c1c2(n2session, n, ConsistencyLevel.ALL)
    node1.flush()

    debug("Taking down node1")
    node1.stop(wait_other_notice=True)

    debug("Writing data to only node2")
    for n in xrange(30, 1000):
        insert_c1c2(n2session, n, ConsistencyLevel.ONE)
    node2.flush()

    debug("Restart node1")
    node1.start(wait_other_notice=True)

    # Fixed typo in the log message ("Boostraping").
    debug("Bootstrapping node3")
    node3 = new_node(cluster)
    node3.start(wait_for_binary_proto=True)
    n3session = self.patient_cql_connection(node3)
    n3session.execute("USE ks")

    debug("Checking that no data was lost")
    # CL.ALL through the bootstrapped node must see both batches.
    for n in xrange(10, 20):
        query_c1c2(n3session, n, ConsistencyLevel.ALL)
    for n in xrange(30, 1000):
        query_c1c2(n3session, n, ConsistencyLevel.ALL)
def all_all_test(self):
    """
    CL.ALL round-trip on an RF=3 ring; once a replica is down, CL.ALL
    writes must raise an unavailable exception.
    """
    cluster = self.cluster
    cluster.populate(3).start()
    node1, node2, node3 = cluster.nodelist()

    # Use the patient connection helper (retries until the node accepts
    # connections), matching the rest of this suite; the plain
    # cql_connection used before races node startup and flakes.
    cursor1 = self.patient_cql_connection(node1).cursor()
    self.create_ks(cursor1, 'ks', 3)
    create_c1c2_table(self, cursor1)
    cursor2 = self.patient_cql_connection(node2, 'ks').cursor()

    # insert and get at CL.ALL
    for n in xrange(0, 100):
        insert_c1c2(cursor1, n, "ALL")
        query_c1c2(cursor2, n, "ALL")

    # shutdown one node and test we get unavailable exception
    node3.stop(wait_other_notice=True)
    assert_unavailable(insert_c1c2, cursor1, 100, "ALL")
def all_all_test(self):
    """
    CL.ALL round-trip on an RF=3 ring; once a replica is down, CL.ALL
    writes must raise an unavailable exception.
    """
    cluster = self.cluster
    cluster.populate(3).start()
    node1, node2, node3 = cluster.nodelist()

    write_cursor = self.patient_cql_connection(node1).cursor()
    self.create_ks(write_cursor, 'ks', 3)
    create_c1c2_table(self, write_cursor)
    read_cursor = self.patient_cql_connection(node2, 'ks').cursor()

    # Every key is written and read back at CL.ALL while all replicas are up.
    for key in xrange(100):
        insert_c1c2(write_cursor, key, "ALL")
        query_c1c2(read_cursor, key, "ALL")

    # With one replica down, CL.ALL can no longer be satisfied.
    node3.stop(wait_other_notice=True)
    assert_unavailable(insert_c1c2, write_cursor, 100, "ALL")
def all_one_test(self):
    """
    Write at CL.ALL, read at CL.ONE on an RF=3 ring; once a replica is
    down, CL.ALL writes must raise an unavailable exception.
    """
    cluster = self.cluster
    cluster.populate(3).start()
    node1, node2, node3 = cluster.nodelist()

    write_cursor = self.patient_cql_connection(node1).cursor()
    self.create_ks(write_cursor, "ks", 3)
    create_c1c2_table(self, write_cursor)
    read_cursor = self.patient_cql_connection(node2, "ks").cursor()

    # ALL writes are immediately visible to ONE reads.
    for key in xrange(100):
        insert_c1c2(write_cursor, key, "ALL")
        query_c1c2(read_cursor, key, "ONE")

    # With one replica down, CL.ALL can no longer be satisfied.
    node3.stop(wait_other_notice=True)
    assert_unavailable(insert_c1c2, write_cursor, 100, "ALL")
def tracing_from_system_traces_test(self):
    """
    TRACING ON must report a session id for queries against user
    tables but not for queries against the system_traces tables
    themselves.
    """
    self.cluster.populate(1).start(wait_for_binary_proto=True)
    node1, = self.cluster.nodelist()

    session = self.patient_cql_connection(node1)
    self.create_ks(session, 'ks', 1)
    create_c1c2_table(self, session)
    for key in xrange(100):
        insert_c1c2(session, key)

    # A traced query on a user table prints a tracing session id.
    out, err = self.run_cqlsh(node1, 'TRACING ON; SELECT * FROM ks.cf')
    self.assertIn('Tracing session: ', out)

    # Queries on the trace tables themselves are never traced.
    for cmds in ('TRACING ON; SELECT * FROM system_traces.events',
                 'TRACING ON; SELECT * FROM system_traces.sessions'):
        out, err = self.run_cqlsh(node1, cmds)
        self.assertNotIn('Tracing session: ', out)
def consistent_reads_after_move_test(self):
    """
    Write rows while node1 is down (hints off), move node3's token,
    and verify CL.ALL reads still see every row afterwards.
    """
    debug("Creating a ring")
    cluster = self.cluster
    cluster.set_configuration_options(values={
        'hinted_handoff_enabled': False,
        'write_request_timeout_in_ms': 60000,
        'read_request_timeout_in_ms': 60000,
        'dynamic_snitch_badness_threshold': 0.0}, batch_commitlog=True)
    cluster.populate(3, tokens=[0, 2**48, 2**62]).start()
    node1, node2, node3 = cluster.nodelist()
    # populate(...).start() above already started every node; the
    # redundant second cluster.start() call has been removed.

    debug("Set to talk to node 2")
    n2cursor = self.patient_cql_connection(node2)
    self.create_ks(n2cursor, 'ks', 2)
    create_c1c2_table(self, n2cursor)

    debug("Generating some data for all nodes")
    for n in xrange(10, 20):
        insert_c1c2(n2cursor, n, ConsistencyLevel.ALL)
    node1.flush()

    debug("Taking down node1")
    node1.stop(wait_other_notice=True)

    debug("Writing data to node2")
    # Hints are disabled, so these rows exist only on node2 until repair.
    for n in xrange(30, 1000):
        insert_c1c2(n2cursor, n, ConsistencyLevel.ONE)
    node2.flush()

    debug("Restart node1")
    node1.start(wait_other_notice=True)

    debug("Move token on node3")
    node3.move(2)

    debug("Checking that no data was lost")
    for n in xrange(10, 20):
        query_c1c2(n2cursor, n, ConsistencyLevel.ALL)
    for n in xrange(30, 1000):
        query_c1c2(n2cursor, n, ConsistencyLevel.ALL)
def taketoken_test(self):
    """
    Relocate 8 of node1's 10 vnode tokens to node3 with nodetool
    taketoken and verify token counts moved and no data was lost.
    """
    debug("Creating a ring")
    cluster = self.cluster
    cluster.set_configuration_options(values={
        'initial_token': None,
        'num_tokens': 10,
        'hinted_handoff_enabled': False,
        'write_request_timeout_in_ms': 60000,
        'read_request_timeout_in_ms': 60000,
        'dynamic_snitch_badness_threshold': 0.0}, batch_commitlog=True)
    cluster.populate(3).start()
    node1, node2, node3 = cluster.nodelist()
    # populate(3).start() above already started every node; the
    # redundant second cluster.start() call has been removed (also
    # dropped the stray semicolons, matching the cleaned-up twin of
    # this test).

    debug("Set to talk to node 2")
    n2cursor = self.patient_cql_connection(node2).cursor()
    self.create_ks(n2cursor, 'ks', 2)
    create_c1c2_table(self, n2cursor)

    debug("Generating some data for all nodes")
    for n in xrange(10, 20):
        insert_c1c2(n2cursor, n, 'ALL')
    node1.flush()

    debug("Writing data to node2")
    for n in xrange(30, 1000):
        insert_c1c2(n2cursor, n, 'ONE')
    node2.flush()

    debug("Getting token from node 1")
    n1cursor = self.patient_cql_connection(node1).cursor()
    n1cursor.execute('SELECT tokens FROM system.local')
    n1tokens = n1cursor.fetchone()
    n3cursor = self.patient_cql_connection(node3).cursor()
    n3cursor.execute('SELECT tokens FROM system.local')
    n3tokens = n3cursor.fetchone()

    debug("Relocate tokens from node1 to node3")
    # Take the first 8 of node1's tokens, escaping each so negative
    # token values are not parsed as nodetool options.
    i = 0
    tl = ""
    for t in n1tokens[0]:
        if i == 8:
            break
        t = '\\%s' % t
        tl = "%s %s" % (tl, t)
        i += 1
    cmd = "taketoken %s" % tl
    debug(cmd)
    node3.nodetool(cmd)
    time.sleep(1)

    debug("Check that the tokens were really moved")
    n3cursor.execute('SELECT tokens FROM system.local')
    n3tokens = n3cursor.fetchone()
    n1cursor.execute('SELECT tokens FROM system.local')
    n1tokens = n1cursor.fetchone()
    debug("n1 %s n3 %s" % (n1tokens, n3tokens))
    # node3 now owns its 10 original tokens plus the 8 relocated ones;
    # node1 keeps the remaining 2.
    assert len(n3tokens[0]) == 18
    assert len(n1tokens[0]) == 2

    debug("Checking that no data was lost")
    for n in xrange(10, 20):
        query_c1c2(n2cursor, n, 'ALL')
    for n in xrange(30, 1000):
        query_c1c2(n2cursor, n, 'ALL')
def taketoken_test(self):
    """
    Relocate 8 of node1's 10 vnode tokens to node3 with nodetool
    taketoken and verify token counts moved and no data was lost.
    """
    debug("Creating a ring")
    cluster = self.cluster
    cluster.set_configuration_options(values={
        'initial_token': None,
        'num_tokens': 10,
        'hinted_handoff_enabled': False,
        'write_request_timeout_in_ms': 60000,
        'read_request_timeout_in_ms': 60000,
        'dynamic_snitch_badness_threshold': 0.0
    }, batch_commitlog=True)
    cluster.populate(3).start()
    node1, node2, node3 = cluster.nodelist()
    # populate(3).start() above already started every node; the
    # redundant second cluster.start() call has been removed.

    debug("Set to talk to node 2")
    n2cursor = self.patient_cql_connection(node2).cursor()
    self.create_ks(n2cursor, 'ks', 2)
    create_c1c2_table(self, n2cursor)

    debug("Generating some data for all nodes")
    for n in xrange(10, 20):
        insert_c1c2(n2cursor, n, 'ALL')
    node1.flush()

    debug("Writing data to node2")
    for n in xrange(30, 1000):
        insert_c1c2(n2cursor, n, 'ONE')
    node2.flush()

    debug("Getting token from node 1")
    n1cursor = self.patient_cql_connection(node1).cursor()
    n1cursor.execute('SELECT tokens FROM system.local')
    n1tokens = n1cursor.fetchone()
    n3cursor = self.patient_cql_connection(node3).cursor()
    n3cursor.execute('SELECT tokens FROM system.local')
    n3tokens = n3cursor.fetchone()

    debug("Relocate tokens from node1 to node3")
    # Take the first 8 of node1's tokens, escaping each so negative
    # token values are not parsed as nodetool options.
    i = 0
    tl = ""
    for t in n1tokens[0]:
        if i == 8:
            break
        t = '\\%s' % t
        tl = "%s %s" % (tl, t)
        i += 1
    cmd = "taketoken %s" % tl
    debug(cmd)
    node3.nodetool(cmd)
    time.sleep(1)

    debug("Check that the tokens were really moved")
    n3cursor.execute('SELECT tokens FROM system.local')
    n3tokens = n3cursor.fetchone()
    n1cursor.execute('SELECT tokens FROM system.local')
    n1tokens = n1cursor.fetchone()
    debug("n1 %s n3 %s" % (n1tokens, n3tokens))
    # node3 now owns its 10 original tokens plus the 8 relocated ones;
    # node1 keeps the remaining 2.
    assert len(n3tokens[0]) == 18
    assert len(n1tokens[0]) == 2

    debug("Checking that no data was lost")
    for n in xrange(10, 20):
        query_c1c2(n2cursor, n, 'ALL')
    for n in xrange(30, 1000):
        query_c1c2(n2cursor, n, 'ALL')
def consistent_reads_after_relocate_test(self):
    """
    Write rows while node3 is down (hints off), relocate 8 of node1's
    tokens to node3, and verify CL.ALL reads still see every row.
    """
    debug("Creating a ring")
    cluster = self.cluster
    cluster.set_configuration_options(values={
        'initial_token': None,
        'num_tokens': 10,
        'hinted_handoff_enabled': False,
        'write_request_timeout_in_ms': 60000,
        'read_request_timeout_in_ms': 60000,
        'dynamic_snitch_badness_threshold': 0.0
    }, batch_commitlog=True)
    cluster.populate(3).start()
    node1, node2, node3 = cluster.nodelist()
    # populate(3).start() above already started every node; the
    # redundant second cluster.start() call has been removed.

    debug("Set to talk to node 2")
    n2cursor = self.patient_cql_connection(node2).cursor()
    self.create_ks(n2cursor, 'ks', 2)
    create_c1c2_table(self, n2cursor)

    debug("Generating some data for all nodes")
    for n in xrange(10, 20):
        insert_c1c2(n2cursor, n, 'ALL')
    node1.flush()

    # Fixed log message: it previously said "node1" but node3 is stopped.
    debug("Taking down node3")
    node3.stop(wait_other_notice=True)

    debug("Writing data to node2")
    for n in xrange(30, 1000):
        insert_c1c2(n2cursor, n, 'ONE')
    node2.flush()

    # Fixed log message: it previously said "node1" but node3 restarts.
    debug("Restart node3")
    node3.start(wait_other_notice=True)

    debug("Getting token from node 1")
    n1cursor = self.patient_cql_connection(node1).cursor()
    n1cursor.execute('SELECT tokens FROM system.local')
    tokens = n1cursor.fetchone()

    debug("Relocate tokens from node1 to node3")
    tl = " ".join(str(t) for t in list(tokens[0])[:8])
    cmd = "taketoken %s" % tl
    debug(cmd)
    node3.nodetool(cmd)

    n1cursor.execute('SELECT tokens FROM system.local')
    tokens = n1cursor.fetchone()
    debug("%s" % tokens)
    # NOTE(review): fetchone() returns a single-column row here, so this
    # asserts the row length, not the remaining token count; the sibling
    # taketoken test checks len(tokens[0]) == 2 — confirm the intent.
    assert len(tokens) == 2

    debug("Checking that no data was lost")
    for n in xrange(10, 20):
        query_c1c2(n2cursor, n, 'ALL')
    for n in xrange(30, 1000):
        query_c1c2(n2cursor, n, 'ALL')