def _bootstrap_new_node(self):
    """Bootstrap one extra node into the (upgraded) cluster and re-verify data.

    Check we can bootstrap a new node on the upgraded cluster.
    """
    debug("Adding a node to the cluster")
    # Give each added node a distinct remote-debug port derived from ring size.
    debug_port = str(2000 + len(self.cluster.nodes))
    bootstrapped = new_node(self.cluster, remote_debug_port=debug_port)
    bootstrapped.start(use_jna=True, wait_other_notice=True)
    # Re-run the standard write / counter / verification cycle on the grown ring.
    self._write_values()
    self._increment_counters()
    self._check_values()
    self._check_counters()
def read_from_bootstrapped_node_test(self):
    """Test bootstrapped node sees existing data, eg. CASSANDRA-6648"""
    cluster = self.cluster
    cluster.populate(3)
    version = cluster.version()
    cluster.start()

    node1 = cluster.nodes['node1']
    # Pre-2.1 cassandra-stress uses the legacy option syntax.
    if version < "2.1":
        node1.stress(['-n', '10000'])
    else:
        node1.stress(['write', 'n=10000', '-rate', 'threads=8'])

    node4 = new_node(cluster)
    node4.start()

    session = self.patient_cql_connection(node4)
    # Fix: on 2.1+ cassandra-stress writes to the lowercase, unquoted
    # keyspace1.standard1; the quoted "Keyspace1"."Standard1" only exists
    # for the pre-2.1 legacy stress tool, so the hard-coded name failed
    # against newer clusters (matches the sibling version-aware test).
    if version >= '2.1':
        stress_table = 'keyspace1.standard1'
    else:
        stress_table = '"Keyspace1"."Standard1"'
    rows = session.execute('select * from %s limit 10' % stress_table)
    assert len(list(rows)) == 10
def consistent_reads_after_bootstrap_test(self):
    """Write with one node down, then bootstrap a third node and check that
    reads at CL.ALL from the new node see every key (no data lost during
    bootstrap)."""
    debug("Creating a ring")
    cluster = self.cluster
    # Long timeouts + disabled hints so missing data cannot be papered over
    # by hinted handoff; batch commitlog so writes are durable immediately.
    cluster.set_configuration_options(
        values={'hinted_handoff_enabled': False,
                'write_request_timeout_in_ms': 60000,
                'read_request_timeout_in_ms': 60000,
                'dynamic_snitch_badness_threshold': 0.0},
        batch_commitlog=True)
    # Fix: populate(2).start() already starts the cluster; the original
    # redundantly called cluster.start() a second time right after.
    cluster.populate(2).start()
    [node1, node2] = cluster.nodelist()

    debug("Set to talk to node 2")
    n2cursor = self.patient_cql_connection(node2)
    self.create_ks(n2cursor, 'ks', 2)
    create_c1c2_table(self, n2cursor)

    debug("Generating some data for all nodes")
    for n in xrange(10, 20):
        insert_c1c2(n2cursor, n, ConsistencyLevel.ALL)

    node1.flush()
    debug("Taking down node1")
    node1.stop(wait_other_notice=True)

    debug("Writing data to only node2")
    for n in xrange(30, 1000):
        insert_c1c2(n2cursor, n, ConsistencyLevel.ONE)
    node2.flush()

    debug("Restart node1")
    node1.start(wait_other_notice=True)

    debug("Boostraping node3")
    node3 = new_node(cluster)
    node3.start()

    n3cursor = self.patient_cql_connection(node3)
    n3cursor.execute("USE ks")  # stray trailing semicolon removed

    debug("Checking that no data was lost")
    # CL.ALL reads must succeed for both the fully-replicated range and the
    # range written while node1 was down.
    for n in xrange(10, 20):
        query_c1c2(n3cursor, n, ConsistencyLevel.ALL)
    for n in xrange(30, 1000):
        query_c1c2(n3cursor, n, ConsistencyLevel.ALL)
def read_from_bootstrapped_node_test(self):
    """Test bootstrapped node sees existing data, eg. CASSANDRA-6648"""
    cluster = self.cluster
    cluster.populate(3)
    version = cluster.version()
    cluster.start()

    node1 = cluster.nodes['node1']
    # The legacy stress tool (pre-2.1) takes different arguments.
    if version < "2.1":
        stress_args = ['-n', '10000']
    else:
        stress_args = ['write', 'n=10000', '-rate', 'threads=8']
    node1.stress(stress_args)

    node4 = new_node(cluster)
    node4.start()

    session = self.patient_cql_connection(node4)
    # 2.1+ stress writes lowercase keyspace1.standard1; older versions use
    # the quoted mixed-case names.
    stress_table = ('keyspace1.standard1'
                    if self.cluster.version() >= '2.1'
                    else '"Keyspace1"."Standard1"')
    rows = session.execute('select * from %s limit 10' % stress_table)
    assert len(list(rows)) == 10
def simple_bootstrap_test(self):
    """Bootstrap a second node while reads are in flight; verify no read
    errors and that data ends up evenly split between the two nodes."""
    cluster = self.cluster
    tokens = cluster.balanced_tokens(2)
    keys = 10000

    # Create a single node cluster
    cluster.populate(1, tokens=[tokens[0]]).start(wait_other_notice=True)
    node1 = cluster.nodes["node1"]

    session = self.patient_cql_connection(node1)
    self.create_ks(session, 'ks', 1)
    self.create_cf(session, 'cf', columns={'c1': 'text', 'c2': 'text'})

    for key in xrange(0, keys):
        insert_c1c2(session, key, ConsistencyLevel.ONE)
    node1.flush()
    initial_size = node1.data_size()

    # Reads inserted data all during the boostrap process. We shouldn't
    # get any error
    reader = self.go(lambda _: query_c1c2(
        session, random.randint(0, keys - 1), ConsistencyLevel.ONE))

    # Boostraping a new node
    node2 = new_node(cluster, token=tokens[1])
    node2.start()
    node2.watch_log_for("Listening for thrift clients...")
    reader.check()

    node1.cleanup()
    time.sleep(.5)
    reader.check()

    # After cleanup each node should hold roughly half of the original data.
    size1, size2 = node1.data_size(), node2.data_size()
    assert_almost_equal(size1, size2, error=0.3)
    assert_almost_equal(initial_size, 2 * size1)
def simple_bootstrap_test(self):
    """Exercise bootstrap under concurrent reads: start with one node, insert
    data, bootstrap a second node while a background reader runs, then check
    the data is split evenly and no read ever failed."""
    cluster = self.cluster
    tokens = cluster.balanced_tokens(2)
    keys = 10000

    # Create a single node cluster
    cluster.populate(1, tokens=[tokens[0]]).start(wait_other_notice=True)
    node1 = cluster.nodes["node1"]

    session = self.patient_cql_connection(node1)
    self.create_ks(session, 'ks', 1)
    self.create_cf(session, 'cf', columns={'c1': 'text', 'c2': 'text'})

    for k in xrange(0, keys):
        insert_c1c2(session, k, ConsistencyLevel.ONE)
    node1.flush()
    initial_size = node1.data_size()

    # Reads inserted data all during the boostrap process. We shouldn't
    # get any error
    def random_read(_):
        query_c1c2(session, random.randint(0, keys - 1), ConsistencyLevel.ONE)
    reader = self.go(random_read)

    # Boostraping a new node
    node2 = new_node(cluster, token=tokens[1])
    node2.start()
    node2.watch_log_for("Listening for thrift clients...")
    reader.check()

    node1.cleanup()
    time.sleep(.5)
    reader.check()

    # Data should now be balanced: each node ~half the pre-bootstrap size.
    size1 = node1.data_size()
    size2 = node2.data_size()
    assert_almost_equal(size1, size2, error=0.3)
    assert_almost_equal(initial_size, 2 * size1)
def decomission_test(self):
    """Decommission a node from a 4-node ring and verify data availability
    and on-disk size redistribution; on pre-vnode versions (<= 1.2) also
    exercise removeToken of a dead node and re-adding a replacement."""
    cluster = self.cluster
    tokens = cluster.balanced_tokens(4)
    cluster.populate(4, tokens=tokens).start()
    [node1, node2, node3, node4] = cluster.nodelist()

    cursor = self.patient_cql_connection(node1)
    self.create_ks(cursor, 'ks', 2)
    self.create_cf(cursor, 'cf', columns={'c1': 'text', 'c2': 'text'})

    for key in xrange(0, 10000):
        insert_c1c2(cursor, key, ConsistencyLevel.QUORUM)
    cluster.flush()

    sizes = [node.data_size() for node in cluster.nodelist() if node.is_running()]
    init_size = sizes[0]
    # Balanced tokens -> all nodes should hold about the same amount.
    assert_almost_equal(*sizes)

    time.sleep(.5)
    node4.decommission()
    node4.stop()
    cluster.cleanup()
    time.sleep(.5)

    # Check we can get all the keys
    for key in xrange(0, 10000):
        query_c1c2(cursor, key, ConsistencyLevel.QUORUM)

    sizes = [node.data_size() for node in cluster.nodelist() if node.is_running()]
    three_node_sizes = sizes
    assert_almost_equal(sizes[0], sizes[1])
    assert_almost_equal((2.0 / 3.0) * sizes[0], sizes[2])
    assert_almost_equal(sizes[2], init_size)

    # removeToken only exists on pre-vnode versions.
    if cluster.version() <= '1.2':
        node3.stop(wait_other_notice=True)
        node1.removeToken(tokens[2])
        time.sleep(.5)
        cluster.cleanup()
        time.sleep(.5)

        # Check we can get all the keys
        for key in xrange(0, 10000):
            query_c1c2(cursor, key, ConsistencyLevel.QUORUM)

        sizes = [node.data_size() for node in cluster.nodelist() if node.is_running()]
        assert_almost_equal(*sizes)
        assert_almost_equal(sizes[0], 2 * init_size)

        node5 = new_node(cluster, token=(tokens[2] + 1)).start()
        time.sleep(.5)
        cluster.cleanup()
        time.sleep(.5)
        cluster.compact()
        time.sleep(.5)

        # Check we can get all the keys
        for key in xrange(0, 10000):
            query_c1c2(cursor, key, ConsistencyLevel.QUORUM)

        sizes = [node.data_size() for node in cluster.nodelist() if node.is_running()]
        # We should be back to the earlier 3 nodes situation
        for i in xrange(0, len(sizes)):
            assert_almost_equal(sizes[i], three_node_sizes[i])
def decomission_test(self):
    """Verify ring shrink/regrow behaviour: decommission one of four nodes,
    check all keys still readable at QUORUM and disk usage rebalances; on
    versions <= 1.2, additionally remove a dead node's token and bootstrap
    a replacement, expecting sizes to return to the 3-node baseline."""
    cluster = self.cluster
    tokens = cluster.balanced_tokens(4)
    cluster.populate(4, tokens=tokens).start()
    [node1, node2, node3, node4] = cluster.nodelist()

    cursor = self.patient_cql_connection(node1)
    self.create_ks(cursor, 'ks', 2)
    self.create_cf(cursor, 'cf', columns={'c1': 'text', 'c2': 'text'})

    for n in xrange(0, 10000):
        insert_c1c2(cursor, n, ConsistencyLevel.QUORUM)
    cluster.flush()

    def running_sizes():
        # Data sizes of the currently-running nodes, in nodelist order.
        return [node.data_size()
                for node in cluster.nodelist() if node.is_running()]

    sizes = running_sizes()
    init_size = sizes[0]
    assert_almost_equal(*sizes)

    time.sleep(.5)
    node4.decommission()
    node4.stop()
    cluster.cleanup()
    time.sleep(.5)

    # Check we can get all the keys
    for n in xrange(0, 10000):
        query_c1c2(cursor, n, ConsistencyLevel.QUORUM)

    sizes = running_sizes()
    three_node_sizes = sizes
    assert_almost_equal(sizes[0], sizes[1])
    assert_almost_equal((2.0 / 3.0) * sizes[0], sizes[2])
    assert_almost_equal(sizes[2], init_size)

    if cluster.version() <= '1.2':
        # removeToken path: kill node3 and remove its token from the ring.
        node3.stop(wait_other_notice=True)
        node1.removeToken(tokens[2])
        time.sleep(.5)
        cluster.cleanup()
        time.sleep(.5)

        # Check we can get all the keys
        for n in xrange(0, 10000):
            query_c1c2(cursor, n, ConsistencyLevel.QUORUM)

        sizes = running_sizes()
        assert_almost_equal(*sizes)
        assert_almost_equal(sizes[0], 2 * init_size)

        # Re-grow the ring with a node just past the removed token.
        node5 = new_node(cluster, token=(tokens[2] + 1)).start()
        time.sleep(.5)
        cluster.cleanup()
        time.sleep(.5)
        cluster.compact()
        time.sleep(.5)

        # Check we can get all the keys
        for n in xrange(0, 10000):
            query_c1c2(cursor, n, ConsistencyLevel.QUORUM)

        sizes = running_sizes()
        # We should be back to the earlier 3 nodes situation
        for i in xrange(0, len(sizes)):
            assert_almost_equal(sizes[i], three_node_sizes[i])