def test_wait_for_schema(self):
    """
    Bootstrap a second node while node1's schema-migration replies are
    artificially delayed (via byteman), and verify the new node still
    completes streaming and the data remains fully readable.

    Steps demonstrated by the code below:
      1. Start a one-node cluster (byteman installed) and load 1000 rows
         into ks.cf at RF=2.
      2. Inject a migration-request sleep on node1, then start node2 with
         ``-Dcassandra.migration_task_wait_in_seconds=20``.
      3. Wait for node2's log to show streaming started
         ('Prepare completed. Receiving').
      4. Stop node1 and assert a count(*) against node2 still returns
         all 1000 rows.
    """
    # Start a one-node cluster; byteman is needed for the fault injection.
    cluster = self.cluster
    cluster.populate(1, install_byteman=True)
    node1 = cluster.nodes['node1']
    cluster.start()

    session = self.patient_cql_connection(node1)
    # RF=2 so node2 owns a full replica once it joins.
    create_ks(session, 'ks', 2)
    create_cf(session, 'cf', columns={'c1': 'text', 'c2': 'text'})

    keys = 1000
    insert_statement = session.prepare(
        "INSERT INTO ks.cf (key, c1, c2) VALUES (?, 'value1', 'value2')")
    execute_concurrent_with_args(session, insert_statement,
                                 [['k%d' % k] for k in range(keys)])

    # Flush and compact so the data sits in stable sstables before streaming.
    node1.flush()
    node1.compact()

    node2 = new_node(cluster)
    node2.set_configuration_options(values={'request_timeout_in_ms': 10000})
    mark = node2.mark_log()
    # Delay node1's migration responses so node2 has to wait for schema.
    node1.byteman_submit(['./byteman/migration_request_sleep.btm'])
    node2.start(jvm_args=["-Dcassandra.migration_task_wait_in_seconds=20"],
                set_migration_task=False,
                wait_for_binary_proto=True)
    node2.watch_log_for('Prepare completed. Receiving', from_mark=mark,
                        timeout=6)
    node2.flush()
    node2.compact()

    # With node1 down, the count must be served entirely by node2.
    node1.stop()
    rows = session.execute('SELECT count(*) from ks.cf')
    assert rows[0][0] == 1000

    cluster.stop()
def _base_bootstrap_test(self, bootstrap=None, bootstrap_from_version=None,
                         enable_ssl=None):
    """
    Shared driver for the bootstrap tests: load a single node with data,
    bootstrap a second node at the complementary token, then assert the
    data ended up split roughly evenly between the two nodes and that the
    new node reports bootstrap state COMPLETED.

    :param bootstrap: optional callable ``(cluster, token) -> node`` that
        performs the actual bootstrap; defaults to starting a plain new
        node pinned at *token*.
    :param bootstrap_from_version: if set, node1 is started on this older
        install version before the new node joins (upgrade-bootstrap path).
    :param enable_ssl: if truthy, internode SSL is enabled on the cluster
        before any node starts.
    """
    def default_bootstrap(cluster, token):
        # Default strategy: add one new node pinned at the given token and
        # wait until its native protocol is up.
        node2 = new_node(cluster)
        node2.set_configuration_options(values={'initial_token': token})
        node2.start(wait_for_binary_proto=True)
        return node2

    if bootstrap is None:
        bootstrap = default_bootstrap

    cluster = self.cluster

    if enable_ssl:
        logger.debug("***using internode ssl***")
        generate_ssl_stores(self.fixture_dtest_setup.test_path)
        cluster.enable_internode_ssl(self.fixture_dtest_setup.test_path)

    # Two balanced tokens: node1 takes tokens[0] now, node2 will take
    # tokens[1], so each should own ~half the ring.
    tokens = cluster.balanced_tokens(2)
    cluster.set_configuration_options(values={'num_tokens': 1})
    logger.debug("[node1, node2] tokens: %r" % (tokens, ))

    keys = 10000

    # Create a single node cluster
    cluster.populate(1)
    node1 = cluster.nodelist()[0]

    if bootstrap_from_version:
        logger.debug("starting source node on version {}".format(
            bootstrap_from_version))
        node1.set_install_dir(version=bootstrap_from_version)
    node1.set_configuration_options(values={'initial_token': tokens[0]})
    cluster.start(wait_other_notice=True)

    session = self.patient_cql_connection(node1)
    create_ks(session, 'ks', 1)
    create_cf(session, 'cf', columns={'c1': 'text', 'c2': 'text'})

    # record the size before inserting any of our own data
    empty_size = data_size(node1, 'ks', 'cf')
    logger.debug("node1 empty size for ks.cf: %s" % float(empty_size))

    insert_statement = session.prepare(
        "INSERT INTO ks.cf (key, c1, c2) VALUES (?, 'value1', 'value2')")
    execute_concurrent_with_args(session, insert_statement,
                                 [['k%d' % k] for k in range(keys)])

    # Flush and compact so data_size reflects a settled on-disk footprint.
    node1.flush()
    node1.compact()
    initial_size = data_size(node1, 'ks', 'cf')
    logger.debug("node1 size for ks.cf before bootstrapping node2: %s"
                 % float(initial_size))

    # Reads inserted data all during the bootstrap process. We shouldn't
    # get any error
    query_c1c2(session, random.randint(0, keys - 1), ConsistencyLevel.ONE)
    session.shutdown()

    # Bootstrapping a new node in the current version
    node2 = bootstrap(cluster, tokens[1])
    node2.compact()

    # After cleanup+compaction node1 should hold only its own half.
    node1.cleanup()
    logger.debug("node1 size for ks.cf after cleanup: %s"
                 % float(data_size(node1, 'ks', 'cf')))
    node1.compact()
    logger.debug("node1 size for ks.cf after compacting: %s"
                 % float(data_size(node1, 'ks', 'cf')))

    logger.debug("node2 size for ks.cf after compacting: %s"
                 % float(data_size(node2, 'ks', 'cf')))

    size1 = float(data_size(node1, 'ks', 'cf'))
    size2 = float(data_size(node2, 'ks', 'cf'))

    # The two nodes should hold similar amounts of data (30% tolerance),
    # and together their payload should equal the original payload.
    assert_almost_equal(size1, size2, error=0.3)
    assert_almost_equal(float(initial_size - empty_size),
                        2 * (size1 - float(empty_size)))

    assert_bootstrap_state(self, node2, 'COMPLETED')
# NOTE(review): this appears to be a duplicate of the _base_bootstrap_test
# definition earlier in this chunk (differences are whitespace-only); in
# Python the later def shadows the earlier one — verify against the full
# file and consider removing one copy.
def _base_bootstrap_test(self, bootstrap=None, bootstrap_from_version=None,
                         enable_ssl=None):
    """
    Shared driver for the bootstrap tests: load a single node with data,
    bootstrap a second node at the complementary token, then assert the
    data ended up split roughly evenly between the two nodes and that the
    new node reports bootstrap state COMPLETED.

    :param bootstrap: optional callable ``(cluster, token) -> node`` that
        performs the actual bootstrap; defaults to starting a plain new
        node pinned at *token*.
    :param bootstrap_from_version: if set, node1 is started on this older
        install version before the new node joins (upgrade-bootstrap path).
    :param enable_ssl: if truthy, internode SSL is enabled on the cluster
        before any node starts.
    """
    def default_bootstrap(cluster, token):
        # Default strategy: add one new node pinned at the given token and
        # wait until its native protocol is up.
        node2 = new_node(cluster)
        node2.set_configuration_options(values={'initial_token': token})
        node2.start(wait_for_binary_proto=True)
        return node2

    if bootstrap is None:
        bootstrap = default_bootstrap

    cluster = self.cluster

    if enable_ssl:
        logger.debug("***using internode ssl***")
        generate_ssl_stores(self.fixture_dtest_setup.test_path)
        cluster.enable_internode_ssl(self.fixture_dtest_setup.test_path)

    # Two balanced tokens: node1 takes tokens[0] now, node2 will take
    # tokens[1], so each should own ~half the ring.
    tokens = cluster.balanced_tokens(2)
    cluster.set_configuration_options(values={'num_tokens': 1})
    logger.debug("[node1, node2] tokens: %r" % (tokens,))

    keys = 10000

    # Create a single node cluster
    cluster.populate(1)
    node1 = cluster.nodelist()[0]

    if bootstrap_from_version:
        logger.debug("starting source node on version {}".format(
            bootstrap_from_version))
        node1.set_install_dir(version=bootstrap_from_version)
    node1.set_configuration_options(values={'initial_token': tokens[0]})
    cluster.start(wait_other_notice=True)

    session = self.patient_cql_connection(node1)
    create_ks(session, 'ks', 1)
    create_cf(session, 'cf', columns={'c1': 'text', 'c2': 'text'})

    # record the size before inserting any of our own data
    empty_size = data_size(node1, 'ks', 'cf')
    logger.debug("node1 empty size for ks.cf: %s" % float(empty_size))

    insert_statement = session.prepare(
        "INSERT INTO ks.cf (key, c1, c2) VALUES (?, 'value1', 'value2')")
    execute_concurrent_with_args(session, insert_statement,
                                 [['k%d' % k] for k in range(keys)])

    # Flush and compact so data_size reflects a settled on-disk footprint.
    node1.flush()
    node1.compact()
    initial_size = data_size(node1, 'ks', 'cf')
    logger.debug("node1 size for ks.cf before bootstrapping node2: %s"
                 % float(initial_size))

    # Reads inserted data all during the bootstrap process. We shouldn't
    # get any error
    query_c1c2(session, random.randint(0, keys - 1), ConsistencyLevel.ONE)
    session.shutdown()

    # Bootstrapping a new node in the current version
    node2 = bootstrap(cluster, tokens[1])
    node2.compact()

    # After cleanup+compaction node1 should hold only its own half.
    node1.cleanup()
    logger.debug("node1 size for ks.cf after cleanup: %s"
                 % float(data_size(node1, 'ks', 'cf')))
    node1.compact()
    logger.debug("node1 size for ks.cf after compacting: %s"
                 % float(data_size(node1, 'ks', 'cf')))

    logger.debug("node2 size for ks.cf after compacting: %s"
                 % float(data_size(node2, 'ks', 'cf')))

    size1 = float(data_size(node1, 'ks', 'cf'))
    size2 = float(data_size(node2, 'ks', 'cf'))

    # The two nodes should hold similar amounts of data (30% tolerance),
    # and together their payload should equal the original payload.
    assert_almost_equal(size1, size2, error=0.3)
    assert_almost_equal(float(initial_size - empty_size),
                        2 * (size1 - float(empty_size)))

    assert_bootstrap_state(self, node2, 'COMPLETED')