def _test_tokenaware_one_node_down(self, keyspace, rf, accepted):
    """Verify which consistency levels succeed/fail when one replica is down.

    With token-aware routing and the given replication factor ``rf``, stop
    the replica coordinating the test partition (node 2) and assert that
    writes and reads succeed exactly for the ``accepted`` consistency
    levels and fail for all other single-DC levels.
    """
    cluster = Cluster(
        load_balancing_policy=TokenAwarePolicy(RoundRobinPolicy()))
    session = cluster.connect()
    wait_for_up(cluster, 1, wait=False)
    wait_for_up(cluster, 2)
    create_schema(session, keyspace, replication_factor=rf)
    self._insert(session, keyspace, count=1)
    self._query(session, keyspace, count=1)
    # Token-aware routing sends the single query to the replica, node 2.
    self.coordinator_stats.assert_query_count_equals(self, 1, 0)
    self.coordinator_stats.assert_query_count_equals(self, 2, 1)
    self.coordinator_stats.assert_query_count_equals(self, 3, 0)
    try:
        force_stop(2)
        wait_for_down(cluster, 2)
        self._assert_writes_succeed(session, keyspace, accepted)
        # ANY is a write-only consistency level, so it is excluded from reads.
        self._assert_reads_succeed(session, keyspace,
                                   accepted - set([ConsistencyLevel.ANY]))
        self._assert_writes_fail(session, keyspace,
                                 SINGLE_DC_CONSISTENCY_LEVELS - accepted)
        self._assert_reads_fail(session, keyspace,
                                SINGLE_DC_CONSISTENCY_LEVELS - accepted)
    finally:
        # Restart the node so later tests see a healthy cluster.
        start(2)
        wait_for_up(cluster, 2)
    # Fix: the original leaked the Cluster; release its resources.
    cluster.shutdown()
def test_white_list(self):
    """WhiteListRoundRobinPolicy should send all traffic to the single
    whitelisted host, never reconnect to ignored hosts, and raise
    NoHostAvailable once the whitelisted host is down."""
    use_singledc()
    keyspace = 'test_white_list'
    cluster = Cluster(('127.0.0.2',),
                      load_balancing_policy=WhiteListRoundRobinPolicy((IP_FORMAT % 2,)),
                      protocol_version=PROTOCOL_VERSION,
                      topology_event_refresh_window=0,
                      status_event_refresh_window=0)
    session = cluster.connect()
    try:
        self._wait_for_nodes_up([1, 2, 3])
        create_schema(cluster, session, keyspace)
        self._insert(session, keyspace)
        self._query(session, keyspace)
        # Only the whitelisted node (2) should coordinate queries.
        self.coordinator_stats.assert_query_count_equals(self, 1, 0)
        self.coordinator_stats.assert_query_count_equals(self, 2, 12)
        self.coordinator_stats.assert_query_count_equals(self, 3, 0)
        # white list policy should not allow reconnecting to ignored hosts
        force_stop(3)
        self._wait_for_nodes_down([3])
        self.assertFalse(cluster.metadata._hosts[IP_FORMAT % 3].is_currently_reconnecting())
        self.coordinator_stats.reset_counts()
        force_stop(2)
        self._wait_for_nodes_down([2])
        try:
            self._query(session, keyspace)
            self.fail()
        except NoHostAvailable:
            pass
    finally:
        # Fix: shut down in a finally block so an unexpected exception
        # above does not leak the Cluster (the original only shut down
        # on the success path).
        cluster.shutdown()
def test_roundrobin_two_dcs_2(self):
    """Round-robin balancing ignores DC boundaries: queries spread evenly
    over all live nodes, and a node bootstrapped into dc1 joins the
    rotation while the stopped node drops out."""
    use_multidc([2, 2])
    keyspace = 'test_roundrobin_two_dcs_2'
    cluster, session = self._cluster_session_with_lbp(RoundRobinPolicy())
    self._wait_for_nodes_up(range(1, 5), cluster)
    create_schema(cluster, session, keyspace, replication_strategy=[2, 2])
    self._insert(session, keyspace)
    self._query(session, keyspace)
    # 12 queries spread evenly across the 4 nodes.
    self.coordinator_stats.assert_query_count_equals(self, 1, 3)
    self.coordinator_stats.assert_query_count_equals(self, 2, 3)
    self.coordinator_stats.assert_query_count_equals(self, 3, 3)
    self.coordinator_stats.assert_query_count_equals(self, 4, 3)
    force_stop(1)
    bootstrap(5, 'dc1')
    # reset control connection
    self._insert(session, keyspace, count=1000)
    self._wait_for_nodes_up([5], cluster)
    self.coordinator_stats.reset_counts()
    self._query(session, keyspace)
    # Node 1 is down and gets no traffic; new node 5 shares the load.
    self.coordinator_stats.assert_query_count_equals(self, 1, 0)
    self.coordinator_stats.assert_query_count_equals(self, 2, 3)
    self.coordinator_stats.assert_query_count_equals(self, 3, 3)
    self.coordinator_stats.assert_query_count_equals(self, 4, 3)
    self.coordinator_stats.assert_query_count_equals(self, 5, 3)
    cluster.shutdown()
def test_token_aware_composite_key(self):
    """A write and read keyed on a composite partition key round-trip
    correctly under token-aware routing."""
    use_singledc()
    keyspace = 'test_token_aware_composite_key'
    table = 'composite'
    cluster = Cluster(load_balancing_policy=TokenAwarePolicy(
        RoundRobinPolicy()), protocol_version=PROTOCOL_VERSION)
    session = cluster.connect()
    wait_for_up(cluster, 1, wait=False)
    wait_for_up(cluster, 2, wait=False)
    wait_for_up(cluster, 3)
    create_schema(session, keyspace, replication_factor=2)
    session.execute('CREATE TABLE %s ('
                    'k1 int, '
                    'k2 int, '
                    'i int, '
                    'PRIMARY KEY ((k1, k2)))' % table)
    prepared = session.prepare('INSERT INTO %s '
                               '(k1, k2, i) '
                               'VALUES '
                               '(?, ?, ?)' % table)
    session.execute(prepared.bind((1, 2, 3)))
    results = session.execute('SELECT * FROM %s WHERE k1 = 1 AND k2 = 2' % table)
    # Fix: assertEqual instead of assertTrue(len(...) == 1) so a failure
    # reports the actual row count.
    self.assertEqual(1, len(results))
    # i was written as 3, so it is truthy.
    self.assertTrue(results[0].i)
    cluster.shutdown()
def test_token_aware_composite_key(self):
    """A write and read keyed on a composite partition key round-trip
    correctly under token-aware routing."""
    use_singledc()
    keyspace = 'test_token_aware_composite_key'
    table = 'composite'
    cluster, session = self._cluster_session_with_lbp(
        TokenAwarePolicy(RoundRobinPolicy()))
    self._wait_for_nodes_up(range(1, 4), cluster)
    create_schema(cluster, session, keyspace, replication_factor=2)
    session.execute('CREATE TABLE %s ('
                    'k1 int, '
                    'k2 int, '
                    'i int, '
                    'PRIMARY KEY ((k1, k2)))' % table)
    prepared = session.prepare('INSERT INTO %s '
                               '(k1, k2, i) '
                               'VALUES '
                               '(?, ?, ?)' % table)
    session.execute(prepared.bind((1, 2, 3)))
    results = session.execute('SELECT * FROM %s WHERE k1 = 1 AND k2 = 2' % table)
    # i was written as 3, so it is truthy.
    self.assertTrue(results[0].i)
    cluster.shutdown()
def test_token_aware_with_rf_2(self, use_prepared=False):
    """With RF=2, token-aware routing pins queries on the first replica
    (node 2); when it is stopped, traffic fails over to the other
    replica (node 3) only."""
    use_singledc()
    keyspace = 'test_token_aware_with_rf_2'
    cluster, session = self._cluster_session_with_lbp(
        TokenAwarePolicy(RoundRobinPolicy()))
    self._wait_for_nodes_up(range(1, 4), cluster)
    create_schema(cluster, session, keyspace, replication_factor=2)
    self._insert(session, keyspace)
    self._query(session, keyspace)
    # All 12 queries routed to the primary replica.
    self.coordinator_stats.assert_query_count_equals(self, 1, 0)
    self.coordinator_stats.assert_query_count_equals(self, 2, 12)
    self.coordinator_stats.assert_query_count_equals(self, 3, 0)
    self.coordinator_stats.reset_counts()
    stop(2)
    self._wait_for_nodes_down([2], cluster)
    self._query(session, keyspace)
    # Failover: the surviving replica (node 3) coordinates everything.
    self.coordinator_stats.assert_query_count_equals(self, 1, 0)
    self.coordinator_stats.assert_query_count_equals(self, 2, 0)
    self.coordinator_stats.assert_query_count_equals(self, 3, 12)
    cluster.shutdown()
def test_token_aware_composite_key(self):
    """Token-aware routing with a composite partition key: both the write
    and the read must be coordinated by a replica for the routing key."""
    use_singledc()
    keyspace = 'test_token_aware_composite_key'
    table = 'composite'
    cluster, session = self._cluster_session_with_lbp(TokenAwarePolicy(RoundRobinPolicy()))
    self._wait_for_nodes_up(range(1, 4), cluster)
    create_schema(cluster, session, keyspace, replication_factor=2)
    session.execute('CREATE TABLE %s ('
                    'k1 int, '
                    'k2 int, '
                    'i int, '
                    'PRIMARY KEY ((k1, k2)))' % table)
    prepared = session.prepare('INSERT INTO %s '
                               '(k1, k2, i) '
                               'VALUES '
                               '(?, ?, ?)' % table)
    bound = prepared.bind((1, 2, 3))
    result = session.execute(bound)
    # The insert must have been sent to a replica for this routing key.
    self.assertIn(result.response_future.attempted_hosts[0],
                  cluster.metadata.get_replicas(keyspace, bound.routing_key))

    # There could be race condition with querying a node
    # which doesn't yet have the data so we query one of
    # the replicas
    results = session.execute(SimpleStatement('SELECT * FROM %s WHERE k1 = 1 AND k2 = 2' % table,
                                              routing_key=bound.routing_key))
    self.assertIn(results.response_future.attempted_hosts[0],
                  cluster.metadata.get_replicas(keyspace, bound.routing_key))
    # i was written as 3, so it is truthy.
    self.assertTrue(results[0].i)
    cluster.shutdown()
def test_rfthree_tokenaware_none_down(self):
    """With RF=3 and all nodes up, every single-DC consistency level
    succeeds for writes, and every read level (except ANY, which is
    write-only) succeeds with node 2 as the expected coordinator."""
    keyspace = 'test_rfthree_tokenaware_none_down'
    cluster = Cluster(load_balancing_policy=TokenAwarePolicy(
        RoundRobinPolicy()), protocol_version=PROTOCOL_VERSION)
    session = cluster.connect()
    wait_for_up(cluster, 1, wait=False)
    wait_for_up(cluster, 2)
    create_schema(cluster, session, keyspace, replication_factor=3)
    self._insert(session, keyspace, count=1)
    self._query(session, keyspace, count=1)
    # Token-aware routing sends the single query to node 2.
    self.coordinator_stats.assert_query_count_equals(self, 1, 0)
    self.coordinator_stats.assert_query_count_equals(self, 2, 1)
    self.coordinator_stats.assert_query_count_equals(self, 3, 0)
    self.coordinator_stats.reset_counts()

    self._assert_writes_succeed(session, keyspace, SINGLE_DC_CONSISTENCY_LEVELS)
    # ANY is excluded: it is not a valid read consistency level.
    self._assert_reads_succeed(session, keyspace,
                               SINGLE_DC_CONSISTENCY_LEVELS - set([ConsistencyLevel.ANY]),
                               expected_reader=2)
    cluster.shutdown()
def test_rfthree_tokenaware_none_down(self):
    """Execution-profile variant: with RF=3 and all nodes up, all write
    levels succeed and all read levels (except write-only ANY) succeed
    with node 2 as the expected coordinator."""
    keyspace = 'test_rfthree_tokenaware_none_down'
    # Policy is passed positionally: the first ExecutionProfile argument
    # is load_balancing_policy.
    cluster = Cluster(protocol_version=PROTOCOL_VERSION,
                      execution_profiles={
                          EXEC_PROFILE_DEFAULT: ExecutionProfile(
                              TokenAwarePolicy(RoundRobinPolicy()))
                      })
    session = cluster.connect(wait_for_all_pools=True)
    wait_for_up(cluster, 1)
    wait_for_up(cluster, 2)
    create_schema(cluster, session, keyspace, replication_factor=3)
    self._insert(session, keyspace, count=1)
    self._query(session, keyspace, count=1)
    # Token-aware routing sends the single query to node 2.
    self.coordinator_stats.assert_query_count_equals(self, 1, 0)
    self.coordinator_stats.assert_query_count_equals(self, 2, 1)
    self.coordinator_stats.assert_query_count_equals(self, 3, 0)
    self.coordinator_stats.reset_counts()

    self._assert_writes_succeed(session, keyspace, SINGLE_DC_CONSISTENCY_LEVELS)
    self._assert_reads_succeed(session, keyspace,
                               SINGLE_DC_CONSISTENCY_LEVELS - set([ConsistencyLevel.ANY]),
                               expected_reader=2)
    cluster.shutdown()
def test_white_list(self):
    """WhiteListRoundRobinPolicy should send all traffic to the single
    whitelisted host, never reconnect to ignored hosts, and raise
    NoHostAvailable once the whitelisted host is down."""
    use_singledc()
    keyspace = 'test_white_list'
    cluster = Cluster(('127.0.0.2',),
                      load_balancing_policy=WhiteListRoundRobinPolicy((IP_FORMAT % 2,)),
                      protocol_version=PROTOCOL_VERSION)
    session = cluster.connect()
    try:
        wait_for_up(cluster, 1, wait=False)
        wait_for_up(cluster, 2, wait=False)
        wait_for_up(cluster, 3)
        create_schema(session, keyspace)
        self._insert(session, keyspace)
        self._query(session, keyspace)
        # Only the whitelisted node (2) should coordinate queries.
        self.coordinator_stats.assert_query_count_equals(self, 1, 0)
        self.coordinator_stats.assert_query_count_equals(self, 2, 12)
        self.coordinator_stats.assert_query_count_equals(self, 3, 0)
        # white list policy should not allow reconnecting to ignored hosts
        force_stop(3)
        wait_for_down(cluster, 3)
        self.assertFalse(cluster.metadata._hosts[IP_FORMAT % 3].is_currently_reconnecting())
        self.coordinator_stats.reset_counts()
        force_stop(2)
        # Fix: wait for the driver to actually mark the host down instead
        # of a flaky fixed time.sleep(10).
        wait_for_down(cluster, 2)
        try:
            self._query(session, keyspace)
            self.fail()
        except NoHostAvailable:
            pass
    finally:
        # Fix: the original never shut the Cluster down, leaking threads
        # and connections.
        cluster.shutdown()
def make_session_and_keyspace(self):
    """Build a connected session whose default profile uses dict rows and
    a 20-second request timeout, with the test keyspace created."""
    # Dict rows + generous timeout on the default execution profile.
    default_profile = ExecutionProfile(request_timeout=20,
                                       row_factory=dict_factory)
    cluster = TestCluster(
        execution_profiles={EXEC_PROFILE_DEFAULT: default_profile})
    session = cluster.connect()
    create_schema(cluster, session, self.keyspace)
    return session
def _test_downgrading_cl(self, keyspace, rf, accepted):
    """Verify DowngradingConsistencyRetryPolicy behavior with one replica
    down: the ``accepted`` consistency levels succeed and all other
    single-DC levels fail, for both reads and writes."""
    cluster = Cluster(
        load_balancing_policy=TokenAwarePolicy(RoundRobinPolicy()),
        default_retry_policy=DowngradingConsistencyRetryPolicy(),
        protocol_version=PROTOCOL_VERSION)
    session = cluster.connect(wait_for_all_pools=True)
    create_schema(cluster, session, keyspace, replication_factor=rf)
    self._insert(session, keyspace, 1)
    self._query(session, keyspace, 1)
    # Token-aware routing sends the single query to the replica, node 2.
    self.coordinator_stats.assert_query_count_equals(self, 1, 0)
    self.coordinator_stats.assert_query_count_equals(self, 2, 1)
    self.coordinator_stats.assert_query_count_equals(self, 3, 0)
    try:
        force_stop(2)
        wait_for_down(cluster, 2)
        self._assert_writes_succeed(session, keyspace, accepted)
        # ANY is a write-only consistency level, so it is excluded from reads.
        self._assert_reads_succeed(session, keyspace,
                                   accepted - set([ConsistencyLevel.ANY]))
        self._assert_writes_fail(session, keyspace,
                                 SINGLE_DC_CONSISTENCY_LEVELS - accepted)
        self._assert_reads_fail(session, keyspace,
                                SINGLE_DC_CONSISTENCY_LEVELS - accepted)
    finally:
        # Restart the node so later tests see a healthy cluster.
        start(2)
        wait_for_up(cluster, 2)
    cluster.shutdown()
def test_roundrobin(self):
    """Round-robin balancing spreads queries evenly and rebalances as
    nodes are stopped, decommissioned, and restarted."""
    use_singledc()
    keyspace = 'test_roundrobin'
    cluster, session = self._cluster_session_with_lbp(RoundRobinPolicy())
    self._wait_for_nodes_up(range(1, 4), cluster)
    create_schema(cluster, session, keyspace, replication_factor=3)
    self._insert(session, keyspace)
    self._query(session, keyspace)
    # 12 queries spread evenly across the 3 nodes.
    self.coordinator_stats.assert_query_count_equals(self, 1, 4)
    self.coordinator_stats.assert_query_count_equals(self, 2, 4)
    self.coordinator_stats.assert_query_count_equals(self, 3, 4)
    force_stop(3)
    self._wait_for_nodes_down([3], cluster)
    self.coordinator_stats.reset_counts()
    self._query(session, keyspace)
    # Down node 3 gets nothing; the survivors split the load.
    self.coordinator_stats.assert_query_count_equals(self, 1, 6)
    self.coordinator_stats.assert_query_count_equals(self, 2, 6)
    self.coordinator_stats.assert_query_count_equals(self, 3, 0)
    decommission(1)
    start(3)
    self._wait_for_nodes_down([1], cluster)
    self._wait_for_nodes_up([3], cluster)
    self.coordinator_stats.reset_counts()
    self._query(session, keyspace)
    # After decommissioning 1 and restarting 3, only 2 and 3 serve.
    self.coordinator_stats.assert_query_count_equals(self, 1, 0)
    self.coordinator_stats.assert_query_count_equals(self, 2, 6)
    self.coordinator_stats.assert_query_count_equals(self, 3, 6)
    cluster.shutdown()
def test_white_list(self):
    """WhiteListRoundRobinPolicy pins all traffic on the single whitelisted
    host (node 2); after that host is decommissioned, queries raise
    NoHostAvailable."""
    use_singledc()
    keyspace = 'test_white_list'
    cluster = Cluster(('127.0.0.2', ),
                      load_balancing_policy=WhiteListRoundRobinPolicy(
                          (IP_FORMAT % 2, )))
    session = cluster.connect()
    try:
        wait_for_up(cluster, 1, wait=False)
        wait_for_up(cluster, 2, wait=False)
        wait_for_up(cluster, 3)
        create_schema(session, keyspace)
        self._insert(session, keyspace)
        self._query(session, keyspace)
        # Only the whitelisted node (2) should coordinate queries.
        self.coordinator_stats.assert_query_count_equals(self, 1, 0)
        self.coordinator_stats.assert_query_count_equals(self, 2, 12)
        self.coordinator_stats.assert_query_count_equals(self, 3, 0)
        self.coordinator_stats.reset_counts()
        decommission(2)
        wait_for_down(cluster, 2, wait=True)
        try:
            self._query(session, keyspace)
            self.fail()
        except NoHostAvailable:
            pass
    finally:
        # Fix: the original never shut the Cluster down, leaking threads
        # and connections.
        cluster.shutdown()
def test_black_list_with_host_filter_policy(self):
    """HostFilterPolicy excludes the ignored host from the query plan and
    does not reconnect to it after it goes down."""
    use_singledc()
    keyspace = 'test_black_list_with_hfp'
    ignored_address = (IP_FORMAT % 2)
    # Filter node 2 out of the child round-robin plan.
    hfp = HostFilterPolicy(
        child_policy=RoundRobinPolicy(),
        predicate=lambda host: host.address != ignored_address
    )
    cluster = Cluster(
        (IP_FORMAT % 1,),
        load_balancing_policy=hfp,
        protocol_version=PROTOCOL_VERSION,
        topology_event_refresh_window=0,
        status_event_refresh_window=0
    )
    # Ensure the cluster is shut down even if assertions fail.
    self.addCleanup(cluster.shutdown)
    session = cluster.connect()
    self._wait_for_nodes_up([1, 2, 3])

    self.assertNotIn(ignored_address, [h.address for h in hfp.make_query_plan()])

    create_schema(cluster, session, keyspace)
    self._insert(session, keyspace)
    self._query(session, keyspace)

    # The ignored host never coordinates; the other two split the load.
    self.coordinator_stats.assert_query_count_equals(self, 1, 6)
    self.coordinator_stats.assert_query_count_equals(self, 2, 0)
    self.coordinator_stats.assert_query_count_equals(self, 3, 6)

    # policy should not allow reconnecting to ignored host
    force_stop(2)
    self._wait_for_nodes_down([2])
    self.assertFalse(cluster.metadata._hosts[ignored_address].is_currently_reconnecting())
def _test_tokenaware_one_node_down(self, keyspace, rf, accepted):
    """Execution-profile variant: with one replica stopped, exactly the
    ``accepted`` single-DC consistency levels succeed for writes and
    reads (ANY excluded from reads); all others fail."""
    cluster = Cluster(protocol_version=PROTOCOL_VERSION,
                      execution_profiles={EXEC_PROFILE_DEFAULT: ExecutionProfile(TokenAwarePolicy(RoundRobinPolicy()))})
    session = cluster.connect(wait_for_all_pools=True)
    wait_for_up(cluster, 1)
    wait_for_up(cluster, 2)
    create_schema(cluster, session, keyspace, replication_factor=rf)
    self._insert(session, keyspace, count=1)
    self._query(session, keyspace, count=1)
    # Token-aware routing sends the single query to the replica, node 2.
    self.coordinator_stats.assert_query_count_equals(self, 1, 0)
    self.coordinator_stats.assert_query_count_equals(self, 2, 1)
    self.coordinator_stats.assert_query_count_equals(self, 3, 0)
    try:
        force_stop(2)
        wait_for_down(cluster, 2)
        self._assert_writes_succeed(session, keyspace, accepted)
        # ANY is a write-only consistency level.
        self._assert_reads_succeed(session, keyspace,
                                   accepted - set([ConsistencyLevel.ANY]))
        self._assert_writes_fail(session, keyspace,
                                 SINGLE_DC_CONSISTENCY_LEVELS - accepted)
        self._assert_reads_fail(session, keyspace,
                                SINGLE_DC_CONSISTENCY_LEVELS - accepted)
    finally:
        # Restart the node so later tests see a healthy cluster.
        start(2)
        wait_for_up(cluster, 2)
    cluster.shutdown()
def test_white_list(self):
    """WhiteListRoundRobinPolicy pins all traffic on the single whitelisted
    host (node 2); after that host is decommissioned, queries raise
    NoHostAvailable."""
    use_singledc()
    keyspace = 'test_white_list'
    cluster = Cluster(('127.0.0.2',),
                      load_balancing_policy=WhiteListRoundRobinPolicy((IP_FORMAT % 2,)))
    session = cluster.connect()
    try:
        wait_for_up(cluster, 1, wait=False)
        wait_for_up(cluster, 2, wait=False)
        wait_for_up(cluster, 3)
        create_schema(session, keyspace)
        self._insert(session, keyspace)
        self._query(session, keyspace)
        # Only the whitelisted node (2) should coordinate queries.
        self.coordinator_stats.assert_query_count_equals(self, 1, 0)
        self.coordinator_stats.assert_query_count_equals(self, 2, 12)
        self.coordinator_stats.assert_query_count_equals(self, 3, 0)
        self.coordinator_stats.reset_counts()
        decommission(2)
        wait_for_down(cluster, 2, wait=True)
        try:
            self._query(session, keyspace)
            self.fail()
        except NoHostAvailable:
            pass
    finally:
        # Fix: the original never shut the Cluster down, leaking threads
        # and connections.
        cluster.shutdown()
def test_roundrobin_two_dcs_2(self):
    """Round-robin balancing ignores DC boundaries: queries spread evenly
    over all live nodes, and a node bootstrapped into dc1 joins the
    rotation while the stopped node drops out."""
    use_multidc([2, 2])
    keyspace = 'test_roundrobin_two_dcs_2'
    cluster = Cluster(load_balancing_policy=RoundRobinPolicy())
    session = cluster.connect()
    wait_for_up(cluster, 1, wait=False)
    wait_for_up(cluster, 2, wait=False)
    wait_for_up(cluster, 3, wait=False)
    wait_for_up(cluster, 4)
    create_schema(session, keyspace, replication_strategy=[2, 2])
    self._insert(session, keyspace)
    self._query(session, keyspace)
    # 12 queries spread evenly across the 4 nodes.
    self.coordinator_stats.assert_query_count_equals(self, 1, 3)
    self.coordinator_stats.assert_query_count_equals(self, 2, 3)
    self.coordinator_stats.assert_query_count_equals(self, 3, 3)
    self.coordinator_stats.assert_query_count_equals(self, 4, 3)
    force_stop(1)
    bootstrap(5, 'dc1')
    # reset control connection
    self._insert(session, keyspace, count=1000)
    wait_for_up(cluster, 5)
    self.coordinator_stats.reset_counts()
    self._query(session, keyspace)
    # Node 1 is down and gets no traffic; new node 5 shares the load.
    self.coordinator_stats.assert_query_count_equals(self, 1, 0)
    self.coordinator_stats.assert_query_count_equals(self, 2, 3)
    self.coordinator_stats.assert_query_count_equals(self, 3, 3)
    self.coordinator_stats.assert_query_count_equals(self, 4, 3)
    self.coordinator_stats.assert_query_count_equals(self, 5, 3)
    # Fix: the original leaked the Cluster; release its resources.
    cluster.shutdown()
def test_token_aware_composite_key(self):
    """A write and read keyed on a composite partition key round-trip
    correctly under token-aware routing."""
    use_singledc()
    keyspace = 'test_token_aware_composite_key'
    table = 'composite'
    cluster = Cluster(
        load_balancing_policy=TokenAwarePolicy(RoundRobinPolicy()))
    session = cluster.connect()
    wait_for_up(cluster, 1, wait=False)
    wait_for_up(cluster, 2, wait=False)
    wait_for_up(cluster, 3)
    create_schema(session, keyspace, replication_factor=2)
    session.execute('CREATE TABLE %s ('
                    'k1 int, '
                    'k2 int, '
                    'i int, '
                    'PRIMARY KEY ((k1, k2)))' % table)
    prepared = session.prepare('INSERT INTO %s '
                               '(k1, k2, i) '
                               'VALUES '
                               '(?, ?, ?)' % table)
    session.execute(prepared.bind((1, 2, 3)))
    results = session.execute('SELECT * FROM %s WHERE k1 = 1 AND k2 = 2' % table)
    self.assertTrue(len(results) == 1)
    # i was written as 3, so it is truthy.
    self.assertTrue(results[0].i)
    # Fix: the original leaked the Cluster; release its resources.
    cluster.shutdown()
def test_token_aware_with_rf_2(self, use_prepared=False):
    """With RF=2, token-aware routing pins queries on the first replica
    (node 2); when it is stopped, traffic fails over to the other
    replica (node 3) only."""
    use_singledc()
    keyspace = 'test_token_aware_with_rf_2'
    cluster = Cluster(
        load_balancing_policy=TokenAwarePolicy(RoundRobinPolicy()))
    session = cluster.connect()
    wait_for_up(cluster, 1, wait=False)
    wait_for_up(cluster, 2, wait=False)
    wait_for_up(cluster, 3)
    create_schema(session, keyspace, replication_factor=2)
    self._insert(session, keyspace)
    self._query(session, keyspace)
    # All 12 queries routed to the primary replica.
    self.coordinator_stats.assert_query_count_equals(self, 1, 0)
    self.coordinator_stats.assert_query_count_equals(self, 2, 12)
    self.coordinator_stats.assert_query_count_equals(self, 3, 0)
    self.coordinator_stats.reset_counts()
    stop(2)
    wait_for_down(cluster, 2, wait=True)
    self._query(session, keyspace)
    # Failover: the surviving replica (node 3) coordinates everything.
    self.coordinator_stats.assert_query_count_equals(self, 1, 0)
    self.coordinator_stats.assert_query_count_equals(self, 2, 0)
    self.coordinator_stats.assert_query_count_equals(self, 3, 12)
    # Fix: the original leaked the Cluster; release its resources.
    cluster.shutdown()
def test_token_aware_with_rf_2(self, use_prepared=False):
    """With RF=2, token-aware routing pins queries on the first replica
    (node 2); when it is stopped, traffic fails over to the other
    replica (node 3) only."""
    use_singledc()
    keyspace = 'test_token_aware_with_rf_2'
    cluster = Cluster(load_balancing_policy=TokenAwarePolicy(
        RoundRobinPolicy()), protocol_version=PROTOCOL_VERSION)
    session = cluster.connect()
    wait_for_up(cluster, 1, wait=False)
    wait_for_up(cluster, 2, wait=False)
    wait_for_up(cluster, 3)
    create_schema(session, keyspace, replication_factor=2)
    self._insert(session, keyspace)
    self._query(session, keyspace)
    # All 12 queries routed to the primary replica.
    self.coordinator_stats.assert_query_count_equals(self, 1, 0)
    self.coordinator_stats.assert_query_count_equals(self, 2, 12)
    self.coordinator_stats.assert_query_count_equals(self, 3, 0)
    self.coordinator_stats.reset_counts()
    stop(2)
    wait_for_down(cluster, 2, wait=True)
    self._query(session, keyspace)
    # Failover: the surviving replica (node 3) coordinates everything.
    self.coordinator_stats.assert_query_count_equals(self, 1, 0)
    self.coordinator_stats.assert_query_count_equals(self, 2, 0)
    self.coordinator_stats.assert_query_count_equals(self, 3, 12)
    cluster.shutdown()
def test_white_list(self):
    """WhiteListRoundRobinPolicy should send all traffic to the single
    whitelisted host, never reconnect to ignored hosts, and raise
    NoHostAvailable once the whitelisted host is down."""
    use_singledc()
    keyspace = 'test_white_list'
    cluster = Cluster(('127.0.0.2',),
                      load_balancing_policy=WhiteListRoundRobinPolicy((IP_FORMAT % 2,)),
                      protocol_version=PROTOCOL_VERSION,
                      topology_event_refresh_window=0,
                      status_event_refresh_window=0)
    session = cluster.connect()
    self._wait_for_nodes_up([1, 2, 3])
    create_schema(cluster, session, keyspace)
    self._insert(session, keyspace)
    self._query(session, keyspace)
    # Only the whitelisted node (2) should coordinate queries.
    self.coordinator_stats.assert_query_count_equals(self, 1, 0)
    self.coordinator_stats.assert_query_count_equals(self, 2, 12)
    self.coordinator_stats.assert_query_count_equals(self, 3, 0)
    # white list policy should not allow reconnecting to ignored hosts
    force_stop(3)
    self._wait_for_nodes_down([3])
    self.assertFalse(cluster.metadata._hosts[IP_FORMAT % 3].is_currently_reconnecting())
    self.coordinator_stats.reset_counts()
    force_stop(2)
    self._wait_for_nodes_down([2])
    try:
        self._query(session, keyspace)
        self.fail()
    except NoHostAvailable:
        pass
    finally:
        cluster.shutdown()
def _test_downgrading_cl(self, keyspace, rf, accepted):
    """Verify DowngradingConsistencyRetryPolicy behavior with one replica
    down: the ``accepted`` consistency levels succeed and all other
    single-DC levels fail, for both reads and writes."""
    cluster = Cluster(
        load_balancing_policy=TokenAwarePolicy(RoundRobinPolicy()),
        default_retry_policy=DowngradingConsistencyRetryPolicy(),
        protocol_version=PROTOCOL_VERSION)
    session = cluster.connect()
    create_schema(session, keyspace, replication_factor=rf)
    self._insert(session, keyspace, 1)
    self._query(session, keyspace, 1)
    # Token-aware routing sends the single query to the replica, node 2.
    self.coordinator_stats.assert_query_count_equals(self, 1, 0)
    self.coordinator_stats.assert_query_count_equals(self, 2, 1)
    self.coordinator_stats.assert_query_count_equals(self, 3, 0)
    try:
        force_stop(2)
        wait_for_down(cluster, 2)
        self._assert_writes_succeed(session, keyspace, accepted)
        # ANY is a write-only consistency level.
        self._assert_reads_succeed(session, keyspace,
                                   accepted - set([ConsistencyLevel.ANY]))
        self._assert_writes_fail(session, keyspace,
                                 SINGLE_DC_CONSISTENCY_LEVELS - accepted)
        self._assert_reads_fail(session, keyspace,
                                SINGLE_DC_CONSISTENCY_LEVELS - accepted)
    finally:
        # Restart the node so later tests see a healthy cluster.
        start(2)
        wait_for_up(cluster, 2)
    # Fix: the original leaked the Cluster; release its resources.
    cluster.shutdown()
def test_token_aware_with_rf_2(self, use_prepared=False):
    """With RF=2, token-aware routing pins queries on the first replica
    (node 2); when it is stopped, traffic fails over to the other
    replica (node 3) only."""
    use_singledc()
    keyspace = 'test_token_aware_with_rf_2'
    cluster, session = self._cluster_session_with_lbp(TokenAwarePolicy(RoundRobinPolicy()))
    self._wait_for_nodes_up(range(1, 4), cluster)
    create_schema(cluster, session, keyspace, replication_factor=2)
    self._insert(session, keyspace)
    self._query(session, keyspace)
    # All 12 queries routed to the primary replica.
    self.coordinator_stats.assert_query_count_equals(self, 1, 0)
    self.coordinator_stats.assert_query_count_equals(self, 2, 12)
    self.coordinator_stats.assert_query_count_equals(self, 3, 0)
    self.coordinator_stats.reset_counts()
    stop(2)
    self._wait_for_nodes_down([2], cluster)
    self._query(session, keyspace)
    # Failover: the surviving replica (node 3) coordinates everything.
    self.coordinator_stats.assert_query_count_equals(self, 1, 0)
    self.coordinator_stats.assert_query_count_equals(self, 2, 0)
    self.coordinator_stats.assert_query_count_equals(self, 3, 12)
    cluster.shutdown()
def make_session_and_keyspace(self):
    """Connect to the local cluster, switch result rows to dicts, and
    create the test keyspace; return the ready-to-use session."""
    connection = Cluster().connect()
    # Rows come back as dictionaries instead of named tuples.
    connection.row_factory = dict_factory
    create_schema(connection, self.keyspace)
    return connection
def make_session_and_keyspace(self):
    """Connect to the local cluster with the configured protocol version,
    switch result rows to dicts, raise the default timeout, and create
    the test keyspace; return the ready-to-use session."""
    cluster = Cluster(protocol_version=PROTOCOL_VERSION)
    conn = cluster.connect()
    # Rows come back as dictionaries instead of named tuples, and schema
    # statements get a more generous timeout.
    conn.row_factory = dict_factory
    conn.default_timeout = 20.0
    create_schema(cluster, conn, self.keyspace)
    return conn
def make_session_and_keyspace(self):
    """Connect to the local cluster with the configured protocol version,
    switch result rows to dicts, raise the default timeout, and create
    the test keyspace; return the ready-to-use session."""
    conn = Cluster(protocol_version=PROTOCOL_VERSION).connect()
    # Rows come back as dictionaries instead of named tuples, and schema
    # statements get a more generous timeout.
    conn.row_factory = dict_factory
    conn.default_timeout = 20.0
    create_schema(conn, self.keyspace)
    return conn
def make_session_and_keyspace(self):
    """Connect to the local cluster, switch result rows to dicts, raise
    the default timeout, and create the test keyspace; return the
    ready-to-use session."""
    conn = Cluster().connect()
    # Rows come back as dictionaries instead of named tuples, and schema
    # statements get a more generous timeout.
    conn.row_factory = dict_factory
    conn.default_timeout = 20.0
    create_schema(conn, self.keyspace)
    return conn
def _set_up_shuffle_test(self, keyspace, replication_factor):
    """Prepare a single-DC cluster with a shuffle-replicas token-aware
    policy and the given keyspace; return ``(cluster, session)``."""
    use_singledc()
    # Token-aware routing with replica shuffling enabled.
    shuffling_policy = TokenAwarePolicy(RoundRobinPolicy(),
                                        shuffle_replicas=True)
    cluster, session = self._cluster_session_with_lbp(shuffling_policy)
    self._wait_for_nodes_up(range(1, 4), cluster)
    create_schema(cluster, session, keyspace,
                  replication_factor=replication_factor)
    return cluster, session
def token_aware(self, keyspace, use_prepared=False):
    """Exercise token-aware routing with RF=1 through a full node
    lifecycle: steady state, replica stopped (Unavailable expected),
    replica restarted, and replica decommissioned (load redistributes).
    """
    use_singledc()
    cluster = Cluster(
        load_balancing_policy=TokenAwarePolicy(RoundRobinPolicy()))
    session = cluster.connect()
    wait_for_up(cluster, 1, wait=False)
    wait_for_up(cluster, 2, wait=False)
    wait_for_up(cluster, 3)
    create_schema(session, keyspace, replication_factor=1)
    self._insert(session, keyspace)
    self._query(session, keyspace, use_prepared=use_prepared)
    # All 12 queries routed to the sole replica, node 2.
    self.coordinator_stats.assert_query_count_equals(self, 1, 0)
    self.coordinator_stats.assert_query_count_equals(self, 2, 12)
    self.coordinator_stats.assert_query_count_equals(self, 3, 0)
    self.coordinator_stats.reset_counts()
    # Routing is stable on repeat.
    self._query(session, keyspace, use_prepared=use_prepared)
    self.coordinator_stats.assert_query_count_equals(self, 1, 0)
    self.coordinator_stats.assert_query_count_equals(self, 2, 12)
    self.coordinator_stats.assert_query_count_equals(self, 3, 0)
    self.coordinator_stats.reset_counts()
    force_stop(2)
    wait_for_down(cluster, 2, wait=True)
    # With the only replica down, CL=ONE reads are Unavailable.
    try:
        self._query(session, keyspace, use_prepared=use_prepared)
        self.fail()
    except Unavailable as e:
        self.assertEqual(e.consistency, 1)
        self.assertEqual(e.required_replicas, 1)
        self.assertEqual(e.alive_replicas, 0)
    self.coordinator_stats.reset_counts()
    start(2)
    wait_for_up(cluster, 2, wait=True)
    # After restart the replica resumes coordinating all queries.
    self._query(session, keyspace, use_prepared=use_prepared)
    self.coordinator_stats.assert_query_count_equals(self, 1, 0)
    self.coordinator_stats.assert_query_count_equals(self, 2, 12)
    self.coordinator_stats.assert_query_count_equals(self, 3, 0)
    self.coordinator_stats.reset_counts()
    decommission(2)
    wait_for_down(cluster, 2, wait=True)
    # Decommissioning streams node 2's ranges to nodes 1 and 3.
    self._query(session, keyspace, use_prepared=use_prepared)
    self.coordinator_stats.assert_query_count_equals(self, 1, 6)
    self.coordinator_stats.assert_query_count_equals(self, 2, 0)
    self.coordinator_stats.assert_query_count_equals(self, 3, 6)
    # Fix: the original leaked the Cluster; release its resources.
    cluster.shutdown()
def test_black_list_with_host_filter_policy(self):
    """
    Test to validate removing certain hosts from the query plan with
    HostFilterPolicy
    @since 3.8
    @jira_ticket PYTHON-961
    @expected_result the excluded hosts are ignored
    @test_category policy
    """
    use_singledc()
    keyspace = 'test_black_list_with_hfp'
    ignored_address = (IP_FORMAT % 2)
    # Filter node 2 out of the child round-robin plan.
    hfp = HostFilterPolicy(
        child_policy=RoundRobinPolicy(),
        predicate=lambda host: host.address != ignored_address)
    cluster = Cluster((IP_FORMAT % 1, ),
                      protocol_version=PROTOCOL_VERSION,
                      topology_event_refresh_window=0,
                      status_event_refresh_window=0,
                      execution_profiles={
                          EXEC_PROFILE_DEFAULT: ExecutionProfile(load_balancing_policy=hfp)
                      })
    # Ensure the cluster is shut down even if assertions fail.
    self.addCleanup(cluster.shutdown)
    session = cluster.connect()
    self._wait_for_nodes_up([1, 2, 3])

    self.assertNotIn(ignored_address, [h.address for h in hfp.make_query_plan()])

    create_schema(cluster, session, keyspace)
    self._insert(session, keyspace)
    self._query(session, keyspace)

    # RoundRobin doesn't provide a gurantee on the order of the hosts
    # so we will have that for 127.0.0.1 and 127.0.0.3 the count for one
    # will be 4 and for the other 8
    first_node_count = self.coordinator_stats.get_query_count(1)
    third_node_count = self.coordinator_stats.get_query_count(3)
    self.assertEqual(first_node_count + third_node_count, 12)
    self.assertTrue(first_node_count == 8 or first_node_count == 4)
    # The ignored host never coordinates.
    self.coordinator_stats.assert_query_count_equals(self, 2, 0)

    # policy should not allow reconnecting to ignored host
    force_stop(2)
    self._wait_for_nodes_down([2])
    self.assertFalse(
        cluster.metadata.get_host(
            ignored_address).is_currently_reconnecting())
def rfthree_downgradingcl(self, cluster, keyspace, roundrobin):
    """Shared body for downgrading-CL tests with RF=2 on the supplied
    cluster.  ``roundrobin`` selects the expected coordinator
    distribution: even spread for plain round-robin, replica-pinned for
    token-aware."""
    session = cluster.connect(wait_for_all_pools=True)
    create_schema(cluster, session, keyspace, replication_factor=2)
    self._insert(session, keyspace, count=12)
    self._query(session, keyspace, count=12)

    if roundrobin:
        # Round-robin spreads the 12 queries evenly.
        self.coordinator_stats.assert_query_count_equals(self, 1, 4)
        self.coordinator_stats.assert_query_count_equals(self, 2, 4)
        self.coordinator_stats.assert_query_count_equals(self, 3, 4)
    else:
        # Token-aware pins everything on the primary replica, node 2.
        self.coordinator_stats.assert_query_count_equals(self, 1, 0)
        self.coordinator_stats.assert_query_count_equals(self, 2, 12)
        self.coordinator_stats.assert_query_count_equals(self, 3, 0)

    try:
        self.coordinator_stats.reset_counts()
        force_stop(2)
        wait_for_down(cluster, 2)

        self._assert_writes_succeed(session, keyspace, SINGLE_DC_CONSISTENCY_LEVELS)

        # Test reads that expected to complete successfully
        # (ANY is write-only, hence excluded).
        for cl in SINGLE_DC_CONSISTENCY_LEVELS - set([ConsistencyLevel.ANY]):
            self.coordinator_stats.reset_counts()
            self._query(session, keyspace, 12, consistency_level=cl)
            if roundrobin:
                # Survivors 1 and 3 split the load.
                self.coordinator_stats.assert_query_count_equals(self, 1, 6)
                self.coordinator_stats.assert_query_count_equals(self, 2, 0)
                self.coordinator_stats.assert_query_count_equals(self, 3, 6)
            else:
                # Token-aware fails over to the other replica, node 3.
                self.coordinator_stats.assert_query_count_equals(self, 1, 0)
                self.coordinator_stats.assert_query_count_equals(self, 2, 0)
                self.coordinator_stats.assert_query_count_equals(self, 3, 12)
    finally:
        # Restart the node and always release the cluster.
        start(2)
        wait_for_up(cluster, 2)
        session.cluster.shutdown()
def test_black_list_with_host_filter_policy(self):
    """
    Test to validate removing certain hosts from the query plan with
    HostFilterPolicy
    @since 3.8
    @jira_ticket PYTHON-961
    @expected_result the excluded hosts are ignored
    @test_category policy
    """
    use_singledc()
    keyspace = 'test_black_list_with_hfp'
    ignored_address = (IP_FORMAT % 2)
    # Filter node 2 out of the child round-robin plan.
    hfp = HostFilterPolicy(
        child_policy=RoundRobinPolicy(),
        predicate=lambda host: host.address != ignored_address
    )
    cluster = Cluster(
        (IP_FORMAT % 1,),
        load_balancing_policy=hfp,
        protocol_version=PROTOCOL_VERSION,
        topology_event_refresh_window=0,
        status_event_refresh_window=0
    )
    # Ensure the cluster is shut down even if assertions fail.
    self.addCleanup(cluster.shutdown)
    session = cluster.connect()
    self._wait_for_nodes_up([1, 2, 3])

    self.assertNotIn(ignored_address, [h.address for h in hfp.make_query_plan()])

    create_schema(cluster, session, keyspace)
    self._insert(session, keyspace)
    self._query(session, keyspace)

    # RoundRobin doesn't provide a gurantee on the order of the hosts
    # so we will have that for 127.0.0.1 and 127.0.0.3 the count for one
    # will be 4 and for the other 8
    first_node_count = self.coordinator_stats.get_query_count(1)
    third_node_count = self.coordinator_stats.get_query_count(3)
    self.assertEqual(first_node_count + third_node_count, 12)
    self.assertTrue(first_node_count == 8 or first_node_count == 4)
    # The ignored host never coordinates.
    self.coordinator_stats.assert_query_count_equals(self, 2, 0)

    # policy should not allow reconnecting to ignored host
    force_stop(2)
    self._wait_for_nodes_down([2])
    self.assertFalse(cluster.metadata._hosts[ignored_address].is_currently_reconnecting())
def test_dc_aware_roundrobin_two_dcs_2(self):
    """DCAwareRoundRobinPolicy targeting dc2 sends all queries to dc2's
    two nodes (4 and 5) and none to dc1's three nodes."""
    use_multidc([3, 2])
    keyspace = 'test_dc_aware_roundrobin_two_dcs_2'
    cluster, session = self._cluster_session_with_lbp(DCAwareRoundRobinPolicy('dc2'))
    # Ensure the cluster is shut down even if assertions fail.
    self.addCleanup(cluster.shutdown)
    self._wait_for_nodes_up(range(1, 6))
    create_schema(cluster, session, keyspace, replication_strategy=[2, 2])
    self._insert(session, keyspace)
    self._query(session, keyspace)

    # dc1 nodes (1-3) receive nothing; dc2 nodes (4-5) split the 12 queries.
    self.coordinator_stats.assert_query_count_equals(self, 1, 0)
    self.coordinator_stats.assert_query_count_equals(self, 2, 0)
    self.coordinator_stats.assert_query_count_equals(self, 3, 0)
    self.coordinator_stats.assert_query_count_equals(self, 4, 6)
    self.coordinator_stats.assert_query_count_equals(self, 5, 6)
def test_dc_aware_roundrobin_two_dcs_2(self):
    """DCAwareRoundRobinPolicy targeting dc2 sends all queries to dc2's
    two nodes (4 and 5) and none to dc1's three nodes."""
    use_multidc([3, 2])
    keyspace = 'test_dc_aware_roundrobin_two_dcs_2'
    cluster, session = self._cluster_session_with_lbp(DCAwareRoundRobinPolicy('dc2'))
    self._wait_for_nodes_up(range(1, 6))
    create_schema(cluster, session, keyspace, replication_strategy=[2, 2])
    self._insert(session, keyspace)
    self._query(session, keyspace)

    # dc1 nodes (1-3) receive nothing; dc2 nodes (4-5) split the 12 queries.
    self.coordinator_stats.assert_query_count_equals(self, 1, 0)
    self.coordinator_stats.assert_query_count_equals(self, 2, 0)
    self.coordinator_stats.assert_query_count_equals(self, 3, 0)
    self.coordinator_stats.assert_query_count_equals(self, 4, 6)
    self.coordinator_stats.assert_query_count_equals(self, 5, 6)

    cluster.shutdown()
def test_white_list(self):
    """WhiteListRoundRobinPolicy (execution-profile variant) should send
    all traffic to the single whitelisted host, never reconnect to
    ignored hosts, and raise NoHostAvailable once the whitelisted host
    is down."""
    use_singledc()
    keyspace = 'test_white_list'

    cluster = TestCluster(
        contact_points=('127.0.0.2', ),
        topology_event_refresh_window=0,
        status_event_refresh_window=0,
        execution_profiles={
            EXEC_PROFILE_DEFAULT: ExecutionProfile(
                load_balancing_policy=WhiteListRoundRobinPolicy((
                    IP_FORMAT % 2, )))
        })
    session = cluster.connect()
    self._wait_for_nodes_up([1, 2, 3])

    create_schema(cluster, session, keyspace)
    self._insert(session, keyspace)
    self._query(session, keyspace)

    # Only the whitelisted node (2) should coordinate queries.
    self.coordinator_stats.assert_query_count_equals(self, 1, 0)
    self.coordinator_stats.assert_query_count_equals(self, 2, 12)
    self.coordinator_stats.assert_query_count_equals(self, 3, 0)

    # white list policy should not allow reconnecting to ignored hosts
    force_stop(3)
    self._wait_for_nodes_down([3])
    self.assertFalse(
        cluster.metadata.get_host(IP_FORMAT % 3).is_currently_reconnecting())

    self.coordinator_stats.reset_counts()
    force_stop(2)
    self._wait_for_nodes_down([2])

    try:
        self._query(session, keyspace)
        self.fail()
    except NoHostAvailable:
        pass
    finally:
        cluster.shutdown()
def test_dc_aware_roundrobin_two_dcs_2(self):
    """DC-aware round-robin pinned to 'dc2' on a 3+2 ring: only the
    dc2 nodes (4 and 5) may coordinate queries.
    """
    use_multidc([3, 2])
    keyspace = 'test_dc_aware_roundrobin_two_dcs_2'
    cluster = Cluster(load_balancing_policy=DCAwareRoundRobinPolicy('dc2'))
    # Fix: the cluster was never shut down, leaking connections and
    # threads into subsequent tests. addCleanup runs even when an
    # assertion below fails.
    self.addCleanup(cluster.shutdown)
    session = cluster.connect()

    # wait=False presumably registers without blocking; only the last
    # call blocks for the whole ring — TODO confirm against wait_for_up.
    for node in range(1, 5):
        wait_for_up(cluster, node, wait=False)
    wait_for_up(cluster, 5)

    create_schema(session, keyspace, replication_strategy=[2, 2])
    self._insert(session, keyspace)
    self._query(session, keyspace)

    # dc1 nodes idle; the 12 queries split across the two dc2 hosts.
    self.coordinator_stats.assert_query_count_equals(self, 1, 0)
    self.coordinator_stats.assert_query_count_equals(self, 2, 0)
    self.coordinator_stats.assert_query_count_equals(self, 3, 0)
    self.coordinator_stats.assert_query_count_equals(self, 4, 6)
    self.coordinator_stats.assert_query_count_equals(self, 5, 6)
def test_dc_aware_roundrobin_two_dcs_2(self):
    """DCAwareRoundRobinPolicy('dc2') on a 3+2 multi-DC ring: queries
    land exclusively on the dc2 hosts (nodes 4 and 5).
    """
    use_multidc([3, 2])
    keyspace = 'test_dc_aware_roundrobin_two_dcs_2'
    cluster = Cluster(
        load_balancing_policy=DCAwareRoundRobinPolicy('dc2'))
    # Fix: missing cleanup — the cluster was never shut down, leaking
    # resources into later tests even on assertion failure.
    self.addCleanup(cluster.shutdown)
    session = cluster.connect()

    wait_for_up(cluster, 1, wait=False)
    wait_for_up(cluster, 2, wait=False)
    wait_for_up(cluster, 3, wait=False)
    wait_for_up(cluster, 4, wait=False)
    wait_for_up(cluster, 5)

    create_schema(session, keyspace, replication_strategy=[2, 2])
    self._insert(session, keyspace)
    self._query(session, keyspace)

    self.coordinator_stats.assert_query_count_equals(self, 1, 0)
    self.coordinator_stats.assert_query_count_equals(self, 2, 0)
    self.coordinator_stats.assert_query_count_equals(self, 3, 0)
    self.coordinator_stats.assert_query_count_equals(self, 4, 6)
    self.coordinator_stats.assert_query_count_equals(self, 5, 6)
def test_dc_aware_roundrobin_two_dcs(self):
    """DC-aware round-robin pinned to 'dc1' on a 3+2 ring: the three
    dc1 nodes coordinate 4 queries each, dc2 stays idle.
    """
    clear_and_use_multidc([3, 2])
    keyspace = 'test_dc_aware_roundrobin_two_dcs'
    cluster = Cluster(load_balancing_policy=DCAwareRoundRobinPolicy('dc1'),
                      protocol_version=PROTOCOL_VERSION)
    # Fix: missing cleanup — the cluster was never shut down, leaking
    # connections into subsequent tests.
    self.addCleanup(cluster.shutdown)
    session = cluster.connect()

    for node in range(1, 5):
        wait_for_up(cluster, node, wait=False)
    wait_for_up(cluster, 5)

    create_schema(session, keyspace, replication_strategy=[2, 2])
    self._insert(session, keyspace)
    self._query(session, keyspace)

    self.coordinator_stats.assert_query_count_equals(self, 1, 4)
    self.coordinator_stats.assert_query_count_equals(self, 2, 4)
    self.coordinator_stats.assert_query_count_equals(self, 3, 4)
    self.coordinator_stats.assert_query_count_equals(self, 4, 0)
    self.coordinator_stats.assert_query_count_equals(self, 5, 0)
def test_rfthree_tokenaware_none_down(self):
    """Token-aware routing with RF=3 and all nodes up: the single probe
    query hits node 2, then reads/writes succeed at every single-DC
    consistency level (reads cannot use ANY).
    """
    keyspace = 'test_rfthree_tokenaware_none_down'
    cluster = Cluster(
        load_balancing_policy=TokenAwarePolicy(RoundRobinPolicy()))
    # Fix: missing cleanup — the cluster was never shut down; register
    # it up front so it also runs when an assertion fails.
    self.addCleanup(cluster.shutdown)
    session = cluster.connect()
    wait_for_up(cluster, 1, wait=False)
    wait_for_up(cluster, 2)

    create_schema(session, keyspace, replication_factor=3)
    self._insert(session, keyspace, count=1)
    self._query(session, keyspace, count=1)

    # The probe row routes to node 2 under token-aware balancing.
    self.coordinator_stats.assert_query_count_equals(self, 1, 0)
    self.coordinator_stats.assert_query_count_equals(self, 2, 1)
    self.coordinator_stats.assert_query_count_equals(self, 3, 0)

    self.coordinator_stats.reset_counts()

    self._assert_writes_succeed(session, keyspace, SINGLE_DC_CONSISTENCY_LEVELS)
    self._assert_reads_succeed(session, keyspace,
                               SINGLE_DC_CONSISTENCY_LEVELS - set([ConsistencyLevel.ANY]),
                               expected_reader=2)
def test_roundrobin(self):
    """RoundRobinPolicy on a 3-node single DC: even spread while all
    nodes are up, rebalance after node 3 stops, and again after
    node 1 is decommissioned and node 3 restarted.
    """
    use_singledc()
    keyspace = 'test_roundrobin'
    cluster = Cluster(
        load_balancing_policy=RoundRobinPolicy(),
        protocol_version=PROTOCOL_VERSION)
    # Fix: missing cleanup — the cluster was never shut down, leaking
    # connections/threads into subsequent tests; addCleanup also runs
    # when an assertion below fails.
    self.addCleanup(cluster.shutdown)
    session = cluster.connect()
    wait_for_up(cluster, 1, wait=False)
    wait_for_up(cluster, 2, wait=False)
    wait_for_up(cluster, 3)

    create_schema(session, keyspace, replication_factor=3)
    self._insert(session, keyspace)
    self._query(session, keyspace)

    # 12 queries spread evenly over the three live nodes.
    self.coordinator_stats.assert_query_count_equals(self, 1, 4)
    self.coordinator_stats.assert_query_count_equals(self, 2, 4)
    self.coordinator_stats.assert_query_count_equals(self, 3, 4)

    force_stop(3)
    wait_for_down(cluster, 3)

    self.coordinator_stats.reset_counts()
    self._query(session, keyspace)
    self.coordinator_stats.assert_query_count_equals(self, 1, 6)
    self.coordinator_stats.assert_query_count_equals(self, 2, 6)
    self.coordinator_stats.assert_query_count_equals(self, 3, 0)

    decommission(1)
    start(3)
    wait_for_down(cluster, 1)
    wait_for_up(cluster, 3)

    self.coordinator_stats.reset_counts()
    self._query(session, keyspace)
    self.coordinator_stats.assert_query_count_equals(self, 1, 0)
    self.coordinator_stats.assert_query_count_equals(self, 2, 6)
    self.coordinator_stats.assert_query_count_equals(self, 3, 6)
def rfthree_downgradingcl(self, cluster, keyspace, roundrobin):
    """Shared body for downgrading-CL tests with RF=2.

    With all nodes up, query distribution depends on the policy under
    test (`roundrobin` toggles the expectation). After node 2 is force
    stopped, writes and reads at every single-DC consistency level
    (reads minus ANY) must still succeed via the downgrading policy.
    Node 2 is restarted and the cluster shut down in all cases.
    """
    session = cluster.connect(wait_for_all_pools=True)
    create_schema(cluster, session, keyspace, replication_factor=2)
    self._insert(session, keyspace, count=12)
    self._query(session, keyspace, count=12)

    if roundrobin:
        healthy_expected = {1: 4, 2: 4, 3: 4}
    else:
        healthy_expected = {1: 0, 2: 12, 3: 0}
    for node, count in healthy_expected.items():
        self.coordinator_stats.assert_query_count_equals(self, node, count)

    try:
        self.coordinator_stats.reset_counts()
        force_stop(2)
        wait_for_down(cluster, 2)

        self._assert_writes_succeed(session, keyspace, SINGLE_DC_CONSISTENCY_LEVELS)

        # Test reads that expected to complete successfully
        degraded_expected = ({1: 6, 2: 0, 3: 6} if roundrobin
                             else {1: 0, 2: 0, 3: 12})
        for cl in SINGLE_DC_CONSISTENCY_LEVELS - set([ConsistencyLevel.ANY]):
            self.coordinator_stats.reset_counts()
            self._query(session, keyspace, 12, consistency_level=cl)
            for node, count in degraded_expected.items():
                self.coordinator_stats.assert_query_count_equals(self, node, count)
    finally:
        start(2)
        wait_for_up(cluster, 2)
        session.cluster.shutdown()
def test_roundrobin(self):
    """RoundRobinPolicy on a 3-node single DC: verifies even query
    distribution, redistribution after force-stopping node 3, and
    again after decommissioning node 1 and restarting node 3.
    """
    use_singledc()
    keyspace = 'test_roundrobin'
    cluster = Cluster(
        load_balancing_policy=RoundRobinPolicy(),
        protocol_version=PROTOCOL_VERSION)
    # Fix: missing cleanup — the cluster was never shut down, leaking
    # connections into subsequent tests even on assertion failure.
    self.addCleanup(cluster.shutdown)
    session = cluster.connect()
    wait_for_up(cluster, 1, wait=False)
    wait_for_up(cluster, 2, wait=False)
    wait_for_up(cluster, 3)

    create_schema(cluster, session, keyspace, replication_factor=3)
    self._insert(session, keyspace)
    self._query(session, keyspace)

    self.coordinator_stats.assert_query_count_equals(self, 1, 4)
    self.coordinator_stats.assert_query_count_equals(self, 2, 4)
    self.coordinator_stats.assert_query_count_equals(self, 3, 4)

    force_stop(3)
    wait_for_down(cluster, 3)

    self.coordinator_stats.reset_counts()
    self._query(session, keyspace)
    self.coordinator_stats.assert_query_count_equals(self, 1, 6)
    self.coordinator_stats.assert_query_count_equals(self, 2, 6)
    self.coordinator_stats.assert_query_count_equals(self, 3, 0)

    decommission(1)
    start(3)
    wait_for_down(cluster, 1)
    wait_for_up(cluster, 3)

    self.coordinator_stats.reset_counts()
    self._query(session, keyspace)
    self.coordinator_stats.assert_query_count_equals(self, 1, 0)
    self.coordinator_stats.assert_query_count_equals(self, 2, 6)
    self.coordinator_stats.assert_query_count_equals(self, 3, 6)
def test_dc_aware_roundrobin_two_dcs(self):
    """DCAwareRoundRobinPolicy('dc1') on a 3+2 ring: the three dc1
    hosts coordinate 4 queries each; dc2 hosts stay at zero.
    """
    clear_and_use_multidc([3, 2])
    keyspace = 'test_dc_aware_roundrobin_two_dcs'
    cluster = Cluster(
        load_balancing_policy=DCAwareRoundRobinPolicy('dc1'),
        protocol_version=PROTOCOL_VERSION)
    # Fix: missing cleanup — the cluster was never shut down.
    self.addCleanup(cluster.shutdown)
    session = cluster.connect()

    wait_for_up(cluster, 1, wait=False)
    wait_for_up(cluster, 2, wait=False)
    wait_for_up(cluster, 3, wait=False)
    wait_for_up(cluster, 4, wait=False)
    wait_for_up(cluster, 5)

    create_schema(session, keyspace, replication_strategy=[2, 2])
    self._insert(session, keyspace)
    self._query(session, keyspace)

    self.coordinator_stats.assert_query_count_equals(self, 1, 4)
    self.coordinator_stats.assert_query_count_equals(self, 2, 4)
    self.coordinator_stats.assert_query_count_equals(self, 3, 4)
    self.coordinator_stats.assert_query_count_equals(self, 4, 0)
    self.coordinator_stats.assert_query_count_equals(self, 5, 0)
def test_roundrobin_two_dcs_2(self):
    """Round-robin over a 2+2 multi-DC ring, then rebalance after
    node 1 is force-stopped and node 5 bootstraps into dc1.
    """
    use_multidc([2, 2])
    keyspace = 'test_roundrobin_two_dcs_2'
    cluster = Cluster(
        load_balancing_policy=RoundRobinPolicy(),
        protocol_version=PROTOCOL_VERSION)
    session = cluster.connect()
    for host in (1, 2, 3):
        wait_for_up(cluster, host, wait=False)
    wait_for_up(cluster, 4)

    create_schema(cluster, session, keyspace, replication_strategy=[2, 2])
    self._insert(session, keyspace)
    self._query(session, keyspace)

    # 12 queries spread evenly over all four nodes, regardless of DC.
    for host in (1, 2, 3, 4):
        self.coordinator_stats.assert_query_count_equals(self, host, 3)

    force_stop(1)
    bootstrap(5, 'dc1')

    # reset control connection
    self._insert(session, keyspace, count=1000)

    wait_for_up(cluster, 5)

    self.coordinator_stats.reset_counts()
    self._query(session, keyspace)

    self.coordinator_stats.assert_query_count_equals(self, 1, 0)
    for host in (2, 3, 4, 5):
        self.coordinator_stats.assert_query_count_equals(self, host, 3)

    cluster.shutdown()
def test_token_aware_composite_key(self):
    """Token-aware routing with a composite partition key: insert a
    row via a prepared statement, then read it back.
    """
    use_singledc()
    keyspace = 'test_token_aware_composite_key'
    table = 'composite'
    cluster, session = self._cluster_session_with_lbp(TokenAwarePolicy(RoundRobinPolicy()))
    self._wait_for_nodes_up(range(1, 4), cluster)

    create_schema(cluster, session, keyspace, replication_factor=2)
    session.execute(
        'CREATE TABLE %s (k1 int, k2 int, i int, PRIMARY KEY ((k1, k2)))' % table)

    insert_stmt = session.prepare(
        'INSERT INTO %s (k1, k2, i) VALUES (?, ?, ?)' % table)
    session.execute(insert_stmt.bind((1, 2, 3)))

    results = session.execute(
        'SELECT * FROM %s WHERE k1 = 1 AND k2 = 2' % table)
    # The inserted value (3) is truthy.
    self.assertTrue(results[0].i)

    cluster.shutdown()
def test_dc_aware_roundrobin_one_remote_host(self):
    """DCAwareRoundRobinPolicy('dc2', used_hosts_per_remote_dc=1).

    dc2 (nodes 3, 4) takes all traffic; once dc2 is decommissioned,
    exactly one dc1 host acts as the remote fallback; with every host
    gone, queries raise NoHostAvailable.
    """
    use_multidc([2, 2])
    keyspace = 'test_dc_aware_roundrobin_one_remote_host'
    cluster, session = self._cluster_session_with_lbp(DCAwareRoundRobinPolicy('dc2', used_hosts_per_remote_dc=1))
    self._wait_for_nodes_up(range(1, 5))

    create_schema(cluster, session, keyspace, replication_strategy=[2, 2])
    self._insert(session, keyspace)
    self._query(session, keyspace)

    def check_counts(expected):
        # Assert coordinator counts for the given (node, count) pairs.
        for node, count in expected:
            self.coordinator_stats.assert_query_count_equals(self, node, count)

    check_counts(((1, 0), (2, 0), (3, 6), (4, 6)))

    self.coordinator_stats.reset_counts()
    bootstrap(5, 'dc1')
    self._wait_for_nodes_up([5])

    self._query(session, keyspace)
    check_counts(((1, 0), (2, 0), (3, 6), (4, 6), (5, 0)))

    self.coordinator_stats.reset_counts()
    decommission(3)
    decommission(4)
    self._wait_for_nodes_down([3, 4])

    self._query(session, keyspace)
    check_counts(((3, 0), (4, 0)))
    # Exactly one of the dc1 candidates (1, 2, 5) took all 12 queries.
    observed = {self.coordinator_stats.get_query_count(node) for node in (1, 2, 5)}
    self.assertEqual({0, 12}, observed)

    self.coordinator_stats.reset_counts()
    decommission(5)
    self._wait_for_nodes_down([5])

    self._query(session, keyspace)
    check_counts(((3, 0), (4, 0), (5, 0)))
    observed = {self.coordinator_stats.get_query_count(node) for node in (1, 2)}
    self.assertEqual({0, 12}, observed)

    self.coordinator_stats.reset_counts()
    decommission(1)
    self._wait_for_nodes_down([1])

    self._query(session, keyspace)
    check_counts(((1, 0), (2, 12), (3, 0), (4, 0), (5, 0)))

    self.coordinator_stats.reset_counts()
    force_stop(2)

    try:
        self._query(session, keyspace)
        self.fail()
    except NoHostAvailable:
        pass

    cluster.shutdown()
def test_dc_aware_roundrobin_one_remote_host(self):
    """DC-aware policy on dc2 with one remote host allowed: dc2 serves
    all traffic, a single dc1 host becomes the fallback as nodes are
    decommissioned, and NoHostAvailable is raised once none remain.
    """
    use_multidc([2, 2])
    keyspace = 'test_dc_aware_roundrobin_one_remote_host'
    cluster = Cluster(
        load_balancing_policy=DCAwareRoundRobinPolicy('dc2', used_hosts_per_remote_dc=1))
    # Fix: the cluster was never shut down anywhere in this test,
    # leaking connections into subsequent tests; addCleanup also runs
    # when an assertion fails partway through.
    self.addCleanup(cluster.shutdown)
    session = cluster.connect()
    wait_for_up(cluster, 1, wait=False)
    wait_for_up(cluster, 2, wait=False)
    wait_for_up(cluster, 3, wait=False)
    wait_for_up(cluster, 4)

    create_schema(session, keyspace, replication_strategy=[2, 2])
    self._insert(session, keyspace)
    self._query(session, keyspace)

    # All traffic on dc2 (nodes 3 and 4).
    self.coordinator_stats.assert_query_count_equals(self, 1, 0)
    self.coordinator_stats.assert_query_count_equals(self, 2, 0)
    self.coordinator_stats.assert_query_count_equals(self, 3, 6)
    self.coordinator_stats.assert_query_count_equals(self, 4, 6)

    self.coordinator_stats.reset_counts()
    bootstrap(5, 'dc1')
    wait_for_up(cluster, 5)

    # A new dc1 host does not change dc2-local routing.
    self._query(session, keyspace)
    self.coordinator_stats.assert_query_count_equals(self, 1, 0)
    self.coordinator_stats.assert_query_count_equals(self, 2, 0)
    self.coordinator_stats.assert_query_count_equals(self, 3, 6)
    self.coordinator_stats.assert_query_count_equals(self, 4, 6)
    self.coordinator_stats.assert_query_count_equals(self, 5, 0)

    self.coordinator_stats.reset_counts()
    decommission(3)
    decommission(4)
    wait_for_down(cluster, 3, wait=True)
    wait_for_down(cluster, 4, wait=True)

    # With dc2 gone, exactly one dc1 host takes all 12 queries.
    self._query(session, keyspace)
    self.coordinator_stats.assert_query_count_equals(self, 3, 0)
    self.coordinator_stats.assert_query_count_equals(self, 4, 0)
    responses = set()
    for node in [1, 2, 5]:
        responses.add(self.coordinator_stats.get_query_count(node))
    self.assertEqual(set([0, 0, 12]), responses)

    self.coordinator_stats.reset_counts()
    decommission(5)
    wait_for_down(cluster, 5, wait=True)

    self._query(session, keyspace)
    self.coordinator_stats.assert_query_count_equals(self, 3, 0)
    self.coordinator_stats.assert_query_count_equals(self, 4, 0)
    self.coordinator_stats.assert_query_count_equals(self, 5, 0)
    responses = set()
    for node in [1, 2]:
        responses.add(self.coordinator_stats.get_query_count(node))
    self.assertEqual(set([0, 12]), responses)

    self.coordinator_stats.reset_counts()
    decommission(1)
    wait_for_down(cluster, 1, wait=True)

    self._query(session, keyspace)
    self.coordinator_stats.assert_query_count_equals(self, 1, 0)
    self.coordinator_stats.assert_query_count_equals(self, 2, 12)
    self.coordinator_stats.assert_query_count_equals(self, 3, 0)
    self.coordinator_stats.assert_query_count_equals(self, 4, 0)
    self.coordinator_stats.assert_query_count_equals(self, 5, 0)

    self.coordinator_stats.reset_counts()
    force_stop(2)

    # No hosts left: the query must fail with NoHostAvailable.
    try:
        self._query(session, keyspace)
        self.fail()
    except NoHostAvailable:
        pass
def token_aware(self, keyspace, use_prepared=False):
    """Shared body for token-aware tests with RF=1.

    Node 2 is the sole replica for the test data, so every query must
    be coordinated there; availability tracks node 2 being force
    stopped, restarted, stopped, and finally decommissioned.
    """
    use_singledc()
    cluster, session = self._cluster_session_with_lbp(
        TokenAwarePolicy(RoundRobinPolicy()))
    self._wait_for_nodes_up(range(1, 4), cluster)

    create_schema(cluster, session, keyspace, replication_factor=1)
    self._insert(session, keyspace)

    def assert_all_on_replica():
        # All 12 queries coordinated by node 2, the only replica.
        for node, count in ((1, 0), (2, 12), (3, 0)):
            self.coordinator_stats.assert_query_count_equals(self, node, count)

    self._query(session, keyspace, use_prepared=use_prepared)
    assert_all_on_replica()

    self.coordinator_stats.reset_counts()
    self._query(session, keyspace, use_prepared=use_prepared)
    assert_all_on_replica()

    self.coordinator_stats.reset_counts()
    force_stop(2)
    self._wait_for_nodes_down([2], cluster)

    with self.assertRaises(Unavailable) as raised:
        self._query(session, keyspace, use_prepared=use_prepared)
    err = raised.exception
    self.assertEqual(err.consistency, 1)
    self.assertEqual(err.required_replicas, 1)
    self.assertEqual(err.alive_replicas, 0)

    self.coordinator_stats.reset_counts()
    start(2)
    self._wait_for_nodes_up([2], cluster)

    self._query(session, keyspace, use_prepared=use_prepared)
    assert_all_on_replica()

    self.coordinator_stats.reset_counts()
    stop(2)
    self._wait_for_nodes_down([2], cluster)

    with self.assertRaises(Unavailable):
        self._query(session, keyspace, use_prepared=use_prepared)

    self.coordinator_stats.reset_counts()
    start(2)
    self._wait_for_nodes_up([2], cluster)
    decommission(2)
    self._wait_for_nodes_down([2], cluster)

    # After decommission, the token range moved to exactly one of the
    # surviving nodes, which now takes all 12 queries.
    self._query(session, keyspace, use_prepared=use_prepared)
    observed = {self.coordinator_stats.get_query_count(1),
                self.coordinator_stats.get_query_count(3)}
    self.assertEqual(observed, {0, 12})
    self.coordinator_stats.assert_query_count_equals(self, 2, 0)

    cluster.shutdown()
def test_dc_aware_roundrobin_one_remote_host(self):
    """One-remote-host DC-aware policy on dc2 over a 2+2 ring.

    Traffic stays on dc2 while it is up; after dc2 is decommissioned a
    single dc1 host serves everything; with all hosts gone, querying
    raises NoHostAvailable.
    """
    use_multidc([2, 2])
    keyspace = 'test_dc_aware_roundrobin_one_remote_host'
    policy = DCAwareRoundRobinPolicy('dc2', used_hosts_per_remote_dc=1)
    cluster, session = self._cluster_session_with_lbp(policy)
    self._wait_for_nodes_up(range(1, 5))

    create_schema(cluster, session, keyspace, replication_strategy=[2, 2])
    self._insert(session, keyspace)
    self._query(session, keyspace)

    for node, count in {1: 0, 2: 0, 3: 6, 4: 6}.items():
        self.coordinator_stats.assert_query_count_equals(self, node, count)

    self.coordinator_stats.reset_counts()
    bootstrap(5, 'dc1')
    self._wait_for_nodes_up([5])

    # A freshly bootstrapped dc1 node must not attract traffic.
    self._query(session, keyspace)
    for node, count in {1: 0, 2: 0, 3: 6, 4: 6, 5: 0}.items():
        self.coordinator_stats.assert_query_count_equals(self, node, count)

    self.coordinator_stats.reset_counts()
    decommission(3)
    decommission(4)
    self._wait_for_nodes_down([3, 4])

    self._query(session, keyspace)
    self.coordinator_stats.assert_query_count_equals(self, 3, 0)
    self.coordinator_stats.assert_query_count_equals(self, 4, 0)
    # A single remote fallback host absorbed all 12 queries.
    fallback_counts = set()
    for node in (1, 2, 5):
        fallback_counts.add(self.coordinator_stats.get_query_count(node))
    self.assertEqual({0, 12}, fallback_counts)

    self.coordinator_stats.reset_counts()
    decommission(5)
    self._wait_for_nodes_down([5])

    self._query(session, keyspace)
    self.coordinator_stats.assert_query_count_equals(self, 3, 0)
    self.coordinator_stats.assert_query_count_equals(self, 4, 0)
    self.coordinator_stats.assert_query_count_equals(self, 5, 0)
    fallback_counts = set()
    for node in (1, 2):
        fallback_counts.add(self.coordinator_stats.get_query_count(node))
    self.assertEqual({0, 12}, fallback_counts)

    self.coordinator_stats.reset_counts()
    decommission(1)
    self._wait_for_nodes_down([1])

    self._query(session, keyspace)
    for node, count in {1: 0, 2: 12, 3: 0, 4: 0, 5: 0}.items():
        self.coordinator_stats.assert_query_count_equals(self, node, count)

    self.coordinator_stats.reset_counts()
    force_stop(2)

    try:
        self._query(session, keyspace)
        self.fail()
    except NoHostAvailable:
        pass

    cluster.shutdown()
def token_aware(self, keyspace, use_prepared=False):
    """Token-aware routing with RF=1: the single replica (node 2)
    coordinates everything; Unavailable is raised while it is down;
    after decommission the data lands on one surviving node.
    """
    use_singledc()
    cluster, session = self._cluster_session_with_lbp(TokenAwarePolicy(RoundRobinPolicy()))
    self._wait_for_nodes_up(range(1, 4), cluster)
    create_schema(cluster, session, keyspace, replication_factor=1)
    self._insert(session, keyspace)

    def run_and_check_replica_counts():
        self._query(session, keyspace, use_prepared=use_prepared)
        for host, count in ((1, 0), (2, 12), (3, 0)):
            self.coordinator_stats.assert_query_count_equals(self, host, count)

    run_and_check_replica_counts()

    self.coordinator_stats.reset_counts()
    run_and_check_replica_counts()

    self.coordinator_stats.reset_counts()
    force_stop(2)
    self._wait_for_nodes_down([2], cluster)

    # CL ONE against the lone (dead) replica: 1 required, 0 alive.
    try:
        self._query(session, keyspace, use_prepared=use_prepared)
        self.fail()
    except Unavailable as e:
        self.assertEqual(e.consistency, 1)
        self.assertEqual(e.required_replicas, 1)
        self.assertEqual(e.alive_replicas, 0)

    self.coordinator_stats.reset_counts()
    start(2)
    self._wait_for_nodes_up([2], cluster)

    run_and_check_replica_counts()

    self.coordinator_stats.reset_counts()
    stop(2)
    self._wait_for_nodes_down([2], cluster)

    try:
        self._query(session, keyspace, use_prepared=use_prepared)
        self.fail()
    except Unavailable:
        pass

    self.coordinator_stats.reset_counts()
    start(2)
    self._wait_for_nodes_up([2], cluster)
    decommission(2)
    self._wait_for_nodes_down([2], cluster)

    self._query(session, keyspace, use_prepared=use_prepared)
    # One of the remaining nodes owns the range and takes all queries.
    results = set([
        self.coordinator_stats.get_query_count(1),
        self.coordinator_stats.get_query_count(3)
    ])
    self.assertEqual(results, set([0, 12]))
    self.coordinator_stats.assert_query_count_equals(self, 2, 0)

    cluster.shutdown()