def test_black_list_with_host_filter_policy(self):
    """
    Test to validate removing certain hosts from the query plan with
    HostFilterPolicy

    @since 3.8
    @jira_ticket PYTHON-961
    @expected_result the excluded hosts are ignored

    @test_category policy
    """
    use_singledc()
    keyspace = 'test_black_list_with_hfp'
    ignored_address = (IP_FORMAT % 2)
    hfp = HostFilterPolicy(
        child_policy=RoundRobinPolicy(),
        predicate=lambda host: host.address != ignored_address
    )
    cluster = Cluster(
        (IP_FORMAT % 1,),
        load_balancing_policy=hfp,
        protocol_version=PROTOCOL_VERSION,
        topology_event_refresh_window=0,
        status_event_refresh_window=0
    )
    self.addCleanup(cluster.shutdown)
    session = cluster.connect()
    self._wait_for_nodes_up([1, 2, 3])

    # The ignored host must never appear in a query plan.
    self.assertNotIn(ignored_address, [h.address for h in hfp.make_query_plan()])

    create_schema(cluster, session, keyspace)
    self._insert(session, keyspace)
    self._query(session, keyspace)

    # RoundRobin doesn't provide a guarantee on the order of the hosts,
    # so for 127.0.0.1 and 127.0.0.3 one coordinator will see 4 queries
    # and the other 8 — only the total of 12 is deterministic. Asserting
    # exact per-node counts (6/6) makes the test flaky.
    first_node_count = self.coordinator_stats.get_query_count(1)
    third_node_count = self.coordinator_stats.get_query_count(3)
    self.assertEqual(first_node_count + third_node_count, 12)
    self.assertTrue(first_node_count == 8 or first_node_count == 4)
    self.coordinator_stats.assert_query_count_equals(self, 2, 0)

    # policy should not allow reconnecting to ignored host
    force_stop(2)
    self._wait_for_nodes_down([2])
    # Use the public metadata accessor rather than the private _hosts map.
    self.assertFalse(
        cluster.metadata.get_host(ignored_address).is_currently_reconnecting())
def test_black_list_with_host_filter_policy(self):
    """
    Validate that HostFilterPolicy removes the filtered-out host from the
    query plan and never reconnects to it

    @since 3.8
    @jira_ticket PYTHON-961
    @expected_result the excluded hosts are ignored

    @test_category policy
    """
    use_singledc()
    keyspace = 'test_black_list_with_hfp'
    ignored_address = (IP_FORMAT % 2)
    filter_policy = HostFilterPolicy(
        child_policy=RoundRobinPolicy(),
        predicate=lambda host: host.address != ignored_address)
    cluster = Cluster(
        (IP_FORMAT % 1, ),
        protocol_version=PROTOCOL_VERSION,
        topology_event_refresh_window=0,
        status_event_refresh_window=0,
        execution_profiles={
            EXEC_PROFILE_DEFAULT: ExecutionProfile(
                load_balancing_policy=filter_policy)
        })
    self.addCleanup(cluster.shutdown)
    session = cluster.connect()
    self._wait_for_nodes_up([1, 2, 3])

    # The ignored host must never show up in a query plan.
    planned_addresses = [h.address for h in filter_policy.make_query_plan()]
    self.assertNotIn(ignored_address, planned_addresses)

    create_schema(cluster, session, keyspace)
    self._insert(session, keyspace)
    self._query(session, keyspace)

    # RoundRobin gives no guarantee on host ordering, so between
    # 127.0.0.1 and 127.0.0.3 one coordinator handles 4 queries and the
    # other 8; only the combined total of 12 is deterministic.
    count_node1 = self.coordinator_stats.get_query_count(1)
    count_node3 = self.coordinator_stats.get_query_count(3)
    self.assertEqual(count_node1 + count_node3, 12)
    self.assertTrue(count_node1 == 8 or count_node1 == 4)
    self.coordinator_stats.assert_query_count_equals(self, 2, 0)

    # The policy must also block reconnection attempts to the ignored
    # host once it goes down.
    force_stop(2)
    self._wait_for_nodes_down([2])
    self.assertFalse(
        cluster.metadata.get_host(ignored_address).is_currently_reconnecting())
def test_black_list_with_host_filter_policy(self):
    """
    Test to validate removing certain hosts from the query plan with
    HostFilterPolicy

    @since 3.8
    @jira_ticket PYTHON-961
    @expected_result the excluded hosts are ignored

    @test_category policy
    """
    use_singledc()
    keyspace = 'test_black_list_with_hfp'
    ignored_address = (IP_FORMAT % 2)
    hfp = HostFilterPolicy(
        child_policy=RoundRobinPolicy(),
        predicate=lambda host: host.address != ignored_address
    )
    cluster = Cluster(
        (IP_FORMAT % 1,),
        load_balancing_policy=hfp,
        protocol_version=PROTOCOL_VERSION,
        topology_event_refresh_window=0,
        status_event_refresh_window=0
    )
    self.addCleanup(cluster.shutdown)
    session = cluster.connect()
    self._wait_for_nodes_up([1, 2, 3])

    # The ignored host must never appear in a query plan.
    self.assertNotIn(ignored_address, [h.address for h in hfp.make_query_plan()])

    create_schema(cluster, session, keyspace)
    self._insert(session, keyspace)
    self._query(session, keyspace)

    # RoundRobin doesn't provide a guarantee on the order of the hosts
    # so we will have that for 127.0.0.1 and 127.0.0.3 the count for one
    # will be 4 and for the other 8
    first_node_count = self.coordinator_stats.get_query_count(1)
    third_node_count = self.coordinator_stats.get_query_count(3)
    self.assertEqual(first_node_count + third_node_count, 12)
    self.assertTrue(first_node_count == 8 or first_node_count == 4)
    self.coordinator_stats.assert_query_count_equals(self, 2, 0)

    # policy should not allow reconnecting to ignored host
    force_stop(2)
    self._wait_for_nodes_down([2])
    # Use the public metadata accessor instead of the private _hosts map.
    self.assertFalse(
        cluster.metadata.get_host(ignored_address).is_currently_reconnecting())
def setUp(self):
    """
    Setup sessions and pause node1
    """
    # Pin every request to 127.0.0.1 via HostFilterPolicy so the paused
    # node is the only coordinator the session will ever use.
    self.cluster = TestCluster(
        execution_profiles={
            EXEC_PROFILE_DEFAULT: ExecutionProfile(
                load_balancing_policy=HostFilterPolicy(
                    RoundRobinPolicy(), lambda host: host.address == "127.0.0.1"
                )
            )
        }
    )
    self.session = self.cluster.connect(wait_for_all_pools=True)

    # node1 presumably carries the control connection (see the attribute
    # name) — the tests pause it to exercise timeout behavior.
    self.control_connection_host_number = 1
    self.node_to_stop = get_node(self.control_connection_host_number)

    ddl = '''
        CREATE TABLE test3rf.timeout (
            k int PRIMARY KEY,
            v int )'''
    self.session.execute(ddl)
    # Pause only after the DDL succeeds, so schema creation itself
    # cannot time out against the stopped node.
    self.node_to_stop.pause()
def setUp(self):
    """Connect to test3rf, restricting queries to the single contact point."""
    allowed_hosts = ['127.0.0.2']
    # Only the contact point itself may serve as coordinator; retries are
    # disabled entirely via FallthroughRetryPolicy.
    self.cluster = Cluster(
        contact_points=allowed_hosts,
        metrics_enabled=True,
        protocol_version=PROTOCOL_VERSION,
        load_balancing_policy=HostFilterPolicy(
            RoundRobinPolicy(),
            lambda host: host.address in allowed_hosts),
        default_retry_policy=FallthroughRetryPolicy())
    self.session = self.cluster.connect("test3rf", wait_for_all_pools=True)
def setUp(self):
    """Shrink the per-connection request window and connect to one host."""
    # Remember the driver-wide default (presumably restored in tearDown)
    # before forcing a tiny in-flight limit for this test.
    self.defaultInFlight = Connection.max_in_flight
    Connection.max_in_flight = 2
    single_host_policy = HostFilterPolicy(
        RoundRobinPolicy(),
        predicate=lambda host: host.address == CASSANDRA_IP)
    self.cluster = Cluster(
        protocol_version=PROTOCOL_VERSION,
        load_balancing_policy=single_host_policy)
    self.session = self.cluster.connect()
def setUp(self):
    """Open a session whose default profile targets only CASSANDRA_IP."""
    single_host_policy = HostFilterPolicy(
        RoundRobinPolicy(),
        predicate=lambda host: host.address == CASSANDRA_IP)
    profiles = {
        EXEC_PROFILE_DEFAULT: ExecutionProfile(
            load_balancing_policy=single_host_policy)
    }
    self.cluster = TestCluster(execution_profiles=profiles)
    self.session = self.cluster.connect()
def generateCluster(self):
    """Build a Cluster whose query plans only contain addresses passing isAddressAccepted."""
    policy = HostFilterPolicy(
        child_policy=RoundRobinPolicy(),
        predicate=self.isAddressAccepted)
    return Cluster([self.host_address], load_balancing_policy=policy)
def setUp(self):
    """Connect to test3rf with metrics on, pinned to the single contact point."""
    whitelist = ['127.0.0.2']
    # Only the contact point may coordinate queries, and retries are
    # disabled outright with FallthroughRetryPolicy.
    default_profile = ExecutionProfile(
        load_balancing_policy=HostFilterPolicy(
            RoundRobinPolicy(),
            lambda host: host.address in whitelist),
        retry_policy=FallthroughRetryPolicy())
    self.cluster = TestCluster(
        contact_points=whitelist,
        metrics_enabled=True,
        execution_profiles={EXEC_PROFILE_DEFAULT: default_profile})
    self.session = self.cluster.connect("test3rf", wait_for_all_pools=True)
def test_host_filter_policy(self):
    """Insights serialization of HostFilterPolicy records the child policy and the predicate's name."""
    def my_predicate(s):
        return False

    policy = HostFilterPolicy(LoadBalancingPolicy(), my_predicate)
    expected = {
        'namespace': 'cassandra.policies',
        'type': 'HostFilterPolicy',
        'options': {
            'child_policy': {
                'namespace': 'cassandra.policies',
                'options': {},
                'type': 'LoadBalancingPolicy',
            },
            # Only the function's name is serialized, not its body.
            'predicate': 'my_predicate',
        },
    }
    self.assertEqual(insights_registry.serialize(policy), expected)
def test_predicate_changes(self):
    """
    Test to validate host filter reacts correctly when the predicate returns
    a different subset of the hosts
    HostFilterPolicy
    @since 3.8
    @jira_ticket PYTHON-961
    @expected_result the excluded hosts are ignored

    @test_category policy
    """
    external_event = True
    contact_point = DefaultEndPoint("127.0.0.1")

    single_host = {Host(contact_point, SimpleConvictionPolicy)}
    all_hosts = {Host(DefaultEndPoint("127.0.0.{}".format(i)), SimpleConvictionPolicy) for i in (1, 2, 3)}

    # NOTE: the lambda closes over external_event (late binding), so the
    # predicate is re-evaluated each time the policy calls it: while
    # external_event is True only the contact point passes; after it is
    # flipped to False below, every host passes.
    predicate = lambda host: host.endpoint == contact_point if external_event else True
    hfp = ExecutionProfile(load_balancing_policy=HostFilterPolicy(RoundRobinPolicy(), predicate=predicate))
    cluster = Cluster((contact_point, ),
                      execution_profiles={EXEC_PROFILE_DEFAULT: hfp},
                      protocol_version=PROTOCOL_VERSION,
                      topology_event_refresh_window=0,
                      status_event_refresh_window=0)
    session = cluster.connect(wait_for_all_pools=True)

    # With the predicate restricting the plan, only the contact point
    # should ever coordinate queries.
    queried_hosts = set()
    for _ in range(10):
        response = session.execute("SELECT * from system.local")
        queried_hosts.update(response.response_future.attempted_hosts)
    self.assertEqual(queried_hosts, single_host)

    # Flip the closed-over flag so the predicate now accepts every host,
    # then rebuild the connection pools to pick up the change.
    external_event = False
    futures = session.update_created_pools()
    wait_futures(futures, timeout=cluster.connect_timeout)

    # All three hosts should now appear as coordinators.
    queried_hosts = set()
    for _ in range(10):
        response = session.execute("SELECT * from system.local")
        queried_hosts.update(response.response_future.attempted_hosts)
    self.assertEqual(queried_hosts, all_hosts)