def test_token_aware_with_rf_2(self, use_prepared=False):
    use_singledc()
    keyspace = 'test_token_aware_with_rf_2'
    cluster = Cluster(
        load_balancing_policy=TokenAwarePolicy(RoundRobinPolicy()))
    session = cluster.connect()
    wait_for_up(cluster, 1, wait=False)
    wait_for_up(cluster, 2, wait=False)
    wait_for_up(cluster, 3)

    create_schema(session, keyspace, replication_factor=2)
    self._insert(session, keyspace)
    self._query(session, keyspace)

    self.coordinator_stats.assert_query_count_equals(self, 1, 0)
    self.coordinator_stats.assert_query_count_equals(self, 2, 12)
    self.coordinator_stats.assert_query_count_equals(self, 3, 0)

    self.coordinator_stats.reset_counts()
    stop(2)
    wait_for_down(cluster, 2, wait=True)

    self._query(session, keyspace)

    self.coordinator_stats.assert_query_count_equals(self, 1, 0)
    self.coordinator_stats.assert_query_count_equals(self, 2, 0)
    self.coordinator_stats.assert_query_count_equals(self, 3, 12)
def _test_downgrading_cl(self, keyspace, rf, accepted):
    cluster = Cluster(
        load_balancing_policy=TokenAwarePolicy(RoundRobinPolicy()),
        default_retry_policy=DowngradingConsistencyRetryPolicy(),
        protocol_version=PROTOCOL_VERSION)
    session = cluster.connect()

    create_schema(session, keyspace, replication_factor=rf)
    self._insert(session, keyspace, 1)
    self._query(session, keyspace, 1)

    self.coordinator_stats.assert_query_count_equals(self, 1, 0)
    self.coordinator_stats.assert_query_count_equals(self, 2, 1)
    self.coordinator_stats.assert_query_count_equals(self, 3, 0)

    try:
        force_stop(2)
        wait_for_down(cluster, 2)

        self._assert_writes_succeed(session, keyspace, accepted)
        self._assert_reads_succeed(session, keyspace,
                                   accepted - set([ConsistencyLevel.ANY]))
        self._assert_writes_fail(session, keyspace,
                                 SINGLE_DC_CONSISTENCY_LEVELS - accepted)
        self._assert_reads_fail(session, keyspace,
                                SINGLE_DC_CONSISTENCY_LEVELS - accepted)
    finally:
        start(2)
        wait_for_up(cluster, 2)
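# Hypothetical caller sketch (the test name and argument values are
# illustrative, not taken from this file): with
# DowngradingConsistencyRetryPolicy and rf=2, every single-DC consistency
# level is expected to be retried at a lower level and succeed while node 2
# is down.
def test_rftwo_downgradingcl_example(self):
    self._test_downgrading_cl(
        keyspace='test_rftwo_downgradingcl_example',
        rf=2,
        accepted=SINGLE_DC_CONSISTENCY_LEVELS)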
def test_white_list(self):
    use_singledc()
    keyspace = 'test_white_list'

    cluster = Cluster(('127.0.0.2',),
                      load_balancing_policy=WhiteListRoundRobinPolicy((IP_FORMAT % 2,)))
    session = cluster.connect()
    wait_for_up(cluster, 1, wait=False)
    wait_for_up(cluster, 2, wait=False)
    wait_for_up(cluster, 3)

    create_schema(session, keyspace)
    self._insert(session, keyspace)
    self._query(session, keyspace)

    self.coordinator_stats.assert_query_count_equals(self, 1, 0)
    self.coordinator_stats.assert_query_count_equals(self, 2, 12)
    self.coordinator_stats.assert_query_count_equals(self, 3, 0)

    self.coordinator_stats.reset_counts()
    decommission(2)
    wait_for_down(cluster, 2, wait=True)

    try:
        self._query(session, keyspace)
        self.fail()
    except NoHostAvailable:
        pass
def _test_tokenaware_one_node_down(self, keyspace, rf, accepted):
    cluster = Cluster(
        load_balancing_policy=TokenAwarePolicy(RoundRobinPolicy()))
    session = cluster.connect()
    wait_for_up(cluster, 1, wait=False)
    wait_for_up(cluster, 2)

    create_schema(session, keyspace, replication_factor=rf)
    self._insert(session, keyspace, count=1)
    self._query(session, keyspace, count=1)

    self.coordinator_stats.assert_query_count_equals(self, 1, 0)
    self.coordinator_stats.assert_query_count_equals(self, 2, 1)
    self.coordinator_stats.assert_query_count_equals(self, 3, 0)

    try:
        force_stop(2)
        wait_for_down(cluster, 2)

        self._assert_writes_succeed(session, keyspace, accepted)
        self._assert_reads_succeed(session, keyspace,
                                   accepted - set([ConsistencyLevel.ANY]))
        self._assert_writes_fail(session, keyspace,
                                 SINGLE_DC_CONSISTENCY_LEVELS - accepted)
        self._assert_reads_fail(session, keyspace,
                                SINGLE_DC_CONSISTENCY_LEVELS - accepted)
    finally:
        start(2)
        wait_for_up(cluster, 2)
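# Hypothetical caller sketch (illustrative names/values): unlike the
# downgrading variant above, this helper uses no retry policy, so with rf=2
# and replica node 2 down only ANY and ONE should be served; higher levels
# are expected to fail outright.
def test_rftwo_tokenaware_example(self):
    self._test_tokenaware_one_node_down(
        keyspace='test_rftwo_tokenaware_example',
        rf=2,
        accepted=set([ConsistencyLevel.ANY, ConsistencyLevel.ONE]))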
def test_pool_with_host_down(self):
    """
    Test to ensure that cluster.connect() doesn't return prior to pools being initialized.

    This test will figure out which host our pool logic will connect to
    first. It then shuts that server down. Previously, cluster.connect()
    would return prior to the pools being initialized, and the first
    queries would raise a NoHostAvailable exception.

    @since 3.7.0
    @jira_ticket PYTHON-617
    @expected_result query should complete successfully

    @test_category connection
    """

    # Find the first node we will try to create connections to, and shut it down.
    cluster = Cluster(protocol_version=PROTOCOL_VERSION)
    cluster.connect()
    hosts = cluster.metadata.all_hosts()
    address = hosts[0].address
    node_to_stop = int(address.split('.')[-1:][0])
    try:
        force_stop(node_to_stop)
        wait_for_down(cluster, node_to_stop)
        # Attempt a query against that node. It should complete.
        cluster2 = Cluster(protocol_version=PROTOCOL_VERSION)
        session2 = cluster2.connect()
        session2.execute("SELECT * FROM system.local")
        cluster2.shutdown()
    finally:
        start(node_to_stop)
        wait_for_up(cluster, node_to_stop)
        cluster.shutdown()
def test_white_list(self):
    use_singledc()
    keyspace = 'test_white_list'

    cluster = Cluster(('127.0.0.2',),
                      load_balancing_policy=WhiteListRoundRobinPolicy((IP_FORMAT % 2,)),
                      protocol_version=PROTOCOL_VERSION)
    session = cluster.connect()
    wait_for_up(cluster, 1, wait=False)
    wait_for_up(cluster, 2, wait=False)
    wait_for_up(cluster, 3)

    create_schema(session, keyspace)
    self._insert(session, keyspace)
    self._query(session, keyspace)

    self.coordinator_stats.assert_query_count_equals(self, 1, 0)
    self.coordinator_stats.assert_query_count_equals(self, 2, 12)
    self.coordinator_stats.assert_query_count_equals(self, 3, 0)

    # The white list policy should not allow reconnecting to ignored hosts.
    force_stop(3)
    wait_for_down(cluster, 3)
    self.assertFalse(cluster.metadata._hosts[IP_FORMAT % 3].is_currently_reconnecting())

    self.coordinator_stats.reset_counts()
    force_stop(2)
    time.sleep(10)

    try:
        self._query(session, keyspace)
        self.fail()
    except NoHostAvailable:
        pass
def _wait_for_nodes_down(self, nodes, cluster=None):
    log.debug('entered: _wait_for_nodes_down(nodes={ns}, '
              'cluster={cs})'.format(ns=nodes, cs=cluster))
    if not cluster:
        self._connect_probe_cluster()
        cluster = self.probe_cluster
    for n in nodes:
        wait_for_down(cluster, n)
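# A minimal sketch of the probe-cluster helper assumed above (not defined in
# this file; the real implementation may differ). The idea is a separate,
# lazily created Cluster used only to observe node state, so up/down checks
# don't depend on the possibly restrictive policy under test.
def _connect_probe_cluster(self):
    if not getattr(self, 'probe_cluster', None):
        self.probe_cluster = Cluster(protocol_version=PROTOCOL_VERSION)
        self.probe_session = self.probe_cluster.connect()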
def token_aware(self, keyspace, use_prepared=False):
    use_singledc()
    cluster = Cluster(
        load_balancing_policy=TokenAwarePolicy(RoundRobinPolicy()))
    session = cluster.connect()
    wait_for_up(cluster, 1, wait=False)
    wait_for_up(cluster, 2, wait=False)
    wait_for_up(cluster, 3)

    create_schema(session, keyspace, replication_factor=1)
    self._insert(session, keyspace)
    self._query(session, keyspace, use_prepared=use_prepared)

    self.coordinator_stats.assert_query_count_equals(self, 1, 0)
    self.coordinator_stats.assert_query_count_equals(self, 2, 12)
    self.coordinator_stats.assert_query_count_equals(self, 3, 0)

    self.coordinator_stats.reset_counts()
    self._query(session, keyspace, use_prepared=use_prepared)

    self.coordinator_stats.assert_query_count_equals(self, 1, 0)
    self.coordinator_stats.assert_query_count_equals(self, 2, 12)
    self.coordinator_stats.assert_query_count_equals(self, 3, 0)

    self.coordinator_stats.reset_counts()
    force_stop(2)
    wait_for_down(cluster, 2, wait=True)

    try:
        self._query(session, keyspace, use_prepared=use_prepared)
        self.fail()
    except Unavailable as e:
        self.assertEqual(e.consistency, 1)
        self.assertEqual(e.required_replicas, 1)
        self.assertEqual(e.alive_replicas, 0)

    self.coordinator_stats.reset_counts()
    start(2)
    wait_for_up(cluster, 2, wait=True)

    self._query(session, keyspace, use_prepared=use_prepared)

    self.coordinator_stats.assert_query_count_equals(self, 1, 0)
    self.coordinator_stats.assert_query_count_equals(self, 2, 12)
    self.coordinator_stats.assert_query_count_equals(self, 3, 0)

    self.coordinator_stats.reset_counts()
    decommission(2)
    wait_for_down(cluster, 2, wait=True)

    self._query(session, keyspace, use_prepared=use_prepared)

    self.coordinator_stats.assert_query_count_equals(self, 1, 6)
    self.coordinator_stats.assert_query_count_equals(self, 2, 0)
    self.coordinator_stats.assert_query_count_equals(self, 3, 6)
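# Hypothetical caller sketch (illustrative names): the token_aware helper is
# parameterized so the same scenario can be driven with simple statements and
# with prepared statements.
def test_token_aware_example(self):
    self.token_aware('test_token_aware_example')

def test_token_aware_prepared_example(self):
    self.token_aware('test_token_aware_prepared_example', use_prepared=True)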
def test_pool_with_host_down(self):
    """
    Test to ensure that cluster.connect() doesn't return prior to pools being initialized.

    This test will figure out which host our pool logic will connect to
    first. It then shuts that server down. Previously, cluster.connect()
    would return prior to the pools being initialized, and the first
    queries would raise a NoHostAvailable exception.

    @since 3.7.0
    @jira_ticket PYTHON-617
    @expected_result query should complete successfully

    @test_category connection
    """

    # We will be shutting down an arbitrary host, so we need a complete contact list.
    all_contact_points = ["127.0.0.1", "127.0.0.2", "127.0.0.3"]

    # Connect and find out which host queries will be routed to first.
    cluster = Cluster(protocol_version=PROTOCOL_VERSION)
    cluster.connect(wait_for_all_pools=True)
    hosts = cluster.metadata.all_hosts()
    address = hosts[0].address
    node_to_stop = int(address.split('.')[-1:][0])
    cluster.shutdown()

    # We now create a cluster whose control connection is NOT on the node
    # being shut down, so we don't miss the down event.
    contact_point = '127.0.0.{0}'.format(self.get_node_not_x(node_to_stop))
    cluster = Cluster(contact_points=[contact_point], protocol_version=PROTOCOL_VERSION)
    cluster.connect(wait_for_all_pools=True)

    cluster2 = None
    try:
        force_stop(node_to_stop)
        wait_for_down(cluster, node_to_stop)
        # Attempt a query against that node. It should complete.
        cluster2 = Cluster(contact_points=all_contact_points, protocol_version=PROTOCOL_VERSION)
        session2 = cluster2.connect()
        session2.execute("SELECT * FROM system.local")
    finally:
        if cluster2:
            cluster2.shutdown()
        start(node_to_stop)
        wait_for_up(cluster, node_to_stop)
        cluster.shutdown()
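# A minimal sketch of the get_node_not_x helper used above (not defined in
# this file; an assumption about its behavior). It returns any node number
# other than the one being stopped, so the control connection lands on a
# surviving node.
def get_node_not_x(self, node_to_stop):
    for num in (1, 2, 3):
        if num != node_to_stop:
            return num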
def rfthree_downgradingcl(self, cluster, keyspace, roundrobin):
    session = cluster.connect(wait_for_all_pools=True)

    create_schema(cluster, session, keyspace, replication_factor=2)
    self._insert(session, keyspace, count=12)
    self._query(session, keyspace, count=12)

    if roundrobin:
        self.coordinator_stats.assert_query_count_equals(self, 1, 4)
        self.coordinator_stats.assert_query_count_equals(self, 2, 4)
        self.coordinator_stats.assert_query_count_equals(self, 3, 4)
    else:
        self.coordinator_stats.assert_query_count_equals(self, 1, 0)
        self.coordinator_stats.assert_query_count_equals(self, 2, 12)
        self.coordinator_stats.assert_query_count_equals(self, 3, 0)

    try:
        self.coordinator_stats.reset_counts()
        force_stop(2)
        wait_for_down(cluster, 2)

        self._assert_writes_succeed(session, keyspace, SINGLE_DC_CONSISTENCY_LEVELS)

        # Test reads that are expected to complete successfully.
        for cl in SINGLE_DC_CONSISTENCY_LEVELS - set([ConsistencyLevel.ANY]):
            self.coordinator_stats.reset_counts()
            self._query(session, keyspace, 12, consistency_level=cl)
            if roundrobin:
                self.coordinator_stats.assert_query_count_equals(self, 1, 6)
                self.coordinator_stats.assert_query_count_equals(self, 2, 0)
                self.coordinator_stats.assert_query_count_equals(self, 3, 6)
            else:
                self.coordinator_stats.assert_query_count_equals(self, 1, 0)
                self.coordinator_stats.assert_query_count_equals(self, 2, 0)
                self.coordinator_stats.assert_query_count_equals(self, 3, 12)
    finally:
        start(2)
        wait_for_up(cluster, 2)

    session.cluster.shutdown()
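# Hypothetical caller sketch (illustrative): the roundrobin flag selects the
# expected coordinator distribution, so the same helper can validate both a
# plain RoundRobinPolicy and a token-aware wrapping of it.
def test_rfthree_roundrobin_downgradingcl_example(self):
    cluster = Cluster(
        load_balancing_policy=RoundRobinPolicy(),
        default_retry_policy=DowngradingConsistencyRetryPolicy(),
        protocol_version=PROTOCOL_VERSION)
    self.rfthree_downgradingcl(cluster, 'test_rfthree_rr_downgradingcl_example', roundrobin=True)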
def test_roundrobin(self):
    use_singledc()
    keyspace = 'test_roundrobin'
    cluster = Cluster(
        load_balancing_policy=RoundRobinPolicy(),
        protocol_version=PROTOCOL_VERSION)
    session = cluster.connect()
    wait_for_up(cluster, 1, wait=False)
    wait_for_up(cluster, 2, wait=False)
    wait_for_up(cluster, 3)

    create_schema(cluster, session, keyspace, replication_factor=3)
    self._insert(session, keyspace)
    self._query(session, keyspace)

    self.coordinator_stats.assert_query_count_equals(self, 1, 4)
    self.coordinator_stats.assert_query_count_equals(self, 2, 4)
    self.coordinator_stats.assert_query_count_equals(self, 3, 4)

    force_stop(3)
    wait_for_down(cluster, 3)

    self.coordinator_stats.reset_counts()
    self._query(session, keyspace)

    self.coordinator_stats.assert_query_count_equals(self, 1, 6)
    self.coordinator_stats.assert_query_count_equals(self, 2, 6)
    self.coordinator_stats.assert_query_count_equals(self, 3, 0)

    decommission(1)
    start(3)
    wait_for_down(cluster, 1)
    wait_for_up(cluster, 3)

    self.coordinator_stats.reset_counts()
    self._query(session, keyspace)

    self.coordinator_stats.assert_query_count_equals(self, 1, 0)
    self.coordinator_stats.assert_query_count_equals(self, 2, 6)
    self.coordinator_stats.assert_query_count_equals(self, 3, 6)
def test_white_list(self):
    use_singledc()
    keyspace = 'test_white_list'

    cluster = Cluster(('127.0.0.2',),
                      load_balancing_policy=WhiteListRoundRobinPolicy((IP_FORMAT % 2,)),
                      protocol_version=PROTOCOL_VERSION)
    session = cluster.connect()
    wait_for_up(cluster, 1, wait=False)
    wait_for_up(cluster, 2, wait=False)
    wait_for_up(cluster, 3)

    create_schema(session, keyspace)
    self._insert(session, keyspace)
    self._query(session, keyspace)

    self.coordinator_stats.assert_query_count_equals(self, 1, 0)
    self.coordinator_stats.assert_query_count_equals(self, 2, 12)
    self.coordinator_stats.assert_query_count_equals(self, 3, 0)

    # The white list policy should not allow reconnecting to ignored hosts.
    force_stop(3)
    wait_for_down(cluster, 3)
    self.assertFalse(
        cluster.metadata._hosts[IP_FORMAT % 3].is_currently_reconnecting())

    self.coordinator_stats.reset_counts()
    force_stop(2)
    time.sleep(10)

    try:
        self._query(session, keyspace)
        self.fail()
    except NoHostAvailable:
        pass

    cluster.shutdown()
def _wait_for_nodes_down(self, nodes, cluster=None):
    if not cluster:
        self._connect_probe_cluster()
        cluster = self.probe_cluster
    for n in nodes:
        wait_for_down(cluster, n)
def token_aware(self, keyspace, use_prepared=False):
    use_singledc()
    cluster = Cluster(
        load_balancing_policy=TokenAwarePolicy(RoundRobinPolicy()),
        protocol_version=PROTOCOL_VERSION)
    session = cluster.connect()
    wait_for_up(cluster, 1, wait=False)
    wait_for_up(cluster, 2, wait=False)
    wait_for_up(cluster, 3)

    create_schema(session, keyspace, replication_factor=1)
    self._insert(session, keyspace)
    self._query(session, keyspace, use_prepared=use_prepared)

    self.coordinator_stats.assert_query_count_equals(self, 1, 0)
    self.coordinator_stats.assert_query_count_equals(self, 2, 12)
    self.coordinator_stats.assert_query_count_equals(self, 3, 0)

    self.coordinator_stats.reset_counts()
    self._query(session, keyspace, use_prepared=use_prepared)

    self.coordinator_stats.assert_query_count_equals(self, 1, 0)
    self.coordinator_stats.assert_query_count_equals(self, 2, 12)
    self.coordinator_stats.assert_query_count_equals(self, 3, 0)

    self.coordinator_stats.reset_counts()
    force_stop(2)
    wait_for_down(cluster, 2, wait=True)

    try:
        self._query(session, keyspace, use_prepared=use_prepared)
        self.fail()
    except Unavailable as e:
        self.assertEqual(e.consistency, 1)
        self.assertEqual(e.required_replicas, 1)
        self.assertEqual(e.alive_replicas, 0)

    self.coordinator_stats.reset_counts()
    start(2)
    wait_for_up(cluster, 2, wait=True)

    self._query(session, keyspace, use_prepared=use_prepared)

    self.coordinator_stats.assert_query_count_equals(self, 1, 0)
    self.coordinator_stats.assert_query_count_equals(self, 2, 12)
    self.coordinator_stats.assert_query_count_equals(self, 3, 0)

    self.coordinator_stats.reset_counts()
    stop(2)
    wait_for_down(cluster, 2, wait=True)

    try:
        self._query(session, keyspace, use_prepared=use_prepared)
        self.fail()
    except Unavailable:
        pass

    self.coordinator_stats.reset_counts()
    start(2)
    wait_for_up(cluster, 2, wait=True)

    decommission(2)
    wait_for_down(cluster, 2, wait=True)

    self._query(session, keyspace, use_prepared=use_prepared)

    results = set([
        self.coordinator_stats.get_query_count(1),
        self.coordinator_stats.get_query_count(3)
    ])
    self.assertEqual(results, set([0, 12]))
    self.coordinator_stats.assert_query_count_equals(self, 2, 0)

    cluster.shutdown()
def test_dc_aware_roundrobin_one_remote_host(self):
    use_multidc([2, 2])
    keyspace = 'test_dc_aware_roundrobin_one_remote_host'
    cluster = Cluster(
        load_balancing_policy=DCAwareRoundRobinPolicy('dc2', used_hosts_per_remote_dc=1),
        protocol_version=PROTOCOL_VERSION)
    session = cluster.connect()
    wait_for_up(cluster, 1, wait=False)
    wait_for_up(cluster, 2, wait=False)
    wait_for_up(cluster, 3, wait=False)
    wait_for_up(cluster, 4)

    create_schema(session, keyspace, replication_strategy=[2, 2])
    self._insert(session, keyspace)
    self._query(session, keyspace)

    self.coordinator_stats.assert_query_count_equals(self, 1, 0)
    self.coordinator_stats.assert_query_count_equals(self, 2, 0)
    self.coordinator_stats.assert_query_count_equals(self, 3, 6)
    self.coordinator_stats.assert_query_count_equals(self, 4, 6)

    self.coordinator_stats.reset_counts()
    bootstrap(5, 'dc1')
    wait_for_up(cluster, 5)

    self._query(session, keyspace)

    self.coordinator_stats.assert_query_count_equals(self, 1, 0)
    self.coordinator_stats.assert_query_count_equals(self, 2, 0)
    self.coordinator_stats.assert_query_count_equals(self, 3, 6)
    self.coordinator_stats.assert_query_count_equals(self, 4, 6)
    self.coordinator_stats.assert_query_count_equals(self, 5, 0)

    self.coordinator_stats.reset_counts()
    decommission(3)
    decommission(4)
    wait_for_down(cluster, 3, wait=True)
    wait_for_down(cluster, 4, wait=True)

    self._query(session, keyspace)

    self.coordinator_stats.assert_query_count_equals(self, 3, 0)
    self.coordinator_stats.assert_query_count_equals(self, 4, 0)
    responses = set()
    for node in [1, 2, 5]:
        responses.add(self.coordinator_stats.get_query_count(node))
    self.assertEqual(set([0, 0, 12]), responses)

    self.coordinator_stats.reset_counts()
    decommission(5)
    wait_for_down(cluster, 5, wait=True)

    self._query(session, keyspace)

    self.coordinator_stats.assert_query_count_equals(self, 3, 0)
    self.coordinator_stats.assert_query_count_equals(self, 4, 0)
    self.coordinator_stats.assert_query_count_equals(self, 5, 0)
    responses = set()
    for node in [1, 2]:
        responses.add(self.coordinator_stats.get_query_count(node))
    self.assertEqual(set([0, 12]), responses)

    self.coordinator_stats.reset_counts()
    decommission(1)
    wait_for_down(cluster, 1, wait=True)

    self._query(session, keyspace)

    self.coordinator_stats.assert_query_count_equals(self, 1, 0)
    self.coordinator_stats.assert_query_count_equals(self, 2, 12)
    self.coordinator_stats.assert_query_count_equals(self, 3, 0)
    self.coordinator_stats.assert_query_count_equals(self, 4, 0)
    self.coordinator_stats.assert_query_count_equals(self, 5, 0)

    self.coordinator_stats.reset_counts()
    force_stop(2)

    try:
        self._query(session, keyspace)
        self.fail()
    except NoHostAvailable:
        pass

    cluster.shutdown()
def test_dc_aware_roundrobin_one_remote_host(self):
    use_multidc([2, 2])
    keyspace = 'test_dc_aware_roundrobin_one_remote_host'
    cluster = Cluster(
        load_balancing_policy=DCAwareRoundRobinPolicy('dc2', used_hosts_per_remote_dc=1))
    session = cluster.connect()
    wait_for_up(cluster, 1, wait=False)
    wait_for_up(cluster, 2, wait=False)
    wait_for_up(cluster, 3, wait=False)
    wait_for_up(cluster, 4)

    create_schema(session, keyspace, replication_strategy=[2, 2])
    self._insert(session, keyspace)
    self._query(session, keyspace)

    self.coordinator_stats.assert_query_count_equals(self, 1, 0)
    self.coordinator_stats.assert_query_count_equals(self, 2, 0)
    self.coordinator_stats.assert_query_count_equals(self, 3, 6)
    self.coordinator_stats.assert_query_count_equals(self, 4, 6)

    self.coordinator_stats.reset_counts()
    bootstrap(5, 'dc1')
    wait_for_up(cluster, 5)

    self._query(session, keyspace)

    self.coordinator_stats.assert_query_count_equals(self, 1, 0)
    self.coordinator_stats.assert_query_count_equals(self, 2, 0)
    self.coordinator_stats.assert_query_count_equals(self, 3, 6)
    self.coordinator_stats.assert_query_count_equals(self, 4, 6)
    self.coordinator_stats.assert_query_count_equals(self, 5, 0)

    self.coordinator_stats.reset_counts()
    decommission(3)
    decommission(4)
    wait_for_down(cluster, 3, wait=True)
    wait_for_down(cluster, 4, wait=True)

    self._query(session, keyspace)

    self.coordinator_stats.assert_query_count_equals(self, 3, 0)
    self.coordinator_stats.assert_query_count_equals(self, 4, 0)
    responses = set()
    for node in [1, 2, 5]:
        responses.add(self.coordinator_stats.get_query_count(node))
    self.assertEqual(set([0, 0, 12]), responses)

    self.coordinator_stats.reset_counts()
    decommission(5)
    wait_for_down(cluster, 5, wait=True)

    self._query(session, keyspace)

    self.coordinator_stats.assert_query_count_equals(self, 3, 0)
    self.coordinator_stats.assert_query_count_equals(self, 4, 0)
    self.coordinator_stats.assert_query_count_equals(self, 5, 0)
    responses = set()
    for node in [1, 2]:
        responses.add(self.coordinator_stats.get_query_count(node))
    self.assertEqual(set([0, 12]), responses)

    self.coordinator_stats.reset_counts()
    decommission(1)
    wait_for_down(cluster, 1, wait=True)

    self._query(session, keyspace)

    self.coordinator_stats.assert_query_count_equals(self, 1, 0)
    self.coordinator_stats.assert_query_count_equals(self, 2, 12)
    self.coordinator_stats.assert_query_count_equals(self, 3, 0)
    self.coordinator_stats.assert_query_count_equals(self, 4, 0)
    self.coordinator_stats.assert_query_count_equals(self, 5, 0)

    self.coordinator_stats.reset_counts()
    force_stop(2)

    try:
        self._query(session, keyspace)
        self.fail()
    except NoHostAvailable:
        pass