def test_host_order(self):
    """
    Test that Host instances are ordered consistently

    @since 3.9
    @jira_ticket PYTHON-714
    @expected_result the hosts are ordered correctly

    @test_category data_types
    """
    hosts = [Host(addr, SimpleConvictionPolicy) for addr in
             ("127.0.0.1", "127.0.0.2", "127.0.0.3", "127.0.0.4")]
    hosts_equal = [Host(addr, SimpleConvictionPolicy) for addr in
                   ("127.0.0.1", "127.0.0.1")]
    hosts_equal_conviction = [Host("127.0.0.1", SimpleConvictionPolicy),
                              Host("127.0.0.1", ConvictionPolicy)]
    check_sequence_consistency(self, hosts)
    check_sequence_consistency(self, hosts_equal, equal=True)
    check_sequence_consistency(self, hosts_equal_conviction, equal=True)

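# check_sequence_consistency is a shared test utility; below is a minimal
# sketch of the kind of invariants such a helper presumably checks for every
# pair in the sequence (an assumption for illustration, not the actual util):
def _check_sequence_consistency_sketch(test, ordered_sequence, equal=False):
    for i, first in enumerate(ordered_sequence):
        for second in ordered_sequence[i + 1:]:
            if equal:
                # equal items must compare equal and not sort before each other
                test.assertEqual(first, second)
                test.assertFalse(first < second)
            else:
                # distinct items must sort in the listed order
                test.assertNotEqual(first, second)
                test.assertLess(first, second)
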
def test_simple_strategy_make_token_replica_map(self):
    host1 = Host('1', SimpleConvictionPolicy)
    host2 = Host('2', SimpleConvictionPolicy)
    host3 = Host('3', SimpleConvictionPolicy)
    token_to_host_owner = {MD5Token(0): host1,
                           MD5Token(100): host2,
                           MD5Token(200): host3}
    ring = [MD5Token(0), MD5Token(100), MD5Token(200)]

    rf1_replicas = SimpleStrategy({'replication_factor': '1'}).make_token_replica_map(token_to_host_owner, ring)
    self.assertItemsEqual(rf1_replicas[MD5Token(0)], [host1])
    self.assertItemsEqual(rf1_replicas[MD5Token(100)], [host2])
    self.assertItemsEqual(rf1_replicas[MD5Token(200)], [host3])

    rf2_replicas = SimpleStrategy({'replication_factor': '2'}).make_token_replica_map(token_to_host_owner, ring)
    self.assertItemsEqual(rf2_replicas[MD5Token(0)], [host1, host2])
    self.assertItemsEqual(rf2_replicas[MD5Token(100)], [host2, host3])
    self.assertItemsEqual(rf2_replicas[MD5Token(200)], [host3, host1])

    rf3_replicas = SimpleStrategy({'replication_factor': '3'}).make_token_replica_map(token_to_host_owner, ring)
    self.assertItemsEqual(rf3_replicas[MD5Token(0)], [host1, host2, host3])
    self.assertItemsEqual(rf3_replicas[MD5Token(100)], [host2, host3, host1])
    self.assertItemsEqual(rf3_replicas[MD5Token(200)], [host3, host1, host2])

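# For intuition: SimpleStrategy places replicas by walking the ring clockwise
# from each token and taking the next `rf` distinct hosts, which is exactly the
# rotation the assertions above expect. A minimal illustrative
# re-implementation (a sketch for clarity, not the driver's actual code):
def _simple_strategy_replicas_sketch(token_to_host_owner, ring, rf):
    replica_map = {}
    for i, token in enumerate(ring):
        replicas = []
        for j in range(len(ring)):
            host = token_to_host_owner[ring[(i + j) % len(ring)]]
            if host not in replicas:
                replicas.append(host)
            if len(replicas) == rf:
                break
        replica_map[token] = replicas
    return replica_map
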
def test_nts_make_token_replica_map_empty_dc(self):
    host = Host('1', SimpleConvictionPolicy)
    host.set_location_info('dc1', 'rack1')
    token_to_host_owner = {MD5Token(0): host}
    ring = [MD5Token(0)]

    nts = NetworkTopologyStrategy({'dc1': 1, 'dc2': 0})
    replica_map = nts.make_token_replica_map(token_to_host_owner, ring)
    self.assertEqual(set(replica_map[MD5Token(0)]), set([host]))

def test_no_remote(self):
    hosts = []
    for i in range(4):
        h = Host(i, SimpleConvictionPolicy)
        h.set_location_info("dc1", "rack1")
        hosts.append(h)

    policy = DCAwareRoundRobinPolicy("dc1")
    policy.populate(None, hosts)

    qplan = list(policy.make_query_plan())
    self.assertEqual(sorted(qplan), sorted(hosts))

def __init__(self):
    self.hosts = {
        "192.168.1.0": Host("192.168.1.0", SimpleConvictionPolicy),
        "192.168.1.1": Host("192.168.1.1", SimpleConvictionPolicy),
        "192.168.1.2": Host("192.168.1.2", SimpleConvictionPolicy)
    }
    for host in self.hosts.values():
        host.set_up()
    self.cluster_name = None
    self.partitioner = None
    self.token_map = {}

def test_iterate_all_hosts_and_modify(self):
    """
    PYTHON-572
    """
    metadata = Metadata()
    metadata.add_or_return_host(Host('dc1.1', SimpleConvictionPolicy))
    metadata.add_or_return_host(Host('dc1.2', SimpleConvictionPolicy))

    self.assertEqual(len(metadata.all_hosts()), 2)

    for host in metadata.all_hosts():  # this would previously raise in Py3
        metadata.remove_host(host)

    self.assertEqual(len(metadata.all_hosts()), 0)

def test_statement_params_override_profile(self):
    non_default_profile = ExecutionProfile(RoundRobinPolicy(), *[object() for _ in range(3)])
    cluster = Cluster(execution_profiles={'non-default': non_default_profile})
    session = Session(cluster, hosts=[Host("127.0.0.1", SimpleConvictionPolicy)])

    rf = session.execute_async("query", execution_profile='non-default')

    ss = SimpleStatement("query",
                         retry_policy=DowngradingConsistencyRetryPolicy(),
                         consistency_level=ConsistencyLevel.ALL,
                         serial_consistency_level=ConsistencyLevel.SERIAL)
    my_timeout = 1.1234

    self.assertNotEqual(ss.retry_policy.__class__, rf._load_balancer.__class__)
    self.assertNotEqual(ss.consistency_level, rf.message.consistency_level)
    self.assertNotEqual(ss._serial_consistency_level, rf.message.serial_consistency_level)
    self.assertNotEqual(my_timeout, rf.timeout)

    rf = session.execute_async(ss, timeout=my_timeout, execution_profile='non-default')
    expected_profile = ExecutionProfile(non_default_profile.load_balancing_policy,
                                        ss.retry_policy, ss.consistency_level,
                                        ss._serial_consistency_level, my_timeout,
                                        non_default_profile.row_factory)
    self._verify_response_future_profile(rf, expected_profile)

def test_non_implemented(self):
    """
    Code coverage for interface-style base class
    """
    policy = LoadBalancingPolicy()
    host = Host("ip1", SimpleConvictionPolicy)
    host.set_location_info("dc1", "rack1")

    self.assertRaises(NotImplementedError, policy.distance, host)
    self.assertRaises(NotImplementedError, policy.populate, None, host)
    self.assertRaises(NotImplementedError, policy.make_query_plan)
    self.assertRaises(NotImplementedError, policy.on_up, host)
    self.assertRaises(NotImplementedError, policy.on_down, host)
    self.assertRaises(NotImplementedError, policy.on_add, host)
    self.assertRaises(NotImplementedError, policy.on_remove, host)

def test_with_remotes(self):
    hosts = [Host(i, SimpleConvictionPolicy) for i in range(4)]
    for h in hosts[:2]:
        h.set_location_info("dc1", "rack1")
    for h in hosts[2:]:
        h.set_location_info("dc2", "rack1")

    local_hosts = set(h for h in hosts if h.datacenter == "dc1")
    remote_hosts = set(h for h in hosts if h.datacenter != "dc1")

    # allow all of the remote hosts to be used
    policy = DCAwareRoundRobinPolicy("dc1", used_hosts_per_remote_dc=2)
    policy.populate(Mock(), hosts)
    qplan = list(policy.make_query_plan())
    self.assertEqual(set(qplan[:2]), local_hosts)
    self.assertEqual(set(qplan[2:]), remote_hosts)

    # allow only one of the remote hosts to be used
    policy = DCAwareRoundRobinPolicy("dc1", used_hosts_per_remote_dc=1)
    policy.populate(Mock(), hosts)
    qplan = list(policy.make_query_plan())
    self.assertEqual(set(qplan[:2]), local_hosts)

    used_remotes = set(qplan[2:])
    self.assertEqual(1, len(used_remotes))
    self.assertIn(qplan[2], remote_hosts)

    # allow no remote hosts to be used
    policy = DCAwareRoundRobinPolicy("dc1", used_hosts_per_remote_dc=0)
    policy.populate(Mock(), hosts)
    qplan = list(policy.make_query_plan())
    self.assertEqual(2, len(qplan))
    self.assertEqual(local_hosts, set(qplan))

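# In short: DCAwareRoundRobinPolicy yields local-DC hosts first (round-robin),
# then at most used_hosts_per_remote_dc hosts from each remote DC; with the
# limit at 0, remote hosts are treated as IGNORED and never enter the plan.
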
def test_default_serial_consistency_level(self, *_):
    """
    Make sure default_serial_consistency_level passes through to a query message.
    Also make sure Statement.serial_consistency_level overrides the default.

    PR #510
    """
    c = Cluster(protocol_version=4)
    s = Session(c, [Host("127.0.0.1", SimpleConvictionPolicy)])

    # default is None
    default_profile = c.profile_manager.default
    self.assertIsNone(default_profile.serial_consistency_level)

    sentinel = 1001
    for cl in (None, ConsistencyLevel.LOCAL_SERIAL, ConsistencyLevel.SERIAL, sentinel):
        default_profile.serial_consistency_level = cl

        # default is passed through
        f = s.execute_async(query='')
        self.assertEqual(f.message.serial_consistency_level, cl)

        # any non-None statement setting takes precedence
        for cl_override in (ConsistencyLevel.LOCAL_SERIAL, ConsistencyLevel.SERIAL):
            f = s.execute_async(SimpleStatement(query_string='', serial_consistency_level=cl_override))
            self.assertEqual(default_profile.serial_consistency_level, cl)
            self.assertEqual(f.message.serial_consistency_level, cl_override)

def _assert_shuffle(self, patched_shuffle, keyspace, routing_key):
    hosts = [Host(str(i), SimpleConvictionPolicy) for i in range(4)]
    for host in hosts:
        host.set_up()

    cluster = Mock(spec=Cluster)
    cluster.metadata = Mock(spec=Metadata)
    replicas = hosts[2:]
    cluster.metadata.get_replicas.return_value = replicas

    child_policy = Mock()
    child_policy.make_query_plan.return_value = hosts
    child_policy.distance.return_value = HostDistance.LOCAL

    policy = TokenAwarePolicy(child_policy, shuffle_replicas=True)
    policy.populate(cluster, hosts)

    cluster.metadata.get_replicas.reset_mock()
    child_policy.make_query_plan.reset_mock()
    query = Statement(routing_key=routing_key)
    qplan = list(policy.make_query_plan(keyspace, query))
    if keyspace is None or routing_key is None:
        self.assertEqual(hosts, qplan)
        self.assertEqual(cluster.metadata.get_replicas.call_count, 0)
        child_policy.make_query_plan.assert_called_once_with(keyspace, query)
        self.assertEqual(patched_shuffle.call_count, 0)
    else:
        self.assertEqual(set(replicas), set(qplan[:2]))
        self.assertEqual(hosts[:2], qplan[2:])
        child_policy.make_query_plan.assert_called_once_with(keyspace, query)
        self.assertEqual(patched_shuffle.call_count, 1)

def test_wrap_round_robin(self):
    cluster = Mock(spec=Cluster)
    cluster.metadata = Mock(spec=Metadata)
    hosts = [Host(str(i), SimpleConvictionPolicy) for i in range(4)]
    for host in hosts:
        host.set_up()

    def get_replicas(keyspace, packed_key):
        index = struct.unpack('>i', packed_key)[0]
        return list(islice(cycle(hosts), index, index + 2))

    cluster.metadata.get_replicas.side_effect = get_replicas

    policy = TokenAwarePolicy(RoundRobinPolicy())
    policy.populate(cluster, hosts)

    for i in range(4):
        query = Statement(routing_key=struct.pack('>i', i), keyspace='keyspace_name')
        qplan = list(policy.make_query_plan(None, query))

        replicas = get_replicas(None, struct.pack('>i', i))
        other = set(h for h in hosts if h not in replicas)
        self.assertEqual(replicas, qplan[:2])
        self.assertEqual(other, set(qplan[2:]))

    # Should use the secondary policy
    for i in range(4):
        qplan = list(policy.make_query_plan())
        self.assertEqual(set(qplan), set(hosts))

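# The ordering TokenAwarePolicy produces can be summarized as: replicas for the
# statement's routing key first, then the child policy's plan with
# already-yielded hosts skipped. A simplified generator sketch (illustrative
# only, not the driver's implementation):
def _token_aware_plan_sketch(replicas, child_plan):
    seen = set()
    for host in replicas:
        # replicas are preferred and yielded first
        seen.add(host)
        yield host
    for host in child_plan:
        # fall back to the wrapped policy's ordering, de-duplicated
        if host not in seen:
            yield host
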
def test_predicate_changes(self):
    """
    Test to validate that HostFilterPolicy reacts correctly when the
    predicate returns a different subset of the hosts

    @since 3.8
    @jira_ticket PYTHON-961
    @expected_result the excluded hosts are ignored

    @test_category policy
    """
    external_event = True
    contact_point = "127.0.0.1"

    single_host = {Host(contact_point, SimpleConvictionPolicy)}
    all_hosts = {Host("127.0.0.{}".format(i), SimpleConvictionPolicy) for i in (1, 2, 3)}

    # the lambda closes over external_event, so flipping the flag below
    # changes the predicate's result without rebuilding the policy
    predicate = lambda host: host.address == contact_point if external_event else True
    hfp = ExecutionProfile(
        load_balancing_policy=HostFilterPolicy(RoundRobinPolicy(), predicate=predicate)
    )
    cluster = Cluster((contact_point,),
                      execution_profiles={EXEC_PROFILE_DEFAULT: hfp},
                      protocol_version=PROTOCOL_VERSION,
                      topology_event_refresh_window=0,
                      status_event_refresh_window=0)
    session = cluster.connect(wait_for_all_pools=True)

    queried_hosts = set()
    for _ in range(10):
        response = session.execute("SELECT * from system.local")
        queried_hosts.update(response.response_future.attempted_hosts)
    self.assertEqual(queried_hosts, single_host)

    external_event = False
    session.update_created_pools()

    queried_hosts = set()
    for _ in range(10):
        response = session.execute("SELECT * from system.local")
        queried_hosts.update(response.response_future.attempted_hosts)
    self.assertEqual(queried_hosts, all_hosts)

def add_host(self, address, datacenter, rack, signal=False, refresh_nodes=True):
    host = Host(address, SimpleConvictionPolicy, datacenter, rack)
    self.added_hosts.append(host)
    return host

def test_no_live_nodes(self):
    """
    Ensure query plan for a downed cluster will execute without errors
    """
    hosts = []
    for i in range(4):
        h = Host(i, SimpleConvictionPolicy)
        h.set_location_info("dc1", "rack1")
        hosts.append(h)

    policy = DCAwareRoundRobinPolicy("dc1", used_hosts_per_remote_dc=1)
    policy.populate(Mock(), hosts)

    for host in hosts:
        policy.on_down(host)

    qplan = list(policy.make_query_plan())
    self.assertEqual(qplan, [])

def test_hosts_with_hostname(self):
    hosts = ['localhost']
    policy = WhiteListRoundRobinPolicy(hosts)
    host = Host("127.0.0.1", SimpleConvictionPolicy)
    policy.populate(None, [host])

    qplan = list(policy.make_query_plan())
    self.assertEqual(sorted(qplan), [host])

    self.assertEqual(policy.distance(host), HostDistance.LOCAL)

def test_wait_for_schema_agreement_rpc_lookup(self):
    """
    If the rpc_address is 0.0.0.0, the "peer" column should be used instead.
    """
    self.connection.peer_results[1].append(
        ["0.0.0.0", PEER_IP, "b", "dc1", "rack1", ["3", "103", "203"]]
    )
    host = Host("0.0.0.0", SimpleConvictionPolicy)
    self.cluster.metadata.hosts[PEER_IP] = host
    host.is_up = False

    # even though the new host has a different schema version, it's
    # marked as down, so the control connection shouldn't care
    self.assertTrue(self.control_connection.wait_for_schema_agreement())
    self.assertEqual(self.time.clock, 0)

    # but once we mark it up, the control connection will care
    host.is_up = True
    self.assertFalse(self.control_connection.wait_for_schema_agreement())
    self.assertGreaterEqual(self.time.clock, self.cluster.max_schema_agreement_wait)

def test_statement_keyspace(self):
    hosts = [Host(str(i), SimpleConvictionPolicy) for i in range(4)]
    for host in hosts:
        host.set_up()

    cluster = Mock(spec=Cluster)
    cluster.metadata = Mock(spec=Metadata)
    replicas = hosts[2:]
    cluster.metadata.get_replicas.return_value = replicas

    child_policy = Mock()
    child_policy.make_query_plan.return_value = hosts
    child_policy.distance.return_value = HostDistance.LOCAL

    policy = TokenAwarePolicy(child_policy)
    policy.populate(cluster, hosts)

    # no keyspace, child policy is called
    keyspace = None
    routing_key = 'routing_key'
    query = Statement(routing_key=routing_key)
    qplan = list(policy.make_query_plan(keyspace, query))
    self.assertEqual(hosts, qplan)
    self.assertEqual(cluster.metadata.get_replicas.call_count, 0)
    child_policy.make_query_plan.assert_called_once_with(keyspace, query)

    # working keyspace, no statement
    cluster.metadata.get_replicas.reset_mock()
    keyspace = 'working_keyspace'
    routing_key = 'routing_key'
    query = Statement(routing_key=routing_key)
    qplan = list(policy.make_query_plan(keyspace, query))
    self.assertEqual(replicas + hosts[:2], qplan)
    cluster.metadata.get_replicas.assert_called_with(keyspace, routing_key)

    # statement keyspace, no working
    cluster.metadata.get_replicas.reset_mock()
    working_keyspace = None
    statement_keyspace = 'statement_keyspace'
    routing_key = 'routing_key'
    query = Statement(routing_key=routing_key, keyspace=statement_keyspace)
    qplan = list(policy.make_query_plan(working_keyspace, query))
    self.assertEqual(replicas + hosts[:2], qplan)
    cluster.metadata.get_replicas.assert_called_with(statement_keyspace, routing_key)

    # both keyspaces set, statement keyspace used for routing
    cluster.metadata.get_replicas.reset_mock()
    working_keyspace = 'working_keyspace'
    statement_keyspace = 'statement_keyspace'
    routing_key = 'routing_key'
    query = Statement(routing_key=routing_key, keyspace=statement_keyspace)
    qplan = list(policy.make_query_plan(working_keyspace, query))
    self.assertEqual(replicas + hosts[:2], qplan)
    cluster.metadata.get_replicas.assert_called_with(statement_keyspace, routing_key)

def test_target_host_nominal(self):
    node_count = 4
    hosts = [Host(i, Mock()) for i in range(node_count)]
    target_host = hosts[1]
    target_host.is_up = True

    policy = DSELoadBalancingPolicy(RoundRobinPolicy())
    policy.populate(Mock(metadata=ClusterMetaMock({'127.0.0.1': target_host})), hosts)
    for _ in range(10):
        query_plan = list(policy.make_query_plan(None, Mock(target_host='127.0.0.1')))
        self.assertEqual(sorted(query_plan), hosts)
        self.assertEqual(query_plan[0], target_host)

def test_default_dc(self):
    host_local = Host(1, SimpleConvictionPolicy, 'local')
    host_remote = Host(2, SimpleConvictionPolicy, 'remote')
    host_none = Host(1, SimpleConvictionPolicy)

    # contact point is '1'
    cluster = Mock(contact_points_resolved=[1])

    # contact DC first
    policy = DCAwareRoundRobinPolicy()
    policy.populate(cluster, [host_none])
    self.assertFalse(policy.local_dc)
    policy.on_add(host_local)
    policy.on_add(host_remote)
    self.assertNotEqual(policy.local_dc, host_remote.datacenter)
    self.assertEqual(policy.local_dc, host_local.datacenter)

    # contact DC second
    policy = DCAwareRoundRobinPolicy()
    policy.populate(cluster, [host_none])
    self.assertFalse(policy.local_dc)
    policy.on_add(host_remote)
    policy.on_add(host_local)
    self.assertNotEqual(policy.local_dc, host_remote.datacenter)
    self.assertEqual(policy.local_dc, host_local.datacenter)

    # no DC
    policy = DCAwareRoundRobinPolicy()
    policy.populate(cluster, [host_none])
    self.assertFalse(policy.local_dc)
    policy.on_add(host_none)
    self.assertFalse(policy.local_dc)

    # only other DC
    policy = DCAwareRoundRobinPolicy()
    policy.populate(cluster, [host_none])
    self.assertFalse(policy.local_dc)
    policy.on_add(host_remote)
    self.assertFalse(policy.local_dc)

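# Takeaway: when no local_dc is passed to DCAwareRoundRobinPolicy, it is
# inferred from the first host seen whose address matches a resolved contact
# point; hosts at other addresses, or contact-point hosts with no datacenter
# set, leave local_dc unset, which is what the four scenarios above exercise.
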
def test_nts_token_performance(self):
    """
    Tests to ensure that when rf exceeds the number of nodes available,
    we don't needlessly iterate trying to construct tokens for nodes
    that don't exist.

    @since 3.7
    @jira_ticket PYTHON-379
    @expected_result timing with rf 1500 should be similar to rf 3 if we have 3 nodes

    @test_category metadata
    """
    token_to_host_owner = {}
    ring = []
    dc1hostnum = 3
    current_token = 0
    vnodes_per_host = 500
    for i in range(dc1hostnum):
        host = Host('dc1.{0}'.format(i), SimpleConvictionPolicy)
        host.set_location_info('dc1', "rack1")
        for vnode_num in range(vnodes_per_host):
            md5_token = MD5Token(current_token + vnode_num)
            token_to_host_owner[md5_token] = host
            ring.append(md5_token)
        current_token += 1000

    nts = NetworkTopologyStrategy({'dc1': 3})
    start_time = timeit.default_timer()
    nts.make_token_replica_map(token_to_host_owner, ring)
    elapsed_base = timeit.default_timer() - start_time

    nts = NetworkTopologyStrategy({'dc1': 1500})
    start_time = timeit.default_timer()
    nts.make_token_replica_map(token_to_host_owner, ring)
    elapsed_bad = timeit.default_timer() - start_time

    difference = elapsed_bad - elapsed_base
    self.assertTrue(-1 < difference < 1)

def test_target_host_down(self):
    node_count = 4
    hosts = [Host(i, Mock()) for i in range(node_count)]
    target_host = hosts[1]

    policy = DSELoadBalancingPolicy(RoundRobinPolicy())
    policy.populate(Mock(metadata=ClusterMetaMock({'127.0.0.1': target_host})), hosts)
    query_plan = list(policy.make_query_plan(None, Mock(target_host='127.0.0.1')))
    self.assertEqual(sorted(query_plan), hosts)

    target_host.is_up = False
    policy.on_down(target_host)
    query_plan = list(policy.make_query_plan(None, Mock(target_host='127.0.0.1')))
    self.assertNotIn(target_host, query_plan)

def test_status_updates(self):
    """
    Same test as DCAwareRoundRobinPolicyTest.test_status_updates()
    """
    hosts = [Host(i, SimpleConvictionPolicy) for i in range(4)]
    for h in hosts[:2]:
        h.set_location_info("dc1", "rack1")
    for h in hosts[2:]:
        h.set_location_info("dc2", "rack1")

    policy = TokenAwarePolicy(DCAwareRoundRobinPolicy("dc1", used_hosts_per_remote_dc=1))
    policy.populate(self.FakeCluster(), hosts)
    policy.on_down(hosts[0])
    policy.on_remove(hosts[2])

    new_local_host = Host(4, SimpleConvictionPolicy)
    new_local_host.set_location_info("dc1", "rack1")
    policy.on_up(new_local_host)

    new_remote_host = Host(5, SimpleConvictionPolicy)
    new_remote_host.set_location_info("dc9000", "rack1")
    policy.on_add(new_remote_host)

    # we now have two local hosts and two remote hosts in separate dcs
    qplan = list(policy.make_query_plan())
    self.assertEqual(set(qplan[:2]), set([hosts[1], new_local_host]))
    self.assertEqual(set(qplan[2:]), set([hosts[3], new_remote_host]))

    # since we have hosts in dc9000, the distance shouldn't be IGNORED
    self.assertEqual(policy.distance(new_remote_host), HostDistance.REMOTE)

    policy.on_down(new_local_host)
    policy.on_down(hosts[1])
    qplan = list(policy.make_query_plan())
    self.assertEqual(set(qplan), set([hosts[3], new_remote_host]))

    policy.on_down(new_remote_host)
    policy.on_down(hosts[3])
    qplan = list(policy.make_query_plan())
    self.assertEqual(qplan, [])

def test_recursion_limited(self):
    """
    Verify that recursion is controlled when raise_on_first_error=False
    and something is wrong with the query.

    PYTHON-585
    """
    max_recursion = sys.getrecursionlimit()
    s = Session(Cluster(), [Host("127.0.0.1", SimpleConvictionPolicy)])
    self.assertRaises(TypeError, execute_concurrent_with_args, s, "doesn't matter",
                      [('param',)] * max_recursion, raise_on_first_error=True)

    results = execute_concurrent_with_args(s, "doesn't matter", [('param',)] * max_recursion,
                                           raise_on_first_error=False)  # previously exceeded the recursion limit
    self.assertEqual(len(results), max_recursion)
    for r in results:
        self.assertFalse(r[0])
        self.assertIsInstance(r[1], TypeError)

def test_default_profile(self):
    non_default_profile = ExecutionProfile(RoundRobinPolicy(), *[object() for _ in range(3)])
    cluster = Cluster(execution_profiles={'non-default': non_default_profile})
    session = Session(cluster, hosts=[Host("127.0.0.1", SimpleConvictionPolicy)])

    default_profile = cluster.profile_manager.profiles[EXEC_PROFILE_DEFAULT]

    rf = session.execute_async("query")
    self._verify_response_future_profile(rf, default_profile)

    rf = session.execute_async("query", execution_profile='non-default')
    self._verify_response_future_profile(rf, non_default_profile)

def test_session_host_parameter(self):
    """
    Test for protocol negotiation

    Verify that NoHostAvailable is raised in Session.__init__ when there
    are no valid connections, and that no error is raised otherwise,
    even if some of the hosts are invalid.

    @since 3.9
    @jira_ticket PYTHON-665
    @expected_result NoHostAvailable when the driver is unable to connect
    to a valid host, no exception otherwise

    @test_category connection
    """
    with self.assertRaises(NoHostAvailable):
        Session(Cluster(protocol_version=PROTOCOL_VERSION), [])
    with self.assertRaises(NoHostAvailable):
        Session(Cluster(protocol_version=PROTOCOL_VERSION),
                [Host("1.2.3.4", SimpleConvictionPolicy)])
    session = Session(Cluster(protocol_version=PROTOCOL_VERSION),
                      [Host(x, SimpleConvictionPolicy) for x in
                       ("127.0.0.1", "127.0.0.2", "1.2.3.4")])
    session.shutdown()

def test_profile_name_value(self):
    internalized_profile = ExecutionProfile(RoundRobinPolicy(), *[object() for _ in range(3)])
    cluster = Cluster(execution_profiles={'by-name': internalized_profile})
    session = Session(cluster, hosts=[Host("127.0.0.1", SimpleConvictionPolicy)])

    rf = session.execute_async("query", execution_profile='by-name')
    self._verify_response_future_profile(rf, internalized_profile)

    by_value = ExecutionProfile(RoundRobinPolicy(), *[object() for _ in range(3)])
    rf = session.execute_async("query", execution_profile=by_value)
    self._verify_response_future_profile(rf, by_value)

def test_wrap_dc_aware(self):
    cluster = Mock(spec=Cluster)
    cluster.metadata = Mock(spec=Metadata)
    hosts = [Host(str(i), SimpleConvictionPolicy) for i in range(4)]
    for host in hosts:
        host.set_up()
    for h in hosts[:2]:
        h.set_location_info("dc1", "rack1")
    for h in hosts[2:]:
        h.set_location_info("dc2", "rack1")

    def get_replicas(keyspace, packed_key):
        index = struct.unpack('>i', packed_key)[0]
        # return one node from each DC
        if index % 2 == 0:
            return [hosts[0], hosts[2]]
        else:
            return [hosts[1], hosts[3]]

    cluster.metadata.get_replicas.side_effect = get_replicas

    policy = TokenAwarePolicy(DCAwareRoundRobinPolicy("dc1", used_hosts_per_remote_dc=1))
    policy.populate(cluster, hosts)

    for i in range(4):
        query = Statement(routing_key=struct.pack('>i', i), keyspace='keyspace_name')
        qplan = list(policy.make_query_plan(None, query))
        replicas = get_replicas(None, struct.pack('>i', i))

        # first should be the only local replica
        self.assertIn(qplan[0], replicas)
        self.assertEqual(qplan[0].datacenter, "dc1")

        # then the local non-replica
        self.assertNotIn(qplan[1], replicas)
        self.assertEqual(qplan[1].datacenter, "dc1")

        # then one of the remotes (used_hosts_per_remote_dc is 1, so we
        # shouldn't see two remotes)
        self.assertEqual(qplan[2].datacenter, "dc2")
        self.assertEqual(3, len(qplan))

def test_exec_profile_clone(self):
    cluster = Cluster(execution_profiles={EXEC_PROFILE_DEFAULT: ExecutionProfile(),
                                          'one': ExecutionProfile()})
    session = Session(cluster, hosts=[Host("127.0.0.1", SimpleConvictionPolicy)])

    profile_attrs = {'request_timeout': 1,
                     'consistency_level': ConsistencyLevel.ANY,
                     'serial_consistency_level': ConsistencyLevel.SERIAL,
                     'row_factory': tuple_factory,
                     'retry_policy': RetryPolicy(),
                     'load_balancing_policy': default_lbp_factory()}
    reference_attributes = ('retry_policy', 'load_balancing_policy')

    # default and one named
    for profile in (EXEC_PROFILE_DEFAULT, 'one'):
        active = cluster.profile_manager.profiles[profile]
        clone = session.execution_profile_clone_update(profile)
        self.assertIsNot(clone, active)

        all_updated = session.execution_profile_clone_update(clone, **profile_attrs)
        self.assertIsNot(all_updated, clone)
        for attr, value in profile_attrs.items():
            self.assertEqual(getattr(clone, attr), getattr(active, attr))
            if attr in reference_attributes:
                self.assertIs(getattr(clone, attr), getattr(active, attr))
            self.assertNotEqual(getattr(all_updated, attr), getattr(active, attr))

    # cannot clone nonexistent profile
    self.assertRaises(ValueError, session.execution_profile_clone_update,
                      'DOES NOT EXIST', **profile_attrs)

def test_nts_make_token_replica_map(self):
    token_to_host_owner = {}

    dc1_1 = Host('dc1.1', SimpleConvictionPolicy)
    dc1_2 = Host('dc1.2', SimpleConvictionPolicy)
    dc1_3 = Host('dc1.3', SimpleConvictionPolicy)
    for host in (dc1_1, dc1_2, dc1_3):
        host.set_location_info('dc1', 'rack1')
    token_to_host_owner[MD5Token(0)] = dc1_1
    token_to_host_owner[MD5Token(100)] = dc1_2
    token_to_host_owner[MD5Token(200)] = dc1_3

    dc2_1 = Host('dc2.1', SimpleConvictionPolicy)
    dc2_2 = Host('dc2.2', SimpleConvictionPolicy)
    dc2_1.set_location_info('dc2', 'rack1')
    dc2_2.set_location_info('dc2', 'rack1')
    token_to_host_owner[MD5Token(1)] = dc2_1
    token_to_host_owner[MD5Token(101)] = dc2_2

    dc3_1 = Host('dc3.1', SimpleConvictionPolicy)
    dc3_1.set_location_info('dc3', 'rack3')
    token_to_host_owner[MD5Token(2)] = dc3_1

    ring = [MD5Token(0),
            MD5Token(1),
            MD5Token(2),
            MD5Token(100),
            MD5Token(101),
            MD5Token(200)]

    nts = NetworkTopologyStrategy({'dc1': 2, 'dc2': 2, 'dc3': 1})
    replica_map = nts.make_token_replica_map(token_to_host_owner, ring)

    self.assertItemsEqual(replica_map[MD5Token(0)], (dc1_1, dc1_2, dc2_1, dc2_2, dc3_1))

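# NetworkTopologyStrategy placement, simplified: walk the ring clockwise from
# each token and take hosts until every datacenter has its configured replica
# count (the real algorithm additionally spreads replicas across racks). An
# illustrative sketch that reproduces the expectation above, not the driver's
# actual code:
def _nts_replicas_sketch(token_to_host_owner, ring, dc_rf):
    replica_map = {}
    for i, token in enumerate(ring):
        remaining = dict(dc_rf)  # replicas still owed per datacenter
        replicas = []
        for j in range(len(ring)):
            host = token_to_host_owner[ring[(i + j) % len(ring)]]
            if host not in replicas and remaining.get(host.datacenter, 0) > 0:
                replicas.append(host)
                remaining[host.datacenter] -= 1
        replica_map[token] = replicas
    return replica_map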