def test_nts_make_token_replica_map_empty_dc(self):
    host = Host('1', SimpleConvictionPolicy)
    host.set_location_info('dc1', 'rack1')
    token_to_host_owner = {MD5Token(0): host}
    ring = [MD5Token(0)]

    nts = NetworkTopologyStrategy({'dc1': 1, 'dc2': 0})
    replica_map = nts.make_token_replica_map(token_to_host_owner, ring)

    self.assertEqual(set(replica_map[MD5Token(0)]), set([host]))
def test_get_distance(self):
    """
    Same test as DCAwareRoundRobinPolicyTest.test_get_distance()
    Except a FakeCluster is needed for the metadata variable and
    policy._child_policy is needed to change child policy settings
    """
    policy = TokenAwarePolicy(DCAwareRoundRobinPolicy("dc1", used_hosts_per_remote_dc=0))
    host = Host("ip1", SimpleConvictionPolicy)
    host.set_location_info("dc1", "rack1")
    policy.populate(self.FakeCluster(), [host])

    self.assertEqual(policy.distance(host), HostDistance.LOCAL)

    # used_hosts_per_remote_dc is set to 0, so ignore it
    remote_host = Host("ip2", SimpleConvictionPolicy)
    remote_host.set_location_info("dc2", "rack1")
    self.assertEqual(policy.distance(remote_host), HostDistance.IGNORED)

    # dc2 isn't registered in the policy's live_hosts dict
    policy._child_policy.used_hosts_per_remote_dc = 1
    self.assertEqual(policy.distance(remote_host), HostDistance.IGNORED)

    # make sure the policy has both dcs registered
    policy.populate(self.FakeCluster(), [host, remote_host])
    self.assertEqual(policy.distance(remote_host), HostDistance.REMOTE)

    # since used_hosts_per_remote_dc is set to 1, only the first
    # remote host in dc2 will be REMOTE, the rest are IGNORED
    second_remote_host = Host("ip3", SimpleConvictionPolicy)
    second_remote_host.set_location_info("dc2", "rack1")
    policy.populate(self.FakeCluster(), [host, remote_host, second_remote_host])
    distances = set([policy.distance(remote_host), policy.distance(second_remote_host)])
    self.assertEqual(distances, set([HostDistance.REMOTE, HostDistance.IGNORED]))
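# The test above references self.FakeCluster(); a minimal sketch of that
# fixture (hypothetical here -- the real one is defined elsewhere in the
# test module) only needs a mocked metadata attribute for populate() to read:
from mock import Mock

from cassandra.metadata import Metadata


class FakeCluster(object):
    def __init__(self):
        self.metadata = Mock(spec=Metadata)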
def test_get_distance(self):
    policy = DCAwareRoundRobinPolicy("dc1", used_hosts_per_remote_dc=0)
    host = Host("ip1", SimpleConvictionPolicy)
    host.set_location_info("dc1", "rack1")
    policy.populate(Mock(), [host])

    self.assertEqual(policy.distance(host), HostDistance.LOCAL)

    # used_hosts_per_remote_dc is set to 0, so ignore it
    remote_host = Host("ip2", SimpleConvictionPolicy)
    remote_host.set_location_info("dc2", "rack1")
    self.assertEqual(policy.distance(remote_host), HostDistance.IGNORED)

    # dc2 isn't registered in the policy's live_hosts dict
    policy.used_hosts_per_remote_dc = 1
    self.assertEqual(policy.distance(remote_host), HostDistance.IGNORED)

    # make sure the policy has both dcs registered
    policy.populate(Mock(), [host, remote_host])
    self.assertEqual(policy.distance(remote_host), HostDistance.REMOTE)

    # since used_hosts_per_remote_dc is set to 1, only the first
    # remote host in dc2 will be REMOTE, the rest are IGNORED
    second_remote_host = Host("ip3", SimpleConvictionPolicy)
    second_remote_host.set_location_info("dc2", "rack1")
    policy.populate(Mock(), [host, remote_host, second_remote_host])
    distances = set([policy.distance(remote_host), policy.distance(second_remote_host)])
    self.assertEqual(distances, set([HostDistance.REMOTE, HostDistance.IGNORED]))
def test_nts_make_token_replica_map(self):
    token_to_host_owner = {}

    dc1_1 = Host('dc1.1', SimpleConvictionPolicy)
    dc1_2 = Host('dc1.2', SimpleConvictionPolicy)
    dc1_3 = Host('dc1.3', SimpleConvictionPolicy)
    for host in (dc1_1, dc1_2, dc1_3):
        host.set_location_info('dc1', 'rack1')
    token_to_host_owner[MD5Token(0)] = dc1_1
    token_to_host_owner[MD5Token(100)] = dc1_2
    token_to_host_owner[MD5Token(200)] = dc1_3

    dc2_1 = Host('dc2.1', SimpleConvictionPolicy)
    dc2_2 = Host('dc2.2', SimpleConvictionPolicy)
    dc2_1.set_location_info('dc2', 'rack1')
    dc2_2.set_location_info('dc2', 'rack1')
    token_to_host_owner[MD5Token(1)] = dc2_1
    token_to_host_owner[MD5Token(101)] = dc2_2

    dc3_1 = Host('dc3.1', SimpleConvictionPolicy)
    dc3_1.set_location_info('dc3', 'rack3')
    token_to_host_owner[MD5Token(2)] = dc3_1

    ring = [MD5Token(0),
            MD5Token(1),
            MD5Token(2),
            MD5Token(100),
            MD5Token(101),
            MD5Token(200)]

    nts = NetworkTopologyStrategy({'dc1': 2, 'dc2': 2, 'dc3': 1})
    replica_map = nts.make_token_replica_map(token_to_host_owner, ring)

    self.assertItemsEqual(replica_map[MD5Token(0)], (dc1_1, dc1_2, dc2_1, dc2_2, dc3_1))
def test_no_remote(self):
    hosts = []
    for i in range(4):
        h = Host(i, SimpleConvictionPolicy)
        h.set_location_info("dc1", "rack1")
        hosts.append(h)

    policy = DCAwareRoundRobinPolicy("dc1")
    policy.populate(None, hosts)

    qplan = list(policy.make_query_plan())
    self.assertEqual(sorted(qplan), sorted(hosts))
def test_non_implemented(self):
    """
    Code coverage for interface-style base class
    """
    policy = LoadBalancingPolicy()
    host = Host("ip1", SimpleConvictionPolicy)
    host.set_location_info("dc1", "rack1")

    self.assertRaises(NotImplementedError, policy.distance, host)
    self.assertRaises(NotImplementedError, policy.populate, None, host)
    self.assertRaises(NotImplementedError, policy.make_query_plan)
    self.assertRaises(NotImplementedError, policy.on_up, host)
    self.assertRaises(NotImplementedError, policy.on_down, host)
    self.assertRaises(NotImplementedError, policy.on_add, host)
    self.assertRaises(NotImplementedError, policy.on_remove, host)
def test_version_parsing(self):
    host = Host('127.0.0.1', SimpleConvictionPolicy)

    host.set_version("1.0.0")
    self.assertEqual((1, 0, 0), host.version)

    host.set_version("1.0")
    self.assertEqual((1, 0, 0), host.version)

    host.set_version("1.0.0-beta1")
    self.assertEqual((1, 0, 0, 'beta1'), host.version)

    host.set_version("1.0-SNAPSHOT")
    self.assertEqual((1, 0, 0, 'SNAPSHOT'), host.version)
def test_wait_for_schema_agreement_rpc_lookup(self):
    """
    If the rpc_address is 0.0.0.0, the "peer" column should be used instead.
    """
    self.connection.peer_results[1].append(
        ["0.0.0.0", PEER_IP, "b", "dc1", "rack1", ["3", "103", "203"]]
    )
    host = Host("0.0.0.0", SimpleConvictionPolicy)
    self.cluster.metadata.hosts[PEER_IP] = host
    host.is_up = False

    # even though the new host has a different schema version, it's
    # marked as down, so the control connection shouldn't care
    self.assertTrue(self.control_connection.wait_for_schema_agreement())
    self.assertEqual(self.time.clock, 0)

    # but once we mark it up, the control connection will care
    host.is_up = True
    self.assertFalse(self.control_connection.wait_for_schema_agreement())
    self.assertGreaterEqual(self.time.clock, self.cluster.max_schema_agreement_wait)
def test_no_live_nodes(self):
    """
    Ensure query plan for a downed cluster will execute without errors
    """
    hosts = []
    for i in range(4):
        h = Host(i, SimpleConvictionPolicy)
        h.set_location_info("dc1", "rack1")
        hosts.append(h)

    policy = DCAwareRoundRobinPolicy("dc1", used_hosts_per_remote_dc=1)
    policy.populate(Mock(), hosts)

    for host in hosts:
        policy.on_down(host)

    qplan = list(policy.make_query_plan())
    self.assertEqual(qplan, [])
def test_status_updates(self):
    hosts = [Host(i, SimpleConvictionPolicy) for i in range(4)]
    for h in hosts[:2]:
        h.set_location_info("dc1", "rack1")
    for h in hosts[2:]:
        h.set_location_info("dc2", "rack1")

    policy = DCAwareRoundRobinPolicy("dc1", used_hosts_per_remote_dc=1)
    policy.populate(None, hosts)
    policy.on_down(hosts[0])
    policy.on_remove(hosts[2])

    new_local_host = Host(4, SimpleConvictionPolicy)
    new_local_host.set_location_info("dc1", "rack1")
    policy.on_up(new_local_host)

    new_remote_host = Host(5, SimpleConvictionPolicy)
    new_remote_host.set_location_info("dc9000", "rack1")
    policy.on_add(new_remote_host)

    # we now have two local hosts and two remote hosts in separate dcs
    qplan = list(policy.make_query_plan())
    self.assertEqual(set(qplan[:2]), set([hosts[1], new_local_host]))
    self.assertEqual(set(qplan[2:]), set([hosts[3], new_remote_host]))

    # since we have hosts in dc9000, the distance shouldn't be IGNORED
    self.assertEqual(policy.distance(new_remote_host), HostDistance.REMOTE)
def test_nts_token_performance(self):
    """
    Tests to ensure that when rf exceeds the number of nodes available,
    we don't needlessly iterate trying to construct tokens for nodes
    that don't exist.

    @since 3.7
    @jira_ticket PYTHON-379
    @expected_result timing with rf 1500 should be the same/similar to rf 3
        if we have 3 nodes

    @test_category metadata
    """
    token_to_host_owner = {}
    ring = []
    dc1hostnum = 3
    current_token = 0
    vnodes_per_host = 500
    for i in range(dc1hostnum):
        host = Host('dc1.{0}'.format(i), SimpleConvictionPolicy)
        host.set_location_info('dc1', "rack1")
        for vnode_num in range(vnodes_per_host):
            md5_token = MD5Token(current_token + vnode_num)
            token_to_host_owner[md5_token] = host
            ring.append(md5_token)
        current_token += 1000

    nts = NetworkTopologyStrategy({'dc1': 3})
    start_time = timeit.default_timer()
    nts.make_token_replica_map(token_to_host_owner, ring)
    elapsed_base = timeit.default_timer() - start_time

    nts = NetworkTopologyStrategy({'dc1': 1500})
    start_time = timeit.default_timer()
    nts.make_token_replica_map(token_to_host_owner, ring)
    elapsed_bad = timeit.default_timer() - start_time

    difference = elapsed_bad - elapsed_base
    self.assertTrue(-1 < difference < 1)
def test_wrap_dc_aware(self):
    cluster = Mock(spec=Cluster)
    cluster.metadata = Mock(spec=Metadata)
    hosts = [Host(str(i), SimpleConvictionPolicy) for i in range(4)]
    for h in hosts[:2]:
        h.set_location_info("dc1", "rack1")
    for h in hosts[2:]:
        h.set_location_info("dc2", "rack1")

    def get_replicas(keyspace, packed_key):
        index = struct.unpack('>i', packed_key)[0]
        # return one node from each DC
        if index % 2 == 0:
            return [hosts[0], hosts[2]]
        else:
            return [hosts[1], hosts[3]]

    cluster.metadata.get_replicas.side_effect = get_replicas

    policy = TokenAwarePolicy(DCAwareRoundRobinPolicy("dc1", used_hosts_per_remote_dc=1))
    policy.populate(cluster, hosts)

    for i in range(4):
        query = Statement(routing_key=struct.pack('>i', i))
        qplan = list(policy.make_query_plan(None, query))
        replicas = get_replicas(None, struct.pack('>i', i))

        # first should be the only local replica
        self.assertIn(qplan[0], replicas)
        self.assertEqual(qplan[0].datacenter, "dc1")

        # then the local non-replica
        self.assertNotIn(qplan[1], replicas)
        self.assertEqual(qplan[1].datacenter, "dc1")

        # then one of the remotes (used_hosts_per_remote_dc is 1, so we
        # shouldn't see two remotes)
        self.assertEqual(qplan[2].datacenter, "dc2")
        self.assertEqual(3, len(qplan))
def test_statement_params_override_profile(self):
    non_default_profile = ExecutionProfile(RoundRobinPolicy(), *[object() for _ in range(2)])
    cluster = Cluster(execution_profiles={'non-default': non_default_profile})
    session = Session(cluster, hosts=[Host("127.0.0.1", SimpleConvictionPolicy)])
    self.assertEqual(cluster._config_mode, _ConfigMode.PROFILES)

    rf = session.execute_async("query", execution_profile='non-default')

    ss = SimpleStatement("query",
                         retry_policy=DowngradingConsistencyRetryPolicy(),
                         consistency_level=ConsistencyLevel.ALL,
                         serial_consistency_level=ConsistencyLevel.SERIAL)
    my_timeout = 1.1234

    self.assertNotEqual(ss.retry_policy.__class__, rf._load_balancer.__class__)
    self.assertNotEqual(ss.consistency_level, rf.message.consistency_level)
    self.assertNotEqual(ss._serial_consistency_level, rf.message.serial_consistency_level)
    self.assertNotEqual(my_timeout, rf.timeout)

    rf = session.execute_async(ss, timeout=my_timeout, execution_profile='non-default')
    expected_profile = ExecutionProfile(non_default_profile.load_balancing_policy,
                                        ss.retry_policy, ss.consistency_level,
                                        ss._serial_consistency_level, my_timeout,
                                        non_default_profile.row_factory)
    self._verify_response_future_profile(rf, expected_profile)
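# test_statement_params_override_profile (above) and test_default_profile
# (below) call self._verify_response_future_profile(). A plausible sketch of
# that helper (an assumption -- the attribute names mirror how the test above
# reads the ResponseFuture) compares each profile field against the future:
def _verify_response_future_profile(self, rf, expected_profile):
    self.assertEqual(rf._load_balancer, expected_profile.load_balancing_policy)
    self.assertEqual(rf._retry_policy, expected_profile.retry_policy)
    self.assertEqual(rf.message.consistency_level, expected_profile.consistency_level)
    self.assertEqual(rf.message.serial_consistency_level, expected_profile.serial_consistency_level)
    self.assertEqual(rf.timeout, expected_profile.request_timeout)
    self.assertEqual(rf.row_factory, expected_profile.row_factory)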
def test_wrap_round_robin(self):
    cluster = Mock(spec=Cluster)
    cluster.metadata = Mock(spec=Metadata)
    hosts = [Host(str(i), SimpleConvictionPolicy) for i in range(4)]

    def get_replicas(keyspace, packed_key):
        index = struct.unpack('>i', packed_key)[0]
        return list(islice(cycle(hosts), index, index + 2))

    cluster.metadata.get_replicas.side_effect = get_replicas

    policy = TokenAwarePolicy(RoundRobinPolicy())
    policy.populate(cluster, hosts)

    for i in range(4):
        query = Statement(routing_key=struct.pack('>i', i))
        qplan = list(policy.make_query_plan(None, query))

        replicas = get_replicas(None, struct.pack('>i', i))
        other = set(h for h in hosts if h not in replicas)
        self.assertEqual(replicas, qplan[:2])
        self.assertEqual(other, set(qplan[2:]))
def test_recursion_limited(self):
    """
    Verify that recursion is controlled when raise_on_first_error=False
    and something is wrong with the query.

    PYTHON-585
    """
    max_recursion = sys.getrecursionlimit()
    s = Session(Cluster(), [Host("127.0.0.1", SimpleConvictionPolicy)])
    self.assertRaises(TypeError, execute_concurrent_with_args, s, "doesn't matter",
                      [('param',)] * max_recursion, raise_on_first_error=True)

    results = execute_concurrent_with_args(s, "doesn't matter", [('param',)] * max_recursion,
                                           raise_on_first_error=False)  # previously
    self.assertEqual(len(results), max_recursion)
    for r in results:
        self.assertFalse(r[0])
        self.assertIsInstance(r[1], TypeError)
def test_getting_replicas(self):
    tokens = [MD5Token(str(i)) for i in range(0, (2 ** 127 - 1), 2 ** 125)]
    hosts = [Host("ip%d" % i, SimpleConvictionPolicy) for i in range(len(tokens))]
    token_to_primary_replica = dict(zip(tokens, hosts))
    keyspace = KeyspaceMetadata("ks", True, "SimpleStrategy", {"replication_factor": "1"})
    token_map = TokenMap(MD5Token, token_to_primary_replica, tokens, [keyspace])

    # tokens match node tokens exactly
    for token, expected_host in zip(tokens, hosts):
        replicas = token_map.get_replicas("ks", token)
        self.assertEqual(replicas, set([expected_host]))

    # shift the tokens back by one
    for token, expected_host in zip(tokens[1:], hosts[1:]):
        replicas = token_map.get_replicas("ks", MD5Token(str(token.value - 1)))
        self.assertEqual(replicas, set([expected_host]))

    # shift the tokens forward by one
    for i, token in enumerate(tokens):
        replicas = token_map.get_replicas("ks", MD5Token(str(token.value + 1)))
        expected_host = hosts[(i + 1) % len(hosts)]
        self.assertEqual(replicas, set([expected_host]))
def test_exec_profile_clone(self):
    cluster = Cluster(execution_profiles={EXEC_PROFILE_DEFAULT: ExecutionProfile(),
                                          'one': ExecutionProfile()})
    session = Session(cluster, hosts=[Host("127.0.0.1", SimpleConvictionPolicy)])

    profile_attrs = {'request_timeout': 1,
                     'consistency_level': ConsistencyLevel.ANY,
                     'serial_consistency_level': ConsistencyLevel.SERIAL,
                     'row_factory': tuple_factory,
                     'retry_policy': RetryPolicy(),
                     'load_balancing_policy': default_lbp_factory()}
    reference_attributes = ('retry_policy', 'load_balancing_policy')

    # default and one named
    for profile in (EXEC_PROFILE_DEFAULT, 'one'):
        active = cluster.profile_manager.profiles[profile]
        clone = session.execution_profile_clone_update(profile)
        self.assertIsNot(clone, active)

        all_updated = session.execution_profile_clone_update(clone, **profile_attrs)
        self.assertIsNot(all_updated, clone)
        for attr, value in profile_attrs.items():
            self.assertEqual(getattr(clone, attr), getattr(active, attr))
            if attr in reference_attributes:
                self.assertIs(getattr(clone, attr), getattr(active, attr))
            self.assertNotEqual(getattr(all_updated, attr), getattr(active, attr))

    # cannot clone nonexistent profile
    self.assertRaises(ValueError, session.execution_profile_clone_update,
                      'DOES NOT EXIST', **profile_attrs)
def _get_replicas(self, token_klass):
    tokens = [token_klass(i) for i in range(0, (2 ** 127 - 1), 2 ** 125)]
    hosts = [Host("ip%d" % i, SimpleConvictionPolicy) for i in range(len(tokens))]
    token_to_primary_replica = dict(zip(tokens, hosts))
    keyspace = KeyspaceMetadata("ks", True, "SimpleStrategy", {"replication_factor": "1"})
    metadata = Mock(spec=Metadata, keyspaces={'ks': keyspace})
    token_map = TokenMap(token_klass, token_to_primary_replica, tokens, metadata)

    # tokens match node tokens exactly
    for token, expected_host in zip(tokens, hosts):
        replicas = token_map.get_replicas("ks", token)
        self.assertEqual(set(replicas), {expected_host})

    # shift the tokens back by one
    for token, expected_host in zip(tokens, hosts):
        replicas = token_map.get_replicas("ks", token_klass(token.value - 1))
        self.assertEqual(set(replicas), {expected_host})

    # shift the tokens forward by one
    for i, token in enumerate(tokens):
        replicas = token_map.get_replicas("ks", token_klass(token.value + 1))
        expected_host = hosts[(i + 1) % len(hosts)]
        self.assertEqual(set(replicas), {expected_host})
def test_no_profile_with_legacy(self):
    # don't construct with both
    self.assertRaises(ValueError, Cluster, load_balancing_policy=RoundRobinPolicy(),
                      execution_profiles={'a': ExecutionProfile()})
    self.assertRaises(ValueError, Cluster, default_retry_policy=DowngradingConsistencyRetryPolicy(),
                      execution_profiles={'a': ExecutionProfile()})
    self.assertRaises(ValueError, Cluster, load_balancing_policy=RoundRobinPolicy(),
                      default_retry_policy=DowngradingConsistencyRetryPolicy(),
                      execution_profiles={'a': ExecutionProfile()})

    # can't add after
    cluster = Cluster(load_balancing_policy=RoundRobinPolicy())
    self.assertRaises(ValueError, cluster.add_execution_profile, 'name', ExecutionProfile())

    # session settings lock out profiles
    cluster = Cluster()
    session = Session(cluster, hosts=[Host("127.0.0.1", SimpleConvictionPolicy)])
    for attr, value in (('default_timeout', 1),
                        ('default_consistency_level', ConsistencyLevel.ANY),
                        ('default_serial_consistency_level', ConsistencyLevel.SERIAL),
                        ('row_factory', tuple_factory)):
        cluster._config_mode = _ConfigMode.UNCOMMITTED
        setattr(session, attr, value)
        self.assertRaises(ValueError, cluster.add_execution_profile, 'name' + attr, ExecutionProfile())

    # don't accept profile
    self.assertRaises(ValueError, session.execute_async, "query", execution_profile='some name here')
def test_default_profile(self):
    non_default_profile = ExecutionProfile(RoundRobinPolicy(), *[object() for _ in range(2)])
    cluster = Cluster(execution_profiles={'non-default': non_default_profile})
    session = Session(cluster, hosts=[Host("127.0.0.1", SimpleConvictionPolicy)])
    self.assertEqual(cluster._config_mode, _ConfigMode.PROFILES)

    default_profile = cluster.profile_manager.profiles[EXEC_PROFILE_DEFAULT]

    rf = session.execute_async("query")
    self._verify_response_future_profile(rf, default_profile)

    rf = session.execute_async("query", execution_profile='non-default')
    self._verify_response_future_profile(rf, non_default_profile)

    for name, ep in six.iteritems(cluster.profile_manager.profiles):
        self.assertEqual(ep, session.get_execution_profile(name))

    # invalid ep
    with self.assertRaises(ValueError):
        session.get_execution_profile('non-existent')
def test_default_serial_consistency_level(self, *_):
    """
    Make sure default_serial_consistency_level passes through to a query message.
    Also make sure Statement.serial_consistency_level overrides the default.

    PR #510
    """
    s = Session(Cluster(protocol_version=4), [Host("127.0.0.1", SimpleConvictionPolicy)])

    # default is None
    self.assertIsNone(s.default_serial_consistency_level)

    # should fail
    with self.assertRaises(ValueError):
        s.default_serial_consistency_level = ConsistencyLevel.ANY
    with self.assertRaises(ValueError):
        s.default_serial_consistency_level = 1001

    for cl in (None, ConsistencyLevel.LOCAL_SERIAL, ConsistencyLevel.SERIAL):
        s.default_serial_consistency_level = cl

        # default is passed through
        f = s.execute_async(query='')
        self.assertEqual(f.message.serial_consistency_level, cl)

        # any non-None statement setting takes precedence
        for cl_override in (ConsistencyLevel.LOCAL_SERIAL, ConsistencyLevel.SERIAL):
            f = s.execute_async(SimpleStatement(query_string='', serial_consistency_level=cl_override))
            self.assertEqual(s.default_serial_consistency_level, cl)
            self.assertEqual(f.message.serial_consistency_level, cl_override)
def add_host(self, address, datacenter, rack, signal=False):
    host = Host(address, SimpleConvictionPolicy, datacenter, rack)
    self.added_hosts.append(host)
    return host
from cassandra.metadata import (Murmur3Token, BytesToken,
                                ReplicationStrategy, NetworkTopologyStrategy,
                                SimpleStrategy, LocalStrategy,
                                protect_name, protect_names, protect_value,
                                is_valid_name, UserType, KeyspaceMetadata,
                                get_schema_parser, _UnknownStrategy,
                                ColumnMetadata, TableMetadata, IndexMetadata,
                                Function, Aggregate, Metadata, TokenMap)
from cassandra.policies import SimpleConvictionPolicy
from cassandra.pool import Host

token_to_host_owner = {}

host1 = Host('1.0.0.0', SimpleConvictionPolicy)
host1.set_location_info('dc1', 'rack1')
host2 = Host('2.0.0.0', SimpleConvictionPolicy)
host2.set_location_info('dc2', 'rack2')
host3 = Host('3.0.0.0', SimpleConvictionPolicy)
host3.set_location_info('dc1', 'rack1')
host4 = Host('4.0.0.0', SimpleConvictionPolicy)
host4.set_location_info('dc2', 'rack2')

token_to_host_owner[Murmur3Token("-9000000000000000000")] = host1
token_to_host_owner[Murmur3Token("-6000000000000000000")] = host2
token_to_host_owner[Murmur3Token("4000000000000000000")] = host3
token_to_host_owner[Murmur3Token("9000000000000000000")] = host4

ring = [Murmur3Token("-9000000000000000000"),
        Murmur3Token("-6000000000000000000"),
        Murmur3Token("4000000000000000000"),
        Murmur3Token("9000000000000000000")]
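# A short sketch of how this module-level fixture would typically be
# exercised (the replication factors below are illustrative assumptions,
# not taken from the original module):
nts = NetworkTopologyStrategy({'dc1': 1, 'dc2': 1})
replica_map = nts.make_token_replica_map(token_to_host_owner, ring)
# with one replica per DC, each token should map to exactly two hosts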
def test_nts_make_token_replica_map_multi_rack(self):
    token_to_host_owner = {}

    # (A) not enough distinct racks, first skipped is used
    dc1_1 = Host('dc1.1', SimpleConvictionPolicy)
    dc1_2 = Host('dc1.2', SimpleConvictionPolicy)
    dc1_3 = Host('dc1.3', SimpleConvictionPolicy)
    dc1_4 = Host('dc1.4', SimpleConvictionPolicy)
    dc1_1.set_location_info('dc1', 'rack1')
    dc1_2.set_location_info('dc1', 'rack1')
    dc1_3.set_location_info('dc1', 'rack2')
    dc1_4.set_location_info('dc1', 'rack2')
    token_to_host_owner[MD5Token(0)] = dc1_1
    token_to_host_owner[MD5Token(100)] = dc1_2
    token_to_host_owner[MD5Token(200)] = dc1_3
    token_to_host_owner[MD5Token(300)] = dc1_4

    # (B) distinct racks, but not contiguous
    dc2_1 = Host('dc2.1', SimpleConvictionPolicy)
    dc2_2 = Host('dc2.2', SimpleConvictionPolicy)
    dc2_3 = Host('dc2.3', SimpleConvictionPolicy)
    dc2_1.set_location_info('dc2', 'rack1')
    dc2_2.set_location_info('dc2', 'rack1')
    dc2_3.set_location_info('dc2', 'rack2')
    token_to_host_owner[MD5Token(1)] = dc2_1
    token_to_host_owner[MD5Token(101)] = dc2_2
    token_to_host_owner[MD5Token(201)] = dc2_3

    ring = [MD5Token(0),
            MD5Token(1),
            MD5Token(100),
            MD5Token(101),
            MD5Token(200),
            MD5Token(201),
            MD5Token(300)]

    nts = NetworkTopologyStrategy({'dc1': 3, 'dc2': 2})
    replica_map = nts.make_token_replica_map(token_to_host_owner, ring)

    token_replicas = replica_map[MD5Token(0)]
    self.assertItemsEqual(token_replicas, (dc1_1, dc1_2, dc1_3, dc2_1, dc2_3))
def test_modification_during_generation(self):
    hosts = [Host(i, SimpleConvictionPolicy) for i in range(4)]
    for h in hosts[:2]:
        h.set_location_info("dc1", "rack1")
    for h in hosts[2:]:
        h.set_location_info("dc2", "rack1")

    policy = DCAwareRoundRobinPolicy("dc1", used_hosts_per_remote_dc=3)
    policy.populate(Mock(), hosts)

    # The general concept here is to change the internal state of the
    # policy during plan generation. In this case we use a grey-box
    # approach that changes specific things during known phases of the
    # generator.

    new_host = Host(4, SimpleConvictionPolicy)
    new_host.set_location_info("dc1", "rack1")

    # new local before iteration
    plan = policy.make_query_plan()
    policy.on_up(new_host)
    # local list is not bound yet, so we get to see that one
    self.assertEqual(len(list(plan)), 3 + 2)

    # remove local before iteration
    plan = policy.make_query_plan()
    policy.on_down(new_host)
    # local list is not bound yet, so we don't see it
    self.assertEqual(len(list(plan)), 2 + 2)

    # new local after starting iteration
    plan = policy.make_query_plan()
    next(plan)
    policy.on_up(new_host)
    # local list is bound, and one consumed, so we only see the other original
    self.assertEqual(len(list(plan)), 1 + 2)

    # remove local after traversing available
    plan = policy.make_query_plan()
    for _ in range(3):
        next(plan)
    policy.on_down(new_host)
    # we should be past the local list
    self.assertEqual(len(list(plan)), 0 + 2)

    # REMOTES CHANGE
    new_host.set_location_info("dc2", "rack1")

    # new remote after traversing local, but not starting remote
    plan = policy.make_query_plan()
    for _ in range(2):
        next(plan)
    policy.on_up(new_host)
    # list is updated before we get to it
    self.assertEqual(len(list(plan)), 0 + 3)

    # remove remote after traversing local, but not starting remote
    plan = policy.make_query_plan()
    for _ in range(2):
        next(plan)
    policy.on_down(new_host)
    # list is updated before we get to it
    self.assertEqual(len(list(plan)), 0 + 2)

    # new remote after traversing local, and starting remote
    plan = policy.make_query_plan()
    for _ in range(3):
        next(plan)
    policy.on_up(new_host)
    # slice is already made, and we've consumed one
    self.assertEqual(len(list(plan)), 0 + 1)

    # remove remote after traversing local, and starting remote
    plan = policy.make_query_plan()
    for _ in range(3):
        next(plan)
    policy.on_down(new_host)
    # slice is created with all present, and we've consumed one
    self.assertEqual(len(list(plan)), 0 + 2)

    # local DC disappears after finishing it, but not starting remote
    plan = policy.make_query_plan()
    for _ in range(2):
        next(plan)
    policy.on_down(hosts[0])
    policy.on_down(hosts[1])
    # dict traversal starts as normal
    self.assertEqual(len(list(plan)), 0 + 2)
    policy.on_up(hosts[0])
    policy.on_up(hosts[1])

    # PYTHON-297 addresses the following cases, where DCs come and go
    # during generation

    # local DC disappears after finishing it, and starting remote
    plan = policy.make_query_plan()
    for _ in range(3):
        next(plan)
    policy.on_down(hosts[0])
    policy.on_down(hosts[1])
    # dict traversal has begun and consumed one
    self.assertEqual(len(list(plan)), 0 + 1)
    policy.on_up(hosts[0])
    policy.on_up(hosts[1])

    # remote DC disappears after finishing local, but not starting remote
    plan = policy.make_query_plan()
    for _ in range(2):
        next(plan)
    policy.on_down(hosts[2])
    policy.on_down(hosts[3])
    # nothing left
    self.assertEqual(len(list(plan)), 0 + 0)
    policy.on_up(hosts[2])
    policy.on_up(hosts[3])

    # remote DC disappears while traversing it
    plan = policy.make_query_plan()
    for _ in range(3):
        next(plan)
    policy.on_down(hosts[2])
    policy.on_down(hosts[3])
    # we continue with remainder of original list
    self.assertEqual(len(list(plan)), 0 + 1)
    policy.on_up(hosts[2])
    policy.on_up(hosts[3])

    another_host = Host(5, SimpleConvictionPolicy)
    another_host.set_location_info("dc3", "rack1")
    new_host.set_location_info("dc3", "rack1")

    # new DC while traversing remote
    plan = policy.make_query_plan()
    for _ in range(3):
        next(plan)
    policy.on_up(new_host)
    policy.on_up(another_host)
    # we continue with remainder of original list
    self.assertEqual(len(list(plan)), 0 + 1)

    # remote DC disappears after finishing it
    plan = policy.make_query_plan()
    for _ in range(3):
        next(plan)
    last_host_in_this_dc = next(plan)
    if last_host_in_this_dc in (new_host, another_host):
        down_hosts = [new_host, another_host]
    else:
        down_hosts = hosts[2:]
    for h in down_hosts:
        policy.on_down(h)
    # the last DC has two
    self.assertEqual(len(list(plan)), 0 + 2)
def test_get_distance(self):
    """
    Same test as DCAwareRoundRobinPolicyTest.test_get_distance()
    Except a FakeCluster is needed for the metadata variable and
    policy.child_policy is needed to change child policy settings
    """
    policy = TokenAwarePolicy(DCAwareRoundRobinPolicy("dc1", used_hosts_per_remote_dc=0))
    host = Host("ip1", SimpleConvictionPolicy)
    host.set_location_info("dc1", "rack1")
    policy.populate(self.FakeCluster(), [host])

    self.assertEqual(policy.distance(host), HostDistance.LOCAL)

    # used_hosts_per_remote_dc is set to 0, so ignore it
    remote_host = Host("ip2", SimpleConvictionPolicy)
    remote_host.set_location_info("dc2", "rack1")
    self.assertEqual(policy.distance(remote_host), HostDistance.IGNORED)

    # dc2 isn't registered in the policy's live_hosts dict
    policy.child_policy.used_hosts_per_remote_dc = 1
    self.assertEqual(policy.distance(remote_host), HostDistance.IGNORED)

    # make sure the policy has both dcs registered
    policy.populate(self.FakeCluster(), [host, remote_host])
    self.assertEqual(policy.distance(remote_host), HostDistance.REMOTE)

    # since used_hosts_per_remote_dc is set to 1, only the first
    # remote host in dc2 will be REMOTE, the rest are IGNORED
    second_remote_host = Host("ip3", SimpleConvictionPolicy)
    second_remote_host.set_location_info("dc2", "rack1")
    policy.populate(self.FakeCluster(), [host, remote_host, second_remote_host])
    distances = set([policy.distance(remote_host), policy.distance(second_remote_host)])
    self.assertEqual(distances, set([HostDistance.REMOTE, HostDistance.IGNORED]))
def test_nts_make_token_replica_map(self):
    token_to_host_owner = {}

    dc1_1 = Host('dc1.1', SimpleConvictionPolicy)
    dc1_2 = Host('dc1.2', SimpleConvictionPolicy)
    dc1_3 = Host('dc1.3', SimpleConvictionPolicy)
    for host in (dc1_1, dc1_2, dc1_3):
        host.set_location_info('dc1', 'rack1')
    token_to_host_owner[MD5Token(0)] = dc1_1
    token_to_host_owner[MD5Token(100)] = dc1_2
    token_to_host_owner[MD5Token(200)] = dc1_3

    dc2_1 = Host('dc2.1', SimpleConvictionPolicy)
    dc2_2 = Host('dc2.2', SimpleConvictionPolicy)
    dc2_1.set_location_info('dc2', 'rack1')
    dc2_2.set_location_info('dc2', 'rack1')
    token_to_host_owner[MD5Token(1)] = dc2_1
    token_to_host_owner[MD5Token(101)] = dc2_2

    dc3_1 = Host('dc3.1', SimpleConvictionPolicy)
    dc3_1.set_location_info('dc3', 'rack3')
    token_to_host_owner[MD5Token(2)] = dc3_1

    ring = [MD5Token(0),
            MD5Token(1),
            MD5Token(2),
            MD5Token(100),
            MD5Token(101),
            MD5Token(200)]

    nts = NetworkTopologyStrategy({'dc1': 2, 'dc2': 2, 'dc3': 1})
    replica_map = nts.make_token_replica_map(token_to_host_owner, ring)

    self.assertItemsEqual(replica_map[MD5Token(0)], (dc1_1, dc1_2, dc2_1, dc2_2, dc3_1))
def test_get_distance(self):
    policy = DCAwareRoundRobinPolicy("dc1", used_hosts_per_remote_dc=0)
    host = Host("ip1", SimpleConvictionPolicy)
    host.set_location_info("dc1", "rack1")
    policy.populate(None, [host])

    self.assertEqual(policy.distance(host), HostDistance.LOCAL)

    # used_hosts_per_remote_dc is set to 0, so ignore it
    remote_host = Host("ip2", SimpleConvictionPolicy)
    remote_host.set_location_info("dc2", "rack1")
    self.assertEqual(policy.distance(remote_host), HostDistance.IGNORED)

    # dc2 isn't registered in the policy's live_hosts dict
    policy.used_hosts_per_remote_dc = 1
    self.assertEqual(policy.distance(remote_host), HostDistance.IGNORED)

    # make sure the policy has both dcs registered
    policy.populate(None, [host, remote_host])
    self.assertEqual(policy.distance(remote_host), HostDistance.REMOTE)

    # since used_hosts_per_remote_dc is set to 1, only the first
    # remote host in dc2 will be REMOTE, the rest are IGNORED
    second_remote_host = Host("ip3", SimpleConvictionPolicy)
    second_remote_host.set_location_info("dc2", "rack1")
    policy.populate(None, [host, remote_host, second_remote_host])
    distances = set([policy.distance(remote_host), policy.distance(second_remote_host)])
    self.assertEqual(distances, set([HostDistance.REMOTE, HostDistance.IGNORED]))
def add_host(self, address, signal=False):
    host = Host(address, SimpleConvictionPolicy)
    self.added_hosts.append(host)
    return host
def test_nts_make_token_replica_map_multi_rack(self):
    token_to_host_owner = {}

    # (A) not enough distinct racks, first skipped is used
    dc1_1 = Host('dc1.1', SimpleConvictionPolicy)
    dc1_2 = Host('dc1.2', SimpleConvictionPolicy)
    dc1_3 = Host('dc1.3', SimpleConvictionPolicy)
    dc1_4 = Host('dc1.4', SimpleConvictionPolicy)
    dc1_1.set_location_info('dc1', 'rack1')
    dc1_2.set_location_info('dc1', 'rack1')
    dc1_3.set_location_info('dc1', 'rack2')
    dc1_4.set_location_info('dc1', 'rack2')
    token_to_host_owner[MD5Token(0)] = dc1_1
    token_to_host_owner[MD5Token(100)] = dc1_2
    token_to_host_owner[MD5Token(200)] = dc1_3
    token_to_host_owner[MD5Token(300)] = dc1_4

    # (B) distinct racks, but not contiguous
    dc2_1 = Host('dc2.1', SimpleConvictionPolicy)
    dc2_2 = Host('dc2.2', SimpleConvictionPolicy)
    dc2_3 = Host('dc2.3', SimpleConvictionPolicy)
    dc2_1.set_location_info('dc2', 'rack1')
    dc2_2.set_location_info('dc2', 'rack1')
    dc2_3.set_location_info('dc2', 'rack2')
    token_to_host_owner[MD5Token(1)] = dc2_1
    token_to_host_owner[MD5Token(101)] = dc2_2
    token_to_host_owner[MD5Token(201)] = dc2_3

    ring = [MD5Token(0),
            MD5Token(1),
            MD5Token(100),
            MD5Token(101),
            MD5Token(200),
            MD5Token(201),
            MD5Token(300)]

    nts = NetworkTopologyStrategy({'dc1': 3, 'dc2': 2})
    replica_map = nts.make_token_replica_map(token_to_host_owner, ring)

    token_replicas = replica_map[MD5Token(0)]
    self.assertItemsEqual(token_replicas, (dc1_1, dc1_2, dc1_3, dc2_1, dc2_3))
def setUp(self):
    os.environ['SSL_VALIDATE'] = 'False'
    self.config_file = 'test_config'
    self.host = Host('10.0.0.1', SimpleConvictionPolicy, 9000)