    def test_status_updates(self):
        hosts = [Host(i, SimpleConvictionPolicy) for i in range(4)]
        for h in hosts[:2]:
            h.set_location_info("dc1", "rack1")
        for h in hosts[2:]:
            h.set_location_info("dc2", "rack1")

        policy = DCAwareRoundRobinPolicy("dc1", used_hosts_per_remote_dc=1)
        policy.populate(None, hosts)
        policy.on_down(hosts[0])
        policy.on_remove(hosts[2])

        new_local_host = Host(4, SimpleConvictionPolicy)
        new_local_host.set_location_info("dc1", "rack1")
        policy.on_up(new_local_host)

        new_remote_host = Host(5, SimpleConvictionPolicy)
        new_remote_host.set_location_info("dc9000", "rack1")
        policy.on_add(new_remote_host)

        # we now have two local hosts and two remote hosts in separate dcs
        qplan = list(policy.make_query_plan())
        self.assertEqual(set(qplan[:2]), set([hosts[1], new_local_host]))
        self.assertEqual(set(qplan[2:]), set([hosts[3], new_remote_host]))

        # since we have hosts in dc9000, the distance shouldn't be IGNORED
        self.assertEqual(policy.distance(new_remote_host), HostDistance.REMOTE)
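
These snippets come from the cassandra-driver unit tests and are shown without their module headers. A minimal import block along the following lines (an assumption for running them standalone, not part of the original files) covers the names they use:

import timeit
from unittest.mock import Mock  # the original suite may import Mock from the mock backport

from cassandra.metadata import MD5Token, NetworkTopologyStrategy
from cassandra.policies import (DCAwareRoundRobinPolicy, HostDistance,
                                LoadBalancingPolicy, SimpleConvictionPolicy,
                                TokenAwarePolicy)
from cassandra.pool import Host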
    def test_nts_make_token_replica_map_empty_dc(self):
        host = Host('1', SimpleConvictionPolicy)
        host.set_location_info('dc1', 'rack1')
        token_to_host_owner = {MD5Token(0): host}
        ring = [MD5Token(0)]
        nts = NetworkTopologyStrategy({'dc1': 1, 'dc2': 0})

        replica_map = nts.make_token_replica_map(token_to_host_owner, ring)
        self.assertEqual(set(replica_map[MD5Token(0)]), set([host]))
    def test_no_remote(self):
        hosts = []
        for i in range(4):
            h = Host(i, SimpleConvictionPolicy)
            h.set_location_info("dc1", "rack1")
            hosts.append(h)

        policy = DCAwareRoundRobinPolicy("dc1")
        policy.populate(None, hosts)
        qplan = list(policy.make_query_plan())
        self.assertEqual(sorted(qplan), sorted(hosts))
    def test_non_implemented(self):
        """
        Code coverage for interface-style base class
        """

        policy = LoadBalancingPolicy()
        host = Host("ip1", SimpleConvictionPolicy)
        host.set_location_info("dc1", "rack1")

        self.assertRaises(NotImplementedError, policy.distance, host)
        self.assertRaises(NotImplementedError, policy.populate, None, host)
        self.assertRaises(NotImplementedError, policy.make_query_plan)
        self.assertRaises(NotImplementedError, policy.on_up, host)
        self.assertRaises(NotImplementedError, policy.on_down, host)
        self.assertRaises(NotImplementedError, policy.on_add, host)
        self.assertRaises(NotImplementedError, policy.on_remove, host)
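
test_non_implemented exercises the abstract surface of LoadBalancingPolicy. For orientation, a toy concrete subclass (a sketch against the seven methods the test names, not driver code) could look like this:

class EverythingLocalPolicy(LoadBalancingPolicy):
    """Hypothetical policy: every known host is LOCAL, plans run in order."""

    def populate(self, cluster, hosts):
        self._hosts = list(hosts)

    def distance(self, host):
        return HostDistance.LOCAL

    def make_query_plan(self, working_keyspace=None, query=None):
        # Snapshot the host list so concurrent up/down events don't
        # break iteration.
        for host in list(self._hosts):
            yield host

    def on_up(self, host):
        if host not in self._hosts:
            self._hosts.append(host)

    def on_down(self, host):
        if host in self._hosts:
            self._hosts.remove(host)

    on_add = on_up
    on_remove = on_down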
    def test_no_live_nodes(self):
        """
        Ensure query plan for a downed cluster will execute without errors
        """

        hosts = []
        for i in range(4):
            h = Host(i, SimpleConvictionPolicy)
            h.set_location_info("dc1", "rack1")
            hosts.append(h)

        policy = DCAwareRoundRobinPolicy("dc1", used_hosts_per_remote_dc=1)
        policy.populate(None, hosts)

        for host in hosts:
            policy.on_down(host)

        qplan = list(policy.make_query_plan())
        self.assertEqual(qplan, [])
    def test_get_distance(self):
        """
        Same test as DCAwareRoundRobinPolicyTest.test_get_distance(),
        except a FakeCluster is needed for the metadata variable and
        policy._child_policy is needed to change the child policy settings
        """

        policy = TokenAwarePolicy(DCAwareRoundRobinPolicy("dc1", used_hosts_per_remote_dc=0))
        host = Host("ip1", SimpleConvictionPolicy)
        host.set_location_info("dc1", "rack1")

        policy.populate(self.FakeCluster(), [host])

        self.assertEqual(policy.distance(host), HostDistance.LOCAL)

        # used_hosts_per_remote_dc is set to 0, so ignore it
        remote_host = Host("ip2", SimpleConvictionPolicy)
        remote_host.set_location_info("dc2", "rack1")
        self.assertEqual(policy.distance(remote_host), HostDistance.IGNORED)

        # dc2 isn't registered in the policy's live_hosts dict
        policy._child_policy.used_hosts_per_remote_dc = 1
        self.assertEqual(policy.distance(remote_host), HostDistance.IGNORED)

        # make sure the policy has both dcs registered
        policy.populate(self.FakeCluster(), [host, remote_host])
        self.assertEqual(policy.distance(remote_host), HostDistance.REMOTE)

        # since used_hosts_per_remote_dc is set to 1, only the first
        # remote host in dc2 will be REMOTE, the rest are IGNORED
        second_remote_host = Host("ip3", SimpleConvictionPolicy)
        second_remote_host.set_location_info("dc2", "rack1")
        policy.populate(self.FakeCluster(), [host, remote_host, second_remote_host])
        distances = set([policy.distance(remote_host), policy.distance(second_remote_host)])
        self.assertEqual(distances, set([HostDistance.REMOTE, HostDistance.IGNORED]))
    def test_nts_make_token_replica_map(self):
        token_to_host_owner = {}

        dc1_1 = Host('dc1.1', SimpleConvictionPolicy)
        dc1_2 = Host('dc1.2', SimpleConvictionPolicy)
        dc1_3 = Host('dc1.3', SimpleConvictionPolicy)
        for host in (dc1_1, dc1_2, dc1_3):
            host.set_location_info('dc1', 'rack1')
        token_to_host_owner[MD5Token(0)] = dc1_1
        token_to_host_owner[MD5Token(100)] = dc1_2
        token_to_host_owner[MD5Token(200)] = dc1_3

        dc2_1 = Host('dc2.1', SimpleConvictionPolicy)
        dc2_2 = Host('dc2.2', SimpleConvictionPolicy)
        dc2_1.set_location_info('dc2', 'rack1')
        dc2_2.set_location_info('dc2', 'rack1')
        token_to_host_owner[MD5Token(1)] = dc2_1
        token_to_host_owner[MD5Token(101)] = dc2_2

        dc3_1 = Host('dc3.1', SimpleConvictionPolicy)
        dc3_1.set_location_info('dc3', 'rack3')
        token_to_host_owner[MD5Token(2)] = dc3_1

        ring = [MD5Token(0),
                MD5Token(1),
                MD5Token(2),
                MD5Token(100),
                MD5Token(101),
                MD5Token(200)]

        nts = NetworkTopologyStrategy({'dc1': 2, 'dc2': 2, 'dc3': 1})
        replica_map = nts.make_token_replica_map(token_to_host_owner, ring)

        self.assertItemsEqual(replica_map[MD5Token(0)], (dc1_1, dc1_2, dc2_1, dc2_2, dc3_1))
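
The expected replica set above can be reproduced with a simplified, rack-unaware ring walk. This sketch illustrates the placement idea only (an assumed helper, not the driver's implementation, which also handles rack diversity as the multi-rack test below shows):

def simplified_nts_replicas(start_index, ring, token_to_host_owner, rf_per_dc):
    # Walk clockwise from the token, taking each host at most once,
    # until every DC has collected its replication factor.
    remaining = dict(rf_per_dc)
    replicas = []
    for i in range(len(ring)):
        host = token_to_host_owner[ring[(start_index + i) % len(ring)]]
        if host not in replicas and remaining.get(host.datacenter, 0) > 0:
            remaining[host.datacenter] -= 1
            replicas.append(host)
        if not any(remaining.values()):
            break
    return replicas

# With the ring above, starting at MD5Token(0):
# simplified_nts_replicas(0, ring, token_to_host_owner, {'dc1': 2, 'dc2': 2, 'dc3': 1})
# yields [dc1_1, dc2_1, dc3_1, dc1_2, dc2_2], the same set the test asserts.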
    def test_get_distance(self):
        policy = DCAwareRoundRobinPolicy("dc1", used_hosts_per_remote_dc=0)
        host = Host("ip1", SimpleConvictionPolicy)
        host.set_location_info("dc1", "rack1")
        policy.populate(Mock(), [host])

        self.assertEqual(policy.distance(host), HostDistance.LOCAL)

        # used_hosts_per_remote_dc is set to 0, so ignore it
        remote_host = Host("ip2", SimpleConvictionPolicy)
        remote_host.set_location_info("dc2", "rack1")
        self.assertEqual(policy.distance(remote_host), HostDistance.IGNORED)

        # dc2 isn't registered in the policy's live_hosts dict
        policy.used_hosts_per_remote_dc = 1
        self.assertEqual(policy.distance(remote_host), HostDistance.IGNORED)

        # make sure the policy has both dcs registered
        policy.populate(Mock(), [host, remote_host])
        self.assertEqual(policy.distance(remote_host), HostDistance.REMOTE)

        # since used_hosts_per_remote_dc is set to 1, only the first
        # remote host in dc2 will be REMOTE, the rest are IGNORED
        second_remote_host = Host("ip3", SimpleConvictionPolicy)
        second_remote_host.set_location_info("dc2", "rack1")
        policy.populate(Mock(), [host, remote_host, second_remote_host])
        distances = set([policy.distance(remote_host), policy.distance(second_remote_host)])
        self.assertEqual(distances, set([HostDistance.REMOTE, HostDistance.IGNORED]))
    def test_nts_token_performance(self):
        """
        Test to ensure that when the RF exceeds the number of nodes available, we don't
        needlessly iterate trying to construct tokens for nodes that don't exist.

        @since 3.7
        @jira_ticket PYTHON-379
        @expected_result timing with an RF of 1500 should be the same/similar to an RF of 3 if we have 3 nodes

        @test_category metadata
        """

        token_to_host_owner = {}
        ring = []
        dc1hostnum = 3
        current_token = 0
        vnodes_per_host = 500
        for i in range(dc1hostnum):

            host = Host('dc1.{0}'.format(i), SimpleConvictionPolicy)
            host.set_location_info('dc1', "rack1")
            for vnode_num in range(vnodes_per_host):
                md5_token = MD5Token(current_token + vnode_num)
                token_to_host_owner[md5_token] = host
                ring.append(md5_token)
            current_token += 1000

        nts = NetworkTopologyStrategy({'dc1': 3})
        start_time = timeit.default_timer()
        nts.make_token_replica_map(token_to_host_owner, ring)
        elapsed_base = timeit.default_timer() - start_time

        nts = NetworkTopologyStrategy({'dc1': 1500})
        start_time = timeit.default_timer()
        nts.make_token_replica_map(token_to_host_owner, ring)
        elapsed_bad = timeit.default_timer() - start_time
        difference = elapsed_bad - elapsed_base
        self.assertTrue(-1 < difference < 1)
from cassandra.metadata import (Murmur3Token, MD5Token,
                                BytesToken, ReplicationStrategy,
                                NetworkTopologyStrategy, SimpleStrategy,
                                LocalStrategy, protect_name,
                                protect_names, protect_value, is_valid_name,
                                UserType, KeyspaceMetadata, get_schema_parser,
                                _UnknownStrategy, ColumnMetadata, TableMetadata,
                                IndexMetadata, Function, Aggregate,
                                Metadata, TokenMap)
from cassandra.policies import SimpleConvictionPolicy
from cassandra.pool import Host

token_to_host_owner = {}

host1 = Host('1.0.0.0', SimpleConvictionPolicy)
host1.set_location_info('dc1', 'rack1')
host2 = Host('2.0.0.0', SimpleConvictionPolicy)
host2.set_location_info('dc2', 'rack2')
host3 = Host('3.0.0.0', SimpleConvictionPolicy)
host3.set_location_info('dc1', 'rack1')
host4 = Host('4.0.0.0', SimpleConvictionPolicy)
host4.set_location_info('dc2', 'rack2')

token_to_host_owner[Murmur3Token("-9000000000000000000")] = host1
token_to_host_owner[Murmur3Token("-6000000000000000000")] = host2
token_to_host_owner[Murmur3Token("4000000000000000000")] = host3
token_to_host_owner[Murmur3Token("9000000000000000000")] = host4

ring = [
        Murmur3Token("-9000000000000000000"),
        Murmur3Token("-6000000000000000000"),
    def test_modification_during_generation(self):
        hosts = [Host(i, SimpleConvictionPolicy) for i in range(4)]
        for h in hosts[:2]:
            h.set_location_info("dc1", "rack1")
        for h in hosts[2:]:
            h.set_location_info("dc2", "rack1")

        policy = DCAwareRoundRobinPolicy("dc1", used_hosts_per_remote_dc=3)
        policy.populate(Mock(), hosts)

        # The general concept here is to change the internal state of the
        # policy during plan generation. In this case we use a grey-box
        # approach that changes specific things during known phases of the
        # generator.

        new_host = Host(4, SimpleConvictionPolicy)
        new_host.set_location_info("dc1", "rack1")

        # new local before iteration
        plan = policy.make_query_plan()
        policy.on_up(new_host)
        # local list is not bound yet, so we get to see that one
        self.assertEqual(len(list(plan)), 3 + 2)

        # remove local before iteration
        plan = policy.make_query_plan()
        policy.on_down(new_host)
        # local list is not bound yet, so we don't see it
        self.assertEqual(len(list(plan)), 2 + 2)

        # new local after starting iteration
        plan = policy.make_query_plan()
        next(plan)
        policy.on_up(new_host)
        # local list is bound, and one is consumed, so we only see the other original
        self.assertEqual(len(list(plan)), 1 + 2)

        # remove local after traversing available
        plan = policy.make_query_plan()
        for _ in range(3):
            next(plan)
        policy.on_down(new_host)
        # we should be past the local list
        self.assertEqual(len(list(plan)), 0 + 2)

        # REMOTES CHANGE
        new_host.set_location_info("dc2", "rack1")

        # new remote after traversing local, but not starting remote
        plan = policy.make_query_plan()
        for _ in range(2):
            next(plan)
        policy.on_up(new_host)
        # list is updated before we get to it
        self.assertEqual(len(list(plan)), 0 + 3)

        # remove remote after traversing local, but not starting remote
        plan = policy.make_query_plan()
        for _ in range(2):
            next(plan)
        policy.on_down(new_host)
        # list is updated before we get to it
        self.assertEqual(len(list(plan)), 0 + 2)

        # new remote after traversing local, and starting remote
        plan = policy.make_query_plan()
        for _ in range(3):
            next(plan)
        policy.on_up(new_host)
        # slice is already made, and we've consumed one
        self.assertEqual(len(list(plan)), 0 + 1)

        # remove remote after traversing local, and starting remote
        plan = policy.make_query_plan()
        for _ in range(3):
            next(plan)
        policy.on_down(new_host)
        # slice is created with all present, and we've consumed one
        self.assertEqual(len(list(plan)), 0 + 2)

        # local DC disappears after finishing it, but not starting remote
        plan = policy.make_query_plan()
        for _ in range(2):
            next(plan)
        policy.on_down(hosts[0])
        policy.on_down(hosts[1])
        # dict traversal starts as normal
        self.assertEqual(len(list(plan)), 0 + 2)
        policy.on_up(hosts[0])
        policy.on_up(hosts[1])

        # PYTHON-297 addresses the following cases, where DCs come and go
        # during generation
        # local DC disappears after finishing it, and starting remote
        plan = policy.make_query_plan()
        for _ in range(3):
            next(plan)
        policy.on_down(hosts[0])
        policy.on_down(hosts[1])
        # dict traversal has begun and consumed one
        self.assertEqual(len(list(plan)), 0 + 1)
        policy.on_up(hosts[0])
        policy.on_up(hosts[1])

        # remote DC disappears after finishing local, but not starting remote
        plan = policy.make_query_plan()
        for _ in range(2):
            next(plan)
        policy.on_down(hosts[2])
        policy.on_down(hosts[3])
        # nothing left
        self.assertEqual(len(list(plan)), 0 + 0)
        policy.on_up(hosts[2])
        policy.on_up(hosts[3])

        # remote DC disappears while traversing it
        plan = policy.make_query_plan()
        for _ in range(3):
            next(plan)
        policy.on_down(hosts[2])
        policy.on_down(hosts[3])
        # we continue with remainder of original list
        self.assertEqual(len(list(plan)), 0 + 1)
        policy.on_up(hosts[2])
        policy.on_up(hosts[3])

        another_host = Host(5, SimpleConvictionPolicy)
        another_host.set_location_info("dc3", "rack1")
        new_host.set_location_info("dc3", "rack1")

        # new DC while traversing remote
        plan = policy.make_query_plan()
        for _ in range(3):
            next(plan)
        policy.on_up(new_host)
        policy.on_up(another_host)
        # we continue with remainder of original list
        self.assertEqual(len(list(plan)), 0 + 1)

        # remote DC disappears after finishing it
        plan = policy.make_query_plan()
        for _ in range(3):
            next(plan)
        last_host_in_this_dc = next(plan)
        if last_host_in_this_dc in (new_host, another_host):
            down_hosts = [new_host, another_host]
        else:
            down_hosts = hosts[2:]
        for h in down_hosts:
            policy.on_down(h)
        # the last DC has two
        self.assertEqual(len(list(plan)), 0 + 2)
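
The grey-box assertions above rest on ordinary generator semantics: make_query_plan returns a generator, which reads the policy's host state only as it is advanced and binds (snapshots) the local host list at a known phase. A standalone illustration of that late binding (plain Python, not driver code):

def plan(hosts):
    for host in list(hosts):  # the snapshot is taken on the first advance
        yield host

hosts = ["a", "b"]
p = plan(hosts)      # creating the generator runs nothing yet
hosts.append("c")    # a mutation before the first next() is still observed
assert list(p) == ["a", "b", "c"]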
    def test_nts_make_token_replica_map_multi_rack(self):
        token_to_host_owner = {}

        # (A) not enough distinct racks, first skipped is used
        dc1_1 = Host('dc1.1', SimpleConvictionPolicy)
        dc1_2 = Host('dc1.2', SimpleConvictionPolicy)
        dc1_3 = Host('dc1.3', SimpleConvictionPolicy)
        dc1_4 = Host('dc1.4', SimpleConvictionPolicy)
        dc1_1.set_location_info('dc1', 'rack1')
        dc1_2.set_location_info('dc1', 'rack1')
        dc1_3.set_location_info('dc1', 'rack2')
        dc1_4.set_location_info('dc1', 'rack2')
        token_to_host_owner[MD5Token(0)] = dc1_1
        token_to_host_owner[MD5Token(100)] = dc1_2
        token_to_host_owner[MD5Token(200)] = dc1_3
        token_to_host_owner[MD5Token(300)] = dc1_4

        # (B) distinct racks, but not contiguous
        dc2_1 = Host('dc2.1', SimpleConvictionPolicy)
        dc2_2 = Host('dc2.2', SimpleConvictionPolicy)
        dc2_3 = Host('dc2.3', SimpleConvictionPolicy)
        dc2_1.set_location_info('dc2', 'rack1')
        dc2_2.set_location_info('dc2', 'rack1')
        dc2_3.set_location_info('dc2', 'rack2')
        token_to_host_owner[MD5Token(1)] = dc2_1
        token_to_host_owner[MD5Token(101)] = dc2_2
        token_to_host_owner[MD5Token(201)] = dc2_3

        ring = [
            MD5Token(0),
            MD5Token(1),
            MD5Token(100),
            MD5Token(101),
            MD5Token(200),
            MD5Token(201),
            MD5Token(300)
        ]

        nts = NetworkTopologyStrategy({'dc1': 3, 'dc2': 2})
        replica_map = nts.make_token_replica_map(token_to_host_owner, ring)

        token_replicas = replica_map[MD5Token(0)]
        self.assertItemsEqual(token_replicas,
                              (dc1_1, dc1_2, dc1_3, dc2_1, dc2_3))