Example 1
    def test_statement_keyspace(self):
        hosts = [Host(str(i), SimpleConvictionPolicy) for i in range(4)]
        for host in hosts:
            host.set_up()

        cluster = Mock(spec=Cluster)
        cluster.metadata = Mock(spec=Metadata)
        replicas = hosts[2:]
        cluster.metadata.get_replicas.return_value = replicas

        child_policy = Mock()
        child_policy.make_query_plan.return_value = hosts
        child_policy.distance.return_value = HostDistance.LOCAL

        policy = TokenAwarePolicy(child_policy)
        policy.populate(cluster, hosts)

        # no keyspace, child policy is called
        keyspace = None
        routing_key = 'routing_key'
        query = Statement(routing_key=routing_key)
        qplan = list(policy.make_query_plan(keyspace, query))
        self.assertEqual(hosts, qplan)
        self.assertEqual(cluster.metadata.get_replicas.call_count, 0)
        child_policy.make_query_plan.assert_called_once_with(keyspace, query)

        # working keyspace, no statement
        cluster.metadata.get_replicas.reset_mock()
        keyspace = 'working_keyspace'
        routing_key = 'routing_key'
        query = Statement(routing_key=routing_key)
        qplan = list(policy.make_query_plan(keyspace, query))
        self.assertEqual(replicas + hosts[:2], qplan)
        cluster.metadata.get_replicas.assert_called_with(keyspace, routing_key)

        # statement keyspace, no working
        cluster.metadata.get_replicas.reset_mock()
        working_keyspace = None
        statement_keyspace = 'statement_keyspace'
        routing_key = 'routing_key'
        query = Statement(routing_key=routing_key, keyspace=statement_keyspace)
        qplan = list(policy.make_query_plan(working_keyspace, query))
        self.assertEqual(replicas + hosts[:2], qplan)
        cluster.metadata.get_replicas.assert_called_with(
            statement_keyspace, routing_key)

        # both keyspaces set, statement keyspace used for routing
        cluster.metadata.get_replicas.reset_mock()
        working_keyspace = 'working_keyspace'
        statement_keyspace = 'statement_keyspace'
        routing_key = 'routing_key'
        query = Statement(routing_key=routing_key, keyspace=statement_keyspace)
        qplan = list(policy.make_query_plan(working_keyspace, query))
        self.assertEqual(replicas + hosts[:2], qplan)
        cluster.metadata.get_replicas.assert_called_with(
            statement_keyspace, routing_key)
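As a point of reference, here is a minimal sketch (not taken from the test suite) of how a TokenAwarePolicy is typically wired into a real Cluster; the contact point and local_dc value are placeholders chosen for illustration:

from cassandra.cluster import Cluster
from cassandra.policies import DCAwareRoundRobinPolicy, TokenAwarePolicy

# Wrap a child policy so that replicas of a statement's routing key are tried first;
# statements without a keyspace or routing key fall through to the child policy's plan.
# '127.0.0.1' and 'dc1' are placeholder values.
token_aware = TokenAwarePolicy(DCAwareRoundRobinPolicy(local_dc='dc1'))
cluster = Cluster(['127.0.0.1'], load_balancing_policy=token_aware)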
Example 2
 def test_default_exec_parameters(self):
     cluster = Cluster()
     self.assertEqual(cluster._config_mode, _ConfigMode.UNCOMMITTED)
     self.assertEqual(cluster.load_balancing_policy.__class__, default_lbp_factory().__class__)
     self.assertEqual(cluster.default_retry_policy.__class__, RetryPolicy)
     session = Session(cluster, hosts=[Host("127.0.0.1", SimpleConvictionPolicy)])
     self.assertEqual(session.default_timeout, 10.0)
     self.assertEqual(session.default_consistency_level, ConsistencyLevel.LOCAL_ONE)
     self.assertEqual(session.default_serial_consistency_level, None)
     self.assertEqual(session.row_factory, named_tuple_factory)
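The defaults asserted above can be replaced up front through an execution profile rather than the legacy session attributes; a rough sketch, with the consistency level and timeout values chosen arbitrarily:

from cassandra import ConsistencyLevel
from cassandra.cluster import Cluster, ExecutionProfile, EXEC_PROFILE_DEFAULT
from cassandra.policies import RoundRobinPolicy

# Registering a default profile moves the cluster out of the legacy/uncommitted
# configuration mode and into profile-based configuration.
# LOCAL_QUORUM and 15.0 are arbitrary example values.
profile = ExecutionProfile(
    load_balancing_policy=RoundRobinPolicy(),
    consistency_level=ConsistencyLevel.LOCAL_QUORUM,
    request_timeout=15.0,
)
cluster = Cluster(execution_profiles={EXEC_PROFILE_DEFAULT: profile})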
Example 3
    def test_hosts_with_hostname(self):
        hosts = ['localhost']
        policy = WhiteListRoundRobinPolicy(hosts)
        host = Host("127.0.0.1", SimpleConvictionPolicy)
        policy.populate(None, [host])

        qplan = list(policy.make_query_plan())
        self.assertEqual(sorted(qplan), [host])

        self.assertEqual(policy.distance(host), HostDistance.LOCAL)
Example 4
    def test_host_equality(self):
        """
        Test that Host equality has the correct logic
        """

        a = Host('127.0.0.1', SimpleConvictionPolicy)
        b = Host('127.0.0.1', SimpleConvictionPolicy)
        c = Host('127.0.0.2', SimpleConvictionPolicy)

        self.assertEqual(a, b,
                         'Two Host instances should be equal when sharing the same address.')
        self.assertNotEqual(
            a, c,
            'Two Host instances should NOT be equal when using two different addresses.'
        )
        self.assertNotEqual(
            b, c,
            'Two Host instances should NOT be equal when using two different addresses.'
        )
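Since equality is decided by the address, equal hosts also collapse when placed in a set; a small sketch, assuming Host hashes consistently with its equality (the tests in this listing already place Host objects in sets):

from cassandra.policies import SimpleConvictionPolicy
from cassandra.pool import Host

a = Host('127.0.0.1', SimpleConvictionPolicy)
b = Host('127.0.0.1', SimpleConvictionPolicy)
c = Host('127.0.0.2', SimpleConvictionPolicy)
# Equal addresses -> equal (and identically hashed) hosts, so a and b deduplicate.
assert len({a, b, c}) == 2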
Example 5
    def add_host(self, address):
        cluster = self.cluster_ref()
        with self._hosts_lock:
            if address not in self._hosts:
                new_host = Host(address, cluster.conviction_policy_factory)
                self._hosts[address] = new_host
            else:
                return None

        return new_host

    def test_status_updates(self):
        """
        Same test as DCAwareRoundRobinPolicyTest.test_status_updates()
        """

        hosts = [Host(i, SimpleConvictionPolicy) for i in range(4)]
        for h in hosts[:2]:
            h.set_location_info("dc1", "rack1")
        for h in hosts[2:]:
            h.set_location_info("dc2", "rack1")

        policy = TokenAwarePolicy(
            DCAwareRoundRobinPolicy("dc1", used_hosts_per_remote_dc=1))
        policy.populate(self.FakeCluster(), hosts)
        policy.on_down(hosts[0])
        policy.on_remove(hosts[2])

        new_local_host = Host(4, SimpleConvictionPolicy)
        new_local_host.set_location_info("dc1", "rack1")
        policy.on_up(new_local_host)

        new_remote_host = Host(5, SimpleConvictionPolicy)
        new_remote_host.set_location_info("dc9000", "rack1")
        policy.on_add(new_remote_host)

        # we now have two local hosts and two remote hosts in separate dcs
        qplan = list(policy.make_query_plan())
        self.assertEqual(set(qplan[:2]), set([hosts[1], new_local_host]))
        self.assertEqual(set(qplan[2:]), set([hosts[3], new_remote_host]))

        # since we have hosts in dc9000, the distance shouldn't be IGNORED
        self.assertEqual(policy.distance(new_remote_host), HostDistance.REMOTE)

        policy.on_down(new_local_host)
        policy.on_down(hosts[1])
        qplan = list(policy.make_query_plan())
        self.assertEqual(set(qplan), set([hosts[3], new_remote_host]))

        policy.on_down(new_remote_host)
        policy.on_down(hosts[3])
        qplan = list(policy.make_query_plan())
        self.assertEqual(qplan, [])

    def test_no_remote(self):
        hosts = []
        for i in range(4):
            h = Host(i, SimpleConvictionPolicy)
            h.set_location_info("dc1", "rack1")
            hosts.append(h)

        policy = DCAwareRoundRobinPolicy("dc1")
        policy.populate(None, hosts)
        qplan = list(policy.make_query_plan())
        self.assertEqual(sorted(qplan), sorted(hosts))

    def test_get_distance(self):
        """
        Same test as DCAwareRoundRobinPolicyTest.test_get_distance()
        Except a FakeCluster is needed for the metadata variable and
        policy.child_policy is needed to change child policy settings
        """

        policy = TokenAwarePolicy(
            DCAwareRoundRobinPolicy("dc1", used_hosts_per_remote_dc=0))
        host = Host("ip1", SimpleConvictionPolicy)
        host.set_location_info("dc1", "rack1")

        policy.populate(self.FakeCluster(), [host])

        self.assertEqual(policy.distance(host), HostDistance.LOCAL)

        # used_hosts_per_remote_dc is set to 0, so ignore it
        remote_host = Host("ip2", SimpleConvictionPolicy)
        remote_host.set_location_info("dc2", "rack1")
        self.assertEqual(policy.distance(remote_host), HostDistance.IGNORED)

        # dc2 isn't registered in the policy's live_hosts dict
        policy.child_policy.used_hosts_per_remote_dc = 1
        self.assertEqual(policy.distance(remote_host), HostDistance.IGNORED)

        # make sure the policy has both dcs registered
        policy.populate(self.FakeCluster(), [host, remote_host])
        self.assertEqual(policy.distance(remote_host), HostDistance.REMOTE)

        # since used_hosts_per_remote_dc is set to 1, only the first
        # remote host in dc2 will be REMOTE, the rest are IGNORED
        second_remote_host = Host("ip3", SimpleConvictionPolicy)
        second_remote_host.set_location_info("dc2", "rack1")
        policy.populate(self.FakeCluster(),
                        [host, remote_host, second_remote_host])
        distances = set([
            policy.distance(remote_host),
            policy.distance(second_remote_host)
        ])
        self.assertEqual(distances,
                         set([HostDistance.REMOTE, HostDistance.IGNORED]))
Example 9
    def test_session_host_parameter(self):
        """
        Test for protocol negotiation

        Verify that NoHostAvailable is raised in Session.__init__ when there are no valid connections, and that
        no error is raised otherwise, even if some of the given hosts are invalid

        @since 3.9
        @jira_ticket PYTHON-665
        @expected_result NoHostAvailable when the driver is unable to connect to a valid host,
        no exception otherwise

        @test_category connection
        """
        with self.assertRaises(NoHostAvailable):
            Session(Cluster(protocol_version=PROTOCOL_VERSION), [])
        with self.assertRaises(NoHostAvailable):
            Session(Cluster(protocol_version=PROTOCOL_VERSION), [Host("1.2.3.4", SimpleConvictionPolicy)])
        session = Session(Cluster(protocol_version=PROTOCOL_VERSION), [Host(x, SimpleConvictionPolicy) for x in
                                      ("127.0.0.1", "127.0.0.2", "1.2.3.4")])
        session.shutdown()
Example 10
 def test_default_legacy(self):
     cluster = Cluster(load_balancing_policy=RoundRobinPolicy(), default_retry_policy=DowngradingConsistencyRetryPolicy())
     self.assertEqual(cluster._config_mode, _ConfigMode.LEGACY)
     session = Session(cluster, hosts=[Host("127.0.0.1", SimpleConvictionPolicy)])
     session.default_timeout = 3.7
     session.default_consistency_level = ConsistencyLevel.ALL
     session.default_serial_consistency_level = ConsistencyLevel.SERIAL
     rf = session.execute_async("query")
     expected_profile = ExecutionProfile(cluster.load_balancing_policy, cluster.default_retry_policy,
                                         session.default_consistency_level, session.default_serial_consistency_level,
                                         session.default_timeout, session.row_factory)
     self._verify_response_future_profile(rf, expected_profile)
Example 11
    def test_nts_make_token_replica_map(self):
        token_to_host_owner = {}

        dc1_1 = Host('dc1.1', SimpleConvictionPolicy)
        dc1_2 = Host('dc1.2', SimpleConvictionPolicy)
        dc1_3 = Host('dc1.3', SimpleConvictionPolicy)
        for host in (dc1_1, dc1_2, dc1_3):
            host.set_location_info('dc1', 'rack1')
        token_to_host_owner[MD5Token(0)] = dc1_1
        token_to_host_owner[MD5Token(100)] = dc1_2
        token_to_host_owner[MD5Token(200)] = dc1_3

        dc2_1 = Host('dc2.1', SimpleConvictionPolicy)
        dc2_2 = Host('dc2.2', SimpleConvictionPolicy)
        dc2_1.set_location_info('dc2', 'rack1')
        dc2_2.set_location_info('dc2', 'rack1')
        token_to_host_owner[MD5Token(1)] = dc2_1
        token_to_host_owner[MD5Token(101)] = dc2_2

        dc3_1 = Host('dc3.1', SimpleConvictionPolicy)
        dc3_1.set_location_info('dc3', 'rack3')
        token_to_host_owner[MD5Token(2)] = dc3_1

        ring = [MD5Token(0),
                MD5Token(1),
                MD5Token(2),
                MD5Token(100),
                MD5Token(101),
                MD5Token(200)]

        nts = NetworkTopologyStrategy({'dc1': 2, 'dc2': 2, 'dc3': 1})
        replica_map = nts.make_token_replica_map(token_to_host_owner, ring)

        self.assertItemsEqual(replica_map[MD5Token(0)], (dc1_1, dc1_2, dc2_1, dc2_2, dc3_1))
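The same strategy object can also render the options it was built from; a short sketch using only the constructor and export_for_schema() already exercised in these examples (the exact output formatting may vary between driver versions):

from cassandra.metadata import NetworkTopologyStrategy

nts = NetworkTopologyStrategy({'dc1': 2, 'dc2': 2, 'dc3': 1})
# Prints the replication map suitable for a CREATE KEYSPACE statement, roughly:
# {'class': 'NetworkTopologyStrategy', 'dc1': '2', 'dc2': '2', 'dc3': '1'}
print(nts.export_for_schema())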
Example 12
    def test_predicate_changes(self):
        """
        Test to validate that HostFilterPolicy reacts correctly when the predicate
        returns a different subset of the hosts
        @since 3.8
        @jira_ticket PYTHON-961
        @expected_result the excluded hosts are ignored

        @test_category policy
        """
        external_event = True
        contact_point = "127.0.0.1"

        single_host = {Host(contact_point, SimpleConvictionPolicy)}
        all_hosts = {Host("127.0.0.{}".format(i), SimpleConvictionPolicy) for i in (1, 2, 3)}

        predicate = lambda host: host.address == contact_point if external_event else True
        cluster = Cluster((contact_point,), load_balancing_policy=HostFilterPolicy(RoundRobinPolicy(),
                                                                                 predicate=predicate),
                          protocol_version=PROTOCOL_VERSION, topology_event_refresh_window=0,
                          status_event_refresh_window=0)
        session = cluster.connect(wait_for_all_pools=True)

        queried_hosts = set()
        for _ in range(10):
            response = session.execute("SELECT * from system.local")
            queried_hosts.update(response.response_future.attempted_hosts)

        self.assertEqual(queried_hosts, single_host)

        external_event = False
        futures = session.update_created_pools()
        wait_futures(futures, timeout=cluster.connect_timeout)

        queried_hosts = set()
        for _ in range(10):
            response = session.execute("SELECT * from system.local")
            queried_hosts.update(response.response_future.attempted_hosts)
        self.assertEqual(queried_hosts, all_hosts)
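A stripped-down sketch of the HostFilterPolicy construction used above, without a live cluster; the excluded address is a placeholder:

from cassandra.policies import HostFilterPolicy, RoundRobinPolicy

# Hosts rejected by the predicate are treated as IGNORED and left out of query plans,
# while accepted hosts are delegated to the wrapped child policy.
# '127.0.0.3' is a placeholder address.
policy = HostFilterPolicy(RoundRobinPolicy(),
                          predicate=lambda host: host.address != '127.0.0.3')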
Example 13
    def test_profile_name_value(self):

        internalized_profile = ExecutionProfile(RoundRobinPolicy(), *[object() for _ in range(2)])
        cluster = Cluster(execution_profiles={'by-name': internalized_profile})
        session = Session(cluster, hosts=[Host("127.0.0.1", SimpleConvictionPolicy)])
        self.assertEqual(cluster._config_mode, _ConfigMode.PROFILES)

        rf = session.execute_async("query", execution_profile='by-name')
        self._verify_response_future_profile(rf, internalized_profile)

        by_value = ExecutionProfile(RoundRobinPolicy(), *[object() for _ in range(2)])
        rf = session.execute_async("query", execution_profile=by_value)
        self._verify_response_future_profile(rf, by_value)
Example 14
    def test_default_dc(self):
        host_local = Host(1, SimpleConvictionPolicy, 'local')
        host_remote = Host(2, SimpleConvictionPolicy, 'remote')
        host_none = Host(1, SimpleConvictionPolicy)

        # contact point is '1'
        cluster = Mock(contact_points=[1])

        # contact DC first
        policy = DCAwareRoundRobinPolicy()
        policy.populate(cluster, [host_none])
        self.assertFalse(policy.local_dc)
        policy.on_add(host_local)
        policy.on_add(host_remote)
        self.assertNotEqual(policy.local_dc, host_remote.datacenter)
        self.assertEqual(policy.local_dc, host_local.datacenter)

        # contact DC second
        policy = DCAwareRoundRobinPolicy()
        policy.populate(cluster, [host_none])
        self.assertFalse(policy.local_dc)
        policy.on_add(host_remote)
        policy.on_add(host_local)
        self.assertNotEqual(policy.local_dc, host_remote.datacenter)
        self.assertEqual(policy.local_dc, host_local.datacenter)

        # no DC
        policy = DCAwareRoundRobinPolicy()
        policy.populate(cluster, [host_none])
        self.assertFalse(policy.local_dc)
        policy.on_add(host_none)
        self.assertFalse(policy.local_dc)

        # only other DC
        policy = DCAwareRoundRobinPolicy()
        policy.populate(cluster, [host_none])
        self.assertFalse(policy.local_dc)
        policy.on_add(host_remote)
        self.assertFalse(policy.local_dc)
Example 15
    def test_default_profile(self):
        non_default_profile = ExecutionProfile(RoundRobinPolicy(), *[object() for _ in range(3)])
        cluster = Cluster(execution_profiles={'non-default': non_default_profile})
        session = Session(cluster, hosts=[Host("127.0.0.1", SimpleConvictionPolicy)])

        self.assertEqual(cluster._config_mode, _ConfigMode.PROFILES)

        default_profile = cluster.profile_manager.profiles[EXEC_PROFILE_DEFAULT]
        rf = session.execute_async("query")
        self._verify_response_future_profile(rf, default_profile)

        rf = session.execute_async("query", execution_profile='non-default')
        self._verify_response_future_profile(rf, non_default_profile)
Example 16
    def test_target_host_nominal(self):
        node_count = 4
        hosts = [Host(i, Mock()) for i in range(node_count)]
        target_host = hosts[1]
        target_host.is_up = True

        policy = DSELoadBalancingPolicy(RoundRobinPolicy())
        policy.populate(
            Mock(metadata=ClusterMetaMock({'127.0.0.1': target_host})), hosts)
        for _ in range(10):
            query_plan = list(
                policy.make_query_plan(None, Mock(target_host='127.0.0.1')))
            self.assertEqual(sorted(query_plan), hosts)
            self.assertEqual(query_plan[0], target_host)
Example 17
    def test_version_parsing(self):
        host = Host('127.0.0.1', SimpleConvictionPolicy)

        host.set_version("1.0.0")
        self.assertEqual((1, 0, 0), host.version)

        host.set_version("1.0")
        self.assertEqual((1, 0, 0), host.version)

        host.set_version("1.0.0-beta1")
        self.assertEqual((1, 0, 0, 'beta1'), host.version)

        host.set_version("1.0-SNAPSHOT")
        self.assertEqual((1, 0, 0, 'SNAPSHOT'), host.version)

    def test_non_implemented(self):
        """
        Code coverage for interface-style base class
        """

        policy = LoadBalancingPolicy()
        host = Host("ip1", SimpleConvictionPolicy)
        host.set_location_info("dc1", "rack1")

        self.assertRaises(NotImplementedError, policy.distance, host)
        self.assertRaises(NotImplementedError, policy.populate, None, host)
        self.assertRaises(NotImplementedError, policy.make_query_plan)
        self.assertRaises(NotImplementedError, policy.on_up, host)
        self.assertRaises(NotImplementedError, policy.on_down, host)
        self.assertRaises(NotImplementedError, policy.on_add, host)
        self.assertRaises(NotImplementedError, policy.on_remove, host)
Example 19
 def test_no_legacy_with_profile(self):
     cluster_init = Cluster(execution_profiles={'name': ExecutionProfile()})
     cluster_add = Cluster()
     cluster_add.add_execution_profile('name', ExecutionProfile())
     # for clusters with profiles added either way...
     for cluster in (cluster_init, cluster_add):
         # don't allow legacy parameters set
         for attr, value in (('default_retry_policy', RetryPolicy()),
                             ('load_balancing_policy', default_lbp_factory())):
             self.assertRaises(ValueError, setattr, cluster, attr, value)
         session = Session(cluster, hosts=[Host("127.0.0.1", SimpleConvictionPolicy)])
         for attr, value in (('default_timeout', 1),
                             ('default_consistency_level', ConsistencyLevel.ANY),
                             ('default_serial_consistency_level', ConsistencyLevel.SERIAL),
                             ('row_factory', tuple_factory)):
             self.assertRaises(ValueError, setattr, session, attr, value)
Example 20
    def test_target_host_down(self):
        node_count = 4
        hosts = [Host(i, Mock()) for i in range(node_count)]
        target_host = hosts[1]

        policy = DSELoadBalancingPolicy(RoundRobinPolicy())
        policy.populate(
            Mock(metadata=ClusterMetaMock({'127.0.0.1': target_host})), hosts)
        query_plan = list(
            policy.make_query_plan(None, Mock(target_host='127.0.0.1')))
        self.assertEqual(sorted(query_plan), hosts)

        target_host.is_up = False
        policy.on_down(target_host)
        query_plan = list(
            policy.make_query_plan(None, Mock(target_host='127.0.0.1')))
        self.assertNotIn(target_host, query_plan)
Example 21
    def test_simple_replication_type_parsing(self):
        """ Test equality between passing numeric and string replication factor for simple strategy """
        rs = ReplicationStrategy()

        simple_int = rs.create('SimpleStrategy', {'replication_factor': 3})
        simple_str = rs.create('SimpleStrategy', {'replication_factor': '3'})

        self.assertEqual(simple_int.export_for_schema(), simple_str.export_for_schema())
        self.assertEqual(simple_int, simple_str)

        # make token replica map
        ring = [MD5Token(0), MD5Token(1), MD5Token(2)]
        hosts = [Host('dc1.{}'.format(host), SimpleConvictionPolicy) for host in range(3)]
        token_to_host = dict(zip(ring, hosts))
        self.assertEqual(
            simple_int.make_token_replica_map(token_to_host, ring),
            simple_str.make_token_replica_map(token_to_host, ring)
        )

    def test_no_live_nodes(self):
        """
        Ensure query plan for a downed cluster will execute without errors
        """

        hosts = []
        for i in range(4):
            h = Host(i, SimpleConvictionPolicy)
            h.set_location_info("dc1", "rack1")
            hosts.append(h)

        policy = DCAwareRoundRobinPolicy("dc1", used_hosts_per_remote_dc=1)
        policy.populate(None, hosts)

        for host in hosts:
            policy.on_down(host)

        qplan = list(policy.make_query_plan())
        self.assertEqual(qplan, [])
Example 23
    def test_wrap_dc_aware(self):
        cluster = Mock(spec=Cluster)
        cluster.metadata = Mock(spec=Metadata)
        hosts = [Host(str(i), SimpleConvictionPolicy) for i in range(4)]
        for host in hosts:
            host.set_up()
        for h in hosts[:2]:
            h.set_location_info("dc1", "rack1")
        for h in hosts[2:]:
            h.set_location_info("dc2", "rack1")

        def get_replicas(keyspace, packed_key):
            index = struct.unpack('>i', packed_key)[0]
            # return one node from each DC
            if index % 2 == 0:
                return [hosts[0], hosts[2]]
            else:
                return [hosts[1], hosts[3]]

        cluster.metadata.get_replicas.side_effect = get_replicas

        policy = TokenAwarePolicy(
            DCAwareRoundRobinPolicy("dc1", used_hosts_per_remote_dc=1))
        policy.populate(cluster, hosts)

        for i in range(4):
            query = Statement(routing_key=struct.pack('>i', i),
                              keyspace='keyspace_name')
            qplan = list(policy.make_query_plan(None, query))
            replicas = get_replicas(None, struct.pack('>i', i))

            # first should be the only local replica
            self.assertIn(qplan[0], replicas)
            self.assertEqual(qplan[0].datacenter, "dc1")

            # then the local non-replica
            self.assertNotIn(qplan[1], replicas)
            self.assertEqual(qplan[1].datacenter, "dc1")

            # then one of the remotes (used_hosts_per_remote_dc is 1, so we
            # shouldn't see two remotes)
            self.assertEqual(qplan[2].datacenter, "dc2")
            self.assertEqual(3, len(qplan))
Example 24
    def test_no_profile_with_legacy(self):
        # don't construct with both
        self.assertRaises(ValueError,
                          Cluster,
                          load_balancing_policy=RoundRobinPolicy(),
                          execution_profiles={'a': ExecutionProfile()})
        self.assertRaises(
            ValueError,
            Cluster,
            default_retry_policy=DowngradingConsistencyRetryPolicy(),
            execution_profiles={'a': ExecutionProfile()})
        self.assertRaises(
            ValueError,
            Cluster,
            load_balancing_policy=RoundRobinPolicy(),
            default_retry_policy=DowngradingConsistencyRetryPolicy(),
            execution_profiles={'a': ExecutionProfile()})

        # can't add after
        cluster = Cluster(load_balancing_policy=RoundRobinPolicy())
        self.assertRaises(ValueError, cluster.add_execution_profile, 'name',
                          ExecutionProfile())

        # session settings lock out profiles
        cluster = Cluster()
        session = Session(cluster,
                          hosts=[Host("127.0.0.1", SimpleConvictionPolicy)])
        for attr, value in (('default_timeout', 1),
                            ('default_consistency_level', ConsistencyLevel.ANY),
                            ('default_serial_consistency_level', ConsistencyLevel.SERIAL),
                            ('row_factory', tuple_factory)):
            cluster._config_mode = _ConfigMode.UNCOMMITTED
            setattr(session, attr, value)
            self.assertRaises(ValueError, cluster.add_execution_profile,
                              'name' + attr, ExecutionProfile())

        # don't accept profile
        self.assertRaises(ValueError,
                          session.execute_async,
                          "query",
                          execution_profile='some name here')
Example 25
    def test_statement_params_override_legacy(self):
        cluster = Cluster(load_balancing_policy=RoundRobinPolicy(), default_retry_policy=DowngradingConsistencyRetryPolicy())
        self.assertEqual(cluster._config_mode, _ConfigMode.LEGACY)
        session = Session(cluster, hosts=[Host("127.0.0.1", SimpleConvictionPolicy)])

        ss = SimpleStatement("query", retry_policy=DowngradingConsistencyRetryPolicy(),
                             consistency_level=ConsistencyLevel.ALL, serial_consistency_level=ConsistencyLevel.SERIAL)
        my_timeout = 1.1234

        self.assertNotEqual(ss.retry_policy.__class__, cluster.default_retry_policy)
        self.assertNotEqual(ss.consistency_level, session.default_consistency_level)
        self.assertNotEqual(ss._serial_consistency_level, session.default_serial_consistency_level)
        self.assertNotEqual(my_timeout, session.default_timeout)

        rf = session.execute_async(ss, timeout=my_timeout)
        expected_profile = ExecutionProfile(load_balancing_policy=cluster.load_balancing_policy, retry_policy=ss.retry_policy,
                                            request_timeout=my_timeout, consistency_level=ss.consistency_level,
                                            serial_consistency_level=ss._serial_consistency_level)
        self._verify_response_future_profile(rf, expected_profile)
Example 26
    def test_default_profile(self):
        non_default_profile = ExecutionProfile(RoundRobinPolicy(), *[object() for _ in range(2)])
        cluster = Cluster(execution_profiles={'non-default': non_default_profile})
        session = Session(cluster, hosts=[Host("127.0.0.1", SimpleConvictionPolicy)])

        self.assertEqual(cluster._config_mode, _ConfigMode.PROFILES)

        default_profile = session.get_execution_profile(EXEC_PROFILE_DEFAULT)
        rf = session.execute_async("query")
        self._verify_response_future_profile(rf, default_profile)

        rf = session.execute_async("query", execution_profile='non-default')
        self._verify_response_future_profile(rf, non_default_profile)

        for name, ep in six.iteritems(cluster.profile_manager.profiles):
            self.assertEqual(ep, session.get_execution_profile(name))

        # invalid ep
        with self.assertRaises(ValueError):
            session.get_execution_profile('non-existent')
Example 27
    def test_nts_transient_parsing(self):
        """ Test that we can PARSE a transient replication factor for NTS """
        rs = ReplicationStrategy()

        nts_transient = rs.create('NetworkTopologyStrategy', {'dc1': '3/1', 'dc2': '5/1'})
        self.assertEqual(nts_transient.dc_replication_factors['dc1'], '3/1')
        self.assertEqual(nts_transient.dc_replication_factors['dc2'], '5/1')
        self.assertIn("'dc1': '3/1', 'dc2': '5/1'", nts_transient.export_for_schema())

        nts_str = rs.create('NetworkTopologyStrategy', {'dc1': '3', 'dc2': '5'})
        self.assertNotEqual(nts_transient, nts_str)

        # make token replica map
        ring = [MD5Token(0), MD5Token(1), MD5Token(2)]
        hosts = [Host('dc1.{}'.format(host), SimpleConvictionPolicy) for host in range(3)]
        token_to_host = dict(zip(ring, hosts))
        self.assertEqual(
            nts_transient.make_token_replica_map(token_to_host, ring),
            nts_str.make_token_replica_map(token_to_host, ring)
        )
Example 28
    def test_nts_make_token_replica_map_multi_rack(self):
        token_to_host_owner = {}

        # (A) not enough distinct racks, first skipped is used
        dc1_1 = Host('dc1.1', SimpleConvictionPolicy)
        dc1_2 = Host('dc1.2', SimpleConvictionPolicy)
        dc1_3 = Host('dc1.3', SimpleConvictionPolicy)
        dc1_4 = Host('dc1.4', SimpleConvictionPolicy)
        dc1_1.set_location_info('dc1', 'rack1')
        dc1_2.set_location_info('dc1', 'rack1')
        dc1_3.set_location_info('dc1', 'rack2')
        dc1_4.set_location_info('dc1', 'rack2')
        token_to_host_owner[MD5Token(0)] = dc1_1
        token_to_host_owner[MD5Token(100)] = dc1_2
        token_to_host_owner[MD5Token(200)] = dc1_3
        token_to_host_owner[MD5Token(300)] = dc1_4

        # (B) distinct racks, but not contiguous
        dc2_1 = Host('dc2.1', SimpleConvictionPolicy)
        dc2_2 = Host('dc2.2', SimpleConvictionPolicy)
        dc2_3 = Host('dc2.3', SimpleConvictionPolicy)
        dc2_1.set_location_info('dc2', 'rack1')
        dc2_2.set_location_info('dc2', 'rack1')
        dc2_3.set_location_info('dc2', 'rack2')
        token_to_host_owner[MD5Token(1)] = dc2_1
        token_to_host_owner[MD5Token(101)] = dc2_2
        token_to_host_owner[MD5Token(201)] = dc2_3

        ring = [
            MD5Token(0),
            MD5Token(1),
            MD5Token(100),
            MD5Token(101),
            MD5Token(200),
            MD5Token(201),
            MD5Token(300)
        ]

        nts = NetworkTopologyStrategy({'dc1': 3, 'dc2': 2})
        replica_map = nts.make_token_replica_map(token_to_host_owner, ring)

        token_replicas = replica_map[MD5Token(0)]
        self.assertItemsEqual(token_replicas,
                              (dc1_1, dc1_2, dc1_3, dc2_1, dc2_3))
Example 29
    def test_transient_replication_parsing(self):
        """ Test that we can PARSE a transient replication factor for SimpleStrategy """
        rs = ReplicationStrategy()

        simple_transient = rs.create('SimpleStrategy', {'replication_factor': '3/1'})
        self.assertEqual(simple_transient.replication_factor, 3)
        self.assertEqual(simple_transient.transient_replicas, 1)
        self.assertIn("'replication_factor': '3/1'", simple_transient.export_for_schema())

        simple_str = rs.create('SimpleStrategy', {'replication_factor': '3'})
        self.assertNotEqual(simple_transient, simple_str)

        # make token replica map
        ring = [MD5Token(0), MD5Token(1), MD5Token(2)]
        hosts = [Host('dc1.{}'.format(host), SimpleConvictionPolicy) for host in range(3)]
        token_to_host = dict(zip(ring, hosts))
        self.assertEqual(
            simple_transient.make_token_replica_map(token_to_host, ring),
            simple_str.make_token_replica_map(token_to_host, ring)
        )
Example 30
    def test_wait_for_schema_agreement_rpc_lookup(self):
        """
        If the rpc_address is 0.0.0.0, the "peer" column should be used instead.
        """
        self.connection.peer_results[1].append(
            ["0.0.0.0", PEER_IP, "b", "dc1", "rack1", ["3", "103", "203"]])
        host = Host("0.0.0.0", SimpleConvictionPolicy)
        self.cluster.metadata.hosts[PEER_IP] = host
        host.is_up = False

        # even though the new host has a different schema version, it's
        # marked as down, so the control connection shouldn't care
        self.assertTrue(self.control_connection.wait_for_schema_agreement())
        self.assertEqual(self.time.clock, 0)

        # but once we mark it up, the control connection will care
        host.is_up = True
        self.assertFalse(self.control_connection.wait_for_schema_agreement())
        self.assertGreaterEqual(self.time.clock,
                                Cluster.max_schema_agreement_wait)