Example #1
    def test_profile_lb_swap(self):
        """
        Tests that profile load balancing policies are not shared

        Creates two LBPs, runs a few queries, and validates that each LBP is
        exercised separately between the EPs

        @since 3.5
        @jira_ticket PYTHON-569
        @expected_result LBP should not be shared.

        @test_category config_profiles
        """
        query = "select release_version from system.local"
        rr1 = ExecutionProfile(load_balancing_policy=RoundRobinPolicy())
        rr2 = ExecutionProfile(load_balancing_policy=RoundRobinPolicy())
        exec_profiles = {'rr1': rr1, 'rr2': rr2}
        with Cluster(execution_profiles=exec_profiles) as cluster:
            session = cluster.connect(wait_for_all_pools=True)

            # the default profile is DC-aware RoundRobin over all hosts
            expected_hosts = set(cluster.metadata.all_hosts())
            rr1_queried_hosts = set()
            rr2_queried_hosts = set()

            rs = session.execute(query, execution_profile='rr1')
            rr1_queried_hosts.add(rs.response_future._current_host)
            rs = session.execute(query, execution_profile='rr2')
            rr2_queried_hosts.add(rs.response_future._current_host)

            self.assertEqual(rr2_queried_hosts, rr1_queried_hosts)
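A quick standalone sketch of the behavior under test (mine, not part of the
suite): each RoundRobinPolicy instance keeps its own rotation position, so
advancing one policy never moves the other. As in the unit tests below, plain
ints stand in for hosts; note that populate() may randomize the starting
position, so the first host of a fresh plan is not deterministic.

    from cassandra.policies import RoundRobinPolicy

    hosts = [0, 1, 2, 3]
    p1, p2 = RoundRobinPolicy(), RoundRobinPolicy()
    p1.populate(None, hosts)
    p2.populate(None, hosts)

    before = list(p2.make_query_plan())   # remember p2's current rotation
    for _ in range(10):
        list(p1.make_query_plan())        # each call advances only p1's position
    after = list(p2.make_query_plan())
    # p2 advanced exactly once, via its own call above; p1's calls had no effect
    assert after == before[1:] + before[:1]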
Example #2
    def test_multiple_query_plans(self):
        hosts = [0, 1, 2, 3]
        policy = RoundRobinPolicy()
        policy.populate(None, hosts)
        for i in range(20):
            qplan = list(policy.make_query_plan())
            self.assertEqual(sorted(qplan), hosts)
Example #3
    def test_thread_safety(self):
        hosts = list(range(100))
        policy = RoundRobinPolicy()
        policy.populate(None, hosts)

        def check_query_plan():
            for i in range(100):
                qplan = list(policy.make_query_plan())
                self.assertEqual(sorted(qplan), hosts)

        threads = [Thread(target=check_query_plan) for i in range(4)]
        for t in threads:
            t.start()
        for t in threads:
            t.join()
Example #4
    def test_thread_safety_during_modification(self):
        hosts = list(range(100))
        policy = RoundRobinPolicy()
        policy.populate(None, hosts)

        errors = []

        def check_query_plan():
            try:
                for i in range(100):
                    list(policy.make_query_plan())
            except Exception as exc:
                errors.append(exc)

        def host_up():
            for i in range(1000):
                policy.on_up(randint(0, 99))

        def host_down():
            for i in range(1000):
                policy.on_down(randint(0, 99))

        threads = []
        for i in range(5):
            threads.append(Thread(target=check_query_plan))
            threads.append(Thread(target=host_up))
            threads.append(Thread(target=host_down))

        # make the GIL switch after every instruction, maximizing
        # the chance of race conditions
        check = six.PY2 or '__pypy__' in sys.builtin_module_names
        if check:
            original_interval = sys.getcheckinterval()
        else:
            original_interval = sys.getswitchinterval()

        try:
            if check:
                sys.setcheckinterval(0)
            else:
                sys.setswitchinterval(0.0001)
            for t in threads:
                t.start()
            for t in threads:
                t.join()
        finally:
            if check:
                sys.setcheckinterval(original_interval)
            else:
                sys.setswitchinterval(original_interval)

        if errors:
            self.fail("Saw errors: %s" % (errors,))
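The same interval juggling can be packaged as a context manager so the
try/finally bookkeeping is not repeated per test. A hedged sketch; the helper
name is mine, and it keys off hasattr rather than the six.PY2/PyPy check used
above, which is a simplification:

    import sys
    from contextlib import contextmanager

    @contextmanager
    def aggressive_gil_switching(interval=0.0001):
        # Python 2 only has set/getcheckinterval; Python 3 uses switchinterval
        legacy = not hasattr(sys, 'getswitchinterval')
        original = sys.getcheckinterval() if legacy else sys.getswitchinterval()
        try:
            if legacy:
                sys.setcheckinterval(0)
            else:
                sys.setswitchinterval(interval)
            yield
        finally:
            if legacy:
                sys.setcheckinterval(original)
            else:
                sys.setswitchinterval(original)

The test body above would then reduce to starting and joining the threads
inside a single with aggressive_gil_switching(): block.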
Example #5
    def test_profile_name_value(self):
        internalized_profile = ExecutionProfile(RoundRobinPolicy(),
                                                *[object() for _ in range(3)])
        cluster = Cluster(execution_profiles={'by-name': internalized_profile})
        session = Session(cluster,
                          hosts=[Host("127.0.0.1", SimpleConvictionPolicy)])

        rf = session.execute_async("query", execution_profile='by-name')
        self._verify_response_future_profile(rf, internalized_profile)

        by_value = ExecutionProfile(RoundRobinPolicy(),
                                    *[object() for _ in range(3)])
        rf = session.execute_async("query", execution_profile=by_value)
        self._verify_response_future_profile(rf, by_value)
Example #6
    def test_token_aware_with_rf_2(self, use_prepared=False):
        use_singledc()
        keyspace = 'test_token_aware_with_rf_2'
        cluster, session = self._cluster_session_with_lbp(TokenAwarePolicy(RoundRobinPolicy()))
        self._wait_for_nodes_up(range(1, 4), cluster)

        create_schema(cluster, session, keyspace, replication_factor=2)
        self._insert(session, keyspace)
        self._query(session, keyspace)

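        # token-aware routing coordinates on a replica of the queried
        # partition: with RF=2 the replicas here are nodes 2 and 3 (as the
        # counts below show), and the unshuffled policy tries node 2 first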
        self.coordinator_stats.assert_query_count_equals(self, 1, 0)
        self.coordinator_stats.assert_query_count_equals(self, 2, 12)
        self.coordinator_stats.assert_query_count_equals(self, 3, 0)

        self.coordinator_stats.reset_counts()
        stop(2)
        self._wait_for_nodes_down([2], cluster)

        self._query(session, keyspace)

        self.coordinator_stats.assert_query_count_equals(self, 1, 0)
        self.coordinator_stats.assert_query_count_equals(self, 2, 0)
        self.coordinator_stats.assert_query_count_equals(self, 3, 12)

        cluster.shutdown()
Example #7
    def test_roundrobin_two_dcs_2(self):
        use_multidc([2, 2])
        keyspace = 'test_roundrobin_two_dcs_2'
        cluster, session = self._cluster_session_with_lbp(RoundRobinPolicy())
        self._wait_for_nodes_up(range(1, 5), cluster)

        create_schema(cluster, session, keyspace, replication_strategy=[2, 2])
        self._insert(session, keyspace)
        self._query(session, keyspace)

        self.coordinator_stats.assert_query_count_equals(self, 1, 3)
        self.coordinator_stats.assert_query_count_equals(self, 2, 3)
        self.coordinator_stats.assert_query_count_equals(self, 3, 3)
        self.coordinator_stats.assert_query_count_equals(self, 4, 3)

        force_stop(1)
        bootstrap(5, 'dc1')

        # reset control connection
        self._insert(session, keyspace, count=1000)

        self._wait_for_nodes_up([5], cluster)

        self.coordinator_stats.reset_counts()
        self._query(session, keyspace)

        self.coordinator_stats.assert_query_count_equals(self, 1, 0)
        self.coordinator_stats.assert_query_count_equals(self, 2, 3)
        self.coordinator_stats.assert_query_count_equals(self, 3, 3)
        self.coordinator_stats.assert_query_count_equals(self, 4, 3)
        self.coordinator_stats.assert_query_count_equals(self, 5, 3)

        cluster.shutdown()
Example #8
    def test_roundrobin(self):
        use_singledc()
        keyspace = 'test_roundrobin'
        cluster, session = self._cluster_session_with_lbp(RoundRobinPolicy())
        self._wait_for_nodes_up(range(1, 4), cluster)
        create_schema(cluster, session, keyspace, replication_factor=3)
        self._insert(session, keyspace)
        self._query(session, keyspace)

        self.coordinator_stats.assert_query_count_equals(self, 1, 4)
        self.coordinator_stats.assert_query_count_equals(self, 2, 4)
        self.coordinator_stats.assert_query_count_equals(self, 3, 4)

        force_stop(3)
        self._wait_for_nodes_down([3], cluster)

        self.coordinator_stats.reset_counts()
        self._query(session, keyspace)

        self.coordinator_stats.assert_query_count_equals(self, 1, 6)
        self.coordinator_stats.assert_query_count_equals(self, 2, 6)
        self.coordinator_stats.assert_query_count_equals(self, 3, 0)

        decommission(1)
        start(3)
        self._wait_for_nodes_down([1], cluster)
        self._wait_for_nodes_up([3], cluster)

        self.coordinator_stats.reset_counts()
        self._query(session, keyspace)

        self.coordinator_stats.assert_query_count_equals(self, 1, 0)
        self.coordinator_stats.assert_query_count_equals(self, 2, 6)
        self.coordinator_stats.assert_query_count_equals(self, 3, 6)
        cluster.shutdown()
Example #9
    def _test_downgrading_cl(self, keyspace, rf, accepted):
        cluster = Cluster(protocol_version=PROTOCOL_VERSION,
                          execution_profiles={
                              EXEC_PROFILE_DEFAULT:
                              ExecutionProfile(
                                  TokenAwarePolicy(RoundRobinPolicy()),
                                  DowngradingConsistencyRetryPolicy())
                          })
        session = cluster.connect(wait_for_all_pools=True)

        create_schema(cluster, session, keyspace, replication_factor=rf)
        self._insert(session, keyspace, 1)
        self._query(session, keyspace, 1)
        self.coordinator_stats.assert_query_count_equals(self, 1, 0)
        self.coordinator_stats.assert_query_count_equals(self, 2, 1)
        self.coordinator_stats.assert_query_count_equals(self, 3, 0)

        try:
            force_stop(2)
            wait_for_down(cluster, 2)

            self._assert_writes_succeed(session, keyspace, accepted)
            self._assert_reads_succeed(session, keyspace,
                                       accepted - set([ConsistencyLevel.ANY]))
            self._assert_writes_fail(session, keyspace,
                                     SINGLE_DC_CONSISTENCY_LEVELS - accepted)
            self._assert_reads_fail(session, keyspace,
                                    SINGLE_DC_CONSISTENCY_LEVELS - accepted)
        finally:
            start(2)
            wait_for_up(cluster, 2)

        cluster.shutdown()
Example #10
    def test_rfthree_tokenaware_none_down(self):
        keyspace = 'test_rfthree_tokenaware_none_down'
        cluster = Cluster(protocol_version=PROTOCOL_VERSION,
                          execution_profiles={
                              EXEC_PROFILE_DEFAULT:
                              ExecutionProfile(
                                  TokenAwarePolicy(RoundRobinPolicy()))
                          })
        session = cluster.connect(wait_for_all_pools=True)
        wait_for_up(cluster, 1)
        wait_for_up(cluster, 2)

        create_schema(cluster, session, keyspace, replication_factor=3)
        self._insert(session, keyspace, count=1)
        self._query(session, keyspace, count=1)
        self.coordinator_stats.assert_query_count_equals(self, 1, 0)
        self.coordinator_stats.assert_query_count_equals(self, 2, 1)
        self.coordinator_stats.assert_query_count_equals(self, 3, 0)

        self.coordinator_stats.reset_counts()

        self._assert_writes_succeed(session, keyspace,
                                    SINGLE_DC_CONSISTENCY_LEVELS)
        self._assert_reads_succeed(session,
                                   keyspace,
                                   SINGLE_DC_CONSISTENCY_LEVELS -
                                   set([ConsistencyLevel.ANY]),
                                   expected_reader=2)

        cluster.shutdown()
Example #11
    def setUp(self):
        """
        Set up the session and pause node1
        """
        self.cluster = Cluster(
            protocol_version=PROTOCOL_VERSION,
            execution_profiles={
                EXEC_PROFILE_DEFAULT: ExecutionProfile(
                    load_balancing_policy=HostFilterPolicy(
                        RoundRobinPolicy(),
                        lambda host: host.address == "127.0.0.1"
                    )
                )
            }
        )

        self.session = self.cluster.connect(wait_for_all_pools=True)

        self.control_connection_host_number = 1
        self.node_to_stop = get_node(self.control_connection_host_number)

        ddl = '''
            CREATE TABLE test3rf.timeout (
                k int PRIMARY KEY,
                v int )'''
        self.session.execute(ddl)
        self.node_to_stop.pause()
Example #12
    def test_clone_shared_lbp(self):
        """
        Tests that profile load balancing policies are shared on clone

        Creates one LBP, clones it, and ensures that the LBP is shared between
        the two EPs

        @since 3.5
        @jira_ticket PYTHON-569
        @expected_result LBP is shared

        @test_category config_profiles
        """
        query = "select release_version from system.local"
        rr1 = ExecutionProfile(load_balancing_policy=RoundRobinPolicy())
        exec_profiles = {'rr1': rr1}
        with Cluster(execution_profiles=exec_profiles) as cluster:
            session = cluster.connect(wait_for_all_pools=True)
            self.assertGreater(
                len(cluster.metadata.all_hosts()), 1,
                "We only have one host connected at this point")

            rr1_clone = session.execution_profile_clone_update(
                'rr1', row_factory=tuple_factory)
            cluster.add_execution_profile("rr1_clone", rr1_clone)
            rr1_queried_hosts = set()
            rr1_clone_queried_hosts = set()
            rs = session.execute(query, execution_profile='rr1')
            rr1_queried_hosts.add(rs.response_future._current_host)
            rs = session.execute(query, execution_profile='rr1_clone')
            rr1_clone_queried_hosts.add(rs.response_future._current_host)
            self.assertNotEqual(rr1_clone_queried_hosts, rr1_queried_hosts)
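The sharing comes from the clone being shallow: the copied profile references
the same attribute objects as the original. A minimal sketch of that mechanic
using copy.copy directly (my illustration; the test above goes through
session.execution_profile_clone_update instead):

    import copy

    from cassandra.cluster import ExecutionProfile
    from cassandra.policies import RoundRobinPolicy

    rr1 = ExecutionProfile(load_balancing_policy=RoundRobinPolicy())
    clone = copy.copy(rr1)   # shallow copy: attributes are shared, not duplicated
    assert clone.load_balancing_policy is rr1.load_balancing_policy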
Example #13
    def test_statement_params_override_profile(self):
        non_default_profile = ExecutionProfile(RoundRobinPolicy(),
                                               *[object() for _ in range(3)])
        cluster = Cluster(
            execution_profiles={'non-default': non_default_profile})
        session = Session(cluster,
                          hosts=[Host("127.0.0.1", SimpleConvictionPolicy)])

        rf = session.execute_async("query", execution_profile='non-default')

        ss = SimpleStatement("query",
                             retry_policy=DowngradingConsistencyRetryPolicy(),
                             consistency_level=ConsistencyLevel.ALL,
                             serial_consistency_level=ConsistencyLevel.SERIAL)
        my_timeout = 1.1234

        self.assertNotEqual(ss.retry_policy.__class__,
                            rf._load_balancer.__class__)
        self.assertNotEqual(ss.consistency_level, rf.message.consistency_level)
        self.assertNotEqual(ss._serial_consistency_level,
                            rf.message.serial_consistency_level)
        self.assertNotEqual(my_timeout, rf.timeout)

        rf = session.execute_async(ss,
                                   timeout=my_timeout,
                                   execution_profile='non-default')
        expected_profile = ExecutionProfile(
            non_default_profile.load_balancing_policy, ss.retry_policy,
            ss.consistency_level, ss._serial_consistency_level, my_timeout,
            non_default_profile.row_factory)
        self._verify_response_future_profile(rf, expected_profile)
Example #14
    def test_token_aware_composite_key(self):
        use_singledc()
        keyspace = 'test_token_aware_composite_key'
        table = 'composite'
        cluster, session = self._cluster_session_with_lbp(
            TokenAwarePolicy(RoundRobinPolicy()))
        self._wait_for_nodes_up(range(1, 4), cluster)

        create_schema(cluster, session, keyspace, replication_factor=2)
        session.execute('CREATE TABLE %s ('
                        'k1 int, '
                        'k2 int, '
                        'i int, '
                        'PRIMARY KEY ((k1, k2)))' % table)

        prepared = session.prepare('INSERT INTO %s '
                                   '(k1, k2, i) '
                                   'VALUES '
                                   '(?, ?, ?)' % table)
        session.execute(prepared.bind((1, 2, 3)))

        results = session.execute('SELECT * FROM %s WHERE k1 = 1 AND k2 = 2' %
                                  table)
        self.assertTrue(results[0].i)

        cluster.shutdown()
Example #15
    def test_target_no_host(self):
        node_count = 4
        hosts = list(range(node_count))
        policy = DSELoadBalancingPolicy(RoundRobinPolicy())
        policy.populate(Mock(metadata=ClusterMetaMock()), hosts)
        query_plan = list(policy.make_query_plan(None, Mock(target_host='127.0.0.1')))
        self.assertEqual(sorted(query_plan), hosts)
Example #16
    def test_wrap_round_robin(self):
        cluster = Mock(spec=Cluster)
        cluster.metadata = Mock(spec=Metadata)
        hosts = [Host(str(i), SimpleConvictionPolicy) for i in range(4)]
        for host in hosts:
            host.set_up()

        def get_replicas(keyspace, packed_key):
            index = struct.unpack('>i', packed_key)[0]
            return list(islice(cycle(hosts), index, index + 2))

        cluster.metadata.get_replicas.side_effect = get_replicas

        policy = TokenAwarePolicy(RoundRobinPolicy())
        policy.populate(cluster, hosts)

        for i in range(4):
            query = Statement(routing_key=struct.pack('>i', i), keyspace='keyspace_name')
            qplan = list(policy.make_query_plan(None, query))

            replicas = get_replicas(None, struct.pack('>i', i))
            other = set(h for h in hosts if h not in replicas)
            self.assertEqual(replicas, qplan[:2])
            self.assertEqual(other, set(qplan[2:]))

        # Should use the secondary policy
        for i in range(4):
            qplan = list(policy.make_query_plan())

            self.assertEqual(set(qplan), set(hosts))
Example #17
    def __init__(self):
        self.metadata = MockMetadata()
        self.added_hosts = []
        self.removed_hosts = []
        self.scheduler = Mock(spec=_Scheduler)
        self.executor = Mock(spec=ThreadPoolExecutor)
        self.profile_manager.profiles[EXEC_PROFILE_DEFAULT] = ExecutionProfile(
            RoundRobinPolicy())
Example #18
    def test_rfthree_tokenaware_downgradingcl(self):
        keyspace = 'test_rfthree_tokenaware_downgradingcl'
        with Cluster(protocol_version=PROTOCOL_VERSION,
                     execution_profiles={
                         EXEC_PROFILE_DEFAULT:
                         ExecutionProfile(TokenAwarePolicy(RoundRobinPolicy()),
                                          DowngradingConsistencyRetryPolicy())
                     }) as cluster:
            self.rfthree_downgradingcl(cluster, keyspace, False)
Example #19
    def test_missing_exec_prof(self):
        """
        Tests to verify that using an unknown profile raises a ValueError

        @since 3.5
        @jira_ticket PYTHON-569
        @expected_result ValueError

        @test_category config_profiles
        """
        query = "select release_version from system.local"
        rr1 = ExecutionProfile(load_balancing_policy=RoundRobinPolicy())
        rr2 = ExecutionProfile(load_balancing_policy=RoundRobinPolicy())
        exec_profiles = {'rr1': rr1, 'rr2': rr2}
        with Cluster(execution_profiles=exec_profiles) as cluster:
            session = cluster.connect()
            with self.assertRaises(ValueError):
                session.execute(query, execution_profile='rr3')
Example #20
    def _set_up_shuffle_test(self, keyspace, replication_factor):
        use_singledc()
        cluster, session = self._cluster_session_with_lbp(
            TokenAwarePolicy(RoundRobinPolicy(), shuffle_replicas=True)
        )
        self._wait_for_nodes_up(range(1, 4), cluster)

        create_schema(cluster, session, keyspace, replication_factor=replication_factor)
        return cluster, session
Example #21
    def test_token_aware_with_local_table(self):
        use_singledc()
        cluster, session = self._cluster_session_with_lbp(TokenAwarePolicy(RoundRobinPolicy()))
        self._wait_for_nodes_up(range(1, 4), cluster)

        p = session.prepare("SELECT * FROM system.local WHERE key=?")
        # this would blow up prior to 61b4fad
        r = session.execute(p, ('local',))
        self.assertEqual(r[0].key, 'local')

        cluster.shutdown()
Example #22
    def test_status_updates(self):
        node_count = 4
        hosts = list(range(node_count))
        policy = DSELoadBalancingPolicy(RoundRobinPolicy())
        policy.populate(Mock(metadata=ClusterMetaMock()), hosts)
        policy.on_down(0)
        policy.on_remove(1)
        policy.on_up(4)
        policy.on_add(5)
        query_plan = list(policy.make_query_plan())
        self.assertEqual(sorted(query_plan), [2, 3, 4, 5])
Example #23
    def _connect_probe_cluster(self):
        if not self.probe_cluster:
            # distinct cluster so we can see the status of nodes ignored by the LBP being tested
            self.probe_cluster = Cluster(
                schema_metadata_enabled=False,
                token_metadata_enabled=False,
                execution_profiles={
                    EXEC_PROFILE_DEFAULT:
                    ExecutionProfile(load_balancing_policy=RoundRobinPolicy())
                })
            self.probe_session = self.probe_cluster.connect()
Example #24
    def test_target_host_nominal(self):
        node_count = 4
        hosts = [Host(i, Mock()) for i in range(node_count)]
        target_host = hosts[1]
        target_host.is_up = True

        policy = DSELoadBalancingPolicy(RoundRobinPolicy())
        policy.populate(Mock(metadata=ClusterMetaMock({'127.0.0.1': target_host})), hosts)
        for _ in range(10):
            query_plan = list(policy.make_query_plan(None, Mock(target_host='127.0.0.1')))
            self.assertEqual(sorted(query_plan), hosts)
            self.assertEqual(query_plan[0], target_host)
Example #25
    def test_no_live_nodes(self):
        hosts = [0, 1, 2, 3]
        policy = RoundRobinPolicy()
        policy.populate(None, hosts)

        for i in range(4):
            policy.on_down(i)

        query_plan = list(policy.make_query_plan())
        self.assertEqual(query_plan, [])
Example #26
    def setUp(self):
        contact_point = ['127.0.0.2']
        self.cluster = Cluster(
            contact_points=contact_point,
            metrics_enabled=True,
            protocol_version=PROTOCOL_VERSION,
            execution_profiles={
                EXEC_PROFILE_DEFAULT:
                ExecutionProfile(
                    load_balancing_policy=HostFilterPolicy(
                        RoundRobinPolicy(),
                        lambda host: host.address in contact_point),
                    retry_policy=FallthroughRetryPolicy())
            })
        self.session = self.cluster.connect("test3rf", wait_for_all_pools=True)
Example #27
    def test_target_host_down(self):
        node_count = 4
        hosts = [Host(i, Mock()) for i in range(node_count)]
        target_host = hosts[1]

        policy = DSELoadBalancingPolicy(RoundRobinPolicy())
        policy.populate(Mock(metadata=ClusterMetaMock({'127.0.0.1': target_host})), hosts)
        query_plan = list(policy.make_query_plan(None, Mock(target_host='127.0.0.1')))
        self.assertEqual(sorted(query_plan), hosts)

        target_host.is_up = False
        policy.on_down(target_host)
        query_plan = list(policy.make_query_plan(None, Mock(target_host='127.0.0.1')))
        self.assertNotIn(target_host, query_plan)
Example #28
    def test_no_live_nodes(self):
        """
        Ensure query plan for a downed cluster will execute without errors
        """
        hosts = [0, 1, 2, 3]
        policy = RoundRobinPolicy()
        policy.populate(None, hosts)

        for i in range(4):
            policy.on_down(i)

        qplan = list(policy.make_query_plan())
        self.assertEqual(qplan, [])
Example #29
    def test_default_profile(self):
        non_default_profile = ExecutionProfile(RoundRobinPolicy(),
                                               *[object() for _ in range(3)])
        cluster = Cluster(
            execution_profiles={'non-default': non_default_profile})
        session = Session(cluster,
                          hosts=[Host("127.0.0.1", SimpleConvictionPolicy)])

        default_profile = cluster.profile_manager.profiles[
            EXEC_PROFILE_DEFAULT]
        rf = session.execute_async("query")
        self._verify_response_future_profile(rf, default_profile)

        rf = session.execute_async("query", execution_profile='non-default')
        self._verify_response_future_profile(rf, non_default_profile)
Example #30
    def test_predicate_changes(self):
        """
        Test to validate that HostFilterPolicy reacts correctly when the
        predicate returns a different subset of the hosts

        @since 3.8
        @jira_ticket PYTHON-961
        @expected_result the excluded hosts are ignored

        @test_category policy
        """
        external_event = True
        contact_point = "127.0.0.1"

        single_host = {Host(contact_point, SimpleConvictionPolicy)}
        all_hosts = {
            Host("127.0.0.{}".format(i), SimpleConvictionPolicy)
            for i in (1, 2, 3)
        }

        predicate = lambda host: host.address == contact_point if external_event else True
        hfp = ExecutionProfile(load_balancing_policy=HostFilterPolicy(
            RoundRobinPolicy(), predicate=predicate))
        cluster = Cluster((contact_point, ),
                          execution_profiles={EXEC_PROFILE_DEFAULT: hfp},
                          protocol_version=PROTOCOL_VERSION,
                          topology_event_refresh_window=0,
                          status_event_refresh_window=0)
        session = cluster.connect(wait_for_all_pools=True)

        queried_hosts = set()
        for _ in range(10):
            response = session.execute("SELECT * from system.local")
            queried_hosts.update(response.response_future.attempted_hosts)

        self.assertEqual(queried_hosts, single_host)

        external_event = False
        session.update_created_pools()

        queried_hosts = set()
        for _ in range(10):
            response = session.execute("SELECT * from system.local")
            queried_hosts.update(response.response_future.attempted_hosts)
        self.assertEqual(queried_hosts, all_hosts)
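A minimal standalone sketch of the predicate mechanics exercised above (mine;
no cluster required, and plain ints stand in for hosts as in the unit tests):
HostFilterPolicy delegates the query plan to its child policy and yields only
the hosts for which the predicate returns True.

    from cassandra.policies import HostFilterPolicy, RoundRobinPolicy

    hfp = HostFilterPolicy(RoundRobinPolicy(), predicate=lambda host: host != 2)
    hfp.populate(None, [0, 1, 2, 3])
    plan = list(hfp.make_query_plan())
    assert 2 not in plan and sorted(plan) == [0, 1, 3]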