Example #1
    def test_profile_lb_swap(self):
        """
        Tests that profile load balancing policies are not shared

        Creates two LBP, runs a few queries, and validates that each LBP is exercised
        separately between EPs

        @since 3.5
        @jira_ticket PYTHON-569
        @expected_result LBP should not be shared.

        @test_category config_profiles
        """
        query = "select release_version from system.local"
        rr1 = ExecutionProfile(load_balancing_policy=RoundRobinPolicy())
        rr2 = ExecutionProfile(load_balancing_policy=RoundRobinPolicy())
        exec_profiles = {'rr1': rr1, 'rr2': rr2}
        with Cluster(execution_profiles=exec_profiles) as cluster:
            session = cluster.connect(wait_for_all_pools=True)

            # default is DCA RR for all hosts
            expected_hosts = set(cluster.metadata.all_hosts())
            rr1_queried_hosts = set()
            rr2_queried_hosts = set()

            rs = session.execute(query, execution_profile='rr1')
            rr1_queried_hosts.add(rs.response_future._current_host)
            rs = session.execute(query, execution_profile='rr2')
            rr2_queried_hosts.add(rs.response_future._current_host)

            self.assertEqual(rr2_queried_hosts, rr1_queried_hosts)
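For contrast, a minimal sketch of the sharing bug this test guards against (illustrative, not part of the test suite): if both profiles referenced one policy instance, the two queries above would advance a single round-robin rotation, and the final assertEqual would fail.

from cassandra.cluster import ExecutionProfile
from cassandra.policies import RoundRobinPolicy

shared_lbp = RoundRobinPolicy()  # one instance, deliberately shared
bad_profiles = {
    'rr1': ExecutionProfile(load_balancing_policy=shared_lbp),
    'rr2': ExecutionProfile(load_balancing_policy=shared_lbp),
}
# With these profiles, a query on 'rr1' followed by a query on 'rr2'
# advances the same rotation, so the two queries would typically land
# on different hosts.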
Example #2
    def test_profile_pool_management(self):
        """
        Tests that changes to execution profiles correctly impact our cluster's pooling

        @since 3.5
        @jira_ticket PYTHON-569
        @expected_result pools should be correctly updated as EPs are added and removed

        @test_category config_profiles
        """

        node1 = ExecutionProfile(
            load_balancing_policy=WhiteListRoundRobinPolicy(['127.0.0.1']))
        node2 = ExecutionProfile(
            load_balancing_policy=WhiteListRoundRobinPolicy(['127.0.0.2']))
        with Cluster(execution_profiles={
                EXEC_PROFILE_DEFAULT: node1,
                'node2': node2
        }) as cluster:
            session = cluster.connect(wait_for_all_pools=True)
            pools = session.get_pool_state()
            # there are more hosts, but we connected to the ones in the lbp aggregate
            self.assertGreater(len(cluster.metadata.all_hosts()), 2)
            self.assertEqual(set(h.address for h in pools),
                             set(('127.0.0.1', '127.0.0.2')))

            # dynamically update pools on add
            node3 = ExecutionProfile(
                load_balancing_policy=WhiteListRoundRobinPolicy(['127.0.0.3']))
            cluster.add_execution_profile('node3', node3)
            pools = session.get_pool_state()
            self.assertEqual(set(h.address for h in pools),
                             set(('127.0.0.1', '127.0.0.2', '127.0.0.3')))
Example #3
    def test_statement_params_override_profile(self):
        non_default_profile = ExecutionProfile(RoundRobinPolicy(),
                                               *[object() for _ in range(3)])
        cluster = Cluster(
            execution_profiles={'non-default': non_default_profile})
        session = Session(cluster,
                          hosts=[Host("127.0.0.1", SimpleConvictionPolicy)])

        rf = session.execute_async("query", execution_profile='non-default')

        ss = SimpleStatement("query",
                             retry_policy=DowngradingConsistencyRetryPolicy(),
                             consistency_level=ConsistencyLevel.ALL,
                             serial_consistency_level=ConsistencyLevel.SERIAL)
        my_timeout = 1.1234

        self.assertNotEqual(ss.retry_policy.__class__,
                            rf._load_balancer.__class__)
        self.assertNotEqual(ss.consistency_level, rf.message.consistency_level)
        self.assertNotEqual(ss._serial_consistency_level,
                            rf.message.serial_consistency_level)
        self.assertNotEqual(my_timeout, rf.timeout)

        rf = session.execute_async(ss,
                                   timeout=my_timeout,
                                   execution_profile='non-default')
        expected_profile = ExecutionProfile(
            non_default_profile.load_balancing_policy, ss.retry_policy,
            ss.consistency_level, ss._serial_consistency_level, my_timeout,
            non_default_profile.row_factory)
        self._verify_response_future_profile(rf, expected_profile)
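The precedence this test encodes can be reduced to a short usage sketch (assuming a reachable local node; the profile name is hypothetical): attributes set on the statement, and an explicit timeout argument, take effect over the named profile's defaults.

from cassandra import ConsistencyLevel
from cassandra.cluster import Cluster, ExecutionProfile
from cassandra.query import SimpleStatement

cluster = Cluster(
    execution_profiles={'slow': ExecutionProfile(request_timeout=30)})
session = cluster.connect()

ss = SimpleStatement("SELECT release_version FROM system.local",
                     consistency_level=ConsistencyLevel.QUORUM)
# The statement's consistency level and the explicit timeout both
# override the 'slow' profile's defaults for this request only.
session.execute(ss, timeout=1.5, execution_profile='slow')
cluster.shutdown()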
Example #4
    def test_duplicate_metrics_per_cluster(self):
        """
        Test to validate that cluster metric names can't overlap.
        @since 3.6.0
        @jira_ticket PYTHON-561
        @expected_result metric names should not be allowed to be the same.

        @test_category metrics
        """
        cluster2 = Cluster(
            metrics_enabled=True,
            protocol_version=PROTOCOL_VERSION,
            execution_profiles={
                EXEC_PROFILE_DEFAULT:
                ExecutionProfile(retry_policy=FallthroughRetryPolicy())
            })

        cluster3 = Cluster(
            metrics_enabled=True,
            protocol_version=PROTOCOL_VERSION,
            execution_profiles={
                EXEC_PROFILE_DEFAULT:
                ExecutionProfile(retry_policy=FallthroughRetryPolicy())
            })

        # Reusing a name on the same cluster is a no-op; reusing a name
        # registered by a different cluster must raise ValueError
        cluster2.metrics.set_stats_name("appcluster")
        cluster2.metrics.set_stats_name("appcluster")
        with self.assertRaises(ValueError):
            cluster3.metrics.set_stats_name("appcluster")
        cluster3.metrics.set_stats_name("devops")

        session2 = cluster2.connect(self.ks_name, wait_for_all_pools=True)
        session3 = cluster3.connect(self.ks_name, wait_for_all_pools=True)

        # Basic validation that naming metrics doesn't impact their segregation or accuracy
        for i in range(10):
            query = SimpleStatement("SELECT * FROM {0}.{0}".format(
                self.ks_name),
                                    consistency_level=ConsistencyLevel.ALL)
            session2.execute(query)

        for i in range(5):
            query = SimpleStatement("SELECT * FROM {0}.{0}".format(
                self.ks_name),
                                    consistency_level=ConsistencyLevel.ALL)
            session3.execute(query)

        self.assertEqual(
            cluster2.metrics.get_stats()['request_timer']['count'], 10)
        self.assertEqual(
            cluster3.metrics.get_stats()['request_timer']['count'], 5)

        # Check scales to ensure they are appropriately named
        self.assertTrue("appcluster" in scales._Stats.stats.keys())
        self.assertTrue("devops" in scales._Stats.stats.keys())

        cluster2.shutdown()
        cluster3.shutdown()
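A minimal sketch of the metrics surface exercised above (requires the optional scales dependency and a reachable node; the stats name is arbitrary):

from cassandra.cluster import Cluster

cluster = Cluster(metrics_enabled=True)
cluster.metrics.set_stats_name('app')  # must be unique within the process
session = cluster.connect()
session.execute("SELECT release_version FROM system.local")
# Request counters accumulate per cluster under the chosen name.
print(cluster.metrics.get_stats()['request_timer']['count'])
cluster.shutdown()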
Example #5
    def setUpClass(cls):
        cls.common_setup(1)

        spec_ep_brr = ExecutionProfile(load_balancing_policy=BadRoundRobinPolicy(), speculative_execution_policy=ConstantSpeculativeExecutionPolicy(.01, 20))
        spec_ep_rr = ExecutionProfile(speculative_execution_policy=ConstantSpeculativeExecutionPolicy(.01, 20))
        spec_ep_rr_lim = ExecutionProfile(load_balancing_policy=BadRoundRobinPolicy(), speculative_execution_policy=ConstantSpeculativeExecutionPolicy(.01, 1))
        spec_ep_brr_lim = ExecutionProfile(load_balancing_policy=BadRoundRobinPolicy(), speculative_execution_policy=ConstantSpeculativeExecutionPolicy(0.4, 10))

        cls.cluster.add_execution_profile("spec_ep_brr", spec_ep_brr)
        cls.cluster.add_execution_profile("spec_ep_rr", spec_ep_rr)
        cls.cluster.add_execution_profile("spec_ep_rr_lim", spec_ep_rr_lim)
        cls.cluster.add_execution_profile("spec_ep_brr_lim", spec_ep_brr_lim)
Example #6
 def setUp(self):
     spec_ep_brr = ExecutionProfile(
         load_balancing_policy=BadRoundRobinPolicy(),
         speculative_execution_policy=ConstantSpeculativeExecutionPolicy(
             .01, 20))
     spec_ep_rr = ExecutionProfile(
         speculative_execution_policy=ConstantSpeculativeExecutionPolicy(
             .01, 20))
     spec_ep_rr_lim = ExecutionProfile(
         load_balancing_policy=BadRoundRobinPolicy(),
         speculative_execution_policy=ConstantSpeculativeExecutionPolicy(
             .01, 1))
     self.cluster.add_execution_profile("spec_ep_brr", spec_ep_brr)
     self.cluster.add_execution_profile("spec_ep_rr", spec_ep_rr)
     self.cluster.add_execution_profile("spec_ep_rr_lim", spec_ep_rr_lim)
Example #7
    def test_profile_name_value(self):
        internalized_profile = ExecutionProfile(RoundRobinPolicy(),
                                                *[object() for _ in range(3)])
        cluster = Cluster(execution_profiles={'by-name': internalized_profile})
        session = Session(cluster,
                          hosts=[Host("127.0.0.1", SimpleConvictionPolicy)])

        rf = session.execute_async("query", execution_profile='by-name')
        self._verify_response_future_profile(rf, internalized_profile)

        by_value = ExecutionProfile(RoundRobinPolicy(),
                                    *[object() for _ in range(3)])
        rf = session.execute_async("query", execution_profile=by_value)
        self._verify_response_future_profile(rf, by_value)
Example #8
    def _test_downgrading_cl(self, keyspace, rf, accepted):
        cluster = Cluster(protocol_version=PROTOCOL_VERSION,
                          execution_profiles={
                              EXEC_PROFILE_DEFAULT:
                              ExecutionProfile(
                                  TokenAwarePolicy(RoundRobinPolicy()),
                                  DowngradingConsistencyRetryPolicy())
                          })
        session = cluster.connect(wait_for_all_pools=True)

        create_schema(cluster, session, keyspace, replication_factor=rf)
        self._insert(session, keyspace, 1)
        self._query(session, keyspace, 1)
        self.coordinator_stats.assert_query_count_equals(self, 1, 0)
        self.coordinator_stats.assert_query_count_equals(self, 2, 1)
        self.coordinator_stats.assert_query_count_equals(self, 3, 0)

        try:
            force_stop(2)
            wait_for_down(cluster, 2)

            self._assert_writes_succeed(session, keyspace, accepted)
            self._assert_reads_succeed(session, keyspace,
                                       accepted - set([ConsistencyLevel.ANY]))
            self._assert_writes_fail(session, keyspace,
                                     SINGLE_DC_CONSISTENCY_LEVELS - accepted)
            self._assert_reads_fail(session, keyspace,
                                    SINGLE_DC_CONSISTENCY_LEVELS - accepted)
        finally:
            start(2)
            wait_for_up(cluster, 2)

        cluster.shutdown()
Example #9
    def test_rfthree_tokenaware_none_down(self):
        keyspace = 'test_rfthree_tokenaware_none_down'
        cluster = Cluster(protocol_version=PROTOCOL_VERSION,
                          execution_profiles={
                              EXEC_PROFILE_DEFAULT:
                              ExecutionProfile(
                                  TokenAwarePolicy(RoundRobinPolicy()))
                          })
        session = cluster.connect(wait_for_all_pools=True)
        wait_for_up(cluster, 1)
        wait_for_up(cluster, 2)

        create_schema(cluster, session, keyspace, replication_factor=3)
        self._insert(session, keyspace, count=1)
        self._query(session, keyspace, count=1)
        self.coordinator_stats.assert_query_count_equals(self, 1, 0)
        self.coordinator_stats.assert_query_count_equals(self, 2, 1)
        self.coordinator_stats.assert_query_count_equals(self, 3, 0)

        self.coordinator_stats.reset_counts()

        self._assert_writes_succeed(session, keyspace,
                                    SINGLE_DC_CONSISTENCY_LEVELS)
        self._assert_reads_succeed(session,
                                   keyspace,
                                   SINGLE_DC_CONSISTENCY_LEVELS -
                                   set([ConsistencyLevel.ANY]),
                                   expected_reader=2)

        cluster.shutdown()
Example #10
 def __init__(self,
              name,
              hosts,
              consistency=ConsistencyLevel.LOCAL_ONE,
              lazy_connect=False,
              retry_connect=False,
              cluster_options=None):
     self.hosts = hosts
     self.name = name
     self.consistency = consistency
     self.lazy_connect = lazy_connect
     self.retry_connect = retry_connect
     self.cluster_options = cluster_options if cluster_options else {}
     ep_kwargs = dict((k, self.cluster_options[k])
                      for k in ('load_balancing_policy', 'retry_policy')
                      if k in self.cluster_options)
     try:
         ep = self.cluster_options['execution_profiles'][
             EXEC_PROFILE_DEFAULT]
         ep.consistency_level = self.consistency
         # apply legacy-style overrides onto the existing default profile
         for k, v in ep_kwargs.items():
             setattr(ep, k, v)
     except KeyError:
         ep = ExecutionProfile(consistency_level=self.consistency,
                               row_factory=dict_factory,
                               **ep_kwargs)
         self.cluster_options['execution_profiles'] = {
             EXEC_PROFILE_DEFAULT: ep
         }
     self.lazy_connect_lock = threading.RLock()
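A sketch of how this constructor might be driven; the enclosing class is not shown in the excerpt, so the name Connection is an assumption, and the kwargs mirror the signature above.

from cassandra import ConsistencyLevel
from cassandra.cluster import EXEC_PROFILE_DEFAULT
from cassandra.policies import FallthroughRetryPolicy

# Hypothetical usage: legacy-style options are folded into a default
# ExecutionProfile when none is supplied in cluster_options.
conn = Connection(name='default', hosts=['127.0.0.1'],
                  consistency=ConsistencyLevel.QUORUM,
                  cluster_options={'retry_policy': FallthroughRetryPolicy()})
ep = conn.cluster_options['execution_profiles'][EXEC_PROFILE_DEFAULT]
assert ep.consistency_level == ConsistencyLevel.QUORUM
assert isinstance(ep.retry_policy, FallthroughRetryPolicy)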
Example #11
    def setUp(self):
        """
        Setup sessions and pause node1
        """
        self.cluster = Cluster(protocol_version=PROTOCOL_VERSION,
                                   execution_profiles={EXEC_PROFILE_DEFAULT:
                                           ExecutionProfile(
                                           load_balancing_policy=HostFilterPolicy(
                                               RoundRobinPolicy(), lambda host: host.address == "127.0.0.1"
                                           )
                                       )
                                   }
                               )

        self.session = self.cluster.connect(wait_for_all_pools=True)

        self.control_connection_host_number = 1
        self.node_to_stop = get_node(self.control_connection_host_number)

        ddl = '''
            CREATE TABLE test3rf.timeout (
                k int PRIMARY KEY,
                v int )'''
        self.session.execute(ddl)
        self.node_to_stop.pause()
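HostFilterPolicy simply wraps a child policy with a predicate; the same construction in isolation (a sketch):

from cassandra.policies import HostFilterPolicy, RoundRobinPolicy

# Only hosts for which the predicate returns True are handed to the
# child policy for query routing; the rest are ignored.
lbp = HostFilterPolicy(
    child_policy=RoundRobinPolicy(),
    predicate=lambda host: host.address == "127.0.0.1")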
Example #12
    def test_prepare_on_all_hosts(self):
        """
        Test to validate that the prepare_on_all_hosts flag is honored.

        Uses a special ForcedHostSwitchPolicy to cycle prepared queries over nodes that should not
        have them prepared, then checks the logs to ensure they are re-prepared on those nodes.

        @since 3.4.0
        @jira_ticket PYTHON-556
        @expected_result queries will have to be re-prepared on hosts that aren't the control connection
        """
        white_list = ForcedHostSwitchPolicy()
        clus = Cluster(
            execution_profiles={
                EXEC_PROFILE_DEFAULT: ExecutionProfile(load_balancing_policy=white_list)},
            protocol_version=PROTOCOL_VERSION,
            prepare_on_all_hosts=False,
            reprepare_on_up=False)
        self.addCleanup(clus.shutdown)

        session = clus.connect(wait_for_all_pools=True)
        mock_handler = MockLoggingHandler()
        logger = logging.getLogger(cluster.__name__)
        logger.addHandler(mock_handler)
        select_statement = session.prepare("SELECT * FROM system.local")
        session.execute(select_statement)
        session.execute(select_statement)
        session.execute(select_statement)
        self.assertEqual(2, mock_handler.get_message_count('debug', "Re-preparing"))
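The two cluster flags driving this behavior, shown in isolation (a sketch):

from cassandra.cluster import Cluster

clus = Cluster(
    prepare_on_all_hosts=False,  # prepare only on the host that served the PREPARE
    reprepare_on_up=False)       # don't re-prepare statements when a host comes back up
# Executing a prepared statement on any other host then triggers the
# "Re-preparing" path counted in the test above.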
Example #13
    def test_white_list(self):
        use_singledc()
        keyspace = 'test_white_list'

        cluster = Cluster(('127.0.0.2',), protocol_version=PROTOCOL_VERSION, topology_event_refresh_window=0, status_event_refresh_window=0,
                          execution_profiles={EXEC_PROFILE_DEFAULT: ExecutionProfile(load_balancing_policy=WhiteListRoundRobinPolicy((IP_FORMAT % 2,)))})
        session = cluster.connect()
        self._wait_for_nodes_up([1, 2, 3])

        create_schema(cluster, session, keyspace)
        self._insert(session, keyspace)
        self._query(session, keyspace)

        self.coordinator_stats.assert_query_count_equals(self, 1, 0)
        self.coordinator_stats.assert_query_count_equals(self, 2, 12)
        self.coordinator_stats.assert_query_count_equals(self, 3, 0)

        # white list policy should not allow reconnecting to ignored hosts
        force_stop(3)
        self._wait_for_nodes_down([3])
        self.assertFalse(cluster.metadata._hosts[IP_FORMAT % 3].is_currently_reconnecting())

        self.coordinator_stats.reset_counts()
        force_stop(2)
        self._wait_for_nodes_down([2])

        try:
            self._query(session, keyspace)
            self.fail()
        except NoHostAvailable:
            pass
        finally:
            cluster.shutdown()
Example #14
    def test_clone_shared_lbp(self):
        """
        Tests that profile load balancing policies are shared on clone

        Creates one LBP, clones it, and ensures that the LBP is shared between
        the two EPs

        @since 3.5
        @jira_ticket PYTHON-569
        @expected_result LBP is shared

        @test_category config_profiles
        """
        query = "select release_version from system.local"
        rr1 = ExecutionProfile(load_balancing_policy=RoundRobinPolicy())
        exec_profiles = {'rr1': rr1}
        with Cluster(execution_profiles=exec_profiles) as cluster:
            session = cluster.connect(wait_for_all_pools=True)
            self.assertGreater(
                len(cluster.metadata.all_hosts()), 1,
                "We only have one host connected at this point")

            rr1_clone = session.execution_profile_clone_update(
                'rr1', row_factory=tuple_factory)
            cluster.add_execution_profile("rr1_clone", rr1_clone)
            rr1_queried_hosts = set()
            rr1_clone_queried_hosts = set()
            rs = session.execute(query, execution_profile='rr1')
            rr1_queried_hosts.add(rs.response_future._current_host)
            rs = session.execute(query, execution_profile='rr1_clone')
            rr1_clone_queried_hosts.add(rs.response_future._current_host)
            self.assertNotEqual(rr1_clone_queried_hosts, rr1_queried_hosts)
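Because execution_profile_clone_update makes a shallow copy, the sharing this test relies on can also be checked directly; a sketch continuing the with-block above:

# The clone shares its source's mutable policy objects; only the
# overridden attribute is new.
rr1_clone = session.execution_profile_clone_update(
    'rr1', row_factory=tuple_factory)
assert rr1_clone.load_balancing_policy is rr1.load_balancing_policy
assert rr1_clone.row_factory is tuple_factory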
Example #15
    def test_duplicate(self):
        """
        Test duplicate RPC addresses.

        Modifies the system.peers table so that hosts report the same rpc address, then ensures such hosts are filtered out and a warning is logged

        @since 3.4
        @jira_ticket PYTHON-366
        @expected_result only one host's metadata will be populated

        @test_category metadata
        """
        mock_handler = MockLoggingHandler()
        logger = logging.getLogger(dse.cluster.__name__)
        logger.addHandler(mock_handler)
        test_cluster = self.cluster = Cluster(
            protocol_version=PROTOCOL_VERSION,
            execution_profiles={
                EXEC_PROFILE_DEFAULT:
                ExecutionProfile(
                    load_balancing_policy=self.load_balancing_policy)
            })

        test_cluster.connect()
        warnings = mock_handler.messages.get("warning")
        self.assertEqual(len(warnings), 1)
        self.assertTrue('multiple' in warnings[0])
        logger.removeHandler(mock_handler)
        test_cluster.shutdown()
Example #16
    def test_numpy_results_paged(self):
        """
        Test Numpy-based parser that returns a NumPy array
        """
        # arrays = { 'a': arr1, 'b': arr2, ... }
        cluster = Cluster(protocol_version=PROTOCOL_VERSION,
                          execution_profiles={EXEC_PROFILE_DEFAULT: ExecutionProfile(row_factory=tuple_factory)})
        session = cluster.connect(keyspace="testspace")
        session.client_protocol_handler = NumpyProtocolHandler
        session.default_fetch_size = 2

        expected_pages = (self.N_ITEMS + session.default_fetch_size - 1) // session.default_fetch_size

        self.assertLess(session.default_fetch_size, self.N_ITEMS)

        results = session.execute("SELECT * FROM test_table")

        self.assertTrue(results.has_more_pages)
        for count, page in enumerate(results, 1):
            self.assertIsInstance(page, dict)
            for colname, arr in page.items():
                if count <= expected_pages:
                    self.assertGreater(len(arr), 0, "page count: %d" % (count,))
                    self.assertLessEqual(len(arr), session.default_fetch_size)
                else:
                    # we get one extra item out of this iteration because of the way NumpyParser returns results
                    # The last page is returned as a dict with zero-length arrays
                    self.assertEqual(len(arr), 0)
            self.assertEqual(self._verify_numpy_page(page), len(arr))
        self.assertEqual(count, expected_pages + 1)  # see note about extra 'page' above

        cluster.shutdown()
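The handler swap is per-session and separate from the profile's row factory; a sketch (NumpyProtocolHandler is only usable when the driver's Cython extensions and NumPy are installed):

from cassandra.protocol import NumpyProtocolHandler

session.client_protocol_handler = NumpyProtocolHandler
# Each fetched page then arrives as {'column_name': numpy_array, ...}
# rather than as a sequence of per-row objects.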
Example #17
 def setUpClass(cls):
     cls.cluster = Cluster(protocol_version=PROTOCOL_VERSION,
                           execution_profiles={
                               EXEC_PROFILE_DEFAULT:
                               ExecutionProfile(row_factory=tuple_factory)
                           })
     cls.session = cls.cluster.connect()
Example #18
    def _cluster_session_with_lbp(self, lbp):
        # create a cluster with no delay on events

        cluster = Cluster(protocol_version=PROTOCOL_VERSION, topology_event_refresh_window=0, status_event_refresh_window=0,
                          execution_profiles={EXEC_PROFILE_DEFAULT: ExecutionProfile(load_balancing_policy=lbp)})
        session = cluster.connect()
        return cluster, session
Example #19
 def setUp(self):
     self.c = Cluster(protocol_version=PROTOCOL_VERSION,
                      execution_profiles={
                          EXEC_PROFILE_DEFAULT:
                          ExecutionProfile(row_factory=dict_factory)
                      })
     self.session1 = self.c.connect(keyspace=self.keyspace1)
     self.session2 = self.c.connect(keyspace=self.keyspace2)
Example #20
 def __init__(self):
     self.metadata = MockMetadata()
     self.added_hosts = []
     self.removed_hosts = []
     self.scheduler = Mock(spec=_Scheduler)
     self.executor = Mock(spec=ThreadPoolExecutor)
     self.profile_manager.profiles[EXEC_PROFILE_DEFAULT] = ExecutionProfile(
         RoundRobinPolicy())
Example #21
 def _results_from_row_factory(self, row_factory):
     cluster = Cluster(protocol_version=PROTOCOL_VERSION,
                       execution_profiles={
                           EXEC_PROFILE_DEFAULT:
                           ExecutionProfile(row_factory=row_factory)
                       })
     with cluster:
         return cluster.connect().execute(self.select)
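A standalone sketch of the row shapes the built-in factories produce for the same query (version string illustrative; assumes a reachable local node):

from cassandra.cluster import Cluster, ExecutionProfile, EXEC_PROFILE_DEFAULT
from cassandra.query import dict_factory

# tuple_factory       -> ('3.11.4',)
# named_tuple_factory -> Row(release_version='3.11.4')
# dict_factory        -> {'release_version': '3.11.4'}
cluster = Cluster(execution_profiles={
    EXEC_PROFILE_DEFAULT: ExecutionProfile(row_factory=dict_factory)})
with cluster:
    row = cluster.connect().execute(
        "SELECT release_version FROM system.local")[0]
    release = row['release_version']  # dict rows are keyed by column name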
Example #22
 def setUp(self):
     self.cluster = Cluster(
         protocol_version=PROTOCOL_VERSION,
         execution_profiles={
             EXEC_PROFILE_DEFAULT:
             ExecutionProfile(
                 load_balancing_policy=WhiteListRoundRobinPolicy([DSE_IP]))
         })
     self.session = self.cluster.connect()
Example #23
    def test_missing_exec_prof(self):
        """
        Tests to verify that using an unknown profile raises a ValueError

        @since 3.5
        @jira_ticket PYTHON-569
        @expected_result ValueError

        @test_category config_profiles
        """
        query = "select release_version from system.local"
        rr1 = ExecutionProfile(load_balancing_policy=RoundRobinPolicy())
        rr2 = ExecutionProfile(load_balancing_policy=RoundRobinPolicy())
        exec_profiles = {'rr1': rr1, 'rr2': rr2}
        with Cluster(execution_profiles=exec_profiles) as cluster:
            session = cluster.connect()
            with self.assertRaises(ValueError):
                session.execute(query, execution_profile='rr3')
Example #24
 def test_rfthree_tokenaware_downgradingcl(self):
     keyspace = 'test_rfthree_tokenaware_downgradingcl'
     with Cluster(protocol_version=PROTOCOL_VERSION,
                  execution_profiles={
                      EXEC_PROFILE_DEFAULT:
                      ExecutionProfile(TokenAwarePolicy(RoundRobinPolicy()),
                                       DowngradingConsistencyRetryPolicy())
                  }) as cluster:
         self.rfthree_downgradingcl(cluster, keyspace, False)
Example #25
 def make_session_and_keyspace(self):
     cluster = Cluster(protocol_version=PROTOCOL_VERSION,
                       execution_profiles={
                           EXEC_PROFILE_DEFAULT:
                           ExecutionProfile(request_timeout=20,
                                            row_factory=dict_factory)
                       })
     session = cluster.connect()
     create_schema(cluster, session, self.keyspace)
     return session
Example #26
    def test_profile_load_balancing(self):
        """
        Tests that profile load balancing policies are honored.

        @since 3.5
        @jira_ticket PYTHON-569
        @expected_result Execution Policy should be used when applicable.

        @test_category config_profiles
        """
        query = "select release_version from system.local"
        node1 = ExecutionProfile(
            load_balancing_policy=WhiteListRoundRobinPolicy([DSE_IP]))
        with Cluster(execution_profiles={'node1': node1}) as cluster:
            session = cluster.connect(wait_for_all_pools=True)

            # default is DCA RR for all hosts
            expected_hosts = set(cluster.metadata.all_hosts())
            queried_hosts = set()
            for _ in expected_hosts:
                rs = session.execute(query)
                queried_hosts.add(rs.response_future._current_host)
            self.assertEqual(queried_hosts, expected_hosts)

            # by name we should only hit the one
            expected_hosts = set(h for h in cluster.metadata.all_hosts()
                                 if h.address == DSE_IP)
            queried_hosts = set()
            for _ in cluster.metadata.all_hosts():
                rs = session.execute(query, execution_profile='node1')
                queried_hosts.add(rs.response_future._current_host)
            self.assertEqual(queried_hosts, expected_hosts)

            # use a copied instance and override the row factory
            # assert the last returned value can be accessed as a namedtuple, so the
            # tuple_factory override below can be shown to produce something different
            named_tuple_row = rs[0]
            self.assertIsInstance(named_tuple_row, tuple)
            self.assertTrue(named_tuple_row.release_version)

            tmp_profile = copy(node1)
            tmp_profile.row_factory = tuple_factory
            queried_hosts = set()
            for _ in cluster.metadata.all_hosts():
                rs = session.execute(query, execution_profile=tmp_profile)
                queried_hosts.add(rs.response_future._current_host)
            self.assertEqual(queried_hosts, expected_hosts)
            tuple_row = rs[0]
            self.assertIsInstance(tuple_row, tuple)
            with self.assertRaises(AttributeError):
                tuple_row.release_version

            # make sure original profile is not impacted
            self.assertTrue(
                session.execute(query,
                                execution_profile='node1')[0].release_version)
Example #27
    def test_add_profile_timeout(self):
        """
        Tests that EP Timeouts are honored.

        @since 3.5
        @jira_ticket PYTHON-569
        @expected_result EP timeouts should override defaults

        @test_category config_profiles
        """

        node1 = ExecutionProfile(
            load_balancing_policy=WhiteListRoundRobinPolicy(['127.0.0.1']))
        with Cluster(
                execution_profiles={EXEC_PROFILE_DEFAULT: node1}) as cluster:
            session = cluster.connect(wait_for_all_pools=True)
            pools = session.get_pool_state()
            self.assertGreater(len(cluster.metadata.all_hosts()), 2)
            self.assertEqual(set(h.address for h in pools), set(
                ('127.0.0.1', )))

            node2 = ExecutionProfile(
                load_balancing_policy=WhiteListRoundRobinPolicy(['127.0.0.2']))

            max_retry_count = 10
            for i in range(max_retry_count):
                start = time.time()
                try:
                    self.assertRaises(dse.OperationTimedOut,
                                      cluster.add_execution_profile,
                                      'profile_{0}'.format(i),
                                      node2,
                                      pool_wait_timeout=sys.float_info.min)
                    break
                except AssertionError:
                    end = time.time()
                    self.assertAlmostEqual(start, end, 1)
            else:
                raise Exception(
                    "add_execution_profile didn't timeout after {0} retries".
                    format(max_retry_count))
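For reference, the same call with a realistic timeout, continuing the test above (value illustrative; profile name hypothetical):

# Wait up to 5 seconds for the new profile's connection pools to open
# before raising OperationTimedOut.
cluster.add_execution_profile('node2_profile', node2, pool_wait_timeout=5)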
Example #28
 def _connect_probe_cluster(self):
     if not self.probe_cluster:
         # distinct cluster so we can see the status of nodes ignored by the LBP being tested
         self.probe_cluster = Cluster(
             schema_metadata_enabled=False,
             token_metadata_enabled=False,
             execution_profiles={
                 EXEC_PROFILE_DEFAULT:
                 ExecutionProfile(load_balancing_policy=RoundRobinPolicy())
             })
         self.probe_session = self.probe_cluster.connect()
Example #29
 def setUp(self):
     self.cluster = Cluster(
         protocol_version=PROTOCOL_VERSION,
         execution_profiles={
             EXEC_PROFILE_DEFAULT:
             ExecutionProfile(
                 load_balancing_policy=self.load_balancing_policy)
         })
     self.session = self.cluster.connect()
     self.session.execute(
         "UPDATE system.peers SET rpc_address = '127.0.0.1' WHERE peer='127.0.0.2'"
     )
Example #30
    def test_prepare_batch_statement_after_alter(self):
        """
        Test to validate that a prepared statement used inside a batch statement is correctly handled
        by the driver. The metadata might be updated when a table is altered. This test combines
        queries not being prepared and an update of the prepared statement metadata

        @since 3.10
        @jira_ticket PYTHON-706
        @expected_result queries will have to be re-prepared on hosts that aren't the control connection
        and the batch statement will be sent.
        """
        white_list = ForcedHostSwitchPolicy()
        clus = Cluster(execution_profiles={
            EXEC_PROFILE_DEFAULT:
            ExecutionProfile(load_balancing_policy=white_list)
        },
                       protocol_version=PROTOCOL_VERSION,
                       prepare_on_all_hosts=False,
                       reprepare_on_up=False)
        self.addCleanup(clus.shutdown)

        table = "test3rf.%s" % self._testMethodName.lower()

        session = clus.connect(wait_for_all_pools=True)

        session.execute("DROP TABLE IF EXISTS %s" % table)
        session.execute(
            "CREATE TABLE %s (k int PRIMARY KEY, a int, b int, d int)" % table)
        insert_statement = session.prepare(
            "INSERT INTO %s (k, b, d) VALUES  (?, ?, ?)" % table)

        # Altering the table might trigger an update in the insert metadata
        session.execute("ALTER TABLE %s ADD c int" % table)

        values_to_insert = [(1, 2, 3), (2, 3, 4), (3, 4, 5), (4, 5, 6)]

        # We query the three hosts in order (due to the ForcedHostSwitchPolicy);
        # the first three queries will have to be re-prepared and the rest should
        # work as normal prepared statements inside batches
        for i in range(10):
            value_to_insert = values_to_insert[i % len(values_to_insert)]
            batch_statement = BatchStatement(
                consistency_level=ConsistencyLevel.ONE)
            batch_statement.add(insert_statement, value_to_insert)
            session.execute(batch_statement)

        select_results = session.execute("SELECT * FROM %s" % table)
        expected_results = [(1, None, 2, None, 3), (2, None, 3, None, 4),
                            (3, None, 4, None, 5), (4, None, 5, None, 6)]

        self.assertEqual(set(expected_results),
                         set(select_results._current_rows))