def test_statement_params_override_profile(self):
        non_default_profile = ExecutionProfile(RoundRobinPolicy(),
                                               *[object() for _ in range(2)])
        cluster = Cluster(
            execution_profiles={'non-default': non_default_profile})
        session = Session(cluster,
                          hosts=[Host("127.0.0.1", SimpleConvictionPolicy)])

        self.assertEqual(cluster._config_mode, _ConfigMode.PROFILES)

        rf = session.execute_async("query", execution_profile='non-default')

        ss = SimpleStatement("query",
                             retry_policy=DowngradingConsistencyRetryPolicy(),
                             consistency_level=ConsistencyLevel.ALL,
                             serial_consistency_level=ConsistencyLevel.SERIAL)
        my_timeout = 1.1234

        self.assertNotEqual(ss.retry_policy.__class__,
                            rf._load_balancer.__class__)
        self.assertNotEqual(ss.consistency_level, rf.message.consistency_level)
        self.assertNotEqual(ss._serial_consistency_level,
                            rf.message.serial_consistency_level)
        self.assertNotEqual(my_timeout, rf.timeout)

        rf = session.execute_async(ss,
                                   timeout=my_timeout,
                                   execution_profile='non-default')
        expected_profile = ExecutionProfile(
            non_default_profile.load_balancing_policy, ss.retry_policy,
            ss.consistency_level, ss._serial_consistency_level, my_timeout,
            non_default_profile.row_factory)
        self._verify_response_future_profile(rf, expected_profile)
Example #2
    def test_add_profile_timeout(self):
        """
        Tests that EP Timeouts are honored.

        @since 3.5
        @jira_ticket PYTHON-569
        @expected_result EP timeouts should override defaults

        @test_category config_profiles
        """

        node1 = ExecutionProfile(load_balancing_policy=WhiteListRoundRobinPolicy(['127.0.0.1']))
        with Cluster(execution_profiles={EXEC_PROFILE_DEFAULT: node1}) as cluster:
            session = cluster.connect(wait_for_all_pools=True)
            pools = session.get_pool_state()
            self.assertGreater(len(cluster.metadata.all_hosts()), 2)
            self.assertEqual(set(h.address for h in pools), set(('127.0.0.1',)))

            node2 = ExecutionProfile(load_balancing_policy=WhiteListRoundRobinPolicy(['127.0.0.2']))

            max_retry_count = 10
            for i in range(max_retry_count):
                start = time.time()
                try:
                    self.assertRaises(cassandra.OperationTimedOut, cluster.add_execution_profile,
                                      'profile_{0}'.format(i),
                                      node2, pool_wait_timeout=sys.float_info.min)
                    break
                except AssertionError:
                    end = time.time()
                    self.assertAlmostEqual(start, end, 1)
            else:
                raise Exception("add_execution_profile didn't timeout after {0} retries".format(max_retry_count))
Example #3
    def setUpClass(cls):
        if SIMULACRON_JAR is None or CASSANDRA_VERSION < Version("2.1"):
            return

        start_and_prime_singledc()
        cls.cluster = Cluster(protocol_version=PROTOCOL_VERSION,
                              compression=False)
        cls.session = cls.cluster.connect(wait_for_all_pools=True)

        spec_ep_brr = ExecutionProfile(
            load_balancing_policy=BadRoundRobinPolicy(),
            speculative_execution_policy=ConstantSpeculativeExecutionPolicy(
                1, 6),
            request_timeout=12)
        spec_ep_rr = ExecutionProfile(
            speculative_execution_policy=ConstantSpeculativeExecutionPolicy(
                .5, 10),
            request_timeout=12)
        spec_ep_rr_lim = ExecutionProfile(
            load_balancing_policy=BadRoundRobinPolicy(),
            speculative_execution_policy=ConstantSpeculativeExecutionPolicy(
                0.5, 1),
            request_timeout=12)
        spec_ep_brr_lim = ExecutionProfile(
            load_balancing_policy=BadRoundRobinPolicy(),
            speculative_execution_policy=ConstantSpeculativeExecutionPolicy(
                4, 10))

        cls.cluster.add_execution_profile("spec_ep_brr", spec_ep_brr)
        cls.cluster.add_execution_profile("spec_ep_rr", spec_ep_rr)
        cls.cluster.add_execution_profile("spec_ep_rr_lim", spec_ep_rr_lim)
        cls.cluster.add_execution_profile("spec_ep_brr_lim", spec_ep_brr_lim)
Example #4
    def test_profile_lb_swap(self):
        """
        Tests that profile load balancing policies are not shared

        Creates two LBPs, runs a few queries, and validates that each LBP is
        exercised separately between EPs

        @since 3.5
        @jira_ticket PYTHON-569
        @expected_result LBP should not be shared.

        @test_category config_profiles
        """
        query = "select release_version from system.local"
        rr1 = ExecutionProfile(load_balancing_policy=RoundRobinPolicy())
        rr2 = ExecutionProfile(load_balancing_policy=RoundRobinPolicy())
        exec_profiles = {'rr1': rr1, 'rr2': rr2}
        with Cluster(execution_profiles=exec_profiles) as cluster:
            session = cluster.connect(wait_for_all_pools=True)

            # default is DCA RR for all hosts
            expected_hosts = set(cluster.metadata.all_hosts())
            rr1_queried_hosts = set()
            rr2_queried_hosts = set()

            rs = session.execute(query, execution_profile='rr1')
            rr1_queried_hosts.add(rs.response_future._current_host)
            rs = session.execute(query, execution_profile='rr2')
            rr2_queried_hosts.add(rs.response_future._current_host)

            self.assertEqual(rr2_queried_hosts, rr1_queried_hosts)
Example #5
    def test_profile_pool_management(self):
        """
        Tests that changes to execution profiles correctly impact our cluster's pooling

        @since 3.5
        @jira_ticket PYTHON-569
        @expected_result pools should be correctly updated as EPs are added and removed

        @test_category config_profiles
        """

        node1 = ExecutionProfile(load_balancing_policy=WhiteListRoundRobinPolicy(['127.0.0.1']))
        node2 = ExecutionProfile(load_balancing_policy=WhiteListRoundRobinPolicy(['127.0.0.2']))
        with Cluster(execution_profiles={EXEC_PROFILE_DEFAULT: node1, 'node2': node2}) as cluster:
            session = cluster.connect(wait_for_all_pools=True)
            pools = session.get_pool_state()
            # there are more hosts, but we connected to the ones in the lbp aggregate
            self.assertGreater(len(cluster.metadata.all_hosts()), 2)
            self.assertEqual(set(h.address for h in pools), set(('127.0.0.1', '127.0.0.2')))

            # dynamically update pools on add
            node3 = ExecutionProfile(load_balancing_policy=WhiteListRoundRobinPolicy(['127.0.0.3']))
            cluster.add_execution_profile('node3', node3)
            pools = session.get_pool_state()
            self.assertEqual(set(h.address for h in pools), set(('127.0.0.1', '127.0.0.2', '127.0.0.3')))
Example #6
    def test_node_busy(self):
        """ Verify that once TCP buffer is full, queries continue to get re-routed to other nodes """
        start_and_prime_singledc()
        profile = ExecutionProfile(load_balancing_policy=RoundRobinPolicy())
        cluster = Cluster(
            protocol_version=PROTOCOL_VERSION,
            compression=False,
            execution_profiles={EXEC_PROFILE_DEFAULT: profile},
        )
        session = cluster.connect(wait_for_all_pools=True)
        self.addCleanup(cluster.shutdown)

        query = session.prepare("INSERT INTO table1 (id) VALUES (?)")

        prime_request(PauseReads(dc_id=0, node_id=0))

        blocked_profile = ExecutionProfile(
            load_balancing_policy=WhiteListRoundRobinPolicy(["127.0.0.1"]))
        cluster.add_execution_profile('blocked_profile', blocked_profile)

        # Fill our blocked node's tcp buffer until we get a busy exception
        self._fill_buffers(session,
                           query,
                           expected_blocked=1,
                           execution_profile='blocked_profile')

        # Now that our send buffer is completely full on one node,
        # verify queries get re-routed to other nodes and queries complete successfully
        for i in range(1000):
            session.execute(query, [str(i)])
Example #7
    def setUp(self):
        spec_ep_brr = ExecutionProfile(
            load_balancing_policy=BadRoundRobinPolicy(),
            speculative_execution_policy=ConstantSpeculativeExecutionPolicy(.01, 20))
        spec_ep_rr = ExecutionProfile(
            speculative_execution_policy=ConstantSpeculativeExecutionPolicy(.01, 20))
        spec_ep_rr_lim = ExecutionProfile(
            load_balancing_policy=BadRoundRobinPolicy(),
            speculative_execution_policy=ConstantSpeculativeExecutionPolicy(.01, 1))
        self.cluster.add_execution_profile("spec_ep_brr", spec_ep_brr)
        self.cluster.add_execution_profile("spec_ep_rr", spec_ep_rr)
        self.cluster.add_execution_profile("spec_ep_rr_lim", spec_ep_rr_lim)
Example #8
    def test_add_profile_timeout(self):
        """
        Tests that EP Timeouts are honored.

        @since 3.5
        @jira_ticket PYTHON-569
        @expected_result EP timeouts should override defaults

        @test_category config_profiles
        """

        node1 = ExecutionProfile(
            load_balancing_policy=WhiteListRoundRobinPolicy(['127.0.0.1']))
        with Cluster(
                execution_profiles={EXEC_PROFILE_DEFAULT: node1}) as cluster:
            session = cluster.connect(wait_for_all_pools=True)
            pools = session.get_pool_state()
            self.assertGreater(len(cluster.metadata.all_hosts()), 2)
            self.assertEqual(set(h.address for h in pools), set(
                ('127.0.0.1', )))

            node2 = ExecutionProfile(
                load_balancing_policy=WhiteListRoundRobinPolicy(['127.0.0.2']))
            self.assertRaises(cassandra.OperationTimedOut,
                              cluster.add_execution_profile,
                              'node2',
                              node2,
                              pool_wait_timeout=0.0000001)
Example #9
    def test_serial_consistency_level_validation(self):
        # should pass
        ep = ExecutionProfile(RoundRobinPolicy(), serial_consistency_level=ConsistencyLevel.SERIAL)
        ep = ExecutionProfile(RoundRobinPolicy(), serial_consistency_level=ConsistencyLevel.LOCAL_SERIAL)

        # should not pass
        with self.assertRaises(ValueError):
            ep = ExecutionProfile(RoundRobinPolicy(), serial_consistency_level=ConsistencyLevel.ANY)
        with self.assertRaises(ValueError):
            ep = ExecutionProfile(RoundRobinPolicy(), serial_consistency_level=42)
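For context, a minimal sketch of how a profile that passes this validation would be registered and selected per query (the profile name 'lwt' and the target table are hypothetical):

ep = ExecutionProfile(RoundRobinPolicy(),
                      serial_consistency_level=ConsistencyLevel.LOCAL_SERIAL)
cluster = Cluster(execution_profiles={'lwt': ep})  # 'lwt' is a hypothetical name
session = cluster.connect()
# The serial level only applies to conditional (IF ...) statements routed
# through this profile.
session.execute("UPDATE t SET v = 1 WHERE k = 0 IF v = 0",
                execution_profile='lwt')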
Example #10
def make_execution_profile(retry_policy=FlakyRetryPolicy(), consistency_level=ConsistencyLevel.ONE, **kwargs):
    if 'load_balancing_policy' in kwargs:
        return ExecutionProfile(retry_policy=retry_policy,
                                consistency_level=consistency_level,
                                **kwargs)
    else:
        return ExecutionProfile(retry_policy=retry_policy,
                                consistency_level=consistency_level,
                                load_balancing_policy=RoundRobinPolicy(),
                                **kwargs)
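A possible call to the helper above (the profile name 'flaky' is illustrative); omitting load_balancing_policy takes the else branch, so RoundRobinPolicy() is filled in:

# No load_balancing_policy kwarg, so the helper supplies RoundRobinPolicy().
ep = make_execution_profile(consistency_level=ConsistencyLevel.QUORUM,
                            request_timeout=5)
cluster = Cluster(execution_profiles={'flaky': ep})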
Example #11
    def setup_tables(self):
        key_space = self.env.config.get(ConfigKeys.KEY_SPACE,
                                        domain=ConfigKeys.STORAGE)
        hosts = self.env.config.get(ConfigKeys.HOST, domain=ConfigKeys.STORAGE)
        hosts = hosts.split(",")

        # specifying execution profiles will be required in future driver versions
        profiles = {
            # override the default so we can set consistency level later
            EXEC_PROFILE_DEFAULT:
            ExecutionProfile(
                load_balancing_policy=TokenAwarePolicy(
                    DCAwareRoundRobinPolicy()),
                retry_policy=RetryPolicy(),
                request_timeout=10.0,
                row_factory=Session._row_factory,  # noqa
                # should probably be changed to QUORUM when having more than 3 nodes in the cluster
                consistency_level=ConsistencyLevel.LOCAL_ONE,
            ),
            # TODO: there doesn't seem to be a way to specify execution profile when
            #  using the library's object mapping approach, only when writing pure
            #  cql queries:
            #  https://docs.datastax.com/en/developer/python-driver/3.24/execution_profiles/
            # batch profile has longer timeout since they are run async anyway
            "batch":
            ExecutionProfile(
                load_balancing_policy=TokenAwarePolicy(
                    DCAwareRoundRobinPolicy()),
                request_timeout=120.0,
                consistency_level=ConsistencyLevel.LOCAL_ONE,
            )
        }

        kwargs = {
            "default_keyspace": key_space,
            "protocol_version": 3,
            "retry_connect": True,
            "execution_profiles": profiles,
        }

        username = self._get_from_conf(ConfigKeys.USER, ConfigKeys.STORAGE)
        password = self._get_from_conf(ConfigKeys.PASSWORD, ConfigKeys.STORAGE)

        if password is not None:
            auth_provider = PlainTextAuthProvider(
                username=username,
                password=password,
            )
            kwargs["auth_provider"] = auth_provider

        connection.setup(hosts, **kwargs)

        sync_table(MessageModel)
        sync_table(AttachmentModel)
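Given the TODO above, a sketch of how the 'batch' profile would be selected with a raw CQL statement outside the object mapper (the table and values are hypothetical; connection.get_session() returns the Session that setup() created):

import uuid

session = connection.get_session()
session.execute(
    "INSERT INTO messages (id, body) VALUES (%s, %s)",  # hypothetical table
    (uuid.uuid4(), "hello"),
    execution_profile='batch')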
Example #12
    def test_no_warning_adding_lbp_ep_to_cluster_with_contact_points(self):
        ep_with_lbp = ExecutionProfile(load_balancing_policy=object())
        cluster = Cluster(
            contact_points=['127.0.0.1'],
            execution_profiles={EXEC_PROFILE_DEFAULT: ep_with_lbp})
        with patch('cassandra.cluster.log') as patched_logger:
            cluster.add_execution_profile(
                name='with_lbp',
                profile=ExecutionProfile(load_balancing_policy=Mock(name='lbp'))
            )

        patched_logger.warning.assert_not_called()
Example #13
    def setUpClass(cls):
        cls.common_setup(1)

        spec_ep_brr = ExecutionProfile(
            load_balancing_policy=BadRoundRobinPolicy(),
            speculative_execution_policy=ConstantSpeculativeExecutionPolicy(.01, 20))
        spec_ep_rr = ExecutionProfile(
            speculative_execution_policy=ConstantSpeculativeExecutionPolicy(.01, 20))
        spec_ep_rr_lim = ExecutionProfile(
            load_balancing_policy=BadRoundRobinPolicy(),
            speculative_execution_policy=ConstantSpeculativeExecutionPolicy(.01, 1))
        spec_ep_brr_lim = ExecutionProfile(
            load_balancing_policy=BadRoundRobinPolicy(),
            speculative_execution_policy=ConstantSpeculativeExecutionPolicy(0.4, 10))

        cls.cluster.add_execution_profile("spec_ep_brr", spec_ep_brr)
        cls.cluster.add_execution_profile("spec_ep_rr", spec_ep_rr)
        cls.cluster.add_execution_profile("spec_ep_rr_lim", spec_ep_rr_lim)
        cls.cluster.add_execution_profile("spec_ep_brr_lim", spec_ep_brr_lim)
Example #14
    def test_profile_name_value(self):

        internalized_profile = ExecutionProfile(RoundRobinPolicy(), *[object() for _ in range(2)])
        cluster = Cluster(execution_profiles={'by-name': internalized_profile})
        session = Session(cluster, hosts=[Host("127.0.0.1", SimpleConvictionPolicy)])
        self.assertEqual(cluster._config_mode, _ConfigMode.PROFILES)

        rf = session.execute_async("query", execution_profile='by-name')
        self._verify_response_future_profile(rf, internalized_profile)

        by_value = ExecutionProfile(RoundRobinPolicy(), *[object() for _ in range(2)])
        rf = session.execute_async("query", execution_profile=by_value)
        self._verify_response_future_profile(rf, by_value)
Example #15
    def setUpClass(cls):
        if not DSE_VERSION or DSE_VERSION < cls.required_dse_version:
            return

        cls.execution_profiles = {
            "CONTDEFAULT": ExecutionProfile(
                continuous_paging_options=ContinuousPagingOptions()),
            "ONEPAGE": ExecutionProfile(
                continuous_paging_options=ContinuousPagingOptions(max_pages=1)),
            "MANYPAGES": ExecutionProfile(
                continuous_paging_options=ContinuousPagingOptions(max_pages=10)),
            "BYTES": ExecutionProfile(
                continuous_paging_options=ContinuousPagingOptions(
                    page_unit=ContinuousPagingOptions.PagingUnit.BYTES)),
            "SLOW": ExecutionProfile(
                continuous_paging_options=ContinuousPagingOptions(
                    max_pages_per_second=1)),
        }
        cls.sane_eps = ["CONTDEFAULT", "BYTES"]
Example #16
    def test_duplicate_metrics_per_cluster(self):
        """
        Test to validate that cluster metrics names can't overlap.
        @since 3.6.0
        @jira_ticket PYTHON-561
        @expected_result metric names should not be allowed to be the same.

        @test_category metrics
        """
        cluster2 = TestCluster(
            metrics_enabled=True,
            monitor_reporting_enabled=False,
            execution_profiles={EXEC_PROFILE_DEFAULT: ExecutionProfile(retry_policy=FallthroughRetryPolicy())}
        )

        cluster3 = TestCluster(
            metrics_enabled=True,
            monitor_reporting_enabled=False,
            execution_profiles={EXEC_PROFILE_DEFAULT: ExecutionProfile(retry_policy=FallthroughRetryPolicy())}
        )

        # Ensure duplicate metric names are not allowed
        cluster2.metrics.set_stats_name("appcluster")
        cluster2.metrics.set_stats_name("appcluster")
        with self.assertRaises(ValueError):
            cluster3.metrics.set_stats_name("appcluster")
        cluster3.metrics.set_stats_name("devops")

        session2 = cluster2.connect(self.ks_name, wait_for_all_pools=True)
        session3 = cluster3.connect(self.ks_name, wait_for_all_pools=True)

        # Basic validation that naming metrics doesn't impact their segregation or accuracy
        for i in range(10):
            query = SimpleStatement("SELECT * FROM {0}.{0}".format(self.ks_name), consistency_level=ConsistencyLevel.ALL)
            session2.execute(query)

        for i in range(5):
            query = SimpleStatement("SELECT * FROM {0}.{0}".format(self.ks_name), consistency_level=ConsistencyLevel.ALL)
            session3.execute(query)

        self.assertEqual(cluster2.metrics.get_stats()['request_timer']['count'], 10)
        self.assertEqual(cluster3.metrics.get_stats()['request_timer']['count'], 5)

        # Check scales to ensure they are appropriately named
        self.assertTrue("appcluster" in scales._Stats.stats.keys())
        self.assertTrue("devops" in scales._Stats.stats.keys())

        cluster2.shutdown()
        cluster3.shutdown()
Example #17
    def test_warning_adding_no_lbp_ep_to_cluster_with_contact_points(self):
        ep_with_lbp = ExecutionProfile(load_balancing_policy=object())
        cluster = Cluster(
            contact_points=['127.0.0.1'],
            execution_profiles={EXEC_PROFILE_DEFAULT: ep_with_lbp})
        with patch('cassandra.cluster.log') as patched_logger:
            cluster.add_execution_profile(name='no_lbp',
                                          profile=ExecutionProfile())

        patched_logger.warning.assert_called_once()
        warning_message = patched_logger.warning.call_args[0][0]
        self.assertIn('no_lbp', warning_message)
        self.assertIn('trying to add', warning_message)
        self.assertIn('please specify a load-balancing policy',
                      warning_message)
Example #18
    def test_no_profiles_same_name(self):
        # can override default in init
        cluster = Cluster(execution_profiles={EXEC_PROFILE_DEFAULT: ExecutionProfile(), 'one': ExecutionProfile()})

        # cannot update default
        self.assertRaises(ValueError, cluster.add_execution_profile, EXEC_PROFILE_DEFAULT, ExecutionProfile())

        # cannot update named init
        self.assertRaises(ValueError, cluster.add_execution_profile, 'one', ExecutionProfile())

        # can add new name
        cluster.add_execution_profile('two', ExecutionProfile())

        # cannot add a profile added dynamically
        self.assertRaises(ValueError, cluster.add_execution_profile, 'two', ExecutionProfile())
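Because add_execution_profile never overwrites an existing name, the supported way to derive a runtime variant is to clone an existing profile and register it under a new name (a sketch; 'two_fast' is a hypothetical name and assumes a connected session):

session = cluster.connect()
two_fast = session.execution_profile_clone_update('two', request_timeout=2.0)
cluster.add_execution_profile('two_fast', two_fast)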
Example #19
    def test_exec_profile_clone(self):

        cluster = Cluster(execution_profiles={EXEC_PROFILE_DEFAULT: ExecutionProfile(), 'one': ExecutionProfile()})
        session = Session(cluster, hosts=[Host("127.0.0.1", SimpleConvictionPolicy)])

        profile_attrs = {'request_timeout': 1,
                         'consistency_level': ConsistencyLevel.ANY,
                         'serial_consistency_level': ConsistencyLevel.SERIAL,
                         'row_factory': tuple_factory,
                         'retry_policy': RetryPolicy(),
                         'load_balancing_policy': default_lbp_factory()}
        reference_attributes = ('retry_policy', 'load_balancing_policy')

        # default and one named
        for profile in (EXEC_PROFILE_DEFAULT, 'one'):
            active = session.get_execution_profile(profile)
            clone = session.execution_profile_clone_update(profile)
            self.assertIsNot(clone, active)

            all_updated = session.execution_profile_clone_update(clone, **profile_attrs)
            self.assertIsNot(all_updated, clone)
            for attr, value in profile_attrs.items():
                self.assertEqual(getattr(clone, attr), getattr(active, attr))
                if attr in reference_attributes:
                    self.assertIs(getattr(clone, attr), getattr(active, attr))
                self.assertNotEqual(getattr(all_updated, attr), getattr(active, attr))

        # cannot clone nonexistent profile
        self.assertRaises(ValueError, session.execution_profile_clone_update, 'DOES NOT EXIST', **profile_attrs)
Example #20
    def test_no_legacy_with_profile(self):
        cluster_init = Cluster(execution_profiles={'name': ExecutionProfile()})
        cluster_add = Cluster()
        cluster_add.add_execution_profile('name', ExecutionProfile())
        # for clusters with profiles added either way...
        for cluster in (cluster_init, cluster_add):
            # don't allow legacy parameters to be set
            for attr, value in (('default_retry_policy', RetryPolicy()),
                                ('load_balancing_policy', default_lbp_factory())):
                self.assertRaises(ValueError, setattr, cluster, attr, value)
            session = Session(cluster, hosts=[Host("127.0.0.1", SimpleConvictionPolicy)])
            for attr, value in (('default_timeout', 1),
                                ('default_consistency_level', ConsistencyLevel.ANY),
                                ('default_serial_consistency_level', ConsistencyLevel.SERIAL),
                                ('row_factory', tuple_factory)):
                self.assertRaises(ValueError, setattr, session, attr, value)
Example #21
    def test_delay_can_be_0(self):
        """
        Test to validate that the delay can be zero for the ConstantSpeculativeExecutionPolicy
        @since 3.13
        @jira_ticket PYTHON-836
        @expected_result all the queries are executed immediately
        @test_category policy
        """
        number_of_requests = 4
        spec = ExecutionProfile(
            speculative_execution_policy=ConstantSpeculativeExecutionPolicy(
                0, number_of_requests))

        cluster = Cluster()
        cluster.add_execution_profile("spec", spec)
        session = cluster.connect(wait_for_all_pools=True)
        self.addCleanup(cluster.shutdown)

        counter = count()

        def patch_and_count(f):
            def patched(*args, **kwargs):
                next(counter)
                f(*args, **kwargs)

            return patched

        ResponseFuture._on_speculative_execute = patch_and_count(
            ResponseFuture._on_speculative_execute)
        stmt = SimpleStatement("INSERT INTO test3rf.test(k, v) VALUES (1, 2)")
        stmt.is_idempotent = True
        results = session.execute(stmt, execution_profile="spec")
        self.assertEqual(len(results.response_future.attempted_hosts), 3)
        self.assertEqual(next(counter), number_of_requests)
Example #22
File: conftest.py  Project: psarna/scylla
def cql(request):
    profile = ExecutionProfile(
        load_balancing_policy=RoundRobinPolicy(),
        consistency_level=ConsistencyLevel.LOCAL_QUORUM,
        serial_consistency_level=ConsistencyLevel.LOCAL_SERIAL,
        # The default timeout (in seconds) for execute() commands is 10, which
        # should have been more than enough, but in some extreme cases with a
        # very slow debug build running on a very busy machine and a very slow
        # request (e.g., a DROP KEYSPACE needing to drop multiple tables)
        # 10 seconds may not be enough, so let's increase it. See issue #7838.
        request_timeout=120)
    if request.config.getoption('ssl'):
        # Scylla does not support any earlier TLS protocol. If you try,
        # you will get mysterious EOF errors (see issue #6971) :-(
        ssl_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)
    else:
        ssl_context = None
    cluster = Cluster(
        execution_profiles={EXEC_PROFILE_DEFAULT: profile},
        contact_points=[request.config.getoption('host')],
        port=request.config.getoption('port'),
        # TODO: make the protocol version an option, to allow testing with
        # different versions. If we drop this setting completely, it will
        # mean pick the latest version supported by the client and the server.
        protocol_version=4,
        # Use the default superuser credentials, which work for both Scylla and Cassandra
        auth_provider=PlainTextAuthProvider(username='******',
                                            password='******'),
        ssl_context=ssl_context,
    )
    return cluster.connect()
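A hypothetical pytest test consuming the fixture above; every execute() call inherits the profile's LOCAL_QUORUM consistency level and 120-second request timeout:

def test_release_version(cql):
    # system.local has exactly one row; .one() returns it as a Row.
    row = cql.execute("SELECT release_version FROM system.local").one()
    assert row.release_version is not None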
Example #23
    def test_statement_params_override_legacy(self):
        cluster = Cluster(
            load_balancing_policy=RoundRobinPolicy(),
            default_retry_policy=DowngradingConsistencyRetryPolicy())
        self.assertEqual(cluster._config_mode, _ConfigMode.LEGACY)
        session = Session(cluster,
                          hosts=[Host("127.0.0.1", SimpleConvictionPolicy)])

        ss = SimpleStatement("query",
                             retry_policy=DowngradingConsistencyRetryPolicy(),
                             consistency_level=ConsistencyLevel.ALL,
                             serial_consistency_level=ConsistencyLevel.SERIAL)
        my_timeout = 1.1234

        self.assertNotEqual(ss.retry_policy.__class__,
                            cluster.default_retry_policy)
        self.assertNotEqual(ss.consistency_level,
                            session.default_consistency_level)
        self.assertNotEqual(ss._serial_consistency_level,
                            session.default_serial_consistency_level)
        self.assertNotEqual(my_timeout, session.default_timeout)

        rf = session.execute_async(ss, timeout=my_timeout)
        expected_profile = ExecutionProfile(
            load_balancing_policy=cluster.load_balancing_policy,
            retry_policy=ss.retry_policy,
            request_timeout=my_timeout,
            consistency_level=ss.consistency_level,
            serial_consistency_level=ss._serial_consistency_level)
        self._verify_response_future_profile(rf, expected_profile)
Example #24
    def _cluster_session_with_lbp(self, lbp):
        # create a cluster with no delay on events

        cluster = TestCluster(topology_event_refresh_window=0, status_event_refresh_window=0,
                              execution_profiles={EXEC_PROFILE_DEFAULT: ExecutionProfile(load_balancing_policy=lbp)})
        session = cluster.connect()
        return cluster, session
Example #25
    def _test_downgrading_cl(self, keyspace, rf, accepted):
        cluster = Cluster(protocol_version=PROTOCOL_VERSION,
                          execution_profiles={EXEC_PROFILE_DEFAULT: ExecutionProfile(TokenAwarePolicy(RoundRobinPolicy()),
                                                                                     DowngradingConsistencyRetryPolicy())})
        session = cluster.connect(wait_for_all_pools=True)

        create_schema(cluster, session, keyspace, replication_factor=rf)
        self._insert(session, keyspace, 1)
        self._query(session, keyspace, 1)
        self.coordinator_stats.assert_query_count_equals(self, 1, 0)
        self.coordinator_stats.assert_query_count_equals(self, 2, 1)
        self.coordinator_stats.assert_query_count_equals(self, 3, 0)

        try:
            force_stop(2)
            wait_for_down(cluster, 2)

            self._assert_writes_succeed(session, keyspace, accepted)
            self._assert_reads_succeed(session, keyspace,
                                       accepted - set([ConsistencyLevel.ANY]))
            self._assert_writes_fail(session, keyspace,
                                     SINGLE_DC_CONSISTENCY_LEVELS - accepted)
            self._assert_reads_fail(session, keyspace,
                                    SINGLE_DC_CONSISTENCY_LEVELS - accepted)
        finally:
            start(2)
            wait_for_up(cluster, 2)

        cluster.shutdown()
Example #26
    def test_can_write_speculative(self):
        """
        Verify that the driver keeps querying C* even if a host is down while being
        upgraded, and that all the writes eventually succeed using the
        ConstantSpeculativeExecutionPolicy
        @since 3.12
        @jira_ticket PYTHON-546
        @expected_result all the writes succeed

        @test_category upgrade
        """
        spec_ep_rr = ExecutionProfile(speculative_execution_policy=ConstantSpeculativeExecutionPolicy(.5, 10),
                                      request_timeout=12)
        cluster = Cluster()
        self.addCleanup(cluster.shutdown)
        cluster.add_execution_profile("spec_ep_rr", spec_ep_rr)

        session = cluster.connect()

        self.start_upgrade(0)

        c = count()
        while not self.is_upgraded():
            session.execute("INSERT INTO test3rf.test(k, v) VALUES (%s, 0)", (next(c),),
                                 execution_profile='spec_ep_rr')
            time.sleep(0.0001)

        session.default_consistency_level = ConsistencyLevel.ALL
        total_number_of_inserted = session.execute("SELECT COUNT(*) from test3rf.test")[0][0]
        self.assertEqual(total_number_of_inserted, next(c))

        self.assertEqual(self.logger_handler.get_message_count("error", ""), 0)
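Note that the driver only schedules speculative executions for statements marked idempotent; a sketch of an explicitly idempotent prepared insert against the same test3rf.test table:

stmt = session.prepare("INSERT INTO test3rf.test (k, v) VALUES (?, 0)")
stmt.is_idempotent = True  # required before the driver will speculate
session.execute(stmt, (1,), execution_profile='spec_ep_rr')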
Example #27
    def __init__(self, ip_addresses, cassandra_config):
        self._ip_addresses = ip_addresses
        self._auth_provider = None
        self._ssl_context = None
        self._cassandra_config = cassandra_config

        if cassandra_config.cql_username is not None and cassandra_config.cql_password is not None:
            auth_provider = PlainTextAuthProvider(
                username=cassandra_config.cql_username,
                password=cassandra_config.cql_password)
            self._auth_provider = auth_provider

        if cassandra_config.certfile is not None and cassandra_config.usercert is not None and \
           cassandra_config.userkey is not None:
            ssl_context = SSLContext(PROTOCOL_TLSv1)
            ssl_context.load_verify_locations(cassandra_config.certfile)
            ssl_context.verify_mode = CERT_REQUIRED
            ssl_context.load_cert_chain(certfile=cassandra_config.usercert,
                                        keyfile=cassandra_config.userkey)
            self._ssl_context = ssl_context

        load_balancing_policy = WhiteListRoundRobinPolicy(ip_addresses)
        self._execution_profiles = {
            'local':
            ExecutionProfile(load_balancing_policy=load_balancing_policy)
        }
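A sketch (the method name is hypothetical) of how these stored attributes would typically be combined into a connection routed through the 'local' whitelist profile:

    def _connect(self):
        # Hypothetical companion method: wire the stored auth/SSL settings and
        # the 'local' whitelist profile into a Cluster and open a session.
        cluster = Cluster(self._ip_addresses,
                          auth_provider=self._auth_provider,
                          ssl_context=self._ssl_context,
                          execution_profiles=self._execution_profiles)
        session = cluster.connect()
        # route statements through the whitelist LBP defined above
        session.execute("SELECT now() FROM system.local",
                        execution_profile='local')
        return session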
Example #28
    def __init__(self, TESTING=None):
        self.table = None
        profiles = None
        try:
            # Set the environment variables within your virtual environment
            # having the following names:
            # For production:
            #               KEYSPACE='key1'
            #               CONTACT_POINTS='127.0.0.1'
            #
            # For testing:
            #               TEST_KEYSPACE='key2'
            #               TEST_CONTACT_POINTS='127.0.0.1'
            # Activate the environment variables based on whether TESTING is
            # True or False
            if TESTING is None:
                self._keyspace = os.environ['KEYSPACE']
                self._contact_points = os.environ['CONTACT_POINTS'].split(",")
            else:
                self._keyspace = os.environ['TEST_KEYSPACE']
                self._contact_points = os.environ['TEST_CONTACT_POINTS'].split(
                    ",")
        except KeyError as err:
            print("KEY ERROR: ", err)
        else:
            policy = ExecutionProfile(
                load_balancing_policy=DCAwareRoundRobinPolicy())
            # register the profile as the default so the LBP is actually used
            profiles = {EXEC_PROFILE_DEFAULT: policy}
            self._cluster = Cluster(self._contact_points,
                                    execution_profiles=profiles)
            self._session = self._cluster.connect(self._keyspace)
Example #29
    def connect(
        self, event: EventBase, username: str = ROOT_USER, password: Optional[str] = None
    ) -> Session:
        """Context manager to connect to the Cassandra cluster and return an active session.

        Args:
            event: The current event object
            username: username to connect with
            password: password to connect with

        Returns:
            A cassandra session
        """
        if password is None:
            password = self.root_password(event)
        auth_provider = PlainTextAuthProvider(username=username, password=password)
        profile = ExecutionProfile(load_balancing_policy=RoundRobinPolicy())
        cluster = Cluster(
            [self.charm._hostname()],
            port=CQL_PORT,
            auth_provider=auth_provider,
            execution_profiles={EXEC_PROFILE_DEFAULT: profile},
            protocol_version=CQL_PROTOCOL_VERSION,
        )
        try:
            session = cluster.connect()
            yield session
        finally:
            cluster.shutdown()
Example #30
    def test_clone_shared_lbp(self):
        """
        Tests that profile load balancing policies are shared on clone

        Creates one LBP, clones it, and ensures that the LBP is shared between
        the two EPs

        @since 3.5
        @jira_ticket PYTHON-569
        @expected_result LBP is shared

        @test_category config_profiles
        """
        query = "select release_version from system.local"
        rr1 = ExecutionProfile(load_balancing_policy=RoundRobinPolicy())
        exec_profiles = {'rr1': rr1}
        with Cluster(execution_profiles=exec_profiles) as cluster:
            session = cluster.connect()
            rr1_clone = session.execution_profile_clone_update(
                'rr1', row_factory=tuple_factory)
            cluster.add_execution_profile("rr1_clone", rr1_clone)
            rr1_queried_hosts = set()
            rr1_clone_queried_hosts = set()
            rs = session.execute(query, execution_profile='rr1')
            rr1_queried_hosts.add(rs.response_future._current_host)
            rs = session.execute(query, execution_profile='rr1_clone')
            rr1_clone_queried_hosts.add(rs.response_future._current_host)
            self.assertNotEqual(rr1_clone_queried_hosts, rr1_queried_hosts)