Example #1
    def test_can_write_speculative(self):
        """
        Verify that the driver keeps querying C* even while a host is down for an upgrade,
        and that all the writes eventually succeed when using ConstantSpeculativeExecutionPolicy
        @since 3.12
        @jira_ticket PYTHON-546
        @expected_result all the writes succeed

        @test_category upgrade
        """
        spec_ep_rr = ExecutionProfile(speculative_execution_policy=ConstantSpeculativeExecutionPolicy(.5, 10),
                                      request_timeout=12)
        cluster = Cluster()
        self.addCleanup(cluster.shutdown)
        cluster.add_execution_profile("spec_ep_rr", spec_ep_rr)

        session = cluster.connect()

        self.start_upgrade(0)

        c = count()
        while not self.is_upgraded():
            session.execute("INSERT INTO test3rf.test(k, v) VALUES (%s, 0)", (next(c),),
                                 execution_profile='spec_ep_rr')
            time.sleep(0.0001)

        session.default_consistency_level = ConsistencyLevel.ALL
        total_number_of_inserted = session.execute("SELECT COUNT(*) from test3rf.test")[0][0]
        self.assertEqual(total_number_of_inserted, next(c))

        self.assertEqual(self.logger_handler.get_message_count("error", ""), 0)
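For reference, the two positional arguments given to ConstantSpeculativeExecutionPolicy above are the constant delay between speculative attempts, in seconds, and the maximum number of speculative attempts per query; the driver only sends speculative requests for statements it knows to be idempotent, which is why Examples #3 and #12 set stmt.is_idempotent = True. A more aggressive profile, with illustrative values of my own choosing, would look like this:

from cassandra.cluster import ExecutionProfile
from cassandra.policies import ConstantSpeculativeExecutionPolicy

spec_ep_fast = ExecutionProfile(
    # 100 ms between speculative attempts, at most two speculative attempts per query
    speculative_execution_policy=ConstantSpeculativeExecutionPolicy(0.1, 2),
    request_timeout=12)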
Example #2
class cassandra:

    __cluster = None
    __session = None

    def __init__(self,
                 nodes=[
                     'f0c4.company', 'f0g5.company', 'r1c4.company',
                     'r1g5.company'
                 ],
                 keyspace='company_cg',
                 executor_threads=8,
                 request_timeout=10):
        self.__cluster = Cluster(nodes,
                                 connect_timeout=10,
                                 control_connection_timeout=10,
                                 executor_threads=executor_threads)
        self.__cluster.add_execution_profile(
            'default', ExecutionProfile(request_timeout=request_timeout))
        # Replace the default read-timeout handling with a custom handler
        # (retry_policy__on_read_timeout is defined elsewhere in the original module).
        self.__cluster.default_retry_policy.on_read_timeout = retry_policy__on_read_timeout
        self.__session = self.__cluster.connect(keyspace)

    def session(self):
        return self.__session

    def __del__(self):
        self.shutdown()

    def shutdown(self):
        if self.__cluster:
            self.__cluster.shutdown()
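For illustration, a minimal usage sketch of the wrapper above; the query is an arbitrary example of mine, and it assumes the default contact points and the 'company_cg' keyspace from __init__ are reachable:

db = cassandra()
row = db.session().execute("SELECT release_version FROM system.local").one()
print(row.release_version)
db.shutdown()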
Example #3
    def test_delay_can_be_0(self):
        """
        Test to validate that the delay can be zero for the ConstantSpeculativeExecutionPolicy
        @since 3.13
        @jira_ticket PYTHON-836
        @expected_result all the queries are executed immediately
        @test_category policy
        """
        number_of_requests = 4
        spec = ExecutionProfile(
            speculative_execution_policy=ConstantSpeculativeExecutionPolicy(
                0, number_of_requests))

        cluster = Cluster()
        cluster.add_execution_profile("spec", spec)
        session = cluster.connect(wait_for_all_pools=True)
        self.addCleanup(cluster.shutdown)

        counter = count()

        def patch_and_count(f):
            def patched(*args, **kwargs):
                next(counter)
                f(*args, **kwargs)

            return patched

        ResponseFuture._on_speculative_execute = patch_and_count(
            ResponseFuture._on_speculative_execute)
        stmt = SimpleStatement("INSERT INTO test3rf.test(k, v) VALUES (1, 2)")
        stmt.is_idempotent = True
        results = session.execute(stmt, execution_profile="spec")
        self.assertEqual(len(results.response_future.attempted_hosts), 3)
        self.assertEqual(next(counter), number_of_requests)
Example #4
    def test_node_busy(self):
        """ Verify that once TCP buffer is full, queries continue to get re-routed to other nodes """
        start_and_prime_singledc()
        profile = ExecutionProfile(load_balancing_policy=RoundRobinPolicy())
        cluster = Cluster(
            protocol_version=PROTOCOL_VERSION,
            compression=False,
            execution_profiles={EXEC_PROFILE_DEFAULT: profile},
        )
        session = cluster.connect(wait_for_all_pools=True)
        self.addCleanup(cluster.shutdown)

        query = session.prepare("INSERT INTO table1 (id) VALUES (?)")

        prime_request(PauseReads(dc_id=0, node_id=0))

        blocked_profile = ExecutionProfile(
            load_balancing_policy=WhiteListRoundRobinPolicy(["127.0.0.1"]))
        cluster.add_execution_profile('blocked_profile', blocked_profile)

        # Fill our blocked node's tcp buffer until we get a busy exception
        self._fill_buffers(session,
                           query,
                           expected_blocked=1,
                           execution_profile='blocked_profile')

        # Now that our send buffer is completely full on one node,
        # verify queries get re-routed to other nodes and queries complete successfully
        for i in range(1000):
            session.execute(query, [str(i)])
Example #6
    def test_no_warning_adding_lbp_ep_to_cluster_with_contact_points(self):
        ep_with_lbp = ExecutionProfile(load_balancing_policy=object())
        cluster = Cluster(
            contact_points=['127.0.0.1'],
            execution_profiles={EXEC_PROFILE_DEFAULT: ep_with_lbp})
        with patch('cassandra.cluster.log') as patched_logger:
            cluster.add_execution_profile(
                name='with_lbp',
                profile=ExecutionProfile(load_balancing_policy=Mock(name='lbp'))
            )

        patched_logger.warning.assert_not_called()
Example #7
    def test_no_profiles_same_name(self):
        # can override default in init
        cluster = Cluster(execution_profiles={EXEC_PROFILE_DEFAULT: ExecutionProfile(), 'one': ExecutionProfile()})

        # cannot update default
        self.assertRaises(ValueError, cluster.add_execution_profile, EXEC_PROFILE_DEFAULT, ExecutionProfile())

        # cannot update named init
        self.assertRaises(ValueError, cluster.add_execution_profile, 'one', ExecutionProfile())

        # can add new name
        cluster.add_execution_profile('two', ExecutionProfile())

        # cannot add a profile added dynamically
        self.assertRaises(ValueError, cluster.add_execution_profile, 'two', ExecutionProfile())
Example #8
    def test_warning_adding_no_lbp_ep_to_cluster_with_contact_points(self):
        ep_with_lbp = ExecutionProfile(load_balancing_policy=object())
        cluster = Cluster(
            contact_points=['127.0.0.1'],
            execution_profiles={EXEC_PROFILE_DEFAULT: ep_with_lbp})
        with patch('cassandra.cluster.log') as patched_logger:
            cluster.add_execution_profile(name='no_lbp',
                                          profile=ExecutionProfile())

        patched_logger.warning.assert_called_once()
        warning_message = patched_logger.warning.call_args[0][0]
        self.assertIn('no_lbp', warning_message)
        self.assertIn('trying to add', warning_message)
        self.assertIn('please specify a load-balancing policy',
                      warning_message)
Example #10
    def test_no_legacy_with_profile(self):
        cluster_init = Cluster(execution_profiles={'name': ExecutionProfile()})
        cluster_add = Cluster()
        cluster_add.add_execution_profile('name', ExecutionProfile())
        # for clusters with profiles added either way...
        for cluster in (cluster_init, cluster_add):
            # don't allow legacy parameters to be set
            for attr, value in (('default_retry_policy', RetryPolicy()),
                                ('load_balancing_policy', default_lbp_factory())):
                self.assertRaises(ValueError, setattr, cluster, attr, value)
            session = Session(cluster, hosts=[Host("127.0.0.1", SimpleConvictionPolicy)])
            for attr, value in (('default_timeout', 1),
                                ('default_consistency_level', ConsistencyLevel.ANY),
                                ('default_serial_consistency_level', ConsistencyLevel.SERIAL),
                                ('row_factory', tuple_factory)):
                self.assertRaises(ValueError, setattr, session, attr, value)
Example #11
def main():
    xact_file_dir = sys.argv[1]
    xact_id = sys.argv[2]
    consistency_level_string = sys.argv[3]
    client_summary_file_dir = sys.argv[4]

    consistency_level = ConsistencyLevel.LOCAL_ONE
    if consistency_level_string == "ONE":
        consistency_level = ConsistencyLevel.ONE
    elif consistency_level_string == "QUORUM":
        consistency_level = ConsistencyLevel.QUORUM

    cluster = Cluster(control_connection_timeout=None)
    profile = ExecutionProfile(consistency_level=consistency_level)
    cluster.add_execution_profile("client", profile)
    session = cluster.connect("wholesaler")
    run_xacts(session, xact_file_dir, xact_id, client_summary_file_dir)
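As a side note, the if/elif mapping above can be collapsed into a single getattr lookup, since the consistency level names are plain class attributes on ConsistencyLevel; this sketch assumes the command-line string matches an attribute name exactly:

consistency_level = getattr(ConsistencyLevel, consistency_level_string,
                            ConsistencyLevel.LOCAL_ONE)  # fall back to LOCAL_ONE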
Example #12
    def test_delay_can_be_0(self):
        """
        Test to validate that the delay can be zero for the ConstantSpeculativeExecutionPolicy
        @since 3.13
        @jira_ticket PYTHON-836
        @expected_result all the queries are executed immediately
        @test_category policy
        """
        query_to_prime = "INSERT INTO madeup_keyspace.madeup_table(k, v) VALUES (1, 2)"
        prime_query(query_to_prime, then={"delay_in_ms": 5000})
        number_of_requests = 4
        spec = ExecutionProfile(
            load_balancing_policy=RoundRobinPolicy(),
            speculative_execution_policy=ConstantSpeculativeExecutionPolicy(
                0, number_of_requests))

        cluster = Cluster()
        cluster.add_execution_profile("spec", spec)
        session = cluster.connect(wait_for_all_pools=True)
        self.addCleanup(cluster.shutdown)

        counter = count()

        def patch_and_count(f):
            def patched(*args, **kwargs):
                next(counter)
                print("patched")
                f(*args, **kwargs)

            return patched

        self.addCleanup(setattr, ResponseFuture, "send_request",
                        ResponseFuture.send_request)
        ResponseFuture.send_request = patch_and_count(
            ResponseFuture.send_request)
        stmt = SimpleStatement(query_to_prime)
        stmt.is_idempotent = True
        results = session.execute(stmt, execution_profile="spec")
        self.assertEqual(len(results.response_future.attempted_hosts), 3)

        # send_request is called number_of_requests times for the speculative request
        # plus one for the call from the main thread.
        self.assertEqual(next(counter), number_of_requests + 1)
Example #14
class CloudTests(CloudProxyCluster):
    def hosts_up(self):
        return [h for h in self.cluster.metadata.all_hosts() if h.is_up]

    def test_resolve_and_connect(self):
        self.connect(self.creds)

        self.assertEqual(len(self.hosts_up()), 3)
        for host in self.cluster.metadata.all_hosts():
            self.assertTrue(host.is_up)
            self.assertIsInstance(host.endpoint, SniEndPoint)
            self.assertEqual(
                str(host.endpoint),
                "{}:{}:{}".format(host.endpoint.address, host.endpoint.port,
                                  host.host_id))
            self.assertIn(host.endpoint._resolved_address,
                          ("127.0.0.1", '::1'))

    def test_match_system_local(self):
        self.connect(self.creds)

        self.assertEqual(len(self.hosts_up()), 3)
        for host in self.cluster.metadata.all_hosts():
            row = self.session.execute('SELECT * FROM system.local',
                                       host=host).one()
            self.assertEqual(row.host_id, host.host_id)
            self.assertEqual(row.rpc_address, host.broadcast_rpc_address)

    def test_set_auth_provider(self):
        self.connect(self.creds)
        self.assertIsInstance(self.cluster.auth_provider,
                              PlainTextAuthProvider)
        self.assertEqual(self.cluster.auth_provider.username, 'user1')
        self.assertEqual(self.cluster.auth_provider.password, 'user1')

    def test_support_leaving_the_auth_unset(self):
        with self.assertRaises(NoHostAvailable):
            self.connect(self.creds_no_auth)
        self.assertIsNone(self.cluster.auth_provider)

    def test_support_overriding_auth_provider(self):
        try:
            self.connect(self.creds,
                         auth_provider=PlainTextAuthProvider(
                             'invalid', 'invalid'))
        except:
            pass  # this will fail soon when sni_single_endpoint is updated
        self.assertIsInstance(self.cluster.auth_provider,
                              PlainTextAuthProvider)
        self.assertEqual(self.cluster.auth_provider.username, 'invalid')
        self.assertEqual(self.cluster.auth_provider.password, 'invalid')

    def test_error_overriding_ssl_context(self):
        with self.assertRaises(ValueError) as cm:
            self.connect(self.creds, ssl_context=SSLContext(PROTOCOL_TLSv1))

        self.assertIn('cannot be specified with a cloud configuration',
                      str(cm.exception))

    def test_error_overriding_ssl_options(self):
        with self.assertRaises(ValueError) as cm:
            self.connect(self.creds, ssl_options={'check_hostname': True})

        self.assertIn('cannot be specified with a cloud configuration',
                      str(cm.exception))

    def _bad_hostname_metadata(self, config, http_data):
        config = parse_metadata_info(config, http_data)
        config.sni_host = "127.0.0.1"
        return config

    def test_verify_hostname(self):
        with patch('cassandra.datastax.cloud.parse_metadata_info',
                   wraps=self._bad_hostname_metadata):
            with self.assertRaises(NoHostAvailable) as e:
                self.connect(self.creds)
            self.assertIn("hostname", str(e.exception).lower())

    def test_error_when_bundle_doesnt_exist(self):
        try:
            self.connect('/invalid/path/file.zip')
        except Exception as e:
            if six.PY2:
                self.assertIsInstance(e, IOError)
            else:
                self.assertIsInstance(e, FileNotFoundError)

    def test_load_balancing_policy_is_dcawaretokenlbp(self):
        self.connect(self.creds)
        self.assertIsInstance(
            self.cluster.profile_manager.default.load_balancing_policy,
            TokenAwarePolicy)
        self.assertIsInstance(
            self.cluster.profile_manager.default.load_balancing_policy.
            _child_policy, DCAwareRoundRobinPolicy)

    def test_resolve_and_reconnect_on_node_down(self):

        self.connect(self.creds,
                     idle_heartbeat_interval=1,
                     idle_heartbeat_timeout=1,
                     reconnection_policy=ConstantReconnectionPolicy(120))

        self.assertEqual(len(self.hosts_up()), 3)
        CLOUD_PROXY_SERVER.stop_node(1)
        wait_until_not_raised(
            lambda: self.assertEqual(len(self.hosts_up()), 2), 0.02, 250)

        host = [h for h in self.cluster.metadata.all_hosts() if not h.is_up][0]
        with patch.object(SniEndPoint, "resolve",
                          wraps=host.endpoint.resolve) as mocked_resolve:
            CLOUD_PROXY_SERVER.start_node(1)
            wait_until_not_raised(
                lambda: self.assertEqual(len(self.hosts_up()), 3), 0.02, 250)
            mocked_resolve.assert_called_once()

    def test_metadata_unreachable(self):
        with self.assertRaises(DriverException) as cm:
            self.connect(self.creds_unreachable, connect_timeout=1)

        self.assertIn('Unable to connect to the metadata service',
                      str(cm.exception))

    def test_metadata_ssl_error(self):
        with self.assertRaises(DriverException) as cm:
            self.connect(self.creds_invalid_ca)

        self.assertIn('Unable to connect to the metadata', str(cm.exception))

    def test_default_consistency(self):
        self.connect(self.creds)
        self.assertEqual(self.session.default_consistency_level,
                         ConsistencyLevel.LOCAL_QUORUM)
        # Verify EXEC_PROFILE_DEFAULT, EXEC_PROFILE_GRAPH_DEFAULT,
        # EXEC_PROFILE_GRAPH_SYSTEM_DEFAULT, EXEC_PROFILE_GRAPH_ANALYTICS_DEFAULT
        for ep_key in six.iterkeys(self.cluster.profile_manager.profiles):
            ep = self.cluster.profile_manager.profiles[ep_key]
            self.assertEqual(
                ep.consistency_level, ConsistencyLevel.LOCAL_QUORUM,
                "Expecting LOCAL QUORUM for profile {}, but got {} instead".
                format(_execution_profile_to_string(ep_key),
                       ConsistencyLevel.value_to_name[ep.consistency_level]))

    def test_default_consistency_of_execution_profiles(self):
        cloud_config = {'secure_connect_bundle': self.creds}
        self.cluster = Cluster(
            cloud=cloud_config,
            protocol_version=4,
            execution_profiles={
                'pre_create_default_ep':
                ExecutionProfile(),
                'pre_create_changed_ep':
                ExecutionProfile(
                    consistency_level=ConsistencyLevel.LOCAL_ONE, ),
            })
        self.cluster.add_execution_profile('pre_connect_default_ep',
                                           ExecutionProfile())
        self.cluster.add_execution_profile(
            'pre_connect_changed_ep',
            ExecutionProfile(consistency_level=ConsistencyLevel.LOCAL_ONE, ))
        session = self.cluster.connect(wait_for_all_pools=True)

        self.cluster.add_execution_profile('post_connect_default_ep',
                                           ExecutionProfile())
        self.cluster.add_execution_profile(
            'post_connect_changed_ep',
            ExecutionProfile(consistency_level=ConsistencyLevel.LOCAL_ONE, ))

        for default in [
                'pre_create_default_ep', 'pre_connect_default_ep',
                'post_connect_default_ep'
        ]:
            cl = self.cluster.profile_manager.profiles[
                default].consistency_level
            self.assertEqual(
                cl, ConsistencyLevel.LOCAL_QUORUM,
                "Expecting LOCAL QUORUM for profile {}, but got {} instead".
                format(default, cl))
        for changed in [
                'pre_create_changed_ep', 'pre_connect_changed_ep',
                'post_connect_changed_ep'
        ]:
            cl = self.cluster.profile_manager.profiles[
                changed].consistency_level
            self.assertEqual(
                cl, ConsistencyLevel.LOCAL_ONE,
                "Expecting LOCAL ONE for profile {}, but got {} instead".
                format(changed, cl))

    def test_consistency_guardrails(self):
        self.connect(self.creds)
        self.session.execute(
            "CREATE KEYSPACE IF NOT EXISTS test_consistency_guardrails "
            "with replication={'class': 'SimpleStrategy', 'replication_factor': 1}"
        )
        self.session.execute(
            "CREATE TABLE IF NOT EXISTS test_consistency_guardrails.guardrails (id int primary key)"
        )
        for consistency in DISALLOWED_CONSISTENCIES:
            statement = SimpleStatement(
                "INSERT INTO test_consistency_guardrails.guardrails (id) values (1)",
                consistency_level=consistency)
            with self.assertRaises(InvalidRequest) as e:
                self.session.execute(statement)
            self.assertIn('not allowed for Write Consistency Level',
                          str(e.exception))

        # Sanity check to make sure we can do a normal insert
        statement = SimpleStatement(
            "INSERT INTO test_consistency_guardrails.guardrails (id) values (1)",
            consistency_level=ConsistencyLevel.LOCAL_QUORUM)
        try:
            self.session.execute(statement)
        except InvalidRequest:
            self.fail(
                "InvalidRequest was incorrectly raised for write query at LOCAL QUORUM!"
            )
Example #15
    def test_graph_profile(self):
        """
            Test verifying various aspects of graph config properties.

            @since 3.20
            @jira_ticket PYTHON-570

            @test_category dse graph
            """
        hosts = self.cluster.metadata.all_hosts()
        first_host = hosts[0].address
        second_hosts = "1.2.3.4"

        generate_classic(self.session)
        # Create various execution profiles
        exec_dif_factory = GraphExecutionProfile(
            row_factory=single_object_row_factory)
        exec_dif_factory.graph_options.graph_name = self.graph_name
        exec_dif_lbp = GraphExecutionProfile(
            load_balancing_policy=WhiteListRoundRobinPolicy([first_host]))
        exec_dif_lbp.graph_options.graph_name = self.graph_name
        exec_bad_lbp = GraphExecutionProfile(
            load_balancing_policy=WhiteListRoundRobinPolicy([second_hosts]))
        exec_bad_lbp.graph_options.graph_name = self.graph_name
        exec_short_timeout = GraphExecutionProfile(
            request_timeout=1,
            load_balancing_policy=WhiteListRoundRobinPolicy([first_host]))
        exec_short_timeout.graph_options.graph_name = self.graph_name

        # Add a single execution profile on cluster creation
        local_cluster = Cluster(
            protocol_version=PROTOCOL_VERSION,
            execution_profiles={"exec_dif_factory": exec_dif_factory})
        local_session = local_cluster.connect()
        self.addCleanup(local_cluster.shutdown)

        rs1 = self.session.execute_graph('g.V()')
        rs2 = local_session.execute_graph('g.V()',
                                          execution_profile='exec_dif_factory')

        # Verify that both the default and the non-default profile work
        self.assertFalse(isinstance(rs2[0], Vertex))
        self.assertTrue(isinstance(rs1[0], Vertex))
        # Add other profiles and validate that their LBPs are honored
        local_cluster.add_execution_profile("exec_dif_ldp", exec_dif_lbp)
        local_session.execute_graph('g.V()', execution_profile="exec_dif_ldp")
        local_cluster.add_execution_profile("exec_bad_lbp", exec_bad_lbp)
        with self.assertRaises(NoHostAvailable):
            local_session.execute_graph('g.V()',
                                        execution_profile="exec_bad_lbp")

        # Try with missing EP
        with self.assertRaises(ValueError):
            local_session.execute_graph('g.V()',
                                        execution_profile='bad_exec_profile')

        # Validate that timeout is honored
        local_cluster.add_execution_profile("exec_short_timeout",
                                            exec_short_timeout)
        with self.assertRaises((InvalidRequest, OperationTimedOut)):
            local_session.execute_graph(
                'java.util.concurrent.TimeUnit.MILLISECONDS.sleep(2000L);',
                execution_profile='exec_short_timeout')