Example no. 1
    def test_connect_no_auth_provider(self):
        cluster = Cluster(protocol_version=PROTOCOL_VERSION)
        self.assertRaisesRegexp(NoHostAvailable,
                                '.*AuthenticationFailed.*Remote end requires authentication.*',
                                cluster.connect)
        assert_quiescent_pool_state(self, cluster)
        cluster.shutdown()
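
Note: all of the examples on this page exercise assert_quiescent_pool_state, a helper from the DataStax Python driver's integration-test utilities. Roughly, it asserts that every connection pool has settled after the preceding operations: nothing in flight and all request (stream) ids returned. Below is a minimal sketch of such a check, not the driver's actual helper; it assumes Session.get_pool_state() and the Cluster.sessions / get_connection_holders() accessors that also appear in the examples further down.

import time

def assert_quiescent_pool_state_sketch(test_case, cluster, wait=None):
    # Hypothetical approximation of the test helper used in these examples.
    if wait:
        time.sleep(wait)

    # Every session should report open, non-shutdown pools with zero in-flight requests.
    for session in cluster.sessions:
        for state in session.get_pool_state().values():
            test_case.assertFalse(state['shutdown'])
            test_case.assertGreater(state['open_count'], 0)
            test_case.assertTrue(all(n == 0 for n in state['in_flights']))

    # Every connection (session pools and the control connection) should have
    # all of its request ids back and nothing outstanding.
    for holder in cluster.get_connection_holders():
        for connection in holder.get_connections():
            with connection.lock:
                request_ids = list(connection.request_ids)
            test_case.assertEqual(len(request_ids), len(set(request_ids)))  # no duplicates
            test_case.assertEqual(connection.in_flight, 0)
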
Example no. 2
    def test_pool_management(self):
        # Ensure that in_flight and request_ids quiesce after cluster operations
        cluster = Cluster(
            protocol_version=PROTOCOL_VERSION, idle_heartbeat_interval=0
        )  # no idle heartbeat here, pool management is tested in test_idle_heartbeat
        session = cluster.connect()
        session2 = cluster.connect()

        # prepare
        p = session.prepare("SELECT * FROM system.local WHERE key=?")
        self.assertTrue(session.execute(p, ('local', )))

        # simple
        self.assertTrue(
            session.execute("SELECT * FROM system.local WHERE key='local'"))

        # set keyspace
        session.set_keyspace('system')
        session.set_keyspace('system_traces')

        # use keyspace
        session.execute('USE system')
        session.execute('USE system_traces')

        # refresh schema
        cluster.refresh_schema_metadata()
        cluster.refresh_schema_metadata(max_schema_agreement_wait=0)

        assert_quiescent_pool_state(self, cluster)

        cluster.shutdown()
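
The in_flight / request_ids quiescence that the comment above refers to can also be inspected directly. A small sketch under the same assumptions (Session.get_pool_state() returning per-host dicts with 'open_count' and 'in_flights' keys; the prints are purely illustrative):

cluster = Cluster(protocol_version=PROTOCOL_VERSION, idle_heartbeat_interval=0)
session = cluster.connect()
session.execute("SELECT * FROM system.local WHERE key='local'")

for host, state in session.get_pool_state().items():
    # 'in_flights' should be all zeros once the outstanding requests have completed
    print(host, state['open_count'], state['in_flights'])

cluster.shutdown()
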
Example no. 3
    def test_callbacks_and_pool_when_oto(self):
        """
        Test to ensure the callbacks are correcltly called and the connection
        is returned when there is an OTO
        @since 3.12
        @jira_ticket PYTHON-630
        @expected_result the connection is correctly returned to the pool
        after an OTO, also the only the errback is called and not the callback
        when the message finally arrives.

        @test_category metadata
        """
        start_and_prime_singledc()

        cluster = Cluster(protocol_version=PROTOCOL_VERSION, compression=False)
        session = cluster.connect()
        self.addCleanup(cluster.shutdown)

        query_to_prime = "SELECT * from testkesypace.testtable"

        server_delay = 2  # seconds
        prime_query(query_to_prime, then={"delay_in_ms": server_delay * 1000})

        future = session.execute_async(query_to_prime, timeout=1)
        callback, errback = Mock(name='callback'), Mock(name='errback')
        future.add_callbacks(callback, errback)
        self.assertRaises(OperationTimedOut, future.result)

        assert_quiescent_pool_state(self, cluster)

        time.sleep(server_delay + 1)
        # PYTHON-630 -- only the errback should be called
        errback.assert_called_once()
        callback.assert_not_called()
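
Outside of a test, the same callback/errback split applies to any ResponseFuture returned by execute_async. A minimal sketch (the handler names are made up; session is an already-connected Session):

def on_rows(rows):
    # success path: invoked with the result rows
    for row in rows:
        print(row)

def on_error(exc):
    # failure path: invoked with the exception, e.g. an OperationTimedOut
    print("query failed:", exc)

future = session.execute_async("SELECT release_version FROM system.local")
future.add_callbacks(on_rows, on_error)
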
Example no. 4
    def test_pool_management(self):
        # Ensure that in_flight and request_ids quiesce after cluster operations
        cluster = Cluster(protocol_version=PROTOCOL_VERSION, idle_heartbeat_interval=0)  # no idle heartbeat here, pool management is tested in test_idle_heartbeat
        session = cluster.connect()
        session2 = cluster.connect()

        # prepare
        p = session.prepare("SELECT * FROM system.local WHERE key=?")
        self.assertTrue(session.execute(p, ('local',)))

        # simple
        self.assertTrue(session.execute("SELECT * FROM system.local WHERE key='local'"))

        # set keyspace
        session.set_keyspace('system')
        session.set_keyspace('system_traces')

        # use keyspace
        session.execute('USE system')
        session.execute('USE system_traces')

        # refresh schema
        cluster.refresh_schema_metadata()
        cluster.refresh_schema_metadata(max_schema_agreement_wait=0)

        # submit schema refresh
        future = cluster.submit_schema_refresh()
        future.result()

        assert_quiescent_pool_state(self, cluster)

        cluster.shutdown()
Example no. 5
    def test_auth_connect(self):
        user = '******'
        passwd = 'password'

        root_session = self.cluster_as('cassandra', 'cassandra').connect()
        root_session.execute('CREATE USER %s WITH PASSWORD %s', (user, passwd))

        try:
            cluster = self.cluster_as(user, passwd)
            session = cluster.connect(wait_for_all_pools=True)
            try:
                self.assertTrue(
                    session.execute(
                        'SELECT release_version FROM system.local'))
                assert_quiescent_pool_state(self, cluster)
                for pool in session.get_pools():
                    connection, _ = pool.borrow_connection(timeout=0)
                    self.assertEqual(
                        connection.authenticator.server_authenticator_class,
                        'org.apache.cassandra.auth.PasswordAuthenticator')
                    pool.return_connection(connection)
            finally:
                cluster.shutdown()
        finally:
            root_session.execute('DROP USER %s', user)
            assert_quiescent_pool_state(self, root_session.cluster)
            root_session.cluster.shutdown()
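
cluster_as() is a convenience method on the test class; in application code the equivalent is passing a PlainTextAuthProvider when building the Cluster, roughly as below (credentials are placeholders):

from cassandra.auth import PlainTextAuthProvider
from cassandra.cluster import Cluster

auth_provider = PlainTextAuthProvider(username='some_user', password='password')
cluster = Cluster(auth_provider=auth_provider)
session = cluster.connect(wait_for_all_pools=True)
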
Example no. 6
    def test_callbacks_and_pool_when_oto(self):
        """
        Test to ensure the callbacks are correcltly called and the connection
        is returned when there is an OTO
        @since 3.12
        @jira_ticket PYTHON-630
        @expected_result the connection is correctly returned to the pool
        after an OTO, also the only the errback is called and not the callback
        when the message finally arrives.

        @test_category metadata
        """
        start_and_prime_singledc()

        cluster = Cluster(protocol_version=PROTOCOL_VERSION, compression=False)
        session = cluster.connect()
        self.addCleanup(cluster.shutdown)

        query_to_prime = "SELECT * from testkesypace.testtable"

        server_delay = 2  # seconds
        prime_query(query_to_prime, then={"delay_in_ms": server_delay * 1000})

        future = session.execute_async(query_to_prime, timeout=1)
        callback, errback = Mock(name='callback'), Mock(name='errback')
        future.add_callbacks(callback, errback)
        self.assertRaises(OperationTimedOut, future.result)

        assert_quiescent_pool_state(self, cluster)

        time.sleep(server_delay + 1)
        # PYTHON-630 -- only the errback should be called
        errback.assert_called_once()
        callback.assert_not_called()
Example no. 7
    def test_host_is_not_set_to_down_after_query_oto(self):
        """
        Test to ensure that the connections aren't closed if there's an
        OperationTimedOut in a normal query. This should only happen from the
        heart beat thread (in the case of a OperationTimedOut) with the default
        configuration
        @since 3.12
        @expected_result the connections aren't closed nor the hosts are
        set to down

        @test_category connection
        """
        start_and_prime_singledc()

        query_to_prime = "SELECT * FROM madeup_keyspace.madeup_table"

        prime_query(query_to_prime, then=NO_THEN)

        listener = TrackDownListener()
        cluster = Cluster(compression=False)
        session = cluster.connect(wait_for_all_pools=True)
        cluster.register_listener(listener)

        futures = []
        for _ in range(10):
            future = session.execute_async(query_to_prime)
            futures.append(future)

        for f in futures:
            f._event.wait()
            self.assertIsInstance(f._final_exception, OperationTimedOut)

        self.assertEqual(listener.hosts_marked_down, [])
        assert_quiescent_pool_state(self, cluster)
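
TrackDownListener is a test utility rather than part of the driver's public API. A HostStateListener subclass along these lines would behave the same way (a sketch of what it presumably does, not the actual helper):

from cassandra.policies import HostStateListener

class TrackDownListener(HostStateListener):
    # Records every host the driver marks as down.
    def __init__(self):
        self.hosts_marked_down = []

    def on_down(self, host):
        self.hosts_marked_down.append(host)

    def on_up(self, host):
        pass

    def on_add(self, host):
        pass

    def on_remove(self, host):
        pass
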
Example no. 8
    def test_host_is_not_set_to_down_after_query_oto(self):
        """
        Test to ensure that the connections aren't closed if there's an
        OperationTimedOut in a normal query. This should only happen from the
        heart beat thread (in the case of a OperationTimedOut) with the default
        configuration
        @since 3.12
        @expected_result the connections aren't closed nor the hosts are
        set to down

        @test_category connection
        """
        start_and_prime_singledc()

        query_to_prime = "SELECT * FROM madeup_keyspace.madeup_table"

        prime_query(query_to_prime, then=NO_THEN)

        listener = TrackDownListener()
        cluster = Cluster(compression=False)
        session = cluster.connect(wait_for_all_pools=True)
        cluster.register_listener(listener)

        futures = []
        for _ in range(10):
            future = session.execute_async(query_to_prime)
            futures.append(future)

        for f in futures:
            f._event.wait()
            self.assertIsInstance(f._final_exception, OperationTimedOut)

        self.assertEqual(listener.hosts_marked_down, [])
        assert_quiescent_pool_state(self, cluster)
Example no. 9
    def test_connect_wrong_pwd(self):
        cluster = self.cluster_as('cassandra', 'wrong_pass')
        self.assertRaisesRegexp(NoHostAvailable,
                                '.*AuthenticationFailed.*Bad credentials.*Username and/or '
                                'password are incorrect.*',
                                cluster.connect)
        assert_quiescent_pool_state(self, cluster)
        cluster.shutdown()
Example no. 10
    def test_connect_wrong_pwd(self):
        cluster = self.cluster_as('cassandra', 'wrong_pass')
        try:
            self.assertRaisesRegexp(NoHostAvailable, '.*AuthenticationFailed.',
                                    cluster.connect)
            assert_quiescent_pool_state(self, cluster)
        finally:
            cluster.shutdown()
Example no. 11
    def test_connect_wrong_pwd(self):
        cluster = self.cluster_as('cassandra', 'wrong_pass')
        self.assertRaisesRegexp(
            NoHostAvailable,
            '.*AuthenticationFailed.*Bad credentials.*Username and/or '
            'password are incorrect.*', cluster.connect)
        assert_quiescent_pool_state(self, cluster)
        cluster.shutdown()
Example no. 12
    def test_idle_heartbeat(self):
        interval = 1
        cluster = Cluster(protocol_version=PROTOCOL_VERSION, idle_heartbeat_interval=interval)
        if PROTOCOL_VERSION < 3:
            cluster.set_core_connections_per_host(HostDistance.LOCAL, 1)
        session = cluster.connect()

        # This test relies on implementation details of connection request id management
        # to see if heartbeats are being sent. May need updating if the implementation changes.
        connection_request_ids = {}
        for h in cluster.get_connection_holders():
            for c in h.get_connections():
                # make sure none are idle (should have startup messages)
                self.assertFalse(c.is_idle)
                with c.lock:
                    connection_request_ids[id(c)] = deque(c.request_ids)  # copy of request ids

        # let two heartbeat intervals pass (first one had startup messages in it)
        time.sleep(2 * interval + interval/10.)

        connections = [c for holders in cluster.get_connection_holders() for c in holders.get_connections()]

        # make sure requests were sent on all connections
        for c in connections:
            expected_ids = connection_request_ids[id(c)]
            expected_ids.rotate(-1)
            with c.lock:
                self.assertListEqual(list(c.request_ids), list(expected_ids))

        # assert idle status
        self.assertTrue(all(c.is_idle for c in connections))

        # send messages on all connections
        statements_and_params = [("SELECT release_version FROM system.local", ())] * len(cluster.metadata.all_hosts())
        results = execute_concurrent(session, statements_and_params)
        for success, result in results:
            self.assertTrue(success)

        # assert not idle status
        self.assertFalse(any(c.is_idle for c in connections if not c.is_control_connection))

        # holders include session pools and cc
        holders = cluster.get_connection_holders()
        self.assertIn(cluster.control_connection, holders)
        self.assertEqual(len(holders), len(cluster.metadata.all_hosts()) + 1)  # hosts pools, 1 for cc

        # include additional sessions
        session2 = cluster.connect()

        holders = cluster.get_connection_holders()
        self.assertIn(cluster.control_connection, holders)
        self.assertEqual(len(holders), 2 * len(cluster.metadata.all_hosts()) + 1)  # 2 sessions' hosts pools, 1 for cc

        cluster._idle_heartbeat.stop()
        cluster._idle_heartbeat.join()
        assert_quiescent_pool_state(self, cluster)

        cluster.shutdown()
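
Idle heartbeats are controlled by the idle_heartbeat_interval constructor argument (in seconds; the driver's default is 30, and 0 disables them, as in the pool-management examples above). There is also an idle_heartbeat_timeout governing how long a heartbeat may take before the connection is treated as defunct. A minimal configuration sketch:

# Send a heartbeat on any connection that has been idle for ~10 seconds,
# and treat a heartbeat with no response within 5 seconds as a failure.
cluster = Cluster(protocol_version=PROTOCOL_VERSION,
                  idle_heartbeat_interval=10,
                  idle_heartbeat_timeout=5)
session = cluster.connect()
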
Example no. 13
    def test_idle_heartbeat(self):
        interval = 1
        cluster = Cluster(protocol_version=PROTOCOL_VERSION, idle_heartbeat_interval=interval)
        if PROTOCOL_VERSION < 3:
            cluster.set_core_connections_per_host(HostDistance.LOCAL, 1)
        session = cluster.connect()

        # This test relies on implementation details of connection request id management
        # to see if heartbeats are being sent. May need updating if the implementation changes.
        connection_request_ids = {}
        for h in cluster.get_connection_holders():
            for c in h.get_connections():
                # make sure none are idle (should have startup messages)
                self.assertFalse(c.is_idle)
                with c.lock:
                    connection_request_ids[id(c)] = deque(c.request_ids)  # copy of request ids

        # let two heartbeat intervals pass (first one had startup messages in it)
        time.sleep(2 * interval + interval/10.)

        connections = [c for holders in cluster.get_connection_holders() for c in holders.get_connections()]

        # make sure requests were sent on all connections
        for c in connections:
            expected_ids = connection_request_ids[id(c)]
            expected_ids.rotate(-1)
            with c.lock:
                self.assertListEqual(list(c.request_ids), list(expected_ids))

        # assert idle status
        self.assertTrue(all(c.is_idle for c in connections))

        # send messages on all connections
        statements_and_params = [("SELECT release_version FROM system.local", ())] * len(cluster.metadata.all_hosts())
        results = execute_concurrent(session, statements_and_params)
        for success, result in results:
            self.assertTrue(success)

        # assert not idle status
        self.assertFalse(any(c.is_idle for c in connections if not c.is_control_connection))

        # holders include session pools and cc
        holders = cluster.get_connection_holders()
        self.assertIn(cluster.control_connection, holders)
        self.assertEqual(len(holders), len(cluster.metadata.all_hosts()) + 1)  # hosts pools, 1 for cc

        # include additional sessions
        session2 = cluster.connect()

        holders = cluster.get_connection_holders()
        self.assertIn(cluster.control_connection, holders)
        self.assertEqual(len(holders), 2 * len(cluster.metadata.all_hosts()) + 1)  # 2 sessions' hosts pools, 1 for cc

        cluster._idle_heartbeat.stop()
        cluster._idle_heartbeat.join()
        assert_quiescent_pool_state(self, cluster)

        cluster.shutdown()
Example no. 14
    def test_connect_empty_pwd(self):
        cluster = self.cluster_as('Cassandra', '')
        try:
            self.assertRaisesRegexp(NoHostAvailable,
                                    '.*AuthenticationFailed.*',
                                    cluster.connect)
            assert_quiescent_pool_state(self, cluster)
        finally:
            cluster.shutdown()
Example no. 15
    def test_connect_no_auth_provider(self):
        cluster = TestCluster()
        try:
            self.assertRaisesRegexp(NoHostAvailable,
                                    '.*AuthenticationFailed.*',
                                    cluster.connect)
            assert_quiescent_pool_state(self, cluster)
        finally:
            cluster.shutdown()
Example no. 16
    def test_connect_empty_pwd(self):
        cluster = self.cluster_as("Cassandra", "")
        self.assertRaisesRegexp(
            NoHostAvailable,
            ".*AuthenticationFailed.*Bad credentials.*Username and/or "
            "password are incorrect.*",
            cluster.connect,
        )
        assert_quiescent_pool_state(self, cluster)
        cluster.shutdown()
Example no. 17
    def test_auth_connect(self):
        user = '******'
        passwd = 'password'

        root_session = self.cluster_as('cassandra', 'cassandra').connect()
        root_session.execute('CREATE USER %s WITH PASSWORD %s', (user, passwd))

        cluster = self.cluster_as(user, passwd)
        session = cluster.connect()
        self.assertTrue(session.execute('SELECT release_version FROM system.local'))
        assert_quiescent_pool_state(self, cluster)
        cluster.shutdown()

        root_session.execute('DROP USER %s', user)
        assert_quiescent_pool_state(self, root_session.cluster)
        root_session.cluster.shutdown()
Example no. 18
    def test_auth_connect(self):
        user = "******"
        passwd = "password"

        root_session = self.cluster_as("cassandra", "cassandra").connect()
        root_session.execute("CREATE USER %s WITH PASSWORD %s", (user, passwd))

        cluster = self.cluster_as(user, passwd)
        session = cluster.connect()
        self.assertTrue(session.execute("SELECT release_version FROM system.local"))
        assert_quiescent_pool_state(self, cluster)
        cluster.shutdown()

        root_session.execute("DROP USER %s", user)
        assert_quiescent_pool_state(self, root_session.cluster)
        root_session.cluster.shutdown()
Example no. 19
    def test_auth_connect(self):
        user = '******'
        passwd = 'password'

        root_session = self.cluster_as('cassandra', 'cassandra').connect()
        root_session.execute('CREATE USER %s WITH PASSWORD %s', (user, passwd))

        cluster = self.cluster_as(user, passwd)
        session = cluster.connect()
        self.assertTrue(session.execute('SELECT release_version FROM system.local'))
        assert_quiescent_pool_state(self, cluster)
        cluster.shutdown()

        root_session.execute('DROP USER %s', user)
        assert_quiescent_pool_state(self, root_session.cluster)
        root_session.cluster.shutdown()
Example no. 20
    def test_auth_connect(self):
        user = '******'
        passwd = 'password'

        root_session = self.cluster_as('cassandra', 'cassandra').connect()
        root_session.execute('CREATE USER %s WITH PASSWORD %s', (user, passwd))

        try:
            cluster = self.cluster_as(user, passwd)
            session = cluster.connect()
            try:
                self.assertTrue(session.execute('SELECT release_version FROM system.local'))
                assert_quiescent_pool_state(self, cluster)
                for pool in session.get_pools():
                    connection, _ = pool.borrow_connection(timeout=0)
                    self.assertEqual(connection.authenticator.server_authenticator_class, 'org.apache.cassandra.auth.PasswordAuthenticator')
                    pool.return_connection(connection)
            finally:
                cluster.shutdown()
        finally:
            root_session.execute('DROP USER %s', user)
            assert_quiescent_pool_state(self, root_session.cluster)
            root_session.cluster.shutdown()
Example no. 21
    def test_retry_after_defunct(self):
        """
        We test cluster._retry is called if an the connection is defunct
        in the middle of a query

        Finally we verify the driver recovers correctly in the event
        of a network partition

        @since 3.12
        @expected_result the driver is able to query even if a host is marked
        as down in the middle of the query, it will go to the next one if the timeout
        hasn't expired

        @test_category connection
        """
        number_of_dcs = 3
        nodes_per_dc = 2

        query_to_prime = "INSERT INTO test3rf.test (k, v) VALUES (0, 1);"

        idle_heartbeat_timeout = 1
        idle_heartbeat_interval = 5

        simulacron_cluster = start_and_prime_cluster_defaults(
            number_of_dcs, nodes_per_dc)

        dc_ids = sorted(simulacron_cluster.data_center_ids)
        last_host = dc_ids.pop()
        prime_query(query_to_prime,
                    cluster_name="{}/{}".format(
                        simulacron_cluster.cluster_name, last_host))

        roundrobin_lbp = OrderedRoundRobinPolicy()
        cluster = Cluster(
            compression=False,
            idle_heartbeat_interval=idle_heartbeat_interval,
            idle_heartbeat_timeout=idle_heartbeat_timeout,
            execution_profiles={
                EXEC_PROFILE_DEFAULT:
                ExecutionProfile(load_balancing_policy=roundrobin_lbp)
            })

        session = cluster.connect(wait_for_all_pools=True)
        self.addCleanup(cluster.shutdown)

        # This simulates we only have access to one DC
        for dc_id in dc_ids:
            datacenter_path = "{}/{}".format(simulacron_cluster.cluster_name,
                                             dc_id)
            prime_query(query_to_prime,
                        then=NO_THEN,
                        cluster_name=datacenter_path)
            prime_request(
                PrimeOptions(then=NO_THEN, cluster_name=datacenter_path))

        # Only the last datacenter will respond, therefore the first host won't
        # We want to make sure the returned hosts are 127.0.0.1,  127.0.0.2, ... 127.0.0.8
        roundrobin_lbp._position = 0

        # After 3 + 1 seconds the connection should be marked as down and another host retried
        response_future = session.execute_async(
            query_to_prime,
            timeout=4 * idle_heartbeat_interval + idle_heartbeat_timeout)
        response_future.result()
        self.assertGreater(len(response_future.attempted_hosts), 1)

        # No error should be raised here since the hosts have been marked
        # as down and there's still 1 DC available
        for _ in range(10):
            session.execute(query_to_prime)

        # Might take some time to close the previous connections and reconnect
        time.sleep(10)
        assert_quiescent_pool_state(self, cluster)
        clear_queries()

        time.sleep(10)
        assert_quiescent_pool_state(self, cluster)
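
OrderedRoundRobinPolicy appears to be a deterministic policy used only by these Simulacron tests; in regular code the same wiring uses an ExecutionProfile with one of the stock load-balancing policies, for example:

from cassandra.cluster import Cluster, ExecutionProfile, EXEC_PROFILE_DEFAULT
from cassandra.policies import RoundRobinPolicy

# Default profile with an explicit load-balancing policy and per-request timeout.
profile = ExecutionProfile(load_balancing_policy=RoundRobinPolicy(),
                           request_timeout=15)
cluster = Cluster(execution_profiles={EXEC_PROFILE_DEFAULT: profile},
                  idle_heartbeat_interval=5,
                  idle_heartbeat_timeout=1)
session = cluster.connect(wait_for_all_pools=True)
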
Example no. 22
    def test_retry_after_defunct(self):
        """
        We test cluster._retry is called if an the connection is defunct
        in the middle of a query

        Finally we verify the driver recovers correctly in the event
        of a network partition

        @since 3.12
        @expected_result the driver is able to query even if a host is marked
        as down in the middle of the query, it will go to the next one if the timeout
        hasn't expired

        @test_category connection
        """
        number_of_dcs = 3
        nodes_per_dc = 2

        query_to_prime = "INSERT INTO test3rf.test (k, v) VALUES (0, 1);"

        idle_heartbeat_timeout = 1
        idle_heartbeat_interval = 5

        simulacron_cluster = start_and_prime_cluster_defaults(number_of_dcs, nodes_per_dc)

        dc_ids = sorted(simulacron_cluster.data_center_ids)
        last_host = dc_ids.pop()
        prime_query(query_to_prime,
                    cluster_name="{}/{}".format(simulacron_cluster.cluster_name, last_host))

        roundrobin_lbp = OrderedRoundRobinPolicy()
        cluster = Cluster(compression=False,
                          idle_heartbeat_interval=idle_heartbeat_interval,
                          idle_heartbeat_timeout=idle_heartbeat_timeout,
                          execution_profiles={
                              EXEC_PROFILE_DEFAULT: ExecutionProfile(load_balancing_policy=roundrobin_lbp)})

        session = cluster.connect(wait_for_all_pools=True)
        self.addCleanup(cluster.shutdown)

        # This simulates we only have access to one DC
        for dc_id in dc_ids:
            datacenter_path = "{}/{}".format(simulacron_cluster.cluster_name, dc_id)
            prime_query(query_to_prime, then=NO_THEN, cluster_name=datacenter_path)
            prime_request(PrimeOptions(then=NO_THEN, cluster_name=datacenter_path))

        # Only the last datacenter will respond, therefore the first host won't
        # We want to make sure the returned hosts are 127.0.0.1,  127.0.0.2, ... 127.0.0.8
        roundrobin_lbp._position = 0

        # After 3 + 1 seconds the connection should be marked as down and another host retried
        response_future = session.execute_async(query_to_prime, timeout=4 * idle_heartbeat_interval
                                                                        + idle_heartbeat_timeout)
        response_future.result()
        self.assertGreater(len(response_future.attempted_hosts), 1)

        # No error should be raised here since the hosts have been marked
        # as down and there's still 1 DC available
        for _ in range(10):
            session.execute(query_to_prime)

        # Might take some time to close the previous connections and reconnect
        time.sleep(10)
        assert_quiescent_pool_state(self, cluster)
        clear_queries()

        time.sleep(10)
        assert_quiescent_pool_state(self, cluster)