def test_pool_with_fork(self):
    # Test that separate MongoClients have separate Pools, and that the
    # driver can create a new MongoClient after forking
    if sys.platform == "win32":
        raise SkipTest("Can't test forking on Windows")

    try:
        from multiprocessing import Process, Pipe
    except ImportError:
        raise SkipTest("No multiprocessing module")

    # Parent client: one completed operation leaves one idle socket.
    a = self.get_client(auto_start_request=False)
    a.pymongo_test.test.remove()
    a.pymongo_test.test.insert({'_id':1})
    a.pymongo_test.test.find_one()
    self.assertEqual(1, len(get_pool(a).sockets))
    a_sock = one(get_pool(a).sockets)

    def loop(pipe):
        # Runs in a forked child: a fresh client must get its own pool
        # and its own socket, which it reports back over the pipe.
        c = self.get_client(auto_start_request=False)
        self.assertEqual(1,len(get_pool(c).sockets))
        c.pymongo_test.test.find_one()
        self.assertEqual(1,len(get_pool(c).sockets))
        pipe.send(one(get_pool(c).sockets).sock.getsockname())

    cp1, cc1 = Pipe()
    cp2, cc2 = Pipe()

    p1 = Process(target=loop, args=(cc1,))
    p2 = Process(target=loop, args=(cc2,))

    p1.start()
    p2.start()

    # Bounded join first, then terminate stragglers so the test can't hang.
    p1.join(1)
    p2.join(1)

    p1.terminate()
    p2.terminate()

    p1.join()
    p2.join()

    cc1.close()
    cc2.close()

    b_sock = cp1.recv()
    c_sock = cp2.recv()

    # All three processes used distinct sockets.
    self.assertTrue(a_sock.sock.getsockname() != b_sock)
    self.assertTrue(a_sock.sock.getsockname() != c_sock)
    self.assertTrue(b_sock != c_sock)

    # a_sock, created by parent process, is still in the pool
    d_sock = get_pool(a).get_socket()
    self.assertEqual(a_sock, d_sock)
    d_sock.close()
def assert_pool_size(self, pool_size):
    # Assert self.c's pool holds exactly `pool_size` idle sockets.
    if pool_size == 0:
        # Zero also matches a client that never connected at all
        # (its __member is still None).
        self.assertTrue(
            self.c._MongoClient__member is None
            or not get_pool(self.c).sockets
        )
    else:
        self.assertEqual(
            pool_size, len(get_pool(self.c).sockets)
        )
def test_socket_timeout_ms_validation(self):
    # socketTimeoutMS is given in milliseconds but stored on the pool
    # options in seconds.  Wrap both clients in connected() so get_pool()
    # is called on a client that has actually established its topology
    # (the original left the first client unconnected — inconsistent
    # with the second).
    c = connected(rs_or_single_client(socketTimeoutMS=10 * 1000))
    self.assertEqual(10, get_pool(c).opts.socket_timeout)

    # None disables the socket timeout entirely.
    c = connected(rs_or_single_client(socketTimeoutMS=None))
    self.assertEqual(None, get_pool(c).opts.socket_timeout)

    # Zero, negative, out-of-range, and non-numeric values are rejected
    # at client-construction time.
    self.assertRaises(ValueError,
                      rs_or_single_client, socketTimeoutMS=0)

    self.assertRaises(ValueError,
                      rs_or_single_client, socketTimeoutMS=-1)

    self.assertRaises(ValueError,
                      rs_or_single_client, socketTimeoutMS=1e10)

    self.assertRaises(ValueError,
                      rs_or_single_client, socketTimeoutMS="foo")
def test_auth_network_error(self):
    # Make sure there's no semaphore leak if we get a network error
    # when authenticating a new socket with cached credentials.

    # Get a client with one socket so we detect if it's leaked.
    c = connected(rs_or_single_client(maxPoolSize=1,
                                      waitQueueTimeoutMS=1))

    # Simulate an authenticate() call on a different socket.
    credentials = auth._build_credentials_tuple(
        'DEFAULT', 'admin', db_user, db_pwd, {})

    c._cache_credentials('test', credentials, connect=False)

    # Cause a network error on the actual socket.
    pool = get_pool(c)
    socket_info = one(pool.sockets)
    socket_info.sock.close()

    # SocketInfo.check_auth logs in with the new credential, but gets a
    # socket.error. Should be reraised as AutoReconnect.
    self.assertRaises(AutoReconnect, c.test.collection.find_one)

    # No semaphore leak, the pool is allowed to make a new socket.
    c.test.collection.find_one()
def test_auth_network_error(self):
    # Make sure there's no semaphore leak if we get a network error
    # when authenticating a new socket with cached credentials.
    auth_client = self._get_client()
    if not server_started_with_auth(auth_client):
        raise SkipTest('Authentication is not enabled on server')

    auth_client.admin.add_user('admin', 'password')
    auth_client.admin.authenticate('admin', 'password')
    try:
        # Get a client with one socket so we detect if it's leaked.
        c = self._get_client(max_pool_size=1, waitQueueTimeoutMS=1)

        # Simulate an authenticate() call on a different socket.
        credentials = auth._build_credentials_tuple(
            'MONGODB-CR', 'admin',
            unicode('admin'), unicode('password'),
            {})

        c._cache_credentials('test', credentials, connect=False)

        # Cause a network error on the actual socket.
        pool = get_pool(c)
        socket_info = one(pool.sockets)
        socket_info.sock.close()

        # In __check_auth, the client authenticates its socket with the
        # new credential, but gets a socket.error. Should be reraised as
        # AutoReconnect.
        self.assertRaises(AutoReconnect, c.test.collection.find_one)

        # No semaphore leak, the pool is allowed to make a new socket.
        c.test.collection.find_one()
    finally:
        # Always restore the server to an unauthenticated state.
        remove_all_users(auth_client.admin)
def test_max_pool_size(self):
    max_pool_size = 4
    c = rs_or_single_client(maxPoolSize=max_pool_size)
    collection = c[DB].test

    # Need one document.
    collection.drop()
    collection.insert_one({})

    # nthreads had better be much larger than max_pool_size to ensure that
    # max_pool_size sockets are actually required at some point in this
    # test's execution.
    cx_pool = get_pool(c)
    nthreads = 10
    threads = []
    lock = threading.Lock()
    self.n_passed = 0

    def f():
        # Each thread does slow queries; the pool must never exceed
        # max_pool_size idle sockets while they run.
        for _ in range(5):
            collection.find_one({'$where': delay(0.1)})
            assert len(cx_pool.sockets) <= max_pool_size

        with lock:
            self.n_passed += 1

    for i in range(nthreads):
        t = threading.Thread(target=f)
        threads.append(t)
        t.start()

    joinall(threads)
    self.assertEqual(nthreads, self.n_passed)

    # Concurrency forced more than one socket into existence, and every
    # permit was returned to the semaphore.
    self.assertTrue(len(cx_pool.sockets) > 1)
    self.assertEqual(max_pool_size, cx_pool._socket_semaphore.counter)
def run_mongo_thread(self):
    # Trace one socket through start_request / find_one / end_request:
    # it moves from the general pool, to this thread's request, and back.
    pool = get_pool(self.client)
    assert len(pool.sockets) == 1, "Expected 1 socket, found %d" % (
        len(pool.sockets)
    )

    sock_info = one(pool.sockets)

    self.client.start_request()

    # start_request() hasn't yet moved the socket from the general pool into
    # the request
    assert len(pool.sockets) == 1
    assert one(pool.sockets) == sock_info

    self.client[DB].test.find_one()

    # find_one() causes the socket to be used in the request, so now it's
    # bound to this thread
    assert len(pool.sockets) == 0
    assert pool._get_request_state() == sock_info
    self.client.end_request()

    # The socket is back in the pool
    assert len(pool.sockets) == 1
    assert one(pool.sockets) == sock_info
def test_auto_start_request(self):
    # Silence deprecation warnings for the duration of the test.
    ctx = catch_warnings()
    try:
        warnings.simplefilter("ignore", DeprecationWarning)
        # Non-boolean values for auto_start_request are rejected.
        for bad_horrible_value in (None, 5, 'hi!'):
            self.assertRaises(
                (TypeError, ConfigurationError),
                lambda: get_client(auto_start_request=bad_horrible_value)
            )

        # auto_start_request should default to False
        client = get_client()
        self.assertFalse(client.auto_start_request)

        client = get_client(auto_start_request=True)
        self.assertTrue(client.auto_start_request)

        # Assure we acquire a request socket.
        client.pymongo_test.test.find_one()
        self.assertTrue(client.in_request())
        pool = get_pool(client)
        self.assertRequestSocket(pool)
        self.assertSameSock(pool)

        client.end_request()
        self.assertNoRequest(pool)
        self.assertDifferentSock(pool)

        # Trigger auto_start_request
        client.pymongo_test.test.find_one()
        self.assertRequestSocket(pool)
        self.assertSameSock(pool)
    finally:
        ctx.exit()
def test_server_disconnect(self):
    # PYTHON-345, we need to make sure that threads' request sockets are
    # closed by disconnect().
    #
    # 1. Create a client with auto_start_request=True
    # 2. Start N threads and do a find() in each to get a request socket
    # 3. Pause all threads
    # 4. In the main thread close all sockets, including threads' request
    #    sockets
    # 5. In main thread, do a find(), which raises AutoReconnect and resets
    #    pool
    # 6. Resume all threads, do a find() in them
    #
    # If we've fixed PYTHON-345, then only one AutoReconnect is raised,
    # and all the threads get new request sockets.
    cx = get_client(auto_start_request=True)
    collection = cx.db.pymongo_test

    # acquire a request socket for the main thread
    collection.find_one()
    pool = get_pool(collection.database.connection)
    socket_info = pool._get_request_state()
    assert isinstance(socket_info, SocketInfo)
    request_sock = socket_info.sock

    state = FindPauseFind.create_shared_state(nthreads=40)

    threads = [FindPauseFind(collection, state)
               for _ in range(state.nthreads)]

    # Each thread does a find(), thus acquiring a request socket
    for t in threads:
        t.start()

    # Wait for the threads to reach the rendezvous
    FindPauseFind.wait_for_rendezvous(state)

    try:
        # Simulate an event that closes all sockets, e.g. primary stepdown
        for t in threads:
            t.request_sock.close()

        # Finally, ensure the main thread's socket's last_checkout is
        # updated:
        collection.find_one()

        # ... and close it:
        request_sock.close()

        # Doing an operation on the client raises an AutoReconnect and
        # resets the pool behind the scenes
        self.assertRaises(AutoReconnect, collection.find_one)

    finally:
        # Let threads do a second find()
        FindPauseFind.resume_after_rendezvous(state)

    joinall(threads)

    for t in threads:
        self.assertTrue(t.passed, "%s threw exception" % t)
def test_max_pool_size_none(self):
    # With maxPoolSize=None the pool is unbounded; concurrent threads
    # should still complete and leave multiple sockets pooled.
    c = rs_or_single_client(maxPoolSize=None)
    collection = c[DB].test

    # Need one document.
    collection.drop()
    collection.insert_one({})

    cx_pool = get_pool(c)
    nthreads = 10
    threads = []
    lock = threading.Lock()
    self.n_passed = 0

    def f():
        # Slow queries force concurrent socket checkouts.
        for _ in range(5):
            collection.find_one({'$where': delay(0.1)})

        with lock:
            self.n_passed += 1

    for i in range(nthreads):
        t = threading.Thread(target=f)
        threads.append(t)
        t.start()

    joinall(threads)
    self.assertEqual(nthreads, self.n_passed)
    self.assertTrue(len(cx_pool.sockets) > 1)
def test_auto_start_request(self):
    # Non-boolean values for auto_start_request are rejected.
    for bad_horrible_value in (None, 5, 'hi!'):
        self.assertRaises(
            (TypeError, ConfigurationError),
            lambda: get_client(auto_start_request=bad_horrible_value)
        )

    # auto_start_request should default to False
    client = get_client()
    self.assertFalse(client.auto_start_request)

    client = get_client(auto_start_request=True)
    self.assertTrue(client.auto_start_request)

    # Assure we acquire a request socket.
    client.pymongo_test.test.find_one()
    self.assertTrue(client.in_request())
    pool = get_pool(client)
    self.assertRequestSocket(pool)
    self.assertSameSock(pool)

    client.end_request()
    self.assertNoRequest(pool)
    self.assertDifferentSock(pool)

    # Trigger auto_start_request
    client.pymongo_test.test.find_one()
    self.assertRequestSocket(pool)
    self.assertSameSock(pool)
def test_nested_request(self):
    # Requests are reference-counted: nesting start_request requires an
    # equal number of end_request calls, and extras are harmless.

    # auto_start_request is False
    client = get_client()
    pool = get_pool(client)
    self.assertFalse(client.in_request())

    # Start and end request
    client.start_request()
    self.assertInRequestAndSameSock(client, pool)
    client.end_request()
    self.assertNotInRequestAndDifferentSock(client, pool)

    # Double-nesting
    client.start_request()
    client.start_request()
    client.end_request()
    self.assertInRequestAndSameSock(client, pool)
    client.end_request()
    self.assertNotInRequestAndDifferentSock(client, pool)

    # Extra end_request calls have no effect - count stays at zero
    client.end_request()
    self.assertNotInRequestAndDifferentSock(client, pool)

    client.start_request()
    self.assertInRequestAndSameSock(client, pool)
    client.end_request()
    self.assertNotInRequestAndDifferentSock(client, pool)
def test_connection(self):
    # Legacy Connection defaults: auto_start_request on, unbounded pool,
    # unacknowledged writes.
    c = Connection(host, port)
    self.assertTrue(c.auto_start_request)
    self.assertEqual(None, c.max_pool_size)
    self.assertFalse(c.slave_okay)
    self.assertFalse(c.safe)
    self.assertEqual({}, c.get_lasterror_options())

    # Connection's writes are unacknowledged by default
    doc = {"_id": ObjectId()}
    coll = c.pymongo_test.write_concern_test
    coll.drop()
    coll.insert(doc)
    # Duplicate _id does not raise because the write is unacknowledged.
    coll.insert(doc)

    c = Connection("mongodb://%s:%s/?safe=true" % (host, port))
    self.assertTrue(c.safe)

    # To preserve legacy Connection's behavior, max_size should be None.
    # Pool should handle this without error.
    self.assertEqual(None, get_pool(c).max_size)
    c.end_request()

    # Connection's network_timeout argument is translated into
    # socketTimeoutMS
    self.assertEqual(123, Connection(
        host, port, network_timeout=123)._MongoClient__net_timeout)

    for network_timeout in 'foo', 0, -1:
        self.assertRaises(
            ConfigurationError,
            Connection, host, port, network_timeout=network_timeout)
def test_exhaust_getmore_network_error(self):
    # When doing a getmore on an exhaust cursor, the socket stays checked
    # out on success but it's checked in on error to avoid semaphore leaks.
    client = rs_or_single_client(maxPoolSize=1)
    collection = client.pymongo_test.test
    collection.drop()
    collection.insert_many([{} for _ in range(200)])  # More than one batch.
    pool = get_pool(client)
    pool._check_interval_seconds = None  # Never check.

    cursor = collection.find(cursor_type=CursorType.EXHAUST)

    # Initial query succeeds.
    cursor.next()

    # Cause a network error.
    sock_info = cursor._Cursor__exhaust_mgr.sock
    sock_info.sock.close()

    # A getmore fails.
    self.assertRaises(ConnectionFailure, list, cursor)
    self.assertTrue(sock_info.closed)

    # The socket was closed and the semaphore was decremented.
    self.assertNotIn(sock_info, pool.sockets)
    self.assertTrue(pool._socket_semaphore.acquire(blocking=False))
def test_contextlib(self):
    # Both contextlib.closing() and the client's own context-manager
    # protocol must close all pooled sockets on exit.
    client = rs_or_single_client()
    client.pymongo_test.drop_collection("test")
    client.pymongo_test.test.insert_one({"foo": "bar"})

    # The socket used for the previous commands has been returned to the
    # pool
    self.assertEqual(1, len(get_pool(client).sockets))

    with contextlib.closing(client):
        self.assertEqual("bar", client.pymongo_test.test.find_one()["foo"])
        self.assertEqual(1, len(get_pool(client).sockets))
    self.assertEqual(0, len(get_pool(client).sockets))

    # The client transparently reconnects for the next operation.
    with client as client:
        self.assertEqual("bar", client.pymongo_test.test.find_one()["foo"])
    self.assertEqual(0, len(get_pool(client).sockets))
def before_rendezvous(self):
    # acquire a socket
    list(self.collection.find())

    # Remember this thread's request socket so after_rendezvous() can
    # verify it was replaced; it must still be open at this point.
    pool = get_pool(self.collection.database.connection)
    socket_info = pool._get_request_state()
    assert isinstance(socket_info, SocketInfo)
    self.request_sock = socket_info.sock
    assert not _closed(self.request_sock)
def after_rendezvous(self):
    # test_server_disconnect() has closed this socket, but that's ok
    # because it's not our request socket anymore
    assert _closed(self.request_sock)

    # if disconnect() properly replaced the pool, then this won't raise
    # AutoReconnect because it will acquire a new socket
    list(self.collection.find())
    assert self.collection.database.connection.in_request()
    pool = get_pool(self.collection.database.connection)
    # The new request socket must be a different socket than the old one.
    assert self.request_sock != pool._get_request_state().sock
def test_operation_failure(self):
    # A getLastError error response must not make MongoClient discard the
    # socket it used (PYTHON-395): the very same socket stays pooled.
    pool = get_pool(self.client)
    initial_count = len(pool.sockets)
    self.assertGreaterEqual(initial_count, 1)
    sock_before = next(iter(pool.sockets))

    collection = self.client.pymongo_test.test
    collection.drop()
    collection.insert_one({"_id": "foo"})

    # Duplicate-key insert produces an OperationFailure...
    self.assertRaises(OperationFailure,
                      collection.insert_one,
                      {"_id": "foo"})

    # ...but the pool still holds the same number of sockets, and the
    # socket object itself is unchanged.
    self.assertEqual(initial_count, len(pool.sockets))
    sock_after = next(iter(pool.sockets))
    self.assertEqual(sock_before, sock_after)
def test_auto_start_request(self):
    # Non-boolean values for auto_start_request are rejected.
    for bad_horrible_value in (None, 5, 'hi!'):
        self.assertRaises(
            (TypeError, ConfigurationError),
            lambda: self._get_client(auto_start_request=bad_horrible_value)
        )

    client = self._get_client(auto_start_request=True)
    self.assertTrue(client.auto_start_request)
    pools = pools_from_rs_client(client)
    self.assertInRequestAndSameSock(client, pools)

    primary_pool = get_pool(client)

    # Trigger the RSC to actually start a request on primary pool
    client.pymongo_test.test.find_one()
    self.assertTrue(primary_pool.in_request())

    # avoid a silly race in tokumx
    time.sleep(1)

    # Trigger the RSC to actually start a request on secondary pool
    cursor = client.pymongo_test.test.find(
        read_preference=ReadPreference.SECONDARY)
    try:
        cursor.next()
    except StopIteration:
        # No results, no problem
        pass

    # Look up the pool of whichever secondary served the query.
    secondary = cursor._Cursor__connection_id
    rs_state = client._MongoReplicaSetClient__rs_state
    secondary_pool = rs_state.get(secondary).pool
    self.assertTrue(secondary_pool.in_request())

    client.end_request()
    self.assertNotInRequestAndDifferentSock(client, pools)
    for pool in pools:
        self.assertFalse(pool.in_request())

    client.start_request()
    self.assertInRequestAndSameSock(client, pools)
    client.close()

    # A fresh client without auto_start_request starts out of a request.
    client = self._get_client()
    pools = pools_from_rs_client(client)
    self.assertNotInRequestAndDifferentSock(client, pools)
    client.start_request()
    self.assertInRequestAndSameSock(client, pools)
    client.end_request()
    self.assertNotInRequestAndDifferentSock(client, pools)
    client.close()
def test_operation_failure_without_request(self):
    # Ensure MongoClient doesn't close socket after it gets an error
    # response to getLastError. PYTHON-395.
    c = get_client()
    pool = get_pool(c)
    self.assertEqual(1, len(pool.sockets))
    old_sock_info = iter(pool.sockets).next()
    c.pymongo_test.test.drop()
    c.pymongo_test.test.insert({'_id': 'foo'})
    # Duplicate-key insert raises, but must not cost us the socket.
    self.assertRaises(
        OperationFailure,
        c.pymongo_test.test.insert, {'_id': 'foo'})

    self.assertEqual(1, len(pool.sockets))
    new_sock_info = iter(pool.sockets).next()
    self.assertEqual(old_sock_info, new_sock_info)
def test_operation_failure_without_request(self):
    # Ensure MongoReplicaSetClient doesn't close socket after it gets an
    # error response to getLastError. PYTHON-395.
    c = self._get_client(auto_start_request=False)
    pool = get_pool(c)
    self.assertEqual(1, len(pool.sockets))
    old_sock_info = iter(pool.sockets).next()
    c.pymongo_test.test.drop()
    c.pymongo_test.test.insert({"_id": "foo"})
    # Duplicate-key insert raises, but must not cost us the socket.
    self.assertRaises(OperationFailure,
                      c.pymongo_test.test.insert, {"_id": "foo"})

    self.assertEqual(1, len(pool.sockets))
    new_sock_info = iter(pool.sockets).next()
    self.assertEqual(old_sock_info, new_sock_info)
    c.close()
def test_operation_failure_with_request(self):
    # Ensure MongoReplicaSetClient doesn't close socket after it gets an
    # error response to getLastError. PYTHON-395.
    c = self._get_client(auto_start_request=True)
    c.pymongo_test.test.find_one()
    pool = get_pool(c)

    # Client reserved a socket for this thread
    self.assertTrue(isinstance(pool._get_request_state(), SocketInfo))

    old_sock_info = pool._get_request_state()
    c.pymongo_test.test.drop()
    c.pymongo_test.test.insert({"_id": "foo"})
    # Duplicate-key insert raises OperationFailure.
    self.assertRaises(OperationFailure,
                      c.pymongo_test.test.insert, {"_id": "foo"})

    # OperationFailure doesn't affect the request socket
    self.assertEqual(old_sock_info, pool._get_request_state())
    c.close()
def test_replica_set_connection(self):
    # Legacy ReplicaSetConnection defaults: auto_start_request on,
    # unbounded pool, unacknowledged writes.
    c = ReplicaSetConnection(pair, replicaSet=self.name)
    ctx = catch_warnings()
    try:
        warnings.simplefilter("ignore", DeprecationWarning)
        self.assertTrue(c.auto_start_request)
        self.assertEqual(None, c.max_pool_size)
        self.assertFalse(c.slave_okay)
        self.assertFalse(c.safe)
        self.assertEqual({}, c.get_lasterror_options())

        # ReplicaSetConnection's writes are unacknowledged by default
        doc = {"_id": ObjectId()}
        coll = c.pymongo_test.write_concern_test
        coll.drop()
        coll.insert(doc)
        # Duplicate _id does not raise: the write is unacknowledged.
        coll.insert(doc)

        c = ReplicaSetConnection("mongodb://%s:%s/?replicaSet=%s&safe=true" % (
            host, port, self.name))

        self.assertTrue(c.safe)
    finally:
        ctx.exit()

    # To preserve legacy ReplicaSetConnection's behavior, max_size should
    # be None. Pool should handle this without error.
    pool = get_pool(c)
    self.assertEqual(None, pool.max_size)
    c.end_request()

    # ReplicaSetConnection's network_timeout argument is translated into
    # socketTimeoutMS
    self.assertEqual(123, ReplicaSetConnection(
        pair, replicaSet=self.name, network_timeout=123
    )._MongoReplicaSetClient__net_timeout)

    for network_timeout in 'foo', 0, -1:
        self.assertRaises(
            ConfigurationError,
            ReplicaSetConnection, pair, replicaSet=self.name,
            network_timeout=network_timeout)
def test_exhaust_query_network_error(self):
    # When doing an exhaust query, the socket stays checked out on success
    # but must be checked in on error to avoid semaphore leaks.
    client = connected(rs_or_single_client(maxPoolSize=1))
    collection = client.pymongo_test.test
    pool = get_pool(client)
    pool._check_interval_seconds = None  # Never check.

    # Cause a network error.
    sock_info = one(pool.sockets)
    sock_info.sock.close()

    cursor = collection.find(cursor_type=CursorType.EXHAUST)
    self.assertRaises(ConnectionFailure, cursor.next)
    self.assertTrue(sock_info.closed)

    # The socket was closed and the semaphore was decremented.
    self.assertNotIn(sock_info, pool.sockets)
    self.assertTrue(pool._socket_semaphore.acquire(blocking=False))
def test_exhaust_query_server_error(self):
    # When doing an exhaust query, the socket stays checked out on success
    # but must be checked in on error to avoid semaphore leaks.
    client = connected(rs_or_single_client(maxPoolSize=1))
    collection = client.pymongo_test.test
    pool = get_pool(client)
    sock_info = one(pool.sockets)

    # This will cause OperationFailure in all mongo versions since
    # the value for $orderby must be a document.
    cursor = collection.find(SON([("$query", {}), ("$orderby", True)]),
                             cursor_type=CursorType.EXHAUST)

    self.assertRaises(OperationFailure, cursor.next)
    # Server-side error: the socket itself is still healthy.
    self.assertFalse(sock_info.closed)

    # The socket was checked in and the semaphore was decremented.
    self.assertIn(sock_info, pool.sockets)
    self.assertTrue(pool._socket_semaphore.acquire(blocking=False))
def test_operation_failure_with_request(self):
    # Ensure MongoClient doesn't close socket after it gets an error
    # response to getLastError. PYTHON-395.
    c = get_client(auto_start_request=True)
    pool = get_pool(c)

    # Pool reserves a socket for this thread.
    c.pymongo_test.test.find_one()
    self.assertTrue(isinstance(pool._get_request_state(), SocketInfo))

    old_sock_info = pool._get_request_state()
    c.pymongo_test.test.drop()
    c.pymongo_test.test.insert({'_id': 'foo'})
    # Duplicate-key insert raises OperationFailure.
    self.assertRaises(
        OperationFailure,
        c.pymongo_test.test.insert, {'_id': 'foo'})

    # OperationFailure doesn't affect the request socket
    self.assertEqual(old_sock_info, pool._get_request_state())
def test_request_threads(self): client = self.client # In a request, all ops go through master pool = get_pool(client.master) client.master.end_request() self.assertNotInRequestAndDifferentSock(client, pool) started_request, ended_request = threading.Event(), threading.Event() checked_request = threading.Event() thread_done = [False] # Starting a request in one thread doesn't put the other thread in a # request def f(): self.assertNotInRequestAndDifferentSock(client, pool) client.start_request() self.assertInRequestAndSameSock(client, pool) started_request.set() checked_request.wait() checked_request.clear() self.assertInRequestAndSameSock(client, pool) client.end_request() self.assertNotInRequestAndDifferentSock(client, pool) ended_request.set() checked_request.wait() thread_done[0] = True t = threading.Thread(target=f) t.setDaemon(True) t.start() started_request.wait() self.assertNotInRequestAndDifferentSock(client, pool) checked_request.set() ended_request.wait() self.assertNotInRequestAndDifferentSock(client, pool) checked_request.set() t.join() self.assertNotInRequestAndDifferentSock(client, pool) self.assertTrue(thread_done[0], "Thread didn't complete")
def test_request_threads(self):
    client = get_client(auto_start_request=False)
    pool = get_pool(client)
    self.assertNotInRequestAndDifferentSock(client, pool)

    # Events coordinate the lock-step handoff between this thread and f.
    started_request, ended_request = threading.Event(), threading.Event()
    checked_request = threading.Event()
    thread_done = [False]

    # Starting a request in one thread doesn't put the other thread in a
    # request
    def f():
        self.assertNotInRequestAndDifferentSock(client, pool)
        client.start_request()
        self.assertInRequestAndSameSock(client, pool)
        started_request.set()
        checked_request.wait()
        checked_request.clear()
        self.assertInRequestAndSameSock(client, pool)
        client.end_request()
        self.assertNotInRequestAndDifferentSock(client, pool)
        ended_request.set()
        checked_request.wait()
        thread_done[0] = True

    t = threading.Thread(target=f)
    t.setDaemon(True)
    t.start()
    # It doesn't matter in what order the main thread or t initially get
    # to started_request.set() / wait(); by waiting here we ensure that t
    # has called client.start_request() before we assert on the next line.
    started_request.wait()
    self.assertNotInRequestAndDifferentSock(client, pool)
    checked_request.set()
    ended_request.wait()
    self.assertNotInRequestAndDifferentSock(client, pool)
    checked_request.set()
    t.join()
    self.assertNotInRequestAndDifferentSock(client, pool)
    self.assertTrue(thread_done[0], "Thread didn't complete")
def test_with_start_request(self):
    client = get_client()
    pool = get_pool(client)

    # No request started
    self.assertNoRequest(pool)
    self.assertDifferentSock(pool)

    # Start a request
    request_context_mgr = client.start_request()
    self.assertTrue(
        isinstance(request_context_mgr, object)
    )

    self.assertNoSocketYet(pool)
    self.assertSameSock(pool)
    self.assertRequestSocket(pool)

    # End request
    request_context_mgr.__exit__(None, None, None)
    self.assertNoRequest(pool)
    self.assertDifferentSock(pool)

    # Test the 'with' statement
    if sys.version_info >= (2, 6):
        # We need exec here because if the Python version is less than 2.6
        # these with-statements won't even compile.
        exec """
with client.start_request() as request:
    self.assertEqual(client, request.connection)
    self.assertNoSocketYet(pool)
    self.assertSameSock(pool)
    self.assertRequestSocket(pool)
"""

        # Request has ended
        self.assertNoRequest(pool)
        self.assertDifferentSock(pool)
def test_contextlib(self): if sys.version_info < (2, 6): raise SkipTest("With statement requires Python >= 2.6") import contextlib client = get_client(auto_start_request=False) client.pymongo_test.drop_collection("test") client.pymongo_test.test.insert({"foo": "bar"}) # The socket used for the previous commands has been returned to the # pool self.assertEqual(1, len(get_pool(client).sockets)) # We need exec here because if the Python version is less than 2.6 # these with-statements won't even compile. exec """ with contextlib.closing(client): self.assertEqual("bar", client.pymongo_test.test.find_one()["foo"]) self.assertEqual(None, client._MongoClient__member) """ exec """
def test_server_disconnect(self):
    # PYTHON-345, we need to make sure that threads' request sockets are
    # closed by disconnect().
    #
    # 1. Create a client with auto_start_request=True
    # 2. Start N threads and do a find() in each to get a request socket
    # 3. Pause all threads
    # 4. In the main thread close all sockets, including threads' request
    #    sockets
    # 5. In main thread, do a find(), which raises AutoReconnect and resets
    #    pool
    # 6. Resume all threads, do a find() in them
    #
    # If we've fixed PYTHON-345, then only one AutoReconnect is raised,
    # and all the threads get new request sockets.
    cx = get_client(auto_start_request=True)
    collection = cx.db.pymongo_test

    # acquire a request socket for the main thread
    collection.find_one()
    pool = get_pool(collection.database.connection)
    socket_info = pool._get_request_state()
    assert isinstance(socket_info, SocketInfo)
    request_sock = socket_info.sock

    state = FindPauseFind.create_shared_state(nthreads=40)

    threads = [
        FindPauseFind(collection, state)
        for _ in range(state.nthreads)
    ]

    # Each thread does a find(), thus acquiring a request socket
    for t in threads:
        t.start()

    # Wait for the threads to reach the rendezvous
    FindPauseFind.wait_for_rendezvous(state)

    try:
        # Simulate an event that closes all sockets, e.g. primary stepdown
        for t in threads:
            t.request_sock.close()

        # Finally, ensure the main thread's socket's last_checkout is
        # updated:
        collection.find_one()

        # ... and close it:
        request_sock.close()

        # Doing an operation on the client raises an AutoReconnect and
        # resets the pool behind the scenes
        self.assertRaises(AutoReconnect, collection.find_one)

    finally:
        # Let threads do a second find()
        FindPauseFind.resume_after_rendezvous(state)

    joinall(threads)

    for t in threads:
        self.assertTrue(t.passed, "%s threw exception" % t)
def test_1_client_connection_pool_options(self):
    # Every option passed via POOL_OPTIONS must round-trip into the
    # pool's non-default options unchanged.
    client = rs_or_single_client(**self.POOL_OPTIONS)
    self.addCleanup(client.close)
    observed_options = get_pool(client).opts.non_default_options
    self.assertEqual(observed_options, self.POOL_OPTIONS)
def _test_max_pool_size(
        self, start_request, end_request, max_pool_size=4, nthreads=10):
    """Start `nthreads` threads. Each calls start_request `start_request`
    times, then find_one and waits at a barrier; once all reach the barrier
    each calls end_request `end_request` times. The test asserts that the
    pool ends with min(max_pool_size, nthreads) sockets or, if
    start_request wasn't called, at least one socket.

    This tests both max_pool_size enforcement and that leaked request
    sockets are eventually returned to the pool when their threads end.

    You may need to increase ulimit -n on Mac.

    If you increase nthreads over about 35, note a Gevent 0.13.6 bug on
    Mac: Greenlet.join() hangs if more than about 35 Greenlets share a
    MongoClient. Apparently fixed in recent Gevent development.
    """
    if start_request:
        if max_pool_size is not None and max_pool_size < nthreads:
            raise AssertionError("Deadlock")

    c = self.get_client(
        max_pool_size=max_pool_size, auto_start_request=False)

    rendezvous = CreateAndReleaseSocket.Rendezvous(
        nthreads, self.use_greenlets)

    threads = []
    for i in range(nthreads):
        t = CreateAndReleaseSocket(
            self, c, start_request, end_request, rendezvous)
        threads.append(t)

    for t in threads:
        t.start()

    if 'PyPy' in sys.version:
        # With PyPy we need to kick off the gc whenever the threads hit the
        # rendezvous since nthreads > max_pool_size.
        gc_collect_until_done(threads)
    else:
        for t in threads:
            t.join()

    # join() returns before the thread state is cleared; give it time.
    self.sleep(1)

    for t in threads:
        self.assertTrue(t.passed)

    # Socket-reclamation doesn't work in Jython
    if not sys.platform.startswith('java'):
        cx_pool = get_pool(c)

        # Socket-reclamation depends on timely garbage-collection
        if 'PyPy' in sys.version:
            gc.collect()

        if self.use_greenlets:
            # Wait for Greenlet.link() callbacks to execute
            the_hub = hub.get_hub()
            if hasattr(the_hub, 'join'):
                # Gevent 1.0
                the_hub.join()
            else:
                # Gevent 0.13 and less
                the_hub.shutdown()

        if start_request:
            # Trigger final cleanup in Python <= 2.7.0.
            cx_pool._ident.get()
            expected_idle = min(max_pool_size, nthreads)
            message = (
                '%d idle sockets (expected %d) and %d request sockets'
                ' (expected 0)' % (
                    len(cx_pool.sockets), expected_idle,
                    len(cx_pool._tid_to_sock)))

            self.assertEqual(
                expected_idle, len(cx_pool.sockets), message)
        else:
            # Without calling start_request(), threads can safely share
            # sockets; the number running concurrently, and hence the
            # number of sockets needed, is between 1 and 10, depending
            # on thread-scheduling.
            self.assertTrue(len(cx_pool.sockets) >= 1)

        # thread.join completes slightly *before* thread locals are
        # cleaned up, so wait up to 5 seconds for them.
        self.sleep(0.1)
        cx_pool._ident.get()
        start = time.time()
        while (
            not cx_pool.sockets
            and cx_pool._socket_semaphore.counter < max_pool_size
            and (time.time() - start) < 5
        ):
            self.sleep(0.1)
            cx_pool._ident.get()

        if max_pool_size is not None:
            self.assertEqual(
                max_pool_size,
                cx_pool._socket_semaphore.counter)

        self.assertEqual(0, len(cx_pool._tid_to_sock))
def assert_request_with_socket(self):
    # The client's pool must currently hold a SocketInfo as its
    # per-thread request state.
    request_state = get_pool(self.c)._get_request_state()
    self.assertTrue(isinstance(request_state, SocketInfo))
def assert_pool_size(self, pool_size):
    """Assert self.c's pool holds exactly `pool_size` idle sockets."""
    if pool_size == 0:
        # Zero also matches a client that never connected (its
        # __member is still None).
        never_connected = self.c._MongoClient__member is None
        self.assertTrue(never_connected or not get_pool(self.c).sockets)
    else:
        idle = len(get_pool(self.c).sockets)
        self.assertEqual(pool_size, idle)
def test_timeouts(self):
    # Millisecond URI options are converted to seconds on the pool opts.
    connect_client = rs_or_single_client(connectTimeoutMS=10500)
    self.assertEqual(10.5, get_pool(connect_client).opts.connect_timeout)

    socket_client = rs_or_single_client(socketTimeoutMS=10500)
    self.assertEqual(10.5, get_pool(socket_client).opts.socket_timeout)
def test_socketKeepAlive(self):
    # socketKeepAlive=True must propagate to the pool's options.
    keepalive_client = rs_or_single_client(socketKeepAlive=True)
    pool_options = get_pool(keepalive_client).opts
    self.assertTrue(pool_options.socket_keepalive)
def _test_max_pool_size_no_rendezvous(self, start_request, end_request):
    """Run many threads against one client without a rendezvous point and
    verify the pool and its semaphore are fully restored afterwards.

    `start_request` / `end_request` are counts of how many times each
    worker thread calls the corresponding client method.
    """
    max_pool_size = 5
    c = self.get_client(
        max_pool_size=max_pool_size, auto_start_request=False)

    # If you increase nthreads over about 35, note a
    # Gevent 0.13.6 bug on Mac, Greenlet.join() hangs if more than
    # about 35 Greenlets share a MongoClient. Apparently fixed in
    # recent Gevent development.

    # On the other hand, nthreads had better be much larger than
    # max_pool_size to ensure that max_pool_size sockets are actually
    # required at some point in this test's execution.
    nthreads = 10

    if (sys.platform.startswith('java')
            and start_request > end_request
            and nthreads > max_pool_size):
        # Since Jython can't reclaim the socket and release the semaphore
        # after a thread leaks a request, we'll exhaust the semaphore and
        # deadlock.
        raise SkipTest("Jython can't do socket reclamation")

    threads = []
    for i in range(nthreads):
        t = CreateAndReleaseSocketNoRendezvous(
            self, c, start_request, end_request)
        threads.append(t)

    for t in threads:
        t.start()

    if 'PyPy' in sys.version:
        # With PyPy we need to kick off the gc whenever the threads hit the
        # rendezvous since nthreads > max_pool_size.
        gc_collect_until_done(threads)
    else:
        for t in threads:
            t.join()

    # Each worker records success/failure on itself; fail fast if any broke.
    for t in threads:
        self.assertTrue(t.passed)

    cx_pool = get_pool(c)

    # Socket-reclamation depends on timely garbage-collection
    if 'PyPy' in sys.version:
        gc.collect()

    if self.use_greenlets:
        # Wait for Greenlet.link() callbacks to execute
        the_hub = hub.get_hub()
        if hasattr(the_hub, 'join'):
            # Gevent 1.0
            the_hub.join()
        else:
            # Gevent 0.13 and less
            the_hub.shutdown()

    # thread.join completes slightly *before* thread locals are
    # cleaned up, so wait up to 5 seconds for them.
    self.sleep(0.1)
    cx_pool._ident.get()
    start = time.time()
    # Poll until sockets reappear in the pool or the semaphore is fully
    # released, giving reclamation up to 5 seconds to finish.
    while (
        not cx_pool.sockets
        and cx_pool._socket_semaphore.counter < max_pool_size
        and (time.time() - start) < 5
    ):
        self.sleep(0.1)
        cx_pool._ident.get()

    # At least one socket was returned, and every semaphore slot is free.
    self.assertTrue(len(cx_pool.sockets) >= 1)
    self.assertEqual(max_pool_size, cx_pool._socket_semaphore.counter)
def test_waitQueueTimeoutMS(self):
    """waitQueueTimeoutMS (ms) is stored on the pool in seconds."""
    client = self._get_client(waitQueueTimeoutMS=2000)
    timeout = get_pool(client).wait_queue_timeout
    self.assertEqual(2, timeout)
def test_waitQueueMultiple(self):
    """waitQueueMultiple caps waiters at multiple * max_pool_size."""
    client = self._get_client(max_pool_size=3, waitQueueMultiple=2)
    pool = get_pool(client)
    self.assertEqual(2, pool.wait_queue_multiple)
    # 3 sockets * multiple of 2 => up to 6 threads may wait for a socket.
    self.assertEqual(6, pool._socket_semaphore.waiter_semaphore.counter)
def loop(pipe):
    # Runs in a forked child: a fresh client must get its own pool with
    # exactly one socket, before and after a query.
    client = self.get_client(auto_start_request=False)
    self.assertEqual(1, len(get_pool(client).sockets))
    client.pymongo_test.test.find_one()
    self.assertEqual(1, len(get_pool(client).sockets))
    # Report this child's socket address back to the parent process.
    sock_info = one(get_pool(client).sockets)
    pipe.send(sock_info.sock.getsockname())
def assert_request_without_socket(self):
    """Assert this thread has a request on self.c's pool but no socket yet."""
    request_state = get_pool(self.c)._get_request_state()
    self.assertEqual(NO_SOCKET_YET, request_state)
def test_waitQueueTimeoutMS(self):
    """waitQueueTimeoutMS (ms) is stored on pool opts in seconds."""
    client = rs_or_single_client(waitQueueTimeoutMS=2000)
    opts = get_pool(client).opts
    self.assertEqual(2, opts.wait_queue_timeout)
def assert_no_request(self):
    """Assert self.c has no active request (or has never connected)."""
    member = self.c._MongoClient__member
    self.assertTrue(
        member is None
        or NO_REQUEST == get_pool(self.c)._get_request_state())
def test_multiple_connections(self):
    """Two MongoClients keep independent pools; a socket leaves the pool
    while a request is active on it and returns when the request ends.
    """
    a = self.get_client(auto_start_request=False)
    b = self.get_client(auto_start_request=False)
    # Each client starts with one idle socket in its pool.
    self.assertEqual(1, len(get_pool(a).sockets))
    self.assertEqual(1, len(get_pool(b).sockets))

    # start_request binds a socket to this thread: it leaves a's pool.
    a.start_request()
    a.pymongo_test.test.find_one()
    self.assertEqual(0, len(get_pool(a).sockets))
    # end_request returns the socket to a's pool.
    a.end_request()
    self.assertEqual(1, len(get_pool(a).sockets))
    self.assertEqual(1, len(get_pool(b).sockets))
    a_sock = one(get_pool(a).sockets)

    # Ending a request that was never started leaves both pools unchanged.
    b.end_request()
    self.assertEqual(1, len(get_pool(a).sockets))
    self.assertEqual(1, len(get_pool(b).sockets))

    # A request on b checks out b's socket; a's pool is unaffected.
    b.start_request()
    b.pymongo_test.test.find_one()
    self.assertEqual(1, len(get_pool(a).sockets))
    self.assertEqual(0, len(get_pool(b).sockets))

    b.end_request()
    b_sock = one(get_pool(b).sockets)
    b.pymongo_test.test.find_one()
    a.pymongo_test.test.find_one()
    # Outside a request, each pool hands back its own single idle socket.
    self.assertEqual(b_sock, get_pool(b).get_socket())
    self.assertEqual(a_sock, get_pool(a).get_socket())

    a_sock.close()
    b_sock.close()
def run_scenario(self, scenario_def, test):
    """Run a CMAP spec test.

    `scenario_def` is the parsed spec file (version/style checked here);
    `test` is one test case from it, supplying pool options, operations,
    expected events, and optionally a fail point and an expected error.
    On any failure, dumps operations, threads, connections, events, and
    logs to stdout to aid debugging, then re-raises.
    """
    self.logs = []
    self.assertEqual(scenario_def['version'], 1)
    self.assertIn(scenario_def['style'], ['unit', 'integration'])
    self.listener = CMAPListener()
    self._ops = []

    # Configure the fail point before creating the client.
    if 'failPoint' in test:
        fp = test['failPoint']
        self.set_fail_point(fp)
        # Ensure the fail point is disabled again after the test.
        self.addCleanup(self.set_fail_point, {
            'configureFailPoint': fp['configureFailPoint'],
            'mode': 'off'})

    opts = test['poolOptions'].copy()
    opts['event_listeners'] = [self.listener]
    client = single_client(**opts)
    self.addCleanup(client.close)
    self.pool = get_pool(client)

    # Map of target names to Thread objects.
    self.targets = dict()
    # Map of label names to Connection objects
    self.labels = dict()

    def cleanup():
        # Stop and join all spawned threads, then close any connections
        # the operations checked out and labeled.
        for t in self.targets.values():
            t.stop()
        for t in self.targets.values():
            t.join(5)
        for conn in self.labels.values():
            conn.close_socket(None)

    self.addCleanup(cleanup)

    try:
        if test['error']:
            # The operations are expected to raise; verify the error
            # matches the spec's expectation.
            with self.assertRaises(PyMongoError) as ctx:
                self.run_operations(test['operations'])
            self.check_error(ctx.exception, test['error'])
        else:
            self.run_operations(test['operations'])

        self.check_events(test['events'], test['ignore'])
    except Exception:
        # Print the events after a test failure.
        print('\nFailed test: %r' % (test['description'],))
        print('Operations:')
        for op in self._ops:
            print(op)
        print('Threads:')
        print(self.targets)
        print('Connections:')
        print(self.labels)
        print('Events:')
        for event in self.listener.events:
            print(event)
        print('Log:')
        for log in self.logs:
            print(log)
        raise
def _testOperation_assertNumberConnectionsCheckedOut(self, spec):
    """Unified-test operation: verify the count of checked-out connections."""
    client = self.entity_map[spec['client']]
    checked_out = get_pool(client).active_sockets
    self.assertEqual(spec['connections'], checked_out)
def test_waitQueueMultiple(self):
    """waitQueueMultiple caps waiters at multiple * maxPoolSize."""
    client = rs_or_single_client(maxPoolSize=3, waitQueueMultiple=2)
    pool = get_pool(client)
    self.assertEqual(2, pool.opts.wait_queue_multiple)
    # 3 sockets * multiple of 2 => up to 6 threads may wait for a socket.
    self.assertEqual(6, pool._socket_semaphore.waiter_semaphore.counter)
def test_timeouts(self):
    """connectTimeoutMS/socketTimeoutMS (ms) become seconds on the pool."""
    for kwargs, attr in ((dict(connectTimeoutMS=10500), 'conn_timeout'),
                         (dict(socketTimeoutMS=10500), 'net_timeout')):
        client = MongoClient(host, port, **kwargs)
        self.assertEqual(10.5, getattr(get_pool(client), attr))
def test_socketKeepAlive(self):
    """socketKeepAlive=True is recorded on the pool."""
    client = MongoClient(host, port, socketKeepAlive=True)
    keepalive = get_pool(client).socket_keepalive
    self.assertTrue(keepalive)
def test_waitQueueTimeoutMS(self):
    """waitQueueTimeoutMS (ms) is stored on the pool in seconds."""
    client = MongoClient(host, port, waitQueueTimeoutMS=2000)
    timeout = get_pool(client).wait_queue_timeout
    self.assertEqual(2, timeout)
def test_max_pool_size_zero(self):
    """maxPoolSize=0 means the pool size is unbounded."""
    c = rs_or_single_client(maxPoolSize=0)
    self.addCleanup(c.close)
    self.assertEqual(float('inf'), get_pool(c).max_pool_size)