def test_socketKeepAlive(self):
    # Connect.
    yield self.rsc.server_info()
    ka = get_primary_pool(self.rsc).opts.socket_keepalive
    self.assertFalse(ka)

    client = self.motor_rsc(socketKeepAlive=True)
    yield client.server_info()
    ka = get_primary_pool(client).opts.socket_keepalive
    self.assertTrue(ka)
def test_socketKeepAlive(self):
    # Connect.
    yield from self.cx.server_info()
    ka = get_primary_pool(self.cx).opts.socket_keepalive
    self.assertFalse(ka)

    client = self.asyncio_client(socketKeepAlive=True)
    yield from client.server_info()
    ka = get_primary_pool(client).opts.socket_keepalive
    self.assertTrue(ka)
def test_auth_network_error(self):
    if not test.env.auth:
        raise SkipTest('Authentication is not enabled on server')

    # Make sure there's no semaphore leak if we get a network error
    # when authenticating a new socket with cached credentials.

    # Get a client with one socket so we detect if it's leaked.
    c = self.motor_rsc(maxPoolSize=1, waitQueueTimeoutMS=1)
    yield c.admin.command('ismaster')

    # Simulate an authenticate() call on a different socket.
    credentials = pymongo.auth._build_credentials_tuple(
        'DEFAULT', 'admin', text_type(db_user), text_type(db_password),
        {}, 'admin')

    c.delegate._cache_credentials('test', credentials, connect=False)

    # Cause a network error on the actual socket.
    pool = get_primary_pool(c)
    socket_info = one(pool.sockets)
    socket_info.sock.close()

    # In __check_auth, the client authenticates its socket with the
    # new credential, but gets a socket.error. Should be reraised as
    # AutoReconnect.
    with self.assertRaises(pymongo.errors.AutoReconnect):
        yield c.test.collection.find_one()

    # No semaphore leak, the pool is allowed to make a new socket.
    yield c.test.collection.find_one()
def test_reconnect_in_case_connection_closed_by_mongo(self):
    cx = self.asyncio_client(maxPoolSize=1)
    yield from cx.admin.command('ping')

    # Close the socket to imitate a connection to the MongoDB server
    # being lost; as a result we should get AutoReconnect instead of
    # IncompleteReadError.
    pool = get_primary_pool(cx)
    socket = pool.sockets.pop()
    socket.sock.close()
    pool.sockets.add(socket)

    with self.assertRaises(pymongo.errors.AutoReconnect):
        yield from cx.motor_test.test_collection.find_one()
def _test_exhaust_query_server_error(self, rs):
    # When doing an exhaust query, the socket stays checked out on success
    # but must be checked in on error to avoid counter leak.
    server = self.primary_or_standalone(rs=rs)
    client = motor.MotorClient(server.uri, maxPoolSize=1)
    yield client.admin.command('ismaster')
    pool = get_primary_pool(client)
    sock_info = one(pool.sockets)
    cursor = client.db.collection.find(cursor_type=CursorType.EXHAUST)

    # With Tornado, simply accessing fetch_next starts the fetch.
    fetch_next = cursor.fetch_next
    request = yield self.run_thread(server.receives, OpQuery)
    request.fail()

    with self.assertRaises(pymongo.errors.OperationFailure):
        yield fetch_next

    self.assertFalse(sock_info.closed)
    self.assertEqual(sock_info, one(pool.sockets))
def test_exhaust(self):
    if sys.version_info < (3, 4):
        raise SkipTest("requires Python 3.4")

    if (yield server_is_mongos(self.cx)):
        self.assertRaises(InvalidOperation,
                          self.db.test.find,
                          cursor_type=CursorType.EXHAUST)
        return

    cur = self.db.test.find(cursor_type=CursorType.EXHAUST)
    self.assertRaises(InvalidOperation, cur.limit, 5)

    cur = self.db.test.find(limit=5)
    self.assertRaises(InvalidOperation, cur.add_option, 64)

    cur = self.db.test.find()
    cur.add_option(64)
    self.assertRaises(InvalidOperation, cur.limit, 5)

    yield self.db.drop_collection("test")

    # Insert enough documents to require more than one batch.
    yield self.db.test.insert_many([{} for _ in range(150)])

    client = self.motor_client(maxPoolSize=1)

    # Ensure a pool.
    yield client.db.collection.find_one()
    socks = get_primary_pool(client).sockets

    # Make sure the socket is returned after exhaustion.
    cur = client[self.db.name].test.find(cursor_type=CursorType.EXHAUST)
    has_next = yield cur.fetch_next
    self.assertTrue(has_next)
    self.assertEqual(0, len(socks))

    while (yield cur.fetch_next):
        cur.next_object()

    self.assertEqual(1, len(socks))

    # Same as previous but with to_list instead of next_object.
    docs = yield client[self.db.name].test.find(
        cursor_type=CursorType.EXHAUST).to_list(None)

    self.assertEqual(1, len(socks))
    self.assertEqual(
        (yield self.db.test.count_documents({})),
        len(docs))

    # If the Cursor instance is discarded before being
    # completely iterated we have to close and
    # discard the socket.
    sock = one(socks)
    cur = client[self.db.name].test.find(
        cursor_type=CursorType.EXHAUST).batch_size(1)
    has_next = yield cur.fetch_next
    self.assertTrue(has_next)
    self.assertEqual(0, len(socks))
    if 'PyPy' in sys.version:
        # Don't wait for GC or use gc.collect(), it's unreliable.
        yield cur.close()

    del cur
    yield gen.sleep(0.1)

    # The exhaust cursor's socket was discarded, although another may
    # already have been opened to send OP_KILLCURSORS.
    self.assertNotIn(sock, socks)
    self.assertTrue(sock.closed)
def test_close(self):
    cx = self.asyncio_client()
    cx.close()
    self.assertEqual(None, get_primary_pool(cx))
def test_exhaust(self):
    if (yield from server_is_mongos(self.cx)):
        self.assertRaises(InvalidOperation,
                          self.db.test.find,
                          cursor_type=CursorType.EXHAUST)
        return

    self.assertRaises(ValueError, self.db.test.find, cursor_type=5)

    cur = self.db.test.find(cursor_type=CursorType.EXHAUST)
    self.assertRaises(InvalidOperation, cur.limit, 5)

    cur = self.db.test.find(limit=5)
    self.assertRaises(InvalidOperation, cur.add_option, 64)

    cur = self.db.test.find()
    cur.add_option(64)
    self.assertRaises(InvalidOperation, cur.limit, 5)

    yield from self.db.drop_collection("test")

    # Insert enough documents to require more than one batch.
    yield from self.db.test.insert_many([{} for _ in range(150)])

    client = self.asyncio_client(maxPoolSize=1)

    # Ensure a pool.
    yield from client.db.collection.find_one()
    socks = get_primary_pool(client).sockets

    # Make sure the socket is returned after exhaustion.
    cur = client[self.db.name].test.find(cursor_type=CursorType.EXHAUST)
    has_next = yield from cur.fetch_next
    self.assertTrue(has_next)
    self.assertEqual(0, len(socks))

    while (yield from cur.fetch_next):
        cur.next_object()

    self.assertEqual(1, len(socks))

    # Same as previous but with to_list instead of next_object.
    docs = yield from client[self.db.name].test.find(
        cursor_type=CursorType.EXHAUST).to_list(None)

    self.assertEqual(1, len(socks))
    self.assertEqual(
        (yield from self.db.test.count_documents({})),
        len(docs))

    # If the Cursor instance is discarded before being
    # completely iterated we have to close and
    # discard the socket.
    sock = one(socks)
    cur = client[self.db.name].test.find(
        cursor_type=CursorType.EXHAUST).batch_size(1)
    has_next = yield from cur.fetch_next
    self.assertTrue(has_next)
    self.assertEqual(0, len(socks))
    if 'PyPy' in sys.version:
        # Don't wait for GC or use gc.collect(), it's unreliable.
        yield from cur.close()

    del cur
    yield from asyncio.sleep(0.1, loop=self.loop)

    # The exhaust cursor's socket was discarded, although another may
    # already have been opened to send OP_KILLCURSORS.
    self.assertNotIn(sock, socks)
    self.assertTrue(sock.closed)
async def test_exhaust(self):
    if await server_is_mongos(self.cx):
        self.assertRaises(InvalidOperation,
                          self.db.test.find,
                          cursor_type=CursorType.EXHAUST)
        return

    cur = self.db.test.find(cursor_type=CursorType.EXHAUST)
    self.assertRaises(InvalidOperation, cur.limit, 5)

    cur = self.db.test.find(limit=5)
    self.assertRaises(InvalidOperation, cur.add_option, 64)

    cur = self.db.test.find()
    cur.add_option(64)
    self.assertRaises(InvalidOperation, cur.limit, 5)

    await self.db.drop_collection("test")

    # Insert enough documents to require more than one batch.
    await self.db.test.insert_many([{} for _ in range(150)])

    client = self.motor_client(maxPoolSize=1)

    # Ensure a pool.
    await client.db.collection.find_one()
    socks = get_primary_pool(client).sockets

    # Make sure the socket is returned after exhaustion.
    cur = client[self.db.name].test.find(cursor_type=CursorType.EXHAUST)
    has_next = await cur.fetch_next
    self.assertTrue(has_next)
    self.assertEqual(0, len(socks))

    while await cur.fetch_next:
        cur.next_object()

    self.assertEqual(1, len(socks))

    # Same as previous but with to_list instead of next_object.
    docs = await client[self.db.name].test.find(
        cursor_type=CursorType.EXHAUST).to_list(None)

    self.assertEqual(1, len(socks))
    self.assertEqual((await self.db.test.count_documents({})), len(docs))

    # If the Cursor instance is discarded before being
    # completely iterated we have to close and
    # discard the socket.
    sock = one(socks)
    cur = client[self.db.name].test.find(
        cursor_type=CursorType.EXHAUST).batch_size(1)
    await cur.fetch_next
    self.assertTrue(cur.next_object())
    # Run at least one getMore to initiate the OP_MSG exhaust protocol.
    if env.version.at_least(4, 2):
        await cur.fetch_next
        self.assertTrue(cur.next_object())
    self.assertEqual(0, len(socks))
    if "PyPy" in sys.version:
        # Don't wait for GC or use gc.collect(), it's unreliable.
        await cur.close()

    del cur

    async def sock_closed():
        return sock not in socks and sock.closed

    await wait_until(
        sock_closed, "close exhaust cursor socket",
        timeout=get_async_test_timeout())

    # The exhaust cursor's socket was discarded, although another may
    # already have been opened to send OP_KILLCURSORS.
    self.assertNotIn(sock, socks)
    self.assertTrue(sock.closed)