def test_exhaust(self):
    """Exercise exhaust-mode cursors end to end.

    Checks (in order): exhaust is rejected on mongos; argument validation
    for ``find(exhaust=...)``; interaction of exhaust with ``limit`` and
    ``add_option``; and that the single pooled socket is checked back in
    once an exhaust cursor is fully iterated, or discarded if the cursor
    is abandoned mid-iteration.

    NOTE(review): this is a generator-based coroutine test — each ``yield``
    resumes when the async operation completes (presumably driven by a
    ``gen_test``-style decorator outside this view).
    """
    # Exhaust cursors are not supported through mongos; find() itself
    # must raise rather than deferring the failure.
    if (yield server_is_mongos(self.cx)):
        self.assertRaises(InvalidOperation,
                          self.db.test.find, exhaust=True)
        return

    # exhaust must be a boolean.
    self.assertRaises(TypeError, self.db.test.find, exhaust=5)

    # Exhaust mode is incompatible with limit, in either order:
    # limit after exhaust, exhaust option after limit.
    cur = self.db.test.find(exhaust=True)
    self.assertRaises(InvalidOperation, cur.limit, 5)
    cur = self.db.test.find(limit=5)
    self.assertRaises(InvalidOperation, cur.add_option, 64)
    cur = self.db.test.find()
    cur.add_option(64)  # 64 is presumably the exhaust wire-protocol flag — confirm.
    self.assertRaises(InvalidOperation, cur.limit, 5)

    yield self.db.drop_collection("test")
    # Insert enough documents to require more than one batch.
    yield self.db.test.insert([{} for _ in range(150)])

    # A single-socket pool makes socket check-in/check-out observable.
    client = self.motor_client(max_pool_size=1)
    # Ensure a pool.
    yield client.db.collection.find_one()
    # Direct handle on the pool's idle-socket collection; len() of it
    # tells us whether the one socket is checked in (1) or out (0).
    socks = client._get_primary_pool().sockets

    # Make sure the socket is returned after exhaustion.
    cur = client[self.db.name].test.find(exhaust=True)
    has_next = yield cur.fetch_next
    self.assertTrue(has_next)
    # Socket is checked out for the duration of the exhaust stream.
    self.assertEqual(0, len(socks))
    while (yield cur.fetch_next):
        cur.next_object()

    # Stream exhausted: the socket must be back in the pool.
    self.assertEqual(1, len(socks))

    # Same as previous but with to_list instead of next_object.
    docs = yield client[self.db.name].test.find(exhaust=True).to_list(None)
    self.assertEqual(1, len(socks))
    self.assertEqual(
        (yield self.db.test.count()),
        len(docs))

    # If the Cursor instance is discarded before being
    # completely iterated we have to close and
    # discard the socket.
    cur = client[self.db.name].test.find(exhaust=True)
    has_next = yield cur.fetch_next
    self.assertTrue(has_next)
    self.assertEqual(0, len(socks))
    if 'PyPy' in sys.version:
        # Don't wait for GC or use gc.collect(), it's unreliable.
        cur.close()

    # Drop the last reference; on CPython refcounting reclaims the
    # cursor immediately, triggering its cleanup path.
    cur = None
    # The socket should be discarded.
    self.assertEqual(0, len(socks))
def test_exhaust(self):
    """Exercise exhaust-mode cursors end to end.

    Same flow as the sibling variant but constructs the client directly
    with ``motor.MotorClient(host, port, ...)`` instead of the
    ``self.motor_client`` test helper.

    NOTE(review): generator-based coroutine test — each ``yield`` resumes
    when the async operation completes (presumably via a ``gen_test``-style
    runner outside this view).
    """
    # Exhaust cursors are not supported through mongos; find() itself
    # must raise rather than deferring the failure.
    if (yield server_is_mongos(self.cx)):
        self.assertRaises(InvalidOperation,
                          self.db.test.find, exhaust=True)
        return

    # exhaust must be a boolean.
    self.assertRaises(TypeError, self.db.test.find, exhaust=5)

    # Exhaust mode is incompatible with limit, in either order:
    # limit after exhaust, exhaust option after limit.
    cur = self.db.test.find(exhaust=True)
    self.assertRaises(InvalidOperation, cur.limit, 5)
    cur = self.db.test.find(limit=5)
    self.assertRaises(InvalidOperation, cur.add_option, 64)
    cur = self.db.test.find()
    cur.add_option(64)  # 64 is presumably the exhaust wire-protocol flag — confirm.
    self.assertRaises(InvalidOperation, cur.limit, 5)

    yield self.db.drop_collection("test")
    # Insert enough documents to require more than one batch.
    yield self.db.test.insert([{} for _ in range(150)])

    # A single-socket pool makes socket check-in/check-out observable.
    client = motor.MotorClient(host, port, max_pool_size=1)
    # Ensure a pool.
    yield client.db.collection.find_one()
    # Direct handle on the pool's idle-socket collection; len() of it
    # tells us whether the one socket is checked in (1) or out (0).
    socks = client._get_primary_pool().sockets

    # Make sure the socket is returned after exhaustion.
    cur = client[self.db.name].test.find(exhaust=True)
    has_next = yield cur.fetch_next
    self.assertTrue(has_next)
    # Socket is checked out for the duration of the exhaust stream.
    self.assertEqual(0, len(socks))
    while (yield cur.fetch_next):
        cur.next_object()

    # Stream exhausted: the socket must be back in the pool.
    self.assertEqual(1, len(socks))

    # Same as previous but with to_list instead of next_object.
    docs = yield client[self.db.name].test.find(exhaust=True).to_list(None)
    self.assertEqual(1, len(socks))
    self.assertEqual(
        (yield self.db.test.count()),
        len(docs))

    # If the Cursor instance is discarded before being
    # completely iterated we have to close and
    # discard the socket.
    cur = client[self.db.name].test.find(exhaust=True)
    has_next = yield cur.fetch_next
    self.assertTrue(has_next)
    self.assertEqual(0, len(socks))
    if 'PyPy' in sys.version:
        # Don't wait for GC or use gc.collect(), it's unreliable.
        cur.close()

    # Drop the last reference; on CPython refcounting reclaims the
    # cursor immediately, triggering its cleanup path.
    cur = None
    # The socket should be discarded.
    self.assertEqual(0, len(socks))
def test_exhaust(self):
    """Exercise exhaust-mode cursors end to end.

    Extended variant: after abandoning a partially-iterated exhaust
    cursor it additionally verifies that the exact socket was closed
    (not merely absent from the pool) and that the server-side cursor
    is eventually killed.

    NOTE(review): generator-based coroutine test — each ``yield`` resumes
    when the async operation completes (presumably via a ``gen_test``-style
    runner outside this view).
    """
    # Exhaust cursors are not supported through mongos; find() itself
    # must raise rather than deferring the failure.
    if (yield server_is_mongos(self.cx)):
        self.assertRaises(InvalidOperation,
                          self.db.test.find, exhaust=True)
        return

    # exhaust must be a boolean.
    self.assertRaises(TypeError, self.db.test.find, exhaust=5)

    # Exhaust mode is incompatible with limit, in either order:
    # limit after exhaust, exhaust option after limit.
    cur = self.db.test.find(exhaust=True)
    self.assertRaises(InvalidOperation, cur.limit, 5)
    cur = self.db.test.find(limit=5)
    self.assertRaises(InvalidOperation, cur.add_option, 64)
    cur = self.db.test.find()
    cur.add_option(64)  # 64 is presumably the exhaust wire-protocol flag — confirm.
    self.assertRaises(InvalidOperation, cur.limit, 5)

    yield self.db.drop_collection("test")
    # Insert enough documents to require more than one batch.
    yield self.db.test.insert([{} for _ in range(150)])

    # A single-socket pool makes socket check-in/check-out observable.
    client = self.motor_client(max_pool_size=1)
    # Ensure a pool.
    yield client.db.collection.find_one()
    # Direct handle on the pool's idle-socket collection; len() of it
    # tells us whether the one socket is checked in (1) or out (0).
    socks = client._get_primary_pool().sockets

    # Make sure the socket is returned after exhaustion.
    cur = client[self.db.name].test.find(exhaust=True)
    has_next = yield cur.fetch_next
    self.assertTrue(has_next)
    # Socket is checked out for the duration of the exhaust stream.
    self.assertEqual(0, len(socks))
    while (yield cur.fetch_next):
        cur.next_object()

    # Stream exhausted: the socket must be back in the pool.
    self.assertEqual(1, len(socks))

    # Same as previous but with to_list instead of next_object.
    docs = yield client[self.db.name].test.find(exhaust=True).to_list(None)
    self.assertEqual(1, len(socks))
    self.assertEqual(
        (yield self.db.test.count()),
        len(docs))

    # If the Cursor instance is discarded before being
    # completely iterated we have to close and
    # discard the socket.
    # Remember the pool's sole socket so we can check it was closed.
    sock = one(socks)

    # batch_size(1) guarantees the cursor is still open on the server
    # after the first fetch.
    cur = client[self.db.name].test.find(exhaust=True).batch_size(1)
    has_next = yield cur.fetch_next
    self.assertTrue(has_next)
    self.assertEqual(0, len(socks))
    if 'PyPy' in sys.version:
        # Don't wait for GC or use gc.collect(), it's unreliable.
        cur.close()

    # Capture server-side cursor state before dropping the reference;
    # _Cursor__retrieved is the name-mangled private counter on the
    # wrapped PyMongo cursor.
    cursor_id = cur.cursor_id
    retrieved = cur.delegate._Cursor__retrieved
    # Drop the last reference; on CPython refcounting reclaims the
    # cursor immediately, triggering its cleanup path.
    cur = None
    # Give the asynchronous cleanup a moment to run.
    yield self.pause(0.1)

    # The exhaust cursor's socket was discarded, although another may
    # already have been opened to send OP_KILLCURSORS.
    self.assertNotIn(sock, socks)
    self.assertTrue(sock.closed)
    # Wait until the server reports the cursor as killed.
    yield self.wait_for_cursor(self.collection, retrieved, cursor_id)