def test_cursor_manager(self):
    self.close_was_called = False
    test_case = self

    class CM(CursorManager):
        def __init__(self, client):
            super(CM, self).__init__(client)

        def close(self, cursor_id, address):
            test_case.close_was_called = True
            super(CM, self).close(cursor_id, address)

    with client_knobs(kill_cursor_frequency=0.01):
        client = rs_or_single_client(maxPoolSize=1)
        client.set_cursor_manager(CM)

        # Create a cursor on the same client so we're certain the getMore
        # is sent after the killCursors message.
        cursor = client.pymongo_test.test.find().batch_size(1)
        next(cursor)
        client.close_cursor(
            cursor.cursor_id,
            _CursorAddress(self.client.address, self.collection.full_name))

        def raises_cursor_not_found():
            try:
                next(cursor)
                return False
            except CursorNotFound:
                return True

        wait_until(raises_cursor_not_found, 'close cursor')
        self.assertTrue(self.close_was_called)
def __die(self): """Closes this cursor. """ if self.__id and not self.__killed: self.__collection.database.client.close_cursor( self.__id, _CursorAddress(self.__address, self.__ns)) self.__killed = True
def __die(self, synchronous=False): """Closes this cursor. """ try: already_killed = self.__killed except AttributeError: # __init__ did not run to completion (or at all). return self.__killed = True if self.__id and not already_killed: if self.__exhaust and self.__exhaust_mgr: # If this is an exhaust cursor and we haven't completely # exhausted the result set we *must* close the socket # to stop the server from sending more data. self.__exhaust_mgr.sock.close_socket( ConnectionClosedReason.ERROR) else: address = _CursorAddress(self.__address, self.__collection.full_name) if synchronous: self.__collection.database.client._close_cursor_now( self.__id, address, session=self.__session) else: # The cursor will be closed later in a different session. self.__collection.database.client._close_cursor( self.__id, address) if self.__exhaust and self.__exhaust_mgr: self.__exhaust_mgr.close() if self.__session and not self.__explicit_session: self.__session._end_session(lock=synchronous) self.__session = None
def __die(self, synchronous=False): """Closes this cursor.""" try: already_killed = self.__killed except AttributeError: # __init__ did not run to completion (or at all). return self.__killed = True if self.__id and not already_killed: cursor_id = self.__id address = _CursorAddress( self.__address, "%s.%s" % (self.__dbname, self.__collname)) else: # Skip killCursors. cursor_id = 0 address = None self.__collection.database.client._cleanup_cursor( synchronous, cursor_id, address, self.__sock_mgr, self.__session, self.__explicit_session, ) if not self.__explicit_session: self.__session = None self.__sock_mgr = None
def __die(self, synchronous=False): """Closes this cursor. """ if self.__id and not self.__killed: if self.__exhaust and self.__exhaust_mgr: # If this is an exhaust cursor and we haven't completely # exhausted the result set we *must* close the socket # to stop the server from sending more data. self.__exhaust_mgr.sock.close() else: address = _CursorAddress(self.__address, self.__collection.full_name) if synchronous: self.__collection.database.client._close_cursor_now( self.__id, address, session=self.__session) else: # The cursor will be closed later in a different session. self.__collection.database.client.close_cursor( self.__id, address) if self.__exhaust and self.__exhaust_mgr: self.__exhaust_mgr.close() self.__killed = True if self.__session and not self.__explicit_session: self.__session._end_session(lock=synchronous) self.__session = None
def test_survive_cursor_not_found(self):
    # By default the find command returns 101 documents in the first batch.
    # Use 102 chunks to cause a single getMore.
    chunk_size = 1024
    data = b'd' * (102 * chunk_size)
    listener = EventListener()
    client = rs_or_single_client(event_listeners=[listener])
    db = client.pymongo_test
    with GridIn(db.fs, chunk_size=chunk_size) as infile:
        infile.write(data)

    with GridOut(db.fs, infile._id) as outfile:
        self.assertEqual(len(outfile.readchunk()), chunk_size)

        # Kill the cursor to simulate the cursor timing out on the server
        # when an application spends a long time between two calls to
        # readchunk().
        client._close_cursor_now(
            outfile._GridOut__chunk_iter._cursor.cursor_id,
            _CursorAddress(client.address, db.fs.chunks.full_name))

        # Read the rest of the file without error.
        self.assertEqual(len(outfile.read()), len(data) - chunk_size)

    # Paranoid, ensure that a getMore was actually sent.
    self.assertIn("getMore", listener.started_command_names())
def test_kill_cursors_with_cursoraddress(self): if (client_context.is_mongos and not client_context.version.at_least(2, 4, 7)): # Old mongos sends incorrectly formatted error response when # cursor isn't found, see SERVER-9738. raise SkipTest("Can't test kill_cursors against old mongos") self.collection = self.client.pymongo_test.test self.collection.drop() self.collection.insert_many([{'_id': i} for i in range(200)]) cursor = self.collection.find().batch_size(1) next(cursor) self.client.kill_cursors( [cursor.cursor_id], _CursorAddress(self.client.address, self.collection.full_name)) # Prevent killcursors from reaching the server while a getmore is in # progress -- the server logs "Assertion: 16089:Cannot kill active # cursor." time.sleep(2) def raises_cursor_not_found(): try: next(cursor) return False except CursorNotFound: return True wait_until(raises_cursor_not_found, 'close cursor')
def __die(self, synchronous=False): """Closes this cursor. """ already_killed = self.__killed self.__killed = True if self.__id and not already_killed: if self.__exhaust and self.__exhaust_mgr: # If this is an exhaust cursor and we haven't completely # exhausted the result set we *must* close the socket # to stop the server from sending more data. self.__exhaust_mgr.sock.close() else: address = _CursorAddress( self.__address, self.__collection.full_name) if synchronous: self.__collection.database.client._close_cursor_now( self.__id, address, session=self.__session) else: # The cursor will be closed later in a different session. self.__collection.database.client.close_cursor( self.__id, address) if self.__exhaust and self.__exhaust_mgr: self.__exhaust_mgr.close() if self.__session and not self.__explicit_session: self.__session._end_session(lock=synchronous) self.__session = None
def test_kill_cursors_with_cursoraddress(self): if (client_context.is_mongos and not client_context.version.at_least(2, 4, 7)): # Old mongos sends incorrectly formatted error response when # cursor isn't found, see SERVER-9738. raise SkipTest("Can't test kill_cursors against old mongos") coll = self.client.pymongo_test.test coll.drop() coll.insert_many([{'_id': i} for i in range(200)]) cursor = coll.find().batch_size(1) next(cursor) self.client.kill_cursors([cursor.cursor_id], _CursorAddress(self.client.address, coll.full_name)) # Prevent killcursors from reaching the server while a getmore is in # progress -- the server logs "Assertion: 16089:Cannot kill active # cursor." time.sleep(2) def raises_cursor_not_found(): try: next(cursor) return False except CursorNotFound: return True wait_until(raises_cursor_not_found, 'close cursor')
def test_resume_on_error(self):
    """ChangeStream will automatically resume one time on a resumable
    error (including not master) with the initial pipeline and options,
    except for the addition/update of a resumeToken.
    """
    with self.coll.watch([]) as change_stream:
        self.insert_and_check(change_stream, {'_id': 1})
        # Cause a cursor not found error on the next getMore.
        cursor = change_stream._cursor
        address = _CursorAddress(cursor.address, self.coll.full_name)
        self.client._close_cursor_now(cursor.cursor_id, address)
        self.insert_and_check(change_stream, {'_id': 2})
def __die(self, synchronous=False): """Closes this cursor. """ if self.__id and not self.__killed: address = _CursorAddress(self.__address, self.__collection.full_name) if synchronous: self.__collection.database.client._close_cursor_now( self.__id, address) else: self.__collection.database.client.close_cursor( self.__id, address) self.__killed = True
def test_kill_cursors(self):
    """The killCursors command sent during the resume process must not be
    allowed to raise an exception.
    """
    def raise_error():
        raise ServerSelectionTimeoutError('mock error')

    with self.coll.watch([]) as change_stream:
        self.insert_and_check(change_stream, {'_id': 1})
        # Cause a cursor not found error on the next getMore.
        cursor = change_stream._cursor
        address = _CursorAddress(cursor.address, self.coll.full_name)
        self.client._close_cursor_now(cursor.cursor_id, address)
        cursor.close = raise_error
        self.insert_and_check(change_stream, {'_id': 2})
def __die(self, synchronous=False): """Closes this cursor. """ if self.__id and not self.__killed: address = _CursorAddress(self.__address, self.__collection.full_name) if synchronous: self.__collection.database.client._close_cursor_now( self.__id, address, session=self.__session) else: # The cursor will be closed later in a different session. self.__collection.database.client.close_cursor( self.__id, address) self.__killed = True self.__end_session(synchronous)
def __die(self, synchronous=False): """Closes this cursor. """ already_killed = self.__killed self.__killed = True if self.__id and not already_killed: address = _CursorAddress( self.__address, self.__collection.full_name) if synchronous: self.__collection.database.client._close_cursor_now( self.__id, address, session=self.__session) else: # The cursor will be closed later in a different session. self.__collection.database.client._close_cursor( self.__id, address) self.__end_session(synchronous)
def __die(self): """Closes this cursor. """ if self.__id and not self.__killed: if self.__exhaust and self.__exhaust_mgr: # If this is an exhaust cursor and we haven't completely # exhausted the result set we *must* close the socket # to stop the server from sending more data. self.__exhaust_mgr.sock.close() else: self.__collection.database.client.close_cursor( self.__id, _CursorAddress( self.__address, self.__collection.full_name)) if self.__exhaust and self.__exhaust_mgr: self.__exhaust_mgr.close() self.__killed = True
def __die(self): """Closes this cursor. """ if self.__id and not self.__killed: if self.__exhaust and self.__exhaust_mgr: # If this is an exhaust cursor and we haven't completely # exhausted the result set we *must* close the socket # to stop the server from sending more data. self.__exhaust_mgr.sock.close() else: self.__collection.database.client.close_cursor( self.__id, _CursorAddress(self.__address, self.__collection.full_name)) if self.__exhaust and self.__exhaust_mgr: self.__exhaust_mgr.close() self.__killed = True
def __die(self, synchronous=False): """Closes this cursor. """ already_killed = self.__killed self.__killed = True if self.__id and not already_killed: cursor_id = self.__id address = _CursorAddress(self.__address, self.__ns) else: # Skip killCursors. cursor_id = 0 address = None self.__collection.database.client._cleanup_cursor( synchronous, cursor_id, address, self.__sock_mgr, self.__session, self.__explicit_session) if not self.__explicit_session: self.__session = None self.__sock_mgr = None
def test_cursor_manager(self):
    if (client_context.is_mongos and
            not client_context.version.at_least(2, 4, 7)):
        # Old mongos sends incorrectly formatted error response when
        # cursor isn't found, see SERVER-9738.
        raise SkipTest("Can't test kill_cursors against old mongos")

    self.close_was_called = False
    test_case = self

    class CM(CursorManager):
        def __init__(self, client):
            super(CM, self).__init__(client)

        def close(self, cursor_id, address):
            test_case.close_was_called = True
            super(CM, self).close(cursor_id, address)

    with client_knobs(kill_cursor_frequency=0.01):
        client = rs_or_single_client(maxPoolSize=1)
        client.set_cursor_manager(CM)

        # Create a cursor on the same client so we're certain the getMore
        # is sent after the killCursors message.
        cursor = client.pymongo_test.test.find().batch_size(1)
        next(cursor)
        client.close_cursor(
            cursor.cursor_id,
            _CursorAddress(self.client.address, self.collection.full_name))

        def raises_cursor_not_found():
            try:
                next(cursor)
                return False
            except CursorNotFound:
                return True

        wait_until(raises_cursor_not_found, 'close cursor')
        self.assertTrue(self.close_was_called)
def kill_change_stream_cursor(self, change_stream):
    # Cause a cursor not found error on the next getMore.
    cursor = change_stream._cursor
    address = _CursorAddress(cursor.address, cursor._CommandCursor__ns)
    client = self.watched_collection().database.client
    client._close_cursor_now(cursor.cursor_id, address)
def kill_change_stream_cursor(self, change_stream):
    # Cause a cursor not found error on the next getMore.
    cursor = change_stream._cursor
    address = _CursorAddress(cursor.address, cursor._CommandCursor__ns)
    client = self.input_target.database.client
    client._close_cursor_now(cursor.cursor_id, address)