def undo(self, transaction_id, transaction):
    """
    This method temporarily holds the pack lock, releasing it when
    done, and it also holds the commit lock, keeping it held for
    the next phase.

    Returns an iterable of ``(oid_int, tid_int)`` pairs giving the
    items that were restored and are now current. All of those oids
    that had any data stored for ``transaction_id`` are now invalid.
    """
    # Typically if this is called, the store/restore methods will *not* be
    # called, but there's not a strict guarantee about that.
    if transaction is not self.transaction:
        raise StorageTransactionError(self, transaction)

    # Unlike most places, transaction_id is the base 64 encoding
    # of an 8 byte tid.
    undo_tid = base64_decodebytes(transaction_id + b'\n') # pylint:disable=deprecated-method
    assert len(undo_tid) == 8
    undo_tid_int = bytes8_to_int64(undo_tid)

    adapter = self.shared_state.adapter
    cursor = self.shared_state.store_connection.cursor
    assert cursor is not None
    adapter.locker.hold_pack_lock(cursor)
    try:
        adapter.packundo.verify_undoable(cursor, undo_tid_int)
        self._obtain_commit_lock(cursor)

        self_tid_int = self.committing_tid_lock.tid_int
        copied = adapter.packundo.undo(cursor, undo_tid_int, self_tid_int)

        # Invalidate all cached data for these oids. We have a
        # brand new transaction ID that's greater than any they
        # had before. In history-preserving mode, there could
        # still be other valid versions. See notes in packundo:
        # In theory we could be undoing a transaction several
        # generations in the past where the object had multiple
        # intermediate states, but in practice we're probably just
        # undoing the latest state. Still, play it a bit safer.
        oid_ints = [oid_int for oid_int, _ in copied]
        self.shared_state.cache.remove_all_cached_data_for_oids(oid_ints)

        # Update the current object pointers immediately, so that
        # subsequent undo operations within this transaction will see
        # the new current objects.
        adapter.mover.update_current(cursor, self_tid_int)

        self.shared_state.blobhelper.copy_undone(
            copied,
            self.committing_tid_lock.tid)

        oids = [int64_to_8bytes(oid_int) for oid_int in oid_ints]
        self._invalidated_oids(*oids)

        return copied
    finally:
        adapter.locker.release_pack_lock(cursor)
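
# ---------------------------------------------------------------------------
# Usage sketch (illustration only; not part of the class above). This undo()
# is normally reached through ZODB's public undo API during two-phase commit
# rather than being called directly. A minimal sketch, assuming an
# already-opened RelStorage instance is passed in as ``storage``; the helper
# name is hypothetical.

def _example_undo_latest_transaction(storage):
    import transaction
    from ZODB import DB

    db = DB(storage)
    try:
        # undoInfo() records carry the base-64 encoded 8-byte tid in their
        # 'id' field, which is the ``transaction_id`` format expected above.
        record = db.undoInfo()[0]          # most recent undoable transaction
        db.undo(record['id'])              # queue the undo on the current transaction
        # Committing drives two-phase commit, which eventually calls
        # undo(transaction_id, transaction) on the storage.
        transaction.commit()
    finally:
        db.close()
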
def undo(self, transaction_id, transaction):
    """
    This method temporarily holds the pack lock, releasing it when
    done, and it also holds the commit lock, keeping it held for
    the next phase.
    """
    # Typically if this is called, the store/restore methods will *not* be
    # called, but there's not a strict guarantee about that.
    if transaction is not self.transaction:
        raise StorageTransactionError(self, transaction)

    # Unlike most places, transaction_id is the base 64 encoding
    # of an 8 byte tid.
    undo_tid = base64_decodebytes(transaction_id + b'\n') # pylint:disable=deprecated-method
    assert len(undo_tid) == 8
    undo_tid_int = bytes8_to_int64(undo_tid)

    adapter = self.adapter
    cursor = self.store_connection.cursor
    assert cursor is not None
    adapter.locker.hold_pack_lock(cursor)
    try:
        adapter.packundo.verify_undoable(cursor, undo_tid_int)

        if self.committing_tid_lock is None:
            # Note that _prepare_tid acquires the commit lock.
            # The commit lock must be acquired after the pack lock
            # because the database adapters also acquire in that
            # order during packing.
            tid_lock = DatabaseLockedForTid.lock_database_for_next_tid(
                cursor, adapter, self.ude)
            self.committing_tid_lock = tid_lock

        self_tid_int = self.committing_tid_lock.tid_int
        copied = adapter.packundo.undo(cursor, undo_tid_int, self_tid_int)
        oids = [int64_to_8bytes(oid_int) for oid_int, _ in copied]

        # Update the current object pointers immediately, so that
        # subsequent undo operations within this transaction will see
        # the new current objects.
        adapter.mover.update_current(cursor, self_tid_int)

        self.blobhelper.copy_undone(copied, self.committing_tid_lock.tid)

        if not self.undone_oids:
            self.undone_oids = set()
        self.undone_oids.update(oids)
    finally:
        adapter.locker.release_pack_lock(cursor)
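
# ---------------------------------------------------------------------------
# Helper sketch (illustration only): how the ``transaction_id`` handled above
# relates to the raw 8-byte tid and its integer form. This assumes
# ``base64_decodebytes`` is the standard-library ``base64.decodebytes`` and
# that ``bytes8_to_int64`` / ``int64_to_8bytes`` are equivalent to big-endian
# 64-bit packing (as with ZODB tids); the function name is hypothetical.

import base64
import struct

def _example_tid_round_trip(tid_bytes):
    # ZODB undo ids are the base-64 encoding of the 8-byte tid with the
    # trailing newline stripped; undo() re-appends b'\n' before decoding.
    transaction_id = base64.encodebytes(tid_bytes)[:-1]
    undo_tid = base64.decodebytes(transaction_id + b'\n')
    assert undo_tid == tid_bytes and len(undo_tid) == 8

    # Assumed equivalent of bytes8_to_int64 / int64_to_8bytes.
    undo_tid_int = struct.unpack('>Q', undo_tid)[0]
    assert struct.pack('>Q', undo_tid_int) == undo_tid
    return undo_tid_int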