Example #1
 def new(self, txn):
     key = id(txn)
     if key in self:
         raise StorageTransactionError("commit of transaction %r"
                                       " already started" % txn)
     context = self[key] = Transaction(txn)
     return context
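
Example #1 (and its fuller variant in Example #18, plus the matching lookup in Example #23) shows a container pattern: commit contexts are keyed by id(txn), a duplicate new() for the same transaction is rejected, and looking up an unknown transaction raises. A minimal, self-contained sketch of that pattern follows; the container class here is illustrative, not the library's real one:

    class StorageTransactionError(Exception):
        pass

    class TransactionContainer(dict):
        def new(self, txn):
            key = id(txn)
            if key in self:
                raise StorageTransactionError(
                    "commit of transaction %r already started" % txn)
            context = self[key] = {'txn': txn}
            return context

        def get(self, txn):
            try:
                return self[id(txn)]
            except KeyError:
                raise StorageTransactionError("unknown transaction %r" % txn)

    txn = object()
    container = TransactionContainer()
    container.new(txn)           # first commit registers a context
    try:
        container.new(txn)       # a second commit of the same txn is refused
    except StorageTransactionError as exc:
        print(exc)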
Example #2
    def checkCurrentSerialInTransaction(self, oid, required_tid, transaction):
        if transaction is not self.transaction:
            raise StorageTransactionError(self, transaction)

        required_tid_int = bytes8_to_int64(required_tid)
        oid_int = bytes8_to_int64(oid)

        # If this transaction already specified a different serial for
        # this oid, the transaction conflicts with itself.
        required_tids = self.required_tids
        if not required_tids:
            required_tids = self.required_tids = OID_TID_MAP_TYPE()

        previous_serial_int = required_tids.get(oid_int, required_tid_int)
        if previous_serial_int != required_tid_int:
            raise TransactionConflictsWithItselfError(
                oid=oid,
                serials=(int64_to_8bytes(previous_serial_int), required_tid))

        newer_tid = self.shared_state.local_client.contains_oid_with_newer_tid(
            oid_int, required_tid_int)
        if newer_tid:
            raise CacheHasNewerTidError(oid=oid,
                                        serials=(int64_to_8bytes(newer_tid),
                                                 required_tid))

        required_tids[oid_int] = required_tid_int
Example #3
    def store(self, oid, previous_tid, data, transaction):
        """
        This method should take no globally visible commit locks.
        """
        # Called by Connection.commit(), after tpc_begin has been called.
        if transaction is not self.transaction:
            raise StorageTransactionError(self, transaction)

        cache = self.cache
        oid_int = bytes8_to_int64(oid)
        if previous_tid:
            # previous_tid is the tid of the state that the
            # object was loaded from.

            # XXX PY3: ZODB.tests.IteratorStorage passes a str (non-bytes) value for oid
            prev_tid_int = bytes8_to_int64(
                previous_tid
                if isinstance(previous_tid, bytes)
                else previous_tid.encode('ascii')
            )
        else:
            prev_tid_int = 0

        self.max_stored_oid = max(self.max_stored_oid, oid_int)
        # Save the data locally in a temporary place. Later, closer to commit time,
        # we'll send it all over at once. This lets us do things like use
        # COPY in postgres.
        cache.store_temp(oid_int, data, prev_tid_int)
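
The comment above describes the key idea: store() does no database work, it only stages the write locally so that everything can be sent in one bulk operation (such as PostgreSQL COPY) near commit time. A rough, self-contained illustration of that buffering strategy; TempStorage and execute_bulk_copy are stand-ins, not RelStorage's actual API:

    class TempStorage(object):
        """Queues (oid, data, prev_tid) rows until commit time."""

        def __init__(self):
            self._rows = []

        def store_temp(self, oid_int, data, prev_tid_int):
            # No database round-trip here; just remember the write.
            self._rows.append((oid_int, data, prev_tid_int))

        def flush(self, execute_bulk_copy):
            # One bulk operation replaces many single-row INSERTs when
            # the transaction is finally sent to the server.
            execute_bulk_copy(self._rows)
            self._rows = []

    temp = TempStorage()
    temp.store_temp(1, b'pickled-state-1', 0)
    temp.store_temp(2, b'pickled-state-2', 0)
    temp.flush(lambda rows: print("bulk-copying %d rows" % len(rows)))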
Example #4
    def lock(self, zs, func):
        """Call the given function with the commit lock.

        If we can get the lock right away, return the result of
        calling the function.

     If we can't get the lock right away, return a delay.

        The function must set ``locked`` on the zeo-storage to
        indicate that the zeo-storage should be locked.  Otherwise,
     the lock isn't held past the call.
        """
        with self._lock:
            if self._can_lock(zs):
                self._locked(zs)
            else:
                if any(w for w in self.waiting if w is zs):
                    raise StorageTransactionError("Already voting (waiting)")

                delay = Delay()
                self.waiting[zs] = (func, delay)
                self._log_waiting(zs,
                                  "(%r) queue lock: transactions waiting: %s")

                return delay

        try:
            result = func()
        except Exception:
            self.release(zs)
            raise
        else:
            if not zs.locked:
                self.release(zs)
            return result
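
The docstring above promises a dual return: the function's result when the commit lock is available immediately, or a Delay object that is answered later, once the current holder releases. A simplified, self-contained sketch of that contract; Delay and LockManager are illustrative stand-ins for the ZEO machinery, not its real classes:

    class Delay(object):
        """A placeholder answered later, when the lock is finally granted."""
        def reply(self, result):
            print("delayed result:", result)

    class LockManager(object):
        def __init__(self):
            self.locked = None
            self.waiting = {}        # zs -> (func, delay)

        def lock(self, zs, func):
            if self.locked is None:
                self.locked = zs
                return func()        # got the lock right away
            delay = Delay()
            self.waiting[zs] = (func, delay)
            return delay             # caller waits for delay.reply()

        def release(self, zs):
            self.locked = None
            if self.waiting:
                next_zs, (func, delay) = self.waiting.popitem()
                self.locked = next_zs
                delay.reply(func())

    manager = LockManager()
    print(manager.lock("zs1", lambda: "voted"))      # immediate result
    pending = manager.lock("zs2", lambda: "voted later")
    manager.release("zs1")                           # zs2's delay now fires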
Example #5
    def restore(self, oid, this_tid, data, prev_txn, transaction):
        # Similar to store() (see comments in FileStorage.restore for
        # some differences), but used for importing transactions.
        # Note that *data* can be None.
        # The *prev_txn* "backpointer" optimization/hint is ignored.
        #
        # pylint:disable=unused-argument
        state = self.wrapping
        if transaction is not state.transaction:
            raise StorageTransactionError(self, transaction)

        adapter = state.shared_state.adapter
        cursor = state.shared_state.store_connection.cursor
        assert cursor is not None
        oid_int = bytes8_to_int64(oid)
        tid_int = bytes8_to_int64(this_tid)

        # Save the `data`.  Note that `data` can be None.
        # Note also that this doesn't go through the cache.
        state.shared_state.temp_storage.max_restored_oid = max(
            state.shared_state.temp_storage.max_restored_oid, oid_int)
        # TODO: Make it go through the cache, or at least the same
        # sort of queuing thing, so that we can do a bulk COPY.
        # The way we do it now complicates restoreBlob() and it complicates voting.
        adapter.mover.restore(cursor, self.batcher, oid_int, tid_int, data)
Example #6
 def _move_blobs_into_place(self, tid):
     if not self._txn_blobs:
         return 0
     if not tid:
         raise StorageTransactionError("No TID for blobs")
     assert isinstance(tid, bytes)
     # We now have a transaction ID, so rename all the blobs
     # accordingly. This is very unlikely to fail. If we're
     # not using a shared blob-dir, it doesn't matter much if it fails;
     # source data is safely in the database, we'd just have some extra temporary
     # files. (Though we don't want that exception to propagate from tpc_finish.)
     #
     # In fact, ClientStorage does this in tpc_finish for blob cache dirs.
     # It's not been reported as a problem there, so probably it really does
     # rarely fail. Exceptions from tpc_finish are a VERY BAD THING.
     total_size = 0
     for oid, sourcename in self._txn_blobs.items():
         size = os.stat(sourcename).st_size
         total_size += size
         targetname = self.fshelper.getBlobFilename(oid, tid)
         if sourcename != targetname:
             lock = lock_blob(targetname)
             try:
                 ZODB.blob.rename_or_copy_blob(sourcename, targetname)
             finally:
                 lock.close()
             self._txn_blobs[oid] = targetname
     return total_size
Example #7
 def tpc_finish(self, storage, transaction, f=None, _time=None): # pylint:disable=unused-argument
     # For the sake of some ZODB tests, we need to implement this everywhere,
     # even if it's not actually usable, and the first thing it needs to
     # do is check the transaction.
     if transaction is not self.transaction:
         raise StorageTransactionError('tpc_finish called with wrong transaction')
     raise NotImplementedError("tpc_finish not allowed in this state.")
Example #8
    def deleteObject(self, oid, oldserial, transaction):
        """
        This method operates directly against the ``object_state`` table;
        as such, it immediately takes out locks on that table.

        This method is only expected to be called when performing
        ``IExternalGC`` operations (e.g., from zc.zodbdgc
        or from ZODB/tests/IExternalGC.test).
        """
        if transaction is not self.transaction: # pragma: no cover
            raise StorageTransactionError(self, transaction)

        # We shouldn't have to worry about anything in self._cache
        # because by definition we are deleting objects that were not
        # reachable and so shouldn't be in the cache (or if they were,
        # we'll never ask for them anyway). Most likely, this is running
        # in a separate process anyway, not used for regular storage (
        # an instance of multi-zodb-gc). However, in case it is in a regular
        # process, and in case we do have other transactions that could theoretically
        # see this state, and to relieve memory pressure on local/global caches,
        # we do go ahead and invalidate a cached entry.
        oid_int = bytes8_to_int64(oid)
        tid_int = bytes8_to_int64(oldserial)
        self.cache.invalidate(oid_int, tid_int)

        # We delegate the actual operation to the adapter's packundo,
        # just like native pack
        cursor = self.store_connection.cursor
        # When this is done, we get a tpc_vote,
        # and a tpc_finish.
        # The interface doesn't specify a return value, so for testing
        # we return the count of rows deleted (should be 1 if successful)
        return self.adapter.packundo.deleteObject(cursor, oid, oldserial)
Example #9
    def tpc_begin(self, id, user, description, ext, tid=None, status=" "):
        if self.read_only:
            raise ReadOnlyError()
        if self.transaction is not None:
            if self.transaction.id == id:
                self.log("duplicate tpc_begin(%s)" % repr(id))
                return
            else:
                raise StorageTransactionError("Multiple simultaneous tpc_begin"
                                              " requests from one client.")

        t = TransactionMetaData(user, description, ext)
        t.id = id

        self.serials = []
        self.conflicts = {}
        self.invalidated = []
        self.txnlog = CommitLog()
        self.blob_log = []
        self.tid = tid
        self.status = status
        self.stats.active_txns += 1

        # Assign the transaction attribute last. This is so we don't
        # think we've entered TPC until everything is set.  Why?
        # Because if we have an error after this, the server will
        # think it is in TPC and the client will think it isn't.  At
        # that point, the client will keep trying to enter TPC and
        # the server won't let it.  Errors *after* the tpc_begin call will
        # cause the client to abort the transaction.
        # (Also see https://bugs.launchpad.net/zodb/+bug/374737.)
        self.transaction = t
Example #10
    def undo(self, transaction_id, transaction):
        """
        This method temporarily holds the pack lock, releasing it when
        done, and it also holds the commit lock, keeping it held for
        the next phase.

        Returns an iterable of ``(oid_int, tid_int)`` pairs giving the
        items that were restored and are now current. All of those oids that
        had any data stored for ``transaction_id`` are now invalid.
        """
        # Typically if this is called, the store/restore methods will *not* be
        # called, but there's not a strict guarantee about that.
        if transaction is not self.transaction:
            raise StorageTransactionError(self, transaction)

        # Unlike most places, transaction_id is the base 64 encoding
        # of an 8-byte tid.

        undo_tid = base64_decodebytes(transaction_id + b'\n')  # pylint:disable=deprecated-method
        assert len(undo_tid) == 8
        undo_tid_int = bytes8_to_int64(undo_tid)

        adapter = self.shared_state.adapter
        cursor = self.shared_state.store_connection.cursor
        assert cursor is not None

        adapter.locker.hold_pack_lock(cursor)
        try:
            adapter.packundo.verify_undoable(cursor, undo_tid_int)
            self._obtain_commit_lock(cursor)

            self_tid_int = self.committing_tid_lock.tid_int
            copied = adapter.packundo.undo(cursor, undo_tid_int, self_tid_int)

            # Invalidate all cached data for these oids. We have a
            # brand new transaction ID that's greater than any they
            # had before. In history-preserving mode, there could
            # still be other valid versions. See notes in packundo:
            # In theory we could be undoing a transaction several generations in the
            # past where the object had multiple intermediate states, but in practice
            # we're probably just undoing the latest state. Still, play it
            # a bit safer.
            oid_ints = [oid_int for oid_int, _ in copied]
            self.shared_state.cache.remove_all_cached_data_for_oids(oid_ints)

            # Update the current object pointers immediately, so that
            # subsequent undo operations within this transaction will see
            # the new current objects.
            adapter.mover.update_current(cursor, self_tid_int)

            self.shared_state.blobhelper.copy_undone(
                copied, self.committing_tid_lock.tid)

            oids = [int64_to_8bytes(oid_int) for oid_int in oid_ints]
            self._invalidated_oids(*oids)

            return copied
        finally:
            adapter.locker.release_pack_lock(cursor)
Example #11
    def tpc_vote(self, transaction, storage):
        if transaction is not self.transaction:
            raise StorageTransactionError(
                "tpc_vote called with wrong transaction")

        next_phase = self.tpc_vote_factory(self)
        next_phase.enter(storage)
        return next_phase
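
Examples #11 and #12 come from a state-machine design for two-phase commit: each phase object validates the transaction by identity (equality is not enough, it must be the same object) and then hands off to a factory for the next phase. A stripped-down sketch of that transition pattern; the class names are assumptions for illustration:

    class StorageTransactionError(Exception):
        pass

    class Begin(object):
        def __init__(self, transaction):
            self.transaction = transaction

        def tpc_vote(self, transaction):
            # Identity check, as in the examples above.
            if transaction is not self.transaction:
                raise StorageTransactionError(
                    "tpc_vote called with wrong transaction")
            return Vote(self.transaction)

    class Vote(object):
        def __init__(self, transaction):
            self.transaction = transaction

    txn = object()
    phase = Begin(txn).tpc_vote(txn)     # advances to the voting phase
    try:
        Begin(txn).tpc_vote(object())    # a different transaction object fails
    except StorageTransactionError as exc:
        print(exc)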
Example #12
 def tpc_begin(self, transaction, begin_factory):
     if transaction is self.transaction:
         raise StorageTransactionError(
             "Duplicate tpc_begin calls for same transaction.")
     # XXX: Shouldn't we tpc_abort() first (well, not that exactly, because
     # the transaction won't match, but logically)? The original storage
     # code didn't do that, but it seems like it should.
     return begin_factory(self, transaction)
Example #13
    def tpc_finish(self, transaction, f=None):
        if transaction is not self.transaction:
            raise StorageTransactionError(
                "tpc_finish called with wrong transaction")
        finish_entry = time.time()
        # Handle the finishing. We cannot/must not fail now.
        # TODO: Move most of this into the Finish class/module.
        did_commit = self.__lock_and_move()
        if did_commit:
            locks_released = time.time()
        assert self.committing_tid_lock is not None, self

        # The IStorage docs say that f() "must be called while the
        # storage transaction lock is held." We don't really have a
        # "storage transaction lock", just the global database lock,
        # that we want to drop as quickly as possible, so it would be
        # nice to drop the commit lock and then call f(). This
        # probably doesn't really matter, though, as ZODB.Connection
        # doesn't use f().
        #
        # If we called `lock_and_move` for the first time in this
        # method, then the adapter will have been asked to go ahead
        # and commit, releasing any locks it can (some adapters do,
        # some don't). So we may or may not have a database lock at
        # this point.
        assert not self.blobhelper.NEEDS_DB_LOCK_TO_FINISH
        try:
            self.blobhelper.finish(self.committing_tid_lock.tid)
        except (IOError, OSError):
            # If something failed to move, that's not really a problem:
            # if we did any moving now, we're just a cache.
            logger.exception("Failed to update blob-cache")

        try:
            if f is not None:
                f(self.committing_tid_lock.tid)
            next_phase = Finish(self, not did_commit)
            if not did_commit:
                locks_released = time.time()

            locked_duration = locks_released - self.lock_and_vote_times[0]
            between_vote_and_finish = finish_entry - self.lock_and_vote_times[1]
            do_log_duration_info(
                "Objects were locked by %s for %.3fs",
                AbstractVote.tpc_finish.__wrapped__,  # pylint:disable=no-member
                self,
                None,
                locked_duration)
            do_log_duration_info(
                "Time between vote exiting and %s entering was %.3fs",
                AbstractVote.tpc_finish.__wrapped__,  # pylint:disable=no-member
                self,
                None,
                between_vote_and_finish)

            return next_phase, self.committing_tid_lock.tid
        finally:
            self._clear_temp()
Example #14
 def tpc_begin(self, storage, transaction): # XXX: Signature needs to change.
     if self.read_only:
         raise ReadOnlyError()
     if transaction is self.transaction: # Also handles None.
         raise StorageTransactionError("Duplicate tpc_begin calls for same transaction.")
     state = SharedTPCState(self, storage, transaction)
     try:
         return self.begin_factory(state)
     except:
         state.abort()
         raise
Example #15
    def undo(self, transaction_id, transaction):
        """
        This method temporarily holds the pack lock, releasing it when
        done, and it also holds the commit lock, keeping it held for
        the next phase.
        """
        # Typically if this is called, the store/restore methods will *not* be
        # called, but there's not a strict guarantee about that.
        if transaction is not self.transaction:
            raise StorageTransactionError(self, transaction)

        # Unlike most places, transaction_id is the base 64 encoding
        # of an 8-byte tid.

        undo_tid = base64_decodebytes(transaction_id + b'\n') # pylint:disable=deprecated-method
        assert len(undo_tid) == 8
        undo_tid_int = bytes8_to_int64(undo_tid)

        adapter = self.adapter
        cursor = self.store_connection.cursor
        assert cursor is not None

        adapter.locker.hold_pack_lock(cursor)
        try:
            adapter.packundo.verify_undoable(cursor, undo_tid_int)
            if self.committing_tid_lock is None:
                # Note that _prepare_tid acquires the commit lock.
                # The commit lock must be acquired after the pack lock
                # because the database adapters also acquire in that
                # order during packing.
                tid_lock = DatabaseLockedForTid.lock_database_for_next_tid(
                    cursor, adapter, self.ude)
                self.committing_tid_lock = tid_lock

            self_tid_int = self.committing_tid_lock.tid_int
            copied = adapter.packundo.undo(
                cursor, undo_tid_int, self_tid_int)
            oids = [int64_to_8bytes(oid_int) for oid_int, _ in copied]

            # Update the current object pointers immediately, so that
            # subsequent undo operations within this transaction will see
            # the new current objects.
            adapter.mover.update_current(cursor, self_tid_int)

            self.blobhelper.copy_undone(copied,
                                        self.committing_tid_lock.tid)

            if not self.undone_oids:
                self.undone_oids = set()
            self.undone_oids.update(oids)
        finally:
            adapter.locker.release_pack_lock(cursor)
Example #16
 def _vote(self, storage):
     if self.temp_storage and self.temp_storage.stored_oids:
         raise StorageTransactionError(
             "Cannot store and delete at the same time.")
     # We only get here if we've deleted objects, meaning we hold their row locks.
     # We only delete objects once we hold the commit lock.
     assert self.committing_tid_lock
     # Holding the commit lock put an entry in the transaction table,
     # but we don't want to bump the TID or store that data.
     self.adapter.txncontrol.delete_transaction(
         self.store_connection.cursor, self.committing_tid_lock.tid_int)
     self.lock_and_vote_times[0] = time.time()
     return ()
Example #17
    def deleteObject(self, oid, oldserial, transaction):
        """
        This method operates directly against the ``object_state`` table;
        as such, it immediately takes out locks on that table.

        This method is only expected to be called when performing
        ``IExternalGC`` operations (e.g., from zc.zodbdgc
        or from ZODB/tests/IExternalGC.test).

        In history-free mode, deleting objects does not allocate
        a new tid (well, it allocates it, but there's no place to store
        it). In history-preserving mode, it will wind up allocating a tid
        to store the empty transaction (only previous states were undone).

        TODO: This needs a better, staged implementation. I think it is
           highly likely to deadlock now if anything happened to be reading
           those rows.
        XXX: If we have blobs in a non-shared disk location, this does not
           remove them.
        """
        if transaction is not self.transaction:  # pragma: no cover
            raise StorageTransactionError(self, transaction)

        # We shouldn't have to worry about anything in self._cache
        # because by definition we are deleting objects that were not
        # reachable and so shouldn't be in the cache (or if they were,
        # we'll never ask for them anyway). Most likely, this is running
        # in a separate process anyway, not used for regular storage (
        # an instance of multi-zodb-gc). However, in case it is in a regular
        # process, and in case we do have other transactions that could theoretically
        # see this state, and to relieve memory pressure on local/global caches,
        # we do go ahead and invalidate a cached entry.
        # TODO: We need a distinct name for invalidate, so we can differentiate
        # between why we're doing it. Did we write a newer version? Did we
        # delete a specific version? Etc.
        oid_int = bytes8_to_int64(oid)
        tid_int = bytes8_to_int64(oldserial)
        self.shared_state.cache.remove_cached_data(oid_int, tid_int)

        # We delegate the actual operation to the adapter's packundo,
        # just like native pack
        cursor = self.shared_state.store_connection.cursor
        # When this is done, we get a tpc_vote,
        # and a tpc_finish.
        # The interface doesn't specify a return value, so for testing
        # we return the count of rows deleted (should be 1 if successful)
        deleted = self.shared_state.adapter.packundo.deleteObject(
            cursor, oid, oldserial)
        self._invalidated_oids(oid)
        return deleted
Example #18
 def new(self, txn):
     key = id(txn)
     if key in self:
         raise StorageTransactionError("commit of transaction %r"
                                       " already started" % txn)
     context = self[key] = {
         'queue': SimpleQueue(),
         'txn': txn,
         'ttid': None,
         'data_dict': {},
         'data_size': 0,
         'cache_dict': {},
         'cache_size': 0,
         'object_base_serial_dict': {},
         'object_serial_dict': {},
         'object_stored_counter_dict': {},
         'conflict_serial_dict': {},
         'resolved_conflict_serial_dict': {},
         'involved_nodes': set(),
         'checked_nodes': set(),
     }
     return context
Example #19
    def _can_lock(self, zs):
        locked = self.locked

        if locked is zs:
            raise StorageTransactionError("Already voting (locked)")

        if locked is not None:
            if not locked.connected:
                locked.log("Still locked after disconnected. Unlocking.",
                           logging.CRITICAL)
                if locked.transaction:
                    locked.storage.tpc_abort(locked.transaction)

                self._unlocked(locked)
                locked = None

            # Note that locked.locked may not be true here, because
            # .lock may be set in the lock callback, but may not have
            # been set yet.  This aspect of the API may need more
            # thought. :/

        return locked is None
Example #20
    def store(self, oid, previous_tid, data, transaction):
        """
        This method should take no globally visible commit locks.
        """
        # Called by Connection.commit(), after tpc_begin has been called.
        if transaction is not self.transaction:
            raise StorageTransactionError(self, transaction)

        oid_int = bytes8_to_int64(oid)
        if previous_tid and previous_tid != NO_PREV_TID:
            # previous_tid is the tid of the state that the
            # object was loaded from. cPersistent objects return a brand
            # new bytes object each time even if it's all zeros; Python implementation
            # returns a private constant. It would be nice if they all returned a public
            # interned constant so we could compare with `is`.
            prev_tid_int = bytes8_to_int64(previous_tid)
        else:
            prev_tid_int = 0

        # Save the data locally in a temporary place. Later, closer to commit time,
        # we'll send it all over at once. This lets us do things like use
        # COPY in postgres.
        self.temp_storage.store_temp(oid_int, data, prev_tid_int)
Example #21
    def tpc_begin(self, id, user, description, ext, tid=None, status=" "):
        if self.read_only:
            raise ReadOnlyError()
        if self.transaction is not None:
            if self.transaction.id == id:
                self.log("duplicate tpc_begin(%s)" % repr(id))
                return
            else:
                raise StorageTransactionError("Multiple simultaneous tpc_begin"
                                              " requests from one client.")

        self.transaction = t = transaction.Transaction()
        t.id = id
        t.user = user
        t.description = description
        t._extension = ext

        self.serials = []
        self.invalidated = []
        self.txnlog = CommitLog()
        self.tid = tid
        self.status = status
        self.store_failed = 0
        self.stats.active_txns += 1
Example #22
 def begin(self):
     if self._txn_blobs is not None:
         raise StorageTransactionError("Already in a transaction.")
     self._txn_blobs = {}
Example #23
 def get(self, txn):
     try:
         return self[id(txn)]
     except KeyError:
         raise StorageTransactionError("unknown transaction %r" % txn)
Example #24
 def tpc_begin(self, _storage, transaction):
     # Ditto as for tpc_finish
     raise StorageTransactionError('tpc_begin not allowed in this state', type(self))
Example #25
 def _no_transaction(self, *args, **kwargs):
     raise StorageTransactionError("No transaction in progress")