Example #1
    def checkCurrentSerialInTransaction(self, oid, required_tid, transaction):
        if transaction is not self.transaction:
            raise StorageTransactionError(self, transaction)

        required_tid_int = bytes8_to_int64(required_tid)
        oid_int = bytes8_to_int64(oid)

        # If this transaction already specified a different serial for
        # this oid, the transaction conflicts with itself.
        required_tids = self.required_tids
        if not required_tids:
            required_tids = self.required_tids = OID_TID_MAP_TYPE()

        previous_serial_int = required_tids.get(oid_int, required_tid_int)
        if previous_serial_int != required_tid_int:
            raise TransactionConflictsWithItselfError(
                oid=oid,
                serials=(int64_to_8bytes(previous_serial_int), required_tid))

        newer_tid = self.shared_state.local_client.contains_oid_with_newer_tid(
            oid_int, required_tid_int)
        if newer_tid:
            raise CacheHasNewerTidError(oid=oid,
                                        serials=(int64_to_8bytes(newer_tid),
                                                 required_tid))

        required_tids[oid_int] = required_tid_int
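
These snippets all lean on a pair of small conversion helpers. Below is a minimal standalone sketch of what they do, assuming they are equivalent to ZODB.utils.p64/u64 (big-endian unsigned 64-bit packing); the assert is just a round-trip sanity check:

import struct

def int64_to_8bytes(value):
    # Pack an integer TID/OID into its 8-byte big-endian wire form.
    return struct.pack(">Q", value)

def bytes8_to_int64(value):
    # Unpack the 8-byte big-endian form back into a Python int.
    return struct.unpack(">Q", value)[0]

assert bytes8_to_int64(int64_to_8bytes(275085010696509852)) == 275085010696509852
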
Example #2
File: load.py Project: alecpm/relstorage
    def loadBefore(self, oid, tid):
        """
        Return the most recent revision of oid committed before tid.
        """
        if tid is maxtid or tid == maxtid:
            # This is probably from ZODB.utils.load_current(), which
            # is really trying to just get the current state of the
            # object. This is almost entirely just from test cases; ZODB 5's mvccadapter
            # doesn't even expose it, so ZODB.Connection doesn't use it.
            #
            # Shortcut the logic below by using load() (current),
            # formatted in the way this method returns it:
            #
            #     ``(state, tid # of state, tid_after_state)``
            #
            # where tid_after_state will naturally be None
            return self.load(oid) + (None,)
        oid_int = bytes8_to_int64(oid)

        # TODO: This makes three separate queries, and also bypasses the cache.
        # We should be able to fix at least the multiple queries.

        # In the past, we would use the store connection (only if it was already open)
        # to "allow leading dato from later transactions for conflict resolution".
        # However, this doesn't seem to be used in conflict
        # resolution. ZODB.ConflictResolution.tryToResolveConflict
        # calls loadSerial(); About the only call in ZODB to
        # loadBefore() is from BlobStorage.undo() (which
        # RelStorage does not extend). Mixing and matching calls
        # between connections using different isolation levels
        # isn't great.
        #
        # We had it as a todo for a long time to stop doing that, and
        # pooling store connections was a great time to try it.
        cursor = self.load_connection.cursor
        if not self.adapter.mover.exists(cursor, oid_int):
            raise self.__pke(oid, exists=False)

        state, start_tid = self.adapter.mover.load_before(
            cursor, oid_int, bytes8_to_int64(tid))

        if start_tid is None:
            return None

        if state is None:
            # This can happen if something attempts to load
            # an object whose creation has been undone, see load()
            # This change fixes the test in
            # TransactionalUndoStorage.checkUndoCreationBranch1
            # self._log_keyerror doesn't work here, only in certain states.
            raise self.__pke(oid, undone=True)

        end_int = self.adapter.mover.get_object_tid_after(
            cursor, oid_int, start_tid)
        if end_int is not None:
            end = int64_to_8bytes(end_int)
        else:
            end = None

        return state, int64_to_8bytes(start_tid), end
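
A hedged sketch of consuming the triple loadBefore() returns; ``storage`` and ``revision_before`` are illustrative names, not part of the RelStorage API:

def revision_before(storage, oid, tid):
    result = storage.loadBefore(oid, tid)
    if result is None:
        return None  # no revision committed before ``tid``
    state, start_tid, end_tid = result
    # ``end_tid`` is the tid that superseded this revision;
    # None means the revision is still current.
    return state, start_tid, end_tid is None
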
Example #3
    def _lock_and_move(self, vote_only=False):
        # Here's where we take the global commit lock, and
        # allocate the next available transaction id, storing it
        # into history-preserving DBs. But if someone passed us
        # a TID (``restore`` or ``undo``), then it must already be in the DB, and the lock must
        # already be held.
        #
        # If we've prepared the transaction, then the TID must be in the
        # db, the lock must be held, and we must have finished all of our
        # storage actions. This is only expected to be the case when we have
        # a shared blob dir.
        #
        # Returns True if we also committed to the database.
        if self.prepared_txn:
            # Already done; *should* have been vote_only.
            assert self.committing_tid_lock, (self.prepared_txn,
                                              self.committing_tid_lock)
            return False

        kwargs = {'commit': True}
        if self.committing_tid_lock:
            kwargs['committing_tid_int'] = self.committing_tid_lock.tid_int
        if vote_only:
            # Must be voting.
            blob_meth = self.blobhelper.vote
            kwargs['after_selecting_tid'] = lambda tid_int: blob_meth(
                int64_to_8bytes(tid_int))
            kwargs['commit'] = False

        if vote_only or self.adapter.DEFAULT_LOCK_OBJECTS_AND_DETECT_CONFLICTS_INTERLEAVABLE:
            # If we're going to have to make two trips to the database, one to lock it and get a
            # tid and then one to commit and release locks, either because we're
            # just voting right now, not committing, or because the database doesn't
            # support doing that in a single operation, we need to go critical and
            # regain control ASAP so we can complete the operation.
            self.__enter_critical_phase_until_transaction_end()

        # Note that this may commit the load_connection and make it not
        # viable for a historical view anymore.
        committing_tid_int, prepared_txn = self.adapter.lock_database_and_move(
            self.store_connection, self.load_connection, self.blobhelper,
            self.ude, **kwargs)

        self.prepared_txn = prepared_txn
        committing_tid_lock = self.committing_tid_lock
        assert committing_tid_lock is None or committing_tid_int == committing_tid_lock.tid_int, (
            committing_tid_int, committing_tid_lock)

        log_msg = "Database lock and tid already allocated: %s"
        if committing_tid_lock is None:
            self.committing_tid_lock = DatabaseLockedForTid(
                int64_to_8bytes(committing_tid_int), committing_tid_int,
                self.adapter)
            log_msg = "Adapter locked database and allocated tid: %s"

        logger.log(TRACE, log_msg, self.committing_tid_lock)

        return kwargs['commit']
Example #4
 def after_lock_share():
     current = self.mover.current_object_tids(cursor, read_current_oid_ints)
     # We go ahead and compare the readCurrent TIDs here, so
     # that we don't have to make the call to detect conflicts
     # or even lock rows if there are readCurrent violations.
     for oid_int, expect_tid_int in read_current_oids.items():
         actual_tid_int = current.get(oid_int, 0)
         if actual_tid_int != expect_tid_int:
             raise VoteReadConflictError(
                 oid=int64_to_8bytes(oid_int),
                 serials=(int64_to_8bytes(actual_tid_int),
                          int64_to_8bytes(expect_tid_int)))
Example #5
    def history(self, oid, version=None, size=1, filter=None):
        # pylint:disable=unused-argument,too-many-locals
        cursor = self.load_connection.cursor
        oid_int = bytes8_to_int64(oid)
        try:
            rows = self.adapter.dbiter.iter_object_history(cursor, oid_int)
        except KeyError:
            raise POSKeyError(oid)

        res = []
        for tid_int, username, description, extension, length in rows:
            tid = int64_to_8bytes(tid_int)
            if extension:
                d = loads(extension)
            else:
                d = {}
            d.update({
                "time": TimeStamp(tid).timeTime(),
                "user_name": username or b'',
                "description": description or b'',
                "tid": tid,
                "version": '',
                "size": length,
                "rs_tid_int": tid_int,
                "rs_oid_int": oid_int,
            })
            if filter is None or filter(d):
                res.append(d)
                if size is not None and len(res) >= size:
                    break
        return res
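
A small hypothetical driver for history(); ``storage`` and ``oid`` are assumptions, but the dict keys are exactly the ones built above:

# Keep only revisions whose pickle exceeds 1 KiB; ``filter`` receives
# each dict ``d`` before it is appended to the results.
entries = storage.history(oid, size=10, filter=lambda d: d["size"] > 1024)
for entry in entries:
    print(entry["tid"], entry["time"], entry["user_name"], entry["size"])
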
Example #6
File: oid.py Project: lungj/relstorage
    def new_oid(self, commit_in_progress):
        # Prior to ZODB 5.1.2, this method was actually called on the
        # storage object of the DB, not the instance storage object of
        # a Connection. This meant that this method (and the oid
        # cache) was shared among all connections using a database and
        # was called outside of a transaction (starting its own
        # long-running transaction).

        # The DB.new_oid() method still exists, but shouldn't be used;
        # if it is, we'll open a database connection and transaction that's
        # going to sit there idle, possibly holding row locks. That's bad.
        # But we don't take any countermeasures.

        # Connection.new_oid() can be called at just about any time
        # thanks to the Connection.add() API, which clients can use
        # at any time (typically before commit begins, but it's possible to
        # add() objects from a ``__getstate__`` method).
        #
        # Thus we may or may not have a store connection already open;
        # if we do, we can't restart it or drop it.
        if not self.preallocated_oids:
            self.preallocated_oids = self.store_connection.call(
                self.__new_oid_callback, can_reconnect=not commit_in_progress)
            # OIDs are monotonic, always increasing. It should never
            # go down or return equal to what we've already seen.
            self.max_allocated_oid = max(self.preallocated_oids[0],
                                         self.max_allocated_oid)

        oid_int = self.preallocated_oids.pop()
        return int64_to_8bytes(oid_int)
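
The bookkeeping above only holds together if the preallocated batch is stored largest-first: preallocated_oids[0] is the maximum of the batch, while pop() hands out OIDs from the small end, keeping allocations monotonic. A toy illustration with made-up values:

preallocated_oids = [112, 111, 110, 109]          # descending batch from the DB
max_allocated_oid = max(preallocated_oids[0], 0)  # 112, the batch maximum
next_oid = preallocated_oids.pop()                # 109, the smallest, handed out first
assert next_oid <= max_allocated_oid
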
Example #7
    def undoLog(self, first=0, last=-20, filter=None):
        if last < 0:
            last = first - last

        # use a private connection to ensure the most current results
        with self.load_connection.isolated_connection() as cursor:
            tx_iter = self.adapter.dbiter.iter_transactions(cursor)
            i = 0
            res = []
            for tx in tx_iter:
                tid = int64_to_8bytes(tx.tid_int)
                # Note that user and desc are schizophrenic. The transaction
                # interface specifies that they are a Python str, *probably*
                # meaning bytes. But code in the wild and the ZODB test suite
                # sets them as native strings, meaning unicode on Py3. OTOH, the
                # test suite checks that this method *returns* them as bytes!
                # This is largely cleaned up with transaction 2.0/ZODB 5, where the storage
                # interface is defined in terms of bytes only.
                d = {
                    'id': base64_encodebytes(tid)[:-1],  # pylint:disable=deprecated-method
                    'time': TimeStamp(tid).timeTime(),
                    'user_name': tx.username or b'',
                    'description': tx.description or b'',
                }
                if tx.extension:
                    d.update(loads(tx.extension))

                if filter is None or filter(d):
                    if i >= first:
                        res.append(d)
                    i += 1
                    if i >= last:
                        break
            return res
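
A worked check (not RelStorage code) of how the ``last`` normalization at the top of the method plays out for the default arguments:

first, last = 0, -20
if last < 0:
    last = first - last  # 0 - (-20) == 20
assert last == 20        # i.e. return at most 20 entries starting at ``first``
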
Example #8
File: oid.py Project: gotcha/relstorage
    def new_oid(self, store_connection_pool, commit_in_progress):
        # Prior to ZODB 5.1.2, this method was actually called on the
        # storage object of the DB, not the instance storage object of
        # a Connection. This meant that this method (and the oid
        # cache) was shared among all connections using a database and
        # was called outside of a transaction (starting its own
        # long-running transaction).

        # The DB.new_oid() method still exists, but shouldn't be used.
        # In the past, because that storage was never in a transaction,
        # we could open a database transaction and never close it. RelStorage 3.3
        # and store connection pooling fix this.

        # Connection.new_oid() can be called at just about any time
        # thanks to the Connection.add() API, which clients can use
        # at any time (typically before commit begins, but it's possible to
        # add() objects from a ``__getstate__`` method).
        #
        # Thus we may or may not have a store connection already open;
        # if we do, we can't restart it or drop it.
        if not self.preallocated_oids:
            self.__preallocate_oids(store_connection_pool, commit_in_progress)
            # OIDs are monotonic, always increasing. It should never
            # go down or return equal to what we've already seen.
            self.max_allocated_oid = max(self.preallocated_oids[0], self.max_allocated_oid)

        oid_int = self.preallocated_oids.pop()
        return int64_to_8bytes(oid_int)
Example #9
    def __lock_and_move(self, vote_only=False):
        # Here's where we take the global commit lock, and
        # allocate the next available transaction id, storing it
        # into history-preserving DBs. But if someone passed us
        # a TID (``restore`` or ``undo``), then it must already be in the DB, and the lock must
        # already be held.
        #
        # If we've prepared the transaction, then the TID must be in the
        # db, the lock must be held, and we must have finished all of our
        # storage actions. This is only expected to be the case when we have
        # a shared blob dir.
        #
        # Returns True if we also committed to the database.
        if self.prepared_txn:
            # Already done; *should* have been vote_only.
            assert self.committing_tid_lock, (self.prepared_txn,
                                              self.committing_tid_lock)
            return False

        kwargs = {'commit': True}
        if self.committing_tid_lock:
            kwargs['committing_tid_int'] = self.committing_tid_lock.tid_int
        if vote_only:
            # Must be voting.
            blob_meth = self.blobhelper.vote
            kwargs['after_selecting_tid'] = lambda tid_int: blob_meth(
                int64_to_8bytes(tid_int))
            kwargs['commit'] = False

        committing_tid_int, prepared_txn = self.adapter.lock_database_and_move(
            self.store_connection, self.blobhelper, self.ude, **kwargs)

        self.prepared_txn = prepared_txn
        committing_tid_lock = self.committing_tid_lock
        assert committing_tid_lock is None or committing_tid_int == committing_tid_lock.tid_int, (
            committing_tid_int, committing_tid_lock)
        if committing_tid_lock is None:
            self.committing_tid_lock = DatabaseLockedForTid(
                int64_to_8bytes(committing_tid_int), committing_tid_int,
                self.adapter)
            logger.debug("Adapter locked database and allocated tid %s",
                         self.committing_tid_lock)
        else:
            logger.debug("Database lock and tid already done",
                         self.committing_tid_lock)

        return kwargs['commit']
Example #10
    def undo(self, transaction_id, transaction):
        """
        This method temporarily holds the pack lock, releasing it when
        done, and it also holds the commit lock, keeping it held for
        the next phase.

        Returns an iterable of ``(oid_int, tid_int)`` pairs giving the
        items that were restored and are now current. All of those oids that
        had any data stored for ``transaction_id`` are now invalid.
        """
        # Typically if this is called, the store/restore methods will *not* be
        # called, but there's not a strict guarantee about that.
        if transaction is not self.transaction:
            raise StorageTransactionError(self, transaction)

        # Unlike most places, transaction_id is the base 64 encoding
        # of an 8 byte tid

        undo_tid = base64_decodebytes(transaction_id + b'\n')  # pylint:disable=deprecated-method
        assert len(undo_tid) == 8
        undo_tid_int = bytes8_to_int64(undo_tid)

        adapter = self.shared_state.adapter
        cursor = self.shared_state.store_connection.cursor
        assert cursor is not None

        adapter.locker.hold_pack_lock(cursor)
        try:
            adapter.packundo.verify_undoable(cursor, undo_tid_int)
            self._obtain_commit_lock(cursor)

            self_tid_int = self.committing_tid_lock.tid_int
            copied = adapter.packundo.undo(cursor, undo_tid_int, self_tid_int)

            # Invalidate all cached data for these oids. We have a
            # brand new transaction ID that's greater than any they
            # had before. In history-preserving mode, there could
            # still be other valid versions. See notes in packundo:
            # In theory we could be undoing a transaction several generations in the
            # past where the object had multiple intermediate states, but in practice
            # we're probably just undoing the latest state. Still, play it
            # a bit safer.
            oid_ints = [oid_int for oid_int, _ in copied]
            self.shared_state.cache.remove_all_cached_data_for_oids(oid_ints)

            # Update the current object pointers immediately, so that
            # subsequent undo operations within this transaction will see
            # the new current objects.
            adapter.mover.update_current(cursor, self_tid_int)

            self.shared_state.blobhelper.copy_undone(
                copied, self.committing_tid_lock.tid)

            oids = [int64_to_8bytes(oid_int) for oid_int in oid_ints]
            self._invalidated_oids(*oids)

            return copied
        finally:
            adapter.locker.release_pack_lock(cursor)
Example #11
    def undo(self, transaction_id, transaction):
        """
        This method temporarily holds the pack lock, releasing it when
        done, and it also holds the commit lock, keeping it held for
        the next phase.
        """
        # Typically if this is called, the store/restore methods will *not* be
        # called, but there's not a strict guarantee about that.
        if transaction is not self.transaction:
            raise StorageTransactionError(self, transaction)

        # Unlike most places, transaction_id is the base 64 encoding
        # of an 8 byte tid

        undo_tid = base64_decodebytes(transaction_id + b'\n') # pylint:disable=deprecated-method
        assert len(undo_tid) == 8
        undo_tid_int = bytes8_to_int64(undo_tid)

        adapter = self.adapter
        cursor = self.store_connection.cursor
        assert cursor is not None

        adapter.locker.hold_pack_lock(cursor)
        try:
            adapter.packundo.verify_undoable(cursor, undo_tid_int)
            if self.committing_tid_lock is None:
                # Note that lock_database_for_next_tid acquires the commit lock.
                # The commit lock must be acquired after the pack lock
                # because the database adapters also acquire in that
                # order during packing.
                tid_lock = DatabaseLockedForTid.lock_database_for_next_tid(
                    cursor, adapter, self.ude)
                self.committing_tid_lock = tid_lock

            self_tid_int = self.committing_tid_lock.tid_int
            copied = adapter.packundo.undo(
                cursor, undo_tid_int, self_tid_int)
            oids = [int64_to_8bytes(oid_int) for oid_int, _ in copied]

            # Update the current object pointers immediately, so that
            # subsequent undo operations within this transaction will see
            # the new current objects.
            adapter.mover.update_current(cursor, self_tid_int)

            self.blobhelper.copy_undone(copied,
                                        self.committing_tid_lock.tid)

            if not self.undone_oids:
                self.undone_oids = set()
            self.undone_oids.update(oids)
        finally:
            adapter.locker.release_pack_lock(cursor)
Example #12
    def __init__(self, trans_iter, tid_int, user, desc, ext, packed):
        self._trans_iter = trans_iter
        self._tid_int = tid_int
        tid = int64_to_8bytes(tid_int)
        status = 'p' if packed else ' '
        user = user or b''
        description = desc or b''
        if ext:
            extension = loads(ext)
        else:
            extension = {}

        TransactionRecord.__init__(self, tid, status, user, description, extension)
Example #13
    def lock_database_for_next_tid(cls, cursor, adapter, ude):
        # We're midway between the state of a database-wide lock
        # and consistent row-level locking. The lock here is now
        # a row-level artificial lock on COMMIT_ROW_LOCK, and we then
        # read TRANSACTION (or OBJECT_STATE in HF).
        # TODO: Continue working to remove the need for the artificial
        # lock.
        user, desc, ext = ude
        tid_int = adapter.lock_database_and_choose_next_tid(
            cursor, user, desc, ext)

        tid = int64_to_8bytes(tid_int)
        return cls(tid, tid_int, adapter)
Example #14
    def lock_database_and_choose_next_tid(self, cursor, username, description,
                                          extension):
        self.locker.hold_commit_lock(cursor, ensure_current=True)

        # Choose a transaction ID.
        #
        # Base the transaction ID on the current time, but ensure that
        # the tid of this transaction is greater than any existing
        # tid.
        last_tid = self.txncontrol.get_tid(cursor)
        now = time.time()
        stamp = timestamp_at_unixtime(now)
        stamp = stamp.laterThan(TimeStamp(int64_to_8bytes(last_tid)))
        tid = stamp.raw()

        tid_int = bytes8_to_int64(tid)
        self.txncontrol.add_transaction(cursor, tid_int, username, description,
                                        extension)
        logger.log(TRACE, "Picked next tid locally: %s", tid_int)
        return tid_int
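
A standalone sketch of the tid-choosing step above, using the persistent.timestamp.TimeStamp API the code relies on; timestamp_at_unixtime is reconstructed here the way Example #18 builds a TimeStamp from a Unix time, and the byte helpers are assumed to be ZODB.utils.p64/u64:

import time
from persistent.timestamp import TimeStamp
from ZODB.utils import p64 as int64_to_8bytes, u64 as bytes8_to_int64

def choose_next_tid_int(last_tid_int):
    now = time.time()
    stamp = TimeStamp(*time.gmtime(now)[:5] + (now % 60,))
    # Never pick a tid at or below the last committed one,
    # even if the wall clock went backwards:
    stamp = stamp.laterThan(TimeStamp(int64_to_8bytes(last_tid_int)))
    return bytes8_to_int64(stamp.raw())
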
Example #15
    def undoLog(self, first=0, last=-20, filter=None):
        # pylint:disable=too-many-locals
        if last < 0:
            last = first - last

        # use a private connection to ensure the most current results
        adapter = self.adapter
        conn, cursor = adapter.connmanager.open()
        try:
            rows = adapter.dbiter.iter_transactions(cursor)
            i = 0
            res = []
            for tid_int, user, desc, ext in rows:
                tid = int64_to_8bytes(tid_int)
                # Note that user and desc are schizophrenic. The transaction
                # interface specifies that they are a Python str, *probably*
                # meaning bytes. But code in the wild and the ZODB test suite
                # sets them as native strings, meaning unicode on Py3. OTOH, the
                # test suite checks that this method *returns* them as bytes!
                # This is largely cleaned up with transaction 2.0/ZODB 5, where the storage
                # interface is defined in terms of bytes only.
                d = {
                    'id': base64_encodebytes(tid)[:-1],  # pylint:disable=deprecated-method
                    'time': TimeStamp(tid).timeTime(),
                    'user_name': user or b'',
                    'description': desc or b'',
                }
                if ext:
                    d.update(loads(ext))

                if filter is None or filter(d):
                    if i >= first:
                        res.append(d)
                    i += 1
                    if i >= last:
                        break
            return res

        finally:
            adapter.connmanager.close(conn, cursor)
Example #16
    def load(self, oid, version=''):
        # pylint:disable=unused-argument
        oid_int = bytes8_to_int64(oid)
        state, tid_int = self.__load_using_method(self.cache.load, oid_int)

        if tid_int is None:
            _log_keyerror(self.load_connection.cursor,
                          self.adapter,
                          oid_int,
                          "no tid found")
            raise POSKeyError(oid)

        if not state:
            # This can happen if something attempts to load
            # an object whose creation has been undone or which was deleted
            # by IExternalGC.deleteObject().
            _log_keyerror(self.load_connection.cursor,
                          self.adapter,
                          oid_int,
                          "creation has been undone")
            raise POSKeyError(oid)
        return state, int64_to_8bytes(tid_int)
Example #17
    def load(self, oid, version=''):
        # pylint:disable=unused-argument
        oid_int = bytes8_to_int64(oid)
        # TODO: Here, and in prefetch, should we check bool(load_connection)?
        # If it's not active and had polled, we don't really want to do that, do we?
        load_cursor = self.load_connection.cursor
        state, tid_int = self.__load_using_method(load_cursor, self.cache.load,
                                                  oid_int)
        if tid_int is None:
            raise self.__pke(
                oid,
                **_make_pke_data(load_cursor, self.adapter, oid_int,
                                 "no tid found"))

        if not state:
            # This can happen if something attempts to load
            # an object whose creation has been undone or which was deleted
            # by IExternalGC.deleteObject().
            raise self.__pke(
                oid,
                **_make_pke_data(load_cursor, self.adapter, oid_int,
                                 "creation undone"))
        return state, int64_to_8bytes(tid_int)
Example #18
    def __pre_pack(self, t, referencesf):
        logger.info("pack: beginning pre-pack")

        # In 2019, Unix timestamps look like
        #            1564006806.0
        # While 64-bit integer TIDs for the same timestamp look like
        #    275085010696509852
        #
        # Multiple TIDs can map to a single Unix timestamp.
        # For example, the 9 integers between 275085010624927035 and
        # 275085010624927044 all map to 1564006804.9999998.
        #
        # Therefore, Unix timestamps are ambiguous, especially if we're committing
        # multiple transactions rapidly (within the resolution of the underlying TID
        # clock).
        # This ambiguity mostly matters for unit tests, where we do commit rapidly.
        #
        # To help them out, we accept 64-bit integer TIDs to specify an exact
        # transaction to pack to.

        # We also allow None or a negative number to mean "current committed transaction".
        if t is None:
            t = -1

        if t > 275085010696509852:
            # Must be a TID.

            # Turn it back into a time.time() for later logging
            ts = TimeStamp(int64_to_8bytes(t))
            logger.debug(
                "Treating requested pack time %s as TID meaning %s",
                t, ts
            )
            best_pack_tid_int = t
            t = ts.timeTime()
        elif t < 0 or t >= time.time():
            # Packing for the current time or in the future means to pack
        # to the latest commit in the database. This matters if not all
            # machine clocks are synchronized.
            best_pack_tid_int = MAX_TID - 1
        else:
            # Find the latest commit before or at the pack time.
            # Note that several TIDs will fit in the resolution of a time.time(),
            # so this is slightly ambiguous.
            requested_pack_ts = TimeStamp(*time.gmtime(t)[:5] + (t % 60,))
            requested_pack_tid = requested_pack_ts.raw()
            requested_pack_tid_int = bytes8_to_int64(requested_pack_tid)

            best_pack_tid_int = requested_pack_tid_int

        tid_int = self.packundo.choose_pack_transaction(best_pack_tid_int)

        if tid_int is None:
            logger.debug("all transactions before %s have already "
                         "been packed", time.ctime(t))
            return

        s = time.ctime(TimeStamp(int64_to_8bytes(tid_int)).timeTime())
        logger.info("Analyzing transactions committed %s or before (TID %d)",
                    s, tid_int)

        # In pre_pack, the adapter fills tables with
        # information about what to pack.  The adapter
        # must not actually pack anything yet.
        def get_references(state):
            """Return an iterable of the set of OIDs the given state refers to."""
            if not state:
                return ()

            return {bytes8_to_int64(oid) for oid in referencesf(state)}

        self.packundo.pre_pack(tid_int, get_references)
        logger.info("pack: pre-pack complete")
        return tid_int
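
A quick worked check of the time-to-TID mapping the comments describe, built the same way the method itself builds a TimeStamp (ZODB and persistent assumed installed):

import time
from persistent.timestamp import TimeStamp
from ZODB.utils import u64 as bytes8_to_int64

t = 1564006806.0
ts = TimeStamp(*time.gmtime(t)[:5] + (t % 60,))
tid_int = bytes8_to_int64(ts.raw())
# Many adjacent tid_ints collapse to nearly the same timeTime(), which
# is why an exact 64-bit TID is accepted as the pack target.
print(tid_int, ts.timeTime())
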
Example #19
 def __init__(self, tid, oid_int, data):
     DataRecord.__init__(self, int64_to_8bytes(oid_int), tid, data, None)
Example #20
    def __check_and_resolve_conflicts(self, storage, conflicts):
        """
        Either raises a `ConflictError` or successfully resolves
        all conflicts.

        Returns a set of int OIDs for objects modified in this transaction
        but which were then updated by conflict resolution and so must
        be invalidated.

        All the rows needed for detecting conflicts should be locked against
        concurrent changes.
        """
        # pylint:disable=too-many-locals
        cursor = self.store_connection.cursor
        adapter = self.adapter
        cache = self.cache
        tryToResolveConflict = storage.tryToResolveConflict

        # Detect conflicting changes.
        # Try to resolve the conflicts.
        invalidated_oid_ints = set()

        # In the past, we didn't load all conflicts from the DB at
        # once, just one at a time. This was because we also fetched
        # the new state data from the DB, and it could be large (if
        # lots of conflicts). But now we use the state we have in our
        # local temp cache for the new state, so we don't need to
        # fetch it, meaning this result will be small.
        #
        # The resolution process needs three pickles: the one we tried
        # to save, the one we're based off of, and the one currently
        # committed. The new one is passed as a parameter; the one
        # currently committed can optionally be passed (if not,
        # loadSerial() is used to get it), and the one we were based
        # off of is always loaded with loadSerial(). We *probably*
        # have the one we're based off of already in our storage
        # cache; the one that's currently committed is, I think, less
        # likely to be there, so there may be some benefit from
        # returning it in the conflict query. If we have a cache miss
        # and have to go to the database, that's bad: we're holding
        # object locks at this point so we're potentially blocking
        # other transactions.
        required_tids = self.required_tids
        self.count_conflicts = count_conflicts = len(conflicts)
        if count_conflicts:
            logger.debug("Attempting to resolve %d conflicts", count_conflicts)

        for conflict in conflicts:
            oid_int, committed_tid_int, tid_this_txn_saw_int, committed_state = conflict
            if tid_this_txn_saw_int is None:
                # A readCurrent entry. Did it conflict?
                expect_tid_int = required_tids[oid_int]
                if committed_tid_int != expect_tid_int:
                    raise VoteReadConflictError(
                        oid=int64_to_8bytes(oid_int),
                        serials=(int64_to_8bytes(committed_tid_int),
                                 int64_to_8bytes(expect_tid_int)))
                continue

            state_from_this_txn = cache.read_temp(oid_int)
            oid = int64_to_8bytes(oid_int)
            prev_tid = int64_to_8bytes(committed_tid_int)
            serial = int64_to_8bytes(tid_this_txn_saw_int)
            resolved_state = tryToResolveConflict(oid, prev_tid, serial,
                                                  state_from_this_txn,
                                                  committed_state)

            if resolved_state is None:
                # unresolvable; kill the whole transaction
                raise ConflictError(oid=oid,
                                    serials=(prev_tid, serial),
                                    data=state_from_this_txn)

            # resolved
            invalidated_oid_ints.add(oid_int)
            cache.store_temp(oid_int, resolved_state, committed_tid_int)

        if invalidated_oid_ints:
            # We resolved some conflicts, so we need to send them over to the database.
            adapter.mover.replace_temps(
                cursor,
                self.cache.temp_objects.iter_for_oids(invalidated_oid_ints))

        return invalidated_oid_ints
Example #21
 def enter(self, storage):
     resolved_in_vote_oid_ints = self.__vote(storage)
     self.invalidated_oids.update(
         {int64_to_8bytes(i)
          for i in resolved_in_vote_oid_ints})
     self.lock_and_vote_times[1] = time.time()
Example #22
    def __check_and_resolve_conflicts(self, storage, conflicts):
        """
        Either raises a `ConflictError` or successfully resolves
        all conflicts.

        Returns a set of int OIDs for objects modified in this transaction
        but which were then updated by conflict resolution and so must
        be invalidated.

        All the rows needed for detecting conflicts should be locked against
        concurrent changes.

        :param conflicts: A sequence of information needed for detecting
           and resolving conflicts:
           ``(oid_int, committed_tid_int, tid_this_txn_saw_int, committed_state)``.
           If ``tid_this_txn_saw_int`` is None, it was a read-current check,
           and unless the ``committed_tid_int`` matches the expected value,
           a conflict error is raised.
        """
        # pylint:disable=too-many-locals
        invalidated_oid_ints = set()
        if not conflicts:
            return invalidated_oid_ints

        self.count_conflicts = count_conflicts = len(conflicts)

        # In the past, we didn't load all conflicts from the DB at
        # once, just one at a time. This was because we also fetched
        # the new state data from the DB, and it could be large (if
        # lots of conflicts). But now we use the state we have in our
        # local temp cache for the new state, so we don't need to
        # fetch it, meaning this result will be small...
        #
        # ...almost. The resolution process needs three pickles: the
        # one we tried to save, the one we're based off of, and the
        # one currently committed. Remember we have locked objects at
        # this point, so we need to finish ASAP to not block other
        # transactions; under gevent, we also need to avoid giving up
        # control to the event loop for arbitrary periods of time,
        # as it could take a long time to get back to us.

        # - The one we tried to save (the new one) is passed as a
        # parameter. We read this from our local storage, which is
        # probably in memory and thus fast.
        #
        # - The one currently committed can optionally be passed, and
        # if not, loadSerial() is used to get it. It seems somewhat
        # unlikely that it's in the local pickle cache, so we
        # probably benefit from returning it in the conflict query.
        #
        # - The one we were based off of is always loaded with
        # loadSerial(). We *possibly* have the one we're based off of
        # already in our storage cache, but there's no guarantee. So
        # it's best to prefetch all these things in order to limit the
        # number of database round-trips and the opportunity to block
        # for arbitrary periods of time.
        logger.debug("Attempting to resolve %d conflicts", count_conflicts)

        required_tids = self.required_tids
        old_states_to_prefetch = []
        actual_conflicts = []
        # First, go through and distinguish read-current conflicts from
        # state conflicts (if the adapter didn't do that already).
        for conflict in conflicts:
            oid_int, committed_tid_int, tid_this_txn_saw_int, _ = conflict
            if tid_this_txn_saw_int is not None:
                # An actual conflict. We need the state.
                actual_conflicts.append(conflict)
                old_states_to_prefetch.append((oid_int, tid_this_txn_saw_int))
            else:
                # A readCurrent entry. Did it conflict?
                # Note that some database adapters (MySQL) may have already raised a
                # UnableToLockRowsToReadCurrentError indicating a conflict. That's a type
                # of ReadConflictError like this.
                expect_tid_int = required_tids[oid_int]
                if committed_tid_int != expect_tid_int:
                    raise VoteReadConflictError(
                        oid=int64_to_8bytes(oid_int),
                        serials=(int64_to_8bytes(committed_tid_int),
                                 int64_to_8bytes(expect_tid_int)))

        if not actual_conflicts:
            # Nothing to prefetch or resolve. No need to go critical,
            # we have no other opportunities to switch.
            return invalidated_oid_ints

        # We're probably going to need to make a database query. Elevate our
        # priority and regain control ASAP.
        self.__enter_critical_phase_until_transaction_end()

        old_states_and_tids = self.shared_state.cache.prefetch_for_conflicts(
            self.shared_state.load_connection.cursor, old_states_to_prefetch)

        tryToResolveConflict = _CachedConflictResolver(
            storage, old_states_and_tids).tryToResolveConflict

        adapter = self.shared_state.adapter
        read_temp = self.shared_state.temp_storage.read_temp
        store_temp = self.shared_state.temp_storage.store_temp

        # The conflicts can be very large binary strings, no need to include
        # them in traceback info. (Plus they could be sensitive.)
        __traceback_info__ = count_conflicts, invalidated_oid_ints

        for conflict in actual_conflicts:
            # Match the names of the arguments used
            oid_int, committed_tid_int, tid_this_txn_saw_int, committedData = conflict

            oid = int64_to_8bytes(oid_int)
            committedSerial = int64_to_8bytes(committed_tid_int)
            oldSerial = int64_to_8bytes(tid_this_txn_saw_int)
            newpickle = read_temp(oid_int)

            # Because we're using the _CachedConflictResolver, we can only loadSerial()
            # one state: the ``oldSerial`` state. Therefore the committedData *must* be
            # given.

            resolved_state = tryToResolveConflict(oid, committedSerial,
                                                  oldSerial, newpickle,
                                                  committedData)

            if resolved_state is None:
                # unresolvable; kill the whole transaction
                raise ConflictError(
                    oid=oid,
                    serials=(oldSerial, committedSerial),
                    data=newpickle,
                )

            # resolved
            invalidated_oid_ints.add(oid_int)
            store_temp(oid_int, resolved_state, committed_tid_int)

        # We resolved some conflicts, so we need to send them over to the database.
        adapter.mover.replace_temps(
            self.shared_state.store_connection.cursor,
            self.shared_state.temp_storage.iter_for_oids(invalidated_oid_ints))

        return invalidated_oid_ints
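
A minimal sketch of the first pass over ``conflicts``, separating readCurrent entries from true write conflicts the way the loop above does; it collects violations instead of raising, and the tuple shape comes straight from the docstring:

def split_conflicts(conflicts, required_tids):
    read_current_violations = []
    actual_conflicts = []
    for oid_int, committed_tid_int, tid_this_txn_saw_int, state in conflicts:
        if tid_this_txn_saw_int is None:
            # readCurrent entry: a violation only if the committed tid moved.
            if committed_tid_int != required_tids[oid_int]:
                read_current_violations.append((oid_int, committed_tid_int))
        else:
            actual_conflicts.append(
                (oid_int, committed_tid_int, tid_this_txn_saw_int, state))
    return read_current_violations, actual_conflicts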