Example #1
File: begin.py Project: gotcha/relstorage
    def checkCurrentSerialInTransaction(self, oid, required_tid, transaction):
        if transaction is not self.transaction:
            raise StorageTransactionError(self, transaction)

        required_tid_int = bytes8_to_int64(required_tid)
        oid_int = bytes8_to_int64(oid)

        # If this transaction already specified a different serial for
        # this oid, the transaction conflicts with itself.
        required_tids = self.required_tids
        if not required_tids:
            required_tids = self.required_tids = OID_TID_MAP_TYPE()

        previous_serial_int = required_tids.get(oid_int, required_tid_int)
        if previous_serial_int != required_tid_int:
            raise TransactionConflictsWithItselfError(
                oid=oid,
                serials=(int64_to_8bytes(previous_serial_int), required_tid))

        newer_tid = self.shared_state.local_client.contains_oid_with_newer_tid(
            oid_int, required_tid_int)
        if newer_tid:
            raise CacheHasNewerTidError(oid=oid,
                                        serials=(int64_to_8bytes(newer_tid),
                                                 required_tid))

        required_tids[oid_int] = required_tid_int
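Note: all of these examples rely on the same 8-byte/64-bit TID and OID
conversions; per the imports in Examples #9 and #16 below, bytes8_to_int64 is
ZODB.utils.u64, and int64_to_8bytes is its inverse, ZODB.utils.p64. A minimal
round-trip sketch:

    from ZODB.utils import p64 as int64_to_8bytes, u64 as bytes8_to_int64

    tid_int = 275085010696509852            # a 64-bit integer TID
    tid = int64_to_8bytes(tid_int)          # the 8-byte big-endian form ZODB uses
    assert len(tid) == 8
    assert bytes8_to_int64(tid) == tid_int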
Example #2
    def checkNoConflictWhenDeletedNotInInPersistentCacheBeforeCP1(self):
        root_tid, _mapping_tid, db = self._populate_root_and_mapping()

        # Now, remove a persistent object. We do this by setting its
        # key to a new persistent object.
        c1 = db.open()
        root = c1.root()
        new_nested_mapping = PersistentMapping()
        root.myobj1.key = new_nested_mapping

        mapping_oid = root.myobj1._p_oid
        mapping_oid_int = bytes8_to_int64(mapping_oid)
        c1.add(new_nested_mapping)
        nested_mapping_oid = new_nested_mapping._p_oid
        nested_mapping_oid_int = bytes8_to_int64(nested_mapping_oid)
        transaction.commit()
        self.assert_oid_current(nested_mapping_oid_int, c1)

        self.assert_checkpoints(c1, (root_tid, root_tid))

        # the root is not in a delta
        self.assert_oid_not_known(ROOT_OID, c1)

        # Though it is in the cache.
        self.assert_cached_exact(ROOT_OID, root_tid, c1)

        # Create a new transaction that deletes an object but
        # that won't update the persistent cache.
        new_tid, _ = self.__set_keys_in_root_to(
            self.__make_storage_no_pcache(), {'myobj1.key': None}, {}, {},
            pack=True)

        # Now move the persistent checkpoints forward, pushing the
        # last TID for the root object out of the delta ranges.
        c1._storage._cache.local_client.store_checkpoints(new_tid, new_tid)
        # Persist
        c1.close()
        db.close()
        del db, c1

        # Now a new storage that will read the persistent cache
        storage = self.__make_storage_pcache(expected_checkpoints=(new_tid,
                                                                   new_tid), )
        # The deleted object was not put in a delta map
        self.assert_oid_not_known(nested_mapping_oid_int, storage)
        # Nor is it in a cache at the old key
        self.assert_oid_not_cached(nested_mapping_oid_int, storage)

        # Likewise, the parent mapping isn't found anywhere, because it
        # changed
        self.assert_oid_not_known(mapping_oid_int, storage)
        self.assert_oid_not_cached(mapping_oid_int, storage)

        self.__set_keys_in_root_to(storage, {
            'myobj': 180,
            'myobj1.key': 360
        }, {'': root_tid}, {
            'myobj': 3,
            'myobj1.key': None
        })
Example #3
    def restore(self, oid, this_tid, data, prev_txn, transaction):
        # Similar to store() (see comments in FileStorage.restore for
        # some differences), but used for importing transactions.
        # Note that *data* can be None.
        # The *prev_txn* "backpointer" optimization/hint is ignored.
        #
        # pylint:disable=unused-argument
        state = self.wrapping
        if transaction is not state.transaction:
            raise StorageTransactionError(self, transaction)

        adapter = state.shared_state.adapter
        cursor = state.shared_state.store_connection.cursor
        assert cursor is not None
        oid_int = bytes8_to_int64(oid)
        tid_int = bytes8_to_int64(this_tid)

        # Save the `data`.  Note that `data` can be None.
        # Note also that this doesn't go through the cache.
        state.shared_state.temp_storage.max_restored_oid = max(
            state.shared_state.temp_storage.max_restored_oid, oid_int)
        # TODO: Make it go through the cache, or at least the same
        # sort of queuing thing, so that we can do a bulk COPY.
        # The way we do it now complicates restoreBlob() and it complicates voting.
        adapter.mover.restore(cursor, self.batcher, oid_int, tid_int, data)
Example #4
    def store(self, oid, previous_tid, data, transaction):
        """
        This method should take no globally visible commit locks.
        """
        # Called by Connection.commit(), after tpc_begin has been called.
        if transaction is not self.transaction:
            raise StorageTransactionError(self, transaction)

        cache = self.cache
        oid_int = bytes8_to_int64(oid)
        if previous_tid:
            # previous_tid is the tid of the state that the
            # object was loaded from.

            # XXX PY3: ZODB.tests.IteratorStorage passes a str (non-bytes) value for oid
            prev_tid_int = bytes8_to_int64(
                previous_tid
                if isinstance(previous_tid, bytes)
                else previous_tid.encode('ascii')
            )
        else:
            prev_tid_int = 0

        self.max_stored_oid = max(self.max_stored_oid, oid_int)
        # Save the data locally in a temporary place. Later, closer to commit time,
        # we'll send it all over at once. This lets us do things like use
        # COPY in postgres.
        cache.store_temp(oid_int, data, prev_tid_int)
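The "temporary place" mentioned in the comment can be pictured as a simple
buffer of (oid_int, data, prev_tid_int) rows flushed in one bulk operation at
commit time. A minimal sketch of that pattern (not RelStorage's actual
temp-storage class; TempBuffer and the send_all callback are hypothetical):

    class TempBuffer(object):
        """Hypothetical stand-in for the temporary store used above."""

        def __init__(self):
            self._rows = []

        def store_temp(self, oid_int, data, prev_tid_int):
            # Buffer the row; nothing touches the database yet.
            self._rows.append((oid_int, data, prev_tid_int))

        def flush(self, send_all):
            # One round trip (e.g. a postgres COPY) instead of one per object.
            send_all(self._rows)
            del self._rows[:]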
Example #5
    def deleteObject(self, oid, oldserial, transaction):
        """
        This method operates directly against the ``object_state`` table;
        as such, it immediately takes out locks on that table.

        This method is only expected to be called when performing
        ``IExternalGC`` operations (e.g., from zc.zodbdgc
        or from ZODB/tests/IExternalGC.test).
        """
        if transaction is not self.transaction: # pragma: no cover
            raise StorageTransactionError(self, transaction)

        # We shouldn't have to worry about anything in self._cache
        # because by definition we are deleting objects that were not
        # reachable and so shouldn't be in the cache (or if they were,
        # we'll never ask for them anyway). Most likely, this is running
        # in a separate process anyway, not used for regular storage
        # (an instance of multi-zodb-gc). However, in case it is in a regular
        # process, and in case we do have other transactions that could theoretically
        # see this state, and to relieve memory pressure on local/global caches,
        # we do go ahead and invalidate a cached entry.
        oid_int = bytes8_to_int64(oid)
        tid_int = bytes8_to_int64(oldserial)
        self.cache.invalidate(oid_int, tid_int)

        # We delegate the actual operation to the adapter's packundo,
        # just like native pack
        cursor = self.store_connection.cursor
        # When this is done, we get a tpc_vote,
        # and a tpc_finish.
        # The interface doesn't specify a return value, so for testing
        # we return the count of rows deleted (should be 1 if successful)
        return self.adapter.packundo.deleteObject(cursor, oid, oldserial)
Example #6
File: load.py Project: alecpm/relstorage
    def loadBefore(self, oid, tid):
        """
        Return the most recent revision of oid before tid committed.
        """
        if tid is maxtid or tid == maxtid:
            # This is probably from ZODB.utils.load_current(), which
            # is really trying to just get the current state of the
            # object. This is almost entirely just from test cases; ZODB 5's mvccadapter
            # doesn't even expose it, so ZODB.Connection doesn't use it.
            #
            # Shortcut the logic below by using load() (current),
            # formatted in the way this method returns it:
            #
            #     ``(state, tid # of state, tid_after_state)``
            #
            # where tid_after_state will naturally be None
            return self.load(oid) + (None,)
        oid_int = bytes8_to_int64(oid)

        # TODO: This makes three separate queries, and also bypasses the cache.
        # We should be able to fix at least the multiple queries.

        # In the past, we would use the store connection (only if it was already open)
        # to "allow leading dato from later transactions for conflict resolution".
        # However, this doesn't seem to be used in conflict
        # resolution. ZODB.ConflictResolution.tryToResolveConflict
        # calls loadSerial(); About the only call in ZODB to
        # loadBefore() is from BlobStorage.undo() (which
        # RelStorage does not extend). Mixing and matching calls
        # between connections using different isolation levels
        # isn't great.
        #
        # We had it as a todo for a long time to stop doing that, and
        # pooling store connections was a great time to try it.
        cursor = self.load_connection.cursor
        if not self.adapter.mover.exists(cursor, oid_int):
            raise self.__pke(oid, exists=False)

        state, start_tid = self.adapter.mover.load_before(
            cursor, oid_int, bytes8_to_int64(tid))

        if start_tid is None:
            return None

        if state is None:
            # This can happen if something attempts to load
            # an object whose creation has been undone, see load()
            # This change fixes the test in
            # TransactionalUndoStorage.checkUndoCreationBranch1
            # self._log_keyerror doesn't work here, only in certain states.
            raise self.__pke(oid, undone=True)

        end_int = self.adapter.mover.get_object_tid_after(
            cursor, oid_int, start_tid)
        if end_int is not None:
            end = int64_to_8bytes(end_int)
        else:
            end = None

        return state, int64_to_8bytes(start_tid), end
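For callers, the contract here is: loadBefore() returns None when no revision
of the object was committed before *tid*, and otherwise a triple whose last
element is None if the returned state is still current. A small consumer
illustrating that contract (is_current_as_of is a hypothetical helper):

    def is_current_as_of(storage, oid, tid):
        # True if the revision of *oid* visible before *tid* has not been
        # overwritten by any later transaction.
        result = storage.loadBefore(oid, tid)
        if result is None:
            return False  # nothing committed before *tid*
        _state, _start_tid, end_tid = result
        return end_tid is None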
Example #7
    def deleteObject(self, oid, oldserial, transaction):
        """
        This method operates directly against the ``object_state`` table;
        as such, it immediately takes out locks on that table.

        This method is only expected to be called when performing
        ``IExternalGC`` operations (e.g., from zc.zodbdgc
        or from ZODB/tests/IExternalGC.test).

        In history-free mode, deleting objects does not allocate
        a new tid (well, it allocates it, but there's no place to store
        it). In history preserving mode, it will wind up allocating a tid
        to store the empty transaction (only previous states were undone).

        TODO: This needs a better, staged implementation. I think it is
           highly likely to deadlock now if anything happened to be reading
           those rows.
        XXX: If we have blobs in a non-shared disk location, this does not
           remove them.
        """
        if transaction is not self.transaction:  # pragma: no cover
            raise StorageTransactionError(self, transaction)

        # We shouldn't have to worry about anything in self._cache
        # because by definition we are deleting objects that were not
        # reachable and so shouldn't be in the cache (or if they were,
        # we'll never ask for them anyway). Most likely, this is running
        # in a separate process anyway, not used for regular storage
        # (an instance of multi-zodb-gc). However, in case it is in a regular
        # process, and in case we do have other transactions that could theoretically
        # see this state, and to relieve memory pressure on local/global caches,
        # we do go ahead and invalidate a cached entry.
        # TODO: We need a distinct name for invalidate, so we can differentiate
        # between why we're doing it. Did we write a newer version? Did we
        # delete a specific version? Etc.
        oid_int = bytes8_to_int64(oid)
        tid_int = bytes8_to_int64(oldserial)
        self.shared_state.cache.remove_cached_data(oid_int, tid_int)

        # We delegate the actual operation to the adapter's packundo,
        # just like native pack
        cursor = self.shared_state.store_connection.cursor
        # When this is done, we get a tpc_vote,
        # and a tpc_finish.
        # The interface doesn't specify a return value, so for testing
        # we return the count of rows deleted (should be 1 if successful)
        deleted = self.shared_state.adapter.packundo.deleteObject(
            cursor, oid, oldserial)
        self._invalidated_oids(oid)
        return deleted
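The two-phase-commit flow the comments mention ("we get a tpc_vote, and a
tpc_finish") would look roughly like this for an IExternalGC caller; a hedged
sketch, with storage, oid and oldserial as placeholders:

    import transaction

    txn = transaction.get()
    storage.tpc_begin(txn)
    count = storage.deleteObject(oid, oldserial, txn)
    assert count == 1  # one current row removed, per the comment above
    storage.tpc_vote(txn)
    storage.tpc_finish(txn)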
Example #8
    def __set_keys_in_root_to(self,
                              storage,
                              new_data,
                              old_tids,
                              old_data,
                              pack=False):
        """
        And return the transaction ID and current checkpoints.

        Uses an independent transaction.

        Closes *storage*.
        """
        db1 = self._closing(DB(storage))
        tx = transaction.TransactionManager()
        c1 = db1.open(tx)
        # We've polled and gained checkpoints
        self.assert_checkpoints(c1)

        root = c1.root()
        self.__do_sets(root, new_data, old_tids, old_data)
        tx.commit()

        checkpoints = self.assert_checkpoints(c1)
        self.__do_check_tids(root, old_tids)
        tid_int = bytes8_to_int64(c1._storage.lastTransaction())
        c1.close()
        if pack:
            storage.pack(tid_int, referencesf)
        db1.close()
        return tid_int, checkpoints
Example #9
    def _populate(self):
        """
        Put some revisions of a blob object in our database and on the
        filesystem.
        """
        from ZODB.utils import u64 as bytes8_to_int64

        connection1 = self.database.open()
        root = connection1.root()

        tids = self.tids = []
        times = self.times = []
        blob = Blob()

        for i in range(self.BLOB_REVISION_COUNT):
            transaction.begin()
            with blob.open('w') as f:
                f.write(b'this is blob data ' + str(i).encode())
            if 'blob' not in root:
                root['blob'] = blob
            transaction.commit()

            blob._p_activate()
            tid = blob._p_serial
            tids.append(tid)
            tid_int = bytes8_to_int64(tid)

            times.append(tid_int - 1)

        blob._p_activate()

        self.oid = oid = root['blob']._p_oid
        fshelper = self.blob_storage.blobhelper.fshelper
        self.fns = [fshelper.getBlobFilename(oid, x) for x in tids]
        connection1.close()
Example #10
    def _populate_root_and_mapping(self):
        """
        Creates the following structure in ``self._storage``::

            root.myobj1 = PersistentMapping()
            root.myobj1.key = PersistentMapping()
            root.myobj = 3

        Does this over several transactions. Returns
        the tid of the last time the root changed, and the tid
        of ``root.myobj1``, which is later than the root TID and which
        is current, and the database opened on the storage.
        """
        tx1 = transaction.TransactionManager()
        storage1 = self._storage
        db1 = self._closing(DB(storage1))
        c1 = db1.open(tx1)
        root = c1.root
        root().myobj1 = root.myobj1 = mapping = PersistentMapping()
        root().myobj = root.myobj = 1
        tx1.commit()
        c1._storage._cache.clear(load_persistent=False)

        c1._storage.poll_invalidations()
        root().myobj = root.myobj = 2
        tx1.commit()
        c1._storage._cache.clear(load_persistent=False)

        c1._storage.poll_invalidations()
        root().myobj = root.myobj = 3
        tx1.commit()
        root_tid = self.assert_oid_known(ROOT_OID, c1)
        c1._storage._cache.clear(load_persistent=False)

        # Now, mutate an object that's not the root
        # so that we get a new transaction after the root was
        # modified. This transaction will be included in
        # a persistent cache.
        c1._storage.poll_invalidations()
        root().myobj1.key = root.myobj1.key = PersistentMapping()
        mapping_oid = mapping._p_oid
        mapping_oid_int = bytes8_to_int64(mapping_oid)
        tx1.commit()
        mapping_tid = self.assert_oid_known(mapping_oid_int, c1)

        # self.assert_checkpoints(c1, (root_tid, root_tid))
        self.assert_oid_current(mapping_oid_int, c1)

        # the root is not in a delta
        self.assert_oid_not_known(ROOT_OID, c1)
        # Nor is it in the cache, because the Connection's
        # object cache still had the root and we were never
        # asked.
        self.assert_oid_not_cached(ROOT_OID, c1)
        # So let's get it in the cache with its current TID.
        c1._storage.load(z64)
        self.assert_cached_exact(ROOT_OID, root_tid, c1)

        c1.close()
        return root_tid, mapping_tid, db1
Example #11
    def __do_sets(self, root, new_data, old_tids, old_data):
        for key, value in new_data.items():
            old_tid = old_tids.get(key)
            old_value = old_data.get(key)

            key_path = key.split('.')
            attr_name = key_path[-1]
            __traceback_info__ = key_path
            base = root
            name = None
            for name in key_path[:-1]:
                base = getattr(base, name)
            oid = bytes8_to_int64(base._p_oid)
            __traceback_info__ = key, oid, old_tid
            if old_tid is not None:
                # Opening the database loaded the root object, so it's
                # now cached with the expected key; it may not actually
                # be at that exact TID, though.
                self.assert_cached(oid, old_tid, root._p_jar)

            if old_value is not None:
                val = getattr(base, attr_name)
                self.assertEqual(val, old_value)

            setattr(base, attr_name, value)
            # Make sure we have something
            old_tids[key] = old_tid
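The dotted-key convention used here (and in __do_check_tids, Example #23)
means that 'myobj1.key' navigates root.myobj1 and then operates on its key
attribute. The same traversal as a standalone sketch (resolve is a
hypothetical name):

    def resolve(base, dotted_key):
        # 'myobj1.key' -> (base.myobj1, 'key'); the caller then uses
        # getattr/setattr with the returned leaf name.
        path = dotted_key.split('.')
        for name in path[:-1]:
            base = getattr(base, name)
        return base, path[-1]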
Example #12
    def history(self, oid, version=None, size=1, filter=None):
        # pylint:disable=unused-argument,too-many-locals
        cursor = self.load_connection.cursor
        oid_int = bytes8_to_int64(oid)
        try:
            rows = self.adapter.dbiter.iter_object_history(cursor, oid_int)
        except KeyError:
            raise POSKeyError(oid)

        res = []
        for tid_int, username, description, extension, length in rows:
            tid = int64_to_8bytes(tid_int)
            if extension:
                d = loads(extension)
            else:
                d = {}
            d.update({
                "time": TimeStamp(tid).timeTime(),
                "user_name": username or b'',
                "description": description or b'',
                "tid": tid,
                "version": '',
                "size": length,
                "rs_tid_int": tid_int,
                "rs_oid_int": oid_int,
            })
            if filter is None or filter(d):
                res.append(d)
                if size is not None and len(res) >= size:
                    break
        return res
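Each record mirrors what FileStorage's history() produces, with the
RelStorage-specific rs_tid_int/rs_oid_int integers added. A hedged usage
sketch (storage and oid are placeholders):

    for rec in storage.history(oid, size=5):
        # 'time' is a float from TimeStamp.timeTime(); 'tid' is 8 bytes.
        print(rec["time"], rec["rs_tid_int"], rec["user_name"], rec["size"])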
Example #13
File: load.py Project: alecpm/relstorage
    def loadSerial(self, oid, serial):
        """Load a specific revision of an object"""
        oid_int = bytes8_to_int64(oid)
        tid_int = bytes8_to_int64(serial)

        # If we've got this state cached exactly,
        # use it. No need to poll or anything like that first;
        # polling is unlikely to get us the state we want.
        # If the data happens to have been removed from the database,
        # due to a pack, this won't detect it if it was already cached
        # and the pack happened somewhere else. This method is
        # only used for conflict resolution, though, and we
        # shouldn't be able to get to that point if the root revision
        # went missing, right? Packing periodically takes the same locks we
        # want to take for committing.
        state = self.cache.loadSerial(oid_int, tid_int)
        if state:
            return state

        # Allow loading data from later transactions for conflict
        # resolution. There are three states involved in conflict resolution:
        # the original state (our load connection should be able to see that),
        # the committed state (our store connection can see that, and we returned it
        # when we detected the conflict) and the state we're trying to commit
        # (stored in the temporary cache data). So if we get here, mostly we
        # should need to use the load connection.

        state = self.adapter.mover.load_revision(
            self.load_connection.cursor,
            oid_int,
            tid_int)

        if state:
            return state

        # Actually using the store_connection to pull into the future was
        # removed as part of the pooling of store_connection. The above comments
        # indicate that we really shouldn't need to get here, and no tests break
        # with this commented out. What's a legitimate need for pulling into the future?
        # state = self.adapter.mover.load_revision(
        #     self.store_connection.cursor,
        #     oid_int,
        #     tid_int)
        # if state:
        #     return state

        raise self.__pke(oid, tid_int=tid_int, state=state)
Example #14
    def undo(self, transaction_id, transaction):
        """
        This method temporarily holds the pack lock, releasing it when
        done, and it also holds the commit lock, keeping it held for
        the next phase.

        Returns an iterable of ``(oid_int, tid_int)`` pairs giving the
        items that were restored and are now current. All of those oids that
        had any data stored for ``transaction_id`` are now invalid.
        """
        # Typically if this is called, the store/restore methods will *not* be
        # called, but there's not a strict guarantee about that.
        if transaction is not self.transaction:
            raise StorageTransactionError(self, transaction)

        # Unlike most places, transaction_id is the base 64 encoding
        # of an 8 byte tid

        undo_tid = base64_decodebytes(transaction_id + b'\n')  # pylint:disable=deprecated-method
        assert len(undo_tid) == 8
        undo_tid_int = bytes8_to_int64(undo_tid)

        adapter = self.shared_state.adapter
        cursor = self.shared_state.store_connection.cursor
        assert cursor is not None

        adapter.locker.hold_pack_lock(cursor)
        try:
            adapter.packundo.verify_undoable(cursor, undo_tid_int)
            self._obtain_commit_lock(cursor)

            self_tid_int = self.committing_tid_lock.tid_int
            copied = adapter.packundo.undo(cursor, undo_tid_int, self_tid_int)

            # Invalidate all cached data for these oids. We have a
            # brand new transaction ID that's greater than any they
            # had before. In history-preserving mode, there could
            # still be other valid versions. See notes in packundo:
            # In theory we could be undoing a transaction several generations in the
            # past where the object had multiple intermediate states, but in practice
            # we're probably just undoing the latest state. Still, play it
            # a bit safer.
            oid_ints = [oid_int for oid_int, _ in copied]
            self.shared_state.cache.remove_all_cached_data_for_oids(oid_ints)

            # Update the current object pointers immediately, so that
            # subsequent undo operations within this transaction will see
            # the new current objects.
            adapter.mover.update_current(cursor, self_tid_int)

            self.shared_state.blobhelper.copy_undone(
                copied, self.committing_tid_lock.tid)

            oids = [int64_to_8bytes(oid_int) for oid_int in oid_ints]
            self._invalidated_oids(*oids)

            return copied
        finally:
            adapter.locker.release_pack_lock(cursor)
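The "base 64 encoding of an 8 byte tid" detail matches ZODB's undoLog()
convention, which strips the trailing newline that base64.encodebytes()
appends; that is why the code above adds b'\n' back before decoding. A
round-trip sketch:

    from base64 import decodebytes, encodebytes
    from ZODB.utils import p64

    undo_tid = p64(275085010696509852)
    transaction_id = encodebytes(undo_tid).rstrip(b'\n')  # what undoLog() hands out
    assert decodebytes(transaction_id + b'\n') == undo_tid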
Example #15
    @classmethod
    def lock_database_for_given_tid(cls, tid, tid_is_packed, cursor, adapter,
                                    ude):
        adapter.locker.hold_commit_lock(cursor, ensure_current=True)
        tid_int = bytes8_to_int64(tid)
        user, desc, ext = ude
        adapter.txncontrol.add_transaction(cursor, tid_int, user, desc, ext,
                                           tid_is_packed)
        return cls(tid, tid_int, adapter)
Example #16
    def checkPackWithGCOnDestinationAfterRestore(self):
        raises = self.assertRaises
        closing = self._closing
        __traceback_info__ = self._storage, self._dst
        db = closing(DB(self._storage))
        conn = closing(db.open())
        root = conn.root()
        root.obj = obj1 = MinPO(1)
        txn = transaction.get()
        txn.note(u'root -> obj')
        txn.commit()
        root.obj.obj = obj2 = MinPO(2)
        txn = transaction.get()
        txn.note(u'root -> obj -> obj')
        txn.commit()
        del root.obj
        txn = transaction.get()
        txn.note(u'root -X->')
        txn.commit()

        storage_last_tid = conn._storage.lastTransaction()
        self.assertEqual(storage_last_tid, root._p_serial)

        # Now copy the transactions to the destination
        self._dst.copyTransactionsFrom(self._storage)
        self.assertEqual(self._dst.lastTransaction(), storage_last_tid)
        # If the source storage is a history-free storage, all
        # of the transactions are now marked as packed in the
        # destination storage.  To trigger a pack, we have to
        # add another transaction to the destination that is
        # not packed.
        db2 = closing(DB(self._dst))
        tx_manager = transaction.TransactionManager(explicit=True)
        conn2 = closing(db2.open(tx_manager))
        txn = tx_manager.begin()
        root2 = conn2.root()
        root2.extra = 0
        txn.note(u'root.extra = 0')
        txn.commit()

        dest_last_tid = conn2._storage.lastTransaction()
        self.assertGreater(dest_last_tid, storage_last_tid)
        self.assertEqual(dest_last_tid, root2._p_serial)

        # Now pack the destination.
        from ZODB.utils import u64 as bytes8_to_int64
        if IRelStorage.providedBy(self._dst):
            packtime = bytes8_to_int64(storage_last_tid)
        else:
            from persistent.timestamp import TimeStamp
            packtime = TimeStamp(dest_last_tid).timeTime() + 2
        self._dst.pack(packtime, referencesf)
        # And check to see that the root object exists, but not the other
        # objects.
        __traceback_info__ += (packtime,)
        _data, _serial = self._dst.load(root._p_oid, '')
        raises(KeyError, self._dst.load, obj1._p_oid, '')
        raises(KeyError, self._dst.load, obj2._p_oid, '')
Example #17
    def __init__(self, adapter, start, stop):
        self._adapter = adapter
        self._conn, self._cursor = self._adapter.connmanager.open_for_load()
        self._closed = False

        if start is not None:
            start_int = bytes8_to_int64(start)
        else:
            start_int = 1
        if stop is not None:
            stop_int = bytes8_to_int64(stop)
        else:
            stop_int = None

        # _transactions: [(tid, username, description, extension, packed)]
        self._transactions = list(adapter.dbiter.iter_transactions_range(
            self._cursor, start_int, stop_int))
        self._index = 0
Example #18
    def __init__(self, adapter, load_connection, start, stop):
        self._adapter = adapter
        self._cursor = load_connection.cursor
        self._closed = False

        if start is not None:
            start_int = bytes8_to_int64(start)
        else:
            start_int = 1
        if stop is not None:
            stop_int = bytes8_to_int64(stop)
        else:
            stop_int = None

        # _transactions: [(tid, username, description, extension, packed)]
        with load_connection.server_side_cursor() as cursor:
            self._transactions = adapter.dbiter.iter_transactions_range(
                cursor, start_int, stop_int)
        self._index = 0
Example #19
    def prefetch(self, oids):
        prefetch = self.cache.prefetch
        oid_ints = [bytes8_to_int64(oid) for oid in oids]
        try:
            self.__load_using_method(prefetch, oid_ints)
        except Exception: # pylint:disable=broad-except
            # This could raise self._stale_error, or
            # CacheConsistencyError. Both of those mean that regular loads
            # may fail too, but we don't know what our transaction state is
            # at this time, so we don't want to raise it to the caller.
            logger.exception("Failed to prefetch")
Example #20
    def assert_oid_not_known(self, oid, storage):
        cache = find_cache(storage)
        __traceback_info__ = oid, cache
        try:
            self.assertNotIn(oid, cache.object_index or ())
        except TypeError:
            if isinstance(oid, bytes):
                # OidTidMap
                oid = bytes8_to_int64(oid)
                self.assertNotIn(oid, cache.object_index or ())
            else:
                raise
Example #21
    def undo(self, transaction_id, transaction):
        """
        This method temporarily holds the pack lock, releasing it when
        done, and it also holds the commit lock, keeping it held for
        the next phase.
        """
        # Typically if this is called, the store/restore methods will *not* be
        # called, but there's not a strict guarantee about that.
        if transaction is not self.transaction:
            raise StorageTransactionError(self, transaction)

        # Unlike most places, transaction_id is the base 64 encoding
        # of an 8 byte tid

        undo_tid = base64_decodebytes(transaction_id + b'\n') # pylint:disable=deprecated-method
        assert len(undo_tid) == 8
        undo_tid_int = bytes8_to_int64(undo_tid)

        adapter = self.adapter
        cursor = self.store_connection.cursor
        assert cursor is not None

        adapter.locker.hold_pack_lock(cursor)
        try:
            adapter.packundo.verify_undoable(cursor, undo_tid_int)
            if self.committing_tid_lock is None:
                # Note that _prepare_tid acquires the commit lock.
                # The commit lock must be acquired after the pack lock
                # because the database adapters also acquire in that
                # order during packing.
                tid_lock = DatabaseLockedForTid.lock_database_for_next_tid(
                    cursor, adapter, self.ude)
                self.committing_tid_lock = tid_lock

            self_tid_int = self.committing_tid_lock.tid_int
            copied = adapter.packundo.undo(
                cursor, undo_tid_int, self_tid_int)
            oids = [int64_to_8bytes(oid_int) for oid_int, _ in copied]

            # Update the current object pointers immediately, so that
            # subsequent undo operations within this transaction will see
            # the new current objects.
            adapter.mover.update_current(cursor, self_tid_int)

            self.blobhelper.copy_undone(copied,
                                        self.committing_tid_lock.tid)

            if not self.undone_oids:
                self.undone_oids = set()
            self.undone_oids.update(oids)
        finally:
            adapter.locker.release_pack_lock(cursor)
Example #22
    def __set_keys_in_root_to(self,
                              storage,
                              new_data,
                              old_tids,
                              old_data,
                              pack=False):
        """
        Set the values for *new_data* in the root object of the storage.

        And return the transaction ID of when we made the change,
        and the transaction ID of the last time the root changed.

        Uses an independent transaction.

        *old_tids* is a map from the keys in *new_data* to an expected TID
        that should be cached. *old_data* is the same for the expected
        current values on the root.

        Closes *storage*.
        """
        db1 = self._closing(DB(storage))
        tx = transaction.TransactionManager()
        c1 = db1.open(tx)
        # We've polled and gained checkpoints
        # self.assert_checkpoints(c1)

        root = c1.root()
        self.__do_sets(root, new_data, old_tids, old_data)
        tx.commit()

        self.__do_check_tids(root, old_tids)
        tid_int = bytes8_to_int64(c1._storage.lastTransaction())
        self.assertEqual(c1._storage._cache.current_tid, tid_int)
        c1.close()
        if pack:
            storage.pack(tid_int, referencesf)
        db1.close()
        return tid_int, bytes8_to_int64(root._p_serial)
Example #23
    def __do_check_tids(self, root, old_tids):
        for key, old_tid in old_tids.items():
            key_path = key.split('.')
            base = root
            for name in key_path[:-1]:
                base = getattr(base, name)
            oid = bytes8_to_int64(base._p_oid)

            # We have a saved TID for the root object. If we had an old one,
            # it's now bigger.
            if old_tid is not None:
                self.assert_tid_after(oid, old_tid, root._p_jar)
            else:
                self.assert_oid_current(oid, root._p_jar)
Example #24
    def store(self, oid, previous_tid, data, transaction):
        """
        This method should take no globally visible commit locks.
        """
        # Called by Connection.commit(), after tpc_begin has been called.
        if transaction is not self.transaction:
            raise StorageTransactionError(self, transaction)

        oid_int = bytes8_to_int64(oid)
        if previous_tid and previous_tid != NO_PREV_TID:
            # previous_tid is the tid of the state that the
            # object was loaded from. cPersistent objects return a brand
            # new bytes object each time even if it's all zeros; Python implementation
            # returns a private constant. It would be nice if they all returned a public
            # interned constant so we could compare with `is`.
            prev_tid_int = bytes8_to_int64(previous_tid)
        else:
            prev_tid_int = 0

        # Save the data locally in a temporary place. Later, closer to commit time,
        # we'll send it all over at once. This lets us do things like use
        # COPY in postgres.
        self.temp_storage.store_temp(oid_int, data, prev_tid_int)
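The identity-versus-equality point in the comment is why this code compares
previous_tid with != rather than `is not`. It can be demonstrated directly
(z64 is the eight-NUL constant from ZODB.utils; NO_PREV_TID above presumably
plays the same role):

    from ZODB.utils import z64

    fresh = bytes(8)         # a brand new all-zero bytes object
    assert fresh == z64      # equal by value...
    assert fresh is not z64  # ...but not the same object, so `is` is unreliable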
Example #25
    def loadSerial(self, oid, serial):
        """Load a specific revision of an object"""
        oid_int = bytes8_to_int64(oid)
        tid_int = bytes8_to_int64(serial)

        # If we've got this state cached exactly,
        # use it. No need to poll or anything like that first;
        # polling is unlikely to get us the state we want.
        # If the data happens to have been removed from the database,
        # due to a pack, this won't detect it if it was already cached
        # and the pack happened somewhere else. This method is
        # only used for conflict resolution, though, and we
        # shouldn't be able to get to that point if the root revision
        # went missing, right? Packing periodically takes the same locks we
        # want to take for committing.
        state = self.cache.loadSerial(oid_int, tid_int)
        if state:
            return state

        for conn in self.store_connection, self.load_connection:
            if not conn:
                continue

            # Allow loading data from later transactions for conflict
            # resolution. In fact try that first because it's more
            # likely that our old load connection can't see this new
            # state (because this method is used only for conflict resolution).
            state = self.adapter.mover.load_revision(
                conn.cursor, oid_int, tid_int)

            if state is not None:
                break

        if not state:
            raise POSKeyError(oid)
        return state
Example #26
    def lock_database_and_choose_next_tid(self, cursor, username, description,
                                          extension):
        self.locker.hold_commit_lock(cursor, ensure_current=True)

        # Choose a transaction ID.
        #
        # Base the transaction ID on the current time, but ensure that
        # the tid of this transaction is greater than any existing
        # tid.
        last_tid = self.txncontrol.get_tid(cursor)
        now = time.time()
        stamp = timestamp_at_unixtime(now)
        stamp = stamp.laterThan(TimeStamp(int64_to_8bytes(last_tid)))
        tid = stamp.raw()

        tid_int = bytes8_to_int64(tid)
        self.txncontrol.add_transaction(cursor, tid_int, username, description,
                                        extension)
        logger.log(TRACE, "Picked next tid locally: %s", tid_int)
        return tid_int
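The "greater than any existing tid" guarantee comes from TimeStamp.laterThan().
A standalone sketch of the same rule, inlining the timestamp_at_unixtime
helper with the time.gmtime() pattern used in Example #30:

    import time
    from persistent.timestamp import TimeStamp
    from ZODB.utils import p64 as int64_to_8bytes, u64 as bytes8_to_int64

    def choose_next_tid_int(last_tid_int, now=None):
        now = time.time() if now is None else now
        stamp = TimeStamp(*time.gmtime(now)[:5] + (now % 60,))
        # Never hand out a tid <= the last committed one, even if the
        # wall clock stepped backwards.
        stamp = stamp.laterThan(TimeStamp(int64_to_8bytes(last_tid_int)))
        return bytes8_to_int64(stamp.raw())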
Example #27
    def load(self, oid, version=''):
        # pylint:disable=unused-argument
        oid_int = bytes8_to_int64(oid)
        state, tid_int = self.__load_using_method(self.cache.load, oid_int)

        if tid_int is None:
            _log_keyerror(self.load_connection.cursor,
                          self.adapter,
                          oid_int,
                          "no tid found")
            raise POSKeyError(oid)

        if not state:
            # This can happen if something attempts to load
            # an object whose creation has been undone or which was deleted
            # by IExternalGC.deleteObject().
            _log_keyerror(self.load_connection.cursor,
                          self.adapter,
                          oid_int,
                          "creation has been undone")
            raise POSKeyError(oid)
        return state, int64_to_8bytes(tid_int)
Example #28
    def test_current_time(self, now=None):
        from persistent.timestamp import TimeStamp
        from relstorage._util import int64_to_8bytes
        if now is None:
            now = time.time()
        storage = self._storage
        ts_now = timestamp_at_unixtime(now)

        expected_tid_int = bytes8_to_int64(ts_now.raw())
        __traceback_info__ = now, now % 60.0, time.gmtime(
            now), ts_now, expected_tid_int

        cursor = storage._load_connection.cursor

        cursor.execute('CALL make_tid_for_epoch(%s, @tid)', (now, ))
        cursor.execute('SELECT @tid')
        tid, = cursor.fetchall()[0]

        tid_as_timetime = TimeStamp(int64_to_8bytes(tid)).timeTime()
        __traceback_info__ += (tid_as_timetime - ts_now.timeTime(), )

        self.assertEqual(tid, expected_tid_int)
Example #29
    def load(self, oid, version=''):
        # pylint:disable=unused-argument
        oid_int = bytes8_to_int64(oid)
        # TODO: Here, and in prefetch, should we check bool(load_connection)?
        # If it's not active and had polled, we don't really want to do that, do we?
        load_cursor = self.load_connection.cursor
        state, tid_int = self.__load_using_method(load_cursor, self.cache.load,
                                                  oid_int)
        if tid_int is None:
            raise self.__pke(
                oid,
                **_make_pke_data(load_cursor, self.adapter, oid_int,
                                 "no tid found"))

        if not state:
            # This can happen if something attempts to load
            # an object whose creation has been undone or which was deleted
            # by IExternalGC.deleteObject().
            raise self.__pke(
                oid,
                **_make_pke_data(load_cursor, self.adapter, oid_int,
                                 "creation undone"))
        return state, int64_to_8bytes(tid_int)
Example #30
    def __pre_pack(self, t, referencesf):
        logger.info("pack: beginning pre-pack")

        # In 2019, Unix timestamps look like
        #            1564006806.0
        # While 64-bit integer TIDs for the same timestamp look like
        #    275085010696509852
        #
        # Multiple TIDs can map to a single Unix timestamp.
        # For example, the 9 integers between 275085010624927035 and
        # 275085010624927044 all map to 1564006804.9999998.
        #
        # Therefore, Unix timestamps are ambiguous, especially if we're committing
        # multiple transactions rapidly (within the resolution of the underlying TID
        # clock).
        # This ambiguity mostly matters for unit tests, where we do commit rapidly.
        #
        # To help them out, we accept 64-bit integer TIDs to specify an exact
        # transaction to pack to.

        # We also allow None or a negative number to mean "current committed transaction".
        if t is None:
            t = -1

        if t > 275085010696509852:
            # Must be a TID.

            # Turn it back into a time.time() for later logging
            ts = TimeStamp(int64_to_8bytes(t))
            logger.debug(
                "Treating requested pack time %s as TID meaning %s",
                t, ts
            )
            best_pack_tid_int = t
            t = ts.timeTime()
        elif t < 0 or t >= time.time():
            # Packing for the current time or in the future means to pack
            # to the latest commit in the database. This matters if not all
            # machine clocks are synchronized.
            best_pack_tid_int = MAX_TID - 1
        else:
            # Find the latest commit before or at the pack time.
            # Note that several TIDs will fit in the resolution of a time.time(),
            # so this is slightly ambiguous.
            requested_pack_ts = TimeStamp(*time.gmtime(t)[:5] + (t % 60,))
            requested_pack_tid = requested_pack_ts.raw()
            requested_pack_tid_int = bytes8_to_int64(requested_pack_tid)

            best_pack_tid_int = requested_pack_tid_int

        tid_int = self.packundo.choose_pack_transaction(best_pack_tid_int)

        if tid_int is None:
            logger.debug("all transactions before %s have already "
                         "been packed", time.ctime(t))
            return

        s = time.ctime(TimeStamp(int64_to_8bytes(tid_int)).timeTime())
        logger.info("Analyzing transactions committed %s or before (TID %d)",
                    s, tid_int)

        # In pre_pack, the adapter fills tables with
        # information about what to pack.  The adapter
        # must not actually pack anything yet.
        def get_references(state):
            """Return an iterable of the set of OIDs the given state refers to."""
            if not state:
                return ()

            return {bytes8_to_int64(oid) for oid in referencesf(state)}

        self.packundo.pre_pack(tid_int, get_references)
        logger.info("pack: pre-pack complete")
        return tid_int
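The TID/timestamp correspondence described in the comments can be checked by
hand; this sketch reproduces the 2019 figures quoted above:

    import time
    from persistent.timestamp import TimeStamp
    from ZODB.utils import p64 as int64_to_8bytes

    tid_int = 275085010696509852
    ts = TimeStamp(int64_to_8bytes(tid_int))
    print(ts.timeTime())              # ~1564006806.0, as the comment says
    print(time.ctime(ts.timeTime()))  # the human-readable form logged above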