Пример #1
0
 def checkPackJustOldRevisions(self):
     """Pack a storage holding three revisions of one object and verify
     that only the old revisions are removed.

     The object is kept reachable from the root, so packing must preserve
     the root record and the object's latest revision (``revid3``) while
     discarding ``revid1`` and ``revid2``.
     """
     # Short aliases for the unittest helpers used throughout.
     eq = self.assertEqual
     raises = self.assertRaises
     loads = self._makeloader()
     # Create a root object.  This can't be an instance of Object,
     # otherwise the pickling machinery will serialize it as a persistent
     # id and not as an object that contains references (persistent ids) to
     # other objects.
     root = Root()
     # Create a persistent object, with some initial state
     obj = self._newobj()
     oid = obj.getoid()
     # Link the root object to the persistent object, in order to keep the
     # persistent object alive.  Store the root object.
     root.obj = obj
     root.value = 0
     revid0 = self._dostoreNP(ZERO, data=dumps(root))
     # Make sure the root can be retrieved
     data, revid = self._storage.load(ZERO, '')
     eq(revid, revid0)
     eq(loads(data).value, 0)
     # Commit three different revisions of the other object
     obj.value = 1
     revid1 = self._dostoreNP(oid, data=pdumps(obj))
     obj.value = 2
     revid2 = self._dostoreNP(oid, revid=revid1, data=pdumps(obj))
     obj.value = 3
     revid3 = self._dostoreNP(oid, revid=revid2, data=pdumps(obj))
     # Now make sure only the latest revision can be extracted
     # (__traceback_info__ surfaces the integer forms of the ids in any
     # failure traceback, aiding debugging).
     __traceback_info__ = [
         bytes8_to_int64(x) for x in (oid, revid1, revid2)
     ]
     raises(KeyError, self._storage.loadSerial, oid, revid1)
     raises(KeyError, self._storage.loadSerial, oid, revid2)
     data = self._storage.loadSerial(oid, revid3)
     pobj = loads(data)
     eq(pobj.getoid(), oid)
     eq(pobj.value, 3)
     # Now pack.  The object should stay alive because it's pointed
     # to by the root.
     self._storage.pack(self._storage.lastTransactionInt(), referencesf)
     # Make sure the revisions are gone, but that object zero and revision
     # 3 are still there and correct
     data, revid = self._storage.load(ZERO, '')
     eq(revid, revid0)
     eq(loads(data).value, 0)
     raises(KeyError, self._storage.loadSerial, oid, revid1)
     raises(KeyError, self._storage.loadSerial, oid, revid2)
     data = self._storage.loadSerial(oid, revid3)
     pobj = loads(data)
     eq(pobj.getoid(), oid)
     eq(pobj.value, 3)
     data, revid = self._storage.load(oid, '')
     eq(revid, revid3)
     pobj = loads(data)
     eq(pobj.getoid(), oid)
     eq(pobj.value, 3)
Пример #2
0
 def getBlobFilePath(self, oid, tid):
     """Return the relative path of the blob file for (*oid*, *tid*).

     The OID's 64-bit integer value is split by ``self.size``: the
     remainder names the bucket directory, the quotient becomes the
     file's base name, followed by the hex-encoded tid and the standard
     blob suffix.
     """
     oid_int = bytes8_to_int64(oid)
     bucket, remainder = divmod(oid_int, self.size)
     filename = "%s.%s%s" % (
         bucket,
         hexlify(tid).decode('ascii'),
         ZODB.blob.BLOB_SUFFIX,
     )
     return os.path.join(str(remainder), filename)
Пример #3
0
 def __next__(self):
     """Advance the OID iterator and build the corresponding DataRecord.

     Looks up the stored state for the next OID, logs what is being
     restored, and returns a record tagged with this iterator's tid.
     Raises StopIteration (via ``next``) when the OIDs are exhausted.
     """
     current_oid = next(self._oid_int_iter)
     state, _ignored = self._states[current_oid]
     logger.info("Restoring OID %d at TID %s (state size %d)",
                 current_oid, bytes8_to_int64(self._tid), len(state))
     return DataRecord(int64_to_8bytes(current_oid), self._tid, state, None)
Пример #4
0
    def after_tpc_finish(self, tid, temp_storage):
        """
        Push all queued objects for the committed transaction into the cache.

        Runs after the database commit lock is released but before control
        returns to the Connection. At this point *tid* is final, so every
        object we have been holding can be cached under its ``(oid, tid)``
        key (in a very large transaction some entries may be skipped, but
        the cache makes a best effort).
        """
        self.cache.set_all_for_tid(bytes8_to_int64(tid), temp_storage)
Пример #5
0
 def oid_to_path(self, oid):
     """Return the bucket directory name for *oid*: its 64-bit integer
     value modulo ``self.size``, rendered as a string."""
     oid_int = bytes8_to_int64(oid)
     return str(oid_int % self.size)
Пример #6
0
 def __iter__(self):
     """Log the pending restore and hand back a RecordIterator over the
     queued OIDs for this tid."""
     oid_count = len(self._oid_ints)
     logger.info("Restoring %d oids for tid %d",
                 oid_count, bytes8_to_int64(self.tid))
     return RecordIterator(self.tid, self._oid_ints, self._states)