Code Example #1
 def checkPackJustOldRevisions(self):
     eq = self.assertEqual
     raises = self.assertRaises
     loads = self._makeloader()
     # Create a root object.  This can't be an instance of Object,
     # otherwise the pickling machinery will serialize it as a persistent
     # id and not as an object that contains references (persistent ids) to
     # other objects.
     root = Root()
     # Create a persistent object, with some initial state
     obj = self._newobj()
     oid = obj.getoid()
     # Link the root object to the persistent object, in order to keep the
     # persistent object alive.  Store the root object.
     root.obj = obj
     root.value = 0
     revid0 = self._dostoreNP(ZERO, data=dumps(root))
     # Make sure the root can be retrieved
     data, revid = self._storage.load(ZERO, '')
     eq(revid, revid0)
     eq(loads(data).value, 0)
     # Commit three different revisions of the other object
     obj.value = 1
     revid1 = self._dostoreNP(oid, data=pdumps(obj))
     obj.value = 2
     revid2 = self._dostoreNP(oid, revid=revid1, data=pdumps(obj))
     obj.value = 3
     revid3 = self._dostoreNP(oid, revid=revid2, data=pdumps(obj))
     # Now make sure only the latest revision can be extracted
     __traceback_info__ = [
         bytes8_to_int64(x) for x in (oid, revid1, revid2)
     ]
     raises(KeyError, self._storage.loadSerial, oid, revid1)
     raises(KeyError, self._storage.loadSerial, oid, revid2)
     data = self._storage.loadSerial(oid, revid3)
     pobj = loads(data)
     eq(pobj.getoid(), oid)
     eq(pobj.value, 3)
     # Now pack.  The object should stay alive because it's pointed
     # to by the root.
     self._storage.pack(self._storage.lastTransactionInt(), referencesf)
     # Make sure the revisions are gone, but that object zero and revision
     # 3 are still there and correct
     data, revid = self._storage.load(ZERO, '')
     eq(revid, revid0)
     eq(loads(data).value, 0)
     raises(KeyError, self._storage.loadSerial, oid, revid1)
     raises(KeyError, self._storage.loadSerial, oid, revid2)
     data = self._storage.loadSerial(oid, revid3)
     pobj = loads(data)
     eq(pobj.getoid(), oid)
     eq(pobj.value, 3)
     data, revid = self._storage.load(oid, '')
     eq(revid, revid3)
     pobj = loads(data)
     eq(pobj.getoid(), oid)
     eq(pobj.value, 3)
Code Example #2
 def checkPackJustOldRevisions(self):
     eq = self.assertEqual
     raises = self.assertRaises
     loads = self._makeloader()
     # Create a root object.  This can't be an instance of Object,
     # otherwise the pickling machinery will serialize it as a persistent
     # id and not as an object that contains references (persistent ids) to
     # other objects.
     root = Root()
     # Create a persistent object, with some initial state
     obj = self._newobj()
     oid = obj.getoid()
     # Link the root object to the persistent object, in order to keep the
     # persistent object alive.  Store the root object.
     root.obj = obj
     root.value = 0
     revid0 = self._dostoreNP(ZERO, data=dumps(root))
     # Make sure the root can be retrieved
     data, revid = self._storage.load(ZERO, '')
     eq(revid, revid0)
     eq(loads(data).value, 0)
     # Commit three different revisions of the other object
     obj.value = 1
     revid1 = self._dostoreNP(oid, data=pdumps(obj))
     obj.value = 2
     revid2 = self._dostoreNP(oid, revid=revid1, data=pdumps(obj))
     obj.value = 3
     revid3 = self._dostoreNP(oid, revid=revid2, data=pdumps(obj))
     # Now make sure only the latest revision can be extracted
     raises(KeyError, self._storage.loadSerial, oid, revid1)
     raises(KeyError, self._storage.loadSerial, oid, revid2)
     data = self._storage.loadSerial(oid, revid3)
     pobj = loads(data)
     eq(pobj.getoid(), oid)
     eq(pobj.value, 3)
     # Now pack.  The object should stay alive because it's pointed
     # to by the root.
     now = packtime = time.time()
     while packtime <= now:
         packtime = time.time()
     self._storage.pack(packtime, referencesf)
     # Make sure the revisions are gone, but that object zero and revision
     # 3 are still there and correct
     data, revid = self._storage.load(ZERO, '')
     eq(revid, revid0)
     eq(loads(data).value, 0)
     raises(KeyError, self._storage.loadSerial, oid, revid1)
     raises(KeyError, self._storage.loadSerial, oid, revid2)
     data = self._storage.loadSerial(oid, revid3)
     pobj = loads(data)
     eq(pobj.getoid(), oid)
     eq(pobj.value, 3)
     data, revid = self._storage.load(oid, '')
     eq(revid, revid3)
     pobj = loads(data)
     eq(pobj.getoid(), oid)
     eq(pobj.value, 3)
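Examples #1 and #2 differ only in how the pack time is chosen: #1 passes self._storage.lastTransactionInt() directly, while #2 busy-waits until time.time() has advanced past the moment captured just before the pack, so the pack time is strictly later than every commit that has already finished. The same idiom recurs in several examples below; as a minimal sketch, it could be pulled out into a helper (the name next_wall_clock_time is hypothetical, not part of RelStorage):

    import time

    def next_wall_clock_time():
        # Spin until time.time() returns a value strictly greater than the
        # moment this function was entered, so a pack at the returned time is
        # later than any commit that completed before the call.
        now = packtime = time.time()
        while packtime <= now:
            packtime = time.time()
        return packtime

    # e.g. self._storage.pack(next_wall_clock_time(), referencesf)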
Code Example #3
 def checkPackAllRevisions(self):
     from relstorage._compat import loads
     self._initroot()
     eq = self.assertEqual
     raises = self.assertRaises
     # Create a `persistent' object
     obj = self._newobj()
     oid = obj.getoid()
     obj.value = 1
     # Commit three different revisions
     revid1 = self._dostoreNP(oid, data=pdumps(obj))
     obj.value = 2
     revid2 = self._dostoreNP(oid, revid=revid1, data=pdumps(obj))
     obj.value = 3
     revid3 = self._dostoreNP(oid, revid=revid2, data=pdumps(obj))
     # Now make sure only the latest revision can be extracted
     raises(KeyError, self._storage.loadSerial, oid, revid1)
     raises(KeyError, self._storage.loadSerial, oid, revid2)
     data = self._storage.loadSerial(oid, revid3)
     pobj = loads(data)
     eq(pobj.getoid(), oid)
     eq(pobj.value, 3)
     # Now pack all transactions; need to sleep a second to make
     # sure that the pack time is greater than the last commit time.
     now = packtime = time.time()
     while packtime <= now:
         packtime = time.time()
     self._storage.pack(packtime, referencesf)
     self._storage.sync()
     # All revisions of the object should be gone, since there is no
     # reference from the root object to this object.
     raises(KeyError, self._storage.loadSerial, oid, revid1)
     raises(KeyError, self._storage.loadSerial, oid, revid2)
     raises(KeyError, self._storage.loadSerial, oid, revid3)
     raises(KeyError, self._storage.load, oid, '')
Code Example #4
File: hftestbase.py  Project: pyzh/relstorage
 def checkPackAllRevisions(self):
     from relstorage._compat import loads
     self._initroot()
     eq = self.assertEqual
     raises = self.assertRaises
     # Create a `persistent' object
     obj = self._newobj()
     oid = obj.getoid()
     obj.value = 1
     # Commit three different revisions
     revid1 = self._dostoreNP(oid, data=pdumps(obj))
     obj.value = 2
     revid2 = self._dostoreNP(oid, revid=revid1, data=pdumps(obj))
     obj.value = 3
     revid3 = self._dostoreNP(oid, revid=revid2, data=pdumps(obj))
     # Now make sure only the latest revision can be extracted
     raises(KeyError, self._storage.loadSerial, oid, revid1)
     raises(KeyError, self._storage.loadSerial, oid, revid2)
     data = self._storage.loadSerial(oid, revid3)
     pobj = loads(data)
     eq(pobj.getoid(), oid)
     eq(pobj.value, 3)
     # Now pack all transactions; need to sleep a second to make
     # sure that the pack time is greater than the last commit time.
     now = packtime = time.time()
     while packtime <= now:
         packtime = time.time()
     self._storage.pack(packtime, referencesf)
     self._storage.sync()
     # All revisions of the object should be gone, since there is no
     # reference from the root object to this object.
     raises(KeyError, self._storage.loadSerial, oid, revid1)
     raises(KeyError, self._storage.loadSerial, oid, revid2)
     raises(KeyError, self._storage.loadSerial, oid, revid3)
     raises(KeyError, self._storage.load, oid, '')
Code Example #5
    def history(self, oid, version=None, size=1, filter=None):
        # pylint:disable=unused-argument,too-many-locals
        cursor = self.load_connection.cursor
        oid_int = bytes8_to_int64(oid)
        try:
            rows = self.adapter.dbiter.iter_object_history(cursor, oid_int)
        except KeyError:
            raise POSKeyError(oid)

        res = []
        for tid_int, username, description, extension, length in rows:
            tid = int64_to_8bytes(tid_int)
            if extension:
                d = loads(extension)
            else:
                d = {}
            d.update({
                "time": TimeStamp(tid).timeTime(),
                "user_name": username or b'',
                "description": description or b'',
                "tid": tid,
                "version": '',
                "size": length,
                "rs_tid_int": tid_int,
                "rs_oid_int": oid_int,
            })
            if filter is None or filter(d):
                res.append(d)
                if size is not None and len(res) >= size:
                    break
        return res
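A hedged usage sketch for the history() method above: each returned entry is a plain dict, so callers can read the transaction id, timestamp, and metadata of recent revisions directly. The storage variable and the choice of the root OID are assumptions for illustration only; they are not defined in the example itself.

    from ZODB.utils import z64  # the 8-byte zero OID of the root object

    # "storage" is assumed to be an object exposing the history() method above.
    for entry in storage.history(z64, size=3):
        print(entry["tid"], entry["time"], entry["user_name"],
              entry["description"], entry["size"])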
Code Example #6
    def undoLog(self, first=0, last=-20, filter=None):
        if last < 0:
            last = first - last

        # use a private connection to ensure the most current results
        with self.load_connection.isolated_connection() as cursor:
            tx_iter = self.adapter.dbiter.iter_transactions(cursor)
            i = 0
            res = []
            for tx in tx_iter:
                tid = int64_to_8bytes(tx.tid_int)
                # Note that user and desc are schizophrenic. The transaction
                # interface specifies that they are a Python str, *probably*
                # meaning bytes. But code in the wild and the ZODB test suite
                # sets them as native strings, meaning unicode on Py3. OTOH, the
                # test suite checks that this method *returns* them as bytes!
                # This is largely cleaned up with transaction 2.0/ZODB 5, where the storage
                # interface is defined in terms of bytes only.
                d = {
                    'id': base64_encodebytes(tid)[:-1],  # pylint:disable=deprecated-method
                    'time': TimeStamp(tid).timeTime(),
                    'user_name':  tx.username or b'',
                    'description': tx.description or b'',
                }
                if tx.extension:
                    d.update(loads(tx.extension))

                if filter is None or filter(d):
                    if i >= first:
                        res.append(d)
                    i += 1
                    if i >= last:
                        break
            return res
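As a usage sketch for undoLog(): the filter argument receives each per-transaction dict before it is counted toward first/last, so it can narrow the log to, for example, a single user. The storage variable and the user name are illustrative assumptions:

    # Keep only transactions committed by b'alice' (hypothetical user name).
    def by_user(d):
        return d['user_name'] == b'alice'

    # With first=0 and last=-20, at most 20 matching entries are returned,
    # since a negative last is folded into first - last = 20 above.
    for d in storage.undoLog(first=0, last=-20, filter=by_user):
        print(d['id'], d['time'], d['description'])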
Code Example #7
File: hftestbase.py  Project: lungj/relstorage
    def checkPackAllRevisions(self):
        from relstorage._compat import loads
        self._initroot()
        eq = self.assertEqual
        raises = self.assertRaises
        # Create a `persistent' object
        obj = self._newobj()
        oid = obj.getoid()
        obj.value = 1
        # Commit three different revisions
        revid1 = self._dostoreNP(oid, data=pdumps(obj))
        obj.value = 2
        revid2 = self._dostoreNP(oid, revid=revid1, data=pdumps(obj))
        obj.value = 3
        revid3 = self._dostoreNP(oid, revid=revid2, data=pdumps(obj))
        # Now make sure only the latest revision can be extracted
        raises(KeyError, self._storage.loadSerial, oid, revid1)
        raises(KeyError, self._storage.loadSerial, oid, revid2)
        data = self._storage.loadSerial(oid, revid3)
        pobj = loads(data)
        eq(pobj.getoid(), oid)
        eq(pobj.value, 3)

        self._storage.pack(self._storage.lastTransactionInt() + 1, referencesf)
        self._storage.sync()
        # All revisions of the object should be gone, since there is no
        # reference from the root object to this object.
        for revid in (revid1, revid2, revid3):
            __traceback_info__ = oid, revid
            with raises(KeyError):
                self._storage.loadSerial(oid, revid)
        with raises(KeyError):
            self._storage.load(oid)
Code Example #8
    def __init__(self, trans_iter, tid_int, user, desc, ext, packed):
        self._trans_iter = trans_iter
        self._tid_int = tid_int
        tid = int64_to_8bytes(tid_int)
        status = 'p' if packed else ' '
        user = user or b''
        description = desc or b''
        if ext:
            extension = loads(ext)
        else:
            extension = {}

        TransactionRecord.__init__(self, tid, status, user, description, extension)
Code Example #9
    def undoLog(self, first=0, last=-20, filter=None):
        # pylint:disable=too-many-locals
        if last < 0:
            last = first - last

        # use a private connection to ensure the most current results
        adapter = self.adapter
        conn, cursor = adapter.connmanager.open()
        try:
            rows = adapter.dbiter.iter_transactions(cursor)
            i = 0
            res = []
            for tid_int, user, desc, ext in rows:
                tid = int64_to_8bytes(tid_int)
                # Note that user and desc are schizophrenic. The transaction
                # interface specifies that they are a Python str, *probably*
                # meaning bytes. But code in the wild and the ZODB test suite
                # sets them as native strings, meaning unicode on Py3. OTOH, the
                # test suite checks that this method *returns* them as bytes!
                # This is largely cleaned up with transaction 2.0/ZODB 5, where the storage
                # interface is defined in terms of bytes only.
                d = {
                    'id': base64_encodebytes(tid)[:-1],  # pylint:disable=deprecated-method
                    'time': TimeStamp(tid).timeTime(),
                    'user_name': user or b'',
                    'description': desc or b'',
                }
                if ext:
                    d.update(loads(ext))

                if filter is None or filter(d):
                    if i >= first:
                        res.append(d)
                    i += 1
                    if i >= last:
                        break
            return res

        finally:
            adapter.connmanager.close(conn, cursor)
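The two undoLog() variants shown in examples #6 and #9 differ mainly in connection handling: #6 uses the isolated_connection() context manager of load_connection, while #9 opens and closes a connection by hand in a try/finally block. As a sketch only, the manual pattern could be wrapped in a generic context manager (the helper name isolated_cursor is hypothetical):

    from contextlib import contextmanager

    @contextmanager
    def isolated_cursor(connmanager):
        # Wrap the open()/close() pair from the try/finally variant so it can
        # be used with "with", mirroring the isolated_connection() style.
        conn, cursor = connmanager.open()
        try:
            yield cursor
        finally:
            connmanager.close(conn, cursor)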
Code Example #10
 def checkPackOnlyOneObject(self):
     eq = self.assertEqual
     raises = self.assertRaises
     loads = self._makeloader()
     # Create a root object.  This can't be an instance of Object,
     # otherwise the pickling machinery will serialize it as a persistent
     # id and not as an object that contains references (persistent ids) to
     # other objects.
     root = Root()
     # Create a persistent object, with some initial state
     obj1 = self._newobj()
     oid1 = obj1.getoid()
     # Create another persistent object, with some initial state.
     obj2 = self._newobj()
     oid2 = obj2.getoid()
     # Link the root object to the persistent objects, in order to keep
     # them alive.  Store the root object.
     root.obj1 = obj1
     root.obj2 = obj2
     root.value = 0
     revid0 = self._dostoreNP(ZERO, data=dumps(root))
     # Make sure the root can be retrieved
     data, revid = self._storage.load(ZERO, '')
     eq(revid, revid0)
     eq(loads(data).value, 0)
     # Commit three different revisions of the first object
     obj1.value = 1
     revid1 = self._dostoreNP(oid1, data=pdumps(obj1))
     obj1.value = 2
     revid2 = self._dostoreNP(oid1, revid=revid1, data=pdumps(obj1))
     obj1.value = 3
     revid3 = self._dostoreNP(oid1, revid=revid2, data=pdumps(obj1))
     # Now make sure only the latest revision can be extracted
     raises(KeyError, self._storage.loadSerial, oid1, revid1)
     raises(KeyError, self._storage.loadSerial, oid1, revid2)
     data = self._storage.loadSerial(oid1, revid3)
     pobj = loads(data)
     eq(pobj.getoid(), oid1)
     eq(pobj.value, 3)
     # Now commit a revision of the second object
     obj2.value = 11
     revid4 = self._dostoreNP(oid2, data=pdumps(obj2))
     # And make sure the revision can be extracted
     data = self._storage.loadSerial(oid2, revid4)
     pobj = loads(data)
     eq(pobj.getoid(), oid2)
     eq(pobj.value, 11)
     # Now pack just revisions 1 and 2 of object1.  Object1's current
     # revision should stay alive because it's pointed to by the root, as
     # should Object2's current revision.
     now = packtime = time.time()
     while packtime <= now:
         packtime = time.time()
     self._storage.pack(packtime, referencesf)
     # Make sure the revisions are gone, but that object zero, object2, and
     # revision 3 of object1 are still there and correct.
     data, revid = self._storage.load(ZERO, '')
     eq(revid, revid0)
     eq(loads(data).value, 0)
     raises(KeyError, self._storage.loadSerial, oid1, revid1)
     raises(KeyError, self._storage.loadSerial, oid1, revid2)
     data = self._storage.loadSerial(oid1, revid3)
     pobj = loads(data)
     eq(pobj.getoid(), oid1)
     eq(pobj.value, 3)
     data, revid = self._storage.load(oid1, '')
     eq(revid, revid3)
     pobj = loads(data)
     eq(pobj.getoid(), oid1)
     eq(pobj.value, 3)
     data, revid = self._storage.load(oid2, '')
     eq(revid, revid4)
     eq(loads(data).value, 11)
     data = self._storage.loadSerial(oid2, revid4)
     pobj = loads(data)
     eq(pobj.getoid(), oid2)
     eq(pobj.value, 11)