Beispiel #1
0
    def checkSimpleTransactionalUndo(self):
        """Store three revisions of one object, undo them one at a time
        (ending with the creation record), then redo the creation.

        undoInfo() returns records newest-first, and every undo commits a
        new transaction of its own, so the info list is refetched (and the
        target index grows) before each step.
        """
        eq = self.assertEqual
        oid = self._storage.new_oid()
        revid = self._dostore(oid, data=MinPO(23))
        revid = self._dostore(oid, revid=revid, data=MinPO(24))
        revid = self._dostore(oid, revid=revid, data=MinPO(25))

        info = self._storage.undoInfo()
        # Now start an undo transaction
        self._undo(info[0]["id"], [oid], note="undo1")
        data, revid = load_current(self._storage, oid)
        eq(zodb_unpickle(data), MinPO(24))

        # Do another one
        info = self._storage.undoInfo()
        self._undo(info[2]["id"], [oid], note="undo2")
        data, revid = load_current(self._storage, oid)
        eq(zodb_unpickle(data), MinPO(23))

        # Try to undo the first record
        info = self._storage.undoInfo()
        self._undo(info[4]["id"], [oid], note="undo3")
        # This should fail since we've undone the object's creation
        self.assertRaises(KeyError, load_current, self._storage, oid)

        # And now let's try to redo the object's creation
        info = self._storage.undoInfo()
        self._undo(info[0]["id"], [oid])
        data, revid = load_current(self._storage, oid)
        eq(zodb_unpickle(data), MinPO(23))
        self._iterate()
Beispiel #2
0
    def checkUpdatesPersist(self):
        """Verify every stored revision survives closing and reopening
        the storage: snapshot all current (data, serial) pairs, reopen,
        and compare.
        """
        oids = []

        # Default arguments deliberately capture the ``oids`` list and the
        # storage's original new_oid so the wrapper records every oid the
        # storage hands out while delegating to the real implementation.
        def new_oid_wrapper(l=oids, new_oid=self._storage.new_oid):
            oid = new_oid()
            l.append(oid)
            return oid

        self._storage.new_oid = new_oid_wrapper

        self._dostore()
        oid = self._storage.new_oid()
        revid = self._dostore(oid)
        oid = self._storage.new_oid()
        revid = self._dostore(oid, data=1)
        revid = self._dostore(oid, revid, data=2)
        self._dostore(oid, revid, data=3)

        # keep copies of all the objects
        objects = []
        for oid in oids:
            p, s = load_current(self._storage, oid)
            objects.append((oid, '', p, s))

        self._storage.close()
        self.open()

        # keep copies of all the objects
        for oid, ver, p, s in objects:
            _p, _s = load_current(self._storage, oid)
            self.assertEqual(p, _p)
            self.assertEqual(s, _s)
Beispiel #3
0
def main(path=None):
    """Scan a FileStorage for dangling references.

    Opens *path* (or the path given on the command line; ``-v`` bumps
    verbosity) read-only, classifies every oid in the index as loadable,
    undone, or unloadable, then reports any loadable object that
    references a missing/undone/unloadable oid.
    """
    verbose = 0
    if path is None:
        import sys
        import getopt

        opts, args = getopt.getopt(sys.argv[1:], "v")
        for k, v in opts:
            if k == "-v":
                verbose += 1

        # Exactly one positional argument is expected: the storage path.
        path, = args

    fs = FileStorage(path, read_only=1)

    # Set of oids in the index that failed to load due to POSKeyError.
    # This is what happens if undo is applied to the transaction creating
    # the object (the oid is still in the index, but its current data
    # record has a backpointer of 0, and POSKeyError is raised then
    # because of that backpointer).
    undone = {}

    # Set of oids that were present in the index but failed to load.
    # This does not include oids in undone.
    noload = {}

    for oid in fs._index.keys():
        try:
            data, serial = load_current(fs, oid)
        except (KeyboardInterrupt, SystemExit):
            raise
        except POSKeyError:
            undone[oid] = 1
        # Narrowed from a bare ``except:`` so that non-Exception
        # BaseExceptions (e.g. GeneratorExit) are never swallowed; the
        # explicit KeyboardInterrupt/SystemExit re-raise is kept for
        # clarity and for any storage that wraps them.
        except Exception:
            if verbose:
                traceback.print_exc()
            noload[oid] = 1

    inactive = noload.copy()
    inactive.update(undone)
    for oid in fs._index.keys():
        if oid in inactive:
            continue
        data, serial = load_current(fs, oid)
        refs = get_refs(data)
        missing = []  # contains 3-tuples of oid, klass-metadata, reason
        for ref, klass in refs:
            if klass is None:
                klass = '<unknown>'
            if ref not in fs._index:
                missing.append((ref, klass, "missing"))
            if ref in noload:
                missing.append((ref, klass, "failed to load"))
            if ref in undone:
                missing.append((ref, klass, "object creation was undone"))
        if missing:
            report(oid, data, serial, missing)
Beispiel #4
0
 def _initroot(self):
     """Create the database root (a PersistentMapping) at oid ZERO if the
     storage does not already contain one, committing it via the full
     two-phase-commit protocol.
     """
     try:
         load_current(self._storage, ZERO)
     except KeyError:
         from ZODB.Connection import TransactionMetaData
         file = BytesIO()
         p = Pickler(file, _protocol)
         # A ZODB data record is two consecutive pickles: the class info
         # (class, args) followed by the object's state dict.
         p.dump((PersistentMapping, None))
         p.dump({'_container': {}})
         t = TransactionMetaData()
         t.description = u'initial database creation'
         self._storage.tpc_begin(t)
         self._storage.store(ZERO, None, file.getvalue(), '', t)
         self._storage.tpc_vote(t)
         self._storage.tpc_finish(t)
Beispiel #5
0
 def checkPackWithGCOnDestinationAfterRestore(self):
     """Copy transactions (including an unlink of root.obj) to the
     destination storage, pack it, and verify that garbage collection
     removed the two unreachable objects while the root survives.
     """
     raises = self.assertRaises
     db = DB(self._storage)
     conn = db.open()
     root = conn.root()
     root.obj = obj1 = MinPO(1)
     txn = transaction.get()
     txn.note(u'root -> obj')
     txn.commit()
     root.obj.obj = obj2 = MinPO(2)
     txn = transaction.get()
     txn.note(u'root -> obj -> obj')
     txn.commit()
     # After this, obj1 (and transitively obj2) are unreachable garbage.
     del root.obj
     txn = transaction.get()
     txn.note(u'root -X->')
     txn.commit()
     # Now copy the transactions to the destination
     self._dst.copyTransactionsFrom(self._storage)
     # Now pack the destination.
     snooze()
     self._dst.pack(time.time(),  referencesf)
     # And check to see that the root object exists, but not the other
     # objects.
     data, serial = load_current(self._dst, root._p_oid)
     raises(KeyError, load_current, self._dst, obj1._p_oid)
     raises(KeyError, load_current, self._dst, obj2._p_oid)
Beispiel #6
0
    def checkLoadBeforeUndo(self):
        """Undo the most recent transaction four times in a row, then check
        that loadBefore() reconstructs each post-undo state correctly,
        including the chaining of end tids between successive revisions.
        """
        # Do several transactions then undo them.
        oid = self._storage.new_oid()
        revid = None
        for i in range(5):
            revid = self._dostore(oid, revid, data=MinPO(i))
        revs = []
        for i in range(4):
            info = self._storage.undoInfo()
            tid = info[0]["id"]
            # Always undo the most recent txn, so the value will
            # alternate between 3 and 4.
            self._undo(tid, note="undo %d" % i)
            revs.append(load_current(self._storage, oid))

        prev_tid = None
        for i, (data, tid) in enumerate(revs):
            # loadBefore(oid, tid+1) must return exactly the revision
            # committed at ``tid``: t is (data, start_tid, end_tid).
            t = self._storage.loadBefore(oid, p64(u64(tid) + 1))
            self.assertEqual(data, t[0])
            self.assertEqual(tid, t[1])
            if prev_tid:
                self.assertTrue(prev_tid < t[1])
            prev_tid = t[1]
            if i < 3:
                # The end tid of this revision is the start tid of the next.
                self.assertEqual(revs[i+1][1], t[2])
            else:
                self.assertEqual(None, t[2])
Beispiel #7
0
    def checkLoadBefore(self):
        """Store 10 revisions of one object and verify that loadBefore()
        with a timestamp between two revisions returns the earlier one,
        with the later revision's tid as the end of its validity range.
        """
        # Store 10 revisions of one object and then make sure that we
        # can get all the non-current revisions back.
        oid = self._storage.new_oid()
        revs = []
        revid = None
        for i in range(10):
            # We need to ensure that successive timestamps are at least
            # two apart, so that a timestamp exists that's unambiguously
            # between successive timestamps.  Each call to snooze()
            # guarantees that the next timestamp will be at least one
            # larger (and probably much more than that) than the previous
            # one.
            snooze()
            snooze()
            revid = self._dostore(oid, revid, data=MinPO(i))
            revs.append(load_current(self._storage, oid))

        prev = u64(revs[0][1])
        for i in range(1, 10):
            tid = revs[i][1]
            cur = u64(tid)
            middle = prev + (cur - prev) // 2
            assert prev < middle < cur  # else the snooze() trick failed
            prev = cur
            t = self._storage.loadBefore(oid, p64(middle))
            self.assertTrue(t is not None)
            data, start, end = t
            self.assertEqual(revs[i-1][0], data)
            self.assertEqual(tid, end)
Beispiel #8
0
    def check_record_iternext(self):
        """Walk the storage with record_iternext() and check that it visits
        the three committed oids in order, returns data/tid matching
        load_current, and signals the end with next_oid == None.
        """

        db = DB(self._storage)
        conn = db.open()
        conn.root()['abc'] = MinPO('abc')
        conn.root()['xyz'] = MinPO('xyz')
        transaction.commit()

        # Ensure it's all on disk.
        db.close()
        self._storage.close()

        self.open()

        key = None
        # Expect oids 0, 1, 2 (root plus the two MinPOs), as 8-byte ids.
        for x in (b'\000', b'\001', b'\002'):
            oid, tid, data, next_oid = self._storage.record_iternext(key)
            self.assertEqual(oid, (b'\000' * 7) + x)
            key = next_oid
            expected_data, expected_tid = load_current(self._storage, oid)
            self.assertEqual(expected_data, data)
            self.assertEqual(expected_tid, tid)
            if x == b'\002':
                self.assertEqual(next_oid, None)
            else:
                self.assertNotEqual(next_oid, None)
Beispiel #9
0
    def checkNotUndoable(self):
        """Check that undo is refused (UndoError) both for a non-current
        revision of a single object and for a multi-object transaction in
        which one object has since been modified.
        """
        eq = self.assertEqual
        # Set things up so we've got a transaction that can't be undone
        oid = self._storage.new_oid()
        revid_a = self._dostore(oid, data=MinPO(51))
        revid_b = self._dostore(oid, revid=revid_a, data=MinPO(52))
        revid_c = self._dostore(oid, revid=revid_b, data=MinPO(53))
        # Start the undo
        info = self._storage.undoInfo()
        tid = info[1]['id']
        t = TransactionMetaData()
        self.assertRaises(POSException.UndoError,
                          self._begin_undos_vote, t, tid)
        self._storage.tpc_abort(t)
        # Now have more fun: object1 and object2 are in the same transaction,
        # which we'll try to undo to, but one of them has since modified in
        # different transaction, so the undo should fail.
        oid1 = oid
        revid1 = revid_c
        oid2 = self._storage.new_oid()
        revid2 = ZERO
        p81, p82, p91, p92 = map(zodb_pickle,
                                 map(MinPO, (81, 82, 91, 92)))

        t = TransactionMetaData()
        self._storage.tpc_begin(t)
        self._storage.store(oid1, revid1, p81, '', t)
        self._storage.store(oid2, revid2, p91, '', t)
        self._storage.tpc_vote(t)
        tid = self._storage.tpc_finish(t)
        # Make sure the objects have the expected values
        data, revid_11 = load_current(self._storage, oid1)
        eq(zodb_unpickle(data), MinPO(81))
        data, revid_22 = load_current(self._storage, oid2)
        eq(zodb_unpickle(data), MinPO(91))
        eq(revid_11, tid)
        eq(revid_22, tid)
        # Now modify oid2
        revid2 = self._dostore(oid2, tid, MinPO(92))
        self.assertNotEqual(tid, revid2)
        info = self._storage.undoInfo()
        tid = info[1]['id']
        t = TransactionMetaData()
        self.assertRaises(POSException.UndoError,
                          self._begin_undos_vote, t, tid)
        self._storage.tpc_abort(t)
        self._iterate()
Beispiel #10
0
 def checkUndoCreationBranch2(self):
     """Undo the latest revision of an object, then undo that undo,
     verifying the object's state after each step.
     """
     eq = self.assertEqual
     oid = self._storage.new_oid()
     revid = self._dostore(oid, data=MinPO(11))
     revid = self._dostore(oid, revid=revid, data=MinPO(12))
     # Undo the last transaction
     info = self._storage.undoInfo()
     self._undo(info[0]['id'], [oid])
     data, revid = load_current(self._storage, oid)
     eq(zodb_unpickle(data), MinPO(11))
     # Now from here, we can either redo the last undo, or undo the object
     # creation.  Let's redo the last undo
     info = self._storage.undoInfo()
     self._undo(info[0]['id'], [oid])
     data, revid = load_current(self._storage, oid)
     eq(zodb_unpickle(data), MinPO(12))
     self._iterate()
Beispiel #11
0
 def checkTwoObjectUndoAgain(self):
     """Undo multi-object transactions where the objects were created in
     separate transactions, and later where only one of the two objects
     has been modified since the transaction being undone.
     """
     eq = self.assertEqual
     p31, p32, p33, p51, p52, p53 = map(
         zodb_pickle,
         map(MinPO, (31, 32, 33, 51, 52, 53)))
     # Like the above, but the first revision of the objects are stored in
     # different transactions.
     oid1 = self._storage.new_oid()
     oid2 = self._storage.new_oid()
     revid1 = self._dostore(oid1, data=p31, already_pickled=1)
     revid2 = self._dostore(oid2, data=p51, already_pickled=1)
     # Update those same two objects
     t = TransactionMetaData()
     self._storage.tpc_begin(t)
     self._storage.store(oid1, revid1, p32, '', t)
     self._storage.store(oid2, revid2, p52, '', t)
     # Finish the transaction
     self._storage.tpc_vote(t)
     self._storage.tpc_finish(t)
     # Now attempt to undo the transaction containing two objects
     info = self._storage.undoInfo()
     self._undo(info[0]["id"], [oid1, oid2])
     data, revid1 = load_current(self._storage, oid1)
     eq(zodb_unpickle(data), MinPO(31))
     data, revid2 = load_current(self._storage, oid2)
     eq(zodb_unpickle(data), MinPO(51))
     # Like the above, but this time, the second transaction contains only
     # one object.
     t = TransactionMetaData()
     self._storage.tpc_begin(t)
     self._storage.store(oid1, revid1, p33, '', t)
     self._storage.store(oid2, revid2, p53, '', t)
     # Finish the transaction
     self._storage.tpc_vote(t)
     tid = self._storage.tpc_finish(t)
     # Update in different transactions
     revid1 = self._dostore(oid1, revid=tid, data=MinPO(34))
     revid2 = self._dostore(oid2, revid=tid, data=MinPO(54))
     # Now attempt to undo the transaction containing two objects
     info = self._storage.undoInfo()
     self.undo(info[1]['id'])
     # oid1 reverts to 33 (its only change since was the undone txn's
     # successor) while oid2 keeps its newer value 54.
     data, revid1 = load_current(self._storage, oid1)
     eq(zodb_unpickle(data), MinPO(33))
     data, revid2 = load_current(self._storage, oid2)
     eq(zodb_unpickle(data), MinPO(54))
     self._iterate()
Beispiel #12
0
 def checkFlushAfterTruncate(self, fail=False):
     """Check that tpc_abort() invalidates the storage's read buffers.

     With ``fail=True`` the assertion is inverted to expect the stale
     (buffered) value, which lets callers demonstrate the bug.
     """
     r0 = self._dostore(z64)
     storage = self._storage
     t = TransactionMetaData()
     storage.tpc_begin(t)
     storage.store(z64, r0, b'foo', b'', t)
     storage.tpc_vote(t)
     # Read operations are done with separate 'file' objects with their
     # own buffers: here, the buffer also includes voted data.
     load_current(storage, z64)
     # This must invalidate all read buffers.
     storage.tpc_abort(t)
     self._dostore(z64, r0, b'bar', 1)
     # In the case that read buffers were not invalidated, return value
     # is based on what was cached during the first load.
     self.assertEqual(load_current(storage, z64)[0],
                      b'foo' if fail else b'bar')
Beispiel #13
0
    def checkRedundantPack(self):
        """Pack with a packtime earlier than a previous pack's and verify
        that no reachable data is lost (the storage may refuse the second
        pack with StorageError instead).
        """
        # It is an error to perform a pack with a packtime earlier
        # than a previous packtime.  The storage can't do a full
        # traversal as of the packtime, because the previous pack may
        # have removed revisions necessary for a full traversal.

        # It should be simple to test that a storage error is raised,
        # but this test case goes to the trouble of constructing a
        # scenario that would lose data if the earlier packtime was
        # honored.

        self._initroot()

        db = DB(self._storage)
        conn = db.open()
        root = conn.root()

        root["d"] = d = PersistentMapping()
        transaction.commit()
        snooze()

        obj = d["obj"] = C()
        obj.value = 1
        transaction.commit()
        snooze()
        packt1 = time.time()
        lost_oid = obj._p_oid

        obj = d["anotherobj"] = C()
        obj.value = 2
        transaction.commit()
        snooze()
        packt2 = time.time()

        db.pack(packt2)
        # BDBStorage allows the second pack, but doesn't lose data.
        try:
            db.pack(packt1)
        except StorageError:
            pass
        # This object would be removed by the second pack, even though
        # it is reachable.
        load_current(self._storage, lost_oid)
Beispiel #14
0
    def _packNonUndoing(self, packtime, referencesf):
        """Pack a non-undoing blob layout: for every oid directory keep
        only the newest committed blob file, and remove the directory
        entirely if the object no longer loads.

        ``packtime`` and ``referencesf`` are accepted for interface
        compatibility; this implementation does not use them.
        """
        for oid, oid_path in self.fshelper.listOIDs():
            exists = True
            try:
                utils.load_current(self, oid)
            except (POSKeyError, KeyError):
                exists = False

            if exists:
                files = os.listdir(oid_path)
                files.sort()
                latest = files[-1] # depends on ever-increasing tids
                files.remove(latest)
                for f in files:
                    remove_committed(os.path.join(oid_path, f))
            else:
                remove_committed_dir(oid_path)
                continue

            # Remove the oid directory itself if pruning emptied it.
            if not os.listdir(oid_path):
                shutil.rmtree(oid_path)
Beispiel #15
0
 def checkReadMethods(self):
     """Spot-check the read API of a read-only storage: every known oid
     must load with its recorded serial, and loadSerial() (when the
     storage supports it) must return the same data.
     """
     self._create_data()
     self._make_readonly()
     # Note that this doesn't check _all_ read methods.
     for oid, expected_tid in self.oids.items():
         data, tid = load_current(self._storage, oid)
         self.assertEqual(tid, expected_tid)
         # Storages without revisions may not have loadSerial().
         try:
             historical = self._storage.loadSerial(oid, tid)
         except Unsupported:
             pass
         else:
             self.assertEqual(data, historical)
Beispiel #16
0
 def checkLoad_was_checkLoadEx(self):
     """Store one object, then scan the storage iterator for the record
     whose (oid, tid) matches the current revision and check that its
     transaction carries the same tid.
     """
     oid = self._storage.new_oid()
     self._dostore(oid, data=42)
     data, tid = load_current(self._storage, oid)
     self.assertEqual(zodb_unpickle(data), MinPO(42))
     found = False
     for txn in self._storage.iterator():
         for rec in txn:
             if rec.oid != oid or rec.tid != tid:
                 continue
             self.assertEqual(txn.tid, tid)
             found = True
     if not found:
         self.fail("Could not find transaction with matching id")
Beispiel #17
0
    def checkTwoObjectUndo(self):
        """Store two objects in one transaction, update them together in a
        second transaction, then undo that second transaction and verify
        both objects revert to their first-revision values.
        """
        eq = self.assertEqual
        # Convenience
        p31, p32, p51, p52 = map(zodb_pickle,
                                 map(MinPO, (31, 32, 51, 52)))
        oid1 = self._storage.new_oid()
        oid2 = self._storage.new_oid()
        revid1 = revid2 = ZERO
        # Store two objects in the same transaction
        t = TransactionMetaData()
        self._storage.tpc_begin(t)
        self._storage.store(oid1, revid1, p31, '', t)
        self._storage.store(oid2, revid2, p51, '', t)
        # Finish the transaction
        self._storage.tpc_vote(t)
        tid = self._storage.tpc_finish(t)
        # Update those same two objects
        t = TransactionMetaData()
        self._storage.tpc_begin(t)
        self._storage.store(oid1, tid, p32, '', t)
        self._storage.store(oid2, tid, p52, '', t)
        # Finish the transaction
        self._storage.tpc_vote(t)
        self._storage.tpc_finish(t)
        # Make sure the objects have the current value
        data, revid1 = load_current(self._storage, oid1)
        eq(zodb_unpickle(data), MinPO(32))
        data, revid2 = load_current(self._storage, oid2)
        eq(zodb_unpickle(data), MinPO(52))

        # Now attempt to undo the transaction containing two objects
        info = self._storage.undoInfo()
        self._undo(info[0]['id'], [oid1, oid2])
        data, revid1 = load_current(self._storage, oid1)
        eq(zodb_unpickle(data), MinPO(31))
        data, revid2 = load_current(self._storage, oid2)
        eq(zodb_unpickle(data), MinPO(51))
        self._iterate()
Beispiel #18
0
 def _total_size(oid, seen):
     """Return the total pickle size of *oid* plus every object reachable
     from it, skipping oids already in *seen*.

     Relies on ``cache``, ``cache_size``, ``fs``, ``load_current`` and
     ``referencesf`` from the enclosing scope; memoizes per-oid totals in
     ``cache``, which is kept bounded at ``cache_size`` entries.
     """
     memoized = cache.get(oid)
     if memoized is not None:
         return memoized
     data, serialno = load_current(fs, oid)
     size = len(data)
     for child_oid in referencesf(data):
         if child_oid in seen:
             continue
         seen[child_oid] = 1
         size += _total_size(child_oid, seen)
     cache[oid] = size
     # Evict an arbitrary entry once the memo table reaches its cap.
     if len(cache) == cache_size:
         cache.popitem()
     return size
Beispiel #19
0
def main(path):
    """Print, for each (interesting) object in the FileStorage at *path*,
    its oid, pickle size, total reachable size, sample path from the
    root, and class -- largest oids first.

    Honors the module-level ``PACK`` and ``VERBOSE`` flags.
    """
    fs = FileStorage(path, read_only=1)
    if PACK:
        fs.pack()

    db = ZODB.DB(fs)
    rt = db.open().root()
    paths = find_paths(rt, 3)

    def total_size(oid):
        """Total pickle size of oid plus everything reachable from it."""
        cache = {}
        cache_size = 1000

        def _total_size(oid, seen):
            # Memoized depth-first traversal over referencesf(data).
            v = cache.get(oid)
            if v is not None:
                return v
            data, serialno = load_current(fs, oid)
            size = len(data)
            for suboid in referencesf(data):
                if suboid in seen:
                    continue
                seen[suboid] = 1
                size += _total_size(suboid, seen)
            cache[oid] = size
            # Keep the memo table bounded.
            if len(cache) == cache_size:
                cache.popitem()
            return size

        return _total_size(oid, {})

    # Bug fix for Python 3: ``fs._index.keys()`` is a view with no
    # .sort()/.reverse(); build a descending-sorted list instead.
    keys = sorted(fs._index.keys(), reverse=True)

    if not VERBOSE:
        # If not running verbosely, don't print an entry for an object
        # unless it has an entry in paths.  (dict.has_key was removed in
        # Python 3, and filter() would return a lazy iterator there.)
        keys = [oid for oid in keys if oid in paths]

    fmt = "%8s %5d %8d %s %s.%s"

    for oid in keys:
        data, serialno = load_current(fs, oid)
        mod, klass = get_pickle_metadata(data)
        refs = referencesf(data)
        path = paths.get(oid, '-')
        print(fmt % (U64(oid), len(data), total_size(oid), path, mod, klass))
    def checkTwoObjectUndoAtOnce(self):
        """Undo the last two transactions in a single two-phase commit,
        verify both objects revert to their first revision, then undo the
        undo and verify they return to their latest values.

        Fix: removed the unused local ``unless = self.assertTrue``.
        """
        # Convenience
        eq = self.assertEqual
        p30, p31, p32, p50, p51, p52 = map(
            zodb_pickle, map(MinPO, (30, 31, 32, 50, 51, 52)))
        oid1 = self._storage.new_oid()
        oid2 = self._storage.new_oid()
        # Store two objects in the same transaction
        tid = self._multi_obj_transaction([
            (oid1, ZERO, p30),
            (oid2, ZERO, p50),
        ])
        # Update those same two objects
        tid = self._multi_obj_transaction([
            (oid1, tid, p31),
            (oid2, tid, p51),
        ])
        # Update those same two objects
        tid = self._multi_obj_transaction([
            (oid1, tid, p32),
            (oid2, tid, p52),
        ])
        # Make sure the objects have the current value
        data, revid1 = load_current(self._storage, oid1)
        eq(zodb_unpickle(data), MinPO(32))
        data, revid2 = load_current(self._storage, oid2)
        eq(zodb_unpickle(data), MinPO(52))
        # Now attempt to undo the transaction containing two objects
        info = self._storage.undoInfo()
        tid = info[0]['id']
        tid1 = info[1]['id']
        t = TransactionMetaData()
        oids = self._begin_undos_vote(t, tid, tid1)
        serial = self._storage.tpc_finish(t)
        # We may get the finalization stuff called an extra time,
        # depending on the implementation.
        if serial is None:
            self.assertEqual(oids, {oid1, oid2})
        data, revid1 = load_current(self._storage, oid1)
        eq(zodb_unpickle(data), MinPO(30))
        data, revid2 = load_current(self._storage, oid2)
        eq(zodb_unpickle(data), MinPO(50))

        # Now try to undo the one we just did to undo, whew
        info = self._storage.undoInfo()
        self._undo(info[0]['id'], [oid1, oid2])
        data, revid1 = load_current(self._storage, oid1)
        eq(zodb_unpickle(data), MinPO(32))
        data, revid2 = load_current(self._storage, oid2)
        eq(zodb_unpickle(data), MinPO(52))
        self._iterate()
Beispiel #21
0
    def checkTwoObjectUndoAtOnce(self):
        """Undo the last two transactions in a single two-phase commit,
        verify both objects revert to their first revision, then undo the
        undo and verify they return to their latest values.

        Fix: removed the unused local ``unless = self.assertTrue``.
        """
        # Convenience
        eq = self.assertEqual
        p30, p31, p32, p50, p51, p52 = map(zodb_pickle,
                                           map(MinPO,
                                               (30, 31, 32, 50, 51, 52)))
        oid1 = self._storage.new_oid()
        oid2 = self._storage.new_oid()
        # Store two objects in the same transaction
        tid = self._multi_obj_transaction([(oid1, ZERO, p30),
                                           (oid2, ZERO, p50),
                                           ])
        # Update those same two objects
        tid = self._multi_obj_transaction([(oid1, tid, p31),
                                           (oid2, tid, p51),
                                           ])
        # Update those same two objects
        tid = self._multi_obj_transaction([(oid1, tid, p32),
                                           (oid2, tid, p52),
                                           ])
        # Make sure the objects have the current value
        data, revid1 = load_current(self._storage, oid1)
        eq(zodb_unpickle(data), MinPO(32))
        data, revid2 = load_current(self._storage, oid2)
        eq(zodb_unpickle(data), MinPO(52))
        # Now attempt to undo the transaction containing two objects
        info = self._storage.undoInfo()
        tid = info[0]['id']
        tid1 = info[1]['id']
        t = TransactionMetaData()
        oids = self._begin_undos_vote(t, tid, tid1)
        serial = self._storage.tpc_finish(t)
        # We may get the finalization stuff called an extra time,
        # depending on the implementation.
        if serial is None:
            self.assertEqual(oids, {oid1, oid2})
        data, revid1 = load_current(self._storage, oid1)
        eq(zodb_unpickle(data), MinPO(30))
        data, revid2 = load_current(self._storage, oid2)
        eq(zodb_unpickle(data), MinPO(50))

        # Now try to undo the one we just did to undo, whew
        info = self._storage.undoInfo()
        self._undo(info[0]['id'], [oid1, oid2])
        data, revid1 = load_current(self._storage, oid1)
        eq(zodb_unpickle(data), MinPO(32))
        data, revid2 = load_current(self._storage, oid2)
        eq(zodb_unpickle(data), MinPO(52))
        self._iterate()
Beispiel #22
0
    def checkTransactionalUndoAfterPack(self):
        """Check that an undo id obtained before a pack is still valid
        after the pack (i.e. undo ids must not encode file positions).
        """
        # bwarsaw Date: Thu Mar 28 21:04:43 2002 UTC
        # This is a test which should provoke the underlying bug in
        # transactionalUndo() on a standby storage.  If our hypothesis
        # is correct, the bug is in FileStorage, and is caused by
        # encoding the file position in the `id' field of the undoLog
        # information.  Note that Full just encodes the tid, but this
        # is a problem for FileStorage (we have a strategy for fixing
        # this).

        # So, basically, this makes sure that undo info doesn't depend
        # on file positions.  We change the file positions in an undo
        # record by packing.

        # Add a few object revisions
        oid = b'\0'*8
        revid0 = self._dostore(oid, data=MinPO(50))
        revid1 = self._dostore(oid, revid=revid0, data=MinPO(51))
        snooze()
        packtime = time.time()
        snooze()                # time.time() now distinct from packtime
        revid2 = self._dostore(oid, revid=revid1, data=MinPO(52))
        self._dostore(oid, revid=revid2, data=MinPO(53))
        # Now get the undo log
        info = self._storage.undoInfo()
        self.assertEqual(len(info), 4)
        tid = info[0]['id']
        # Now pack just the initial revision of the object.  We need the
        # second revision otherwise we won't be able to undo the third
        # revision!
        self._storage.pack(packtime, referencesf)
        # Make some basic assertions about the undo information now
        info2 = self._storage.undoInfo()
        self.assertEqual(len(info2), 2)
        # And now attempt to undo the last transaction
        undone, = self.undo(tid)
        self.assertEqual(undone, oid)
        data, revid = load_current(self._storage, oid)
        # The object must now be at the second state
        self.assertEqual(zodb_unpickle(data), MinPO(52))
        self._iterate()
Beispiel #23
0
    def checkTransactionalUndoAfterPack(self):
        """Check that an undo id obtained before a pack is still valid
        after the pack (i.e. undo ids must not encode file positions).
        """
        # bwarsaw Date: Thu Mar 28 21:04:43 2002 UTC
        # This is a test which should provoke the underlying bug in
        # transactionalUndo() on a standby storage.  If our hypothesis
        # is correct, the bug is in FileStorage, and is caused by
        # encoding the file position in the `id' field of the undoLog
        # information.  Note that Full just encodes the tid, but this
        # is a problem for FileStorage (we have a strategy for fixing
        # this).

        # So, basically, this makes sure that undo info doesn't depend
        # on file positions.  We change the file positions in an undo
        # record by packing.

        # Add a few object revisions
        oid = b'\0' * 8
        revid0 = self._dostore(oid, data=MinPO(50))
        revid1 = self._dostore(oid, revid=revid0, data=MinPO(51))
        snooze()
        packtime = time.time()
        snooze()  # time.time() now distinct from packtime
        revid2 = self._dostore(oid, revid=revid1, data=MinPO(52))
        self._dostore(oid, revid=revid2, data=MinPO(53))
        # Now get the undo log
        info = self._storage.undoInfo()
        self.assertEqual(len(info), 4)
        tid = info[0]['id']
        # Now pack just the initial revision of the object.  We need the
        # second revision otherwise we won't be able to undo the third
        # revision!
        self._storage.pack(packtime, referencesf)
        # Make some basic assertions about the undo information now
        info2 = self._storage.undoInfo()
        self.assertEqual(len(info2), 2)
        # And now attempt to undo the last transaction
        undone, = self.undo(tid)
        self.assertEqual(undone, oid)
        data, revid = load_current(self._storage, oid)
        # The object must now be at the second state
        self.assertEqual(zodb_unpickle(data), MinPO(52))
        self._iterate()
Beispiel #24
0
    def checkUndoCreationBranch1(self):
        """Undo an object's latest revision and then its creation; both
        load_current and loadBefore must then raise KeyError/POSKeyError.
        """
        eq = self.assertEqual
        oid = self._storage.new_oid()
        revid = self._dostore(oid, data=MinPO(11))
        revid = self._dostore(oid, revid=revid, data=MinPO(12))
        # Undo the last transaction
        info = self._storage.undoInfo()
        self._undo(info[0]['id'], [oid])
        data, revid = load_current(self._storage, oid)
        eq(zodb_unpickle(data), MinPO(11))

        # Now from here, we can either redo the last undo, or undo the object
        # creation.  Let's undo the object creation.
        info = self._storage.undoInfo()
        self._undo(info[2]['id'], [oid])
        self.assertRaises(KeyError, load_current, self._storage, oid)

        # Loading current data via loadBefore should raise a POSKeyError too:
        self.assertRaises(KeyError, self._storage.loadBefore, oid,
                          b'\x7f\xff\xff\xff\xff\xff\xff\xff')
        self._iterate()
Beispiel #25
0
    def checkUndoCreationBranch1(self):
        """Undo an object's latest revision and then its creation; both
        load_current and loadBefore must then raise KeyError/POSKeyError.
        """
        eq = self.assertEqual
        oid = self._storage.new_oid()
        revid = self._dostore(oid, data=MinPO(11))
        revid = self._dostore(oid, revid=revid, data=MinPO(12))
        # Undo the last transaction
        info = self._storage.undoInfo()
        self._undo(info[0]['id'], [oid])
        data, revid = load_current(self._storage, oid)
        eq(zodb_unpickle(data), MinPO(11))

        # Now from here, we can either redo the last undo, or undo the object
        # creation.  Let's undo the object creation.
        info = self._storage.undoInfo()
        self._undo(info[2]['id'], [oid])
        self.assertRaises(KeyError, load_current, self._storage, oid)

        # Loading current data via loadBefore should raise a POSKeyError too:
        self.assertRaises(KeyError, self._storage.loadBefore, oid,
                          b'\x7f\xff\xff\xff\xff\xff\xff\xff')
        self._iterate()
Beispiel #26
0
def run(path, v=0):
    """Summarize the FileStorage at *path* by object class: print count
    and total pickle bytes per "module.class", largest first.  With a
    truthy *v*, also print one line per object.
    """
    fs = FileStorage(path, read_only=1)
    # break into the file implementation
    # (renamed from ``iter`` to avoid shadowing the builtin)
    if hasattr(fs._index, 'iterkeys'):
        oid_iter = six.iterkeys(fs._index)
    else:
        oid_iter = fs._index.keys()
    totals = {}
    for oid in oid_iter:
        data, serialno = load_current(fs, oid)
        mod, klass = get_pickle_metadata(data)
        key = "%s.%s" % (mod, klass)
        # (renamed from ``bytes`` to avoid shadowing the builtin)
        nbytes, count = totals.get(key, (0, 0))
        nbytes += len(data)
        count += 1
        totals[key] = nbytes, count
        if v:
            print("%8s %5d %s" % (U64(oid), len(data), key))
    # Bug fix for Python 3: dict.items() is a view with no .sort(), and
    # cmp()/comparator-style sorting was removed -- sort descending by
    # the (bytes, count) value tuple using a key function instead.
    L = sorted(totals.items(), key=lambda item: item[1], reverse=True)
    print("Totals per object class:")
    for key, (nbytes, count) in L:
        print("%8d %8d %s" % (count, nbytes, key))
Beispiel #27
0
def run(path, v=0):
    """Print per-class pickle-byte and record totals for a FileStorage.

    path: filesystem path of the Data.fs to inspect (opened read-only).
    v:    when true, also print one line per object (oid, size, class).
    """
    fs = FileStorage(path, read_only=1)
    # Break into the file implementation: walk the oid->pos index
    # directly instead of going through the public storage API.
    # (Renamed from `iter`/`bytes` to avoid shadowing builtins.)
    if hasattr(fs._index, 'iterkeys'):
        oids = six.iterkeys(fs._index)
    else:
        oids = fs._index.keys()
    totals = {}  # "module.class" -> (total pickle bytes, record count)
    for oid in oids:
        data, serialno = load_current(fs, oid)
        mod, klass = get_pickle_metadata(data)
        key = "%s.%s" % (mod, klass)
        size, count = totals.get(key, (0, 0))
        totals[key] = size + len(data), count + 1
        if v:
            print("%8s %5d %s" % (U64(oid), len(data), key))
    # Sort classes by (bytes, count), largest first.  The original
    # `L.sort(lambda a, b: cmp(a[1], b[1]))` only worked on Python 2:
    # dict views have no .sort(), `cmp` is gone, and list.sort() no
    # longer accepts a comparator positionally.
    report = sorted(totals.items(), key=lambda item: item[1], reverse=True)
    print("Totals per object class:")
    for key, (size, count) in report:
        print("%8d %8d %s" % (count, size, key))
Beispiel #28
0
 def do_load(self):
     """Load the current revision of one randomly picked oid."""
     load_current(self.storage, self.pick_oid())
Beispiel #29
0
 def checkPackOnlyOneObject(self):
     """Pack with two live objects: only obj1's old revisions die.

     Stores a root referencing obj1 (three revisions) and obj2 (one
     revision), packs at the current time, then verifies that only
     obj1's two superseded revisions are removed while every current
     revision (root, obj1 rev 3, obj2 rev 4) stays loadable and intact.
     """
     eq = self.assertEqual
     raises = self.assertRaises
     loads = self._makeloader()
     # Create a root object.  This can't be an instance of Object,
     # otherwise the pickling machinery will serialize it as a persistent
     # id and not as an object that contains references (persistent ids) to
     # other objects.
     root = Root()
     # Create a persistent object, with some initial state
     obj1 = self._newobj()
     oid1 = obj1.getoid()
     # Create another persistent object, with some initial state.
     obj2 = self._newobj()
     oid2 = obj2.getoid()
     # Link the root object to the persistent objects, in order to keep
     # them alive.  Store the root object.
     root.obj1 = obj1
     root.obj2 = obj2
     root.value = 0
     revid0 = self._dostoreNP(ZERO, data=dumps(root))
     # Make sure the root can be retrieved
     data, revid = load_current(self._storage, ZERO)
     eq(revid, revid0)
     eq(loads(data).value, 0)
     # Commit three different revisions of the first object
     obj1.value = 1
     revid1 = self._dostoreNP(oid1, data=pdumps(obj1))
     obj1.value = 2
     revid2 = self._dostoreNP(oid1, revid=revid1, data=pdumps(obj1))
     obj1.value = 3
     revid3 = self._dostoreNP(oid1, revid=revid2, data=pdumps(obj1))
     # Now make sure all three revisions can be extracted
     data = self._storage.loadSerial(oid1, revid1)
     pobj = loads(data)
     eq(pobj.getoid(), oid1)
     eq(pobj.value, 1)
     data = self._storage.loadSerial(oid1, revid2)
     pobj = loads(data)
     eq(pobj.getoid(), oid1)
     eq(pobj.value, 2)
     data = self._storage.loadSerial(oid1, revid3)
     pobj = loads(data)
     eq(pobj.getoid(), oid1)
     eq(pobj.value, 3)
     # Now commit a revision of the second object
     obj2.value = 11
     revid4 = self._dostoreNP(oid2, data=pdumps(obj2))
     # And make sure the revision can be extracted
     data = self._storage.loadSerial(oid2, revid4)
     pobj = loads(data)
     eq(pobj.getoid(), oid2)
     eq(pobj.value, 11)
     # Now pack just revisions 1 and 2 of object1.  Object1's current
     # revision should stay alive because it's pointed to by the root, as
     # should Object2's current revision.
     # Busy-wait until the clock ticks so the pack time is strictly
     # later than the last commit's timestamp.
     now = packtime = time.time()
     while packtime <= now:
         packtime = time.time()
     self._storage.pack(packtime, referencesf)
     # Make sure the revisions are gone, but that object zero, object2, and
     # revision 3 of object1 are still there and correct.
     data, revid = load_current(self._storage, ZERO)
     eq(revid, revid0)
     eq(loads(data).value, 0)
     raises(KeyError, self._storage.loadSerial, oid1, revid1)
     raises(KeyError, self._storage.loadSerial, oid1, revid2)
     data = self._storage.loadSerial(oid1, revid3)
     pobj = loads(data)
     eq(pobj.getoid(), oid1)
     eq(pobj.value, 3)
     data, revid = load_current(self._storage, oid1)
     eq(revid, revid3)
     pobj = loads(data)
     eq(pobj.getoid(), oid1)
     eq(pobj.value, 3)
     data, revid = load_current(self._storage, oid2)
     eq(revid, revid4)
     eq(loads(data).value, 11)
     data = self._storage.loadSerial(oid2, revid4)
     pobj = loads(data)
     eq(pobj.getoid(), oid2)
     eq(pobj.value, 11)
Beispiel #30
0
 def _check_stores(self, oids):
     """Assert each (oid, revid) pair names the current revision."""
     for oid, expected in oids:
         _, current = load_current(self._storage, oid)
         self.assertEqual(current, expected)
Beispiel #31
0
 def check(self):
     """Verify every recorded oid: the current serial matches the
     remembered revid and the payload carries this worker's name."""
     for oid, expected_serial in self.oids.items():
         data, serial = load_current(self.storage, oid)
         self.test.assertEqual(serial, expected_serial)
         self.test.assertEqual(zodb_unpickle(data).value[0], self.getName())
Beispiel #32
0
 def check(self):
     """Verify every recorded oid: the current serial matches the
     remembered revid and the payload carries this worker's name."""
     for oid, expected_serial in self.oids.items():
         data, serial = load_current(self.storage, oid)
         self.test.assertEqual(serial, expected_serial)
         self.test.assertEqual(zodb_unpickle(data).value[0], self.getName())
Beispiel #33
0
 def do_load(self):
     """Load the current revision of one randomly picked oid."""
     load_current(self.storage, self.pick_oid())
Beispiel #34
0
def main(path=None):
    """Scan a FileStorage for objects whose references point at missing,
    unloadable, or creation-undone objects, and report() each problem.

    path: Data.fs path; when None it is taken from sys.argv, which may
    also carry one or more -v flags to raise verbosity.
    """
    verbose = 0
    if path is None:
        import sys
        import getopt

        opts, args = getopt.getopt(sys.argv[1:], "v")
        for k, v in opts:
            if k == "-v":
                verbose += 1

        path, = args

    fs = FileStorage(path, read_only=1)

    # Oids in the index whose load raised POSKeyError.  This is what
    # happens if undo is applied to the transaction creating the object
    # (the oid is still in the index, but its current data record has a
    # backpointer of 0, and POSKeyError is raised because of that).
    undone = set()

    # Oids present in the index that failed to load for any other
    # reason; disjoint from `undone`.
    noload = set()

    # Build the {pos -> oid} index reverse to fs._index's {oid -> pos};
    # we iterate objects in order of ascending file position to
    # optimize disk IO.
    pos2oid = QQBTree()  # pos -> u64(oid)
    for oid, pos in fs._index.iteritems():
        pos2oid[pos] = u64(oid)

    # Pass 1: load every object listed in the index and remember those
    # that are deleted or load with an error.
    for oid64 in pos2oid.itervalues():
        oid = p64(oid64)
        try:
            data, serial = load_current(fs, oid)
        except POSKeyError:
            undone.add(oid)
        # Deliberately broad: any other load failure is recorded, but
        # (unlike the old bare `except:`) KeyboardInterrupt/SystemExit
        # now propagate naturally instead of needing a manual re-raise.
        except Exception:
            if verbose:
                traceback.print_exc()
            noload.add(oid)

    # Pass 2: go through the healthy objects again and verify that none
    # of their references point into the problematic set.
    inactive = noload | undone
    for oid64 in pos2oid.itervalues():
        oid = p64(oid64)
        if oid in inactive:
            continue
        data, serial = load_current(fs, oid)
        refs = get_refs(data)
        missing = []  # contains 3-tuples of oid, klass-metadata, reason
        for ref, klass in refs:
            if klass is None:
                klass = '<unknown>'
            if ref not in fs._index:
                missing.append((ref, klass, "missing"))
            if ref in noload:
                missing.append((ref, klass, "failed to load"))
            if ref in undone:
                missing.append((ref, klass, "object creation was undone"))
        if missing:
            report(oid, data, serial, missing)
Beispiel #35
0
 def _check_stores(self, oids):
     for oid, revid in oids:
         data, s_revid = load_current(self._storage, oid)
         self.assertEqual(s_revid, revid)
Beispiel #36
0
 def checkLoadDelegation(self):
     """A DemoStorage layered over this storage must serve the same
     current data for object 0 as the base storage itself."""
     db = DB(self._storage)  # creates object 0. :)
     overlay = ZODB.DemoStorage.DemoStorage(base=self._storage)
     base_result = load_current(self._storage, ZODB.utils.z64)
     self.assertEqual(load_current(overlay, ZODB.utils.z64), base_result)
Beispiel #37
0
 def checkPackJustOldRevisions(self):
     """Pack removes superseded revisions but keeps live current data.

     Stores a root referencing one object, commits three revisions of
     that object, packs at the current time, and verifies revisions 1
     and 2 are gone while the root and revision 3 remain loadable and
     intact.
     """
     eq = self.assertEqual
     raises = self.assertRaises
     loads = self._makeloader()
     # Create a root object.  This can't be an instance of Object,
     # otherwise the pickling machinery will serialize it as a persistent
     # id and not as an object that contains references (persistent ids) to
     # other objects.
     root = Root()
     # Create a persistent object, with some initial state
     obj = self._newobj()
     oid = obj.getoid()
     # Link the root object to the persistent object, in order to keep the
     # persistent object alive.  Store the root object.
     root.obj = obj
     root.value = 0
     revid0 = self._dostoreNP(ZERO, data=dumps(root))
     # Make sure the root can be retrieved
     data, revid = load_current(self._storage, ZERO)
     eq(revid, revid0)
     eq(loads(data).value, 0)
     # Commit three different revisions of the other object
     obj.value = 1
     revid1 = self._dostoreNP(oid, data=pdumps(obj))
     obj.value = 2
     revid2 = self._dostoreNP(oid, revid=revid1, data=pdumps(obj))
     obj.value = 3
     revid3 = self._dostoreNP(oid, revid=revid2, data=pdumps(obj))
     # Now make sure all three revisions can be extracted
     data = self._storage.loadSerial(oid, revid1)
     pobj = loads(data)
     eq(pobj.getoid(), oid)
     eq(pobj.value, 1)
     data = self._storage.loadSerial(oid, revid2)
     pobj = loads(data)
     eq(pobj.getoid(), oid)
     eq(pobj.value, 2)
     data = self._storage.loadSerial(oid, revid3)
     pobj = loads(data)
     eq(pobj.getoid(), oid)
     eq(pobj.value, 3)
     # Now pack just revisions 1 and 2.  The object's current revision
     # should stay alive because it's pointed to by the root.
     # Busy-wait until the clock ticks so the pack time is strictly
     # later than the last commit's timestamp.
     now = packtime = time.time()
     while packtime <= now:
         packtime = time.time()
     self._storage.pack(packtime, referencesf)
     # Make sure the revisions are gone, but that object zero and revision
     # 3 are still there and correct
     data, revid = load_current(self._storage, ZERO)
     eq(revid, revid0)
     eq(loads(data).value, 0)
     raises(KeyError, self._storage.loadSerial, oid, revid1)
     raises(KeyError, self._storage.loadSerial, oid, revid2)
     data = self._storage.loadSerial(oid, revid3)
     pobj = loads(data)
     eq(pobj.getoid(), oid)
     eq(pobj.value, 3)
     data, revid = load_current(self._storage, oid)
     eq(revid, revid3)
     pobj = loads(data)
     eq(pobj.getoid(), oid)
     eq(pobj.value, 3)
Beispiel #38
0
 def checkPackOnlyOneObject(self):
     """Pack with two live objects: only obj1's old revisions die.

     Stores a root referencing obj1 (three revisions) and obj2 (one
     revision), packs at the current time, then verifies that only
     obj1's two superseded revisions are removed while every current
     revision (root, obj1 rev 3, obj2 rev 4) stays loadable and intact.
     """
     eq = self.assertEqual
     raises = self.assertRaises
     loads = self._makeloader()
     # Create a root object.  This can't be an instance of Object,
     # otherwise the pickling machinery will serialize it as a persistent
     # id and not as an object that contains references (persistent ids) to
     # other objects.
     root = Root()
     # Create a persistent object, with some initial state
     obj1 = self._newobj()
     oid1 = obj1.getoid()
     # Create another persistent object, with some initial state.
     obj2 = self._newobj()
     oid2 = obj2.getoid()
     # Link the root object to the persistent objects, in order to keep
     # them alive.  Store the root object.
     root.obj1 = obj1
     root.obj2 = obj2
     root.value = 0
     revid0 = self._dostoreNP(ZERO, data=dumps(root))
     # Make sure the root can be retrieved
     data, revid = load_current(self._storage, ZERO)
     eq(revid, revid0)
     eq(loads(data).value, 0)
     # Commit three different revisions of the first object
     obj1.value = 1
     revid1 = self._dostoreNP(oid1, data=pdumps(obj1))
     obj1.value = 2
     revid2 = self._dostoreNP(oid1, revid=revid1, data=pdumps(obj1))
     obj1.value = 3
     revid3 = self._dostoreNP(oid1, revid=revid2, data=pdumps(obj1))
     # Now make sure all three revisions can be extracted
     data = self._storage.loadSerial(oid1, revid1)
     pobj = loads(data)
     eq(pobj.getoid(), oid1)
     eq(pobj.value, 1)
     data = self._storage.loadSerial(oid1, revid2)
     pobj = loads(data)
     eq(pobj.getoid(), oid1)
     eq(pobj.value, 2)
     data = self._storage.loadSerial(oid1, revid3)
     pobj = loads(data)
     eq(pobj.getoid(), oid1)
     eq(pobj.value, 3)
     # Now commit a revision of the second object
     obj2.value = 11
     revid4 = self._dostoreNP(oid2, data=pdumps(obj2))
     # And make sure the revision can be extracted
     data = self._storage.loadSerial(oid2, revid4)
     pobj = loads(data)
     eq(pobj.getoid(), oid2)
     eq(pobj.value, 11)
     # Now pack just revisions 1 and 2 of object1.  Object1's current
     # revision should stay alive because it's pointed to by the root, as
     # should Object2's current revision.
     # Busy-wait until the clock ticks so the pack time is strictly
     # later than the last commit's timestamp.
     now = packtime = time.time()
     while packtime <= now:
         packtime = time.time()
     self._storage.pack(packtime, referencesf)
     # Make sure the revisions are gone, but that object zero, object2, and
     # revision 3 of object1 are still there and correct.
     data, revid = load_current(self._storage, ZERO)
     eq(revid, revid0)
     eq(loads(data).value, 0)
     raises(KeyError, self._storage.loadSerial, oid1, revid1)
     raises(KeyError, self._storage.loadSerial, oid1, revid2)
     data = self._storage.loadSerial(oid1, revid3)
     pobj = loads(data)
     eq(pobj.getoid(), oid1)
     eq(pobj.value, 3)
     data, revid = load_current(self._storage, oid1)
     eq(revid, revid3)
     pobj = loads(data)
     eq(pobj.getoid(), oid1)
     eq(pobj.value, 3)
     data, revid = load_current(self._storage, oid2)
     eq(revid, revid4)
     eq(loads(data).value, 11)
     data = self._storage.loadSerial(oid2, revid4)
     pobj = loads(data)
     eq(pobj.getoid(), oid2)
     eq(pobj.value, 11)
Beispiel #39
0
 def checkLoadDelegation(self):
     """A DemoStorage layered over this storage must serve the same
     current data for object 0 as the base storage itself."""
     db = DB(self._storage)  # creates object 0. :)
     overlay = ZODB.DemoStorage.DemoStorage(base=self._storage)
     base_result = load_current(self._storage, ZODB.utils.z64)
     self.assertEqual(load_current(overlay, ZODB.utils.z64), base_result)
Beispiel #40
0
 def checkPackJustOldRevisions(self):
     """Pack removes superseded revisions but keeps live current data.

     Stores a root referencing one object, commits three revisions of
     that object, packs at the current time, and verifies revisions 1
     and 2 are gone while the root and revision 3 remain loadable and
     intact.
     """
     eq = self.assertEqual
     raises = self.assertRaises
     loads = self._makeloader()
     # Create a root object.  This can't be an instance of Object,
     # otherwise the pickling machinery will serialize it as a persistent
     # id and not as an object that contains references (persistent ids) to
     # other objects.
     root = Root()
     # Create a persistent object, with some initial state
     obj = self._newobj()
     oid = obj.getoid()
     # Link the root object to the persistent object, in order to keep the
     # persistent object alive.  Store the root object.
     root.obj = obj
     root.value = 0
     revid0 = self._dostoreNP(ZERO, data=dumps(root))
     # Make sure the root can be retrieved
     data, revid = load_current(self._storage, ZERO)
     eq(revid, revid0)
     eq(loads(data).value, 0)
     # Commit three different revisions of the other object
     obj.value = 1
     revid1 = self._dostoreNP(oid, data=pdumps(obj))
     obj.value = 2
     revid2 = self._dostoreNP(oid, revid=revid1, data=pdumps(obj))
     obj.value = 3
     revid3 = self._dostoreNP(oid, revid=revid2, data=pdumps(obj))
     # Now make sure all three revisions can be extracted
     data = self._storage.loadSerial(oid, revid1)
     pobj = loads(data)
     eq(pobj.getoid(), oid)
     eq(pobj.value, 1)
     data = self._storage.loadSerial(oid, revid2)
     pobj = loads(data)
     eq(pobj.getoid(), oid)
     eq(pobj.value, 2)
     data = self._storage.loadSerial(oid, revid3)
     pobj = loads(data)
     eq(pobj.getoid(), oid)
     eq(pobj.value, 3)
     # Now pack just revisions 1 and 2.  The object's current revision
     # should stay alive because it's pointed to by the root.
     # Busy-wait until the clock ticks so the pack time is strictly
     # later than the last commit's timestamp.
     now = packtime = time.time()
     while packtime <= now:
         packtime = time.time()
     self._storage.pack(packtime, referencesf)
     # Make sure the revisions are gone, but that object zero and revision
     # 3 are still there and correct
     data, revid = load_current(self._storage, ZERO)
     eq(revid, revid0)
     eq(loads(data).value, 0)
     raises(KeyError, self._storage.loadSerial, oid, revid1)
     raises(KeyError, self._storage.loadSerial, oid, revid2)
     data = self._storage.loadSerial(oid, revid3)
     pobj = loads(data)
     eq(pobj.getoid(), oid)
     eq(pobj.value, 3)
     data, revid = load_current(self._storage, oid)
     eq(revid, revid3)
     pobj = loads(data)
     eq(pobj.getoid(), oid)
     eq(pobj.value, 3)