def test_KnownConstants(self):
     self.assertEqual(b"\000\000\000\000\000\000\000\001", p64(1))
     self.assertEqual(b"\000\000\000\001\000\000\000\000", p64(1 << 32))
     self.assertEqual(u64(b"\000\000\000\000\000\000\000\001"), 1)
     self.assertEqual(U64(b"\000\000\000\000\000\000\000\001"), 1)
     self.assertEqual(u64(b"\000\000\000\001\000\000\000\000"), 1 << 32)
     self.assertEqual(U64(b"\000\000\000\001\000\000\000\000"), 1 << 32)
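For reference, p64, u64, and U64 in ZODB.utils are thin wrappers around big-endian 64-bit integer packing. A minimal standalone sketch of the same behaviour using only the standard struct module (roughly equivalent calls, not the ZODB implementation itself):

import struct

assert struct.pack(">Q", 1) == b"\000\000\000\000\000\000\000\001"              # like p64(1)
assert struct.unpack(">Q", b"\000\000\000\001\000\000\000\000")[0] == 1 << 32   # like u64 / U64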
Example #2
 def checkKnownConstants(self):
     self.assertEqual(b"\000\000\000\000\000\000\000\001", p64(1))
     self.assertEqual(b"\000\000\000\001\000\000\000\000", p64(1 << 32))
     self.assertEqual(u64(b"\000\000\000\000\000\000\000\001"), 1)
     self.assertEqual(U64(b"\000\000\000\000\000\000\000\001"), 1)
     self.assertEqual(u64(b"\000\000\000\001\000\000\000\000"), 1 << 32)
     self.assertEqual(U64(b"\000\000\000\001\000\000\000\000"), 1 << 32)
Example #3
    def checkCorruptionInPack(self):
        # This sets up a corrupt .fs file, with a redundant transaction
        # length mismatch.  The implementation of pack in many releases of
        # ZODB blew up if the .fs file had such damage:  it detected the
        # damage, but the code to raise CorruptedError referenced an undefined
        # global.
        import time

        from ZODB.utils import U64, p64
        from ZODB.FileStorage.format import CorruptedError
        from ZODB.serialize import referencesf

        db = DB(self._storage)
        conn = db.open()
        conn.root()['xyz'] = 1
        transaction.commit()

        # Ensure it's all on disk.
        db.close()
        self._storage.close()

        # Reopen before damaging.
        self.open()

        # Open .fs directly, and damage content.
        with open('FileStorageTests.fs', 'r+b') as f:
            f.seek(0, 2)
            pos2 = f.tell() - 8
            f.seek(pos2)
            tlen2 = U64(f.read(8))  # length-8 of the last transaction
            pos1 = pos2 - tlen2 + 8  # skip over the tid at the start
            f.seek(pos1)
            tlen1 = U64(f.read(8))  # should be redundant length-8
            self.assertEqual(tlen1, tlen2)  # verify that it is redundant

            # Now damage the second copy.
            f.seek(pos2)
            f.write(p64(tlen2 - 1))

        # Try to pack.  This used to yield
        #     NameError: global name 's' is not defined
        try:
            self._storage.pack(time.time(), referencesf)
        except CorruptedError as detail:
            self.assertTrue("redundant transaction length does not match "
                            "initial transaction length" in str(detail))
        else:
            self.fail("expected CorruptedError")
Example #4
 def test_LongToStringToLong(self):
     for num in self.all:
         s = p64(num)
         n = U64(s)
         self.assertEqual(num, n, "U64() failed")
         n2 = u64(s)
         self.assertEqual(num, n2, "u64() failed")
Example #5
 def checkExtendedIteration(self):
     # Store a bunch of revisions of a single object
     self._oid = oid = self._storage.new_oid()
     revid1 = self._dostore(oid, data=MinPO(11))
     revid2 = self._dostore(oid, revid=revid1, data=MinPO(12))
     revid3 = self._dostore(oid, revid=revid2, data=MinPO(13))
     revid4 = self._dostore(oid, revid=revid3, data=MinPO(14))
     # Note that the end points are included
     # Iterate over all of the transactions with explicit start/stop
     txniter = self._storage.iterator(revid1, revid4)
     self.iter_verify(txniter, [revid1, revid2, revid3, revid4], 11)
     # Iterate over some of the transactions with explicit start
     txniter = self._storage.iterator(revid3)
     self.iter_verify(txniter, [revid3, revid4], 13)
     # Iterate over some of the transactions with explicit stop
     txniter = self._storage.iterator(None, revid2)
     self.iter_verify(txniter, [revid1, revid2], 11)
     # Iterate over some of the transactions with explicit start+stop
     txniter = self._storage.iterator(revid2, revid3)
     self.iter_verify(txniter, [revid2, revid3], 12)
     # Specify an upper bound somewhere in between values
     revid3a = p64((U64(revid3) + U64(revid4)) // 2)
     txniter = self._storage.iterator(revid2, revid3a)
     self.iter_verify(txniter, [revid2, revid3], 12)
     # Specify a lower bound somewhere in between values.
     # revid2 == revid1+1 is very likely on Windows.  Adding 1 before
     # dividing ensures that "the midpoint" we compute is strictly larger
     # than revid1.
     revid1a = p64((U64(revid1) + 1 + U64(revid2)) // 2)
     assert revid1 < revid1a
     txniter = self._storage.iterator(revid1a, revid3a)
     self.iter_verify(txniter, [revid2, revid3], 12)
     # Specify an empty range
     txniter = self._storage.iterator(revid3, revid2)
     self.iter_verify(txniter, [], 13)
     # Specify a singleton range
     txniter = self._storage.iterator(revid3, revid3)
     self.iter_verify(txniter, [revid3], 13)
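One detail behind the midpoint computations above: revids (tids) are fixed-width big-endian byte strings, so comparing them as bytes gives the same ordering as comparing the underlying integers. A tiny illustrative sketch with made-up values:

from ZODB.utils import p64, u64

lo, hi = p64(100), p64(104)
mid = p64((u64(lo) + u64(hi)) // 2)   # p64(102)
assert lo < mid < hi                  # byte-wise order matches numeric order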
Example #6
def main(path):
    fs = FileStorage(path, read_only=1)
    if PACK:
        fs.pack()

    db = ZODB.DB(fs)
    rt = db.open().root()
    paths = find_paths(rt, 3)

    def total_size(oid):
        cache = {}
        cache_size = 1000

        def _total_size(oid, seen):
            v = cache.get(oid)
            if v is not None:
                return v
            data, serialno = fs.load(oid, '')
            size = len(data)
            for suboid in referencesf(data):
                if suboid in seen:
                    continue
                seen[suboid] = 1
                size += _total_size(suboid, seen)
            cache[oid] = size
            if len(cache) == cache_size:
                cache.popitem()
            return size

        return _total_size(oid, {})

    keys = sorted(fs._index.keys(), reverse=True)

    if not VERBOSE:
        # If not running verbosely, don't print an entry for an object
        # unless it has an entry in paths.
        keys = [oid for oid in keys if oid in paths]

    fmt = "%8s %5d %8d %s %s.%s"

    for oid in keys:
        data, serialno = fs.load(oid, '')
        mod, klass = get_pickle_metadata(data)
        refs = referencesf(data)
        path = paths.get(oid, '-')
        print(fmt % (U64(oid), len(data), total_size(oid), path, mod, klass))
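Note that PACK, VERBOSE, and find_paths are assumed to be defined elsewhere in the same script and are not shown in this snippet. A hypothetical entry point, purely for illustration:

if __name__ == "__main__":
    import sys
    main(sys.argv[1])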
Example #7
def run(path, v=0):
    fs = FileStorage(path, read_only=1)
    # break into the file implementation
    if hasattr(fs._index, 'iterkeys'):
        iter = six.iterkeys(fs._index)
    else:
        iter = fs._index.keys()
    totals = {}
    for oid in iter:
        data, serialno = fs.load(oid, '')
        mod, klass = get_pickle_metadata(data)
        key = "%s.%s" % (mod, klass)
        bytes, count = totals.get(key, (0, 0))
        bytes += len(data)
        count += 1
        totals[key] = bytes, count
        if v:
            print("%8s %5d %s" % (U64(oid), len(data), key))
    L = sorted(totals.items(), key=lambda item: item[1], reverse=True)
    print("Totals per object class:")
    for key, (bytes, count) in L:
        print("%8d %8d %s" % (count, bytes, key))
Example #8
def oid_repr(oid):
    if isinstance(oid, bytes) and len(oid) == 8:
        return '%16x' % U64(oid)
    else:
        return repr(oid)
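Usage sketch for oid_repr as defined above, assuming U64 and p64 are imported from ZODB.utils as in the earlier examples (the outputs in the comments are illustrative):

from ZODB.utils import p64

print(oid_repr(p64(0x1234)))    # '            1234'  (16-character hex)
print(oid_repr('not an oid'))   # "'not an oid'"      (falls back to repr)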