Example #1
    def _():
        def current_serial(oid):
            return _serial_at(stor, oid, at)

        for obj in txn.objv:
            data = None  # data to be committed - set up below
            copy_from = None
            if isinstance(obj, zodbdump.ObjectCopy):
                copy_from = obj.copy_from
                try:
                    xdata = stor.loadBefore(obj.oid,
                                            p64(u64(obj.copy_from) + 1))
                except POSKeyError:
                    xdata = None
                if xdata is None:
                    raise ValueError(
                        "%s: object %s: copy from @%s: no data" %
                        (runctx, ashex(obj.oid), ashex(obj.copy_from)))
                data, _, _ = xdata

            elif isinstance(obj, zodbdump.ObjectDelete):
                data = None

            elif isinstance(obj, zodbdump.ObjectData):

                if isinstance(obj.data, zodbdump.HashOnly):
                    raise ValueError(
                        '%s: cannot commit transaction with hashonly object' %
                        runctx)

                data = obj.data

            else:
                panic('%s: invalid object record: %r' % (
                    runctx,
                    obj,
                ))

            # we have the data -> restore/store the object.
            # on ConflictError we just fail and let the caller retry.
            if data is None:
                stor.deleteObject(obj.oid, current_serial(obj.oid), txn)
            else:
                if want_restore and have_restore:
                    stor.restore(obj.oid, txn.tid, data, '', copy_from, txn)
                else:
                    # FIXME we don't handle copy_from on commit
                    # NEO does not support restore, and even if stor supports restore,
                    # going that way requires already knowing the tid for the transaction
                    # we are committing. -> we just imitate the copy by actually copying
                    # the data and letting the storage deduplicate it.
                    stor.store(obj.oid, current_serial(obj.oid), data, '', txn)
Example #2
 def zdump(self):
     data = self.data
     hashonly = isinstance(data, HashOnly)
     if hashonly:
         size = data.size
     else:
         size = len(data)
     z = b'obj %s %d %s:%s' % (ashex(
         self.oid), size, self.hashfunc, ashex(self.hash_))
     if hashonly:
         z += b' -'
     else:
         z += b'\n'
         z += data
     z += b'\n'
     return z
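For reference, a minimal sketch of what this zdump produces for a regular (non-hashonly) record. The import path and the ObjectData constructor order are inferred from the other examples here and are otherwise an assumption.

from hashlib import sha1
from zodbtools.zodbdump import ObjectData   # import path is an assumption

oid  = b'\x00' * 8                          # oid 0000000000000000
data = b'some pickle bytes'
obj  = ObjectData(oid, data, b'sha1', sha1(data).digest())

# -> b'obj 0000000000000000 17 sha1:<40 hex digits>\nsome pickle bytes\n'
print(obj.zdump())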
Example #3
 def zdump(self):  # -> bytes
     z = b'txn %s %s\n' % (ashex(self.tid), qq(self.status))
     z += b'user %s\n' % qq(self.user)
     z += b'description %s\n' % qq(self.description)
     z += b'extension %s\n' % qq(self.extension_bytes)
     for obj in self.objv:
         z += obj.zdump()
     z += b'\n'
     return z
Example #4
def storcmp(stor1, stor2, tidmin, tidmax, verbose=False):
    iter1 = stor1.iterator(tidmin, tidmax)
    iter2 = stor2.iterator(tidmin, tidmax)

    Tprev = time()
    txncount = 0
    while 1:
        txn1, ok1 = nextitem(iter1)
        txn2, ok2 = nextitem(iter2)

        # comparison finished
        if not ok1 and not ok2:
            if verbose:
                print("equal")
            return 0

        # one part has entry not present in another part
        if txn1 is None or txn2 is None or txn1.tid != txn2.tid:
            if verbose:
                tid1 = txn1.tid if txn1 else inf
                tid2 = txn2.tid if txn2 else inf
                l = [(tid1, 1, 2), (tid2, 2, 1)]
                l.sort()
                mintid, minstor, maxstor = l[0]
                print("not-equal: tid %s present in stor%i but not in stor%i" %
                      (ashex(mintid), minstor, maxstor))
            return 1

        # show current comparison state and speed
        if verbose:
            txncount += 1
            T = time()
            if T - Tprev > 5:
                print("@ %s  (%.2f TPS)" % (ashex(txn1.tid), txncount /
                                            (T - Tprev)))
                Tprev = T
                txncount = 0

        # actual txn comparison
        tcmp = txncmp(txn1, txn2)
        if tcmp:
            if verbose:
                print("not-equal: transaction %s is different")
            return 1
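A hedged usage sketch for storcmp: open two storages by URL with the storageFromURL helper used in the other examples and compare their whole history. The file:// URLs are placeholders, and passing None for both tid bounds assumes the underlying iterators treat that as "no limit".

from zodbtools.util import storageFromURL

stor1 = storageFromURL("file:///path/to/Data1.fs")   # placeholder URLs
stor2 = storageFromURL("file:///path/to/Data2.fs")
try:
    # None..None asks the iterators for the whole history (assumption about
    # the underlying storage iterator accepting None bounds)
    rc = storcmp(stor1, stor2, None, None, verbose=True)
finally:
    stor1.close()
    stor2.close()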
Example #5
def main(argv):
    try:
        optv, argv = getopt.getopt(argv[1:], "h", ["help"])
    except getopt.GetoptError as e:
        print(e, file=sys.stderr)
        usage(sys.stderr)
        sys.exit(2)

    for opt, _ in optv:
        if opt in ("-h", "--help"):
            usage(sys.stdout)
            sys.exit(0)

    if len(argv) != 2:
        usage(sys.stderr)
        sys.exit(2)

    storurl = argv[0]
    at = fromhex(argv[1])

    stor = storageFromURL(storurl)
    defer(stor.close)

    # artificial transaction header with tid=0 to request regular commit
    zin = b'txn 0000000000000000 " "\n'

    zin += sys.stdin.read()
    zin = BytesIO(zin)
    zr = zodbdump.DumpReader(zin)
    zr.lineno -= 1  # we prepended txn header
    txn = zr.readtxn()
    tail = zin.read()
    if tail:
        print('E: +%d: garbage after transaction' % zr.lineno, file=sys.stderr)
        sys.exit(1)

    tid = zodbcommit(stor, at, txn)
    print(ashex(tid))
Example #6
def zodbcommit(stor, at, txn):
    assert isinstance(txn, zodbdump.Transaction)

    want_restore = (txn.tid != z64)
    have_restore = IStorageRestoreable.providedBy(stor)
    # warn once if stor does not implement IStorageRestoreable
    if want_restore and (not have_restore):
        if type(stor) not in _norestoreWarned:
            warnings.warn(
                "restore: %s does not provide IStorageRestoreable ...\n"
                "\t... -> will try to emulate it on best-effort basis." %
                type(stor), RuntimeWarning)
            _norestoreWarned.add(type(stor))

    if want_restore:
        # even if stor might not provide IStorageRestoreable and might not
        # support .restore, it can still support .tpc_begin(tid=...). An example
        # of this is NEO. In any case we need to be able to specify which
        # transaction ID to restore the transaction with.
        stor.tpc_begin(txn, tid=txn.tid)
        runctx = "%s: restore %s @%s" % (stor.getName(), ashex(
            txn.tid), ashex(at))
    else:
        stor.tpc_begin(txn)
        runctx = "%s: commit @%s" % (stor.getName(), ashex(at))

    def _():
        def current_serial(oid):
            return _serial_at(stor, oid, at)

        for obj in txn.objv:
            data = None  # data to be committed - set up below
            copy_from = None
            if isinstance(obj, zodbdump.ObjectCopy):
                copy_from = obj.copy_from
                try:
                    xdata = stor.loadBefore(obj.oid,
                                            p64(u64(obj.copy_from) + 1))
                except POSKeyError:
                    xdata = None
                if xdata is None:
                    raise ValueError(
                        "%s: object %s: copy from @%s: no data" %
                        (runctx, ashex(obj.oid), ashex(obj.copy_from)))
                data, _, _ = xdata

            elif isinstance(obj, zodbdump.ObjectDelete):
                data = None

            elif isinstance(obj, zodbdump.ObjectData):

                if isinstance(obj.data, zodbdump.HashOnly):
                    raise ValueError(
                        '%s: cannot commit transaction with hashonly object' %
                        runctx)

                data = obj.data

            else:
                panic('%s: invalid object record: %r' % (
                    runctx,
                    obj,
                ))

            # we have the data -> restore/store the object.
            # on ConflictError we just fail and let the caller retry.
            if data is None:
                stor.deleteObject(obj.oid, current_serial(obj.oid), txn)
            else:
                if want_restore and have_restore:
                    stor.restore(obj.oid, txn.tid, data, '', copy_from, txn)
                else:
                    # FIXME we don't handle copy_from on commit
                    # NEO does not support restore, and even if stor supports restore,
                    # going that way requires already knowing the tid for the transaction
                    # we are committing. -> we just imitate the copy by actually copying
                    # the data and letting the storage deduplicate it.
                    stor.store(obj.oid, current_serial(obj.oid), data, '', txn)

    try:
        _()
        stor.tpc_vote(txn)
    except:
        stor.tpc_abort(txn)
        raise

    # in ZODB >= 5 tpc_finish returns the tid directly, but ZODB 4 does not.
    # Since we still need to support ZODB 4, use the tpc_finish callback to
    # learn with which tid the transaction was committed.
    _ = []
    stor.tpc_finish(txn, lambda tid: _.append(tid))
    assert len(_) == 1
    tid = _[0]
    if want_restore and (tid != txn.tid):
        panic('%s: restored transaction has different tid=%s' %
              (runctx, ashex(tid)))
    return tid
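A hedged end-to-end sketch of calling zodbcommit programmatically: build a transaction in zodbdump format, parse it with DumpReader and commit it on top of the current head, mirroring main() from the CLI example above. The URL is a placeholder, and the transaction intentionally carries no object records, so it only exercises the commit path.

from io import BytesIO
from zodbtools.util import storageFromURL, ashex
from zodbtools import zodbdump

stor = storageFromURL("file:///path/to/Data.fs")   # placeholder URL
try:
    at = stor.lastTransaction()                    # commit on top of the current head

    # tid=0 in the txn header requests a regular commit (see main() above)
    dump = (b'txn 0000000000000000 " "\n'
            b'user "demo"\n'
            b'description "zodbcommit usage sketch"\n'
            b'extension ""\n'
            b'\n')
    txn = zodbdump.DumpReader(BytesIO(dump)).readtxn()

    tid = zodbcommit(stor, at, txn)                # the function defined above
    print(ashex(tid))
finally:
    stor.close()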
Example #7
 def zdump(self):
     return b'obj %s from %s\n' % (ashex(self.oid), ashex(self.copy_from))
Example #8
 def zdump(self):
     return b'obj %s delete\n' % (ashex(self.oid))
Example #9
    def readtxn(self):
        # header
        l = self._readline()
        if l is None:
            return None
        m = _txn_re.match(l)
        if m is None:
            self._badline('no txn start')
        tid = fromhex(m.group('tid'))
        status = m.group('status')

        def get(name):
            l = self._readline()
            if l is None or not l.startswith(b'%s ' % name):
                self._badline('no %s' % name)

            return strconv.unquote(l[len(name) + 1:])

        user = get(b'user')
        description = get(b'description')
        extension = get(b'extension')

        # objects
        objv = []
        while 1:
            l = self._readline()
            if l == b'':
                break  # empty line - end of transaction

            if l is None or not l.startswith(b'obj '):
                self._badline('no obj')

            m = _obj_re.match(l)
            if m is None:
                self._badline('invalid obj entry')

            obj = None  # will be Object*
            oid = fromhex(m.group('oid'))

            from_ = m.group('from')

            if m.group('delete'):
                obj = ObjectDelete(oid)

            elif from_:
                copy_from = fromhex(from_)
                obj = ObjectCopy(oid, copy_from)

            else:
                size = int(m.group('size'))
                hashfunc = m.group('hashfunc')
                hashok = fromhex(m.group('hash'))
                hashonly = m.group('hashonly') is not None
                data = None  # see vvv

                hcls = hashRegistry.get(hashfunc)
                if hcls is None:
                    self._badline('unknown hash function %s' % qq(hashfunc))

                if hashonly:
                    data = HashOnly(size)
                else:
                    # XXX -> io.readfull
                    n = size + 1  # data + trailing LF
                    data = b''
                    while n > 0:
                        chunk = self._r.read(n)
                        if not chunk:
                            raise RuntimeError('%s+%d: unexpected EOF in obj data' %
                                               (_ioname(self._r), self.lineno))
                        data += chunk
                        n -= len(chunk)
                    self.lineno += data.count(b'\n')
                    self._line = None
                    if data[-1:] != b'\n':
                        raise RuntimeError('%s+%d: no LF after obj data' %
                                           (_ioname(self._r), self.lineno))
                    data = data[:-1]

                    # verify data integrity
                    # TODO option to allow reading corrupted data
                    h = hcls()
                    h.update(data)
                    hash_ = h.digest()
                    if hash_ != hashok:
                        raise RuntimeError(
                            '%s+%d: data corrupt: %s = %s, expected %s' %
                            (_ioname(self._r), self.lineno, h.name,
                             ashex(hash_), ashex(hashok)))

                obj = ObjectData(oid, data, hashfunc, hashok)

            objv.append(obj)

        return Transaction(tid, status, user, description, extension, objv)
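A round-trip sketch for readtxn: text in zodbdump format parsed here and re-serialized with the zdump methods shown earlier should come back byte-identical. The zodbtools.zodbdump import path is an assumption; sha1 of the payload is computed inline so the integrity check passes.

from io import BytesIO
from hashlib import sha1
from zodbtools.zodbdump import DumpReader    # import path is an assumption
from zodbtools.util import ashex

data = b'some pickle bytes'
dump = (b'txn 0123456789abcdef " "\n'
        b'user "demo"\n'
        b'description "round trip"\n'
        b'extension ""\n'
        b'obj 0000000000000001 %d sha1:%s\n%s\n'
        b'\n') % (len(data), ashex(sha1(data).digest()), data)

txn = DumpReader(BytesIO(dump)).readtxn()
assert txn.zdump() == dump                   # the dump format is designed to round-trip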
Example #10
def zodbdump(stor,
             tidmin,
             tidmax,
             hashonly=False,
             pretty='raw',
             out=asbinstream(sys.stdout)):
    def badpretty():
        raise ValueError("invalid pretty format %s" % pretty)

    for txn in stor.iterator(tidmin, tidmax):
        # XXX .status not covered by IStorageTransactionInformation
        # XXX but covered by BaseStorage.TransactionRecord
        out.write(b"txn %s %s\nuser %s\ndescription %s\n" % (ashex(
            txn.tid), qq(txn.status), qq(txn.user), qq(txn.description)))

        # extension is saved by ZODB as either empty or as pickle dump of an object
        rawext = txn_raw_extension(stor, txn)
        if pretty == 'raw':
            out.write(b"extension %s\n" % qq(rawext))
        elif pretty == 'zpickledis':
            if len(rawext) == 0:
                out.write(b'extension ""\n')
            else:
                out.write(b"extension\n")
                extf = BytesIO(rawext)
                disf = BytesIO()
                pickletools.dis(extf, disf)
                out.write(indent(disf.getvalue(), "  "))
                extra = extf.read()
                if len(extra) > 0:
                    out.write(b"  + extra data %s\n" % qq(extra))
        else:
            badpretty()

        objv = txnobjv(txn)

        for obj in objv:
            entry = b"obj %s " % ashex(obj.oid)
            write_data = False

            if obj.data is None:
                entry += b"delete"

            # record was undone - data is taken from obj.data_txn
            elif obj.data_txn is not None:
                entry += b"from %s" % ashex(obj.data_txn)

            else:
                # XXX sha1 is hardcoded for now. Dump format allows other hashes.
                entry += b"%i sha1:%s" % (len(obj.data), ashex(sha1(obj.data)))
                write_data = True

            out.write(b(entry))

            if write_data:
                if hashonly:
                    out.write(b" -")
                else:
                    out.write(b"\n")
                    if pretty == 'raw':
                        out.write(obj.data)
                    elif pretty == 'zpickledis':
                        # https://github.com/zopefoundation/ZODB/blob/5.6.0-55-g1226c9d35/src/ZODB/serialize.py#L24-L29
                        dataf = BytesIO(obj.data)
                        disf = BytesIO()
                        pickletools.dis(dataf, disf)  # class
                        pickletools.dis(dataf, disf)  # state
                        out.write(indent(disf.getvalue(), "  "))
                        extra = dataf.read()
                        if len(extra) > 0:
                            out.write(b"  + extra data %s\n" % qq(extra))
                    else:
                        badpretty()

            out.write(b"\n")

        out.write(b"\n")
Example #11
def test_parse_tid_time_format(fake_time, reference_time, reference_tid,
                               input_time):
    assert b(reference_tid) == ashex(parse_tid(input_time))
    # check that the reference_tid matches the reference time, mainly
    # to check that input is defined correctly.
    assert b(reference_tid) == ashex(parse_tid(reference_time))
Example #12
def report(rep, csv=False):
    delta_fs = rep.delta_fs
    if not csv:
        if rep.TIDS == 0:
            print("# ø")
            print("No transactions processed")
            return

        print("# %s..%s" % (ashex(rep.tidmin), ashex(rep.tidmax)))
        print("Processed %d records in %d transactions" % (rep.OIDS, rep.TIDS))
        print("Average record size is %7.2f bytes" %
              (rep.DBYTES * 1.0 / rep.OIDS))
        print("Average transaction size is %7.2f bytes" %
              (rep.DBYTES * 1.0 / rep.TIDS))

        print("Types used:")
    if delta_fs:
        if csv:
            fmt = "%s,%s,%s,%s,%s"
            fmtp = "%s,%d,%d,%f%%,%f"  # per-class format
        else:
            fmt = "%-46s %7s %9s %6s %7s"
            fmtp = "%-46s %7d %9d %5.1f%% %7.2f"  # per-class format
        print(fmt % ("Class Name", "T.Count", "T.Bytes", "Pct", "AvgSize"))
        if not csv:
            print(fmt % ('-' * 46, '-' * 7, '-' * 9, '-' * 5, '-' * 7))
    else:
        if csv:
            fmt = "%s,%s,%s,%s,%s,%s,%s,%s,%s"
            fmtp = "%s,%d,%d,%f%%,%f,%d,%d,%d,%d"  # per-class format
        else:
            fmt = "%-46s %7s %9s %6s %7s %7s %9s %7s %9s"
            fmtp = "%-46s %7d %9d %5.1f%% %7.2f %7d %9d %7d %9d"  # per-class format
        print(fmt % ("Class Name", "T.Count", "T.Bytes", "Pct", "AvgSize",
                     "C.Count", "C.Bytes", "O.Count", "O.Bytes"))
        if not csv:
            print(fmt % ('-' * 46, '-' * 7, '-' * 9, '-' * 5, '-' * 7, '-' * 7,
                         '-' * 9, '-' * 7, '-' * 9))
    fmts = "%46s %7d %8dk %5.1f%% %7.2f"  # summary format
    cumpct = 0.0
    for t in sorted(rep.TYPEMAP.keys(), key=lambda a: rep.TYPESIZE[a]):
        pct = rep.TYPESIZE[t] * 100.0 / rep.DBYTES
        cumpct += pct
        if csv:
            t_display = t
        else:
            t_display = shorten(t, 46)
        if delta_fs:
            print(fmtp % (t_display, rep.TYPEMAP[t], rep.TYPESIZE[t], pct,
                          rep.TYPESIZE[t] * 1.0 / rep.TYPEMAP[t]))
        else:
            print(fmtp % (t_display, rep.TYPEMAP[t], rep.TYPESIZE[t], pct,
                          rep.TYPESIZE[t] * 1.0 / rep.TYPEMAP[t],
                          rep.COIDSMAP[t], rep.CBYTESMAP[t],
                          rep.FOIDSMAP.get(t, 0), rep.FBYTESMAP.get(t, 0)))

    if csv:
        return

    if delta_fs:
        print(fmt % ('=' * 46, '=' * 7, '=' * 9, '=' * 5, '=' * 7))
        print("%46s %7d %9s %6s %6.2f" % ('Total Transactions', rep.TIDS, ' ',
                                          ' ', rep.DBYTES * 1.0 / rep.TIDS))
        print(fmts % ('Total Records', rep.OIDS, rep.DBYTES, cumpct,
                      rep.DBYTES * 1.0 / rep.OIDS))
    else:
        print(fmt % ('=' * 46, '=' * 7, '=' * 9, '=' * 5, '=' * 7, '=' * 7,
                     '=' * 9, '=' * 7, '=' * 9))
        print("%46s %7d %9s %6s %6.2fk" %
              ('Total Transactions', rep.TIDS, ' ', ' ',
               rep.DBYTES * 1.0 / rep.TIDS / 1024.0))
        print(fmts % ('Total Records', rep.OIDS, rep.DBYTES / 1024.0, cumpct,
                      rep.DBYTES * 1.0 / rep.OIDS))

        print(fmts %
              ('Current Objects', rep.COIDS, rep.CBYTES / 1024.0,
               rep.CBYTES * 100.0 / rep.DBYTES, rep.CBYTES * 1.0 / rep.COIDS))
        if rep.FOIDS:
            print(fmts %
                  ('Old Objects', rep.FOIDS, rep.FBYTES / 1024.0, rep.FBYTES *
                   100.0 / rep.DBYTES, rep.FBYTES * 1.0 / rep.FOIDS))
Example #13
from zodbtools.util import ashex, storageFromURL
from collections import OrderedDict
from golang import func, defer
import sys


def _last_tid(stor):
    print("W: last_tid is deprecated alias for head", file=sys.stderr)
    return infoDict["head"](stor)


# {} parameter_name -> get_parameter(stor)
infoDict = OrderedDict([
    ("name", lambda stor: stor.getName()),
    ("size", lambda stor: stor.getSize()),
    ("head", lambda stor: ashex(stor.lastTransaction())),
    ("last_tid", _last_tid),
])


def zodbinfo(stor, parameterv):
    wantnames = False
    if not parameterv:
        parameterv = infoDict.keys()
        wantnames = True

    for parameter in parameterv:
        get_parameter = infoDict.get(parameter)
        if get_parameter is None:
            print("invalid parameter: %s" % parameter, file=sys.stderr)
            sys.exit(1)

        # print the value, prefixed with "<name>=" when all parameters were requested
        out = ""
        if wantnames:
            out += parameter + "="
        out += "%s" % (get_parameter(stor),)
        print(out)
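A short usage sketch for zodbinfo with a placeholder URL; storageFromURL is already imported at the top of this example.

stor = storageFromURL("file:///path/to/Data.fs")   # placeholder URL
try:
    zodbinfo(stor, ["head", "size"])   # prints the requested parameter values
finally:
    stor.close()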
Example #14
 def _(txn):
     print(ashex(txn.tid))