Example #1
def fsdump(path, file=None, with_offset=1):
    iter = FileIterator(path)
    for i, trans in enumerate(iter):
        if with_offset:
            print(("Trans #%05d tid=%016x time=%s offset=%d" %
                   (i, u64(trans.tid), TimeStamp(trans.tid), trans._pos)),
                  file=file)
        else:
            print(("Trans #%05d tid=%016x time=%s" %
                   (i, u64(trans.tid), TimeStamp(trans.tid))),
                  file=file)
        print(("    status=%r user=%r description=%r" %
               (trans.status, trans.user, trans.description)),
              file=file)

        for j, rec in enumerate(trans):
            if rec.data is None:
                fullclass = "undo or abort of object creation"
                size = ""
            else:
                modname, classname = get_pickle_metadata(rec.data)
                size = " size=%d" % len(rec.data)
                fullclass = "%s.%s" % (modname, classname)

            if rec.data_txn:
                # It would be nice to print the transaction number
                # (i) but it would be expensive to keep track of.
                bp = " bp=%016x" % u64(rec.data_txn)
            else:
                bp = ""

            print(("  data #%05d oid=%016x%s class=%s%s" %
                   (j, u64(rec.oid), size, fullclass, bp)),
                  file=file)
    iter.close()
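
The dump format above hinges on two conversions of the 8-byte transaction id: u64() for the %016x field and TimeStamp() for the readable time. A minimal standalone sketch of those calls, assuming a stock ZODB install (the tid value is the one used in Example #17 below):

from ZODB.utils import p64, u64
from ZODB.TimeStamp import TimeStamp

tid = p64(0x0342394815555555)  # 2002-01-23 10:48:05 UTC

# u64 yields the integer used for %016x formatting; TimeStamp renders
# the same bytes as a UTC wall-clock time.
print("tid=%016x time=%s" % (u64(tid), TimeStamp(tid)))
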
Example #2
    def report(self):
        """Show all msgs, grouped by oid and sub-grouped by tid."""

        msgs = self.msgs
        oids = self.oids
        oid2name = self.oid2name
        # First determine which oids weren't seen at all, and synthesize msgs
        # for them.
        NOT_SEEN = "this oid was not defined (no data record for it found)"
        for oid in oids:
            if oid not in oid2name:
                msgs.append((oid, None, NOT_SEEN))

        msgs.sort()  # oids are primary key, tids secondary
        current_oid = current_tid = None
        for oid, tid, msg in msgs:
            if oid != current_oid:
                nrev = oids[oid]
                revision = "revision" + (nrev != 1 and 's' or '')
                name = oid2name.get(oid, "<unknown>")
                print "oid", oid_repr(oid), name, nrev, revision
                current_oid = oid
                current_tid = None
                if msg is NOT_SEEN:
                    assert tid is None
                    print "   ", msg
                    continue
            if tid != current_tid:
                current_tid = tid
                status, user, description, pos = self.tid2info[tid]
                print "    tid %s offset=%d %s" % (tid_repr(tid), pos,
                                                   TimeStamp(tid))
                print "        tid user=%r" % shorten(user)
                print "        tid description=%r" % shorten(description)
            print "       ", msg
Example #3
 def answerTransactionInformation(self, conn, tid, user, desc, ext, packed,
                                  oid_list):
     self.app.setHandlerData(({
         'time': TimeStamp(tid).timeTime(),
         'user_name': user,
         'description': desc,
         'id': tid,
         'oids': oid_list,
         'packed': packed,
     }, ext))
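
The only TimeStamp call in this handler is TimeStamp(tid).timeTime(), which turns an 8-byte tid into float seconds since the Unix epoch (UTC). A self-contained sketch of that conversion with an illustrative tid:

from datetime import datetime, timezone
from ZODB.utils import p64
from ZODB.TimeStamp import TimeStamp

tid = p64(0x0342394815555555)          # 2002-01-23 10:48:05 UTC
seconds = TimeStamp(tid).timeTime()    # float seconds since the epoch
print(seconds, datetime.fromtimestamp(seconds, tz=timezone.utc))
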
Example #4
    def checkFullTimeStamp(self):
        t = time.gmtime(time.time())
        ts = TimeStamp(*t[:6])

        # XXX floating point comparison
        self.assertEquals(ts.timeTime() + time.timezone, time.mktime(t))

        self.assertEqual(ts.year(), t[0])
        self.assertEqual(ts.month(), t[1])
        self.assertEqual(ts.day(), t[2])

        self.assertEquals(ts.hour(), t[3])
        self.assertEquals(ts.minute(), t[4])
        self.assert_(abs(ts.second() - t[5]) < EPSILON)
Example #5
    def _check_ymd(self, yr, mo, dy):
        ts = TimeStamp(yr, mo, dy)
        self.assertEqual(ts.year(), yr)
        self.assertEqual(ts.month(), mo)
        self.assertEqual(ts.day(), dy)

        self.assertEquals(ts.hour(), 0)
        self.assertEquals(ts.minute(), 0)
        self.assertEquals(ts.second(), 0)

        t = time.gmtime(ts.timeTime())
        self.assertEquals(yr, t[0])
        self.assertEquals(mo, t[1])
        self.assertEquals(dy, t[2])
Example #6
def _findModificationTime(object):
    """Find the last modification time for a version-controlled object.
       The modification time reflects the latest modification time of
       the object or any of its persistent subobjects that are not
       themselves version-controlled objects. Note that this will
       return None if the object has no modification time."""

    mtime = getattr(object, '_p_mtime', None)
    if mtime is None:
        return None

    latest = mtime
    conn = object._p_jar
    load = conn._storage.load
    try:
        version = conn._version
    except AttributeError:
        # ZODB 3.9+ compatibility
        version = None
    refs = referencesf

    oids = [object._p_oid]
    done_oids = {}
    done = done_oids.has_key
    first = 1

    while oids:
        oid = oids[0]
        del oids[0]
        if done(oid):
            continue
        done_oids[oid] = 1
        try:
            p, serial = load(oid, version)
        except:
            pass  # invalid reference!
        else:
            if first is not None:
                first = None
            else:
                if p.find('U\x0b__vc_info__') == -1:
                    mtime = TimeStamp(serial).timeTime()
                    if mtime > latest:
                        latest = mtime
            refs(p, oids)

    return latest
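
The scan works because every data record's serial is itself a tid, so TimeStamp(serial).timeTime() is that record's commit time. For a single loaded object the same value can be read from _p_serial; a hedged sketch (modification_time is an illustrative helper, not part of ZODB):

from ZODB.TimeStamp import TimeStamp

def modification_time(obj):
    """Return the commit time of obj's current revision as seconds
    since the epoch, or None if the object was never stored."""
    serial = getattr(obj, '_p_serial', None)
    if not serial or serial == b'\x00' * 8:   # all-zero serial: never committed
        return None
    return TimeStamp(serial).timeTime()
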
Example #7
def report(oid, data, serial, missing):
    from_mod, from_class = get_pickle_metadata(data)
    if len(missing) > 1:
        plural = "s"
    else:
        plural = ""
    ts = TimeStamp(serial)
    print("oid %s %s.%s" % (hex(u64(oid)), from_mod, from_class))
    print("last updated: %s, tid=%s" % (ts, hex(u64(serial))))
    print("refers to invalid object%s:" % plural)
    for oid, info, reason in missing:
        if isinstance(info, tuple):
            description = "%s.%s" % info
        else:
            description = str(info)
        print("\toid %s %s: %r" % (oid_repr(oid), reason, description))
    print()
Example #8
 def test_answerTransactionInformation(self):
     conn = self.getFakeConnection()
     tid = self.getNextTID()
     user = '******'
     desc = 'DESC'
     ext = 'EXT'
     packed = False
     oid_list = [self.getOID(0), self.getOID(1)]
     self.handler.answerTransactionInformation(conn, tid, user, desc, ext,
                                               packed, oid_list)
     self._checkHandlerData(({
         'time': TimeStamp(tid).timeTime(),
         'user_name': user,
         'description': desc,
         'id': tid,
         'oids': oid_list,
         'packed': packed,
     }, ext))
Example #9
def _findModificationTime(object):
    """Find the last modification time for a version-controlled object.
       The modification time reflects the latest modification time of
       the object or any of its persistent subobjects that are not
       themselves version-controlled objects. Note that this will
       return None if the object has no modification time."""

    mtime = getattr(object, '_p_mtime', None)
    if mtime is None:
        return None

    latest = mtime
    conn = object._p_jar
    load = conn._storage.load
    refs = referencesf

    oids = [object._p_oid]
    done_oids = set()
    done = done_oids.__contains__
    first = 1

    while oids:
        oid = oids[0]
        del oids[0]
        if done(oid):
            continue
        done_oids.add(oid)
        try:
            p, serial = load(oid)
        except Exception:
            pass  # invalid reference!
        else:
            if first is not None:
                first = None
            else:
                if p.find(b'U\x0b__vc_info__') == -1:
                    mtime = TimeStamp(serial).timeTime()
                    if mtime > latest:
                        latest = mtime
            refs(p, oids)

    return latest
Example #10
def parse_tid(tid_string, raw_only=False):
    """Try to parse `tid_string` as a time and returns the
    corresponding raw TID.
    If `tid_string` cannot be parsed as a time, assume it was
    already a TID.
    This function also raise TidRangeInvalid when `tid_string`
    is invalid.
    """
    assert isinstance(tid_string, (str, bytes))

    # If it "looks like a TID", don't try to parse it as time,
    # because parsing is slow.
    if len(tid_string) == 16:
        try:
            return fromhex(tid_string)
        except ValueError:
            pass

    if raw_only:
        # either it was not 16-char string or hex decoding failed
        raise TidInvalid(tid_string)

    # preprocess to support `1.day.ago` style formats like git log does.
    if "ago" in tid_string:
        tid_string = tid_string.replace(".", " ").replace("_", " ")
    parsed_time = dateparser.parse(tid_string,
                                   settings={
                                       'TO_TIMEZONE': 'UTC',
                                       'RETURN_AS_TIMEZONE_AWARE': True
                                   })

    if not parsed_time:
        # parsing as date failed
        raise TidInvalid(tid_string)

    # build a ZODB.TimeStamp to convert as a TID
    return TimeStamp(parsed_time.year, parsed_time.month, parsed_time.day,
                     parsed_time.hour, parsed_time.minute, parsed_time.second +
                     parsed_time.microsecond / 1000000.).raw()
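
The return statement packs broken-down UTC fields into a raw TID. The same construction works for any timezone-aware datetime; a small sketch of that inverse mapping (datetime_to_raw_tid is an illustrative name, not part of the module above):

from datetime import datetime, timezone
from ZODB.TimeStamp import TimeStamp

def datetime_to_raw_tid(dt):
    """Convert an aware datetime to the corresponding 8-byte raw TID."""
    dt = dt.astimezone(timezone.utc)
    return TimeStamp(dt.year, dt.month, dt.day, dt.hour, dt.minute,
                     dt.second + dt.microsecond / 1000000.0).raw()

print(datetime_to_raw_tid(datetime(2002, 1, 23, 10, 48, 5,
                                   tzinfo=timezone.utc)))
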
Example #11
def doit(srcdb, dstdb, options):
    outfp = options.outfp
    profilep = options.profilep
    verbose = options.verbose
    # some global information
    largest_pickle = 0
    largest_txn_in_size = 0
    largest_txn_in_objects = 0
    total_pickle_size = 0
    total_object_count = 0
    # Ripped from BaseStorage.copyTransactionsFrom()
    ts = None
    ok = True
    prevrevids = {}
    counter = 0
    skipper = 0
    if options.timestamps:
        print("%4s. %26s %6s %8s %5s %5s %5s %5s %5s" % (
            "NUM",
            "TID AS TIMESTAMP",
            "OBJS",
            "BYTES",
            # Does anybody know what these times mean?
            "t4-t0",
            "t1-t0",
            "t2-t1",
            "t3-t2",
            "t4-t3"))
    else:
        print("%4s. %20s %6s %8s %6s %6s %6s %6s %6s" % (
            "NUM",
            "TRANSACTION ID",
            "OBJS",
            "BYTES",
            # Does anybody know what these times mean?
            "t4-t0",
            "t1-t0",
            "t2-t1",
            "t3-t2",
            "t4-t3"))
    for txn in srcdb.iterator():
        skipper += 1
        if skipper <= options.skiptxn:
            continue
        counter += 1
        if counter > options.maxtxn >= 0:
            break
        tid = txn.tid
        if ts is None:
            ts = TimeStamp(tid)
        else:
            t = TimeStamp(tid)
            if t <= ts:
                if ok:
                    print(('Time stamps are out of order %s, %s' % (ts, t)),
                          file=sys.stderr)
                    ok = False
                    ts = t.laterThan(ts)
                    tid = ts.raw()
                else:
                    ts = t
                    if not ok:
                        print(('Time stamps are back in order %s' % t),
                              file=sys.stderr)
                        ok = True
        if verbose > 1:
            print(ts)

        prof = None
        if profilep and (counter % 100) == 0:
            prof = profile.Profile()
        objects = 0
        size = 0
        newrevids = RevidAccumulator()
        t0 = time.time()
        dstdb.tpc_begin(txn, tid, txn.status)
        t1 = time.time()
        for r in txn:
            oid = r.oid
            objects += 1
            thissize = len(r.data)
            size += thissize
            if thissize > largest_pickle:
                largest_pickle = thissize
            if verbose > 1:
                if not r.version:
                    vstr = 'norev'
                else:
                    vstr = r.version
                print(utils.U64(oid), vstr, len(r.data))
            oldrevid = prevrevids.get(oid, ZERO)
            result = dstdb.store(oid, oldrevid, r.data, r.version, txn)
            newrevids.store(oid, result)
        t2 = time.time()
        result = dstdb.tpc_vote(txn)
        t3 = time.time()
        newrevids.tpc_vote(result)
        prevrevids.update(newrevids.get_dict())
        # Profile every 100 transactions
        if prof:
            prof.runcall(dstdb.tpc_finish, txn)
        else:
            dstdb.tpc_finish(txn)
        t4 = time.time()

        # record the results
        if objects > largest_txn_in_objects:
            largest_txn_in_objects = objects
        if size > largest_txn_in_size:
            largest_txn_in_size = size
        if options.timestamps:
            tidstr = str(TimeStamp(tid))
            format = "%4d. %26s %6d %8d %5.3f %5.3f %5.3f %5.3f %5.3f"
        else:
            tidstr = utils.U64(tid)
            format = "%4d. %20s %6d %8d %6.4f %6.4f %6.4f %6.4f %6.4f"
        print(format % (skipper, tidstr, objects, size, t4 - t0, t1 - t0,
                        t2 - t1, t3 - t2, t4 - t3),
              file=outfp)
        total_pickle_size += size
        total_object_count += objects

        if prof:
            prof.create_stats()
            fp = open('profile-%02d.txt' % (counter / 100), 'wb')
            marshal.dump(prof.stats, fp)
            fp.close()
    print("Largest pickle:          %8d" % largest_pickle, file=outfp)
    print("Largest transaction:     %8d" % largest_txn_in_size, file=outfp)
    print("Largest object count:    %8d" % largest_txn_in_objects, file=outfp)
    print("Total pickle size: %14d" % total_pickle_size, file=outfp)
    print("Total object count:      %8d" % total_object_count, file=outfp)
Example #12
    def _check_tid_after_load(self,
                              oid_int,
                              actual_tid_int,
                              expect_tid_int=None):
        """Verify the tid of an object loaded from the database is sane."""

        if actual_tid_int > self.current_tid:
            # Strangely, the database just gave us data from a future
            # transaction.  We can't give the data to ZODB because that
            # would be a consistency violation.  However, the cause is hard
            # to track down, so issue a ReadConflictError and hope that
            # the application retries successfully.
            raise ReadConflictError(
                "Got data for OID 0x%(oid_int)x from "
                "future transaction %(actual_tid_int)d (%(got_ts)s).  "
                "Current transaction is %(current_tid)d (%(current_ts)s)." % {
                    'oid_int': oid_int,
                    'actual_tid_int': actual_tid_int,
                    'current_tid': self.current_tid,
                    'got_ts': str(TimeStamp(p64(actual_tid_int))),
                    'current_ts': str(TimeStamp(p64(self.current_tid))),
                })

        if expect_tid_int is not None and actual_tid_int != expect_tid_int:
            # Uh-oh, the cache is inconsistent with the database.
            # Possible causes:
            #
            # - The database MUST provide a snapshot view for each
            #   session; this error can occur if that requirement is
            #   violated. For example, MySQL's MyISAM engine is not
            #   sufficient for the object_state table because MyISAM
            #   can not provide a snapshot view. (InnoDB is
            #   sufficient.)
            #
            # - Something could be writing to the database out
            #   of order, such as a version of RelStorage that
            #   acquires a different commit lock.
            #
            # - A software bug. In the past, there was a subtle bug
            #   in after_poll() that caused it to ignore the
            #   transaction order, leading it to sometimes put the
            #   wrong tid in delta_after*.
            cp0, cp1 = self.checkpoints
            import os
            import thread
            raise AssertionError(
                "Detected an inconsistency "
                "between the RelStorage cache and the database "
                "while loading an object using the delta_after0 dict.  "
                "Please verify the database is configured for "
                "ACID compliance and that all clients are using "
                "the same commit lock.  "
                "(oid_int=%(oid_int)r, expect_tid_int=%(expect_tid_int)r, "
                "actual_tid_int=%(actual_tid_int)r, "
                "current_tid=%(current_tid)r, cp0=%(cp0)r, cp1=%(cp1)r, "
                "len(delta_after0)=%(lda0)r, len(delta_after1)=%(lda1)r, "
                "pid=%(pid)r, thread_ident=%(thread_ident)r)" % {
                    'oid_int': oid_int,
                    'expect_tid_int': expect_tid_int,
                    'actual_tid_int': actual_tid_int,
                    'current_tid': self.current_tid,
                    'cp0': cp0,
                    'cp1': cp1,
                    'lda0': len(self.delta_after0),
                    'lda1': len(self.delta_after1),
                    'pid': os.getpid(),
                    'thread_ident': thread.get_ident(),
                })
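
RelStorage keeps tids as 64-bit integers, so the messages above pass them through p64() before TimeStamp can render them. The round trip in isolation (the integer is illustrative):

from ZODB.utils import p64, u64
from ZODB.TimeStamp import TimeStamp

tid_int = 0x0342394815555555          # illustrative integer tid
tid_bytes = p64(tid_int)              # 8-byte form used by ZODB proper

print("tid %d committed at %s" % (tid_int, TimeStamp(tid_bytes)))
assert u64(tid_bytes) == tid_int      # p64 and u64 are inverses
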
Example #13
    def checkRawTimestamp(self):
        t = time.gmtime(time.time())
        ts1 = TimeStamp(*t[:6])
        ts2 = TimeStamp(`ts1`)

        self.assertEquals(ts1, ts2)
        self.assertEquals(ts1.timeTime(), ts2.timeTime())
        self.assertEqual(ts1.year(), ts2.year())
        self.assertEqual(ts1.month(), ts2.month())
        self.assertEqual(ts1.day(), ts2.day())
        self.assertEquals(ts1.hour(), ts2.hour())
        self.assertEquals(ts1.minute(), ts2.minute())
        self.assert_(abs(ts1.second() - ts2.second()) < EPSILON)
Example #14
def doit(srcdb, dstdb, options):
    outfp = options.outfp
    profilep = options.profilep
    verbose = options.verbose
    # some global information
    largest_pickle = 0
    largest_txn_in_size = 0
    largest_txn_in_objects = 0
    total_pickle_size = 0L
    total_object_count = 0
    # Ripped from BaseStorage.copyTransactionsFrom()
    ts = None
    ok = True
    prevrevids = {}
    counter = 0
    skipper = 0
    if options.timestamps:
        print "%4s. %26s %6s %8s %5s %5s %5s %5s %5s" % (
            "NUM", "TID AS TIMESTAMP", "OBJS", "BYTES",
            # Does anybody know what these times mean?
            "t4-t0", "t1-t0", "t2-t1", "t3-t2", "t4-t3")
    else:
        print "%4s. %20s %6s %8s %6s %6s %6s %6s %6s" % (
            "NUM", "TRANSACTION ID", "OBJS", "BYTES",
            # Does anybody know what these times mean?
            "t4-t0", "t1-t0", "t2-t1", "t3-t2", "t4-t3")
    for txn in srcdb.iterator():
        skipper += 1
        if skipper <= options.skiptxn:
            continue
        counter += 1
        if counter > options.maxtxn >= 0:
            break
        tid = txn.tid
        if ts is None:
            ts = TimeStamp(tid)
        else:
            t = TimeStamp(tid)
            if t <= ts:
                if ok:
                    print >> sys.stderr, \
                          'Time stamps are out of order %s, %s' % (ts, t)
                    ok = False
                    ts = t.laterThan(ts)
                    tid = `ts`
                else:
                    ts = t
                    if not ok:
                        print >> sys.stderr, \
                              'Time stamps are back in order %s' % t
                        ok = True
        if verbose > 1:
            print ts

        prof = None
        if profilep and (counter % 100) == 0:
            prof = profile.Profile()
        objects = 0
        size = 0
        newrevids = RevidAccumulator()
        t0 = time.time()
        dstdb.tpc_begin(txn, tid, txn.status)
        t1 = time.time()
        for r in txn:
            oid = r.oid
            objects += 1
            thissize = len(r.data)
            size += thissize
            if thissize > largest_pickle:
                largest_pickle = thissize
            if verbose > 1:
                if not r.version:
                    vstr = 'norev'
                else:
                    vstr = r.version
                print utils.U64(oid), vstr, len(r.data)
            oldrevid = prevrevids.get(oid, ZERO)
            result = dstdb.store(oid, oldrevid, r.data, r.version, txn)
            newrevids.store(oid, result)
        t2 = time.time()
        result = dstdb.tpc_vote(txn)
        t3 = time.time()
        newrevids.tpc_vote(result)
        prevrevids.update(newrevids.get_dict())
        # Profile every 100 transactions
        if prof:
            prof.runcall(dstdb.tpc_finish, txn)
        else:
            dstdb.tpc_finish(txn)
        t4 = time.time()

        # record the results
        if objects > largest_txn_in_objects:
            largest_txn_in_objects = objects
        if size > largest_txn_in_size:
            largest_txn_in_size = size
        if options.timestamps:
            tidstr = str(TimeStamp(tid))
            format = "%4d. %26s %6d %8d %5.3f %5.3f %5.3f %5.3f %5.3f"
        else:
            tidstr = utils.U64(tid)
            format = "%4d. %20s %6d %8d %6.4f %6.4f %6.4f %6.4f %6.4f"
        print >> outfp, format % (skipper, tidstr, objects, size,
                                  t4-t0, t1-t0, t2-t1, t3-t2, t4-t3)
        total_pickle_size += size
        total_object_count += objects

        if prof:
            prof.create_stats()
            fp = open('profile-%02d.txt' % (counter / 100), 'wb')
            marshal.dump(prof.stats, fp)
            fp.close()
    print >> outfp, "Largest pickle:          %8d" % largest_pickle
    print >> outfp, "Largest transaction:     %8d" % largest_txn_in_size
    print >> outfp, "Largest object count:    %8d" % largest_txn_in_objects
    print >> outfp, "Total pickle size: %14d" % total_pickle_size
    print >> outfp, "Total object count:      %8d" % total_object_count
Example #15
 def _set_time(self, obj, t):
     """Sets the last modification time of a Persistent obj to float t.
     """
     args = time.gmtime(t)[:5] + (t%60,)
     obj._p_serial = repr(TimeStamp(*args))
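
repr(TimeStamp(...)) returns the raw 8 bytes only with the old Python 2 extension type; current releases expose the raw form through .raw(). A sketch of the same helper written against that API (set_time here is a standalone illustrative function):

import time
from ZODB.TimeStamp import TimeStamp

def set_time(obj, t):
    """Make a Persistent obj appear last modified at float time t
    (seconds since the epoch, UTC)."""
    args = time.gmtime(t)[:5] + (t % 60,)
    obj._p_serial = TimeStamp(*args).raw()
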
Example #16
 def checkLaterThan(self):
     # XXX what does laterThan() do?
     t = time.gmtime(time.time())
     ts = TimeStamp(*t[:6])
     ts2 = ts.laterThan(ts)
     self.assert_(ts2 > ts)
Example #17
 def checkTimeStamp(self):
     # Alternate test suite
     t = TimeStamp(2002, 1, 23, 10, 48, 5) # GMT
     self.assertEquals(str(t), '2002-01-23 10:48:05.000000')
     self.assertEquals(repr(t), '\x03B9H\x15UUU')
     self.assertEquals(TimeStamp('\x03B9H\x15UUU'), t)
     self.assertEquals(t.year(), 2002)
     self.assertEquals(t.month(), 1)
     self.assertEquals(t.day(), 23)
     self.assertEquals(t.hour(), 10)
     self.assertEquals(t.minute(), 48)
     self.assertEquals(round(t.second()), 5)
     self.assertEquals(t.second(), t.seconds()) # Alias
     self.assertEquals(t.timeTime(), 1011782885)
     t1 = TimeStamp(2002, 1, 23, 10, 48, 10)
     self.assertEquals(str(t1), '2002-01-23 10:48:10.000000')
     self.assert_(t == t)
     self.assert_(t != t1)
     self.assert_(t < t1)
     self.assert_(t <= t1)
     self.assert_(t1 >= t)
     self.assert_(t1 > t)
     self.failIf(t == t1)
     self.failIf(t != t)
     self.failIf(t > t1)
     self.failIf(t >= t1)
     self.failIf(t1 < t)
     self.failIf(t1 <= t)
     self.assertEquals(cmp(t, t), 0)
     self.assertEquals(cmp(t, t1), -1)
     self.assertEquals(cmp(t1, t), 1)
     self.assertEquals(t1.laterThan(t), t1)
     self.assert_(t.laterThan(t1) > t1)
     self.assertEquals(TimeStamp(2002,1,23), TimeStamp(2002,1,23,0,0,0))
Example #18
 def make_tid(*args):
     return u64(TimeStamp(*args).raw())
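
make_tid packs a date and time into an integer tid by combining .raw() with u64(). An illustrative, self-contained call (the one-line definition is repeated so the snippet runs on its own):

from ZODB.utils import u64
from ZODB.TimeStamp import TimeStamp

def make_tid(*args):
    return u64(TimeStamp(*args).raw())

# 2002-01-23 10:48:05 UTC as a 64-bit integer tid
print("%016x" % make_tid(2002, 1, 23, 10, 48, 5))   # 0342394815555555
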
Example #19
def main(args=None):
    options = parser.parse_args(args)

    if options.logging_configuration.upper() in logging_levels:
        logging.basicConfig(level=options.logging_configuration.upper())
    else:
        with open(options.logging_configuration) as f:
            from ZConfig import configureLoggers
            configureLoggers(f.read())

    transform = options.transform
    if transform is not None:
        from .component import global_by_name
        transform = global_by_name(transform)

    jsonifier = Jsonifier(transform=transform)
    driver = relstorage.adapters.postgresql.select_driver(
        relstorage.options.Options(driver=options.driver))
    Binary = driver.Binary
    dsn = options.connection_string
    with closing(pg_connection(dsn)) as conn:
        with closing(conn.cursor()) as cursor:
            if options.nagios:
                if not table_exists(cursor, 'newt_follow_progress'):
                    print("Updater has not run")
                    return 2
                cursor.execute("select max(tid) from object_state")
                [[stid]] = cursor
                utid = follow.get_progress_tid(conn, __name__)
                if stid is None:
                    if utid == -1:
                        print("No transactions")
                        return 0
                    else:
                        print("Updater saw data but there was None")
                        return 2
                elif utid < 0:
                    print("Updater hasn't done anything")
                    return 2
                else:
                    from ZODB.utils import p64
                    from ZODB.TimeStamp import TimeStamp
                    lag = (TimeStamp(p64(stid)).timeTime() -
                           TimeStamp(p64(utid)).timeTime())
                    if lag < 0:
                        print("Updater is ahead")
                        return 2
                    warn, error = map(int, options.nagios.split(','))
                    flag = lambda : ("%99.3f" % lag).strip()
                    if lag > error:
                        print("Updater is too far behind | %s" % flag())
                        return 2
                    elif lag > warn:
                        print("Updater is behind | %s" % flag())
                        return 1
                    else:
                        print("OK | %s" % flag())
                        return 0

            compute_missing = options.compute_missing
            if (compute_missing and
                not table_exists(cursor, follow.PROGRESS_TABLE)
                ):
                if not table_exists(cursor, 'newt'):
                    raise AssertionError("newt table doesn't exist")
                cursor.execute("select max(tid) from object_state")
                [[tid]] = cursor
            else:
                tid = follow.get_progress_tid(conn, __name__)
                if tid < 0 and not table_exists(cursor, 'newt'):
                    from ._adapter import _newt_ddl
                    cursor.execute(_newt_ddl)
                elif trigger_exists(cursor, DELETE_TRIGGER):
                    if options.remove_delete_trigger:
                        cursor.execute("drop trigger %s on object_state" %
                                       DELETE_TRIGGER)
                    else:
                        logger.error(
                            "The Newt DB delete trigger exists.\n"
                            "It is incompatible with the updater.\n"
                            "Use -T to remove it.")
                        return 1

                if not options.no_gc:
                    cursor.execute(gc_sql)

            conn.commit()

            if options.gc_only:
                if options.no_gc:
                    logger.warn(
                        "Exiting after garbage collection,\n"
                        "but garbage collection was suppressed.")
                return 0

            if options.compute_missing:
                start_tid = -1
                end_tid = tid
                logger.info("Compute_missing through %s", tid)
                process = _compute_missing
            else:
                logger.info("Starting updater at %s", tid)
                start_tid = tid
                end_tid = None
                process = _update_newt

            for batch in follow.updates(
                dsn,
                start_tid=start_tid,
                end_tid=end_tid,
                batch_limit=options.transaction_size_limit,
                poll_timeout=options.poll_timeout,
                ):
                process(conn, cursor, jsonifier, Binary, batch)
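
The Nagios branch measures updater lag as the difference between two tids converted to epoch seconds. That computation in isolation (tid_lag_seconds is an illustrative helper; both tids are 64-bit integers, as stored in object_state):

from ZODB.utils import p64
from ZODB.TimeStamp import TimeStamp

def tid_lag_seconds(storage_tid_int, updater_tid_int):
    """Seconds between the newest stored transaction and the last
    transaction the updater has processed."""
    return (TimeStamp(p64(storage_tid_int)).timeTime() -
            TimeStamp(p64(updater_tid_int)).timeTime())
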
Example #20
def recover(argv=sys.argv):

    try:
        opts, (inp, outp) = getopt.getopt(argv[1:], 'fv:pP:')
        force = partial = verbose = 0
        pack = None
        for opt, v in opts:
            if opt == '-v': verbose = int(v)
            elif opt == '-p': partial=1
            elif opt == '-f': force=1
            elif opt == '-P': pack=time.time()-float(v)


        force = filter(lambda opt: opt[0]=='-f', opts)
        partial = filter(lambda opt: opt[0]=='-p', opts)
        verbose = filter(lambda opt: opt[0]=='-v', opts)
        verbose = verbose and int(verbose[0][1]) or 0
        print 'Recovering', inp, 'into', outp
    except:
        die()
        print __doc__ % argv[0]


    if os.path.exists(outp) and not force:
        die("%s exists" % outp)

    file=open(inp, "rb")
    seek=file.seek
    read=file.read
    if read(4) != ZODB.FileStorage.packed_version:
        die("input is not a file storage")

    seek(0,2)
    file_size=file.tell()

    ofs=ZODB.FileStorage.FileStorage(outp, create=1)
    _ts=None
    ok=1
    prog1=0
    preindex={}; preget=preindex.get   # waaaa
    undone=0

    pos=4
    while pos:

        try:
            npos, transaction = read_transaction_header(file, pos, file_size)
        except EOF:
            break
        except:
            print "\n%s: %s\n" % sys.exc_info()[:2]
            if not verbose: progress(prog1)
            pos = scan(file, pos, file_size)
            continue

        if transaction is None:
            undone = undone + npos - pos
            pos=npos
            continue
        else:
            pos=npos

        tid=transaction.tid

        if _ts is None:
            _ts=TimeStamp(tid)
        else:
            t=TimeStamp(tid)
            if t <= _ts:
                if ok: print ('Time stamps out of order %s, %s' % (_ts, t))
                ok=0
                _ts=t.laterThan(_ts)
                tid=`_ts`
            else:
                _ts = t
                if not ok:
                    print ('Time stamps back in order %s' % (t))
                    ok=1

        if verbose:
            print 'begin',
            if verbose > 1: print
            sys.stdout.flush()

        ofs.tpc_begin(transaction, tid, transaction.status)

        if verbose:
            print 'begin', pos, _ts,
            if verbose > 1: print
            sys.stdout.flush()

        nrec=0
        try:
            for r in transaction:
                oid=r.oid
                if verbose > 1: print U64(oid), r.version, len(r.data)
                pre=preget(oid, None)
                s=ofs.store(oid, pre, r.data, r.version, transaction)
                preindex[oid]=s
                nrec=nrec+1
        except:
            if partial and nrec:
                ofs._status='p'
                ofs.tpc_vote(transaction)
                ofs.tpc_finish(transaction)
                if verbose: print 'partial'
            else:
                ofs.tpc_abort(transaction)
            print "\n%s: %s\n" % sys.exc_info()[:2]
            if not verbose: progress(prog1)
            pos = scan(file, pos, file_size)
        else:
            ofs.tpc_vote(transaction)
            ofs.tpc_finish(transaction)
            if verbose:
                print 'finish'
                sys.stdout.flush()

        if not verbose:
            prog = pos * 20l / file_size
            while prog > prog1:
                prog1 = prog1 + 1
                iprogress(prog1)


    bad = file_size - undone - ofs._pos

    print "\n%s bytes removed during recovery" % bad
    if undone:
        print "%s bytes of undone transaction data were skipped" % undone

    if pack is not None:
        print "Packing ..."
        from ZODB.referencesf import referencesf
        ofs.pack(pack, referencesf)

    ofs.close()