def test__p_mtime_w_serial(self):
    from persistent.timestamp import TimeStamp
    WHEN_TUPLE = (2011, 2, 15, 13, 33, 27.5)
    ts = TimeStamp(*WHEN_TUPLE)
    inst, jar, OID = self._makeOneWithJar()
    inst._p_serial = ts.raw()
    self.assertEqual(inst._p_mtime, ts.timeTime())
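The test above leans on the fact that _p_mtime is derived from _p_serial by parsing the serial as a TimeStamp. A minimal standalone sketch of that round-trip, using only the public TimeStamp API (the sample values mirror the test's WHEN_TUPLE):

# Sketch only: not part of the test suite above.
from persistent.timestamp import TimeStamp

ts = TimeStamp(2011, 2, 15, 13, 33, 27.5)   # UTC (year, month, day, hour, minute, second)
raw = ts.raw()                              # 8 bytes, suitable for _p_serial
assert len(raw) == 8
assert TimeStamp(raw).raw() == raw          # constructing from raw is lossless
print(ts.timeTime())                        # UTC seconds since the epoch, as a float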
Example #3
import time
from persistent.timestamp import TimeStamp

def newTid(old):
    # Build an 8-byte transaction id from the current UTC time, strictly
    # later than ``old`` when an old tid is supplied.
    t = time.time()
    ts = TimeStamp(*time.gmtime(t)[:5] + (t % 60,))
    if old is not None:
        ts = ts.laterThan(TimeStamp(old))
    return ts.raw()
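A short usage sketch (hypothetical driver code, not from the source) showing why newTid() threads the previous tid back in: laterThan() guarantees that each generated tid is strictly greater than the last, even if the wall clock has not visibly advanced between calls.

# Sketch only; assumes newTid() as defined above.
tid1 = newTid(None)
tid2 = newTid(tid1)
tid3 = newTid(tid2)
# 8-byte big-endian tids compare lexicographically in chronological order.
assert tid1 < tid2 < tid3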
Example #4
def doit(srcdb, dstdb, options):
    outfp = options.outfp
    profilep = options.profilep
    verbose = options.verbose
    # some global information
    largest_pickle = 0
    largest_txn_in_size = 0
    largest_txn_in_objects = 0
    total_pickle_size = 0
    total_object_count = 0
    # Ripped from BaseStorage.copyTransactionsFrom()
    ts = None
    ok = True
    prevrevids = {}
    counter = 0
    skipper = 0
    if options.timestamps:
        print("%4s. %26s %6s %8s %5s %5s %5s %5s %5s" % (
            "NUM", "TID AS TIMESTAMP", "OBJS", "BYTES",
            # Does anybody know what these times mean?
            "t4-t0", "t1-t0", "t2-t1", "t3-t2", "t4-t3"))
    else:
        print("%4s. %20s %6s %8s %6s %6s %6s %6s %6s" % (
            "NUM", "TRANSACTION ID", "OBJS", "BYTES",
            # Does anybody know what these times mean?
            "t4-t0", "t1-t0", "t2-t1", "t3-t2", "t4-t3"))
    for txn in srcdb.iterator():
        skipper += 1
        if skipper <= options.skiptxn:
            continue
        counter += 1
        if counter > options.maxtxn >= 0:
            break
        tid = txn.tid
        if ts is None:
            ts = TimeStamp(tid)
        else:
            t = TimeStamp(tid)
            if t <= ts:
                if ok:
                    print('Time stamps are out of order %s, %s' % (ts, t),
                          file=sys.stderr)
                    ok = False
                # Manufacture a tid strictly later than the previous one so
                # the destination storage still sees increasing tids.
                ts = t.laterThan(ts)
                tid = ts.raw()
            else:
                ts = t
                if not ok:
                    print('Time stamps are back in order %s' % t,
                          file=sys.stderr)
                    ok = True
        if verbose > 1:
            print(ts)

        prof = None
        if profilep and (counter % 100) == 0:
            prof = profile.Profile()
        objects = 0
        size = 0
        newrevids = RevidAccumulator()
        t0 = time.time()
        dstdb.tpc_begin(txn, tid, txn.status)
        t1 = time.time()
        for r in txn:
            oid = r.oid
            objects += 1
            thissize = len(r.data)
            size += thissize
            if thissize > largest_pickle:
                largest_pickle = thissize
            if verbose > 1:
                if not r.version:
                    vstr = 'norev'
                else:
                    vstr = r.version
                print(utils.U64(oid), vstr, len(r.data))
            oldrevid = prevrevids.get(oid, utils.z64)
            result = dstdb.store(oid, oldrevid, r.data, r.version, txn)
            newrevids.store(oid, result)
        t2 = time.time()
        result = dstdb.tpc_vote(txn)
        t3 = time.time()
        newrevids.tpc_vote(result)
        prevrevids.update(newrevids.get_dict())
        # Profile every 100 transactions
        if prof:
            prof.runcall(dstdb.tpc_finish, txn)
        else:
            dstdb.tpc_finish(txn)
        t4 = time.time()

        # record the results
        if objects > largest_txn_in_objects:
            largest_txn_in_objects = objects
        if size > largest_txn_in_size:
            largest_txn_in_size = size
        if options.timestamps:
            tidstr = str(TimeStamp(tid))
            format = "%4d. %26s %6d %8d %5.3f %5.3f %5.3f %5.3f %5.3f"
        else:
            tidstr = utils.U64(tid)
            format = "%4d. %20s %6d %8d %6.4f %6.4f %6.4f %6.4f %6.4f"
        print(format % (skipper, tidstr, objects, size,
                        t4 - t0, t1 - t0, t2 - t1, t3 - t2, t4 - t3),
              file=outfp)
        total_pickle_size += size
        total_object_count += objects

        if prof:
            prof.create_stats()
            with open('profile-%02d.txt' % (counter // 100), 'wb') as fp:
                marshal.dump(prof.stats, fp)
    print("Largest pickle:          %8d" % largest_pickle, file=outfp)
    print("Largest transaction:     %8d" % largest_txn_in_size, file=outfp)
    print("Largest object count:    %8d" % largest_txn_in_objects, file=outfp)
    print("Total pickle size: %14d" % total_pickle_size, file=outfp)
    print("Total object count:      %8d" % total_object_count, file=outfp)
Example #5
    def __pre_pack(self, t, referencesf):
        logger.info("pack: beginning pre-pack")

        # In 2019, Unix timestamps look like
        #            1564006806.0
        # While 64-bit integer TIDs for the same timestamp look like
        #    275085010696509852
        #
        # Multiple TIDs can map to a single Unix timestamp.
        # For example, the 9 integers between 275085010624927044 and
        # 275085010624927035 all map to 1564006804.9999998.
        #
        # Therefore, Unix timestamps are ambiguous, especially if we're committing
        # multiple transactions rapidly (within the resolution of the underlying TID
        # clock).
        # This ambiguity mostly matters for unit tests, where we do commit rapidly.
        #
        # To help them out, we accept 64-bit integer TIDs to specify an exact
        # transaction to pack to.

        # We also allow None or a negative number to mean "current committed transaction".
        if t is None:
            t = -1

        if t > 275085010696509852:
            # Must be a TID.

            # Turn it back into a time.time() for later logging
            ts = TimeStamp(int64_to_8bytes(t))
            logger.debug(
                "Treating requested pack time %s as TID meaning %s",
                t, ts
            )
            best_pack_tid_int = t
            t = ts.timeTime()
        elif t < 0 or t >= time.time():
            # Packing for the current time or in the future means to pack
            # to the latest commit in the database. This matters if not all
            # machine clocks are synchronized.
            best_pack_tid_int = MAX_TID - 1
        else:
            # Find the latest commit before or at the pack time.
            # Note that several TIDs will fit in the resolution of a time.time(),
            # so this is slightly ambiguous.
            requested_pack_ts = TimeStamp(*time.gmtime(t)[:5] + (t % 60,))
            requested_pack_tid = requested_pack_ts.raw()
            requested_pack_tid_int = bytes8_to_int64(requested_pack_tid)

            best_pack_tid_int = requested_pack_tid_int

        tid_int = self.packundo.choose_pack_transaction(best_pack_tid_int)

        if tid_int is None:
            logger.debug("all transactions before %s have already "
                         "been packed", time.ctime(t))
            return

        s = time.ctime(TimeStamp(int64_to_8bytes(tid_int)).timeTime())
        logger.info("Analyzing transactions committed %s or before (TID %d)",
                    s, tid_int)

        # In pre_pack, the adapter fills tables with
        # information about what to pack.  The adapter
        # must not actually pack anything yet.
        def get_references(state):
            """Return an iterable of the set of OIDs the given state refers to."""
            if not state:
                return ()

            return {bytes8_to_int64(oid) for oid in referencesf(state)}

        self.packundo.pre_pack(tid_int, get_references)
        logger.info("pack: pre-pack complete")
        return tid_int
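The long comment at the top of __pre_pack is easier to follow with the concrete numbers it mentions. A worked sketch (values taken from the comment; time, TimeStamp, int64_to_8bytes, and bytes8_to_int64 are assumed to be the same helpers the method already has in scope):

# Sketch only, reusing the module's existing helpers.
tid_int = 275085010696509852
ts = TimeStamp(int64_to_8bytes(tid_int))    # 64-bit TID -> 8 raw bytes -> TimeStamp
unix_time = ts.timeTime()
print(unix_time)                            # roughly 1564006806.0, per the comment

# Going the other way is lossy: one float of Unix time covers a small range of
# TIDs, which is why an exact integer TID is also accepted as the pack target.
approx_tid_int = bytes8_to_int64(
    TimeStamp(*time.gmtime(unix_time)[:5] + (unix_time % 60,)).raw())
print(tid_int, approx_tid_int)              # close, but not necessarily equal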