Example 1
0
    def checkPackVersionReachable(self):
        """Pack a storage containing a mix of version data, undo records
        and backpointers, and verify that version data written after the
        pack time survives the pack.

        NOTE(review): this relies on the old ZODB "versions" API
        (``db.open(version=...)``, ``abortVersion``) which was removed in
        later ZODB releases -- confirm the storage under test supports it.
        """
        db = DB(self._storage)
        cn = db.open()
        root = cn.root()

        names = "a", "b", "c"

        # One non-version revision per name.
        for name in names:
            root[name] = MinPO(name)
            transaction.commit()

        # Give each object a second revision inside a same-named version.
        for name in names:
            cn2 = db.open(version=name)
            rt2 = cn2.root()
            obj = rt2[name]
            obj.value = MinPO("version")
            transaction.commit()
            cn2.close()

        root["d"] = MinPO("d")
        transaction.commit()
        snooze()

        self._storage.pack(time.time(), referencesf)
        cn.sync()

        # make sure all the non-version data is there
        for name, obj in root.items():
            self.assertEqual(name, obj.value)

        # make sure all the version-data is there,
        # and create a new revision in the version
        for name in names:
            cn2 = db.open(version=name)
            rt2 = cn2.root()
            obj = rt2[name].value
            self.assertEqual(obj.value, "version")
            obj.value = "still version"
            transaction.commit()
            cn2.close()

        db.abortVersion("b")
        txn = transaction.get()
        txn.note("abort version b")
        txn.commit()

        # Pack time falls between the abort and its undo, so pack has to
        # chase a backpointer to find the version data.
        t = time.time()
        snooze()

        L = db.undoInfo()
        db.undo(L[0]["id"])
        txn = transaction.get()
        txn.note("undo abort")
        txn.commit()

        self._storage.pack(t, referencesf)

        # The version revision must still be reachable after the pack.
        cn2 = db.open(version="b")
        rt2 = cn2.root()
        self.assertEqual(rt2["b"].value.value, "still version")
Example 2
0
    def checkLoadBefore(self):
        """Store 10 revisions of one object and verify that loadBefore()
        can return every non-current revision.
        """
        oid = self._storage.new_oid()
        revs = []
        revid = None
        for i in range(10):
            # We need to ensure that successive timestamps are at least
            # two apart, so that a timestamp exists that's unambiguously
            # between successive timestamps.  Each call to snooze()
            # guarantees that the next timestamp will be at least one
            # larger (and probably much more than that) than the previous
            # one.
            snooze()
            snooze()
            revid = self._dostore(oid, revid, data=MinPO(i))
            revs.append(self._storage.loadEx(oid, ""))

        prev = u64(revs[0][1])
        for i in range(1, 10):
            tid = revs[i][1]
            cur = u64(tid)
            # A time strictly between the two revisions' tids.
            middle = prev + (cur - prev) // 2
            assert prev < middle < cur  # else the snooze() trick failed
            prev = cur
            t = self._storage.loadBefore(oid, p64(middle))
            # assertTrue replaces the deprecated assert_ alias, which was
            # removed in Python 3.12.
            self.assertTrue(t is not None)
            data, start, end = t
            self.assertEqual(revs[i - 1][0], data)
            self.assertEqual(tid, end)
Example 3
0
    def checkPackLotsWhileWriting(self):
        # This is like the other pack-while-writing tests, except it packs
        # repeatedly until the client thread is done.  At the time it was
        # introduced, it reliably provoked
        #     CorruptedError:  ... transaction with checkpoint flag set
        # in the ZEO flavor of the FileStorage tests.

        db = DB(self._storage)
        conn = db.open()
        root = conn.root()

        # Materialize the indices: they are iterated several times and
        # handed to ClientThread (matches the sibling tests in this file).
        choices = list(range(10))
        for i in choices:
            root[i] = MinPO(i)
        transaction.commit()

        snooze()
        packt = time.time()

        for dummy in choices:
            for i in choices:
                root[i].value = MinPO(i)
                transaction.commit()

        NUM_LOOP_TRIP = 100
        timer = ElapsedTimer(time.time())
        thread = ClientThread(db, choices, NUM_LOOP_TRIP, timer, 0)
        thread.start()
        # is_alive() replaces isAlive(), which was removed in Python 3.9.
        while thread.is_alive():
            db.pack(packt)
            snooze()
            packt = time.time()
        thread.join()

        self._sanity_check()
Example 4
0
    def checkLoadBefore(self):
        """Commit ten revisions of a single object, then verify that
        loadBefore() returns each historical (non-current) revision.
        """
        oid = self._storage.new_oid()
        history = []
        serial = None
        for value in range(10):
            # Two snooze() calls guarantee successive commit timestamps
            # differ by at least two, so a timestamp unambiguously between
            # any two successive revisions must exist.
            snooze()
            snooze()
            serial = self._dostore(oid, serial, data=MinPO(value))
            history.append(load_current(self._storage, oid))

        earlier = u64(history[0][1])
        for idx in range(1, 10):
            tid = history[idx][1]
            later = u64(tid)
            midpoint = earlier + (later - earlier) // 2
            assert earlier < midpoint < later  # else the snooze() trick failed
            earlier = later
            found = self._storage.loadBefore(oid, p64(midpoint))
            self.assertTrue(found is not None)
            data, start, end = found
            self.assertEqual(history[idx - 1][0], data)
            self.assertEqual(tid, end)
Example 5
0
 def checkPackWithGCOnDestinationAfterRestore(self):
     """Copy all transactions to the destination storage, pack it, and
     verify garbage collection removed the unreachable objects while the
     root survived.
     """
     raises = self.assertRaises
     db = DB(self._storage)
     conn = db.open()
     root = conn.root()
     # Build the chain root -> obj1 -> obj2 ...
     root.obj = obj1 = MinPO(1)
     t = transaction.get()
     t.note('root -> obj')
     t.commit()
     root.obj.obj = obj2 = MinPO(2)
     t = transaction.get()
     t.note('root -> obj -> obj')
     t.commit()
     # ... then sever it at the root, orphaning both objects.
     del root.obj
     t = transaction.get()
     t.note('root -X->')
     t.commit()
     # Replay everything into the destination storage.
     self._dst.copyTransactionsFrom(self._storage)
     # Pack the destination as of "now".
     snooze()
     self._dst.pack(time.time(), referencesf)
     # The root is still loadable; the two orphans are not.
     data, serial = self._dst.load(root._p_oid, '')
     raises(KeyError, self._dst.load, obj1._p_oid, '')
     raises(KeyError, self._dst.load, obj2._p_oid, '')
Example 6
0
    def checkPackLotsWhileWriting(self):
        # This is like the other pack-while-writing tests, except it packs
        # repeatedly until the client thread is done.  At the time it was
        # introduced, it reliably provoked
        #     CorruptedError:  ... transaction with checkpoint flag set
        # in the ZEO flavor of the FileStorage tests.

        db = DB(self._storage)
        conn = db.open()
        root = conn.root()

        choices = list(range(10))
        for i in choices:
            root[i] = MinPO(i)
        transaction.commit()

        snooze()
        packt = time.time()

        for dummy in choices:
            for i in choices:
                root[i].value = MinPO(i)
                transaction.commit()

        NUM_LOOP_TRIP = 100
        timer = ElapsedTimer(time.time())
        thread = ClientThread(db, choices, NUM_LOOP_TRIP, timer, 0)
        thread.start()
        # is_alive() replaces isAlive(), which was removed in Python 3.9.
        while thread.is_alive():
            db.pack(packt)
            snooze()
            packt = time.time()
        thread.join()

        self._sanity_check()
Example 7
0
 def checkPackWithGCOnDestinationAfterRestore(self):
     """Restore all transactions into ``self._dst``, pack it, and check
     that packing garbage-collects unreachable objects while keeping the
     root loadable.
     """
     raises = self.assertRaises
     db = DB(self._storage)
     conn = db.open()
     root = conn.root()
     # Build the chain root -> obj1 -> obj2 ...
     root.obj = obj1 = MinPO(1)
     txn = transaction.get()
     txn.note("root -> obj")
     txn.commit()
     root.obj.obj = obj2 = MinPO(2)
     txn = transaction.get()
     txn.note("root -> obj -> obj")
     txn.commit()
     # ... then make both objects unreachable from the root.
     del root.obj
     txn = transaction.get()
     txn.note("root -X->")
     txn.commit()
     # Now copy the transactions to the destination
     self._dst.copyTransactionsFrom(self._storage)
     # Now pack the destination.
     snooze()
     self._dst.pack(time.time(), referencesf)
     # And check to see that the root object exists, but not the other
     # objects.
     data, serial = self._dst.load(root._p_oid, "")
     raises(KeyError, self._dst.load, obj1._p_oid, "")
     raises(KeyError, self._dst.load, obj2._p_oid, "")
Example 8
0
    def dont_checkPackUndoLogUndoable(self):
        """Disabled test: print the undo log before and after packing.

        Intended to assert on the undo log's content, but every storage
        reports slightly different records, so only the pre-pack length
        is asserted.  Re-enable once the expected content is agreed on.
        """
        # A disabled test. I wanted to test that the content of the
        # undo log was consistent, but every storage appears to
        # include something slightly different. If the result of this
        # method is only used to fill a GUI then this difference
        # doesnt matter.  Perhaps re-enable this test once we agree
        # what should be asserted.

        self._initroot()
        # Create two `persistent' object
        obj1 = self._newobj()
        oid1 = obj1.getoid()
        obj1.value = 1
        obj2 = self._newobj()
        oid2 = obj2.getoid()
        obj2.value = 2

        # Commit the first revision of each of them
        revid11 = self._dostoreNP(oid1, data=pdumps(obj1), description="1-1")
        revid22 = self._dostoreNP(oid2, data=pdumps(obj2), description="2-2")

        # remember the time. everything above here will be packed away
        snooze()
        packtime = time.time()
        snooze()
        # Commit two revisions of the first object
        obj1.value = 3
        revid13 = self._dostoreNP(oid1,
                                  revid=revid11,
                                  data=pdumps(obj1),
                                  description="1-3")
        obj1.value = 4
        self._dostoreNP(oid1,
                        revid=revid13,
                        data=pdumps(obj1),
                        description="1-4")
        # Commit one revision of the second object
        obj2.value = 5
        self._dostoreNP(oid2,
                        revid=revid22,
                        data=pdumps(obj2),
                        description="2-5")
        # Now pack
        # Six transactions so far: root creation, 1-1, 2-2, 1-3, 1-4, 2-5.
        self.assertEqual(6, len(self._storage.undoLog()))
        print('\ninitial undoLog was')
        for r in self._storage.undoLog():
            print(r)
        self._storage.pack(packtime, referencesf)
        # The undo log contains only two undoable transaction.
        print('\nafter packing undoLog was')
        for r in self._storage.undoLog():
            print(r)
Example 9
0
    def dont_checkPackUndoLogUndoable(self):
        """Disabled test: dump the undo log before and after a pack.

        The intent was to assert on the undo log's content, but every
        storage reports slightly different records, so nothing beyond the
        pre-pack length is asserted.  Re-enable once the expected content
        is agreed on.
        """
        self._initroot()
        # Two persistent objects with distinct oids.
        first = self._newobj()
        first_oid = first.getoid()
        first.value = 1
        second = self._newobj()
        second_oid = second.getoid()
        second.value = 2

        # Commit the initial revision of each.
        revid11 = self._dostoreNP(first_oid, data=pdumps(first),
                                  description="1-1")
        revid22 = self._dostoreNP(second_oid, data=pdumps(second),
                                  description="2-2")

        # Everything committed before this point will be packed away.
        snooze()
        packtime = time.time()
        snooze()

        # Two more revisions of the first object ...
        first.value = 3
        revid13 = self._dostoreNP(first_oid, revid=revid11,
                                  data=pdumps(first), description="1-3")
        first.value = 4
        self._dostoreNP(first_oid, revid=revid13,
                        data=pdumps(first), description="1-4")
        # ... and one more of the second.
        second.value = 5
        self._dostoreNP(second_oid, revid=revid22,
                        data=pdumps(second), description="2-5")

        # Six transactions so far; pack away everything before packtime.
        self.assertEqual(6, len(self._storage.undoLog()))
        print('\ninitial undoLog was')
        for record in self._storage.undoLog():
            print(record)
        self._storage.pack(packtime, referencesf)
        print('\nafter packing undoLog was')
        for record in self._storage.undoLog():
            print(record)
Example 10
0
 def checkPackUndoLog(self):
     """Pack away an early transaction and verify the undo log shrinks
     to just the surviving (post-packtime) transaction.
     """
     self._initroot()
     # Create a `persistent' object
     obj = self._newobj()
     oid = obj.getoid()
     obj.value = 1
     # Commit two different revisions
     revid1 = self._dostoreNP(oid, data=pdumps(obj))
     obj.value = 2
     # packtime falls strictly between the two commits.
     snooze()
     packtime = time.time()
     snooze()
     self._dostoreNP(oid, revid=revid1, data=pdumps(obj))
     # Now pack the first transaction
     self.assertEqual(3, len(self._storage.undoLog()))
     self._storage.pack(packtime, referencesf)
     # The undo log contains only the most recent transaction
     self.assertEqual(1, len(self._storage.undoLog()))
Example 11
0
 def checkPackUndoLog(self):
     """Pack away the first of two revisions and check that the undo log
     shrinks accordingly."""
     self._initroot()
     # One persistent object, two committed revisions.
     target = self._newobj()
     oid = target.getoid()
     target.value = 1
     first_revid = self._dostoreNP(oid, data=pdumps(target))
     target.value = 2
     # Capture a pack time strictly between the two commits.
     snooze()
     packtime = time.time()
     snooze()
     self._dostoreNP(oid, revid=first_revid, data=pdumps(target))
     # Three undoable transactions so far (root + two revisions).
     self.assertEqual(3, len(self._storage.undoLog()))
     self._storage.pack(packtime, referencesf)
     # Only the most recent transaction survives the pack.
     self.assertEqual(1, len(self._storage.undoLog()))
Example 12
0
    def checkPackVersionsInPast(self):
        """Pack with a packtime in the past while version data and undo
        backpointers are both live, then commit the version and pack again.

        NOTE(review): uses the old ZODB "versions" API
        (``db.open(version=...)``, ``commitVersion``), removed in later
        ZODB releases -- confirm the storage under test supports it.
        """
        db = DB(self._storage)
        cn = db.open(version="testversion")
        root = cn.root()

        obj = root["obj"] = MinPO("obj")
        root["obj2"] = MinPO("obj2")
        txn = transaction.get()
        txn.note("create 2 objs in version")
        txn.commit()

        obj.value = "77"
        txn = transaction.get()
        txn.note("modify obj in version")
        txn.commit()

        # Pack time precedes the undo below, so pack must chase the
        # resulting backpointers as well as the version data.
        t0 = time.time()
        snooze()

        # undo the modification to generate a mix of backpointers
        # and versions for pack to chase
        info = db.undoInfo()
        db.undo(info[0]["id"])
        txn = transaction.get()
        txn.note("undo modification")
        txn.commit()

        self._storage.pack(t0, referencesf)

        db.commitVersion("testversion")
        txn = transaction.get()
        txn.note("commit version")
        txn.commit()

        cn = db.open()
        root = cn.root()
        root["obj"] = "no version"

        txn = transaction.get()
        txn.note("modify obj")
        txn.commit()

        # Pack as of "now"; must not fail with the above history in place.
        self._storage.pack(time.time(), referencesf)
Example 13
0
    def checkPackLotsWhileWriting(self):
        # This is like the other pack-while-writing tests, except it packs
        # repeatedly until the client thread is done.  At the time it was
        # introduced, it reliably provoked
        #     CorruptedError:  ... transaction with checkpoint flag set
        # in the ZEO flavor of the FileStorage tests.

        db = DB(self._storage)
        conn = db.open()
        root = conn.root()

        # Materialize the indices: they are iterated several times and
        # handed to ClientThread (matches the sibling tests in this file).
        choices = list(range(10))
        for i in choices:
            root[i] = MinPO(i)
        transaction.commit()

        snooze()
        packt = time.time()

        for dummy in choices:
            for i in choices:
                root[i].value = MinPO(i)
                transaction.commit()

        NUM_LOOP_TRIP = 100
        timer = ElapsedTimer(time.time())
        thread = ClientThread(db, choices, NUM_LOOP_TRIP, timer, 0)
        thread.start()
        # is_alive() replaces isAlive(), which was removed in Python 3.9.
        while thread.is_alive():
            db.pack(packt)
            snooze()
            packt = time.time()
        thread.join()

        # Iterate over the storage to make sure it's sane.
        if not hasattr(self._storage, "iterator"):
            return
        it = self._storage.iterator()
        for txn in it:
            for data in txn:
                pass
        it.close()
Example 14
0
 def checkPackWithGCOnDestinationAfterRestore(self):
     """Restore all transactions into ``self._dst``, add one fresh
     transaction there, pack, and verify GC removed the unreachable
     objects while the root survives.
     """
     raises = self.assertRaises
     closing = self._closing
     db = closing(DB(self._storage))
     conn = closing(db.open())
     root = conn.root()
     # Build the chain root -> obj1 -> obj2 ...
     root.obj = obj1 = MinPO(1)
     txn = transaction.get()
     txn.note(u'root -> obj')
     txn.commit()
     root.obj.obj = obj2 = MinPO(2)
     txn = transaction.get()
     txn.note(u'root -> obj -> obj')
     txn.commit()
     # ... then make both objects unreachable from the root.
     del root.obj
     txn = transaction.get()
     txn.note(u'root -X->')
     txn.commit()
     # Now copy the transactions to the destination
     self._dst.copyTransactionsFrom(self._storage)
     # If the source storage is a history-free storage, all
     # of the transactions are now marked as packed in the
     # destination storage.  To trigger a pack, we have to
     # add another transaction to the destination that is
     # not packed.
     db2 = closing(DB(self._dst))
     conn2 = closing(db2.open())
     conn2.root().extra = 0
     txn = transaction.get()
     txn.note(u'root.extra = 0')
     txn.commit()
     # Now pack the destination.
     snooze()
     self._dst.pack(time.time(), referencesf)
     # And check to see that the root object exists, but not the other
     # objects.
     _data, _serial = self._dst.load(root._p_oid, '')
     raises(KeyError, self._dst.load, obj1._p_oid, '')
     raises(KeyError, self._dst.load, obj2._p_oid, '')
Example 15
0
 def checkPackWithGCOnDestinationAfterRestore(self):
     """Copy every transaction to the destination storage, commit one
     extra unpacked transaction there, pack, and verify the orphaned
     objects were collected while the root survived.
     """
     raises = self.assertRaises
     closing = self._closing
     db = closing(DB(self._storage))
     conn = closing(db.open())
     root = conn.root()
     # Build root -> obj1 -> obj2, then cut the chain at the root.
     root.obj = obj1 = MinPO(1)
     t = transaction.get()
     t.note(u'root -> obj')
     t.commit()
     root.obj.obj = obj2 = MinPO(2)
     t = transaction.get()
     t.note(u'root -> obj -> obj')
     t.commit()
     del root.obj
     t = transaction.get()
     t.note(u'root -X->')
     t.commit()
     # Replay everything into the destination storage.
     self._dst.copyTransactionsFrom(self._storage)
     # A history-free source leaves every copied transaction marked as
     # packed in the destination, so commit one unpacked transaction
     # there to give pack() something to do.
     db2 = closing(DB(self._dst))
     conn2 = closing(db2.open())
     conn2.root().extra = 0
     t = transaction.get()
     t.note(u'root.extra = 0')
     t.commit()
     # Pack the destination as of "now".
     snooze()
     self._dst.pack(time.time(), referencesf)
     # The root is still loadable; the two orphans are not.
     _data, _serial = self._dst.load(root._p_oid, '')
     raises(KeyError, self._dst.load, obj1._p_oid, '')
     raises(KeyError, self._dst.load, obj2._p_oid, '')
Example 16
0
    def checkRedundantPack(self):
        """Pack twice, the second time with an earlier packtime, and verify
        no reachable data is lost."""
        # It is an error to perform a pack with a packtime earlier
        # than a previous packtime.  The storage can't do a full
        # traversal as of the packtime, because the previous pack may
        # have removed revisions necessary for a full traversal.

        # It should be simple to test that a storage error is raised,
        # but this test case goes to the trouble of constructing a
        # scenario that would lose data if the earlier packtime was
        # honored.

        self._initroot()

        db = DB(self._storage)
        conn = db.open()
        root = conn.root()

        root["d"] = d = PersistentMapping()
        transaction.commit()
        snooze()

        # This object's only revision predates packt1; a pack honoring
        # packt1 after the packt2 pack would wrongly remove it.
        obj = d["obj"] = C()
        obj.value = 1
        transaction.commit()
        snooze()
        packt1 = time.time()
        lost_oid = obj._p_oid

        obj = d["anotherobj"] = C()
        obj.value = 2
        transaction.commit()
        snooze()
        packt2 = time.time()

        db.pack(packt2)
        # BDBStorage allows the second pack, but doesn't lose data.
        try:
            db.pack(packt1)
        except StorageError:
            pass
        # This object would be removed by the second pack, even though
        # it is reachable.  Loading it must still succeed.
        load_current(self._storage, lost_oid)
Example 17
0
    def checkRedundantPack(self):
        """Packing with a packtime earlier than a previous pack's must not
        lose reachable data.

        A storage may refuse the second pack with a StorageError (it
        cannot safely traverse as of the earlier time, since the first
        pack may have removed revisions that traversal needs), or it may
        allow it; either way the object committed between the two pack
        times must remain loadable.
        """
        self._initroot()

        db = DB(self._storage)
        conn = db.open()
        root = conn.root()

        root["d"] = mapping = PersistentMapping()
        transaction.commit()
        snooze()

        # This object's only revision predates packt1 -- honoring packt1
        # after the packt2 pack would wrongly remove it.
        victim = mapping["obj"] = C()
        victim.value = 1
        transaction.commit()
        snooze()
        packt1 = time.time()
        lost_oid = victim._p_oid

        newcomer = mapping["anotherobj"] = C()
        newcomer.value = 2
        transaction.commit()
        snooze()
        packt2 = time.time()

        db.pack(packt2)
        # BDBStorage allows the second pack, but doesn't lose data.
        try:
            db.pack(packt1)
        except StorageError:
            pass
        # The object is reachable, so loading it must still succeed.
        self._storage.load(lost_oid, "")
Example 18
0
    def _PackWhileWriting(self, pack_now):
        """Exercise storage locking by packing while client threads write.

        Generates many revisions so the pack takes a long time, starts
        three ClientThread writers, packs (as of "now" when *pack_now* is
        true, else as of the earlier ``packt``), then verifies all threads
        finished and the storage is still sane.
        """
        db = DB(self._storage)
        conn = db.open()
        root = conn.root()

        for i in range(10):
            root[i] = MinPO(i)
        transaction.commit()

        snooze()
        packt = time.time()

        choices = list(range(10))
        for dummy in choices:
            for i in choices:
                root[i].value = MinPO(i)
                transaction.commit()

        # How many client threads should we run, and how long should we
        # wait for them to finish?  Hard to say.  Running 4 threads and
        # waiting 30 seconds too often left a thread still alive on Tim's
        # Win98SE box, during ZEO flavors of this test.  Those tend to
        # run one thread at a time to completion, and take about 10 seconds
        # per thread.  There doesn't appear to be a compelling reason to
        # run that many threads.  Running 3 threads and waiting up to a
        # minute seems to work well in practice.  The ZEO tests normally
        # finish faster than that, and the non-ZEO tests very much faster
        # than that.
        NUM_LOOP_TRIP = 50
        timer = ElapsedTimer(time.time())
        threads = [ClientThread(db, choices, NUM_LOOP_TRIP, timer, i)
                   for i in range(3)]
        for t in threads:
            t.start()

        if pack_now:
            db.pack(time.time())
        else:
            db.pack(packt)

        for t in threads:
            t.join(60)
        # is_alive() replaces isAlive(), which was removed in Python 3.9.
        liveness = [t.is_alive() for t in threads]
        if True in liveness:
            # They should have finished by now.
            print('Liveness:', liveness)
            # Combine the outcomes, and sort by start time.
            outcomes = []
            for t in threads:
                outcomes.extend(t.outcomes)
            # each outcome list has as many of these as a loop trip got thru:
            #     thread_id
            #     elapsed millis at loop top
            #     elapsed millis at attempt to assign to self.root[index]
            #     index into self.root getting replaced
            #     elapsed millis when outcome known
            #     'OK' or 'Conflict'
            #     True if we got beyond this line, False if it raised an
            #         exception (one possible Conflict cause):
            #             self.root[index].value = MinPO(j)
            # Sort by (elapsed-at-loop-top, thread id).  The original code
            # passed a Python 2 cmp function positionally, which
            # list.sort() no longer accepts (and the cmp builtin is gone).
            outcomes.sort(key=lambda outcome: (outcome[1], outcome[0]))
            counts = [0] * 4
            for outcome in outcomes:
                n = len(outcome)
                assert n >= 2
                tid = outcome[0]
                print('tid:%d top:%5d' % (tid, outcome[1]), end=' ')
                if n > 2:
                    print('commit:%5d' % outcome[2], end=' ')
                    if n > 3:
                        print('index:%2d' % outcome[3], end=' ')
                        if n > 4:
                            print('known:%5d' % outcome[4], end=' ')
                            if n > 5:
                                print('%8s' % outcome[5], end=' ')
                                if n > 6:
                                    print('assigned:%5s' % outcome[6], end=' ')
                counts[tid] += 1
                if counts[tid] == NUM_LOOP_TRIP:
                    print('thread %d done' % tid, end=' ')
                print()

            self.fail('a thread is still alive')

        self._sanity_check()

        db.close()
Example 19
0
    def _PackWhileWriting(self, pack_now):
        # A storage should allow some reading and writing during
        # a pack.  This test attempts to exercise locking code
        # in the storage to test that it is safe.  It generates
        # a lot of revisions, so that pack takes a long time.

        db = DB(self._storage)
        conn = db.open()
        root = conn.root()

        for i in range(10):
            root[i] = MinPO(i)
        transaction.commit()

        snooze()
        packt = time.time()

        choices = list(range(10))
        for dummy in choices:
            for i in choices:
                root[i].value = MinPO(i)
                transaction.commit()

        # How many client threads should we run, and how long should we
        # wait for them to finish?  Hard to say.  Running 4 threads and
        # waiting 30 seconds too often left a thread still alive on Tim's
        # Win98SE box, during ZEO flavors of this test.  Those tend to
        # run one thread at a time to completion, and take about 10 seconds
        # per thread.  There doesn't appear to be a compelling reason to
        # run that many threads.  Running 3 threads and waiting up to a
        # minute seems to work well in practice.  The ZEO tests normally
        # finish faster than that, and the non-ZEO tests very much faster
        # than that.
        NUM_LOOP_TRIP = 50
        timer = ElapsedTimer(time.time())
        threads = [
            ClientThread(db, choices, NUM_LOOP_TRIP, timer, i)
            for i in range(3)
        ]
        for t in threads:
            t.start()

        if pack_now:
            db.pack(time.time())
        else:
            db.pack(packt)

        for t in threads:
            t.join(60)
        liveness = [t.isAlive() for t in threads]
        if True in liveness:
            # They should have finished by now.
            print('Liveness:', liveness)
            # Combine the outcomes, and sort by start time.
            outcomes = []
            for t in threads:
                outcomes.extend(t.outcomes)
            # each outcome list has as many of these as a loop trip got thru:
            #     thread_id
            #     elapsed millis at loop top
            #     elapsed millis at attempt to assign to self.root[index]
            #     index into self.root getting replaced
            #     elapsed millis when outcome known
            #     'OK' or 'Conflict'
            #     True if we got beyond this line, False if it raised an
            #         exception (one possible Conflict cause):
            #             self.root[index].value = MinPO(j)
            def cmp_by_time(a, b):
                return cmp((a[1], a[0]), (b[1], b[0]))

            outcomes.sort(cmp_by_time)
            counts = [0] * 4
            for outcome in outcomes:
                n = len(outcome)
                assert n >= 2
                tid = outcome[0]
                print('tid:%d top:%5d' % (tid, outcome[1]), end=' ')
                if n > 2:
                    print('commit:%5d' % outcome[2], end=' ')
                    if n > 3:
                        print('index:%2d' % outcome[3], end=' ')
                        if n > 4:
                            print('known:%5d' % outcome[4], end=' ')
                            if n > 5:
                                print('%8s' % outcome[5], end=' ')
                                if n > 6:
                                    print('assigned:%5s' % outcome[6], end=' ')
                counts[tid] += 1
                if counts[tid] == NUM_LOOP_TRIP:
                    print('thread %d done' % tid, end=' ')
                print()

            self.fail('a thread is still alive')

        self._sanity_check()