Example 1
    def checkPackLotsWhileWriting(self):
        # This is like the other pack-while-writing tests, except it packs
        # repeatedly until the client thread is done.  At the time it was
        # introduced, it reliably provoked
        #     CorruptedError:  ... transaction with checkpoint flag set
        # in the ZEO flavor of the FileStorage tests.

        db = DB(self._storage)
        conn = db.open()
        root = conn.root()

        choices = range(10)
        for i in choices:
            root[i] = MinPO(i)
        transaction.commit()

        snooze()
        packt = time.time()

        for dummy in choices:
            for i in choices:
                root[i].value = MinPO(i)
                transaction.commit()

        NUM_LOOP_TRIP = 100
        timer = ElapsedTimer(time.time())
        thread = ClientThread(db, choices, NUM_LOOP_TRIP, timer, 0)
        thread.start()
        while thread.is_alive():
            db.pack(packt)
            snooze()
            packt = time.time()
        thread.join()

        self._sanity_check()
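All of these examples exercise MinPO from ZODB.tests.MinPO. As a rough sketch (a simplified assumption, not the verbatim ZODB source), it is just a Persistent object that wraps a single value and compares equal by that value:

    # Rough sketch of ZODB.tests.MinPO; the real helper also defines
    # ordering and hashing, omitted here.
    from persistent import Persistent

    class MinPO(Persistent):
        """A minimal persistent object holding a single value."""

        def __init__(self, value=None):
            self.value = value

        def __eq__(self, other):
            return isinstance(other, MinPO) and self.value == other.value

        def __repr__(self):
            return 'MinPO(%s)' % self.value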
Example 2
    def doreadconflict(self, db, mvcc):
        tm1 = transaction.TransactionManager()
        conn = db.open(mvcc=mvcc, transaction_manager=tm1)
        r1 = conn.root()
        obj = MinPO('root')
        r1["p"] = obj
        obj = r1["p"]
        obj.child1 = MinPO('child1')
        tm1.get().commit()

        # start a new transaction with a new connection
        tm2 = transaction.TransactionManager()
        cn2 = db.open(mvcc=mvcc, transaction_manager=tm2)
        r2 = cn2.root()

        self.assertEqual(r1._p_serial, r2._p_serial)

        obj.child2 = MinPO('child2')
        tm1.get().commit()

        # resume the transaction using cn2
        obj = r2["p"]

        # An attempt to access obj.child1 should fail with an RCE
        # below if the connection isn't using mvcc, because r2 was read
        # earlier in the transaction and obj was modified by the other
        # transaction.

        obj.child1
        return obj
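A caller of this helper would presumably pass mvcc=True to get a consistent older view and mvcc=False to provoke a read conflict on the obj.child1 access. A hypothetical driver (the method names and self.db are assumptions, not part of the excerpt):

    # Hypothetical callers of doreadconflict, shown for illustration only.
    def checkReadConflictWithoutMVCC(self):
        from ZODB.POSException import ReadConflictError
        # Without MVCC the stale read inside doreadconflict raises an RCE.
        self.assertRaises(ReadConflictError,
                          self.doreadconflict, self.db, False)

    def checkNoReadConflictWithMVCC(self):
        # With MVCC the second connection sees its snapshot: child1 exists,
        # child2 (committed later by the other transaction) does not.
        obj = self.doreadconflict(self.db, True)
        self.assertEqual(obj.child1.value, 'child1')
        self.assertFalse(hasattr(obj, 'child2'))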
Example 3
 def checkConflicts(self):
     oid = self._storage.new_oid()
     revid1 = self._dostore(oid, data=MinPO(11))
     self._dostore(oid, revid=revid1, data=MinPO(12))
     self.assertRaises(POSException.ConflictError,
                       self._dostore,
                       oid,
                       revid=revid1,
                       data=MinPO(13))
Example 4
 def checkSimpleIteration(self):
     # Store a bunch of revisions of a single object
     self._oid = oid = self._storage.new_oid()
     revid1 = self._dostore(oid, data=MinPO(11))
     revid2 = self._dostore(oid, revid=revid1, data=MinPO(12))
     revid3 = self._dostore(oid, revid=revid2, data=MinPO(13))
     # Now iterate over all the transactions and compare carefully
     txniter = self._storage.iterator()
     self.iter_verify(txniter, [revid1, revid2, revid3], 11)
Example 5
 def dowork(self, version=''):
     c = self.db.open(version)
     r = c.root()
     o = r[time.time()] = MinPO(0)
     transaction.commit()
     for i in range(25):
         o.value = MinPO(i)
         transaction.commit()
         o = o.value
     c.close()
Example 6
 def checkCommitToNonVersion(self):
     eq = self.assertEqual
     oid, version = self._setup_version()
     data, revid = self._storage.load(oid, version)
     eq(zodb_unpickle(data), MinPO(54))
     data, revid = self._storage.load(oid, '')
     eq(zodb_unpickle(data), MinPO(51))
     self._commitVersion(version, '')
     data, revid = self._storage.load(oid, '')
     eq(zodb_unpickle(data), MinPO(54))
Example 7
 def checkVersionLock(self):
     oid = self._storage.new_oid()
     revid = self._dostore(oid, data=MinPO(11))
     version = 'test-version'
     revid = self._dostore(oid, revid=revid, data=MinPO(12),
                           version=version)
     self.assertRaises(POSException.VersionLockError,
                       self._dostore,
                       oid, revid=revid, data=MinPO(14),
                       version='another-version')
Example 8
 def checkWriteAfterAbort(self):
     oid = self._storage.new_oid()
     t = TransactionMetaData()
     self._storage.tpc_begin(t)
     self._storage.store(oid, ZERO, zodb_pickle(MinPO(5)), '', t)
     # Now abort this transaction
     self._storage.tpc_abort(t)
     # Now start all over again
     oid = self._storage.new_oid()
     self._dostore(oid=oid, data=MinPO(6))
Example 9
 def checkLen(self):
     # len(storage) reports the number of objects.
     # check it is zero when empty
     self.assertEqual(len(self._storage), 0)
     # check it is correct when the storage contains two objects.
     # len may also be zero, for storages that do not keep track
     # of this number
     self._dostore(data=MinPO(22))
     self._dostore(data=MinPO(23))
     self.assertTrue(len(self._storage) in [0, 2])
Example 10
    def checkCommitToOtherVersion(self):
        eq = self.assertEqual
        oid1, version1 = self._setup_version('one')

        data, revid1 = self._storage.load(oid1, version1)
        eq(zodb_unpickle(data), MinPO(54))
        oid2, version2 = self._setup_version('two')
        data, revid2 = self._storage.load(oid2, version2)
        eq(zodb_unpickle(data), MinPO(54))

        # make sure we see the non-version data when appropriate
        data, revid2 = self._storage.load(oid1, version2)
        eq(zodb_unpickle(data), MinPO(51))
        data, revid2 = self._storage.load(oid2, version1)
        eq(zodb_unpickle(data), MinPO(51))
        data, revid2 = self._storage.load(oid1, '')
        eq(zodb_unpickle(data), MinPO(51))

        # Okay, now let's commit object1 to version2
        oids = self._commitVersion(version1, version2)
        eq(len(oids), 1)
        eq(oids[0], oid1)
        data, revid = self._storage.load(oid1, version2)
        eq(zodb_unpickle(data), MinPO(54))
        data, revid = self._storage.load(oid2, version2)
        eq(zodb_unpickle(data), MinPO(54))

        # an object can only exist in one version, so a load from
        # version1 should now give the non-version data
        data, revid2 = self._storage.load(oid1, version1)
        eq(zodb_unpickle(data), MinPO(51))

        # as should a version that has never been used
        data, revid2 = self._storage.load(oid1, 'bela lugosi')
        eq(zodb_unpickle(data), MinPO(51))
Example 11
 def checkLoadSerial(self):
     oid = self._storage.new_oid()
     revid = ZERO
     revisions = {}
     for i in range(31, 38):
         revid = self._dostore(oid, revid=revid, data=MinPO(i))
         revisions[revid] = MinPO(i)
     # Now make sure all the revisions have the correct value
     for revid, value in revisions.items():
         data = self._storage.loadSerial(oid, revid)
         self.assertEqual(zodb_unpickle(data), value)
Example 12
 def checkCommitVersionSerialno(self):
     oid = self._storage.new_oid()
     revid1 = self._dostore(oid, data=MinPO(12))
     revid2 = self._dostore(oid, revid=revid1, data=MinPO(13),
                            version="version")
     oids = self._commitVersion("version", "")
     self.assertEqual([oid], oids)
     data, revid3 = self._storage.load(oid, "")
     # use repr() to avoid getting binary data in a traceback on error
     self.assertNotEqual(repr(revid1), repr(revid3))
     self.assertNotEqual(repr(revid2), repr(revid3))
Example 13
    def checkPackAfterUndoDeletion(self):
        db = DB(self._storage)
        cn = db.open()
        root = cn.root()

        pack_times = []

        def set_pack_time():
            pack_times.append(time.time())
            snooze()

        root["key0"] = MinPO(0)
        root["key1"] = MinPO(1)
        root["key2"] = MinPO(2)
        txn = transaction.get()
        txn.note("create 3 keys")
        txn.commit()

        set_pack_time()

        del root["key1"]
        txn = transaction.get()
        txn.note("delete 1 key")
        txn.commit()

        set_pack_time()

        root._p_deactivate()
        cn.sync()
        self.assert_(listeq(root.keys(), ["key0", "key2"]))

        L = db.undoInfo()
        db.undo(L[0]["id"])
        txn = transaction.get()
        txn.note("undo deletion")
        txn.commit()

        set_pack_time()

        root._p_deactivate()
        cn.sync()
        self.assert_(listeq(root.keys(), ["key0", "key1", "key2"]))

        for t in pack_times:
            self._storage.pack(t, referencesf)

            root._p_deactivate()
            cn.sync()
            self.assert_(listeq(root.keys(), ["key0", "key1", "key2"]))
            for i in range(3):
                obj = root["key%d" % i]
                self.assertEqual(obj.value, i)
            root.items()
            self._inter_pack_pause()
Example 14
    def checkUndoAbortVersion(self):
        def load_value(oid, version=''):
            data, revid = self._storage.load(oid, version)
            return zodb_unpickle(data).value

        # create a bunch of packable transactions
        oid = self._storage.new_oid()
        revid = '\000' * 8
        for i in range(3):
            revid = self._dostore(oid, revid, description='packable%d' % i)
        pt = time.time()
        time.sleep(1)

        oid1 = self._storage.new_oid()
        version = 'version'
        revid1 = self._dostore(oid1, data=MinPO(0), description='create1')
        revid2 = self._dostore(oid1,
                               data=MinPO(1),
                               revid=revid1,
                               version=version,
                               description='version1')
        self._dostore(oid1,
                      data=MinPO(2),
                      revid=revid2,
                      version=version,
                      description='version2')
        self._dostore(description='create2')

        self._abortVersion(version)

        info = self._storage.undoInfo()
        t_id = info[0]['id']

        self.assertEqual(load_value(oid1), 0)
        # after abort, we should see non-version data
        self.assertEqual(load_value(oid1, version), 0)

        self._undo(t_id, note="undo abort version")

        self.assertEqual(load_value(oid1), 0)
        # the undo will re-create the version
        self.assertEqual(load_value(oid1, version), 2)

        info = self._storage.undoInfo()
        t_id = info[0]['id']

        self._storage.pack(pt, referencesf)

        self._undo(t_id, note="undo undo")

        # undo of undo will put us back where we started
        self.assertEqual(load_value(oid1), 0)
        # after abort, we should see non-version data
        self.assertEqual(load_value(oid1, version), 0)
Example 15
 def checkModifyAfterAbortVersion(self):
     eq = self.assertEqual
     oid, version = self._setup_version()
     self._abortVersion(version)
     data, revid = self._storage.load(oid, '')
     # And modify it a few times
     revid = self._dostore(oid, revid=revid, data=MinPO(52))
     revid = self._dostore(oid, revid=revid, data=MinPO(53))
     revid = self._dostore(oid, revid=revid, data=MinPO(54))
     data, newrevid = self._storage.load(oid, '')
     eq(newrevid, revid)
     eq(zodb_unpickle(data), MinPO(54))
Example 16
 def checkVersionedLoadErrors(self):
     oid = self._storage.new_oid()
     version = 'test-version'
     revid = self._dostore(oid, data=MinPO(11))
     revid = self._dostore(oid, revid=revid, data=MinPO(12),
                           version=version)
     # Try to load a bogus oid
     self.assertRaises(KeyError,
                       self._storage.load,
                       self._storage.new_oid(), '')
     data, revid = self._storage.load(oid, 'bogus')
     self.assertEqual(zodb_unpickle(data), MinPO(11))
Example 17
    def checkUndoCommitVersion(self):
        def load_value(oid, version=''):
            data, revid = self._storage.load(oid, version)
            return zodb_unpickle(data).value

        # create a bunch of packable transactions
        oid = self._storage.new_oid()
        revid = '\000' * 8
        for i in range(4):
            revid = self._dostore(oid, revid, description='packable%d' % i)
        pt = time.time()
        time.sleep(1)

        oid1 = self._storage.new_oid()
        version = 'version'
        revid1 = self._dostore(oid1, data=MinPO(0), description='create1')
        revid2 = self._dostore(oid1, data=MinPO(1), revid=revid1,
                               version=version, description='version1')
        self._dostore(oid1, data=MinPO(2), revid=revid2,
                      version=version, description='version2')
        self._dostore(description='create2')

        t = transaction.Transaction()
        t.description = 'commit version'
        self._storage.tpc_begin(t)
        self._storage.commitVersion(version, '', t)
        self._storage.tpc_vote(t)
        self._storage.tpc_finish(t)

        info = self._storage.undoInfo()
        t_id = info[0]['id']

        self.assertEqual(load_value(oid1), 2)
        self.assertEqual(load_value(oid1, version), 2)

        self._storage.pack(pt, referencesf)

        self._undo(t_id, note="undo commit version")

        self.assertEqual(load_value(oid1), 0)
        self.assertEqual(load_value(oid1, version), 2)

        data, tid = self._storage.load(oid1, "")
        # After undoing the version commit, the non-version data reverts
        # to the data originally stored by 'create1'.
        self.assertEqual(tid, self._storage.lastTransaction())

        # The current version data comes from an undo record, which
        # means that it gets data via the backpointer but tid from the
        # current txn.
        data, tid, ver = loadEx(self._storage, oid1, version)
        self.assertEqual(ver, version)
        self.assertEqual(tid, self._storage.lastTransaction())
Example 18
 def dowork(self):
     c = self.db.open()
     r = c.root()
     o = r[time.time()] = MinPO(0)
     transaction.commit()
     for i in range(25):
         o.value = MinPO(i)
         transaction.commit()
         o = o.value
     serial = o._p_serial
     root_serial = r._p_serial
     c.close()
     return serial, root_serial
Example 19
 def checkStoreAndLoad(self):
     eq = self.assertEqual
     oid = self._storage.new_oid()
     self._dostore(oid=oid, data=MinPO(7))
     data, revid = utils.load_current(self._storage, oid)
     value = zodb_unpickle(data)
     eq(value, MinPO(7))
     # Now do a bunch of updates to an object
     for i in range(13, 22):
         revid = self._dostore(oid, revid=revid, data=MinPO(i))
     # Now get the latest revision of the object
     data, revid = utils.load_current(self._storage, oid)
     eq(zodb_unpickle(data), MinPO(21))
Example 20
 def checkSerialIsNoneForInitialRevision(self):
     eq = self.assertEqual
     oid = self._storage.new_oid()
     txn = TransactionMetaData()
     self._storage.tpc_begin(txn)
     # Use None for serial.  Don't use _dostore() here because that coerces
     # serial=None to serial=ZERO.
     self._storage.store(oid, None, zodb_pickle(MinPO(11)), '', txn)
     self._storage.tpc_vote(txn)
     newrevid = self._storage.tpc_finish(txn)
     data, revid = utils.load_current(self._storage, oid)
     value = zodb_unpickle(data)
     eq(value, MinPO(11))
     eq(revid, newrevid)
Example 21
 def checkCommitVersionInvalidation(self):
     oid = self._storage.new_oid()
     revid = self._dostore(oid, data=MinPO(1))
     revid = self._dostore(oid, revid=revid, data=MinPO(2))
     revid = self._dostore(oid, revid=revid, data=MinPO(3), version="foo")
     t = Transaction()
     self._storage.tpc_begin(t)
     self._storage.commitVersion("foo", "bar", t)
     self._storage.load(oid, "")
     self._storage.tpc_vote(t)
     self._storage.tpc_finish(t)
     data, revid = self._storage.load(oid, "bar")
     obj = zodb_unpickle(data)
     assert obj == MinPO(3), obj
Example 22
    def checkLoadBeforeOld(self):
        # Look for a very old revision.  With the BaseStorage implementation
        # this should require multiple history() calls.
        oid = self._storage.new_oid()
        revs = []
        revid = None
        for i in range(50):
            revid = self._dostore(oid, revid, data=MinPO(i))
            revs.append(revid)

        data, start, end = self._storage.loadBefore(oid, revs[12])
        self.assertEqual(zodb_unpickle(data), MinPO(11))
        self.assertEqual(start, revs[11])
        self.assertEqual(end, revs[12])
Example 23
 def checkSerialIsNoneForInitialRevision(self):
     eq = self.assertEqual
     oid = self._storage.new_oid()
     txn = transaction.Transaction()
     self._storage.tpc_begin(txn)
     # Use None for serial.  Don't use _dostore() here because that coerces
     # serial=None to serial=ZERO.
     r1 = self._storage.store(oid, None, zodb_pickle(MinPO(11)), '', txn)
     r2 = self._storage.tpc_vote(txn)
     self._storage.tpc_finish(txn)
     newrevid = handle_serials(oid, r1, r2)
     data, revid = self._storage.load(oid, '')
     value = zodb_unpickle(data)
     eq(value, MinPO(11))
     eq(revid, newrevid)
Example 24
    def _dostore(self,
                 storage,
                 oid=None,
                 revid=None,
                 data=None,
                 already_pickled=0,
                 user=None,
                 description=None):
        # Borrowed from StorageTestBase, to allow passing storage.
        """Do a complete storage transaction.  The defaults are:

         - oid=None, ask the storage for a new oid
         - revid=None, use a revid of ZERO
         - data=None, pickle up some arbitrary data (the integer 7)

        Returns the object's new revision id.
        """
        import transaction
        from ZODB.tests.MinPO import MinPO

        if oid is None:
            oid = storage.new_oid()
        if revid is None:
            revid = StorageTestBase.ZERO
        if data is None:
            data = MinPO(7)
        if type(data) == int:
            data = MinPO(data)
        if not already_pickled:
            data = StorageTestBase.zodb_pickle(data)
        # Begin the transaction
        t = transaction.Transaction()
        if user is not None:
            t.user = user
        if description is not None:
            t.description = description
        try:
            storage.tpc_begin(t)
            # Store an object
            r1 = storage.store(oid, revid, data, '', t)
            # Finish the transaction
            r2 = storage.tpc_vote(t)
            revid = handle_serials(oid, r1, r2)
            storage.tpc_finish(t)
        except:
            storage.tpc_abort(t)
            raise
        return revid
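A typical use of this helper, sketched below under the assumption that it is called from a test method of the same class and that storage is an opened test storage, threads each returned revid into the next call so that conflict detection always sees the latest prior revision:

    # Hypothetical usage of the _dostore helper above.
    oid = storage.new_oid()
    revid = self._dostore(storage, oid=oid, data=1)               # stores MinPO(1)
    revid = self._dostore(storage, oid=oid, revid=revid, data=2)  # stores MinPO(2)
    # Passing the first, now-stale revid instead of the latest one would raise
    # POSException.ConflictError, as the checkConflicts example shows.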
Example 25
 def _checkHistory(self, data):
     start = time()
     # Store a couple of revisions of the object
     oid = self._storage.new_oid()
     self.assertRaises(KeyError, self._storage.history, oid)
     revids = [None]
     for value in data:
         if sys.platform == 'win32':
             # time.time() has a precision of 1ms on Windows.
             sleep(0.002)
         revids.append(self._dostore(oid, revids[-1], MinPO(value)))
     revids.reverse()
     del revids[-1]
     # Now get various snapshots of the object's history
     for i in range(1, 1 + len(revids)):
         h = self._storage.history(oid, size=i)
         self.assertEqual([d['tid'] for d in h], revids[:i])
     # Check results are sorted by timestamp, in descending order.
     if sys.platform == 'win32':
         # Same as above. This is also required in case this method is
         # called several times for the same storage.
         sleep(0.002)
     a = time()
     for d in h:
         b = a
         a = d['time']
         self.assertLess(a, b)
     self.assertLess(start, a)
Example 26
    def checkUndoZombieNonVersion(self):
        if not hasattr(self._storage, 'supportsTransactionalUndo'):
            return
        if not self._storage.supportsTransactionalUndo():
            return

        oid = self._storage.new_oid()
        revid = self._dostore(oid, data=MinPO(94))
        # Get the undo information
        info = self._storage.undoInfo()
        tid = info[0]['id']
        # Undo the creation of the object, rendering it a zombie
        t = Transaction()
        self._storage.tpc_begin(t)
        oids = self._storage.undo(tid, t)
        self._storage.tpc_vote(t)
        self._storage.tpc_finish(t)
        # Now attempt to iterate over the storage
        iter = self._storage.iterator()
        for txn in iter:
            for rec in txn:
                pass

        # The last transaction performed an undo of the transaction that
        # created object oid.  (As Barry points out, the object is now in the
        # George Bailey state.)  Assert that the final data record contains
        # None in the data attribute.
        self.assertEqual(rec.oid, oid)
        self.assertEqual(rec.data, None)
Example 27
 def checkTimeoutAfterVote(self):
     self._storage = storage = self.openClientStorage()
     # Assert that the zeo cache is empty
     self.assert_(not list(storage._cache.contents()))
     # Create the object
     oid = storage.new_oid()
     obj = MinPO(7)
     # Now do a store, sleeping before the finish so as to cause a timeout
     t = TransactionMetaData()
     old_connection_count = storage.connection_count_for_tests
     storage.tpc_begin(t)
     revid1 = storage.store(oid, ZERO, zodb_pickle(obj), '', t)
     storage.tpc_vote(t)
     # Now sleep long enough for the storage to time out
     time.sleep(3)
     self.assert_(
         (not storage.is_connected())
         or (storage.connection_count_for_tests > old_connection_count))
     storage._wait()
     self.assert_(storage.is_connected())
     # We expect finish to fail
     self.assertRaises(ClientDisconnected, storage.tpc_finish, t)
     # The cache should still be empty
     self.assert_(not list(storage._cache.contents()))
     # Load should fail since the object should not be in either the cache
     # or the server.
     self.assertRaises(KeyError, storage.load, oid, '')
Example 28
    def checkLoadBefore(self):
        # Store 10 revisions of one object and then make sure that we
        # can get all the non-current revisions back.
        oid = self._storage.new_oid()
        revs = []
        revid = None
        for i in range(10):
            # We need to ensure that successive timestamps are at least
            # two apart, so that a timestamp exists that's unambiguously
            # between successive timestamps.  Each call to snooze()
            # guarantees that the next timestamp will be at least one
            # larger (and probably much more than that) than the previous
            # one.
            snooze()
            snooze()
            revid = self._dostore(oid, revid, data=MinPO(i))
            revs.append(self._storage.loadEx(oid, ""))

        prev = u64(revs[0][1])
        for i in range(1, 10):
            tid = revs[i][1]
            cur = u64(tid)
            middle = prev + (cur - prev) // 2
            assert prev < middle < cur  # else the snooze() trick failed
            prev = cur
            t = self._storage.loadBefore(oid, p64(middle))
            self.assert_(t is not None)
            data, start, end = t
            self.assertEqual(revs[i - 1][0], data)
            self.assertEqual(tid, end)
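For reference, these assertions rely on the loadBefore contract: loadBefore(oid, tid) returns the revision that was current strictly before tid as a (data, start_tid, end_tid) triple, where end_tid is the tid of the next newer revision, or None if the returned data is still current. A minimal sketch, assuming storage, oid and tid already exist:

    # Minimal illustration of the loadBefore contract used above.
    data, start, end = storage.loadBefore(oid, tid)
    assert start < tid                   # 'data' was written by transaction 'start'
    assert end is None or end >= tid     # the next revision, if any, is not before 'tid'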
Example 29
    def checkLoadBeforeConsecutiveTids(self):
        eq = self.assertEqual
        oid = self._storage.new_oid()

        def helper(tid, revid, x):
            data = zodb_pickle(MinPO(x))
            t = transaction.Transaction()
            try:
                self._storage.tpc_begin(t, p64(tid))
                r1 = self._storage.store(oid, revid, data, '', t)
                # Finish the transaction
                r2 = self._storage.tpc_vote(t)
                newrevid = handle_serials(oid, r1, r2)
                self._storage.tpc_finish(t)
            except:
                self._storage.tpc_abort(t)
                raise
            return newrevid

        revid1 = helper(1, None, 1)
        revid2 = helper(2, revid1, 2)
        revid3 = helper(3, revid2, 3)
        data, start_tid, end_tid = self._storage.loadBefore(oid, p64(2))
        eq(zodb_unpickle(data), MinPO(1))
        eq(u64(start_tid), 1)
        eq(u64(end_tid), 2)
Example 30
    def checkLoadBeforeUndo(self):
        # Do several transactions then undo them.
        oid = self._storage.new_oid()
        revid = None
        for i in range(5):
            revid = self._dostore(oid, revid, data=MinPO(i))
        revs = []
        for i in range(4):
            info = self._storage.undoInfo()
            tid = info[0]["id"]
            # Always undo the most recent txn, so the value will
            # alternate between 3 and 4.
            self._undo(tid, [oid], note="undo %d" % i)
            revs.append(self._storage.loadEx(oid, ""))

        prev_tid = None
        for i, (data, tid, ver) in enumerate(revs):
            t = self._storage.loadBefore(oid, p64(u64(tid) + 1))
            self.assertEqual(data, t[0])
            self.assertEqual(tid, t[1])
            if prev_tid:
                self.assert_(prev_tid < t[1])
            prev_tid = t[1]
            if i < 3:
                self.assertEqual(revs[i + 1][1], t[2])
            else:
                self.assertEqual(None, t[2])
Example 31
 def testLRU(self):
     # verify the LRU behavior of the cache
     dataset_size = 5
     CACHE_SIZE = dataset_size * 2 + 1
     # a cache big enough to hold the objects added in two
     # transactions, plus the root object
     self.db.setCacheSize(CACHE_SIZE)
     c = self.db.open()
     r = c.root()
     l = {}
     # the root is the only thing in the cache, because all the
     # other objects are new
     self.assertEqual(len(c._cache), 1)
     # run several transactions
     for t in range(5):
         for i in range(dataset_size):
             l[(t, i)] = r[i] = MinPO(i)
         transaction.commit()
         # commit() will register the objects, placing them in the
         # cache.  at the end of commit, the cache will be reduced
         # down to CACHE_SIZE items
         if len(l) > CACHE_SIZE:
             self.assertEqual(c._cache.ringlen(), CACHE_SIZE)
     for i in range(dataset_size):
         # Check objects added in the first two transactions.
         # They must all be ghostified.
         self.assertEqual(l[(0, i)]._p_changed, None)
         self.assertEqual(l[(1, i)]._p_changed, None)
         # Check objects added in the last two transactions.
         # They must all still exist in memory, but have
         # had their changes flushed
         self.assertEqual(l[(3, i)]._p_changed, 0)
         self.assertEqual(l[(4, i)]._p_changed, 0)
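The _p_changed checks above rely on the persistent-object ghost protocol: None means the object is a ghost (its state was evicted from the cache), 0/False means loaded and unmodified, and 1/True means modified. A minimal sketch of those states, assuming obj is a persistent object loaded through an open connection:

    # Illustration of the _p_changed states asserted above.
    obj._p_deactivate()            # evict the state: obj becomes a ghost
    assert obj._p_changed is None
    obj.value                      # attribute access reloads the state
    assert obj._p_changed == 0     # loaded but unmodified
    obj.value = 42                 # mutation marks the object as changed
    assert obj._p_changed == 1     # will be written at the next commit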
Example 32
    def test_ConflictErrorDoesntImport(self):
        from ZODB.serialize import ObjectWriter
        from ZODB.POSException import ConflictError
        from ZODB.tests.MinPO import MinPO

        obj = MinPO()
        data = ObjectWriter().serialize(obj)

        # The pickle contains a GLOBAL ('c') opcode resolving to MinPO's
        # module and class.
        self.assertTrue(b'cZODB.tests.MinPO\nMinPO\n' in data)

        # Fiddle the pickle so it points to something "impossible" instead.
        data = data.replace(
            b'cZODB.tests.MinPO\nMinPO\n',
            b'cpath.that.does.not.exist\nlikewise.the.class\n')
        # Pickle can't resolve that GLOBAL opcode -- gets ImportError.
        self.assertRaises(ImportError, loads, data)

        # Verify that building ConflictError doesn't get ImportError.
        try:
            raise ConflictError(object=obj, data=data)
        except ConflictError as detail:
            # And verify that the msg names the impossible path.
            self.assertTrue(
                'path.that.does.not.exist.likewise.the.class' in str(detail))
        else:
            self.fail("expected ConflictError, but no exception raised")
Example 33
 def __init__(self, value):
     MinPO.__init__(self, value)
     self.an_attribute = 42
Example 34
    def checkTimeoutProvokingConflicts(self):
        self._storage = storage = self.openClientStorage()
        # Assert that the zeo cache is empty.
        self.assert_(not list(storage._cache.contents()))
        # Create the object
        oid = storage.new_oid()
        obj = MinPO(7)
        # We need to successfully commit an object now so we have something to
        # conflict about.
        t = Transaction()
        storage.tpc_begin(t)
        revid1a = storage.store(oid, ZERO, zodb_pickle(obj), '', t)
        revid1b = storage.tpc_vote(t)
        revid1 = handle_serials(oid, revid1a, revid1b)
        storage.tpc_finish(t)
        # Now do a store, sleeping before the finish so as to cause a timeout.
        obj.value = 8
        t = Transaction()
        old_connection_count = storage.connection_count_for_tests
        storage.tpc_begin(t)
        revid2a = storage.store(oid, revid1, zodb_pickle(obj), '', t)
        revid2b = storage.tpc_vote(t)
        revid2 = handle_serials(oid, revid2a, revid2b)

        # Now sleep long enough for the storage to time out.
        # This used to sleep for 3 seconds, and sometimes (but very rarely)
        # failed then.  Now we try for a minute.  It typically succeeds
        # on the second time thru the loop, and, since self.timeout is 1,
        # it's typically faster now (2/1.8 ~= 1.11 seconds sleeping instead
        # of 3).
        deadline = time.time() + 60 # wait up to a minute
        while time.time() < deadline:
            if (storage.is_connected() and
                (storage.connection_count_for_tests == old_connection_count)
                ):
                time.sleep(self.timeout / 1.8)
            else:
                break
        self.assert_(
            (not storage.is_connected())
            or
            (storage.connection_count_for_tests > old_connection_count)
            )
        storage._wait()
        self.assert_(storage.is_connected())
        # We expect finish to fail.
        self.assertRaises(ClientDisconnected, storage.tpc_finish, t)
        storage.tpc_abort(t)

        # Now we think we've committed the second transaction, but we really
        # haven't.  A third one should produce a POSKeyError on the server,
        # which manifests as a ConflictError on the client.
        obj.value = 9
        t = Transaction()
        storage.tpc_begin(t)
        storage.store(oid, revid2, zodb_pickle(obj), '', t)
        self.assertRaises(ConflictError, storage.tpc_vote, t)
        # Even aborting won't help.
        storage.tpc_abort(t)
        self.assertRaises(ZODB.POSException.StorageTransactionError,
                          storage.tpc_finish, t)
        # Try again.
        obj.value = 10
        t = Transaction()
        storage.tpc_begin(t)
        storage.store(oid, revid2, zodb_pickle(obj), '', t)
        # Even aborting won't help.
        self.assertRaises(ConflictError, storage.tpc_vote, t)
        # Abort this one and try a transaction that should succeed.
        storage.tpc_abort(t)
        
        # Now do a store.
        obj.value = 11
        t = Transaction()
        storage.tpc_begin(t)
        revid2a = storage.store(oid, revid1, zodb_pickle(obj), '', t)
        revid2b = storage.tpc_vote(t)
        revid2 = handle_serials(oid, revid2a, revid2b)
        storage.tpc_finish(t)
        # Now load the object and verify that it has a value of 11.
        data, revid = storage.load(oid, '')
        self.assertEqual(zodb_unpickle(data), MinPO(11))
        self.assertEqual(revid, revid2)
Example 35
    def _PackWhileWriting(self, pack_now):
        # A storage should allow some reading and writing during
        # a pack.  This test attempts to exercise locking code
        # in the storage to test that it is safe.  It generates
        # a lot of revisions, so that pack takes a long time.

        db = DB(self._storage)
        conn = db.open()
        root = conn.root()

        for i in range(10):
            root[i] = MinPO(i)
        transaction.commit()

        snooze()
        packt = time.time()

        choices = list(range(10))
        for dummy in choices:
            for i in choices:
                root[i].value = MinPO(i)
                transaction.commit()

        # How many client threads should we run, and how long should we
        # wait for them to finish?  Hard to say.  Running 4 threads and
        # waiting 30 seconds too often left a thread still alive on Tim's
        # Win98SE box, during ZEO flavors of this test.  Those tend to
        # run one thread at a time to completion, and take about 10 seconds
        # per thread.  There doesn't appear to be a compelling reason to
        # run that many threads.  Running 3 threads and waiting up to a
        # minute seems to work well in practice.  The ZEO tests normally
        # finish faster than that, and the non-ZEO tests very much faster
        # than that.
        NUM_LOOP_TRIP = 50
        timer = ElapsedTimer(time.time())
        threads = [ClientThread(db, choices, NUM_LOOP_TRIP, timer, i)
                   for i in range(3)]
        for t in threads:
            t.start()

        if pack_now:
            db.pack(time.time())
        else:
            db.pack(packt)

        for t in threads:
            t.join(60)
        liveness = [t.is_alive() for t in threads]
        if True in liveness:
            # They should have finished by now.
            print('Liveness:', liveness)
            # Combine the outcomes, and sort by start time.
            outcomes = []
            for t in threads:
                outcomes.extend(t.outcomes)
            # each outcome list has as many of these as a loop trip got thru:
            #     thread_id
            #     elapsed millis at loop top
            #     elapsed millis at attempt to assign to self.root[index]
            #     index into self.root getting replaced
            #     elapsed millis when outcome known
            #     'OK' or 'Conflict'
            #     True if we got beyond this line, False if it raised an
            #         exception (one possible Conflict cause):
            #             self.root[index].value = MinPO(j)
            # Sort by start time; break ties by thread id.
            outcomes.sort(key=lambda outcome: (outcome[1], outcome[0]))
            counts = [0] * 4
            for outcome in outcomes:
                n = len(outcome)
                assert n >= 2
                tid = outcome[0]
                print('tid:%d top:%5d' % (tid, outcome[1]), end=' ')
                if n > 2:
                    print('commit:%5d' % outcome[2], end=' ')
                    if n > 3:
                        print('index:%2d' % outcome[3], end=' ')
                        if n > 4:
                            print('known:%5d' % outcome[4], end=' ')
                            if n > 5:
                                print('%8s' % outcome[5], end=' ')
                                if n > 6:
                                    print('assigned:%5s' % outcome[6], end=' ')
                counts[tid] += 1
                if counts[tid] == NUM_LOOP_TRIP:
                    print('thread %d done' % tid, end=' ')
                print()

            self.fail('a thread is still alive')

        self._sanity_check()

        db.close()
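Examples 1 and 35 depend on a ClientThread helper (and an ElapsedTimer) that are not shown here. Judging from the outcome comments above, each thread loops NUM_LOOP_TRIP times, replaces a randomly chosen root entry with a fresh MinPO, and records whether the commit succeeded or conflicted. The following is a rough sketch under that assumption, not the actual ZODB.tests implementation, which records far more detailed timing data:

    # Rough sketch of a ClientThread-style helper (an assumption).
    import random
    import threading
    import transaction
    from ZODB.POSException import ConflictError
    from ZODB.tests.MinPO import MinPO

    class ClientThread(threading.Thread):

        def __init__(self, db, choices, loop_trip, timer, thread_id):
            threading.Thread.__init__(self)
            self.db = db
            self.choices = choices
            self.loop_trip = loop_trip
            self.timer = timer
            self.thread_id = thread_id
            self.outcomes = []

        def run(self):
            conn = self.db.open()
            root = conn.root()
            for j in range(self.loop_trip):
                index = random.choice(self.choices)
                try:
                    root[index].value = MinPO(j)
                    transaction.commit()
                    self.outcomes.append((self.thread_id, index, 'OK'))
                except ConflictError:
                    transaction.abort()
                    self.outcomes.append((self.thread_id, index, 'Conflict'))
            conn.close()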