def checkResolveConflictBetweenConnections(self):
    """Conflict resolution must work between storage instances bound
    to different connections of the same storage."""
    # Verify that conflict resolution works between storage instances
    # bound to connections.
    obj = ConflictResolution.PCounter()
    obj.inc()
    oid = self._storage.new_oid()
    revid1 = self._dostoreNP(oid, data=zodb_pickle(obj))
    # Two per-connection storages, both having loaded the base revision.
    storage1 = self._storage.bind_connection(None)
    storage1.load(oid, '')
    storage2 = self._storage.bind_connection(None)
    storage2.load(oid, '')
    obj.inc()
    obj.inc()
    # The effect of committing two transactions with the same
    # pickle is to commit two different transactions relative to
    # revid1 that add two to _value.
    root_storage = self._storage
    try:
        self._storage = storage1
        revid2 = self._dostoreNP(oid, revid=revid1, data=zodb_pickle(obj))
        self._storage = storage2
        # This second store conflicts with the first one and must be
        # resolved by the storage, not rejected.
        revid3 = self._dostoreNP(oid, revid=revid1, data=zodb_pickle(obj))
        data, serialno = self._storage.load(oid, '')
        inst = zodb_unpickle(data)
        # 1 (base) + 2 + 2 after successful resolution.
        self.assertEqual(inst._value, 5)
    finally:
        # NOTE(review): storage1/storage2 are never closed here, unlike
        # the new_instance() variant of this test — confirm whether
        # bind_connection instances need explicit cleanup.
        self._storage = root_storage
def checkResolveConflictBetweenConnections(self):
    """Conflict resolution must work between storage instances bound
    to different connections of the same storage."""
    counter = ConflictResolution.PCounter()
    counter.inc()
    oid = self._storage.new_oid()
    base_revid = self._dostoreNP(oid, data=zodb_pickle(counter))
    conn_a = self._storage.new_instance()
    conn_a.load(oid, '')
    conn_b = self._storage.new_instance()
    conn_b.load(oid, '')
    counter.inc()
    counter.inc()
    # Committing the same pickle twice against base_revid yields two
    # distinct transactions, each adding two to _value.
    saved_storage = self._storage
    try:
        self._storage = conn_a
        self._dostoreNP(oid, revid=base_revid, data=zodb_pickle(counter))
        self._storage = conn_b
        self._dostoreNP(oid, revid=base_revid, data=zodb_pickle(counter))
        state, _ = self._storage.load(oid, '')
        # 1 (base) + 2 + 2 after successful resolution.
        self.assertEqual(zodb_unpickle(state)._value, 5)
    finally:
        conn_a.close()
        conn_b.close()
        self._storage = saved_storage
def testrun(self):
    """Store two fresh objects in one transaction; a dropped server
    connection is an acceptable outcome."""
    try:
        self.storage.tpc_begin(self.trans)
        for _ in range(2):
            fresh_oid = self.storage.new_oid()
            pickled = zodb_pickle(MinPO("c"))
            self.storage.store(fresh_oid, ZERO, pickled, '', self.trans)
        self.myvote()
        self.storage.tpc_finish(self.trans)
    except ClientDisconnected:
        # Disconnection mid-commit is tolerated by this scenario.
        pass
def testrun(self):
    """Store two new objects, vote, and finish; ignore disconnects."""
    try:
        self.storage.tpc_begin(self.trans)
        first = self.storage.new_oid()
        self.storage.store(first, ZERO, zodb_pickle(MinPO("c")), '',
                           self.trans)
        second = self.storage.new_oid()
        self.storage.store(second, ZERO, zodb_pickle(MinPO("c")), '',
                           self.trans)
        self.myvote()
        self.storage.tpc_finish(self.trans)
    except ClientDisconnected:
        # Disconnection mid-commit is tolerated by this scenario.
        pass
def checkStoreBlob(self):
    """Store a blob; the uncommitted file must be consumed by the
    commit and the data must appear in the blob directory."""
    # Dropped unused imports (oid_repr, tid_repr, BLOB_SUFFIX).
    from ZODB.blob import Blob
    from ZODB.tests.StorageTestBase import zodb_pickle, ZERO, \
        handle_serials
    import transaction
    somedata = 'a' * 10
    blob = Blob()
    bd_fh = blob.open('w')
    bd_fh.write(somedata)
    bd_fh.close()
    tfname = bd_fh.name
    oid = self._storage.new_oid()
    data = zodb_pickle(blob)
    self.assert_(os.path.exists(tfname))
    t = transaction.Transaction()
    try:
        self._storage.tpc_begin(t)
        r1 = self._storage.storeBlob(oid, ZERO, data, tfname, '', t)
        r2 = self._storage.tpc_vote(t)
        revid = handle_serials(oid, r1, r2)
        self._storage.tpc_finish(t)
    except:
        self._storage.tpc_abort(t)
        raise
    # The temporary blob file is moved away by the commit.
    self.assert_(not os.path.exists(tfname))
    filename = self._storage.fshelper.getBlobFilename(oid, revid)
    self.assert_(os.path.exists(filename))
    # Close the handle instead of leaking it.
    with open(filename) as f:
        self.assertEqual(somedata, f.read())
def checkLoadBlob(self):
    """loadBlob must return a read-only file containing the stored data."""
    from ZODB.blob import Blob
    from ZODB.tests.StorageTestBase import zodb_pickle, ZERO, handle_serials
    import transaction
    somedata = "a" * 10
    blob = Blob()
    bd_fh = blob.open("w")
    bd_fh.write(somedata)
    bd_fh.close()
    tfname = bd_fh.name
    oid = self._storage.new_oid()
    data = zodb_pickle(blob)
    t = transaction.Transaction()
    try:
        self._storage.tpc_begin(t)
        r1 = self._storage.storeBlob(oid, ZERO, data, tfname, "", t)
        r2 = self._storage.tpc_vote(t)
        serial = handle_serials(oid, r1, r2)
        self._storage.tpc_finish(t)
    except:
        self._storage.tpc_abort(t)
        raise
    filename = self._storage.loadBlob(oid, serial)
    # assertEqual (assertEquals is deprecated); close the handle.
    with open(filename, "rb") as f:
        self.assertEqual(somedata, f.read())
    # The committed blob file must be read-only.
    self.assert_(not (os.stat(filename).st_mode & stat.S_IWRITE))
    self.assert_((os.stat(filename).st_mode & stat.S_IREAD))
def checkStoreBlob(self):
    """Store a blob; the uncommitted file must be consumed by the
    commit and the data must appear in the blob directory."""
    # Dropped unused imports (oid_repr, tid_repr, BLOB_SUFFIX).
    from ZODB.blob import Blob
    from ZODB.tests.StorageTestBase import zodb_pickle, ZERO, handle_serials
    import transaction
    somedata = "a" * 10
    blob = Blob()
    bd_fh = blob.open("w")
    bd_fh.write(somedata)
    bd_fh.close()
    tfname = bd_fh.name
    oid = self._storage.new_oid()
    data = zodb_pickle(blob)
    self.assert_(os.path.exists(tfname))
    t = transaction.Transaction()
    try:
        self._storage.tpc_begin(t)
        r1 = self._storage.storeBlob(oid, ZERO, data, tfname, "", t)
        r2 = self._storage.tpc_vote(t)
        revid = handle_serials(oid, r1, r2)
        self._storage.tpc_finish(t)
    except:
        self._storage.tpc_abort(t)
        raise
    # The temporary blob file is moved away by the commit.
    self.assert_(not os.path.exists(tfname))
    filename = self._storage.fshelper.getBlobFilename(oid, revid)
    self.assert_(os.path.exists(filename))
    # Close the handle instead of leaking it.
    with open(filename) as f:
        self.assertEqual(somedata, f.read())
def checkTimeoutAfterVote(self):
    """After tpc_vote, an idle connection must time out, tpc_finish
    must fail, and nothing must reach the cache or the server."""
    self._storage = storage = self.openClientStorage()
    # Assert that the zeo cache is empty.
    self.assert_(not list(storage._cache.contents()))
    # Create the object.
    oid = storage.new_oid()
    obj = MinPO(7)
    # Now do a store, sleeping before the finish so as to cause a timeout.
    t = TransactionMetaData()
    old_connection_count = storage.connection_count_for_tests
    storage.tpc_begin(t)
    # The store result was previously bound to an unused local (revid1).
    storage.store(oid, ZERO, zodb_pickle(obj), '', t)
    storage.tpc_vote(t)
    # Now sleep long enough for the storage to time out.
    time.sleep(3)
    self.assert_(
        (not storage.is_connected())
        or
        (storage.connection_count_for_tests > old_connection_count)
    )
    storage._wait()
    self.assert_(storage.is_connected())
    # We expect finish to fail.
    self.assertRaises(ClientDisconnected, storage.tpc_finish, t)
    # The cache should still be empty.
    self.assert_(not list(storage._cache.contents()))
    # Load should fail since the object should not be in either the cache
    # or the server.
    self.assertRaises(KeyError, storage.load, oid, '')
def checkTimeoutAfterVote(self):
    """After tpc_vote, an idle connection must time out, tpc_finish
    must fail, and nothing must reach the cache or the server."""
    self._storage = storage = self.openClientStorage()
    # Assert that the zeo cache is empty.
    self.assert_(not list(storage._cache.contents()))
    # Create the object.
    oid = storage.new_oid()
    obj = MinPO(7)
    # Now do a store, sleeping before the finish so as to cause a timeout.
    t = TransactionMetaData()
    old_connection_count = storage.connection_count_for_tests
    storage.tpc_begin(t)
    # The store result was previously bound to an unused local (revid1).
    storage.store(oid, ZERO, zodb_pickle(obj), '', t)
    storage.tpc_vote(t)
    # Now sleep long enough for the storage to time out.
    time.sleep(3)
    self.assert_(
        (not storage.is_connected())
        or
        (storage.connection_count_for_tests > old_connection_count))
    storage._wait()
    self.assert_(storage.is_connected())
    # We expect finish to fail.
    self.assertRaises(ClientDisconnected, storage.tpc_finish, t)
    # The cache should still be empty.
    self.assert_(not list(storage._cache.contents()))
    # Load should fail since the object should not be in either the cache
    # or the server.
    self.assertRaises(KeyError, storage.load, oid, '')
def checkLoadBlob(self):
    """loadBlob must return a read-only file containing the stored data."""
    from ZODB.blob import Blob
    from ZODB.tests.StorageTestBase import zodb_pickle, ZERO, \
        handle_serials
    import transaction
    # Dropped unused local `version`.
    somedata = 'a' * 10
    blob = Blob()
    bd_fh = blob.open('w')
    bd_fh.write(somedata)
    bd_fh.close()
    tfname = bd_fh.name
    oid = self._storage.new_oid()
    data = zodb_pickle(blob)
    t = transaction.Transaction()
    try:
        self._storage.tpc_begin(t)
        r1 = self._storage.storeBlob(oid, ZERO, data, tfname, '', t)
        r2 = self._storage.tpc_vote(t)
        serial = handle_serials(oid, r1, r2)
        self._storage.tpc_finish(t)
    except:
        self._storage.tpc_abort(t)
        raise
    filename = self._storage.loadBlob(oid, serial)
    # assertEqual (assertEquals is deprecated); close the handle.
    with open(filename, 'rb') as f:
        self.assertEqual(somedata, f.read())
    # The committed blob file must be read-only.
    self.assert_(not (os.stat(filename).st_mode & stat.S_IWRITE))
    self.assert_((os.stat(filename).st_mode & stat.S_IREAD))
def checkRSResolve(self):
    """Resolve a conflict through a second storage instance, and verify
    that the main connection sees the resolved state only after
    poll_invalidations()."""
    # ZODB.tests.ConflictResolution.ConflictResolvingStorage has a
    # checkResolve with a different signature (as of 4.4.0) that we were
    # unintentionally(?) shadowing, hence the weird name.
    obj = PCounter()
    obj.inc()
    oid = self._storage.new_oid()
    revid1 = self._dostoreNP(oid, data=zodb_pickle(obj))
    obj.inc()
    obj.inc()
    # The effect of committing two transactions with the same
    # pickle is to commit two different transactions relative to
    # revid1 that add two to _value.
    # open s1 at this point of time.
    s1 = self._storage.new_instance()
    # start a load transaction in s1
    s1.poll_invalidations()
    # commit a change not visible to s1
    _revid2 = self._dostoreNP(oid, revid=revid1, data=zodb_pickle(obj))
    # commit a conflicting change using s1
    main_storage = self._storage
    self._storage = s1
    try:
        # we can resolve this conflict because s1 has an open
        # transaction that can read the old state of the object.
        _revid3 = self._dostoreNP(oid, revid=revid1, data=zodb_pickle(obj))
        s1.release()
    finally:
        self._storage = main_storage
    # If we don't restart our load connection,
    # we will still read the old state (1 + 2).
    data, _serialno = self._storage.load(oid, '')
    inst = zodb_unpickle(data)
    self.assertEqual(inst._value, 3)
    # After polling invalidations the resolved state (1 + 2 + 2) shows up.
    self._storage.poll_invalidations()
    data, _serialno = self._storage.load(oid, '')
    inst = zodb_unpickle(data)
    self.assertEqual(inst._value, 5)
def checkBuggyResolve2(self):
    """Conflicting commits of a PCounter4 must raise ConflictError
    instead of being resolved."""
    obj = PCounter4()
    obj.inc()
    oid = self._storage.new_oid()
    revid1 = self._dostoreNP(oid, data=zodb_pickle(obj))
    obj.inc()
    obj.inc()
    # The effect of committing two transactions with the same
    # pickle is to commit two different transactions relative to
    # revid1 that add two to _value.
    # (The first commit's revid was previously bound to an unused local.)
    self._dostoreNP(oid, revid=revid1, data=zodb_pickle(obj))
    self.assertRaises(ConflictError,
                      self._dostoreNP,
                      oid, revid=revid1, data=zodb_pickle(obj))
def checkBuggyResolve1(self):
    """Conflicting commits of a PCounter3 must raise ConflictError
    instead of being resolved."""
    obj = PCounter3()
    obj.inc()
    oid = self._storage.new_oid()
    revid1 = self._dostoreNP(oid, data=zodb_pickle(obj))
    obj.inc()
    obj.inc()
    # The effect of committing two transactions with the same
    # pickle is to commit two different transactions relative to
    # revid1 that add two to _value.
    # (The first commit's revid was previously bound to an unused local.)
    self._dostoreNP(oid, revid=revid1, data=zodb_pickle(obj))
    self.assertRaises(ConflictError,
                      self._dostoreNP,
                      oid, revid=revid1, data=zodb_pickle(obj))
def checkNote(self):
    """A transaction note must be accepted through a full commit."""
    fresh_oid = self._storage.new_oid()
    txn = TransactionMetaData()
    self._storage.tpc_begin(txn)
    txn.note(u'this is a test')
    self._storage.store(fresh_oid, ZERO, zodb_pickle(MinPO(5)), '', txn)
    self._storage.tpc_vote(txn)
    self._storage.tpc_finish(txn)
def checkNote(self):
    """Setting a note on an in-flight transaction must not break commit."""
    txn = TransactionMetaData()
    oid_for_store = self._storage.new_oid()
    self._storage.tpc_begin(txn)
    txn.note(u'this is a test')
    self._storage.store(oid_for_store, ZERO, zodb_pickle(MinPO(5)), '', txn)
    self._storage.tpc_vote(txn)
    self._storage.tpc_finish(txn)
def checkNote(self):
    """t.note() must be usable during an active transaction."""
    fresh_oid = self._storage.new_oid()
    txn = transaction.Transaction()
    self._storage.tpc_begin(txn)
    txn.note("this is a test")
    self._storage.store(fresh_oid, ZERO, zodb_pickle(MinPO(5)), "", txn)
    self._storage.tpc_vote(txn)
    self._storage.tpc_finish(txn)
def checkUnresolvable(self): obj = PCounter2() obj.inc() oid = self._storage.new_oid() revid1 = self._dostoreNP(oid, data=zodb_pickle(obj)) obj.inc() obj.inc() # The effect of committing two transactions with the same # pickle is to commit two different transactions relative to # revid1 that add two to _value. revid2 = self._dostoreNP(oid, revid=revid1, data=zodb_pickle(obj)) try: self._dostoreNP(oid, revid=revid1, data=zodb_pickle(obj)) except ConflictError, err: self.assert_("PCounter2" in str(err))
def checkUnresolvable(self): obj = PCounter2() obj.inc() oid = self._storage.new_oid() revid1 = self._dostoreNP(oid, data=zodb_pickle(obj)) obj.inc() obj.inc() # The effect of committing two transactions with the same # pickle is to commit two different transactions relative to # revid1 that add two to _value. revid2 = self._dostoreNP(oid, revid=revid1, data=zodb_pickle(obj)) try: self._dostoreNP(oid, revid=revid1, data=zodb_pickle(obj)) except ConflictError, err: self.assert_("PCounter2" in str(err))
def run(self):
    """Run ntrans transactions, each spanning every connected server and
    storing nobj new objects per server, then verify the serial maps.

    NOTE: attributes like c.__name are name-mangled by the enclosing
    class; usage is consistent within this method, so it works.
    """
    tname = self.getName()
    testcase = self.testcase
    # Create client connections to each server
    clients = self.clients
    for i in range(len(testcase.addr)):
        c = testcase.openClientStorage(addr=testcase.addr[i])
        c.__name = "C%d" % i
        clients.append(c)
    for i in range(testcase.ntrans):
        # Because we want a transaction spanning all storages,
        # we can't use _dostore().  This is several _dostore() calls
        # expanded in-line (mostly).
        # Create oid->serial mappings
        for c in clients:
            c.__oids = []
            c.__serials = {}
        # Begin a transaction
        t = Transaction()
        for c in clients:
            #print "%s.%s.%s begin\n" % (tname, c.__name, i),
            c.tpc_begin(t)
        for j in range(testcase.nobj):
            for c in clients:
                # Create and store a new object on each server
                oid = c.new_oid()
                c.__oids.append(oid)
                data = MinPO("%s.%s.t%d.o%d" % (tname, c.__name, i, j))
                #print data.value
                data = zodb_pickle(data)
                s = c.store(oid, ZERO, data, '', t)
                c.__serials.update(handle_all_serials(oid, s))
        # Vote on all servers and handle serials
        for c in clients:
            #print "%s.%s.%s vote\n" % (tname, c.__name, i),
            s = c.tpc_vote(t)
            c.__serials.update(handle_all_serials(None, s))
        # Finish on all servers
        for c in clients:
            #print "%s.%s.%s finish\n" % (tname, c.__name, i),
            c.tpc_finish(t)
        for c in clients:
            # Check that we got serials for all oids
            for oid in c.__oids:
                testcase.failUnless(c.__serials.has_key(oid))
            # Check that we got serials for no other oids
            for oid in c.__serials.keys():
                testcase.failUnless(oid in c.__oids)
def run(self):
    """Run ntrans transactions, each spanning every connected server and
    storing nobj new objects per server, then verify the serial maps.

    NOTE: attributes like c.__name are name-mangled by the enclosing
    class; usage is consistent within this method, so it works.
    """
    tname = self.getName()
    testcase = self.testcase
    # Create client connections to each server
    clients = self.clients
    for i in range(len(testcase.addr)):
        c = testcase.openClientStorage(addr=testcase.addr[i])
        c.__name = "C%d" % i
        clients.append(c)
    for i in range(testcase.ntrans):
        # Because we want a transaction spanning all storages,
        # we can't use _dostore().  This is several _dostore() calls
        # expanded in-line (mostly).
        # Create oid->serial mappings
        for c in clients:
            c.__oids = []
            c.__serials = {}
        # Begin a transaction
        t = Transaction()
        for c in clients:
            #print("%s.%s.%s begin" % (tname, c.__name, i))
            c.tpc_begin(t)
        for j in range(testcase.nobj):
            for c in clients:
                # Create and store a new object on each server
                oid = c.new_oid()
                c.__oids.append(oid)
                data = MinPO("%s.%s.t%d.o%d" % (tname, c.__name, i, j))
                #print(data.value)
                data = zodb_pickle(data)
                s = c.store(oid, ZERO, data, '', t)
                c.__serials.update(handle_all_serials(oid, s))
        # Vote on all servers and handle serials
        for c in clients:
            #print("%s.%s.%s vote" % (tname, c.__name, i))
            s = c.tpc_vote(t)
            c.__serials.update(handle_all_serials(None, s))
        # Finish on all servers
        for c in clients:
            #print("%s.%s.%s finish\n" % (tname, c.__name, i))
            c.tpc_finish(t)
        for c in clients:
            # Check that we got serials for all oids
            for oid in c.__oids:
                testcase.failUnless(oid in c.__serials)
            # Check that we got serials for no other oids
            for oid in c.__serials.keys():
                testcase.failUnless(oid in c.__oids)
def checkWriteAfterAbort(self):
    """A store aborted via tpc_abort must not block a later commit."""
    aborted_oid = self._storage.new_oid()
    txn = transaction.Transaction()
    self._storage.tpc_begin(txn)
    self._storage.store(aborted_oid, ZERO, zodb_pickle(MinPO(5)), "", txn)
    # Throw the pending work away...
    self._storage.tpc_abort(txn)
    # ...then start all over again with a fresh oid.
    self._dostore(oid=self._storage.new_oid(), data=MinPO(6))
def checkTwoArgBegin(self):
    """tpc_begin() must accept an explicit transaction id."""
    # Unsure: how standard is three-argument tpc_begin()?
    txn = transaction.Transaction()
    explicit_tid = '\0\0\0\0\0psu'
    self._storage.tpc_begin(txn, explicit_tid)
    fresh_oid = self._storage.new_oid()
    pickled = zodb_pickle(MinPO(8))
    self._storage.store(fresh_oid, None, pickled, '', txn)
    self._storage.tpc_vote(txn)
    self._storage.tpc_finish(txn)
def checkWriteAfterAbort(self):
    """A store aborted via tpc_abort must not block a later commit."""
    aborted_oid = self._storage.new_oid()
    txn = TransactionMetaData()
    self._storage.tpc_begin(txn)
    self._storage.store(aborted_oid, ZERO, zodb_pickle(MinPO(5)), '', txn)
    # Throw the pending work away...
    self._storage.tpc_abort(txn)
    # ...then start all over again with a fresh oid.
    self._dostore(oid=self._storage.new_oid(), data=MinPO(6))
def checkTwoArgBegin(self):
    """tpc_begin() must accept an explicit transaction id."""
    # XXX how standard is three-argument tpc_begin()?
    txn = Transaction()
    explicit_tid = '\0\0\0\0\0psu'
    self._storage.tpc_begin(txn, explicit_tid)
    fresh_oid = self._storage.new_oid()
    pickled = zodb_pickle(MinPO(8))
    self._storage.store(fresh_oid, None, pickled, '', txn)
    self._storage.tpc_vote(txn)
    self._storage.tpc_finish(txn)
def checkResolve(self):
    """Two conflicting increments must be resolved to a total of 5."""
    obj = PCounter()
    obj.inc()
    oid = self._storage.new_oid()
    revid1 = self._dostoreNP(oid, data=zodb_pickle(obj))
    obj.inc()
    obj.inc()
    # The effect of committing two transactions with the same
    # pickle is to commit two different transactions relative to
    # revid1 that add two to _value.
    # Unused results renamed with the file's underscore convention.
    _revid2 = self._dostoreNP(oid, revid=revid1, data=zodb_pickle(obj))
    _revid3 = self._dostoreNP(oid, revid=revid1, data=zodb_pickle(obj))
    data, _serialno = self._storage.load(oid, '')
    inst = zodb_unpickle(data)
    # 1 (base) + 2 + 2 after conflict resolution.
    self.assertEqual(inst._value, 5)
def checkResolve(self):
    """Two conflicting increments must be resolved to a total of 5."""
    obj = PCounter()
    obj.inc()
    oid = self._storage.new_oid()
    revid1 = self._dostoreNP(oid, data=zodb_pickle(obj))
    obj.inc()
    obj.inc()
    # The effect of committing two transactions with the same
    # pickle is to commit two different transactions relative to
    # revid1 that add two to _value.
    # Unused results renamed with the file's underscore convention.
    _revid2 = self._dostoreNP(oid, revid=revid1, data=zodb_pickle(obj))
    _revid3 = self._dostoreNP(oid, revid=revid1, data=zodb_pickle(obj))
    data, _serialno = self._storage.load(oid, '')
    inst = zodb_unpickle(data)
    # 1 (base) + 2 + 2 after conflict resolution.
    self.assertEqual(inst._value, 5)
def checkanalyze(self):
    """Run ZODB.scripts.analyze over this storage, including records
    whose class can no longer be imported, and check that the size
    report accounts for (almost exactly) 100% of the bytes."""
    import types
    from BTrees.OOBTree import OOBTree
    from ZODB.scripts import analyze

    # Set up a module to act as a broken import
    module_name = 'brokenmodule'
    module = types.ModuleType(module_name)
    sys.modules[module_name] = module

    class Broken(MinPO):
        __module__ = module_name
    module.Broken = Broken

    # [oid, revid] pairs; revid updated after each commit.
    oids = [[self._storage.new_oid(), None] for i in range(3)]

    def store(i, data):
        # Store record i under the transaction `t` from the enclosing loop.
        oid, revid = oids[i]
        self._storage.store(oid, revid, data, "", t)

    for i in range(2):
        t = TransactionMetaData()
        self._storage.tpc_begin(t)
        # sometimes data is in this format
        store(0, dumps(OOBTree, _protocol))
        # and it could be from a broken module
        store(1, dumps(Broken, _protocol))
        # but mostly it looks like this
        store(2, zodb_pickle(MinPO(2)))
        self._storage.tpc_vote(t)
        tid = self._storage.tpc_finish(t)
        for oid_revid in oids:
            oid_revid[1] = tid

    # now break the import of the Broken class
    del sys.modules[module_name]

    # from ZODB.scripts.analyze.analyze
    fsi = self._storage.iterator()
    rep = analyze.Report()
    for txn in fsi:
        analyze.analyze_trans(rep, txn)

    # from ZODB.scripts.analyze.report
    typemap = sorted(rep.TYPEMAP.keys())
    cumpct = 0.0
    for t in typemap:
        pct = rep.TYPESIZE[t] * 100.0 / rep.DBYTES
        cumpct += pct

    # Every record, broken or not, must be counted.
    self.assertAlmostEqual(cumpct, 100.0, 0,
                           "Failed to analyze some records")
def checkRestoreAfterDoubleCommit(self):
    """copyTransactionsFrom must cope with one oid stored twice in a
    single transaction."""
    oid = self._storage.new_oid()
    initial_revid = b'\0' * 8
    first_pickle = zodb_pickle(MinPO(11))
    second_pickle = zodb_pickle(MinPO(12))
    txn = transaction.Transaction()
    try:
        self._storage.tpc_begin(txn)
        # Store the same object twice within one transaction.
        self._storage.store(oid, initial_revid, first_pickle, '', txn)
        self._storage.store(oid, initial_revid, second_pickle, '', txn)
        self._storage.tpc_vote(txn)
        self._storage.tpc_finish(txn)
    except:
        self._storage.tpc_abort(txn)
        raise
    self._dst.copyTransactionsFrom(self._storage)
    self.compare(self._storage, self._dst)
def checkanalyze(self):
    """Run ZODB.scripts.analyze over this storage, including records
    whose class can no longer be imported, and check that the size
    report accounts for (almost exactly) 100% of the bytes."""
    import types
    from BTrees.OOBTree import OOBTree
    from ZODB.scripts import analyze

    # Set up a module to act as a broken import
    module_name = 'brokenmodule'
    module = types.ModuleType(module_name)
    sys.modules[module_name] = module

    class Broken(MinPO):
        __module__ = module_name
    module.Broken = Broken

    # [oid, revid] pairs; revid updated after each commit.
    oids = [[self._storage.new_oid(), None] for i in range(3)]

    def store(i, data):
        # Store record i under the transaction `t` from the enclosing loop.
        oid, revid = oids[i]
        self._storage.store(oid, revid, data, "", t)

    for i in range(2):
        t = TransactionMetaData()
        self._storage.tpc_begin(t)
        # sometimes data is in this format
        store(0, dumps(OOBTree, _protocol))
        # and it could be from a broken module
        store(1, dumps(Broken, _protocol))
        # but mostly it looks like this
        store(2, zodb_pickle(MinPO(2)))
        self._storage.tpc_vote(t)
        tid = self._storage.tpc_finish(t)
        for oid_revid in oids:
            oid_revid[1] = tid

    # now break the import of the Broken class
    del sys.modules[module_name]

    # from ZODB.scripts.analyze.analyze
    fsi = self._storage.iterator()
    rep = analyze.Report()
    for txn in fsi:
        analyze.analyze_trans(rep, txn)

    # from ZODB.scripts.analyze.report
    typemap = sorted(rep.TYPEMAP.keys())
    cumpct = 0.0
    for t in typemap:
        pct = rep.TYPESIZE[t] * 100.0 / rep.DBYTES
        cumpct += pct

    # Every record, broken or not, must be counted.
    self.assertAlmostEqual(cumpct, 100.0, 0,
                           "Failed to analyze some records")
def checkRestoreAfterDoubleCommit(self):
    """copyTransactionsFrom must cope with one oid stored twice in a
    single transaction."""
    oid = self._storage.new_oid()
    initial_revid = b'\0' * 8
    first_pickle = zodb_pickle(MinPO(11))
    second_pickle = zodb_pickle(MinPO(12))
    txn = transaction.Transaction()
    try:
        self._storage.tpc_begin(txn)
        # Store the same object twice within one transaction.
        self._storage.store(oid, initial_revid, first_pickle, '', txn)
        self._storage.store(oid, initial_revid, second_pickle, '', txn)
        self._storage.tpc_vote(txn)
        self._storage.tpc_finish(txn)
    except:
        self._storage.tpc_abort(txn)
        raise
    self._dst.copyTransactionsFrom(self._storage)
    self.compare(self._storage, self._dst)
def run(self):
    """Vote a transaction, signal the test thread, then attempt the
    tpc_finish that the test is expected to make fail."""
    self.storage.tpc_begin(self.trans)
    fresh_oid = self.storage.new_oid()
    self.storage.store(fresh_oid, ZERO, zodb_pickle(MinPO("c")), '',
                       self.trans)
    self.storage.tpc_vote(self.trans)
    self.threadStartedEvent.set()
    self.doNextEvent.wait(10)
    try:
        self.storage.tpc_finish(self.trans)
    except ZEO.Exceptions.ClientStorageError:
        # Record the failure and clean up the pending transaction.
        self.gotValueError = 1
        self.storage.tpc_abort(self.trans)
def checkRSResolve(self):
    """Resolve a conflict through a second storage instance holding an
    open load transaction; the main storage then sees the result."""
    # ZODB.tests.ConflictResolution.ConflictResolvingStorage has a
    # checkResolve with a different signature (as of 4.4.0) that we were
    # unintentionally(?) shadowing, hence the weird name.
    obj = PCounter()
    obj.inc()
    oid = self._storage.new_oid()
    revid1 = self._dostoreNP(oid, data=zodb_pickle(obj))
    obj.inc()
    obj.inc()
    # The effect of committing two transactions with the same
    # pickle is to commit two different transactions relative to
    # revid1 that add two to _value.
    # open s1
    s1 = self._storage.new_instance()
    # start a load transaction in s1
    s1.poll_invalidations()
    # commit a change
    _revid2 = self._dostoreNP(oid, revid=revid1, data=zodb_pickle(obj))
    # commit a conflicting change using s1
    main_storage = self._storage
    self._storage = s1
    try:
        # we can resolve this conflict because s1 has an open
        # transaction that can read the old state of the object.
        _revid3 = self._dostoreNP(oid, revid=revid1, data=zodb_pickle(obj))
        s1.release()
    finally:
        self._storage = main_storage
    # 1 (base) + 2 + 2 after successful resolution.
    data, _serialno = self._storage.load(oid, '')
    inst = zodb_unpickle(data)
    self.assertEqual(inst._value, 5)
def helper(tid, revid, x):
    """Commit MinPO(x) for the enclosing oid under an explicit tid and
    return the new revid; abort and re-raise on any failure."""
    pickled = zodb_pickle(MinPO(x))
    txn = TransactionMetaData()
    try:
        self._storage.tpc_begin(txn, p64(tid))
        self._storage.store(oid, revid, pickled, '', txn)
        # Finish the transaction.
        self._storage.tpc_vote(txn)
        newrevid = self._storage.tpc_finish(txn)
    except:
        self._storage.tpc_abort(txn)
        raise
    return newrevid
def helper(tid, revid, x):
    """Commit MinPO(x) for the enclosing oid under an explicit tid and
    return the new revid; abort and re-raise on any failure."""
    pickled = zodb_pickle(MinPO(x))
    txn = TransactionMetaData()
    try:
        self._storage.tpc_begin(txn, p64(tid))
        self._storage.store(oid, revid, pickled, '', txn)
        # Finish the transaction.
        self._storage.tpc_vote(txn)
        newrevid = self._storage.tpc_finish(txn)
    except:
        self._storage.tpc_abort(txn)
        raise
    return newrevid
def helper(tid, revid, x):
    """Commit MinPO(x) for the enclosing oid under an explicit tid and
    return its new revid; abort and re-raise on any failure."""
    pickled = zodb_pickle(MinPO(x))
    txn = transaction.Transaction()
    try:
        self._storage.tpc_begin(txn, p64(tid))
        store_result = self._storage.store(oid, revid, pickled, '', txn)
        # Finish the transaction.
        vote_result = self._storage.tpc_vote(txn)
        newrevid = handle_serials(oid, store_result, vote_result)
        self._storage.tpc_finish(txn)
    except:
        self._storage.tpc_abort(txn)
        raise
    return newrevid
def checkSerialIsNoneForInitialRevision(self):
    """Storing with serial=None must work for an object's first revision."""
    oid = self._storage.new_oid()
    txn = TransactionMetaData()
    self._storage.tpc_begin(txn)
    # Pass None explicitly; _dostore() would coerce it to ZERO.
    self._storage.store(oid, None, zodb_pickle(MinPO(11)), '', txn)
    self._storage.tpc_vote(txn)
    newrevid = self._storage.tpc_finish(txn)
    data, revid = utils.load_current(self._storage, oid)
    self.assertEqual(zodb_unpickle(data), MinPO(11))
    self.assertEqual(revid, newrevid)
def helper(tid, revid, x):
    """Commit MinPO(x) for the enclosing oid under an explicit tid and
    return its new revid; abort and re-raise on any failure."""
    pickled = zodb_pickle(MinPO(x))
    txn = transaction.Transaction()
    try:
        self._storage.tpc_begin(txn, p64(tid))
        store_result = self._storage.store(oid, revid, pickled, '', txn)
        # Finish the transaction.
        vote_result = self._storage.tpc_vote(txn)
        newrevid = handle_serials(oid, store_result, vote_result)
        self._storage.tpc_finish(txn)
    except:
        self._storage.tpc_abort(txn)
        raise
    return newrevid
def check10Kstores(self):
    """Issue 10,000 stores (100 txns x 100 objects) to cover the
    every-8000-calls special case in _get_cached_serial()."""
    # The _get_cached_serial() method has a special case
    # every 8000 calls. Make sure it gets minimal coverage.
    oids = [[self._storage.new_oid(), None] for _ in range(100)]
    for _ in range(100):
        txn = transaction.Transaction()
        self._storage.tpc_begin(txn)
        for j, entry in enumerate(oids):
            oid, revid = entry
            serial = self._storage.store(oid, revid, zodb_pickle(MinPO(j)),
                                         "", txn)
            entry[1] = serial
        self._storage.tpc_vote(txn)
        self._storage.tpc_finish(txn)
def checkResolve(self):
    """Conflict resolution must succeed when a second instance holds an
    open load transaction that can still read the old state."""
    obj = PCounter()
    obj.inc()
    oid = self._storage.new_oid()
    revid1 = self._dostoreNP(oid, data=zodb_pickle(obj))
    obj.inc()
    obj.inc()
    # The effect of committing two transactions with the same
    # pickle is to commit two different transactions relative to
    # revid1 that add two to _value.
    # open s1
    s1 = self._storage.new_instance()
    # start a load transaction in s1
    s1.poll_invalidations()
    # commit a change (unused results use the file's underscore convention)
    _revid2 = self._dostoreNP(oid, revid=revid1, data=zodb_pickle(obj))
    # commit a conflicting change using s1
    main_storage = self._storage
    self._storage = s1
    try:
        # we can resolve this conflict because s1 has an open
        # transaction that can read the old state of the object.
        _revid3 = self._dostoreNP(oid, revid=revid1, data=zodb_pickle(obj))
        s1.release()
    finally:
        self._storage = main_storage
    data, _serialno = self._storage.load(oid, '')
    inst = zodb_unpickle(data)
    self.assertEqual(inst._value, 5)
def checkResolve(self):
    """Conflict resolution must succeed when a second instance holds an
    open load transaction that can still read the old state."""
    obj = PCounter()
    obj.inc()
    oid = self._storage.new_oid()
    revid1 = self._dostoreNP(oid, data=zodb_pickle(obj))
    obj.inc()
    obj.inc()
    # The effect of committing two transactions with the same
    # pickle is to commit two different transactions relative to
    # revid1 that add two to _value.
    # open s1
    s1 = self._storage.new_instance()
    # start a load transaction in s1
    s1.poll_invalidations()
    # commit a change (unused results use the file's underscore convention)
    _revid2 = self._dostoreNP(oid, revid=revid1, data=zodb_pickle(obj))
    # commit a conflicting change using s1
    main_storage = self._storage
    self._storage = s1
    try:
        # we can resolve this conflict because s1 has an open
        # transaction that can read the old state of the object.
        _revid3 = self._dostoreNP(oid, revid=revid1, data=zodb_pickle(obj))
        s1.release()
    finally:
        self._storage = main_storage
    data, _serialno = self._storage.load(oid, '')
    inst = zodb_unpickle(data)
    self.assertEqual(inst._value, 5)
def checkSerialIsNoneForInitialRevision(self):
    """Storing with serial=None must work for an object's first revision."""
    oid = self._storage.new_oid()
    txn = transaction.Transaction()
    self._storage.tpc_begin(txn)
    # Use None for serial. Don't use _dostore() here because that coerces
    # serial=None to serial=ZERO.
    store_result = self._storage.store(oid, None, zodb_pickle(MinPO(11)),
                                       '', txn)
    vote_result = self._storage.tpc_vote(txn)
    self._storage.tpc_finish(txn)
    newrevid = handle_serials(oid, store_result, vote_result)
    data, revid = self._storage.load(oid, '')
    self.assertEqual(zodb_unpickle(data), MinPO(11))
    self.assertEqual(revid, newrevid)
def checkDisconnectedAbort(self):
    """tpc_abort after a mid-transaction disconnect must leave the
    storage usable once the server is back."""
    self._storage = self.openClientStorage()
    self._dostore()
    pending_oids = [self._storage.new_oid() for _ in range(5)]
    txn = TransactionMetaData()
    self._storage.tpc_begin(txn)
    for pending in pending_oids:
        self._storage.store(pending, None, zodb_pickle(MinPO(pending)),
                            '', txn)
    # Kill the server mid-transaction; the vote must fail fast.
    self.shutdownServer()
    with short_timeout(self):
        self.assertRaises(ClientDisconnected, self._storage.tpc_vote, txn)
    self.startServer(create=0)
    self._storage.tpc_abort(txn)
    # A normal store must succeed again after the abort.
    self._dostore()
def checkDisconnectedAbort(self):
    """tpc_abort after a mid-transaction disconnect must leave the
    storage usable once the server is back."""
    self._storage = self.openClientStorage()
    self._dostore()
    pending_oids = [self._storage.new_oid() for _ in range(5)]
    txn = TransactionMetaData()
    self._storage.tpc_begin(txn)
    for pending in pending_oids:
        self._storage.store(pending, None, zodb_pickle(MinPO(pending)),
                            '', txn)
    # Kill the server mid-transaction; the vote must fail fast.
    self.shutdownServer()
    with short_timeout(self):
        self.assertRaises(ClientDisconnected, self._storage.tpc_vote, txn)
    self.startServer(create=0)
    self._storage.tpc_abort(txn)
    # A normal store must succeed again after the abort.
    self._dostore()
def checkSerialIsNoneForInitialRevision(self):
    """Storing with serial=None must be accepted for a new object."""
    fresh_oid = self._storage.new_oid()
    txn = TransactionMetaData()
    self._storage.tpc_begin(txn)
    # Pass None explicitly; _dostore() would coerce it to ZERO.
    self._storage.store(fresh_oid, None, zodb_pickle(MinPO(11)), '', txn)
    self._storage.tpc_vote(txn)
    committed_revid = self._storage.tpc_finish(txn)
    data, current_revid = utils.load_current(self._storage, fresh_oid)
    self.assertEqual(zodb_unpickle(data), MinPO(11))
    self.assertEqual(current_revid, committed_revid)
def checkSerialIsNoneForInitialRevision(self):
    """Storing with serial=None must work for an object's first revision."""
    oid = self._storage.new_oid()
    txn = transaction.Transaction()
    self._storage.tpc_begin(txn)
    # Use None for serial. Don't use _dostore() here because that coerces
    # serial=None to serial=ZERO.
    store_result = self._storage.store(oid, None, zodb_pickle(MinPO(11)),
                                       "", txn)
    vote_result = self._storage.tpc_vote(txn)
    self._storage.tpc_finish(txn)
    newrevid = handle_serials(oid, store_result, vote_result)
    data, revid = self._storage.load(oid, "")
    self.assertEqual(zodb_unpickle(data), MinPO(11))
    self.assertEqual(revid, newrevid)
def checkAbortAfterVote(self):
    """Aborting after tpc_vote must leave committed data intact and
    allow subsequent commits."""
    oid1 = self._storage.new_oid()
    revid1 = self._dostore(oid=oid1, data=MinPO(-2))
    aborted_oid = self._storage.new_oid()
    txn = TransactionMetaData()
    self._storage.tpc_begin(txn)
    self._storage.store(aborted_oid, ZERO, zodb_pickle(MinPO(5)), '', txn)
    # Vote, then abort instead of finishing.
    self._storage.tpc_vote(txn)
    self._storage.tpc_abort(txn)
    # A fresh store must still succeed.
    oid2 = self._storage.new_oid()
    revid2 = self._dostore(oid=oid2, data=MinPO(6))
    for known_oid, known_revid in [(oid1, revid1), (oid2, revid2)]:
        data, current = utils.load_current(self._storage, known_oid)
        self.assertEqual(known_revid, current)
def checkAbortAfterVote(self):
    """Aborting after tpc_vote must leave committed data intact and
    allow subsequent commits."""
    oid1 = self._storage.new_oid()
    revid1 = self._dostore(oid=oid1, data=MinPO(-2))
    aborted_oid = self._storage.new_oid()
    txn = transaction.Transaction()
    self._storage.tpc_begin(txn)
    self._storage.store(aborted_oid, ZERO, zodb_pickle(MinPO(5)), "", txn)
    # Vote, then abort instead of finishing.
    self._storage.tpc_vote(txn)
    self._storage.tpc_abort(txn)
    # A fresh store must still succeed.
    oid2 = self._storage.new_oid()
    revid2 = self._dostore(oid=oid2, data=MinPO(6))
    for known_oid, known_revid in [(oid1, revid1), (oid2, revid2)]:
        data, current = self._storage.load(known_oid, "")
        self.assertEqual(known_revid, current)
def checkIterationIntraTransaction(self):
    """Iterating the storage while a transaction is voted but not yet
    finished must not break.

    Overrides the broken version from IteratorStorage; prior to
    https://github.com/zopefoundation/ZODB/pull/281 it passed a
    native str, not bytes, as the previous tid.
    """
    oid = self._storage.new_oid()
    txn = TransactionMetaData()
    pickled = zodb_pickle(MinPO(0))
    try:
        self._storage.tpc_begin(txn)
        self._storage.store(oid, RevisionStorage.ZERO, pickled, '', txn)
        self._storage.tpc_vote(txn)
        # Iterate before tpc_finish.
        for record in self._storage.iterator():
            self.assertIsNotNone(record)
    finally:
        self._storage.tpc_finish(txn)
def dostore(self, i):
    """Commit MinPO((threadname, i)) as a brand-new object, pausing
    between every protocol step, and record its revid."""
    pickled = zodb_pickle(MinPO((self.getName(), i)))
    txn = TransactionMetaData()
    new_oid = self.oid()
    self.pause()
    self.storage.tpc_begin(txn)
    self.pause()
    # revid=None signifies creation of a new object.
    self.storage.store(new_oid, None, pickled, '', txn)
    self.pause()
    self.storage.tpc_vote(txn)
    self.pause()
    committed_revid = self.storage.tpc_finish(txn)
    self.pause()
    self.oids[new_oid] = committed_revid
def main(): if len(sys.argv) not in (3, 4): sys.stderr.write("Usage: timeout.py address delay [storage-name]\n" % sys.argv[0]) sys.exit(2) hostport = sys.argv[1] delay = float(sys.argv[2]) if sys.argv[3:]: name = sys.argv[3] else: name = "1" if "/" in hostport: address = hostport else: if ":" in hostport: i = hostport.index(":") host, port = hostport[:i], hostport[i+1:] else: host, port = "", hostport port = int(port) address = (host, port) print "Connecting to %s..." % repr(address) storage = ClientStorage(address, name) print "Connected. Now starting a transaction..." oid = storage.new_oid() version = "" revid = ZERO data = MinPO("timeout.py") pickled_data = zodb_pickle(data) t = Transaction() t.user = "******" storage.tpc_begin(t) storage.store(oid, revid, pickled_data, version, t) print "Stored. Now voting..." storage.tpc_vote(t) print "Voted; now sleeping %s..." % delay time.sleep(delay) print "Done."
def testReplicationBlockedByUnfinished(self):
    """Verify that a pending transaction holds off replication.

    An outdated storage brought back into the cluster must stay
    out-of-date until the unfinished (begun but not committed)
    transaction is finished; only then may replication complete.
    """
    # start a cluster with 1 of 2 storages and a replica
    (started, stopped) = self.__setup(storage_number=2, replicas=1,
                                      pending_number=1, partitions=10)
    self.neo.expectRunning(started[0])
    self.neo.expectStorageNotKnown(stopped[0])
    self.neo.expectOudatedCells(number=0)
    self.neo.expectClusterRunning()
    self.__populate()
    self.neo.expectOudatedCells(number=0)
    # start a transaction that will block the end of the replication
    db, conn = self.neo.getZODBConnection()
    st = conn._storage
    t = transaction.Transaction()
    t.user = '******'
    t.description = 'desc'
    oid = st.new_oid()
    rev = '\0' * 8
    data = zodb_pickle(PObject(42))
    # Store but deliberately do NOT vote/finish yet: the open
    # transaction is what should block replication below.
    st.tpc_begin(t)
    st.store(oid, rev, data, '', t)
    # start the oudated storage
    stopped[0].start()
    self.neo.expectPending(stopped[0])
    self.neo.neoctl.enableStorageList([stopped[0].getUUID()])
    self.neo.neoctl.tweakPartitionTable()
    self.neo.expectRunning(stopped[0])
    self.neo.expectClusterRunning()
    self.neo.expectAssignedCells(started[0], 10)
    self.neo.expectAssignedCells(stopped[0], 10)
    # wait a bit, replication must not happen. This hack is required
    # because we cannot gather informations directly from the storages
    time.sleep(10)
    # All 10 cells of the restarted storage must still be outdated.
    self.neo.expectOudatedCells(number=10)
    # finish the transaction, the replication must happen and finish
    st.tpc_vote(t)
    st.tpc_finish(t)
    self.neo.expectOudatedCells(number=0, timeout=10)
def dostore(self, i):
    # Write a fresh MinPO holding (thread-name, counter); each phase of
    # the two-phase commit is separated by pause() to force the threads
    # to interleave.
    payload = zodb_pickle(MinPO((self.getName(), i)))
    meta = TransactionMetaData()
    new_oid = self.oid()
    self.pause()
    self.storage.tpc_begin(meta)
    self.pause()
    # revid None signifies creation of a new object.
    self.storage.store(new_oid, None, payload, '', meta)
    self.pause()
    self.storage.tpc_vote(meta)
    self.pause()
    tid = self.storage.tpc_finish(meta)
    self.pause()
    self.oids[new_oid] = tid
def main():
    """Open a voted-but-unfinished ZEO transaction and sleep.

    Used to exercise the server's transaction timeout handling.

    Command line: ``timeout.py address delay [storage-name]``.
    """
    if len(sys.argv) not in (3, 4):
        # Bug fix: the original format string contained no %s, so the
        # '%' application raised TypeError rather than printing usage.
        sys.stderr.write("Usage: %s address delay [storage-name]\n"
                         % sys.argv[0])
        sys.exit(2)
    hostport = sys.argv[1]
    delay = float(sys.argv[2])
    # Storage name defaults to "1" when not given.
    if sys.argv[3:]:
        name = sys.argv[3]
    else:
        name = "1"
    if "/" in hostport:
        # Slash => Unix-domain socket path.
        address = hostport
    else:
        # "host:port" or a bare port number.
        if ":" in hostport:
            i = hostport.index(":")
            host, port = hostport[:i], hostport[i + 1:]
        else:
            host, port = "", hostport
        port = int(port)
        address = (host, port)
    # Parenthesized single-argument print prints identically under
    # Python 2 and is valid Python 3 syntax.
    print("Connecting to %s..." % repr(address))
    storage = ClientStorage(address, name)
    print("Connected. Now starting a transaction...")
    oid = storage.new_oid()
    version = ""
    revid = ZERO
    data = MinPO("timeout.py")
    pickled_data = zodb_pickle(data)
    t = Transaction()
    t.user = "******"
    storage.tpc_begin(t)
    storage.store(oid, revid, pickled_data, version, t)
    print("Stored. Now voting...")
    storage.tpc_vote(t)
    # Keep the voted transaction open so the server timeout can trigger.
    print("Voted; now sleeping %s..." % delay)
    time.sleep(delay)
    print("Done.")
def checkIterationIntraTransaction(self):
    # XXX try this test with logging enabled. If you see something like
    #
    # ZODB FS FS21 warn: FileStorageTests.fs truncated, possibly due to
    # damaged records at 4
    #
    # Then the code in FileIterator.next() hasn't yet been fixed.
    oid = self._storage.new_oid()
    txn = Transaction()
    pickled = zodb_pickle(MinPO(0))
    try:
        self._storage.tpc_begin(txn)
        self._storage.store(oid, '\0' * 8, pickled, '', txn)
        self._storage.tpc_vote(txn)
        # No tpc_finish yet: iterate while the transaction is still open.
        for record in self._storage.iterator():
            pass
    finally:
        self._storage.tpc_finish(txn)
def testReplicationBlockedByUnfinished(self):
    """Replication must not finish while a transaction is unfinished.

    A storage that rejoins the cluster stays outdated until the
    begun-but-uncommitted transaction below is finished.
    """
    # start a cluster with 1 of 2 storages and a replica
    (started, stopped) = self.__setup(storage_number=2, replicas=1,
                                      pending_number=1, partitions=10)
    self.neo.expectRunning(started[0])
    self.neo.expectStorageNotKnown(stopped[0])
    self.neo.expectOudatedCells(number=0)
    self.neo.expectClusterRunning()
    self.__populate()
    self.neo.expectOudatedCells(number=0)
    # start a transaction that will block the end of the replication
    db, conn = self.neo.getZODBConnection()
    st = conn._storage
    t = transaction.Transaction()
    t.user = '******'
    t.description = 'desc'
    oid = st.new_oid()
    rev = '\0' * 8
    data = zodb_pickle(PObject(42))
    # Begin and store only -- vote/finish is intentionally deferred so
    # the transaction stays open while replication is attempted.
    st.tpc_begin(t)
    st.store(oid, rev, data, '', t)
    # start the oudated storage
    stopped[0].start()
    self.neo.expectPending(stopped[0])
    self.neo.neoctl.enableStorageList([stopped[0].getUUID()])
    self.neo.neoctl.tweakPartitionTable()
    self.neo.expectRunning(stopped[0])
    self.neo.expectClusterRunning()
    self.neo.expectAssignedCells(started[0], 10)
    self.neo.expectAssignedCells(stopped[0], 10)
    # wait a bit, replication must not happen. This hack is required
    # because we cannot gather informations directly from the storages
    time.sleep(10)
    # Every cell of the rejoined storage must still be outdated.
    self.neo.expectOudatedCells(number=10)
    # finish the transaction, the replication must happen and finish
    st.tpc_vote(t)
    st.tpc_finish(t)
    self.neo.expectOudatedCells(number=0, timeout=10)
def dostore(self, i):
    # Store one new MinPO identified by (thread name, i). pause() between
    # every two-phase-commit step maximizes interleaving across threads.
    data = zodb_pickle(MinPO((self.getName(), i)))
    txn = transaction.Transaction()
    oid = self.oid()
    self.pause()
    self.storage.tpc_begin(txn)
    self.pause()
    # revid None => create a brand-new object.
    s1 = self.storage.store(oid, None, data, "", txn)
    self.pause()
    s2 = self.storage.tpc_vote(txn)
    self.pause()
    self.storage.tpc_finish(txn)
    self.pause()
    # The serial can be reported by either store() or tpc_vote();
    # handle_serials picks out the one for our oid.
    self.oids[oid] = handle_serials(oid, s1, s2)
def testGreaterOIDSaved(self):
    """
    Store an object with an OID greater than the last generated by
    the master. This OID must be intercepted at commit, used for next
    OID generations and persistently saved on storage nodes.
    """
    self.neo.start()
    db1, conn1 = self.neo.getZODBConnection()
    st1 = conn1._storage
    txn = transaction.Transaction()
    base_rev = '\0' * 8
    payload = zodb_pickle(PObject())
    big_oid = pack('!Q', 100000)
    # Commit directly under the hand-picked (large) OID.
    st1.tpc_begin(txn)
    st1.store(big_oid, base_rev, payload, '', txn)
    st1.tpc_vote(txn)
    st1.tpc_finish(txn)
    # Any OID generated afterwards must exceed the injected one.
    next_oid = st1.new_oid()
    self.assertTrue(next_oid > big_oid)
def dostore(self, i):
    # One full two-phase commit of a new MinPO((thread-name, i)), with a
    # pause() after every step so concurrent runs interleave heavily.
    pickled = zodb_pickle(MinPO((self.getName(), i)))
    txn = transaction.Transaction()
    fresh_oid = self.oid()
    self.pause()
    self.storage.tpc_begin(txn)
    self.pause()
    # None in the revid slot marks this as object creation.
    store_result = self.storage.store(fresh_oid, None, pickled, '', txn)
    self.pause()
    vote_result = self.storage.tpc_vote(txn)
    self.pause()
    self.storage.tpc_finish(txn)
    self.pause()
    # Either store() or tpc_vote() carried the serial for this oid.
    new_revid = handle_serials(fresh_oid, store_result, vote_result)
    self.oids[fresh_oid] = new_revid