def testRedo(self):
    database = DB(self._storage)
    connection = database.open()
    root = connection.root()
    blob = Blob()

    transaction.begin()
    with blob.open('w') as file:
        file.write(b'this is state 1')
    root['blob'] = blob
    transaction.commit()

    transaction.begin()
    blob = root['blob']
    with blob.open('w') as file:
        file.write(b'this is state 2')
    transaction.commit()

    database.undo(database.undoLog(0, 1)[0]['id'])
    transaction.commit()

    with blob.open('r') as file:
        self.assertEqual(file.read(), b'this is state 1')

    database.undo(database.undoLog(0, 1)[0]['id'])
    transaction.commit()

    with blob.open('r') as file:
        self.assertEqual(file.read(), b'this is state 2')

    database.close()
def testUndo(self):
    database = DB(self._storage)
    connection = database.open()
    root = connection.root()

    transaction.begin()
    blob = Blob()
    with blob.open('w') as f:
        f.write(b'this is state 1')
    root['blob'] = blob
    transaction.commit()

    transaction.begin()
    blob = root['blob']
    with blob.open('w') as f:
        f.write(b'this is state 2')
    transaction.commit()

    database.undo(database.undoLog(0, 1)[0]['id'])
    transaction.commit()

    with blob.open('r') as f:
        data = f.read()
    self.assertEqual(data, b'this is state 1')

    database.close()
def testLargeBlob(self):
    # Large blobs are chunked into multiple pieces; we want to know
    # whether they come out the same way they went in.
    db = DB(self._storage)
    conn = db.open()
    blob = conn.root()[1] = ZODB.blob.Blob()
    blob_file = blob.open('w')
    signature = self._random_file(self.testsize, blob_file)
    blob_file.close()
    transaction.commit()
    conn.close()

    # Clear the cache
    for base, _dir, files in os.walk('.'):
        for f in files:
            if f.endswith('.blob'):
                ZODB.blob.remove_committed(os.path.join(base, f))

    # Re-download blob
    conn = db.open()
    with conn.root()[1].open('r') as blob:
        self.assertEqual(self._md5sum(blob), signature)

    conn.close()
    db.close()
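# The `_random_file` and `_md5sum` helpers used above are not shown in this
# snippet. The sketch below is an assumption of what they could look like,
# modeled on the similarly named module-level helpers in ZEO's blob tests;
# the names, chunk size, and use of os.urandom are illustrative, not the
# actual implementation. Assumes `hashlib` and `os` are imported.
import hashlib

def _random_file(self, size, fileobj):
    # Write `size` bytes of pseudo-random data to `fileobj` and return an
    # md5 hex digest of what was written, so the test can compare it with
    # the digest of the data read back after the commit.
    signature = hashlib.md5()
    remaining = size
    while remaining > 0:
        chunk = os.urandom(min(remaining, 1 << 16))
        signature.update(chunk)
        fileobj.write(chunk)
        remaining -= len(chunk)
    return signature.hexdigest()

def _md5sum(self, fileobj):
    # Digest a file in chunks so large blobs never have to fit in memory.
    signature = hashlib.md5()
    while True:
        data = fileobj.read(1 << 16)
        if not data:
            break
        signature.update(data)
    return signature.hexdigest()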
def testUndoAfterConsumption(self):
    database = DB(self._storage)
    connection = database.open()
    root = connection.root()

    transaction.begin()
    with open('consume1', 'wb') as f:
        f.write(b'this is state 1')
    blob = Blob()
    blob.consumeFile('consume1')
    root['blob'] = blob
    transaction.commit()

    transaction.begin()
    blob = root['blob']
    with open('consume2', 'wb') as f:
        f.write(b'this is state 2')
    blob.consumeFile('consume2')
    transaction.commit()

    database.undo(database.undoLog(0, 1)[0]['id'])
    transaction.commit()

    with blob.open('r') as f:
        data = f.read()
    self.assertEqual(data, b'this is state 1')

    database.close()
def checkStoreBlob(self):
    from ZODB.utils import oid_repr, tid_repr
    from ZODB.blob import Blob, BLOB_SUFFIX
    from ZODB.tests.StorageTestBase import zodb_pickle, ZERO, \
         handle_serials
    import transaction

    somedata = 'a' * 10

    blob = Blob()
    bd_fh = blob.open('w')
    bd_fh.write(somedata)
    bd_fh.close()
    tfname = bd_fh.name
    oid = self._storage.new_oid()
    data = zodb_pickle(blob)
    self.assert_(os.path.exists(tfname))

    t = transaction.Transaction()
    try:
        self._storage.tpc_begin(t)
        r1 = self._storage.storeBlob(oid, ZERO, data, tfname, '', t)
        r2 = self._storage.tpc_vote(t)
        revid = handle_serials(oid, r1, r2)
        self._storage.tpc_finish(t)
    except:
        self._storage.tpc_abort(t)
        raise

    self.assert_(not os.path.exists(tfname))
    filename = self._storage.fshelper.getBlobFilename(oid, revid)
    self.assert_(os.path.exists(filename))
    self.assertEqual(somedata, open(filename).read())
def testUndoAfterConsumption(self):
    database = DB(self._storage)
    connection = database.open()
    root = connection.root()

    transaction.begin()
    with open('consume1', 'wb') as file:
        file.write(b'this is state 1')
    blob = Blob()
    blob.consumeFile('consume1')
    root['blob'] = blob
    transaction.commit()

    transaction.begin()
    blob = root['blob']
    with open('consume2', 'wb') as file:
        file.write(b'this is state 2')
    blob.consumeFile('consume2')
    transaction.commit()

    database.undo(database.undoLog(0, 1)[0]['id'])
    transaction.commit()

    with blob.open('r') as file:
        self.assertEqual(file.read(), b'this is state 1')

    database.close()
def checkStoreBlob(self):
    from ZODB.utils import oid_repr, tid_repr
    from ZODB.blob import Blob, BLOB_SUFFIX
    from ZODB.tests.StorageTestBase import zodb_pickle, ZERO, handle_serials
    import transaction

    somedata = "a" * 10

    blob = Blob()
    bd_fh = blob.open("w")
    bd_fh.write(somedata)
    bd_fh.close()
    tfname = bd_fh.name
    oid = self._storage.new_oid()
    data = zodb_pickle(blob)
    self.assert_(os.path.exists(tfname))

    t = transaction.Transaction()
    try:
        self._storage.tpc_begin(t)
        r1 = self._storage.storeBlob(oid, ZERO, data, tfname, "", t)
        r2 = self._storage.tpc_vote(t)
        revid = handle_serials(oid, r1, r2)
        self._storage.tpc_finish(t)
    except:
        self._storage.tpc_abort(t)
        raise

    self.assert_(not os.path.exists(tfname))
    filename = self._storage.fshelper.getBlobFilename(oid, revid)
    self.assert_(os.path.exists(filename))
    self.assertEqual(somedata, open(filename).read())
def testUndoAfterConsumption(self):
    base_storage = FileStorage(self.storagefile)
    blob_storage = BlobStorage(self.blob_dir, base_storage)
    database = DB(blob_storage)
    connection = database.open()
    root = connection.root()

    transaction.begin()
    open('consume1', 'w').write('this is state 1')
    blob = Blob()
    blob.consumeFile('consume1')
    root['blob'] = blob
    transaction.commit()

    transaction.begin()
    blob = root['blob']
    open('consume2', 'w').write('this is state 2')
    blob.consumeFile('consume2')
    transaction.commit()

    database.undo(database.undoLog(0, 1)[0]['id'])
    transaction.commit()

    self.assertEqual(blob.open('r').read(), 'this is state 1')

    database.close()
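# This variant builds its own FileStorage/BlobStorage stack from
# `self.storagefile` and `self.blob_dir`, which are created elsewhere.
# A minimal sketch of such a fixture, with hypothetical attribute names
# matching the usage above:
import shutil
import tempfile

def setUp(self):
    self.temp_dir = tempfile.mkdtemp()                          # scratch area for the test
    self.storagefile = os.path.join(self.temp_dir, 'Data.fs')   # FileStorage data file
    self.blob_dir = os.path.join(self.temp_dir, 'blobs')        # BlobStorage directory

def tearDown(self):
    shutil.rmtree(self.temp_dir)                                # drop storage and blobs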
def checkLoadBlob(self):
    from ZODB.blob import Blob
    from ZODB.tests.StorageTestBase import zodb_pickle, ZERO, handle_serials
    import transaction

    somedata = "a" * 10

    blob = Blob()
    bd_fh = blob.open("w")
    bd_fh.write(somedata)
    bd_fh.close()
    tfname = bd_fh.name
    oid = self._storage.new_oid()
    data = zodb_pickle(blob)

    t = transaction.Transaction()
    try:
        self._storage.tpc_begin(t)
        r1 = self._storage.storeBlob(oid, ZERO, data, tfname, "", t)
        r2 = self._storage.tpc_vote(t)
        serial = handle_serials(oid, r1, r2)
        self._storage.tpc_finish(t)
    except:
        self._storage.tpc_abort(t)
        raise

    filename = self._storage.loadBlob(oid, serial)
    self.assertEquals(somedata, open(filename, "rb").read())
    self.assert_(not (os.stat(filename).st_mode & stat.S_IWRITE))
    self.assert_((os.stat(filename).st_mode & stat.S_IREAD))
def checkLoadBlob(self):
    from ZODB.blob import Blob
    from ZODB.tests.StorageTestBase import zodb_pickle, ZERO, \
         handle_serials
    import transaction

    version = ''
    somedata = 'a' * 10

    blob = Blob()
    bd_fh = blob.open('w')
    bd_fh.write(somedata)
    bd_fh.close()
    tfname = bd_fh.name
    oid = self._storage.new_oid()
    data = zodb_pickle(blob)

    t = transaction.Transaction()
    try:
        self._storage.tpc_begin(t)
        r1 = self._storage.storeBlob(oid, ZERO, data, tfname, '', t)
        r2 = self._storage.tpc_vote(t)
        serial = handle_serials(oid, r1, r2)
        self._storage.tpc_finish(t)
    except:
        self._storage.tpc_abort(t)
        raise

    filename = self._storage.loadBlob(oid, serial)
    self.assertEquals(somedata, open(filename, 'rb').read())
    self.assert_(not (os.stat(filename).st_mode & stat.S_IWRITE))
    self.assert_((os.stat(filename).st_mode & stat.S_IREAD))
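# The tpc_begin/storeBlob/tpc_vote/handle_serials/tpc_finish dance is
# identical across the check* methods above. A hypothetical helper (not
# part of ZODB's test base classes, and assuming the same imports the
# checks above use) could factor out that boilerplate:
def _store_blob_in_transaction(self, oid, data, blob_filename):
    # Run one two-phase commit that stores `data` plus the blob file and
    # return the resulting serial, aborting the transaction on any error.
    t = transaction.Transaction()
    try:
        self._storage.tpc_begin(t)
        r1 = self._storage.storeBlob(oid, ZERO, data, blob_filename, '', t)
        r2 = self._storage.tpc_vote(t)
        serial = handle_serials(oid, r1, r2)
        self._storage.tpc_finish(t)
        return serial
    except:
        self._storage.tpc_abort(t)
        raise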
def testLargeBlob(self):
    # Large blobs are chunked into multiple pieces; we want to know
    # whether they come out the same way they went in.
    db = DB(self._storage)
    conn = db.open()
    blob = conn.root()[1] = ZODB.blob.Blob()
    size = sizeof_fmt(self.testsize)
    self._log('Creating %s blob file' % size)
    blob_file = blob.open('w')
    signature = random_file(self.testsize, blob_file)
    blob_file.close()
    self._log('Committing %s blob file' % size)
    transaction.commit()
    conn.close()

    # Clear the cache
    for base, _dir, files in os.walk('.'):
        for f in files:
            if f.endswith('.blob'):
                ZODB.blob.remove_committed(os.path.join(base, f))

    # Re-download blob
    self._log('Caching %s blob file' % size)
    conn = db.open()
    with conn.root()[1].open('r') as blob:
        self._log('Creating signature for %s blob cache' % size)
        self.assertEqual(md5sum(blob), signature)

    conn.close()
    db.close()
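# `sizeof_fmt` is also undefined in this snippet; a common recipe for
# human-readable byte counts, assumed here purely for illustration of
# what the log messages above would look like:
def sizeof_fmt(num):
    # Render a byte count as a short human-readable string.
    for unit in ('bytes', 'KB', 'MB', 'GB'):
        if num < 1024:
            return '%3.1f %s' % (num, unit)
        num /= 1024.0
    return '%3.1f TB' % num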
def testUndo(self):
    database = DB(self._storage)
    connection = database.open()
    root = connection.root()

    transaction.begin()
    blob = Blob()
    blob.open('w').write(b('this is state 1'))
    root['blob'] = blob
    transaction.commit()

    transaction.begin()
    blob = root['blob']
    blob.open('w').write(b('this is state 2'))
    transaction.commit()

    database.undo(database.undoLog(0, 1)[0]['id'])
    transaction.commit()

    self.assertEqual(blob.open('r').read(), b('this is state 1'))

    database.close()
def testRedo(self):
    database = DB(self._storage)
    connection = database.open()
    root = connection.root()
    blob = Blob()

    transaction.begin()
    blob.open('w').write(b('this is state 1'))
    root['blob'] = blob
    transaction.commit()

    transaction.begin()
    blob = root['blob']
    blob.open('w').write(b('this is state 2'))
    transaction.commit()

    database.undo(database.undoLog(0, 1)[0]['id'])
    transaction.commit()

    self.assertEqual(blob.open('r').read(), b('this is state 1'))

    database.undo(database.undoLog(0, 1)[0]['id'])
    transaction.commit()

    self.assertEqual(blob.open('r').read(), b('this is state 2'))

    database.close()
def testRedo(self):
    base_storage = FileStorage(self.storagefile)
    blob_storage = BlobStorage(self.blob_dir, base_storage)
    database = DB(blob_storage)
    connection = database.open()
    root = connection.root()
    blob = Blob()

    transaction.begin()
    blob.open('w').write('this is state 1')
    root['blob'] = blob
    transaction.commit()

    transaction.begin()
    blob = root['blob']
    blob.open('w').write('this is state 2')
    transaction.commit()

    database.undo(database.undoLog(0, 1)[0]['id'])
    transaction.commit()

    self.assertEqual(blob.open('r').read(), 'this is state 1')

    serial = base64.encodestring(blob_storage._tid)

    database.undo(database.undoLog(0, 1)[0]['id'])
    transaction.commit()

    self.assertEqual(blob.open('r').read(), 'this is state 2')

    database.close()
def testRedoOfCreation(self):
    database = DB(self._storage)
    connection = database.open()
    root = connection.root()
    blob = Blob()

    transaction.begin()
    blob.open('w').write('this is state 1')
    root['blob'] = blob
    transaction.commit()

    database.undo(database.undoLog(0, 1)[0]['id'])
    transaction.commit()

    self.assertRaises(KeyError, root.__getitem__, 'blob')

    database.undo(database.undoLog(0, 1)[0]['id'])
    transaction.commit()

    self.assertEqual(blob.open('r').read(), 'this is state 1')

    database.close()
def testRedoOfCreation(self):
    database = DB(self._storage)
    connection = database.open()
    root = connection.root()
    blob = Blob()

    transaction.begin()
    blob.open('w').write(b('this is state 1'))
    root['blob'] = blob
    transaction.commit()

    database.undo(database.undoLog(0, 1)[0]['id'])
    transaction.commit()

    self.assertRaises(KeyError, root.__getitem__, 'blob')

    database.undo(database.undoLog(0, 1)[0]['id'])
    transaction.commit()

    self.assertEqual(blob.open('r').read(), b('this is state 1'))

    database.close()
def files(self):
    path = unicode(startup.CFG.value('prefs/imgpath', u'').toString())
    path = path.strip('\\/')
    c = 1
    out = []
    base = os.path.join(Item.BASEDIR, path)
    if not os.path.isdir(base):
        os.makedirs(base)
    for blob in self.photos:
        fname = "%s/%s-%s-%i.jpg" % (path, self.item_id,
                                     slugify(self.title), c)
        d = os.path.join(Item.BASEDIR, fname)
        data = blob.open('r').read()
        fout = open(d, 'wb')
        fout.write(data)
        fout.close()
        out.append(fname)
        c += 1
    return out
def testLargeBlob(self):
    # Large blobs are chunked into multiple pieces; we want to know
    # whether they come out the same way they went in.
    db = DB(self._storage)
    conn = db.open()
    blob = conn.root()[1] = ZODB.blob.Blob()
    size = sizeof_fmt(self.testsize)
    self._log('Creating %s blob file' % size)
    signature = random_file(self.testsize, blob.open('w'))
    self._log('Committing %s blob file' % size)
    transaction.commit()

    # Clear the cache
    for base, dir, files in os.walk('.'):
        for f in files:
            if f.endswith('.blob'):
                ZODB.blob.remove_committed(os.path.join(base, f))

    # Re-download blob
    self._log('Caching %s blob file' % size)
    conn = db.open()
    blob = conn.root()[1].open('r')
    self._log('Creating signature for %s blob cache' % size)
    self.assertEqual(md5sum(blob), signature)
def checkStoreAndLoadBlob(self):
    from ZODB.utils import oid_repr, tid_repr
    from ZODB.blob import Blob, BLOB_SUFFIX
    from ZODB.tests.StorageTestBase import zodb_pickle, ZERO, handle_serials
    import transaction

    somedata_path = os.path.join(self.blob_cache_dir, "somedata")
    somedata = open(somedata_path, "w+b")
    for i in range(1000000):
        somedata.write("%s\n" % i)
    somedata.seek(0)
    blob = Blob()
    bd_fh = blob.open("w")
    ZODB.utils.cp(somedata, bd_fh)
    bd_fh.close()
    tfname = bd_fh.name
    oid = self._storage.new_oid()
    data = zodb_pickle(blob)
    self.assert_(os.path.exists(tfname))

    t = transaction.Transaction()
    try:
        self._storage.tpc_begin(t)
        r1 = self._storage.storeBlob(oid, ZERO, data, tfname, "", t)
        r2 = self._storage.tpc_vote(t)
        revid = handle_serials(oid, r1, r2)
        self._storage.tpc_finish(t)
    except:
        self._storage.tpc_abort(t)
        raise

    # The uncommitted data file should have been removed
    self.assert_(not os.path.exists(tfname))

    def check_data(path):
        self.assert_(os.path.exists(path))
        f = open(path, "rb")
        somedata.seek(0)
        d1 = d2 = 1
        while d1 or d2:
            d1 = f.read(8096)
            d2 = somedata.read(8096)
            self.assertEqual(d1, d2)

    # The file should be in the cache ...
    filename = self._storage.fshelper.getBlobFilename(oid, revid)
    check_data(filename)

    # ... and on the server
    server_filename = os.path.join(
        self.blobdir, ZODB.blob.BushyLayout().getBlobFilePath(oid, revid))
    self.assert_(server_filename.startswith(self.blobdir))
    check_data(server_filename)

    # If we remove it from the cache and call loadBlob, it should
    # come back. We can do this in many threads. We'll instrument
    # the method that is used to request data from the server to
    # verify that it is only called once.
    sendBlob_org = ZEO.ServerStub.StorageServer.sendBlob
    calls = []

    def sendBlob(self, oid, serial):
        calls.append((oid, serial))
        sendBlob_org(self, oid, serial)

    ZODB.blob.remove_committed(filename)
    returns = []
    threads = [
        threading.Thread(target=lambda: returns.append(
            self._storage.loadBlob(oid, revid)))
        for i in range(10)
    ]
    [thread.start() for thread in threads]
    [thread.join() for thread in threads]
    [self.assertEqual(r, filename) for r in returns]
    check_data(filename)
def checkStoreAndLoadBlob(self):
    from ZODB.utils import oid_repr, tid_repr
    from ZODB.blob import Blob, BLOB_SUFFIX
    from ZODB.tests.StorageTestBase import zodb_pickle, ZERO, \
         handle_serials
    import transaction

    somedata_path = os.path.join(self.blob_cache_dir, 'somedata')
    somedata = open(somedata_path, 'w+b')
    for i in range(1000000):
        somedata.write("%s\n" % i)
    somedata.seek(0)
    blob = Blob()
    bd_fh = blob.open('w')
    ZODB.utils.cp(somedata, bd_fh)
    bd_fh.close()
    tfname = bd_fh.name
    oid = self._storage.new_oid()
    data = zodb_pickle(blob)
    self.assert_(os.path.exists(tfname))

    t = transaction.Transaction()
    try:
        self._storage.tpc_begin(t)
        r1 = self._storage.storeBlob(oid, ZERO, data, tfname, '', t)
        r2 = self._storage.tpc_vote(t)
        revid = handle_serials(oid, r1, r2)
        self._storage.tpc_finish(t)
    except:
        self._storage.tpc_abort(t)
        raise

    # The uncommitted data file should have been removed
    self.assert_(not os.path.exists(tfname))

    def check_data(path):
        self.assert_(os.path.exists(path))
        f = open(path, 'rb')
        somedata.seek(0)
        d1 = d2 = 1
        while d1 or d2:
            d1 = f.read(8096)
            d2 = somedata.read(8096)
            self.assertEqual(d1, d2)

    # The file should be in the cache ...
    filename = self._storage.fshelper.getBlobFilename(oid, revid)
    check_data(filename)

    # ... and on the server
    server_filename = filename.replace(self.blob_cache_dir, self.blobdir)
    self.assert_(server_filename.startswith(self.blobdir))
    check_data(server_filename)

    # If we remove it from the cache and call loadBlob, it should
    # come back. We can do this in many threads. We'll instrument
    # the method that is used to request data from the server to
    # verify that it is only called once.
    sendBlob_org = ZEO.ServerStub.StorageServer.sendBlob
    calls = []

    def sendBlob(self, oid, serial):
        calls.append((oid, serial))
        sendBlob_org(self, oid, serial)

    ZODB.blob.remove_committed(filename)
    returns = []
    threads = [
        threading.Thread(target=lambda: returns.append(
            self._storage.loadBlob(oid, revid)))
        for i in range(10)
    ]
    [thread.start() for thread in threads]
    [thread.join() for thread in threads]
    [self.assertEqual(r, filename) for r in returns]
    check_data(filename)
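# In both checkStoreAndLoadBlob variants above the `sendBlob` wrapper is
# defined but never installed, so `calls` stays empty. A sketch of how the
# instrumentation could be wired up at the tail of the method, under the
# assumption (from the comment's stated intent, not the original test) that
# the server should be asked exactly once:
#
#     ZEO.ServerStub.StorageServer.sendBlob = sendBlob   # install the spy
#     try:
#         ZODB.blob.remove_committed(filename)
#         self._storage.loadBlob(oid, revid)             # triggers one download ...
#         self._storage.loadBlob(oid, revid)             # ... then served from cache
#         self.assertEqual(len(calls), 1)                # server asked only once
#     finally:
#         ZEO.ServerStub.StorageServer.sendBlob = sendBlob_org  # undo the patch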