def test_pack_defaults(self):
    """Pack with default options and verify the superseded revision is gone."""
    import time

    import transaction
    from ZODB.DB import DB
    from ZODB.FileStorage import FileStorage
    from ZODB.POSException import POSKeyError
    from relstorage.zodbpack import main

    fs = FileStorage(self.db_fn, create=True)
    db = DB(fs)
    conn = db.open()

    # Commit two revisions of the root object.
    conn.root()['x'] = 1
    transaction.commit()
    root_oid = b'\0' * 8
    old_state, old_serial = fs.load(root_oid, '')
    # Make sure the replacement revision has a measurably later timestamp.
    time.sleep(0.1)
    conn.root()['x'] = 2
    transaction.commit()
    conn.close()

    # Before packing, the superseded revision is still loadable.
    self.assertEqual(old_state, fs.loadSerial(root_oid, old_serial))
    db.close()
    fs = None

    main(['', self.cfg_fn])

    # packing should have removed the old state.
    fs = FileStorage(self.db_fn)
    self.assertRaises(POSKeyError, fs.loadSerial, root_oid, old_serial)
    fs.close()
def testBadTransaction(self):
    """Corrupt transaction headers on disk and check the recovery tool
    reports an error yet still produces an openable storage."""
    # Find transaction headers and blast them.
    L = self.storage.undoLog()
    r = L[3]
    tid = base64.decodestring(r["id"] + "\n")
    pos1 = self.storage._txn_find(tid, 0)
    r = L[8]
    tid = base64.decodestring(r["id"] + "\n")
    pos2 = self.storage._txn_find(tid, 0)
    self.storage.close()
    # Overwrite the entire header.
    # NOTE(review): the file is opened "a+b"; on POSIX, writes on an
    # append-mode handle go to the end regardless of seek() -- presumably
    # this still corrupts the file enough for the test; confirm on the
    # target platform.
    f = open(self.path, "a+b")
    f.seek(pos1 - 50)
    f.write("\0" * 100)
    f.close()
    # recover() runs the recovery script; its output must mention the
    # damage, and the recovered copy must open cleanly.
    output = self.recover()
    self.assert_('error' in output, output)
    self.recovered = FileStorage(self.dest)
    self.recovered.close()
    # Replace the source with the recovered copy before the second round.
    os.remove(self.path)
    os.rename(self.dest, self.path)
    # Overwrite part of the header.
    f = open(self.path, "a+b")
    f.seek(pos2 + 10)
    f.write("\0" * 100)
    f.close()
    output = self.recover()
    self.assert_('error' in output, output)
    self.recovered = FileStorage(self.dest)
    self.recovered.close()
def test_pack_with_1_day(self):
    """Pack with --days=1 and verify a recent old revision survives."""
    import time

    import transaction
    from ZODB.DB import DB
    from ZODB.FileStorage import FileStorage
    from relstorage.zodbpack import main

    fs = FileStorage(self.db_fn, create=True)
    db = DB(fs)
    conn = db.open()

    # Commit two revisions of the root object.
    conn.root()['x'] = 1
    transaction.commit()
    root_oid = b'\0' * 8
    old_state, old_serial = fs.load(root_oid, '')
    # Ensure the second revision gets a strictly later timestamp.
    time.sleep(0.1)
    conn.root()['x'] = 2
    transaction.commit()
    conn.close()

    self.assertEqual(old_state, fs.loadSerial(root_oid, old_serial))
    db.close()
    fs = None

    main(['', '--days=1', self.cfg_fn])

    # packing should not have removed the old state.
    fs = FileStorage(self.db_fn)
    self.assertEqual(old_state, fs.loadSerial(root_oid, old_serial))
    fs.close()
def connectZODB(self): print "connnecting" if not os.path.exists(self.bd + '.fs'): self.storage = FileStorage(self.bd + '.fs') self.db = DB(self.storage) self.connection = self.db.open() self.root = self.connection.root() self.root = PersistentDict() else: self.storage = FileStorage(self.bd + '.fs') self.db = DB(self.storage) self.connection = self.db.open() self.root = self.connection.root()
def testMigrationTool(self):
    """Round-trip a FileStorage through neomigrate (FS -> NEO -> FS) and
    verify the final dump equals the original."""
    # Build and dump a reference FileStorage.
    dfs_storage = self.__getDataFS()
    dfs_db = ZODB.DB(dfs_storage)
    self.__populate(dfs_db, with_undo=False)
    dump = self.__dump(dfs_storage)
    fs_path = dfs_storage.__name__
    dfs_db.close()
    # Bring up the NEO cluster that will receive the data.
    neo = self.neo
    neo.start()
    kw = {'cluster': neo.cluster_name, 'quiet': None}
    # neomigrate expects a space-separated master node list.
    master_nodes = neo.master_nodes.replace('/', ' ')
    if neo.SSL:
        kw['ca'], kw['cert'], kw['key'] = neo.SSL
    # Import: FileStorage -> NEO.
    p = NEOProcess('neomigrate', fs_path, master_nodes, **kw)
    p.start()
    p.wait()
    # Remove the source so the export below recreates it from NEO.
    os.remove(fs_path)
    # Export: NEO -> FileStorage (arguments reversed).
    p = NEOProcess('neomigrate', master_nodes, fs_path, **kw)
    p.start()
    p.wait()
    self.assertEqual(dump, self.__dump(FileStorage(fs_path)))
def open(self):
    """Open the ZODB FileStorage configured at conf['rdf.store_conf'] and
    bind self.graph to the 'rdflib' ConjunctiveGraph at the database root.

    Raises:
        ZODBSourceOpenFailError: the storage file cannot be created/opened.
        DatabaseConflict: the database file is locked by another process.
    """
    import ZODB
    from ZODB.FileStorage import FileStorage
    from zc.lockfile import LockError
    self.path = self.conf['rdf.store_conf']
    openstr = os.path.abspath(self.path)
    try:
        fs = FileStorage(openstr)
    except IOError:
        L.exception("Failed to create a FileStorage")
        raise ZODBSourceOpenFailError(openstr)
    except LockError:
        L.exception('Found database "{}" is locked when trying to open it. '
                    'The PID of this process: {}'.format(openstr,
                                                         os.getpid()),
                    exc_info=True)
        raise DatabaseConflict('Database ' + openstr + ' locked')
    self.zdb = ZODB.DB(fs, cache_size=1600)
    self.conn = self.zdb.open()
    root = self.conn.root()
    # Create the graph on first use; reuse it on subsequent opens.
    if 'rdflib' not in root:
        root['rdflib'] = ConjunctiveGraph('ZODB')
    self.graph = root['rdflib']
    try:
        transaction.commit()
    except Exception:
        # catch commit exception and close db.
        # otherwise db would stay open and follow up tests
        # will detect the db in error state
        L.exception('Forced to abort transaction on ZODB store opening',
                    exc_info=True)
        transaction.abort()
    transaction.begin()
    self.graph.open(self.path)
def test_storage_has_data(self):
    """storage_has_data() is False on a fresh storage, True once a root exists."""
    from relstorage.zodbconvert import storage_has_data

    source = FileStorage(self.srcfile, create=True)
    self.assertFalse(storage_has_data(source))
    # Opening a DB adds the root object, after which data is present.
    db = DB(source)
    db.close()
    self.assertTrue(storage_has_data(source))
def do_incremental_backup(options, reposz, repofiles):
    """Copy everything after the previous backup point (reposz) into a new
    incremental backup file and record it in the full backup's .dat file.

    Fixes: the .dat file handle leaked if the write/flush/fsync raised;
    the append is now wrapped in try/finally so the handle always closes.
    """
    options.full = False
    dest = os.path.join(options.repository, gen_filename(options))
    if os.path.exists(dest):
        raise WouldOverwriteFiles('Cannot overwrite existing file: %s' % dest)
    # Find the file position of the last completed transaction.
    fs = FileStorage(options.file, read_only=True)
    # Note that the FileStorage ctor calls read_index() which scans the file
    # and returns "the position just after the last valid transaction record".
    # getSize() then returns this position, which is exactly what we want,
    # because we only want to copy stuff from the beginning of the file to the
    # last valid transaction record.
    pos = fs.getSize()
    log('writing index')
    index_file = os.path.join(options.repository,
                              gen_filename(options, '.index'))
    fs._index.save(pos, index_file)
    fs.close()
    log('writing incremental: %s bytes to %s', pos - reposz, dest)
    sum = copyfile(options, dest, reposz, pos - reposz)
    # The first file in repofiles points to the last full backup.  Use this
    # to get the .dat file and append the information for this incremental
    # to that file.
    fullfile = repofiles[0]
    datfile = os.path.splitext(fullfile)[0] + '.dat'
    # This .dat file better exist.  Let the exception percolate if not.
    fp = open(datfile, 'a')
    try:
        print >> fp, dest, reposz, pos, sum
        fp.flush()
        os.fsync(fp.fileno())
    finally:
        fp.close()
def openDB(self):
    """Create a FileStorage-backed DB inside a fresh temporary directory."""
    from ZODB.DB import DB
    from ZODB.FileStorage import FileStorage

    self.dir = tempfile.mkdtemp()
    fs_path = os.path.join(self.dir, 'testQCConflicts.fs')
    self.storage = FileStorage(fs_path)
    self.db = DB(self.storage)
def test_no_overwrite(self):
    """zodbconvert must exit instead of clobbering a destination with data."""
    import transaction
    from ZODB.DB import DB
    from ZODB.FileStorage import FileStorage
    from relstorage.zodbconvert import main
    from relstorage.zodbconvert import storage_has_data

    # Populate both ends: opening a DB creates the root object.
    for filename in (self.srcfile, self.destfile):
        db = DB(FileStorage(filename))
        db.close()

    # With data already on the destination side, main() must bail out.
    self.assertRaises(SystemExit, main, ['', self.cfgfile])
def analyze(path):
    """Scan every transaction in the FileStorage at *path* and return a
    populated Report."""
    report = Report()
    fs = FileStorage(path, read_only=1)
    for txn in fs.iterator():
        analyze_trans(report, txn)
    return report
def setUp(self):
    """Build a populated source FileStorage; recovery output goes to self.dest."""
    ZODB.tests.util.TestCase.setUp(self)
    self.path = 'source.fs'
    self.dest = 'dest.fs'
    self.recovered = None
    self.storage = FileStorage(self.path)
    self.populate()
def main(argv=sys.argv):
    """Command-line entry point: open the given FileStorage read-only and
    verify that every record in it can be loaded."""
    parser = argparse.ArgumentParser(
        prog="zodbverify",
        description="Verifies that all records in the database can be loaded.",
    )
    parser.add_argument(
        "-f",
        "--zodbfile",
        action="store",
        dest="zodbfile",
        required=True,
        help="Path to file-storage",
    )
    parser.add_argument(
        "-D",
        "--debug",
        action="store_true",
        dest="debug",
        help="pause to debug broken pickles",
    )
    args = parser.parse_args(argv[1:])

    logging.basicConfig(level=logging.INFO)
    # Read-only: verification never writes to the storage.
    fs = FileStorage(args.zodbfile, read_only=True)
    verify_zodb(fs, debug=args.debug)
def testRedo(self):
    """Undoing an undo ("redo") must restore a blob's later revision."""
    base_storage = FileStorage(self.storagefile)
    blob_storage = BlobStorage(self.blob_dir, base_storage)
    database = DB(blob_storage)
    connection = database.open()
    root = connection.root()
    blob = Blob()
    # Revision 1 of the blob.
    transaction.begin()
    blob.open('w').write('this is state 1')
    root['blob'] = blob
    transaction.commit()
    # Revision 2 of the blob.
    transaction.begin()
    blob = root['blob']
    blob.open('w').write('this is state 2')
    transaction.commit()
    # First undo: back to state 1.
    database.undo(database.undoLog(0, 1)[0]['id'])
    transaction.commit()
    self.assertEqual(blob.open('r').read(), 'this is state 1')
    # NOTE(review): 'serial' is computed but never used below.
    serial = base64.encodestring(blob_storage._tid)
    # Undoing the undo restores state 2.
    database.undo(database.undoLog(0, 1)[0]['id'])
    transaction.commit()
    self.assertEqual(blob.open('r').read(), 'this is state 2')
    database.close()
def setUp(self):
    """Create a temporary ZODB holding a PythonScript with three committed
    revisions, so history/undo behavior can be exercised."""
    # set up a zodb
    # we can't use DemoStorage here 'cos it doesn't support History
    self.dir = tempfile.mkdtemp()
    self.s = FileStorage(os.path.join(self.dir, 'testHistory.fs'),
                         create=True)
    self.connection = ZODB.DB(self.s).open()
    r = self.connection.root()
    a = Application()
    r['Application'] = a
    self.root = a
    # create a python script
    manage_addPythonScript(a, 'test')
    self.ps = ps = a.test
    # commit some changes, each with its own transaction note
    ps.write('return 1')
    t = transaction.get()
    # undo note made by Application instantiation above.
    t.description = None
    t.note('Change 1')
    t.commit()
    ps.write('return 2')
    t = transaction.get()
    t.note('Change 2')
    t.commit()
    ps.write('return 3')
    t = transaction.get()
    t.note('Change 3')
    t.commit()
def setup_package():
    """Stand up a temporary CMS site plus webserver for the Selenium suite.

    No-op unless Selenium testing is enabled.
    """
    if not get_selenium_enabled():
        return

    # Serve site files out of a throwaway directory.
    app.root = mkdtemp()

    # Fresh file-backed database for this test run.
    datastore.storage = FileStorage(app.path("testdb.fs"))

    # Initialize site content before testing
    initializer = SiteInitializer()
    initializer.initialize(admin_email, admin_password, site_languages)
    datastore.commit()

    # Point CherryPy at the address the Selenium helpers expect.
    host, port_number = get_selenium_site_address()
    cherrypy.config.update({
        "log.screen": False,
        "server.socket_host": host,
        "server.socket_port": port_number,
        "engine.autoreload.on": False
    })

    # Configure the application
    cms = CMSController()
    cms.closing_item_requires_confirmation = False

    # Launch the site's webserver on another thread
    cms.run(block=False)
def init(test=False):
    """Initialize the module-level ZODB handle (_db), once.

    A second call is a no-op unless *test* is set, in which case an
    in-memory test DB replaces whatever was there and _testing is flagged.
    """
    global _db, _testing
    if _db and not test:
        return
    log.info("Initializing zodb")
    handle(BeforeDatabaseInitalizedEvent())
    if test:
        # Test mode always uses the in-memory test database.
        from ZODB.tests.util import DB
        _db = DB()
        _testing = True
    else:
        # Dispatch on the configured storage backend.
        storage_type = get_config().get('db', 'storage_type')
        if storage_type == 'zeo':
            from ZODB import DB
            _db = DB(ClientStorage('%s/socket' % get_db_dir()))
        elif storage_type == 'embedded':
            from ZODB import DB
            _db = DB(FileStorage('%s/data.fs' % get_db_dir()))
        elif storage_type == 'memory':
            from ZODB.tests.util import DB
            _db = DB()
        else:
            raise Exception("Unknown storage type '%s'" % storage_type)
    init_schema()
def openDB(self):
    """Open a process-specific scratch FileStorage and return its DB."""
    from ZODB.DB import DB
    from ZODB.FileStorage import FileStorage

    filename = 'fs_tmp__%s' % os.getpid()
    self.storage = FileStorage(filename)
    self.db = DB(self.storage)
    return self.db
def open_db(path):
    """Open the NBA roster ZODB under *path* and return its handles in a dict
    keyed 'storage', 'db', 'connection', 'root'."""
    # NOTE(review): plain concatenation, not os.path.join -- *path* is
    # expected to already end with a separator; confirm against callers.
    storage = FileStorage(path + 'NBA_Roster_ZODB.fs')
    db = DB(storage)
    connection = db.open()
    return {
        'storage': storage,
        'db': db,
        'connection': connection,
        'root': connection.root(),
    }
def testUndoAfterConsumption(self):
    """Undo must restore the earlier revision of a blob whose contents were
    set via consumeFile()."""
    base_storage = FileStorage(self.storagefile)
    blob_storage = BlobStorage(self.blob_dir, base_storage)
    database = DB(blob_storage)
    connection = database.open()
    root = connection.root()
    # Revision 1: blob consumes an external file.
    # NOTE(review): open(...).write(...) relies on CPython refcounting to
    # close the file handle immediately -- Python 2 idiom, kept as-is.
    transaction.begin()
    open('consume1', 'w').write('this is state 1')
    blob = Blob()
    blob.consumeFile('consume1')
    root['blob'] = blob
    transaction.commit()
    # Revision 2: the same blob consumes a second file.
    transaction.begin()
    blob = root['blob']
    open('consume2', 'w').write('this is state 2')
    blob.consumeFile('consume2')
    transaction.commit()
    # Undo the latest transaction and check revision 1 is back.
    database.undo(database.undoLog(0, 1)[0]['id'])
    transaction.commit()
    self.assertEqual(blob.open('r').read(), 'this is state 1')
    database.close()
def setUp(self):
    """Open the graph store named by self.url (a file-backed FileStorage for
    '...fs' URLs, otherwise a ZEO client) and seed the test URIRefs.

    Fixes: the stale-file cleanup unlinked hard-coded /tmp/zodb_local3.*
    paths that had nothing to do with the path actually checked
    (self.path), and os.unlink raised OSError whenever one of the sidecar
    files (.index/.tmp/.lock) was absent.  The cleanup now derives the
    real path from the URL and only removes files that exist.
    """
    if self.url.endswith('.fs'):
        from ZODB.FileStorage import FileStorage
        # Strip the 'file://' scheme to get the on-disk path.
        openstr = os.path.abspath(os.path.expanduser(self.url[7:]))
        # Remove any stale storage and sidecar files from a previous run.
        for suffix in ('', '.index', '.tmp', '.lock'):
            stale = openstr + suffix
            if os.path.exists(stale):
                os.unlink(stale)
        fs = FileStorage(openstr)
    else:
        from ZEO.ClientStorage import ClientStorage
        schema, opts = _parse_rfc1738_args(self.url)
        fs = ClientStorage((opts['host'], int(opts['port'])))
    self.zdb = ZODB.DB(fs)
    self.conn = self.zdb.open()
    root = self.conn.root()
    if 'rdflib' not in root:
        root['rdflib'] = ConjunctiveGraph(self.store_name)
    self.graph = self.g = root['rdflib']
    # Fixture URIRefs used by the test methods.
    self.michel = URIRef('michel')
    self.tarek = URIRef('tarek')
    self.bob = URIRef('bob')
    self.likes = URIRef('likes')
    self.hates = URIRef('hates')
    self.pizza = URIRef('pizza')
    self.cheese = URIRef('cheese')
    transaction.commit()
def tearDown(self):
    """Close every storage touched by the test, then defer to the base class."""
    self.storage.close()
    if self.recovered is not None:
        self.recovered.close()
    # Open-and-close the destination file as a FileStorage -- presumably to
    # finalize/release it before the base tearDown removes temp files.
    FileStorage(self.dest).close()
    ZODB.tests.util.TestCase.tearDown(self)
def setUp(self):
    """Create a temporary ZODB holding a HistoryItem with three committed
    title revisions, so history browsing can be exercised."""
    # set up a zodb
    # we can't use DemoStorage here 'cos it doesn't support History
    self.dir = tempfile.mkdtemp()
    fs_path = os.path.join(self.dir, 'testHistory.fs')
    self.s = FileStorage(fs_path, create=True)
    self.connection = ZODB.DB(self.s).open()
    r = self.connection.root()
    a = Application()
    r['Application'] = a
    self.root = a
    # create a python script
    a['test'] = HistoryItem()
    self.hi = hi = a.test
    # commit some changes, each with its own transaction note
    hi.title = 'First title'
    t = transaction.get()
    # undo note made by Application instantiation above.
    t.description = None
    t.note(u'Change 1')
    t.commit()
    time.sleep(0.02)  # wait at least one Windows clock tick
    hi.title = 'Second title'
    t = transaction.get()
    t.note(u'Change 2')
    t.commit()
    time.sleep(0.02)  # wait at least one Windows clock tick
    hi.title = 'Third title'
    t = transaction.get()
    t.note(u'Change 3')
    t.commit()
def start(self):
    """Create a fresh FileStorage-backed DB, populate it via do_updates(),
    then spawn a ZEO server process serving that storage."""
    self.started = 1
    # NOTE(review): tempfile.mktemp is race-prone -- the returned path can
    # be claimed by another process before FileStorage creates it.
    # mkstemp/TemporaryDirectory would be safer; left as-is to preserve
    # behavior.
    self.path = tempfile.mktemp(suffix=".fs")
    self._storage = FileStorage(self.path)
    self.db = ZODB.DB(self._storage)
    self.do_updates()
    # forker returns the server pid and an exit handle for later shutdown.
    self.pid, self.exit = forker.start_zeo_server(self._storage, self.addr)
def testDeepCopyCanInvalidate(self):
    """
    Tests regression for invalidation problems related to missing
    readers and writers values in cloned objects (see
    http://mail.zope.org/pipermail/zodb-dev/2008-August/012054.html)
    """
    base_storage = FileStorage(self.storagefile)
    blob_storage = BlobStorage(self.blob_dir, base_storage)
    database = DB(blob_storage)
    connection = database.open()
    root = connection.root()
    transaction.begin()
    root['blob'] = Blob()
    transaction.commit()
    # Deep-copy the blob by round-tripping it through a pickle.
    stream = StringIO()
    p = Pickler(stream, 1)
    p.dump(root['blob'])
    u = Unpickler(stream)
    stream.seek(0)
    clone = u.load()
    # Invalidating the clone must not raise (the original bug).
    clone._p_invalidate()
    # it should also be possible to open the cloned blob
    # (even though it won't contain the original data)
    clone.open()
def open(self):
    """Open the ZODB FileStorage configured at conf['rdf.store_conf'] and
    bind self.graph to the 'rdflib' ConjunctiveGraph at the database root.

    Raises ZODBSourceOpenFailError if the storage cannot be created.
    """
    import ZODB
    from ZODB.FileStorage import FileStorage

    self.path = self.conf['rdf.store_conf']
    abs_path = os.path.abspath(self.path)
    try:
        store = FileStorage(abs_path)
    except IOError:
        L.exception("Failed to create a FileStorage")
        raise ZODBSourceOpenFailError(abs_path)

    self.zdb = ZODB.DB(store, cache_size=1600)
    self.conn = self.zdb.open()
    root = self.conn.root()
    # Create the graph on first use; reuse it on subsequent opens.
    if 'rdflib' not in root:
        root['rdflib'] = ConjunctiveGraph('ZODB')
    self.graph = root['rdflib']

    try:
        transaction.commit()
    except Exception:
        # catch commit exception and close db.
        # otherwise db would stay open and follow up tests
        # will detect the db in error state
        L.exception('Forced to abort transaction on ZODB store opening',
                    exc_info=True)
        transaction.abort()
    transaction.begin()
    self.graph.open(self.path)
def openDB(self):
    """Create a scratch FileStorage named after the current process id."""
    from ZODB.FileStorage import FileStorage
    from ZODB.DB import DB

    scratch_name = 'fs_tmp__{0}'.format(os.getpid())
    self.storage = FileStorage(scratch_name)
    self.db = DB(self.storage)
def do_full_backup(options):
    """Copy the storage up to its last complete transaction into a new full
    backup file, save the index, and write the backup's .dat metadata.

    Fixes: the .dat file handle leaked if the write/flush/fsync raised;
    the write is now wrapped in try/finally so the handle always closes.
    """
    options.full = True
    dest = os.path.join(options.repository, gen_filename(options))
    if os.path.exists(dest):
        raise WouldOverwriteFiles('Cannot overwrite existing file: %s' % dest)
    # Find the file position of the last completed transaction.
    fs = FileStorage(options.file, read_only=True)
    # Note that the FileStorage ctor calls read_index() which scans the file
    # and returns "the position just after the last valid transaction record".
    # getSize() then returns this position, which is exactly what we want,
    # because we only want to copy stuff from the beginning of the file to the
    # last valid transaction record.
    pos = fs.getSize()
    # Save the storage index into the repository
    index_file = os.path.join(options.repository,
                              gen_filename(options, '.index'))
    log('writing index')
    fs._index.save(pos, index_file)
    fs.close()
    log('writing full backup: %s bytes to %s', pos, dest)
    sum = copyfile(options, dest, 0, pos)
    # Write the data file for this full backup
    datfile = os.path.splitext(dest)[0] + '.dat'
    fp = open(datfile, 'w')
    try:
        print >> fp, dest, 0, pos, sum
        fp.flush()
        os.fsync(fp.fileno())
    finally:
        fp.close()
    if options.killold:
        delete_old_backups(options)
def init_database(self):
    """Open the FileStorage at Data/Data.fs and bind storage, db,
    connection and root on self.

    Fixes: the path was the literal 'Data\Data.fs', which only works
    because '\D' happens not to be a recognized escape sequence, and which
    is Windows-only; os.path.join builds the separator portably.  The
    trailing dead 'pass' statement is removed.
    """
    import os
    self.storage = FileStorage(os.path.join('Data', 'Data.fs'))
    self.db = DB(self.storage)
    self.connection = self.db.open()
    self.root = self.connection.root()
def testIncremental(self):
    """
    This reproduces an undocumented way to speed up the import of a
    single ZODB by doing most of the work before switching to NEO.
    """
    beforeCheck, before, finalCheck, after = self.getData()
    fs_path, cfg = self.getFS()
    # Phase 1: build most of the tree directly in a plain FileStorage.
    c = ZODB.DB(FileStorage(fs_path)).open()
    r = c.root()['tree'] = random_tree.Node()
    transaction.commit()
    for _ in before(r):
        transaction.commit()
    c.db().close()
    importer = {'zodb': [('root', cfg)]}
    # Start NEO cluster with transparent import.
    with NEOCluster(importer=importer, partitions=2) as cluster:
        s = cluster.storage
        # The lock is released by the patched _finished hook, signalling
        # that the storage node finished importing the ZODB.
        l = threading.Lock()
        l.acquire()
        def _finished(orig):
            orig()
            l.release()
        with Patch(s.dm, _finished=_finished):
            cluster.start()
            # Block until the initial import completes.
            l.acquire()
            t, c = cluster.getTransaction()
            r = c.root()['tree']
            beforeCheck(random_tree.hashTree(r))
        # Phase 2: append more data to the source FileStorage after the
        # first import.
        c = ZODB.DB(FileStorage(fs_path)).open()
        for _ in after(c.root()['tree']):
            transaction.commit()
        c.db().close()
        # TODO: Add a storage option that only does this and exits.
        # Such command would also check that there's no data after
        # what's already imported.
        s.dm.setConfiguration('zodb', None)
        s.stop()
        cluster.join((s, ))
        s.resetNode()
        # Restart the storage node so it picks up the appended data.
        with Patch(s.dm, _finished=_finished):
            s.start()
            self.tic()
            # Wait for the incremental import to finish.
            l.acquire()
        t.begin()
        finalCheck(r)