def testdb(request):
    """Build a ZODB index of listings keyed by hashed category id.

    Opens listingsc.fs, ensures the 'listingdb' OOBTree exists, stores
    every Listing row under the first 8 hex chars of the SHA-224 of its
    category_id, commits, and returns a plain response.
    """
    storage = FileStorage.FileStorage('listingsc.fs')
    db = DB(storage)
    conn = db.open()
    dbroot = conn.root()
    # Ensure that a 'listingdb' key is present in the root.
    if 'listingdb' not in dbroot:  # dict.has_key() is deprecated/removed
        from BTrees.OOBTree import OOBTree
        dbroot['listingdb'] = OOBTree()
    userdb = dbroot['listingdb']
    lists = Listing.objects.all().values()
    minlat = min(l['latitude'] for l in lists)
    minlng = min(l['longitude'] for l in lists)
    for l in lists:
        # Normalized integer grid coordinates; computed but not used by
        # the key below (kept for the z-curve indexing variant).
        key1 = int(l['latitude'] * 1000000 - minlat * 1000000)
        key2 = int(l['longitude'] * 1000000 - minlng * 1000000)
        userdb[hashlib.sha224(str(l['category_id'])).hexdigest()[:8]] = l
    # Commit the change.
    transaction.commit()
    db.close()
    return HttpResponse("woo i guess")
def convertEkinprjs(local=None, server=None, project=None, username=None): """convert old ekin prjs in a db to new""" if local != None: DB = PDatabase(local=local) elif server != None: DB = PDatabase(server=server, username='******', port=8080, password='******', project=project) for f in DB['userfields']: if DB['userfields'][f]['field_type'] in ekintypes: print f for r in DB.getRecs(): rec = DB[r] if rec.has_key(f): E=rec[f] E.checkDatasets() for d in E.datasets: ek=E.getDataset(d) #ek.prettyPrint() rec[f] = E print DB.getChanged() DB.commit('converted ekin data') return
def check_record_iternext(self):
    """record_iternext must walk oids 0..2 in order and then terminate."""
    db = DB(self._storage)
    conn = db.open()
    root = conn.root()
    root['abc'] = MinPO('abc')
    root['xyz'] = MinPO('xyz')
    transaction.commit()
    # Ensure it's all on disk.
    db.close()
    self._storage.close()
    self.open()
    key = None
    suffixes = (b'\000', b'\001', b'\002')
    for index, suffix in enumerate(suffixes):
        oid, tid, data, next_oid = self._storage.record_iternext(key)
        self.assertEqual(oid, b'\000' * 7 + suffix)
        key = next_oid
        expected_data, expected_tid = load_current(self._storage, oid)
        self.assertEqual(expected_data, data)
        self.assertEqual(expected_tid, tid)
        # The final record must report no successor.
        if index == len(suffixes) - 1:
            self.assertEqual(next_oid, None)
        else:
            self.assertNotEqual(next_oid, None)
class DBStorage(jpath4.db.storage.Storage):
    """jpath4 storage backed by a ZODB FileStorage."""

    def __init__(self, file_path):
        # Standard ZODB bootstrap: storage -> db -> connection -> root.
        self.storage = ZFileStorage.FileStorage(file_path)
        self.db = ZDB(self.storage)
        self.connection = self.db.open()
        self.z_root = self.connection.root()
        # Create the persistent root container on first use.
        if "jpath4-root" not in self.z_root:
            self.z_root["jpath4-root"] = DBDict()
            transaction.commit()
        self.unwrapped_root = self.z_root["jpath4-root"]
        self.wrapped_root = standard_wrapper.wrap(self.unwrapped_root)

    def get_root(self):
        """Return the wrapped root object."""
        return self.wrapped_root

    def apply_updates(self, updates):
        """Apply a batch of updates using the persistent container classes."""
        standard_wrapper.apply_updates(
            self.wrapped_root, updates,
            classes={"object": DBDict, "list": DBList})

    def commit(self):
        transaction.commit()

    def abort(self):
        transaction.abort()

    def close(self):
        # Tear down in reverse order of construction.
        self.connection.close()
        self.db.close()
        self.storage.close()
def checkPackWithGCOnDestinationAfterRestore(self):
    """After copy + GC pack, only the root survives on the destination."""
    raises = self.assertRaises
    db = DB(self._storage)
    conn = db.open()
    root = conn.root()

    def commit_noted(note):
        # Commit the current transaction with a descriptive note.
        txn = transaction.get()
        txn.note(note)
        txn.commit()

    # Build a two-deep chain of objects off the root...
    root.obj = obj1 = MinPO(1)
    commit_noted('root -> obj')
    root.obj.obj = obj2 = MinPO(2)
    commit_noted('root -> obj -> obj')
    # ...then unlink the whole chain so both objects become garbage.
    del root.obj
    commit_noted('root -X->')
    # Now copy the transactions to the destination
    self._dst.copyTransactionsFrom(self._storage)
    # Now pack the destination.
    snooze()
    self._dst.pack(time.time(), referencesf)
    # And check to see that the root object exists, but not the other
    # objects.
    data, serial = self._dst.load(root._p_oid, '')
    raises(KeyError, self._dst.load, obj1._p_oid, '')
    raises(KeyError, self._dst.load, obj2._p_oid, '')
def makeMap(request):
    """Render the map page, optionally narrowing listings near ?lat=&lng=.

    When coordinates are given they are normalized to the same integer
    grid used when the ZODB index was built, interleaved into a z-curve
    key, and nearby entries are pulled from listings2.fs.
    """
    listings = Building.objects.order_by('?')[:1000]
    keywords = Keywords.objects.order_by('keyword')
    allcategories = Categories.objects.order_by('category').values('category')
    zipcodes = Zipcode.objects.order_by('code')
    if request.method == 'GET':
        try:
            lat = request.GET['lat']
            lng = request.GET['lng']
            lists = Listing.objects.all().values()
            minlat = min(l['latitude'] for l in lists)
            minlng = min(l['longitude'] for l in lists)
            key1 = int(float(lat) * 1000000 - minlat * 1000000)
            key2 = int(float(lng) * 1000000 - minlng * 1000000)
            search = zcurve.interleave2(key1, key2)
            storage = FileStorage.FileStorage('listings2.fs')
            db = DB(storage)
            try:
                conn = db.open()
                dbroot = conn.root()
                userdb = dbroot['listingdb']
                listings = []
                # Scan a window of z-curve keys around the search point;
                # missing keys are expected and simply skipped.
                for i in range(search - 100000, search + 10000):
                    try:
                        listings.append(userdb[int(i)])
                    except KeyError:
                        pass
            finally:
                # Fixed: the original leaked the db on any exception.
                db.close()
            return render_to_response('map2.html', locals())
        except Exception:
            # Best effort: missing/invalid parameters fall through to the
            # default map (was a bare except, which also ate SystemExit).
            pass
    return render_to_response('map.html', locals())
class PersistenceHandler:
    """Context-managed ZODB store for scraper progress."""

    def __init__(self, db_name):
        self.db_name = db_name

    def __enter__(self):
        self.db = DB(FileStorage(self.db_name))
        self.conn = self.db.open()
        self.root = self.conn.root()
        return self

    def __exit__(self, _, __, ___):
        # Close the connection first; release the db regardless.
        try:
            self.conn.close()
        finally:
            self.db.close()

    def store_progress(self, scrape_targets, vulnerability_results):
        """Persist both progress collections in one transaction."""
        self.root["scrape_targets"] = scrape_targets
        self.root["vulnerability_results"] = vulnerability_results
        transaction.commit()

    def retrieve_progress(self):
        """Return [scrape_targets, vulnerability_results], defaulting to []."""
        progress = []
        for key in ("scrape_targets", "vulnerability_results"):
            try:
                value = self.root[key]
            except KeyError:
                value = list()
            progress.append(value)
        return progress
def __init__(self, path):
    # Wire up the ZODB stack: file storage -> database -> connection.
    self.storage = FileStorage.FileStorage(path)
    self.db = DB(self.storage)
    self.connection = self.db.open()
    # Root mapping used to store and retrieve data.
    self.dbroot1 = self.connection.root()
def init(test=False):
    """Initialize the global ZODB handle (no-op if already initialized).

    With test=True an in-memory test DB is used and _testing is flagged.
    """
    global _db, _testing
    if _db and not test:
        return
    log.info("Initializing zodb")
    handle(BeforeDatabaseInitalizedEvent())
    if test:
        from ZODB.tests.util import DB
        _db = DB()
        _testing = True
    else:
        # Pick the backing store from configuration.
        storage_type = get_config().get('db', 'storage_type')
        if storage_type == 'zeo':
            from ZODB import DB
            storage = ClientStorage('%s/socket' % get_db_dir())
            _db = DB(storage)
        elif storage_type == 'embedded':
            from ZODB import DB
            storage = FileStorage('%s/data.fs' % get_db_dir())
            _db = DB(storage)
        elif storage_type == 'memory':
            from ZODB.tests.util import DB
            _db = DB()
        else:
            raise Exception("Unknown storage type '%s'" % storage_type)
    init_schema()
def get_conn():
    """Return (opening on first use) this thread's ZODB connection."""
    try:
        return _thread_data.zodb_conn
    except AttributeError:
        # First call on this thread: open a connection and cache it.
        db = DB(get_storage())
        _thread_data.zodb_conn = db.open()
        return _thread_data.zodb_conn
def getUrl2(self):
    """Append any newly-found feed to the stored list and return
    [url, updated] pairs for every stored feed."""
    storage = FileStorage.FileStorage('feed.fs')
    db = DB(storage)
    connection = db.open()
    root = connection.root()
    new = self.getNew()
    urllist = []
    # Fixed: the original named this `list`, shadowing the builtin,
    # and first bound it to a dead empty list.
    feeds = root['Feed']
    if new is not None:
        week_ago = self.datetime.date.today() - self.datetime.timedelta(7)
        feeds.append(FeedAdmin(new, week_ago))
    transaction.begin()
    root['Feed'] = feeds
    transaction.commit()
    feedlist = root['Feed']
    for f in feedlist:
        urllist.append([f.url, f.updated])
    connection.close()
    db.close()
    # NOTE(review): this write happens after the connection is closed and
    # is never committed, so it has no persistent effect -- it looks like
    # leftover cleanup. Preserved as-is; confirm intent before removing.
    root['Feed'] = []
    return urllist
def connectdb(dbfile):
    """Open a ZODB file storage, automating the connect protocol.

    Returns (root, storage); the caller must still commit and close.
    """
    from ZODB import FileStorage, DB
    storage = FileStorage.FileStorage(dbfile)
    database = DB(storage)
    conn = database.open()
    return conn.root(), storage
def __init__(self) -> None:
    """Initialize the GitHub client and load state from db"""
    self.CONFIG = load_config()
    # Persistent state lives behind a ZEO client storage.
    self.storage = ClientStorage(self.CONFIG["port"])
    self.db = DB(self.storage)
    # Authenticated GitHub API client.
    self._client = github3.login(token=self.CONFIG["token"])
    self._init_db()
def checkPackVersionReachable(self):
    # Exercises pack() in the presence of version data: revisions made
    # inside named versions must survive a pack, and so must version data
    # restored by undoing an abortVersion.
    db = DB(self._storage)
    cn = db.open()
    root = cn.root()
    names = "a", "b", "c"
    # One non-version revision of each object.
    for name in names:
        root[name] = MinPO(name)
        transaction.commit()
    # One extra revision of each object inside a same-named version.
    for name in names:
        cn2 = db.open(version=name)
        rt2 = cn2.root()
        obj = rt2[name]
        obj.value = MinPO("version")
        transaction.commit()
        cn2.close()
    root["d"] = MinPO("d")
    transaction.commit()
    snooze()
    self._storage.pack(time.time(), referencesf)
    cn.sync()
    # make sure all the non-version data is there
    for name, obj in root.items():
        self.assertEqual(name, obj.value)
    # make sure all the version-data is there,
    # and create a new revision in the version
    for name in names:
        cn2 = db.open(version=name)
        rt2 = cn2.root()
        obj = rt2[name].value
        self.assertEqual(obj.value, "version")
        obj.value = "still version"
        transaction.commit()
        cn2.close()
    # Abort version "b", then undo that abort; packing to a time before
    # the undo must keep the version data for "b" reachable.
    db.abortVersion("b")
    txn = transaction.get()
    txn.note("abort version b")
    txn.commit()
    t = time.time()
    snooze()
    L = db.undoInfo()
    db.undo(L[0]["id"])
    txn = transaction.get()
    txn.note("undo abort")
    txn.commit()
    self._storage.pack(t, referencesf)
    cn2 = db.open(version="b")
    rt2 = cn2.root()
    self.assertEqual(rt2["b"].value.value, "still version")
def checkRestoreAcrossPack(self):
    """restore() into the destination must succeed even after a pack."""
    db = DB(self._storage)
    c = db.open()
    r = c.root()
    r["obj1"] = MinPO(1)
    transaction.commit()
    r["obj2"] = MinPO(1)
    transaction.commit()
    self._dst.copyTransactionsFrom(self._storage)
    self._dst.pack(time.time(), referencesf)
    self._undo(self._storage.undoInfo()[0]['id'])
    # copy the final transaction manually. even though there
    # was a pack, the restore() ought to succeed.
    it = self._storage.iterator()
    final = list(it)[-1]
    self._dst.tpc_begin(final, final.tid, final.status)
    for rec in final:
        self._dst.restore(rec.oid, rec.tid, rec.data, rec.version,
                          rec.data_txn, final)
    it.close()
    self._dst.tpc_vote(final)
    self._dst.tpc_finish(final)
class MyZODB(object):
    """Logging wrapper around a ZODB FileStorage database."""

    def __init__(self, dbPath):
        self.dbPath = os.path.abspath(dbPath)
        self.logger = logging.getLogger('monitor_local.MyProjectDB.MyZODB')
        self.logger.info("Open database: %s", self.dbPath)
        try:
            self.storage = FileStorage.FileStorage(self.dbPath)
        except zc.lockfile.LockError:
            # Another process holds the lock file.
            raise DBisBusyError("Database is in use by other progress")
        self.db = DB(self.storage)
        self.conn = self.db.open()
        self.root = self.conn.root()
        self.openFlag = True

    def close(self):
        """Close connection, database and storage, in that order."""
        self.logger.info("Close database: %s", self.dbPath)
        self.conn.close()
        self.db.close()
        self.storage.close()
        self.openFlag = False

    def removeDBFile(self):
        """Delete the database file and its companion files from disk."""
        self.logger.info("Remove database: %s", self.dbPath)
        if self.openFlag:
            self.close()
        for suffix in ('', '.index', '.lock', '.tmp'):
            candidate = self.dbPath + suffix
            if os.path.isfile(candidate):
                os.remove(candidate)
def spojiSeNaServer(self):
    """Connect to the remote ZEO server and bind the root object."""
    # Production server address (a localhost storage was used during
    # development and is intentionally no longer referenced here).
    self.spremnik = ClientStorage.ClientStorage(('45.63.117.103', 1981))
    self.spremnik.server_sync = True
    self.bp = DB(self.spremnik)
    self.veza = self.bp.open()
    self.root = self.veza.root()
class MyZODB(object):
    """Singleton wrapper around the shared ZEO client connection."""

    # singleton DB instance
    _instance = None

    def __new__(cls):
        if not isinstance(cls._instance, cls):
            cls._instance = object.__new__(cls)
        return cls._instance

    def __init__(self):
        # return if connection already set
        if hasattr(self, "connection"):
            logging.debug(f'DB Object already exists. Return')
            return
        logging.debug('Creating a new db instance')
        # start the server if not started already
        start_server()
        server_and_port = (SERVER, DB_PORT)
        self.storage = ClientStorage(server_and_port)
        self.db = DB(self.storage)
        self.connection = self.db.open()
        # Fixed: the original assigned dbroot twice in a row.
        self.dbroot = self.connection.root()

    def close(self):
        """Release connection, database and storage."""
        self.connection.close()
        self.db.close()
        self.storage.close()
def convertEkinprjs(local=None, server=None, project=None, username=None): """convert old ekin prjs in a db to new""" if local != None: DB = PDatabase(local=local) elif server != None: DB = PDatabase(server=server, username='******', port=8080, password='******', project=project) for f in DB['userfields']: if DB['userfields'][f]['field_type'] in ekintypes: print f for r in DB.getRecs(): rec = DB[r] if rec.has_key(f): E = rec[f] E.checkDatasets() for d in E.datasets: ek = E.getDataset(d) #ek.prettyPrint() rec[f] = E print DB.getChanged() DB.commit('converted ekin data') return
def checkPackWithGCOnDestinationAfterRestore(self):
    """After copy + GC pack, only the root survives on the destination."""
    raises = self.assertRaises
    db = DB(self._storage)
    conn = db.open()
    root = conn.root()

    def commit_noted(note):
        # Commit the current transaction with a descriptive note.
        txn = transaction.get()
        txn.note(note)
        txn.commit()

    # Build a two-deep chain of objects off the root...
    root.obj = obj1 = MinPO(1)
    commit_noted("root -> obj")
    root.obj.obj = obj2 = MinPO(2)
    commit_noted("root -> obj -> obj")
    # ...then unlink the whole chain so both objects become garbage.
    del root.obj
    commit_noted("root -X->")
    # Now copy the transactions to the destination
    self._dst.copyTransactionsFrom(self._storage)
    # Now pack the destination.
    snooze()
    self._dst.pack(time.time(), referencesf)
    # And check to see that the root object exists, but not the other
    # objects.
    data, serial = self._dst.load(root._p_oid, "")
    raises(KeyError, self._dst.load, obj1._p_oid, "")
    raises(KeyError, self._dst.load, obj2._p_oid, "")
def test_initialize_applications():
    """initialize_applications installs all apps, or none on failure."""
    def apps():
        return {'mine': MyApp, 'app': SimpleApp, 'app2': SimpleApp,
                'obj': tuple}

    def failing_apps():
        # The second factory raises, so nothing may be installed.
        return OrderedDict((('foo', SimpleApp), ('spam', lambda: 1/0)))

    db = DB(DemoStorage())
    initialize_applications(db, apps)
    conn = db.open()
    root = conn.root()
    assert root['mine']() == "running !"
    assert root['app']() == "simply running !"
    assert root['app2']() == "simply running !"
    assert isinstance(root['obj'], tuple)
    transaction.abort()
    conn.close()
    # verify it's all or nothing
    try:
        initialize_applications(db, failing_apps)
    except ZeroDivisionError:
        pass
    conn = db.open()
    root = conn.root()
    assert 'foo' not in root
    conn.close()
def __init__(self, *args, **kw):
    # Run the original (pre-patch) DB initializer first.
    DB._old_init(self, *args, **kw)
    # If a global async-layer DB exists and this is a plain unnamed DB,
    # cross-link the two multi-database maps.
    if (_async_layer_db is not None
            and self.database_name == 'unnamed'
            and 'async' not in self.databases):
        _async_layer_db.databases['unnamed'] = self  # Fake dbtab
        self.databases['async'] = _async_layer_db  # Fake dbtab
def make_one(self):
    """Create a WithProperty instance registered with a fresh connection."""
    conn = DB(None).open()
    obj = WithProperty()
    conn.add(obj)
    return obj
def checkPackLotsWhileWriting(self):
    # This is like the other pack-while-writing tests, except it packs
    # repeatedly until the client thread is done. At the time it was
    # introduced, it reliably provoked
    # CorruptedError: ... transaction with checkpoint flag set
    # in the ZEO flavor of the FileStorage tests.
    db = DB(self._storage)
    conn = db.open()
    root = conn.root()
    # Materialize so the sequence can be iterated repeatedly.
    choices = list(range(10))
    for i in choices:
        root[i] = MinPO(i)
        transaction.commit()
    snooze()
    packt = time.time()
    for dummy in choices:
        for i in choices:
            root[i].value = MinPO(i)
            transaction.commit()
    NUM_LOOP_TRIP = 100
    timer = ElapsedTimer(time.time())
    thread = ClientThread(db, choices, NUM_LOOP_TRIP, timer, 0)
    thread.start()
    # Keep packing until the writer thread finishes.
    # Fixed: Thread.isAlive() was removed in Python 3.9; use is_alive().
    while thread.is_alive():
        db.pack(packt)
        snooze()
        packt = time.time()
    thread.join()
    self._sanity_check()
def _store_base_subs_in_zodb(self, storage):
    """Persist a base/sub component-registry pair under 'base' and 'sub'."""
    db = DB(storage)
    conn = db.open()

    base_comps = BLSM(None)
    base_comps.btree_threshold = 0
    base_comps.__name__ = u'base'
    # replace with "broken"
    base_comps.adapters = _LocalAdapterRegistry()
    base_comps.utilities = _LocalAdapterRegistry()

    sub_comps = BLSM(None)
    sub_comps.__name__ = u'sub'
    sub_comps.__bases__ = (base_comps,)

    # Sanity-check that the sub registries picked up the base registries.
    assert_that(sub_comps.adapters.__bases__, is_((base_comps.adapters,)))
    assert_that(sub_comps.utilities.__bases__, is_((base_comps.utilities,)))
    assert_that(sub_comps.utilities.__bases__[0], is_(_LocalAdapterRegistry))

    root = conn.root()
    root['base'] = base_comps
    root['sub'] = sub_comps
    transaction.commit()
    conn.close()
    db.close()
def checkRestoreAcrossPack(self):
    """restore() into the destination must succeed even after a pack."""
    db = DB(self._storage)
    c = db.open()
    r = c.root()
    r["obj1"] = MinPO(1)
    transaction.commit()
    r["obj2"] = MinPO(1)
    transaction.commit()
    self._dst.copyTransactionsFrom(self._storage)
    self._dst.pack(time.time(), referencesf)
    self._undo(self._storage.undoInfo()[0]['id'])
    # copy the final transaction manually. even though there
    # was a pack, the restore() ought to succeed.
    # Record iterators can't be accessed out-of-order, so walk every
    # transaction, materializing each record list; after the loop,
    # `final`/`records` hold the last transaction.
    for final in self._storage.iterator():
        records = list(final)
    self._dst.tpc_begin(final, final.tid, final.status)
    for rec in records:
        self._dst.restore(rec.oid, rec.tid, rec.data, '', rec.data_txn,
                          final)
    self._dst.tpc_vote(final)
    self._dst.tpc_finish(final)
def checkPackLotsWhileWriting(self):
    # This is like the other pack-while-writing tests, except it packs
    # repeatedly until the client thread is done. At the time it was
    # introduced, it reliably provoked
    # CorruptedError: ... transaction with checkpoint flag set
    # in the ZEO flavor of the FileStorage tests.
    db = DB(self._storage)
    conn = db.open()
    root = conn.root()
    choices = list(range(10))
    for i in choices:
        root[i] = MinPO(i)
        transaction.commit()
    snooze()
    packt = time.time()
    for dummy in choices:
        for i in choices:
            root[i].value = MinPO(i)
            transaction.commit()
    NUM_LOOP_TRIP = 100
    timer = ElapsedTimer(time.time())
    thread = ClientThread(db, choices, NUM_LOOP_TRIP, timer, 0)
    thread.start()
    # Keep packing until the writer thread finishes.
    # Fixed: Thread.isAlive() was removed in Python 3.9; use is_alive().
    while thread.is_alive():
        db.pack(packt)
        snooze()
        packt = time.time()
    thread.join()
    self._sanity_check()
class Base(object):
    """Connection-serving wrapper around a graph FileStorage."""

    def __init__(self, path, authkey):
        if not os.path.exists(path):
            os.makedirs(path)
        self._path = path
        self.authkey = authkey
        store_file = os.path.join(path, 'graph.fs')
        self.storage = FileStorage(store_file)
        self.db = DB(self.storage)

    def path(self):
        return self._path

    def process(self, connection):
        # `connection` is a (factory, args) pair; call it to (re)connect.
        (func, args) = connection
        self.connection = func(*args)

    def recv(self):
        return self.connection.recv()

    def send(self, message):
        # One-shot send: the channel is closed after the message goes out.
        self.connection.send(message)
        self.connection.close()

    def open(self):
        """Open and return a new ZODB connection."""
        return self.db.open()

    def close(self):
        """Abort any pending transaction and shut down the storage."""
        transaction.get().abort()
        self.db.close()
        self.storage.close()
def mapSearch(request, search):
    """Benchmark keyword search via the ZODB index against MySQL.

    Runs both lookups several times, appends the timings to the
    'search_times' file, and renders the results on the map page.
    (Large blocks of dead commented-out lookup code were removed.)
    """
    keywords = Keywords.objects.order_by('keyword')
    categories = Categories.objects.order_by('category')
    zipcodes = Zipcode.objects.order_by('code')
    listings = []
    timings = []
    for i in range(1, 10):
        # --- timed lookup through the ZODB keyword index ---
        start = time()
        try:
            ksearch = Keywords.objects.filter(keyword=search)[0].id
            storage = FileStorage.FileStorage('listingsk.fs')
            db = DB(storage)
            try:
                conn = db.open()
                dbroot = conn.root()
                userdb = dbroot['listingdb']
                listings.append(
                    userdb[hashlib.sha224(str(ksearch)).hexdigest()[:8]])
                search = ksearch
            finally:
                # Fixed: release the db even when the key is missing.
                db.close()
        except Exception:
            # Best effort: unknown keyword / missing index entry.
            pass
        T = time() - start
        # --- timed equivalent MySQL lookup ---
        start = time()
        try:
            ksearch = Keywords.objects.filter(keyword=search)[0].id
        except Exception:
            pass
        l = Building.objects.filter(number=612)
        listings.append(l[:0])
        search = 612
        N = time() - start
        timings += [{'Tree-Time:': T, 'MYSQL-Time:': N}]
    writeToFile(timings, 'search_times')
    listings = listings[:1000]
    return render_to_response('map2.html', locals())
def main(argv=None):
    """Entry point for the ZODB bridge test script."""
    parser = argparse.ArgumentParser(description='ZODB bridge test script')
    # Default db file is interpreter-version specific so py2/py3 runs
    # don't collide on the same file.
    v_major, v_minor = sys.version_info.major, sys.version_info.minor
    default_db = os.path.join('.', 'data-py{}.{}.fs'.format(v_major, v_minor))
    parser.add_argument('-d', '--db-file', default=default_db,
                        help="Path to the database file")
    subparsers = parser.add_subparsers()
    init_parser = subparsers.add_parser('init')
    init_parser.set_defaults(func=init_db)
    init_parser.add_argument('-l', '--location', default='.',
                             help="filesystem location to initialize from")
    list_parser = subparsers.add_parser('list')
    list_parser.set_defaults(func=list_db)
    compare_parser = subparsers.add_parser('compare')
    compare_parser.set_defaults(func=compare_db)
    args = parser.parse_args(argv)

    db = DB(args.db_file)
    db_conn = db.open()
    db_root = db_conn.root()
    # Create the 'test' folder on first run, reuse it afterwards.
    if 'test' in db_root:
        root = db_root['test']
    else:
        root = db_root['test'] = Folder('test', None)
    set_root(root)
    args.func(root, args)
    db_conn.close()
def __init__(self, filepath):
    """Connect to (or create) the query database at *filepath*."""
    # Must be checked before FileStorage creates the file.
    existed = os.path.exists(filepath)
    self.storage = FileStorage.FileStorage(filepath)
    db = DB(self.storage)
    connection = db.open()
    self.root = connection.root()
    if existed:
        # Bind each known tree, creating any that are missing.
        # Fixed: the original left the attribute unset when the key was
        # missing (it only created the root entry), causing
        # AttributeError later; it also used bare `except`.
        try:
            self.misc = self.root['misc_queries']
        except KeyError:
            self.misc = self.root['misc_queries'] = OOBTree()
        try:
            self.squeries = self.root['search_queries']
        except KeyError:
            self.squeries = self.root['search_queries'] = OOBTree()
        try:
            self.qsets = self.root['query_sets']
        except KeyError:
            self.qsets = self.root['query_sets'] = OOBTree()
        try:
            self.rsets = self.root['record_sets']
        except KeyError:
            self.rsets = self.root['record_sets'] = OOBTree()
    else:
        # first time accessing: create the base data structures.
        # NOTE(review): these keys differ from the ones probed above
        # ('queries' vs 'misc_queries', etc.) -- preserved as-is, but
        # verify which schema is actually intended.
        self.root['queries'] = OOBTree()
        self.root['records'] = OOBTree()
        self.root['results'] = OOBTree()
        self.root['searches'] = OOBTree()
        self.root['summaries'] = OOBTree()
    self._lock = Lock()
def checkResolve(self, resolvable=True):
    # Provoke a write conflict on a counter object; with a resolvable
    # class (PCounter) the conflict is merged, with PCounter2 it raises.
    db = DB(self._storage)
    t1 = TransactionManager()
    c1 = db.open(t1)
    o1 = c1.root()['p'] = (PCounter if resolvable else PCounter2)()
    o1.inc()
    t1.commit()
    # A second connection increments concurrently and commits first...
    t2 = TransactionManager()
    c2 = db.open(t2)
    o2 = c2.root()['p']
    o2.inc(2)
    t2.commit()
    # ...so this commit conflicts with the committed state.
    o1.inc(3)
    try:
        t1.commit()
    except ConflictError as err:
        # Unresolvable case: the error names the class and o1 keeps
        # only its own local increments (1 + 3, never merged).
        self.assertIn(".PCounter2,", str(err))
        self.assertEqual(o1._value, 3)
    else:
        # Resolvable case: all increments are merged (1 + 2 + 3).
        self.assertTrue(resolvable, "Expected ConflictError")
        self.assertEqual(o1._value, 6)
    # After refreshing, both connections must agree on the value.
    t2.begin()
    self.assertEqual(o2._value, o1._value)
    db.close()
def __init__(self, location, name0="O", name1="X"):
    # Invoke the parent class initializer(s) first.
    super().__init__(name0, name1)
    # Persistent state lives in a ZODB file at *location*.
    self.db = DB(FileStorage.FileStorage(location))
    self.connection = self.db.open()
    self.root = self.connection.root()
def test_execute(self):
    """register_subscriber wraps the existing monitor in a ComponentActivityMonitor."""
    from zope.processlifetime import DatabaseOpenedWithRoot
    from nti.zodb.activitylog import register_subscriber

    class DB(object):
        # Minimal stand-in exposing the activity-monitor accessors.
        dam = None

        def __init__(self):
            self.databases = {'': self}

        def getActivityMonitor(self):
            return self.dam

        def setActivityMonitor(self, dam):
            self.dam = dam

    db = DB()
    db.setActivityMonitor(42)
    register_subscriber(DatabaseOpenedWithRoot(db))
    dam = db.getActivityMonitor()
    # The old monitor (42) must be kept as the wrapped base.
    assert_that(dam, is_(ComponentActivityMonitor))
    assert_that(dam, has_property('base', is_(42)))
    assert_that(
        dam,
        has_property(
            'components',
            contains_exactly(is_(LogActivityComponent),
                             is_(StatsdActivityComponent))))
def check_record_iternext(self):
    """record_iternext must walk oids 0..2 in order and then terminate."""
    db = DB(self._storage)
    conn = db.open()
    root = conn.root()
    root['abc'] = MinPO('abc')
    root['xyz'] = MinPO('xyz')
    transaction.commit()
    # Ensure it's all on disk.
    db.close()
    self._storage.close()
    self.open()
    key = None
    suffixes = (b'\000', b'\001', b'\002')
    for index, suffix in enumerate(suffixes):
        oid, tid, data, next_oid = self._storage.record_iternext(key)
        self.assertEqual(oid, b'\000' * 7 + suffix)
        key = next_oid
        expected_data, expected_tid = self._storage.load(oid, '')
        self.assertEqual(expected_data, data)
        self.assertEqual(expected_tid, tid)
        # The final record must report no successor.
        if index == len(suffixes) - 1:
            self.assertEqual(next_oid, None)
        else:
            self.assertNotEqual(next_oid, None)
def checkRestoreAcrossPack(self):
    """restore() into the destination must succeed even after a pack."""
    db = DB(self._storage)
    c = db.open()
    r = c.root()
    r["obj1"] = MinPO(1)
    transaction.commit()
    r["obj2"] = MinPO(1)
    transaction.commit()
    self._dst.copyTransactionsFrom(self._storage)
    self._dst.pack(time.time(), referencesf)
    self._undo(self._storage.undoInfo()[0]["id"])
    # copy the final transaction manually. even though there
    # was a pack, the restore() ought to succeed.
    # Record iterators can't be accessed out-of-order, so walk every
    # transaction, materializing each record list; after the loop,
    # `final`/`records` hold the last transaction.
    for final in self._storage.iterator():
        records = list(final)
    self._dst.tpc_begin(final, final.tid, final.status)
    for rec in records:
        self._dst.restore(rec.oid, rec.tid, rec.data, "", rec.data_txn,
                          final)
    self._dst.tpc_vote(final)
    self._dst.tpc_finish(final)
def __init__(self, filename):
    if isinstance(filename, basestring):
        # ZODB filepaths always use UNIX separators.
        filename = filename.replace('\\', '/')
    from ZODB import FileStorage, DB
    self.storage = FileStorage.FileStorage(filename)
    database = DB(self.storage)
    connection = database.open()
    self.root = connection.root()
def __init__(self):
    # Open the account database and make sure the account list exists.
    self.store = FileStorage.FileStorage("data.fs")
    self.database = DB(self.store)
    self.connection = self.database.open()
    self.root = self.connection.root()
    if 'Accounts' not in self.root:
        self.root['Accounts'] = []
    self.accounts = self.root['Accounts']
def test_connection_manager_default_transaction():
    """Changes committed inside the managed connection are visible elsewhere."""
    db = DB(DemoStorage())
    with Connection(db) as conn:
        conn.root()['foo'] = 'bar'
        transaction.commit()
    check_conn = db.open()
    assert check_conn.root()['foo'] == 'bar'
def __init__(self, engine, **kw):
    super(ZODBStore, self).__init__(engine, **kw)
    # ZODB stack: file storage -> database -> connection -> root mapping.
    self._storage = FileStorage.FileStorage(self._engine)
    self._db = DB(self._storage)
    self._connection = self._db.open()
    self._store = self._connection.root()
    # keeps DB in synch through commits of transactions
    self.sync = transaction.commit
def checkPackWithGCOnDestinationAfterRestore(self):
    # After copying all transactions to the destination and packing with
    # GC, only the root (plus the extra marker transaction) must survive;
    # obj1/obj2 were unlinked from the root and so must be collected.
    raises = self.assertRaises
    closing = self._closing
    __traceback_info__ = self._storage, self._dst
    db = closing(DB(self._storage))
    conn = closing(db.open())
    root = conn.root()
    root.obj = obj1 = MinPO(1)
    txn = transaction.get()
    txn.note(u'root -> obj')
    txn.commit()
    root.obj.obj = obj2 = MinPO(2)
    txn = transaction.get()
    txn.note(u'root -> obj -> obj')
    txn.commit()
    # Unlink the chain so both MinPOs become garbage.
    del root.obj
    txn = transaction.get()
    txn.note(u'root -X->')
    txn.commit()
    storage_last_tid = conn._storage.lastTransaction()
    self.assertEqual(storage_last_tid, root._p_serial)
    # Now copy the transactions to the destination
    self._dst.copyTransactionsFrom(self._storage)
    self.assertEqual(self._dst.lastTransaction(), storage_last_tid)
    # If the source storage is a history-free storage, all
    # of the transactions are now marked as packed in the
    # destination storage.  To trigger a pack, we have to
    # add another transaction to the destination that is
    # not packed.
    db2 = closing(DB(self._dst))
    tx_manager = transaction.TransactionManager(explicit=True)
    conn2 = closing(db2.open(tx_manager))
    txn = tx_manager.begin()
    root2 = conn2.root()
    root2.extra = 0
    txn.note(u'root.extra = 0')
    txn.commit()
    dest_last_tid = conn2._storage.lastTransaction()
    self.assertGreater(dest_last_tid, storage_last_tid)
    self.assertEqual(dest_last_tid, root2._p_serial)
    # Now pack the destination.
    from ZODB.utils import u64 as bytes8_to_int64
    if IRelStorage.providedBy(self._dst):
        # RelStorage packs to a TID (as an int), not a wall-clock time.
        packtime = bytes8_to_int64(storage_last_tid)
    else:
        from persistent.timestamp import TimeStamp
        packtime = TimeStamp(dest_last_tid).timeTime() + 2
    self._dst.pack(packtime, referencesf)
    # And check to see that the root object exists, but not the other
    # objects.
    __traceback_info__ += (packtime,)
    _data, _serial = self._dst.load(root._p_oid, '')
    raises(KeyError, self._dst.load, obj1._p_oid, '')
    raises(KeyError, self._dst.load, obj2._p_oid, '')
def viewdb(dbfilename):
    """Open a ZODB file and pop up one viewer window per root entry."""
    from ZODB import FileStorage, DB
    storage = FileStorage.FileStorage(dbfilename)
    connection = DB(storage).open()
    root = connection.root()
    for key in root.keys():
        popup = Toplevel()
        viewer(popup, root[key], key)
def test_connection_manager_transaction():
    """An explicit transaction manager commits on context exit."""
    db = DB(DemoStorage())
    tm = transaction.TransactionManager()
    with Connection(db, transaction_manager=tm) as conn:
        with tm as transaction_:
            conn.root()['foo'] = 'bar'
    # transaction was commited
    assert db.open().root()['foo'] == 'bar'
def make_one(self):
    """Return a WeakRef to a Persistent object added to a fresh connection."""
    conn = DB(None).open()
    pers = Persistent()
    conn.add(pers)
    return WeakRef(pers)
def test_connection_manager_aborting():
    """An aborted transaction leaves no trace in the database."""
    db = DB(DemoStorage())
    tm = transaction.TransactionManager()
    with Connection(db, transaction_manager=tm) as conn:
        with tm as transaction_:
            conn.root()['foo'] = 'bar'
            transaction_.abort()
    # transaction was aborted
    assert db.open().root().get('foo') is None
def opendb(zodbfile):
    """Open an existing ZODB file and return its 'Application' object.

    Raises OSError when the file does not exist.
    """
    if not os.path.exists(zodbfile):
        raise OSError("%s does not exist" % zodbfile)
    storage = FileStorage.FileStorage(zodbfile)
    conn = DB(storage).open()
    dbroot = conn.root()
    return dbroot['Application']
def __init__(self, path):
    # Standard ZODB bootstrap: storage -> db -> connection -> root.
    self.storage = FileStorage.FileStorage(path)
    self.db = DB(self.storage)
    self.connection = self.db.open()
    self.dbroot = self.connection.root()
class Bank:
    """Simple account store persisted in a ZODB file database."""

    def __init__(self):
        self.store = FileStorage.FileStorage("data.fs")
        self.database = DB(self.store)
        self.connection = self.database.open()
        self.root = self.connection.root()
        # Create the account list on first run.
        if 'Accounts' not in self.root:
            self.root['Accounts'] = []
        self.accounts = self.root['Accounts']

    def __enter__(self):
        return self

    def __exit__(self, type, value, traceback):
        # Roll back anything uncommitted, then release resources.
        # Fixed: the original called transaction.get() and discarded it.
        transaction.abort()
        self.connection.close()
        self.database.close()
        self.store.close()

    def add(self, clientName, clientId, address, phone, accountId):
        """Create and persist a new account; clientName must be non-empty."""
        if clientName != "":
            newAccount = Accounts()
            newAccount.clientName = clientName
            newAccount.clientId = clientId
            newAccount.address = address
            newAccount.phone = phone
            newAccount.accountId = accountId
            self.accounts.append(newAccount)
            self.root['Accounts'] = self.accounts
            transaction.commit()
            print("New client added..")
        else:
            print("Error add 5 arguments: client name, id, address, phone.....")

    def list(self):
        """Print every account's client name and account id."""
        if len(self.accounts) > 0:
            print("Bank accounts.......")
            for account in self.accounts:
                print("%s\t%s" % (account.clientName, account.accountId))
        else:
            print("No accounts in this bank.")

    def delete(self, accountId):
        """Delete the account(s) with *accountId*, committing if any matched.

        Fixed: the original reset its `deleted` flag on every loop
        iteration and deleted by index while iterating, which could
        raise IndexError and mis-report the result.
        """
        remaining = [a for a in self.accounts if a.accountId != accountId]
        if len(remaining) != len(self.accounts):
            self.accounts = remaining
            self.root['Accounts'] = self.accounts
            transaction.commit()
            print("Account deleted..")
        else:
            print("There is no Account Id '%s'.." % accountId)
def __init__(self):
    # In-memory ZODB (no storage argument) holding two BTrees.
    self.zodb_backend = DB(None)
    self.connection = self.zodb_backend.open()
    self.root = self.connection.root()
    for name in ("sources", "events"):
        self.root[name] = BTrees.OOBTree.BTree()
    self.sources = self.root["sources"]
    self.events = self.root["events"]
    # Transient (non-persistent) status scratchpad.
    self.status = {}
def open(self):
    """Open the database file; return 1 if already open, else 0."""
    if self.is_opened:
        return 1
    self._storage = FileStorage.FileStorage(self._full_path)
    self._db = DB(self._storage)
    self._connection = self._db.open()
    self._root = self._connection.root()
    return 0
class New:
    """Factory for connections to the configured ZODB file."""

    def __init__(self, dbPath=setup.dbPath):
        self.db = DB(FileStorage.FileStorage(dbPath))

    def connect(self, app=None):
        """Open a connection; return the (connection, root) pair."""
        conn = Connection(self.db.open(), db=self.db)
        return conn, conn.root()

    def close(self):
        self.db.close()

    def pack(self):
        self.db.pack()
def db_setup():
    """ Open database connection storage -> database -> connection -> root node """
    fs_cleanup()
    storage = FileStorage.FileStorage('fruits.fs')
    database = DB(storage)
    connection = database.open()
    return (database, connection.root())
class MyZODB(object):
    """FileStorage-backed database created fresh on each instantiation."""

    def __init__(self, path):
        self.storage = FileStorage.FileStorage(path)
        # create=True starts from an empty file.
        # NOTE(review): large_record_size=999 (bytes) is unusually small
        # for this option -- confirm the intended value.
        self.db = DB(self.storage, create=True, large_record_size=999)
        self.connection = self.db.open()
        self.dbroot = self.connection.root()

    def close(self):
        """Shut down connection, database and storage, in that order."""
        self.connection.close()
        self.db.close()
        self.storage.close()
class Database:
    """ZODB database whose records are transparently zlib-compressed."""

    def __init__(self, file_storage):
        self.storage = FileStorage.FileStorage(file_storage)
        # Wrap the raw storage so records are compressed on disk.
        self.compressed_storage = zc.zlibstorage.ZlibStorage(self.storage)
        self.db = DB(self.compressed_storage)
        self.connection = self.db.open()
        self.db_root = self.connection.root()

    def close(self):
        self.db.close()
class MiZODB(object):
    """Minimal ZODB wrapper (storage, db, connection, root)."""

    def __init__(self, archivo):
        self.storage = FileStorage.FileStorage(archivo)
        self.db = DB(self.storage)
        self.connection = self.db.open()
        self.root = self.connection.root()

    def close(self):
        """Close the connection, the database and the storage."""
        self.connection.close()
        self.db.close()
        self.storage.close()
def checkTransactionalUndoAfterPackWithObjectUnlinkFromRoot(self):
    # A pack must discard history older than the pack time, and a
    # subsequent transactional undo of the newest transaction must still
    # work and restore the o1 -> o2 -> o3 object graph.
    eq = self.assertEqual
    db = DB(self._storage)
    conn = db.open()
    root = conn.root()
    o1 = C()
    o2 = C()
    root['obj'] = o1
    o1.obj = o2
    txn = transaction.get()
    txn.note(u'o1 -> o2')
    txn.commit()
    # Busy-wait until the clock ticks so the pack time strictly
    # separates the first transaction from the ones below.
    now = packtime = time.time()
    while packtime <= now:
        packtime = time.time()
    o3 = C()
    o2.obj = o3
    txn = transaction.get()
    txn.note(u'o1 -> o2 -> o3')
    txn.commit()
    # Re-link so o2 is no longer reachable from o1.
    o1.obj = o3
    txn = transaction.get()
    txn.note(u'o1 -> o3')
    txn.commit()
    # All four transactions (including database creation) are undoable.
    log = self._storage.undoLog()
    eq(len(log), 4)
    for entry in zip(log, (b'o1 -> o3', b'o1 -> o2 -> o3',
                           b'o1 -> o2', b'initial database creation')):
        eq(entry[0]['description'], entry[1])
    # Packing removes the two transactions older than packtime.
    self._storage.pack(packtime, referencesf)
    log = self._storage.undoLog()
    for entry in zip(log, (b'o1 -> o3', b'o1 -> o2 -> o3')):
        eq(entry[0]['description'], entry[1])
    # Undo the most recent transaction ('o1 -> o3').
    tid = log[0]['id']
    db.undo(tid)
    txn = transaction.get()
    txn.note(u'undo')
    txn.commit()
    # undo does a txn-undo, but doesn't invalidate
    conn.sync()
    log = self._storage.undoLog()
    for entry in zip(log, (b'undo', b'o1 -> o3', b'o1 -> o2 -> o3')):
        eq(entry[0]['description'], entry[1])
    # The undo restored the intermediate link: o1 -> o2 -> o3.
    eq(o1.obj, o2)
    eq(o1.obj.obj, o3)
    self._iterate()