def checkResolve(self, resolvable=True):
    """Exercise conflict resolution with two concurrent transaction managers.

    With resolvable=True the PCounter class merges the concurrent
    increments; with resolvable=False PCounter2 raises ConflictError and
    the first connection keeps only its own value.
    """
    db = DB(self._storage)
    t1 = TransactionManager()
    c1 = db.open(t1)
    # Store a fresh counter and commit an initial increment of 1.
    o1 = c1.root()['p'] = (PCounter if resolvable else PCounter2)()
    o1.inc()
    t1.commit()
    # A second, independent transaction manager commits a concurrent
    # increment of 2 against the same object.
    t2 = TransactionManager()
    c2 = db.open(t2)
    o2 = c2.root()['p']
    o2.inc(2)
    t2.commit()
    # Now commit a conflicting increment of 3 on the first connection.
    o1.inc(3)
    try:
        t1.commit()
    except ConflictError as err:
        # Only the unresolvable class should end up here; the error text
        # names the class that failed to resolve.
        self.assertIn(".PCounter2,", str(err))
        self.assertEqual(o1._value, 3)
    else:
        self.assertTrue(resolvable, "Expected ConflictError")
        # Resolution merged all three increments: 1 + 2 + 3.
        self.assertEqual(o1._value, 6)
    # Refresh the second connection and check both views agree.
    t2.begin()
    self.assertEqual(o2._value, o1._value)
    db.close()
def test_local_utility_persitent():
    """A utility registered on a persistent local site manager survives a
    commit, and un-committed attribute changes are discarded on abort.
    """
    db = DB(DemoStorage('test_storage'))
    connection = db.open()
    root = connection.root()
    site = root['site'] = SimpleSite()
    components.LocalSiteManager(site)
    transaction.commit()
    getSiteManager(context=site).registerUtility(PeculiarDummyUtil())
    dummy = getUtility(IDummyUtil, context=site)
    dummy.answer = 'no'
    assert dummy.callme() == "Perculiarly, no"
    transaction.commit()
    del site
    del dummy
    # Reopen: the committed utility state must still be there.
    connection = db.open()
    site = connection.root()['site']
    dummy = getUtility(IDummyUtil, context=site)
    assert dummy.callme() == "Perculiarly, no"
    # and aborting does not save state
    dummy.answer = 'yes'
    assert dummy.callme() == "Perculiarly, yes"
    # BUG FIX: was `transaction.abort` -- a bare attribute access that did
    # nothing.  The abort must actually be CALLED for the final assertion
    # to be testing anything.
    transaction.abort()
    connection = db.open()
    site = connection.root()['site']
    dummy = getUtility(IDummyUtil, context=site)
    assert dummy.callme() == "Perculiarly, no"
    db.close()
def test_initialize_applications():
    """initialize_applications installs every application produced by the
    factory mapping, and installs them atomically (all-or-nothing)."""

    def apps():
        return {'mine': MyApp, 'app': SimpleApp,
                'app2': SimpleApp, 'obj': tuple}

    def failing_apps():
        # The second factory raises, so nothing at all may be installed.
        return OrderedDict((('foo', SimpleApp), ('spam', lambda: 1 / 0)))

    db = DB(DemoStorage())
    initialize_applications(db, apps)
    conn = db.open()
    root = conn.root()
    assert root['mine']() == "running !"
    assert root['app']() == "simply running !"
    assert root['app2']() == "simply running !"
    assert isinstance(root['obj'], tuple)
    transaction.abort()
    conn.close()
    # verify it's all or nothing
    try:
        initialize_applications(db, failing_apps)
    except ZeroDivisionError:
        pass
    conn = db.open()
    root = conn.root()
    assert 'foo' not in root
    conn.close()
def __init__(self, name="DEV"): self.name: str = name # name of the crawled document directory self.index = dict( ) # type Dict[str,PostingDict], temporary store some of the index and later dump into disk self.page: int = 1 self.map: IOBTree = IOBTree() #type: Dict[docId, url] self.invertmap = dict() # type: Dict[url, docId] # the next section is initialize three file that would store the map between url and docId, champlist and inverted Index '''ZODB Opening''' indexStorage = FileStorage.FileStorage(f'IndexInvertIndex{name}.fs') indexdb = DB(indexStorage) self.indexconnection = indexdb.open() self.indexRoot = self.indexconnection.root() mapStorage = FileStorage.FileStorage(f'map{name}.fs') mapdb = DB(mapStorage) self.mapconnection = mapdb.open() self.mapRoot = self.mapconnection.root() champStorage = FileStorage.FileStorage(f'champList{self.name}.fs') champdb = DB(champStorage) self.champListConnection = champdb.open() self.champListRoot = self.champListConnection.root() self.threshold: int = 0 # when reach a threshold, dump the inverted index to disk self.countTotalWebPages( name) # count how many webpage the crawler crawl self.url = set() # store the url crawled, to avoid duplication
def __init__(self, filepath):
    """Connect to (or create) the database file on instantiation.

    An existing file has its four query/record trees loaded, creating any
    that are missing; a brand-new file is seeded with empty structures.
    """
    # Must be checked BEFORE FileStorage creates the file on open.
    existed = os.path.exists(filepath)
    self.storage = FileStorage.FileStorage(filepath)
    db = DB(self.storage)
    connection = db.open()
    self.root = connection.root()
    if existed:
        # BUG FIX: the original used bare ``except:`` and, on the fallback
        # path, stored a new tree in the root WITHOUT assigning the
        # corresponding attribute, leaving e.g. ``self.misc`` undefined.
        try:
            self.misc = self.root['misc_queries']
        except KeyError:
            self.misc = self.root['misc_queries'] = OOBTree()
        try:
            self.squeries = self.root['search_queries']
        except KeyError:
            self.squeries = self.root['search_queries'] = OOBTree()
        try:
            self.qsets = self.root['query_sets']
        except KeyError:
            self.qsets = self.root['query_sets'] = OOBTree()
        try:
            self.rsets = self.root['record_sets']
        except KeyError:
            self.rsets = self.root['record_sets'] = OOBTree()
    else:  # first time accessing
        # create data structures
        # NOTE(review): these legacy key names differ from the ones read
        # above -- confirm which schema callers actually use.
        self.root['queries'] = OOBTree()
        self.root['records'] = OOBTree()
        self.root['results'] = OOBTree()
        self.root['searches'] = OOBTree()
        self.root['summaries'] = OOBTree()
    self._lock = Lock()
def checkPackVersionReachable(self):
    # Pack must not garbage-collect data that is only reachable through a
    # version (the old ZODB "versions" feature), and an undone
    # abortVersion must survive a pack as well.
    db = DB(self._storage)
    cn = db.open()
    root = cn.root()
    names = "a", "b", "c"
    for name in names:
        root[name] = MinPO(name)
    transaction.commit()
    # Give each object a divergent revision inside a same-named version.
    for name in names:
        cn2 = db.open(version=name)
        rt2 = cn2.root()
        obj = rt2[name]
        obj.value = MinPO("version")
        transaction.commit()
        cn2.close()
    root["d"] = MinPO("d")
    transaction.commit()
    snooze()
    self._storage.pack(time.time(), referencesf)
    cn.sync()
    # make sure all the non-version data is there
    for name, obj in root.items():
        self.assertEqual(name, obj.value)
    # make sure all the version-data is there,
    # and create a new revision in the version
    for name in names:
        cn2 = db.open(version=name)
        rt2 = cn2.root()
        obj = rt2[name].value
        self.assertEqual(obj.value, "version")
        obj.value = "still version"
        transaction.commit()
        cn2.close()
    db.abortVersion("b")
    txn = transaction.get()
    txn.note("abort version b")
    txn.commit()
    t = time.time()
    snooze()
    L = db.undoInfo()
    db.undo(L[0]["id"])
    txn = transaction.get()
    txn.note("undo abort")
    txn.commit()
    # Pack to just before the abort/undo dance; the version revision
    # restored by the undo must still be reachable afterwards.
    self._storage.pack(t, referencesf)
    cn2 = db.open(version="b")
    rt2 = cn2.root()
    self.assertEqual(rt2["b"].value.value, "still version")
def test_pickle_zodb_lookup_utility(self):
    # Now, we can register a couple utilities in the base, save everything,
    # and look it up in the sub (when the classes don't match)
    storage = DemoStorage()
    self._store_base_subs_in_zodb(storage)
    db = DB(storage)
    conn = db.open()
    new_base = conn.root()['base']
    new_base._p_activate()
    new_sub = conn.root()['sub']
    # Zero thresholds force immediate conversion of the registry data
    # structures to BTrees.
    new_base.utilities.btree_provided_threshold = 0
    new_base.utilities.btree_map_threshold = 0
    new_base.registerUtility(MockSite(), provided=IFoo)
    provided1 = new_base.adapters._provided
    # Previously this would fail. Now it works.
    new_base.registerUtility(MockSite(), provided=implementedBy(object),
                             name=u'foo')
    new_base.registerUtility(MockSite(), provided=IMock, name=u'foo')
    provided2 = new_base.adapters._provided
    # Make sure that it only converted once
    assert_that(provided1, is_(same_instance(provided2)))
    assert_that(new_base._utility_registrations, is_(BTrees.OOBTree.OOBTree))
    assert_that(new_base._utility_registrations.keys(),
                contains(
                    (IFoo, u''),
                    (IMock, u'foo'),
                    (implementedBy(object), u'foo'),
                ))
    assert_that(new_base.utilities._provided, is_(BTrees.family64.OI.BTree))
    assert_that(new_base.utilities._adapters[0], is_(BTrees.family64.OO.BTree))
    assert_that(new_base.utilities._adapters[0][IFoo], is_(BTrees.family64.OO.BTree))
    transaction.commit()
    conn.close()
    db.close()
    # Reopen from the same storage and look the utilities up via the sub.
    db = DB(storage)
    conn = db.open()
    new_sub = conn.root()['sub']
    x = new_sub.queryUtility(IFoo)
    assert_that(x, is_(MockSite))
    x = new_sub.queryUtility(IMock, u'foo')
    assert_that(x, is_(MockSite))
def test_pickle_zodb_lookup_adapter(self):
    # Now, we can register a couple adapters in the base, save everything,
    # and look it up in the sub (when the classes don't match)
    storage = DemoStorage()
    self._store_base_subs_in_zodb(storage)
    db = DB(storage)
    conn = db.open()
    new_base = conn.root()['base']
    new_base._p_activate()
    new_sub = conn.root()['sub']
    # Thresholds chosen so a LATER registration (not the first) trips the
    # conversion of the adapter map to BTrees.
    new_base.adapters.btree_provided_threshold = 0
    new_base.adapters.btree_map_threshold = 1
    # Note: this used-to cause btree-ing the map to fail. The
    # implementedBy callable previously had default comparison and can't be
    # stored in a btree. As of zope.interface 4.3.0, this is fixed.
    new_base.registerAdapter(_foo_factory, required=(object,),
                             provided=IFoo)
    new_base.registerAdapter(_foo_factory2, required=(IFoo,),
                             provided=IMock)
    assert_that(new_base._adapter_registrations, is_(BTrees.OOBTree.OOBTree))
    assert_that(new_base._adapter_registrations.keys(),
                contains(
                    ((IFoo,), IMock, u''),
                    ((implementedBy(object),), IFoo, u''),
                ))
    assert_that(new_base.adapters._provided, is_(BTrees.family64.OI.BTree))
    assert_that(new_base.adapters._adapters[0], is_({}))
    assert_that(new_base.adapters._adapters[1][IFoo], is_(dict))
    # The third registration pushes past btree_map_threshold, so the plain
    # dict map converts to a BTree.
    new_base.registerAdapter(_foo_factory2, required=(IFoo,),
                             provided=IFoo)
    assert_that(new_base.adapters._adapters[1][IFoo], is_(BTrees.family64.OO.BTree))
    transaction.commit()
    conn.close()
    db.close()
    # Reopen and adapt through the sub registry.
    db = DB(storage)
    conn = db.open()
    new_sub = conn.root()['sub']
    x = new_sub.queryAdapter(RootFoo(), IMock)
    assert_that(x, is_(2))
def second_init():
    """Running initialize_applications twice must not replace an already
    installed application object."""
    entry_points = ['myapp=cromlech.zodb.tests.test_initialize:MyApp']
    with EntryPointMocker(entry_points):
        db = DB(DemoStorage())
        utils.initialize_applications(db)
        conn = db.open()
        app = conn.root()['myapp']
        # again
        utils.initialize_applications(db)
        conn = db.open()
        assert app is conn.root()['myapp']
def test_register_implemented_by_lookup_utility(self):
    # Registering a utility for an implementedBy() spec works and
    # persists, but is NOT retrievable by passing the bare class to
    # queryUtility (checked at the end).
    storage = DemoStorage()
    self._store_base_subs_in_zodb(storage)
    db = DB(storage)
    conn = db.open()
    new_base = conn.root()['base']
    new_base._p_activate()
    new_sub = conn.root()['sub']
    # Zero thresholds force immediate conversion to BTrees.
    new_base.utilities.btree_provided_threshold = 0
    new_base.utilities.btree_map_threshold = 0
    new_base.registerUtility(MockSite(), provided=IFoo)
    provided1 = new_base.adapters._provided
    # In the past, we couldn't register by implemented, but now we can.
    new_base.registerUtility(MockSite(), provided=implementedBy(MockSite),
                             name=u'foo')
    provided2 = new_base.adapters._provided
    # Make sure that it only converted once
    assert_that(provided1, is_(same_instance(provided2)))
    assert_that(new_base._utility_registrations, is_(BTrees.OOBTree.OOBTree))
    assert_that(new_base._utility_registrations.keys(),
                contains(
                    (IFoo, u''),
                    ((implementedBy(MockSite), u'foo')),
                ))
    assert_that(new_base.utilities._provided, is_(BTrees.family64.OI.BTree))
    assert_that(new_base.utilities._adapters[0], is_(BTrees.family64.OO.BTree))
    assert_that(new_base.utilities._adapters[0][IFoo], is_(BTrees.family64.OO.BTree))
    transaction.commit()
    conn.close()
    db.close()
    db = DB(storage)
    conn = db.open()
    new_sub = conn.root()['sub']
    x = new_sub.queryUtility(IFoo)
    assert_that(x, is_(MockSite))
    # But it can't actually be looked up, regardless of whether we
    # convert to btrees or not
    x = new_sub.queryUtility(MockSite, u'foo')
    assert_that(x, is_(none()))
def checkRestoreAcrossPack(self):
    # restore() into the destination storage must keep working even after
    # the destination has been packed.
    db = DB(self._storage)
    c = db.open()
    r = c.root()
    obj = r["obj1"] = MinPO(1)
    transaction.commit()
    obj = r["obj2"] = MinPO(1)
    transaction.commit()
    self._dst.copyTransactionsFrom(self._storage)
    self._dst.pack(time.time(), referencesf)
    self._undo(self._storage.undoInfo()[0]['id'])
    # copy the final transaction manually. even though there
    # was a pack, the restore() ought to succeed.
    it = self._storage.iterator()
    final = list(it)[-1]
    self._dst.tpc_begin(final, final.tid, final.status)
    for r in final:
        self._dst.restore(r.oid, r.tid, r.data, r.version, r.data_txn,
                          final)
    it.close()
    self._dst.tpc_vote(final)
    self._dst.tpc_finish(final)
class MyZODB(object):
    """Thin wrapper around a FileStorage-backed ZODB: open on construction,
    with helpers to close the connection stack and delete the on-disk
    files."""

    def __init__(self, dbPath):
        self.dbPath = os.path.abspath(dbPath)
        self.logger = logging.getLogger('monitor_local.MyProjectDB.MyZODB')
        self.logger.info("Open database: %s", self.dbPath)
        try:
            self.storage = FileStorage.FileStorage(self.dbPath)
        except zc.lockfile.LockError:
            # Another process holds the FileStorage lock file.
            raise DBisBusyError("Database is in use by other progress")
        self.db = DB(self.storage)
        self.conn = self.db.open()
        self.root = self.conn.root()
        self.openFlag = True

    def close(self):
        """Tear down connection, database and storage (in that order)."""
        self.logger.info("Close database: %s", self.dbPath)
        self.conn.close()
        self.db.close()
        self.storage.close()
        self.openFlag = False

    def removeDBFile(self):
        """Delete the database file plus its index/lock/tmp companions,
        closing the database first if it is still open."""
        self.logger.info("Remove database: %s", self.dbPath)
        if self.openFlag:
            self.close()
        for suffix in ('', '.index', '.lock', '.tmp'):
            candidate = self.dbPath + suffix
            if os.path.isfile(candidate):
                os.remove(candidate)
class BodyDB( object ):
    # Python 2 code: uses dict.has_key() and the 0755 octal literal.
    def __init__(self, db_dir):
        """Open (or create) the kamino ZODB under db_dir and bootstrap
        the root directory plus the filesystem/file/patch sub-databases."""
        self.zodb_file = os.path.join(db_dir, 'kamino.zodb')
        self.file_store_fn = os.path.join(db_dir, 'file_store')
        self.zodb_storage = FileStorage.FileStorage( self.zodb_file )
        self.zodb_db = DB(self.zodb_storage)
        self.zodb_con = self.zodb_db.open()
        self.db_root = self.zodb_con.root()
        # First run: seed the root with an empty '/' directory entry and
        # the three helper databases.
        if not self.db_root.has_key('/'):
            self.db_root['/'] = types.Directory( '', None, 0, 0, 0755, 0 )
            self.db_root['fs_db'] = types.FileSystems()
            self.db_root['file_db'] = types.FileDatabase()
            self.db_root['patch_db'] = types.PatchDatabase()
        self.fs_db = self.db_root['fs_db']
        self.file_db = self.db_root['file_db']
        self.patch_db = self.db_root['patch_db']
        # Ensure that all mounted local filesystems have an ID
        self.fs_db.check_filesystems()
        self.file_store = file_store.FileStore( self.file_store_fn, self )
def testdb(request):
    # Django view: bulk-load all Listing rows into a ZODB OOBTree keyed by
    # a hash of the category id.  (Python 2 style: uses dict.has_key().)
    storage = FileStorage.FileStorage('listingsc.fs')
    db = DB(storage)
    conn = db.open()
    dbroot = conn.root()
    # Ensure that a 'listingdb' key is present
    # in the root
    if not dbroot.has_key('listingdb'):
        from BTrees.OOBTree import OOBTree
        dbroot['listingdb'] = OOBTree()
    userdb = dbroot['listingdb']
    lists = Listing.objects.all().values()
    # Offsets so the scaled lat/lng keys start at zero.
    minlat = min([l['latitude'] for l in lists])
    minlng = min([l['longitude'] for l in lists])
    for l in lists:
        #return HttpResponse(l)
        key1 = int(l['latitude']*1000000-minlat*1000000)
        key2 = int(l['longitude']*1000000-minlng*1000000)
        #userdb[hashlib.sha224(str(l['keyword_id'])).hexdigest()[:8]+str(zcurve.interleave2(key1,key2)).zfill(9)]=l
        # NOTE(review): key1/key2 are computed but unused on this code
        # path; only the category hash is used as the BTree key, so
        # listings in the same category overwrite one another -- confirm.
        userdb[hashlib.sha224(str(l['category_id'])).hexdigest()[:8]]=l
    # Commit the change
    transaction.commit()
    db.close()
    return HttpResponse("woo i guess")
class DBStorage(jpath4.db.storage.Storage):
    """jpath4 storage backend that persists into a ZODB FileStorage."""

    def __init__(self, file_path):
        self.storage = ZFileStorage.FileStorage(file_path)
        self.db = ZDB(self.storage)
        self.connection = self.db.open()
        self.z_root = self.connection.root()
        # Seed the jpath root object on first use.
        if "jpath4-root" not in self.z_root:
            self.z_root["jpath4-root"] = DBDict()
            transaction.commit()
        self.unwrapped_root = self.z_root["jpath4-root"]
        self.wrapped_root = standard_wrapper.wrap(self.unwrapped_root)

    def get_root(self):
        """Return the wrapped jpath root object."""
        return self.wrapped_root

    def apply_updates(self, updates):
        """Apply a batch of jpath updates against the wrapped root,
        materializing containers as persistent DBDict/DBList objects."""
        standard_wrapper.apply_updates(
            self.wrapped_root, updates,
            classes={"object": DBDict, "list": DBList})

    def commit(self):
        transaction.commit()

    def abort(self):
        transaction.abort()

    def close(self):
        """Release connection, database and storage, in that order."""
        self.connection.close()
        self.db.close()
        self.storage.close()
class Conductor(child.AMPChild):
    """Base class for Conductor object"""

    def __init__(self, dbaddr='/tmp/zeosocket', AMPCommands=None, key=None):
        Process.__init__(self)
        self.dbaddr = dbaddr
        self.AMPCommands = AMPCommands
        self.key = key
        # Connection state is created lazily in start().
        self.storage = None  # just in case.
        self.db = None
        self.conn = None
        self.root = None

    def start(self):
        """opens DB connection."""
        self.storage = ClientStorage.ClientStorage(self.dbaddr)
        self.db = DB(self.storage)
        self.conn = self.db.open()
        self.root = self.conn.root()

    def end(self):
        """closes DB connection."""
        pass

    def get_object(self, kind, key):
        """returns an object from zodb."""
        # TODO defer
        return self.root[kind][key]
class Base(object):
    """Owns the on-disk graph database and one duplex connection."""

    def __init__(self, path, authkey):
        if not os.path.exists(path):
            os.makedirs(path)
        self._path = path
        self.authkey = authkey
        self.storage = FileStorage(os.path.join(path, 'graph.fs'))
        self.db = DB(self.storage)

    def path(self):
        """Return the directory this database lives in."""
        return self._path

    def process(self, connection):
        # ``connection`` is a (factory, args) pair; calling the factory
        # yields the actual duplex endpoint.
        factory, args = connection
        self.connection = factory(*args)

    def recv(self):
        return self.connection.recv()

    def send(self, message):
        # One-shot send: the connection is closed after the message.
        self.connection.send(message)
        self.connection.close()

    def open(self):
        return self.db.open()

    def close(self):
        # Abort any pending transaction before tearing down.
        transaction.get().abort()
        self.db.close()
        self.storage.close()
def get_conn():
    """Return the calling thread's cached ZODB connection, creating and
    caching a new one the first time this thread asks."""
    try:
        return _thread_data.zodb_conn
    except AttributeError:
        conn = DB(get_storage()).open()
        _thread_data.zodb_conn = conn
        return conn
def getUrl2(self):
    # Append the "new" feed (if any) to the persisted feed list, then
    # return [url, updated] pairs for every stored feed.
    storage = FileStorage.FileStorage('feed.fs')
    db = DB(storage)
    connection = db.open()
    root = connection.root()
    new = self.getNew()
    urllist = []
    list = []  # NOTE(review): shadows the builtin ``list``
    list = root['Feed']
    if new is not None:
        # A new feed starts with an "updated" stamp one week in the past.
        list.append(FeedAdmin(new, self.datetime.date.today() - self.datetime.timedelta(7)))
        transaction.begin()
        root['Feed'] = list
        transaction.commit()
    feedlist = root['Feed']
    for f in feedlist:
        urllist.append([f.url, f.updated])
    connection.close()
    db.close()
    # NOTE(review): this write happens AFTER the connection and db are
    # closed -- it will raise (or at best never persist).  It looks like
    # the intent was to clear the list before closing; confirm and move
    # it (plus a commit) above the close calls.
    root['Feed'] = []
    return urllist
class World_zeo(object):
    """Holds an open ZEO client connection and its root object."""

    def __init__(self, addr=('localhost', 9100)):
        # Keep every layer of the stack so callers can close them later.
        self.addr = addr
        self.storage = ClientStorage.ClientStorage(self.addr)
        self.db = DB(self.storage)
        self.conn = self.db.open()
        self.root = self.conn.root()
class PersistenceHandler:
    """Context-manager wrapper around a ZODB file used to checkpoint and
    restore scraping progress."""

    def __init__(self, db_name):
        self.db_name = db_name

    def __enter__(self):
        self.db = DB(FileStorage(self.db_name))
        self.conn = self.db.open()
        self.root = self.conn.root()
        return self

    def __exit__(self, _, __, ___):
        # Close the connection first; the DB is closed even if that fails.
        try:
            self.conn.close()
        finally:
            self.db.close()

    def store_progress(self, scrape_targets, vulnerability_results):
        """Persist both progress collections and commit immediately."""
        self.root["scrape_targets"] = scrape_targets
        self.root["vulnerability_results"] = vulnerability_results
        transaction.commit()

    def retrieve_progress(self):
        """Return [scrape_targets, vulnerability_results], substituting an
        empty list for anything not stored yet."""
        progress = []
        for key in ("scrape_targets", "vulnerability_results"):
            try:
                progress.append(self.root[key])
            except KeyError:
                progress.append(list())
        return progress
def check_record_iternext(self):
    # record_iternext must walk every current record in oid order,
    # handing back a continuation key until it returns None.
    db = DB(self._storage)
    conn = db.open()
    conn.root()['abc'] = MinPO('abc')
    conn.root()['xyz'] = MinPO('xyz')
    transaction.commit()
    # Ensure it's all on disk.
    db.close()
    self._storage.close()
    self.open()
    key = None
    # Three objects in total: the root plus the two MinPOs.
    for x in (b'\000', b'\001', b'\002'):
        oid, tid, data, next_oid = self._storage.record_iternext(key)
        self.assertEqual(oid, (b'\000' * 7) + x)
        key = next_oid
        expected_data, expected_tid = load_current(self._storage, oid)
        self.assertEqual(expected_data, data)
        self.assertEqual(expected_tid, tid)
        if x == b'\002':
            self.assertEqual(next_oid, None)
        else:
            self.assertNotEqual(next_oid, None)
def make_one(self):
    """Return a WithProperty instance that has been added to a throwaway
    in-memory database connection (so it gets an oid and a jar)."""
    conn = DB(None).open()
    obj = WithProperty()
    conn.add(obj)
    return obj
def makeMap(request):
    # Django view: given lat/lng GET parameters, look up nearby listings
    # in a z-curve keyed ZODB BTree; fall back to a random sample of
    # buildings on any error.
    listings = Building.objects.order_by('?')[:1000]
    keywords = Keywords.objects.order_by('keyword')
    allcategories = Categories.objects.order_by('category').values('category')
    zipcodes = Zipcode.objects.order_by('code')
    if request.method == 'GET':
        try:
            lat = request.GET['lat']
            lng = request.GET['lng']
            lists = Listing.objects.all().values()
            minlat = min([l['latitude'] for l in lists])
            minlng = min([l['longitude'] for l in lists])
            # Scale to integers and interleave into a single z-curve key.
            key1 = int(float(lat)*1000000-minlat*1000000)
            key2 = int(float(lng)*1000000-minlng*1000000)
            search = zcurve.interleave2(key1,key2)
            storage = FileStorage.FileStorage('listings2.fs')
            db = DB(storage)
            conn = db.open()
            dbroot = conn.root()
            userdb = dbroot['listingdb']
            listings = []
            # NOTE(review): asymmetric window (-100000 .. +10000) --
            # confirm whether the upper bound was meant to be +100000.
            for i in range(search-100000,search+10000):
                try:
                    listings.append(userdb[int(i)])
                except:
                    pass
            db.close()
            return render_to_response('map2.html', locals())
        except:
            pass
    return render_to_response('map.html', locals())
def checkUndoMultipleConflictResolution(self, reverse=False):
    # undoMultiple of several conflicting increments must be resolved by
    # PCounter's conflict resolution, in either undo order.
    from .ConflictResolution import PCounter
    db = DB(self._storage)
    cn = db.open()
    try:
        cn.root.x = PCounter()
        transaction.commit()
        # Four separate increment transactions, each noted with its index.
        for i in range(4):
            with db.transaction() as conn:
                conn.transaction_manager.get().note(
                    (str if PY3 else unicode)(i))
                conn.root.x.inc()
        ids = [l['id'] for l in db.undoLog(1, 3)]
        if reverse:
            ids.reverse()
        db.undoMultiple(ids)
        transaction.commit()
        # Two of the four increments were undone, leaving a value of 2.
        self.assertEqual(cn.root.x._value, 2)
    finally:
        cn.close()
        db.close()
class PersistentTicTacToe(TicTacToe):
    """Tic Tac Toe Game that can be saved.

    >>> PersistentTicTacToe(name0="O", name1="X", location="ttt.pickle")
    """

    def __init__(self, location, name0="O", name1="X"):
        # Initialise the in-memory game state via the parent class first.
        super().__init__(name0, name1)
        self.db = DB(FileStorage.FileStorage(location))
        self.connection = self.db.open()
        self.root = self.connection.root()

    def save(self):
        """Write the complete game state into the database and commit."""
        self.root["field"] = self.field
        self.root["turn"] = self.turn
        self.root["active_player"] = self.active_player
        self.root["player_names"] = self.player_names
        transaction.commit()

    def load(self):
        """Restore the game state previously written by save()."""
        self.field = self.root["field"]
        self.turn = self.root["turn"]
        self.active_player = self.root["active_player"]
        self.player_names = self.root["player_names"]
def mapSearch(request, search):
    # Django view benchmarking keyword lookup: ZODB BTree lookup vs a
    # MySQL query, repeated 9 times; timings are written to 'search_times'.
    keywords = Keywords.objects.order_by('keyword')
    categories = Categories.objects.order_by('category')
    zipcodes = Zipcode.objects.order_by('code')
    listings = []
    timings = []
    for i in range(1,10):
        start = time()
        # Earlier variants (lookup by listing id, lookup by category) kept
        # commented out for reference:
        #try:
            #isearch = int(search)
            #storage = FileStorage.FileStorage('listings.fs')
            #db = DB(storage)
            #conn = db.open()
            #dbroot = conn.root()
            #userdb = dbroot['listingdb']
            #listings.append(userdb[isearch])
            #search = isearch
            #db.close()
        #except:
            #pass
        #try:
            #csearch = Categories.objects.filter(category=search)[0].id
            #storage = FileStorage.FileStorage('listingsc.fs')
            #db = DB(storage)
            #conn = db.open()
            #dbroot = conn.root()
            #userdb = dbroot['listingdb']
            #listings.append(userdb[hashlib.sha224(str(csearch)).hexdigest()[:8]])
            #search = csearch
            #db.close()
        #except:
            #pass
        try:
            # BTree path: resolve the keyword to its id, then fetch the
            # listing stored under the hashed id.
            ksearch = Keywords.objects.filter(keyword=search)[0].id
            storage = FileStorage.FileStorage('listingsk.fs')
            db = DB(storage)
            conn = db.open()
            dbroot = conn.root()
            userdb = dbroot['listingdb']
            listings.append(userdb[hashlib.sha224(str(ksearch)).hexdigest()[:8]])
            search = ksearch
            db.close()
        except:
            pass
        T = time() - start
        start = time()
        try:
            ksearch = Keywords.objects.filter(keyword=search)[0].id
        except:
            pass
        # MySQL path.  NOTE(review): l[:0] is always an empty slice, so
        # nothing from this query is actually appended -- confirm intent.
        l = Building.objects.filter(number=612)
        listings.append(l[:0])
        search = 612
        N = time() - start
        timings +=[{'Tree-Time:':T, 'MYSQL-Time:':N}]
    writeToFile(timings,'search_times')
    listings = listings[:1000]
    return render_to_response('map2.html', locals())
def check_record_iternext(self):
    # record_iternext must walk every current record in oid order,
    # handing back a continuation key until it returns None.  (Older
    # variant: verifies against storage.load() rather than load_current.)
    db = DB(self._storage)
    conn = db.open()
    conn.root()['abc'] = MinPO('abc')
    conn.root()['xyz'] = MinPO('xyz')
    transaction.commit()
    # Ensure it's all on disk.
    db.close()
    self._storage.close()
    self.open()
    key = None
    # Three objects in total: the root plus the two MinPOs.
    for x in (b'\000', b'\001', b'\002'):
        oid, tid, data, next_oid = self._storage.record_iternext(key)
        self.assertEqual(oid, (b'\000' * 7) + x)
        key = next_oid
        expected_data, expected_tid = self._storage.load(oid, '')
        self.assertEqual(expected_data, data)
        self.assertEqual(expected_tid, tid)
        if x == b'\002':
            self.assertEqual(next_oid, None)
        else:
            self.assertNotEqual(next_oid, None)
def checkRestoreAcrossPack(self):
    # restore() into the destination storage must keep working even after
    # the destination has been packed.
    db = DB(self._storage)
    c = db.open()
    r = c.root()
    obj = r["obj1"] = MinPO(1)
    transaction.commit()
    obj = r["obj2"] = MinPO(1)
    transaction.commit()
    self._dst.copyTransactionsFrom(self._storage)
    self._dst.pack(time.time(), referencesf)
    self._undo(self._storage.undoInfo()[0]["id"])
    # copy the final transaction manually. even though there
    # was a pack, the restore() ought to succeed.
    it = self._storage.iterator()
    # Get the last transaction and its record iterator. Record iterators
    # can't be accessed out-of-order, so we need to do this in a bit
    # complicated way:
    for final in it:
        records = list(final)
    self._dst.tpc_begin(final, final.tid, final.status)
    for r in records:
        self._dst.restore(r.oid, r.tid, r.data, "", r.data_txn, final)
    self._dst.tpc_vote(final)
    self._dst.tpc_finish(final)
def checkPackWithGCOnDestinationAfterRestore(self):
    # After restoring all transactions, packing the destination with GC
    # must collect objects that became unreachable (the root -> obj ->
    # obj chain was unlinked), while the root itself survives.
    raises = self.assertRaises
    db = DB(self._storage)
    conn = db.open()
    root = conn.root()
    root.obj = obj1 = MinPO(1)
    txn = transaction.get()
    txn.note('root -> obj')
    txn.commit()
    root.obj.obj = obj2 = MinPO(2)
    txn = transaction.get()
    txn.note('root -> obj -> obj')
    txn.commit()
    del root.obj
    txn = transaction.get()
    txn.note('root -X->')
    txn.commit()
    # Now copy the transactions to the destination
    self._dst.copyTransactionsFrom(self._storage)
    # Now pack the destination.
    snooze()
    self._dst.pack(time.time(), referencesf)
    # And check to see that the root object exists, but not the other
    # objects.
    data, serial = self._dst.load(root._p_oid, '')
    raises(KeyError, self._dst.load, obj1._p_oid, '')
    raises(KeyError, self._dst.load, obj2._p_oid, '')
def checkPackLotsWhileWriting(self):
    # This is like the other pack-while-writing tests, except it packs
    # repeatedly until the client thread is done.  At the time it was
    # introduced, it reliably provoked
    #     CorruptedError: ... transaction with checkpoint flag set
    # in the ZEO flavor of the FileStorage tests.
    db = DB(self._storage)
    conn = db.open()
    root = conn.root()
    choices = list(range(10))
    for i in choices:
        root[i] = MinPO(i)
    transaction.commit()
    snooze()
    packt = time.time()
    # Generate a pile of revisions so there is plenty to pack away.
    for dummy in choices:
        for i in choices:
            root[i].value = MinPO(i)
            transaction.commit()
    NUM_LOOP_TRIP = 100
    timer = ElapsedTimer(time.time())
    thread = ClientThread(db, choices, NUM_LOOP_TRIP, timer, 0)
    thread.start()
    # BUG FIX: Thread.isAlive() was removed in Python 3.9; the supported
    # spelling (since Python 2.6) is is_alive().
    while thread.is_alive():
        db.pack(packt)
        snooze()
        packt = time.time()
    thread.join()
    self._sanity_check()
class MyZODB(object):
    """Singleton ZEO client connection.

    ``__new__`` caches a single instance; ``__init__`` bails out early
    when the connection has already been established, so repeated
    ``MyZODB()`` calls are cheap.
    """
    # singleton DB instance
    _instance = None

    def __new__(cls):
        if not isinstance(cls._instance, cls):
            cls._instance = object.__new__(cls)
        return cls._instance

    def __init__(self):
        # return if connection already set
        if hasattr(self, "connection"):
            logging.debug(f'DB Object already exists. Return')
            return
        logging.debug('Creating a new db instance')
        # start the server if not started already
        start_server()
        server_and_port = (SERVER, DB_PORT)
        self.storage = ClientStorage(server_and_port)
        self.db = DB(self.storage)
        self.connection = self.db.open()
        # BUG FIX: the root was fetched and assigned twice in a row; once
        # is enough.
        self.dbroot = self.connection.root()

    def close(self):
        """Close connection, database and storage, in that order."""
        self.connection.close()
        self.db.close()
        self.storage.close()
def connectdb(dbfile):
    """Open ``dbfile`` as a ZODB FileStorage and return (root, storage).

    The caller is still responsible for committing transactions and for
    closing the storage when finished.
    """
    from ZODB import FileStorage, DB
    storage = FileStorage.FileStorage(dbfile)
    root = DB(storage).open().root()
    return root, storage
def main(argv=None):
    """Entry point for the ZODB bridge test script.

    Builds an argument parser with ``init``/``list``/``compare``
    subcommands, opens (creating on first use) the 'test' folder in a
    version-specific database file, and dispatches to the chosen handler.
    """
    parser = argparse.ArgumentParser(description='ZODB bridge test script')
    v_major, v_minor = sys.version_info.major, sys.version_info.minor
    default_db = os.path.join('.', 'data-py{}.{}.fs'.format(v_major, v_minor))
    parser.add_argument('-d', '--db-file', default=default_db,
                        help="Path to the database file")
    subparsers = parser.add_subparsers()
    init_parser = subparsers.add_parser('init')
    init_parser.set_defaults(func=init_db)
    init_parser.add_argument('-l', '--location', default='.',
                             help="filesystem location to initialize from")
    list_parser = subparsers.add_parser('list')
    list_parser.set_defaults(func=list_db)
    compare_parser = subparsers.add_parser('compare')
    compare_parser.set_defaults(func=compare_db)
    args = parser.parse_args(argv)
    # DB() accepts a file name directly and creates the FileStorage itself.
    db = DB(args.db_file)
    db_conn = db.open()
    db_root = db_conn.root()
    if 'test' in db_root:
        root = db_root['test']
    else:
        root = db_root['test'] = Folder('test', None)
    set_root(root)
    args.func(root, args)
    db_conn.close()
def checkRestoreAcrossPack(self):
    # restore() into the destination storage must keep working even after
    # the destination has been packed.
    db = DB(self._storage)
    c = db.open()
    r = c.root()
    obj = r["obj1"] = MinPO(1)
    transaction.commit()
    obj = r["obj2"] = MinPO(1)
    transaction.commit()
    self._dst.copyTransactionsFrom(self._storage)
    self._dst.pack(time.time(), referencesf)
    self._undo(self._storage.undoInfo()[0]['id'])
    # copy the final transaction manually. even though there
    # was a pack, the restore() ought to succeed.
    it = self._storage.iterator()
    # Get the last transaction and its record iterator. Record iterators
    # can't be accessed out-of-order, so we need to do this in a bit
    # complicated way:
    for final in it:
        records = list(final)
    self._dst.tpc_begin(final, final.tid, final.status)
    for r in records:
        self._dst.restore(r.oid, r.tid, r.data, '', r.data_txn, final)
    self._dst.tpc_vote(final)
    self._dst.tpc_finish(final)
def _store_base_subs_in_zodb(self, storage):
    # Build a base/sub pair of local site managers (with plain
    # _LocalAdapterRegistry registries substituted in), wire sub's bases
    # to base, and commit both under conn.root()['base'] / ['sub'].
    db = DB(storage)
    conn = db.open()
    base_comps = BLSM(None)
    base_comps.btree_threshold = 0
    base_comps.__name__ = u'base'
    # replace with "broken"
    base_comps.adapters = _LocalAdapterRegistry()
    base_comps.utilities = _LocalAdapterRegistry()
    sub_comps = BLSM(None)
    sub_comps.__name__ = u'sub'
    sub_comps.__bases__ = (base_comps,)
    # Setting __bases__ must rewire the per-registry bases as well.
    assert_that(sub_comps.adapters.__bases__, is_((base_comps.adapters,)))
    assert_that(sub_comps.utilities.__bases__, is_((base_comps.utilities,)))
    assert_that(sub_comps.utilities.__bases__[0], is_(_LocalAdapterRegistry))
    conn.root()['base'] = base_comps
    conn.root()['sub'] = sub_comps
    transaction.commit()
    conn.close()
    db.close()
def checkPackWithGCOnDestinationAfterRestore(self):
    # After restoring all transactions, packing the destination with GC
    # must collect objects that became unreachable (the root -> obj ->
    # obj chain was unlinked), while the root itself survives.
    raises = self.assertRaises
    db = DB(self._storage)
    conn = db.open()
    root = conn.root()
    root.obj = obj1 = MinPO(1)
    txn = transaction.get()
    txn.note("root -> obj")
    txn.commit()
    root.obj.obj = obj2 = MinPO(2)
    txn = transaction.get()
    txn.note("root -> obj -> obj")
    txn.commit()
    del root.obj
    txn = transaction.get()
    txn.note("root -X->")
    txn.commit()
    # Now copy the transactions to the destination
    self._dst.copyTransactionsFrom(self._storage)
    # Now pack the destination.
    snooze()
    self._dst.pack(time.time(), referencesf)
    # And check to see that the root object exists, but not the other
    # objects.
    data, serial = self._dst.load(root._p_oid, "")
    raises(KeyError, self._dst.load, obj1._p_oid, "")
    raises(KeyError, self._dst.load, obj2._p_oid, "")
def checkPackLotsWhileWriting(self):
    # This is like the other pack-while-writing tests, except it packs
    # repeatedly until the client thread is done.  At the time it was
    # introduced, it reliably provoked
    #     CorruptedError: ... transaction with checkpoint flag set
    # in the ZEO flavor of the FileStorage tests.
    db = DB(self._storage)
    conn = db.open()
    root = conn.root()
    choices = range(10)
    for i in choices:
        root[i] = MinPO(i)
    transaction.commit()
    snooze()
    packt = time.time()
    # Generate a pile of revisions so there is plenty to pack away.
    for dummy in choices:
        for i in choices:
            root[i].value = MinPO(i)
            transaction.commit()
    NUM_LOOP_TRIP = 100
    timer = ElapsedTimer(time.time())
    thread = ClientThread(db, choices, NUM_LOOP_TRIP, timer, 0)
    thread.start()
    # BUG FIX: Thread.isAlive() was removed in Python 3.9; is_alive() has
    # been available since Python 2.6, so this stays backward-compatible.
    while thread.is_alive():
        db.pack(packt)
        snooze()
        packt = time.time()
    thread.join()
    self._sanity_check()
def checkTransactionalUndoAfterPackWithObjectUnlinkFromRoot(self):
    # Undo a transaction that survives a pack, where an intermediate
    # object (o2) had been unlinked from the root before the pack.
    eq = self.assertEqual
    db = DB(self._storage)
    conn = db.open()
    try:
        root = conn.root()
        o1 = C()
        o2 = C()
        root['obj'] = o1
        o1.obj = o2
        txn = transaction.get()
        txn.note(u'o1 -> o2')
        txn.commit()
        # Busy-wait so the pack time is strictly after the first commit.
        now = packtime = time.time()
        while packtime <= now:
            packtime = time.time()
        o3 = C()
        o2.obj = o3
        txn = transaction.get()
        txn.note(u'o1 -> o2 -> o3')
        txn.commit()
        o1.obj = o3
        txn = transaction.get()
        txn.note(u'o1 -> o3')
        txn.commit()
        log = self._storage.undoLog()
        eq(len(log), 4)
        for entry in zip(log, (b'o1 -> o3', b'o1 -> o2 -> o3',
                               b'o1 -> o2', b'initial database creation')):
            eq(entry[0]['description'], entry[1])
        # Packing drops the two transactions older than packtime.
        self._storage.pack(packtime, referencesf)
        log = self._storage.undoLog()
        for entry in zip(log, (b'o1 -> o3', b'o1 -> o2 -> o3')):
            eq(entry[0]['description'], entry[1])
        tid = log[0]['id']
        db.undo(tid)
        txn = transaction.get()
        txn.note(u'undo')
        txn.commit()
        # undo does a txn-undo, but doesn't invalidate
        conn.sync()
        log = self._storage.undoLog()
        for entry in zip(log, (b'undo', b'o1 -> o3', b'o1 -> o2 -> o3')):
            eq(entry[0]['description'], entry[1])
        eq(o1.obj, o2)
        eq(o1.obj.obj, o3)
        self._iterate()
    finally:
        conn.close()
        db.close()
class Database(UserDict):
    """Dict-like facade over a ZODB file: mutate it like a mapping, then
    call commit() to persist."""

    def __init__(self, file):
        self.db = DB(FileStorage.FileStorage(file))
        self.connection = self.db.open()
        # UserDict keeps its mapping in ``data``; pointing it at the ZODB
        # root makes ordinary dict access read/write persistent state.
        self.data = self.connection.root()

    def commit(self):
        # Legacy ZODB 3.x API: get_transaction() is the global transaction.
        get_transaction().commit()
class DatabaseFacade(object):
    """Open a FileStorage-backed ZODB and guarantee that a 'test_results'
    tree exists in the root."""

    def __init__(self, path):
        self.storage = FileStorage.FileStorage(path)
        self.db = DB(self.storage)
        self.connection = self.db.open()
        self.dbroot = self.connection.root()
        # Seed the results tree on first use.
        if 'test_results' not in self.dbroot:
            self.dbroot['test_results'] = OOBTree()
def test_connection_manager_default_transaction():
    """The Connection context manager commits through the default
    (thread-local) transaction manager."""
    db = DB(DemoStorage())
    with Connection(db) as conn:
        conn.root()['foo'] = 'bar'
        transaction.commit()
    checker = db.open()
    assert checker.root()['foo'] == 'bar'
def __init__(self, filename):
    """Open ``filename`` as a ZODB FileStorage and expose its root.

    (Python 2 code: type check uses ``basestring``.)
    """
    if isinstance(filename, basestring):
        # ZODB filepaths always use UNIX separators.
        filename = filename.replace('\\', '/')
    from ZODB import FileStorage, DB
    self.storage = FileStorage.FileStorage(filename)
    connection = DB(self.storage).open()
    self.root = connection.root()
def checkPackAfterUndoDeletion(self):
    # Packing at times recorded before/after a deletion and its undo must
    # not destroy the objects restored by the undo.
    db = DB(self._storage)
    cn = db.open()
    try:
        root = cn.root()
        pack_times = []

        def set_pack_time():
            # Record a candidate pack time, then sleep past the clock
            # granularity so later commits get strictly newer timestamps.
            pack_times.append(time.time())
            snooze()

        root["key0"] = MinPO(0)
        root["key1"] = MinPO(1)
        root["key2"] = MinPO(2)
        txn = transaction.get()
        txn.note(u"create 3 keys")
        txn.commit()
        set_pack_time()
        del root["key1"]
        txn = transaction.get()
        txn.note(u"delete 1 key")
        txn.commit()
        set_pack_time()
        root._p_deactivate()
        cn.sync()
        self.assertTrue(listeq(root.keys(), ["key0", "key2"]))
        L = db.undoInfo()
        db.undo(L[0]["id"])
        txn = transaction.get()
        txn.note(u"undo deletion")
        txn.commit()
        set_pack_time()
        root._p_deactivate()
        cn.sync()
        self.assertTrue(listeq(root.keys(), ["key0", "key1", "key2"]))
        # Pack at each recorded time; the undone deletion must survive
        # every one of them.
        for t in pack_times:
            self._storage.pack(t, referencesf)
            root._p_deactivate()
            cn.sync()
            self.assertTrue(listeq(root.keys(), ["key0", "key1", "key2"]))
            for i in range(3):
                obj = root["key%d" % i]
                self.assertEqual(obj.value, i)
            root.items()
            self._inter_pack_pause()
    finally:
        cn.close()
        db.close()
class ZO:
    """Minimal ZODB wrapper exposing the root object and a commit helper.

    NOTE(review): ``DB(None)`` opens an in-memory database — the *file*
    argument is stored but currently unused (the original had the file
    argument commented out); confirm this is intentional.
    """

    def __init__(self, file):
        self.file = file
        self.db = DB(None)  # file)
        self.connection = self.db.open()
        self.root = self.connection.root()

    def commit(self):
        """Commit the current transaction."""
        transaction.commit()
def make_one(self):
    """Return a WeakRef to a fresh Persistent object that has been
    registered (given an oid) with a throwaway in-memory DB."""
    database = DB(None)
    connection = database.open()
    target = Persistent()
    # add() assigns the object an oid, which WeakRef needs.
    connection.add(target)
    return WeakRef(target)
def viewdb(dbfilename):
    """Open the FileStorage at *dbfilename* and pop up one viewer
    window (Toplevel) per key found at the database root."""
    from ZODB import FileStorage, DB
    connection = DB(FileStorage.FileStorage(dbfilename)).open()
    root = connection.root()
    for key in root.keys():
        window = Toplevel()
        viewer(window, root[key], key)
class Bank:
    """Context-managed bank account store backed by a ZODB FileStorage
    ('data.fs'). Accounts live in a plain list under root['Accounts']."""

    def __init__(self):
        self.store = FileStorage.FileStorage("data.fs")
        self.database = DB(self.store)
        self.connection = self.database.open()
        self.root = self.connection.root()
        if 'Accounts' not in self.root:
            self.root['Accounts'] = []
        self.accounts = self.root['Accounts']

    def __enter__(self):
        return self

    def __exit__(self, type, value, traceback):
        # Discard any uncommitted changes, then tear down the
        # connection/db/storage chain in reverse order of creation.
        # (Removed a stray `transaction.get()` whose result was unused.)
        transaction.abort()
        self.connection.close()
        self.database.close()
        self.store.close()

    def add(self, clientName, clientId, address, phone, accountId):
        """Create and commit a new account; clientName must be non-empty."""
        if clientName != "":
            newAccount = Accounts()
            newAccount.clientName = clientName
            newAccount.clientId = clientId
            newAccount.address = address
            newAccount.phone = phone
            newAccount.accountId = accountId
            self.accounts.append(newAccount)
            # Reassign so the root mapping registers the change.
            self.root['Accounts'] = self.accounts
            transaction.commit()
            print("New client added..")
        else:
            print("Error add 5 arguments: client name, id, address, phone.....")

    def list(self):
        """Print all accounts, or a notice when there are none."""
        if len(self.accounts) > 0:
            print("Bank accounts.......")
            for account in self.accounts:
                print("%s\t%s" % (account.clientName, account.accountId))
        else:
            print("No accounts in this bank.")

    def delete(self, accountId):
        """Remove every account matching *accountId* and commit.

        Fixed: the original deleted by index while iterating
        range(len(...)), which raises IndexError once an element is
        removed, and it reset its `deleted` flag on every iteration so
        only a match at the last position was ever reported.
        """
        remaining = [a for a in self.accounts if a.accountId != accountId]
        if len(remaining) != len(self.accounts):
            self.accounts = remaining
            self.root['Accounts'] = self.accounts
            transaction.commit()
            print("Account deleted..")
        else:
            print("There is no Account Id '%s'.." % accountId)
def db_setup():
    """Open database connection.

    Chain: storage -> database -> connection -> root node.
    Returns a (db, root) tuple after wiping any previous file.
    """
    fs_cleanup()
    db = DB(FileStorage.FileStorage('fruits.fs'))
    connection = db.open()
    return db, connection.root()
def checkPackVersionsInPast(self):
    """Exercise pack() against a mix of version data and undo
    backpointers (legacy ZODB versions API), then pack again after the
    version is committed to the main line.
    """
    db = DB(self._storage)
    # Open a connection bound to a named version.
    cn = db.open(version="testversion")
    root = cn.root()
    obj = root["obj"] = MinPO("obj")
    root["obj2"] = MinPO("obj2")
    txn = transaction.get()
    txn.note("create 2 objs in version")
    txn.commit()
    obj.value = "77"
    txn = transaction.get()
    txn.note("modify obj in version")
    txn.commit()
    # Record the pack cutoff before the undo below, sleeping past
    # clock resolution so later commits are strictly newer.
    t0 = time.time()
    snooze()
    # undo the modification to generate a mix of backpointers
    # and versions for pack to chase
    info = db.undoInfo()
    db.undo(info[0]["id"])
    txn = transaction.get()
    txn.note("undo modification")
    txn.commit()
    self._storage.pack(t0, referencesf)
    # Fold the version's changes into the non-version (main) line.
    db.commitVersion("testversion")
    txn = transaction.get()
    txn.note("commit version")
    txn.commit()
    cn = db.open()
    root = cn.root()
    root["obj"] = "no version"
    txn = transaction.get()
    txn.note("modify obj")
    txn.commit()
    # Final pack with a current cutoff must chase all references cleanly.
    self._storage.pack(time.time(), referencesf)
def testRelstorage():
    """Manual smoke test: connect to a RelStorage-backed ZODB over MySQL
    and read the 'data' root entry.

    NOTE(review): Python 2 code (print statements). Requires a live
    MySQL server; credentials are placeholders. `addfile` is defined but
    never called, and the blob/commit code at the bottom is disabled.
    """
    import ZODB, transaction
    from ZODB import FileStorage, DB
    from relstorage.adapters.mysql import MySQLAdapter
    from relstorage.storage import RelStorage
    from MySQLdb import OperationalError
    server = 'peat.ucd.ie'
    username = '******'
    password = '******'
    project = 'test'
    port = 8080
    adapter = MySQLAdapter(host=server, user=username, passwd=password,
                           db=project, port=port)
    storage = RelStorage(adapter, shared_blob_dir=False, blob_dir='tempblob')
    db = DB(storage)
    connection = db.open()
    print storage
    # NOTE(review): second open() discards the first connection.
    connection = db.open()
    dbroot = connection.root()
    data = dbroot['data']
    print data
    def addfile(fname):
        # Copy a local file's bytes into a new ZODB Blob.
        myblob = Blob()
        b = myblob.open('w')
        o = open(fname)
        data = o.read()
        b.write(data)
        print b.name
        b.close()
        return myblob
    '''f='gogh.chambre-arles.jpg' b=addfile(f) data['aaa'] = FileRecord(name=f,blob=b)'''
    #t = transaction.get()
    #t.commit()
    return
class Database:
    """ZODB database wrapper that compresses records transparently via
    zc.zlibstorage layered over a FileStorage."""

    def __init__(self, file_storage):
        base = FileStorage.FileStorage(file_storage)
        wrapped = zc.zlibstorage.ZlibStorage(base)
        self.storage = base
        self.compressed_storage = wrapped
        self.db = DB(wrapped)
        self.connection = self.db.open()
        self.db_root = self.connection.root()

    def close(self):
        """Close the database (and with it the underlying storages)."""
        self.db.close()
class MiZODB(object):
    """Small ZODB handle: owns a FileStorage, a DB, one connection and
    its root, and closes them in reverse order of creation."""

    def __init__(self, archivo):
        self.storage = FileStorage.FileStorage(archivo)
        self.db = DB(self.storage)
        self.connection = self.db.open()
        self.root = self.connection.root()

    def close(self):
        """Release connection, database and storage (innermost first)."""
        for resource in (self.connection, self.db, self.storage):
            resource.close()
class ZODB(AbstractDatabase):
    """AbstractDatabase backend storing key/value pairs in a BTree
    inside a ZODB FileStorage, packing periodically to bound file growth."""

    name = 'zodb'

    # Pack the storage after roughly this many bytes have been written
    # (100 MiB; was the magic constant 104857600).
    _PACK_THRESHOLD = 100 * 1024 * 1024

    def __init__(self, conf=None):
        """*conf* is the directory holding the database file; required."""
        if conf is None:
            raise Exception('Path required.')
        if not os.path.exists(conf):
            os.makedirs(conf)
        storage = FileStorage.FileStorage(os.path.join(conf, 'db'),
                                          pack_keep_old=False)
        # pack() leaves a 'db.tmp' behind; remembered so close() can remove it.
        self._tmp_path = os.path.join(conf, 'db.tmp')
        self._db = DB(storage)
        self._connection = self._db.open()
        self._root = self._connection.root()
        if getattr(self._root, 'db', None) is None:
            self._root.db = BTrees.OOBTree.BTree()
        self._root_db = self._root.db
        self._transaction = transaction
        self._bytes_written = 0

    def put(self, k, v):
        """Store *v* under *k*; pack once enough bytes have accumulated."""
        self._root_db[k] = v
        self._bytes_written += len(k) + len(v)
        if self._bytes_written >= self._PACK_THRESHOLD:
            self.compact()

    def get(self, k):
        """Return the value for *k*, or None when absent.

        Fixed: single BTree lookup via .get() instead of the original
        `k in db` check followed by a second `db[k]` lookup.
        """
        return self._root_db.get(k)

    def delete(self, k):
        """Remove *k*; raises KeyError when absent."""
        del self._root_db[k]

    def close(self):
        """Commit pending work, close the DB and clean up pack leftovers."""
        self._transaction.commit()
        self._db.close()
        try:
            os.remove(self._tmp_path)
        except OSError:
            # Fixed: was a bare `except:` that swallowed everything; only
            # a missing/locked temp file is expected and ignorable here.
            pass

    def compact(self):
        """Commit then pack the storage, resetting the write counter."""
        self._transaction.commit()
        self._db.pack()
        self._bytes_written = 0

    def length(self):
        return len(self._root_db)

    def list(self):
        """Return the stored keys."""
        return self._root_db.keys()

    def savepoint(self):
        """Durability point: commit the current transaction."""
        self._transaction.commit()