def testdb(request):
    """Rebuild the 'listingdb' OOBTree index in listingsc.fs, storing each
    listing under an 8-char SHA-224 hash of its category id.

    Fixes: Python-2-only ``dict.has_key`` replaced with ``in``; dead
    commented-out z-curve keying code and its unused key computations removed.
    """
    storage = FileStorage.FileStorage('listingsc.fs')
    db = DB(storage)
    conn = db.open()
    dbroot = conn.root()
    # Ensure that a 'listingdb' key is present in the root.
    if 'listingdb' not in dbroot:
        from BTrees.OOBTree import OOBTree
        dbroot['listingdb'] = OOBTree()
    userdb = dbroot['listingdb']
    lists = Listing.objects.all().values()
    minlat = min(l['latitude'] for l in lists)
    minlng = min(l['longitude'] for l in lists)
    for l in lists:
        userdb[hashlib.sha224(str(l['category_id'])).hexdigest()[:8]] = l
    # Commit the change
    transaction.commit()
    db.close()
    return HttpResponse("woo i guess")
def mapSearch(request, search):
    """Benchmark a keyword lookup via the ZODB BTree index against a MySQL
    query, nine times, recording timings, then render the map template.

    Fixes: bare ``except:`` clauses narrowed to ``except Exception`` (a bare
    except also swallows SystemExit/KeyboardInterrupt); large blocks of dead
    commented-out code removed. Local names are preserved because the view
    renders with ``locals()``.
    """
    keywords = Keywords.objects.order_by('keyword')
    categories = Categories.objects.order_by('category')
    zipcodes = Zipcode.objects.order_by('code')
    listings = []
    timings = []
    for i in range(1, 10):
        start = time()
        try:
            ksearch = Keywords.objects.filter(keyword=search)[0].id
            storage = FileStorage.FileStorage('listingsk.fs')
            db = DB(storage)
            conn = db.open()
            dbroot = conn.root()
            userdb = dbroot['listingdb']
            listings.append(userdb[hashlib.sha224(str(ksearch)).hexdigest()[:8]])
            search = ksearch
            db.close()
        except Exception:
            # Best-effort: skip this round if the keyword or index is missing.
            pass
        T = time() - start
        start = time()
        try:
            ksearch = Keywords.objects.filter(keyword=search)[0].id
        except Exception:
            pass
        l = Building.objects.filter(number=612)
        listings.append(l[:0])
        search = 612
        N = time() - start
        timings += [{'Tree-Time:': T, 'MYSQL-Time:': N}]
    writeToFile(timings, 'search_times')
    listings = listings[:1000]
    return render_to_response('map2.html', locals())
def makeMap(request):
    """Render the map. With ?lat=&lng= GET parameters, look up nearby
    listings through a z-curve-keyed ZODB index; otherwise show 1000
    random buildings.

    Fixes: outer bare ``except:`` narrowed to ``except Exception``; the
    inner per-key miss is specifically a ``KeyError`` (sparse index).
    Local names preserved because the view renders with ``locals()``.
    """
    listings = Building.objects.order_by('?')[:1000]
    keywords = Keywords.objects.order_by('keyword')
    allcategories = Categories.objects.order_by('category').values('category')
    zipcodes = Zipcode.objects.order_by('code')
    if request.method == 'GET':
        try:
            lat = request.GET['lat']
            lng = request.GET['lng']
            lists = Listing.objects.all().values()
            minlat = min([l['latitude'] for l in lists])
            minlng = min([l['longitude'] for l in lists])
            # Micro-degree integer offsets from the minimum coordinates.
            key1 = int(float(lat) * 1000000 - minlat * 1000000)
            key2 = int(float(lng) * 1000000 - minlng * 1000000)
            search = zcurve.interleave2(key1, key2)
            storage = FileStorage.FileStorage('listings2.fs')
            db = DB(storage)
            conn = db.open()
            dbroot = conn.root()
            userdb = dbroot['listingdb']
            listings = []
            # Scan a window of interleaved keys around the search point.
            for i in range(search - 100000, search + 10000):
                try:
                    listings.append(userdb[int(i)])
                except KeyError:
                    # Most keys in the window are absent: sparse index.
                    pass
            db.close()
            return render_to_response('map2.html', locals())
        except Exception:
            pass
    return render_to_response('map.html', locals())
def check_record_iternext(self):
    # Verify that record_iternext() walks all current object records in
    # oid order and signals the end of iteration with next_oid == None.
    db = DB(self._storage)
    conn = db.open()
    conn.root()['abc'] = MinPO('abc')
    conn.root()['xyz'] = MinPO('xyz')
    transaction.commit()
    # Ensure it's all on disk.
    db.close()
    self._storage.close()
    # Reopen the storage so iteration sees the flushed records.
    self.open()
    key = None
    # The root object is oid 0; the two MinPOs get oids 1 and 2.
    for x in (b'\000', b'\001', b'\002'):
        oid, tid, data, next_oid = self._storage.record_iternext(key)
        self.assertEqual(oid, (b'\000' * 7) + x)
        key = next_oid
        # The iterated record must match what load_current() returns.
        expected_data, expected_tid = load_current(self._storage, oid)
        self.assertEqual(expected_data, data)
        self.assertEqual(expected_tid, tid)
        if x == b'\002':
            # Last record: iteration must report exhaustion.
            self.assertEqual(next_oid, None)
        else:
            self.assertNotEqual(next_oid, None)
class DBStorage(jpath4.db.storage.Storage):
    """ZODB-backed jpath4 storage.

    Keeps the document root in a DBDict stored under the "jpath4-root"
    key of the ZODB root mapping, exposed through the standard wrapper.
    """

    def __init__(self, file_path):
        self.storage = ZFileStorage.FileStorage(file_path)
        self.db = ZDB(self.storage)
        self.connection = self.db.open()
        self.z_root = self.connection.root()
        # Create the persistent root container on first use.
        if "jpath4-root" not in self.z_root:
            self.z_root["jpath4-root"] = DBDict()
            transaction.commit()
        self.unwrapped_root = self.z_root["jpath4-root"]
        self.wrapped_root = standard_wrapper.wrap(self.unwrapped_root)

    def get_root(self):
        """Return the wrapped document root."""
        return self.wrapped_root

    def apply_updates(self, updates):
        """Apply a batch of jpath updates, materialising new objects and
        lists as their persistent DBDict/DBList equivalents."""
        standard_wrapper.apply_updates(self.wrapped_root, updates,
                                       classes={"object": DBDict, "list": DBList})

    def commit(self):
        """Commit the current transaction."""
        transaction.commit()

    def abort(self):
        """Abort the current transaction."""
        transaction.abort()

    def close(self):
        """Tear down connection, database and storage, in that order."""
        self.connection.close()
        self.db.close()
        self.storage.close()
def checkUndoMultipleConflictResolution(self, reverse=False):
    # Undo several transactions at once and verify the resulting write
    # conflicts are resolved via PCounter's conflict-resolution hook.
    from .ConflictResolution import PCounter
    db = DB(self._storage)
    cn = db.open()
    try:
        cn.root.x = PCounter()
        transaction.commit()
        # Four increments, each in its own noted transaction.
        for i in range(4):
            with db.transaction() as conn:
                conn.transaction_manager.get().note(
                    (str if PY3 else unicode)(i))
                conn.root.x.inc()
        # Pick two undo-log entries (skipping the newest), optionally
        # reversed to exercise both orderings of the multi-undo.
        ids = [l['id'] for l in db.undoLog(1, 3)]
        if reverse:
            ids.reverse()
        db.undoMultiple(ids)
        transaction.commit()
        # 4 increments - 2 undone = 2.
        self.assertEqual(cn.root.x._value, 2)
    finally:
        cn.close()
        db.close()
def checkResolve(self, resolvable=True):
    # Provoke a write conflict between two connections and check it is
    # either resolved (PCounter) or raises ConflictError (PCounter2).
    db = DB(self._storage)
    t1 = TransactionManager()
    c1 = db.open(t1)
    o1 = c1.root()['p'] = (PCounter if resolvable else PCounter2)()
    o1.inc()
    t1.commit()
    # A second, independent connection commits a concurrent increment.
    t2 = TransactionManager()
    c2 = db.open(t2)
    o2 = c2.root()['p']
    o2.inc(2)
    t2.commit()
    # Now t1's commit conflicts with t2's already-committed state.
    o1.inc(3)
    try:
        t1.commit()
    except ConflictError as err:
        # Unresolvable case: the error message names the PCounter2 class.
        self.assertIn(".PCounter2,", str(err))
        self.assertEqual(o1._value, 3)
    else:
        self.assertTrue(resolvable, "Expected ConflictError")
        # Resolution merged both increments: 1 + 2 + 3.
        self.assertEqual(o1._value, 6)
    # Refresh t2's view and confirm both connections agree.
    t2.begin()
    self.assertEqual(o2._value, o1._value)
    db.close()
def test_local_utility_persitent():
    """A utility registered on a persisted local site manager survives a
    database round-trip, and an uncommitted change is rolled back.

    Fix: the original wrote ``transaction.abort`` without calling it, so
    the abort never executed and the rollback branch tested nothing.
    """
    db = DB(DemoStorage('test_storage'))
    connection = db.open()
    root = connection.root()
    site = root['site'] = SimpleSite()
    components.LocalSiteManager(site)
    transaction.commit()
    getSiteManager(context=site).registerUtility(PeculiarDummyUtil())
    dummy = getUtility(IDummyUtil, context=site)
    dummy.answer = 'no'
    assert dummy.callme() == "Perculiarly, no"
    transaction.commit()
    del site
    del dummy
    # Reload from a fresh connection: the committed state is visible.
    connection = db.open()
    site = connection.root()['site']
    dummy = getUtility(IDummyUtil, context=site)
    assert dummy.callme() == "Perculiarly, no"
    # and aborting does not save state
    dummy.answer = 'yes'
    assert dummy.callme() == "Perculiarly, yes"
    transaction.abort()  # FIX: was `transaction.abort` (never invoked)
    connection = db.open()
    site = connection.root()['site']
    dummy = getUtility(IDummyUtil, context=site)
    assert dummy.callme() == "Perculiarly, no"
    db.close()
def getUrl2(self):
    """Return ``[[url, updated], ...]`` for all stored feeds, first adding
    any newly discovered feed (``self.getNew()``) to the persistent list.

    Fixes: the original assigned ``root['Feed'] = []`` *after* closing the
    connection and database, which either raises or silently does nothing —
    removed. The local named ``list`` (shadowing the builtin, and with a
    dead ``list = []`` pre-assignment) is renamed ``feeds``.
    """
    storage = FileStorage.FileStorage('feed.fs')
    db = DB(storage)
    connection = db.open()
    root = connection.root()
    new = self.getNew()
    urllist = []
    feeds = root['Feed']
    if new is not None:
        # Backdate the new feed a week so it is picked up immediately.
        feeds.append(FeedAdmin(new, self.datetime.date.today() -
                               self.datetime.timedelta(7)))
    transaction.begin()
    # Reassign so the root notices the (non-persistent) list changed.
    root['Feed'] = feeds
    transaction.commit()
    feedlist = root['Feed']
    for f in feedlist:
        urllist.append([f.url, f.updated])
    connection.close()
    db.close()
    return urllist
def _store_base_subs_in_zodb(self, storage):
    # Test fixture: persist a 'base' site manager and a 'sub' manager
    # whose __bases__ chain to it, so later tests can reload both.
    db = DB(storage)
    conn = db.open()
    base_comps = BLSM(None)
    # Threshold 0 forces immediate BTree conversion of registrations.
    base_comps.btree_threshold = 0
    base_comps.__name__ = u'base'
    # replace with "broken"
    base_comps.adapters = _LocalAdapterRegistry()
    base_comps.utilities = _LocalAdapterRegistry()
    sub_comps = BLSM(None)
    sub_comps.__name__ = u'sub'
    sub_comps.__bases__ = (base_comps,)
    # Setting __bases__ must rebase the nested registries as well.
    assert_that(sub_comps.adapters.__bases__, is_((base_comps.adapters,)))
    assert_that(sub_comps.utilities.__bases__, is_((base_comps.utilities,)))
    assert_that(sub_comps.utilities.__bases__[0], is_(_LocalAdapterRegistry))
    conn.root()['base'] = base_comps
    conn.root()['sub'] = sub_comps
    transaction.commit()
    conn.close()
    db.close()
class MyZODB(object):
    """File-backed ZODB handle with logging and on-disk cleanup support."""

    def __init__(self, dbPath):
        self.dbPath = os.path.abspath(dbPath)
        self.logger = logging.getLogger('monitor_local.MyProjectDB.MyZODB')
        self.logger.info("Open database: %s", self.dbPath)
        try:
            self.storage = FileStorage.FileStorage(self.dbPath)
        except zc.lockfile.LockError:
            # Another process holds the FileStorage lock file.
            raise DBisBusyError("Database is in use by other progress")
        self.db = DB(self.storage)
        self.conn = self.db.open()
        self.root = self.conn.root()
        self.openFlag = True

    def close(self):
        """Close connection, database and storage, then mark us closed."""
        self.logger.info("Close database: %s", self.dbPath)
        self.conn.close()
        self.db.close()
        self.storage.close()
        self.openFlag = False

    def removeDBFile(self):
        """Delete the database file plus its index/lock/tmp companions."""
        self.logger.info("Remove database: %s", self.dbPath)
        if self.openFlag:
            self.close()
        for suffix in ('', '.index', '.lock', '.tmp'):
            target = self.dbPath + suffix
            if os.path.isfile(target):
                os.remove(target)
class PersistenceHandler:
    """Context manager persisting scraper progress in a ZODB file."""

    def __init__(self, db_name):
        self.db_name = db_name

    def __enter__(self):
        self.db = DB(FileStorage(self.db_name))
        self.conn = self.db.open()
        self.root = self.conn.root()
        return self

    def __exit__(self, _, __, ___):
        # Always release the database, even if closing the connection fails.
        try:
            self.conn.close()
        finally:
            self.db.close()

    def store_progress(self, scrape_targets, vulnerability_results):
        """Persist both progress collections under fixed root keys."""
        self.root["scrape_targets"] = scrape_targets
        self.root["vulnerability_results"] = vulnerability_results
        transaction.commit()

    def retrieve_progress(self):
        """Return [scrape_targets, vulnerability_results], substituting an
        empty list for any key not yet stored."""
        wanted = ("scrape_targets", "vulnerability_results")
        return [self.root[key] if key in self.root else list()
                for key in wanted]
class MyZODB(object):
    """Singleton client connection to the shared ZEO server.

    Fix: ``__init__`` fetched and assigned ``self.connection.root()`` twice;
    the duplicate line is removed.
    """

    # singleton DB instance
    _instance = None

    def __new__(cls):
        if not isinstance(cls._instance, cls):
            cls._instance = object.__new__(cls)
        return cls._instance

    def __init__(self):
        # return if connection already set
        if hasattr(self, "connection"):
            logging.debug(f'DB Object already exists. Return')
            return
        logging.debug('Creating a new db instance')
        # start the server if not started already
        start_server()
        server_and_port = (SERVER, DB_PORT)
        self.storage = ClientStorage(server_and_port)
        self.db = DB(self.storage)
        self.connection = self.db.open()
        self.dbroot = self.connection.root()

    def close(self):
        """Close connection, database and storage, in that order."""
        self.connection.close()
        self.db.close()
        self.storage.close()
class Base(object):
    """Filesystem-backed graph database endpoint with a pluggable
    messaging connection (e.g. a multiprocessing listener or client)."""

    def __init__(self, path, authkey):
        if not os.path.exists(path):
            os.makedirs(path)
        self._path = path
        self.authkey = authkey
        storage_path = os.path.join(path, 'graph.fs')
        self.storage = FileStorage(storage_path)
        self.db = DB(self.storage)

    def path(self):
        """Return the data directory."""
        return self._path

    def process(self, connection):
        """Build the messaging connection from a (factory, args) pair."""
        func, args = connection
        self.connection = func(*args)

    def recv(self):
        """Receive one message from the connection."""
        return self.connection.recv()

    def send(self, message):
        """Send one message, then close the messaging connection."""
        self.connection.send(message)
        self.connection.close()

    def open(self):
        """Open and return a new ZODB connection."""
        return self.db.open()

    def close(self):
        """Abort any pending transaction and shut the database down."""
        transaction.get().abort()
        self.db.close()
        self.storage.close()
def check_record_iternext(self):
    # Verify that record_iternext() walks all current object records in
    # oid order and signals the end of iteration with next_oid == None.
    # (Variant that validates against storage.load() rather than
    # load_current().)
    db = DB(self._storage)
    conn = db.open()
    conn.root()['abc'] = MinPO('abc')
    conn.root()['xyz'] = MinPO('xyz')
    transaction.commit()
    # Ensure it's all on disk.
    db.close()
    self._storage.close()
    # Reopen the storage so iteration sees the flushed records.
    self.open()
    key = None
    # The root object is oid 0; the two MinPOs get oids 1 and 2.
    for x in (b'\000', b'\001', b'\002'):
        oid, tid, data, next_oid = self._storage.record_iternext(key)
        self.assertEqual(oid, (b'\000' * 7) + x)
        key = next_oid
        # The iterated record must match what load() returns.
        expected_data, expected_tid = self._storage.load(oid, '')
        self.assertEqual(expected_data, data)
        self.assertEqual(expected_tid, tid)
        if x == b'\002':
            # Last record: iteration must report exhaustion.
            self.assertEqual(next_oid, None)
        else:
            self.assertNotEqual(next_oid, None)
def checkTransactionalUndoAfterPackWithObjectUnlinkFromRoot(self):
    # After packing away early history, undoing the surviving
    # 'o1 -> o3' transaction must restore the o1 -> o2 -> o3 linkage.
    eq = self.assertEqual
    db = DB(self._storage)
    conn = db.open()
    try:
        root = conn.root()
        o1 = C()
        o2 = C()
        root['obj'] = o1
        o1.obj = o2
        txn = transaction.get()
        txn.note(u'o1 -> o2')
        txn.commit()
        # Busy-wait so packtime is strictly after the commit above.
        now = packtime = time.time()
        while packtime <= now:
            packtime = time.time()
        o3 = C()
        o2.obj = o3
        txn = transaction.get()
        txn.note(u'o1 -> o2 -> o3')
        txn.commit()
        # Unlink o2: o1 now points straight at o3.
        o1.obj = o3
        txn = transaction.get()
        txn.note(u'o1 -> o3')
        txn.commit()
        log = self._storage.undoLog()
        eq(len(log), 4)
        for entry in zip(log, (b'o1 -> o3', b'o1 -> o2 -> o3',
                               b'o1 -> o2', b'initial database creation')):
            eq(entry[0]['description'], entry[1])
        # Pack at packtime: only the last two transactions remain.
        self._storage.pack(packtime, referencesf)
        log = self._storage.undoLog()
        for entry in zip(log, (b'o1 -> o3', b'o1 -> o2 -> o3')):
            eq(entry[0]['description'], entry[1])
        tid = log[0]['id']
        db.undo(tid)
        txn = transaction.get()
        txn.note(u'undo')
        txn.commit()
        # undo does a txn-undo, but doesn't invalidate
        conn.sync()
        log = self._storage.undoLog()
        for entry in zip(log, (b'undo', b'o1 -> o3', b'o1 -> o2 -> o3')):
            eq(entry[0]['description'], entry[1])
        # The undo restored the intermediate object graph.
        eq(o1.obj, o2)
        eq(o1.obj.obj, o3)
        self._iterate()
    finally:
        conn.close()
        db.close()
def test_pickle_zodb_lookup_utility(self):
    # Now, we can register a couple utilities in the base, save everything,
    # and look it up in the sub (when the classes don't match)
    storage = DemoStorage()
    self._store_base_subs_in_zodb(storage)
    db = DB(storage)
    conn = db.open()
    new_base = conn.root()['base']
    new_base._p_activate()
    new_sub = conn.root()['sub']
    # Thresholds of 0 force immediate conversion to BTree structures.
    new_base.utilities.btree_provided_threshold = 0
    new_base.utilities.btree_map_threshold = 0
    new_base.registerUtility(MockSite(), provided=IFoo)
    provided1 = new_base.adapters._provided
    # Previously this would fail. Now it works.
    new_base.registerUtility(MockSite(), provided=implementedBy(object),
                             name=u'foo')
    new_base.registerUtility(MockSite(), provided=IMock, name=u'foo')
    provided2 = new_base.adapters._provided
    # Make sure that it only converted once
    assert_that(provided1, is_(same_instance(provided2)))
    assert_that(new_base._utility_registrations, is_(BTrees.OOBTree.OOBTree))
    assert_that(new_base._utility_registrations.keys(), contains(
        (IFoo, u''),
        (IMock, u'foo'),
        (implementedBy(object), u'foo'),
    ))
    # Registrations were converted to the 64-bit BTree families.
    assert_that(new_base.utilities._provided, is_(BTrees.family64.OI.BTree))
    assert_that(new_base.utilities._adapters[0], is_(BTrees.family64.OO.BTree))
    assert_that(new_base.utilities._adapters[0][IFoo],
                is_(BTrees.family64.OO.BTree))
    transaction.commit()
    conn.close()
    db.close()
    # Reopen and verify the registrations survive the round-trip and are
    # visible through the sub manager's __bases__ chain.
    db = DB(storage)
    conn = db.open()
    new_sub = conn.root()['sub']
    x = new_sub.queryUtility(IFoo)
    assert_that(x, is_(MockSite))
    x = new_sub.queryUtility(IMock, u'foo')
    assert_that(x, is_(MockSite))
def checkPackAfterUndoDeletion(self):
    # Pack at times straddling a delete-then-undo sequence and verify
    # that no live object is lost by any of the packs.
    db = DB(self._storage)
    cn = db.open()
    try:
        root = cn.root()
        pack_times = []
        def set_pack_time():
            pack_times.append(time.time())
            # snooze() guarantees strictly increasing, distinct times.
            snooze()
        root["key0"] = MinPO(0)
        root["key1"] = MinPO(1)
        root["key2"] = MinPO(2)
        txn = transaction.get()
        txn.note(u"create 3 keys")
        txn.commit()
        set_pack_time()
        del root["key1"]
        txn = transaction.get()
        txn.note(u"delete 1 key")
        txn.commit()
        set_pack_time()
        # Drop cached state so the next access reloads from storage.
        root._p_deactivate()
        cn.sync()
        self.assertTrue(listeq(root.keys(), ["key0", "key2"]))
        # Undo the deletion.
        L = db.undoInfo()
        db.undo(L[0]["id"])
        txn = transaction.get()
        txn.note(u"undo deletion")
        txn.commit()
        set_pack_time()
        root._p_deactivate()
        cn.sync()
        self.assertTrue(listeq(root.keys(), ["key0", "key1", "key2"]))
        # Packing at each recorded time must keep all three keys live.
        for t in pack_times:
            self._storage.pack(t, referencesf)
            root._p_deactivate()
            cn.sync()
            self.assertTrue(listeq(root.keys(), ["key0", "key1", "key2"]))
            for i in range(3):
                obj = root["key%d" % i]
                self.assertEqual(obj.value, i)
            root.items()
            self._inter_pack_pause()
    finally:
        cn.close()
        db.close()
def __call__(self, container, name, args):
    """Pack the container's ZODB and rebuild its Rtree spatial index from
    the ZODB contents (an Rtree's storage cannot be packed in place).

    Fixes: ``fwd`` is initialised before the try block — previously a
    failure in ``shutil.move`` raised NameError inside ``finally``; the
    unused ``rtree = None`` local is removed.
    """
    copts, cargs = self.parser.parse_args(args)
    data = os.path.join(container.opts.data, name)
    # First, pack the ZODB
    storage = FileStorage.FileStorage("%s/var/Data.fs" % data)
    db = DB(storage)
    db.pack()
    # Can't pack an Rtree's storage in-place, so we move it away and
    # recreate from the contents of the ZODB
    rtree_filename = '%s/var/vrt1' % data
    fwd = None  # must exist before the try for the finally clause
    try:
        shutil.move(rtree_filename + ".dat", rtree_filename + ".bkup.dat")
        shutil.move(rtree_filename + ".idx", rtree_filename + ".bkup.idx")
        conn = db.open()
        root = conn.root()
        keys = root['index'].keys
        bkup = Rtree('%s/var/vrt1.bkup' % data)
        pagesize = bkup.properties.pagesize
        if len(keys) == 0:
            fwd = Rtree(
                '%s/var/vrt1' % data,
                # Passing in copied properties doesn't work,
                # leading to errors involving page ids
                # properties=new_properties,
                pagesize=pagesize
            )
        else:
            gen = ((intid, bbox, None) for intid, (uid, bbox)
                   in keys.items())
            fwd = Rtree(
                '%s/var/vrt1' % data,
                gen,
                # Passing in copied properties doesn't work,
                # leading to errors involving page ids
                # properties=new_properties,
                pagesize=pagesize
            )
        conn.close()
        db.close()
        storage.close()
    except:
        # Restore backups (bare except kept deliberately: restore must
        # also happen on KeyboardInterrupt/SystemExit; error is re-raised)
        shutil.copy(rtree_filename + ".bkup.dat", rtree_filename + ".dat")
        shutil.copy(rtree_filename + ".bkup.idx", rtree_filename + ".idx")
        raise
    finally:
        if fwd is not None:
            fwd.close()
class Bank:
    """Simple ZODB-backed bank account store (context-manager friendly)."""

    def __init__(self):
        self.store = FileStorage.FileStorage("data.fs")
        self.database = DB(self.store)
        self.connection = self.database.open()
        self.root = self.connection.root()
        if not 'Accounts' in self.root:
            self.root['Accounts'] = []
        self.accounts = self.root['Accounts']

    def __enter__(self):
        return self

    def __exit__(self, type, value, traceback):
        # Discard any uncommitted changes before shutting down.
        transaction.abort()
        self.connection.close()
        self.database.close()
        self.store.close()

    def add(self, clientName, clientId, address, phone, accountId):
        """Create and persist a new account; clientName is required."""
        if clientName != "":
            newAccount = Accounts()
            newAccount.clientName = clientName
            newAccount.clientId = clientId
            newAccount.address = address
            newAccount.phone = phone
            newAccount.accountId = accountId
            self.accounts.append(newAccount)
            # Reassign so the persistent root registers the list change.
            self.root['Accounts'] = self.accounts
            transaction.commit()
            print("New client added..")
        else:
            print("Error add 5 arguments: client name, id, address, phone.....")

    def list(self):
        """Print every account's client name and account id."""
        if len(self.accounts) > 0:
            print("Bank accounts.......")
            for account in self.accounts:
                print("%s\t%s" % (account.clientName, account.accountId))
        else:
            print("No accounts in this bank.")

    def delete(self, accountId):
        """Delete the first account with the given id, if any.

        Fix: the original reset the ``deleted`` flag on every iteration and
        deleted from the list while indexing over its original length, which
        could mis-report a successful delete or raise IndexError.
        """
        deleted = False
        for i, account in enumerate(self.accounts):
            if account.accountId == accountId:
                del self.accounts[i]
                deleted = True
                break
        if deleted:
            self.root['Accounts'] = self.accounts
            transaction.commit()
            print("Account deleted..")
        else:
            print("There is no Account Id '%s'.." % accountId)
class MyZODB(object):
    """Convenience wrapper bundling FileStorage, DB, connection and root."""

    def __init__(self, path):
        self.storage = FileStorage.FileStorage(path)
        # NOTE(review): `create=True` looks like a FileStorage option, not
        # a DB() keyword -- confirm this DB version accepts/forwards it.
        self.db = DB(self.storage, create=True, large_record_size=999)
        self.connection = self.db.open()
        self.dbroot = self.connection.root()

    def close(self):
        """Close connection, database and storage, in that order."""
        self.connection.close()
        self.db.close()
        self.storage.close()
class MiZODB(object):
    """Open a ZODB file and expose its root; close() tears everything down."""

    def __init__(self, archivo):
        backing = FileStorage.FileStorage(archivo)
        self.storage = backing
        self.db = DB(backing)
        self.connection = self.db.open()
        self.root = self.connection.root()

    def close(self):
        """Release connection, database and storage."""
        self.connection.close()
        self.db.close()
        self.storage.close()
class New: def __init__(self, dbPath=setup.dbPath): self.db = DB(FileStorage.FileStorage(dbPath)) def connect(self, app=None): conn = Connection(self.db.open(), db=self.db) return conn, conn.root() def close(self): self.db.close() def pack(self): self.db.pack()
class Database:
    """ZODB database transparently compressed with zc.zlibstorage.

    Fix: ``close()`` now closes the open connection before the database;
    the original closed only the DB, leaving the connection dangling.
    """

    def __init__(self, file_storage):
        self.storage = FileStorage.FileStorage(file_storage)
        self.compressed_storage = zc.zlibstorage.ZlibStorage(self.storage)
        self.db = DB(self.compressed_storage)
        self.connection = self.db.open()
        self.db_root = self.connection.root()

    def close(self):
        """Close the connection first, then the database (which closes
        the wrapped storages)."""
        self.connection.close()
        self.db.close()
class ZODB(AbstractDatabase):
    """Key/value store on top of a packed FileStorage + OOBTree.

    Fix: the bare ``except: pass`` in ``close()`` (which also swallowed
    SystemExit/KeyboardInterrupt) is narrowed to ``OSError``, the only
    expected failure from ``os.remove``.
    """

    name = 'zodb'

    def __init__(self, conf=None):
        if conf is None:
            raise Exception('Path required.')
        if not os.path.exists(conf):
            os.makedirs(conf)
        storage = FileStorage.FileStorage(os.path.join(conf, 'db'),
                                          pack_keep_old=False)
        self._tmp_path = os.path.join(conf, 'db.tmp')
        self._db = DB(storage)
        self._connection = self._db.open()
        self._root = self._connection.root()
        # Lazily create the BTree holding all key/value pairs.
        if getattr(self._root, 'db', None) is None:
            self._root.db = BTrees.OOBTree.BTree()
        self._root_db = self._root.db
        self._transaction = transaction
        self._bytes_written = 0

    def put(self, k, v):
        """Store k -> v; pack automatically after ~100 MB of writes."""
        self._root_db[k] = v
        self._bytes_written += len(k) + len(v)
        if self._bytes_written >= 104857600:
            self.compact()

    def get(self, k):
        """Return the value for k, or None when absent."""
        db = self._root_db
        return db[k] if k in db else None

    def delete(self, k):
        """Remove k; raises KeyError when absent."""
        del self._root_db[k]

    def close(self):
        """Commit pending writes, close, and drop the pack temp file."""
        self._transaction.commit()
        self._db.close()
        try:
            os.remove(self._tmp_path)
        except OSError:
            # Temp file may legitimately not exist.
            pass

    def compact(self):
        """Commit pending writes and pack the storage."""
        self._transaction.commit()
        self._db.pack()
        self._bytes_written = 0

    def length(self):
        return len(self._root_db)

    def list(self):
        return self._root_db.keys()

    def savepoint(self):
        self._transaction.commit()
def test_pickle_zodb_lookup_adapter(self):
    # Now, we can register a couple adapters in the base, save everything,
    # and look it up in the sub (when the classes don't match)
    storage = DemoStorage()
    self._store_base_subs_in_zodb(storage)
    db = DB(storage)
    conn = db.open()
    new_base = conn.root()['base']
    new_base._p_activate()
    new_sub = conn.root()['sub']
    # Threshold 1 converts the second-level maps to BTrees only after
    # more than one entry is registered for a provided interface.
    new_base.adapters.btree_provided_threshold = 0
    new_base.adapters.btree_map_threshold = 1
    # Note: this used-to cause btree-ing the map to fail. The
    # implementedBy callable previously had default comparison and can't be
    # stored in a btree. As of zope.interface 4.3.0, this is fixed.
    new_base.registerAdapter(_foo_factory, required=(object,),
                             provided=IFoo)
    new_base.registerAdapter(_foo_factory2, required=(IFoo,),
                             provided=IMock)
    assert_that(new_base._adapter_registrations, is_(BTrees.OOBTree.OOBTree))
    assert_that(new_base._adapter_registrations.keys(), contains(
        ((IFoo,), IMock, u''),
        ((implementedBy(object),), IFoo, u''),
    ))
    assert_that(new_base.adapters._provided, is_(BTrees.family64.OI.BTree))
    # Below the map threshold the inner mappings are still plain dicts.
    assert_that(new_base.adapters._adapters[0], is_({}))
    assert_that(new_base.adapters._adapters[1][IFoo], is_(dict))
    # A second registration under IFoo crosses the map threshold...
    new_base.registerAdapter(_foo_factory2, required=(IFoo,),
                             provided=IFoo)
    # ...so the inner mapping is now a BTree.
    assert_that(new_base.adapters._adapters[1][IFoo],
                is_(BTrees.family64.OO.BTree))
    transaction.commit()
    conn.close()
    db.close()
    # Reload in a fresh DB and look the adapter up through the sub.
    db = DB(storage)
    conn = db.open()
    new_sub = conn.root()['sub']
    x = new_sub.queryAdapter(RootFoo(), IMock)
    assert_that(x, is_(2))
class PayPyDB(calculations.Calculations):
    """Daily payment data persisted in ZODB, keyed by date string.

    Fix: the bare ``except:`` around the storage-URL fallback is narrowed
    to ``except Exception`` so SystemExit/KeyboardInterrupt propagate.
    """

    def __init__(self, date, dbfn, loglevel):
        super(PayPyDB, self).__init__()
        # connect to database
        logging.basicConfig()
        # Prefer a configured storage URL; fall back to a plain file
        # storage when the configuration is missing or unreadable.
        try:
            self.__storage__ = storageFromURL('paypydb.conf')
        except Exception:
            self.__storage__ = FileStorage.FileStorage(dbfn)
        self.__db__ = DB(self.__storage__)
        self.__connection__ = self.__db__.open()
        self.__dbroot__ = self.__connection__.root()
        if not date in self.alldays():
            # if today date is exists then fill the 'data' attr
            #self.__fill_yesterday__()
            # create database and 'newday' with zero data
            self.__dbroot__[date] = self.data
            self.commit()  # commit changes

    def __del__(self):
        self.__connection__.close()
        self.__db__.close()
        self.__storage__.close()

    def __fill_yesterday__(self):
        """Copy yesterday's closing balances into today's opening ones."""
        yesterday = self.yesterday()
        if yesterday:
            pddata = self.__dbroot__[yesterday]
            for keys in self.blocks[:]:
                a = keys[:]
                b = keys[:]
                a.append('outbal')
                b.append('youtbal')
                self.setnode(self.data, b, self.findnode(pddata, a))

    def getdata(self, date):
        """Return the stored data mapping for *date*."""
        return self.__dbroot__[date]

    def setdata(self, date, data):
        """Store *data* under *date* and commit."""
        self.__dbroot__[date] = data
        self.commit()

    def yesterday(self):
        """Return the most recent stored day, or None when empty."""
        days = self.alldays()
        days.sort(reverse=True)
        try:
            return days[0]
        except IndexError:
            return None

    def alldays(self):
        """Return all stored day keys."""
        return self.__dbroot__.keys()

    def commit(self):
        # Mark the root changed so nested non-persistent edits are saved.
        self.__dbroot__._p_changed = True
        transaction.commit()
class app_db(object):
    """Application database handle rooted at a local Data.fs file."""

    def __init__(self, path='/home/hme/data/Data.fs'):
        file_store = FileStorage.FileStorage(path)
        self.storage = file_store
        self.db = DB(file_store)
        self.connection = self.db.open()
        self.dbroot = self.connection.root()

    def close(self):
        """Shut everything down: connection, database, then storage."""
        self.connection.close()
        self.db.close()
        self.storage.close()
class MyZODB(object):
    """Bundle of FileStorage, DB, connection and root for one database file."""

    def __init__(self, path):
        self.storage = FileStorage.FileStorage(path)
        # NOTE(review): `create=True` looks like a FileStorage option, not
        # a DB() keyword -- confirm this DB version accepts/forwards it.
        self.db = DB(self.storage, create=True, large_record_size=999)
        self.connection = self.db.open()
        self.dbroot = self.connection.root()

    def close(self):
        """Close connection, database and storage, in that order."""
        self.connection.close()
        self.db.close()
        self.storage.close()
class MyZODB(object):
    """Wrapper holding the storage, database, connection and root.
    (Comments translated from the original Chinese.)"""

    def __init__(self, path):
        self.storage = FileStorage.FileStorage(path)  # file-based backing store
        self.db = DB(self.storage)  # database object wrapping the storage
        self.connection = self.db.open()  # session with this database
        self.dbroot = self.connection.root()  # root of the persistent object tree

    def close(self):
        """Close connection, database and storage, in that order."""
        self.connection.close()
        self.db.close()
        self.storage.close()
class MiZODB():
    """MiZODB class for object persistence (translated from Spanish)."""

    def __init__(self):
        self.storage = FileStorage.FileStorage('Data.fs')
        self.db = DB(self.storage)
        self.conexion = self.db.open()
        self.raiz = self.conexion.root()

    def close(self):
        """Close connection, database and storage, in that order."""
        self.conexion.close()
        self.db.close()
        self.storage.close()
class MyZODB(object):
    """Open a ZODB file and keep handles to every layer of the stack."""

    def __init__(self, path):
        store = FileStorage.FileStorage(path)
        self.storage = store
        self.db = DB(store)
        self.connection = self.db.open()
        self.dbroot = self.connection.root()

    def close(self):
        """Release resources from the inside out."""
        self.connection.close()
        self.db.close()
        self.storage.close()
class MiZODB(object):
    """Persistence helper around the local Data.fs ZODB file."""

    def __init__(self):
        almacen = FileStorage.FileStorage('Data.fs')
        self.storage = almacen
        self.db = DB(almacen)
        self.conexion = self.db.open()
        self.raiz = self.conexion.root()

    def close(self):
        """Close connection, database and storage."""
        self.conexion.close()
        self.db.close()
        self.storage.close()
def speedTest1():
    """Benchmark read and write against a ZEO-backed PDatabase.

    Fixes: the local previously shadowed the module-level ``DB`` name;
    Python-2 ``print`` statements converted to the call form, which parses
    on both Python 2 and 3.
    """
    db = PDatabase(server='localhost', port=8090)
    #DB = PDatabase(local='./Data.fs')
    for i in range(100, 300):
        db.add(i)
    for i in range(100, 290):
        db.delete(i)
    db.commit()
    print(db)
    db.close()
    return
def main2():
    """Open the desktop test database and dump the 'Test' entry.

    Fix: Python-2 ``print`` statements converted to the single-argument
    call form, which is valid on both Python 2 and 3.
    """
    mydb = DB("C:\\Users\\Daniele\\Desktop\\mydb.db")
    conn = mydb.open()
    root = conn.root()
    print(root)
    print(root['Test'])
    print(dir(root['Test']))
    print(root['Test'].password)
    conn.close()
    mydb.close()
def zeo_connection():
    """Provide a transactional scope around a series of operations."""
    # NOTE(review): this generator yields once, context-manager style --
    # confirm it is wrapped with @contextlib.contextmanager at the
    # (unseen) decoration site or by its caller.
    storage = ClientStorage.ClientStorage(('localhost', 50001))
    db = DB(storage)
    conn = db.open()
    try:
        # Hand the root mapping to the with-block body.
        yield conn.root()
    except Exception as ex:
        # Errors from the body are logged and swallowed, not re-raised.
        logger.error('Could not connect to ZEO. Exception: {0}'.format(ex))
    finally:
        conn.close()
        db.close()
class ToDo:
    """ZODB-backed to-do list stored under the 'Tasks' root key."""

    def __init__(self):
        self.store = FileStorage.FileStorage("ToDo2.fs")
        self.database = DB(self.store)
        self.connection = self.database.open()
        self.root = self.connection.root()
        if not 'Tasks' in self.root:
            self.root['Tasks'] = []
        self.tasks = self.root['Tasks']

    def __enter__(self):
        return self

    def __exit__(self, type, value, traceback):
        # Roll back anything uncommitted, then shut down cleanly.
        transaction.abort()
        self.connection.close()
        self.database.close()
        self.store.close()

    def add(self, name, description):
        """Append a task; a non-empty name is required."""
        if name != "":
            new_task = Task()
            new_task.name = name
            new_task.description = description
            self.tasks.append(new_task)
            # Reassign so the persistent root registers the list change.
            self.root['Tasks'] = self.tasks
            transaction.commit()
            print("New task added..")
        else:
            print("Tasks must have a name")

    def list(self):
        """Print all pending tasks."""
        if len(self.tasks) > 0:
            print("Tasks To Do..")
            for task in self.tasks:
                print("%s\t%s" % (task.name, task.description))
        else:
            print("No pending tasks..")

    def delete(self, name):
        """Delete the first task with the given name, if any.

        Fix: the original reset the ``deleted`` flag on every iteration and
        deleted from the list while indexing over its original length, which
        could mis-report a successful delete or raise IndexError.
        """
        deleted = False
        for i, task in enumerate(self.tasks):
            if task.name == name:
                del self.tasks[i]
                deleted = True
                break
        if deleted:
            self.root['Tasks'] = self.tasks
            transaction.commit()
            print("Task deleted..")
        else:
            print("There is no task '%s'.." % name)
def test_register_implemented_by_lookup_utility(self):
    # Registering a utility for implementedBy(MockSite) now works, but
    # such a registration cannot be looked up via the class itself.
    storage = DemoStorage()
    self._store_base_subs_in_zodb(storage)
    db = DB(storage)
    conn = db.open()
    new_base = conn.root()['base']
    new_base._p_activate()
    new_sub = conn.root()['sub']
    # Thresholds of 0 force immediate BTree conversion.
    new_base.utilities.btree_provided_threshold = 0
    new_base.utilities.btree_map_threshold = 0
    new_base.registerUtility(MockSite(), provided=IFoo)
    provided1 = new_base.adapters._provided
    # In the past, we couldn't register by implemented, but now we can.
    new_base.registerUtility(MockSite(), provided=implementedBy(MockSite),
                             name=u'foo')
    provided2 = new_base.adapters._provided
    # Make sure that it only converted once
    assert_that(provided1, is_(same_instance(provided2)))
    assert_that(new_base._utility_registrations, is_(BTrees.OOBTree.OOBTree))
    assert_that(new_base._utility_registrations.keys(), contains(
        (IFoo, u''),
        ((implementedBy(MockSite), u'foo')),
    ))
    assert_that(new_base.utilities._provided, is_(BTrees.family64.OI.BTree))
    assert_that(new_base.utilities._adapters[0], is_(BTrees.family64.OO.BTree))
    assert_that(new_base.utilities._adapters[0][IFoo],
                is_(BTrees.family64.OO.BTree))
    transaction.commit()
    conn.close()
    db.close()
    # Reload in a fresh DB and query through the sub manager.
    db = DB(storage)
    conn = db.open()
    new_sub = conn.root()['sub']
    x = new_sub.queryUtility(IFoo)
    assert_that(x, is_(MockSite))
    # But it can't actually be looked up, regardless of whether we
    # convert to btrees or not
    x = new_sub.queryUtility(MockSite, u'foo')
    assert_that(x, is_(none()))
class MiZODB(object):
    """Class that initialises the file responsible for object persistence
    (translated from Spanish)."""

    def __init__(self, archivo):
        self.storage = FileStorage.FileStorage(archivo)
        self.db = DB(self.storage)
        self.conexion = self.db.open()
        self.raiz = self.conexion.root()

    def close(self):
        """Close connection, database and storage, in that order."""
        self.conexion.close()
        self.db.close()
        self.storage.close()
def speedTest1():
    """Benchmark read and write against a ZEO-backed PDatabase.

    Fixes: the local previously shadowed the module-level ``DB`` name;
    Python-2 ``print`` statement converted to the call form, which parses
    on both Python 2 and 3.
    """
    db = PDatabase(server='localhost', port=8090)
    #DB = PDatabase(local='./Data.fs')
    for i in range(100, 300):
        db.add(i)
    for i in range(100, 290):
        db.delete(i)
    db.commit()
    print(db)
    db.close()
    return
class ZDatabase(object):
    """ZODB handle that commits pending work when closed."""

    def __init__(self, path):
        backing = FileStorage.FileStorage(path)
        self.storage = backing
        self.db = DB(backing)
        self.connection = self.db.open()
        self.dbroot = self.connection.root()

    def close(self):
        """Commit outstanding changes, then release all resources."""
        transaction.commit()
        self.connection.close()
        self.db.close()
        self.storage.close()
class OurDB:
    # Test helper: a small file-backed ZODB holding one OOBTree, with
    # hooks to reopen, mutate randomly, and snapshot the storage's file
    # position and highest oid (reaches into FileStorage privates).

    _file_name = None

    def __init__(self, dir):
        from BTrees.OOBTree import OOBTree
        import transaction
        self.dir = dir
        self.getdb()
        conn = self.db.open()
        conn.root()['tree'] = OOBTree()
        transaction.commit()
        # Remember the storage's end-of-file position for later checks.
        self.pos = self.db.storage._pos
        self.close()

    def getdb(self):
        # (Re)open Data.fs inside self.dir.
        from ZODB import DB
        from ZODB.FileStorage import FileStorage
        self._file_name = storage_filename = os.path.join(self.dir, 'Data.fs')
        storage = FileStorage(storage_filename)
        self.db = DB(storage)

    def gettree(self):
        # Reopen the database and return the persistent tree.
        self.getdb()
        conn = self.db.open()
        return conn.root()['tree']

    def pack(self):
        self.getdb()
        self.db.pack()

    def close(self):
        # Idempotent: safe to call when already closed.
        if self.db is not None:
            self.db.close()
            self.db = None

    def mutate(self):
        # Make random mutations to the btree in the database.
        import random
        import transaction
        tree = self.gettree()
        for dummy in range(100):
            if random.random() < 0.6:
                tree[random.randrange(100000)] = random.randrange(100000)
            else:
                keys = tree.keys()
                if keys:
                    del tree[keys[0]]
        transaction.commit()
        # Record file position and highest oid after the mutation burst.
        self.pos = self.db.storage._pos
        self.maxkey = self.db.storage._oid
        self.close()
class OurDB(object):
    # Test helper: a small file-backed ZODB holding one OOBTree, with
    # hooks to reopen, mutate randomly, and snapshot the storage's file
    # position and highest oid (reaches into FileStorage privates).

    _file_name = None

    def __init__(self, dir):
        from BTrees.OOBTree import OOBTree
        import transaction
        self.dir = dir
        self.getdb()
        conn = self.db.open()
        conn.root()['tree'] = OOBTree()
        transaction.commit()
        # Remember the storage's end-of-file position for later checks.
        self.pos = self.db.storage._pos
        self.close()

    def getdb(self):
        # (Re)open Data.fs inside self.dir.
        from ZODB import DB
        from ZODB.FileStorage import FileStorage
        self._file_name = storage_filename = os.path.join(self.dir, 'Data.fs')
        storage = FileStorage(storage_filename)
        self.db = DB(storage)

    def gettree(self):
        # Reopen the database and return the persistent tree.
        self.getdb()
        conn = self.db.open()
        return conn.root()['tree']

    def pack(self):
        self.getdb()
        self.db.pack()

    def close(self):
        # Idempotent: safe to call when already closed.
        if self.db is not None:
            self.db.close()
            self.db = None

    def mutate(self):
        # Make random mutations to the btree in the database.
        import random
        import transaction
        tree = self.gettree()
        for dummy in range(100):
            if random.random() < 0.6:
                tree[random.randrange(100000)] = random.randrange(100000)
            else:
                keys = tree.keys()
                if keys:
                    del tree[keys[0]]
        transaction.commit()
        # Record file position and highest oid after the mutation burst.
        self.pos = self.db.storage._pos
        self.maxkey = self.db.storage._oid
        self.close()
class MyZODB:
    """Keeps the full ZODB stack (storage, db, connection, root) open."""

    def __init__(self, path):
        file_store = FileStorage.FileStorage(path)  # file-backed storage
        self.storage = file_store
        self.db = DB(file_store)  # database wrapping the storage
        self.connection = self.db.open()  # live session
        self.dbroot1 = self.connection.root()  # root used to store/retrieve data

    def close(self):
        """Close the session, database and storage."""
        self.connection.close()
        self.db.close()
        self.storage.close()
class remoteZODB(object):
    """Read-only client connection to a remote ZEO 'data' storage."""

    def __init__(self, server, port):
        endpoint = (server, port)
        self.storage = ClientStorage(endpoint,
                                     storage='data',
                                     read_only=True,
                                     wait=False)
        self.db = DB(self.storage)
        self.connection = self.db.open()
        self.dbroot = self.connection.root()

    def close(self):
        """Drop connection, database and client storage."""
        self.connection.close()
        self.db.close()
        self.storage.close()
def arc(self):
    """Return the stored photo archive (root['init']), newest first.

    Fix: the first parameter was misspelled ``sef``; renamed to the
    conventional ``self`` (purely positional, so callers are unaffected).
    """
    storage = FileStorage.FileStorage('init_photos.fs')
    db = DB(storage)
    connection = db.open()
    root = connection.root()
    # Read inside an explicit transaction, mirroring the original flow.
    transaction.begin()
    archive = root['init']
    transaction.commit()
    connection.close()
    db.close()
    return archive[::-1]
def getInfoOfEntry(sNameOfEntry):
    """Look up *sNameOfEntry* in the root of the CUON_FS database.

    Returns the stored value, or None when the key is missing or any
    error occurs while reading (best-effort lookup, as before).
    """
    storage = FileStorage.FileStorage(CUON_FS)
    db = DB(storage)
    connection = db.open()
    t2 = None
    try:
        root = connection.root()
        t2 = root[sNameOfEntry]
    # Was `except StandardError` (Python-2-only name); Exception keeps the
    # same best-effort behavior and also works on Python 3.
    except Exception:
        pass
    finally:
        # Fix: the original skipped these closes if a non-StandardError
        # exception escaped, leaking the connection and database.
        connection.close()
        db.close()
    return t2
def _functest_load(fqn):
    """Open the database at *fqn* and deserialize the test tree.

    Asserts that both the 'working' and 'failing' datetime keys round-trip.
    Intended to run in a separate process.
    """
    from ZODB import DB
    working_key, failing_key = _working_failing_datetimes()
    db = DB(fqn)
    conn = db.open()
    try:
        tree = conn.root()['tree']
        assert tree[working_key] == 'working'
        assert tree[failing_key] == 'failing'
    finally:
        # Close explicitly so the file handle is released (matters on Windows).
        conn.close()
        db.close()
class ZDB(object):
    """Small ZODB facade: open a FileStorage path, expose its root,
    and commit/close on demand."""

    def __init__(self, path):
        storage = FileStorage.FileStorage(path)
        self.storage = storage
        self.db = DB(storage)
        self.connection = self.db.open()
        self.root = self.connection.root()

    def commit(self):
        """Commit the current transaction."""
        transaction.commit()

    def close(self, commit=True):
        """Shut down the stack; flushes pending changes first unless
        *commit* is False."""
        if commit:
            self.commit()
        for layer in (self.connection, self.db, self.storage):
            layer.close()
def zodb_test():
    """Smoke-test: write a handful of sample values into Data.fs and commit.

    Based on http://www.ibm.com/developerworks/aix/library/au-zodb/
    Note: the value returned by .open() is a Connection, not the DB itself,
    so only the connection gets closed here (same as the original).
    """
    conn = DB(FileStorage.FileStorage('Data.fs')).open()
    root = conn.root()
    samples = {
        'a_number': 3,
        'a_string': 'Gift',
        'a_list': [1, 2, 3, 5, 7, 12],
        'a_dictionary': {1918: 'Red Sox', 1919: 'Reds'},
        'deeply_nested': {
            1918: [('Red Sox', 4), ('Cubs', 2)],
            1919: [('Reds', 5), ('White Sox', 3)],
        },
    }
    for key in samples:
        root[key] = samples[key]
    transaction.commit()
    conn.close()
def _persist_zodb(self, obj):
    """Persist *obj* into a throwaway in-memory ZODB (MappingStorage).

    Used to exercise ZODB serialization of the object; nothing is kept.
    """
    from ZODB import DB
    from ZODB.MappingStorage import MappingStorage
    import transaction

    database = DB(MappingStorage())
    connection = database.open()
    try:
        connection.root.key = obj
        transaction.commit()
    finally:
        connection.close()
        database.close()
        # Discard any transaction state left over from a failed commit.
        transaction.abort()
def checkCorruptionInPack(self):
    # This sets up a corrupt .fs file, with a redundant transaction
    # length mismatch. The implementation of pack in many releases of
    # ZODB blew up if the .fs file had such damage: it detected the
    # damage, but the code to raise CorruptedError referenced an undefined
    # global.
    import time
    from ZODB.utils import U64, p64
    from ZODB.FileStorage.format import CorruptedError
    from ZODB.serialize import referencesf
    # Write one committed transaction so the file has a record to damage.
    db = DB(self._storage)
    conn = db.open()
    conn.root()['xyz'] = 1
    transaction.commit()
    # Ensure it's all on disk.
    db.close()
    self._storage.close()
    # Reopen before damaging.
    self.open()
    # Open .fs directly, and damage content.
    # FileStorage records each transaction's length twice: once in the
    # header and once trailing the record; we corrupt the trailing copy.
    with open('FileStorageTests.fs', 'r+b') as f:
        f.seek(0, 2)
        pos2 = f.tell() - 8
        f.seek(pos2)
        tlen2 = U64(f.read(8))  # length-8 of the last transaction
        pos1 = pos2 - tlen2 + 8  # skip over the tid at the start
        f.seek(pos1)
        tlen1 = U64(f.read(8))  # should be redundant length-8
        self.assertEqual(tlen1, tlen2)  # verify that it is redundant
        # Now damage the second copy.
        f.seek(pos2)
        f.write(p64(tlen2 - 1))
    # Try to pack. This used to yield
    #     NameError: global name 's' is not defined
    try:
        self._storage.pack(time.time(), referencesf)
    except CorruptedError as detail:
        # pack must now raise a *descriptive* CorruptedError instead of
        # the old NameError.
        self.assertTrue("redundant transaction length does not match "
                        "initial transaction length" in str(detail))
    else:
        self.fail("expected CorruptedError")
def checkPackAfterUndoManyTimes(self):
    # Verify that packing at times interleaved with a long chain of undos
    # never destroys the state established by the *last* undo.
    db = DB(self._storage)
    cn = db.open()
    try:
        rt = cn.root()
        # Three committed states: test=1, then test2=2, then test=3.
        rt["test"] = MinPO(1)
        transaction.commit()
        rt["test2"] = MinPO(2)
        transaction.commit()
        rt["test"] = MinPO(3)
        txn = transaction.get()
        txn.note(u"root of undo")
        txn.commit()
        packtimes = []
        # Undo the most recent transaction ten times; each undo flips
        # rt["test"] between 3 and 1.  Record a pack time after each.
        for i in range(10):
            L = db.undoInfo()
            db.undo(L[0]["id"])
            txn = transaction.get()
            txn.note(u"undo %d" % i)
            txn.commit()
            rt._p_deactivate()
            cn.sync()
            # Even i undoes the "=3" commit (value 1); odd i re-does it (3).
            self.assertEqual(rt["test"].value, i % 2 and 3 or 1)
            self.assertEqual(rt["test2"].value, 2)
            packtimes.append(time.time())
            snooze()  # ensure distinct timestamps between iterations
        for t in packtimes:
            self._storage.pack(t, referencesf)
            cn.sync()
            # TODO: Is _cache supposed to have a clear() method, or not?
            # cn._cache.clear()
            # The last undo set the value to 3 and pack should
            # never change that.
            self.assertEqual(rt["test"].value, 3)
            self.assertEqual(rt["test2"].value, 2)
            self._inter_pack_pause()
    finally:
        cn.close()
        db.close()
def checkPackWithMultiDatabaseReferences(self):
    # Pack a storage that holds a cross-database reference and make sure
    # packing doesn't discard it (or blow up on it).
    databases = {}
    # Two databases sharing one `databases` mapping: '' (under test)
    # and 'o' (the foreign database being referenced).
    db = DB(self._storage, databases=databases, database_name='')
    otherdb = ZODB.tests.util.DB(databases=databases, database_name='o')
    conn = db.open()
    root = conn.root()
    # Create then delete an object so pack has garbage to collect.
    root[1] = C()
    transaction.commit()
    del root[1]
    transaction.commit()
    # Store a reference to the *other* database's root object.
    root[2] = conn.get_connection('o').root()
    transaction.commit()
    # Pack everything up to just past "now".
    db.pack(time.time() + 1)
    # some valid storages always return 0 for len()
    self.assertTrue(len(self._storage) in (0, 1))
    conn.close()
    otherdb.close()
    db.close()
def _makeFilestorage(self):
    """Create a populated Data.fs in this test's temp directory.

    Defines a module-global ``Root`` persistent class (so it can be
    found again on load), stores an instance at root['app_root'],
    populates it via self._populate, commits, and closes the database.
    """
    import os
    from persistent import Persistent
    import transaction
    from ZODB import DB
    from ZODB.FileStorage import FileStorage
    global Root

    class Root(Persistent):
        # Minimal traversal root: no name and no parent.
        __name__ = __parent__ = None

    db = DB(FileStorage(os.path.join(self._getTempdir(), 'Data.fs')))
    conn = db.open()
    try:
        app = conn.root()['app_root'] = Root()
        self._populate(app)
        transaction.commit()
    finally:
        # Fix: the original never closed the connection, and skipped
        # db.close() entirely if _populate or commit raised.
        conn.close()
        db.close()
def DataBucketStream_getChecksumListFromNEONodeListForKey(self, \
                                                          node_list, \
                                                          ca_file, \
                                                          cert_file, \
                                                          key_file, \
                                                          key, \
                                                          threshold):
    """
    Directly connect to NEO backends and check checksums of this
    Data Bucket Stream for this key.

    Returns a list of hexdigests: the local checksum first, then one
    per node in *node_list* (each node is a (master_nodes, name) pair).
    Only the first *threshold* bytes of each bucket are hashed.
    """
    checksum_list = []
    # get directly checksum as we have access to data stream over self
    data = self.getBucketByKey(key)
    data = data[:threshold]
    checksum = hashlib.sha256(data).hexdigest()
    checksum_list.append(checksum)
    for node in node_list:
        kw = {
            'master_nodes': node[0],
            'name': node[1],
            'ca': ca_file,
            'cert': cert_file,
            'key': key_file
        }
        # make a direct connection
        stor = Storage(**kw)
        db = DB(stor)
        conn = db.open()
        try:
            root = conn.root()
            data_stream_id = self.getId()
            data_stream = root['Application'].erp5.data_stream_module[
                data_stream_id]
            data = data_stream.getBucketByKey(key)
            data = data[:threshold]
        finally:
            # Fix: close even when the remote read raises, so a failing
            # node does not leak its connection and database.
            conn.close()
            db.close()
        checksum = hashlib.sha256(data).hexdigest()
        checksum_list.append(checksum)
    return checksum_list
def DataStream_getChecksumListFromNEONodeListForStartStopOffset(self, \
                                                                node_list, \
                                                                ca_file, \
                                                                cert_file, \
                                                                key_file, \
                                                                start_offset, \
                                                                end_offset):
    """
    Directly connect to NEO backends and check checksums of this
    Data Stream.

    Returns a list of hexdigests: the local checksum first, then one
    per node in *node_list* (each node is a (master_nodes, name) pair).
    Chunks in [start_offset, end_offset) are joined with '\\n' before
    hashing, matching the local computation.
    """
    checksum_list = []
    # get directly checksum as we have access to data stream over self
    chunk_list = self.readChunkList(start_offset, end_offset)
    data = '\n'.join(chunk_list)
    checksum = hashlib.sha256(data).hexdigest()
    checksum_list.append(checksum)
    for node in node_list:
        kw = {
            'master_nodes': node[0],
            'name': node[1],
            'ca': ca_file,
            'cert': cert_file,
            'key': key_file
        }
        # make a direct connection
        stor = Storage(**kw)
        db = DB(stor)
        conn = db.open()
        try:
            root = conn.root()
            data_stream_id = self.getId()
            data_stream = root['Application'].erp5.data_stream_module[
                data_stream_id]
            chunk_list = data_stream.readChunkList(start_offset, end_offset)
            data = '\n'.join(chunk_list)
        finally:
            # Fix: close even when the remote read raises, so a failing
            # node does not leak its connection and database.
            conn.close()
            db.close()
        checksum = hashlib.sha256(data).hexdigest()
        checksum_list.append(checksum)
    return checksum_list