def check_file_storage(self):
    """Run a FileStorage through its full lifecycle.

    Covers sequential oid allocation, load of a missing oid, two
    committed stores, record iteration, pack, and the ValueErrors
    raised once the storage is closed.
    """
    path = mktemp()
    storage = FileStorage(path)
    # A fresh storage hands out sequential oids starting at zero.
    for expected in range(3):
        assert storage.new_oid() == int8_to_str(expected)
    # Nothing has been stored yet, so loading must fail.
    raises(KeyError, storage.load, int8_to_str(0))
    record = pack_record(int8_to_str(0), as_bytes('ok'), as_bytes(''))
    storage.begin()
    storage.store(int8_to_str(0), record)
    storage.end()
    storage.sync()
    storage.begin()
    storage.store(
        int8_to_str(1),
        pack_record(int8_to_str(1), as_bytes('no'), as_bytes('')))
    storage.end()
    # Iteration from a start_oid vs. a full scan.
    assert len(list(storage.gen_oid_record(start_oid=int8_to_str(0)))) == 1
    assert len(list(storage.gen_oid_record())) == 2
    storage.pack()
    storage.close()
    unlink(path + '.prepack')
    raises(ValueError, storage.pack)  # storage closed
    unlink(path + '.pack')
    raises(ValueError, storage.load, int8_to_str(0))  # storage closed
    unlink(path)
def check_client_storage(self):
    """Exercise ClientStorage through two connections to one server.

    The first connection allocates oids, commits, bulk-loads, and
    packs; the second observes read conflicts until it syncs.
    """
    writer = ClientStorage(address=self.address)
    reader = ClientStorage(address=self.address)
    for expected in range(3):
        oid = writer.new_oid()
        assert oid == int8_to_str(expected), repr(oid)
    raises(KeyError, writer.load, int8_to_str(0))
    record = pack_record(int8_to_str(0), as_bytes('ok'), as_bytes(''))
    writer.begin()
    writer.store(int8_to_str(0), record)
    assert writer.end() is None
    writer.load(int8_to_str(0))
    assert writer.sync() == []
    writer.begin()
    writer.store(
        int8_to_str(1),
        pack_record(int8_to_str(1), as_bytes('no'), as_bytes('')))
    writer.end()
    assert len(list(writer.gen_oid_record())) == 1
    records = writer.bulk_load([int8_to_str(0), int8_to_str(1)])
    assert len(list(records)) == 2
    # Asking for an oid that was never stored fails on iteration.
    records = writer.bulk_load(
        [int8_to_str(0), int8_to_str(1), int8_to_str(2)])
    raises(DurusKeyError, list, records)
    writer.pack()
    assert len(list(writer.gen_oid_record())) == 1
    # The second connection is stale until it syncs.
    raises(ReadConflictError, reader.load, int8_to_str(0))
    raises(ReadConflictError, reader.load, int8_to_str(0))
    assert set(reader.sync()) == set([int8_to_str(0), int8_to_str(1)])
    assert record == reader.load(int8_to_str(0))
    writer.close()
    reader.close()
def check_file_storage(self):
    """FileStorage round-trip.

    Allocates oids, commits two records across two transactions,
    iterates the records, packs, then verifies the closed storage
    raises ValueError on further use.
    """
    filename = mktemp()
    fs = FileStorage(filename)
    assert fs.new_oid() == int8_to_str(0)
    assert fs.new_oid() == int8_to_str(1)
    assert fs.new_oid() == int8_to_str(2)
    # No record committed yet for oid 0.
    raises(KeyError, fs.load, int8_to_str(0))
    record = pack_record(int8_to_str(0), as_bytes('ok'), as_bytes(''))
    fs.begin()
    fs.store(int8_to_str(0), record)
    fs.end()
    fs.sync()
    fs.begin()
    fs.store(
        int8_to_str(1),
        pack_record(int8_to_str(1), as_bytes('no'), as_bytes('')))
    fs.end()
    assert len(list(fs.gen_oid_record(start_oid=int8_to_str(0)))) == 1
    assert len(list(fs.gen_oid_record())) == 2
    fs.pack()
    fs.close()
    unlink(filename + '.prepack')
    raises(ValueError, fs.pack)  # storage closed
    unlink(filename + '.pack')
    raises(ValueError, fs.load, int8_to_str(0))  # storage closed
    unlink(filename)
def check_record_pack_unpack(self):
    """pack_record/unpack_record must round-trip oid, data, and refs."""
    oid = as_bytes('0' * 8)
    data = as_bytes('sample')
    reflist = [as_bytes('1' * 8), as_bytes('2' * 8)]
    refs = join_bytes(reflist)
    packed = pack_record(oid, data, refs)
    got_oid, got_data, got_refs = unpack_record(packed)
    assert got_oid == oid
    assert got_data == data
    assert split_oids(got_refs) == reflist
    # Splitting an empty refs string yields no oids.
    assert split_oids('') == []
def load(self, oid):
    """(str) -> str
    Return object record identified by 'oid'.
    Raises KeyError if no matching row exists.
    """
    cursor = self._conn.cursor()
    cursor.execute(
        'SELECT id, data, refs FROM objects WHERE id = ?',
        (str_to_int8(oid),))
    row = cursor.fetchone()
    if row is None:
        raise KeyError(oid)
    stored_id, data, refs = row
    # Convert the integer key back to the 8-byte oid string form.
    return pack_record(int8_to_str(stored_id), data, refs)
def check_memory_storage(self):
    """MemoryStorage basics: oids, two commits, iteration, bulk_load."""
    ms = MemoryStorage()
    for expected in range(3):
        assert ms.new_oid() == int8_to_str(expected)
    raises(KeyError, ms.load, int8_to_str(0))
    record = pack_record(int8_to_str(0), as_bytes('ok'), as_bytes(''))
    ms.begin()
    ms.store(int8_to_str(0), record)
    ms.end()
    ms.sync()
    ms.begin()
    ms.store(
        int8_to_str(1),
        pack_record(int8_to_str(1), as_bytes('no'), as_bytes('')))
    ms.end()
    assert len(list(ms.gen_oid_record())) == 1
    assert record == ms.load(int8_to_str(0))
    loaded = ms.bulk_load([int8_to_str(0), int8_to_str(1)])
    assert len(list(loaded)) == 2
    # oid 2 was allocated but never stored, so iteration fails.
    loaded = ms.bulk_load([int8_to_str(0), int8_to_str(1), int8_to_str(2)])
    raises(KeyError, list, loaded)
def generate_durus_object_records():
    # Convert every object record in a ZODB FileStorage into a Durus
    # record, yielding one packed record per oid.
    # NOTE(review): Python 2 code (cStringIO, cPickle, print statement);
    # reads module-level zodb_file_name — confirm it is defined upstream.
    sio = cStringIO.StringIO()
    zodb_storage = ZODBFileStorage(zodb_file_name)
    n = 0
    for oid in zodb_storage._index.keys():
        n += 1
        if n % 10000 == 0:
            # Progress indicator for large storages.
            sys.stdout.write('.')
            sys.stdout.flush()
        p, serial = zodb_storage.load(oid, '')
        refs = referencesf(p)
        # Reuse one StringIO buffer: rewind, overwrite with the ZODB
        # pickle, truncate any leftover bytes, rewind for reading.
        sio.seek(0)
        sio.write(p)
        sio.truncate()
        sio.seek(0)
        def get_class(module_class):
            # Resolve a (module_name, class_name) pair to a class object,
            # importing the module on first use.
            module, klass = module_class
            if module not in sys.modules:
                __import__(module)
            return getattr(sys.modules[module], klass)
        class PersistentRef:
            # Holds a ZODB persistent reference as (oid, class) so it can
            # be re-emitted in Durus form by persistent_id below.
            def __init__(self, v):
                oid, module_class = v
                self.oid_klass = (oid, get_class(module_class))
        unpickler = cPickle.Unpickler(sio)
        unpickler.persistent_load = lambda v: PersistentRef(v)
        class_meta = unpickler.load()
        # unwrap extra tuple from class meta data
        class_meta, extra = class_meta
        assert extra is None
        object_state = unpickler.load()
        if type(object_state) == dict and '_container' in object_state:
            # Rename the legacy '_container' state key to 'data'.
            assert 'data' not in object_state
            object_state['data'] = object_state['_container']
            del object_state['_container']
        # Re-pickle in Durus layout: class first, then state.
        sio.seek(0)
        sio.truncate()
        cPickle.dump(get_class(class_meta), sio, 2)
        pickler = cPickle.Pickler(sio, 2)
        def persistent_id(v):
            # Emit persistent references as (oid, class) tuples.
            if isinstance(v, PersistentRef):
                return v.oid_klass
            return None
        pickler.persistent_id = persistent_id
        pickler.dump(object_state)
        record = pack_record(oid, sio.getvalue(), ''.join(refs))
        yield record
    print
    print n, 'objects written'
def commit(self):
    """
    If there are any changes, try to store them, and raise ConflictError
    if there are any invalid oids saved or if there are any invalid oids
    for non-ghost objects.
    """
    # Python 2 code ('except ConflictError, exc', iteritems method).
    if not self.changed:
        # Nothing to write; just pick up invalidations from the storage.
        self._sync()
    else:
        if self.invalid_oids:
            # someone is trying to commit after a read or write conflict
            raise ConflictError(list(self.invalid_oids))
        self.storage.begin()
        new_objects = {}
        for oid, changed_object in self.changed.iteritems():
            writer = ObjectWriter(self)
            try:
                for obj in writer.gen_new_objects(changed_object):
                    oid = obj._p_oid
                    if oid in new_objects:
                        # Already stored during this commit.
                        continue
                    elif oid not in self.changed:
                        # First sighting of a brand-new object: record it
                        # so the rollback below can undo its registration.
                        new_objects[oid] = obj
                        self.cache[oid] = obj
                    data, refs = writer.get_state(obj)
                    self.storage.store(oid, pack_record(oid, data, refs))
                    obj._p_set_status_saved()
            finally:
                writer.close()
        try:
            self.storage.end(self._handle_invalidations)
        except ConflictError, exc:
            # Commit failed: detach every object created during this
            # commit so it can be retried from scratch.
            for oid, obj in new_objects.iteritems():
                del self.cache[oid]
                self.loaded_oids.discard(oid)
                obj._p_set_status_unsaved()
                obj._p_oid = None
                obj._p_connection = None
            raise
        self.changed.clear()
def commit(self):
    """
    If there are any changes, try to store them, and raise
    WriteConflictError if there are any invalid oids saved
    or if there are any invalid oids for non-ghost objects.
    """
    if not self.changed:
        # Nothing to write; just pick up invalidations from the storage.
        self._sync()
    else:
        # A prior conflict must be cleared with abort() before committing.
        assert not self.invalid_oids, "still conflicted: missing abort()"
        self.storage.begin()
        new_objects = {}
        for oid, changed_object in iteritems(self.changed):
            writer = ObjectWriter(self)
            try:
                for obj in writer.gen_new_objects(changed_object):
                    oid = obj._p_oid
                    if oid in new_objects:
                        # Already stored during this commit.
                        continue
                    elif oid not in self.changed:
                        # First sighting of a brand-new object: record it
                        # so the rollback below can undo its registration.
                        new_objects[oid] = obj
                        self.cache[oid] = obj
                    data, refs = writer.get_state(obj)
                    self.storage.store(oid, pack_record(oid, data, refs))
                    obj._p_set_status_saved()
            finally:
                writer.close()
        try:
            self.storage.end(self._handle_invalidations)
        except ConflictError:
            # Commit failed: detach every object created during this
            # commit so it can be retried from scratch.
            for oid, obj in iteritems(new_objects):
                obj._p_oid = None
                del self.cache[oid]
                obj._p_set_status_unsaved()
                obj._p_connection = None
            raise
        self.changed.clear()
    # Run on every commit, including the no-change path.
    self.shrink_cache()
    self.transaction_serial += 1
def __init__(self, storage, cache_size=8000):
    """(storage:Storage, cache_size:int=8000)
    Make a connection to `storage`.
    Set the target number of non-ghosted persistent objects to keep in
    the cache at `cache_size`.
    """
    assert isinstance(storage, Storage)
    self.storage = storage
    self.reader = ObjectReader(self)
    self.changed = {}  # oid -> modified object awaiting commit
    self.invalid_oids = Set()  # oids invalidated since the last sync
    self.loaded_oids = Set()
    try:
        storage.load(ROOT_OID)
    except KeyError:
        # Brand-new storage: create and commit an empty root mapping.
        self.storage.begin()
        writer = ObjectWriter(self)
        data, refs = writer.get_state(PersistentDict())
        writer.close()
        self.storage.store(ROOT_OID, pack_record(ROOT_OID, data, refs))
        self.storage.end(self._handle_invalidations)
    self.new_oid = storage.new_oid # needed by serialize
    self.cache = Cache(cache_size)
    # Pin the root object so it is never evicted from the cache.
    self.cache.hold(self.get_root())
def _gen_records(self):
    """() -> generator of (oid:str, record:str)

    Iterate every object record in the database in oid order, yielding
    (8-byte oid string, packed record) pairs.
    """
    c = self._conn.cursor()
    # No parentheses around the column list: in SQLite,
    # "SELECT (id, data, refs)" is a single row-value expression,
    # not a three-column result, which breaks the tuple unpack below.
    c.execute('SELECT id, data, refs FROM objects ORDER BY id')
    for oid_int, data, refs in c.fetchall():
        # Convert the integer key to the 8-byte oid string before
        # packing, matching the format produced by load().
        oid = int8_to_str(oid_int)
        yield oid, pack_record(oid, data, refs)