def packer():
    # Generator driven in steps by the caller: builds a compacted copy
    # of the shelf in `file`, then swaps it in for the current shelf.
    # `self`, `file`, and `file_path` are taken from the enclosing scope.
    yield "started %s" % datetime.now()
    items = self.gen_oid_record(start_oid=int8_to_str(0))
    for step in Shelf.generate_shelf(file, items):
        yield step
    # Make sure the new shelf is fully on disk before relying on it.
    file.flush()
    file.fsync()
    shelf = Shelf(file)
    yield "base written %s" % datetime.now()
    # Invalidate oids that have been removed.
    for hole in shelf.get_offset_map().gen_holes():
        yield hole
        oid = int8_to_str(hole)
        if self.shelf.get_position(oid) is not None:
            # Present in the old shelf but not the new one: it was
            # not copied by the pack, so it must be invalidated.
            assert shelf.get_position(oid) is None
            self.invalid.add(oid)
    yield "invalidations identified %s" % datetime.now()
    # Copy over records noted in pack_extra (stored while the pack ran).
    shelf.store(
        (name, self.shelf.get_value(name)) for name in self.pack_extra)
    file.flush()
    file.fsync()
    if not self.shelf.get_file().is_temporary():
        # Keep the pre-pack file around under a '.prepack' name.
        self.shelf.get_file().rename(file_path + '.prepack')
    self.shelf.get_file().close()
    shelf.get_file().rename(file_path)
    # The packed shelf replaces the old one.
    self.shelf = shelf
    self.pack_extra = None
    yield "finished %s" % datetime.now()
def check_connection(self):
    # End-to-end exercise of Connection: root access, ghost status,
    # commit/abort bookkeeping, and the changed-object registry.
    self.conn = conn = Connection(self._get_storage())
    self.root = root = conn.get_root()
    assert root._p_is_ghost() == False
    # The root is oid 0; get() accepts both string and integer oids.
    assert root is conn.get(int8_to_str(0))
    assert root is conn.get(0)
    assert conn is root._p_connection
    # An oid with no stored record yields None.
    assert conn.get(int8_to_str(1)) == None
    # abort/commit with nothing changed are no-ops for the root.
    conn.abort()
    conn.commit()
    assert root._p_is_ghost() == False
    root['a'] = Persistent()
    assert root._p_is_unsaved() == True
    assert root['a']._p_is_unsaved() == True
    root['a'].f = 2
    # Only the root is registered in the changed map here.
    assert list(conn.changed.values()) == [root]
    conn.commit()
    assert root._p_is_saved()
    assert list(conn.changed.values()) == []
    root['a'] = Persistent()
    assert list(conn.changed.values()) == [root]
    root['b'] = Persistent()
    root['a'].a = 'a'
    root['b'].b = 'b'
    conn.commit()
    root['a'].a = 'a'
    root['b'].b = 'b'
    conn.abort()
    # Touching an object after shrink_cache must still work.
    conn.shrink_cache()
    root['b'].b = 'b'
    del conn
def check_file_storage(self):
    # FileStorage lifecycle: oid allocation, store/load, pack, and
    # the errors raised once the storage is closed.
    name = mktemp()
    b = FileStorage(name)
    # new_oid() hands out sequential oids starting at 0.
    assert b.new_oid() == int8_to_str(0)
    assert b.new_oid() == int8_to_str(1)
    assert b.new_oid() == int8_to_str(2)
    # Nothing stored yet.
    raises(KeyError, b.load, int8_to_str(0))
    record = pack_record(int8_to_str(0), as_bytes('ok'), as_bytes(''))
    b.begin()
    b.store(int8_to_str(0), record)
    b.end()
    b.sync()
    b.begin()
    b.store(int8_to_str(1),
            pack_record(int8_to_str(1), as_bytes('no'), as_bytes('')))
    b.end()
    # Walking from the root yields one record; a full scan yields both.
    assert len(list(b.gen_oid_record(start_oid=int8_to_str(0)))) == 1
    assert len(list(b.gen_oid_record())) == 2
    b.pack()
    b.close()
    unlink(name + '.prepack')
    raises(ValueError, b.pack) # storage closed
    unlink(name + '.pack')
    raises(ValueError, b.load, int8_to_str(0)) # storage closed
    unlink(name)
def check_connection(self):
    # End-to-end exercise of Connection: root access, ghost status,
    # commit/abort bookkeeping, and the changed-object registry.
    self.conn=conn=Connection(self._get_storage())
    self.root=root=conn.get_root()
    assert root._p_is_ghost() == False
    # The root is oid 0; get() accepts both string and integer oids.
    assert root is conn.get(int8_to_str(0))
    assert root is conn.get(0)
    assert conn is root._p_connection
    # An oid with no stored record yields None.
    assert conn.get(int8_to_str(1)) == None
    # abort/commit with nothing changed are no-ops for the root.
    conn.abort()
    conn.commit()
    assert root._p_is_ghost() == False
    root['a'] = Persistent()
    assert root._p_is_unsaved() == True
    assert root['a']._p_is_unsaved() == True
    root['a'].f=2
    # Only the root is registered in the changed map here.
    assert list(conn.changed.values()) == [root]
    conn.commit()
    assert root._p_is_saved()
    assert list(conn.changed.values()) == []
    root['a'] = Persistent()
    assert list(conn.changed.values()) == [root]
    root['b'] = Persistent()
    root['a'].a = 'a'
    root['b'].b = 'b'
    conn.commit()
    root['a'].a = 'a'
    root['b'].b = 'b'
    conn.abort()
    # Touching an object after shrink_cache must still work.
    conn.shrink_cache()
    root['b'].b = 'b'
    del conn
def packer():
    # Generator driven in steps by the caller: builds a compacted copy
    # of the shelf in `file`, then swaps it in for the current shelf.
    # `self`, `file`, and `file_path` are taken from the enclosing scope.
    yield "started %s" % datetime.now()
    seen = IntSet()  # tracks oids already written to the new shelf
    items = self.gen_oid_record(start_oid=int8_to_str(0), seen=seen)
    for step in Shelf.generate_shelf(file, items):
        yield step
    # Make sure the new shelf is fully on disk before relying on it.
    file.flush()
    file.fsync()
    shelf = Shelf(file)
    yield "base written %s" % datetime.now()
    # Invalidate oids that have been removed.
    for hole in shelf.get_offset_map().gen_holes():
        yield hole
        oid = int8_to_str(hole)
        if self.shelf.get_position(oid) is not None:
            # Present in the old shelf but not the new one: it was
            # not copied by the pack, so it must be invalidated.
            assert shelf.get_position(oid) is None
            self.invalid.add(oid)
    yield "invalidations identified %s" % datetime.now()
    # Records noted in pack_extra were stored while the pack ran; drop
    # them from `seen` so gen_oid_record will emit them again below.
    for oid in self.pack_extra:
        seen.discard(str_to_int8(oid))
    for oid in self.pack_extra:
        shelf.store(self.gen_oid_record(start_oid=oid, seen=seen))
    file.flush()
    file.fsync()
    if not self.shelf.get_file().is_temporary():
        # Keep the pre-pack file around under a '.prepack' name.
        self.shelf.get_file().rename(file_path + '.prepack')
    self.shelf.get_file().close()
    shelf.get_file().rename(file_path)
    # The packed shelf replaces the old one.
    self.shelf = shelf
    self.pack_extra = None
    yield "finished %s" % datetime.now()
def check_file_storage(self):
    # FileStorage lifecycle: oid allocation, store/load, pack, and
    # the errors raised once the storage is closed.
    name = mktemp()
    b = FileStorage(name)
    # new_oid() hands out sequential oids starting at 0.
    assert b.new_oid() == int8_to_str(0)
    assert b.new_oid() == int8_to_str(1)
    assert b.new_oid() == int8_to_str(2)
    # Nothing stored yet.
    raises(KeyError, b.load, int8_to_str(0))
    record = pack_record(int8_to_str(0), as_bytes('ok'), as_bytes(''))
    b.begin()
    b.store(int8_to_str(0), record)
    b.end()
    b.sync()
    b.begin()
    b.store(int8_to_str(1), pack_record(
        int8_to_str(1), as_bytes('no'), as_bytes('')))
    b.end()
    # Walking from the root yields one record; a full scan yields both.
    assert len(list(b.gen_oid_record(start_oid=int8_to_str(0)))) == 1
    assert len(list(b.gen_oid_record())) == 2
    b.pack()
    b.close()
    unlink(name + '.prepack')
    raises(ValueError, b.pack) # storage closed
    unlink(name + '.pack')
    raises(ValueError, b.load, int8_to_str(0)) # storage closed
    unlink(name)
def generate_unused_names():
    # Endless stream of names free for allocation: first the holes in
    # the offset map, then names numbered past the end of its array.
    # `self` comes from the enclosing scope.
    for hole in self.offset_map.gen_holes():
        candidate = int8_to_str(hole)
        if candidate not in self.memory_index:
            yield candidate
    # Now continue with values above those in the offset map.
    next_number = self.offset_map.get_array_size()
    while True:
        candidate = int8_to_str(next_number)
        if candidate not in self.memory_index:
            yield candidate
        next_number += 1
def check_accessors(self):
    """Exercise oid formatting and repr on a fresh PersistentObject."""
    obj = PersistentObject()
    obj._p_oid  # reading the attribute on a new object must not raise
    assert obj._p_format_oid() == 'None'
    obj._p_oid = int8_to_str(1)
    assert obj._p_format_oid() == '1'
    assert repr(obj) == "<PersistentObject 1>"
def store(self, oid, record):
    """(str, str)
    Record `record` as the pending data for `oid`.
    """
    self.pending_records[oid] = record
    # An oid is acceptable only if we handed it out, it already exists
    # on the shelf, or it is the root oid (0).
    acceptable = (oid in self.allocated_unused_oids or
                  oid in self.shelf or
                  oid == int8_to_str(0))
    if not acceptable:
        # NOTE(review): begin() is invoked before raising — presumably
        # it resets the pending state; confirm against begin().
        self.begin()
        raise ValueError("oid %r is a surprise" % oid)
def b(self):
    # WordArray stress test: sequential writes, read-back, overwrite,
    # bounds checking, and reconstruction from the same file.
    n = 1000
    s = BytesIO()
    word_array = WordArray(file=s, bytes_per_word=8, number_of_words=n)
    for x in xrange(n):
        word_array[x] = int8_to_str(x)
    # Negative indexing works like a list.
    assert word_array[-1] == int8_to_str(n - 1)
    for x in xrange(n):
        assert x == str_to_int8(word_array[x])
        # Overwriting a word takes effect immediately.
        word_array[x] = int8_to_str(2*x)
        assert x == str_to_int8(word_array[x]) / 2
    assert len(word_array) == n
    # Out-of-range access raises IndexError.
    assert raises(IndexError, word_array.__getitem__, n + 1)
    # A WordArray can be reconstructed from an existing file.
    s.seek(0)
    word_array2 = WordArray(file=s)
    word_array2[-1] = as_bytes('mmmmmmmm')
    assert word_array2[-1] == as_bytes('mmmmmmmm')
def iterindex(self):
    """Yield every (name, position) pair in the index.

    Entries from the offset map come first — skipping positions at or
    beyond the map's start and names shadowed by the in-memory index —
    followed by all in-memory entries.
    """
    for number, position in iteritems(self.offset_map):
        if position < self.offset_map.get_start():
            name = int8_to_str(number)
            if name not in self.memory_index:
                yield name, position
    for name, position in list(self.memory_index.items()):
        yield name, position
def check_conflict(self):
    # Two connections to the same storage: a commit in one causes a
    # ConflictError in the other, which must abort before retrying.
    b = Connection(self._get_storage())
    c = Connection(self._get_storage())
    rootb = b.get(int8_to_str(0))
    rootb['b'] = Persistent()
    rootc = c.get(int8_to_str(0))
    rootc['c'] = Persistent()
    c.commit()
    # b's commit loses the race.
    raises(ConflictError, b.commit)
    # b still holds its stale state: c's addition is not visible yet.
    raises(KeyError, rootb.__getitem__, 'c')
    transaction_serial = b.transaction_serial
    b.abort()
    # abort() advances the transaction serial and ghosts cached objects.
    assert b.get_transaction_serial() > transaction_serial
    assert rootb._p_is_ghost()
    rootc['d'] = Persistent()
    c.commit()
    # After the abort, b loads fresh state committed by c.
    rootb['d']
def check_storage_tools(self):
    # Exercise the storage helper functions and the abstract Storage
    # base class's refusal to run unimplemented methods.
    connection = Connection(self._get_storage())
    root = connection.get_root()
    root['a'] = Persistent()
    root['b'] = Persistent()
    connection.commit()
    # Both new objects are referenced only by the root (oid 0).
    index = get_reference_index(connection.get_storage())
    assert index == {
        int8_to_str(1): [int8_to_str(0)], int8_to_str(2): [int8_to_str(0)]}
    # The census counts stored records per class name.
    census = get_census(connection.get_storage())
    assert census == {as_bytes('PersistentDict'):1, as_bytes('Persistent'):2}
    references = list(gen_referring_oid_record(connection.get_storage(),
        int8_to_str(1)))
    assert references == [
        (int8_to_str(0), connection.get_storage().load(int8_to_str(0)))]
    class Fake(object):
        pass
    s = Fake()
    # Rebind the class so we can poke the abstract base directly
    # without running its constructor.
    s.__class__ = Storage
    raises(RuntimeError, s.__init__)
    # Every abstract method must raise NotImplementedError.
    raises(NotImplementedError, s.load, None)
    raises(NotImplementedError, s.begin)
    raises(NotImplementedError, s.store, None, None)
    raises(NotImplementedError, s.end)
    raises(NotImplementedError, s.sync)
    g = s.gen_oid_record()
    raises(NotImplementedError, next, g)
def check_storage_tools(self):
    # Exercise the storage helper functions and the abstract Storage
    # base class's refusal to run unimplemented methods.
    connection = Connection(self._get_storage())
    root = connection.get_root()
    root['a'] = Persistent()
    root['b'] = Persistent()
    connection.commit()
    # Both new objects are referenced only by the root (oid 0).
    index = get_reference_index(connection.get_storage())
    assert index == {
        int8_to_str(1): [int8_to_str(0)],
        int8_to_str(2): [int8_to_str(0)]
    }
    # The census counts stored records per class name.
    census = get_census(connection.get_storage())
    assert census == {
        as_bytes('PersistentDict'): 1,
        as_bytes('Persistent'): 2
    }
    references = list(
        gen_referring_oid_record(connection.get_storage(), int8_to_str(1)))
    assert references == [(int8_to_str(0),
                           connection.get_storage().load(int8_to_str(0)))]
    class Fake(object):
        pass
    s = Fake()
    # Rebind the class so we can poke the abstract base directly
    # without running its constructor.
    s.__class__ = Storage
    raises(RuntimeError, s.__init__)
    # Every abstract method must raise NotImplementedError.
    raises(NotImplementedError, s.load, None)
    raises(NotImplementedError, s.begin)
    raises(NotImplementedError, s.store, None, None)
    raises(NotImplementedError, s.end)
    raises(NotImplementedError, s.sync)
    g = s.gen_oid_record()
    raises(NotImplementedError, next, g)
def check_accessors(self):
    """Exercise oid formatting and repr on a Persistent instance."""
    instance = Persistent()
    instance._p_oid  # reading the attribute on a new object must not raise
    assert instance._p_format_oid() == 'None'
    # Any 8-byte string is formatted as its big integer value.
    instance._p_oid = 'aaaaaaaa'
    assert instance._p_format_oid() == '7016996765293437281'
    instance._p_oid = int8_to_str(1)
    assert instance._p_format_oid() == '1'
    assert repr(instance) == "<Persistent 1>"
def load(self, oid):
    """(str) -> str
    Return the object record identified by 'oid'.
    Raises KeyError when no such row exists.
    """
    cursor = self._conn.cursor()
    cursor.execute('SELECT id, data, refs FROM objects WHERE id = ?',
                   (str_to_int8(oid),))
    row = cursor.fetchone()
    if row is None:
        raise KeyError(oid)
    identifier, data, refs = row
    return pack_record(int8_to_str(identifier), data, refs)
def a(self):
    # After deleting objects and packing, the storage hands freed oids
    # back out before allocating brand-new ones.
    f = File(prefix='shelftest')
    name = f.get_name()
    f.close()
    s = FileStorage(name)
    c = Connection(s)
    r = c.get_root()
    for x in range(10):
        r["a%s" % x] = Persistent()
    c.commit()
    deleted_oids = [
        r['a0']._p_oid, r['a2']._p_oid, r['a7']._p_oid, r['a8']._p_oid
    ]
    del r['a0']
    del r['a2']
    del r['a7']
    del r['a8']
    c.commit()
    c.pack()
    c.abort()
    # Cached objects for packed-away oids remain as ghosts...
    assert c.get(deleted_oids[0])._p_is_ghost()
    assert c.get(deleted_oids[1])._p_is_ghost()
    # ...but loading their state now fails.
    raises(KeyError, getattr, c.get(deleted_oids[0]), 'a')
    # Root plus the six surviving objects.
    assert len([repr(oid) for oid, record in s.gen_oid_record()]) == 7
    c.commit()
    c.pack()
    # Freed oids are reissued in reverse order of deleted_oids.
    new_oid = s.new_oid()
    assert new_oid == deleted_oids[-1], (new_oid, deleted_oids)
    new_oid = s.new_oid()
    assert new_oid == deleted_oids[-2], (new_oid, deleted_oids)
    new_oid = s.new_oid()
    assert new_oid == deleted_oids[-3], (new_oid, deleted_oids)
    new_oid = s.new_oid()
    assert new_oid == deleted_oids[-4], (new_oid, deleted_oids)
    # Once the free list is exhausted, allocation continues past the
    # highest oid previously used.
    new_oid = s.new_oid()
    assert new_oid == int8_to_str(11), repr(new_oid)
    new_oid = s.new_oid()
    assert new_oid == int8_to_str(12), repr(new_oid)
def c(self):
    # Packing after clearing the root shrinks the storage to just the
    # root, and oid allocation restarts right after it.
    f = File(prefix='shelftest')
    name = f.get_name()
    f.close()
    s = FileStorage(name)
    c = Connection(s)
    r = c.get_root()
    for x in range(10):
        r["a%s" % x] = Persistent()
    c.commit()
    deleted_oid = r['a9']._p_oid  # not used below; kept for symmetry
    del r['a9']
    c.commit()
    c.pack()
    c.abort()
    # Drop everything reachable from the root, then pack again.
    r.clear()
    c.commit()
    c.pack()
    c.abort()
    # Every non-root oid was freed, so allocation starts again at 1.
    new_oid = s.new_oid()
    assert new_oid == int8_to_str(1), repr(new_oid)
    new_oid = s.new_oid()
    assert new_oid == int8_to_str(2), repr(new_oid)
def a(self):
    # After deleting objects and packing, the storage hands freed oids
    # back out before allocating brand-new ones.  In this variant,
    # loading a packed-away object raises ReadConflictError.
    f = File(prefix='shelftest')
    name = f.get_name()
    f.close()
    s = FileStorage(name)
    c = Connection(s)
    r = c.get_root()
    for x in range(10):
        r["a%s" % x] = Persistent()
    c.commit()
    deleted_oids = [
        r['a0']._p_oid, r['a2']._p_oid, r['a7']._p_oid, r['a8']._p_oid]
    del r['a0']
    del r['a2']
    del r['a7']
    del r['a8']
    c.commit()
    c.pack()
    c.abort()
    # Cached objects for packed-away oids remain as ghosts...
    assert c.get(deleted_oids[0])._p_is_ghost()
    assert c.get(deleted_oids[1])._p_is_ghost()
    # ...but loading their state now fails.
    raises(ReadConflictError, getattr, c.get(deleted_oids[0]), 'a')
    # Root plus the six surviving objects.
    assert len([repr(oid) for oid, record in s.gen_oid_record()]) == 7
    c.commit()
    c.pack()
    # Freed oids are reissued in reverse order of deleted_oids.
    new_oid = s.new_oid()
    assert new_oid == deleted_oids[-1], (new_oid, deleted_oids)
    new_oid = s.new_oid()
    assert new_oid == deleted_oids[-2], (new_oid, deleted_oids)
    new_oid = s.new_oid()
    assert new_oid == deleted_oids[-3], (new_oid, deleted_oids)
    new_oid = s.new_oid()
    assert new_oid == deleted_oids[-4], (new_oid, deleted_oids)
    # Once the free list is exhausted, allocation continues past the
    # highest oid previously used.
    new_oid = s.new_oid()
    assert new_oid == int8_to_str(11), repr(new_oid)
    new_oid = s.new_oid()
    assert new_oid == int8_to_str(12), repr(new_oid)
def b(self):
    # Deleting one object and packing frees exactly its oid, which is
    # reused before any new oid is allocated.
    f = File(prefix='shelftest')
    name = f.get_name()
    f.close()
    s = FileStorage(name)
    c = Connection(s)
    r = c.get_root()
    for x in range(10):
        r["a%s" % x] = Persistent()
    c.commit()
    deleted_oid = r['a9']._p_oid
    del r['a9']
    c.commit()
    c.pack()
    c.abort()
    # Root plus the nine surviving objects.
    assert len([repr(oid) for oid, record in s.gen_oid_record()]) == 10
    # The freed oid is reissued first, then allocation moves past the
    # highest oid previously used.
    new_oid = s.new_oid()
    assert new_oid == deleted_oid
    new_oid = s.new_oid()
    assert new_oid == int8_to_str(11)
def get(self, oid):
    """(oid:str|int|long) -> PersistentObject | None
    Return the object identified by `oid`, or None when no record is
    stored for it.  The returned object may be a ghost.
    """
    # Integer oids are accepted for convenience.
    if not isinstance(oid, byte_string):
        oid = int8_to_str(oid)
    cached = self.cache.get(oid)
    if cached is not None:
        return cached
    try:
        data = self.get_stored_pickle(oid)
    except KeyError:
        return None
    klass = loads(data)
    instance = self.cache.get_instance(oid, klass, self)
    state = self.reader.get_state(data, load=True)
    instance.__setstate__(state)
    instance._p_set_status_saved()
    return instance
def e(self):
    """A Shelf can be built directly from an initial items sequence."""
    backing = File()
    first = int8_to_str(0)
    second = int8_to_str(1)
    s = Shelf(backing, items=[(first, 'record1'), (second, 'record2')])
def test_e(self):
    """A Shelf can be built directly from an initial items sequence."""
    backing = File()
    first_name, second_name = int8_to_str(0), int8_to_str(1)
    s = Shelf(backing, items=[(first_name, 'record1'),
                              (second_name, 'record2')])
def check_client_storage(self):
    # Two clients against one server: oid allocation, store/load,
    # bulk_load, pack, and cross-client invalidation via sync().
    b = ClientStorage(address=self.address)
    c = ClientStorage(address=self.address)
    oid = b.new_oid()
    assert oid == int8_to_str(0), repr(oid)
    oid = b.new_oid()
    assert oid == int8_to_str(1), repr(oid)
    oid = b.new_oid()
    assert oid == int8_to_str(2), repr(oid)
    # Nothing stored yet.
    raises(KeyError, b.load, int8_to_str(0))
    record = pack_record(int8_to_str(0), as_bytes('ok'), as_bytes(''))
    b.begin()
    b.store(int8_to_str(0), record)
    assert b.end() is None
    b.load(int8_to_str(0))
    # The storing client has nothing to invalidate.
    assert b.sync() == []
    b.begin()
    b.store(
        int8_to_str(1),
        pack_record(int8_to_str(1), as_bytes('no'), as_bytes('')))
    b.end()
    # A walk of the records yields only one here.
    assert len(list(b.gen_oid_record())) == 1
    records = b.bulk_load([int8_to_str(0), int8_to_str(1)])
    assert len(list(records)) == 2
    # Requesting an unknown oid raises when the result is consumed.
    records = b.bulk_load([int8_to_str(0), int8_to_str(1), int8_to_str(2)])
    raises(DurusKeyError, list, records)
    b.pack()
    assert len(list(b.gen_oid_record())) == 1
    # The other client must sync before it is allowed to load.
    raises(ReadConflictError, c.load, int8_to_str(0))
    raises(ReadConflictError, c.load, int8_to_str(0))
    assert set(c.sync()) == set([int8_to_str(0), int8_to_str(1)])
    assert record == c.load(int8_to_str(0))
    b.close()
    c.close()
def load_protocol_error(self):
    """A malformed server response makes load() raise ProtocolError."""
    storage = ClientStorage(address=self.address)
    connection = Connection(storage)  # set up as a normal client would
    # Swap in a fake socket that returns garbage bytes.
    storage.s = FakeSocket('?')
    raises(ProtocolError, storage.load, int8_to_str(0))
from durus.error import ConflictError, WriteConflictError, ReadConflictError from durus.error import DurusKeyError from durus.logger import log from durus.persistent import ConnectionBase, GHOST from durus.persistent_dict import PersistentDict from durus.serialize import ObjectReader, ObjectWriter from durus.serialize import unpack_record, pack_record from durus.utils import int8_to_str, iteritems, loads, byte_string, as_bytes from heapq import heappush, heappop from itertools import islice, chain from os import getpid from time import time from weakref import ref, KeyedRef import durus.storage ROOT_OID = int8_to_str(0) class Connection(ConnectionBase): """ The Connection manages movement of objects in and out of storage. Instance attributes: storage: Storage cache: Cache reader: ObjectReader changed: {oid:str : PersistentObject} invalid_oids: set([str]) Set of oids of objects known to have obsolete state. transaction_serial: int Number of calls to commit() or abort() since this instance was created.
def check_int8_to_str_str_to_int8(self):
    """int8_to_str and str_to_int8 are 8-byte inverses for small ints."""
    for value in (0, 1, 2):
        encoded = int8_to_str(value)
        assert len(encoded) == 8
        assert str_to_int8(encoded) == value
def _gen_records(self):
    """Yield (oid, record) for every stored object, in ascending id order.

    Each oid is the 8-byte string form of the integer id, and the
    record is packed with that same 8-byte oid, matching load().
    """
    c = self._conn.cursor()
    # The column list must NOT be parenthesized: SQLite parses
    # "SELECT (id, data, refs)" as a single row value, which fails
    # (and would break the 3-tuple unpack below).
    c.execute('SELECT id, data, refs FROM objects ORDER BY id')
    for oid_int, data, refs in c.fetchall():
        oid = int8_to_str(oid_int)
        # pack_record expects the string oid, as in load(); the raw
        # integer id would produce records inconsistent with load().
        yield oid, pack_record(oid, data, refs)
def new_oid(self):
    """Allocate the next unused oid and return it as an 8-byte string."""
    allocated = self._last_oid
    self._last_oid = allocated + 1
    return int8_to_str(allocated)
def check_memory_storage(self):
    # MemoryStorage basics: sequential oids, store/load round-trip,
    # and bulk_load error behavior.
    b = MemoryStorage()
    assert b.new_oid() == int8_to_str(0)
    assert b.new_oid() == int8_to_str(1)
    assert b.new_oid() == int8_to_str(2)
    # Nothing stored yet.
    raises(KeyError, b.load, int8_to_str(0))
    record = pack_record(int8_to_str(0), as_bytes('ok'), as_bytes(''))
    b.begin()
    b.store(int8_to_str(0), record)
    b.end()
    b.sync()
    b.begin()
    b.store(
        int8_to_str(1),
        pack_record(int8_to_str(1), as_bytes('no'), as_bytes('')))
    b.end()
    # A walk of the records yields only one here.
    assert len(list(b.gen_oid_record())) == 1
    assert record == b.load(int8_to_str(0))
    records = b.bulk_load([int8_to_str(0), int8_to_str(1)])
    assert len(list(records)) == 2
    # Requesting an unknown oid raises when the result is consumed.
    records = b.bulk_load([int8_to_str(0), int8_to_str(1), int8_to_str(2)])
    raises(KeyError, list, records)
from durus.error import ConflictError, WriteConflictError, ReadConflictError from durus.error import DurusKeyError from durus.logger import log from durus.persistent import ConnectionBase, GHOST from durus.persistent_dict import PersistentDict from durus.serialize import ObjectReader, ObjectWriter from durus.serialize import unpack_record, pack_record from durus.utils import int8_to_str, iteritems, loads, byte_string, as_bytes from heapq import heappush, heappop from itertools import islice, chain from os import getpid from time import time from weakref import ref, KeyedRef import durus.storage ROOT_OID = int8_to_str(0) class Connection (ConnectionBase): """ The Connection manages movement of objects in and out of storage. Instance attributes: storage: Storage cache: Cache reader: ObjectReader changed: {oid:str : PersistentObject} invalid_oids: set([str]) Set of oids of objects known to have obsolete state. transaction_serial: int Number of calls to commit() or abort() since this instance was created. This is used to maintain consistency, and to implement LRU replacement
def new_oid(self):
    """Advance the oid counter and return the new oid as 8 bytes."""
    next_value = self.oid + 1
    self.oid = next_value
    return int8_to_str(next_value)
def _list_all_oids(self):
    """Yield every stored oid as an 8-byte string, in ascending id order."""
    cursor = self._conn.cursor()
    cursor.execute('SELECT id FROM objects ORDER BY id')
    for row in cursor.fetchall():
        yield int8_to_str(row[0])