def new_oid(self):
    """Allocate and return the next oid as an 8-byte string."""
    next_oid = self.oid + 1
    self.oid = next_oid
    return p64(next_oid)
def _write_index(self, fp, index):
    """Append the compressed, pickled index at the current file position,
    then back-patch its offset into the 8 bytes just after the magic string.
    """
    offset = fp.tell()
    payload = compress(dumps(index))
    # Length prefix first, so a reader knows how much to decompress.
    fp.write(p64(len(payload)))
    fp.write(payload)
    # Record where the index lives, directly after the magic string.
    fp.seek(len(self.MAGIC))
    fp.write(p64(offset))
    assert fp.tell() == len(self.MAGIC) + 8
def gen_oid_record(self):
    """() -> sequence([oid:str, record:str])

    A FileStorage will do a better job of this.
    """
    # NOTE: new_oid() consumes one oid to discover the current upper
    # bound; oids with no stored record are silently skipped.
    highest = u64(self.new_oid())
    for number in xrange(highest):
        candidate = p64(number)
        try:
            yield candidate, self.load(candidate)
        except DurusKeyError:
            pass
def get(self, oid):
    """(oid:str|int|long) -> Persistent | None

    Return object for `oid`.  The object may be a ghost.
    """
    # Integer oids are converted to the packed 8-byte form.
    if type(oid) is not str:
        oid = p64(oid)
    cached = self.cache.get(oid)
    if cached is not None:
        return cached
    try:
        pickle = self.get_stored_pickle(oid)
    except KeyError:
        # Nothing stored under this oid.
        return None
    ghost = self.reader.get_ghost(pickle)
    ghost._p_oid = oid
    ghost._p_connection = self
    ghost._p_set_status_ghost()
    self.cache[oid] = ghost
    return ghost
def _write_header(self, fp):
    """Write the base FileStorage header, then reserve 8 bytes where the
    index offset will later be patched in."""
    FileStorage._write_header(self, fp)
    placeholder = p64(0)  # index offset, filled in by _write_index
    fp.write(placeholder)
def _disk_format(self, record):
    """Return `record` prefixed with the packed transaction id."""
    tid_prefix = p64(self.tid)
    return tid_prefix + record
""" from cPickle import loads from durus.error import ConflictError, ReadConflictError, DurusKeyError from durus.logger import log from durus.persistent_dict import PersistentDict from durus.serialize import ObjectReader, ObjectWriter from durus.serialize import unpack_record, pack_record from durus.storage import Storage from durus.utils import p64 from itertools import islice, chain from os import getpid from sets import Set from time import time from weakref import ref ROOT_OID = p64(0) class Connection(object): """ The Connection manages movement of objects in and out of storage. Instance attributes: storage: Storage cache: Cache reader: ObjectReader changed: {oid:str : Persistent} invalid_oids: Set([str]) Set of oids of objects known to have obsolete state. loaded_oids : Set([str]) Set of oids of objects that were in the SAVED state at some time during the current transaction.