def get_crawler(self, start_oid=ROOT_OID, batch_size=100):
    """(start_oid:str = ROOT_OID, batch_size:int = 100) -> sequence(PersistentObject)

    Returns a generator for the sequence of objects in a breadth first
    traversal of the object graph, starting at the given start_oid.
    The objects in the sequence have their state loaded at the same time,
    so this can be used to initialize the object cache.

    This uses the storage's bulk_load() method to make it faster.  The
    batch_size argument sets the number of object records loaded on each
    call to bulk_load().
    """
    oid_record_sequence = self.storage.gen_oid_record(
        start_oid=start_oid, batch_size=batch_size)
    for oid, record in oid_record_sequence:
        obj = self.cache.get(oid)
        if obj is not None and not obj._p_is_ghost():
            # Already cached with state loaded; no need to read the record.
            yield obj
        else:
            # unpack_record yields (oid, pickle data, ref data); only the
            # pickle data is used here, so discard the other fields.
            _, data, _ = unpack_record(record)
            if obj is None:
                # Not cached at all: materialize an instance of the
                # pickled class and register it in the cache.
                klass = loads(data)
                obj = self.cache.get_instance(oid, klass, self)
            # Load the state into the (possibly ghost) instance.
            state = self.reader.get_state(data, load=True)
            obj.__setstate__(state)
            obj._p_set_status_saved()
            yield obj
def gen_every_instance(connection, *classes):
    """(connection:Connection, *classes:(class)) -> sequence [PersistentObject]
    Generate all PersistentObject instances that are instances of any of
    the given classes."""
    storage = connection.get_storage()
    for oid, record in storage.gen_oid_record():
        # unpack_record returns (oid, pickled state, refs); the pickled
        # state starts with the class, which loads() recovers.
        stored_class = loads(unpack_record(record)[1])
        if issubclass(stored_class, classes):
            yield connection.get(oid)
def _build_index(self, repair):
    """Rebuild self.index (oid -> object record offset) from the file.

    Reads the index snapshot stored at the offset recorded in the file
    header, then scans forward over any transactions appended after that
    snapshot, folding their records into the index.  If the file ends in
    a malformed (e.g. truncated) transaction: raise when `repair` is
    false, otherwise cut the bad transaction off the end of the file.
    """
    self.fp.seek(0)
    # Verify the file header before trusting anything else in the file.
    if read(self.fp, len(self.MAGIC)) != self.MAGIC:
        raise IOError("invalid storage (missing magic in %r)" % self.fp)
    index_offset = read_int8(self.fp)
    assert index_offset > 0
    self.fp.seek(index_offset)
    # The snapshot is a compressed pickle of an oid -> offset mapping.
    tmp_index = loads(decompress(read_int8_str(self.fp)))
    self.index = {}
    def oid_as_bytes(oid):
        # Normalize snapshot keys to bytes; snapshots written by other
        # versions may hold text oids (latin1 maps 1:1 onto bytes).
        if isinstance(oid, byte_string):
            return oid
        else:
            return oid.encode('latin1')
    for tmp_oid in tmp_index:
        self.index[oid_as_bytes(tmp_oid)] = tmp_index[tmp_oid]
    del tmp_index
    while 1:
        # Read one transaction each time here.
        oids = {}
        transaction_offset = self.fp.tell()
        try:
            while 1:
                object_record_offset = self.fp.tell()
                record = self._read_block()
                if len(record) == 0:
                    break # normal termination
                if len(record) < 12:
                    raise ShortRead("Bad record size")
                # First 8 bytes of each record are the oid.
                oid = record[0:8]
                oids[oid] = object_record_offset
            # We've reached the normal end of a transaction.
            # Only a complete transaction updates the index.
            self.index.update(oids)
            oids.clear()
        except (ValueError, IOError):
            # fp.tell() > transaction_offset means we read part of a
            # transaction before failing: the tail is malformed.
            if self.fp.tell() > transaction_offset:
                if not repair:
                    raise
                # The transaction was malformed.  Attempt repair by
                # truncating the file back to the last good transaction.
                self.fp.seek(transaction_offset)
                self.fp.truncate()
            break
def get(self, oid):
    """(oid:str|int|long) -> PersistentObject | None
    Return object for `oid`. The object may be a ghost.
    """
    # Accept integer oids by converting them to the 8-byte string form.
    if not isinstance(oid, byte_string):
        oid = int8_to_str(oid)
    cached = self.cache.get(oid)
    if cached is not None:
        return cached
    try:
        data = self.get_stored_pickle(oid)
    except KeyError:
        # No record stored under this oid.
        return None
    # Build a fresh instance of the pickled class, load its state,
    # and mark it clean.
    instance = self.cache.get_instance(oid, loads(data), self)
    instance.__setstate__(self.reader.get_state(data, load=True))
    instance._p_set_status_saved()
    return instance
def get_ghost(self, data):
    """Return a ghost instance of the class pickled in `data`.

    The instance is created via __new__ (bypassing __init__) and marked
    as a ghost; its state will be loaded on first access.
    """
    ghost_class = loads(data)
    ghost = ghost_class.__new__(ghost_class)
    ghost._p_set_status_ghost()
    return ghost
def pickling(self):
    # Round-trip a Persistent instance through pickle protocol 2 and
    # check the restored object keeps its class.
    original = Persistent()
    restored = loads(dumps(original, 2))
    assert isinstance(restored, Persistent)
def pickling(self):
    # Round-trip a slotted-with-__dict__ instance through pickle
    # protocol 2 and check the restored object keeps its class.
    original = SlottedPersistentObjectWithDict()
    restored = loads(dumps(original, 2))
    assert isinstance(restored, SlottedPersistentObjectWithDict)
def test_pickling(self):
    # Round-trip a PersistentObject instance through pickle protocol 2
    # and check the restored object keeps its class.
    original = PersistentObject()
    restored = loads(dumps(original, 2))
    assert isinstance(restored, PersistentObject)