def setUp(self):
    """Set up a placeless test environment and capture a small OOBTree state.

    Registers the ZodbObjectHistory adapter, then builds a three-entry
    OOBTree and stores its pickled state wrapped in an EmptyOOBTreeState
    on ``self.state`` for the tests to inspect.
    """
    setup.placelessSetUp()
    provideAdapter(ZodbObjectHistory)
    btree = OOBTree()
    btree[1] = 42
    btree[2] = 23
    btree[3] = 17
    btree_state = btree.__getstate__()
    self.state = EmptyOOBTreeState(None, btree_state, None)
def test_normal(self):
    """_callFUT must accept a multi-bucket OOBTree without raising."""
    from BTrees.OOBTree import OOBTree
    btree = OOBTree()
    # 31 sequential insertions force the tree to split into buckets;
    # insertion order is kept sequential so the internal structure
    # (and hence the state asserted below) stays deterministic.
    for key in range(31):
        btree[key] = key * 2
    btree_state = btree.__getstate__()
    self.assertEqual(len(btree_state), 2)
    self.assertEqual(len(btree_state[0]), 3)
    self.assertEqual(btree_state[0][1], 15)
    self._callFUT(btree)  # noraise
def test_normal(self):
    """Exercise _callFUT on an OOBTree large enough to have split buckets."""
    from BTrees.OOBTree import OOBTree
    sample = OOBTree()
    # Keys inserted in ascending order so the resulting bucket layout
    # is deterministic and the state assertions below hold.
    for n in range(31):
        sample[n] = n * 2
    pickled = sample.__getstate__()
    self.assertEqual(len(pickled), 2)
    self.assertEqual(len(pickled[0]), 3)
    self.assertEqual(pickled[0][1], 15)
    self._callFUT(sample)  # noraise
def _constructAnnotatedHistory(self, max=10):
    """Reconstruct historical revisions of archetypes objects.

    Merges revisions to self with revisions to archetypes-related items
    in __annotations__. Yields at most max recent revisions, newest
    first, as revision dicts (the dicts produced by _objectRevisions,
    with 'object', 'size' and 'tid' entries — assumed from usage below;
    confirm against _objectRevisions).
    """
    # All relevant historical states by transaction id.
    # For every tid, keep a dict with object revisions, keyed on annotation
    # id, or None for self and '__annotations__' for the ann OOBTree.
    # Initialize with self revisions.
    history = dict([(tid, {None: rev})
                    for (tid, rev) in _objectRevisions(self, max)])
    if not getattr(self, '__annotations__', None):
        # No annotations, just return the history we have for self.
        # Note that if this object had __annotations__ in a past
        # transaction they will be ignored! Working around this is a
        # YAGNI I think though.
        for tid in sorted(history.keys()):
            yield history[tid][None]
        return
    # Now find all __annotation__ revisions, and the annotation keys
    # used in those.
    annotation_key_objects = {}
    # True (non-empty list) when key starts with any archetypes prefix.
    isatkey = lambda k, aak=AT_ANN_KEYS: filter(k.startswith, aak)
    # Loop over max revisions of the __annotations__ object to retrieve
    # all keys (and more importantly, their objects so we can get
    # revisions)
    for tid, rev in _objectRevisions(self.__annotations__, max):
        history.setdefault(tid, {})['__annotations__'] = rev
        revision = rev['object']
        for key in itertools.ifilter(isatkey, revision.iterkeys()):
            if not hasattr(revision[key], '_p_jar'):
                continue  # Not persistent
            if key not in annotation_key_objects:
                annotation_key_objects[key] = revision[key]
    # For all annotation keys, get their revisions
    for key, obj in annotation_key_objects.iteritems():
        for tid, rev in _objectRevisions(obj, max):
            history.setdefault(tid, {})[key] = rev
    del annotation_key_objects
    # Now we merge the annotation and object revisions into one for each
    # transaction id, and yield the results. Newest tid first.
    tids = sorted(history.iterkeys(), reverse=True)

    def find_revision(tids, key):
        """First revision of key in a series of tids"""
        has_revision = lambda t, h=history, k=key: k in h[t]
        next_tid = itertools.ifilter(has_revision, tids).next()
        return history[next_tid][key]

    for i, tid in enumerate(tids[:max]):
        revision = find_revision(tids[i:], None)
        obj = revision['object']
        # Track size to maintain correct metadata
        size = revision['size']
        anns_rev = find_revision(tids[i:], '__annotations__')
        size += anns_rev['size']
        anns = anns_rev['object']
        # We use a temporary OOBTree to avoid _p_jar complaints from the
        # transaction machinery
        tempbtree = OOBTree()
        tempbtree.__setstate__(anns.__getstate__())
        # Find annotation revisions and insert
        for key in itertools.ifilter(isatkey, tempbtree.iterkeys()):
            if not hasattr(tempbtree[key], '_p_jar'):
                continue  # Not persistent
            value_rev = find_revision(tids[i:], key)
            size += value_rev['size']
            tempbtree[key] = value_rev['object']
        # Now transfer the tempbtree state over to anns, effectively
        # bypassing the transaction registry while maintaining BTree
        # integrity
        anns.__setstate__(tempbtree.__getstate__())
        anns._p_changed = 0
        del tempbtree
        # Do a similar hack to set anns on the main object
        state = obj.__getstate__()
        state['__annotations__'] = anns
        obj.__setstate__(state)
        obj._p_changed = 0
        # Update revision metadata if needed
        if revision['tid'] != tid:
            # any other revision will do; only size and object are unique
            revision = history[tid].values()[0].copy()
            revision['object'] = obj
            # Correct size based on merged records
            revision['size'] = size
        # clean up as we go
        del history[tid]
        yield revision
def __getstate__(self):
    """Return a two-tuple of the Persistent state and the OOBTree state.

    Combines both base classes' pickled states so the object can be
    restored by unpacking the pair again.
    """
    persistent_part = Persistent.__getstate__(self)
    btree_part = OOBTree.__getstate__(self)
    return (persistent_part, btree_part)
def _constructAnnotatedHistory(self, max=10):
    """Reconstruct historical revisions of archetypes objects.

    Merges revisions to self with revisions to archetypes-related items
    in __annotations__. Yields at most max recent revisions, newest
    first, as revision dicts (the dicts produced by _objectRevisions,
    with 'object', 'size' and 'tid' entries — assumed from usage below;
    confirm against _objectRevisions).
    """
    # All relevant historical states by transaction id.
    # For every tid, keep a dict with object revisions, keyed on annotation
    # id, or None for self and '__annotations__' for the ann OOBTree.
    # Initialize with self revisions.
    history = dict([(tid, {None: rev})
                    for (tid, rev) in _objectRevisions(self, max)])
    if not getattr(self, '__annotations__', None):
        # No annotations, just return the history we have for self.
        # Note that if this object had __annotations__ in a past
        # transaction they will be ignored! Working around this is a
        # YAGNI I think though.
        for tid in sorted(history.keys()):
            yield history[tid][None]
        return
    # Now find all __annotation__ revisions, and the annotation keys
    # used in those.
    annotation_key_objects = {}
    # True (non-empty list) when key starts with any archetypes prefix.
    isatkey = lambda k, aak=AT_ANN_KEYS: filter(k.startswith, aak)
    # Loop over max revisions of the __annotations__ object to retrieve
    # all keys (and more importantly, their objects so we can get revisions)
    for tid, rev in _objectRevisions(self.__annotations__, max):
        history.setdefault(tid, {})['__annotations__'] = rev
        revision = rev['object']
        for key in itertools.ifilter(isatkey, revision.iterkeys()):
            if not hasattr(revision[key], '_p_jar'):
                continue  # Not persistent
            if key not in annotation_key_objects:
                annotation_key_objects[key] = revision[key]
    # For all annotation keys, get their revisions
    for key, obj in annotation_key_objects.iteritems():
        for tid, rev in _objectRevisions(obj, max):
            history.setdefault(tid, {})[key] = rev
    del annotation_key_objects
    # Now we merge the annotation and object revisions into one for each
    # transaction id, and yield the results. Newest tid first.
    tids = sorted(history.iterkeys(), reverse=True)

    def find_revision(tids, key):
        """First revision of key in a series of tids."""
        has_revision = lambda t, h=history, k=key: k in h[t]
        next_tid = itertools.ifilter(has_revision, tids).next()
        return history[next_tid][key]

    for i, tid in enumerate(tids[:max]):
        revision = find_revision(tids[i:], None)
        obj = revision['object']
        # Track size to maintain correct metadata
        size = revision['size']
        anns_rev = find_revision(tids[i:], '__annotations__')
        size += anns_rev['size']
        anns = anns_rev['object']
        # We use a temporary OOBTree to avoid _p_jar complaints from the
        # transaction machinery
        tempbtree = OOBTree()
        tempbtree.__setstate__(anns.__getstate__())
        # Find annotation revisions and insert
        for key in itertools.ifilter(isatkey, tempbtree.iterkeys()):
            if not hasattr(tempbtree[key], '_p_jar'):
                continue  # Not persistent
            value_rev = find_revision(tids[i:], key)
            size += value_rev['size']
            tempbtree[key] = value_rev['object']
        # Now transfer the tempbtree state over to anns, effectively
        # bypassing the transaction registry while maintaining BTree
        # integrity
        anns.__setstate__(tempbtree.__getstate__())
        anns._p_changed = 0
        del tempbtree
        # Do a similar hack to set anns on the main object
        state = obj.__getstate__()
        state['__annotations__'] = anns
        obj.__setstate__(state)
        obj._p_changed = 0
        # Update revision metadata if needed
        if revision['tid'] != tid:
            # any other revision will do; only size and object are unique
            revision = history[tid].values()[0].copy()
            revision['object'] = obj
            # Correct size based on merged records
            revision['size'] = size
        # clean up as we go
        del history[tid]
        yield revision