def main(fname): fs = FileStorage(fname, read_only=1) cn = ZODB.DB(fs).open() rt = cn.root() todo = [] add_if_new_persistent(todo, rt, '') found = 0 while todo: obj, path = todo.pop(0) found += 1 if not path: print "<root>", repr(obj) else: print path, repr(obj) mod = str(obj.__class__.__module__) if mod.startswith("BTrees"): if hasattr(obj, "_check"): try: obj._check() except AssertionError, msg: print "*" * 60 print msg print "*" * 60 try: check(obj) except AssertionError, msg: print "*" * 60 print msg print "*" * 60
def testDeletes(self):
    """Delete keys in every possible order, validating the tree after
    each single removal.

    This is a tough test.  Previous failure modes included:
    assertion failures in _checkRanges; "Invalid firstbucket pointer"
    failures at seemingly random times out of the BTree destructor;
    and, under Python 2.3 CVS, baffling tp_compare RuntimeWarnings,
    possibly from memory corruption after a BTree goes insane.

    On CPython in PURE_PYTHON mode this is a *slow* test (15+s on a
    2015 laptop).
    """
    from BTrees.check import check
    t, keys = self._build_degenerate_tree()
    for order in permutations(keys):
        t, keys = self._build_degenerate_tree()
        for k in order:
            t.remove(k)
            keys.remove(k)
            t._check()
            check(t)
            self._checkRanges(t, keys)
        # Every key is gone, so the tree must serialize as empty.
        self.assertEqual(t.__getstate__(), None)
        # A damaged tree may trigger "invalid firstbucket pointer" in
        # its destructor; force that here so any failure is attributed
        # to this test rather than some unrelated later line.
        del t
def testKeysSwapped(self): # Damage an invariant by swapping two key/value pairs. s = self.state # Looks like (state, first_bucket) # where state looks like (bucket0, 15, bucket1). (b0, num, b1), firstbucket = s self.assertEqual(b0[4], 8) self.assertEqual(b0[5], 10) b0state = b0.__getstate__() self.assertEqual(len(b0state), 2) # b0state looks like # ((k0, v0, k1, v1, ...), nextbucket) pairs, nextbucket = b0state self.assertEqual(pairs[8], 4) self.assertEqual(pairs[9], 8) self.assertEqual(pairs[10], 5) self.assertEqual(pairs[11], 10) newpairs = pairs[:8] + (5, 10, 4, 8) + pairs[12:] b0.__setstate__((newpairs, nextbucket)) self.t._check() # not caught try: check(self.t) except AssertionError, detail: self.failUnless(str(detail).find( "key 5 at index 4 >= key 4 at index 5") > 0)
def testDeletes(self):
    """Remove keys in all possible orders, checking the tree along the
    way.

    This is a tough test; earlier failure modes included _checkRanges
    assertion failures, "Invalid firstbucket pointer" errors coming
    out of the BTree destructor at seemingly random times, and
    (Python 2.3 CVS) tp_compare RuntimeWarnings, possibly due to
    memory corruption after a BTree goes insane.
    """
    from BTrees.check import check
    t, keys = self._build_degenerate_tree()
    for oneperm in permutations(keys):
        t, keys = self._build_degenerate_tree()
        for key in oneperm:
            t.remove(key)
            keys.remove(key)
            t._check()
            check(t)
            self._checkRanges(t, keys)
        # All keys removed -> the tree should serialize as empty.
        self.assertEqual(t.__getstate__(), None)
        # Trigger the destructor now, so a damaged tree's "invalid
        # firstbucket pointer" failure doesn't surface as a baffling
        # error at some unrelated line.
        del t
def is_ordered(self, item):
    """Return True when *item* is None or passes the structural
    check(); return False when check() raises AssertionError."""
    if item is None:
        return True
    try:
        check(item)
    except AssertionError:
        return False
    else:
        return True
def testNormal(self):
    """An undamaged tree must satisfy both the C-level _check and the
    structural check() without raising."""
    state = self.state
    # state looks like (btree_state, first_bucket), where btree_state
    # looks like (bucket0, 15, bucket1).
    self.assertEqual(len(state), 2)
    self.assertEqual(len(state[0]), 3)
    self.assertEqual(state[0][1], 15)
    self.t._check()  # shouldn't blow up
    check(self.t)    # shouldn't blow up
def testKeyTooSmall(self): # Damage an invariant by bumping the BTree key to 16. s = self.state news = (s[0][0], 16, s[0][2]), s[1] self.t.__setstate__(news) self.t._check() # not caught try: # Expecting "... key %r < lower bound %r at index %d" check(self.t) except AssertionError, detail: self.failUnless(str(detail).find("< lower bound") > 0)
def main(fname=None):
    """Scan every persistent object reachable from the root of the
    FileStorage at *fname* and run BTree sanity checks on objects
    from the BTrees package.

    When *fname* is None it is taken from the single command-line
    argument; with zero or multiple arguments the module docstring is
    printed and the process exits with status 2.  Check failures are
    printed between '*' banners; a final summary line reports the
    storage's index size versus the number of objects found.
    """
    if fname is None:
        import sys
        try:
            fname, = sys.argv[1:]
        except ValueError:
            # Wrong number of arguments; a bare except here would also
            # swallow SystemExit/KeyboardInterrupt, so catch only the
            # tuple-unpacking failure.
            print(__doc__)
            sys.exit(2)
    fs = FileStorage(fname, read_only=1)
    cn = ZODB.DB(fs).open()
    rt = cn.root()
    todo = []
    add_if_new_persistent(todo, rt, '')
    found = 0
    while todo:
        obj, path = todo.pop(0)
        found += 1
        if not path:
            print("<root>", repr(obj))
        else:
            print(path, repr(obj))
        mod = str(obj.__class__.__module__)
        if mod.startswith("BTrees"):
            # C-level internal consistency check, when available.
            if hasattr(obj, "_check"):
                try:
                    obj._check()
                except AssertionError as msg:
                    print("*" * 60)
                    print(msg)
                    print("*" * 60)
            # Structural check from BTrees.check.
            try:
                check(obj)
            except AssertionError as msg:
                print("*" * 60)
                print(msg)
                print("*" * 60)
        if found % 100 == 0:
            # Keep the pickle cache small while traversing the whole DB.
            cn.cacheMinimize()
        for k, v in get_subobjects(obj):
            if k.startswith('['):  # getitem
                newpath = "%s%s" % (path, k)
            else:
                newpath = "%s.%s" % (path, k)
            add_if_new_persistent(todo, v, newpath)
    print("total", len(fs._index), "found", found)
def _build_degenerate_tree(self):
    """Hand-assemble a maximally degenerate (but legal) IITreeSet via
    __setstate__ and return ``(tree, sorted_keys)``; the tree passes
    both _check() and check() before being returned."""
    from BTrees.IIBTree import IISet
    from BTrees.IIBTree import IITreeSet
    from BTrees.check import check

    # Build the buckets, chaining each one to its successor.
    bucket11 = IISet([11])
    buckets = {11: bucket11}
    successor = bucket11
    for key in (7, 5, 3, 1):
        b = IISet()
        b.__setstate__(((key,), successor))
        buckets[key] = b
        successor = b
    bucket1 = buckets[1]
    bucket3 = buckets[3]
    bucket5 = buckets[5]
    bucket7 = buckets[7]

    def interior(child, firstbucket):
        # Single-child indirection node over *child*.
        node = IITreeSet()
        node.__setstate__(((child,), firstbucket))
        return node

    # Deepest layers of indirection nodes (tree5 gets an extra level).
    tree1 = interior(bucket1, bucket1)
    tree3 = interior(bucket3, bucket3)
    tree5 = interior(interior(bucket5, bucket5), bucket5)
    tree7 = interior(bucket7, bucket7)
    tree11 = interior(bucket11, bucket11)

    # Paste together the middle layers.
    tree13 = IITreeSet()
    tree13.__setstate__(((tree1, 2, tree3), bucket1))
    tree5711lower = IITreeSet()
    tree5711lower.__setstate__(((tree5, 6, tree7, 10, tree11), bucket5))
    tree5711 = interior(tree5711lower, bucket5)

    # One more: the root.
    t = IITreeSet()
    t.__setstate__(((tree13, 4, tree5711), bucket1))
    t._check()
    check(t)
    return t, [1, 3, 5, 7, 11]
def hCheck(htree):
    """Recursively sanity-check *htree*: run check() on it, verify
    every key reported by keys() is actually retrievable, and recurse
    into any OOBTree values.  Returns 1 on success; raises
    AssertionError for a missing key."""
    check(htree)
    for key in htree.keys():
        # Guard clause: a key listed but not retrievable means damage.
        if not htree.has_key(key):
            raise AssertionError("Missing value for key: %s" % repr(key))
        value = htree[key]
        if isinstance(value, OOBTree):
            hCheck(value)
    return 1
def hCheck(htree):
    """Walk *htree* recursively, asserting structural sanity.

    check() validates the BTree structure itself; each listed key is
    then verified to be present, and OOBTree values are descended
    into.  Returns 1 when everything is consistent."""
    check(htree)
    for key in htree.keys():
        if htree.has_key(key):
            child = htree[key]
            if isinstance(child, OOBTree):
                hCheck(child)
        else:
            raise AssertionError(
                "Missing value for key: %s" % repr(key))
    return 1
def _check_tree(self, cn, tree):
    """Make sure the BTree is sane at the C level.

    Retries up to three times on ReadConflictError, aborting the
    transaction between attempts and re-raising once the attempts are
    exhausted.  Any other exception prints the tree via display() and
    propagates.

    NOTE(review): on success the loop still re-runs the checks until
    the retry count is exhausted (there is no break); the rechecks are
    redundant but harmless.
    """
    attempts_left = 3
    while attempts_left:
        attempts_left -= 1
        try:
            check(tree)
            tree._check()
        except ReadConflictError:
            if not attempts_left:
                raise
            transaction.abort()
        except:
            display(tree)
            raise
def __init__(self, tree, base_id=_marker):
    """Wrap *tree*, exposing a key iterator over either all of its
    sub-trees (default) or just the one named by *base_id*."""
    self._tree = tree
    if base_id is _marker:
        # Iterate every sub-tree; also mirror the tree's counter.
        tree_id_list = tree.getTreeIdList()
        self._count = tree._count
    else:
        tree_id_list = (base_id,)
    check = tree._checkObjectId
    # Lazily yield keys whose (tree id, key) pair passes the
    # object-id check; a None tree id selects the top-level _htree.
    self._keys = lambda: (
        key
        for tid in tree_id_list
        for key in (tree._htree if tid is None
                    else tree._getTree(tid)).keys()
        if check((tid, key)))
def cleanup_to_mapping(self, iface_name, mapping_key):
    """Delete the entry for *iface_name* from the BTree stored under
    *mapping_key*, repairing the BTree first if it is inconsistent.

    An inconsistent BTree can raise KeyError when entries are
    deleted, so check() is run first; if it detects damage the tree
    is replaced by a fresh copy of itself, which rebuilds a
    consistent structure.

    See http://do3.cc/blog/2012/09/264/debugging-zcrelations---broken-btrees/
    for more information.
    """
    mapping = self.catalog._name_TO_mapping
    try:
        check(mapping[mapping_key])
    except AssertionError:
        damaged = mapping[mapping_key]
        mapping[mapping_key] = damaged.__class__(damaged)
        logger.warning(
            'Inconsistent BTree detected and fixed by recreating it.')
    for iface in mapping[mapping_key].keys():
        if '{}.{}'.format(iface.__module__, iface.__name__) == iface_name:
            del mapping[mapping_key][iface]
            break
def __init__(self, tree, base_id=_marker):
    """Wrap *tree* and build a key iterator covering either every
    sub-tree (default) or only the sub-tree named by *base_id*."""
    self._tree = tree
    if base_id is _marker:
        tree_id_list = tree.getTreeIdList()
        self._count = tree._count
    else:
        tree_id_list = (base_id,)
    check = tree._checkObjectId

    def iter_keys():
        # Yield each key of each selected sub-tree that still passes
        # the (tree id, key) object-id check; a None id selects the
        # top-level _htree.
        for tid in tree_id_list:
            subtree = tree._htree if tid is None else tree._getTree(tid)
            for key in subtree.keys():
                if check((tid, key)):
                    yield key

    self._keys = iter_keys
def main(fname=None): if fname is None: import sys try: fname, = sys.argv[1:] except: print __doc__ sys.exit(2) fs = FileStorage(fname, read_only=1) cn = ZODB.DB(fs).open() rt = cn.root() todo = [] add_if_new_persistent(todo, rt, '') found = 0 while todo: obj, path = todo.pop(0) found += 1 if not path: print "<root>", repr(obj) else: print path, repr(obj) mod = str(obj.__class__.__module__) if mod.startswith("BTrees"): if hasattr(obj, "_check"): try: obj._check() except AssertionError, msg: print "*" * 60 print msg print "*" * 60 try: check(obj) except AssertionError, msg: print "*" * 60 print msg print "*" * 60
def _cleanup(self):
    """Cleans up errors in the BTrees.

    Certain ZODB bugs have caused BTrees to become slightly insane.
    Fortunately, there is a way to clean up damaged BTrees that
    always seems to work: make a new BTree containing the items()
    of the old one.

    Returns 1 if no damage was detected, or 0 if damage was
    detected and fixed.
    """
    from BTrees.check import check
    path = '/'.join(self.getPhysicalPath())
    try:
        # Validate the main id -> object tree.
        check(self._tree)
        for key in self._tree.keys():
            if key not in self._tree:
                raise AssertionError(
                    "Missing value for key: %s" % repr(key))
        # Validate the meta_type index and each of its sub-trees.
        check(self._mt_index)
        keys = set(self._tree.keys())
        for key, value in self._mt_index.items():
            if (key not in self._mt_index
                    or self._mt_index[key] is not value):
                raise AssertionError(
                    "Missing or incorrect meta_type index: %s"
                    % repr(key))
            check(value)
            for k in value.keys():
                if k not in value or k not in keys:
                    raise AssertionError(
                        "Missing values for meta_type index: %s"
                        % repr(key))
        return 1
    except AssertionError:
        # Logger.warn is a deprecated alias; use warning().
        LOG.warning('Detected damage to %s. Fixing now.' % path,
                    exc_info=sys.exc_info())
        try:
            # Rebuild the main tree, then rebuild the meta_type index,
            # dropping names that no longer exist in the main tree.
            self._tree = OOBTree(self._tree)
            keys = set(self._tree.keys())
            mt_index = OOBTree()
            for key, value in self._mt_index.items():
                for name in tuple(value.keys()):
                    if name not in keys:
                        del value[name]
                mt_index[key] = OIBTree(value)
            self._mt_index = mt_index
            new = len(keys)
            if self._count() != new:
                self._count.set(new)
        except Exception:
            # Was a bare except; Exception still re-raises everything
            # caught here but no longer intercepts SystemExit or
            # KeyboardInterrupt.
            LOG.error('Failed to fix %s.' % path,
                      exc_info=sys.exc_info())
            raise
        else:
            LOG.info('Fixed %s.' % path)
        return 0
def _cleanup(self):
    """Cleans up errors in the BTrees.

    Certain ZODB bugs have caused BTrees to become slightly insane.
    Fortunately, there is a way to clean up damaged BTrees that
    always seems to work: make a new BTree containing the items()
    of the old one.

    Returns 1 if no damage was detected, or 0 if damage was
    detected and fixed.
    """
    from BTrees.check import check
    location = '/'.join(self.getPhysicalPath())
    try:
        # Validate the main id -> object tree.
        check(self._tree)
        for key in self._tree.keys():
            if key not in self._tree:
                raise AssertionError(
                    "Missing value for key: %s" % repr(key))
        # Validate the meta_type index and each sub-tree hanging off it.
        check(self._mt_index)
        tree_keys = set(self._tree.keys())
        for mt, subtree in self._mt_index.items():
            if (mt not in self._mt_index
                    or self._mt_index[mt] is not subtree):
                raise AssertionError(
                    "Missing or incorrect meta_type index: %s"
                    % repr(mt))
            check(subtree)
            for name in subtree.keys():
                if name not in subtree or name not in tree_keys:
                    raise AssertionError(
                        "Missing values for meta_type index: %s"
                        % repr(mt))
        return 1
    except AssertionError:
        LOG.warning('Detected damage to %s. Fixing now.' % location,
                    exc_info=sys.exc_info())
        try:
            # Rebuild the main tree, then rebuild the meta_type index
            # keeping only names that survived in the main tree.
            self._tree = OOBTree(self._tree)
            tree_keys = set(self._tree.keys())
            rebuilt = OOBTree()
            for mt, subtree in self._mt_index.items():
                for name in tuple(subtree.keys()):
                    if name not in tree_keys:
                        del subtree[name]
                rebuilt[mt] = OIBTree(subtree)
            self._mt_index = rebuilt
            expected = len(tree_keys)
            if self._count() != expected:
                self._count.set(expected)
        except Exception:
            LOG.error('Failed to fix %s.' % location,
                      exc_info=sys.exc_info())
            raise
        else:
            LOG.info('Fixed %s.' % location)
        return 0
def _cleanup(self):
    """Cleans up errors in the BTrees.

    Certain ZODB bugs have caused BTrees to become slightly insane.
    Fortunately, there is a way to clean up damaged BTrees that
    always seems to work: make a new BTree containing the items()
    of the old one.

    Returns 1 if no damage was detected, or 0 if damage was
    detected and fixed.
    """
    from BTrees.check import check
    path = '/'.join(self.getPhysicalPath())
    try:
        # Validate the main id -> object tree.
        check(self._tree)
        for key in self._tree.keys():
            # A key listed by keys() but not retrievable means damage.
            if not self._tree.has_key(key):
                raise AssertionError(
                    "Missing value for key: %s" % repr(key))
        # Validate the meta_type index and each of its sub-trees.
        check(self._mt_index)
        for key, value in self._mt_index.items():
            if (not self._mt_index.has_key(key)
                or self._mt_index[key] is not value):
                raise AssertionError(
                    "Missing or incorrect meta_type index: %s"
                    % repr(key))
            check(value)
            for k in value.keys():
                if not value.has_key(k):
                    raise AssertionError(
                        "Missing values for meta_type index: %s"
                        % repr(key))
        return 1
    except AssertionError:
        # zLOG-style tuple logging (subsystem, severity, message).
        LOG('BTreeFolder2', WARNING,
            'Detected damage to %s. Fixing now.' % path,
            error=sys.exc_info())
        try:
            # Copy-construct both trees; rebuilding from items() of
            # the old tree restores a consistent structure.
            self._tree = OOBTree(self._tree)
            mt_index = OOBTree()
            for key, value in self._mt_index.items():
                mt_index[key] = OIBTree(value)
            self._mt_index = mt_index
        except:
            # Deliberate catch-all: log the failure, then re-raise.
            LOG('BTreeFolder2', ERROR, 'Failed to fix %s.' % path,
                error=sys.exc_info())
            raise
        else:
            LOG('BTreeFolder2', INFO, 'Fixed %s.' % path)
        return 0
def _callFUT(self, tree):
    """Invoke the function under test (BTrees.check.check) on *tree*
    and return its result."""
    from BTrees.check import check as fut
    return fut(tree)
# Top-level driver: build a DAG from the trace files, sanity-check the
# per-rank BTrees, and write out the critical path.
# NOTE(review): `parser` is defined earlier in the file (not shown here).
args = parser.parse_args()
# Log everything to a fixed absolute path on the cluster filesystem.
logging.basicConfig(filename='/projects/dami9546/HPSC/final/hostgraph_test2120.out', \
    format='%(asctime)s %(message)s',level=logging.DEBUG)
logging.info('Starting graph script')
# D presumably wraps the slog2 trace + host mapping — confirm DAG API.
D = DAG(args.in_file, args.host_mapping)
Grf, H = D.slog2Dict()
I = D.connectGraph(Grf,H)
for i in H.keys():
    logging.info("type of rank_dict[i]['MPI_Send']: %s" % (type(H[i]['MPI_Send'])))
    logging.info("type of rank_dict[i]['MPI_Wait']: %s" % (type(H[i]['MPI_Wait'])))
    logging.info("type of rank_dict[i]: %s" % (type(H[i])))
    try:
        # Structural consistency check of the per-rank containers;
        # presumably BTrees.check.check — confirm import.
        check(H[i]['MPI_Send'])
        check(H[i]['MPI_Wait'])
        logging.info('my type: %s' % type(i))
        logging.info('%d is clean' % i)
    except:
        # Catch-all: any failure marks the rank as corrupt but the
        # scan continues over the remaining ranks.
        logging.info('my type: %s' % type(i))
        logging.error('%d is corrupt' % i)
logging.info("DAG size: %d" % (I.size()))
logging.info("DAG numnodes %d" % (len(I)))
logging.info("number of self-loops: %d" % (I.number_of_selfloops()))
crit_path = D.find_critical(I)
# NOTE(review): writing str lines to a file opened 'wb' — Python 2 only.
with open(args.crit_file, 'wb') as c:
    crit_path = map(lambda x:x+'\n', crit_path)
    c.writelines(crit_path)