def __init__(self, fn, callback=None): warnings.warn("deprecated", DeprecationWarning) precondition(hasattr(fn, 'im_self'), "fn is required to be a bound method.") self._cleanupcallback = callback self._obj = ref(fn.im_self, self.call_cleanup_cb) self._meth = fn.im_func
def next(self):
    if self.i is self.c.ts:
        # We've reached the tail sentinel, so the traversal is complete.
        raise StopIteration
    precondition(self.c.d.has_key(self.i), "The iterated LRUCache doesn't have the next key.  Most likely this is because someone altered the contents of the LRUCache while the iteration was in progress.", self.i, self.c)
    # Each entry in the dict is a [value, prev-key, next-key] triple forming a
    # doubly-linked list; follow the next-key pointer.
    (v, p, n,) = self.c.d[self.i]
    self.i = n
    return v
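# A minimal usage sketch (hedged: `fetch' is a hypothetical expensive
# function, and the dict-style accessors are assumed from the class's
# dict-like interface, not copied from this module).  Iteration walks the
# linked list from head sentinel to tail sentinel, as in next() above:
#
#     cache = LRUCache(maxsize=4)
#     for url in urls:
#         if not cache.has_key(url):
#             cache[url] = fetch(url)
#     for v in cache.values():
#         print v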
def measure_ref_leakage(f, numsamples=2**7, iterspersample=2**4, *args, **kwargs):
    """
    The idea is we are going to use sys.gettotalrefcount() to see how many
    references are extant, and keep track of that number with respect to how
    many times we've invoked f(), and return the slope of the best linear fit.

    @param numsamples: recommended: 2**7

    @param iterspersample: how many times f() should be invoked per sample;
        Basically, choose iterspersample such that
        (iterspersample * numsamples * how-long-it-takes-to-compute-f()) is
        slightly less than how long you are willing to wait for this leak test.

    @return: the slope of the best linear fit, which can be interpreted as
        'the approximate number of Python references created and not
        nullified per invocation of f()'
    """
    precondition(numsamples > 0, "numsamples is required to be positive.", numsamples)
    precondition(iterspersample > 0, "iterspersample is required to be positive.", iterspersample)

    try:
        sys.gettotalrefcount()
    except AttributeError, le:
        raise AttributeError(le, "Probably this is not a debug build of Python, so it doesn't have a sys.gettotalrefcount function.")
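    # The measurement body was missing here; the sampling loop and
    # least-squares slope below are a sketch reconstructed to mirror
    # measure_obj_leakage()/measure_mem_leakage(), with the assumed name
    # `resnumrefs' in place of their result lists.
    resiters = [None]*numsamples # values: iters
    resnumrefs = [None]*numsamples # values: total refcounts
    totaliters = 0
    for i in range(numsamples):
        for j in range(iterspersample):
            f(*args, **kwargs)
        totaliters = totaliters + iterspersample
        resiters[i] = totaliters
        gc.collect()
        resnumrefs[i] = sys.gettotalrefcount()

    avex = float(reduce(operator.__add__, resiters)) / len(resiters)
    avey = float(reduce(operator.__add__, resnumrefs)) / len(resnumrefs)
    sxy = reduce(operator.__add__, map(lambda a, avex=avex, avey=avey: (a[0] - avex) * (a[1] - avey), zip(resiters, resnumrefs)))
    sxx = reduce(operator.__add__, map(lambda a, avex=avex: (a - avex) ** 2, resiters))
    if sxx == 0:
        return None
    return sxy / sxx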
def next(self): precondition(self.i <= len(self.c._lru), "The iterated SmallLRUCache doesn't have this many elements. Most likely this is because someone altered the contents of the LRUCache while the iteration was in progress.", self.i, self.c) precondition(dict.has_key(self.c, self.c._lru[self.i]), "The iterated SmallLRUCache doesn't have this key. Most likely this is because someone altered the contents of the LRUCache while the iteration was in progress.", self.i, self.c._lru[self.i], self.c) if self.i == len(self.c._lru): raise StopIteration k = self.i self.i += 1 return dict.__getitem__(self.c, k)
def next(self):
    if self.i is self.c.hs:
        # We've walked back to the head sentinel, so the reverse traversal
        # is complete.
        raise StopIteration
    k = self.i
    precondition(self.c.d.has_key(k), "The iterated OrderedDict doesn't have the next key.  Most likely this is because someone altered the contents of the OrderedDict while the iteration was in progress.", k, self.c)
    # Follow the prev-key pointer to iterate in reverse insertion order.
    (v, p, n,) = self.c.d[k]
    self.i = p
    return k
def __init__(self, initialdata={}, maxsize=128):
    precondition(maxsize > 0)
    self.m = maxsize+2 # The +2 is for the head and tail nodes.
    self.d = {} # k: k, v: [v, prev, next,] # the dict
    self.hs = LRUCache.Sentinel("hs")
    self.ts = LRUCache.Sentinel("ts")
    self.d[self.hs] = [None, self.hs, self.ts,] # This allows us to use sentinels as normal nodes.
    self.d[self.ts] = [None, self.hs, self.ts,] # This allows us to use sentinels as normal nodes.
    self.update(initialdata)
    assert self._assert_invariants()
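# A sketch of the resulting layout (hedged: the splicing helper shown here
# illustrates the sentinel technique and is not copied from this module's
# accessors).  Because the sentinels are stored as ordinary
# [value, prev-key, next-key] triples, insertion at the head needs no
# empty-list special cases:
#
#     hs <-> ts                      # empty cache
#     hs <-> k1 <-> ts               # after inserting k1 at the head
#
#     def _insert_at_head(self, k, v):
#         (_, _, oldfirst,) = self.d[self.hs]
#         self.d[k] = [v, self.hs, oldfirst,]
#         self.d[self.hs][2] = k
#         self.d[oldfirst][1] = k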
def measure_mem_leakage(f, numsamples=2**7, iterspersample=2**4, *args, **kwargs):
    """
    This does the same thing as measure_obj_leakage() but instead of using
    count_all_objects() it uses get_mem_used_res(), which is currently
    implemented for Linux and barely implemented for Mac OS X.

    @param numsamples: recommended: 2**7

    @param iterspersample: how many times f() should be invoked per sample;
        Basically, choose iterspersample such that
        (iterspersample * numsamples * how-long-it-takes-to-compute-f()) is
        slightly less than how long you are willing to wait for this leak test.

    @return: the slope of the best linear fit, which can be interpreted as
        'the approximate number of system bytes allocated and not freed per
        invocation of f()'
    """
    precondition(numsamples > 0, "numsamples is required to be positive.", numsamples)
    precondition(iterspersample > 0, "iterspersample is required to be positive.", iterspersample)

    resiters = [None]*numsamples # values: iters
    resmemusage = [None]*numsamples # values: memusage
    totaliters = 0
    for i in range(numsamples):
        for j in range(iterspersample):
            f(*args, **kwargs)
        totaliters = totaliters + iterspersample
        resiters[i] = totaliters
        gc.collect()
        resmemusage[i] = get_mem_used_res()
        # print "totaliters: %s, memusage: %s" % (resiters[-1], resmemusage[-1],)

    avex = float(reduce(operator.__add__, resiters)) / len(resiters)
    avey = float(reduce(operator.__add__, resmemusage)) / len(resmemusage)
    sxy = reduce(operator.__add__, map(lambda a, avex=avex, avey=avey: (a[0] - avex) * (a[1] - avey), zip(resiters, resmemusage)))
    sxx = reduce(operator.__add__, map(lambda a, avex=avex: (a - avex) ** 2, resiters))
    if sxx == 0:
        return None
    return sxy / sxx
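# Hedged usage sketch: `churn' is a hypothetical function under test.  A
# None return means sxx was zero, i.e. the iteration counts had no variance
# and no slope could be fit (e.g. numsamples == 1):
#
#     slope = measure_mem_leakage(churn, numsamples=2**5, iterspersample=2**3)
#     if slope is None:
#         print "not enough samples to fit a slope"
#     else:
#         print "approx. %0.1f bytes leaked per call" % slope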
def measure_obj_leakage(f, numsamples=2**7, iterspersample=2**4, *args, **kwargs):
    """
    The idea is we are going to use count_all_objects() to see how many
    objects are in use, and keep track of that number with respect to how
    many times we've invoked f(), and return the slope of the best linear fit.

    @param numsamples: recommended: 2**7

    @param iterspersample: how many times f() should be invoked per sample;
        Basically, choose iterspersample such that
        (iterspersample * numsamples * how-long-it-takes-to-compute-f()) is
        slightly less than how long you are willing to wait for this leak test.

    @return: the slope of the best linear fit, which can be interpreted as
        'the approximate number of Python objects created and not destroyed
        per invocation of f()'
    """
    precondition(numsamples > 0, "numsamples is required to be positive.", numsamples)
    precondition(iterspersample > 0, "iterspersample is required to be positive.", iterspersample)

    resiters = [None]*numsamples # values: iters
    resnumobjs = [None]*numsamples # values: numobjs
    totaliters = 0
    for i in range(numsamples):
        for j in range(iterspersample):
            f(*args, **kwargs)
        totaliters = totaliters + iterspersample
        resiters[i] = totaliters
        gc.collect()
        resnumobjs[i] = count_all_objects()
        # print "totaliters: %s, numobjs: %s" % (resiters[-1], resnumobjs[-1],)

    avex = float(reduce(operator.__add__, resiters)) / len(resiters)
    avey = float(reduce(operator.__add__, resnumobjs)) / len(resnumobjs)
    sxy = reduce(operator.__add__, map(lambda a, avex=avex, avey=avey: (a[0] - avex) * (a[1] - avey), zip(resiters, resnumobjs)))
    sxx = reduce(operator.__add__, map(lambda a, avex=avex: (a - avex) ** 2, resiters))
    return sxy / sxx
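# Hedged usage sketch: `_leaked' and `_leaky' are illustrative names, not
# part of this module.  Each call appends one fresh list to a module-level
# accumulator, so the fitted slope should come out close to one leaked
# object per invocation:
#
#     _leaked = []
#     def _leaky():
#         _leaked.append([])
#     print measure_obj_leakage(_leaky)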
def __init__(self, fn, callback=None): warnings.warn("deprecated", DeprecationWarning) precondition(hasattr(fn, "im_self"), "fn is required to be a bound method.") self._cleanupcallback = callback self._obj = ref(fn.im_self, self.call_cleanup_cb) self._meth = fn.im_func