def objgraph(self, request):
    import objgraph
    limit = int(request.args.get('limit', 10))
    types = objgraph.most_common_types(limit=limit, shortnames=False)
    leaking = objgraph.most_common_types(
        limit=limit,
        objects=objgraph.get_leaking_objects(),
        shortnames=False,
    )
    # html only links
    limits = [
        'Number of items: ',
        Link('{}', request.path + '?limit={}', 10).html(),
        Link('{}', request.path + '?limit={}', 20).html(),
        Link('{}', request.path + '?limit={}', 50).html(),
    ]
    return info_response(
        request,
        'Python Objects',
        Content(
            'Python Objects for Worker pid {}'.format(os.getpid()),
            'h1',
        ),
        Content(' '.join(limits), 'p', text=False, escape=False),
        Content('Most Common Objects', 'h2'),
        Table(types),
        Content('Leaking Objects (no referrer)', 'h2'),
        Table(leaking),
    )
def objgraph(self, request):
    import objgraph
    limit = int(request.args.get('limit', 10))
    types = objgraph.most_common_types(limit=limit, shortnames=False)
    leaking = objgraph.most_common_types(
        limit=limit,
        objects=objgraph.get_leaking_objects(),
        shortnames=False,
    )
    # html only links
    limits = [
        'Number of items: ',
        Link('{}', request.path + '?limit={}', 10).html(),
        Link('{}', request.path + '?limit={}', 20).html(),
        Link('{}', request.path + '?limit={}', 50).html(),
    ]
    return info_response(
        request.environ,
        'Python Objects',
        Content(
            'Python Objects for Worker pid {}'.format(os.getpid()),
            'h1',
        ),
        Content(' '.join(limits), 'p', text=False, escape=False),
        Content('Most Common Objects', 'h2'),
        Table(types, id='types'),
        Content('Leaking Objects (no referrer)', 'h2'),
        Table(leaking, id='leaking'),
    )
def _dump_memory_impl(limit: int) -> Mapping[str, Any]:
    nb_collected = [gc.collect(generation) for generation in range(3)]
    return {
        'nb_collected': nb_collected,
        'most_common_types': objgraph.most_common_types(limit=limit, shortnames=False),
        'leaking_objects': objgraph.most_common_types(
            limit=limit, shortnames=False, objects=objgraph.get_leaking_objects()),
    }
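# A standalone sketch of the same kind of dump as _dump_memory_impl above
# (assumed usage, not from the original project): collect first so the
# counts reflect live objects only, then serialize the per-type counts.
import gc
import json

import objgraph

gc.collect()
print(json.dumps(dict(objgraph.most_common_types(limit=10, shortnames=False)), indent=2))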
def __init__(self, *a, **kw):
    super(ObjGraphWidget, self).__init__(*a, **kw)
    if objgraph is None:
        self.setLayout(Row(QtGui.QLabel(
            "objgraph is not installed (and you probably don't have GraphViz "
            "either...) "), None))
        return
    self.inputWidget = QtGui.QLineEdit()
    self.inputWidget.setText(inputSetting.value(""))
    self.inputWidget.textChanged.connect(inputSetting.setValue)
    self.listWidget = QtGui.QListWidget()
    self.scrollArea = QtGui.QScrollArea()
    self.imageView = QtGui.QLabel()
    #self.scrollArea.setMinimumSize(300, 300)
    self.scrollArea.setWidget(self.imageView)
    for name, count in objgraph.most_common_types(100):
        item = QtGui.QListWidgetItem()
        item.setText("%s (%d)" % (name, count))
        item.setData(Qt.UserRole, name)
        self.listWidget.addItem(item)
    self.listWidget.itemSelectionChanged.connect(self.itemChanged)
    refsButton = QtGui.QPushButton("Refs", clicked=self.showRefs)
    backrefsButton = QtGui.QPushButton("Backrefs", clicked=self.showBackrefs)
    graphButton = QtGui.QPushButton("Graph", clicked=self.showGraph)
    garbageButton = QtGui.QPushButton("Garbage", clicked=self.showGarbage)
    inputRow = Row(self.inputWidget, refsButton, backrefsButton,
                   garbageButton, graphButton)
    self.widthLimitBox = QtGui.QSpinBox(value=14)
    self.depthLimitBox = QtGui.QSpinBox(value=7)
    limitRow = Row(QtGui.QLabel("Graph Width"), self.widthLimitBox,
                   QtGui.QLabel("Graph Depth"), self.depthLimitBox)
    self.setLayout(Column(inputRow, limitRow, self.listWidget,
                          (self.scrollArea, 1)))
    self.setMinimumSize(800, 600)
def get(self):
    results = objgraph.most_common_types(limit=100, shortnames=False)
    self.response.headers["Content-Type"] = "text/plain"
    for i, result in enumerate(results):
        data = "Top memory: %s" % (result,)
        logging.info(data)
        self.response.out.write(data + '\n')
def get(self):
    log = self.application.logger
    try:
        memstats = {}
        import gc
        log.info("start GC collect")
        memstats["collected"] = gc.collect()
        log.info("GC has finished collecting")
        try:
            import objgraph
            memstats["most_common_types"] = dict(objgraph.most_common_types(100))
        except ImportError as err:
            log.warning("can't create objgraph: %s", err)
        try:
            from guppy import hpy
            hp = hpy()
            h = hp.heap()
            memstats["heap"] = str(h)
        except ImportError as err:
            log.warning("can't create heapdump: %s", err)
        self.write(memstats)
    except Exception as err:
        self.write("unable to generate memstats %s" % err)
def process(self, input_interface):
    logging.info("Start the assertion validation module")
    for submodule in self._submodules:
        input_interface = submodule.process(input_interface)
        submodule.clean()
        logging.info(objgraph.growth())
        logging.info(objgraph.most_common_types())
    return input_interface
def objects(self, request):
    types = objgraph.most_common_types(limit=1000)
    # HELP/TYPE lines need trailing newlines, otherwise the output is not
    # valid Prometheus exposition format.
    request.write('# HELP objgraph_objects active objects in memory\n'.encode())
    request.write('# TYPE objgraph_objects gauge\n'.encode())
    for name, count in types:
        request.write(
            ('objgraph_objects{name="%s"} %s\n' % (name, count)).encode())
    return bytes()
def test_f(self):
    while True:
        print(self.pid)
        t = 'STATS %s\r\n' % self.pid
        for x in objgraph.most_common_types():
            t += '%s %s\r\n' % (x[0], x[1])
        print(t)
        time.sleep(20)
def most_common_types(diff_from=None):
    c = {k: v for k, v in objgraph.most_common_types()}
    if diff_from is not None:
        for k, v in diff_from.iteritems():
            if k in c:
                c[k] -= diff_from[k]
            else:
                c[k] = -diff_from[k]
    return c
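# Hypothetical driver for the diff helper above (the `junk` allocation is
# purely illustrative): record a baseline, allocate something measurable,
# then print the types that grew the most.
baseline = most_common_types()
junk = [[] for _ in range(10000)]  # deliberately allocate many lists
delta = most_common_types(diff_from=baseline)
print(sorted(delta.items(), key=lambda kv: -kv[1])[:5])  # biggest growers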
def run():
    start = time.time()
    print("Process Memory before: %skB" % process_memory())
    # After this the module should be cached.
    # Need to invent a path so that it's really cached.
    jedi.Script(wx_core, path="foobar.py").completions()
    gc.collect()  # make sure that it's all fair and the gc did its job.
    print("Process Memory after: %skB" % process_memory())
    print(objgraph.most_common_types(limit=50))
    print("\nIt took %s seconds to parse the file." % (time.time() - start))
def process(self, input_interface):
    logging.info("Start the assertion generation module")
    generated_facts = []
    logging.info(objgraph.growth())
    # For now in sequence, we could make it be parallel
    for submodule in self._submodules:
        input_temp = submodule.process(input_interface)
        generated_facts += input_temp.get_generated_facts()
        submodule.clean()
        logging.info(objgraph.growth())
        logging.info(objgraph.most_common_types())
    return input_interface.add_generated_facts(generated_facts)
def run():
    start = time.time()
    print('Process Memory before: %skB' % process_memory())
    # After this the module should be cached.
    # Need to invent a path so that it's really cached.
    jedi.Script(wx_core, path='foobar.py').complete()
    gc.collect()  # make sure that it's all fair and the gc did its job.
    print('Process Memory after: %skB' % process_memory())
    print(objgraph.most_common_types(limit=50))
    print('\nIt took %s seconds to parse the file.' % (time.time() - start))
def __call__(self, environ, start_response):
    request = webob.Request(environ)
    if request.path_info.startswith(self.signature):
        query = request.GET.get('query')
        obj_type = request.GET.get('object_type')
        obj_address = request.GET.get('object_address')
        if obj_address:
            # Create a graph for the object
            leaking = [objgraph.at(int(obj_address))]
            filename = tempfile.mktemp(suffix='.png')
            objgraph.show_refs(leaking, filename=filename)
            output = open(filename, 'r').read()
            os.unlink(filename)
            start_response(
                '200 Ok',
                [('Content-Type', 'image/png')])
            return output
        output = StringIO()
        leaking = objgraph.get_leaking_objects()
        formats.htmlHeader(output, self.signature, request.host_url, query)
        output.write('<h3>Memory usage</h3>')
        output.write('<table>' + ''.join(
            map(lambda info: MEMORY_LINE.format(*info),
                objgraph.most_common_types(10, leaking))) + '</table>')
        if obj_type:
            # Display detail about leaking objects of a given type.
            output.write('<h4>Memory detail for %s</h4>' % obj_type)
            output.write('<ul>')
            for obj in leaking:
                if type(obj).__name__ == obj_type:
                    output.write(OBJECT_LINE.format(
                        id(obj), cgi.escape(str(obj))))
            output.write('</ul>')
        output.write('<h3>Timing</h3>')
        formats.htmlFormat(output, query=query)
        start_response(
            '200 Ok',
            [('Content-Type', 'text/html; charset=UTF8')])
        return output.getvalue()
    scale = self.find(request.path_info)
    scale.requests.mark()
    with scale.latency.time():
        response = request.get_response(self.app)
    result = scale.statuses(response.status_int)
    if result is not None:
        result.mark()
    start_response(
        response.status,
        [a for a in response.headers.iteritems()])
    return response.app_iter
def take_snapshot():
    global previous_snapshot
    snapshot = {t: c for (t, c) in objgraph.most_common_types(limit=100)}
    print("Took snapshot of objgraph")
    if previous_snapshot:
        delta = {
            t: snapshot[t] - previous_snapshot.get(t, 0)
            for t in snapshot.keys()
        }
        print("Types with the most changes : ")
        for t in sorted(delta.keys(), key=lambda x: delta[x], reverse=True):
            if delta[t]:
                print("{0:20} {1}".format(t, delta[t]))
    previous_snapshot = snapshot
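# A sketch of driving take_snapshot() periodically; the 60-second interval
# and the fixed iteration count are assumptions for illustration.
import time

take_snapshot()  # establish the baseline
for _ in range(3):
    time.sleep(60)
    take_snapshot()  # prints per-type deltas since the previous call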
def most_common_types_app(environ, start_response):
    limit = 1000
    if environ.get("QUERY_STRING"):
        query = parse_qs(environ.get("QUERY_STRING", ""))
        if "limit" in query:
            limit = int(query["limit"][0])
    types = objgraph.most_common_types(limit=limit)
    html = [
        "<html>",
        "<head>",
        "<title>ObjGraph most common types</title>",
        "</head>",
        "<body>",
        "<table>",
        "<tr><th>type</th><th>count</th></tr>",
    ]
    logger.debug("common_types: %s", types)
    for typename, count in types:
        parts = [
            "<tr>",
            "<td>", typename, "</td>",
            "<td>", str(count), "</td>",
            "</tr>",
        ]
        html += parts
    html += [
        "</table>",
        "</body>",
        "</html>",
    ]
    start_response(
        "200 OK",
        [('Content-type', 'text/html')],
    )
    html = [h.encode('utf-8') for h in html]
    return html
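# A minimal local runner for the WSGI app above, using the standard
# library's reference server (the host/port are assumptions); try
# http://localhost:8000/?limit=50 once it is running.
from wsgiref.simple_server import make_server

make_server('localhost', 8000, most_common_types_app).serve_forever()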
def __init__(self, dict_c):
    self.dict_c = dict_c
    self.model = LSTM_(dict_c)
    self.dict_data = None

    self.count_no_cma = 0
    self.count_no_cma_AUC = 0.

    self.max_AUC_val = 0
    self.max_AUC_tr = 0
    self.max_AUC_t = 0
    self.min_val_loss = 10
    self.AUC_no_cma = 0.0
    self.state_loss = 0.

    self.path_o = self.model.return_path()
    self.epoch = 0.
    self.memory = []
    self.obj_graph = {}
    lol = objgraph.most_common_types(limit=20)
    self.iter_memwatcher = 0
    for obj in lol:
        self.obj_graph[obj[0]] = np.array([])

    self.best_dict = {}
    self.dict_loss = {'loss': [], 'val_loss': []}
    self.time_stop = time.time()
def plot_obj(self):
    most_common_types_step = objgraph.most_common_types(limit=20)
    for key, value in most_common_types_step:
        if key not in self.obj_graph.keys():
            self.obj_graph[key] = np.zeros(self.iter_memwatcher)
        self.obj_graph[key] = np.hstack((self.obj_graph[key], [value]))
    for key in self.obj_graph.keys():
        if key not in np.array(most_common_types_step)[:, 0]:
            self.obj_graph[key] = np.hstack((self.obj_graph[key], [0]))

    fig = plt.figure(figsize=(16, 4))
    ax1 = plt.subplot(121)
    sum = np.zeros(self.iter_memwatcher + 1)
    for key in self.obj_graph.keys():
        ax1.plot(self.obj_graph[key], label=key)
        sum += self.obj_graph[key]
    ax1.plot(sum, label='sum')
    plt.title('object graph')
    plt.legend()

    # plot total mem
    ax2 = plt.subplot(122)
    ax2.plot(self.memory)
    plt.title('memory')
    plt.savefig(self.path_o + 'objgraph.png')
    self.iter_memwatcher += 1
def overview(self):
    types = objgraph.most_common_types(30)
    return self.render('flask-admin-profiler/memory/overview.html',
                       common_types=types)
def stats():
    print gc.collect()
    print
    print '\n'.join(map(str, objgraph.most_common_types(limit=10)))
def show_hogs(self, limit=20):
    self.logger.info("Top memory hogs:")
    stats = objgraph.most_common_types(limit=limit, shortnames=False)
    width = max(len(name) for name, count in stats)
    for name, count in stats:
        self.logger.info(' %-*s %i' % (width, name, count))
def objgraph_common_objects(self, args):
    """
    This function can be used to display the most common objects for a
    running fuglu instance, which can help find memory leaks. For now
    this works best for fuglu with the thread backend. Fuglu has to be
    running as a daemon. "fuglu_control" is used to communicate with
    the fuglu instance.

    Examples:

    (1) show most common fuglu objects
    -----------------------------------

    $ fuglu_control objgraph_common_objects '{"must_contain": ["fuglu"], "nresults": 5}'

    ----------------
    Most common objects:
    ----------------

    params:
    * nresults: 5
    * lowercase: True
    * dont_startwith:
    * must_startwith:
    * dont_contain:
    * must_contain: fuglu

    fuglu.extensions.filearchives.classproperty : 6
    fuglu.threadpool.Worker : 2
    fuglu.extensions.filetype.MIME_types_base : 2
    fuglu.debug.ControlSession : 2
    fuglu.connectors.smtpconnector.SMTPServer : 2
    """
    res = u"----------------\n" \
          + u"Most common objects:\n" \
          + u"----------------\n\n"
    if OBJGRAPH_EXTENSION_ENABLED:
        defaults = {
            "nresults": 20,
            "lowercase": True,
            "dont_startwith": ["builtins", "_"],
            "dont_contain": [],
            "must_startwith": [],
            "must_contain": []
        }
        if not args:
            args = {}

        # fill filter lists and other vars from dict
        res, inputdict = ControlSession.prepare_objectgraph_list_from_dict(
            args, res, defaults)

        types_list = None
        finalfilter = None
        try:
            # build filter
            finalfilter = ControlSession.buildfilter(
                dont_contain=inputdict["dont_contain"],
                dont_startwith=inputdict["dont_startwith"],
                must_contain=inputdict["must_contain"],
                must_startwith=inputdict["must_startwith"],
                lowercase=inputdict["lowercase"])

            types_list = objgraph.most_common_types(
                limit=inputdict["nresults"], shortnames=False,
                filter=finalfilter)
            for otype in types_list:
                res += u"%s : %u\n" % otype
        except Exception as e:
            res += force_uString(e)
            self.logger.exception(e)
        finally:
            if types_list:
                del types_list
            if finalfilter:
                del finalfilter
    else:
        res = u"please install module 'objgraph'"
    return res
def _memory_dump(opts):
    for typ, n in objgraph.most_common_types():
        logging.info('{typ:30} {n:>10}'.format(typ=typ, n=n))

    objects = []
    rng = opts['size_range']

    summ = {
        'max_refsize': {
            'size': 0,
        },
    }

    for obj in gc.get_objects():
        if not hasattr(obj, '__class__'):
            continue

        size = sys.getsizeof(obj, 0)
        if rng is not None:
            if not (rng[0] <= size < rng[1]):
                continue

        i = id(obj)

        # referrers = [id(o)
        #              for o in gc.get_referrers(obj)
        #              if hasattr(o, '__class__')]

        referents = [(id(o), _get_class(o), sys.getsizeof(o, 0))
                     for o in gc.get_referents(obj)
                     # if hasattr(o, '__class__')
                     ]

        refsize = sum([x[2] for x in referents])

        cls = _get_class(obj)

        data = [
            i,
            cls,
            size,       # object size
            refsize,    # size of all direct referents
            referents,  # referents
        ]

        objects.append(data)

        if summ['max_refsize']['size'] < refsize:
            summ['max_refsize'] = {
                'size': refsize,
                'object': data,
            }

    for o in objects:
        logging.info('memory-dump: ' + json.dumps(o))

    logging.info('memory-dump summary: ' + json.dumps(summ))
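# Hypothetical invocation of _memory_dump above; the size range (bytes,
# half-open interval) is an illustrative assumption. Passing
# {'size_range': None} would skip the size filter entirely.
_memory_dump({'size_range': (1024, 1024 * 1024)})  # objects of 1 KiB-1 MiB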
def objgraph_leaking_objects(self, args):
    """
    This is supposed to count objects leaked by C-level reference
    counting bugs, see https://mg.pov.lt/objgraph/#reference-counting-bugs

    Example:

    $ fuglu_control objgraph_leaking_objects '{"nresults": 5}'

    ----------------
    Leaking objects:
    ----------------

    params:
    * nresults: 5
    * lowercase: True
    * dont_startwith:
    * must_startwith:
    * dont_contain:
    * must_contain:

    builtins.dict : 797
    builtins.list : 132
    builtins.tuple : 28
    builtins.method : 13
    builtins.weakref : 12
    """
    res = u"----------------\n" \
          + u"Leaking objects:\n" \
          + u"----------------\n\n"
    if OBJGRAPH_EXTENSION_ENABLED:
        defaults = {
            "nresults": 20,
            "lowercase": True,
            "dont_startwith": ["builtins", "_"],
            "dont_contain": [],
            "must_startwith": [],
            "must_contain": []
        }
        if not args:
            args = {}

        # fill filter lists and other vars from dict
        res, inputdict = ControlSession.prepare_objectgraph_list_from_dict(
            args, res, defaults)

        roots = None
        types_list = None
        finalfilter = None
        try:
            roots = objgraph.get_leaking_objects()

            # build filter
            finalfilter = ControlSession.buildfilter(
                dont_contain=inputdict["dont_contain"],
                dont_startwith=inputdict["dont_startwith"],
                must_contain=inputdict["must_contain"],
                must_startwith=inputdict["must_startwith"],
                lowercase=inputdict["lowercase"])

            types_list = objgraph.most_common_types(
                objects=roots, limit=inputdict["nresults"],
                shortnames=False, filter=finalfilter)
            for otype in types_list:
                res += u"%s : %u\n" % otype
        except Exception as e:
            res += force_uString(e)
            self.logger.exception(e)
        finally:
            if roots:
                del roots
            if types_list:
                del types_list
            if finalfilter:
                del finalfilter
    else:
        res = u"please install module 'objgraph'"
    return res
def _dump_memory_impl(
    limit: int,
    analyze_type: Optional[str],
    python_internals_map: bool = False,
) -> Mapping[str, Any]:
    nb_collected = [gc.collect(generation) for generation in range(3)]
    result = {
        "nb_collected": nb_collected,
        "most_common_types": objgraph.most_common_types(limit=limit, shortnames=False),
        "leaking_objects": objgraph.most_common_types(
            limit=limit, shortnames=False, objects=objgraph.get_leaking_objects()),
    }

    if python_internals_map and not analyze_type:
        analyze_type = "builtins.dict"

    if analyze_type:
        # timeout after one minute, must be set to a bit less than the
        # timeout of the broadcast in _views.py
        timeout = time.monotonic() + 60
        mod_counts: Dict[str, int] = {}
        biggest_objects: List[Tuple[float, Any]] = []
        result[analyze_type] = {}
        for obj in objgraph.by_type(analyze_type):
            if analyze_type == "builtins.function":
                short = obj.__module__.split(".")[0] if obj.__module__ is not None else ""
                mod_counts[short] = mod_counts.get(short, 0) + 1
            else:
                if analyze_type == "builtins.dict":
                    python_internal = False
                    if not (FILES_FIELDS - set(obj.keys())):
                        python_internal = True
                    if (not ({"scope", "module", "locals", "globals"} - set(obj.keys()))
                            and isinstance(obj["globals"], dict)
                            and not (FILES_FIELDS - set(obj["globals"].keys()))):
                        python_internal = True
                    if (python_internal and not python_internals_map
                            or not python_internal and python_internals_map):
                        continue
                size = get_size(obj) / 1024
                if len(biggest_objects) < limit or size > biggest_objects[0][0]:
                    biggest_objects.append((size, obj))
                    biggest_objects.sort(key=lambda x: x[0])
                    if len(biggest_objects) > limit:
                        biggest_objects = biggest_objects[-limit:]
            if time.monotonic() > timeout:
                result[analyze_type]["timeout"] = True
                break
        if analyze_type == "builtins.function":
            result[analyze_type]["modules"] = [
                dict(module=i[0], nb_func=i[1])
                for i in sorted(mod_counts.items(), key=lambda x: -x[1])[:limit]
            ]
        elif analyze_type == "linecache":
            import linecache
            cache = linecache.cache
            result[analyze_type]["biggest_objects"] = sorted(
                (dict(filename=k, size_kb=get_size(v)) for k, v in cache.items()),
                key=lambda i: -(cast(int, i["size_kb"])),
            )
        else:
            biggest_objects.reverse()
            result[analyze_type]["biggest_objects"] = [
                dict(size_kb=i[0], repr=repr(i[1])) for i in biggest_objects
            ]
    return result
async def report(self, limit=None):
    return objgraph.most_common_types(shortnames=False, limit=limit)
def show_hogs(self, limit=20):
    self.logger.warning("Top memory hogs:")
    stats = objgraph.most_common_types(limit=limit, shortnames=False)
    width = max(len(name) for name, count in stats)
    for name, count in stats:
        self.logger.warning(' %-*s %i' % (width, name, count))