def call_gc():
    """Force a full garbage collection, logging GC state before and after.

    Logs the current length of ``gc.garbage`` and the three generation
    counters, runs a full collection, then logs the counters again along
    with the number of unreachable objects the collector found.
    """
    gen0, gen1, gen2 = gc.get_count()
    logger.info("Calling GC {0} garbage, {1}, {2}, {3}".format(
        len(gc.garbage), gen0, gen1, gen2))
    unreach = gc.collect()
    gen0, gen1, gen2 = gc.get_count()
    # Bug fix: the "after" message previously went to stdout via print()
    # while the "before" message used the logger; emit both through the
    # logger so the pair lands in the same log stream.
    logger.info("Called GC {0} unreachable, {1} garbage, {2}, {3}, {4}".format(
        unreach, len(gc.garbage), gen0, gen1, gen2))
def test_get_count(self):
    """gc.get_count() must report exactly the objects allocated since
    the last full collection (CPython-exact-count variant)."""
    # Avoid future allocation of method object
    assertEqual = self.assertEqual
    gc.collect()
    assertEqual(gc.get_count(), (0, 0, 0))
    a = dict()  # kept referenced so the dict stays in generation 0
    # since gc.collect(), we created two objects:
    # the dict, and the tuple returned by get_count()
    assertEqual(gc.get_count(), (2, 0, 0))
def test_collect_generations():
    """Check that collecting generation n promotes survivors to n+1.

    After a full collect, the dict created here sits in generation 0;
    each gc.collect(n) should move it one generation up, and a full
    (generation 2) collection leaves all counters at zero.
    """
    gc.collect()
    a = dict()
    gc.collect(0)
    expect(gc.get_count(), (0, 1, 0), "collect(0)")
    gc.collect(1)
    expect(gc.get_count(), (0, 0, 1), "collect(1)")
    gc.collect(2)
    # Bug fix: the failure label previously read "collect(1)", making a
    # failure here indistinguishable from the previous assertion.
    expect(gc.get_count(), (0, 0, 0), "collect(2)")
def do_gc(self, mode, indent):
    """Run a full garbage collection; when resource logging is enabled,
    log how many objects vanished from each generation, indented by
    the accumulated resource-logger indent."""
    if not ResourceLogger.is_enabled():
        gc.collect()
        return
    before = gc.get_count()
    gc.collect()
    after = gc.get_count()
    ResourceLogger.print_res_spaces += indent
    pad = " " * ResourceLogger.print_res_spaces
    freed = tuple(b - a for b, a in zip(before, after))
    GCPreempt.logger.info(
        '%scollected (%d, %d, %d) objects %s %s'
        % (pad, freed[0], freed[1], freed[2],
           GCPreempt.WHEN_NAMES[mode], self.ctx_name))
def IsLeaking(self, sslsocket_impl):
    """Return True if wrapping a socket with sslsocket_impl 1000 times
    leaks at least one gen-0 object per construction.

    Collection is disabled during the measurement so the gen-0 counter
    reflects allocations rather than collector activity.
    NOTE(review): gc is intentionally left disabled on return, matching
    the original behavior — confirm callers manage collector state.
    """
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        gc.disable()
        gc.collect()
        count0 = gc.get_count()[0]
        for i in xrange(1000):
            sslsocket_impl(sock)
        count1 = gc.get_count()[0]
        self.assertTrue(count1 >= count0)
        return count1 - count0 >= 1000
    finally:
        # Bug fix: the socket was never closed, leaking a file
        # descriptor on every call.
        sock.close()
def dismiss_callback(self, i):
    """Close the image-browser popup; if a photo was selected, adopt it
    as the new source, then dump GC statistics for debugging."""
    chosen = self.imageBrowse.fileChooser.selection
    if chosen:
        print("Selection: {}".format(chosen[0]))
        self.source = chosen[0]
        # Drop the browser widget reference so it can be collected.
        self.imageBrowse = None
    print(gc.get_count())
    gc.collect()
    print(gc.get_count())
    print(gc.garbage)
    self.popup_open = False
def test_collect_generations(self):
    """Collecting generation n must promote the surviving dict to n+1,
    until a full collection empties all generations."""
    # Avoid future allocation of method object
    assertEqual = self.assertEqual
    gc.collect()
    a = dict()  # lives in generation 0 until collected
    gc.collect(0)
    assertEqual(gc.get_count(), (0, 1, 0))
    gc.collect(1)
    assertEqual(gc.get_count(), (0, 0, 1))
    gc.collect(2)
    assertEqual(gc.get_count(), (0, 0, 0))
def test_get_count(self):
    """After a full collect, gens 1 and 2 are empty and the gen-0 count
    grows as new objects are allocated."""
    gc.collect()
    a, b, c = gc.get_count()
    x = []  # allocate at least one new gen-0 object
    d, e, f = gc.get_count()
    self.assertEqual((b, c), (0, 0))
    self.assertEqual((e, f), (0, 0))
    # This is less fragile than asserting that a equals 0.
    self.assertLess(a, 5)
    # Between the two calls to get_count(), at least one object was
    # created (the list).
    self.assertGreater(d, a)
def test_get_count(self):
    """Exact-allocation-count variant of the get_count test."""
    # Skipped under IronPython, whose collector does not reproduce
    # CPython's per-generation counts (tracked by the work item below).
    if test_support.due_to_ironpython_incompatibility(
        "http://tkbgitvstfat01:8080/WorkItemTracking/WorkItem.aspx?artifactMoniker=314470"
    ):
        return
    # Avoid future allocation of method object
    assertEqual = self._baseAssertEqual
    gc.collect()
    assertEqual(gc.get_count(), (0, 0, 0))
    a = dict()
    # since gc.collect(), we created two objects:
    # the dict, and the tuple returned by get_count()
    assertEqual(gc.get_count(), (2, 0, 0))
def test_gc_create0(self):
    """GC-tracked instances of generated classes must be collectable:
    after deleting them, generation counts return to the baseline."""
    gc.collect()
    cnt1 = gc.get_count()
    A = make_class("A", fields=2, gc=True)
    B = make_class("B", varsize=True, gc=True)
    b = B([], ())
    a = A({}, b)
    cnt2 = gc.get_count()  # captured but never asserted on
    self.assertEqual(gc.is_tracked(a), True)
    self.assertEqual(gc.is_tracked(b), True)
    del a
    gc.collect()
    cnt3 = gc.get_count()
    self.assertEqual(cnt1, cnt3)
def test_collect_generations(self):
    """Per-generation collection test (CPython GC semantics)."""
    # Skipped under IronPython, whose collector does not track
    # generations the same way (tracked by the work item below).
    if test_support.due_to_ironpython_bug(
        "http://tkbgitvstfat01:8080/WorkItemTracking/WorkItem.aspx?artifactMoniker=314459"
    ):
        return
    # Avoid future allocation of method object
    assertEqual = self.assertEqual
    gc.collect()
    a = dict()  # lives in generation 0 until collected
    gc.collect(0)
    assertEqual(gc.get_count(), (0, 1, 0))
    gc.collect(1)
    assertEqual(gc.get_count(), (0, 0, 1))
    gc.collect(2)
    assertEqual(gc.get_count(), (0, 0, 0))
def stats(self):
    """Collect a snapshot of runtime statistics for this service:
    greenlet pool size, gevent hub/threadpool/loop state, GC counters,
    RPC stats, per-connection stats and per-interface stats."""
    hub = gevent.get_hub()
    pool = hub.threadpool
    loop = hub.loop
    snapshot = {
        'endpoint': self.endpoint,
        'identity': self.identity,
        'greenlets': len(self.pool),
        'service': self.service_name,
        'gevent': {
            'threadpool': {
                'size': pool.size,
                'maxsize': pool.maxsize,
            },
            'active': loop.activecnt,
            'pending': loop.pendingcnt,
            'iteration': loop.iteration,
            'depth': loop.depth,
        },
        'gc': {
            'garbage': len(gc.garbage),
            'collections': gc.get_count(),
        },
        'rpc': self.rpc_stats(),
        'connections': [conn.stats() for conn in self.connections.values()],
    }
    for name, interface in six.iteritems(self.installed_interfaces):
        snapshot[name] = interface.stats()
    return snapshot
def status(self, mess, args):
    """ If I am alive I should be able to respond to this one """
    blacklisted = holder.bot.get_blacklisted_plugin()
    loaded = get_all_active_plugin_names()
    candidates = sorted([p.name for p in holder.bot.all_candidates])
    report = []
    for name in candidates:
        if name in blacklisted:
            # BL = blacklisted but loaded, BU = blacklisted and unloaded
            if name in loaded:
                report.append(('BL', get_plugin_by_name(name).category, name))
            else:
                report.append(('BU', name))
        elif name in loaded:
            report.append(('L', get_plugin_by_name(name).category, name))
        elif get_plugin_obj_by_name(name) is not None:
            report.append(('C', get_plugin_by_name(name).category, name))
        else:
            report.append(('U', name))
    #noinspection PyBroadException
    try:
        from posix import getloadavg
        loads = getloadavg()
    except Exception as _:
        loads = None
    return {'plugins': report, 'loads': loads, 'gc': gc.get_count()}
def update_status_in_thread(self):
    """Refresh the status of every active cluster, then reschedule the
    next refresh.

    Runs on a worker thread; the reschedule is marshalled back to the
    reactor thread because reactor.callLater is not thread-safe.
    """
    # TODO: make sure performance is not a problem as current approach queries
    # database many times.
    logger.info("updating clusters status, "
                "%d task in queue, %d workers, %d total threads",
                reactor.getThreadPool().q.qsize(),
                len(reactor.getThreadPool().working),
                len(reactor.getThreadPool().threads))
    try:
        self.start_time = time.time()
        for cluster in Cluster.objects.filter(active=True).all():
            self.update_cluster_status(cluster)
        logger.info("spent %f seconds for updating clusters status",
                    time.time() - self.start_time)
        logger.info("gc: %r", gc.get_count())
        logger.info("usage: %r", resource.getrusage(resource.RUSAGE_SELF))
    except Exception as e:
        # Bug fix: the message previously read "%Failed to update statu"
        # (stray '%' plus typo), producing a malformed log line.
        logger.warning("Failed to update status: %r", e)
    finally:
        # reactor.callLater is NOT thread-safe but reactor.callFromThread is,
        # so we put the callLater to the main loop.
        reactor.callFromThread(
            reactor.callLater, self.collector_config.period,
            self.update_status)
def get_info(self):
    """Build a status dictionary describing this service instance:
    identity, load, timing, memory footprint, GC counters and CPU use."""
    vmpeak, vmsize, vmrss = self.get_vmsize()
    cputime = self.get_cputime()
    elapsed = datetime.datetime.now() - self._start_time
    # Elapsed wall-clock time expressed in hundredths of a second.
    hundredths = ((elapsed.days * 86400 + elapsed.seconds) * 1000000
                  + elapsed.microseconds) / 10000
    return {
        'name': self._name,
        'version': self._version,
        'status': self._status,
        'load_now': self._load,
        'load_limit': self._limit,
        'sequence': self._seq,
        'start_time': str(self._start_time),
        'current_time': str(datetime.datetime.now()),
        'run_time': str(datetime.datetime.now() - self._start_time),
        'dir': self._dir,
        'auto_update': self._auto_update,
        'next_update': str(self._next_update),
        'heartbeat': self._heartbeat,
        'md5_base': self._md5_base,
        'md5_interface': self._md5_interface,
        'vmpeak': vmpeak,
        'vmsize': vmsize,
        'vmrss': vmrss,
        'gc': str(gc.get_count()),
        'cpu': round(float(cputime * 100) / hundredths, 4),
    }
def main():
    """Demonstrate reference cycles, gc counters and refcounts."""
    a = 4  # unused demo variables, kept from the original walkthrough
    b = 5
    cycle_list = []
    cycle_list.append(123)
    cycle_list.append(456)
    # reference cycle: the list contains itself
    cycle_list.append(cycle_list)
    cycle_list[2].append(789)
    print(cycle_list)
    print("Stats: {}".format(gc.get_stats()))
    print("Count: {}".format(gc.get_count()))
    print("GC enabled: {}".format(gc.isenabled()))
    print("Threshold: {}".format(gc.get_threshold()))
    print("c_list is tracked: {}".format(gc.is_tracked(cycle_list)))
    # getrefcount() reports one more than you might expect, because the
    # argument itself holds a temporary reference during the call.
    print("Reference count for c_list: {}".format(sys.getrefcount(cycle_list)))
    del cycle_list[2]  # break the cycle
    print("Reference count for c_list: {}".format(sys.getrefcount(cycle_list)))
    print("Collecting: {}".format(gc.collect()))
    print("Done.")
def create_system_counters():
    """Register gauges for per-generation GC counts, total tracked
    objects, and process/CPU statistics."""
    import gc
    for i in [0, 1, 2]:
        # Bug fix: the lambda previously captured the loop variable `i`
        # by late binding, so once the loop finished all three gauges
        # read gc.get_count()[2]. Binding it as a default argument
        # freezes the generation index per gauge.
        Gauge('gc_count_{}'.format(i), lambda i=i: gc.get_count()[int(i)])
    Gauge('gc_count_objects', lambda: len(gc.get_objects()))
    Gauge('self', procstat.get_self)
    Gauge('cpu', procstat.get_cpu)
def stats():
    """Log a one-line memory/GC snapshot attributed to the caller's
    source location, when memstat is enabled; disable memstat on any
    failure."""
    global _memstat_enabled, _can_enable
    if not _memstat_enabled:
        return
    if not _can_enable:
        logging.warning('could not enable memstat')
        _memstat_enabled = False
        return
    try:
        s0, s1, s2 = gc.get_count()
        usage = resource.getrusage(resource.RUSAGE_SELF)
        kb = usage.ru_maxrss
        if kb == 0:
            # Some platforms report 0 for ru_maxrss; fall back to the
            # custom RSS reader.
            kb = my_getrss()
        _frame = inspect.currentframe()
        frame = _frame.f_back  # caller's frame: report their file:line
        fname = frame.f_code.co_filename
        fnum = frame.f_lineno
        logging.info('memstat:%s:%d: rss_kb: %d gb_stages: %d %d %d' % \
                     (fname, fnum, kb, s0, s1, s2))
    except:
        log_lines(sys.exc_info(), logging.debug)
        log_lines(traceback.format_exc(), logging.debug)
        logging.warning('something went wrong with memstat, disabling')
        _can_enable = False
        _memstat_enabled = False
    finally:
        # Necessary to avoid cyclic references and leak memory!
        # NOTE(review): if the try block fails before frame/_frame are
        # assigned, these dels raise NameError out of the finally block
        # — confirm whether that path is reachable in practice.
        del frame
        del _frame
def status(self, mess, args):
    """ If I am alive I should be able to respond to this one """
    blacklisted = self.get_blacklisted_plugin()
    loaded = get_all_active_plugin_names()
    candidates = sorted([p.name for p in self.all_candidates])
    report = []
    for name in candidates:
        if name in blacklisted:
            # BL = blacklisted but loaded, BU = blacklisted and unloaded
            report.append(('BL', name) if name in loaded else ('BU', name))
        elif name in loaded:
            report.append(('L', name))
        elif (get_plugin_obj_by_name(name) is not None
              and get_plugin_obj_by_name(name).get_configuration_template() is not None
              and self.get_plugin_configuration(name) is None):
            # C = plugin present but still awaiting configuration
            report.append(('C', name))
        else:
            report.append(('U', name))
    #noinspection PyBroadException
    try:
        from posix import getloadavg
        loads = getloadavg()
    except Exception as _:
        loads = None
    return {'plugins_statuses': report, 'loads': loads, 'gc': gc.get_count()}
def main():
    """GC benchmark driver: stretch the heap, keep long-lived data
    alive, time tree construction at several depths, and report."""
    print("Garbage Collector Test")
    print(" Live storage will peak at {} bytes."
          .format(2 * sys.getsizeof(Node()) * treeSize(kLongLivedTreeDepth)
                  + sys.getsizeof(0.0) * kArraySize))
    print(" Stretching memory with a binary tree of depth {}".format(kStretchTreeDepth))
    t_begin = time.monotonic()
    # Stretch the memory space quickly, then drop the tree.
    stretch = makeTree(kStretchTreeDepth)
    stretch = None
    # Create a long lived object
    print(" Creating a long-lived binary tree of depth {}".format(kLongLivedTreeDepth))
    long_lived_tree = Node()
    populate(kLongLivedTreeDepth, long_lived_tree)
    # Create long-lived array, filling half of it
    print(" Creating a long-lived array of {} doubles".format(kArraySize))
    long_lived_array = array('d', [0.0] + [1.0 / x for x in range(1, kArraySize)])
    for depth in range(kMinTreeDepth, kMaxTreeDepth + 1, 2):
        timeConstruction(depth)
    # Sanity check: the long-lived data must have survived untouched.
    if long_lived_tree is None or long_lived_array[1000] != 1.0 / 1000:
        print("Failed")
    t_end = time.monotonic()
    print("Completed in {} ms".format(timeElapsed(t_begin, t_end)))
    print("Completed {} collections".format(sum(gc.get_count())))
def _make_html_response(self, results, healthy):
    """Render the health-check results as an HTML page.

    Returns a (body, content_type) tuple.
    """
    try:
        hostname = socket.gethostname()
    except socket.error:
        hostname = None
    translated_results = []
    for result in results:
        translated_results.append({
            'details': result.details or '',
            'reason': result.reason,
            'class': reflection.get_class_name(result, fully_qualified=False),
        })
    params = {
        'healthy': healthy,
        'hostname': hostname,
        'results': translated_results,
        'detailed': self._show_details,
        'now': str(timeutils.utcnow()),
        'python_version': sys.version,
        'platform': platform.platform(),
        'gc': {
            'counts': gc.get_count(),
            'threshold': gc.get_threshold(),
        },
        'threads': self._get_threadstacks(),
        # Bug fix: the greenthreads entry previously re-dumped OS thread
        # stacks via _get_threadstacks(); use _get_greenstacks() so it
        # matches the JSON responder (_make_json_response).
        'greenthreads': self._get_greenstacks(),
    }
    body = _expand_template(self.HTML_RESPONSE_TEMPLATE, params)
    return (body.strip(), 'text/html')
def _make_json_response(self, results, healthy):
    """Render the health-check results as JSON.

    Returns a (body, content_type) tuple; the detailed form includes
    interpreter, platform, GC and thread information.
    """
    if not self._show_details:
        summary = {
            'reasons': [result.reason for result in results],
            'detailed': False,
        }
        return (self._pretty_json_dumps(summary), 'application/json')
    body = {
        'detailed': True,
        'python_version': sys.version,
        'now': str(timeutils.utcnow()),
        'platform': platform.platform(),
        'gc': {
            'counts': gc.get_count(),
            'threshold': gc.get_threshold(),
        },
    }
    body['reasons'] = [
        {
            'reason': result.reason,
            'details': result.details or '',
            'class': reflection.get_class_name(result, fully_qualified=False),
        }
        for result in results
    ]
    body['greenthreads'] = self._get_greenstacks()
    body['threads'] = self._get_threadstacks()
    return (self._pretty_json_dumps(body), 'application/json')
def __init__(self):
    """Build the lock-screen page: show unsaved work warnings, printer
    problems and the till version, and schedule an idle exit when the
    till is unused and configured to do so."""
    ui.basicpage.__init__(self)
    self.addstr(1,1,"This till is locked.")
    self.updateheader()
    self._y=3
    # Every other registered page represents a user with unsaved work.
    unsaved=[p for p in ui.basicpage._pagelist if p!=self]
    if unsaved:
        self.line("The following users have unsaved work "
                  "on this terminal:")
        for p in unsaved:
            self.line(" {} ({})".format(p.pagename(),p.unsaved_data))
        self.line("")
    else:
        # The till is idle - schedule an exit if configured
        if tillconfig.idle_exit_code is not None:
            event.eventlist.append(self)
            # Exit no earlier than the minimum run time and no sooner
            # than the minimum time the lock screen must stay up.
            self.nexttime = max(
                tillconfig.start_time + tillconfig.minimum_run_time,
                time.time() + tillconfig.minimum_lock_screen_time)
    # Surface printer problems both on screen and in the log.
    rpproblem=printer.driver.offline()
    if rpproblem:
        self.line("Receipt printer problem: {}".format(rpproblem))
        log.info("Receipt printer problem: %s",rpproblem)
    kpproblem=foodorder.kitchenprinter.offline()
    if kpproblem:
        self.line("Kitchen printer problem: {}".format(kpproblem))
        log.info("Kitchen printer problem: %s",kpproblem)
    self.addstr(self.h-1,0,"Till version: {}".format(version.version))
    self.move(0, 0)
    log.info("lockpage gc stats: %s, len(gc.garbage)=%d",gc.get_count(), len(gc.garbage))
def gc_dump():
    """Dump GC usage (generation counts plus every item in gc.garbage)
    to a timestamped file in the log folder.

    Returns a human-readable summary string, or an error message if the
    dump failed.
    """
    try:
        dump_dir = settings.LOG_FOLDER
        tstamp = datetime.datetime.utcnow().strftime("%Y%m%d%H%M%S")
        fname = os.path.join(dump_dir, 'gcdump-%s.txt' % (tstamp,))
        count = gc.get_count()
        c = 0
        # Bug fix: use a context manager so the dump file is closed even
        # if repr()/write fails partway through — previously the handle
        # leaked on any exception raised after open().
        with open(fname, "w") as fh:
            fh.write("gc.get_count():\n%s\n" % (count,))
            fh.write("gc.garbage:\n")
            for x in gc.garbage:
                c += 1
                try:
                    line = repr(x)
                except Exception as e:
                    line = "Error str'ing an object: " + str(e)
                fh.write(line + "\n")
        m = 'GC count is %s and %d garbage items written to: %s' % (
            count, c, fname)
        return m
    except Exception as e:
        return "Error while trying to dump GC: %s" % (e,)
def status(self, mess, args):
    """ If I am alive I should be able to respond to this one """
    blacklisted = self.get_blacklisted_plugin()
    loaded = get_all_active_plugin_names()
    report = []
    for name in sorted([p.name for p in self.all_candidates]):
        if name in blacklisted:
            tag = "B"
        elif name in loaded:
            tag = "L"
        elif (
            get_plugin_obj_by_name(name) is not None
            and get_plugin_obj_by_name(name).get_configuration_template() is not None
            and self.get_plugin_configuration(name) is None
        ):
            tag = "C"  # plugin present but still awaiting configuration
        else:
            tag = "E"
        report.append((tag, name))
    try:
        from posix import getloadavg
        loads = getloadavg()
    except Exception as e:
        loads = None
    return {"plugins_statuses": report, "loads": loads, "gc": gc.get_count()}
def get_gc_info():
    """Return a snapshot of garbage-collector state as a dict:
    enabled flag, thresholds, generation counts and tracked-object
    count."""
    import gc
    return {
        'is_enabled': gc.isenabled(),
        'thresholds': gc.get_threshold(),
        'counts': gc.get_count(),
        'obj_count': len(gc.get_objects()),
    }
def closeEvent(self, event):
    """Shut down the program, give some debug info.

    :param event: the Qt close event, forwarded to the base class
    """
    QtWidgets.QMainWindow.closeEvent(self, event)
    if ctrl.print_garbage:
        before = str(gc.get_count())
        log.debug('garbage stats: ' + before)
        gc.collect()
        after = str(gc.get_count())
        log.debug('after collection: ' + after)
        if gc.garbage:
            log.debug('garbage: ' + str(gc.garbage))
    if self.save_prefs:
        prefs.save_preferences()
    log.info('...done')
def _DrawDebugInfo(self,dtime):
    """Dump debug information (i.e. entity stats) to the upper right
    corner of the window"""
    # Lazily create the overlay font on first use.
    if not hasattr(self,"debug_info_font"):
        self.debug_info_font = FontCache.get(defaults.letter_height_debug_info,face=defaults.font_debug_info)
    if self.level:
        entity_count,entities_active,entities_visible,entities_nowindow = self.level.GetEntityStats()
    else:
        # No level loaded: show sentinel values instead.
        entity_count,entities_active,entities_visible,entities_nowindow = -1,-1,-1,-1
    drawables_global = len(Renderer.GetDrawables())
    fps = 1.0/dtime
    import gc
    gcc = gc.get_count()
    scaled_score = self.score/100
    # this is expensive, but we will survive it.
    # NOTE: the template below is filled via .format(**locdef), so the
    # local variable names above are load-bearing — renaming any of them
    # would break the substitution.
    locdef = locals().copy()
    locdef.update(defaults.__dict__)
    locdef.update(self.__dict__)
    text = """
EntitiesTotal:     {entity_count}
EntitiesActive:    {entities_active}
EntitiesVisible:   {entities_visible}
DrawCalls:         {draw_counter}
EntitiesNoWindow:  {entities_nowindow}
DrawablesGlobal:   {drawables_global}
GCCollections:     {gcc}
GodMode:           {debug_godmode}
UpDownMove:        {debug_updown_move}
PreventFallDown:   {debug_prevent_fall_down}
ShowBoundingBoxes: {debug_draw_bounding_boxes}
ScrollBoth:        {debug_scroll_both}
ScrollSpeed:       {move_map_speed}
SpeedScale/Round:  {speed_scale_per_round}
SpeedScale:        {speed_scale}
LevelSize:         {level_size}
TimeDelta:         {dtime:.4}
1/TimeDelta:       {fps:.4}
MoneyAmount:       {scaled_score} $
""".format(**locdef)
    s = sf.String(text,Font=self.debug_info_font,\
        Size=defaults.letter_height_debug_info)
    s.SetPosition(defaults.resolution[0]-302,140)
    s.SetColor(sf.Color.White)
    self.DrawSingle(s)
def test_gc_create1(self):
    """Variant of test_gc_create0 with varsize and mixed containers:
    generation counts must return to baseline after the instances die."""
    gc.collect()
    cnt1 = gc.get_count()
    A = make_class("A", fields=2, gc=True)
    B = make_class("B", varsize=True, gc=True)
    C = make_class("C", fields=2, varsize=True, gc=True)
    b = A([], ())  # NOTE(review): `b` is an A instance despite the name
    c = C(1,2,{1:2,3:4},[1,2])
    b1 = B(1, b)
    a = [b1, c]
    cnt2 = gc.get_count()  # captured but never asserted on
    self.assertEqual(gc.is_tracked(b), True)
    self.assertEqual(gc.is_tracked(b1), True)
    self.assertEqual(gc.is_tracked(c), True)
    del a
    gc.collect()
    cnt3 = gc.get_count()
    self.assertEqual(cnt1, cnt3)
def GET(self):
    """Render a plain-text server status page: process resource usage,
    GC counters, and per-vhost connection/request statistics.  The page
    asks the browser to refresh every 3 seconds."""
    web.header('Content-Type','text/plain', unique=True)
    web.header('Refresh','3')
    s = []
    # Map getrusage's positional fields to names for readable lookups.
    resources = dict(zip( ('utime', 'stime', 'maxrss', 'ixrss','idrss','isrss','minflt','majflt','nswap','inblock','outblock','msgsnd','msgrcv','nsignals','nvcsw','nivcsw'), resource.getrusage(resource.RUSAGE_SELF)))
    pid, pagesize = os.getpid(), resource.getpagesize()  # pagesize currently unused
    vm, rm, sm = meminfo.memory()
    gc0, gc1, gc2 = gc.get_count()
    # Header block: process-wide stats (adjacent literals concatenate).
    s.append(
        '''######## PID:%(pid)i total events:%(event_counter)i python objects, unreachable:%(gc_unreachable)i total:%(gc_objects)i dirty:%(gc0)i/%(gc1)i/%(gc2)i file descriptors:%(file_descriptors)i/%(max_descriptors)i \n'''
        '''######## virt memory:%(vm).0fMiB res memory:%(rm).0fMiB sys cpu time:%(sys).3fs user:%(user).3fs context switches, voluntary:%(vcs)i involuntary:%(ics)i \n''' % {
            'pid':pid,
            'event_counter':server.event_counter,
            'gc0': gc0, 'gc1': gc1, 'gc2': gc2,
            'gc_unreachable': len(gc.garbage),
            'gc_objects':len(gc.get_objects()),
            'file_descriptors':len(os.listdir('/proc/%i/fd' % pid)),
            'max_descriptors':resource.getrlimit(resource.RLIMIT_NOFILE)[1],
            'vm': vm/1048576.0,
            'rm': rm/1048576.0,
            'sm': sm/1048576.0,
            'vcs':resources['nvcsw'],
            'ics':resources['nivcsw'],
            'sys':resources['stime'],
            'user':resources['utime'],
        },
    )
    # One summary line per virtual host, then one line per live request.
    for vhostdata in server.vhosts:
        s.append(''' **** %(host)s:%(port)i connections:%(counter)i broken:%(broken)i cpu_time:%(cpu_time).3fs ****\n''' % vhostdata)
        for clock, req in vhostdata['requests'].items():
            s.append('''    %(host)s:%(port)5s "%(method)s %(url)s %(http)s" %(status_code)3i %(content_length)8i/%(chunks)i/%(context_switches)i (%(cpu_time).3fms)\n''' % {
                'host':req.environ['REMOTE_HOST'],
                'port':req.environ['REMOTE_PORT'],
                'method': req.environ['REQUEST_METHOD'],
                'url': req.get_url(),
                'http': req.environ['SERVER_PROTOCOL'],
                'status_code': req.out_dict['code'],
                'content_length':req.content_length,
                'chunks':req.chunks_number,
                'context_switches':req.context_switches,
                'cpu_time':req.all_cpu_time * 1000.0,
            })
    s.append('\n\n')
    # Render time measured from the WSGI entry timestamp.
    s.append("took %.3fms\n" % ((time.time()-web.ctx.environ['wsgi.now']())*1000.0, ))
    return ''.join(s)