def test_main():
    enabled = gc.isenabled()
    gc.disable()
    if not test_support.due_to_ironpython_incompatibility(
            "http://tkbgitvstfat01:8080/WorkItemTracking/WorkItem.aspx?artifactMoniker=314470"):
        assert not gc.isenabled()
    debug = gc.get_debug()
    if not test_support.due_to_ironpython_incompatibility(
            "http://tkbgitvstfat01:8080/WorkItemTracking/WorkItem.aspx?artifactMoniker=314470"):
        gc.set_debug(debug & ~gc.DEBUG_LEAK)  # this test is supposed to leak

    try:
        gc.collect()  # Delete 2nd generation garbage
        run_unittest(GCTests, GCTogglingTests)
    finally:
        if not test_support.due_to_ironpython_incompatibility(
                "http://tkbgitvstfat01:8080/WorkItemTracking/WorkItem.aspx?artifactMoniker=314470"):
            gc.set_debug(debug)
        # test gc.enable() even if GC is disabled by default
        if verbose:
            print "restoring automatic collection"
        # make sure to always test gc.enable()
        gc.enable()
        assert gc.isenabled()
        if not enabled:
            gc.disable()
def test_no_garbage(self):
    device, data_format = device_and_data_format()
    model = resnet50.ResNet50(data_format)
    optimizer = tf.train.GradientDescentOptimizer(0.1)
    with tf.device(device):
        images, labels = random_batch(2, data_format)
        gc.disable()
        # Warm up. Note that this first run does create significant amounts of
        # garbage to be collected. The hope is that this is a build-only effect,
        # and a subsequent training loop will create nothing which needs to be
        # collected.
        apply_gradients(model, optimizer,
                        compute_gradients(model, images, labels))
        gc.collect()
        previous_gc_debug_flags = gc.get_debug()
        gc.set_debug(gc.DEBUG_SAVEALL)
        for _ in range(2):
            # Run twice to ensure that garbage that is created on the first
            # iteration is no longer accessible.
            apply_gradients(model, optimizer,
                            compute_gradients(model, images, labels))
        gc.collect()
        # There should be no garbage requiring collection.
        self.assertEqual(0, len(gc.garbage))
        gc.set_debug(previous_gc_debug_flags)
        gc.enable()
def test_saveall(self):
    if test_support.due_to_ironpython_incompatibility(
            "http://tkbgitvstfat01:8080/WorkItemTracking/WorkItem.aspx?artifactMoniker=314470"):
        return
    # Verify that cyclic garbage like lists show up in gc.garbage if the
    # SAVEALL option is enabled.

    # First make sure we don't save away other stuff that just happens to
    # be waiting for collection.
    gc.collect()
    # if this fails, someone else created immortal trash
    self.assertEqual(gc.garbage, [])

    L = []
    L.append(L)
    id_L = id(L)

    debug = gc.get_debug()
    gc.set_debug(debug | gc.DEBUG_SAVEALL)
    del L
    gc.collect()
    gc.set_debug(debug)

    self.assertEqual(len(gc.garbage), 1)
    obj = gc.garbage.pop()
    self.assertEqual(id(obj), id_L)
def _init_leak(self):
    if not self.leak:
        return
    if self.heap:
        return
    if self.gc:
        gc.set_debug(gc.DEBUG_LEAK)
    if guppy:
        self.heap = guppy.hpy()
        self.heap.setrelheap()
def main():
    import gc
    gc.set_debug(gc.DEBUG_STATS)
    a = wx.PySimpleApp()
    test_threads()
    a.MainLoop()
def dump_garbage():
    """
    Show us what's in the garbage!

    Make a leak:
    >>> l = []
    >>> l.append(l)
    >>> del l

    Show the dirt:
    >>> dump_garbage()
    GARBAGE:
    list: [[...]]
    """
    gc.enable()
    gc.set_debug(gc.DEBUG_LEAK)

    # force collection
    gc.collect()
    print "GARBAGE:"
    for x in gc.garbage:
        s = repr(x)
        if len(s) > 80:
            # TODO: create a human-readable repr for long snippets.
            s = s[:30] + ' ... ' + s[-30:]
        print '  %s: %s' % (type(x).__name__, s)
def checkMemory():
    """as the name says"""
    # pylint: disable=too-many-branches
    if not Debug.gc:
        return
    gc.set_threshold(0)
    gc.set_debug(gc.DEBUG_LEAK)
    gc.enable()
    print('collecting {{{')
    gc.collect()  # we want to eliminate all output
    print('}}} done')

    # code like this may help to find specific things
    if True:  # pylint: disable=using-constant-test
        interesting = ('Client', 'Player', 'Game')
        for obj in gc.garbage:
            if hasattr(obj, 'cell_contents'):
                obj = obj.cell_contents
            if not any(x in repr(obj) for x in interesting):
                continue
            for referrer in gc.get_referrers(obj):
                if referrer is gc.garbage:
                    continue
                if hasattr(referrer, 'cell_contents'):
                    referrer = referrer.cell_contents
                if referrer.__class__.__name__ in interesting:
                    for referent in gc.get_referents(referrer):
                        print('%s refers to %s' % (referrer, referent))
                else:
                    print('referrer of %s/%s is: id=%s type=%s %s' %
                          (type(obj), obj, id(referrer), type(referrer), referrer))
    print('unreachable:%s' % gc.collect())
    gc.set_debug(0)
def __init__(self):
    self.dirname = os.path.split(__file__)[0]
    sys.path.append(self.dirname)
    gc.set_debug(gc.DEBUG_LEAK)
    import runtests
    self.module = runtests
    self.done = []
def global_setup(self):
    # Set garbage collection debug flags
    self.old_flags = gc.get_debug()
    new_flags = 0
    for op in self.flags:
        new_flags |= getattr(gc, op)
    gc.set_debug(new_flags)
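# A minimal usage sketch of the flag-combining pattern above, standalone.
# The `flags` list of gc constant names is a hypothetical input for
# illustration; the save/restore around the observed code mirrors how
# global_setup/old_flags are meant to be used.
import gc

flags = ['DEBUG_STATS', 'DEBUG_COLLECTABLE']  # assumed example input

old_flags = gc.get_debug()
combined = 0
for name in flags:
    combined |= getattr(gc, name)  # e.g. gc.DEBUG_STATS | gc.DEBUG_COLLECTABLE
gc.set_debug(combined)
try:
    pass  # run the code under observation here
finally:
    gc.set_debug(old_flags)  # restore the saved flags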
def tearDown(me):
    me.meta.drop_all()
    me.meta = None
    # destroy ALL caches
    clear_mappers()
    if not config.reuse_db:
        me.db.dispose()
        me.db = None

    if config.gc:
        import gc
        gc.set_debug(gc.DEBUG_UNCOLLECTABLE | gc.DEBUG_SAVEALL
                     | gc.DEBUG_INSTANCES | gc.DEBUG_STATS)  # OBJECTS
        gc.collect()

    if config.leak:
        from sqlalchemy.orm import mapperlib
        from sqlalchemy.orm.session import _sessions
        from sqlalchemy.util import ArgSingleton
        print "MAPPER REG:", len(dict(
            getattr(mapperlib, 'mapper_registry',
                    getattr(mapperlib, '_mapper_registry', None))))
        print "SESSION REG:", len(dict(_sessions))
        print "CLASSKEYS:", len(dict(ArgSingleton.instances))
        if config.gc:
            i = 0
            for x in gc.get_objects():
                if isinstance(x, mapperlib.Mapper) or isinstance(x, MetaData):
                    i += 1
                    # print x
            print 'gc/SA objects:', i
def testNoReferenceCyclesAfterCall(self):

    class ChildNetwork(network.Network):

        def __init__(self, name=None):
            super(ChildNetwork, self).__init__(name=name)

        def call(self, x):
            return x * 2.

    class ParentNetwork(network.Network):

        def __init__(self, name=None):
            super(ParentNetwork, self).__init__(name=name)
            self.l1 = self.track_layer(ChildNetwork())

        def call(self, x):
            return self.l1(x)

    one = constant_op.constant([[1.0]])
    gc.disable()
    gc.collect()
    previous_gc_debug_flags = gc.get_debug()
    gc.set_debug(gc.DEBUG_SAVEALL)
    preexisting = len(gc.garbage)
    net = ParentNetwork()
    net(one)
    del net
    gc.collect()
    # There should be no additional garbage requiring collection.
    self.assertEqual(preexisting, len(gc.garbage))
    gc.set_debug(previous_gc_debug_flags)
    gc.enable()
def reportMemoryLeaks():
    if printUnreachableNum() == 0:
        return
    import bz2, gc
    gc.set_debug(gc.DEBUG_SAVEALL)
    gc.collect()
    uncompressedReport = ''
    for s in gc.garbage:
        try:
            uncompressedReport += str(s) + '&'
        except TypeError:
            pass
    reportdata = bz2.compress(uncompressedReport, 9)
    headers = {'Content-type': 'application/x-bzip2',
               'Accept': 'text/plain'}
    try:
        baseURL = patcherVer()[0].split('/lo')[0]
    except IndexError:
        print 'Base URL not available for leak submit'
        return
    basePort = 80
    if baseURL.count(':') == 2:
        basePort = baseURL[-4:]
        baseURL = baseURL[:-5]
    baseURL = baseURL[7:]
    if basePort != 80:
        finalURL = ('http://' + baseURL + ':' + str(basePort) +
                    '/logging/memory_leak.php?leakcount=' + str(printUnreachableNum()))
    else:
        finalURL = ('http://' + baseURL +
                    '/logging/memory_leak.php?leakcount=' + str(printUnreachableNum()))
    reporthttp = HTTPClient()
    reporthttp.postForm(URLSpec(finalURL), reportdata)
def dump_garbage_2(verbose=True, generation=2):
    import gc
    from weakref import ProxyType, ReferenceType
    gc.set_debug(gc.DEBUG_LEAK | gc.DEBUG_STATS)
    if generation is None:
        gc.collect()
    elif generation in xrange(0, 3):
        gc.collect(generation)
    else:
        LOG_ERROR('Value of generation is invalid. Generation may be an integer '
                  'specifying which generation to collect (from 0 to 2)')
        return
    if verbose:
        print '========================================='
        print '##DUMPSTART'
    del gc.garbage[:]
    d = defaultdict(lambda: 0)
    for i in gc.get_objects():
        if not isinstance(i, ProxyType) and not isinstance(i, ReferenceType):
            if hasattr(i, '__class__'):
                t = i.__class__
            else:
                t = type(i)
            d[t] += 1
    if verbose:
        for t, cnt in d.iteritems():
            print '%d %s' % (cnt, t)
    d.clear()
    del gc.garbage[:]
    del d
    if verbose:
        print '##DUMPEND'
        print '========================================='
def testMemoryLeak(self):
    config = {
        'init_config': {},
        'instances': [
            {'url': "localhost"},
            {'url': "localhost", 'port': PORT, 'tags': ['instance:mytag']},
            {'url': "localhost", 'port': PORT, 'tags': ['foo']},
        ]
    }
    self.run_check(config)

    import gc
    if not self.is_travis():
        gc.set_debug(gc.DEBUG_LEAK)
    gc.collect()
    try:
        start = len(gc.garbage)
        for i in range(10):
            self.run_check(config)
            time.sleep(0.3)
            self.check.get_metrics()
        end = len(gc.garbage)
        self.assertEquals(end - start, 0, gc.garbage)
    finally:
        gc.set_debug(0)
def proc_mgr(self):
    if self.debug:
        gc.set_debug(gc.DEBUG_LEAK)
    while self.running:
        try:
            args = self.proc_queue.get()
            realtimes = []
            i = 0
            for group in self.realtimes:
                for name in self.realtimes[group]:
                    realtimes.append((
                        getattr(self.modules[group], 'prio_' + name, 10),
                        i,
                        getattr(self.modules[group], name)
                    ))
                    i += 1
            heapq.heapify(realtimes)
            stop = False
            while realtimes:
                realtime = heapq.heappop(realtimes)[2]
                try:
                    if realtime(self, args):
                        stop = True
                        break
                except Exception:
                    traceback.print_exc()
            if stop:
                continue
            self.work_queue.put((self.cmdresps.get(args['command'], Bot.nullh),
                                 [self, args]))
        except KeyboardInterrupt:
            self.rawmsg('QUIT :^C at console')
        except Exception:
            traceback.print_exc()
def setup():
    global cfg, main
    cfg = config.Config('bot.config')
    if cfg.getboolean('debug', 'gc'):
        gc.set_debug(gc.DEBUG_LEAK)
    pidfile = open(cfg.pidfile, 'w')
    pidfile.write(str(os.getpid()))
    pidfile.close()
    main = Erebus(cfg)

    autoloads = [mod for mod, yes in cfg.items('autoloads') if int(yes) == 1]
    for mod in autoloads:
        ctlmod.load(main, mod)

    dbsetup()
    c = main.query("SELECT nick, user, bind, authname, authpass FROM bots WHERE active = 1")
    if c:
        rows = c.fetchall()
        c.close()
        for row in rows:
            main.newbot(row['nick'], row['user'], row['bind'],
                        row['authname'], row['authpass'],
                        cfg.host, cfg.port, cfg.realname)
    main.connectall()
def run_pl(self):
    if '--memdebug' in sys.argv:
        gc.set_debug(gc.DEBUG_STATS | gc.DEBUG_INSTANCES | gc.DEBUG_OBJECTS)
    while len(self.queue):
        task = self.queue.popleft()
        try:
            task.execute(self.cache, self.regen)
            task.set_status("complete")
            for t2 in task.allows():
                t2.req_complete(task)
                if t2.status() == "ready":
                    print t2, "ready"
                    # if self.schedule(t2):
                    #     print "because we finished", str(task)
            self.current_pl.todot()
        except Task.DependenciesNotCompleteException:
            for dep in task.dependencies:
                self.schedule(dep)
            self.queue.append(task)
def test():
    if not hasattr(gc, 'get_debug'):
        if verbose:
            print "skipping test_gc: too many GC differences with CPython"
        return
    if verbose:
        print "disabling automatic collection"
    enabled = gc.isenabled()
    gc.disable()
    verify(not gc.isenabled())
    debug = gc.get_debug()
    gc.set_debug(debug & ~gc.DEBUG_LEAK)  # this test is supposed to leak

    try:
        test_all()
    finally:
        gc.set_debug(debug)
        # test gc.enable() even if GC is disabled by default
        if verbose:
            print "restoring automatic collection"
        # make sure to always test gc.enable()
        gc.enable()
        verify(gc.isenabled())
        if not enabled:
            gc.disable()
def cycles_created_by(callable):
    """
    Return graph of cyclic garbage created by the given callable.

    Return an :class:`~refcycle.object_graph.ObjectGraph` representing
    those objects generated by the given callable that can't be collected
    by Python's usual reference-count based garbage collection.

    This includes objects that will eventually be collected by the cyclic
    garbage collector, as well as genuinely unreachable objects that will
    never be collected.

    `callable` should be a callable that takes no arguments; its return
    value (if any) will be ignored.
    """
    with restore_gc_state():
        gc.disable()
        gc.collect()
        gc.set_debug(gc.DEBUG_SAVEALL)
        callable()
        new_object_count = gc.collect()
        if new_object_count:
            objects = gc.garbage[-new_object_count:]
            del gc.garbage[-new_object_count:]
        else:
            objects = []
        return ObjectGraph(objects)
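# A minimal usage sketch for the helper above, assuming the surrounding
# refcycle package is importable. `make_cycle` is a hypothetical callable
# introduced here for illustration; the len() call assumes ObjectGraph
# behaves as a sized container of the captured objects.
def make_cycle():
    # A self-referential list: unreachable after the call, but cyclic,
    # so reference counting alone cannot reclaim it.
    l = []
    l.append(l)

graph = cycles_created_by(make_cycle)
print(len(graph))  # number of objects caught in cyclic garbage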
def PrintLeaks():
    import gc
    print 'LEAKS FOUND:---------------------------------'
    gc.set_debug(gc.DEBUG_LEAK)
    leaks = gc.collect()
    print '%s LEAKS' % leaks
    print
def main ():
    parser = get_options ()
    options = parser.parse_args ()[0]
    if options.garbage:
        gc.set_debug (gc.DEBUG_LEAK)
    test = DLNetSNMPTest (
        hostname = options.hostname,
        version = options.protocol,
        public = options.public,
        private = options.private,
        verbose = options.verbose,
        timeout = options.timeout,
    )
    c = set_config_params (
        CONFIG,
        threaded = options.threaded,
        repeats = options.repeats,
        private = options.community == 'private',
    )
    test.run_tests (c)
    r = pprint.pformat (test.async_results)
    time.sleep (options.sleep)
    print 'PENDING REQUESTS', [len (i.async_requests) for i in test.sessions.values ()]
    print 'ASYNC RESULTS:', r, pprint.pformat (test.async_results)
    test.destroy ()
    if options.garbage:
        del options
        del parser
        print_garbage ()
def recv_mgr(self):
    '''Manage messages received from the server.

    This responds to PINGs immediately and delegates other tasks to
    the work thread.
    '''
    if self.debug:
        gc.set_debug(gc.DEBUG_LEAK)
    cache = b''
    while self.running:
        try:
            try:
                data = self.socket.recv(4096)
            except socket.error:
                traceback.print_exc()
                self.reconnect()
                continue
            if cache:
                data = cache + data
                cache = b''
            if data.endswith(b'\r\n'):
                # Use an iterator to prevent any chance of reprocessing.
                messages = iter(data.split(b'\r\n'))
            else:
                cachelen = data.rfind(b'\r\n')
                if cachelen < 0:
                    # This never should happen.
                    cache += data
                    continue
                else:
                    cache = data[cachelen:]
                    messages = iter(data[:cachelen].split(b'\r\n'))
            for message in messages:
                if not message:
                    continue
                try:
                    message = message.decode()
                except UnicodeDecodeError:
                    message = message.decode('latin1')
                if self.debug:
                    print('<', message)
                m = message_re.match(message)
                if m is None:
                    if self.debug:
                        print('Invalid message from server:', message,
                              file=sys.stderr)
                    continue
                args = m.groupdict()
                if args['response'] is not None:
                    self.work_queue.put((self.handle_numeric, [self, args]))
                    continue
                if args['command'] == 'PING':
                    self.rawmsg('PONG ' + args['params'])
                    continue
                self.proc_queue.put(args)
        except KeyboardInterrupt:
            self.rawmsg('QUIT :^C at console')
        except Exception:
            traceback.print_exc()
def run_server(debug_gc=False, persistent=False):
    """ Main function for rolekit server. Handles D-Bus and GLib mainloop.
    """
    service = None
    if debug_gc:
        from pprint import pformat
        import gc
        gc.enable()
        gc.set_debug(gc.DEBUG_LEAK)

        gc_timeout = 10

        def gc_collect():
            gc.collect()
            if len(gc.garbage) > 0:
                print("\n>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>"
                      ">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n")
                print("GARBAGE OBJECTS (%d):\n" % len(gc.garbage))
                for x in gc.garbage:
                    print(type(x), "\n  ",)
                    print(pformat(x))
                print("\n<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<"
                      "<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\n")
            GLib.timeout_add_seconds(gc_timeout, gc_collect)

    try:
        dbus.mainloop.glib.DBusGMainLoop(set_as_default=True)
        bus = dbus.SystemBus()
        name = dbus.service.BusName(DBUS_INTERFACE, bus=bus)
        service = RoleD(name, DBUS_PATH, persistent=persistent)

        mainloop = GLib.MainLoop()
        slip.dbus.service.set_mainloop(mainloop)
        if debug_gc:
            GLib.timeout_add_seconds(gc_timeout, gc_collect)

        # use unix_signal_add if available, else unix_signal_add_full
        if hasattr(GLib, 'unix_signal_add'):
            unix_signal_add = GLib.unix_signal_add
        else:
            unix_signal_add = GLib.unix_signal_add_full

        unix_signal_add(GLib.PRIORITY_HIGH, signal.SIGHUP, sighup, None)
        unix_signal_add(GLib.PRIORITY_HIGH, signal.SIGTERM, sigterm, mainloop)

        mainloop.run()
    except KeyboardInterrupt as e:
        pass
    except SystemExit as e:
        log.error("Raising SystemExit in run_server")
    except Exception as e:
        log.error("Exception %s: %s", e.__class__.__name__, str(e))

    if service:
        service.stop()
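# For context, a minimal standalone sketch of the same periodic-collect idea
# without the D-Bus/GLib machinery. threading.Timer is a swapped-in scheduler
# for illustration, not what rolekit uses; it re-arms itself the way the
# GLib.timeout_add_seconds re-registration does above.
import gc
import threading

def report_garbage(interval=10):
    gc.collect()
    if gc.garbage:
        print("GARBAGE OBJECTS (%d):" % len(gc.garbage))
        for x in gc.garbage:
            print(type(x), repr(x)[:80])
    # re-arm for the next report
    threading.Timer(interval, report_garbage, args=(interval,)).start()

gc.enable()
gc.set_debug(gc.DEBUG_LEAK)
report_garbage()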
def async_worker(q, npz_dir, minibatch_size, apply_normalization):
    print "Hello from EvalTraining async_worker process!!!"
    gc.set_debug(gc.DEBUG_STATS)
    loader = NPZ.RandomizingLoader(npz_dir, minibatch_size)
    names = ('feature_planes', 'final_scores')
    while True:
        feed_dict_strings = build_feed_dict_strings(loader, apply_normalization)
        q.put(feed_dict_strings, block=True)  # will block if queue is full
def tearDown(self):
    gc.collect()
    for obj in gc.garbage:
        if type(obj) is ObjectWithDel:
            obj.public = None
            gc.garbage.remove(obj)
    gc.set_debug(self.saved_flags)
    gc.enable()
def test_debug(self):
    self.assertEqual(0, REGISTRY.get_sample_value('python_gc_debug'))
    try:
        gc.set_debug(gc.DEBUG_STATS)
        self.assertEqual(gc.DEBUG_STATS,
                         REGISTRY.get_sample_value('python_gc_debug'))
    finally:
        gc.set_debug(0)
def testMemLeak(self):
    question = ["-rw-r--r-- 1 md staff 13500 Dec 25 08:44 ftpparse.c",
                "-rw-r--r-- 1 md staff 1719 Dec 25 08:44 ftpparse.h"]
    gc.set_debug(gc.DEBUG_LEAK)
    for i in range(2**16):
        answer = ftpparse.ftpparse(question)  # this once produced a huge output
    gc.collect()
def main():
    parser = argparse.ArgumentParser(description="access to pythons built-in garbage collector")
    parser.add_argument("command", help="what to do",
                        choices=["enable", "disable", "status", "collect",
                                 "threshold", "debug", "break"],
                        action="store")
    parser.add_argument("args", help="argument for command", action="store", nargs="*")
    ns = parser.parse_args()
    if ns.command == "enable":
        gc.enable()
    elif ns.command == "disable":
        gc.disable()
    elif ns.command == "collect":
        gc.collect()
    elif ns.command == "status":
        print "GC enabled: {s}".format(s=gc.isenabled())
        tracked = gc.get_objects()
        n = len(tracked)
        print "Tracked objects: {n}".format(n=n)
        size = sum([sys.getsizeof(e) for e in tracked])
        del tracked  # this list may be big, better delete it
        print "Size of tracked objects: {s} bytes".format(s=size)
        print "Garbage: {n}".format(n=len(gc.garbage))
        gsize = sum([sys.getsizeof(e) for e in gc.garbage])
        print "Size of garbage: {s} bytes".format(s=gsize)
        print "Debug: {d}".format(d=gc.get_debug())
    elif ns.command == "threshold":
        if len(ns.args) == 0:
            print "Threshold:\n   G1: {}\n   G2: {}\n   G3: {}".format(*gc.get_threshold())
        elif len(ns.args) > 3:
            print "Error: too many arguments for threshold!"
            sys.exit(1)
        else:
            try:
                ts = tuple([int(e) for e in ns.args])
            except ValueError:
                print "Error: expected arguments to be integer!"
                sys.exit(1)
            gc.set_threshold(*ts)
    elif ns.command == "debug":
        if len(ns.args) == 0:
            print "Debug: {d}".format(d=gc.get_debug())
        elif len(ns.args) == 1:
            try:
                flag = int(ns.args[0])
            except ValueError:
                print "Error: expected argument to be an integer!"
                sys.exit(1)
            gc.set_debug(flag)
        else:
            print "Error: expected exactly one argument for debug!"
            sys.exit(1)
    elif ns.command == "break":
        if len(gc.garbage) == 0:
            print "Error: No Garbage found!"
            sys.exit(1)
        else:
            # break reference cycles by stripping attributes from the first
            # garbage object, then clear the garbage list
            for k in dir(gc.garbage[0]):
                try:
                    delattr(gc.garbage[0], k)
                except Exception:
                    pass
            del gc.garbage[:]
def setUp(self):
    # Save gc state and disable it.
    self.enabled = gc.isenabled()
    gc.disable()
    self.debug = gc.get_debug()
    gc.set_debug(0)
    gc.callbacks.append(self.cb1)
    gc.callbacks.append(self.cb2)
    self.othergarbage = []
def printUnreachableLen():
    import gc
    gc.set_debug(gc.DEBUG_SAVEALL)
    gc.collect()
    unreachableL = []
    for it in gc.garbage:
        unreachableL.append(it)
    return len(str(unreachableL))
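# A variant sketch that restores the previous debug flags afterwards; the
# helper above leaves DEBUG_SAVEALL enabled globally, and whether that is
# desired is an assumption on my part.
import gc

def print_unreachable_len_restoring():
    saved = gc.get_debug()
    gc.set_debug(gc.DEBUG_SAVEALL)   # keep collected objects in gc.garbage
    try:
        gc.collect()
        return len(str(list(gc.garbage)))
    finally:
        gc.set_debug(saved)          # put the old flags back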
def main():
    # Be strict about most warnings (This is set in our test running
    # scripts to catch import-time warnings, but set it again here to
    # be sure). This also turns on warnings that are ignored by
    # default, including DeprecationWarnings and python 3.2's
    # ResourceWarnings.
    warnings.filterwarnings("error")
    # setuptools sometimes gives ImportWarnings about things that are on
    # sys.path even if they're not being used.
    warnings.filterwarnings("ignore", category=ImportWarning)
    # Tornado generally shouldn't use anything deprecated, but some of
    # our dependencies do (last match wins).
    warnings.filterwarnings("ignore", category=DeprecationWarning)
    warnings.filterwarnings("error", category=DeprecationWarning,
                            module=r"tornado\..*")
    warnings.filterwarnings("ignore", category=PendingDeprecationWarning)
    warnings.filterwarnings("error", category=PendingDeprecationWarning,
                            module=r"tornado\..*")
    # The unittest module is aggressive about deprecating redundant methods,
    # leaving some without non-deprecated spellings that work on both
    # 2.7 and 3.2
    warnings.filterwarnings("ignore", category=DeprecationWarning,
                            message="Please use assert.* instead")
    warnings.filterwarnings(
        "ignore",
        category=PendingDeprecationWarning,
        message="Please use assert.* instead",
    )
    # Twisted 15.0.0 triggers some warnings on py3 with -bb.
    warnings.filterwarnings("ignore", category=BytesWarning,
                            module=r"twisted\..*")
    if (3,) < sys.version_info < (3, 6):
        # Prior to 3.6, async ResourceWarnings were rather noisy
        # and even
        # `python3.4 -W error -c 'import asyncio; asyncio.get_event_loop()'`
        # would generate a warning.
        warnings.filterwarnings("ignore", category=ResourceWarning,
                                module=r"asyncio\..*")
    # This deprecation warning is introduced in Python 3.8 and is
    # triggered by pycurl. Unfortunately, because it is raised in the C
    # layer it can't be filtered by module and we must match the
    # message text instead (Tornado's C module uses PY_SSIZE_T_CLEAN
    # so it's not at risk of running into this issue).
    warnings.filterwarnings(
        "ignore",
        category=DeprecationWarning,
        message="PY_SSIZE_T_CLEAN will be required",
    )

    logging.getLogger("tornado.access").setLevel(logging.CRITICAL)

    define(
        "httpclient",
        type=str,
        default=None,
        callback=lambda s: AsyncHTTPClient.configure(
            s, defaults=dict(allow_ipv6=False)),
    )
    define("httpserver", type=str, default=None, callback=HTTPServer.configure)
    define("resolver", type=str, default=None, callback=Resolver.configure)
    define(
        "debug_gc",
        type=str,
        multiple=True,
        help="A comma-separated list of gc module debug constants, "
        "e.g. DEBUG_STATS or DEBUG_COLLECTABLE,DEBUG_OBJECTS",
        callback=lambda values: gc.set_debug(
            reduce(operator.or_, (getattr(gc, v) for v in values))),
    )

    def set_locale(x):
        locale.setlocale(locale.LC_ALL, x)

    define("locale", type=str, default=None, callback=set_locale)

    log_counter = LogCounter()
    add_parse_callback(
        lambda: logging.getLogger().handlers[0].addFilter(log_counter))

    # Certain errors (especially "unclosed resource" errors raised in
    # destructors) go directly to stderr instead of logging. Count
    # anything written by anything but the test runner as an error.
    orig_stderr = sys.stderr
    counting_stderr = CountingStderr(orig_stderr)
    sys.stderr = counting_stderr  # type: ignore

    import tornado.testing

    kwargs = {}

    # HACK: unittest.main will make its own changes to the warning
    # configuration, which may conflict with the settings above
    # or command-line flags like -bb. Passing warnings=False
    # suppresses this behavior, although this looks like an implementation
    # detail. http://bugs.python.org/issue15626
    kwargs["warnings"] = False

    kwargs["testRunner"] = test_runner_factory(orig_stderr)
    try:
        tornado.testing.main(**kwargs)
    finally:
        # The tests should run clean; consider it a failure if they
        # logged anything at info level or above.
        if (log_counter.info_count > 0 or
                log_counter.warning_count > 0 or
                log_counter.error_count > 0 or
                counting_stderr.byte_count > 0):
            logging.error(
                "logged %d infos, %d warnings, %d errors, and %d bytes to stderr",
                log_counter.info_count,
                log_counter.warning_count,
                log_counter.error_count,
                counting_stderr.byte_count,
            )
            sys.exit(1)
def main():
    # The -W command-line option does not work in a virtualenv with
    # python 3 (as of virtualenv 1.7), so configure warnings
    # programmatically instead.
    import warnings
    # Be strict about most warnings. This also turns on warnings that are
    # ignored by default, including DeprecationWarnings and
    # python 3.2's ResourceWarnings.
    warnings.filterwarnings("error")
    # setuptools sometimes gives ImportWarnings about things that are on
    # sys.path even if they're not being used.
    warnings.filterwarnings("ignore", category=ImportWarning)
    # Tornado generally shouldn't use anything deprecated, but some of
    # our dependencies do (last match wins).
    warnings.filterwarnings("ignore", category=DeprecationWarning)
    warnings.filterwarnings("error", category=DeprecationWarning,
                            module=r"tornado\..*")
    warnings.filterwarnings("ignore", category=PendingDeprecationWarning)
    warnings.filterwarnings("error", category=PendingDeprecationWarning,
                            module=r"tornado\..*")
    # The unittest module is aggressive about deprecating redundant methods,
    # leaving some without non-deprecated spellings that work on both
    # 2.7 and 3.2
    warnings.filterwarnings("ignore", category=DeprecationWarning,
                            message="Please use assert.* instead")
    # unittest2 0.6 on py26 reports these as PendingDeprecationWarnings
    # instead of DeprecationWarnings.
    warnings.filterwarnings("ignore", category=PendingDeprecationWarning,
                            message="Please use assert.* instead")
    # Twisted 15.0.0 triggers some warnings on py3 with -bb.
    warnings.filterwarnings("ignore", category=BytesWarning,
                            module=r"twisted\..*")

    logging.getLogger("tornado.access").setLevel(logging.CRITICAL)

    define('httpclient', type=str, default=None,
           callback=lambda s: AsyncHTTPClient.configure(
               s, defaults=dict(allow_ipv6=False)))
    define('ioloop', type=str, default=None)
    define('ioloop_time_monotonic', default=False)
    define('resolver', type=str, default=None,
           callback=Resolver.configure)
    define('debug_gc', type=str, multiple=True,
           help="A comma-separated list of gc module debug constants, "
           "e.g. DEBUG_STATS or DEBUG_COLLECTABLE,DEBUG_OBJECTS",
           callback=lambda values: gc.set_debug(
               reduce(operator.or_, (getattr(gc, v) for v in values))))
    define('locale', type=str, default=None,
           callback=lambda x: locale.setlocale(locale.LC_ALL, x))

    def configure_ioloop():
        kwargs = {}
        if options.ioloop_time_monotonic:
            from tornado.platform.auto import monotonic_time
            if monotonic_time is None:
                raise RuntimeError("monotonic clock not found")
            kwargs['time_func'] = monotonic_time
        if options.ioloop or kwargs:
            IOLoop.configure(options.ioloop, **kwargs)
    add_parse_callback(configure_ioloop)

    log_counter = LogCounter()
    add_parse_callback(
        lambda: logging.getLogger().handlers[0].addFilter(log_counter))

    import tornado.testing
    kwargs = {}
    if sys.version_info >= (3, 2):
        # HACK: unittest.main will make its own changes to the warning
        # configuration, which may conflict with the settings above
        # or command-line flags like -bb. Passing warnings=False
        # suppresses this behavior, although this looks like an implementation
        # detail. http://bugs.python.org/issue15626
        kwargs['warnings'] = False
    kwargs['testRunner'] = TornadoTextTestRunner
    try:
        tornado.testing.main(**kwargs)
    finally:
        # The tests should run clean; consider it a failure if they logged
        # any warnings or errors. We'd like to ban info logs too, but
        # we can't count them cleanly due to interactions with LogTrapTestCase.
        if log_counter.warning_count > 0 or log_counter.error_count > 0:
            logging.error("logged %d warnings and %d errors",
                          log_counter.warning_count, log_counter.error_count)
            sys.exit(1)
def __init__(self, connectMethod, config, hasOwnerView=False, threadedNet=None):
    assert self.notify.debugCall()
    if threadedNet is None:
        # Default value.
        threadedNet = config.GetBool('threaded-net', False)

    # let the C connection repository know whether we're supporting
    # 'owner' views of distributed objects (i.e. 'receives ownrecv',
    # 'I own this object and have a separate view of it regardless of
    # where it currently is located')
    CConnectionRepository.__init__(self, hasOwnerView, threadedNet)
    self.setWantMessageBundling(config.GetBool('want-message-bundling', 1))
    # DoInterestManager.__init__ relies on CConnectionRepository being
    # initialized
    DoInterestManager.__init__(self)
    DoCollectionManager.__init__(self)
    self.setPythonRepository(self)

    # Create a unique ID number for each ConnectionRepository in
    # the world, helpful for sending messages specific to each one.
    self.uniqueId = hash(self)

    # Accept this hook so that we can respond to lost-connection
    # events in the main thread, instead of within the network
    # thread (if there is one).
    self.accept(self._getLostConnectionEvent(), self.lostConnection)

    self.config = config

    if self.config.GetBool('verbose-repository'):
        self.setVerbose(1)

    # Set this to 'http' to establish a connection to the server
    # using the HTTPClient interface, which ultimately uses the
    # OpenSSL socket library (even though SSL is not involved).
    # This is not as robust a socket library as NET's, but the
    # HTTPClient interface does a good job of negotiating the
    # connection over an HTTP proxy if one is in use.
    #
    # Set it to 'net' to use Panda's net interface
    # (e.g. QueuedConnectionManager, etc.) to establish the
    # connection. This is a higher-level layer built on top of
    # the low-level "native net" library. There is no support for
    # proxies. This is a good, general choice.
    #
    # Set it to 'native' to use Panda's low-level native net
    # interface directly. This is much faster than either http or
    # net for high-bandwidth (e.g. server) applications, but it
    # doesn't support the simulated delay via the start_delay()
    # call.
    #
    # Set it to 'default' to use an appropriate interface
    # according to the type of ConnectionRepository we are
    # creating.
    userConnectMethod = self.config.GetString('connect-method', 'default')
    if userConnectMethod == 'http':
        connectMethod = self.CM_HTTP
    elif userConnectMethod == 'net':
        connectMethod = self.CM_NET
    elif userConnectMethod == 'native':
        connectMethod = self.CM_NATIVE

    self.connectMethod = connectMethod
    if self.connectMethod == self.CM_HTTP:
        self.notify.info("Using connect method 'http'")
    elif self.connectMethod == self.CM_NET:
        self.notify.info("Using connect method 'net'")
    elif self.connectMethod == self.CM_NATIVE:
        self.notify.info("Using connect method 'native'")

    self.connectHttp = None
    self.http = None

    # This DatagramIterator is constructed once, and then re-used
    # each time we read a datagram.
    self.private__di = PyDatagramIterator()

    self.recorder = None
    self.readerPollTaskObj = None

    # This is the string that is appended to symbols read from the
    # DC file. The AIRepository will redefine this to 'AI'.
    self.dcSuffix = ''

    self._serverAddress = ''

    if self.config.GetBool('gc-save-all', 1):
        # set gc to preserve every object involved in a cycle, even ones that
        # would normally be freed automatically during garbage collect
        # allows us to find and fix these cycles, reducing or eliminating the
        # need to run garbage collects
        # garbage collection CPU usage is O(n), n = number of Python objects
        gc.set_debug(gc.DEBUG_SAVEALL)

    if self.config.GetBool('want-garbage-collect-task', 1):
        # manual garbage-collect task
        taskMgr.add(self._garbageCollect, self.GarbageCollectTaskName, 200)
        # periodically increase gc threshold if there is no garbage
        taskMgr.doMethodLater(
            self.config.GetFloat('garbage-threshold-adjust-delay', 5 * 60.),
            self._adjustGcThreshold, self.GarbageThresholdTaskName)

    self._gcDefaultThreshold = gc.get_threshold()
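# A self-contained sketch of the DEBUG_SAVEALL behavior that the gc-save-all
# comment above relies on, independent of Panda3D. The Node class and the
# two-object cycle are illustrative only.
import gc

gc.set_debug(gc.DEBUG_SAVEALL)  # collected objects land in gc.garbage instead of being freed

class Node(object):
    pass

a, b = Node(), Node()
a.other, b.other = b, a  # a two-object reference cycle
del a, b

gc.collect()
# Every object that was in the cycle is preserved for inspection.
for obj in gc.garbage:
    print(type(obj))

gc.set_debug(0)
del gc.garbage[:]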
#print UNI('안녕하세요')

def collect_and_show_garbage():
    "Show what garbage is present."
    print "Collecting..."
    n = gc.collect()
    print "Unreachable objects:", n
    if n > 0:
        print "Garbage:"
        for i in range(n):
            print "[%d] %s" % (i, gc.garbage[i])

if __name__ == "__main__":
    import gc
    gc.set_debug(gc.DEBUG_LEAK)
    print "before"
    collect_and_show_garbage()
    print "testing..."
    print "-" * 79
    test()
    print '\n', "-" * 79
    print "after"
    collect_and_show_garbage()
    raw_input("Hit any key to close this window...")
def _profiler(req):
    """ This handler wraps the default handler with a profiler.
    Profiling data is written into
    CFG_TMPDIR/invenio-profile-stats-datetime.raw, and
    is displayed at the bottom of the webpage.
    To use, add profile=1 to your url. To change the sorting algorithm you
    can provide profile=algorithm_name. You can add more than one
    profile requirement like ?profile=time&profile=cumulative.
    The list of available algorithms is displayed at the end of the profile.
    """
    args = {}
    if req.args:
        args = cgi.parse_qs(req.args)
    if 'profile' in args:
        if not isUserSuperAdmin(collect_user_info(req)):
            return _handler(req)

        if 'memory' in args['profile']:
            gc.set_debug(gc.DEBUG_LEAK)
            ret = _handler(req)
            req.write("\n<pre>%s</pre>" % gc.garbage)
            gc.collect()
            req.write("\n<pre>%s</pre>" % gc.garbage)
            gc.set_debug(0)
            return ret

        from cStringIO import StringIO
        try:
            import pstats
        except ImportError:
            ret = _handler(req)
            req.write("<pre>%s</pre>" % "The Python Profiler is not installed!")
            return ret
        import datetime
        date = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
        filename = '%s/invenio-profile-stats-%s.raw' % (CFG_TMPDIR, date)
        existing_sorts = pstats.Stats.sort_arg_dict_default.keys()
        required_sorts = []
        profile_dump = []
        for sort in args['profile']:
            if sort not in existing_sorts:
                sort = 'cumulative'
            if sort not in required_sorts:
                required_sorts.append(sort)
        if sys.hexversion < 0x02050000:
            import hotshot
            import hotshot.stats
            pr = hotshot.Profile(filename)
            ret = pr.runcall(_handler, req)
            for sort_type in required_sorts:
                tmp_out = sys.stdout
                sys.stdout = StringIO()
                hotshot.stats.load(filename).strip_dirs().sort_stats(sort_type).print_stats()
                # pylint: disable=E1103
                # This is a hack. sys.stdout was replaced by a StringIO.
                profile_dump.append(sys.stdout.getvalue())
                # pylint: enable=E1103
                sys.stdout = tmp_out
        else:
            import cProfile
            pr = cProfile.Profile()
            ret = pr.runcall(_handler, req)
            pr.dump_stats(filename)
            for sort_type in required_sorts:
                strstream = StringIO()
                pstats.Stats(filename, stream=strstream).strip_dirs().sort_stats(sort_type).print_stats()
                profile_dump.append(strstream.getvalue())
        profile_dump = '\n'.join(profile_dump)
        profile_dump += '\nYou can use profile=%s or profile=memory' % existing_sorts
        req.write("\n<pre>%s</pre>" % profile_dump)
        return ret
    elif 'debug' in args and args['debug']:
        # remote_debugger.start(["3"])  # example starting debugger on demand
        if remote_debugger:
            debug_starter = remote_debugger.get_debugger(args['debug'])
            if debug_starter:
                try:
                    debug_starter()
                except Exception, msg:
                    # TODO - should register_exception?
                    raise Exception('Cannot start the debugger %s, please read instructions inside remote_debugger module. %s' % (debug_starter.__name__, msg))
            else:
                raise Exception('Debugging requested, but no debugger registered: "%s"' % args['debug'])
        return _handler(req)
def main():
    gc.set_threshold(*renpy.config.gc_thresholds)

    log_clock("Bootstrap to the start of init.init")

    renpy.game.exception_info = 'Before loading the script.'

    # Clear the line cache, since the script may have changed.
    linecache.clearcache()

    # Get ready to accept new arguments.
    renpy.arguments.pre_init()

    # Init the screen language parser.
    renpy.sl2.slparser.init()

    # Init the config after load.
    renpy.config.init()

    # Reset live2d if it exists.
    try:
        renpy.gl2.live2d.reset()
    except Exception:
        pass

    # Set up variants.
    choose_variants()
    renpy.display.touch = "touch" in renpy.config.variants

    log_clock("Early init")

    # Note the game directory.
    game.basepath = renpy.config.gamedir
    renpy.config.searchpath = [ renpy.config.gamedir ]

    # Find the common directory.
    commondir = __main__.path_to_common(renpy.config.renpy_base)  # E1101 @UndefinedVariable

    if os.path.isdir(commondir):
        renpy.config.searchpath.append(commondir)
        renpy.config.commondir = commondir
    else:
        renpy.config.commondir = None

    # Add path from env variable, if any
    if "RENPY_SEARCHPATH" in os.environ:
        renpy.config.searchpath.extend(os.environ["RENPY_SEARCHPATH"].split("::"))

    if renpy.android:
        renpy.config.commondir = None
        android_searchpath()

    # Load Ren'Py extensions.
    for dir in renpy.config.searchpath:  # @ReservedAssignment
        for fn in os.listdir(dir):
            if fn.lower().endswith(".rpe"):
                load_rpe(dir + "/" + fn)

    # Generate a list of extensions for each archive handler.
    archive_extensions = [ ]
    for handler in renpy.loader.archive_handlers:
        for ext in handler.get_supported_extensions():
            if not (ext in archive_extensions):
                archive_extensions.append(ext)

    # Find archives.
    for dn in renpy.config.searchpath:
        if not os.path.isdir(dn):
            continue
        for i in sorted(os.listdir(dn)):
            base, ext = os.path.splitext(i)
            # Check if the archive does not have any of the extensions in archive_extensions
            if not (ext in archive_extensions):
                continue
            renpy.config.archives.append(base)

    renpy.config.archives.reverse()

    # Initialize archives.
    renpy.loader.index_archives()

    # Start auto-loading.
    renpy.loader.auto_init()

    log_clock("Loader init")

    # Initialize the log.
    game.log = renpy.python.RollbackLog()

    # Initialize the store.
    renpy.store.store = sys.modules['store']  # type: ignore

    # Set up styles.
    game.style = renpy.style.StyleManager()  # @UndefinedVariable
    renpy.store.style = game.style

    # Run init code in its own context. (Don't log.)
    game.contexts = [ renpy.execution.Context(False) ]
    game.contexts[0].init_phase = True

    renpy.execution.not_infinite_loop(60)

    # Load the script.
    renpy.game.exception_info = 'While loading the script.'
    renpy.game.script = renpy.script.Script()

    if renpy.session.get("compile", False):
        renpy.game.args.compile = True  # type: ignore

    # Set up error handling.
    renpy.exports.load_module("_errorhandling")

    if renpy.exports.loadable("tl/None/common.rpym") or renpy.exports.loadable("tl/None/common.rpymc"):
        renpy.exports.load_module("tl/None/common")

    renpy.config.init_system_styles()
    renpy.style.build_styles()  # @UndefinedVariable

    log_clock("Loading error handling")

    # If recompiling everything, remove orphan .rpyc files.
    # Otherwise, will fail in case orphan .rpyc have same
    # labels as in other scripts (usually happens on script rename).
    if (renpy.game.args.command == 'compile') and not (renpy.game.args.keep_orphan_rpyc):  # type: ignore

        for (fn, dn) in renpy.game.script.script_files:

            if dn is None:
                continue

            if not os.path.isfile(os.path.join(dn, fn + ".rpy")):
                try:
                    name = os.path.join(dn, fn + ".rpyc")
                    os.rename(name, name + ".bak")
                except OSError:
                    # This perhaps shouldn't happen since either .rpy or .rpyc should exist
                    pass

        # Update script files list, so that it doesn't contain removed .rpyc's
        renpy.loader.cleardirfiles()
        renpy.game.script.scan_script_files()

    # Load all .rpy files.
    renpy.game.script.load_script()  # sets renpy.game.script.
    log_clock("Loading script")

    if renpy.game.args.command == 'load-test':  # type: ignore
        start = time.time()

        for i in range(5):
            print(i)
            renpy.game.script = renpy.script.Script()
            renpy.game.script.load_script()

        print(time.time() - start)
        sys.exit(0)

    renpy.game.exception_info = 'After loading the script.'

    # Find the save directory.
    if renpy.config.savedir is None:
        renpy.config.savedir = __main__.path_to_saves(renpy.config.gamedir)  # E1101 @UndefinedVariable

    if renpy.game.args.savedir:  # type: ignore
        renpy.config.savedir = renpy.game.args.savedir  # type: ignore

    # Init preferences.
    game.persistent = renpy.persistent.init()
    game.preferences = game.persistent._preferences

    for i in renpy.game.persistent._seen_translates:  # type: ignore
        if i in renpy.game.script.translator.default_translates:
            renpy.game.seen_translates_count += 1

    if game.persistent._virtual_size:
        renpy.config.screen_width, renpy.config.screen_height = game.persistent._virtual_size

    # Init save locations and loadsave.
    renpy.savelocation.init()

    # We need to be 100% sure we kill the savelocation thread.
    try:

        # Init save slots.
        renpy.loadsave.init()

        log_clock("Loading save slot metadata.")

        # Load persistent data from all save locations.
        renpy.persistent.update()
        game.preferences = game.persistent._preferences
        log_clock("Loading persistent")

        # Clear the list of seen statements in this game.
        game.seen_session = { }

        # Initialize persistent variables.
        renpy.store.persistent = game.persistent  # type: ignore
        renpy.store._preferences = game.preferences  # type: ignore
        renpy.store._test = renpy.test.testast._test  # type: ignore

        if renpy.parser.report_parse_errors():
            raise renpy.game.ParseErrorException()

        renpy.game.exception_info = 'While executing init code:'

        for _prio, node in game.script.initcode:

            if isinstance(node, renpy.ast.Node):
                node_start = time.time()

                renpy.game.context().run(node)

                node_duration = time.time() - node_start

                if node_duration > renpy.config.profile_init:
                    renpy.display.log.write(" - Init at %s:%d took %.5f s.",
                                            node.filename, node.linenumber, node_duration)

            else:
                # An init function.
                node()

        renpy.game.exception_info = 'After initialization, but before game start.'

        # Check if we should simulate android.
        renpy.android = renpy.android or renpy.config.simulate_android  # @UndefinedVariable

        # Re-set up the logging.
        renpy.log.post_init()

        # Run the post init code, if any.
        for i in renpy.game.post_init:
            i()

        renpy.game.script.report_duplicate_labels()

        # Sort the images.
        renpy.display.image.image_names.sort()

        game.persistent._virtual_size = renpy.config.screen_width, renpy.config.screen_height  # type: ignore

        log_clock("Running init code")

        renpy.pyanalysis.load_cache()
        log_clock("Loading analysis data")

        # Analyze the script and compile ATL.
        renpy.game.script.analyze()
        renpy.atl.compile_all()
        log_clock("Analyze and compile ATL")

        # Index the archive files. We should not have loaded an image
        # before this point. (As pygame will not have been initialized.)
        # We need to do this again because the list of known archives
        # may have changed.
        renpy.loader.index_archives()
        log_clock("Index archives")

        # Check some environment variables.
        renpy.game.less_memory = "RENPY_LESS_MEMORY" in os.environ
        renpy.game.less_mouse = "RENPY_LESS_MOUSE" in os.environ
        renpy.game.less_updates = "RENPY_LESS_UPDATES" in os.environ

        renpy.dump.dump(False)
        renpy.game.script.make_backups()
        log_clock("Dump and make backups.")

        # Initialize image cache.
        renpy.display.im.cache.init()
        log_clock("Cleaning cache")

        # Make a clean copy of the store.
        renpy.python.make_clean_stores()
        log_clock("Making clean stores")

        gc.collect(2)

        if gc.garbage:
            del gc.garbage[:]

        if renpy.config.manage_gc:
            gc.set_threshold(*renpy.config.gc_thresholds)

            gc_debug = int(os.environ.get("RENPY_GC_DEBUG", 0))

            if renpy.config.gc_print_unreachable:
                gc_debug |= gc.DEBUG_SAVEALL

            gc.set_debug(gc_debug)

        else:
            gc.set_threshold(700, 10, 10)

        log_clock("Initial gc.")

        # Start debugging file opens.
        renpy.debug.init_main_thread_open()

        # (Perhaps) Initialize graphics.
        if not game.interface:
            renpy.display.core.Interface()
            log_clock("Creating interface object")

        # Start things running.
        restart = None

        while True:

            if restart:
                renpy.display.screen.before_restart()

            try:
                try:
                    run(restart)
                finally:
                    restart = (renpy.config.end_game_transition, "_invoke_main_menu", "_main_menu")
                    renpy.persistent.update(True)
                    renpy.persistent.save_MP()

            except game.FullRestartException as e:
                restart = e.reason

            finally:
                # Reset live2d if it exists.
                try:
                    renpy.gl2.live2d.reset_states()
                except Exception:
                    pass

                # Flush any pending interface work.
                renpy.display.interface.finish_pending()

                # Give Ren'Py a couple of seconds to finish saving.
                renpy.loadsave.autosave_not_running.wait(3.0)

                # Run the at exit callbacks.
                for cb in renpy.config.at_exit_callbacks:
                    cb()

    finally:

        gc.set_debug(0)

        for i in renpy.config.quit_callbacks:
            i()

        renpy.loader.auto_quit()
        renpy.savelocation.quit()
        renpy.translation.write_updated_strings()

    # This is stuff we do on a normal, non-error return.
    if not renpy.display.error.error_handled:
        renpy.display.render.check_at_shutdown()
# weakref_valuedict.py
import gc
from pprint import pprint
import weakref

gc.set_debug(gc.DEBUG_UNCOLLECTABLE)


class ExpensiveObject:

    def __init__(self, name):
        self.name = name

    def __repr__(self):
        return 'ExpensiveObject({})'.format(self.name)

    def __del__(self):
        print('    (Deleting {})'.format(self))


def demo(cache_factory):
    # hold objects so any weak references
    # are not removed immediately
    all_refs = {}
    # create the cache using the factory
    print('CACHE TYPE:', cache_factory)
    cache = cache_factory()
    for name in ['one', 'two', 'three']:
        o = ExpensiveObject(name)
        cache[name] = o
        all_refs[name] = o
        del o  # decref
def tearDown(self):
    tracemalloc.disable()
    gc.set_debug(0)
def set_debug():
    gc.set_debug(gc.DEBUG_LEAK | gc.DEBUG_STATS)
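# A minimal sketch of what enabling these two flags surfaces, per the
# standard gc docs: DEBUG_STATS prints collection statistics to stderr,
# and DEBUG_LEAK implies DEBUG_SAVEALL, so collected objects are kept in
# gc.garbage instead of being freed. The cycle below is illustrative.
import gc

gc.set_debug(gc.DEBUG_LEAK | gc.DEBUG_STATS)

cycle = []
cycle.append(cycle)  # unreachable cycle once the name is deleted
del cycle

gc.collect()         # stats go to stderr; the cycle lands in gc.garbage
print(gc.garbage)

gc.set_debug(0)
del gc.garbage[:]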
class ClassA():

    def __init__(self):
        print('object born,id:%s' % str(hex(id(self))))

    # def __del__(self):
    #     print('object del,id:%s' % str(hex(id(self))))


def f3():
    print("-----0------")
    # print(gc.collect())
    c1 = ClassA()
    c2 = ClassA()
    c1.t = c2
    c2.t = c1
    print("-----1------")
    del c1
    del c2
    print("-----2------")
    print(gc.garbage)
    print("-----3------")
    print(gc.collect())  # explicitly run garbage collection
    print("-----4------")
    print(gc.garbage)
    print("-----5------")
    print(gc.get_count())


if __name__ == '__main__':
    gc.set_debug(gc.DEBUG_LEAK)  # enable gc module debug logging
    f3()
def __init__(self, watchers, endpoint, pubsub_endpoint, check_delay=1.0,
             prereload_fn=None, context=None, loop=None, statsd=False,
             stats_endpoint=None, statsd_close_outputs=False,
             multicast_endpoint=None, plugins=None, sockets=None,
             warmup_delay=0, httpd=False, httpd_host='localhost',
             httpd_port=8080, httpd_close_outputs=False, debug=False,
             debug_gc=False, ssh_server=None, proc_name='circusd',
             pidfile=None, loglevel=None, logoutput=None, loggerconfig=None,
             fqdn_prefix=None, umask=None, endpoint_owner=None):
    self.watchers = watchers
    self.endpoint = endpoint
    self.check_delay = check_delay
    self.prereload_fn = prereload_fn
    self.pubsub_endpoint = pubsub_endpoint
    self.multicast_endpoint = multicast_endpoint
    self.proc_name = proc_name
    self.ssh_server = ssh_server
    self.evpub_socket = None
    self.pidfile = pidfile
    self.loglevel = loglevel
    self.logoutput = logoutput
    self.loggerconfig = loggerconfig
    self.umask = umask
    self.endpoint_owner = endpoint_owner
    self._running = False
    try:
        # getfqdn appears to fail in Python3.3 in the unittest
        # framework so fall back to gethostname
        socket_fqdn = socket.getfqdn()
    except KeyError:
        socket_fqdn = socket.gethostname()
    if fqdn_prefix is None:
        fqdn = socket_fqdn
    else:
        fqdn = '{}@{}'.format(fqdn_prefix, socket_fqdn)
    self.fqdn = fqdn

    self.ctrl = self.loop = None
    self._provided_loop = False
    self.socket_event = False
    if loop is not None:
        self._provided_loop = True
        self.loop = loop

    # initialize zmq context
    self._init_context(context)
    self.pid = os.getpid()
    self._watchers_names = {}
    self._stopping = False
    self._restarting = False
    self.debug = debug
    self._exclusive_running_command = None
    if self.debug:
        self.stdout_stream = self.stderr_stream = {'class': 'StdoutStream'}
    else:
        self.stdout_stream = self.stderr_stream = None
    self.debug_gc = debug_gc
    if debug_gc:
        gc.set_debug(gc.DEBUG_LEAK)

    # initializing circusd-stats as a watcher when configured
    self.statsd = statsd
    self.stats_endpoint = stats_endpoint
    if self.statsd:
        cmd = "%s -c 'from circus import stats; stats.main()'" % \
            sys.executable
        cmd += ' --endpoint %s' % self.endpoint
        cmd += ' --pubsub %s' % self.pubsub_endpoint
        cmd += ' --statspoint %s' % self.stats_endpoint
        if ssh_server is not None:
            cmd += ' --ssh %s' % ssh_server
        if debug:
            cmd += ' --log-level DEBUG'
        elif self.loglevel:
            cmd += ' --log-level ' + self.loglevel
        if self.logoutput:
            cmd += ' --log-output ' + self.logoutput
        stats_watcher = Watcher('circusd-stats', cmd, use_sockets=True,
                                singleton=True,
                                stdout_stream=self.stdout_stream,
                                stderr_stream=self.stderr_stream,
                                copy_env=True, copy_path=True,
                                close_child_stderr=statsd_close_outputs,
                                close_child_stdout=statsd_close_outputs)

        self.watchers.append(stats_watcher)

    # adding the httpd
    if httpd:
        # adding the socket
        httpd_socket = CircusSocket(name='circushttpd', host=httpd_host,
                                    port=httpd_port)
        if sockets is None:
            sockets = [httpd_socket]
        else:
            sockets.append(httpd_socket)

        cmd = ("%s -c 'from circusweb import circushttpd; "
               "circushttpd.main()'") % sys.executable
        cmd += ' --endpoint %s' % self.endpoint
        cmd += ' --fd $(circus.sockets.circushttpd)'
        if ssh_server is not None:
            cmd += ' --ssh %s' % ssh_server

        # Adding the watcher
        httpd_watcher = Watcher('circushttpd', cmd, use_sockets=True,
                                singleton=True,
                                stdout_stream=self.stdout_stream,
                                stderr_stream=self.stderr_stream,
                                copy_env=True, copy_path=True,
                                close_child_stderr=httpd_close_outputs,
                                close_child_stdout=httpd_close_outputs)
        self.watchers.append(httpd_watcher)

    # adding each plugin as a watcher
    ch_stderr = self.stderr_stream is None
    ch_stdout = self.stdout_stream is None
    if plugins is not None:
        for plugin in plugins:
            fqn = plugin['use']
            cmd = get_plugin_cmd(plugin, self.endpoint,
                                 self.pubsub_endpoint, self.check_delay,
                                 ssh_server, debug=self.debug,
                                 loglevel=self.loglevel,
                                 logoutput=self.logoutput)
            plugin_cfg = dict(cmd=cmd, priority=1, singleton=True,
                              stdout_stream=self.stdout_stream,
                              stderr_stream=self.stderr_stream,
                              copy_env=True, copy_path=True,
                              close_child_stderr=ch_stderr,
                              close_child_stdout=ch_stdout)
            plugin_cfg.update(plugin)
            if 'name' not in plugin_cfg:
                plugin_cfg['name'] = fqn

            plugin_watcher = Watcher.load_from_config(plugin_cfg)
            self.watchers.append(plugin_watcher)

    self.sockets = CircusSockets(sockets)
    self.warmup_delay = warmup_delay
# gc garbage collection demo
import gc
import sys

gc.set_debug(gc.DEBUG_STATS)

a = [1, 2, 3]
b = [4, 5, 6]
a.append(b)
del a, b
print(gc.collect())
print("END")

# gc: collecting generation 2...
# gc: objects in each generation: 831 493 4500
# gc: objects in permanent generation: 0
# gc: done, 24 unreachable, 0 uncollectable, 0.0008s elapsed
# 24
# END
from test.version import Version

if HAVE_SSL:
    import ssl

try:
    # Enable the fault handler to dump the traceback of each running thread
    # after a segfault.
    import faulthandler
    faulthandler.enable()
except ImportError:
    pass

# Enable debug output for uncollectable objects. PyPy does not have set_debug.
if hasattr(gc, 'set_debug'):
    gc.set_debug(gc.DEBUG_UNCOLLECTABLE |
                 getattr(gc, 'DEBUG_OBJECTS', 0) |
                 getattr(gc, 'DEBUG_INSTANCES', 0))

# The host and port of a single mongod or mongos, or the seed host
# for a replica set.
host = os.environ.get("DB_IP", 'localhost')
port = int(os.environ.get("DB_PORT", 27017))

db_user = os.environ.get("DB_USER", "user")
db_pwd = os.environ.get("DB_PASSWORD", "password")

CERT_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                         'certificates')
CLIENT_PEM = os.environ.get('CLIENT_PEM',
                            os.path.join(CERT_PATH, 'client.pem'))
CA_PEM = os.environ.get('CA_PEM', os.path.join(CERT_PATH, 'ca.pem'))
def main():
    parser = OptionParser()
    parser.add_option('-s', '--server-host',
                      dest='server_host', type='string', default='localhost',
                      help='Server host (can be a comma-delimited list)',
                      action='callback', callback_args=(str,),
                      callback=get_comma_separated_args)
    parser.add_option('-p', '--server-port',
                      dest='server_port', type='string', default=8073,
                      help='Server port, default 8073 (can be a comma delimited list)',
                      action='callback', callback_args=(int,),
                      callback=get_comma_separated_args)
    parser.add_option('--pw', '--password',
                      dest='password', type='string', default='',
                      help='Kiwi login password (if required, can be a comma delimited list)',
                      action='callback', callback_args=(str,),
                      callback=get_comma_separated_args)
    parser.add_option('-u', '--user',
                      dest='user', type='string', default='kiwirecorder.py',
                      help='Kiwi connection user name',
                      action='callback', callback_args=(str,),
                      callback=get_comma_separated_args)
    parser.add_option('--station',
                      dest='station', type='string', default=None,
                      help='Station ID to be appended to filename (can be a comma-separated list)',
                      action='callback', callback_args=(str,),
                      callback=get_comma_separated_args)
    parser.add_option('--log', '--log-level', '--log_level',
                      type='choice', dest='log_level', default='warn',
                      choices=['debug', 'info', 'warn', 'error', 'critical'],
                      help='Log level: debug|info|warn(default)|error|critical')
    parser.add_option('-q', '--quiet',
                      dest='quiet', default=False, action='store_true',
                      help='Don\'t print progress messages')
    parser.add_option('-d', '--dir',
                      dest='dir', type='string', default=None,
                      help='Optional destination directory for files')
    parser.add_option('--fn', '--filename',
                      dest='filename', type='string', default='',
                      help='Use fixed filename instead of generated filenames (optional station ID(s) will apply)',
                      action='callback', callback_args=(str,),
                      callback=get_comma_separated_args)
    parser.add_option('--tlimit', '--time-limit',
                      dest='tlimit', type='float', default=None,
                      help='Record time limit in seconds')
    parser.add_option('--dt-sec',
                      dest='dt', type='int', default=0,
                      help='Start a new file when mod(sec_of_day,dt) == 0')
    parser.add_option('--launch-delay', '--launch_delay',
                      dest='launch_delay', type='int', default=0,
                      help='Delay (secs) in launching multiple connections')
    parser.add_option('-k', '--socket-timeout', '--socket_timeout',
                      dest='socket_timeout', type='int', default=10,
                      help='Timeout(sec) for sockets')
    parser.add_option('--stats',
                      dest='stats', default=False, action='store_true',
                      help='Print additional statistics (applies only to --S-meter mode currently)')

    group = OptionGroup(parser, "Audio connection options", "")
    group.add_option('-f', '--freq',
                     dest='frequency', type='string', default=1000,
                     help='Frequency to tune to, in kHz (can be a comma-separated list)',
                     action='callback', callback_args=(float,),
                     callback=get_comma_separated_args)
    group.add_option('-m', '--modulation',
                     dest='modulation', type='string', default='am',
                     help='Modulation; one of am, lsb, usb, cw, nbfm, iq (default passband if -L/-H not specified)')
    group.add_option('--ncomp', '--no_compression',
                     dest='compression', default=True, action='store_false',
                     help='Don\'t use audio compression')
    group.add_option('-L', '--lp-cutoff',
                     dest='lp_cut', type='float', default=None,
                     help='Low-pass cutoff frequency, in Hz')
    group.add_option('-H', '--hp-cutoff',
                     dest='hp_cut', type='float', default=None,
                     help='High-pass cutoff frequency, in Hz')
    group.add_option('-r', '--resample',
                     dest='resample', type='int', default=0,
                     help='Resample output file to new sample rate in Hz. '
                          'The resampling ratio has to be in the range [1/256,256]')
    group.add_option('-T', '--squelch-threshold',
                     dest='thresh', type='float', default=None,
                     help='Squelch threshold, in dB.')
    group.add_option('--squelch-tail',
                     dest='squelch_tail', type='float', default=1,
                     help='Time for which the squelch remains open after the signal is below threshold.')
    group.add_option('-g', '--agc-gain',
                     dest='agc_gain', type='string', default=None,
                     help='AGC gain; if set, AGC is turned off (can be a comma-separated list)',
                     action='callback', callback_args=(float,),
                     callback=get_comma_separated_args)
    group.add_option('--nb',
                     dest='nb', action='store_true', default=False,
                     help='Enable noise blanker with default parameters.')
    group.add_option('--nb-gate',
                     dest='nb_gate', type='int', default=100,
                     help='Noise blanker gate time in usec (100 to 5000, default 100)')
    group.add_option('--nb-th', '--nb-thresh',
                     dest='nb_thresh', type='int', default=50,
                     help='Noise blanker threshold in percent (0 to 100, default 50)')
    group.add_option('-w', '--kiwi-wav',
                     dest='is_kiwi_wav', default=False, action='store_true',
                     help='In the wav file include KIWI header containing GPS time-stamps (only for IQ mode)')
    group.add_option('--S-meter', '--s-meter',
                     dest='S_meter', type='int', default=-1,
                     help='Report S-meter (RSSI) value after S_METER number of averages. '
                          'S_METER=0 does no averaging and reports each RSSI value received. '
                          'Does not write wav data to file.')
    group.add_option('--kiwi-tdoa',
                     dest='is_kiwi_tdoa', default=False, action='store_true',
                     help='Used when called by Kiwi TDoA extension')
    group.add_option('--test-mode',
                     dest='test_mode', default=False, action='store_true',
                     help='Write wav data to /dev/null')
    parser.add_option_group(group)

    group = OptionGroup(parser, "Waterfall connection options", "")
    group.add_option('--wf',
                     dest='waterfall', default=False, action='store_true',
                     help='Process waterfall data instead of audio')
    group.add_option('-z', '--zoom',
                     dest='zoom', type='int', default=0,
                     help='Zoom level 0-14')
    group.add_option('--snd',
                     dest='sound', default=False, action='store_true',
                     help='Also process sound data when in waterfall mode (sound connection options above apply)')
    parser.add_option_group(group)

    (options, unused_args) = parser.parse_args()

    ## clean up OptionParser which has cyclic references
    parser.destroy()

    FORMAT = '%(asctime)-15s pid %(process)5d %(message)s'
    logging.basicConfig(level=logging.getLevelName(options.log_level.upper()),
                        format=FORMAT)
    if options.log_level.upper() == 'DEBUG':
        gc.set_debug(gc.DEBUG_SAVEALL | gc.DEBUG_LEAK | gc.DEBUG_UNCOLLECTABLE)

    run_event = threading.Event()
    run_event.set()

    if options.S_meter >= 0:
        options.quiet = True
    options.raw = False
    gopt = options
    multiple_connections, options = options_cross_product(options)

    snd_recorders = []
    if not gopt.waterfall or (gopt.waterfall and gopt.sound):
        for i, opt in enumerate(options):
            opt.multiple_connections = multiple_connections
            opt.idx = i
            snd_recorders.append(
                KiwiWorker(args=(KiwiSoundRecorder(opt), opt, run_event)))

    wf_recorders = []
    if gopt.waterfall:
        for i, opt in enumerate(options):
            opt.multiple_connections = multiple_connections
            opt.idx = i
            wf_recorders.append(
                KiwiWorker(args=(KiwiWaterfallRecorder(opt), opt, run_event)))

    try:
        for i, r in enumerate(snd_recorders):
            if opt.launch_delay != 0 and i != 0 and \
                    options[i - 1].server_host == options[i].server_host:
                time.sleep(opt.launch_delay)
            r.start()
            # logging.info("started sound recorder %d, tstamp=%d" % (i, options[i].tstamp))
            logging.info("started sound recorder %d" % i)

        for i, r in enumerate(wf_recorders):
            if i != 0 and options[i - 1].server_host == options[i].server_host:
                time.sleep(opt.launch_delay)
            r.start()
            logging.info("started waterfall recorder %d" % i)

        while run_event.is_set():
            time.sleep(.1)
    except KeyboardInterrupt:
        run_event.clear()
        join_threads(snd_recorders, wf_recorders)
        print("KeyboardInterrupt: threads successfully closed")
    except Exception as e:
        print_exc()
        run_event.clear()
        join_threads(snd_recorders, wf_recorders)
        print("Exception: threads successfully closed")

    if gopt.is_kiwi_tdoa:
        for i, opt in enumerate(options):
            # NB: MUST be a print (i.e. not a logging.info)
            print("status=%d,%d" % (i, opt.status))

    logging.debug('gc %s' % gc.garbage)
os.system(cmd) with open(fn) as f: while 1: ps = f.readline() if not ps: break chunk = ps.split() pid, cmd = chunk[0], chunk[4] if cmd == s: os.kill(int(pid), signal.SIGTERM) os.unlink(fn) if __name__ == '__main__': report_leaks = 0 if report_leaks: gc.enable() gc.set_debug(gc.DEBUG_LEAK & ~gc.DEBUG_SAVEALL) try: Rand.load_file('randpool.dat', -1) unittest.TextTestRunner().run(suite()) Rand.save_file('randpool.dat') finally: zap_servers() if report_leaks: from tests import alltests alltests.dump_garbage()
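
# A hedged standalone sketch (not M2Crypto code) of the flag masking used
# above: clearing DEBUG_SAVEALL out of DEBUG_LEAK keeps the leak-reporting
# bits while letting the collector actually free the cycles it finds, so
# gc.garbage is not flooded. On Python 3 the remainder is exactly
# DEBUG_COLLECTABLE | DEBUG_UNCOLLECTABLE (Python 2's DEBUG_LEAK also
# carried DEBUG_INSTANCES and DEBUG_OBJECTS).
import gc

flags = gc.DEBUG_LEAK & ~gc.DEBUG_SAVEALL
gc.set_debug(flags)  # report collectable/uncollectable objects only
gc.set_debug(0)      # back to the default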
def __init__(self): # gc.set_debug(gc.DEBUG_UNCOLLECTABLE|gc.DEBUG_INSTANCES|gc.DEBUG_OBJECTS|gc.DEBUG_SAVEALL) gc.set_debug(gc.DEBUG_UNCOLLECTABLE | gc.DEBUG_SAVEALL) self.pipresents_issue = "1.3.5" self.pipresents_minorissue = '1.3.5d' # position and size of window without -f command line option self.nonfull_window_width = 0.45 # proportion of width self.nonfull_window_height = 0.7 # proportion of height self.nonfull_window_x = 0 # position of top left corner self.nonfull_window_y = 0 # position of top left corner StopWatch.global_enable = False # set up the handler for SIGTERM signal.signal(signal.SIGTERM, self.handle_sigterm) # **************************************** # Initialisation # *************************************** # get command line options self.options = command_options() # get Pi Presents code directory pp_dir = sys.path[0] self.pp_dir = pp_dir if not os.path.exists(pp_dir + "/pipresents.py"): if self.options['manager'] is False: tkMessageBox.showwarning("Pi Presents", "Bad Application Directory") exit(102) # Initialise logging and tracing Monitor.log_path = pp_dir self.mon = Monitor() # Init in PiPresents only self.mon.init() # uncomment to enable control of logging from within a class # Monitor.enable_in_code = True # enables control of log level in the code for a class - self.mon.set_log_level() # make a shorter list to log/trace only some classes without using enable_in_code. Monitor.classes = [ 'PiPresents', 'HyperlinkShow', 'RadioButtonShow', 'ArtLiveShow', 'ArtMediaShow', 'MediaShow', 'LiveShow', 'MenuShow', 'GapShow', 'Show', 'ArtShow', 'AudioPlayer', 'BrowserPlayer', 'ImagePlayer', 'MenuPlayer', 'MessagePlayer', 'VideoPlayer', 'Player', 'MediaList', 'LiveList', 'ShowList', 'PathManager', 'ControlsManager', 'ShowManager', 'PluginManager', 'IOPluginManager', 'MplayerDriver', 'OMXDriver', 'UZBLDriver', 'TimeOfDay', 'ScreenDriver', 'Animate', 'OSCDriver', 'CounterManager', 'Network', 'Mailer' ] # Monitor.classes=['PiPresents','MediaShow','GapShow','Show','VideoPlayer','Player','OMXDriver'] # Monitor.classes=['OSCDriver'] # get global log level from command line Monitor.log_level = int(self.options['debug']) Monitor.manager = self.options['manager'] # print self.options['manager'] self.mon.newline(3) self.mon.sched( self, None, "Pi Presents is starting, Version:" + self.pipresents_minorissue + ' at ' + time.strftime("%Y-%m-%d %H:%M.%S")) self.mon.log( self, "Pi Presents is starting, Version:" + self.pipresents_minorissue + ' at ' + time.strftime("%Y-%m-%d %H:%M.%S")) # self.mon.log (self," OS and separator:" + os.name +' ' + os.sep) self.mon.log(self, "sys.path[0] - location of code: " + sys.path[0]) # log versions of Raspbian and omxplayer, and GPU Memory with open("/boot/issue.txt") as ifile: self.mon.log(self, '\nRaspbian: ' + ifile.read()) self.mon.log(self, '\n' + check_output(["omxplayer", "-v"])) self.mon.log( self, '\nGPU Memory: ' + check_output(["vcgencmd", "get_mem", "gpu"])) if os.geteuid() == 0: print 'Do not run Pi Presents with sudo' self.mon.log(self, 'Do not run Pi Presents with sudo') self.mon.finish() sys.exit(102) if "DESKTOP_SESSION" not in os.environ: print 'Pi Presents must be run from the Desktop' self.mon.log(self, 'Pi Presents must be run from the Desktop') self.mon.finish() sys.exit(102) else: self.mon.log(self, 'Desktop is ' + os.environ['DESKTOP_SESSION']) # optional other classes used self.root = None self.ppio = None self.tod = None self.animate = None self.ioplugin_manager = None self.oscdriver = None self.osc_enabled = False 
    self.tod_enabled = False
    self.email_enabled = False

    user = os.getenv('USER')
    if user is None:
        tkMessageBox.showwarning("Pi Presents",
                                 "You must be logged in to run Pi Presents")
        exit(102)

    if user != 'pi':
        self.mon.warn(self, "You must be logged in as pi to use GPIO")
    self.mon.log(self, 'User is: ' + user)

    # self.mon.log(self,"os.getenv('HOME') - user home directory (not used): " + os.getenv('HOME'))  # does not work
    # self.mon.log(self,"os.path.expanduser('~') - user home directory: " + os.path.expanduser('~'))  # does not work

    # check network is available
    self.network_connected = False
    self.network_details = False
    self.interface = ''
    self.ip = ''
    self.unit = ''

    # sets self.network_connected and self.network_details
    self.init_network()

    # start the mailer and send email when PP starts
    self.email_enabled = False
    if self.network_connected is True:
        self.init_mailer()
        if self.email_enabled is True and self.mailer.email_at_start is True:
            subject = '[Pi Presents] ' + self.unit + ': PP Started on ' + time.strftime(
                "%Y-%m-%d %H:%M")
            message = time.strftime(
                "%Y-%m-%d %H:%M"
            ) + '\nUnit: ' + self.unit + ' Profile: ' + self.options[
                'profile'] + '\n ' + self.interface + '\n ' + self.ip
            self.send_email('start', subject, message)

    # get profile path from the -p option
    if self.options['profile'] != '':
        self.pp_profile_path = "/pp_profiles/" + self.options['profile']
    else:
        self.mon.err(self, "Profile not specified in command")
        self.end('error', "Profile not specified with the command's -p option")

    # get the directory containing pp_home from the command
    if self.options['home'] == "":
        home = os.sep + 'home' + os.sep + user + os.sep + "pp_home"
    else:
        home = self.options['home'] + os.sep + "pp_home"
    self.mon.log(self, "pp_home directory is: " + home)

    # check if pp_home exists.
    # try for up to 9 seconds to allow a usb stick to automount
    found = False
    for i in range(1, 10):
        self.mon.log(self,
                     "Trying pp_home at: " + home + " (" + str(i) + ')')
        if os.path.exists(home):
            found = True
            self.pp_home = home
            break
        time.sleep(1)
    if found is True:
        self.mon.log(
            self,
            "Found Requested Home Directory, using pp_home at: " + home)
    else:
        self.mon.err(self, "Failed to find pp_home directory at " + home)
        self.end('error', "Failed to find pp_home directory at " + home)

    # check the profile exists
    self.pp_profile = self.pp_home + self.pp_profile_path
    if os.path.exists(self.pp_profile):
        self.mon.sched(self, None,
                       "Running profile: " + self.pp_profile_path)
        self.mon.log(
            self, "Found Requested profile - pp_profile directory is: " +
            self.pp_profile)
    else:
        self.mon.err(
            self, "Failed to find requested profile: " + self.pp_profile)
        self.end('error',
                 "Failed to find requested profile: " + self.pp_profile)

    self.mon.start_stats(self.options['profile'])

    if self.options['verify'] is True:
        self.mon.err(self,
                     "Validation option not supported - use the editor")
        self.end('error', 'Validation option not supported - use the editor')

    # initialise and read the showlist in the profile
    self.showlist = ShowList()
    self.showlist_file = self.pp_profile + "/pp_showlist.json"
    if os.path.exists(self.showlist_file):
        self.showlist.open_json(self.showlist_file)
    else:
        self.mon.err(self, "showlist not found at " + self.showlist_file)
        self.end('error', "showlist not found at " + self.showlist_file)

    # check the profile issue (version) is the same as this Pi Presents issue
    if self.showlist.profile_version() != self.pipresents_version():
        self.mon.err(
            self, "Version of showlist " +
            self.showlist.profile_version_string +
            " is not same as Pi Presents")
        self.end(
            'error', "Version of showlist " + 
            self.showlist.profile_version_string +
            " is not same as Pi Presents")

    # get the 'start' show from the showlist
    index = self.showlist.index_of_start_show()
    if index >= 0:
        self.showlist.select(index)
        self.starter_show = self.showlist.selected_show()
    else:
        self.mon.err(self, "Show [start] not found in showlist")
        self.end('error', "Show [start] not found in showlist")

    # ********************
    # SET UP THE GUI
    # ********************

    # turn off the screenblanking and saver
    if self.options['noblank'] is True:
        call(["xset", "s", "off"])
        call(["xset", "s", "-dpms"])

    self.root = Tk()

    self.title = 'Pi Presents - ' + self.pp_profile
    self.icon_text = 'Pi Presents'
    self.root.title(self.title)
    self.root.iconname(self.icon_text)
    self.root.config(bg=self.starter_show['background-colour'])

    self.mon.log(
        self, 'monitor screen dimensions are ' +
        str(self.root.winfo_screenwidth()) + ' x ' +
        str(self.root.winfo_screenheight()) + ' pixels')

    if self.options['screensize'] == '':
        self.screen_width = self.root.winfo_screenwidth()
        self.screen_height = self.root.winfo_screenheight()
    else:
        reason, message, self.screen_width, self.screen_height = self.parse_screen(
            self.options['screensize'])
        if reason == 'error':
            self.mon.err(self, message)
            self.end('error', message)

    self.mon.log(
        self, 'forced screen dimensions (--screensize) are ' +
        str(self.screen_width) + ' x ' + str(self.screen_height) + ' pixels')

    # set window dimensions and decorations
    if self.options['fullscreen'] is False:
        self.window_width = int(self.root.winfo_screenwidth() *
                                self.nonfull_window_width)
        self.window_height = int(self.root.winfo_screenheight() *
                                 self.nonfull_window_height)
        self.window_x = self.nonfull_window_x
        self.window_y = self.nonfull_window_y
        self.root.geometry("%dx%d%+d%+d" %
                           (self.window_width, self.window_height,
                            self.window_x, self.window_y))
    else:
        self.window_width = self.screen_width
        self.window_height = self.screen_height
        self.root.attributes('-fullscreen', True)
        os.system('unclutter &')
        self.window_x = 0
        self.window_y = 0
        self.root.geometry("%dx%d%+d%+d" %
                           (self.window_width, self.window_height,
                            self.window_x, self.window_y))
        self.root.attributes('-zoomed', '1')

    # the canvas covers the whole screen whatever the size of the window.
    self.canvas_height = self.screen_height
    self.canvas_width = self.screen_width

    # make sure focus is set.
    self.root.focus_set()

    # define response to main window closing.
    self.root.protocol("WM_DELETE_WINDOW", self.handle_user_abort)

    # set up a canvas onto which the images or text will be drawn
    self.canvas = Canvas(self.root,
                         bg=self.starter_show['background-colour'])

    if self.options['fullscreen'] is True:
        self.canvas.config(height=self.canvas_height,
                           width=self.canvas_width,
                           highlightthickness=0)
    else:
        self.canvas.config(height=self.canvas_height,
                           width=self.canvas_width,
                           highlightthickness=1,
                           highlightcolor='yellow')

    self.canvas.place(x=0, y=0)
    # self.canvas.config(bg='black')
    self.canvas.focus_set()

    # ****************************************
    # INITIALISE THE TOUCHSCREEN DRIVER
    # ****************************************
    # each driver takes a set of inputs, binds them to symbolic names
    # and sets up a callback which returns the symbolic name when an input event occurs
    self.sr = ScreenDriver()

    # read the screen click area config file
    reason, message = self.sr.read(pp_dir, self.pp_home, self.pp_profile)
    if reason == 'error':
        self.end('error', 'cannot find, or error in, screen.cfg')

    # create click areas on the canvas; they must be polygons because
    # outline-only rectangles are not treated as filled by find_closest.
    # click areas are made on the Pi Presents canvas, not the show canvases.
    reason, message = self.sr.make_click_areas(self.canvas,
                                               self.handle_input_event)
    if reason == 'error':
        self.mon.err(self, message)
        self.end('error', message)

    # ****************************************
    # INITIALISE THE APPLICATION AND START
    # ****************************************
    self.shutdown_required = False
    self.reboot_required = False
    self.terminate_required = False
    self.exitpipresents_required = False

    # initialise the I/O plugins by importing their drivers
    self.ioplugin_manager = IOPluginManager()
    reason, message = self.ioplugin_manager.init(self.pp_dir,
                                                 self.pp_profile, self.root,
                                                 self.handle_input_event)
    if reason == 'error':
        # self.mon.err(self,message)
        self.end('error', message)

    # kick off the animation sequencer
    self.animate = Animate()
    self.animate.init(pp_dir, self.pp_home, self.pp_profile, self.canvas,
                      200, self.handle_output_event)
    self.animate.poll()

    # create a show manager ready for the time of day scheduler and osc server
    show_id = -1
    self.show_manager = ShowManager(show_id, self.showlist,
                                    self.starter_show, self.root,
                                    self.canvas, self.pp_dir,
                                    self.pp_profile, self.pp_home)

    # first time through, set a callback to terminate Pi Presents if all shows have ended.
    self.show_manager.init(self.canvas, self.all_shows_ended_callback,
                           self.handle_command, self.showlist)

    # Register all the shows in the showlist
    reason, message = self.show_manager.register_shows()
    if reason == 'error':
        self.mon.err(self, message)
        self.end('error', message)

    # Init OSCDriver, read config and start the OSC server
    self.osc_enabled = False
    if self.network_connected is True:
        if os.path.exists(self.pp_profile + os.sep + 'pp_io_config' +
                          os.sep + 'osc.cfg'):
            self.oscdriver = OSCDriver()
            reason, message = self.oscdriver.init(
                self.pp_profile, self.unit, self.interface, self.ip,
                self.handle_command, self.handle_input_event,
                self.e_osc_handle_animate)
            if reason == 'error':
                self.mon.err(self, message)
                self.end('error', message)
            else:
                self.osc_enabled = True
                # pass the method itself; calling it here would run the
                # server immediately and hand after() its return value
                self.root.after(1000, self.oscdriver.start_server)

    # initialise the ToD scheduler, calculating the schedule for today
    self.tod = TimeOfDay()
    reason, message, self.tod_enabled = self.tod.init(
        pp_dir, self.pp_home, self.pp_profile, self.showlist, self.root,
        self.handle_command)
    if reason == 'error':
        self.mon.err(self, message)
        self.end('error', message)

    # warn if the network is not available when ToD is required
    if self.tod_enabled is True and self.network_connected is False:
        self.mon.warn(
            self,
            'Network not connected so Time of Day scheduler may be using the internal clock'
        )

    # init the counter manager
    self.counter_manager = CounterManager()
    self.counter_manager.init()

    # warn about start shows and the scheduler
    if self.starter_show['start-show'] == '' and self.tod_enabled is False:
        self.mon.sched(
            self, None,
            "No Start Shows in Start Show and no shows scheduled")
        self.mon.warn(
            self, "No Start Shows in Start Show and no shows scheduled")

    if self.starter_show['start-show'] != '' and self.tod_enabled is True:
        self.mon.sched(
            self, None,
            "Start Shows in Start Show and shows scheduled - conflict?")
        self.mon.warn(
            self,
            "Start Shows in Start Show and shows scheduled - conflict?")

    # run the start shows
    self.run_start_shows()

    # kick off the time of day scheduler which may run additional shows
    if self.tod_enabled is True:
        self.tod.poll()

    # start the I/O plugins' input event generation
    self.ioplugin_manager.start()

    # start Tkinter's event loop
    self.root.mainloop()
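
# A hedged sketch of how the DEBUG_UNCOLLECTABLE | DEBUG_SAVEALL setting made
# at the top of __init__ above could be exploited at shutdown: after a final
# collect(), everything SAVEALL kept is parked in gc.garbage for inspection.
# log_saved_garbage is an illustrative helper, not part of Pi Presents.
import atexit
import gc

def log_saved_garbage():
    gc.collect()
    for obj in gc.garbage:
        print('saved garbage: %s %s' % (type(obj).__name__, repr(obj)[:80]))

atexit.register(log_saved_garbage)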
def test_display_uncollectable_cumulative(self): gc.set_debug(gc.DEBUG_SAVEALL) self._test_display_uncollectable_cumulative(True)
def test_display_uncollectable_saveall(self): gc.set_debug(gc.DEBUG_SAVEALL) self._test_display_uncollectable(True)
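
# A hedged sketch of the save/restore discipline these tests assume (the
# class name is illustrative, not the original test class): DEBUG_SAVEALL
# should be undone in tearDown and gc.garbage emptied, or every later test
# inherits the saved-garbage behaviour.
import gc
import unittest

class GCDebugCase(unittest.TestCase):
    def setUp(self):
        self._saved_flags = gc.get_debug()
        gc.set_debug(gc.DEBUG_SAVEALL)

    def tearDown(self):
        del gc.garbage[:]                # drop whatever SAVEALL parked
        gc.set_debug(self._saved_flags)  # restore the previous flags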
# This is for the REST calls made by Hue with the requests library. if desktop.conf.SSL_CACERTS.get( ) and os.environ.get('REQUESTS_CA_BUNDLE') is None: os.environ['REQUESTS_CA_BUNDLE'] = desktop.conf.SSL_CACERTS.get() # Preventing local build failure by not validating the default value of REQUESTS_CA_BUNDLE if os.environ.get('REQUESTS_CA_BUNDLE') and os.environ.get('REQUESTS_CA_BUNDLE') != desktop.conf.SSL_CACERTS.config.default \ and not os.path.isfile(os.environ['REQUESTS_CA_BUNDLE']): raise Exception( _('SSL Certificate pointed by REQUESTS_CA_BUNDLE does not exist: %s') % os.environ['REQUESTS_CA_BUNDLE']) # Instrumentation if desktop.conf.INSTRUMENTATION.get(): if sys.version_info[0] > 2: gc.set_debug(gc.DEBUG_UNCOLLECTABLE) else: gc.set_debug(gc.DEBUG_UNCOLLECTABLE | gc.DEBUG_OBJECTS) if not desktop.conf.DATABASE_LOGGING.get(): def disable_database_logging(): from django.db.backends.base.base import BaseDatabaseWrapper from django.db.backends.utils import CursorWrapper BaseDatabaseWrapper.make_debug_cursor = lambda self, cursor: CursorWrapper( cursor, self) disable_database_logging() ############################################################
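
# A hedged alternative to the version branch above (standalone sketch, not
# Hue code): DEBUG_OBJECTS and DEBUG_INSTANCES existed only on Python 2 and
# were removed in Python 3, so the flag can be looked up with a default
# instead of branching on sys.version_info.
import gc

flags = gc.DEBUG_UNCOLLECTABLE | getattr(gc, 'DEBUG_OBJECTS', 0)
gc.set_debug(flags)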
import uwsgi import os import gc import sys from uwsgidecorators import rpc, signal, postfork from os.path import abspath, dirname, join logo_png = abspath(join(dirname(__file__), "../logo_uWSGI.png")) print(sys.version) print(sys.version_info) if "set_debug" in gc.__dict__: gc.set_debug(gc.DEBUG_SAVEALL) print(os.environ) print(sys.modules) print(sys.argv) try: DEBUG = sys.argv[1] == "debug" except IndexError: DEBUG = False def after_request_hook(): print("request finished") uwsgi.after_req_hook = after_request_hook
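
# A hedged sketch of the guard used above: gc.set_debug is a CPython detail
# and other interpreters may omit it, so probing before calling keeps the
# script portable. hasattr() expresses the same membership test more
# idiomatically.
import gc

if hasattr(gc, "set_debug"):
    gc.set_debug(gc.DEBUG_SAVEALL)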
import gc class A(object): pass class B(object): pass gc.set_debug(gc.DEBUG_STATS | gc.DEBUG_LEAK) a = A() b = B() a.b = b b.a = a del a del b gc.collect()
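
# A hedged follow-up, if run after the demo above: because DEBUG_LEAK implies
# DEBUG_SAVEALL, the collect() call both prints the cycle members and parks
# them in gc.garbage, where they can be inspected. On CPython the list holds
# the A and B instances plus their attribute dicts.
import gc

gc.set_debug(0)               # silence further reports
for obj in gc.garbage:
    print(type(obj).__name__)  # e.g. A, B, dict, dict
del gc.garbage[:]             # release the saved objects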
def parse_args_hook(*args, **kwargs): arguments = parser._parse_args(*args, **kwargs) configure_stream_logger(arguments.logger, arguments.loglvl) gc.set_debug(arguments.gc_debug_stats | arguments.gc_debug_leak) return arguments
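
# A hedged sketch of the option wiring parse_args_hook appears to expect;
# the option names, dests and defaults are guesses, not the project's actual
# definitions. Each flag parses to a gc constant (or 0), so the two values
# can be OR-ed straight into set_debug.
import argparse
import gc

parser = argparse.ArgumentParser()
parser.add_argument('--gc-debug-stats', dest='gc_debug_stats',
                    action='store_const', const=gc.DEBUG_STATS, default=0,
                    help='print collection statistics to stderr')
parser.add_argument('--gc-debug-leak', dest='gc_debug_leak',
                    action='store_const', const=gc.DEBUG_LEAK, default=0,
                    help='report leaked objects and keep them in gc.garbage')
arguments = parser.parse_args(['--gc-debug-leak'])
gc.set_debug(arguments.gc_debug_stats | arguments.gc_debug_leak)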
def tearDown(self): gc.set_debug(0)
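
# A hedged companion note to the tearDown above: set_debug(0) only clears the
# debug flags; objects already parked in gc.garbage by DEBUG_SAVEALL stay
# there until removed explicitly, so a thorough teardown also empties the
# list (sketch below, name illustrative).
import gc

def tear_down():
    gc.set_debug(0)
    del gc.garbage[:]  # drop anything a previous DEBUG_SAVEALL run saved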