def check_gc_during_creation(self, makeref):
    thresholds = gc.get_threshold()
    gc.set_threshold(1, 1, 1)
    gc.collect()
    class A:
        pass

    def callback(*args):
        pass

    referenced = A()

    a = A()
    a.a = a
    a.wr = makeref(referenced)

    try:
        # now make sure the object and the ref get labeled as
        # cyclic trash:
        a = A()
        weakref.ref(referenced, callback)
        if test_support.due_to_ironpython_incompatibility():
            keepalive(referenced)
    finally:
        gc.set_threshold(*thresholds)
def init(**kwargs):
    global screen, shoot, xFrame, yFrame
    os.environ['SDL_VIDEO_WINDOW_POS'] = "%d,%d" % (0, 0)
    pygame.init()

    # sound stuff
    pygame.mixer.pre_init(buffer=4096)
    pygame.mixer.init()
    shoot.append(pygame.mixer.Sound("gun1-1.wav"))
    pygame.mixer.set_num_channels(10)
    pygame.mixer.set_reserved(8)
    for i in range(0, 7):
        channel.append(pygame.mixer.Channel(i))

    gc.enable()  # garbage collector
    gc.set_threshold(1, 1, 1)

    for i in range(0, kwargs["numb"]):
        rawClips.append(videoClip(folder=kwargs["folder"], name=repr(i)))

    # display stuff
    if kwargs["mod"] == "full":
        screen = pygame.display.set_mode(kwargs["resolution"], pygame.FULLSCREEN)
        xFrame = screen.get_width()
        yFrame = screen.get_height()
    else:
        screen = pygame.display.set_mode(kwargs["resolution"], pygame.NOFRAME)
        xFrame = kwargs["resolution"][0]
        yFrame = kwargs["resolution"][1]
    print xFrame, yFrame
    pygame.display.set_caption('KIA game')
    screen.fill((235, 235, 235))
def handle(self, *args, **options):
    # Check that we can run
    if not settings.CENTRAL_SERVER:
        raise CommandError("This must only be run on the central server.")
    supported_langs = get_supported_languages()
    if not options["lang_codes"]:
        lang_codes = supported_langs
    else:
        requested_codes = set(options["lang_codes"].split(","))
        lang_codes = [lcode_to_ietf(lc) for lc in requested_codes if lc in supported_langs]
        unsupported_codes = requested_codes - set(lang_codes)
        if unsupported_codes:
            raise CommandError("Requested unsupported languages: %s" % sorted(list(unsupported_codes)))

    # Scrub options
    for key in options:
        # If no_update is set, then disable all update options.
        if key.startswith("update_"):
            options[key] = options[key] and not options["no_update"]

    if version_diff(options["version"], COMMAND_MIN_VERSION) < 0:
        raise CommandError("This command cannot be used for versions before %s" % COMMAND_MIN_VERSION)

    if options['low_mem']:
        logging.info('Making the GC more aggressive...')
        gc.set_threshold(36, 2, 2)

    # For dealing with central server changes across versions
    upgrade_old_schema()

    # Now, we're going to build the language packs, collecting metadata along the way.
    package_metadata = update_language_packs(lang_codes, options)
def check_gc_during_creation(self, makeref):
    # XXX: threshold not applicable to Jython
    if not test_support.is_jython:
        thresholds = gc.get_threshold()
        gc.set_threshold(1, 1, 1)
    gc.collect()
    class A:
        pass

    def callback(*args):
        pass

    referenced = A()

    a = A()
    a.a = a
    a.wr = makeref(referenced)

    try:
        # now make sure the object and the ref get labeled as
        # cyclic trash:
        a = A()
        weakref.ref(referenced, callback)
    finally:
        # XXX: threshold not applicable to Jython
        if not test_support.is_jython:
            gc.set_threshold(*thresholds)
def checkMemory():
    """as the name says"""
    # pylint: disable=too-many-branches
    if not Debug.gc:
        return
    gc.set_threshold(0)
    gc.set_debug(gc.DEBUG_LEAK)
    gc.enable()
    print('collecting {{{')
    gc.collect()        # we want to eliminate all output
    print('}}} done')

    # code like this may help to find specific things
    if True:  # pylint: disable=using-constant-test
        interesting = ('Client', 'Player', 'Game')
        for obj in gc.garbage:
            if hasattr(obj, 'cell_contents'):
                obj = obj.cell_contents
            if not any(x in repr(obj) for x in interesting):
                continue
            for referrer in gc.get_referrers(obj):
                if referrer is gc.garbage:
                    continue
                if hasattr(referrer, 'cell_contents'):
                    referrer = referrer.cell_contents
                if referrer.__class__.__name__ in interesting:
                    for referent in gc.get_referents(referrer):
                        print('%s refers to %s' % (referrer, referent))
                else:
                    print('referrer of %s/%s is: id=%s type=%s %s' %
                          (type(obj), obj, id(referrer), type(referrer), referrer))
    print('unreachable:%s' % gc.collect())
    gc.set_debug(0)
def check_gc_during_creation(self, makeref):
    if test_support.check_impl_detail():
        import gc
        thresholds = gc.get_threshold()
        gc.set_threshold(1, 1, 1)
    gc_collect()
    class A:
        pass

    def callback(*args):
        pass

    referenced = A()

    a = A()
    a.a = a
    a.wr = makeref(referenced)

    try:
        # now make sure the object and the ref get labeled as
        # cyclic trash:
        a = A()
        weakref.ref(referenced, callback)
    finally:
        if test_support.check_impl_detail():
            gc.set_threshold(*thresholds)
def setUp(self):
    TestCase.setUp(self)
    gc.set_threshold(500, 10, 10)
    self._started = False
    self._ioloop = rpclib.ioloop()
    rpclib.__IOLOOP__ = self._ioloop
    self._exc_info = None
def run():
    # make sure that we run the reactor with the sentinel log context,
    # otherwise other PreserveLoggingContext instances will get confused
    # and complain when they see the logcontext arbitrarily swapping
    # between the sentinel and `run` logcontexts.
    with PreserveLoggingContext():
        logger.info("Running")

        if cpu_affinity is not None:
            # Turn the bitmask into bits, reverse it so we go from 0 up
            mask_to_bits = bin(cpu_affinity)[2:][::-1]

            cpus = []
            cpu_num = 0

            for i in mask_to_bits:
                if i == "1":
                    cpus.append(cpu_num)
                cpu_num += 1

            p = psutil.Process()
            p.cpu_affinity(cpus)

        change_resource_limit(soft_file_limit)
        if gc_thresholds:
            gc.set_threshold(*gc_thresholds)
        reactor.run()
def main():
    arguments = parse_arguments()
    Monster.ATTRIBUTE_NAME_LIST = [
        "attribute_{0}".format(i) for i in xrange(arguments.attribute_count)]
    test_list = [
        ("GC(threshold0:700)", 700, 0),
        ("GC(threshold0:10000)", 10000, 0),
        ("Manual", None, 1),
        ("Weakref", None, 2),
    ]
    for test_name, threshold, unloop_model in test_list:
        if threshold is not None:
            gc.set_threshold(threshold)
        run_test_and_plot(
            test_name, arguments.max_iteration, arguments.dungeon_per_iter,
            arguments.monster_per_dungeon, arguments.attribute_count,
            unloop_model)
    lgd = plt.legend(loc="center left", bbox_to_anchor=(1, 0.5))
    plt.savefig(
        arguments.plot_path, bbox_extra_artists=[lgd], bbox_inches="tight")
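The benchmark above compares threshold0 values indirectly, through a plot. A minimal sketch of how to observe the same trade-off directly, assuming CPython 3.3+ where gc.callbacks is available; count_collections and the throwaway allocation loop are illustrative, not part of the benchmark:

    import gc

    def count_collections(threshold0, n=200000):
        """Count automatic collections fired while allocating n containers."""
        counts = [0]

        def cb(phase, info):
            # gc.callbacks entries are invoked with ("start"|"stop", info dict)
            if phase == "start":
                counts[0] += 1

        old = gc.get_threshold()
        gc.set_threshold(threshold0)
        gc.callbacks.append(cb)
        try:
            junk = [[] for _ in range(n)]  # each new list bumps the gen-0 counter
        finally:
            gc.callbacks.remove(cb)
            gc.set_threshold(*old)
        return counts[0]

    # A higher threshold0 should yield far fewer automatic collections:
    # count_collections(700) >> count_collections(10000)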
def setUp (self):
    super (NotifyTestCase, self).setUp ()

    gc.set_threshold (0, 0, 0)

    self.__num_collectable_objects = self.collect_garbage ()
    self.__num_active_protections = AbstractGCProtector.default.num_active_protections
def run():
    with LoggingContext("run"):
        logger.info("Running")
        change_resource_limit(config.soft_file_limit)
        if config.gc_thresholds:
            gc.set_threshold(*config.gc_thresholds)
        reactor.run()
def check_gc_during_creation(self, makeref):
    # gc.get/set_threshold does not exist e.g. in pypy
    thresholds = gc.get_threshold()
    gc.set_threshold(1, 1, 1)
    gc.collect()
    class A:
        pass

    def callback(*args):
        pass

    referenced = A()

    a = A()
    a.a = a
    a.wr = makeref(referenced)

    try:
        # now make sure the object and the ref get labeled as
        # cyclic trash:
        a = A()
        weakref.ref(referenced, callback)
    finally:
        gc.set_threshold(*thresholds)
def gc_callback(option, opt, GC_THRESHOLD, *args):
    import gc
    if GC_THRESHOLD == 0:
        gc.disable()
        print "gc disabled"
    else:
        gc.set_threshold(GC_THRESHOLD)
        print "gc threshold:", gc.get_threshold()
def in_thread():
    # Uncomment to enable tracing of log context changes.
    # sys.settrace(logcontext_tracer)
    with LoggingContext("run"):
        change_resource_limit(hs.config.soft_file_limit)
        if hs.config.gc_thresholds:
            gc.set_threshold(*hs.config.gc_thresholds)
        reactor.run()
def tearDown(test):
    sys.path[:], sys.argv[:] = test.globs['saved-sys-info'][:2]
    if hasattr(gc, 'get_threshold'):
        gc.set_threshold(*test.globs['saved-gc-threshold'])
    sys.modules.clear()
    sys.modules.update(test.globs['saved-sys-info'][2])
    if sys.version_info[0] >= 3:
        doctest._SpoofOut = test.globs['saved-doctest-SpoofOut']
def main():
    parser = argparse.ArgumentParser(description="access to pythons built-in garbage collector")
    parser.add_argument("command", help="what to do",
                        choices=["enable", "disable", "status", "collect", "threshold", "debug", "break"],
                        action="store")
    parser.add_argument("args", help="argument for command", action="store", nargs="*")
    ns = parser.parse_args()
    if ns.command == "enable":
        gc.enable()
    elif ns.command == "disable":
        gc.disable()
    elif ns.command == "collect":
        gc.collect()
    elif ns.command == "status":
        print "GC enabled: {s}".format(s=gc.isenabled())
        tracked = gc.get_objects()
        n = len(tracked)
        print "Tracked objects: {n}".format(n=n)
        size = sum([sys.getsizeof(e) for e in tracked])
        del tracked  # this list may be big, better delete it
        print "Size of tracked objects: {s} bytes".format(s=size)
        print "Garbage: {n}".format(n=len(gc.garbage))
        gsize = sum([sys.getsizeof(e) for e in gc.garbage])
        print "Size of garbage: {s} bytes".format(s=gsize)
        print "Debug: {d}".format(d=gc.get_debug())
    elif ns.command == "threshold":
        if len(ns.args) == 0:
            print "Threshold:\n G1: {}\n G2: {}\n G3: {}".format(*gc.get_threshold())
        elif len(ns.args) > 3:
            print "Error: too many arguments for threshold!"
            sys.exit(1)
        else:
            try:
                ts = tuple([int(e) for e in ns.args])
            except ValueError:
                print "Error: expected arguments to be integer!"
                sys.exit(1)
            gc.set_threshold(*ts)
    elif ns.command == "debug":
        if len(ns.args) == 0:
            print "Debug: {d}".format(d=gc.get_debug())
        elif len(ns.args) == 1:
            try:
                flag = int(ns.args[0])
            except ValueError:
                print "Error: expected argument to be an integer!"
                sys.exit(1)
            gc.set_debug(flag)
        else:
            print "Error: expected exactly one argument for debug!"
            sys.exit(1)
    elif ns.command == "break":
        if len(gc.garbage) == 0:
            print "Error: No Garbage found!"
            sys.exit(1)
        else:
            # break reference cycles by stripping attributes off the first garbage object
            for k in dir(gc.garbage[0]):
                try:
                    delattr(gc.garbage[0], k)
                except:
                    pass
            del gc.garbage[:]
def tearDown(self):
    gc.collect()
    self.assertEqual(len(gc.garbage), self.gc_count)
    if len(gc.garbage) > 0:
        if self.verbose > 1:
            print gc.get_objects()
        # TODO be pedantic and fail?
        del gc.garbage[:]
    gc.set_threshold(*self.gc_threshold_old)
    gc.set_debug(self.gc_flags_old)
def setUp (self):
    super (NotifyTestCase, self).setUp ()

    if not self.HAVE_CONTROLLABLE_GC:
        return

    gc.set_threshold (0, 0, 0)

    self.__num_collectable_objects = self.collect_garbage ()
    self.__num_active_protections = AbstractGCProtector.default.num_active_protections
def set_gc_threshold(self, iter1, iter2, iter3):
    """
    Set the garbage collection thresholds for the three generations.

    @param iter1: generation-0 threshold (net new allocations before a collection)
    @type iter1: int
    @param iter2: generation-1 threshold
    @type iter2: int
    @param iter3: generation-2 threshold
    @type iter3: int
    @return: None
    @rtype: None
    """
    gc.set_threshold(iter1, iter2, iter3)
def main():
    """
    The initial setup for blamehangle. Parse command line options, read config
    file, and start the Postman.
    """

    #gc.set_debug(gc.DEBUG_STATS|gc.DEBUG_COLLECTABLE|gc.DEBUG_UNCOLLECTABLE|gc.DEBUG_INSTANCES|gc.DEBUG_OBJECTS)

    # set our own gc thresholds, to keep mem usage from creeping. It seems
    # that the default is extremely slow, and since blamehangle is not an
    # interactive program the minor performance hit taken during garbage
    # collection won't even be noticeable.
    #
    # The default is (700, 10, 10) !
    gc.set_threshold(50, 5, 2)

    # Parse our command line options
    try:
        opts, args = getopt.getopt(sys.argv[1:], 'c:p', ['config=', 'profile'])
    except getopt.GetoptError:
        Show_Usage()

    ConfigFile = None
    Profiled = 0

    for opt, arg in opts:
        if opt in ('-c', '--config'):
            ConfigFile = arg
        if opt in ('-p', '--profile'):
            Profiled = 1

    # Load our config file
    if not ConfigFile or not os.path.exists(ConfigFile):
        Show_Usage()

    Config = ConfigParser()
    Config.read(ConfigFile)

    # Start up the Postman, and run him forever. If we're profiling, do that.
    Post = Postman(ConfigFile, Config)
    if Profiled:
        import hotshot
        prof = hotshot.Profile('hangle.prof')
        prof.runcall(Post.run_forever)
        prof.close()

        # Print some profile stats
        import hotshot.stats
        stats = hotshot.stats.load('hangle.prof')
        stats.strip_dirs()
        stats.sort_stats('time', 'calls')
        stats.print_stats(25)
    else:
        Post.run_forever()
def global_setup(self):
    self.old_threshold = gc.get_threshold()
    if self.threshold[0]:
        self.runner.options.output.info(
            "Cyclic garbage collection threshold set to: %s" %
            repr(tuple(self.threshold)))
    else:
        self.runner.options.output.info(
            "Cyclic garbage collection is disabled.")
    gc.set_threshold(*self.threshold)
def doctestsTearDown(test):
    sys.path[:], sys.argv[:] = test.globs['saved-sys-info'][:2]
    gc.set_threshold(*test.globs['saved-sys-info'][3])
    sys.modules.clear()
    sys.modules.update(test.globs['saved-sys-info'][2])
    try:
        testrunner.configure_logging = test.globs['old_configure_logging']
        del test.globs['old_configure_logging']
    except KeyError:
        pass
    del os.environ[z3c.etree.testing.engine_env_key]
    del test.globs['old_engine']
def Main():
    """Parse command line, fork, and start stream request handler."""
    # Remember the time spent in the parent.
    times_at_start = os.times()
    include_server_port, pid_file = _ParseCommandLineOptions()
    # Get locking mechanism.
    include_server_port_ready = _IncludeServerPortReady()
    # Now spawn child so that parent can exit immediately after writing
    # the process id of child to the pid file.
    times_at_fork = os.times()
    pid = os.fork()
    if pid != 0:
        # In parent.
        if pid_file:
            pid_file_fd = open(pid_file, "w")
            print >> pid_file_fd, pid
            pid_file_fd.close()
        # Just run to completion now -- after making sure that child is ready.
        include_server_port_ready.Acquire()
    else:
        # In child.
        #
        # We call _Setup only now, because the process id, used in naming the client
        # root, must be that of this process, not that of the parent process. See
        # _CleanOutOthers for the importance of the process id.
        (include_analyzer, server) = _SetUp(include_server_port)
        include_server_port_ready.Release()
        try:
            try:
                gc.set_threshold(basics.GC_THRESHOLD)
                # Use commented-out line below to have a message printed for each
                # collection.
                # gc.set_debug(gc.DEBUG_STATS + gc.DEBUG_COLLECTABLE)
                server.serve_forever()
            except KeyboardInterrupt:
                print >> sys.stderr, (
                    "Include server: keyboard interrupt, quitting after cleaning up.")
                _CleanOut(include_analyzer, include_server_port)
            except SignalSIGTERM:
                Debug(DEBUG_TRACE, "Include server shutting down.")
                _CleanOut(include_analyzer, include_server_port)
            except:
                print >> sys.stderr, (
                    "Include server: exception occurred, quitting after cleaning up.")
                _PrintStackTrace(sys.stderr)
                _CleanOut(include_analyzer, include_server_port)
                raise  # reraise exception
        finally:
            if basics.opt_print_times:
                _PrintTimes(times_at_start, times_at_fork, os.times())
def applySettings(self):
    # GC
    t = gc.get_threshold()
    multi = self.gcMemoryThreshold
    gc.set_threshold(int(t[0] * multi), int(t[1] * multi), int(t[2] * multi))

    # Fonts
    self.applyMonospaceFont(self.monospaceFont)
    self.applyStandardFont(self.standardFont)
    self.applyTheme(self.themeName)
    if os.name == "nt":
        self.applyMenuFont(self.standardFont)
def _adjustGcThreshold(self, task):
    numGarbage = GarbageReport.checkForGarbageLeaks()
    if numGarbage == 0:
        self.gcNotify.debug('no garbage found, doubling gc threshold')
        a, b, c = gc.get_threshold()
        gc.set_threshold(min(a * 2, 1 << 30), b, c)
        task.delayTime = task.delayTime * 2
        retVal = Task.again
    else:
        self.gcNotify.warning('garbage found, reverting gc threshold')
        gc.set_threshold(*self._gcDefaultThreshold)
        retVal = Task.done
    return retVal
def test_set_threshold():
    # the method has three arguments
    gc.set_threshold(0, -2, 2)
    result = gc.get_threshold()
    AreEqual(result[0], 0)
    AreEqual(result[1], -2)
    AreEqual(result[2], 2)

    # the method has two arguments
    gc.set_threshold(0, 128)
    result = gc.get_threshold()
    AreEqual(result[0], 0)
    AreEqual(result[1], 128)
    #CodePlex Work Item 8523
    #AreEqual(result[2],2)

    # the method has only one argument
    gc.set_threshold(-10009)
    result = gc.get_threshold()
    AreEqual(result[0], -10009)
    #CodePlex Work Item 8523
    #AreEqual(result[1],128)
    #AreEqual(result[2],2)

    # the argument is a random int
    for i in xrange(1, 65535, 6):
        gc.set_threshold(i)
        result = gc.get_threshold()
        AreEqual(result[0], i)
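For contrast with the IronPython quirks tracked by the CodePlex items above, here is a minimal sketch of the documented CPython behavior (assumed from the gc module docs, not taken from this test suite): set_threshold accepts one to three ints, generations you omit keep their previous values, and get_threshold always returns a 3-tuple.

    import gc

    old = gc.get_threshold()       # (700, 10, 10) by default in CPython
    gc.set_threshold(500)          # only threshold0 changes
    assert gc.get_threshold() == (500, old[1], old[2])
    gc.set_threshold(*old)         # restore the saved values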
def test_del_newclass(self):
    # __del__ methods can trigger collection, make this to happen
    thresholds = gc.get_threshold()
    gc.enable()
    gc.set_threshold(1)

    class A(object):
        def __del__(self):
            dir(self)

    a = A()
    del a

    gc.disable()
    gc.set_threshold(*thresholds)
def setUp(self):
    self.mem_ini = MemorySingleton(self.verbose - 1)
    self.mem_ref = MemoryStatistics(self.verbose - 1)
    self.mem_cur = self.mem_ref.copy()
    self.gc_threshold_old = gc.get_threshold()
    self.gc_flags_old = gc.get_debug()
    gc.set_threshold(*self.gc_threshold)
    gc.set_debug(self.gc_flags)
    # Try to obtain a clean slate
    gc.collect()
    self.gc_count = len(gc.garbage)
    del gc.garbage[:]
def unreachable_items():
    # see http://stackoverflow.com/questions/16911559/trouble-understanding-pythons-gc-garbage-for-tracing-memory-leaks
    # first time setup
    import gc
    gc.set_threshold(0)             # only manual sweeps
    gc.set_debug(gc.DEBUG_SAVEALL)  # keep unreachable items as garbage
    gc.enable()                     # start gc if not yet running (is this necessary?)
    # operation
    if gc.collect() == 0:
        log.info('no unreachable items')
    else:
        fmt = '[%%0%dd] %%s' % len(str(len(gc.garbage) - 1))
        log.info('unreachable items:\n  ' +
                 '\n  '.join(fmt % item for item in enumerate(gc.garbage)))
    _deep_purge_list(gc.garbage)    # remove unreachable items
def test_set_threshold(self):
    """get_threshold, set_threshold"""
    # the method has three arguments
    gc.set_threshold(0, -2, 2)
    result = gc.get_threshold()
    self.assertEqual(result[0], 0)
    self.assertEqual(result[1], -2)
    self.assertEqual(result[2], 2)

    # the method has two arguments
    gc.set_threshold(0, 128)
    result = gc.get_threshold()
    self.assertEqual(result[0], 0)
    self.assertEqual(result[1], 128)
    #CodePlex Work Item 8523
    self.assertEqual(result[2], 2)

    # the method has only one argument
    gc.set_threshold(-10009)
    result = gc.get_threshold()
    self.assertEqual(result[0], -10009)
    #CodePlex Work Item 8523
    self.assertEqual(result[1], 128)
    self.assertEqual(result[2], 2)

    # the argument is a random int
    for i in xrange(1, 65535, 6):
        gc.set_threshold(i)
        result = gc.get_threshold()
        self.assertEqual(result[0], i)

    # an argument is a float
    #CodePlex Work Item 8522
    self.assertRaises(TypeError, gc.set_threshold, 2.1)
    self.assertRaises(TypeError, gc.set_threshold, 3, -1.3)

    # an argument is a string
    #CodePlex Work Item 8522
    self.assertRaises(TypeError, gc.set_threshold, "1")
    self.assertRaises(TypeError, gc.set_threshold, "str", "xdv#4")
    self.assertRaises(TypeError, gc.set_threshold, 2, "1")
    self.assertRaises(TypeError, gc.set_threshold, 31, -123, "asdfasdf", "1")

    # an argument is an object
    #CodePlex Work Item 8522
    o = object()
    o2 = object()
    self.assertRaises(TypeError, gc.set_threshold, o)
    self.assertRaises(TypeError, gc.set_threshold, o, o2)
    self.assertRaises(TypeError, gc.set_threshold, 1, -123, o)
    o = _random.Random()
    o2 = _random.Random()
    self.assertRaises(TypeError, gc.set_threshold, o)
    self.assertRaises(TypeError, gc.set_threshold, o, o2)
    self.assertRaises(TypeError, gc.set_threshold, 8, 64, o)
def main():
    log_clock("Bootstrap to the start of init.init")

    renpy.game.exception_info = 'Before loading the script.'

    # Get ready to accept new arguments.
    renpy.arguments.pre_init()

    # Init the screen language parser.
    renpy.sl2.slparser.init()

    # Init the config after load.
    renpy.config.init()

    # Set up variants.
    choose_variants()
    renpy.display.touch = "touch" in renpy.config.variants

    log_clock("Early init")

    # Note the game directory.
    game.basepath = renpy.config.gamedir
    renpy.config.searchpath = [renpy.config.gamedir]

    # Find the common directory.
    commondir = __main__.path_to_common(renpy.config.renpy_base)  # E1101 @UndefinedVariable

    if os.path.isdir(commondir):
        renpy.config.searchpath.append(commondir)
        renpy.config.commondir = commondir
    else:
        renpy.config.commondir = None

    # Add path from env variable, if any
    if "RENPY_SEARCHPATH" in os.environ:
        renpy.config.searchpath.extend(os.environ["RENPY_SEARCHPATH"].split("::"))

    if renpy.android:
        renpy.config.searchpath = []
        renpy.config.commondir = None

        if "ANDROID_PUBLIC" in os.environ:
            android_game = os.path.join(os.environ["ANDROID_PUBLIC"], "game")

            print("Android searchpath: ", android_game)

            if os.path.exists(android_game):
                renpy.config.searchpath.insert(0, android_game)

    # Load Ren'Py extensions.
    for dir in renpy.config.searchpath:  # @ReservedAssignment
        for fn in os.listdir(dir):
            if fn.lower().endswith(".rpe"):
                load_rpe(dir + "/" + fn)

    # The basename is the final component of the path to the gamedir.
    for i in sorted(os.listdir(renpy.config.gamedir)):
        if not i.endswith(".rpa"):
            continue

        i = i[:-4]
        renpy.config.archives.append(i)

    renpy.config.archives.reverse()

    # Initialize archives.
    renpy.loader.index_archives()

    # Start auto-loading.
    renpy.loader.auto_init()

    log_clock("Loader init")

    # Initialize the log.
    game.log = renpy.python.RollbackLog()

    # Initialize the store.
    renpy.store.store = sys.modules['store']

    # Set up styles.
    game.style = renpy.style.StyleManager()  # @UndefinedVariable
    renpy.store.style = game.style

    # Run init code in its own context. (Don't log.)
    game.contexts = [renpy.execution.Context(False)]
    game.contexts[0].init_phase = True

    renpy.execution.not_infinite_loop(60)

    # Load the script.
    renpy.game.exception_info = 'While loading the script.'
    renpy.game.script = renpy.script.Script()

    if renpy.session.get("compile", False):
        renpy.game.args.compile = True

    # Set up error handling.
    renpy.exports.load_module("_errorhandling")

    if renpy.exports.loadable("tl/None/common.rpym") or renpy.exports.loadable("tl/None/common.rpymc"):
        renpy.exports.load_module("tl/None/common")

    renpy.config.init_system_styles()
    renpy.style.build_styles()  # @UndefinedVariable

    log_clock("Loading error handling")

    # If recompiling everything, remove orphan .rpyc files.
    # Otherwise, will fail in case orphan .rpyc have same
    # labels as in other scripts (usually happens on script rename).
    if (renpy.game.args.command == 'compile') and not (renpy.game.args.keep_orphan_rpyc):  # @UndefinedVariable
        for (fn, dn) in renpy.game.script.script_files:
            if dn is None:
                continue

            if not os.path.isfile(os.path.join(dn, fn + ".rpy")):
                try:
                    name = os.path.join(dn, fn + ".rpyc")
                    os.rename(name, name + ".bak")
                except OSError:
                    # This perhaps shouldn't happen since either .rpy or .rpyc should exist
                    pass

        # Update script files list, so that it doesn't contain removed .rpyc's
        renpy.loader.cleardirfiles()
        renpy.game.script.scan_script_files()

    # Load all .rpy files.
    renpy.game.script.load_script()  # sets renpy.game.script.
    log_clock("Loading script")

    if renpy.game.args.command == 'load-test':  # @UndefinedVariable
        start = time.time()

        for i in range(5):
            print(i)
            renpy.game.script = renpy.script.Script()
            renpy.game.script.load_script()

        print(time.time() - start)
        sys.exit(0)

    renpy.game.exception_info = 'After loading the script.'

    # Find the save directory.
    if renpy.config.savedir is None:
        renpy.config.savedir = __main__.path_to_saves(renpy.config.gamedir)  # E1101 @UndefinedVariable

    if renpy.game.args.savedir:  # @UndefinedVariable
        renpy.config.savedir = renpy.game.args.savedir  # @UndefinedVariable

    # Init preferences.
    game.persistent = renpy.persistent.init()
    game.preferences = game.persistent._preferences

    for i in renpy.game.persistent._seen_translates:  # @UndefinedVariable
        if i in renpy.game.script.translator.default_translates:
            renpy.game.seen_translates_count += 1

    if game.persistent._virtual_size:
        renpy.config.screen_width, renpy.config.screen_height = game.persistent._virtual_size

    # Init save locations and loadsave.
    renpy.savelocation.init()

    # We need to be 100% sure we kill the savelocation thread.
    try:
        # Init save slots.
        renpy.loadsave.init()

        log_clock("Loading save slot metadata.")

        # Load persistent data from all save locations.
        renpy.persistent.update()
        game.preferences = game.persistent._preferences
        log_clock("Loading persistent")

        # Clear the list of seen statements in this game.
        game.seen_session = {}

        # Initialize persistent variables.
        renpy.store.persistent = game.persistent
        renpy.store._preferences = game.preferences
        renpy.store._test = renpy.test.testast._test

        if renpy.parser.report_parse_errors():
            raise renpy.game.ParseErrorException()

        renpy.game.exception_info = 'While executing init code:'

        for _prio, node in game.script.initcode:
            if isinstance(node, renpy.ast.Node):
                node_start = time.time()

                renpy.game.context().run(node)

                node_duration = time.time() - node_start

                if node_duration > renpy.config.profile_init:
                    renpy.display.log.write(" - Init at %s:%d took %.5f s.",
                                            node.filename, node.linenumber, node_duration)
            else:
                # An init function.
                node()

        renpy.game.exception_info = 'After initialization, but before game start.'

        # Check if we should simulate android.
        renpy.android = renpy.android or renpy.config.simulate_android  # @UndefinedVariable

        # Re-set up the logging.
        renpy.log.post_init()

        # Run the post init code, if any.
        for i in renpy.game.post_init:
            i()

        renpy.game.script.report_duplicate_labels()

        # Sort the images.
        renpy.display.image.image_names.sort()

        game.persistent._virtual_size = renpy.config.screen_width, renpy.config.screen_height

        log_clock("Running init code")

        renpy.pyanalysis.load_cache()
        log_clock("Loading analysis data")

        # Analyze the script and compile ATL.
        renpy.game.script.analyze()
        renpy.atl.compile_all()
        log_clock("Analyze and compile ATL")

        # Index the archive files. We should not have loaded an image
        # before this point. (As pygame will not have been initialized.)
        # We need to do this again because the list of known archives
        # may have changed.
        renpy.loader.index_archives()
        log_clock("Index archives")

        # Check some environment variables.
        renpy.game.less_memory = "RENPY_LESS_MEMORY" in os.environ
        renpy.game.less_mouse = "RENPY_LESS_MOUSE" in os.environ
        renpy.game.less_updates = "RENPY_LESS_UPDATES" in os.environ

        renpy.dump.dump(False)
        renpy.game.script.make_backups()
        log_clock("Dump and make backups.")

        # Initialize image cache.
        renpy.display.im.cache.init()
        log_clock("Cleaning cache")

        # Make a clean copy of the store.
        renpy.python.make_clean_stores()
        log_clock("Making clean stores")

        gc.collect()

        if renpy.config.manage_gc:
            gc.set_threshold(*renpy.config.gc_thresholds)

            gc_debug = int(os.environ.get("RENPY_GC_DEBUG", 0))

            if renpy.config.gc_print_unreachable:
                gc_debug |= gc.DEBUG_SAVEALL

            gc.set_debug(gc_debug)

        log_clock("Initial gc.")

        # Start debugging file opens.
        renpy.debug.init_main_thread_open()

        # (Perhaps) Initialize graphics.
        if not game.interface:
            renpy.display.core.Interface()
            log_clock("Creating interface object")

        # Start things running.
        restart = None

        while True:
            if restart:
                renpy.display.screen.before_restart()

            try:
                try:
                    run(restart)
                finally:
                    restart = (renpy.config.end_game_transition, "_invoke_main_menu", "_main_menu")
                    renpy.persistent.update(True)

            except game.FullRestartException as e:
                restart = e.reason

            finally:
                # Flush any pending interface work.
                renpy.display.interface.finish_pending()

                # Give Ren'Py a couple of seconds to finish saving.
                renpy.loadsave.autosave_not_running.wait(3.0)

    finally:
        gc.set_debug(0)

        renpy.loader.auto_quit()
        renpy.savelocation.quit()
        renpy.translation.write_updated_strings()

    # This is stuff we do on a normal, non-error return.
    if not renpy.display.error.error_handled:
        renpy.display.render.check_at_shutdown()
            if count[r] > 0:
                break
            r += 1
        else:
            return max_flips


if __name__ == "__main__":
    imp = platform.python_implementation()
    # gc.disable()
    if imp == "PyPy":
        hooks = MyHooks()
        gc.hooks.set(hooks)
    elif imp == "CPython":
        gc.set_threshold(1, 2, 2)
        gc.set_debug(gc.DEBUG_STATS)

    arg = DEFAULT_ARG
    fannkuch(arg)

    if imp == "PyPy":
        #print(gc.get_stats())
        pass
    elif imp == "CPython":
        gc.set_debug(0)
def main(
    host,
    port,
    bokeh_port,
    show,
    dashboard,
    bokeh,
    dashboard_prefix,
    use_xheaders,
    pid_file,
    scheduler_file,
    interface,
    protocol,
    local_directory,
    preload,
    preload_argv,
    tls_ca_file,
    tls_cert,
    tls_key,
    dashboard_address,
    idle_timeout,
):
    g0, g1, g2 = gc.get_threshold()  # https://github.com/dask/distributed/issues/1653
    gc.set_threshold(g0 * 3, g1 * 3, g2 * 3)

    enable_proctitle_on_current()
    enable_proctitle_on_children()

    if bokeh_port is not None:
        warnings.warn(
            "The --bokeh-port flag has been renamed to --dashboard-address. "
            "Consider adding ``--dashboard-address :%d`` " % bokeh_port
        )
        dashboard_address = bokeh_port
    if bokeh is not None:
        warnings.warn(
            "The --bokeh/--no-bokeh flag has been renamed to --dashboard/--no-dashboard. "
        )
        dashboard = bokeh

    if port is None and (not host or not re.search(r":\d", host)):
        port = 8786

    sec = Security(
        **{
            k: v
            for k, v in [
                ("tls_ca_file", tls_ca_file),
                ("tls_scheduler_cert", tls_cert),
                ("tls_scheduler_key", tls_key),
            ]
            if v is not None
        }
    )

    if not host and (tls_ca_file or tls_cert or tls_key):
        host = "tls://"

    if pid_file:
        with open(pid_file, "w") as f:
            f.write(str(os.getpid()))

        def del_pid_file():
            if os.path.exists(pid_file):
                os.remove(pid_file)

        atexit.register(del_pid_file)

    local_directory_created = False
    if local_directory:
        if not os.path.exists(local_directory):
            os.mkdir(local_directory)
            local_directory_created = True
    else:
        local_directory = tempfile.mkdtemp(prefix="scheduler-")
        local_directory_created = True
    if local_directory not in sys.path:
        sys.path.insert(0, local_directory)

    if sys.platform.startswith("linux"):
        import resource  # module fails importing on Windows

        soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
        limit = max(soft, hard // 2)
        resource.setrlimit(resource.RLIMIT_NOFILE, (limit, hard))

    loop = IOLoop.current()
    logger.info("-" * 47)

    scheduler = Scheduler(
        loop=loop,
        scheduler_file=scheduler_file,
        security=sec,
        host=host,
        port=port,
        interface=interface,
        protocol=protocol,
        dashboard_address=dashboard_address if dashboard else None,
        service_kwargs={"dashboard": {"prefix": dashboard_prefix}},
        idle_timeout=idle_timeout,
        preload=preload,
        preload_argv=preload_argv,
    )
    logger.info("Local Directory: %26s", local_directory)
    logger.info("-" * 47)

    install_signal_handlers(loop)

    async def run():
        await scheduler
        await scheduler.finished()

    try:
        loop.run_sync(run)
    finally:
        scheduler.stop()
        if local_directory_created:
            shutil.rmtree(local_directory)

        logger.info("End scheduler at %r", scheduler.address)
            generate = True
        elif o in ('-x', '--exclude'):
            exclude = True
        elif o in ('-s', '--single'):
            single = True
        elif o in ('-r', '--randomize'):
            randomize = True
        elif o in ('-f', '--fromfile'):
            fromfile = a
        elif o in ('-l', '--findleaks'):
            findleaks = True
        elif o in ('-L', '--runleaks'):
            runleaks = True
        elif o in ('-t', '--threshold'):
            import gc
            gc.set_threshold(int(a))
        elif o in ('-T', '--coverage'):
            trace = True
        elif o in ('-D', '--coverdir'):
            coverdir = os.path.join(os.getcwd(), a)
        elif o in ('-N', '--nocoverdir'):
            coverdir = None
        elif o in ('-R', '--huntrleaks'):
            huntrleaks = a.split(':')
            if len(huntrleaks) != 3:
                print a, huntrleaks
                usage(2, '-R takes three colon-separated arguments')
            if len(huntrleaks[0]) == 0:
                huntrleaks[0] = 5
            else:
                huntrleaks[0] = int(huntrleaks[0])
def setup_tests(ns):
    try:
        stderr_fd = sys.__stderr__.fileno()
    except (ValueError, AttributeError):
        # Catch ValueError to catch io.UnsupportedOperation on TextIOBase
        # and ValueError on a closed stream.
        #
        # Catch AttributeError for stderr being None.
        stderr_fd = None
    else:
        # Display the Python traceback on fatal errors (e.g. segfault)
        faulthandler.enable(all_threads=True, file=stderr_fd)

        # Display the Python traceback on SIGALRM or SIGUSR1 signal
        signals = []
        if hasattr(signal, 'SIGALRM'):
            signals.append(signal.SIGALRM)
        if hasattr(signal, 'SIGUSR1'):
            signals.append(signal.SIGUSR1)
        for signum in signals:
            faulthandler.register(signum, chain=True, file=stderr_fd)

    _adjust_resource_limits()
    replace_stdout()
    support.record_original_stdout(sys.stdout)

    if ns.testdir:
        # Prepend test directory to sys.path, so runtest() will be able
        # to locate tests
        sys.path.insert(0, os.path.abspath(ns.testdir))

    # Some times __path__ and __file__ are not absolute (e.g. while running from
    # Lib/) and, if we change the CWD to run the tests in a temporary dir, some
    # imports might fail. This affects only the modules imported before os.chdir().
    # These modules are searched first in sys.path[0] (so '' -- the CWD) and if
    # they are found in the CWD their __file__ and __path__ will be relative (this
    # happens before the chdir). All the modules imported after the chdir, are
    # not found in the CWD, and since the other paths in sys.path[1:] are absolute
    # (site.py absolutize them), the __file__ and __path__ will be absolute too.
    # Therefore it is necessary to absolutize manually the __file__ and __path__ of
    # the packages to prevent later imports to fail when the CWD is different.
    for module in sys.modules.values():
        if hasattr(module, '__path__'):
            for index, path in enumerate(module.__path__):
                module.__path__[index] = os.path.abspath(path)
        if getattr(module, '__file__', None):
            module.__file__ = os.path.abspath(module.__file__)

    if ns.huntrleaks:
        unittest.BaseTestSuite._cleanup = False

    if ns.memlimit is not None:
        support.set_memlimit(ns.memlimit)

    if ns.threshold is not None:
        gc.set_threshold(ns.threshold)

    support.suppress_msvcrt_asserts(ns.verbose and ns.verbose >= 2)

    support.use_resources = ns.use_resources

    if hasattr(sys, 'addaudithook'):
        # Add an auditing hook for all tests to ensure PySys_Audit is tested
        def _test_audit_hook(name, args):
            pass
        sys.addaudithook(_test_audit_hook)

    setup_unraisable_hook()
    setup_threading_excepthook()

    if ns.timeout is not None:
        # For a slow buildbot worker, increase SHORT_TIMEOUT and LONG_TIMEOUT
        support.SHORT_TIMEOUT = max(support.SHORT_TIMEOUT, ns.timeout / 40)
        support.LONG_TIMEOUT = max(support.LONG_TIMEOUT, ns.timeout / 4)

        # If --timeout is short: reduce timeouts
        support.LOOPBACK_TIMEOUT = min(support.LOOPBACK_TIMEOUT, ns.timeout)
        support.INTERNET_TIMEOUT = min(support.INTERNET_TIMEOUT, ns.timeout)
        support.SHORT_TIMEOUT = min(support.SHORT_TIMEOUT, ns.timeout)
        support.LONG_TIMEOUT = min(support.LONG_TIMEOUT, ns.timeout)

    if ns.xmlpath:
        from test.support.testresult import RegressionTestResult
        RegressionTestResult.USE_XML = True

    # Ensure there's a non-ASCII character in env vars at all times to force
    # tests consider this case. See BPO-44647 for details.
    os.environ.setdefault(
        UNICODE_GUARD_ENV,
        "\N{SMILING FACE WITH SUNGLASSES}",
    )
def main(logger):
    opt = parse_args()
    print(opt)

    # Garbage collection, default threshold is (700, 10, 10).
    # Set threshold lower to collect garbage more frequently and release more
    # CPU memory for heavy data loading.
    gc.set_threshold(100, 5, 5)

    # set env
    num_gpus = opt.num_gpus
    batch_size = opt.batch_size
    context = [mx.cpu()]
    if num_gpus > 0:
        batch_size *= max(1, num_gpus)
        context = [mx.gpu(i) for i in range(num_gpus)]
    num_workers = opt.num_workers
    print('Total batch size is set to %d on %d GPUs' % (batch_size, num_gpus))

    # get data
    image_norm_mean = [0.485, 0.456, 0.406]
    image_norm_std = [0.229, 0.224, 0.225]
    if opt.ten_crop:
        transform_test = transforms.Compose([
            video.VideoTenCrop(opt.input_size),
            video.VideoToTensor(),
            video.VideoNormalize(image_norm_mean, image_norm_std)
        ])
        opt.num_crop = 10
    elif opt.three_crop:
        transform_test = transforms.Compose([
            video.VideoThreeCrop(opt.input_size),
            video.VideoToTensor(),
            video.VideoNormalize(image_norm_mean, image_norm_std)
        ])
        opt.num_crop = 3
    else:
        transform_test = video.VideoGroupValTransform(size=opt.input_size,
                                                      mean=image_norm_mean,
                                                      std=image_norm_std)
        opt.num_crop = 1

    if not opt.deploy:
        # get model
        if opt.use_pretrained and len(opt.hashtag) > 0:
            opt.use_pretrained = opt.hashtag
        classes = opt.num_classes
        model_name = opt.model
        # Currently, there is no hashtag for int8 models.
        if opt.quantized:
            model_name += '_int8'
            opt.use_pretrained = True

        net = get_model(name=model_name, nclass=classes, pretrained=opt.use_pretrained,
                        num_segments=opt.num_segments, num_crop=opt.num_crop)
        net.cast(opt.dtype)
        net.collect_params().reset_ctx(context)
        if opt.mode == 'hybrid':
            net.hybridize(static_alloc=True, static_shape=True)
        if opt.resume_params != '' and not opt.use_pretrained:
            net.load_parameters(opt.resume_params, ctx=context)
            print('Pre-trained model %s is successfully loaded.' % (opt.resume_params))
        else:
            print('Pre-trained model is successfully loaded from the model zoo.')
    else:
        model_name = 'deploy'
        net = mx.gluon.SymbolBlock.imports(
            '{}-symbol.json'.format(opt.model_prefix), ['data'],
            '{}-0000.params'.format(opt.model_prefix))
        net.hybridize(static_alloc=True, static_shape=True)

    print("Successfully loaded model {}".format(model_name))

    # dummy data for benchmarking performance
    if opt.benchmark:
        benchmarking(opt, net, context)
        sys.exit()

    if opt.dataset == 'ucf101':
        val_dataset = UCF101(setting=opt.val_list, root=opt.data_dir, train=False,
                             new_width=opt.new_width, new_height=opt.new_height,
                             new_length=opt.new_length,
                             target_width=opt.input_size, target_height=opt.input_size,
                             test_mode=True, num_segments=opt.num_segments,
                             transform=transform_test)
    elif opt.dataset == 'kinetics400':
        val_dataset = Kinetics400(setting=opt.val_list, root=opt.data_dir, train=False,
                                  new_width=opt.new_width, new_height=opt.new_height,
                                  new_length=opt.new_length, new_step=opt.new_step,
                                  target_width=opt.input_size, target_height=opt.input_size,
                                  video_loader=opt.video_loader, use_decord=opt.use_decord,
                                  slowfast=opt.slowfast,
                                  slow_temporal_stride=opt.slow_temporal_stride,
                                  fast_temporal_stride=opt.fast_temporal_stride,
                                  test_mode=True, num_segments=opt.num_segments,
                                  num_crop=opt.num_crop, transform=transform_test)
    elif opt.dataset == 'somethingsomethingv2':
        val_dataset = SomethingSomethingV2(setting=opt.val_list, root=opt.data_dir, train=False,
                                           new_width=opt.new_width, new_height=opt.new_height,
                                           new_length=opt.new_length, new_step=opt.new_step,
                                           target_width=opt.input_size,
                                           target_height=opt.input_size,
                                           video_loader=opt.video_loader,
                                           use_decord=opt.use_decord,
                                           num_segments=opt.num_segments,
                                           transform=transform_test)
    elif opt.dataset == 'hmdb51':
        val_dataset = HMDB51(setting=opt.val_list, root=opt.data_dir, train=False,
                             new_width=opt.new_width, new_height=opt.new_height,
                             new_length=opt.new_length, new_step=opt.new_step,
                             target_width=opt.input_size, target_height=opt.input_size,
                             video_loader=opt.video_loader, use_decord=opt.use_decord,
                             num_segments=opt.num_segments, transform=transform_test)
    else:
        logger.info('Dataset %s is not supported yet.' % (opt.dataset))

    val_data = gluon.data.DataLoader(val_dataset, batch_size=batch_size,
                                     shuffle=False, num_workers=num_workers,
                                     prefetch=int(opt.prefetch_ratio * num_workers),
                                     last_batch='discard')
    print('Load %d test samples in %d iterations.' % (len(val_dataset), len(val_data)))

    # calibrate FP32 model into INT8 model
    if opt.calibration:
        calibration(net, val_data, opt, context, logger)
        sys.exit()

    start_time = time.time()
    acc_top1_val, acc_top5_val = test(context, val_data, opt, net)
    end_time = time.time()

    print('Test accuracy: acc-top1=%f acc-top5=%f' % (acc_top1_val * 100, acc_top5_val * 100))
    print('Total evaluation time is %4.2f minutes' % ((end_time - start_time) / 60))
import csv
import gc

start = time.time()
csvpd = pd.DataFrame(columns=['date', 'time', 'close', 'volume'])
global microsec
microsec = 0.0000
global lasttime
lasttime = ''
with open('data.csv', mode='r', encoding='utf-8', newline='') as file:
    rows = csv.reader(file)
    for row in rows:
        if row[0].strip() != '\ufeff成交日期' and row[1].strip() == 'TX' and row[2].strip() == '201809':
            # columns: trade date, product code, expiry month (week), trade time,
            # trade price, volume (B+S), near-month price, far-month price, opening call auction
            row[0] = datetime.datetime.strptime(str(row[0]).strip(), '%Y%m%d').strftime('%Y/%m/%d')
            if row[3].strip() != lasttime:
                lasttime = row[3].strip()
                microsec = 0.0000
                row[3] = datetime.datetime.strptime(row[3].strip(), '%H%M%S').strftime('%H:%M:%S')
            else:
                microsec += 0.0001
                row[3] = datetime.datetime.strptime(row[3].strip(), '%H%M%S').strftime('%H:%M:%S') + str(microsec)[1:6]
            #print(type(row[0]),type(row[3]),type(row[4]),type(row[5]))
            newlist = [[row[0], row[3], row[4].strip(), row[5].strip()]]
            csvpd = csvpd.append(pd.DataFrame(newlist, columns=['date', 'time', 'close', 'volume']),
                                 ignore_index=True)

print(csvpd.head())
csvpd.to_csv('output.csv', index=False)
gc.set_threshold(700, 10, 5)
end = time.time()
elapsed = end - start
print('elapsed time: ', elapsed)
import mod_openopc_common

__version__ = mod_openopc_common.VERSION

OPC_STATUS = mod_openopc_common.OPC_STATUS
BROWSER_TYPE = mod_openopc_common.BROWSER_TYPE
ACCESS_RIGHTS = mod_openopc_common.ACCESS_RIGHTS
OPC_QUALITY = mod_openopc_common.OPC_QUALITY
OPC_CLASS = mod_openopc_common.OPC_CLASS
OPC_SERVER = mod_openopc_common.OPC_SERVER
OPC_CLIENT = mod_openopc_common.OPC_CLIENT
OPC_GATE_PORT = mod_openopc_common.OPC_GATEWAY_PORT

mod_openopc_GARBAGE_0 = mod_openopc_common.GARBAGE_0
mod_openopc_GARBAGE_1 = mod_openopc_common.GARBAGE_1
mod_openopc_GARBAGE_2 = mod_openopc_common.GARBAGE_2

# --------------------- -- GARBAGE COLLECTION -----------------------
import gc
gc.set_threshold(mod_openopc_GARBAGE_0, mod_openopc_GARBAGE_1, mod_openopc_GARBAGE_2)
gc.enable()

# --------------------- LOAD LIBRARIES ------------------------------
# --------------------- -- STANDARD ---------------------------------
import os
import sys
import time
import types
import string
import socket
import re
import Queue

# --------------------- -- DEPENDENCIES -----------------------------
import Pyro
import Pyro.core
import Pyro.protocol
        # end process, close CrossEPG DB saving data
        crossdb.close_db()
        self.log("TOTAL EPG EVENTS PROCESSED: %d" % total_events)
        self.log("--- END ---")
        self.log2video("END , events processed: %d" % total_events)


# ****************************************************************************************************************************
# MAIN CODE: SCRIPT STARTS HERE

# increase this process niceness (other processes have higher priority)
os.nice(10)

# set Garbage Collector to do a "generational jump" more frequently than default 700
# memory saving: about 50% (!!), some performance loss (obviously)
gc.set_threshold(50, 10, 10)

SCRIPT_DIR = 'scripts/mediaprem/'

# get CrossEPG installation dir.
crossepg_instroot = crossepg.epgdb_get_installroot()
if crossepg_instroot == False:
    sys.exit(1)
scriptlocation = os.path.join(crossepg_instroot, SCRIPT_DIR)

# get where CrossEPG save data (dbroot) and use it as script cache repository
crossepg_dbroot = crossepg.epgdb_get_dbroot()
if crossepg_dbroot == False:
    sys.exit(1)

# initialize script class
def enable_gc():
    """Re-enable automatic collection by restoring the default generation-0 threshold."""
    gc.set_threshold(700)
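A plausible counterpart, sketched here as an assumption rather than taken from the same codebase: per the CPython docs, a generation-0 threshold of zero disables automatic collection, so the inverse helper needs no gc.disable() call.

    import gc

    def disable_gc():
        """Hypothetical inverse of enable_gc(): a zero generation-0 threshold
        turns automatic collection off (manual gc.collect() still works)."""
        gc.set_threshold(0)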
def main():
    opt = parse_args()

    makedirs(opt.save_dir)

    filehandler = logging.FileHandler(os.path.join(opt.save_dir, opt.logging_file))
    streamhandler = logging.StreamHandler()
    logger = logging.getLogger('')
    logger.setLevel(logging.INFO)
    logger.addHandler(filehandler)
    logger.addHandler(streamhandler)
    logger.info(opt)

    gc.set_threshold(100, 5, 5)

    # set env
    if opt.gpu_id == -1:
        context = mx.cpu()
    else:
        gpu_id = opt.gpu_id
        context = mx.gpu(gpu_id)

    # get data preprocess
    image_norm_mean = [0.485, 0.456, 0.406]
    image_norm_std = [0.229, 0.224, 0.225]
    if opt.ten_crop:
        transform_test = transforms.Compose([
            video.VideoTenCrop(opt.input_size),
            video.VideoToTensor(),
            video.VideoNormalize(image_norm_mean, image_norm_std)
        ])
        opt.num_crop = 10
    elif opt.three_crop:
        transform_test = transforms.Compose([
            video.VideoThreeCrop(opt.input_size),
            video.VideoToTensor(),
            video.VideoNormalize(image_norm_mean, image_norm_std)
        ])
        opt.num_crop = 3
    else:
        transform_test = video.VideoGroupValTransform(size=opt.input_size,
                                                      mean=image_norm_mean,
                                                      std=image_norm_std)
        opt.num_crop = 1

    # get model
    if opt.use_pretrained and len(opt.hashtag) > 0:
        opt.use_pretrained = opt.hashtag
    classes = opt.num_classes
    model_name = opt.model
    net = get_model(name=model_name, nclass=classes, pretrained=opt.use_pretrained,
                    num_segments=opt.num_segments, num_crop=opt.num_crop)
    net.cast(opt.dtype)
    net.collect_params().reset_ctx(context)
    if opt.mode == 'hybrid':
        net.hybridize(static_alloc=True, static_shape=True)
    if opt.resume_params != '' and not opt.use_pretrained:
        net.load_parameters(opt.resume_params, ctx=context)
        logger.info('Pre-trained model %s is successfully loaded.' % (opt.resume_params))
    else:
        logger.info('Pre-trained model is successfully loaded from the model zoo.')
    logger.info("Successfully built model {}".format(model_name))

    # get classes list, if we are using a pretrained network from the model_zoo
    classes = None
    if opt.use_pretrained:
        if "kinetics400" in model_name:
            classes = Kinetics400Attr().classes
        elif "ucf101" in model_name:
            classes = UCF101Attr().classes
        elif "hmdb51" in model_name:
            classes = HMDB51Attr().classes
        elif "sthsth" in model_name:
            classes = SomethingSomethingV2Attr().classes

    # get data
    anno_file = opt.data_list
    f = open(anno_file, 'r')
    data_list = f.readlines()
    logger.info('Load %d video samples.' % len(data_list))

    # build a pseudo dataset instance to use its children class methods
    video_utils = VideoClsCustom(root=opt.data_dir,
                                 setting=opt.data_list,
                                 num_segments=opt.num_segments,
                                 num_crop=opt.num_crop,
                                 new_length=opt.new_length,
                                 new_step=opt.new_step,
                                 new_width=opt.new_width,
                                 new_height=opt.new_height,
                                 video_loader=opt.video_loader,
                                 use_decord=opt.use_decord,
                                 slowfast=opt.slowfast,
                                 slow_temporal_stride=opt.slow_temporal_stride,
                                 fast_temporal_stride=opt.fast_temporal_stride,
                                 data_aug=opt.data_aug,
                                 lazy_init=True)

    start_time = time.time()
    for vid, vline in enumerate(data_list):
        video_path = vline.split()[0]
        video_name = video_path.split('/')[-1]
        if opt.need_root:
            video_path = os.path.join(opt.data_dir, video_path)
        video_data = read_data(opt, video_path, transform_test, video_utils)
        video_input = video_data.as_in_context(context)
        pred = net(video_input.astype(opt.dtype, copy=False))
        if opt.save_logits:
            logits_file = '%s_%s_logits.npy' % (model_name, video_name)
            np.save(os.path.join(opt.save_dir, logits_file), pred.asnumpy())
        pred_label = np.argmax(pred.asnumpy())
        if opt.save_preds:
            preds_file = '%s_%s_preds.npy' % (model_name, video_name)
            np.save(os.path.join(opt.save_dir, preds_file), pred_label)

        # Try to report a text label instead of the number.
        if classes:
            pred_label = classes[pred_label]

        logger.info('%04d/%04d: %s is predicted to class %s' %
                    (vid, len(data_list), video_name, pred_label))

    end_time = time.time()
    logger.info('Total inference time is %4.2f minutes' % ((end_time - start_time) / 60))
def setup_tests(ns):
    try:
        stderr_fd = sys.__stderr__.fileno()
    except (ValueError, AttributeError):
        # Catch ValueError to catch io.UnsupportedOperation on TextIOBase
        # and ValueError on a closed stream.
        #
        # Catch AttributeError for stderr being None.
        stderr_fd = None
    else:
        # Display the Python traceback on fatal errors (e.g. segfault)
        faulthandler.enable(all_threads=True, file=stderr_fd)

        # Display the Python traceback on SIGALRM or SIGUSR1 signal
        signals = []
        if hasattr(signal, 'SIGALRM'):
            signals.append(signal.SIGALRM)
        if hasattr(signal, 'SIGUSR1'):
            signals.append(signal.SIGUSR1)
        for signum in signals:
            faulthandler.register(signum, chain=True, file=stderr_fd)

    replace_stdout()
    support.record_original_stdout(sys.stdout)

    if ns.testdir:
        # Prepend test directory to sys.path, so runtest() will be able
        # to locate tests
        sys.path.insert(0, os.path.abspath(ns.testdir))

    # Some times __path__ and __file__ are not absolute (e.g. while running from
    # Lib/) and, if we change the CWD to run the tests in a temporary dir, some
    # imports might fail. This affects only the modules imported before os.chdir().
    # These modules are searched first in sys.path[0] (so '' -- the CWD) and if
    # they are found in the CWD their __file__ and __path__ will be relative (this
    # happens before the chdir). All the modules imported after the chdir, are
    # not found in the CWD, and since the other paths in sys.path[1:] are absolute
    # (site.py absolutize them), the __file__ and __path__ will be absolute too.
    # Therefore it is necessary to absolutize manually the __file__ and __path__ of
    # the packages to prevent later imports to fail when the CWD is different.
    for module in sys.modules.values():
        if hasattr(module, '__path__'):
            for index, path in enumerate(module.__path__):
                module.__path__[index] = os.path.abspath(path)
        if getattr(module, '__file__', None):
            module.__file__ = os.path.abspath(module.__file__)

    if ns.huntrleaks:
        unittest.BaseTestSuite._cleanup = False

        # Avoid false positives due to various caches
        # filling slowly with random data:
        warm_caches()

    if ns.memlimit is not None:
        support.set_memlimit(ns.memlimit)

    if ns.threshold is not None:
        gc.set_threshold(ns.threshold)

    try:
        import msvcrt
    except ImportError:
        pass
    else:
        msvcrt.SetErrorMode(msvcrt.SEM_FAILCRITICALERRORS|
                            msvcrt.SEM_NOALIGNMENTFAULTEXCEPT|
                            msvcrt.SEM_NOGPFAULTERRORBOX|
                            msvcrt.SEM_NOOPENFILEERRORBOX)
        try:
            msvcrt.CrtSetReportMode
        except AttributeError:
            # release build
            pass
        else:
            for m in [msvcrt.CRT_WARN, msvcrt.CRT_ERROR, msvcrt.CRT_ASSERT]:
                if ns.verbose and ns.verbose >= 2:
                    msvcrt.CrtSetReportMode(m, msvcrt.CRTDBG_MODE_FILE)
                    msvcrt.CrtSetReportFile(m, msvcrt.CRTDBG_FILE_STDERR)
                else:
                    msvcrt.CrtSetReportMode(m, 0)

    support.use_resources = ns.use_resources
def main(driver):
    gc.enable()
    gc.set_threshold(50, 10, 10)
    WebDriverWait(driver, 5).until(
        EC.element_to_be_clickable(
            (By.ID, "secondaryCurrencyProgressWrapper"))).click()
    time.sleep(1.5)
    while True:
        # note: the original compared these lengths with "is"; use "==" for value equality
        if len(
                WebDriverWait(driver, 5).until(
                    EC.presence_of_element_located(
                        (By.ID, "secondaryCurrencyScratchOffWrapper"
                         ))).get_attribute("style")) == 14:
            WebDriverWait(driver, 5).until(
                EC.element_to_be_clickable(
                    (By.ID, "secondaryCurrencyProgressWrapper"))).click()
        center_game(driver)
        if pyautogui.locateCenterOnScreen("top left.png", grayscale=True) != None:
            x1, y1 = pyautogui.locateCenterOnScreen("top left.png", grayscale=True)
            if pyautogui.pixelMatchesColor(int(x1 + 61), int(y1 + 63), (43, 234, 246),
                                           tolerance=25):
                pyautogui.click(x1 + 234, y1 + 303, duration=0.15)
            break
        else:
            if pyautogui.locateOnScreen("play button.png") != None:
                pyautogui.click(
                    pyautogui.locateCenterOnScreen("play button.png"),
                    duration=0.15)
    print((x1, y1))
    side_length = 385
    counter = 0
    counter1 = 0
    counter2 = 0
    timer = time.time()
    timer_stop = timer + random.randint(3600, 4200)
    past_time = time.time()
    while True:
        if time.time() > timer_stop:
            print("Stopped at " + str((timer_stop / 60) / 60) + " hours")
            return
        center_game(driver)
        try:
            WebDriverWait(driver, 0).until(
                EC.visibility_of_element_located((By.ID, "error-window")))
            go_candy_jam(driver)
            print("reset")
        except:
            pass
        rows_color, rows_pos = horizontials(x1 + 27, y1 + 29)
        # Use selenium for scratch and win
        if len(
                WebDriverWait(driver, 0).until(
                    EC.presence_of_element_located(
                        (By.ID, "scratchOffWrapperTier-1"
                         ))).get_attribute("class")) == 8 and counter1 == 0:
            print(
                str(int((time.time() - past_time) / 60)) + " min" + " - Tier 1")
            center_game(driver)
            counter1 += 1
        if len(
                WebDriverWait(driver, 0).until(
                    EC.presence_of_element_located(
                        (By.ID, "scratchOffWrapperTier-2"
                         ))).get_attribute("class")) == 8 and counter2 == 0:
            print(
                str(int((time.time() - past_time) / 60)) + " min" + " - Tier 2")
            center_game(driver)
            #counter2 += 1
            WebDriverWait(driver, 5).until(
                EC.element_to_be_clickable(
                    (By.ID, "scratchNowButtonTier-2"))).click()
            scratch_all_tier_2(driver)
            WebDriverWait(driver, 30).until(
                EC.element_to_be_clickable(
                    (By.ID, "scYouWinEarnMoreButton"))).click()
            counter1 = 0
            past_time = time.time()
        if len(
                WebDriverWait(driver, 0).until(
                    EC.presence_of_element_located(
                        (By.ID, "secondaryCurrencyScratchOffWrapper"
                         ))).get_attribute("style")) == 14:
            WebDriverWait(driver, 5).until(
                EC.element_to_be_clickable(
                    (By.ID, "secondaryCurrencyProgressWrapper"))).click()
        if pyautogui.pixelMatchesColor(int(x1 + 61), int(y1 + 63), (43, 234, 246),
                                       tolerance=25):
            pyautogui.click(x1 + 234, y1 + 303, duration=0.15)
        if pyautogui.pixelMatchesColor(int(x1 + 195), int(y1 + 189), (255, 151, 7),
                                       tolerance=10):
            pyautogui.click(int(x1 + 195), int(y1 + 189), duration=0.15)
        #print("next_rows")
        if do_next_rows(rows_color, rows_pos) is False:
            #print("next verticals")
            if do_next_verticals(rows_color, rows_pos) is False:
                #print("skipped rows")
                if do_skipped_rows(rows_color, rows_pos) is False:
                    #print("skipped verticals")
                    if do_skipped_verticals(rows_color, rows_pos) is False:
                        if bomb(rows_color, rows_pos) is False:
                            if counter >= 10:
                                print("no solutions")
                                pyautogui.screenshot("ending picture.png")
                                counter = 0
                            else:
                                counter += 1
        else:
            counter = 0
def main(logger):
    opt = parse_args()

    makedirs(opt.save_dir)

    filehandler = logging.FileHandler(os.path.join(opt.save_dir, opt.logging_file))
    streamhandler = logging.StreamHandler()
    logger = logging.getLogger('')
    logger.setLevel(logging.INFO)
    logger.addHandler(filehandler)
    logger.addHandler(streamhandler)
    logger.info(opt)

    gc.set_threshold(100, 5, 5)

    # set env
    gpu_id = opt.gpu_id
    context = mx.gpu(gpu_id)

    # get data preprocess
    image_norm_mean = [0.485, 0.456, 0.406]
    image_norm_std = [0.229, 0.224, 0.225]
    if opt.ten_crop:
        transform_test = transforms.Compose([
            video.VideoTenCrop(opt.input_size),
            video.VideoToTensor(),
            video.VideoNormalize(image_norm_mean, image_norm_std)
        ])
        opt.num_crop = 10
    elif opt.three_crop:
        transform_test = transforms.Compose([
            video.VideoThreeCrop(opt.input_size),
            video.VideoToTensor(),
            video.VideoNormalize(image_norm_mean, image_norm_std)
        ])
        opt.num_crop = 3
    else:
        transform_test = video.VideoGroupValTransform(size=opt.input_size,
                                                      mean=image_norm_mean,
                                                      std=image_norm_std)
        opt.num_crop = 1

    # get model
    if opt.use_pretrained and len(opt.hashtag) > 0:
        opt.use_pretrained = opt.hashtag
    classes = opt.num_classes
    model_name = opt.model
    net = get_model(name=model_name, nclass=classes, pretrained=opt.use_pretrained,
                    num_segments=opt.num_segments, num_crop=opt.num_crop)
    net.cast(opt.dtype)
    net.collect_params().reset_ctx(context)
    if opt.mode == 'hybrid':
        net.hybridize(static_alloc=True, static_shape=True)
    if opt.resume_params != '' and not opt.use_pretrained:
        net.load_parameters(opt.resume_params, ctx=context)
        logger.info('Pre-trained model %s is successfully loaded.' % (opt.resume_params))
    else:
        logger.info('Pre-trained model is successfully loaded from the model zoo.')
    logger.info("Successfully built model {}".format(model_name))

    # get data
    anno_file = opt.data_list
    f = open(anno_file, 'r')
    data_list = f.readlines()
    logger.info('Load %d video samples.' % len(data_list))

    start_time = time.time()
    for vid, vline in enumerate(data_list):
        video_path = vline.split()[0]
        video_name = video_path.split('/')[-1]
        if opt.need_root:
            video_path = os.path.join(opt.data_dir, video_path)
        video_data = read_data(opt, video_path, transform_test)
        video_input = video_data.as_in_context(context)
        pred = net(video_input.astype(opt.dtype, copy=False))
        if opt.save_logits:
            logits_file = '%s_%s_logits.npy' % (model_name, video_name)
            np.save(os.path.join(opt.save_dir, logits_file), pred.asnumpy())
        pred_label = np.argmax(pred.asnumpy())
        if opt.save_preds:
            preds_file = '%s_%s_preds.npy' % (model_name, video_name)
            np.save(os.path.join(opt.save_dir, preds_file), pred_label)

        logger.info('%04d/%04d: %s is predicted to class %d' %
                    (vid, len(data_list), video_name, pred_label))

    end_time = time.time()
    logger.info('Total inference time is %4.2f minutes' % ((end_time - start_time) / 60))
def main(scheduler, host, worker_port, listen_address, contact_address,
         nanny_port, nthreads, nprocs, nanny, name, pid_file, resources,
         dashboard, bokeh, bokeh_port, scheduler_file, dashboard_prefix,
         tls_ca_file, tls_cert, tls_key, dashboard_address, worker_class,
         preload_nanny, **kwargs):
    g0, g1, g2 = gc.get_threshold()  # https://github.com/dask/distributed/issues/1653
    gc.set_threshold(g0 * 3, g1 * 3, g2 * 3)

    enable_proctitle_on_current()
    enable_proctitle_on_children()

    if bokeh_port is not None:
        warnings.warn(
            "The --bokeh-port flag has been renamed to --dashboard-address. "
            "Consider adding ``--dashboard-address :%d`` " % bokeh_port)
        dashboard_address = bokeh_port
    if bokeh is not None:
        warnings.warn(
            "The --bokeh/--no-bokeh flag has been renamed to --dashboard/--no-dashboard. "
        )
        dashboard = bokeh

    sec = {
        k: v
        for k, v in [
            ("tls_ca_file", tls_ca_file),
            ("tls_worker_cert", tls_cert),
            ("tls_worker_key", tls_key),
        ] if v is not None
    }

    if nprocs == "auto":
        nprocs, nthreads = nprocesses_nthreads()
    else:
        nprocs = int(nprocs)

    if nprocs < 0:
        nprocs = CPU_COUNT + 1 + nprocs

    if nprocs <= 0:
        logger.error(
            "Failed to launch worker. Must specify --nprocs so that there's at least one process."
        )
        sys.exit(1)

    if nprocs > 1 and not nanny:
        logger.error(
            "Failed to launch worker. You cannot use the --no-nanny argument when nprocs > 1."
        )
        sys.exit(1)

    if contact_address and not listen_address:
        logger.error(
            "Failed to launch worker. "
            "Must specify --listen-address when --contact-address is given")
        sys.exit(1)

    if nprocs > 1 and listen_address:
        logger.error("Failed to launch worker. "
                     "You cannot specify --listen-address when nprocs > 1.")
        sys.exit(1)

    if (worker_port or host) and listen_address:
        logger.error(
            "Failed to launch worker. "
            "You cannot specify --listen-address when --worker-port or --host is given."
        )
        sys.exit(1)

    try:
        if listen_address:
            (host, worker_port) = get_address_host_port(listen_address, strict=True)

        if contact_address:
            # we only need this to verify it is getting parsed
            (_, _) = get_address_host_port(contact_address, strict=True)
        else:
            # if contact address is not present we use the listen_address for contact
            contact_address = listen_address
    except ValueError as e:
        logger.error("Failed to launch worker. " + str(e))
        sys.exit(1)

    if nanny:
        port = nanny_port
    else:
        port = worker_port

    if not nthreads:
        nthreads = CPU_COUNT // nprocs

    if pid_file:
        with open(pid_file, "w") as f:
            f.write(str(os.getpid()))

        def del_pid_file():
            if os.path.exists(pid_file):
                os.remove(pid_file)

        atexit.register(del_pid_file)

    if resources:
        resources = resources.replace(",", " ").split()
        resources = dict(pair.split("=") for pair in resources)
        resources = valmap(float, resources)
    else:
        resources = None

    loop = IOLoop.current()

    worker_class = import_term(worker_class)
    if nanny:
        kwargs["worker_class"] = worker_class
        kwargs["preload_nanny"] = preload_nanny

    if nanny:
        kwargs.update({
            "worker_port": worker_port,
            "listen_address": listen_address
        })
        t = Nanny
    else:
        if nanny_port:
            kwargs["service_ports"] = {"nanny": nanny_port}
        t = worker_class

    if (not scheduler and not scheduler_file
            and dask.config.get("scheduler-address", None) is None):
        raise ValueError("Need to provide scheduler address like\n"
                         "dask-worker SCHEDULER_ADDRESS:8786")

    with suppress(TypeError, ValueError):
        name = int(name)

    nannies = [
        t(scheduler,
          scheduler_file=scheduler_file,
          nthreads=nthreads,
          loop=loop,
          resources=resources,
          security=sec,
          contact_address=contact_address,
          host=host,
          port=port,
          dashboard=dashboard,
          dashboard_address=dashboard_address,
          name=name if nprocs == 1 or name is None or name == ""
          else str(name) + "-" + str(i),
          **kwargs) for i in range(nprocs)
    ]

    async def close_all():
        # Unregister all workers from scheduler
        if nanny:
            await asyncio.gather(*[n.close(timeout=2) for n in nannies])

    signal_fired = False

    def on_signal(signum):
        nonlocal signal_fired
        signal_fired = True
        if signum != signal.SIGINT:
            logger.info("Exiting on signal %d", signum)
        return asyncio.ensure_future(close_all())

    async def run():
        await asyncio.gather(*nannies)
        await asyncio.gather(*[n.finished() for n in nannies])

    install_signal_handlers(loop, cleanup=on_signal)

    try:
        loop.run_sync(run)
    except TimeoutError:
        # We already log the exception in nanny / worker. Don't do it again.
        if not signal_fired:
            logger.info("Timed out starting worker")
        sys.exit(1)
    except KeyboardInterrupt:
        pass
    finally:
        logger.info("End worker")
import fsps
from powderday.image_processing import add_transmission_filters, convolve
from powderday.m_control_tools import m_control_sph, m_control_enzo, m_control_arepo
import powderday.backwards_compatibility as bc
import powderday.error_handling as eh
import powderday.SED_gen as sg
from powderday.front_ends.front_end_controller import stream
from astropy import units as u
import powderday.config as cfg
import h5py
import matplotlib as mpl
import copy
import numpy as np
import sys
import gc

# a first-generation threshold of 0 disables automatic garbage collection
gc.set_threshold(0)

script, pardir, parfile, modelfile = sys.argv

mpl.use('Agg')

sys.path.insert(0, pardir)

par = __import__(parfile)
model = __import__(modelfile)

cfg.par = par  # re-write cfg.par for all modules that read this in now
cfg.model = model

# =========================================================
# CHECK FOR THE EXISTENCE OF A FEW CRUCIAL FILES FIRST
# =========================================================
# ====================
# Import required modules

from collections import OrderedDict
import gc
import getopt
import locale
import sys
from amed.amed_pre import *
from amed.amed_post import *

# Set locale to assist with sorting
locale.setlocale(locale.LC_ALL, '')

# Set threshold for garbage collection (helps prevent the program from running out of memory)
gc.set_threshold(400, 5, 5)

__author__ = 'Victoria Morris'
__license__ = 'MIT License'
__version__ = '1.0.0'
__status__ = '4 - Beta Development'

# ====================
# Constants
# ====================

OPTIONS = OrderedDict([
    ('E1', ('Prepare AMED files for import to Excel', amed_pre_Excel)),
    ('E2', ('Process AMED files exported from Excel', amed_post_Excel)),
    ('LM1', ('Prepare AMED files for import to Library Master', amed_pre_LM)),
    ('LM2', ('Process AMED files exported from Library Master', amed_post_LM)),
def startService(self):
    hs = setup(self.config)
    change_resource_limit(hs.config.soft_file_limit)
    if hs.config.gc_thresholds:
        gc.set_threshold(*hs.config.gc_thresholds)
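A config-driven setting like `gc_thresholds` above must unpack to between one and three integers, or `gc.set_threshold` raises `TypeError`. A hedged sketch of a parser/validator for such a setting; the helper name and the comma-separated string format are assumptions, not the actual config code of the snippet above:

import gc

def parse_gc_thresholds(value):
    """Parse a setting like "700,10,10" into a tuple for gc.set_threshold.

    Returns None when the setting is empty, mirroring the `if ...:` guard
    in the snippet above. Raises ValueError for malformed input.
    """
    if not value:
        return None
    parts = [int(p) for p in str(value).split(",")]
    if not 1 <= len(parts) <= 3 or any(p < 0 for p in parts):
        raise ValueError("gc_thresholds must be 1-3 non-negative integers")
    return tuple(parts)

thresholds = parse_gc_thresholds("700,10,10")
if thresholds:
    gc.set_threshold(*thresholds)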
def setup_tests(ns):
    # Display the Python traceback on fatal errors (e.g. segfault)
    faulthandler.enable(all_threads=True)

    # Display the Python traceback on SIGALRM or SIGUSR1 signal
    signals = []
    if hasattr(signal, 'SIGALRM'):
        signals.append(signal.SIGALRM)
    if hasattr(signal, 'SIGUSR1'):
        signals.append(signal.SIGUSR1)
    for signum in signals:
        faulthandler.register(signum, chain=True)

    replace_stdout()
    support.record_original_stdout(sys.stdout)

    if ns.testdir:
        # Prepend test directory to sys.path, so runtest() will be able
        # to locate tests
        sys.path.insert(0, os.path.abspath(ns.testdir))

    # Some times __path__ and __file__ are not absolute (e.g. while running from
    # Lib/) and, if we change the CWD to run the tests in a temporary dir, some
    # imports might fail. This affects only the modules imported before os.chdir().
    # These modules are searched first in sys.path[0] (so '' -- the CWD) and if
    # they are found in the CWD their __file__ and __path__ will be relative (this
    # happens before the chdir). All the modules imported after the chdir, are
    # not found in the CWD, and since the other paths in sys.path[1:] are absolute
    # (site.py absolutize them), the __file__ and __path__ will be absolute too.
    # Therefore it is necessary to absolutize manually the __file__ and __path__ of
    # the packages to prevent later imports to fail when the CWD is different.
    for module in sys.modules.values():
        if hasattr(module, '__path__'):
            for index, path in enumerate(module.__path__):
                module.__path__[index] = os.path.abspath(path)
        if hasattr(module, '__file__'):
            module.__file__ = os.path.abspath(module.__file__)

    # MacOSX (a.k.a. Darwin) has a default stack size that is too small
    # for deeply recursive regular expressions. We see this as crashes in
    # the Python test suite when running test_re.py and test_sre.py. The
    # fix is to set the stack limit to 2048.
    # This approach may also be useful for other Unixy platforms that
    # suffer from small default stack limits.
    if sys.platform == 'darwin':
        try:
            import resource
        except ImportError:
            pass
        else:
            soft, hard = resource.getrlimit(resource.RLIMIT_STACK)
            newsoft = min(hard, max(soft, 1024 * 2048))
            resource.setrlimit(resource.RLIMIT_STACK, (newsoft, hard))

    if ns.huntrleaks:
        unittest.BaseTestSuite._cleanup = False

        # Avoid false positives due to various caches
        # filling slowly with random data:
        warm_caches()

    if ns.memlimit is not None:
        support.set_memlimit(ns.memlimit)

    if ns.threshold is not None:
        gc.set_threshold(ns.threshold)

    try:
        import msvcrt
    except ImportError:
        pass
    else:
        msvcrt.SetErrorMode(msvcrt.SEM_FAILCRITICALERRORS |
                            msvcrt.SEM_NOALIGNMENTFAULTEXCEPT |
                            msvcrt.SEM_NOGPFAULTERRORBOX |
                            msvcrt.SEM_NOOPENFILEERRORBOX)
        try:
            msvcrt.CrtSetReportMode
        except AttributeError:
            # release build
            pass
        else:
            for m in [msvcrt.CRT_WARN, msvcrt.CRT_ERROR, msvcrt.CRT_ASSERT]:
                if ns.verbose and ns.verbose >= 2:
                    msvcrt.CrtSetReportMode(m, msvcrt.CRTDBG_MODE_FILE)
                    msvcrt.CrtSetReportFile(m, msvcrt.CRTDBG_FILE_STDERR)
                else:
                    msvcrt.CrtSetReportMode(m, 0)

    support.use_resources = ns.use_resources
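Note that `gc.set_threshold(ns.threshold)` above passes a single integer: `gc.set_threshold` takes one to three arguments, and any generation you omit keeps its current threshold. A small sketch illustrating that behaviour:

import gc

gc.set_threshold(700, 10, 10)             # start from the CPython defaults
gc.set_threshold(5)                       # only generation 0 changes...
assert gc.get_threshold() == (5, 10, 10)  # ...generations 1 and 2 keep their values
gc.set_threshold(0)                       # a zero threshold0 disables automatic collection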
import gc
import sys

try:
    threshold = int(sys.argv[1])
except (IndexError, ValueError, TypeError):
    print('Missing or invalid threshold, using default')
    threshold = 5


class MyObj:

    def __init__(self, name):
        self.name = name
        print('Created', self.name)


gc.set_debug(gc.DEBUG_STATS)

gc.set_threshold(threshold, 1, 1)
print('Thresholds:', gc.get_threshold())

print('Clear the collector by forcing a run')
gc.collect()
print()

print('Creating objects')
objs = []
for i in range(10):
    objs.append(MyObj(i))
print('Exiting')

# Turn off debugging
gc.set_debug(0)
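With `DEBUG_STATS` enabled, the tiny first-generation threshold above makes the collector print statistics to stderr after only a handful of allocations. On Python 3.3+ the same events can be observed programmatically; a minimal sketch using `gc.callbacks` (the callback name is illustrative):

import gc

def on_gc(phase, info):
    # phase is "start" or "stop"; info describes the generation collected
    if phase == "stop":
        print("gen %d collect: %d unreachable" % (info["generation"], info["collected"]))

gc.callbacks.append(on_gc)
gc.set_threshold(5, 1, 1)  # collect very eagerly, as in the demo above
objs = [object() for _ in range(100)]
gc.callbacks.remove(on_gc)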
def gc_threshold_demo():
    print 'gc threshold:', gc.get_threshold()
    gc.set_threshold(500, 10, 5)
    print "gc threshold (after 'gc.set_threshold(500, 10, 5)'):", gc.get_threshold()
def setup_tests(ns):
    try:
        stderr_fd = sys.__stderr__.fileno()
    except (ValueError, AttributeError):
        # Catch ValueError to catch io.UnsupportedOperation on TextIOBase
        # and ValueError on a closed stream.
        #
        # Catch AttributeError for stderr being None.
        stderr_fd = None
    else:
        # Display the Python traceback on fatal errors (e.g. segfault)
        faulthandler.enable(all_threads=True, file=stderr_fd)

        # Display the Python traceback on SIGALRM or SIGUSR1 signal
        signals = []
        if hasattr(signal, 'SIGALRM'):
            signals.append(signal.SIGALRM)
        if hasattr(signal, 'SIGUSR1'):
            signals.append(signal.SIGUSR1)
        for signum in signals:
            faulthandler.register(signum, chain=True, file=stderr_fd)

    replace_stdout()
    support.record_original_stdout(sys.stdout)

    if ns.testdir:
        # Prepend test directory to sys.path, so runtest() will be able
        # to locate tests
        sys.path.insert(0, os.path.abspath(ns.testdir))

    # Some times __path__ and __file__ are not absolute (e.g. while running from
    # Lib/) and, if we change the CWD to run the tests in a temporary dir, some
    # imports might fail. This affects only the modules imported before os.chdir().
    # These modules are searched first in sys.path[0] (so '' -- the CWD) and if
    # they are found in the CWD their __file__ and __path__ will be relative (this
    # happens before the chdir). All the modules imported after the chdir, are
    # not found in the CWD, and since the other paths in sys.path[1:] are absolute
    # (site.py absolutize them), the __file__ and __path__ will be absolute too.
    # Therefore it is necessary to absolutize manually the __file__ and __path__ of
    # the packages to prevent later imports to fail when the CWD is different.
    for module in sys.modules.values():
        if hasattr(module, '__path__'):
            for index, path in enumerate(module.__path__):
                module.__path__[index] = os.path.abspath(path)
        if getattr(module, '__file__', None):
            module.__file__ = os.path.abspath(module.__file__)

    # MacOSX (a.k.a. Darwin) has a default stack size that is too small
    # for deeply recursive regular expressions. We see this as crashes in
    # the Python test suite when running test_re.py and test_sre.py. The
    # fix is to set the stack limit to 2048.
    # This approach may also be useful for other Unixy platforms that
    # suffer from small default stack limits.
    if sys.platform == 'darwin':
        try:
            import resource
        except ImportError:
            pass
        else:
            soft, hard = resource.getrlimit(resource.RLIMIT_STACK)
            newsoft = min(hard, max(soft, 1024 * 2048))
            resource.setrlimit(resource.RLIMIT_STACK, (newsoft, hard))

    if ns.huntrleaks:
        unittest.BaseTestSuite._cleanup = False

    if ns.memlimit is not None:
        support.set_memlimit(ns.memlimit)

    if ns.threshold is not None:
        gc.set_threshold(ns.threshold)

    suppress_msvcrt_asserts(ns.verbose and ns.verbose >= 2)

    support.use_resources = ns.use_resources

    if hasattr(sys, 'addaudithook'):
        # Add an auditing hook for all tests to ensure PySys_Audit is tested
        def _test_audit_hook(name, args):
            pass
        sys.addaudithook(_test_audit_hook)
import gc, inspect, types, traceback, threading, time, pprint

from pyGlobus.io import GSITCPSocket
from AccessGrid.Security.Utilities import CreateTCPAttrAlwaysAuth

gc.set_debug(gc.DEBUG_LEAK)
print "GC Threshold: ", gc.get_threshold()
gc.set_threshold(1, 1, 1)

def Info(obj):
    if inspect.ismethod(obj):
        print "Method (%d): " % id(obj)
        print "  Class: ", obj.im_class
        print "  Name: ", obj.__name__
    elif isinstance(obj, types.InstanceType):
        print "Instance (%d): " % id(obj)
        pprint.pprint(obj)
    elif inspect.isframe(obj):
        print "Frame (%d): " % id(obj)
        traceback.print_stack(obj)
    else:
        print "Other (%d): " % id(obj)
        print type(obj)
        # pprint.pprint(obj)
    print " ---- "

class EventService:
    """
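The `(1, 1, 1)` thresholds plus `DEBUG_LEAK` above force collections almost constantly, so leaked cycles surface in `gc.garbage` immediately. A compact leak-hunting recipe in the same spirit, written for Python 3, where `DEBUG_SAVEALL` keeps everything the collector found rather than freeing it (the `Node` class is a made-up example cycle):

import gc

gc.set_debug(gc.DEBUG_SAVEALL)  # keep unreachable objects in gc.garbage
gc.set_threshold(1, 1, 1)       # collect on almost every allocation

class Node:
    def __init__(self):
        self.ref = None

a, b = Node(), Node()
a.ref, b.ref = b, a  # reference cycle
del a, b

gc.collect()
for obj in gc.garbage:
    print(type(obj), id(obj))  # the two Node instances show up here
gc.set_debug(0)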
import os
import logging.handlers
import gc
import re

# agent base directory
exe_path = os.path.dirname(os.path.realpath(__file__)).rsplit('/', 1)[0]

import helper
import controller
import api
import exit_status
from universal import Universal
from constructs import *

_log = logging.getLogger(__name__)  # module-level logging

gc.set_threshold(50, 5, 5)  # set gc threshold

logging_level = logging.INFO  # default logging level

# Set up logging for StreamHandler.
# In StreamHandler we display only messages until everything initializes;
# this is done so as not to pollute the terminal when running as a service.
logging.basicConfig(level=logging_level, format='%(message)s')
formatter = logging.Formatter(
    '%(asctime)-15s %(levelname)-7s %(thread)d - %(module)-s[%(lineno)-d]: %(message)s'
)  # formatter instance for logging
logger = logging.getLogger()  # get the root logger

try:
    # Create a rotating file logger and add it to the root logger;
    # this can raise an exception if the file cannot be created.
    lf = logging.handlers.RotatingFileHandler(
    hope_mvector = theoracle.clean(hope_vector)

    if log.level >= 1:
        log.write("hope hyp: %s\n" % " ".join(sym.tostring(e) for e in hope))
        log.write("hope features: %s\n" % hope_mvector)
        log.write("hope oracle: %s\n" % hope_ovector)

    return maxmargin.Hypothesis(hope_mvector, hope_ovector)

def make_oracle():
    return oracle.Oracle(order=4, variant=opts.bleuvariant, oracledoc_size=10)

if __name__ == "__main__":
    gc.set_threshold(100000, 10, 10)

    import optparse
    optparser = optparse.OptionParser()
    optparser.add_option("-W", dest="outweightfilename", help="filename to write weights to")
    optparser.add_option("-L", dest="outscorefilename", help="filename to write BLEU scores to")
    optparser.add_option("-B", dest="bleuvariant", default="NIST")
    optparser.add_option("-S", dest="stopfile")
    optparser.add_option("--input-lattice", dest="input_lattice", action="store_true")
    optparser.add_option("--holdout",
def testThreshold(t0=700, t1=10, t2=10):
    gc.set_threshold(t0, t1, t2)
    for i in range(1000000):
        a = A()
    gc.collect()
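A benchmark like `testThreshold` is more informative with timing around it. The harness below is a hypothetical addition, not part of the original, sketching how one might compare thresholds; it assumes the same allocation-heavy pattern with a trivial class `A`:

import gc
import time

class A:
    pass

def time_threshold(t0, t1=10, t2=10, n=1_000_000):
    """Time n allocations plus a final full collection under given thresholds."""
    gc.set_threshold(t0, t1, t2)
    start = time.perf_counter()
    for _ in range(n):
        a = A()
    gc.collect()
    return time.perf_counter() - start

for t0 in (100, 700, 10000):
    print(t0, round(time_threshold(t0), 3))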
def main():
    parser = argparse.ArgumentParser(
        description="access to Python's built-in garbage collector")
    parser.add_argument(
        "command",
        help="what to do",
        choices=["enable", "disable", "status", "collect", "threshold", "debug", "break"],
        action="store")
    parser.add_argument(
        "args", help="argument for command", action="store", nargs="*")
    ns = parser.parse_args()

    if ns.command == "enable":
        gc.enable()
    elif ns.command == "disable":
        gc.disable()
    elif ns.command == "collect":
        gc.collect()
    elif ns.command == "status":
        if gc.isenabled():
            gcs = _stash.text_color("Enabled", "green")
        else:
            gcs = _stash.text_color("Disabled", "red")
        sys.stdout.write("GC status: {s}\n".format(s=gcs))
        tracked = gc.get_objects()
        n = len(tracked)
        sys.stdout.write("Tracked objects: {n}\n".format(n=n))
        size = sum([sys.getsizeof(e) for e in tracked])
        del tracked  # this list may be big, better delete it
        sys.stdout.write("Size of tracked objects: {s} bytes\n".format(s=size))
        sys.stdout.write("Garbage: {n}\n".format(n=len(gc.garbage)))
        gsize = sum([sys.getsizeof(e) for e in gc.garbage])
        sys.stdout.write("Size of garbage: {s} bytes\n".format(s=gsize))
        sys.stdout.write("Debug: {d}\n".format(d=gc.get_debug()))
    elif ns.command == "threshold":
        if len(ns.args) == 0:
            sys.stdout.write(
                "Threshold:\n   G1: {}\n   G2: {}\n   G3: {}\n".format(*gc.get_threshold()))
        elif len(ns.args) > 3:
            errmsg = _stash.text_color(
                "Error: too many arguments for threshold!\n", "red")
            sys.stdout.write(errmsg)
            sys.exit(1)
        else:
            try:
                ts = tuple([int(e) for e in ns.args])
            except ValueError:
                errmsg = _stash.text_color(
                    "Error: expected arguments to be integers!\n", "red")
                sys.stdout.write(errmsg)
                sys.exit(1)
            gc.set_threshold(*ts)
    elif ns.command == "debug":
        if len(ns.args) == 0:
            sys.stdout.write("Debug: {d}\n".format(d=gc.get_debug()))
        elif len(ns.args) == 1:
            try:
                flag = int(ns.args[0])
            except ValueError:
                sys.stdout.write(
                    _stash.text_color("Error: expected argument to be an integer!\n", "red"))
                sys.exit(1)
            gc.set_debug(flag)
        else:
            sys.stdout.write(
                _stash.text_color("Error: expected exactly one argument for debug!\n", "red"))
            sys.exit(1)
    elif ns.command == "break":
        if len(gc.garbage) == 0:
            sys.stdout.write(
                _stash.text_color("Error: no garbage found!\n", "red"))
            sys.exit(1)
        else:
            # best effort: strip attributes to break reference cycles
            for k in dir(gc.garbage[0]):
                try:
                    delattr(gc.garbage[0], k)
                except:
                    pass
            del gc.garbage[:]
# 2. Route alarms into alert_queue according to their owner
#
import sys
import gc
import time, shutil, os

import helper
from config import setting

db = setting.db_web

if __name__ == '__main__':
    print "DETECTOR: %s started" % helper.time_str()
    gc.set_threshold(300, 5, 5)
    try:
        _count = _dir = _ins = 0
        while 1:
            # check the ftp directory
            _count += 1
            ftp_dir = os.listdir(setting.ftp_path)
            for ip in ftp_dir:
                _dir += 1
                db_cam = db.cams.find_one({'cam_ip': ip}, {'motion_detect': 1, 'owner': 1})
                if db_cam is not None and db_cam['motion_detect'] > 0:
# -*- coding: utf-8 -*-
from channels.handler import AsgiHandler
from channels import Channel, Group
from channels.sessions import channel_session
from channels.auth import http_session_user, channel_session_user, channel_session_user_from_http
import service.msg_agent as msg_agent
import multiprocessing, threading
import json, gc, sys, time

gc.enable()
gc.set_threshold(360, 8, 8)

# global cache object
# cache = {}
lock = multiprocessing.Lock()
usermap = dict()

def stop_process(user):
    try:
        if user in usermap:
            usermap[user]['thread_stop'] = True
            time.sleep(0.003)
            proc = usermap[user]['process']
            # if proc.is_alive():
            usermap[user]['queue'].close()
            usermap[user]['thread_queue'].close()
            proc.terminate()
            proc.join()
            time.sleep(0.002)
            del usermap[user]['queue']
def run():
    logger.info("Running")
    change_resource_limit(soft_file_limit)
    if gc_thresholds:
        gc.set_threshold(*gc_thresholds)
    run_command()