def test_main():
    enabled = gc.isenabled()
    gc.disable()
    if not test_support.due_to_ironpython_incompatibility(
            "http://tkbgitvstfat01:8080/WorkItemTracking/WorkItem.aspx?artifactMoniker=314470"):
        assert not gc.isenabled()
    debug = gc.get_debug()
    if not test_support.due_to_ironpython_incompatibility(
            "http://tkbgitvstfat01:8080/WorkItemTracking/WorkItem.aspx?artifactMoniker=314470"):
        gc.set_debug(debug & ~gc.DEBUG_LEAK)  # this test is supposed to leak

    try:
        gc.collect()  # Delete 2nd generation garbage
        run_unittest(GCTests, GCTogglingTests)
    finally:
        if not test_support.due_to_ironpython_incompatibility(
                "http://tkbgitvstfat01:8080/WorkItemTracking/WorkItem.aspx?artifactMoniker=314470"):
            gc.set_debug(debug)

        # test gc.enable() even if GC is disabled by default
        if verbose:
            print "restoring automatic collection"
        # make sure to always test gc.enable()
        gc.enable()
        assert gc.isenabled()
        if not enabled:
            gc.disable()

def test():
    if not hasattr(gc, 'get_debug'):
        if verbose:
            print "skipping test_gc: too many GC differences with CPython"
        return

    if verbose:
        print "disabling automatic collection"
    enabled = gc.isenabled()
    gc.disable()
    verify(not gc.isenabled())
    debug = gc.get_debug()
    gc.set_debug(debug & ~gc.DEBUG_LEAK)  # this test is supposed to leak

    try:
        test_all()
    finally:
        gc.set_debug(debug)
        # test gc.enable() even if GC is disabled by default
        if verbose:
            print "restoring automatic collection"
        # make sure to always test gc.enable()
        gc.enable()
        verify(gc.isenabled())
        if not enabled:
            gc.disable()

def test_isenabled():
    gc.enable()
    result = gc.isenabled()
    Assert(result, "enable Method can't set gc.isenabled as true.")

    if not is_cli and not is_silverlight:
        gc.disable()
        result = gc.isenabled()
        Assert(result == False, "disable Method can't set gc.isenabled as false.")

def test_isenabled(self):
    gc.enable()
    result = gc.isenabled()
    self.assertTrue(result, "enable Method can't set gc.isenabled as true.")

    if not is_cli:
        gc.disable()
        result = gc.isenabled()
        self.assertFalse(result, "disable Method can't set gc.isenabled as false.")

def test_enable(self):
    import gc
    assert gc.isenabled()
    gc.disable()
    assert not gc.isenabled()
    gc.enable()
    assert gc.isenabled()
    gc.enable()
    assert gc.isenabled()

def test_gc_manager():
    gc.disable()
    assert not gc.callbacks

    with gc_manager(enabled=True) as m:
        assert gc.callbacks
        assert gc.isenabled()

    # Make sure we've rolled back the old state of ``gc``.
    assert not gc.callbacks
    assert not gc.isenabled()

def test_standardOptions(self):
    """
    L{WorkerOptions} supports a subset of standard options supported by
    trial.
    """
    self.addCleanup(sys.setrecursionlimit, sys.getrecursionlimit())
    if gc.isenabled():
        self.addCleanup(gc.enable)
    gc.enable()
    self.options.parseOptions(["--recursionlimit", "2000", "--disablegc"])
    self.assertEqual(2000, sys.getrecursionlimit())
    self.assertFalse(gc.isenabled())

def test_set_gc_state():
    gc_status = gc.isenabled()
    try:
        for state in (True, False):
            gc.enable()
            set_gc_state(state)
            assert_equal(gc.isenabled(), state)
            gc.disable()
            set_gc_state(state)
            assert_equal(gc.isenabled(), state)
    finally:
        if gc_status:
            gc.enable()

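# A minimal sketch of the ``set_gc_state`` helper the test above exercises.
# The real helper lives elsewhere in that test's package, so this shape is an
# assumption inferred from how the test calls it.
def set_gc_state(state):
    """Set the garbage collector to the given enabled/disabled state."""
    if gc.isenabled() == state:
        return  # already in the requested state
    if state:
        gc.enable()
    else:
        gc.disable()
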
def test_garbage_collector_thread(self):
    assert gc.isenabled()
    collector = GarbageCollector()
    # don't sleep,
    with mock.patch.object(collector, 'timer'):
        with mock.patch('gc.collect') as collect:
            with collector.gc_disabled():
                assert not gc.isenabled()
                # FIXME: this is a little flakey
                time.sleep(0.05)
                assert collect.called
    assert gc.isenabled()

def test_shared_test_base_cover(self):
    # Just coverage.
    import gc

    class MyTest(base.AbstractSharedTestBase):
        HANDLE_GC = True

        def test_thing(self):
            raise AssertionError("Not called")

    MyTest.setUpClass()
    assert_that(gc.isenabled(), is_(False if not base._is_pypy else True))
    MyTest.tearDownClass()
    assert_that(gc.isenabled(), is_(True))

    MyTest('test_thing').setUp()
    MyTest('test_thing').tearDown()

def run(self, func, *args, **kw):
    """
    Run function func(*args, **kw), validate statistics,
    and display the result on stdout.

    Disable the garbage collector if asked to.
    """
    # Disable the garbage collector if needed and if it exists
    # (Jython 2.2 doesn't have it, for example)
    if self.disable_gc:
        try:
            import gc
        except ImportError:
            self.disable_gc = False
    if self.disable_gc:
        gc_enabled = gc.isenabled()
        gc.disable()
    else:
        gc_enabled = False

    # Run the benchmark
    stat = self._run(func, args, kw)

    if gc_enabled:
        gc.enable()

    # Validate and display stats
    self.validateStat(stat)
    self.displayStat(stat)

def connectionAndCursor(self, connection=None):
    """Return the connection and cursor needed for executing SQL.

    Takes into account factors such as setting('Threaded') and the
    threadsafety level of the DB API module. You can pass in a connection
    to force a particular one to be used. Uses newConnection() and
    connect().
    """
    if aggressiveGC:
        import gc
        assert gc.isenabled()
        gc.collect()
    if connection:
        conn = connection
    elif self._threaded:
        if self._pool:
            conn = self._pool.connection()
        elif self._threadSafety == 1:
            conn = self.newConnection()
        else:  # safety = 2, 3
            if not self._connected:
                self.connect()
            conn = self._connection
    else:  # Non-threaded
        if not self._connected:
            self.connect()
        conn = self._connection
    cursor = conn.cursor()
    return conn, cursor

def executeSQL(self, sql, connection=None, commit=False):
    """Execute the given SQL.

    This will connect to the database for the first time if necessary.
    This method will also log the SQL to self._sqlEcho, if it is not None.
    Returns the connection and cursor used and relies on
    connectionAndCursor() to obtain these. Note that you can pass in a
    connection to force a particular one to be used and a flag to commit
    immediately.
    """
    sql = str(sql)  # Excel-based models yield Unicode strings which some db modules don't like
    sql = sql.strip()
    if aggressiveGC:
        import gc
        assert gc.isenabled()
        gc.collect()
    self._sqlCount += 1
    if self._sqlEcho:
        timestamp = funcs.timestamp()['pretty']
        self._sqlEcho.write('SQL %04i. %s %s\n' % (self._sqlCount, timestamp, sql))
        self._sqlEcho.flush()
    conn, cur = self.connectionAndCursor(connection)
    self._executeSQL(cur, sql)
    if commit:
        conn.commit()
    return conn, cur

def _execute_child(self, args, pass_fds):
    executable = args[0]

    self._loop.install_sigchld()

    gc_was_enabled = gc.isenabled()
    # Disable gc to avoid bug where gc -> file_dealloc ->
    # write to stderr -> hang.  http://bugs.python.org/issue1336
    gc.disable()
    try:
        self.pid = fork()
    except:
        if gc_was_enabled:
            gc.enable()
        raise

    if self.pid == 0:
        # Child
        _close_fds(pass_fds)
        try:
            os.execvp(executable, args)
        finally:
            os._exit(1)

    # Parent
    self._watcher = self._loop.child(self.pid)
    self._watcher.start(self._on_child, self._watcher)

    if gc_was_enabled:
        gc.enable()

def main():
    a = 4
    b = 5
    c_list = []
    c_list.append(123)
    c_list.append(456)
    # reference cycle
    c_list.append(c_list)
    c_list[2].append(789)

    # foo = ['hi']
    # c_list = foo

    print(c_list)

    print("Stats: {}".format(gc.get_stats()))
    print("Count: {}".format(gc.get_count()))
    print("GC enabled: {}".format(gc.isenabled()))
    print("Threshold: {}".format(gc.get_threshold()))
    print("c_list is tracked: {}".format(gc.is_tracked(c_list)))

    """
    The count returned is generally one higher than you might expect,
    because it includes the (temporary) reference as an argument to
    getrefcount().
    """
    print("Reference count for c_list: {}".format(sys.getrefcount(c_list)))
    del c_list[2]
    print("Reference count for c_list: {}".format(sys.getrefcount(c_list)))
    print("Collecting: {}".format(gc.collect()))
    print("Done.")

def test(self, view):
    """
    Calls the given view and measures the time for it to return. The
    garbage collector is disabled during execution.
    """
    gc_old = gc.isenabled()
    gc.disable()
    try:
        start = timeit.default_timer()
        if view.method == 'GET':
            response = self.client.get(view.url, view.data)
        elif view.method == 'POST':
            response = self.client.post(view.url, view.data)
        else:
            raise ValueError('Unknown view method: %s' % view.method)
        end = timeit.default_timer()
        # Return result in milliseconds
        time_ms = (end - start) * 1000
        # Try to get version information
        version = subprocess.check_output(['git', 'describe'])

        from .models import TestResult
        return TestResult(view=view, time=time_ms, result=response,
                          result_code=response.status_code, version=version)
    finally:
        if gc_old:
            gc.enable()

def time_numexpr(N=100, trials=5, dtype=np.double):
    # NOTE: This is giving me nonsensical results. At the moment I am not
    # using this test.
    x = np.asarray(np.linspace(-1, 1, N), dtype=dtype)
    y = np.asarray(np.linspace(-1, 1, N), dtype=dtype)
    #x = np.arange(N)
    #y = np.arange(N)
    z = np.empty_like(x)
    gcold = gc.isenabled()
    gc.disable()
    tic = time.time()
    times = []
    for i in range(trials):
        #t_start = time.time()
        #ne.evaluate('2*y+4*x', out=z)
        ne.evaluate('x*y - 4.1*x > 2.5*y', out=z)
        #times.append(time.time() - t_start)
    toc = time.time() - tic
    if gcold:
        gc.enable()
    # calculate average time and min time and also keep track of outliers
    # (max time in the loop)
    #all_times = np.asarray(times)
    #min_time = np.amin(all_times)
    #max_time = np.amax(all_times)
    #mean_time = np.mean(all_times)
    #stdev_time = np.std(all_times)
    #print("Min = %.5f, Max = %.5f, Mean = %.5f, stdev = %.5f " % (min_time, max_time, mean_time, stdev_time))
    #final_times = [min_time, max_time, mean_time, stdev_time]
    final_times = [0, 0, 0, 0]
    #return (toc/trials, dtype().itemsize*3*N*1e-9)
    return toc / trials, 3. * N * 1e-9, final_times

def loadblk(self, blk, buf):
    # we are in sighandler - establish cycle which also references obj_4del
    # and trigger full GC
    assert self.obj_4del is not None
    w = weakref.ref(self.obj_4del)
    assert w() is self.obj_4del

    # establish cycle with leaf ref to obj_4del
    a = C()
    b = C()
    a.b = b
    b.a = a
    a.obj_4del = self.obj_4del

    self.obj_4del = None
    assert w() is not None

    # del a=b cycle - it should stay alive, while gc is disabled
    gc_save = gc.isenabled()
    gc.disable()

    del a, b
    assert w() is not None

    # gc - a=b and obj_4del collected
    gc.collect()
    assert w() is None

    if gc_save:
        gc.enable()

    self.marker_list.append(2)

def CalcMemoryUsage(what, iterations=1):
    if session and session.role & (service.ROLE_PROGRAMMER | service.ROLE_SERVICE) == 0:
        raise RuntimeError('Requires role programmer')
    iterations = max(1, iterations)
    iterations = min(iterations, 10)
    enabled = gc.isenabled()
    if enabled:
        gc.disable()
    oldWhitelist = blue.marshal.globalsWhitelist.copy()
    blue.marshal.globalsWhitelist.clear()
    oldCollectWhitelist = blue.marshal.collectWhitelist
    blue.marshal.collectWhitelist = True
    try:
        zippedSum = pickleSum = changeSum = 0
        for i in xrange(iterations):
            pickle = blue.marshal.Save(what)
            zipped = zlib.compress(pickle)
            before = sys.getpymalloced()
            unpickled = blue.marshal.Load(pickle)
            after = sys.getpymalloced()
            pickleSum += len(pickle)
            zippedSum += len(zipped)
            changeSum += after - before

        denominator = float(iterations)
        return (pickleSum / denominator, zippedSum / denominator,
                changeSum / denominator)
    finally:
        if enabled:
            gc.enable()
        blue.marshal.globalsWhitelist.clear()
        blue.marshal.globalsWhitelist.update(oldWhitelist)
        blue.marshal.collectWhitelist = oldCollectWhitelist

def _exitfunc(cls):
    # At shutdown invoke finalizers for which atexit is true.
    # This is called once all other non-daemonic threads have
    # been joined.
    reenable_gc = False
    try:
        if cls._registry:
            import gc
            if gc.isenabled():
                reenable_gc = True
                gc.disable()
            pending = None
            while True:
                if pending is None or finalize._dirty:
                    pending = cls._select_for_exit()
                    finalize._dirty = False
                if not pending:
                    break
                f = pending.pop()
                try:
                    # gc is disabled, so (assuming no daemonic
                    # threads) the following is the only line in
                    # this function which might trigger creation
                    # of a new finalizer
                    f()
                except Exception:
                    sys.excepthook(*sys.exc_info())
                assert f not in cls._registry
    finally:
        # prevent any more finalizers from executing during shutdown
        finalize._shutdown = True
        if reenable_gc:
            gc.enable()

def new_func(*args, **kwargs):
    status = gc.isenabled()
    gc.disable()
    try:
        output = func(*args, **kwargs)
    finally:
        # Re-enable the collector even if func raises, so a failure
        # doesn't leave gc disabled for the rest of the process.
        if status is True:
            gc.enable()
    return output

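# The decorator enclosing new_func above is not shown in the snippet; a
# minimal self-contained sketch of what it presumably looks like, with the
# decorator name ``no_gc`` and the sample function being assumptions:
import functools
import gc

def no_gc(func):
    """Run ``func`` with the garbage collector temporarily disabled."""
    @functools.wraps(func)
    def new_func(*args, **kwargs):
        status = gc.isenabled()
        gc.disable()
        try:
            return func(*args, **kwargs)
        finally:
            if status:
                gc.enable()
    return new_func

# usage (illustrative):
@no_gc
def build_big_list(n):
    return list(range(n))
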
def popen(cmd, stdin=None, stdout=None, wait=False):
    if stdin is None:
        _stdin = stdin = open('/dev/null', 'rb')
    if not isinstance(stdin, int):
        stdin = stdin.fileno()
    if stdout is None:
        _stdout = stdout = open('/dev/null', 'wb')
    if not isinstance(stdout, int):
        stdout = stdout.fileno()

    # exec
    if isinstance(cmd, basestring):
        cmd = [cmd]
    else:
        cmd = list(cmd)
    cmd = ["/bin/sh", "-c"] + cmd

    gc_was_enabled = gc.isenabled()
    gc.disable()
    try:
        pid = os.fork()
    except:
        if gc_was_enabled:
            gc.enable()
        raise

    if pid == 0:
        if stdout == 0:
            stdout = os.dup(stdout)
        os.dup2(stdin, 0)
        os.dup2(stdout, 1)
        if hasattr(os, 'closerange'):
            os.closerange(3, MAXFD)
        else:
            for i in xrange(3, MAXFD):
                try:
                    os.close(i)
                except:
                    pass
        os.execvp(cmd[0], cmd)
        os._exit(255)

    # parent
    if gc_was_enabled:
        gc.enable()
    if stdin is not None and stdin != 0:
        os.close(stdin)
    if stdout is not None and stdout != 1:
        os.close(stdout)
    if wait:
        pid, sts = os.waitpid(pid, 0)
        if os.WIFSIGNALED(sts):
            return os.WTERMSIG(sts)
        elif os.WIFEXITED(sts):
            return os.WEXITSTATUS(sts)
    return pid

def test_get_stats(self):
    stats = gc.get_stats()
    self.assertEqual(len(stats), 3)
    for st in stats:
        self.assertIsInstance(st, dict)
        self.assertEqual(set(st),
                         {"collected", "collections", "uncollectable"})
        self.assertGreaterEqual(st["collected"], 0)
        self.assertGreaterEqual(st["collections"], 0)
        self.assertGreaterEqual(st["uncollectable"], 0)

    # Check that collection counts are incremented correctly
    if gc.isenabled():
        self.addCleanup(gc.enable)
        gc.disable()
    old = gc.get_stats()
    gc.collect(0)
    new = gc.get_stats()
    self.assertEqual(new[0]["collections"], old[0]["collections"] + 1)
    self.assertEqual(new[1]["collections"], old[1]["collections"])
    self.assertEqual(new[2]["collections"], old[2]["collections"])
    gc.collect(2)
    new = gc.get_stats()
    self.assertEqual(new[0]["collections"], old[0]["collections"] + 1)
    self.assertEqual(new[1]["collections"], old[1]["collections"])
    self.assertEqual(new[2]["collections"], old[2]["collections"] + 1)

def start(self):
    active_benchmarks[id(self)] = self
    self._current_counter = {}
    self.iteration += 1
    self.gcold = gc.isenabled()
    gc.disable()
    self.ts = time.time()

def process_item(self, item, spider):
    gcenabled = gc.isenabled()
    gc.disable()
    try:
        contentExtractor = lambda _: spider.contentExtractor(parseHTML(_))
        boilerpipeExtractor = lambda _: Extractor(html=_).getText()
        gooseExtractor = lambda _: Goose().extract(raw_html=_).cleaned_text
        readabilityExtractor = lambda _: cleanTags(Document(_).summary())

        # CE, BP, GO, RE
        ntimes = range(11)
        contents = map(
            lambda _: timeMeThis(partial(contentExtractor, item.rawHtml)),
            ntimes)
        boilerpipes = map(
            lambda _: timeMeThis(partial(boilerpipeExtractor, item.rawHtml)),
            ntimes)
        gooses = map(
            lambda _: timeMeThis(partial(gooseExtractor, item.rawHtml)),
            ntimes)
        readabilitys = map(
            lambda _: timeMeThis(partial(readabilityExtractor, item.rawHtml)),
            ntimes)
        log.msg("{} {} {} {} {} {} {} {}".format(
            mean(contents), std(contents),
            mean(boilerpipes), std(boilerpipes),
            mean(gooses), std(gooses),
            mean(readabilitys), std(readabilitys)))
    finally:
        if gcenabled:
            gc.enable()

def _assertIteratorIdsEmpty(self):
    # Account for the need to run a GC collection
    # under non-refcounted implementations like PyPy
    # for storage._iterator_gc to fully do its job.
    # First, confirm that it ran
    self.assertTrue(self._storage._iterators._last_gc > 0)
    gc_enabled = gc.isenabled()
    # make sure there's no race conditions cleaning out the weak refs
    gc.disable()
    try:
        self.assertEqual(0, len(self._storage._iterator_ids))
    except AssertionError:
        # Ok, we have ids. That should also mean that the
        # weak dictionary has the same length.
        self.assertEqual(len(self._storage._iterators),
                         len(self._storage._iterator_ids))
        # Now if we do a collection and re-ask for iterator_gc
        # everything goes away as expected.
        gc.enable()
        gc.collect()
        gc.collect()  # sometimes PyPy needs it twice to clear weak refs

        self._storage._iterator_gc()
        self.assertEqual(len(self._storage._iterators),
                         len(self._storage._iterator_ids))
        self.assertEqual(0, len(self._storage._iterator_ids))
    finally:
        if gc_enabled:
            gc.enable()
        else:
            gc.disable()

def import_descriptors(wherefrom, persistence_file):
    if not gc.isenabled():
        gc.enable()
    reader = DescriptorReader(wherefrom, persistence_path=persistence_file)
    log('info', 'recalled %d files processed from my source(s) provided'
        % len(reader.get_processed_files()))
    with reader:
        i = 0
        for i, desc in enumerate(reader):  # 'enumerate' might be memory-inefficient here
            desc_model = Descriptor(
                descriptor=desc._path.split('/')[-1],  # do we have to be so ugly, though?
                nickname=desc.nickname,
                address=desc.address,
                orport=desc.or_port,
                dirport=desc.dir_port,
                fingerprint=desc.fingerprint,
                platform=desc.platform,
                published=desc.published,
                uptime=desc.uptime)
            db_session.add(desc_model)
            if (i+1) % 100000 == 0:
                # total could be e.g. 323715 descriptors
                # (find ../../data/server-descriptors-2013-02 -type f | wc -l)
                print 'committing..',
                db_session.commit()
                # committed ORM objects can be garbage collected, which is
                # important when dealing with these amounts of rows
                print 'done. collecting garbage..',
                gc.collect()
                print 'done.'
    log('info', 'iterated over %d files' % i)
    db_session.commit()

def __init__(self):
    Harbinger.__init__(self)
    if not gc.isenabled():
        gc.enable()
    self.project_dict = {}
    self.project_list = []
    self.parser = OptionParser(conflict_handler="resolve")
    self.parser.add_option(
        "-n", "--no-color", action="store_true", dest="no_color",
        default=False, help="Text does not use colors"
    )
    self.parser.add_option(
        "-s", "--sequential", action="store_true", dest="sequential",
        default=False, help="Run projects one at a time",
    )
    self.parser.add_option(
        "-q", "--quiet", action="store_true", dest="quiet",
        default=False, help="Minimal Information Printed"
    )
    self.parser.add_option(
        "-a", "--hash", action="store", type="string", dest="hash_type",
        default="md5", help="Which hash algorithm to use.",
    )

def mem_stats():
    import gc
    print "DEBUG: OBJ STATS"

    print "enabled:", gc.isenabled()
    print "objs", len(gc.get_objects())
    print "collected (now)", gc.collect()

    # after collection
    hist = {}
    for obj in gc.get_objects():
        key = str(type(obj))
        if key not in hist:
            hist[key] = 1
        else:
            hist[key] += 1

    best = hist.items()
    best.sort(key=lambda x: x[1], reverse=True)

    print "\n".join("%s: %d" % (k, v) for k, v in best[:10])

    our = []
    gtk = []
    for item in best:
        if "objects." in item[0] or "kupfer." in item[0]:
            our.append(item)
        if "gtk" in item[0]:
            gtk.append(item)

    #print "---just gtk (top)"
    #print "\n".join("%s: %d" % (k,v) for k,v in gtk[:10])

    print "---Just our objects (all > 1)"
    print "\n".join("%s: %d" % (k, v) for k, v in our if v > 1)

def run_repeat(self, method, data):
    """
    Call method(data) the specified number of times.

    @param method: callable to be benchmarked
    @param data: argument for method

    @return: tuple (number of seconds as C{float}, number of calls as C{int})
    """
    gc_enabled = gc.isenabled()
    gc.collect()
    if self.disable_gc:
        gc.disable()

    doit = method if data is _no_data else lambda: method(data)
    try:
        timer_func = self.timer_func
        start = timer_func()
        called = doit()
        stop = timer_func()
        assert isinstance(called, int), \
            "Benchmark method should return number of times it has been called"
        return (stop - start, called)
    finally:
        if gc_enabled:
            gc.enable()

def gcShouldBeDisabled(self):
    self.assertFalse(gc.isenabled())

# basic tests for gc module

try:
    import gc
except ImportError:
    print("SKIP")
    raise SystemExit

print(gc.isenabled())
gc.disable()
print(gc.isenabled())
gc.enable()
print(gc.isenabled())

gc.collect()

if hasattr(gc, 'mem_free'):
    # uPy has these extra functions
    # just test they execute and return an int
    assert type(gc.mem_free()) is int
    assert type(gc.mem_alloc()) is int

if hasattr(gc, 'threshold'):
    # uPy has this extra function
    # check execution and returns
    assert gc.threshold(1) is None
    assert gc.threshold() == 0
    assert gc.threshold(-1) is None
    assert gc.threshold() == -1

    # Setting a low threshold should trigger collection at the list alloc

def _execute_child(self, args, executable, preexec_fn, close_fds,
                   cwd, env, universal_newlines,
                   startupinfo, creationflags, shell,
                   p2cread, p2cwrite,
                   c2pread, c2pwrite,
                   errread, errwrite):
    """Execute program (POSIX version)"""

    if isinstance(args, string_types):
        args = [args]
    else:
        args = list(args)

    if shell:
        args = ["/bin/sh", "-c"] + args
        if executable:
            args[0] = executable

    if executable is None:
        executable = args[0]

    self._loop.install_sigchld()

    # For transferring possible exec failure from child to parent
    # The first char specifies the exception type: 0 means
    # OSError, 1 means some other error.
    errpipe_read, errpipe_write = self.pipe_cloexec()
    try:
        try:
            gc_was_enabled = gc.isenabled()
            # Disable gc to avoid bug where gc -> file_dealloc ->
            # write to stderr -> hang.  http://bugs.python.org/issue1336
            gc.disable()
            try:
                self.pid = fork()
            except:
                if gc_was_enabled:
                    gc.enable()
                raise
            if self.pid == 0:
                # Child
                try:
                    # Close parent's pipe ends
                    if p2cwrite is not None:
                        os.close(p2cwrite)
                    if c2pread is not None:
                        os.close(c2pread)
                    if errread is not None:
                        os.close(errread)
                    os.close(errpipe_read)

                    # When duping fds, if there arises a situation
                    # where one of the fds is either 0, 1 or 2, it
                    # is possible that it is overwritten (#12607).
                    if c2pwrite == 0:
                        c2pwrite = os.dup(c2pwrite)
                    if errwrite == 0 or errwrite == 1:
                        errwrite = os.dup(errwrite)

                    # Dup fds for child
                    def _dup2(a, b):
                        # dup2() removes the CLOEXEC flag but
                        # we must do it ourselves if dup2()
                        # would be a no-op (issue #10806).
                        if a == b:
                            self._set_cloexec_flag(a, False)
                        elif a is not None:
                            os.dup2(a, b)
                        self._remove_nonblock_flag(b)
                    _dup2(p2cread, 0)
                    _dup2(c2pwrite, 1)
                    _dup2(errwrite, 2)

                    # Close pipe fds.  Make sure we don't close the
                    # same fd more than once, or standard fds.
                    closed = set([None])
                    for fd in [p2cread, c2pwrite, errwrite]:
                        if fd not in closed and fd > 2:
                            os.close(fd)
                            closed.add(fd)

                    # Close all other fds, if asked for
                    if close_fds:
                        self._close_fds(but=errpipe_write)

                    if cwd is not None:
                        os.chdir(cwd)

                    if preexec_fn:
                        preexec_fn()

                    if env is None:
                        os.execvp(executable, args)
                    else:
                        os.execvpe(executable, args, env)
                except:
                    exc_type, exc_value, tb = sys.exc_info()
                    # Save the traceback and attach it to the exception object
                    exc_lines = traceback.format_exception(exc_type,
                                                           exc_value,
                                                           tb)
                    exc_value.child_traceback = ''.join(exc_lines)
                    os.write(errpipe_write, pickle.dumps(exc_value))
                finally:
                    # Make sure that the process exits no matter what.
                    # The return code does not matter much as it won't be
                    # reported to the application
                    os._exit(1)

            # Parent
            self._watcher = self._loop.child(self.pid)
            self._watcher.start(self._on_child, self._watcher)

            if gc_was_enabled:
                gc.enable()
        finally:
            # be sure the FD is closed no matter what
            os.close(errpipe_write)

        if p2cread is not None and p2cwrite is not None:
            os.close(p2cread)
        if c2pwrite is not None and c2pread is not None:
            os.close(c2pwrite)
        if errwrite is not None and errread is not None:
            os.close(errwrite)

        # Wait for exec to fail or succeed; possibly raising exception
        errpipe_read = FileObject(errpipe_read, 'rb')
        data = errpipe_read.read()
    finally:
        if hasattr(errpipe_read, 'close'):
            errpipe_read.close()
        else:
            os.close(errpipe_read)

    if data != b"":
        self.wait()
        child_exception = pickle.loads(data)
        for fd in (p2cwrite, c2pread, errread):
            if fd is not None:
                os.close(fd)
        raise child_exception

def setUp(self):
    self.using_gc = gc.isenabled()
    gc.disable()
    self.addCleanup(sys.settrace, sys.gettrace())

def __enter__(self):
    self.gcold = gc.isenabled()
    if self.disable_gc:
        gc.disable()
    self.start_time = timeit.default_timer()

def __enter__(self):
    self.prev = gc.isenabled()
    if self.flag:
        gc.enable()
    else:
        gc.disable()

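# Only __enter__ is shown above; a minimal self-contained sketch of the whole
# toggler, with the class name and the __exit__ (restoring the saved state)
# being assumptions:
import gc

class toggle_gc(object):
    """Force gc on or off inside a with-block, then restore the old state."""

    def __init__(self, flag):
        self.flag = flag

    def __enter__(self):
        self.prev = gc.isenabled()
        if self.flag:
            gc.enable()
        else:
            gc.disable()

    def __exit__(self, exc_type, exc_value, tb):
        # restore whatever the collector state was on entry
        if self.prev:
            gc.enable()
        else:
            gc.disable()

# usage (illustrative):
# with toggle_gc(False):
#     ...  # allocation-heavy section runs without collection pauses
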
def test_plot(n=100, m=100000, trace='new', newfig=True, proc_time_ms=20,
              drawit=True, closeit=True, close_before=True, collect=True,
              sendpost=False):
    """
    This will create a plot or a traces.Trace (if trace=True) with m data
    points, multiple times (n times). Using the defaults should not cause a
    memory leak. If it does, we have a problem (again). Running with
    trace=True should cause a problem (which is fixed with trace='new').

    n: number of repeats
    m: number of data points per graph
    trace: if True (default) uses traces.Trace; when False uses figure, clf
           and plot
    newfig: whether to use a new figure for each iteration or to reuse the
            same one (when False)
    drawit: When True, forces a call to draw to update the plot. Only useful
            for plot, since traces always does that call internally. Now that
            draw produces the data, it then calls the widget update which
            queues a paintEvent.
    closeit: When True, 'old' or 'new', the figure/trace gets closed (like
             pressing the close button on the window). In theory this should
             clear the memory. Obviously, turning this off will not release
             any memory, so it can quickly produce a memory error (with
             default n,m) when using a 32 bit python. 'new' or True selects
             the new way of closing the traces.Trace window. 'old' is the old
             one, which was causing a memory leak.
    close_before: When True, the close is done before running processEvents.
                  When False, it is after. When closeit is False, this has
                  no effect.
    collect: When True, calls the python garbage collector before the start
             of the next iteration.
    sendpost: When True, will call sendPostedEvents with DeferredDelete just
              after processEvents is called (and only if that is enabled) to
              handle the deleteLater events caused by closing a window (as
              long as the window has the Qt::WA_DeleteOnClose flag). When
              disabled, the DeferredDelete events are not handled by
              processEvents, so the widget is only deleted if python deletes
              it or when we return to the main event loop (when the function
              terminates, if called from the console).
    proc_time_ms: the number of milliseconds to use for processEvents. This
                  empties the event queue and is called as long as new events
                  show up and the duration is less than proc_time_ms.
                  Therefore the actual time can be shorter (even 0) if no
                  events are pending, and longer if emptying the queue takes
                  a long time (try: foo=plot(randn(1000000)); to=time.time();
                  QtGui.QApplication.processEvents(QtCore.QEventLoop.AllEvents, 20);
                  print time.time()-to).
                  Use a negative value (like -1) to disable this call.
                  Without this call, no widget update will happen (no paint,
                  no response to mouse, ...)
    """
    if not collect:
        print 'auto collect enabled: ', gc.isenabled(), ' thresholds: ', gc.get_threshold()
    for i in range(n):
        if newfig:
            if trace:
                f = traces.Trace()
            else:
                f = figure()
        else:
            f = gcf()
            clf()
        if trace:
            f.setPoints(arange(m), randn(1, m))
        else:
            plot(randn(m))
        if drawit:
            if trace:
                pass
            else:
                draw()
        if close_before and closeit:
            do_close(f, trace)
        if proc_time_ms >= 0:
            QtGui.QApplication.processEvents(QtCore.QEventLoop.AllEvents, proc_time_ms)
            if sendpost:
                QtGui.QApplication.sendPostedEvents(None, QtCore.QEvent.DeferredDelete)
        if not close_before and closeit:
            do_close(f, trace)
        print 'i=%03i' % i, 'memory(rss: %.3f, vms: %.3f)' % get_memory(), 'garbage:', gc.get_count(),
        if trace:
            # refcount -1 for the temporary. So if it is 1, the next del
            # will really remove it from memory
            print 'Traces len', len(traces._figlist), 'ref_count', sys.getrefcount(f) - 1,
        del f
        if collect:
            print 'Collecting:', gc.collect()
        else:
            print

def testDoesNotEnableGCIfDisabled(self):
    gc.disable()
    self.assertFalse(gc.isenabled())
    self.gcShouldBeDisabled()
    self.assertFalse(gc.isenabled())
    gc.enable()

def testDisableGarbageCollection(self):
    self.assertTrue(gc.isenabled())
    with self.controller._DisableGarbageCollection():
        self.assertFalse(gc.isenabled())
    self.assertTrue(gc.isenabled())

""" Parse function for setuptools_scm that ignores tags for non-C++ subprojects, e.g. apache-arrow-js-XXX tags. """ from setuptools_scm.git import parse kwargs['describe_command'] = \ "git describe --dirty --tags --long --match 'apache-arrow-[0-9].*'" return parse(root, **kwargs) __version__ = setuptools_scm.get_version('../', parse=parse_git) except ImportError: __version__ = None # ARROW-8684: Disable GC while initializing Cython extension module, # to workaround Cython bug in https://github.com/cython/cython/issues/3603 _gc_enabled = _gc.isenabled() _gc.disable() import pyarrow.lib as _lib if _gc_enabled: _gc.enable() from pyarrow.lib import (BuildInfo, VersionInfo, cpp_build_info, cpp_version, cpp_version_info, cpu_count, set_cpu_count) def show_versions(): """ Print various version information, to help with error reporting. """ # TODO: CPU information and flags
def loadRooms(self) -> None:
    if gc.isenabled():
        gc.disable()
    self.output("Loading the database file.")
    errors: Union[str, None]
    db: Union[Dict[str, Dict[str, Any]], None]
    errors, db = loadRooms()
    if db is None:
        if errors is not None:
            self.output(errors)
        return None
    self.output("Creating room objects.")
    terrainReplacements: Dict[str, str] = {
        "random": "undefined",
        "death": "deathtrap",
        "shallowwater": "shallow",
    }
    mobFlagReplacements: Dict[str, str] = {
        "any": "passive_mob",
        "smob": "aggressive_mob",
        "quest": "quest_mob",
        "scoutguild": "scout_guild",
        "mageguild": "mage_guild",
        "clericguild": "cleric_guild",
        "warriorguild": "warrior_guild",
        "rangerguild": "ranger_guild",
        "armourshop": "armour_shop",
        "foodshop": "food_shop",
        "petshop": "pet_shop",
        "weaponshop": "weapon_shop",
    }
    loadFlagReplacements: Dict[str, str] = {
        "packhorse": "pack_horse",
        "trainedhorse": "trained_horse",
    }
    doorFlagReplacements: Dict[str, str] = {
        "noblock": "no_block",
        "nobreak": "no_break",
        "nopick": "no_pick",
        "needkey": "need_key",
    }
    for vnum, roomDict in db.items():
        newRoom: Room = Room()
        newRoom.vnum = vnum
        newRoom.name = roomDict["name"]
        newRoom.desc = roomDict["desc"]
        newRoom.dynamicDesc = roomDict["dynamicDesc"]
        newRoom.note = roomDict["note"]
        terrain: str = roomDict["terrain"]
        newRoom.terrain = terrain if terrain not in terrainReplacements else terrainReplacements[terrain]
        newRoom.light = roomDict["light"]
        newRoom.align = roomDict["align"]
        newRoom.portable = roomDict["portable"]
        newRoom.ridable = roomDict["ridable"]
        with suppress(KeyError):
            newRoom.avoid = roomDict["avoid"]
        newRoom.mobFlags = {mobFlagReplacements.get(flag, flag) for flag in roomDict["mobFlags"]}
        newRoom.loadFlags = {loadFlagReplacements.get(flag, flag) for flag in roomDict["loadFlags"]}
        newRoom.x = roomDict["x"]
        newRoom.y = roomDict["y"]
        newRoom.z = roomDict["z"]
        newRoom.calculateCost()
        for direction, exitDict in roomDict["exits"].items():
            newExit: Exit = self.getNewExit(direction, exitDict["to"], vnum)
            newExit.exitFlags = set(exitDict["exitFlags"])
            newExit.doorFlags = {doorFlagReplacements.get(flag, flag) for flag in exitDict["doorFlags"]}
            newExit.door = exitDict["door"]
            newRoom.exits[direction] = newExit
        self.rooms[vnum] = newRoom
        roomDict.clear()
        del roomDict
    self.currentRoom = self.rooms["0"]
    if not gc.isenabled():
        gc.enable()
        gc.collect()
    self.output("Map database loaded.")

def setUp(self):
    self.reenable_gc = gc.isenabled()
    gc.collect()
    gc.disable()
    super(PerformanceTestCase, self).setUp()

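# The matching tearDown is not shown in the snippet above; a minimal sketch
# of what it presumably does with the saved flag (restoring collection only
# if it was enabled before the test):
def tearDown(self):
    super(PerformanceTestCase, self).tearDown()
    if self.reenable_gc:
        gc.enable()
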
def restart_gc():
    """
    Finaliser for restarting garbage collection
    """
    gc.enable()
    assert gc.isenabled()

def __init__(self, title):
    assert gc.isenabled(), "Garbage collection should be enabled"
    self.title = title

def f2():
    print(gc.get_threshold())
    print(gc.get_count())
    # help(gc.isenabled)
    print(gc.isenabled())

def testDisableGCWhenEnabled(self):
    self.assertTrue(gc.isenabled())
    self.gcShouldBeDisabled()
    self.assertTrue(gc.isenabled())  # make sure we enable afterwards

import gc

print("GC ENABLED:", gc.isenabled())
print("GC collect():", gc.collect())

a = 123

print("GC collect():", gc.collect())
# print("GC get_objects():", gc.get_objects())
print("GC get_stats():", gc.get_stats())
print("gc.garbage", gc.garbage)
print("gc.callbacks:", gc.callbacks)

# gc.set_debug(gc.DEBUG_SAVEALL)
# gc.set_debug(gc.DEBUG_COLLECTABLE)
gc.set_debug(gc.DEBUG_STATS)


class A:
    pass


while True:
    a += 1
    A = {'1': a}
    del A['1']

            raise
        return
    if not st:
        return

    # Python's garbage collector triggers a GC each time a certain number
    # of container objects (the number being defined by
    # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
    # for each file in the dirstate. The C version then immediately marks
    # them as not to be tracked by the collector. However, this has no
    # effect on when GCs are triggered, only on what objects the GC looks
    # into. This means that O(number of files) GCs are unavoidable.
    # Depending on when in the process's lifetime the dirstate is parsed,
    # this can get very expensive. As a workaround, disable GC while
    # parsing the dirstate.
    gcenabled = gc.isenabled()
    gc.disable()
    try:
        p = parsers.parse_dirstate(self._map, self._copymap, st)
    finally:
        if gcenabled:
            gc.enable()
    if not self._dirtypl:
        self._pl = p

def invalidate(self):
    for a in ("_map", "_copymap", "_foldmap", "_branch", "_pl", "_dirs",
              "_ignore"):
        if a in self.__dict__:
            delattr(self, a)
    self._lastnormaltime = 0

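# The comment above describes a general pattern: suspend collection around a
# phase that allocates many container objects, then restore the old state. A
# minimal reusable sketch of that pattern (names are illustrative, not
# Mercurial's own helpers):
import contextlib
import gc

@contextlib.contextmanager
def gc_suspended():
    """Disable the collector for the duration of the block, then restore."""
    was_enabled = gc.isenabled()
    gc.disable()
    try:
        yield
    finally:
        if was_enabled:
            gc.enable()

# usage (illustrative; parse_entry is a hypothetical per-line parser):
# with gc_suspended():
#     entries = [parse_entry(line) for line in big_file]
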
def _execute_child(self, args, executable, preexec_fn, close_fds,
                   pass_fds, cwd, env, universal_newlines,
                   startupinfo, creationflags, shell,
                   p2cread, p2cwrite,
                   c2pread, c2pwrite,
                   errread, errwrite,
                   restore_signals, start_new_session):
    """Execute program (POSIX version)"""

    if PY3 and isinstance(args, (str, bytes)):
        args = [args]
    elif not PY3 and isinstance(args, string_types):
        args = [args]
    else:
        args = list(args)

    if shell:
        args = ["/bin/sh", "-c"] + args
        if executable:
            args[0] = executable

    if executable is None:
        executable = args[0]

    self._loop.install_sigchld()

    # For transferring possible exec failure from child to parent
    # The first char specifies the exception type: 0 means
    # OSError, 1 means some other error.
    errpipe_read, errpipe_write = self.pipe_cloexec()
    # errpipe_write must not be in the standard io 0, 1, or 2 fd range.
    low_fds_to_close = []
    while errpipe_write < 3:
        low_fds_to_close.append(errpipe_write)
        errpipe_write = os.dup(errpipe_write)
    for low_fd in low_fds_to_close:
        os.close(low_fd)
    try:
        try:
            gc_was_enabled = gc.isenabled()
            # Disable gc to avoid bug where gc -> file_dealloc ->
            # write to stderr -> hang.  http://bugs.python.org/issue1336
            gc.disable()
            try:
                self.pid = fork_and_watch(self._on_child, self._loop, True, fork)
            except:
                if gc_was_enabled:
                    gc.enable()
                raise
            if self.pid == 0:
                # Child
                try:
                    # Close parent's pipe ends
                    if p2cwrite is not None:
                        os.close(p2cwrite)
                    if c2pread is not None:
                        os.close(c2pread)
                    if errread is not None:
                        os.close(errread)
                    os.close(errpipe_read)

                    # When duping fds, if there arises a situation
                    # where one of the fds is either 0, 1 or 2, it
                    # is possible that it is overwritten (#12607).
                    if c2pwrite == 0:
                        c2pwrite = os.dup(c2pwrite)
                    if errwrite == 0 or errwrite == 1:
                        errwrite = os.dup(errwrite)

                    # Dup fds for child
                    def _dup2(a, b):
                        # dup2() removes the CLOEXEC flag but
                        # we must do it ourselves if dup2()
                        # would be a no-op (issue #10806).
                        if a == b:
                            self._set_cloexec_flag(a, False)
                        elif a is not None:
                            os.dup2(a, b)
                        self._remove_nonblock_flag(b)
                    _dup2(p2cread, 0)
                    _dup2(c2pwrite, 1)
                    _dup2(errwrite, 2)

                    # Close pipe fds.  Make sure we don't close the
                    # same fd more than once, or standard fds.
                    closed = set([None])
                    for fd in [p2cread, c2pwrite, errwrite]:
                        if fd not in closed and fd > 2:
                            os.close(fd)
                            closed.add(fd)

                    if cwd is not None:
                        os.chdir(cwd)

                    if preexec_fn:
                        preexec_fn()

                    # Close all other fds, if asked for. This must be done
                    # after preexec_fn runs.
                    if close_fds:
                        fds_to_keep = set(pass_fds)
                        fds_to_keep.add(errpipe_write)
                        self._close_fds(fds_to_keep)
                    elif hasattr(os, 'get_inheritable'):
                        # close_fds was false, and we're on
                        # Python 3.4 or newer, so "all file
                        # descriptors except standard streams
                        # are closed, and inheritable handles
                        # are only inherited if the close_fds
                        # parameter is False."
                        for i in xrange(3, MAXFD):
                            try:
                                if i == errpipe_write or os.get_inheritable(i):
                                    continue
                                os.close(i)
                            except:
                                pass

                    if restore_signals:
                        # restore the documented signals back to sig_dfl;
                        # not all will be defined on every platform
                        for sig in 'SIGPIPE', 'SIGXFZ', 'SIGXFSZ':
                            sig = getattr(signal, sig, None)
                            if sig is not None:
                                signal.signal(sig, signal.SIG_DFL)

                    if start_new_session:
                        os.setsid()

                    if env is None:
                        os.execvp(executable, args)
                    else:
                        os.execvpe(executable, args, env)
                except:
                    exc_type, exc_value, tb = sys.exc_info()
                    # Save the traceback and attach it to the exception object
                    exc_lines = traceback.format_exception(exc_type,
                                                           exc_value,
                                                           tb)
                    exc_value.child_traceback = ''.join(exc_lines)
                    os.write(errpipe_write, pickle.dumps(exc_value))
                finally:
                    # Make sure that the process exits no matter what.
                    # The return code does not matter much as it won't be
                    # reported to the application
                    os._exit(1)

            # Parent
            if gc_was_enabled:
                gc.enable()
        finally:
            # be sure the FD is closed no matter what
            os.close(errpipe_write)

        # self._devnull is not always defined.
        devnull_fd = getattr(self, '_devnull', None)
        if p2cread is not None and p2cwrite is not None and p2cread != devnull_fd:
            os.close(p2cread)
        if c2pwrite is not None and c2pread is not None and c2pwrite != devnull_fd:
            os.close(c2pwrite)
        if errwrite is not None and errread is not None and errwrite != devnull_fd:
            os.close(errwrite)
        if devnull_fd is not None:
            os.close(devnull_fd)
        # Prevent a double close of these fds from __init__ on error.
        self._closed_child_pipe_fds = True

        # Wait for exec to fail or succeed; possibly raising exception
        errpipe_read = FileObject(errpipe_read, 'rb')
        data = errpipe_read.read()
    finally:
        if hasattr(errpipe_read, 'close'):
            errpipe_read.close()
        else:
            os.close(errpipe_read)

    if data != b"":
        self.wait()
        child_exception = pickle.loads(data)
        for fd in (p2cwrite, c2pread, errread):
            if fd is not None:
                os.close(fd)
        raise child_exception

def __init__(self, fname=None, fdata=None, decompress=False,
             decrypt=False, password='', disable_gc=True, verbose=True):
    self.private.verbose = verbose

    # Runs a lot faster with GC off.
    disable_gc = disable_gc and gc.isenabled()
    if disable_gc:
        gc.disable()

    try:
        if fname is not None:
            assert fdata is None
            # Allow reading preexisting streams like pyPdf
            if hasattr(fname, 'read'):
                fdata = fname.read()
            else:
                try:
                    f = open(fname, 'rb')
                    fdata = f.read()
                    f.close()
                except IOError:
                    raise PdfParseError('Could not read PDF file %s' % fname)

        assert fdata is not None
        fdata = convert_load(fdata)

        if not fdata.startswith('%PDF-'):
            startloc = fdata.find('%PDF-')
            if startloc >= 0:
                log.warning('PDF header not at beginning of file')
            else:
                lines = fdata.lstrip().splitlines()
                if not lines:
                    raise PdfParseError('Empty PDF file!')
                raise PdfParseError('Invalid PDF header: %s' %
                                    repr(lines[0]))

        self.private.version = fdata[5:8]

        endloc = fdata.rfind('%EOF')
        if endloc < 0:
            raise PdfParseError('EOF mark not found: %s' %
                                repr(fdata[-20:]))
        endloc += 6
        junk = fdata[endloc:]
        fdata = fdata[:endloc]
        if junk.rstrip('\00').strip():
            log.warning('Extra data at end of file')

        private = self.private
        private.indirect_objects = {}
        private.deferred_objects = set()
        private.special = {
            '<<': self.readdict,
            '[': self.readarray,
            'endobj': self.empty_obj,
        }
        for tok in r'\ ( ) < > { } ] >> %'.split():
            self.special[tok] = self.badtoken

        startloc, source = self.findxref(fdata)
        private.source = source

        # Find all the xref tables/streams, and
        # then deal with them backwards.
        xref_list = []
        while 1:
            source.obj_offsets = {}
            trailer, is_stream = self.parsexref(source)
            prev = trailer.Prev
            if prev is None:
                token = source.next()
                if token != 'startxref' and not xref_list:
                    source.warning('Expected "startxref" '
                                   'at end of xref table')
                break
            xref_list.append((source.obj_offsets, trailer, is_stream))
            source.floc = int(prev)

        # Handle document encryption
        private.crypt_filters = None
        if decrypt and PdfName.Encrypt in trailer:
            identity_filter = crypt.IdentityCryptFilter()
            crypt_filters = {PdfName.Identity: identity_filter}
            private.crypt_filters = crypt_filters
            private.stream_crypt_filter = identity_filter
            private.string_crypt_filter = identity_filter
            if not crypt.HAS_CRYPTO:
                raise PdfParseError(
                    'Install PyCrypto to enable encryption support')
            self._parse_encrypt_info(source, password, trailer)

        if is_stream:
            self.load_stream_objects(trailer.object_streams)

        while xref_list:
            later_offsets, later_trailer, is_stream = xref_list.pop()
            source.obj_offsets.update(later_offsets)
            if is_stream:
                trailer.update(later_trailer)
                self.load_stream_objects(later_trailer.object_streams)
            else:
                trailer = later_trailer

        trailer.Prev = None

        if (trailer.Version and
                float(trailer.Version) > float(self.version)):
            self.private.version = trailer.Version

        if decrypt:
            self.decrypt_all()
            trailer.Encrypt = None

        if is_stream:
            self.Root = trailer.Root
            self.Info = trailer.Info
            self.ID = trailer.ID
            self.Size = trailer.Size
            self.Encrypt = trailer.Encrypt
        else:
            self.update(trailer)

        # self.read_all_indirect(source)
        private.pages = self.readpages(self.Root)
        if decompress:
            self.uncompress()

        # For compatibility with pyPdf
        private.numPages = len(self.pages)
    finally:
        if disable_gc:
            gc.enable()

def solve(self):
    # disable garbage collector
    # --------------------------------------------------------------------#
    gc.disable()
    while gc.isenabled():
        time.sleep(0.1)
    # --------------------------------------------------------------------#

    # measure how much memory is used until here
    process = psutil.Process()
    memstart = process.memory_info().vms

    # starts timer
    tstart = time.time()
    if self.show_gui:
        import netgen.gui

    # create mesh with initial size 0.1
    geo = geom2d.SplineGeometry()
    p1 = geo.AppendPoint(-2, -2)
    p2 = geo.AppendPoint(2, -2)
    p3 = geo.AppendPoint(2, 2)
    p4 = geo.AppendPoint(-2, 2)
    geo.Append(["line", p1, p2])
    geo.Append(["line", p2, p3])
    geo.Append(["line", p3, p4])
    geo.Append(["line", p4, p1])
    self._mesh = ngs.Mesh(geo.GenerateMesh(maxh=0.1))

    # create finite element space
    self._fes = ngs.H1(self._mesh, order=2, dirichlet=".*", autoupdate=True)

    # test and trial function
    u = self._fes.TrialFunction()
    v = self._fes.TestFunction()

    # create bilinear form and enable static condensation
    self._a = ngs.BilinearForm(self._fes, condense=True)
    self._a += ngs.grad(u) * ngs.grad(v) * ngs.dx

    # create linear functional and apply RHS
    self._f = ngs.LinearForm(self._fes)
    self._f += -(
        2*ngs.exp(-2 * (ngs.x**2 + ngs.y**2))*(2 * ngs.sin(-2 * (ngs.x**2 + ngs.y**2)) + 2 * (1-8 * ngs.x**2)*ngs.cos(-2 * (ngs.x**2 + ngs.y**2))) +
        2*ngs.exp(-2 * (ngs.x**2 + ngs.y**2))*(2 * ngs.sin(-2 * (ngs.x**2 + ngs.y**2)) + 2 * (1-8 * ngs.y**2)*ngs.cos(-2 * (ngs.x**2 + ngs.y**2))) +
        2*ngs.exp(-1 * (ngs.x**2 + ngs.y**2))*(1 * ngs.sin(-1 * (ngs.x**2 + ngs.y**2)) + 1 * (1-4 * ngs.x**2)*ngs.cos(-1 * (ngs.x**2 + ngs.y**2))) +
        2*ngs.exp(-1 * (ngs.x**2 + ngs.y**2))*(1 * ngs.sin(-1 * (ngs.x**2 + ngs.y**2)) + 1 * (1-4 * ngs.y**2)*ngs.cos(-1 * (ngs.x**2 + ngs.y**2))) +
        2*ngs.exp(-0.1*(ngs.x**2 + ngs.y**2))*(0.1*ngs.sin(-0.1*(ngs.x**2 + ngs.y**2)) + 0.1*(1-0.4*ngs.x**2)*ngs.cos(-0.1*(ngs.x**2 + ngs.y**2))) +
        2*ngs.exp(-0.1*(ngs.x**2 + ngs.y**2))*(0.1*ngs.sin(-0.1*(ngs.x**2 + ngs.y**2)) + 0.1*(1-0.4*ngs.y**2)*ngs.cos(-0.1*(ngs.x**2 + ngs.y**2)))
    ) * v * ngs.dx

    # preconditioner: multigrid - what prerequisites must the problem have?
    self._c = ngs.Preconditioner(self._a, "multigrid")

    # create grid function that holds the solution and set the boundary to 0
    self._gfu = ngs.GridFunction(self._fes, autoupdate=True)  # solution
    self._g = self._ngs_ex
    self._gfu.Set(self._g, definedon=self._mesh.Boundaries(".*"))

    # draw grid function in gui
    if self.show_gui:
        ngs.Draw(self._gfu)

    # create Hcurl space for flux calculation and estimate error
    self._space_flux = ngs.HDiv(self._mesh, order=2, autoupdate=True)
    self._gf_flux = ngs.GridFunction(self._space_flux, "flux", autoupdate=True)

    # TaskManager starts threads (the standard thread count is the number of cores)
    # with ngs.TaskManager():
    # this is the adaptive loop
    while self._fes.ndof < self.max_ndof:
        self._solveStep()
        self._estimateError()
        self._mesh.Refine()

    # since the adaptive loop stopped with a mesh refinement, the gfu must
    # be calculated one last time
    self._solveStep()
    if self.show_gui:
        ngs.Draw(self._gfu)

    # set measured execution time
    self._exec_time = time.time() - tstart

    # set measured used memory
    memstop = process.memory_info().vms - memstart
    self._mem_consumption = memstop

    # enable garbage collector
    # --------------------------------------------------------------------#
    gc.enable()
    gc.collect()

def test_implicit_parent_with_threads(self):
    if not gc.isenabled():
        return  # cannot test with disabled gc
    N = gc.get_threshold()[0]
    if N < 50:
        return  # cannot test with such a small N

    def attempt():
        lock1 = threading.Lock()
        lock1.acquire()
        lock2 = threading.Lock()
        lock2.acquire()
        recycled = [False]

        def another_thread():
            lock1.acquire()  # wait for gc
            greenlet.getcurrent()  # update ts_current
            lock2.release()  # release gc
        t = threading.Thread(target=another_thread)
        t.start()

        class gc_callback(object):
            def __del__(self):
                lock1.release()
                lock2.acquire()
                recycled[0] = True

        class garbage(object):
            def __init__(self):
                self.cycle = self
                self.callback = gc_callback()

        l = []
        x = range(N * 2)
        current = greenlet.getcurrent()
        g = garbage()
        for _ in x:
            g = None  # lose reference to garbage
            if recycled[0]:
                # gc callback called prematurely
                t.join()
                return False
            last = greenlet()
            if recycled[0]:
                break  # yes! gc called in green_new
            l.append(last)  # increase allocation counter
        else:
            # gc callback not called when expected
            gc.collect()
            if recycled[0]:
                t.join()
            return False

        self.assertEqual(last.parent, current)
        for g in l:
            self.assertEqual(g.parent, current)
        return True

    for _ in range(5):
        if attempt():
            break

def test_enable(self):
    gc.enable()
    result = gc.isenabled()
    self.assertTrue(result, "enable Method can't set gc.isenabled as true.")

def _fork(self, path, uid, gid, executable, args, environment, **kwargs):
    """
    Fork and then exec sub-process.

    @param path: the path where to run the new process.
    @type path: C{str}
    @param uid: if defined, the uid used to run the new process.
    @type uid: C{int}
    @param gid: if defined, the gid used to run the new process.
    @type gid: C{int}
    @param executable: the executable to run in a new process.
    @type executable: C{str}
    @param args: arguments used to create the new process.
    @type args: C{list}.
    @param environment: environment used for the new process.
    @type environment: C{dict}.
    @param kwargs: keyword arguments to L{_setupChild} method.
    """
    settingUID = (uid is not None) or (gid is not None)
    if settingUID:
        curegid = os.getegid()
        currgid = os.getgid()
        cureuid = os.geteuid()
        curruid = os.getuid()
        if uid is None:
            uid = cureuid
        if gid is None:
            gid = curegid
        # prepare to change UID in subprocess
        os.setuid(0)
        os.setgid(0)

    collectorEnabled = gc.isenabled()
    gc.disable()
    try:
        self.pid = os.fork()
    except:
        # Still in the parent process
        if settingUID:
            os.setregid(currgid, curegid)
            os.setreuid(curruid, cureuid)
        if collectorEnabled:
            gc.enable()
        raise
    else:
        if self.pid == 0:  # pid is 0 in the child process
            # do not put *ANY* code outside the try block. The child process
            # must either exec or _exit. If it gets outside this block (due
            # to an exception that is not handled here, but which might be
            # handled higher up), there will be two copies of the parent
            # running in parallel, doing all kinds of damage.

            # After each change to this code, review it to make sure there
            # are no exit paths.
            try:
                # Stop debugging. If I am, I don't care anymore.
                sys.settrace(None)
                self._setupChild(**kwargs)
                self._execChild(path, settingUID, uid, gid, executable,
                                args, environment)
            except:
                # If there are errors, bail and try to write something
                # descriptive to stderr.
                # XXX: The parent's stderr isn't necessarily fd 2 anymore, or
                # even still available
                # XXXX: however even libc assumes write(2, err) is a useful
                # thing to attempt
                try:
                    stderr = os.fdopen(2, 'w')
                    stderr.write(
                        "Upon execvpe %s %s in environment %s\n:" %
                        (executable, str(args), "id %s" % id(environment)))
                    traceback.print_exc(file=stderr)
                    stderr.flush()
                    for fd in range(3):
                        os.close(fd)
                except:
                    pass  # make *sure* the child terminates
            # Did you read the comment about not adding code here?
            os._exit(1)

    # we are now in parent process
    if settingUID:
        os.setregid(currgid, curegid)
        os.setreuid(curruid, cureuid)
    if collectorEnabled:
        gc.enable()
    self.status = -1  # this records the exit status of the child

def collect_python_metadata(metadata):
    # Implementation
    impl = pyperf.python_implementation()
    metadata['python_implementation'] = impl

    # Version
    version = platform.python_version()

    match = re.search(r'\[(PyPy [^ ]+)', sys.version)
    if match:
        version = '%s (Python %s)' % (match.group(1), version)

    bits = platform.architecture()[0]
    if bits:
        if bits == '64bit':
            bits = '64-bit'
        elif bits == '32bit':
            bits = '32-bit'
        version = '%s (%s)' % (version, bits)

    # '74667320778e' in 'Python 2.7.12+ (2.7:74667320778e,'
    match = re.search(r'^[^(]+\([^:]+:([a-f0-9]{6,}\+?),', sys.version)
    if match:
        revision = match.group(1)
    else:
        # 'bbd45126bc691f669c4ebdfbd74456cd274c6b92'
        # in 'Python 2.7.10 (bbd45126bc691f669c4ebdfbd74456cd274c6b92,'
        match = re.search(r'^[^(]+\(([a-f0-9]{6,}\+?),', sys.version)
        if match:
            revision = match.group(1)
        else:
            revision = None
    if revision:
        version = '%s revision %s' % (version, revision)
    metadata['python_version'] = version

    if sys.executable:
        metadata['python_executable'] = sys.executable

    # Before PEP 393 (Python 3.3)
    if sys.version_info < (3, 3):
        if sys.maxunicode == 0xffff:
            unicode_impl = 'UTF-16'
        else:
            unicode_impl = 'UCS-4'
        metadata['python_unicode'] = unicode_impl

    # timer
    if (hasattr(time, 'perf_counter')
            and pyperf.perf_counter == time.perf_counter):
        info = time.get_clock_info('perf_counter')
        metadata['timer'] = ('%s, resolution: %s'
                             % (info.implementation,
                                format_timedelta(info.resolution)))
    elif pyperf.perf_counter == time.clock:
        metadata['timer'] = 'time.clock()'
    elif pyperf.perf_counter == time.time:
        metadata['timer'] = 'time.time()'

    # PYTHONHASHSEED
    if os.environ.get('PYTHONHASHSEED'):
        hash_seed = os.environ['PYTHONHASHSEED']
        try:
            if hash_seed != "random":
                hash_seed = int(hash_seed)
        except ValueError:
            pass
        else:
            metadata['python_hash_seed'] = hash_seed

    # CFLAGS
    try:
        import sysconfig
    except ImportError:
        pass
    else:
        cflags = sysconfig.get_config_var('CFLAGS')
        if cflags:
            cflags = normalize_text(cflags)
            metadata['python_cflags'] = cflags

    # GC disabled?
    try:
        import gc
    except ImportError:
        pass
    else:
        if not gc.isenabled():
            metadata['python_gc'] = 'disabled'

def _fork(self, path, uid, gid, executable, args, environment, **kwargs):
    """
    Fork and then exec sub-process.

    @param path: the path where to run the new process.
    @type path: L{bytes} or L{unicode}
    @param uid: if defined, the uid used to run the new process.
    @type uid: L{int}
    @param gid: if defined, the gid used to run the new process.
    @type gid: L{int}
    @param executable: the executable to run in a new process.
    @type executable: L{str}
    @param args: arguments used to create the new process.
    @type args: L{list}.
    @param environment: environment used for the new process.
    @type environment: L{dict}.
    @param kwargs: keyword arguments to L{_setupChild} method.
    """
    collectorEnabled = gc.isenabled()
    gc.disable()
    try:
        self.pid = os.fork()
    except:
        # Still in the parent process
        if collectorEnabled:
            gc.enable()
        raise
    else:
        if self.pid == 0:
            # A return value of 0 from fork() indicates that we are now
            # executing in the child process.

            # Do not put *ANY* code outside the try block. The child
            # process must either exec or _exit. If it gets outside this
            # block (due to an exception that is not handled here, but
            # which might be handled higher up), there will be two copies
            # of the parent running in parallel, doing all kinds of damage.

            # After each change to this code, review it to make sure there
            # are no exit paths.
            try:
                # Stop debugging. If I am, I don't care anymore.
                sys.settrace(None)
                self._setupChild(**kwargs)
                self._execChild(path, uid, gid, executable, args,
                                environment)
            except:
                # If there are errors, try to write something descriptive
                # to stderr before exiting.

                # The parent's stderr isn't *necessarily* fd 2 anymore, or
                # even still available; however, even libc assumes that
                # write(2, err) is a useful thing to attempt.
                try:
                    stderr = os.fdopen(2, 'wb')
                    msg = ("Upon execvpe {0} {1} in environment id {2}"
                           "\n:").format(executable, str(args),
                                         id(environment))

                    if _PY3:
                        # On Python 3, print_exc takes a text stream, but
                        # on Python 2 it still takes a byte stream.  So on
                        # Python 3 we will wrap up the byte stream returned
                        # by os.fdopen using TextIOWrapper.

                        # We hard-code UTF-8 as the encoding here, rather
                        # than looking at something like
                        # getfilesystemencoding() or sys.stderr.encoding,
                        # because we want an encoding that will be able to
                        # encode the full range of code points.  We are
                        # (most likely) talking to the parent process on
                        # the other end of this pipe and not the filesystem
                        # or the original sys.stderr, so there's no point
                        # in trying to match the encoding of one of those
                        # objects.
                        stderr = io.TextIOWrapper(stderr, encoding="utf-8")

                    stderr.write(msg)
                    traceback.print_exc(file=stderr)
                    stderr.flush()

                    for fd in range(3):
                        os.close(fd)
                except:
                    # Handle all errors during the error-reporting process
                    # silently to ensure that the child terminates.
                    pass

            # See comment above about making sure that we reach this line
            # of code.
            os._exit(1)

    # we are now in parent process
    if collectorEnabled:
        gc.enable()
    self.status = -1  # this records the exit status of the child

scream.cout('How many Github objects in github_clients: ' +
            str(len(github_clients)))
scream.cout('Assigning current github client to the first object in a list')
github_client = github_clients[0]

lapis = local_gh.get_api_status()
scream.say('Current status of GitHub API...: ' + lapis.status +
           ' (last update: ' + str(lapis.last_updated) + ')')

if intelli_no_of_threads:
    scream.say('Adjusting no of threads to: ' + str(len(github_clients)))
    no_of_threads = len(github_clients)
    scream.say('No of threads is currently: ' + str(no_of_threads))

# note: the condition must test gc.isenabled() directly; wrapping it in
# str() would always be truthy and always report "turned on"
is_gc_turned_on = 'turned on' if gc.isenabled() else 'turned off'
scream.ssay('Garbage collector is ' + is_gc_turned_on)

scream.say('WORKING WITH INPUT FILE : ' + input_filename)
# simply 'result_stargazers_2013_final_mature.csv'
scream.say('This can take a while, max approx. 2 minutes...')
filename_ = 'data/' if sys.platform == 'linux2' else 'data\\'
filename__ = filename_ + input_filename  # remember it is in a /data subdir
with open(filename__, 'rb') as source_csvfile:
    reposReader = UnicodeReader(f=source_csvfile, dialect=RepoReaderDialect)
    reposReader.next()
    previous = ''
    for row in reposReader:
        scream.log('Processing row: ' + str(row))

def setUp(self):
    self.using_gc = gc.isenabled()
    gc.disable()

def _execute_child(self, args, executable, preexec_fn, close_fds,
                   cwd, env, universal_newlines,
                   startupinfo, creationflags, shell,
                   p2cread, p2cwrite,
                   c2pread, c2pwrite,
                   errread, errwrite):
    """Execute program (POSIX version)"""

    if isinstance(args, (str,)):
        args = [args]
    else:
        args = list(args)

    if shell:
        args = ["/bin/sh", "-c"] + args

    if executable is None:
        executable = args[0]

    # For transferring possible exec failure from child to parent
    # The first char specifies the exception type: 0 means
    # OSError, 1 means some other error.
    errpipe_read, errpipe_write = os.pipe()
    self._set_cloexec_flag(errpipe_write)

    gc_was_enabled = gc.isenabled()
    # Disable gc to avoid bug where gc -> file_dealloc ->
    # write to stderr -> hang.  http://bugs.python.org/issue1336
    gc.disable()
    try:
        self.pid = os.fork()
    except:
        if gc_was_enabled:
            gc.enable()
        raise
    self._child_created = True

    if self.pid == 0:
        # Child
        try:
            # Close parent's pipe ends
            if p2cwrite:
                os.close(p2cwrite)
            if c2pread:
                os.close(c2pread)
            if errread:
                os.close(errread)
            os.close(errpipe_read)

            # Dup fds for child
            if p2cread:
                os.dup2(p2cread, 0)
            if c2pwrite:
                os.dup2(c2pwrite, 1)
            if errwrite:
                os.dup2(errwrite, 2)

            # Close pipe fds.  Make sure we don't close the same
            # fd more than once, or standard fds.
            if p2cread and p2cread not in (0,):
                os.close(p2cread)
            if c2pwrite and c2pwrite not in (p2cread, 1):
                os.close(c2pwrite)
            if errwrite and errwrite not in (p2cread, c2pwrite, 2):
                os.close(errwrite)

            # Close all other fds, if asked for
            if close_fds:
                self._close_fds(but=errpipe_write)

            if cwd is not None:
                os.chdir(cwd)

            if preexec_fn:
                apply(preexec_fn)

            if env is None:
                os.execvp(executable, args)
            else:
                os.execvpe(executable, args, env)

        except:
            exc_type, exc_value, tb = sys.exc_info()
            # Save the traceback and attach it to the exception object
            exc_lines = traceback.format_exception(exc_type, exc_value, tb)
            exc_value.child_traceback = ''.join(exc_lines)
            self._write_no_intr(errpipe_write, pickle.dumps(exc_value))

        # This exitcode won't be reported to applications, so it
        # really doesn't matter what we return.
        os._exit(255)

    # Parent
    if gc_was_enabled:
        gc.enable()
    os.close(errpipe_write)
    if p2cread and p2cwrite:
        os.close(p2cread)
    if c2pwrite and c2pread:
        os.close(c2pwrite)
    if errwrite and errread:
        os.close(errwrite)

    # Wait for exec to fail or succeed; possibly raising exception
    data = self._read_no_intr(errpipe_read, 1048576)  # Exceptions limited to 1 MB
    os.close(errpipe_read)
    if data != "":
        self._waitpid_no_intr(self.pid, 0)
        child_exception = pickle.loads(data)
        raise child_exception

def disable_generational_garbage_collection(self):
    gc_enabled = gc.isenabled()
    gc.disable()
    try:
        yield
    finally:
        # re-enable even if the with-body raises, so an exception thrown
        # into the generator can't leave gc permanently disabled
        if gc_enabled:
            gc.enable()
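
# The decorator is not shown above; presumably the method is wrapped with
# contextlib.contextmanager. A self-contained sketch of the same pattern,
# with the class name being an assumption:
import contextlib
import gc

class Example(object):
    @contextlib.contextmanager
    def disable_generational_garbage_collection(self):
        gc_enabled = gc.isenabled()
        gc.disable()
        try:
            yield
        finally:
            if gc_enabled:
                gc.enable()

# usage (illustrative):
# with Example().disable_generational_garbage_collection():
#     ...  # run an allocation-heavy section without collection pauses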