Example No. 1
def test_main():
    enabled = gc.isenabled()
    gc.disable()
    if not test_support.due_to_ironpython_incompatibility(
        "http://tkbgitvstfat01:8080/WorkItemTracking/WorkItem.aspx?artifactMoniker=314470"
    ):
        assert not gc.isenabled()
    debug = gc.get_debug()
    if not test_support.due_to_ironpython_incompatibility(
        "http://tkbgitvstfat01:8080/WorkItemTracking/WorkItem.aspx?artifactMoniker=314470"
    ):
        gc.set_debug(debug & ~gc.DEBUG_LEAK)  # this test is supposed to leak

    try:
        gc.collect()  # Delete 2nd generation garbage
        run_unittest(GCTests, GCTogglingTests)
    finally:
        if not test_support.due_to_ironpython_incompatibility(
            "http://tkbgitvstfat01:8080/WorkItemTracking/WorkItem.aspx?artifactMoniker=314470"
        ):
            gc.set_debug(debug)
        # test gc.enable() even if GC is disabled by default
        if verbose:
            print "restoring automatic collection"
        # make sure to always test gc.enable()
        gc.enable()
        assert gc.isenabled()
        if not enabled:
            gc.disable()
Example No. 2
def main():
	parser=argparse.ArgumentParser(description="access to pythons built-in garbage collector")
	parser.add_argument("command",help="what to do",choices=["enable","disable","status","collect","threshold","debug","break"],action="store")
	parser.add_argument("args",help="argument for command",action="store",nargs="*")
	ns=parser.parse_args()
	if ns.command=="enable":
		gc.enable()
	elif ns.command=="disable":
		gc.disable()
	elif ns.command=="collect":
		gc.collect()
	elif ns.command=="status":
		print "GC enabled:              {s}".format(s=gc.isenabled())
		tracked=gc.get_objects()
		n=len(tracked)
		print "Tracked objects:         {n}".format(n=n)
		size=sum([sys.getsizeof(e) for e in tracked])
		del tracked  # this list may be big, better delete it
		print "Size of tracked objects: {s} bytes".format(s=size)
		print "Garbage:                 {n}".format(n=len(gc.garbage))
		gsize=sum([sys.getsizeof(e) for e in gc.garbage])
		print "Size of garbage:         {s} bytes".format(s=gsize)
		print "Debug:                   {d}".format(d=gc.get_debug())
	elif ns.command=="threshold":
		if len(ns.args)==0:
			print "Threshold:\n   G1: {}\n   G2: {}\n   G3: {}".format(*gc.get_threshold())
		elif len(ns.args)>3:
			print "Error: to many arguments for threshold!"
			sys.exit(1)
		else:
			try:
				ts=tuple([int(e) for e in ns.args])
			except ValueError:
				print "Error: expected arguments to be integer!"
				sys.exit(1)
			gc.set_threshold(*ts)
	elif ns.command=="debug":
		if len(ns.args)==0:
			print "Debug: {d}".format(d=gc.get_debug())
		elif len(ns.args)==1:
			try:
				flag=int(ns.args[0])
			except ValueError:
				print "Error: expected argument to be an integer!"
				sys.exit(1)
			gc.set_debug(flag)
		else:
			print "Error: expected exactly one argument for threshold!"
			sys.exit(1)
	elif ns.command=="break":
		if len(gc.garbage)==0:
			print "Error: No Garbage found!"
			sys.exit(1)
		else:
			for k in dir(gc.garbage[0]):
				try: delattr(gc.garbage[0], k)
				except Exception: pass
			del gc.garbage[:]
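
The `break` command above only strips attributes from the first object in `gc.garbage`. As a rough illustration of the more common approach, the hypothetical helper below clears each saved object's attribute dictionary and container contents before dropping the list; it is a sketch, not part of the script above.

import gc

def break_garbage_cycles():
    """Best-effort attempt to break reference cycles kept alive in gc.garbage."""
    for obj in gc.garbage:
        try:
            if hasattr(obj, '__dict__'):
                obj.__dict__.clear()   # drop attribute references
            elif isinstance(obj, dict):
                obj.clear()            # drop key/value references
            elif isinstance(obj, list):
                del obj[:]             # drop element references
            elif isinstance(obj, set):
                obj.clear()
        except Exception:
            pass                       # some objects refuse modification
    del gc.garbage[:]                  # finally drop the saved references themselves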
Example No. 3
def test_setdebug():
    if is_cli or is_silverlight:
        for debug in debug_list:
            AssertError(NotImplementedError, gc.set_debug,debug)
            AreEqual(None,gc.get_debug())
    else:
        for debug in debug_list:
            gc.set_debug(debug)
            AreEqual(debug,gc.get_debug())
Example No. 4
 def test_setdebug(self):
     if is_cli:
         for debug in debug_list:
             self.assertRaises(NotImplementedError, gc.set_debug,debug)
             self.assertEqual(0,gc.get_debug())
     else:
         for debug in debug_list:
             gc.set_debug(debug)
             self.assertEqual(debug,gc.get_debug())
Example No. 5
def test_setdebug():
    if is_cli or is_silverlight:
        for debug in debug_list:
            AssertError(NotImplementedError, gc.set_debug, debug)
            AreEqual(None, gc.get_debug())
    else:
        for debug in debug_list:
            gc.set_debug(debug)
            AreEqual(debug, gc.get_debug())
Example No. 6
 def test_setdebug(self):
     if is_cli:
         for debug in debug_list:
             self.assertRaises(NotImplementedError, gc.set_debug,debug)
             self.assertEqual(0,gc.get_debug())
     else:
         for debug in debug_list:
             gc.set_debug(debug)
             self.assertEqual(debug,gc.get_debug())
Example No. 7
def test():
    if not hasattr(gc, 'get_debug'):
        if verbose:
            print "skipping test_gc: too many GC differences with CPython"
        return
    if verbose:
        print "disabling automatic collection"
    enabled = gc.isenabled()
    gc.disable()
    verify(not gc.isenabled())
    debug = gc.get_debug()
    gc.set_debug(debug & ~gc.DEBUG_LEAK) # this test is supposed to leak

    try:
        test_all()
    finally:
        gc.set_debug(debug)
        # test gc.enable() even if GC is disabled by default
        if verbose:
            print "restoring automatic collection"
        # make sure to always test gc.enable()
        gc.enable()
        verify(gc.isenabled())
        if not enabled:
            gc.disable()
Example No. 8
def mem_garbage_dump(_connection=None):
    old_flags = gc.get_debug()
    gc.set_debug(gc.DEBUG_SAVEALL)
    gc.collect()

    def write_to_file(file):
        garbage_ids = set(id(i) for i in gc.garbage)
        for o in gc.garbage:
            referents = tuple(r for r in gc.get_referents(o)
                              if id(r) in garbage_ids)
            try:
                orepr = repr(o)
            except:
                orepr = '<exc>'
            file.write('{};{};{}{};{}\n'.format(
                len(referents), id(o),
                ''.join('{};'.format(id(r)) for r in referents),
                type(o).__name__, orepr))

    output = sims4.commands.CheatOutput(_connection)
    output('Garbage count: {}'.format(len(gc.garbage)))
    filename = 'garbage_graph'
    create_csv(filename, callback=write_to_file, connection=_connection)
    gc.garbage.clear()
    gc.set_debug(old_flags)
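
The function above relies on The Sims 4 helpers (`sims4.commands.CheatOutput`, `create_csv`). A minimal, game-agnostic sketch of the same DEBUG_SAVEALL + `gc.get_referents` edge dump, writing to any writable stream, might look like this (the function name and usage lines are illustrative):

import gc

def dump_garbage_graph(stream):
    """Write one line per object in gc.garbage, listing only the referent
    edges that stay inside the garbage set (same layout as above)."""
    garbage_ids = set(id(o) for o in gc.garbage)
    for o in gc.garbage:
        referents = [r for r in gc.get_referents(o) if id(r) in garbage_ids]
        try:
            orepr = repr(o)
        except Exception:
            orepr = '<exc>'
        stream.write('{};{};{}{};{}\n'.format(
            len(referents), id(o),
            ''.join('{};'.format(id(r)) for r in referents),
            type(o).__name__, orepr))

# Usage sketch: capture everything from one collection, dump it, then
# restore the previous debug flags.
old_flags = gc.get_debug()
gc.set_debug(gc.DEBUG_SAVEALL)
gc.collect()
with open('garbage_graph.csv', 'w') as f:
    dump_garbage_graph(f)
del gc.garbage[:]
gc.set_debug(old_flags)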
Example No. 9
def run_gc(debug_gc=None, gc_wait=0, is_final=False):
    if debug_gc is None:
        debug_gc = xu.getenv_as('XLA_DEBUG_GC', int, 0)
    print_fn = xu.get_print_fn(debug_gc)
    gc_flags = gc.get_debug()
    if debug_gc > 1:
        gc.set_debug(gc.DEBUG_STATS | gc.DEBUG_UNCOLLECTABLE)
    # Run GC so that any XLA resource objects wrapped by std::shared_ptr<>
    # get released.
    while True:
        collected = gc.collect()
        print_fn('GC collected %d objects' % collected)
        if collected == 0:
            break
    print_fn('GC found %d uncollectable objects' % len(gc.garbage))
    gc.set_debug(gc_flags)
    # Unfortunately the Python GC does not immediately release the objects, but
    # it instead delegates the task to a background thread (like we do for
    # handles). To make things worse, there is no way to flush that work
    # immediately. So we look at the handle counters and we wait, up to a max,
    # until all the created handles have been destroyed.
    if (not _wait_for_released_tensors(max_wait=gc_wait, print_fn=print_fn)
            and is_final):
        _force_release_tensors()
    print_fn(torch_xla._XLAC._xla_metrics_report())
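
The loop above drains the collector because freeing one batch of objects can make further cycles collectable (for example via finalizers). A framework-free sketch of that drain loop, with a safety cap added that is not in the original, could be:

import gc

def collect_until_stable(max_rounds=10):
    """Call gc.collect() until a round frees nothing; return the total freed."""
    total = 0
    for _ in range(max_rounds):     # the cap guards against pathological cases
        collected = gc.collect()
        total += collected
        if collected == 0:
            break
    return total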
Example No. 10
    def test_saveall(self):
        if test_support.due_to_ironpython_incompatibility(
            "http://tkbgitvstfat01:8080/WorkItemTracking/WorkItem.aspx?artifactMoniker=314470"
        ):
            return
        # Verify that cyclic garbage like lists show up in gc.garbage if the
        # SAVEALL option is enabled.

        # First make sure we don't save away other stuff that just happens to
        # be waiting for collection.
        gc.collect()
        # if this fails, someone else created immortal trash
        self.assertEqual(gc.garbage, [])

        L = []
        L.append(L)
        id_L = id(L)

        debug = gc.get_debug()
        gc.set_debug(debug | gc.DEBUG_SAVEALL)
        del L
        gc.collect()
        gc.set_debug(debug)

        self.assertEqual(len(gc.garbage), 1)
        obj = gc.garbage.pop()
        self.assertEqual(id(obj), id_L)
Example No. 11
    def testNoReferenceCyclesAfterCall(self):
        class ChildNetwork(network.Network):
            def __init__(self, name=None):
                super(ChildNetwork, self).__init__(name=name)

            def call(self, x):
                return x * 2.

        class ParentNetwork(network.Network):
            def __init__(self, name=None):
                super(ParentNetwork, self).__init__(name=name)
                self.l1 = self.track_layer(ChildNetwork())

            def call(self, x):
                return self.l1(x)

        one = constant_op.constant([[1.0]])
        gc.disable()
        gc.collect()
        previous_gc_debug_flags = gc.get_debug()
        gc.set_debug(gc.DEBUG_SAVEALL)
        preexisting = len(gc.garbage)
        net = ParentNetwork()
        net(one)
        del net
        gc.collect()
        # There should be no additional garbage requiring collection.
        self.assertEqual(preexisting, len(gc.garbage))
        gc.set_debug(previous_gc_debug_flags)
        gc.enable()
Example No. 12
def test_main():
    enabled = gc.isenabled()
    gc.disable()
    if not test_support.due_to_ironpython_incompatibility(
            "http://tkbgitvstfat01:8080/WorkItemTracking/WorkItem.aspx?artifactMoniker=314470"
    ):
        assert not gc.isenabled()
    debug = gc.get_debug()
    if not test_support.due_to_ironpython_incompatibility(
            "http://tkbgitvstfat01:8080/WorkItemTracking/WorkItem.aspx?artifactMoniker=314470"
    ):
        gc.set_debug(debug & ~gc.DEBUG_LEAK)  # this test is supposed to leak

    try:
        gc.collect()  # Delete 2nd generation garbage
        run_unittest(GCTests, GCTogglingTests)
    finally:
        if not test_support.due_to_ironpython_incompatibility(
                "http://tkbgitvstfat01:8080/WorkItemTracking/WorkItem.aspx?artifactMoniker=314470"
        ):
            gc.set_debug(debug)
        # test gc.enable() even if GC is disabled by default
        if verbose:
            print "restoring automatic collection"
        # make sure to always test gc.enable()
        gc.enable()
        assert gc.isenabled()
        if not enabled:
            gc.disable()
Example No. 13
 def test_no_garbage(self):
   device, data_format = device_and_data_format()
   model = resnet50.ResNet50(data_format)
   optimizer = tf.train.GradientDescentOptimizer(0.1)
   with tf.device(device):
     images, labels = random_batch(2, data_format)
     gc.disable()
     # Warm up. Note that this first run does create significant amounts of
     # garbage to be collected. The hope is that this is a build-only effect,
     # and a subsequent training loop will create nothing which needs to be
     # collected.
     apply_gradients(model, optimizer,
                     compute_gradients(model, images, labels))
     gc.collect()
     previous_gc_debug_flags = gc.get_debug()
     gc.set_debug(gc.DEBUG_SAVEALL)
     for _ in range(2):
       # Run twice to ensure that garbage that is created on the first
       # iteration is no longer accessible.
       apply_gradients(model, optimizer,
                       compute_gradients(model, images, labels))
     gc.collect()
     # There should be no garbage requiring collection.
     self.assertEqual(0, len(gc.garbage))
     gc.set_debug(previous_gc_debug_flags)
     gc.enable()
def detect_leaks(enabled=None):
    """A context manager that optionally detects Python object leaks in the
	`with` statement body.

	Set `enabled` to True to enable leak detection, False to disable leak
	detection, or default to let the 'DEBUG_GC' environment variable
	(int, "0") enable leak detection if non-zero.

	Leak detection has some overhead including running a full collection and
	printing a list of uncollectable objects.

	Per https://docs.python.org/2/library/gc.html, "Objects that have
	__del__() methods and are part of a reference cycle cause the entire
	reference cycle to be uncollectable, including objects not necessarily in
	the cycle but reachable only from it. Python doesn't collect such cycles
	automatically because, in general, it isn't possible for Python to guess
	a safe order in which to run the __del__() methods."
	"""
    if enabled is None:
        enabled = bool(int(os.environ.get('DEBUG_GC', '0')))

    saved_debug_flags = gc.get_debug()
    gc.set_debug(TRACE_UNCOLLECTABLES if enabled else TRACE_NONE)

    yield  # yield to the `with` statement body

    if enabled:
        # prints lines like "gc: uncollectable <CleanupGraph 0x10045f810>"
        gc.collect()
        # Examine the gc.garbage list here?

    gc.set_debug(saved_debug_flags)
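
The excerpt above is a generator; its docstring calls it a context manager, so in its original module it is presumably wrapped with `contextlib.contextmanager`, and `TRACE_UNCOLLECTABLES`/`TRACE_NONE` are module constants defined elsewhere. A self-contained sketch under those assumptions, with a `try/finally` added so the flags are restored even if the body raises:

import contextlib
import gc
import os

# Assumed values for the module constants used above.
TRACE_UNCOLLECTABLES = gc.DEBUG_UNCOLLECTABLE
TRACE_NONE = 0

@contextlib.contextmanager
def detect_leaks(enabled=None):
    """Optionally print uncollectable objects created inside the `with` body."""
    if enabled is None:
        enabled = bool(int(os.environ.get('DEBUG_GC', '0')))
    saved_debug_flags = gc.get_debug()
    gc.set_debug(TRACE_UNCOLLECTABLES if enabled else TRACE_NONE)
    try:
        yield
        if enabled:
            # prints lines like "gc: uncollectable <Foo 0x...>"
            gc.collect()
    finally:
        gc.set_debug(saved_debug_flags)

# Usage sketch:
# with detect_leaks(enabled=True):
#     run_workload()  # hypothetical code under test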
Example No. 15
 def global_setup(self):
     # Set garbage collection debug flags
     self.old_flags = gc.get_debug()
     new_flags = 0
     for op in self.flags:
         new_flags |= getattr(gc, op)
     gc.set_debug(new_flags)
Example No. 16
    def test_saveall(self):
        if test_support.due_to_ironpython_incompatibility("http://tkbgitvstfat01:8080/WorkItemTracking/WorkItem.aspx?artifactMoniker=314470"):
            return
        # Verify that cyclic garbage like lists show up in gc.garbage if the
        # SAVEALL option is enabled.

        # First make sure we don't save away other stuff that just happens to
        # be waiting for collection.
        gc.collect()
        # if this fails, someone else created immortal trash
        self.assertEqual(gc.garbage, [])

        L = []
        L.append(L)
        id_L = id(L)

        debug = gc.get_debug()
        gc.set_debug(debug | gc.DEBUG_SAVEALL)
        del L
        gc.collect()
        gc.set_debug(debug)

        self.assertEqual(len(gc.garbage), 1)
        obj = gc.garbage.pop()
        self.assertEqual(id(obj), id_L)
Example No. 17
 def global_setup(self):
     # Set garbage collection debug flags
     self.old_flags = gc.get_debug()
     new_flags = 0
     for op in self.flags:
         new_flags |= getattr(gc, op)
     gc.set_debug(new_flags)
Example No. 18
  def testNoReferenceCyclesAfterCall(self):

    class ChildNetwork(network.Network):

      def __init__(self, name=None):
        super(ChildNetwork, self).__init__(name=name)

      def call(self, x):
        return x * 2.

    class ParentNetwork(network.Network):

      def __init__(self, name=None):
        super(ParentNetwork, self).__init__(name=name)
        self.l1 = self.track_layer(ChildNetwork())

      def call(self, x):
        return self.l1(x)

    one = constant_op.constant([[1.0]])
    gc.disable()
    gc.collect()
    previous_gc_debug_flags = gc.get_debug()
    gc.set_debug(gc.DEBUG_SAVEALL)
    preexisting = len(gc.garbage)
    net = ParentNetwork()
    net(one)
    del net
    gc.collect()
    # There should be no additional garbage requiring collection.
    self.assertEqual(preexisting, len(gc.garbage))
    gc.set_debug(previous_gc_debug_flags)
    gc.enable()
Example No. 19
def test_MultiError_catch_doesnt_create_cyclic_garbage():
    # https://github.com/python-trio/trio/pull/2063
    gc.collect()
    old_flags = gc.get_debug()

    def make_multi():
        # make_tree creates cycles itself, so a simple MultiError is raised directly here
        raise MultiError([get_exc(raiser1), get_exc(raiser2)])

    def simple_filter(exc):
        if isinstance(exc, ValueError):
            return Exception()
        if isinstance(exc, KeyError):
            return RuntimeError()
        assert False, "only ValueError and KeyError should exist"  # pragma: no cover

    try:
        gc.set_debug(gc.DEBUG_SAVEALL)
        with pytest.raises(MultiError):
            # covers MultiErrorCatcher.__exit__ and _multierror.copy_tb
            with MultiError.catch(simple_filter):
                raise make_multi()
        gc.collect()
        assert not gc.garbage
    finally:
        gc.set_debug(old_flags)
        gc.garbage.clear()
Example No. 20
 def test_no_garbage(self):
     device, data_format = resnet50_test_util.device_and_data_format()
     model = resnet50.ResNet50(data_format)
     optimizer = tf.compat.v1.train.GradientDescentOptimizer(0.1)
     with tf.device(device):
         images, labels = resnet50_test_util.random_batch(2, data_format)
         gc.disable()
         # Warm up. Note that this first run does create significant amounts of
         # garbage to be collected. The hope is that this is a build-only effect,
         # and a subsequent training loop will create nothing which needs to be
         # collected.
         apply_gradients(model, optimizer,
                         compute_gradients(model, images, labels))
         gc.collect()
         previous_gc_debug_flags = gc.get_debug()
         gc.set_debug(gc.DEBUG_SAVEALL)
         for _ in range(2):
             # Run twice to ensure that garbage that is created on the first
             # iteration is no longer accessible.
             apply_gradients(model, optimizer,
                             compute_gradients(model, images, labels))
         gc.collect()
         # There should be no garbage requiring collection.
         self.assertEqual(0, len(gc.garbage))
         gc.set_debug(previous_gc_debug_flags)
         gc.enable()
Example No. 21
 def _CleanGc(self):
     old = gc.get_debug()
     gc.set_debug(0)
     del gc.garbage[:]
     gc.collect()
     gc.set_debug(old)
     gc.collect()
     assert not gc.garbage
Example No. 22
def setUpModule():
    global enabled, debug
    enabled = gc.isenabled()
    gc.disable()
    assert not gc.isenabled()
    debug = gc.get_debug()
    gc.set_debug(debug & ~gc.DEBUG_LEAK)  # this test is supposed to leak
    gc.collect()  # Delete 2nd generation garbage
Example No. 23
 def setUp(self):
     self.enabled = gc.isenabled()
     gc.disable()
     self.debug = gc.get_debug()
     gc.set_debug(0)
     gc.callbacks.append(self.cb1)
     gc.callbacks.append(self.cb2)
     self.othergarbage = []
Example No. 24
def get_garbage_count():
    old_flags = gc.get_debug()
    gc.set_debug(gc.DEBUG_SAVEALL)
    gc.collect()
    garbage_count = len(gc.garbage)
    gc.garbage.clear()
    gc.set_debug(old_flags)
    return garbage_count
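
A small usage sketch, assuming `get_garbage_count` from the example above is in scope and no automatic collection runs in between: a self-referencing object becomes unreachable only through its own cycle, so it shows up in the DEBUG_SAVEALL count.

import gc

class Node(object):
    """Tiny object that creates one reference cycle per instance."""
    def __init__(self):
        self.me = self

gc.collect()                  # start from a clean slate
n = Node()
del n                         # the instance is now only reachable via its cycle
print(get_garbage_count())    # > 0: the cycle was swept into gc.garbage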
Example No. 25
 def __len__(self):
     oldFlags = gc.get_debug()
     gc.set_debug(0)
     gc.collect()
     numGarbage = len(gc.garbage)
     del gc.garbage[:]
     gc.set_debug(oldFlags)
     return numGarbage
 def setUp(self):
     # Save gc state and disable it.
     self.enabled = gc.isenabled()
     gc.disable()
     self.debug = gc.get_debug()
     gc.set_debug(0)
     gc.callbacks.append(self.cb1)
     gc.callbacks.append(self.cb2)
     self.othergarbage = []
Example No. 27
def test_get_debug():
    state = [
        0, gc.DEBUG_STATS, gc.DEBUG_COLLECTABLE, gc.DEBUG_UNCOLLECTABLE,
        gc.DEBUG_INSTANCES, gc.DEBUG_OBJECTS, gc.DEBUG_SAVEALL, gc.DEBUG_LEAK
    ]
    result = gc.get_debug()
    if result not in state:
        Fail("Returned value of getdebug method is not valid value:" +
             str(result))
Example No. 28
    def __gc_info():
        d = {}

        d['gc.get_stats'] = gc.get_stats()
        d['gc.isenabled'] = gc.isenabled()
        d['gc.get_debug'] = gc.get_debug()
        d['gc.get_threshold'] = gc.get_threshold()

        return d
def gcIsLeakDetect():
    try:
        import gc
        if (gc.isenabled() and gc.get_debug() & gc.DEBUG_LEAK) > 0:
            return True
    except ImportError:
        ERROR_MSG('Could not import gc module; garbage collection support is not compiled in')

    return False
 def __len__(self):
     # do a garbage collection
     oldFlags = gc.get_debug()
     gc.set_debug(0)
     gc.collect()
     numGarbage = len(gc.garbage)
     del gc.garbage[:]
     gc.set_debug(oldFlags)
     return numGarbage
def check_leak_ndarray(request):
    garbage_expected = request.node.get_closest_marker('garbage_expected')
    if garbage_expected:  # Some tests leak references. They should be fixed.
        yield  # run test
        return

    if 'centos' in platform.platform():
        # Multiple tests are failing due to reference leaks on CentOS. It's not
        # yet known why there are more memory leaks in the Python 3.6.9 version
        # shipped on CentOS compared to the Python 3.6.9 version shipped in
        # Ubuntu.
        yield
        return

    del gc.garbage[:]
    # Collect garbage prior to running the next test
    gc.collect()
    # Enable gc debug mode to check if the test leaks any arrays
    gc_flags = gc.get_debug()
    gc.set_debug(gc.DEBUG_SAVEALL)

    # Run the test
    yield

    # Check for leaked NDArrays
    gc.collect()
    gc.set_debug(gc_flags)  # reset gc flags

    seen = set()

    def has_array(element):
        try:
            if element in seen:
                return False
            seen.add(element)
        except (TypeError, ValueError):  # unhashable
            pass

        if isinstance(element, mx.nd._internal.NDArrayBase):
            return True
        elif isinstance(element, mx.sym._internal.SymbolBase):
            return False
        elif hasattr(element, '__dict__'):
            return any(has_array(x) for x in vars(element))
        elif isinstance(element, dict):
            return any(has_array(x) for x in element.items())
        else:
            try:
                return any(has_array(x) for x in element)
            except (TypeError, KeyError, RecursionError):
                return False

    assert not any(
        has_array(x)
        for x in gc.garbage), 'Found leaked NDArrays due to reference cycles'
    del gc.garbage[:]
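
In its original conftest this generator is presumably registered as a pytest fixture (the decorator is not shown in the excerpt). A simpler, library-agnostic fixture using the same DEBUG_SAVEALL bracket, though stricter than the original because it flags any reference cycle rather than only leaked NDArrays, could look like this:

import gc
import pytest

@pytest.fixture(autouse=True)
def no_new_cycles():
    """Fail any test that leaves reference cycles behind."""
    del gc.garbage[:]
    gc.collect()                      # clear pre-existing garbage
    flags = gc.get_debug()
    gc.set_debug(gc.DEBUG_SAVEALL)    # save everything the next collection finds
    yield                             # run the test
    gc.collect()
    gc.set_debug(flags)
    leaked = len(gc.garbage)
    del gc.garbage[:]
    assert leaked == 0, 'test created {} objects in reference cycles'.format(leaked)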
Example No. 32
    def _showGarbage(self):
        """
        show us what's the garbage about
        """
        if gc.isenabled():
            #
            # Save the current settings
            #
            flags = gc.get_debug()
            th = gc.get_threshold()
            #sys.stdout.write("GC: Thresholds = %s\n" % str(th))

            try:
                #
                # Perform aggressive collection
                #
                gc.set_debug(gc.DEBUG_LEAK)
                gc.set_threshold(1, 1, 1)
                # force collection
                sys.stdout.write("GC: Collecting...\n")
                for i in range(6):
                    gc.collect()

                #
                # Remember what is garbage now
                #
                garbage = gc.garbage
            finally:
                gc.set_debug(flags)
                gc.set_threshold(*th)

            #
            # Report on current garbage
            #
            if not garbage:
                sys.stdout.write('GC: no garbage\n')
            else:
                sys.stdout.write("GC: Garbage objects:\n")
                for x in garbage:
                    for c in [
                            scanner.ModuleTree,
                    ]:
                        if isinstance(x, c):
                            # ignore
                            continue

                    s = str(x)
                    #if len(s) > 80: s = s[:80]
                    sys.stdout.write(str(type(x)))
                    sys.stdout.write("\n  %s\n" % s)

            # collect again without DEBUG_LEAK
            gc.collect()
        #else:
        #    print 'GC: disabled'
        return
Example No. 33
 def __enter__(self):
     # Collect any already-accumulated garbage; we only want to ignore
     # *new* garbage after this point.
     gc.collect()
     self.old_gc = list(gc.garbage)
     self.old_debug = gc.get_debug()
     # Disable gc debugging and clean out the garbage bin, so we don't
     # log any more garbage collections until later.
     gc.set_debug(0)
     del gc.garbage[:]
Example No. 34
def debug_collect():
    if hasattr(gc, 'set_debug') and hasattr(gc, 'get_debug'):
        old = gc.get_debug()
        gc.set_debug(gc.DEBUG_LEAK)
        gc.collect()
        gc.set_debug(old)
    else:
        for x in range(10):
            # PyPy doesn't collect objects with __del__ on first attempt.
            gc.collect()
Example No. 35
def gc_collect_log_callback(phase, info):
    if phase != 'stop':
        return
    if gc.get_debug() & gc.DEBUG_SAVEALL == gc.DEBUG_SAVEALL:
        return
    generation = info['generation']
    if generation == 2 or generation == 4:
        now = services.time_service().sim_now
        gc_output('***** GC-{} [{}]: collected {}. Alive: {} *****',
                  generation, now, info['collected'], gc.get_num_objects())
Example No. 36
def debug_collect():
    if hasattr(gc, 'set_debug') and hasattr(gc, 'get_debug'):
        old = gc.get_debug()
        gc.set_debug(gc.DEBUG_LEAK)
        gc.collect()
        gc.set_debug(old)
    else:
        for x in range(10):
            # PyPy doesn't collect objects with __del__ on first attempt.
            gc.collect()
Example No. 37
def test_memory(tester, domMod):
    tester.startGroup("Memory")
    flags = gc.get_debug()
    try:
        gc.set_debug(0)
        test_cycles(tester, domMod)
        test_refcounts(tester, domMod)
    finally:
        gc.set_debug(flags)
    tester.groupDone()
    return
Example No. 38
 def __init__(self):
     self.done = False
     self.was_enabled = gc.isenabled()
     self.was_debug = gc.get_debug()
     self.was_threshold = gc.get_threshold()
     gc.set_debug(self.was_debug | gc.DEBUG_LEAK)
     gc.enable()
     gc.set_threshold(1)
     gc.collect()
     print 'gc: GcChecker1: %d collected objects' % len(gc.garbage)
     self._CleanGc()
Example No. 39
 def __len__(self):
     # do a garbage collection
     wasOn = gcDebugOn()
     oldFlags = gc.get_debug()
     if not wasOn:
         gc.set_debug(gc.DEBUG_SAVEALL)
     gc.collect()
     numGarbage = len(gc.garbage)
     del gc.garbage[:]
     if not wasOn:
         gc.set_debug(oldFlags)
     return numGarbage
Example No. 40
def restore_gc_state():
    """
    Restore the garbage collector state on leaving the with block.

    """
    old_isenabled = gc.isenabled()
    old_flags = gc.get_debug()
    try:
        yield
    finally:
        gc.set_debug(old_flags)
        (gc.enable if old_isenabled else gc.disable)()
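
As with `detect_leaks` above, the excerpt shows only the generator body; the `contextlib.contextmanager` decorator is implied by the docstring. A self-contained sketch with a usage example:

import contextlib
import gc

@contextlib.contextmanager
def restore_gc_state():
    """Restore the garbage collector state on leaving the with block."""
    old_isenabled = gc.isenabled()
    old_flags = gc.get_debug()
    try:
        yield
    finally:
        gc.set_debug(old_flags)
        (gc.enable if old_isenabled else gc.disable)()

# Usage sketch: temporarily reconfigure the collector for a block of code.
with restore_gc_state():
    gc.disable()
    gc.set_debug(gc.DEBUG_SAVEALL)
    # ... exercise code under the temporary configuration ...
# the previous enabled state and debug flags are back in force here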
Example No. 41
def gcIsLeakDetect():
    """
    Check if leak detection is on.
    """
    try:
        import gc
        if (gc.isenabled() and gc.get_debug() & gc.DEBUG_LEAK) > 0:
            return True
    except ImportError:
        ERROR_MSG('Could not import gc module; garbage collection support is not compiled in')

    return False
Example No. 42
def _gc_prep():
    '''
    turn off GC, set flags so they will populate gc.garbage,
    then run the collector
    returns the number of objects collected
    '''
    gc.disable()
    flags = gc.get_debug()
    gc.set_debug(gc.DEBUG_UNCOLLECTABLE)  # make gc.garbage useful
    num_collected = gc.collect()
    gc.set_debug(flags)
    gc.enable()
    return num_collected
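
A short usage sketch, assuming `_gc_prep` from the example above is in scope. Note that since Python 3.4 cycles involving `__del__` are normally collectable, so on modern interpreters `gc.garbage` usually stays empty unless something is genuinely uncollectable.

import gc

collected = _gc_prep()                      # one collection with DEBUG_UNCOLLECTABLE
print('collected {} objects'.format(collected))
for obj in gc.garbage:                      # anything the collector could not free
    print('uncollectable: {} ({})'.format(obj, type(obj).__name__))
del gc.garbage[:]                           # drop the saved references after inspection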
Example No. 43
 def _run(self):
     self.log.debug("Health monitor started")
     saved_flags = gc.get_debug()
     gc.set_debug(0)
     try:
         while not self._done.wait(self._interval):
             try:
                 self._check()
             except Exception:
                 self.log.exception("Error checking health")
     finally:
         gc.set_debug(saved_flags)
     self.log.debug("Health monitor stopped")
Example No. 44
File: health.py Project: xin49/vdsm
 def _run(self):
     self.log.debug("Health monitor started")
     saved_flags = gc.get_debug()
     gc.set_debug(0)
     try:
         while not self._done.wait(self._interval):
             try:
                 self._check()
             except Exception:
                 self.log.exception("Error checking health")
     finally:
         gc.set_debug(saved_flags)
     self.log.debug("Health monitor stopped")
Example No. 45
    def setUp(self):
        self.mem_ini = MemorySingleton(self.verbose - 1)
        self.mem_ref = MemoryStatistics(self.verbose - 1)
        self.mem_cur = self.mem_ref.copy()

        self.gc_threshold_old = gc.get_threshold()
        self.gc_flags_old = gc.get_debug()
        gc.set_threshold(*self.gc_threshold)
        gc.set_debug(self.gc_flags)

        # Try to obtain a clean slate
        gc.collect()
        self.gc_count = len(gc.garbage)
        del gc.garbage[:]
Example No. 46
def test_get_debug():
    state = [
        0,
        gc.DEBUG_STATS,
        gc.DEBUG_COLLECTABLE,
        gc.DEBUG_UNCOLLECTABLE,
        gc.DEBUG_INSTANCES,
        gc.DEBUG_OBJECTS,
        gc.DEBUG_SAVEALL,
        gc.DEBUG_LEAK,
    ]
    result = gc.get_debug()
    if result not in state:
        Fail("Returned value of getdebug method is not valid value:" + str(result))
Example No. 47
 def test_saveall(self):
     gc.collect()
     self.assertEqual(gc.garbage, [])
     L = []
     L.append(L)
     id_L = id(L)
     debug = gc.get_debug()
     gc.set_debug(debug | gc.DEBUG_SAVEALL)
     del L
     gc.collect()
     gc.set_debug(debug)
     self.assertEqual(len(gc.garbage), 1)
     obj = gc.garbage.pop()
     self.assertEqual(id(obj), id_L)
Example No. 48
    def setUp(self):
        self.mem_ini = MemorySingleton(self.verbose - 1)
        self.mem_ref = MemoryStatistics(self.verbose - 1)
        self.mem_cur = self.mem_ref.copy()

        self.gc_threshold_old = gc.get_threshold()
        self.gc_flags_old = gc.get_debug()
        gc.set_threshold(*self.gc_threshold)
        gc.set_debug(self.gc_flags)

        # Try to obtain a clean slate
        gc.collect()
        self.gc_count = len(gc.garbage)
        del gc.garbage[:]
Example No. 49
    def run( self ):
        # get the current debug flags
        self._old_debug_flags = gc.get_debug()

        if self._monitor_all_unreachable:
            # and set the new ones we are interested in
            gc.set_debug( gc.DEBUG_SAVEALL )

        # Output some log information here so we can tell from the logs when the garbage monitor has been reloaded
        self._logger.info( "Starting garbage monitor. Outputting max %d types" % self._max_type_dump )
        if len( self._object_dump_types ):
            self._logger.info( "\tDumping %d objects of type(s) %s" % (self._max_object_dump, str( self._object_dump_types ) ) )
        else:
            self._logger.info( "\tNot dumping individual objects." )
            
        ScalyrMonitor.run( self )
Example No. 50
 def decorator(self, **kwargs):
   """Sets DEBUG_SAVEALL, runs the test, and checks for new garbage."""
   gc.disable()
   previous_debug_flags = gc.get_debug()
   gc.set_debug(gc.DEBUG_SAVEALL)
   gc.collect()
   previous_garbage = len(gc.garbage)
   f(self, **kwargs)
   gc.collect()
   # This will fail if any garbage has been created, typically because of a
   # reference cycle.
   self.assertEqual(previous_garbage, len(gc.garbage))
   # TODO(allenl): Figure out why this debug flag reset doesn't work. It would
   # be nice to be able to decorate arbitrary tests in a large test suite and
   # not hold on to every object in other tests.
   gc.set_debug(previous_debug_flags)
   gc.enable()
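
The excerpt shows only the inner wrapper of a test decorator. A sketch of how the enclosing decorator would typically be written (the outer name is a guess, and `functools.wraps` is added so test names survive wrapping):

import functools
import gc

def assert_no_new_garbage(f):
    """Decorate a TestCase method so it fails if it creates reference cycles."""
    @functools.wraps(f)
    def decorator(self, **kwargs):
        gc.disable()
        previous_debug_flags = gc.get_debug()
        gc.set_debug(gc.DEBUG_SAVEALL)
        gc.collect()
        previous_garbage = len(gc.garbage)
        f(self, **kwargs)
        gc.collect()
        # Fails if the wrapped test created garbage, typically a reference cycle.
        self.assertEqual(previous_garbage, len(gc.garbage))
        gc.set_debug(previous_debug_flags)
        gc.enable()
    return decorator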
Example No. 51
    def setUp(self):
        self._loop = _async.get_test_event_loop()

        if self._gc_checks:
            # Disable the garbage collector, collecting any existing cycles.
            # Enable debug options so that unreachable objects go into
            # `gc.garbage`.
            gc.disable()
            debug = gc.get_debug()
            try:
                gc.set_debug(0)
                gc.collect()
                gc.garbage[:] = []
            finally:
                gc.set_debug(debug)
            gc.set_debug(gc.DEBUG_UNCOLLECTABLE | gc.DEBUG_SAVEALL)

        # Check no tasks are currently running.
        _async.Task.running = []
def test_main():
    enabled = gc.isenabled()
    gc.disable()
    assert not gc.isenabled()
    debug = gc.get_debug()
    gc.set_debug(debug & ~gc.DEBUG_LEAK) # this test is supposed to leak

    try:
        gc.collect() # Delete 2nd generation garbage
        run_unittest(GCTests, GCTogglingTests, GCCallbackTests)
    finally:
        gc.set_debug(debug)
        # test gc.enable() even if GC is disabled by default
        if verbose:
            print("restoring automatic collection")
        # make sure to always test gc.enable()
        gc.enable()
        assert gc.isenabled()
        if not enabled:
            gc.disable()
Example No. 53
def test_saveall():
    # Verify that cyclic garbage like lists show up in gc.garbage if the
    # SAVEALL option is enabled.
    debug = gc.get_debug()
    gc.set_debug(debug | gc.DEBUG_SAVEALL)
    l = []
    l.append(l)
    id_l = id(l)
    del l
    gc.collect()
    try:
        for obj in gc.garbage:
            if id(obj) == id_l:
                del obj[:]
                break
        else:
            raise TestFailed
        gc.garbage.remove(obj)
    finally:
        gc.set_debug(debug)
Example No. 54
def test():
    if verbose:
        print "disabling automatic collection"
    enabled = gc.isenabled()
    gc.disable()
    assert not gc.isenabled() 
    debug = gc.get_debug()
    gc.set_debug(debug & ~gc.DEBUG_LEAK) # this test is supposed to leak

    try:
        test_all()
    finally:
        gc.set_debug(debug)
        # test gc.enable() even if GC is disabled by default
        if verbose:
            print "restoring automatic collection"
        # make sure to always test gc.enable()
        gc.enable()
        assert gc.isenabled()
        if not enabled:
            gc.disable()
Example No. 55
def test_saveall():
    # Verify that cyclic garbage like lists show up in gc.garbage if the
    # SAVEALL option is enabled.

    # First make sure we don't save away other stuff that just happens to
    # be waiting for collection.
    gc.collect()
    vereq(gc.garbage, []) # if this fails, someone else created immortal trash

    L = []
    L.append(L)
    id_L = id(L)

    debug = gc.get_debug()
    gc.set_debug(debug | gc.DEBUG_SAVEALL)
    del L
    gc.collect()
    gc.set_debug(debug)

    vereq(len(gc.garbage), 1)
    obj = gc.garbage.pop()
    vereq(id(obj), id_L)
Example No. 56
 def gc_collect(self, evt):
     print(gc.collect())
     print(gc.get_debug())
     print(gc.get_stats())
Example No. 57
    def process_request(self, stream, request):
        ''' Process HTTP request and return response '''

        response = Message()

        if request.uri == '/debugmem/collect':
            body = gc.collect(2)

        elif request.uri == '/debugmem/count':
            counts = gc.get_count()
            body = {
                    'len_gc_objects': len(gc.get_objects()),
                    'len_gc_garbage': len(gc.garbage),
                    'gc_count0': counts[0],
                    'gc_count1': counts[1],
                    'gc_count2': counts[2],

                    # Add the length of the most relevant globals
                    'NEGOTIATE_SERVER.queue': len(NEGOTIATE_SERVER.queue),
                    'NEGOTIATE_SERVER.known': len(NEGOTIATE_SERVER.known),
                    'NEGOTIATE_SERVER_BITTORRENT.peers': \
                        len(NEGOTIATE_SERVER_BITTORRENT.peers),
                    'NEGOTIATE_SERVER_SPEEDTEST.clients': \
                        len(NEGOTIATE_SERVER_SPEEDTEST.clients),
                    'POLLER.readset': len(POLLER.readset),
                    'POLLER.writeset': len(POLLER.writeset),
                    'LOG._queue': len(LOG._queue),
                    'CONFIG.conf': len(CONFIG.conf),
                    'NOTIFIER._timestamps': len(NOTIFIER._timestamps),
                    'NOTIFIER._subscribers': len(NOTIFIER._subscribers),
                    'NOTIFIER._tofire': len(NOTIFIER._tofire),
                   }

        elif request.uri == '/debugmem/garbage':
            body = [str(obj) for obj in gc.garbage]

        elif request.uri == '/debugmem/saveall':
            enable = json.load(request.body)
            flags = gc.get_debug()
            if enable:
                flags |= gc.DEBUG_SAVEALL
            else:
                flags &= ~gc.DEBUG_SAVEALL
            gc.set_debug(flags)
            body = enable

        elif request.uri.startswith('/debugmem/types'):
            if request.uri.startswith('/debugmem/types/'):
                typename = request.uri.replace('/debugmem/types/', '')
                objects = objgraph.by_type(typename)
                body = [str(obj) for obj in objects]
            else:
                body = objgraph.typestats()

        else:
            body = None

        if body is not None:
            body = json.dumps(body, indent=4, sort_keys=True)
            response.compose(code="200", reason="Ok", body=body,
                             mimetype="application/json")
        else:
            response.compose(code="404", reason="Not Found")

        stream.send_response(request, response)
Example No. 58
    def run(self):
        # do the garbage collection
        oldFlags = gc.get_debug()

        if self._args.delOnly:
            # do a collect without SAVEALL, to identify the instances that are involved in
            # cycles with instances that define __del__
            # cycles that do not involve any instances that define __del__ are cleaned up
            # automatically by Python, but they also appear in gc.garbage when SAVEALL is set
            gc.set_debug(0)
            if self._args.collect:
                gc.collect()
            garbageInstances = gc.garbage[:]
            del gc.garbage[:]
            # only yield if there's more time-consuming work to do,
            # if there's no garbage, give instant feedback
            if len(garbageInstances) > 0:
                yield None
            # don't repr the garbage list if we don't have to
            if self.notify.getDebug():
                self.notify.debug('garbageInstances == %s' % fastRepr(garbageInstances))

            self.numGarbageInstances = len(garbageInstances)
            # grab the ids of the garbage instances (objects with __del__)
            self.garbageInstanceIds = set()
            for i in xrange(len(garbageInstances)):
                self.garbageInstanceIds.add(id(garbageInstances[i]))
                if not (i % 20):
                    yield None
            # then release the list of instances so that it doesn't interfere with the gc.collect() below
            del garbageInstances
        else:
            self.garbageInstanceIds = set()

        # do a SAVEALL pass so that we have all of the objects involved in legitimate garbage cycles
        # without SAVEALL, gc.garbage only contains objects with __del__ methods
        gc.set_debug(gc.DEBUG_SAVEALL)
        if self._args.collect:
            gc.collect()
        self.garbage = gc.garbage[:]
        del gc.garbage[:]
        # only yield if there's more time-consuming work to do,
        # if there's no garbage, give instant feedback
        if len(self.garbage) > 0:
            yield None
        # don't repr the garbage list if we don't have to
        if self.notify.getDebug():
            self.notify.debug('self.garbage == %s' % fastRepr(self.garbage))
        gc.set_debug(oldFlags)

        self.numGarbage = len(self.garbage)
        # only yield if there's more time-consuming work to do,
        # if there's no garbage, give instant feedback
        if self.numGarbage > 0:
            yield None

        if self._args.verbose:
            self.notify.info('found %s garbage items' % self.numGarbage)

        """ spammy
        # print the types of the garbage first, in case the repr of an object
        # causes a crash
        if self.numGarbage > 0:
            self.notify.info('TYPES ONLY (this is only needed if a crash occurs before GarbageReport finishes):')
            for result in printNumberedTypesGen(self.garbage):
                yield None
                """

        # Py obj id -> garbage list index
        self._id2index = {}

        self.referrersByReference = {}
        self.referrersByNumber = {}

        self.referentsByReference = {}
        self.referentsByNumber = {}

        self._id2garbageInfo = {}

        self.cycles = []
        self.cyclesBySyntax = []
        self.uniqueCycleSets = set()
        self.cycleIds = set()

        # make the id->index table to speed up the next steps
        for i in xrange(self.numGarbage):
            self._id2index[id(self.garbage[i])] = i
            if not (i % 20):
                yield None

        # grab the referrers (pointing to garbage)
        if self._args.fullReport and (self.numGarbage != 0):
            if self._args.verbose:
                self.notify.info('getting referrers...')
            for i in xrange(self.numGarbage):
                yield None
                for result in self._getReferrers(self.garbage[i]):
                    yield None
                byNum, byRef = result
                self.referrersByNumber[i] = byNum
                self.referrersByReference[i] = byRef

        # grab the referents (pointed to by garbage)
        if self.numGarbage > 0:
            if self._args.verbose:
                self.notify.info('getting referents...')
            for i in xrange(self.numGarbage):
                yield None
                for result in self._getReferents(self.garbage[i]):
                    yield None
                byNum, byRef = result                    
                self.referentsByNumber[i] = byNum
                self.referentsByReference[i] = byRef

        for i in xrange(self.numGarbage):
            if hasattr(self.garbage[i], '_garbageInfo') and callable(self.garbage[i]._garbageInfo):
                try:
                    info = self.garbage[i]._garbageInfo()
                except Exception, e:
                    info = str(e)
                self._id2garbageInfo[id(self.garbage[i])] = info
                yield None
            else:
                if not (i % 20):
                    yield None
Example No. 59
 def setUp(self):
     self.saved_flags = gc.get_debug()
     gc.disable()
     gc.set_debug(0)