Example #1
import gc
from collections import defaultdict

def measure_type(cls=None, group=None, before=None):
    d = defaultdict(int)

    # note: 'before' is accepted but currently unused
    if before is None:
        before = tuple()

    if cls:
        # record the ids of every live instance of the requested class
        d[cls] = set(id(o) for o in gc.get_objects() if type(o) == cls)
    else:
        objs = gc.get_objects()
        if group:
            objs = filter(lambda x: group in str(type(x)), objs)

        # histogram of live objects by type
        for o in objs:
            d[type(o)] += 1

    return d
Example #2
def test_garbage_collect_cycle(Class):
    from nose.tools import assert_not_in, assert_in
    import gc

    structure = get_a_structure(Class)
    atom_ids = [id(u) for u in structure]
    structure_id = id(structure)
    scale_id = id(structure.scale)
    # add a cycle
    structure.parent_structure = structure

    for this_id in atom_ids + [structure_id, scale_id]:
        assert_in(this_id, [id(u) for u in gc.get_objects()])
    assert_in(
        structure_id,
        [id(u) for u in gc.get_referents(structure.__dict__)]
    )

    # Delete the structure and collect garbage.
    # The structure should then be truly destroyed, i.e. neither tracked nor
    # among the unreachable objects.
    del structure
    gc.collect()
    for this_id in atom_ids + [structure_id, scale_id]:
        assert_not_in(this_id, [id(u) for u in gc.get_objects()])
        assert_not_in(this_id, [id(u) for u in gc.garbage])
Example #3
    def wrapper(*args, **kwargs):
        """Run the wrapped function, then report leaked QObjects and memory growth."""
        # grab the pre-run memory info:
        num_objects_before = len(gc.get_objects())
        bytes_before = 0
        if sys.platform == "darwin":
            import resource
            bytes_before = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1024.0 / 1024.0

        # run the function:
        res = func(*args, **kwargs)

        # report any non-destroyed QObjects:
        # Note, this will usually run before the main objects have been destroyed by the
        # event loop so it's important to cross-check the output with subsequent lines.
        report_non_destroyed_qobjects()

        # cleanup and grab the post-run memory info:
        gc.collect()
        bytes_after = 0
        if sys.platform == "darwin":
            bytes_after = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1024.0 / 1024.0
        num_objects_after = len(gc.get_objects())

        # and report any difference in memory usage:
        bytes_diff = bytes_after - bytes_before
        obj_diff = num_objects_after - num_objects_before
        msg = ("Memory before: %0.2fMb, current: %0.2fMb, leaked: %0.2fMb (%d new Python objects)"
               % (bytes_before, bytes_after, bytes_diff, obj_diff))
        app = sgtk.platform.current_bundle()
        app.log_debug(msg)

        # return the result:
        return res
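The snippet above is the inner wrapper of a memory-reporting decorator whose outer function is not shown. A minimal self-contained sketch of the same pattern, with illustrative names (report_object_growth, build_list) that are not from the original:

import functools
import gc

def report_object_growth(func):
    """Decorator sketch: log how many gc-tracked objects a call leaves behind."""
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        before = len(gc.get_objects())
        result = func(*args, **kwargs)
        gc.collect()
        print("%s: %+d gc-tracked objects" % (
            func.__name__, len(gc.get_objects()) - before))
        return result
    return wrapper

@report_object_growth
def build_list(n):
    return [object() for _ in range(n)]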
Example #4
from collections import defaultdict
from gc import get_objects

def assert_memory_leak(func, specific_objects=None):
    """Determines memory leaks based on :func:`gc.get_objects`.

    Parameters
    ----------
    func : callable
        A function that does something and that, once it has finished, should
        in theory not leak any objects.

    specific_objects : type or None, optional
        If None then it checks if any objects exist that didn't exist before
        the function was called. If given a class, it checks only whether
        objects of that class are present that were not there before the
        function was called.

    Raises
    ------
    AssertionError
        If any objects or one specific object leaked.
    """
    before = defaultdict(int)
    after = defaultdict(int)

    for i in get_objects():
        before[type(i)] += 1
    func()
    for i in get_objects():
        after[type(i)] += 1

    if specific_objects is None:
        assert all(after[k] - before[k] == 0 for k in after)
    else:
        assert after[specific_objects] - before[specific_objects] == 0
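A short usage sketch, assuming CPython's reference counting reclaims the temporaries immediately; make_and_drop is a hypothetical function under test:

# Hypothetical usage: make_and_drop creates objects that should all be
# reclaimed by the time it returns.
def make_and_drop():
    data = {"key": [1, 2, 3]}
    del data

assert_memory_leak(make_and_drop)        # no type may grow in count
assert_memory_leak(make_and_drop, dict)  # only dict counts are compared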
Example #5
def collectprint(stage=None):
	gc.collect()	# garbage collect
	if stage is not None:
		print stage
		print "\t#objects: %d" % len(gc.get_objects())
	else:
		print "number of objects: %d" % len(gc.get_objects())
Example #6
  def decorator(self, **kwargs):
    """Finds existing Tensors, runs the test, checks for new Tensors."""

    def _is_tensor(obj):
      try:
        return (isinstance(obj, ops.Tensor) or
                isinstance(obj, variables.Variable))
      except ReferenceError:
        # If the object no longer exists, we don't care about it.
        return False

    tensors_before = set(id(obj) for obj in gc.get_objects() if _is_tensor(obj))
    outside_container_prefix = ops.get_default_graph()._container_prefix
    with IsolateTest():
      # Run the test in a new graph so that collections get cleared when it's
      # done, but inherit the container prefix so that we can print the values
      # of variables which get leaked when executing eagerly.
      ops.get_default_graph()._container_prefix = outside_container_prefix
      f(self, **kwargs)
    # Make an effort to clear caches, which would otherwise look like leaked
    # Tensors.
    backprop._last_zero = [None]
    backprop._shape_dtype = [None, None]
    context.get_default_context().scalar_cache().clear()
    gc.collect()
    tensors_after = [
        obj for obj in gc.get_objects()
        if _is_tensor(obj) and id(obj) not in tensors_before
    ]
    if tensors_after:
      raise AssertionError(("%d Tensors not deallocated after test: %s" % (
          len(tensors_after),
          str(tensors_after),
      )))
Example #7
def listObjs(regex='Q', typ=None):
    """List all objects managed by python gc with class name matching regex.
    Finds 'Q...' classes by default."""
    if typ is not None:
        return [x for x in gc.get_objects() if isinstance(x, typ)]
    else:
        return [x for x in gc.get_objects() if re.match(regex, type(x).__name__)]
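Two illustrative calls, assuming a Qt-based session where 'Q...' class names exist:

qt_objects = listObjs()            # objects whose type name matches the 'Q' regex
dict_objects = listObjs(typ=dict)  # exact isinstance() filtering instead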
Example #8
 def stats(self, gc_collect=False):
     stats = dict((key, comp.stats()) for key, comp
         in self.components.items())
     hub = hubs.get_hub()
     gettypes = lambda t: [o for o in gc.get_objects() if
         type(o).__name__ == t]
     if gc_collect:
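         # collect twice, but only run the second pass if the first freed anything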
         gc.collect() and gc.collect()
     stats.update({
         'server': {
             'items': self._mqueue.qsize(),
             'wsgi_free': self.server_pool.free(),
             'wsgi_running': self.server_pool.running(),
         },
         'eventlet': {
             'next_timers': len(hub.next_timers),
             'timers': len(hub.timers),
             'readers': len(hub.listeners['read']),
             'writers': len(hub.listeners['write']),
             'timers_count': hub.get_timers_count(),
         },
         'python': {
             'greenthreads': len(gettypes('GreenThread')),
             'gc_tracked_objs': len(gc.get_objects()),
         }
     })
     return stats
Example #9
    def imap_memory_check(self, concurrency):
        # checks that imap is strictly
        # ordered and consumes a constant amount of memory
        p = greenpool.GreenPool(concurrency)
        count = 1000
        it = p.imap(passthru, six.moves.range(count))
        latest = -1
        while True:
            try:
                i = it.next()
            except StopIteration:
                break

            if latest == -1:
                gc.collect()
                initial_obj_count = len(gc.get_objects())
            self.assert_(i > latest)
            latest = i
            if latest % 5 == 0:
                eventlet.sleep(0.001)
            if latest % 10 == 0:
                gc.collect()
                objs_created = len(gc.get_objects()) - initial_obj_count
                self.assert_(objs_created < 25 * concurrency, objs_created)
        # make sure we got to the end
        self.assertEquals(latest, count - 1)
Example #10
def refcount():
    count = 0
    print len(gc.get_objects())
    for obj in gc.get_objects():
        count += sys.getrefcount(obj)

    return count
Example #11
def rational_powers_memleak():
    """
    Check that there is no memory leak in rational powers

    OUTPUT:

    Boolean. Whether the memory leak was detected.

    See :trac:`9129`.

    EXAMPLES::

        sage: from sage.symbolic.tests import rational_powers_memleak
        sage: rational_powers_memleak()
        False
    """
    from sage.rings.all import ZZ
    import gc
    gc.collect()
    c0 = sum(1 for obj in gc.get_objects())
    for i in range(1000):
        a = ZZ(2).sqrt()
    gc.collect()
    c1 = sum(1 for obj in gc.get_objects())
    # A real leak would keep at least one new object alive per iteration
    return (c1 - c0) >= 1000
Example #12
def mem_stats():
	import gc
	print "DEBUG: OBJ STATS"

	print "enabled:", gc.isenabled()
	print "objs", len(gc.get_objects())
	print "collected (now)", gc.collect()

	# after collection
	hist = {}
	for obj in gc.get_objects():
		key = str(type(obj))
		if key not in hist:
			hist[key] = 1
		else:
			hist[key] += 1
	
	best = hist.items()
	best.sort(key=lambda x:x[1], reverse=True)
	print "\n".join("%s: %d" % (k,v) for k,v in best[:10])

	our = []
	gtk = []
	for item in best:
		if "objects." in item[0] or "kupfer." in item[0]:
			our.append(item)
		if "gtk" in item[0]:
			gtk.append(item)
	
	#print "---just gtk (top)"
	#print "\n".join("%s: %d" % (k,v) for k,v in gtk[:10])
	print "---Just our objects (all > 1)"
	print "\n".join("%s: %d" % (k,v) for k,v in our if v > 1)
Example #13
    def start(self):
        """
        Start. Initialize the asynch io on accept.
        """

        print "EventServiceAsynch: Event service start"
        self.registerForListen()

        self.junk = map(lambda o: id(o), gc.get_objects())

        while (1):
            gc.collect()
            try:
                self.junk3 = gc.get_objects()
                junk = list()
                for o in self.junk3:
                    if id(o) not in self.junk and id(o) != id(self.junk) and id(o) != id(junk):
                        junk.append(o)
                print "JUNK REPORT: %d %d" % (len(self.junk), len(self.junk3))
                print " ************* "
                map(lambda o: Info(o), junk)
                print " ************* "
                del self.junk3
                del junk
            except:
                log.exception("Showing j3")

            time.sleep(0.1)
Example #14
	def OnKeyChar(self, keyCode):
		if keyCode == wx.WXK_DELETE or keyCode == wx.WXK_NUMPAD_DELETE:
			if self._selectedObj is not None:
				self._deleteObject(self._selectedObj)
				self.QueueRefresh()
		if keyCode == wx.WXK_UP:
			self.layerSelect.setValue(self.layerSelect.getValue() + 1)
			self.QueueRefresh()
		elif keyCode == wx.WXK_DOWN:
			self.layerSelect.setValue(self.layerSelect.getValue() - 1)
			self.QueueRefresh()
		elif keyCode == wx.WXK_PAGEUP:
			self.layerSelect.setValue(self.layerSelect.getValue() + 10)
			self.QueueRefresh()
		elif keyCode == wx.WXK_PAGEDOWN:
			self.layerSelect.setValue(self.layerSelect.getValue() - 10)
			self.QueueRefresh()

		if keyCode == wx.WXK_F3 and wx.GetKeyState(wx.WXK_SHIFT):
			shaderEditor(self, self.ShaderUpdate, self._objectLoadShader.getVertexShader(), self._objectLoadShader.getFragmentShader())
		if keyCode == wx.WXK_F4 and wx.GetKeyState(wx.WXK_SHIFT):
			from collections import defaultdict
			from gc import get_objects
			self._beforeLeakTest = defaultdict(int)
			for i in get_objects():
				self._beforeLeakTest[type(i)] += 1
		if keyCode == wx.WXK_F5 and wx.GetKeyState(wx.WXK_SHIFT):
			from collections import defaultdict
			from gc import get_objects
			self._afterLeakTest = defaultdict(int)
			for i in get_objects():
				self._afterLeakTest[type(i)] += 1
			for k in self._afterLeakTest:
				if self._afterLeakTest[k]-self._beforeLeakTest[k]:
					print k, self._afterLeakTest[k], self._beforeLeakTest[k], self._afterLeakTest[k] - self._beforeLeakTest[k]
Example #15
def memfootprint():
    import gc
    objs = gc.get_objects()
    classes = set(o.__class__ for o in objs if hasattr(o, '__class__'))
    # print "\n".join([c.__name__ for c in classes])
    print "#objects=", len(objs)
    print "#classes=", len(classes)
Example #16
def checkExceptionPerGreenlet(outfileName=None, ignoreHealthyOnes=True):
    mylog("Trying to detect greenlets...", verboseLevel=-2)
    if not outfileName:
        for ob in gc.get_objects():
            if not hasattr(ob, 'parent_args'):
                continue
            if not ob:
                continue
            if ignoreHealthyOnes and (not ob.exception):
                continue
            mylog('%s[%s] called with parent arg\n(%s)\n%s' % (ob.name, repr(ob.args), repr(ob.parent_args),
                ''.join(traceback.format_stack(ob.gr_frame))), verboseLevel=-2)
            mylog(ob.exception, verboseLevel=-2)
    else:
        handler = open(outfileName, 'w')
        for ob in gc.get_objects():
            if not hasattr(ob, 'parent_args'):
                continue
            if not ob:
                continue
            if ignoreHealthyOnes and (not ob.exception):
                continue
            handler.write('%s[%s] called with parent arg\n(%s)\n%s' % (ob.name, repr(ob.args), repr(ob.parent_args),
                ''.join(traceback.format_stack(ob.gr_frame))))
            handler.write(str(ob.exception))
Example #17
 def assertNoMemoryLeaks(self, f):
   object_ids_before = {id(o) for o in gc.get_objects()}
   f()
   gc.collect()
   objects_after = tuple(
       o for o in gc.get_objects() if id(o) not in object_ids_before)
   self.assertEmpty(
       tuple(o for o in objects_after if isinstance(o, TestResource)))
Example #18
def select_objects(string):
	if not string:
		return gc.get_objects()
	elif string[0] in "0123456789":
		number=int(string, 16)
		return tuple(object for object in gc.get_objects() if id(object)==number)
	else:
		return tuple(object for object in gc.get_objects() if get_type_name(object)==string)
Example #19
 def assertNoMemoryLeaks(self, target_f):
   refs_before = set(id(obj) for obj in gc.get_objects())
   target_f()
   gc.collect()
   objs_after = [obj for obj in gc.get_objects() if id(obj) not in refs_before]
   leaked = [obj for obj in objs_after if isinstance(obj, TestResource)]
   self.assertFalse(leaked,
                    'Resources {} were leaked by AutoGraph.'.format(leaked))
Example #20
 def tearDown(self):
     gc.collect()
     self.assertEqual(len(gc.garbage), self.gc_count)
     if len(gc.garbage) > 0:
         if self.verbose > 1:
             print gc.get_objects()
         #TODO be pedantic and fail?
     del gc.garbage[:]
     gc.set_threshold(*self.gc_threshold_old)
     gc.set_debug(self.gc_flags_old)
Example #21
 def get_objects_skipping_sqlite_issue():
     # pysqlite keeps adding weakref objects which only
     # get reset after 220 iterations.  We'd like to keep these
     # tests under 50 iterations and ideally about ten, so
     # just filter them out so that we get a "flatline" more quickly.
     if testing.against("sqlite+pysqlite"):
         return [o for o in gc.get_objects()
                 if not isinstance(o, weakref.ref)]
     else:
         return gc.get_objects()
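A sketch of the "flatline" loop this helper supports; do_work is a hypothetical operation and the tolerance is arbitrary:

import gc
import weakref

def do_work():
    # hypothetical operation under test
    _ = {"row": [1, 2, 3]}

def object_count():
    # same filtering idea as get_objects_skipping_sqlite_issue
    return len([o for o in gc.get_objects() if not isinstance(o, weakref.ref)])

do_work()                   # warm-up pass lets caches populate
gc.collect()
baseline = object_count()
for _ in range(10):
    do_work()
gc.collect()
assert object_count() <= baseline + 5   # expect a flatline, small tolerance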
Example #22
def main():
    for _ in xrange(5):
        logging.debug(memory_usage())
        logging.debug(all_info(gc.get_objects()))
        x = make_big_list()
        logging.debug(memory_usage())
        logging.debug(all_info(gc.get_objects()))
        del x
        logging.debug(memory_usage())
        logging.debug(all_info(gc.get_objects()))
Example #23
def test_leakage(env):
    import gc
    env.make_file('xxx.h', 'SOME SIGNIFICANTLY LONG CONTENT' * 10 * 1024)
    env.make_file('a.cpp', '#include "xxx.h"\n')

    memviews = len(list(None for x in gc.get_objects() if type(x) == memoryview))
    for x in range(16):
        env.run('a.cpp')
    memviews2 = len(list(None for x in gc.get_objects() if type(x) == memoryview))
    assert memviews == memviews2
Example #24
 def gc_object_count(self):
     # Python 2.7 doesn't seem to free all objects even for a full
     # collection, so collect repeatedly until no further objects get freed.
     old_count, count = len(gc.get_objects()), 0
     while True:
         gc.collect()
         count = len(gc.get_objects())
         if count == old_count:
             return count
         old_count = count
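One way such a collect-until-stable count might back a leak-checking fixture; this test class is hypothetical, and the exact-equality assertion may need loosening in practice since test machinery itself can allocate objects:

import gc
import unittest

class LeakCheckedTest(unittest.TestCase):
    """Sketch: compare stable object counts before and after each test."""

    def gc_object_count(self):
        old_count = len(gc.get_objects())
        while True:
            gc.collect()
            count = len(gc.get_objects())
            if count == old_count:
                return count
            old_count = count

    def setUp(self):
        self._baseline = self.gc_object_count()

    def tearDown(self):
        self.assertEqual(self.gc_object_count(), self._baseline)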
Example #25
    def test_errobj_reference_leak(self, level=rlevel):
        """Ticket #955"""
        z = int(0)
        p = np.int32(-1)

        gc.collect()
        n_before = len(gc.get_objects())
        z**p  # this shouldn't leak a reference to errobj
        gc.collect()
        n_after = len(gc.get_objects())
        assert n_before >= n_after, (n_before, n_after)
Example #26
 def test_memory(self):
     # make sure we aren't leaking memory in our C module
     gc.collect()
     start_count = len(gc.get_objects())
     results = ngrams.breakup_list(['foo', 'bar', 'bazbaz'], 1, 3)
     results2 = ngrams.breakup_word('miroiscool', 1, 3)
     del results
     del results2
     gc.collect()
     end_count = len(gc.get_objects())
     self.assertEquals(start_count, end_count)
Example #27
def _print_memory_status():

    _stop_memory_tracing()

    mem_usage = memory_usage()
    num_objects = len(gc.get_objects())

    obj_mem_usage = sum(sys.getsizeof(obj) for obj in gc.get_objects())

    log_info("GC objects: %s, size: %.1f KiB, heap memory usage: %s Kb",
             num_objects, obj_mem_usage / 1024, mem_usage)
Example #28
def gc_objects_by_type(tipe):
    """
    Return a list of objects from the garbage collector by type.
    """
    import inspect
    import gc
    if isinstance(tipe, str):
        return [o for o in gc.get_objects() if type(o).__name__ == tipe]
    elif inspect.isclass(tipe):
        return [o for o in gc.get_objects() if isinstance(o, tipe)]
    else:
        return [o for o in gc.get_objects() if isinstance(o, type(tipe))]
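A quick sketch of the three dispatch modes, using a throwaway class:

class Foo(object):
    pass

f = Foo()
assert f in gc_objects_by_type('Foo')   # lookup by type name
assert f in gc_objects_by_type(Foo)     # lookup by class
assert f in gc_objects_by_type(f)       # lookup by example instance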
Example #29
def check_for_obj_leakage(f, *args, **kwargs):
    """
    The idea is that I am going to invoke f(), then run gc.collect(), then run
    gc.get_objects() to get a complete list of all objects in the system, then
    invoke f() a second time, then run gc.collect(), then run gc.get_objects()
    to get a list of all the objects *now* in the system.

    Then I return a tuple of two things: the first element of the tuple is the
    difference between the number of objects in the second list and the number
    of objects in the first list.

    I.e., if this number is zero then you can be pretty sure there is no memory
    leak, unless f is deleting some objects and replacing them by exactly the
    same number of objects but the new objects take up more memory. If this
    number is greater than zero then you can be pretty sure there is a memory
    leak, unless f is doing some memoization/caching behavior and it will
    eventually stabilize, which you can detect by running
    check_for_obj_leakage() more times and seeing if it stabilizes.

    (Actually we run f() followed by gc.collect() one time before we start in
    order to account for any static objects which are created the first time
    you run f() and then re-used after that.)

    The second element in the return value is the set of all objects which were
    present in the second list and not in the first. Some of these objects
    might be memory-leaked objects, or perhaps f deleted some objects and
    replaced them with equivalent objects, in which case these objects are not
    leaked.

    (We actually invoke gc.collect() three times in a row in case there are
    objects which get collected in the first pass that have finalizers which
    create new reference-cycled objects... "3" is a superstitious number -- we
    figure most of the time the finalizers of the things produced by the first
    round of finalizers won't themselves produce another round of
    reference-cycled objects.)
    """
    f()
    gc.collect();gc.collect();gc.collect()
    f()
    gc.collect();gc.collect();gc.collect()
    r1 = gc.get_objects()
    f()
    gc.collect();gc.collect();gc.collect()
    r2 = gc.get_objects()
    d2 = dict([(id(x), x) for x in r2])

    # Now remove everything from r1, and r1 itself, from d2.
    del d2[id(r1)]
    for o in r1:
        if id(o) in d2:
            del d2[id(o)]

    return (len(r2) - len(r1) - 1, d2)
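A hedged usage sketch: leaky below is a hypothetical function that deliberately grows a module-level list, so the count delta should come back positive:

_sink = []

def leaky():
    # keeps one new object alive per call
    _sink.append(object())

delta, new_objects = check_for_obj_leakage(leaky)
assert delta >= 1   # the retained object shows up in the count difference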
Example #30
def search_object(string):
	if not string:
		return None
	elif string[0] in "0123456789":
		number=int(string, 16)
		for object in gc.get_objects():
			if id(object)==number: return object
		return None
	else:
		for object in gc.get_objects():
			if get_type_name(object)==string: return object
		return None
Example #31
def _find_objects(t):
    return [o for o in gc.get_objects() if isinstance(o, t)]
Example #32
def instances_by_name(name_filter):
    """
    Return the list of objects that exactly match the given
    name_filter.
    """
    return [o for o in gc.get_objects() if name_filter == typename(o)]
Example #33
def get_greenlets():
    for object in gc.get_objects():
        if isinstance(object, gevent.Greenlet):
            yield object
Example #34
def get_new_ids(skip_update=False,
                limit=10,
                sortby='deltas',
                shortnames=None,
                file=None,
                _state={}):
    """Find and display new objects allocated since last call.

    Shows the increase in object counts since last call to this
    function and returns the memory address ids for new objects.

    Returns a dictionary mapping object type names to sets of object IDs
    that have been created since the last time this function was called.

    ``skip_update`` (bool): If True, returns the same dictionary that
    was returned during the previous call without updating the internal
    state or examining the objects currently in memory.

    ``limit`` (int): The maximum number of rows that you want to print
    data for.  Use 0 to suppress the printing.  Use None to print everything.

    ``sortby`` (str): This is the column that you want to sort by in
    descending order.  Possible values are: 'old', 'current', 'new',
    'deltas'

    ``shortnames`` (bool): If True, classes with the same name but
    defined in different modules will be lumped together.  If False,
    all type names will be qualified with the module name.  If None (default),
    ``get_new_ids`` will remember the value from previous calls, so it's
    enough to prime this once.  By default the primed value is True.

    ``_state`` (dict): Stores old, current, and new_ids in memory.
    It is used by the function to store the internal state between calls.
    Never pass in this argument unless you know what you're doing.

    The caveats documented in :func:`growth` apply.

    When one gets new_ids from :func:`get_new_ids`, one can use
    :func:`at_addrs` to get a list of those objects. Then one can iterate over
    the new objects, print out what they are, and call :func:`show_backrefs` or
    :func:`show_chain` to see where they are referenced.

    Example:

        >>> _ = get_new_ids() # store current objects in _state
        >>> _ = get_new_ids() # current_ids become old_ids in _state
        >>> a = [0, 1, 2] # list we don't know about
        >>> b = [3, 4, 5] # list we don't know about
        >>> new_ids = get_new_ids(limit=3) # we see new lists
        ======================================================================
        Type                    Old_ids  Current_ids      New_ids Count_Deltas
        ======================================================================
        list                        324          326           +3           +2
        dict                       1125         1125           +0           +0
        wrapper_descriptor         1001         1001           +0           +0
        ======================================================================
        >>> new_lists = at_addrs(new_ids['list'])
        >>> a in new_lists
        True
        >>> b in new_lists
        True

    .. versionadded:: 3.4
    """
    if not _state:
        _state['old'] = collections.defaultdict(set)
        _state['current'] = collections.defaultdict(set)
        _state['new'] = collections.defaultdict(set)
        _state['shortnames'] = True
    new_ids = _state['new']
    if skip_update:
        return new_ids
    old_ids = _state['old']
    current_ids = _state['current']
    if shortnames is None:
        shortnames = _state['shortnames']
    else:
        _state['shortnames'] = shortnames
    gc.collect()
    objects = gc.get_objects()
    for class_name in old_ids:
        old_ids[class_name].clear()
    for class_name, ids_set in current_ids.items():
        old_ids[class_name].update(ids_set)
    for class_name in current_ids:
        current_ids[class_name].clear()
    for o in objects:
        if shortnames:
            class_name = _short_typename(o)
        else:
            class_name = _long_typename(o)
        id_number = id(o)
        current_ids[class_name].add(id_number)
    for class_name in new_ids:
        new_ids[class_name].clear()
    rows = []
    keys_to_remove = []
    for class_name in current_ids:
        num_old = len(old_ids[class_name])
        num_current = len(current_ids[class_name])
        if num_old == 0 and num_current == 0:
            # remove the key from our dicts if we don't have any old or
            # current class_name objects
            keys_to_remove.append(class_name)
            continue
        new_ids_set = current_ids[class_name] - old_ids[class_name]
        new_ids[class_name].update(new_ids_set)
        num_new = len(new_ids_set)
        num_delta = num_current - num_old
        row = (class_name, num_old, num_current, num_new, num_delta)
        rows.append(row)
    for key in keys_to_remove:
        del old_ids[key]
        del current_ids[key]
        del new_ids[key]
    index_by_sortby = {'old': 1, 'current': 2, 'new': 3, 'deltas': 4}
    rows.sort(key=operator.itemgetter(index_by_sortby[sortby], 0),
              reverse=True)
    if limit is not None:
        rows = rows[:limit]
    if not rows:
        return new_ids
    if file is None:
        file = sys.stdout
    width = max(len(row[0]) for row in rows)
    print('=' * (width + 13 * 4), file=file)
    print('%-*s%13s%13s%13s%13s' %
          (width, 'Type', 'Old_ids', 'Current_ids', 'New_ids', 'Count_Deltas'),
          file=file)
    print('=' * (width + 13 * 4), file=file)
    for row_class, old, current, new, delta in rows:
        print('%-*s%13d%13d%+13d%+13d' %
              (width, row_class, old, current, new, delta),
              file=file)
    print('=' * (width + 13 * 4), file=file)
    return new_ids
Example #35
def _find_objects(instance_type):
    """ Find objects by type.
    """
    return [o for o in gc.get_objects() if isinstance(o, instance_type)]
Example #36
def dump_info(signal=None,
              frame=None,
              file=sys.stdout,
              testing=False):  # pragma: no cover
    print("****************************************************", file=file)
    print("Summary", file=file)
    print("=======", file=file)

    try:
        import psutil
    except:
        print("(psutil not installed, skipping some debug info)", file=file)
    else:
        p = psutil.Process()
        print("num threads: ", p.num_threads(), file=file)
        if hasattr(p, "num_fds"):
            print("num fds: ", p.num_fds(), file=file)
        print("memory: ", p.memory_info(), file=file)

        print(file=file)
        print("Files", file=file)
        print("=====", file=file)
        for i in p.open_files():
            print(i, file=file)

        print(file=file)
        print("Connections", file=file)
        print("===========", file=file)
        for i in p.connections():
            print(i, file=file)

    print(file=file)
    print("Threads", file=file)
    print("=======", file=file)
    bthreads = []
    for i in threading.enumerate():
        if hasattr(i, "_threadinfo"):
            bthreads.append(i)
        else:
            print(i.name, file=file)
    bthreads.sort(key=lambda x: x._thread_started)
    for i in bthreads:
        print(i._threadinfo(), file=file)

    print(file=file)
    print("Memory", file=file)
    print("=======", file=file)
    gc.collect()
    d = {}
    for i in gc.get_objects():
        t = str(type(i))
        if "mitmproxy" in t or "netlib" in t:
            d[t] = d.setdefault(t, 0) + 1
    itms = list(d.items())
    itms.sort(key=lambda x: x[1])
    for i in itms[-20:]:
        print(i[1], i[0], file=file)
    print("****************************************************", file=file)

    if not testing:
        sys.exit(1)
Example #37
        dtl.loadTestsFromModule(test_interactivetree),
        dtl.loadTestsFromModule(test_iterators_lists),
        dtl.loadTestsFromModule(test_matching),
        dtl.loadTestsFromModule(test_pickling),
        dtl.loadTestsFromModule(test_names),
        dtl.loadTestsFromModule(test_properties),
        dtl.loadTestsFromModule(test_retrieval),
        dtl.loadTestsFromModule(test_setting),
        dtl.loadTestsFromModule(test_update),
        dtl.loadTestsFromModule(test_regressions)
    ])

    if '--verbose' in sys.argv:

        import gc
        start_count = len(gc.get_objects())

        print "Running tests with base TreeDict module."
        common._inheritance_level = 0
        unittest.TextTestRunner(verbosity=2).run(ts)
        gc.collect()

        print "Running tests with inherited class."
        common._inheritance_level = 1
        unittest.TextTestRunner(verbosity=2).run(ts)
        gc.collect()

        print "Running tests with twice-inherited class."
        common._inheritance_level = 2
        unittest.TextTestRunner(verbosity=2).run(ts)
        gc.collect()
Example #38
    def doMagicWord(self, word, avId, zoneId):
        wordIs = self.getWordIs(word)

        print word
        if wordIs("~oobe"):
            base.oobe()
        elif wordIs("~oobeCull"):
            base.oobeCull()
        elif wordIs("~tex"):
            self.doTex(word)
        elif wordIs("~texmem"):
            base.toggleTexMem()
        elif wordIs("~verts"):
            base.toggleShowVertices()
        elif wordIs("~wire"):
            base.toggleWireframe()
        elif wordIs("~stereo"):
            base.toggleStereo()
        elif wordIs("~showfont"):
            self.showfont(word[9:])
        elif wordIs("~hidefont"):
            self.hidefont()
        elif wordIs("~guiPopup"):
            self.toggleGuiPopup()

        elif wordIs("~showCS") or wordIs("~showcs"):
            bitmask = self.getCSBitmask(word[7:])
            render.showCS(bitmask)
            self.csShown = 1

        elif wordIs("~hideCS") or wordIs("~hidecs"):
            bitmask = self.getCSBitmask(word[7:])
            render.hideCS(bitmask)
            self.csShown = 0

        elif wordIs("~cs"):
            # Toggle hide/show collision solids:
            # (Also a shorthand for ~hideCS and ~showCS).
            bitmask = self.getCSBitmask(word[3:])
            if self.csShown:
                render.hideCS(bitmask)
                self.csShown = 0
            else:
                render.showCS(bitmask)
                self.csShown = 1

        elif wordIs("~showShadowCollisions"):
            self.showShadowCollisions()

        elif wordIs("~hideShadowCollisions"):
            self.hideShadowCollisions()

        elif wordIs("~showCollisions"):
            self.showCollisions()

        elif wordIs("~hideCollisions"):
            self.hideCollisions()

        elif wordIs("~showCameraCollisions"):
            self.showCameraCollisions()

        elif wordIs("~hideCameraCollisions"):
            self.hideCameraCollisions()

        elif wordIs("~collidespam"):
            n = Notify.ptr().getCategory(':collide')
            if hasattr(self, '_collideSpamSeverity'):
                n.setSeverity(self._collideSpamSeverity)
                del self._collideSpamSeverity
            else:
                self._collideSpamSeverity = n.getSeverity()
                n.setSeverity(NSSpam)

        elif wordIs("~notify"):
            args = word.split()
            n = Notify.ptr().getCategory(args[1])
            n.setSeverity({
                'error': NSError,
                'warning': NSWarning,
                'info': NSInfo,
                'debug': NSDebug,
                'spam': NSSpam,
            }[args[2]])

        # MPG we probably need generic versions of these
        #elif wordIs("~listen"):
        #    base.localAvatar.garbleChat = 0

        #elif wordIs("~nochat") or wordIs("~chat") or wordIs("~superchat"):
        #    base.localAvatar.garbleChat = 1

        elif wordIs("~stress"):
            factor = word[7:]
            if factor:
                factor = float(factor)
                LOD.setStressFactor(factor)
                response = "Set LOD stress factor to %s" % (factor)
            else:
                factor = LOD.getStressFactor()
                response = "LOD stress factor is %s" % (factor)

            self.setMagicWordResponse(response)

        elif wordIs("~for"):
            self.forAnother(word, avId, zoneId)

        elif wordIs("~badname"):
            # ~badname with an argument becomes ~for ... ~badname
            word = "~for %s ~badname" % (word[9:])
            print "word is %s" % (word)
            self.forAnother(word, avId, zoneId)

        elif wordIs('~avId'):
            self.setMagicWordResponse(str(localAvatar.doId))

        elif wordIs("~doId"):
            name = string.strip(word[6:])

            objs = self.identifyDistributedObjects(name)
            if (len(objs) == 0):
                response = "%s is unknown." % (name)
            else:
                response = ""
                for name, obj in objs:
                    response += "\n%s %d" % (name, obj.doId)
                response = response[1:]

            self.setMagicWordResponse(response)

        # MPG - need generic versions of these
        #elif wordIs("~collisions_on"):
        #    base.localAvatar.collisionsOn()

        #elif wordIs("~collisions_off"):
        #    base.localAvatar.collisionsOff()

        #elif wordIs('~addCameraPosition'):
        #    base.localAvatar.addCameraPosition()

        #elif wordIs('~removeCameraPosition'):
        #    base.localAvatar.removeCameraPosition()

        #elif wordIs('~printCameraPosition'):
        #    base.localAvatar.printCameraPosition(
        #        base.localAvatar.cameraIndex)

        #elif wordIs('~printCameraPositions'):
        #    base.localAvatar.printCameraPositions()

        elif wordIs("~exec"):
            # Enable execChat.
            from otp.chat import ChatManager
            ChatManager.ChatManager.execChat = 1

        elif wordIs("~run"):
            self.toggleRun()

        elif wordIs("~runFaster"):
            if (config.GetBool("want-running", 1)):
                args = word.split()
                if (len(args) > 1):
                    base.debugRunningMultiplier = float(args[1])
                else:
                    base.debugRunningMultiplier = 10
                inputState.set("debugRunning", True)

        elif wordIs("~who"):
            # Get all the nearby avIds and send them to the AI.
            avIds = []
            for av in Avatar.Avatar.ActiveAvatars:
                # If the avatar has a friends list, it's probably a
                # real avatar and not an NPC.
                if hasattr(av, "getFriendsList"):
                    avIds.append(av.doId)
            self.d_setWho(avIds)

        elif wordIs("~sync"):
            # Sync with the AI, like F6, but rather than accumulating
            # sync information, throw away whatever information was
            # there from before.  If a second parameter is supplied,
            # it is a number of seconds of temporary extra skew to
            # apply; the default is 0.

            tm = self.cr.timeManager
            if tm is None:
                response = "No TimeManager."
                self.setMagicWordResponse(response)
            else:
                tm.extraSkew = 0.0
                skew = string.strip(word[5:])
                if skew != "":
                    tm.extraSkew = float(skew)
                globalClockDelta.clear()
                tm.handleHotkey()

        elif wordIs("~period"):
            # Reset the period timer to expire in the indicated number
            # of seconds, or with no parameter, report the number of
            # seconds remaining.

            timeout = string.strip(word[7:])
            if timeout != "":
                seconds = int(timeout)
                self.cr.stopPeriodTimer()
                self.cr.resetPeriodTimer(seconds)
                self.cr.startPeriodTimer()

            # Now report the number of seconds remaining.
            if self.cr.periodTimerExpired:
                response = "Period timer has expired."

            elif self.cr.periodTimerStarted:
                elapsed = globalClock.getFrameTime(
                ) - self.cr.periodTimerStarted
                secondsRemaining = self.cr.periodTimerSecondsRemaining - elapsed
                response = "Period timer expires in %s seconds." % (
                    int(secondsRemaining))
            else:
                response = "Period timer not set."

            self.setMagicWordResponse(response)

        elif wordIs("~DIRECT"):
            args = word.split()
            fEnableLight = 0
            if len(args) > 1:
                if direct and (args[1] == 'CAM'):
                    direct.enable()
                    taskMgr.removeTasksMatching('updateSmartCamera*')
                    camera.wrtReparentTo(render)
                    direct.cameraControl.enableMouseFly()
                    self.setMagicWordResponse("Enabled DIRECT camera")
                    return
                elif args[1] == 'LIGHT':
                    fEnableLight = 1
            # Start up DIRECT
            base.startTk()
            from direct.directtools import DirectSession
            if fEnableLight:
                direct.enableLight()
            else:
                direct.enable()
            self.setMagicWordResponse("Enabled DIRECT")

        elif wordIs("~TT"):
            if not direct:
                return
            args = word.split()
            if len(args) > 1:
                if (args[1] == 'CAM'):
                    direct.cameraControl.disableMouseFly()
                    camera.wrtReparentTo(base.localAvatar)
                    base.localAvatar.startUpdateSmartCamera()
                    self.setMagicWordResponse("Disabled DIRECT camera")
                    return
            # Return to toontown mode
            direct.disable()
            camera.wrtReparentTo(base.localAvatar)
            base.localAvatar.startUpdateSmartCamera()
            self.setMagicWordResponse("Disabled DIRECT")

        elif wordIs("~net"):
            # Simulate pulling or restoring the network plug.
            if self.cr.networkPlugPulled():
                self.cr.restoreNetworkPlug()
                self.cr.startHeartbeat()
                response = "Network restored."
            else:
                self.cr.pullNetworkPlug()
                self.cr.stopHeartbeat()
                response = "Network disconnected."
            self.setMagicWordResponse(response)

        elif wordIs('~disconnect'):
            # force a simulated disconnect
            # you can also do this from the OTP webpage
            base.cr.distributedDistrict.sendUpdate('broadcastMessage')

        elif wordIs("~model"):
            # load a model into the scene graph at the location of localAvatar
            args = word.split()
            path = args[1]
            model = loader.loadModel(path)
            model.reparentTo(localAvatar)
            model.wrtReparentTo(render)
            self.setMagicWordResponse('loaded %s' % path)

        elif wordIs("~axis"):
            # Show a 10 foot and 100 foot axis at the spot of the avatar
            # axis aligned to render
            axis = loader.loadModel("models/misc/xyzAxis.bam")
            axis.reparentTo(render)
            axis.setPos(base.localAvatar, 0, 0, 0)
            axis.setHpr(render, 0, 0, 0)
            axis10 = loader.loadModel("models/misc/xyzAxis.bam")
            axis10.reparentTo(render)
            axis10.setPos(base.localAvatar, 0, 0, 0)
            axis10.setScale(10)
            axis10.setHpr(render, 0, 0, 0)
            axis10.setColorScale(1, 1, 1, 0.4)
            axis10.setTransparency(1)

        elif (wordIs("~clearAxes") or wordIs("~clearAxis")):
            # Remove the effects of ~axis calls
            render.findAllMatches("**/xyzAxis.egg").detach()

        elif wordIs("~myAxis"):
            if hasattr(self, 'myAxis'):
                self.myAxis.detachNode()
                del self.myAxis
            else:
                self.myAxis = loader.loadModel("models/misc/xyzAxis.bam")
                self.myAxis.reparentTo(localAvatar)

        elif (wordIs("~osd")):
            onScreenDebug.enabled = not onScreenDebug.enabled

        elif wordIs("~osdScale"):
            args = word.split()
            defScale = .05
            if len(args) > 1:
                scale = float(args[1])
            else:
                scale = 1.
            onScreenDebug.onScreenText.setScale(defScale * scale)

        elif wordIs('~osdTaskMgr'):
            if taskMgr.osdEnabled():
                taskMgr.stopOsd()
            else:
                if not onScreenDebug.enabled:
                    onScreenDebug.enabled = True
                taskMgr.startOsd()

        elif wordIs("~fps"):
            self.doFps(word, avId, zoneId)

        elif wordIs("~sleep"):
            args = word.split()
            if len(args) > 1:
                s = float(args[1])
                base.setSleep(s)
                response = 'sleeping %s' % s
            else:
                base.setSleep(0.0)
                response = 'not sleeping'
            self.setMagicWordResponse(response)

        elif wordIs('~objects'):
            args = word.split()
            from direct.showbase import ObjectReport
            report = ObjectReport.ObjectReport('client ~objects')

            if 'all' in args:
                self.notify.info('printing full object set...')
                report.getObjectPool().printObjsByType(
                    printReferrers='ref' in args)

            if hasattr(self, 'baselineObjReport'):
                self.notify.info(
                    'calculating diff from baseline ObjectReport...')
                self.lastDiff = self.baselineObjReport.diff(report)
                self.lastDiff.printOut(full=('diff' in args or 'dif' in args))

            if 'baseline' in args or not hasattr(self, 'baselineObjReport'):
                self.notify.info('recording baseline ObjectReport...')
                if hasattr(self, 'baselineObjReport'):
                    self.baselineObjReport.destroy()
                self.baselineObjReport = report

            self.setMagicWordResponse('objects logged')

        elif wordIs('~objecthg'):
            import gc
            objs = gc.get_objects()
            type2count = {}
            for obj in objs:
                tn = safeTypeName(obj)
                type2count.setdefault(tn, 0)
                type2count[tn] += 1
            count2type = invertDictLossless(type2count)
            counts = count2type.keys()
            counts.sort()
            counts.reverse()
            for count in counts:
                print '%s: %s' % (count, count2type[count])
            self.setMagicWordResponse('~aiobjecthg complete')

        elif wordIs('~containers'):
            args = word.split()
            limit = 30
            if 'full' in args:
                limit = None
            ContainerReport.ContainerReport('~containers',
                                            log=True,
                                            limit=limit,
                                            threaded=True)

        elif wordIs('~garbage'):
            args = word.split()
            # it can take a LOOONG time to print out the garbage referrers and referents
            # by reference (as opposed to by number)
            full = 'full' in args
            safeMode = 'safe' in args
            delOnly = 'delonly' in args
            # This does a garbage collection and dumps the list of leaked (uncollectable) objects to the log.
            GarbageReport.GarbageLogger('~garbage',
                                        fullReport=full,
                                        threaded=True,
                                        safeMode=safeMode,
                                        delOnly=delOnly,
                                        doneCallback=self.garbageReportDone)
            # this is coming back from the AI
            #self.setMagicWordResponse('garbage logged')

        elif wordIs('~guicreates'):
            base.printGuiCreates = True
            self.setMagicWordResponse('printing gui creation stacks')

        elif wordIs("~creategarbage"):
            GarbageReport._createGarbage()
            # this is coming back from the AI
            #self.setMagicWordResponse(senderId, 'leaked garbage created')

        elif wordIs('~leakTask'):

            def leakTask(task):
                return task.cont

            taskMgr.add(leakTask, uniqueName('leakedTask'))
            leakTask = None
            # this is coming back from the AI
            #self.setMagicWordResponse(senderId, 'leaked task created')

        elif wordIs('~leakmessage'):
            MessengerLeakDetector._leakMessengerObject()
            self.down_setMagicWordResponse(senderId,
                                           'messenger leak object created')

        elif wordIs('~pstats'):
            args = word.split()
            hostname = None
            port = None
            if len(args) > 1:
                hostname = args[1]
            if len(args) > 2:
                port = int(args[2])
            # make sure pstats is enabled
            base.wantStats = 1
            Task.TaskManager.pStatsTasks = 1
            result = base.createStats(hostname, port)
            connectionName = '%s' % hostname
            if port is not None:
                connectionName += ':%s' % port
            if result:
                response = 'connected client pstats to %s' % connectionName
            else:
                response = 'could not connect pstats to %s' % connectionName
            self.setMagicWordResponse(response)

        elif wordIs('~profile'):
            args = word.split()
            if len(args) > 1:
                num = int(args[1])
            else:
                num = 5
            session = taskMgr.getProfileSession('~profile')
            session.setLogAfterProfile(True)
            taskMgr.profileFrames(num, session)
            self.setMagicWordResponse('profiling %s client frames...' % num)

        elif wordIs('~frameprofile'):
            args = word.split()
            wasOn = bool(taskMgr.getProfileFrames())
            if len(args) > 1:
                setting = bool(int(args[1]))
            else:
                setting = not wasOn
            taskMgr.setProfileFrames(setting)
            self.setMagicWordResponse(
                'frame profiling %s%s' %
                (choice(setting, 'ON',
                        'OFF'), choice(wasOn == setting, ' already', '')))

        elif wordIs('~taskprofile'):
            args = word.split()
            wasOn = bool(taskMgr.getProfileTasks())
            if len(args) > 1:
                setting = bool(int(args[1]))
            else:
                setting = not wasOn
            taskMgr.setProfileTasks(setting)
            self.setMagicWordResponse(
                'task profiling %s%s' %
                (choice(setting, 'ON',
                        'OFF'), choice(wasOn == setting, ' already', '')))

        elif wordIs('~taskspikethreshold'):
            args = word.split()
            if len(args) > 1:
                threshold = float(args[1])
                response = 'task spike threshold set to %ss' % threshold
            else:
                threshold = TaskProfiler.GetDefaultSpikeThreshold()
                response = 'task spike threshold reset to %ss' % threshold
            TaskProfiler.SetSpikeThreshold(threshold)
            self.setMagicWordResponse(response)

        elif wordIs('~logtaskprofiles'):
            args = word.split()
            if len(args) > 1:
                name = args[1]
            else:
                name = None
            taskMgr.logTaskProfiles(name)
            response = 'logged task profiles%s' % choice(
                name, ' for %s' % name, '')
            self.setMagicWordResponse(response)

        elif wordIs('~taskprofileflush'):
            args = word.split()
            if len(args) > 1:
                name = args[1]
            else:
                name = None
            taskMgr.flushTaskProfiles(name)
            response = 'flushed AI task profiles%s' % choice(
                name, ' for %s' % name, '')
            self.setMagicWordResponse(response)

        elif wordIs('~objectcount'):
            base.cr.printObjectCount()
            self.setMagicWordResponse(
                'logging client distributed object count...')

        elif wordIs('~taskmgr'):
            print taskMgr
            self.setMagicWordResponse('logging client taskMgr...')

        elif wordIs('~jobmgr'):
            print jobMgr
            self.setMagicWordResponse('logging client jobMgr...')

        elif wordIs('~jobtime'):
            args = word.split()
            if len(args) > 1:
                time = float(args[1])
            else:
                time = None
            response = ''
            if time is None:
                time = jobMgr.getDefaultTimeslice()
                response = 'reset client jobMgr timeslice to %s ms' % time
            else:
                response = 'set client jobMgr timeslice to %s ms' % time
                time = time / 1000.
            jobMgr.setTimeslice(time)
            self.setMagicWordResponse(response)

        elif wordIs('~detectleaks'):
            started = self.cr.startLeakDetector()
            self.setMagicWordResponse(
                choice(
                    started,
                    'leak detector started',
                    'leak detector already started',
                ))

        elif wordIs('~taskthreshold'):
            args = word.split()
            if len(args) > 1:
                threshold = float(args[1])
            else:
                threshold = None
            response = ''
            if threshold is None:
                threshold = taskMgr.DefTaskDurationWarningThreshold
                response = 'reset task duration warning threshold to %s' % threshold
            else:
                response = 'set task duration warning threshold to %s' % threshold
            taskMgr.setTaskDurationWarningThreshold(threshold)
            self.setMagicWordResponse(response)

        elif wordIs('~messenger'):
            print messenger
            self.setMagicWordResponse('logging client messenger...')

        elif wordIs('~clientcrash'):
            # if we call notify.error directly, the magic word mgr will catch it
            # self.notify.error doesn't seem to work either
            DelayedCall(
                Functor(self.notify.error,
                        '~clientcrash: simulating a client crash'))

        elif wordIs('~badDelete'):
            doId = 0
            while doId in base.cr.doId2do:
                doId += 1
            # location (0,0) is special, pass in (1,1)
            # deleteObjectLocation expects a DO, pass in a ScratchPad instead
            # we must delay the call because magicWordMgr is in a big try/except block
            DelayedCall(
                Functor(base.cr.deleteObjectLocation, ScratchPad(doId=doId), 1,
                        1))
            self.setMagicWordResponse('doing bad delete')

        elif wordIs("~idTags"):
            messenger.send('nameTagShowAvId', [])
            base.idTags = 1

        elif wordIs("~nameTags"):
            messenger.send('nameTagShowName', [])
            base.idTags = 0

        elif wordIs("~hideNames"):
            # note do ~hideNames before ~hideGui if you want both off
            if NametagGlobals.getMasterNametagsVisible():
                NametagGlobals.setMasterNametagsVisible(0)
            else:
                NametagGlobals.setMasterNametagsVisible(1)

        elif wordIs("~hideGui"):
            if aspect2d.isHidden():
                aspect2d.show()
            else:
                aspect2d.hide()

        elif wordIs('~flush'):
            base.cr.doDataCache.flush()
            base.cr.cache.flush()
            self.setMagicWordResponse('client object and data caches flushed')

        elif wordIs('~prof'):
            import time

            ### set up ###
            name = 'default'
            p = Point3()
            ##############

            ts = time.time()
            for i in xrange(1000000):

                ### code to be timed ###
                p.set(1, 2, 3)
                ########################

            tf = time.time()
            dt = tf - ts
            response = 'prof(%s): %s secs' % (name, dt)
            print response
            self.setMagicWordResponse(response)

        elif wordIs('~gptc'):
            args = word.split()
            if len(args) > 1 and hasattr(self.cr, 'leakDetector'):
                gptcJob = self.cr.leakDetector.getPathsToContainers(
                    '~gptc', args[1], Functor(self._handleGPTCfinished,
                                              args[1]))
            else:
                self.setMagicWordResponse('error')

        elif wordIs('~gptcn'):
            args = word.split()
            if len(args) > 1 and hasattr(self.cr, 'leakDetector'):
                gptcnJob = self.cr.leakDetector.getPathsToContainersNamed(
                    '~gptcn', args[1],
                    Functor(self._handleGPTCNfinished, args[1]))
            else:
                self.setMagicWordResponse('error')

        else:
            # Not a magic word I know!
            return 0

        return 1
Example #39
load_file_in_context('script.py')
import gc
import re

objs = gc.get_objects()[:]
blockchain_objects = []

for obj in objs:
    if isinstance(obj, Blockchain):
        blockchain_objects.append(obj)

blockchain = blockchain_objects[0]

# checking for fake transactions
if blockchain.chain[2].transactions != fake_transactions:
    fail_tests(
        'Did you modify the transactions to fake_transactions in the second block that was added?'
    )

with open('script.py', 'r') as file:
    if re.search('(local_blockchain)(\\.)(validate_chain)(\\(\\))',
                 file.read()):
        pass_tests()
    else:
        fail_tests(
            'Did you use the correct method to validate the blockchain?')
Example #40
#!/usr/bin/env python3.8
# using_gc.py
import gc

found_objects = gc.get_objects()
print('Before:', len(found_objects))

import waste_memory

hold_reference = waste_memory.run()

found_objects = gc.get_objects()
print('After: ', len(found_objects))
for obj in found_objects[:3]:
    print(repr(obj)[:100])
Example #41
    def debug_mem(self):
        self.logger.info("self.proxy.recorded_url_q.qsize()=%s",
                         self.proxy.recorded_url_q.qsize())
        with open("/proc/self/status") as f:
            for line in f:
                fields = line.split()
                if len(fields) >= 2:
                    k, v = fields[0:2]
                if k == "VmHWM:":
                    hwm = int(v)
                elif k == "VmRSS:":
                    rss = int(v)
                elif k == "VmData:":
                    data = int(v)
                elif k == "VmStk:":
                    stk = int(v)
        self.logger.info("rss=%s data=%s stack=%s hwm=%s", rss, data, stk, hwm)
        self._last_rss = self._last_rss or rss  # to set initial value

        if rss - self._last_rss > 1024:
            num_unreachable = gc.collect()
            all_objects = gc.get_objects()
            total_size = 0
            summary = {}
            biggest_objects = [None] * 10
            for obj in all_objects:
                size = sys.getsizeof(obj)
                total_size += size
                if not type(obj) in summary:
                    summary[type(obj)] = {"count": 0, "size": 0}
                summary[type(obj)]["count"] += 1
                summary[type(obj)]["size"] += size
                if size > sys.getsizeof(biggest_objects[-1]):
                    for i in range(len(biggest_objects)):
                        if size > sys.getsizeof(biggest_objects[i]):
                            index = i
                            break
                    biggest_objects[index + 1:] = biggest_objects[index:-1]
                    biggest_objects[index] = obj

            self.logger.info("%s objects totaling %s bytes", len(all_objects),
                             total_size)

            self.logger.info("=== biggest types ===")
            for item in sorted(summary.items(),
                               key=lambda item: item[1]["size"],
                               reverse=True)[:10]:
                self.logger.info("%s bytes in %s instances of %s",
                                 item[1]["size"], item[1]["count"], item[0])

            self.logger.info("=== warcprox types ===")
            for t in (t for t in summary if str(t).find("warcprox") >= 0):
                self.logger.info("%s bytes in %s instances of %s",
                                 summary[t]["size"], summary[t]["count"], t)

            for i in range(len(biggest_objects)):
                obj = biggest_objects[i]
                try:
                    value = repr(bytes(obj.getbuffer()[:100]))
                except Exception:
                    try:
                        value = repr(obj)[:100]
                    except BaseException as e:
                        value = "<{} getting value>".format(e)
                self.logger.info("#%s (%s) (%s bytes) (%s refs) (id=%s): %s",
                                 i + 1, type(obj), sys.getsizeof(obj),
                                 sys.getrefcount(obj), id(obj), value)
            self.logger.info("%s unreachable objects totaling %s bytes",
                             len(gc.garbage),
                             sum(sys.getsizeof(x) for x in gc.garbage))

        self._last_rss = rss
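
# A hedged alternative to the manual shift-insert above: heapq.nlargest
# selects the top-N gc-tracked objects by shallow size in a few lines.
import gc
import heapq
import sys

def biggest(n=10):
    # sys.getsizeof reports shallow size only, as in debug_mem above.
    return heapq.nlargest(n, gc.get_objects(), key=sys.getsizeof)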
Example #42
0
            d[c].setParent(d['a'])
        for key in d.keys()[:]:
            d[key].destroy()
    else:
        # testing with gui
        from direct.directbase import DirectStart
        x = DirectTree(treeStructure=d['a'])
        x.render()
        x.update()
        x.treeStructure = d['C']
        x.render()
        x.update()
        x.destroy()

    import gc
    print len(gc.get_objects())
    del d
    gc.collect()
    gc.collect()
    print len(gc.get_objects())
    gc.collect()
    gc.collect()
    print len(gc.get_objects())
    for i in xrange(100):
        gc.collect()
    print len(gc.get_objects())

if __name__ == '__main__' and False:
    print "testing tree structure"

    from string import ascii_letters
Example #43
0
import gc

def get_instances(cls):
    return [x for x in gc.get_objects() if isinstance(x, cls)]
Example #44
0
# 'free' from the Class

s1 = Student("Virginia Balseiro", "yesvirginia [Gold] [Volunteer]", "pasta",
             "moving to Europe")

s2 = Student("Deb Cupitt", "deb[Gold]", "chocolate", "gender equity")

s3 = Student("Marta Bodojra", "marta [Gold] [Volunteer]", "dark chocolate",
             "become a developer and help all of you to do it together!😉")

# for attr, value in s1.__dict__.items():
#     print(attr, value)

# s1.my_iter()

for obj in gc.get_objects():
    if isinstance(obj, Student):
        obj.my_print()

# s2 = Student("Andreea[Gold]")

# s3 = Student("CristyTarantino[Gold]")

# dot notation
# print(s1.name)

# s1.my_print()

# s1.fav_food = "ice cream"

# s1.my_print()
Example #45
0
import gc

def object_by_id(object_id):
    for obj in gc.get_objects():
        if id(obj) == object_id:
            return "Object exists"
    return "Not found"
Example #46
0
import gc

def _find_objects(t):
    return [o for o in gc.get_objects() if isinstance(o, t)]

def count_instances_of(cls):
    return sum(isinstance(obj, cls) for obj in gc.get_objects())
Example #48
0
    def actionStats(self):
        import gc
        import sys
        from Ui import UiRequest
        from Crypt import CryptConnection

        hpy = None
        if self.get.get("size") == "1":  # Calc obj size
            try:
                import guppy
                hpy = guppy.hpy()
            except:
                pass
        self.sendHeader()
        s = time.time()
        main = sys.modules["main"]

        # Style
        yield """
        <style>
         * { font-family: monospace }
         table td, table th { text-align: right; padding: 0px 10px }
        </style>
        """

        # Memory
        try:
            yield "rev%s | " % config.rev
            yield "%s | " % config.ip_external
            yield "Opened: %s | " % main.file_server.port_opened
            yield "Crypt: %s | " % CryptConnection.manager.crypt_supported
            yield "In: %.2fMB, Out: %.2fMB  | " % (
                float(main.file_server.bytes_recv) / 1024 / 1024,
                float(main.file_server.bytes_sent) / 1024 / 1024
            )
            yield "Peerid: %s  | " % main.file_server.peer_id
            import psutil
            process = psutil.Process(os.getpid())
            mem = process.get_memory_info()[0] / float(2 ** 20)
            yield "Mem: %.2fMB | " % mem
            yield "Threads: %s | " % len(process.threads())
            yield "CPU: usr %.2fs sys %.2fs | " % process.cpu_times()
            yield "Files: %s | " % len(process.open_files())
            yield "Sockets: %s | " % len(process.connections())
            yield "Calc size <a href='?size=1'>on</a> <a href='?size=0'>off</a>"
        except Exception:
            pass
        yield "<br>"

        # Connections
        yield "<b>Connections</b> (%s, total made: %s):<br>" % (
            len(main.file_server.connections), main.file_server.last_connection_id
        )
        yield "<table><tr> <th>id</th> <th>proto</th>  <th>type</th> <th>ip</th> <th>open</th> <th>crypt</th> <th>ping</th>"
        yield "<th>buff</th> <th>idle</th> <th>open</th> <th>delay</th> <th>out</th> <th>in</th> <th>last sent</th>"
        yield "<th>waiting</th> <th>version</th> <th>peerid</th> </tr>"
        for connection in main.file_server.connections:
            if "cipher" in dir(connection.sock):
                cipher = connection.sock.cipher()[0]
            else:
                cipher = connection.crypt
            yield self.formatTableRow([
                ("%3d", connection.id),
                ("%s", connection.protocol),
                ("%s", connection.type),
                ("%s:%s", (connection.ip, connection.port)),
                ("%s", connection.handshake.get("port_opened")),
                ("<span title='%s'>%s</span>", (connection.crypt, cipher)),
                ("%6.3f", connection.last_ping_delay),
                ("%s", connection.incomplete_buff_recv),
                ("since", max(connection.last_send_time, connection.last_recv_time)),
                ("since", connection.start_time),
                ("%.3f", connection.last_sent_time - connection.last_send_time),
                ("%.0fkB", connection.bytes_sent / 1024),
                ("%.0fkB", connection.bytes_recv / 1024),
                ("%s", connection.last_cmd),
                ("%s", connection.waiting_requests.keys()),
                ("%s r%s", (connection.handshake.get("version"), connection.handshake.get("rev", "?"))),
                ("%s", connection.handshake.get("peer_id")),
            ])
        yield "</table>"

        # Sites
        yield "<br><br><b>Sites</b>:"
        yield "<table>"
        yield "<tr><th>address</th> <th>connected</th> <th title='connected/good/total'>peers</th> <th>content.json</th> <th>out</th> <th>in</th>  </tr>"
        for site in self.server.sites.values():
            yield self.formatTableRow([
                (
                    """<a href='#' onclick='document.getElementById("peers_%s").style.display="initial"; return false'>%s</a>""",
                    (site.address, site.address)
                ),
                ("%s", [peer.connection.id for peer in site.peers.values() if peer.connection and peer.connection.connected]),
                ("%s/%s/%s", (
                    len([peer for peer in site.peers.values() if peer.connection and peer.connection.connected]),
                    len(site.getConnectablePeers(100)),
                    len(site.peers)
                )),
                ("%s", len(site.content_manager.contents)),
                ("%.0fkB", site.settings.get("bytes_sent", 0) / 1024),
                ("%.0fkB", site.settings.get("bytes_recv", 0) / 1024),
            ])
            yield "<tr><td id='peers_%s' style='display: none; white-space: pre'>" % site.address
            for key, peer in site.peers.items():
                if peer.time_found:
                    time_found = int(time.time()-peer.time_found)/60
                else:
                    time_found = "--"
                if peer.connection:
                    connection_id = peer.connection.id
                else:
                    connection_id = None
                yield "(#%s, err: %s, found: %s min ago) %22s -<br>" % (connection_id, peer.connection_error, time_found, key)
            yield "<br></td></tr>"
        yield "</table>"

        # No more if not in debug mode
        if not config.debug:
            return

        # Object types
        obj_count = {}
        for obj in gc.get_objects():
            obj_type = str(type(obj))
            if obj_type not in obj_count:
                obj_count[obj_type] = [0, 0]
            obj_count[obj_type][0] += 1  # Count
            obj_count[obj_type][1] += float(sys.getsizeof(obj)) / 1024  # Size

        yield "<br><br><b>Objects in memory (types: %s, total: %s, %.2fkb):</b><br>" % (
            len(obj_count),
            sum([stat[0] for stat in obj_count.values()]),
            sum([stat[1] for stat in obj_count.values()])
        )

        for obj, stat in sorted(obj_count.items(), key=lambda x: x[1][0], reverse=True):  # Sorted by count
            yield " - %.1fkb = %s x <a href=\"/Listobj?type=%s\">%s</a><br>" % (stat[1], stat[0], obj, cgi.escape(obj))

        # Classes

        class_count = {}
        for obj in gc.get_objects():
            obj_type = str(type(obj))
            if obj_type != "<type 'instance'>":
                continue
            class_name = obj.__class__.__name__
            if class_name not in class_count:
                class_count[class_name] = [0, 0]
            class_count[class_name][0] += 1  # Count
            class_count[class_name][1] += float(sys.getsizeof(obj)) / 1024  # Size

        yield "<br><br><b>Classes in memory (types: %s, total: %s, %.2fkb):</b><br>" % (
            len(class_count),
            sum([stat[0] for stat in class_count.values()]),
            sum([stat[1] for stat in class_count.values()])
        )

        for obj, stat in sorted(class_count.items(), key=lambda x: x[1][0], reverse=True):  # Sorted by count
            yield " - %.1fkb = %s x <a href=\"/Dumpobj?class=%s\">%s</a><br>" % (stat[1], stat[0], obj, cgi.escape(obj))

        from greenlet import greenlet
        objs = [obj for obj in gc.get_objects() if isinstance(obj, greenlet)]
        yield "<br>Greenlets (%s):<br>" % len(objs)
        for obj in objs:
            yield " - %.1fkb: %s<br>" % (self.getObjSize(obj, hpy), cgi.escape(repr(obj)))

        from Worker import Worker
        objs = [obj for obj in gc.get_objects() if isinstance(obj, Worker)]
        yield "<br>Workers (%s):<br>" % len(objs)
        for obj in objs:
            yield " - %.1fkb: %s<br>" % (self.getObjSize(obj, hpy), cgi.escape(repr(obj)))

        from Connection import Connection
        objs = [obj for obj in gc.get_objects() if isinstance(obj, Connection)]
        yield "<br>Connections (%s):<br>" % len(objs)
        for obj in objs:
            yield " - %.1fkb: %s<br>" % (self.getObjSize(obj, hpy), cgi.escape(repr(obj)))

        from socket import socket
        objs = [obj for obj in gc.get_objects() if isinstance(obj, socket)]
        yield "<br>Sockets (%s):<br>" % len(objs)
        for obj in objs:
            yield " - %.1fkb: %s<br>" % (self.getObjSize(obj, hpy), cgi.escape(repr(obj)))

        from msgpack import Unpacker
        objs = [obj for obj in gc.get_objects() if isinstance(obj, Unpacker)]
        yield "<br>Msgpack unpacker (%s):<br>" % len(objs)
        for obj in objs:
            yield " - %.1fkb: %s<br>" % (self.getObjSize(obj, hpy), cgi.escape(repr(obj)))

        from Site import Site
        objs = [obj for obj in gc.get_objects() if isinstance(obj, Site)]
        yield "<br>Sites (%s):<br>" % len(objs)
        for obj in objs:
            yield " - %.1fkb: %s<br>" % (self.getObjSize(obj, hpy), cgi.escape(repr(obj)))

        objs = [obj for obj in gc.get_objects() if isinstance(obj, self.server.log.__class__)]
        yield "<br>Loggers (%s):<br>" % len(objs)
        for obj in objs:
            yield " - %.1fkb: %s<br>" % (self.getObjSize(obj, hpy), cgi.escape(repr(obj.name)))

        objs = [obj for obj in gc.get_objects() if isinstance(obj, UiRequest)]
        yield "<br>UiRequests (%s):<br>" % len(objs)
        for obj in objs:
            yield " - %.1fkb: %s<br>" % (self.getObjSize(obj, hpy), cgi.escape(repr(obj)))

        from Peer import Peer
        objs = [obj for obj in gc.get_objects() if isinstance(obj, Peer)]
        yield "<br>Peers (%s):<br>" % len(objs)
        for obj in objs:
            yield " - %.1fkb: %s<br>" % (self.getObjSize(obj, hpy), cgi.escape(repr(obj)))

        objs = [(key, val) for key, val in sys.modules.iteritems() if val is not None]
        objs.sort()
        yield "<br>Modules (%s):<br>" % len(objs)
        for module_name, module in objs:
            yield " - %.3fkb: %s %s<br>" % (self.getObjSize(module, hpy), module_name, cgi.escape(repr(module)))

        gc.collect()  # Explicit garbage collection
        yield "Done in %.1f" % (time.time() - s)
Example #49
0
                inner_cv = StratifiedKFold(n_splits=3,
                                           shuffle=True,
                                           random_state=42)
                outer_cv = StratifiedKFold(n_splits=10,
                                           shuffle=True,
                                           random_state=42)

                # Inner CV.

                gsPipeline = GridSearchCV(pipeline,
                                          param_grid,
                                          cv=inner_cv,
                                          scoring='f1_macro',
                                          n_jobs=-1)

                # Delete objects from the previous iteration.
                gc.collect()
                # Outer CV. gs_lr.fit() gets called in cross_validate.
                cross_validate(gsPipeline,
                               X=Corpus[preprocessing.COMMENT],
                               y=Corpus[preprocessing.CLASS],
                               scoring=utilities.scoringFunction,
                               cv=outer_cv)

                utilities.printAverageValuesOfClassificationReportList(
                    output, parameters, functionalOnlyFlag)
                output.flush()
                print(cnt)
                cnt = cnt + 1
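
# Self-contained nested-CV sketch on toy data; Corpus, preprocessing and
# utilities above are project-specific and are not reproduced here.
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import (GridSearchCV, StratifiedKFold,
                                     cross_validate)

X, y = make_classification(n_samples=200, random_state=42)
inner_cv = StratifiedKFold(n_splits=3, shuffle=True, random_state=42)
outer_cv = StratifiedKFold(n_splits=10, shuffle=True, random_state=42)
gs = GridSearchCV(LogisticRegression(max_iter=1000),
                  {"C": [0.1, 1.0, 10.0]},
                  cv=inner_cv, scoring="f1_macro", n_jobs=-1)
scores = cross_validate(gs, X=X, y=y, scoring="f1_macro", cv=outer_cv)
print(scores["test_score"].mean())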
Example #50
0
        time_pre = time.time()
        proof = SWProofOfRetrievability.genproof(pk, data, authenticators,
                                                 challenge, filehandle)
        time_post = time.time()
        duration = time_post - time_pre
        timings['genproof'][i].append(duration)

        time_pre = time.time()
        SWProofOfRetrievability.verify(proof, pk, challenge, filehandle)
        time_post = time.time()
        duration = time_post - time_pre
        timings['verify'][i].append(duration)

    print("Size of timing object is " + str(asizeof.asizeof(timings)) +
          " Bytes")
    print("Number of gc-tracked objects: " + str(len(gc.get_objects())))

    # Save the data in complicated JSON-format
    with open('timings_SWPoR.json', 'w') as f:
        json_obj = jsonpickle.encode(timings)
        f.write(json_obj)

print("Running postprocessing steps")

# Transform to handy Dataframes
for algo in ['keygen', 'encode', 'genchallenge', 'genproof', 'verify']:
    timings[algo] = pd.DataFrame(timings[algo]).T
    timings[algo].columns = messagesizes

for algo in ['keygen', 'encode', 'genchallenge', 'genproof', 'verify']:
    # Save the data in handy .csv
Example #51
0
def getVariable(dbg, thread_id, frame_id, scope, attrs):
    """
    returns the value of a variable

    :scope: can be BY_ID, EXPRESSION, GLOBAL, LOCAL, FRAME

    BY_ID means we'll traverse the list of all objects alive to get the object.

    :attrs: after reaching the proper scope, we have to get the attributes until we find
            the proper location (i.e.: obj\tattr1\tattr2)

    :note: when BY_ID is used, the frame_id is considered the id of the object to find and
           not the frame (as we don't care about the frame in this case).
    """
    if scope == 'BY_ID':
        if thread_id != get_current_thread_id(threading.currentThread()):
            raise VariableError("getVariable: must execute on same thread")

        try:
            import gc
            objects = gc.get_objects()
        except:
            pass  # Not all python variants have it.
        else:
            frame_id = int(frame_id)
            for var in objects:
                if id(var) == frame_id:
                    if attrs is not None:
                        attrList = attrs.split('\t')
                        for k in attrList:
                            _type, _type_name, resolver = get_type(var)
                            var = resolver.resolve(var, k)

                    return var

        # If it didn't return previously, we couldn't find it by id (i.e.: already garbage collected).
        sys.stderr.write('Unable to find object with id: %s\n' % (frame_id, ))
        return None

    frame = dbg.find_frame(thread_id, frame_id)
    if frame is None:
        return {}

    if attrs is not None:
        attrList = attrs.split('\t')
    else:
        attrList = []

    for i, attr in enumerate(attrList):
        attrList[i] = attr.replace("@_@TAB_CHAR@_@", '\t')

    if scope == 'EXPRESSION':
        for count in xrange(len(attrList)):
            if count == 0:
                # An Expression can be in any scope (globals/locals), therefore it needs to evaluated as an expression
                var = evaluate_expression(dbg, frame, attrList[count], False)
            else:
                _type, _type_name, resolver = get_type(var)
                var = resolver.resolve(var, attrList[count])
    else:
        if scope == "GLOBAL":
            var = frame.f_globals
            # globals are special, and they get a single dummy unused attribute
            del attrList[0]
        else:
            # in a frame access both locals and globals as Python does
            var = {}
            var.update(frame.f_globals)
            var.update(frame.f_locals)

        for k in attrList:
            _type, _type_name, resolver = get_type(var)
            var = resolver.resolve(var, k)

    return var
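
# Condensed sketch of the BY_ID path above: linear scan for the id, then a
# plain getattr walk (the real code dispatches to type-specific resolvers).
import gc

def resolve_by_id(obj_id, attrs=None):
    for var in gc.get_objects():
        if id(var) == obj_id:
            for attr in (attrs.split('\t') if attrs else []):
                var = getattr(var, attr)
            return var
    return None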
Example #52
0
import gc

import torch

def memReport():
    for obj in gc.get_objects():
        if torch.is_tensor(obj) or (hasattr(obj, 'data')
                                    and torch.is_tensor(obj.data)):
            print(type(obj), obj.size())
Example #53
0
import gc
from typing import Any, Iterable, List

import torch as t

def mem_report(print_all: bool = False) -> None:
    """
    Report the memory usage of the tensor.storage in pytorch
    Both on CPUs and GPUs are reported
    if print_all is True, print size and shape info for each tensor
    """
    def _mem_report(tensors: Iterable, mem_type: str) -> None:
        """Print the selected tensors of type

        There are two major storage types in our major concern:
            - GPU: tensors transferred to CUDA devices
            - CPU: tensors remaining on the system memory (usually unimportant)

        Args:
            - tensors: the tensors of specified type
            - mem_type: 'CPU' or 'GPU' in current implementation """
        print(f"Storage on {mem_type}")
        print("-" * LEN)
        total_numel = 0
        total_mem = 0
        visited_data: List[Any] = []
        for tensor in tensors:
            if tensor.is_sparse:
                continue
            # a data_ptr indicates a memory block allocated
            data_ptr = tensor.storage().data_ptr()
            if data_ptr in visited_data:
                continue
            visited_data.append(data_ptr)

            numel = tensor.storage().size()
            total_numel += numel
            element_size = tensor.storage().element_size()
            mem = numel * element_size / 1024 / 1024  # element_size is in bytes; convert to MBytes
            total_mem += mem
            element_type = type(tensor).__name__
            size = tuple(tensor.size())

            if print_all:
                print(f"{element_type}\t\t{size}\t\t{mem}")
        print("-" * LEN)
        print(f"Total Tensors: {total_numel} \tUsed Memory Space: {total_mem}")
        print("-" * LEN)

    LEN = 65
    if print_all:
        print("=" * LEN)
    print("Element type\tSize\t\t\tUsed MEM(MBytes)")
    tensors = []
    for obj in gc.get_objects():
        try:
            if t.is_tensor(obj) or (hasattr(obj, "data")
                                    and t.is_tensor(obj.data)):
                tensors.append(obj)
        except Exception:
            pass
    cuda_tensors = [tensor for tensor in tensors if tensor.is_cuda]
    host_tensors = [tensor for tensor in tensors if not tensor.is_cuda]
    _mem_report(cuda_tensors, "GPU")
    _mem_report(host_tensors, "CPU")
    if print_all:
        print("=" * LEN)
Example #54
0
def debug_memory():
    import collections, gc, torch
    tensors = collections.Counter((str(o.device), o.dtype, tuple(o.shape))
                                  for o in gc.get_objects()
                                  if torch.is_tensor(o))
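
# Usage sketch: the Counter above is most useful as a diff between two
# snapshots, which shows exactly which tensors appeared in between.
import collections, gc, torch

def tensor_census():
    return collections.Counter((str(o.device), o.dtype, tuple(o.shape))
                               for o in gc.get_objects() if torch.is_tensor(o))

baseline = tensor_census()
leak = torch.zeros(8, 8)  # hypothetical workload standing in for real code
print(tensor_census() - baseline)  # Counter({('cpu', torch.float32, (8, 8)): 1})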
Example #55
0
def _get_in_memory_objects():
    """Returns all objects in memory."""
    gc.collect()
    return gc.get_objects()
Example #56
0
import gc
print(len(gc.get_objects()))
import torch
print(len(gc.get_objects()))
g = gc.get_objects()
print(type(g))
for i in g:
    print(i)
Example #57
0
def autoprocess(parallel=1,
                failed_processing=False,
                maxtasksperchild=7,
                memory_debugging=False,
                processing_timeout=300):
    maxcount = cfg.cuckoo.max_analysis_count
    count = 0
    db = Database()
    # pool = multiprocessing.Pool(parallel, init_worker)
    pool = pebble.ProcessPool(max_workers=parallel,
                              max_tasks=maxtasksperchild,
                              initializer=init_worker)
    try:
        memory_limit()
        log.info("Processing analysis data")
        # CAUTION - big ugly loop ahead.
        while count < maxcount or not maxcount:
            # If still full, don't add more (necessary despite pool).
            if len(pending_task_id_map) >= parallel:
                time.sleep(5)
                continue
            if failed_processing:
                tasks = db.list_tasks(status=TASK_FAILED_PROCESSING,
                                      limit=parallel,
                                      order_by=Task.completed_on.asc())
            else:
                tasks = db.list_tasks(status=TASK_COMPLETED,
                                      limit=parallel,
                                      order_by=Task.completed_on.asc())
            added = False
            # Add at most one task per pass so we don't overshoot maxcount.
            for task in tasks:
                # Not-so-efficient lock.
                if pending_task_id_map.get(task.id):
                    continue
                log.info("Processing analysis data for Task #%d", task.id)
                if task.category == "file":
                    sample = db.view_sample(task.sample_id)
                    copy_path = os.path.join(CUCKOO_ROOT, "storage",
                                             "binaries", sample.sha256)
                else:
                    copy_path = None
                args = task.target, copy_path
                kwargs = dict(report=True,
                              auto=True,
                              task=task,
                              memory_debugging=memory_debugging)
                if memory_debugging:
                    gc.collect()
                    log.info("[%d] (before) GC object counts: %d, %d", task.id,
                             len(gc.get_objects()), len(gc.garbage))
                # result = pool.apply_async(process, args, kwargs)
                future = pool.schedule(process,
                                       args,
                                       kwargs,
                                       timeout=processing_timeout)
                pending_future_map[future] = task.id
                pending_task_id_map[task.id] = future
                future.add_done_callback(processing_finished)
                if memory_debugging:
                    gc.collect()
                    log.info("[%d] (after) GC object counts: %d, %d", task.id,
                             len(gc.get_objects()), len(gc.garbage))
                count += 1
                added = True
                break
            if not added:
                # don't hog cpu
                time.sleep(5)
    except KeyboardInterrupt:
        # ToDo verify in finally
        # pool.terminate()
        raise
    except MemoryError:
        mem = get_memory() / 1024 / 1024
        print('Remain: %.2f GB' % mem)
        sys.stderr.write('\n\nERROR: Memory Exception\n')
        sys.exit(1)
    except Exception:
        import traceback
        traceback.print_exc()
    finally:
        pool.close()
        pool.join()
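
# The paired before/after logging above generalizes to a small context
# manager; a sketch, not part of the original module:
import gc
import logging
from contextlib import contextmanager

logger = logging.getLogger(__name__)

@contextmanager
def gc_count_logging(label):
    gc.collect()
    logger.info("[%s] (before) GC object counts: %d, %d",
                label, len(gc.get_objects()), len(gc.garbage))
    try:
        yield
    finally:
        gc.collect()
        logger.info("[%s] (after) GC object counts: %d, %d",
                    label, len(gc.get_objects()), len(gc.garbage))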
Example #58
0
def checkCleanExit():

    # Get the Gaffer and GafferUI modules, but only if the app actually
    # imported them. We don't want to force their importation because it's
    # just a waste of time if they weren't used.
    Gaffer = sys.modules.get("Gaffer")
    GafferUI = sys.modules.get("GafferUI")

    if Gaffer is None and GafferUI is None:
        return

    # Clean up any garbage left behind by Cortex's wrapper mechanism - because
    # the Gaffer.Application itself is derived from IECore.Parameterised, which
    # as far as I can tell is wrapped unnecessarily, we must call this to allow
    # the application to be deleted at all. Note that we're deliberately not also
    # calling gc.collect() - our intention here isn't to clean up on shutdown, but
    # to highlight problems caused by things not cleaning up after themselves during
    # execution. We aim to eliminate all circular references from our code, to avoid
    # garbage collection overhead and to avoid problems caused by referencing Qt widgets
    # which were long since destroyed in C++.
    ## \todo Reevaluate the need for this call after Cortex 9 development.
    IECore.RefCounted.collectGarbage()
    # Importing here rather than at the top of the file prevents false
    # positives being reported in gc.get_objects() below. I have no idea why,
    # but if not imported here, get_objects() will report objects which have
    # nothing referring to them and which should be dead, even with an
    # explicit call to gc.collect() beforehand.
    import gc

    # Check for things that shouldn't exist at shutdown, and
    # warn of anything we find.
    scriptNodes = []
    widgets = []
    for o in gc.get_objects():
        if Gaffer is not None and isinstance(o, Gaffer.ScriptNode):
            scriptNodes.append(o)
        elif GafferUI is not None and isinstance(o, GafferUI.Widget):
            widgets.append(o)

    if scriptNodes:
        IECore.msg(
            IECore.Msg.Level.Debug, "Gaffer shutdown",
            "%d remaining ScriptNode%s detected. Debugging with objgraph is recommended."
            % (
                len(scriptNodes),
                "s" if len(scriptNodes) > 1 else "",
            ))

    if widgets:

        count = {}
        for widget in widgets:
            widgetType = widget.__class__.__name__
            count[widgetType] = count.get(widgetType, 0) + 1

        summaries = ["%s (%d)" % (k, count[k]) for k in sorted(count.keys())]

        IECore.msg(
            IECore.Msg.Level.Debug, "Gaffer shutdown",
            "%d remaining Widget%s detected : \n\n%s\n\nDebugging with objgraph is recommended."
            % (len(widgets), "s" if len(widgets) > 1 else "",
               "\t" + "\n\t".join(summaries)))
Example #59
0
import gc
import io

def close_open_files():
    # Note: this also closes the standard stream wrappers (sys.stdout etc.),
    # since they are io.IOBase instances too.
    everything = gc.get_objects()
    for obj in everything:
        if isinstance(obj, io.IOBase):
            obj.close()
Example #60
0
def process(target=None,
            copy_path=None,
            task=None,
            report=False,
            auto=False,
            capeproc=False,
            memory_debugging=False):
    # This is the results container. It's what will be used by all the
    # reporting modules to make it consumable by humans and machines.
    # It will contain all the results generated by every processing
    # module available. Its structure can be observed through the JSON
    # dump in the analysis' reports folder. (If jsondump is enabled.)
    task_dict = task.to_dict() or {}
    task_id = task_dict.get("id") or 0
    results = {
        "statistics": {
            "processing": [],
            "signatures": [],
            "reporting": []
        }
    }
    if memory_debugging:
        gc.collect()
        log.info("[%s] (1) GC object counts: %d, %d", task_id,
                 len(gc.get_objects()), len(gc.garbage))
    if memory_debugging:
        gc.collect()
        log.info("[%s] (2) GC object counts: %d, %d", task_id,
                 len(gc.get_objects()), len(gc.garbage))
    RunProcessing(task=task_dict, results=results).run()
    if memory_debugging:
        gc.collect()
        log.info("[%s] (3) GC object counts: %d, %d", task_id,
                 len(gc.get_objects()), len(gc.garbage))

    RunSignatures(task=task_dict, results=results).run()
    if memory_debugging:
        gc.collect()
        log.info("[%s] (4) GC object counts: %d, %d", task_id,
                 len(gc.get_objects()), len(gc.garbage))

    if report:
        if repconf.mongodb.enabled:
            host = repconf.mongodb.host
            port = repconf.mongodb.port
            db = repconf.mongodb.db
            conn = MongoClient(host,
                               port=port,
                               username=repconf.mongodb.get("username", None),
                               password=repconf.mongodb.get("password", None),
                               authSource=db)
            mdata = conn[db]
            analyses = mdata.analysis.find({"info.id": int(task_id)})
            if analyses.count() > 0:
                log.debug("Deleting analysis data for Task %s" % task_id)
                for analysis in analyses:
                    for process in analysis["behavior"].get("processes", []):
                        for call in process["calls"]:
                            mdata.calls.remove({"_id": ObjectId(call)})
                    mdata.analysis.remove({"_id": ObjectId(analysis["_id"])})
            conn.close()
            log.debug("Deleted previous MongoDB data for Task %s" % task_id)

        if repconf.elasticsearchdb.enabled and not repconf.elasticsearchdb.searchonly:
            analyses = es.search(index=fullidx,
                                 doc_type="analysis",
                                 q='info.id: "%s"' % task_id)["hits"]["hits"]
            if analyses:
                for analysis in analyses:
                    esidx = analysis["_index"]
                    esid = analysis["_id"]
                    # Check if behavior exists
                    if analysis["_source"]["behavior"]:
                        for process in analysis["_source"]["behavior"][
                                "processes"]:
                            for call in process["calls"]:
                                es.delete(
                                    index=esidx,
                                    doc_type="calls",
                                    id=call,
                                )
                    # Delete the analysis results
                    es.delete(
                        index=esidx,
                        doc_type="analysis",
                        id=esid,
                    )
        if auto or capeproc:
            reprocess = False
        else:
            reprocess = report

        RunReporting(task=task.to_dict(), results=results,
                     reprocess=reprocess).run()
        Database().set_status(task_id, TASK_REPORTED)

        if auto:
            if cfg.cuckoo.delete_original and os.path.exists(target):
                os.unlink(target)

            if cfg.cuckoo.delete_bin_copy and os.path.exists(copy_path):
                os.unlink(copy_path)

    if memory_debugging:
        gc.collect()
        log.info("[%s] (5) GC object counts: %d, %d", task_id,
                 len(gc.get_objects()), len(gc.garbage))
        for i, obj in enumerate(gc.garbage):
            log.info("[%s] (garbage) GC object #%d: type=%s", task_id, i,
                     type(obj).__name__)