def process_response(self, request, response):
    req = request.META['PATH_INFO']
    if req.find('static') == -1 and req.find('media') == -1:
        print(req)
        self.end_objects = muppy.get_objects()
        sum_start = summary.summarize(self.start_objects)
        sum_end = summary.summarize(self.end_objects)
        diff = summary.get_diff(sum_start, sum_end)
        summary.print_(diff)
        # print('~~~~~~~~~')
        # cb = refbrowser.ConsoleBrowser(response, maxdepth=2,
        #                                str_func=output_function)
        # cb.print_tree()
        print('~~~~~~~~~')
        a = asizeof(response)
        print('Total size of response object in kB: %s' % str(a / 1024.0))
        print('~~~~~~~~~')
        a = asizeof(self.end_objects)
        print('Total size of end_objects in MB: %s' % str(a / 1048576.0))
        b = asizeof(self.start_objects)
        print('Total size of start_objects in MB: %s' % str(b / 1048576.0))
        print('~~~~~~~~~')
    return response
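This middleware diffs against self.start_objects, which the excerpt never sets; presumably a matching process_request hook captures the baseline when the request arrives. A minimal sketch of that missing half (class and hook names are assumptions, not from the source):

from pympler import muppy

class MemoryProfilerMiddleware(object):  # hypothetical name
    def process_request(self, request):
        # Baseline snapshot of all live objects, diffed later in process_response().
        self.start_objects = muppy.get_objects()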
Example #2
def file_test(rows=500000, cols=50):
    "File test"
    print("Creating file with {} rows and {} columns".format(rows, cols))
    file = create_file(rows, cols)
    print("Size of the file: {:.2f} MiB".format(getsize(file) / (1024 * 1024)))
    print("Reading file")
    sum1 = summarize(get_objects())
    las = read(file)
    sum2 = summarize(get_objects())
    diff = get_diff(sum1, sum2)
    print_(diff)

    for curve in las.curves:
        print("Name: {}, Min: {:.2f}, Mean: {:.2f}, Max: {:.2f}"
              .format(curve.mnemonic, nanmin(curve.data), nanmean(curve.data),
                      nanmax(curve.data)))

    del las
    las = read(file)
    del las
    las = read(file)
    del las
    las = read(file)
    del las
    print("Happy end")
Example #3
    def __exit__(self, exc_type, exc_val, exc_tb):
        if self.debug:
            try:
                gc.collect()
                end_memory = self.process.memory_info().rss
                net_memory = end_memory-self.start_memory
                if net_memory > 100 * 1000 * 1000:
                    Log.warning(
                        "MEMORY WARNING (additional {{net_memory|comma}}bytes): "+self.description,
                        default_params=self.params,
                        net_memory=net_memory
                    )

                    from pympler import summary
                    from pympler import muppy
                    sum1 = sorted(summary.summarize(muppy.get_objects()), key=lambda r: -r[2])[:30]
                    Log.warning("{{data}}", data=sum1)
                elif end_memory > 1000*1000*1000:
                    Log.warning(
                        "MEMORY WARNING (over {{end_memory|comma}}bytes): "+self.description,
                        default_params=self.params,
                        end_memory=end_memory
                    )

                    from pympler import summary
                    from pympler import muppy
                    sum1 = sorted(summary.summarize(muppy.get_objects()), key=lambda r: -r[2])[:30]
                    Log.warning("{{data}}", data=sum1)

            except Exception as e:
                Log.warning("problem in memory measure", cause=e)
Example #4
    def _get_summaries(function, *args):
        """Get a 2-tuple containing one summary from before, and one summary
        from after the function has been invoked.

        """
        s_before = summary.summarize(get_objects())
        function(*args)
        s_after = summary.summarize(get_objects())
        return (s_before, s_after)
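A quick usage sketch for this helper: because the summaries bracket the call, only objects that survive the call show up in a diff. The workload below is hypothetical:

from pympler import summary

cache = []

def leaky_workload(n):
    # Objects appended here outlive the call, so the diff will show them.
    cache.extend(object() for _ in range(n))

s_before, s_after = _get_summaries(leaky_workload, 100000)
summary.print_(summary.get_diff(s_before, s_after))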
Example #5
def profile_expose_method(profiled_method_wrapper, accept, args, func, kw, exclude_from_memory_profiling):
    """
    Targeted to profile a specific method that wraps HTTP request processing endpoints into database context.  
    :param profiled_method_wrapper: method wrapped around profiled call to be passed in to memory profiler
    :param accept: param specific to profiled call
    :param args: args of a function that is being wrapped by a profiled method
    :param func: function that is being wrapped by a profiled method
    :param kw: kwargs of a function that is being wrapped by a profiled method
    :return: output of a profiled method without modification
    """
    if not exclude_from_memory_profiling and get_memory_profile_logging_on() and \
            check_memory_profile_package_wide_disable(func):
        controller_class = args[0].__class__.__name__ if args and len(args) > 0 else ''
        end_point_name_parts = [s for s in [func.__module__, controller_class, func.__name__] if s != '']
        end_point_name = ".".join(end_point_name_parts)
        is_pympler_on = _is_pympler_profiling_value_on(end_point_name)
        profile_output = {'output': {}}
        if is_pympler_on:
            all_objects = muppy.get_objects()
            all_objects_summary_before = summary.summarize(all_objects)
        memory_profile = memory_usage((_profile_me,
                                       (profile_output, profiled_method_wrapper, func, accept, args, kw),
                                       {}),
                                      interval=0.1)
        output = profile_output['output']
        if is_pympler_on:
            all_objects_summary_after = summary.summarize(all_objects)
            diff = summary.get_diff(all_objects_summary_before, all_objects_summary_after)
            diff_less = summary.format_(diff)
            diff_out = ''
            for s in diff_less:
                diff_out += s+'\n'
            thread_log.info("================ PYMPLER OUTPUT <{}> ==============\n{}".format(end_point_name, diff_out))
        try:

            message = json.dumps({'log_type': 'memory_profile',
                                  'proc_id': os.getpid(),
                                  'name': func.__name__,
                                  'module': func.__module__,
                                  'mem_profile': memory_profile,
                                  'min': min(memory_profile),
                                  'max': max(memory_profile),
                                  'diff': max(memory_profile) - min(memory_profile),
                                  'leaked': memory_profile[-1] - memory_profile[0],
                                  'args': [arg for arg in args[1:]],  # exclude self
                                  'kwargs': kw})
            memory_log.info(message,
                            extra={'controller_module': func.__module__,
                                   'controller_class': controller_class,
                                   'endpoint': func.__name__})
        except Exception as e:
            thread_log.exception('Logger failed: {}'.format(e))
    else:
        output = profiled_method_wrapper(accept, args, func, kw)
    return output
Example #6
    def create_summary(self):
        """Return a summary.

        See also the notes on ignore_self in the class as well as the
        initializer documentation.

        """
        if not self.ignore_self:
            res = summary.summarize(muppy.get_objects())
        else:
            # If the user requested the data required to store summaries to be
            # ignored in the summaries, we need to identify all objects which
            # are related to each summary stored.
            # Thus we build a list of all objects used for summary storage as
            # well as a dictionary which tells us how often an object is
            # referenced by the summaries.
            # During this identification process, more objects are referenced,
            # namely int objects identifying referenced objects as well as the
            # corresponding count.
            # For all these objects it will be checked whether they are
            # referenced from outside the monitor's scope. If not, they will be
            # subtracted from the snapshot summary, otherwise they are
            # included (as this indicates that they are relevant to the
            # application).

            all_of_them = []  # every single object
            ref_counter = {}  # how often it is referenced; (id(o), o) pairs

            def store_info(o):
                all_of_them.append(o)
                if id(o) in ref_counter:
                    ref_counter[id(o)] += 1
                else:
                    ref_counter[id(o)] = 1

            # store infos on every single object related to the summaries
            store_info(self.summaries)
            for k, v in self.summaries.items():
                store_info(k)
                summary._traverse(v, store_info)

            # do the summary
            res = summary.summarize(muppy.get_objects())

            # remove ids stored in the ref_counter
            for _id in ref_counter:
                # referenced in frame, ref_counter, ref_counter.keys()
                if len(gc.get_referrers(_id)) == 3:
                    summary._subtract(res, _id)
            for o in all_of_them:
                # referenced in frame, summary, all_of_them
                if len(gc.get_referrers(o)) == (ref_counter[id(o)] + 2):
                    summary._subtract(res, o)

        return res
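A note that applies to every snippet on this page: a pympler summary is a plain list of [type_repr, count, total_size] rows, which is why the examples index row[0], row[1], row[2] and sort on -r[2]. For instance:

from pympler import muppy, summary

rows = summary.summarize(muppy.get_objects())
# Each row is [type_repr, number_of_objects, total_size_in_bytes].
biggest = max(rows, key=lambda row: row[2])
print("largest type: %s (%d objects, %d bytes)" % tuple(biggest))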
Example #7
 def test_print_diff(self):
     """Test summary can be printed."""
     try:
         self._stdout = sys.stdout
         sys.stdout = self.DevNull()
         sum1 = summary.summarize(muppy.get_objects())
         sum2 = summary.summarize(muppy.get_objects())
         sumdiff = summary.get_diff(sum1, sum2)
         summary.print_(sumdiff)
     finally:
         sys.stdout = self._stdout
Example #8
    def print_diff(self, ignore=[]):
        """Print the diff to the last time the state of objects was measured.

        keyword arguments
        ignore -- list of objects to ignore
        """
        # ignore this and the caller frame
        ignore.append(inspect.currentframe()) #PYCHOK change ignore
        diff = self.get_diff(ignore)
        print("Added objects:")
        summary.print_(summary.summarize(diff['+']))
        print("Removed objects:")
        summary.print_(summary.summarize(diff['-']))
        # manual cleanup, see comment above
        del ignore[:]
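The ignore=[] default above is shared between calls, which is exactly why the method empties it with del ignore[:] before returning. A minimal demonstration of the hazard it works around:

def append_to(item, bucket=[]):  # one list object is shared by every call
    bucket.append(item)
    return bucket

print(append_to(1))  # [1]
print(append_to(2))  # [1, 2] -- the default list persisted across calls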
Example #9
def memusage_before_n_after(fun, *args, **kwargs):
    from pympler import muppy
    from pympler import summary
    from datetime import datetime

    before = summary.summarize(muppy.get_objects())
    before_time = datetime.now()
    fun_ret = fun(*args, **kwargs)
    after_time = datetime.now()
    after = summary.summarize(muppy.get_objects())
    diff = summary.get_diff(before, after)
    print "execution time: ", after_time - before_time
    summary.print_(diff)

    return fun_ret, diff
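A usage sketch (the workload is arbitrary); the function returns both the wrapped call's result and the summary diff:

result, diff = memusage_before_n_after(sorted, range(1000000))
print(len(result))  # 1000000; diff holds the summary rows for further inspection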
Example #10
    def format_diff(self, ignore=()):
        """Format the diff to the last time the state of objects was measured.

        keyword arguments
        ignore -- list of objects to ignore
        """
        # ignore this and the caller frame
        lines = []
        diff = self.get_diff(ignore+(inspect.currentframe(),))
        lines.append("Added objects:")
        for line in summary.format_(summary.summarize(diff['+'])):
            lines.append(line)
        lines.append("Removed objects:")
        for line in summary.format_(summary.summarize(diff['-'])):
            lines.append(line)
        return lines
Example #11
def dump_objs():
	global TRACKER
	if TRACKER is None:
		TRACKER = tracker.SummaryTracker()

	with open("obj_log.txt", "a") as fp:
		fp.write("Memory at {}\n".format(str(datetime.datetime.now())))
		try:
			all_objects = muppy.get_objects()
			sum1 = summary.summarize(all_objects)
			str_sum  = summary.format_(sum1)

			fp.write("Summary:\n")
			for line in str_sum:
				fp.write("	{}\n".format(line))
		except Exception:
			err = traceback.format_exc()
			fp.write("Error: \n")
			fp.write(err)

		try:
			str_diff = TRACKER.format_diff()
			fp.write("Diff:\n")
			for line in str_diff:
				fp.write("	{}\n".format(line))
		except Exception:
			err = traceback.format_exc()
			fp.write("Error: \n")
			fp.write(err)

		fp.write("\n")
Example #12
 def test_sweep(self):
     """Test that all and only empty entries are removed from a summary."""
     objects = ['the', 'quick', 'brown', 'fox', 1298, 123, 234, [], {}]
     summ = summary.summarize(objects)
     # correct removal of rows when sizes are empty
     summary._subtract(summ, {})
     summary._subtract(summ, [])
     summ = summary._sweep(summ)
     found_dict = found_tuple = False
     for row in summ:
         if row[0] == "<type 'dict'>":
             found_dict = True
         if row[0] == "<type 'tuple'>":
             found_tuple = True
     self.assert_(found_dict == False)
     self.assert_(found_tuple == False)
     # do not remove row if one of the sizes is not empty
     # e.g. if the number of objects of a type did not change, but the
     # total size did
     summ = summary._subtract(summ, 'the')
     summ = summary._subtract(summ, 'quick')
     summ = summary._subtract(summ, 'brown')
     summ = summary._subtract(summ, '42')
     summ = summary._sweep(summ)
     found_string = False
     for row in summ:
         if row[0] == summary._repr(''):
             found_string = True
             self.assert_(row[1] == 0)
             totalsize = _getsizeof('fox') - _getsizeof('42')
             self.assert_(row[2] == totalsize)
     self.assert_(found_string == True)
Example #13
    def format_diff(self, ignore=[]):
        """Format the diff to the last time the state of objects was measured.

        keyword arguments
        ignore -- list of objects to ignore
        """
        # ignore this and the caller frame
        ignore.append(inspect.currentframe())  # PYCHOK change ignore
        diff = self.get_diff(ignore)
        yield "Added objects:"
        for line in summary.format_(summary.summarize(diff["+"])):
            yield line
        yield "Removed objects:"
        for line in summary.format_(summary.summarize(diff["-"])):
            yield line
        # manual cleanup, see comment above
        del ignore[:]
Example #14
 def printListingUsage(self, args):
     all_objects = muppy.get_objects()
     sum1 = summary.summarize(all_objects)
     summary.print_(sum1)
     print(" ")
     print("Summary: ")
     tr = tracker.SummaryTracker()
     tr.print_diff()
Example #15
def memory_usage(where):
    """
    Print out a basic summary of memory usage.
    """
    mem_summary = summary.summarize(muppy.get_objects())
    print("Memory summary:", where)
    summary.print_(mem_summary, limit=2)
    print("VM: %.2fMb" % (get_virtual_memory_usage_kb() / 1024.0))
Example #16
 def handle_signal_abort(self, signum, frame):
     Log.warn("Someone want to kill me! But I'll not die now! Hahahaha!")
     s = summary.summarize(muppy.get_objects())
     Log.debug("Current memory usage:")
     summary.print_(s)
     diff = summary.get_diff(self.mem_sum, s)
     self.mem_sum = s
     Log.debug("New memory usage:")
     summary.print_(diff)
Example #17
 def test_summarize(self):
     """Test summarize method. """
     objects = [1, 'a', 'b', 'a', 5, [], {}]
     expected = [[summary._repr(''), 3, 3*_getsizeof('a')],\
                 [summary._repr(1), 2, 2*_getsizeof(1)],\
                 [summary._repr([]), 1, _getsizeof([])],\
                 [summary._repr({}), 1, _getsizeof({})]]
     res = summary.summarize(objects)
     for row_e in res:
         self.assert_(row_e in expected)
Example #18
	def memory_summary(self):
		# Only import Pympler when we need it. We don't want it to
		# affect our process if we never call memory_summary.
		
		caller = sys._getframe(1).f_code.co_name # So we can reference the caller
		
		from pympler import summary, muppy
		mem_summary = summary.summarize(muppy.get_objects())
		rows = summary.format_(mem_summary)
		indigo.server.log('\n\nCALLED BY: ' + caller + '\n\n' + '\n'.join(rows))
Example #19
    def sig_usr(self, a, b):
        import threading
        import gc

        held_locks = {}
        code = {}
        curthreads = threading.enumerate()

        for threadId, stack in sys._current_frames().items():
            name = str(threadId)
            for ct in curthreads:
                if ct.ident == threadId:
                    name = ct.name

            code[name] = ["NAME: %s" % name]
            for filename, lineno, fname, line in traceback.extract_stack(stack):
                code[name].append('FILE: "%s", line %d, in %s' % (filename, lineno, fname))
                if line:
                    code[name].append("  %s" % (line.strip()))

            held_locks[name] = ""
            for lock in alllocks:
                if lock.writer_id == threadId:
                    held_locks[name] += ("%s(w)" % lock.name)
                    continue
                for reader_id, reader_stack in lock.reader_stacks:
                    if reader_id == threadId:
                        held_locks[name] += ("%s(r)" % lock.name)

        for k in code:
            log.info('\n\nLOCKS: %s \n%s' % (held_locks[k], '\n'.join(code[k])))

        log.info("\n\nSTACKS:")
        for lock in alllocks:
            for (reader_id, reader_stack) in lock.reader_stacks:
                log.info("Lock %s (%s readers)" % (lock.name, lock.readers))
                log.info("Lock reader (thread %s):" % (reader_id,))
                log.info(''.join(reader_stack))

            for writer_stack in lock.writer_stacks:
                log.info("Lock %s (%s readers)" % (lock.name, lock.readers))
                log.info("Lock writer (thread %s):" % (lock.writer_id,))
                log.info(''.join(writer_stack))

        self.shelf.sync()
        gc.collect()

        # If we've got pympler installed, output a summary of memory usage.

        try:
            from pympler import summary, muppy
            from pympler.asizeof import asizeof
            summary.print_(summary.summarize(muppy.get_objects()))
        except:
            pass
Example #20
 def memory_summary(self, summarize=True):
     "Using pympler summarize module to view memory summary."
     
     if summarize:
         all_objects = muppy.get_objects()
         Logger.info("ENV: \nMemory Footprint:")
         Logger.info("-----------------")
         return summary.print_(summary.summarize(all_objects), limit=50)
     else:
         Logger.info("ENV: \nMemory Tracker:")
         Logger.info("---------------")
         self.mem_tracker.print_diff()        
Example #21
def print_muppy_summary():
    # http://pythonhosted.org/Pympler/index.html
    try:
        from pympler import muppy, summary
    except ImportError:
        print("WARNING: pympler not installed")
        return
    # from pympler.classtracker import ClassTracker
    # from pympler.classtracker_stats import HtmlStats
    global all_objects, obj_summary, class_tracker
    if all_objects is None:
        all_objects = muppy.get_objects()
        obj_summary = summary.summarize(all_objects)
        summary.print_(obj_summary)

        # class_tracker = ClassTracker()
        # class_tracker.track_class(FICSPlayer, trace=1)
        # class_tracker.track_class(ICGameModel, resolution_level=2, trace=1)
    else:
        obj_summary2 = summary.summarize(muppy.get_objects())
        diff = summary.get_diff(obj_summary, obj_summary2)
        summary.print_(diff, limit=200)
Example #22
def print_memory(count=30):
    '''
    Print the statistics of the objects in the memory.
    Need pympler to use.
    '''
    from pympler import muppy, summary

    gc.collect()
    all_objects = muppy.get_objects()
    my_types = muppy.filter(all_objects, Type=wx.Object)
    sum1 = summary.summarize(my_types)
    # sum1 = summary.summarize(all_objects)
    summary.print_(sum1, limit=count)
Example #23
    def __init__(self, ignore_self=True):
        """Constructor.

        The number of summaries managed by the tracker has a performance
        impact on new summaries, if you decide to exclude them from further
        summaries. Therefore it is suggested to use them economically.

        Keyword arguments:
        ignore_self -- summaries managed by this object will be ignored.
        """
        self.s0 = summary.summarize(muppy.get_objects())
        self.summaries = {}
        self.ignore_self = ignore_self
Example #24
    def get_report(self):
        all_objects = muppy.get_objects()
        size = get_size(all_objects)
        report = summary.summarize(all_objects)

        sort_index = self.cleaned_data['sort_by']
        limit = self.cleaned_data['limit']

        report.sort(key=lambda item: item[sort_index], reverse=True)
        if limit:
            report = report[:limit]

        return size, report
Example #25
def analyzeAllMFCC():
    client = MongoClient()
    db = client.audiograins
    grainEntries = db.grains

    query = grainEntries.find({ "mfcc00" : { "$exists": False }})
    print("Analyzing MFCC for " + str(query.count()) + " grains")

    for grain in tqdm(query):
        mfccs = analyzeMFCC(grain)
        for mfccIndex in range(0, len(mfccs)):
            update = {"mfcc" + format(mfccIndex, '02') : mfccs[mfccIndex]}
            grainEntries.update_one({"_id": grain["_id"]}, {"$set" : update})

    summary.print_(summary.summarize(muppy.get_objects()))
    client.close()
Example #26
 def getDebugInfo(self, itemname):
     """Give debug info about a particular item."""
     global profile
     outf=StringIO()
     if itemname == "":
         outf.write("the item was empty")
         if profile:
             all_objects = muppy.get_objects()
             sum1 = summary.summarize(all_objects)
             summary.print_(sum1, 100)
             ib = refbrowser.InteractiveBrowser(self)
             ib.main()
         return outf.getvalue()
     itemname=keywords.fixID(itemname)
     itemlist=vtype.parseItemList(itemname)
     item=self.getSubValue(itemlist)
     item.writeDebug(outf)
     return outf.getvalue()
Example #27
    def test_subtract(self):
        """Test that a single object's data is correctly subtracted from a summary.
        - result in correct total size and total number of objects
        - if object was not listed before, it should be listed negative
          afterwards
        """

        objects = ['the', 'quick', 'brown', 'fox', 1298, 123, 234, [], {}]
        summ = summary.summarize(objects)
        summary._subtract(summ, 'the')
        summary._subtract(summ, {})
        summary._subtract(summ, (1,))
        # to verify that these rows were actually included afterwards
        checked_str = checked_dict = checked_tuple = False
        for row in summ:
            if row[0] == summary._repr(''):
                totalsize = _getsizeof('quick') + _getsizeof('brown') +\
                            _getsizeof('fox')
                self.assert_(row[1] == 3, "%s != %s" % (row[1], 3))
                self.assert_(row[2] == totalsize, totalsize)
                checked_str = True
            if row[0] == summary._repr({}):
                self.assert_(row[1] == 0)
                self.assert_(row[2] == 0)
                checked_dict = True
            if row[0] == summary._repr((1,)):
                self.assert_(row[1] == -1)
                self.assert_(row[2] == -_getsizeof((1,)))
                checked_tuple = True

        self.assert_(checked_str, "no str found in summary")
        self.assert_(checked_dict, "no dict found in summary")
        self.assert_(checked_tuple, "no tuple found in summary")

        summary._subtract(summ, 'quick')
        summary._subtract(summ, 'brown')
        checked_str = False
        for row in summ:
            if row[0] == summary._repr(''):
                self.assert_(row[1] == 1)
                self.assert_(row[2] == _getsizeof('fox'))
                checked_str = True
        self.assert_(checked_str, "no str found in summ")
Example #28
def pympler_snapshot(rows=None, limit=15, sort="size", order="descending"):
  """Print the rows as a summary.

  Keyword arguments:
  limit -- the maximum number of elements to be listed
  sort  -- sort elements by 'size', 'type', or '#'
  order -- sort 'ascending' or 'descending'
  """
  
  if not rows:
    rows = summary.summarize(muppy.get_objects())

  localrows = []
  for row in rows:
      localrows.append(list(row))
  # input validation
  sortby = ['type', '#', 'size']
  if sort not in sortby:
      raise ValueError("invalid sort, should be one of" + str(sortby))
  orders = ['ascending', 'descending']
  if order not in orders:
      raise ValueError("invalid order, should be one of" + str(orders))
  # sort rows
  if sortby.index(sort) == 0:
      if order == "ascending":
          localrows.sort(key=lambda x: _repr(x[0]))
      elif order == "descending":
          localrows.sort(key=lambda x: _repr(x[0]), reverse=True)
  else:
      if order == "ascending":
          localrows.sort(key=lambda x: x[sortby.index(sort)])
      elif order == "descending":
          localrows.sort(key=lambda x: x[sortby.index(sort)], reverse=True)
  # limit rows
  localrows = localrows[0:limit]
  for row in localrows:
      row[2] = stringutils.pp(row[2])
  # print rows
  localrows.insert(0, ["types", "# objects", "total size"])
  return pympler_prepare(localrows)
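_repr, stringutils.pp, and pympler_prepare are outside the excerpt. One plausible reading (assumed, not from the source) is that pympler_prepare zips the header row just inserted with each data row:

def pympler_prepare(rows):
    # Assumed shape: rows[0] is the header ["types", "# objects", "total size"],
    # the remaining entries are data rows of the same width.
    header, data = rows[0], rows[1:]
    return [dict(zip(header, row)) for row in data]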
Example #29
    def __init__(self):
        Thread.__init__(self)

        self.E = builder.ElementMaker(namespace = self.__xmlns__)
        self._rosters = {}
        self._resources = None
        self._session_cache = {}
        self._conns = {}
        self.xmpp_read = {}
        self.term_read = {}
        self.term_stealed = {}
        self.update_sessions()
        self.mem_sum = summary.summarize(muppy.get_objects())

        signal.signal(signal.SIGUSR2, self.handle_signal_message)
        signal.signal(signal.SIGABRT, self.handle_signal_abort)
        signal.signal(signal.SIGHUP, self.handle_signal_abort)

        self._running = True
        self._updater = Updater(self)
        self._pinger = Pinger(self)
        self._stealer = Stealer(self)
        self.start()
Example #30
        def log_summary(self, msg=None):
            """
            Generates a summary of all memory used. The summary is
            returned as a string and logged to a file
            """

            if self.on:
                all_objs=muppy.get_objects()
                all_summ=summary.summarize(all_objs)
                formatted_summ="\n".join(
                    summary.format_(
                        all_summ, 
                        limit=15, 
                        sort='size', 
                        order='descending'
                    )
                )
                if msg is not None:
                    self.logger.debug('Full Summary:\n' + msg + '\n' +\
                            formatted_summ)
                else:
                    self.logger.debug('Full Summary:\n' + formatted_summ)
                return formatted_summ
Example #31
import sys

from pympler import asizeof, muppy, summary

# Shallow size (e.g. the size of the list itself, but not of the objects it contains)
print(sys.getsizeof(1))  # 28
print(sys.getsizeof(54234.000342314000))  # 24
print(sys.getsizeof(None))  # 16
print(sys.getsizeof([]))  # 72
print(sys.getsizeof([1, 2, 3, 4, ['s', 'l', ['a', 1]]]))  # 112
print(sys.getsizeof({}))  # 248
print(sys.getsizeof(tuple()))  # 56

print()
# Deep size (walks down the object hierarchy and sums the sizes)
print(asizeof.asizeof(1))  # 32
print(asizeof.asizeof(54234.000342314000))  # 24
print(asizeof.asizeof(None))  # 16
print(asizeof.asizeof([]))  # 72
print(asizeof.asizeof([1, 2, 3, 4, ['s', 'l', ['a', 1]]]))  # 592
print(
    asizeof.asizeof(
        [1, 2, 3, 4, ['s', 'l', ['a', 1, [None, 1, {
            'a': 1,
            'b': 'a'
        }]]]]))  # 1016
print(asizeof.asizeof({}))  # 248
print(asizeof.asizeof(tuple()))  # 56
print(asizeof.asizeof(set()))  # 232

all_objects_1 = muppy.get_objects()
# ... allocate more objects here ...
all_objects_2 = muppy.get_objects()
sum_1 = summary.summarize(all_objects_1)
sum_2 = summary.summarize(all_objects_2)
summary.print_(summary.get_diff(sum_1, sum_2))
Example #32
    def learn(self):

        while True:

            experiences = []

            for i in range(0, self.REPLAY_BUFFER_SIZE):
                episode_experience = self.play_episode()
                experiences.append(episode_experience)
                step = self.global_step + i
                reward = sum(episode_experience.rewards)
                self.log('reward', reward, step, False)

                if reward > self.record_run:
                    frames = episode_experience.get_frames()
                    self.log_gif('best_run', frames, step)
                    self.record_run = reward
                    self.sess.run(tf.assign(self.RECORD, self.record_run))
                    logging.info('Record beaten: {0}'.format(self.record_run))

                logging.info('Episode: {0} | reward: {1}'.format(step, reward))

            states, actions, values, advantages = self.get_experience_batches(
                experiences)

            opt_results = []

            for i in range(len(actions)):
                opt_res = self.policy.optimize(states[i], actions[i],
                                               values[i], advantages[i],
                                               self.sess)

                opt_results.append(opt_res)

            value_loss = np.array([q["value_loss"]
                                   for q in opt_results]).mean()
            policy_loss = np.array([q["policy_loss"]
                                    for q in opt_results]).mean()
            entropy = np.array([q["entropy"] for q in opt_results]).mean()
            total_loss = np.array([q["total_loss"]
                                   for q in opt_results]).mean()

            self.log('value_loss', value_loss, self.global_step)
            self.log('policy_loss', policy_loss, self.global_step)
            self.log('entropy', entropy, self.global_step)
            self.log('total_loss', total_loss, self.global_step)

            print('=======')

            if self.batch_count % self.SAVE_MODEL_STEPS == 0:
                self.check_model()
                self.save_model()

                all_objects = muppy.get_objects()
                sum1 = summary.summarize(all_objects)
                summary.print_(sum1)

                all_objects.clear()
                sum1.clear()

                del all_objects
                del sum1

            opt_results.clear()
            experiences.clear()

            self.global_step = self.sess.run(
                tf.assign(self.GS, self.GS + self.REPLAY_BUFFER_SIZE))
            self.batch_count = self.sess.run(tf.assign(self.BC, self.BC + 1))
Example #33
 def summarize_memory():
     print("Virtual machine: {:.2f}Mb".format(
         psutil.Process().memory_info_ex().vms / (1024 * 1024)))
     summary.print_(summary.summarize(muppy.get_objects()), limit=1)
Example #34
def memusage(o):
    summary.print_(
        summary.summarize(o))
Example #35
def main():
	
	adv_messages = rawdata.load_dict_from_json(ADV_MESSAGES_PATH)

	banned_users = []
	
	while True:
		accounts = rawdata.load_dict_from_json(ACCOUNTS_PATH)
		users = init(accounts)
		accounts.clear()
		try:
			queue = Queue()

			users_signed = []
			for user in users:
				if not user in banned_users:
					queue.put(user)

			for i in range(len(users) - len(banned_users)):
				thread = Thread(target=signing, args=(queue, users_signed, banned_users))
				thread.daemon = True
				thread.start()
			queue.join()
			del users[:]
			users = users_signed.copy()
			del users_signed[:]

			queue = Queue()
			for user in users:
				if not user in banned_users:
					queue.put(user)
			thread_count = 3 if len(users) >= 5 else 1
			for i in range(thread_count):
				thread = Thread(target=group_creating_desire_c, args=(queue, COUNT_OF_GROUPS, NAME_OF_GROUPS))
				thread.daemon = True
				thread.start()
			queue.join()

			arg_list = {"counter":0, "adv_counter":0, "iteration":0}
			
			while True:
				queue = Queue()
				del_targ = []
				targets = rawdata.load_dict_from_json(TARGETS_PATH)
				len_targets = len(targets)
				time_start = time.time()
				for user in users:
					if not user in banned_users:
						queue.put(user)
				print("незабаненных пользователей %s/%s | забаненны %s" % (len(users) - len(banned_users), len(users), banned_users))
				for i in range(len(users) - len(banned_users)):
					thread = Thread(target=posting_for_all_users, args=(queue, targets, adv_messages, arg_list, del_targ, banned_users))
					thread.daemon = True
					thread.start()

				queue.join()

				time_stop = time.time()
				time_dif = time_stop - time_start
				wait_time = 60*(24 + random.uniform(0, 5)) - time_dif

				u_banned_users = [user.username for user in banned_users]
				rawdata.delete_from_json(data=del_targ, path_to_data=TARGETS_PATH)
				rawdata.delete_from_json(data=u_banned_users, path_to_data=ACCOUNTS_PATH)
				del del_targ[:]
				targets.clear()
				del targets
				
				gc.collect()
				summary.print_(summary.summarize(muppy.get_objects()))


				print("Осталось целей %s" % len_targets)
				print("Всего/рекламных %s/%s за %s мин" % (arg_list["counter"], arg_list["adv_counter"], str(time_dif / 60)))
				
				if wait_time > 0:
					print("Пауза %s минут\n" % str(wait_time / 60))
					time.sleep(wait_time)
				else:
					print("Продолжаем без паузы %s мин запаздывания\n" % str(-1 * wait_time / 60))
				arg_list["iteration"] += 1
		except:
			print("Ошибка в posting.py/main %s" % sys.exc_info()[0])
			raise
Example #36
def log_mem(mes=''):
    if options.debug:
        all_objects = muppy.get_objects()
        sum1 = summary.summarize(all_objects)
        app_log.log(logging.DEBUG, 'mem (%s): %d' % (mes, len(all_objects)))
        summary.print_(sum1)
Example #37
    def cli_mem_summary(self, event):
        """Output memory usage summary"""

        all_objects = muppy.get_objects()
        state = summary.summarize(all_objects)
        summary.print_(state)
Example #38
    def process_response(self, request, response):
        req = request.META['PATH_INFO']
        if not self.is_ignored(req):
            if SHOW_ON_DJANGO_DEBUG_TOOLBAR_LOGGIN_PANEL:
                logger.info(
                    '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
                    '~~~~~~~~~~~~~~')
            print('\n\n')
            print('~' * 78)
            print('~' * 78)
            print('~' * 78)
            if SHOW_ON_DJANGO_DEBUG_TOOLBAR_LOGGIN_PANEL:
                logger.info(u'REQUESTED URL: {}'.format(req))
            print(u'REQUESTED URL: {}'.format(req))
            self.end_objects = muppy.get_objects()
            if SHOW['request_summary']:
                sum_start = summary.summarize(self.start_objects)
                if SHOW_ON_DJANGO_DEBUG_TOOLBAR_LOGGIN_PANEL:
                    logger.info(
                        '~~~~~~~~~ SUMMARIZE REQUEST OBJECTS ~~~~~~~~~')
                    for row in sorted(sum_start,
                                      key=lambda i: i[2],
                                      reverse=True)[:15]:
                        logger.info(
                            "type: %60s , # objects: %10d, total size: %s",
                            *(row[0], row[1], filesizeformat(row[2])))
                print('~~~~~~~~~ SUMMARIZE REQUEST OBJECTS ~~~~~~~~~')
                summary.print_(sum_start)

            if SHOW['response_summary']:
                sum_end = summary.summarize(self.end_objects)
                if SHOW_ON_DJANGO_DEBUG_TOOLBAR_LOGGIN_PANEL:
                    logger.info(
                        '~~~~~~~~~ SUMMARIZE RESPONSE OBJECTS ~~~~~~~~~')
                    for row in sorted(sum_end,
                                      key=lambda i: i[2],
                                      reverse=True)[:15]:
                        logger.info(
                            "type: %60s , # objects: %10d, total size: %s",
                            *(row[0], row[1], filesizeformat(row[2])))
                print('~~~~~~~~~ SUMMARIZE RESPONSE OBJECTS ~~~~~~~~~')
                summary.print_(sum_end)

            if SHOW['compared_request_response_summaries']:
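                # Note: this branch assumes SHOW['request_summary'] and
                # SHOW['response_summary'] are also enabled, since sum_start and
                # sum_end are only defined inside those blocks above.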
                diff = summary.get_diff(sum_start, sum_end)
                if SHOW_ON_DJANGO_DEBUG_TOOLBAR_LOGGIN_PANEL:
                    logger.info(
                        '~~~~~~~~~ COMPARED REQUEST & RESPONSE SUMMARIES '
                        '~~~~~~~~~')
                    for row in sorted(diff, key=lambda i: i[2],
                                      reverse=True)[:15]:
                        logger.info(
                            "type: %60s , # objects: %10d, total size: %s",
                            *(row[0], row[1], filesizeformat(row[2])))
                print('~~~~~~~~~ COMPARED REQUEST & RESPONSE SUMMARIES ~~~~~~~~~')
                summary.print_(diff)

            # print '~~~~~~~~~'
            # cb = refbrowser.ConsoleBrowser(
            #     response, maxdepth=2, str_func=output_function)
            # cb.print_tree()

            a = asizeof(response)
            a_string = 'Total size of response object in kB: %s' % \
                str(a/1024.0)
            b = asizeof(self.end_objects)
            b_string = 'Total size of end_objects in MB: %s' % str(
                b / 1048576.0)
            c = asizeof(self.start_objects)
            c_string = 'Total size of start_objects in MB: %s' % \
                str(c/1048576.0)

            if SHOW_ON_DJANGO_DEBUG_TOOLBAR_LOGGIN_PANEL:
                logger.info(
                    '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
                    '~~~~~~~~~~~~~~')
                logger.info(a_string)
                logger.info(b_string)
                logger.info(c_string)
                logger.info(
                    '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
                    '~~~~~~~~~~~~~~')

            print('~' * 78)
            print(a_string)
            print(b_string)
            print(c_string)
            print('~' * 78)
            print('~' * 78)
            print('~' * 78)

        return response
Example #39
 def memory_profiler(self):
     all_objects = muppy.get_objects()
     stats = summary.summarize(all_objects)
     return {'Memory_profiler': [l for l in summary.format_(stats, LIMIT_OBJECTS_FOR_PROFILER)]}
Example #40
def before_after_each_function(request):
    global _global_collect_info

    try:
        import psutil  # Don't fail if not there
    except ImportError:
        yield
        return

    current_pids = set(proc.pid for proc in psutil.process_iter())
    before_curr_proc_memory_info = psutil.Process().memory_info()

    if _global_collect_info and DEBUG_MEMORY_INFO:
        try:
            from pympler import summary, muppy
            sum1 = summary.summarize(muppy.get_objects())
        except:
            pydev_log.exception()

    sys.stdout.write('''
===============================================================================
Memory before: %s
%s
===============================================================================
''' % (request.function,
       format_memory_info(psutil.virtual_memory(),
                          before_curr_proc_memory_info)))
    yield

    processes_info = []
    for proc in psutil.process_iter():
        if proc.pid not in current_pids:
            try:
                try:
                    cmdline = proc.cmdline()
                except:
                    cmdline = '<unable to get>'
                processes_info.append(
                    'New Process: %s(%s - %s) - %s' %
                    (proc.name(), proc.pid, cmdline,
                     format_process_memory_info(proc.memory_info())))
            except (psutil.NoSuchProcess, psutil.AccessDenied):
                pass  # The process could've died in the meanwhile

    after_curr_proc_memory_info = psutil.Process().memory_info()

    if DEBUG_MEMORY_INFO:
        try:
            if after_curr_proc_memory_info.rss - before_curr_proc_memory_info.rss > 10 * 1000 * 1000:
                # 10 MB leak
                if _global_collect_info:
                    sum2 = summary.summarize(muppy.get_objects())
                    diff = summary.get_diff(sum1, sum2)
                    sys.stdout.write(
                        '===============================================================================\n'
                    )
                    sys.stdout.write('Leak info:\n')
                    sys.stdout.write(
                        '===============================================================================\n'
                    )
                    summary.print_(diff)
                    sys.stdout.write(
                        '===============================================================================\n'
                    )

                _global_collect_info = True
                # We'll only really collect the info on the next test (i.e.: if at one test
                # we used too much memory, the next one will start collecting)
            else:
                _global_collect_info = False
        except:
            pydev_log.exception()

    sys.stdout.write(
        '''
===============================================================================
Memory after: %s
%s%s
===============================================================================


''' % (request.function,
       format_memory_info(psutil.virtual_memory(),
                          after_curr_proc_memory_info),
       '' if not processes_info else '\nLeaked processes:\n' +
       '\n'.join(processes_info)), )
Example #41
    def process_response(self, request, response):
        path = request.META['PATH_INFO']

        if self.is_ignored(path):
            return response

        self.end_objects = muppy.get_objects()

        sum_start = summary.summarize(self.start_objects)
        sum_end = summary.summarize(self.end_objects)
        diff = summary.get_diff(sum_start, sum_end)

        if SHOW_ON_DJANGO_DEBUG_TOOLBAR_LOGGIN_PANEL:
            logger.info(
                '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
                '~~~~~~~~~~~~~~')
            logger.info("Top %d memory deltas after processing URL: %s",
                        SHOW_TOP_X_MEMORY_DELTAS, path)
            logger.info("%-60s %10s %10s", "type", "# objects", "total size")
        print('\n\n')
        print('~' * 82)
        print('~' * 82)
        print('~' * 82)
        print("Top %d memory deltas after processing URL: %s" % (
            SHOW_TOP_X_MEMORY_DELTAS, path))
        print("%60s %10s %10s" % ("type", "# objects", "total size"))

        for row in sorted(diff, key=lambda i: i[2],
                          reverse=True)[:SHOW_TOP_X_MEMORY_DELTAS]:
            if SHOW_ON_DJANGO_DEBUG_TOOLBAR_LOGGIN_PANEL:
                logger.info("type: %60s , # objects: %10d, total size: %s",
                            *(row[0], row[1], filesizeformat(row[2])))
            print "%60s %10d %s" % (row[0], row[1], filesizeformat(row[2]))

        start_size = asizeof(self.start_objects)
        end_size = asizeof(self.end_objects)

        if SHOW_ON_DJANGO_DEBUG_TOOLBAR_LOGGIN_PANEL:
            logger.info(
                '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
                '~~~~~~~~~~~~~~')
            logger.info(
                "Processed %s: memory delta %0.1f kB (%0.1f -> %0.1fMB), "
                "response size: %0.1f kB",
                path,
                (end_size - start_size) / 1024.0,
                start_size / 1048576.0,
                end_size / 1048576.0,
                len(response.content) / 1024.0,
            )
        print('~' * 82)
        print(
            "Processed %s: memory delta %0.1f kB (%0.1f -> %0.1fMB), "
            "response size: %0.1f kB" % (
                path,
                (end_size - start_size) / 1024.0,
                start_size / 1048576.0,
                end_size / 1048576.0,
                len(response.content) / 1024.0,
            ))
        if SHOW_ON_DJANGO_DEBUG_TOOLBAR_LOGGIN_PANEL:
            logger.info(
                '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
                '~~~~~~~~~~~~~~')
        print('~' * 82)
        print('~' * 82)
        print('~' * 82)
        print('\n\n')

        return response
Example #42
    def _load_model_data(self, modeldata, namespaces, **kwds):
        """
        Load declarations from a DataPortal object.
        """
        #
        # As we are primarily generating objects here (and acyclic ones
        # at that), there is no need to run the GC until the entire
        # model is created.  Simple reference-counting should be
        # sufficient to keep memory use under control.
        #
        with PauseGC() as pgc:

            #
            # Unlike the standard method in the pympler summary
            # module, the tracker doesn't print 0-byte entries to pad
            # out the limit.
            #
            profile_memory = kwds.get('profile_memory', 0)

            #
            # It is often useful to report timing results for various
            # activities during model construction.
            #
            report_timing = kwds.get('report_timing', False)

            if (pympler_available is True) and (profile_memory >= 2):
                mem_used = muppy.get_size(muppy.get_objects())
                print("")
                print("      Total memory = %d bytes prior to model "
                      "construction" % mem_used)

            if (pympler_available is True) and (profile_memory >= 3):
                gc.collect()
                mem_used = muppy.get_size(muppy.get_objects())
                print("      Total memory = %d bytes prior to model "
                      "construction (after garbage collection)" % mem_used)

            #
            # Do some error checking
            #
            for namespace in namespaces:
                if not namespace is None and not namespace in modeldata._data:
                    msg = "Cannot access undefined namespace: '%s'"
                    raise IOError(msg % namespace)

            #
            # Initialize each component in order.
            #

            if report_timing is True:
                import pyomo.core.base.expr as EXPR
                construction_start_time = time.time()

            for component_name, component in iteritems(self.component_map()):

                if component.type() is Model:
                    continue

                if report_timing is True:
                    start_time = time.time()
                    clone_counters = EXPR.generate_expression.clone_counter

                self._initialize_component(modeldata, namespaces,
                                           component_name, profile_memory)

                if report_timing is True:
                    total_time = time.time() - start_time
                    if isinstance(component, IndexedComponent):
                        clen = len(component)
                    else:
                        assert isinstance(component, Component)
                        clen = 1
                    print("    %%6.%df seconds required to construct component=%s; %d indicies total" \
                              % (total_time>=0.005 and 2 or 0, component_name, clen) \
                              % total_time)
                    tmp_clone_counters = EXPR.generate_expression.clone_counter
                    if clone_counters != tmp_clone_counters:
                        clone_counters = tmp_clone_counters
                        print(
                            "             Cloning detected! (clone counters: %d)"
                            % clone_counters)

            # Note: As is, connectors are expanded when using command-line pyomo but not calling model.create(...) in a Python script.
            # John says this has to do with extension points which are called from commandline but not when writing scripts.
            # Uncommenting the next two lines switches this (command-line fails because it tries to expand connectors twice)
            #connector_expander = ConnectorExpander()
            #connector_expander.apply(instance=self)

            if report_timing is True:
                total_construction_time = time.time() - construction_start_time
                print("      %6.2f seconds required to construct instance=%s" %
                      (total_construction_time, self.name))

            if (pympler_available is True) and (profile_memory >= 2):
                print("")
                print(
                    "      Summary of objects following instance construction")
                post_construction_summary = summary.summarize(
                    muppy.get_objects())
                summary.print_(post_construction_summary, limit=100)
                print("")
Example #43
    def create_instance(self,
                        filename=None,
                        data=None,
                        name=None,
                        namespace=None,
                        namespaces=None,
                        profile_memory=0,
                        report_timing=False,
                        **kwds):
        """
        Create a concrete instance of an abstract model, possibly using data
        read in from a file.

        Optional:
            filename:           The name of a Pyomo Data File that will be used
                                    to load data into the model.
            data:               A dictionary containing initialization data for
                                    the model to be used if there is no filename
            name:               The name given to the model.
            namespace:          A namespace used to select data.
            namespaces:         A list of namespaces used to select data.
            profile_memory:     A number that indicates the profiling level.
            report_timing:      Report timing statistics during construction.
        """
        #
        # Generate a warning if this is a concrete model but the
        # filename is specified.  A concrete model is already
        # constructed, so passing in a data file is a waste of time.
        #
        if self.is_constructed() and isinstance(filename, string_types):
            msg = "The filename=%s will not be loaded - supplied as an " \
                  "argument to the create_instance() method of a "\
                  "concrete instance with name=%s." % (filename, name)
            logger.warning(msg)

        if 'clone' in kwds:
            kwds.pop('clone')
            logger.warning(
                """DEPRECATION WARNING: Model.create_instance() no longer accepts the
'clone' argument: the base abstract model is always cloned.""")
        if 'preprocess' in kwds:
            kwds.pop('preprocess')
            logger.warning(
                """DEPRECATION WARNING: Model.create_instance() no longer accepts the
'preprocess' argument: preprocessing is always deferred to when the
model is sent to the solver""")
        if kwds:
            msg = \
"""Model.create_instance() passed the following unrecognized keyword
arguments (which have been ignored):"""
            for k in kwds:
                msg = msg + "\n    '%s'" % (k, )
            logger.error(msg)

        if self.is_constructed():
            logger.warning(
                """DEPRECATION WARNING: Cannot call Model.create_instance() on a
constructed model; returning a clone of the current model instance.""")
            return self.clone()

        if name is None:
            name = self.name
        if filename is not None:
            if data is not None:
                logger.warning(
                    "Model.create_instance() passed both 'filename' "
                    "and 'data' keyword arguments.  Ignoring the "
                    "'data' argument")
            data = filename
        if data is None:
            data = {}

        #
        # Clone the model and load the data
        #
        instance = self.clone()

        if name is not None:
            instance._name = name

        # If someone passed a rule for creating the instance, fire the
        # rule before constructing the components.
        if instance._rule is not None:
            instance._rule(instance)

        if namespaces:
            _namespaces = list(namespaces)
        else:
            _namespaces = []
        if namespace is not None:
            _namespaces.append(namespace)
        if None not in _namespaces:
            _namespaces.append(None)

        instance.load(data,
                      namespaces=_namespaces,
                      profile_memory=profile_memory,
                      report_timing=report_timing)

        #
        # Preprocess the new model
        #

        if False and preprocess is True:

            if report_timing is True:
                start_time = time.time()

            instance.preprocess()

            if report_timing is True:
                total_time = time.time() - start_time
                print("      %6.2f seconds required for preprocessing" %
                      total_time)

            if (pympler_available is True) and (profile_memory >= 2):
                mem_used = muppy.get_size(muppy.get_objects())
                print(
                    "      Total memory = %d bytes following instance preprocessing"
                    % mem_used)
                print("")

            if (pympler_available is True) and (profile_memory >= 2):
                print("")
                print(
                    "      Summary of objects following instance preprocessing"
                )
                post_preprocessing_summary = summary.summarize(
                    muppy.get_objects())
                summary.print_(post_preprocessing_summary, limit=100)

        #
        # Indicate that the model is concrete/constructed
        #
        instance._constructed = True
        return instance
Example #44
    return subs

def sub_string_brute(seq1, seq2):
    """ Sub-string by brute force """

    subs = []
    for item in seq2:
        for parent in seq1:
            if item in parent:
                subs.append(item)

    return subs


def test(N):
    random_strings(10, N)
    subs = sub_string(seq1, seq2)

def test2():
    subs = sub_string(seq1, seq2)

if __name__ == "__main__":
    from pympler import summary
    from pympler import muppy

    test(10000)
    all_objects = muppy.get_objects()
    sum1 = summary.summarize(all_objects)
    summary.print_(sum1)  
Example #45
        parameters, errors = data.GetAllData(peakFileList)
        parameterList_bg = mapp.CreatePeakList(peakFileList)
        PlotParameterMappings(parameters, parameterList_bg, mapdims, step,
                              background=background, msize=msize, area=area)

        dict_bg = CreateMinMaxDict(parameters, parameterList_bg, folder)
        dict_minmax_global = UpdateGlobalDict(dict_minmax_global, dict_bg)

    print('\nList of minima and maxima.')
    PrintMinMax(dict_minmax, parameterList)

    print(linebreaker + '\n' + linebreaker)

    if debug:
        all_objects = muppy.get_objects()
        # Print out a summary of the large objects
        sum1 = summary.summarize(all_objects)
        summary.print_(sum1)
        # Get references to certain types of objects, such as DataFrames
        dataframes = [ao for ao in all_objects if isinstance(ao, pd.DataFrame)]

        for dat in dataframes:
            print(dat.columns.values)
            print(len(dat))
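        # time1/time2 are assumed to be tracemalloc snapshots captured
        # earlier in the (truncated) script; compare_to diffs them by line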
        stats = time2.compare_to(time1, 'lineno')
        for stat in stats[:10]:
            print(stat)

if scaled:
    print('List of global minima and maxima '
        + 'and the mappings they are taken from.')
    PrintMinMax(dict_minmax_global, dict_minmax_global.keys())
    print(linebreaker + '\n' + linebreaker)
Example #46
0
def print_summary():
    """Print a summary of all known objects."""
    summary.print_(summary.summarize(get_objects()))
Example #47
0
def mem():
    all_objects = muppy.get_objects()
    summary.print_(summary.summarize(all_objects))
Example #48
0
def memory_summary():
    mem_summary = summary.summarize(muppy.get_objects())
    rows = summary.format_(mem_summary)
    return '\n'.join(rows)
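
Because memory_summary() returns the formatted table as a single string, it
drops straight into any logging setup; a short usage sketch (the logger name
is illustrative):

import logging

logging.basicConfig(level=logging.INFO)
log = logging.getLogger("memcheck")  # hypothetical logger name
log.info("Current heap:\n%s", memory_summary())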
Example #49
0
# Print the memory footprint of objects from the modules listed in MODULES,
# limited to the top MAX_RESULTS results.

# flake8: noqa

from operator import itemgetter
from pympler import muppy, summary

MODULES = ["netmiko", "napalm", "sqlalchemy"]
MAX_RESULTS = 100


def format_size(size):
    for unit in ["B", "KiB", "MiB", "GiB"]:
        if size < 1024.0 or unit == "GiB":
            break
        size /= 1024.0
    return f"{size:.2f} {unit}"


profile = sorted(
    (object_ for object_ in summary.summarize(muppy.get_objects())
     if any(module in object_[0] for module in MODULES)),
    key=itemgetter(2),
    reverse=True,
)

for object_ in profile[:MAX_RESULTS]:
    print(f"Name: {object_[0]}")
    print(f"Number of objects: {object_[1]}")
    print(f"Total size: {format_size(object_[2])}", end="\n\n")
Example #50
0
def create_model(data):
    """
    Create instance of Pyomo model.

    Return:
        model:      Model object.
        instance:   Problem instance.
        symbol_map: Symbol map created when writing model to a file.
        filename:   Filename that a model instance was written to.
    """
    #
    if data.options.runtime.logging != 'quiet':
        sys.stdout.write('[%8.2f] Creating model\n' %
                         (time.time() - start_time))
        sys.stdout.flush()
    #
    if (pympler_available is True) and (data.options.runtime.profile_memory >=
                                        1):
        global memory_data
        mem_used = muppy.get_size(muppy.get_objects())
        data.local.max_memory = mem_used
        print("   Total memory = %d bytes prior to model construction" %
              mem_used)
    #
    # Find the Model objects
    #
    _models = {}
    for _name, _obj in iteritems(data.local.usermodel.__dict__):
        if isinstance(_obj, Model):
            _models[_name] = _obj
    model_name = data.options.model.object_name
    if len(_models) == 1:
        _name = list(_models.keys())[0]
        if model_name is None:
            model_name = _name
        elif model_name != _name:
            msg = "Model '%s' is not defined in file '%s'!"
            raise SystemExit(msg % (model_name, data.options.model.filename))
    elif len(_models) > 1:
        if model_name is None:
            msg = "Multiple models defined in file '%s'!"
            raise SystemExit(msg % data.options.model.filename)
        elif model_name not in _models:
            msg = "Unknown model '%s' in file '%s'!"
            raise SystemExit(msg % (model_name, data.options.model.filename))

    ep = ExtensionPoint(IPyomoScriptCreateModel)

    if model_name is None:
        if len(ep) == 0:
            msg = "A model is not defined and the 'pyomo_create_model' is not "\
                  "provided in module %s"
            raise SystemExit(msg % data.options.model.filename)
        elif len(ep) > 1:
            msg = 'Multiple model construction plugins have been registered in module %s!'
            raise SystemExit(msg % data.options.model.filename)
        else:
            model_options = data.options.model.options.value()
            model = ep.service().apply(
                options=pyutilib.misc.Container(*data.options),
                model_options=pyutilib.misc.Container(*model_options))
    else:
        if model_name not in _models:
            msg = "Model '%s' is not defined in file '%s'!"
            raise SystemExit(msg % (model_name, data.options.model.filename))
        model = _models[model_name]
        if model is None:
            msg = "'%s' object is 'None' in module %s"
            raise SystemExit(msg % (model_name, data.options.model.filename))
        elif len(ep) > 0:
            msg = "Model construction function 'create_model' defined in "    \
                  "file '%s', but model is already constructed!"
            raise SystemExit(msg % data.options.model.filename)

    #
    # Print model
    #
    for ep in ExtensionPoint(IPyomoScriptPrintModel):
        ep.apply(options=data.options, model=model)

    #
    # Create Problem Instance
    #
    ep = ExtensionPoint(IPyomoScriptCreateDataPortal)
    if len(ep) > 1:
        msg = 'Multiple model data construction plugins have been registered!'
        raise SystemExit(msg)

    if len(ep) == 1:
        modeldata = ep.service().apply(options=data.options, model=model)
    else:
        modeldata = DataPortal()

    if model._constructed:
        #
        # TODO: use a better test for ConcreteModel
        #
        instance = model

    elif len(data.options.data.files) > 1:
        #
        # Load a list of *.dat files
        #
        for file in data.options.data.files:
            suffix = (file).split(".")[-1]
            if suffix != "dat":
                msg = 'When specifying multiple data files, they must all '  \
                      'be *.dat files.  File specified: %s'
                raise SystemExit(msg % str(file))

            modeldata.load(filename=file, model=model)

        instance = model.create_instance(
            modeldata,
            namespaces=data.options.data.namespaces,
            profile_memory=data.options.runtime.profile_memory,
            report_timing=data.options.runtime.report_timing)

    elif len(data.options.data.files) == 1:
        #
        # Load a *.dat file or process a *.py data file
        #
        suffix = (data.options.data.files[0]).split(".")[-1].lower()
        if suffix == "dat":
            instance = model.create_instance(
                data.options.data.files[0],
                namespaces=data.options.data.namespaces,
                profile_memory=data.options.runtime.profile_memory,
                report_timing=data.options.runtime.report_timing)
        elif suffix == "py":
            userdata = pyutilib.misc.import_file(data.options.data.files[0],
                                                 clear_cache=True)
            if "modeldata" in dir(userdata):
                if len(ep) == 1:
                    msg = "Cannot apply 'pyomo_create_modeldata' and use the" \
                          " 'modeldata' object that is provided in the model"
                    raise SystemExit(msg)

                if userdata.modeldata is None:
                    msg = "'modeldata' object is 'None' in module %s"
                    raise SystemExit(msg % str(data.options.data.files[0]))

                modeldata = userdata.modeldata

            else:
                if len(ep) == 0:
                    msg = "Neither 'modeldata' nor 'pyomo_create_dataportal' "  \
                          'is defined in module %s'
                    raise SystemExit(msg % str(data.options.data.files[0]))

            modeldata.read(model)
            instance = model.create_instance(
                modeldata,
                namespaces=data.options.data.namespaces,
                profile_memory=data.options.runtime.profile_memory,
                report_timing=data.options.runtime.report_timing)
        elif suffix == "yml" or suffix == 'yaml':
            try:
                import yaml
            except:
                msg = "Cannot apply load data from a YAML file: PyYaml is not installed"
                raise SystemExit(msg)

            modeldata = yaml.load(open(data.options.data.files[0]))
            instance = model.create_instance(
                modeldata,
                namespaces=data.options.data.namespaces,
                profile_memory=data.options.runtime.profile_memory,
                report_timing=data.options.runtime.report_timing)
        else:
            raise ValueError("Unknown data file type: " +
                             data.options.data.files[0])
    else:
        instance = model.create_instance(
            modeldata,
            namespaces=data.options.data.namespaces,
            profile_memory=data.options.runtime.profile_memory,
            report_timing=data.options.runtime.report_timing)

    #
    modify_start_time = time.time()
    for ep in ExtensionPoint(IPyomoScriptModifyInstance):
        if data.options.runtime.report_timing is True:
            tick = time.time()
        ep.apply(options=data.options, model=model, instance=instance)
        if data.options.runtime.report_timing is True:
            print("      %6.2f seconds to apply %s" %
                  (time.time() - tick, type(ep)))
            tick = time.time()
    #
    for transformation in data.options.transform:
        with TransformationFactory(transformation) as xfrm:
            instance = xfrm.create_using(instance)
            if instance is None:
                raise SystemExit("Unexpected error while applying "
                                 "transformation '%s'" % transformation)
    #
    if data.options.runtime.report_timing is True:
        total_time = time.time() - modify_start_time
        print("      %6.2f seconds required for problem transformations" %
              total_time)

    if logger.isEnabledFor(logging.DEBUG):
        print("MODEL INSTANCE")
        instance.pprint()
        print("")

    for ep in ExtensionPoint(IPyomoScriptPrintInstance):
        ep.apply(options=data.options, instance=instance)

    fname = None
    smap_id = None
    if data.options.model.save_file is not None:

        if data.options.runtime.report_timing is True:
            write_start_time = time.time()

        if data.options.model.save_file is True:
            if data.local.model_format in (ProblemFormat.cpxlp,
                                           ProblemFormat.lpxlp):
                fname = (data.options.data.files[0])[:-3] + 'lp'
            else:
                fname = (data.options.data.files[0])[:-3] + str(
                    data.local.model_format)
            format = data.local.model_format
        else:
            fname = data.options.model.save_file
            format = data.options.model.save_format

        io_options = {}
        if data.options.model.symbolic_solver_labels:
            io_options['symbolic_solver_labels'] = True
        if data.options.model.file_determinism != 1:
            io_options[
                'file_determinism'] = data.options.model.file_determinism
        (fname, smap_id) = instance.write(filename=fname,
                                          format=format,
                                          io_options=io_options)

        if data.options.runtime.logging != 'quiet':
            if not os.path.exists(fname):
                print("ERROR: file " + fname + " has not been created!")
            else:
                print("Model written to file '" + str(fname) + "'")

        if data.options.runtime.report_timing is True:
            total_time = time.time() - write_start_time
            print("      %6.2f seconds required to write file" % total_time)

        if (pympler_available is True) and (data.options.runtime.profile_memory
                                            >= 2):
            print("")
            print("      Summary of objects following file output")
            post_file_output_summary = summary.summarize(muppy.get_objects())
            summary.print_(post_file_output_summary, limit=100)

            print("")

    for ep in ExtensionPoint(IPyomoScriptSaveInstance):
        ep.apply(options=data.options, instance=instance)

    if (pympler_available is True) and (data.options.runtime.profile_memory >=
                                        1):
        mem_used = muppy.get_size(muppy.get_objects())
        if mem_used > data.local.max_memory:
            data.local.max_memory = mem_used
        print("   Total memory = %d bytes following Pyomo instance creation" %
              mem_used)

    return pyutilib.misc.Options(model=model,
                                 instance=instance,
                                 smap_id=smap_id,
                                 filename=fname,
                                 local=data.local)
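
The create_model() function above gates its pympler reporting on the
profile_memory level: level 1 and up prints running byte totals, level 2 and
up also dumps per-type object summaries. A minimal standalone sketch of that
gating, assuming only that pympler may or may not be installed:

try:
    from pympler import muppy, summary
    pympler_available = True
except ImportError:
    pympler_available = False

def report_memory(profile_memory, label=""):
    # level >= 1: running total; level >= 2: per-type object summary
    if pympler_available and profile_memory >= 1:
        print("   Total memory = %d bytes %s"
              % (muppy.get_size(muppy.get_objects()), label))
    if pympler_available and profile_memory >= 2:
        summary.print_(summary.summarize(muppy.get_objects()), limit=100)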
Example #51
0
    def get_summary(self):
        gc.collect()
        # exclude everything in this object itself
        excluded = set(id(o) for o in muppy.get_referents(self))
        return summary.summarize(o for o in muppy.get_objects()
                                 if id(o) not in excluded)
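
Excluding the object's own referents keeps the tracker from counting itself.
Two checkpoints taken with this helper can then be diffed to spot growth; a
hedged sketch (MemWatcher is a hypothetical class exposing get_summary as
defined above):

from pympler import summary

tracker = MemWatcher()
s1 = tracker.get_summary()
leaky = [bytearray(4096) for _ in range(1000)]   # simulated growth
s2 = tracker.get_summary()
summary.print_(summary.get_diff(s1, s2))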
Example #52
0
def estimate_callback(req):
    global image_data
    global state_data
    global imu_data
    global objpoints
    global imgpoints
    global db
    global obs_n
    global cov_n
    global good_corners
    global last_frame_corners
    global goodenough
    global num_step

    rospy.loginfo(rospy.get_caller_id() + ' I heard image %s', len(image_data))
    rospy.loginfo(rospy.get_caller_id() + ' I heard imu %s', len(imu_data))
    rospy.loginfo(rospy.get_caller_id() + ' I heard state %s', len(state_data))
    local_img_data = image_data
    t = 0

    #stop record

    if req.reset == 1:
        #stop record
        #stop_recording()

        #remove history data
        os.chdir(kalibr_path[:-1])
        os.system("rm data.bag")
        os.system("rm data_copy.bag")
        os.system("rm data_tmp.bag")
        # bag = rosbag.Bag('data_tmp.bag', 'w')
        # bag.close()

        #clear all the record data
        del image_data[:]
        del imu_data[:]
        del state_data[:]
        del objpoints[:]
        del imgpoints[:]
        del good_corners[:]
        del db[:]
        obs_n = 0
        last_frame_corners = None
        goodenough = False
        num_step = 0

        #feed back the update
        res = estimateSrvResponse()
        res.par_upd = [0, 0, 0, 0, 0, 0]
        res.obs = 0
        res.coverage = 0

        #start recording
        #start_recording()
        return res

    if req.reset != 1:
        num_step += 1
        rospy.loginfo(rospy.get_caller_id() + ' start estimate')
        res = estimateSrvResponse()
        res.par_upd = [0, 0, 0, 0, 0, 0]
        res.obs = 0
        res.coverage = 0

        # estimation
        # camera intrinsic calibration

        # imgpoints, best_mtx = camera_intrinsic_calibration(req, image_data)
        # best_mtx, ipts, opts, progress = camera_intrinsic_calibration2(
        #     req, image_data)
        # rospy.loginfo(rospy.get_caller_id() + 'I get parameters %s',
        #               best_mtx[0, 0])

        #merge bags
        #stop_recording()
        time.sleep(2)
        merge_bag(num_step)

        #cam imu calibration
        reproj_err = 1
        extrinsic = np.identity(4)
        H_Aopt = 0
        H_eig = np.zeros((6, ))
        # if num_step == 3 or num_step==7:
        #     #call kalibr
        #     os.chdir(kalibr_path[:-1])
        #     try_kalibr_total()
        #     reproj_err, extrinsic = get_kalibr_results()
        #     extrinsic = np.asarray(extrinsic)
        #     H_Aopt, H_Dopt, H_Eopt, H_eig = read_H()

        # if num_step<3:
        #     #get reprojection error and extrinsics
        #     os.chdir(kalibr_path[:-1])
        #     #try_kalibr()
        #     reproj_err, extrinsic = get_kalibr_results()
        #     extrinsic = np.asarray(extrinsic)
        #     # compute observability
        #     H_Aopt, H_Dopt, H_Eopt, H_eig = read_H()

        ########not calculate observability
        # else:
        #     os.system(k_command)

        ##check for leakage
        all_objects = muppy.get_objects()
        sum1 = summary.summarize(all_objects)
        # Prints out a summary of the large objects
        summary.print_(sum1)

        rospy.loginfo(rospy.get_caller_id() + ' ' + 'get kalibr reproj err %s',
                      reproj_err)
        rospy.loginfo(rospy.get_caller_id() + ' ' + 'get kalibr extrinsic %s',
                      extrinsic)

        #process extrinsic
        e_flat = extrinsic[0:3, 0:3].flatten()
        rotation = PyKDL.Rotation(e_flat[0], e_flat[1], e_flat[2], e_flat[3],
                                  e_flat[4], e_flat[5], e_flat[6], e_flat[7],
                                  e_flat[8])
        rpy = np.asarray(rotation.GetRPY())
        position = extrinsic[0:3, 3]
        state = np.concatenate([position.reshape(3, ), rpy.reshape(3, )])

        # compute entropy
        # orientation_x_en, orientation_y_en, orientation_z_en, orientation_w_en, angular_velocity_x_en, angular_velocity_y_en, angular_velocity_z_en, linear_acceleration_x_en, linear_acceleration_y_en, linear_acceleration_z_en = compute_imu_entropy(
        #     req, imu_data)

        # # compute the coverage
        # if len(good_corners) > 0:
        res = estimateSrvResponse()

        # for c, b in good_corners:
        #     imgpoints.append(c)  # just for coverage calculation.
        # rospy.loginfo(rospy.get_caller_id() + 'I get corners %s',
        #               len(imgpoints))

        # ####progress measures camera coverage
        # res.coverage = np.sum(progress) - cov_n
        # cov_n = np.sum(progress)

        # get parameter update
        # compute the observation
        res.obs = H_Aopt * 10000000
        res.par_upd = np.concatenate([state, np.asarray(H_eig) * 10000000])
        #res.coverage = (orientation_x_en+orientation_w_en+orientation_y_en+orientation_z_en)*0.1
        res.coverage = 0
        rospy.loginfo(rospy.get_caller_id() + ' I get par_upd %s', res.par_upd)
        rospy.loginfo(rospy.get_caller_id() + ' I get obs %s', res.obs)
        rospy.loginfo(rospy.get_caller_id() + ' I get coverage %s',
                      res.coverage)

        # res.obs = 1.0 * len(db) / 20.0 - obs_n
        # obs_n = 1.0 * len(db) / 20.0
        # rospy.loginfo(rospy.get_caller_id() + 'I get db %s', len(db))
        # rospy.loginfo(rospy.get_caller_id() + 'I get good corners %s',
        #               len(good_corners))

        #start_recording()
        return res
Example #53
0
def profile_expose_method(profiled_method_wrapper, accept, args, func, kw,
                          exclude_from_memory_profiling):
    """
    Targeted to profile a specific method that wraps HTTP request processing endpoints into a database context.
    :param profiled_method_wrapper: method wrapped around the profiled call, passed in to the memory profiler
    :param accept: param specific to the profiled call
    :param args: args of the function that is being wrapped by the profiled method
    :param func: function that is being wrapped by the profiled method
    :param kw: kwargs of the function that is being wrapped by the profiled method
    :param exclude_from_memory_profiling: when True, skip profiling and call the wrapped method directly
    :return: output of the profiled method, without modification
    """
    if not exclude_from_memory_profiling and get_memory_profile_logging_on() and \
            check_memory_profile_package_wide_disable(func):
        controller_class = args[0].__class__.__name__ if args else ''
        end_point_name_parts = [
            s for s in [func.__module__, controller_class, func.__name__]
            if s != ''
        ]
        end_point_name = ".".join(end_point_name_parts)
        is_pympler_on = _is_pympler_profiling_value_on(end_point_name)
        profile_output = {'output': {}}
        if is_pympler_on:
            all_objects = muppy.get_objects()
            all_objects_summary_before = summary.summarize(all_objects)
        memory_profile = memory_usage(
            (_profile_me, (profile_output, profiled_method_wrapper, func,
                           accept, args, kw), {}),
            interval=0.1)
        output = profile_output['output']
        if is_pympler_on:
            # Re-fetch the live object list so the diff reflects allocations
            # made during the profiled call; summarizing the pre-call list
            # again would miss every newly created object.
            all_objects_summary_after = summary.summarize(muppy.get_objects())
            diff = summary.get_diff(all_objects_summary_before,
                                    all_objects_summary_after)
            diff_less = summary.format_(diff)
            diff_out = ''
            for s in diff_less:
                diff_out += s + '\n'
            thread_log.info(
                "================ PYMPLER OUTPUT <{}> ==============\n{}".
                format(end_point_name, diff_out))
        try:

            message = json.dumps({
                'log_type': 'memory_profile',
                'proc_id': os.getpid(),
                'name': func.__name__,
                'module': func.__module__,
                'mem_profile': memory_profile,
                'min': min(memory_profile),
                'max': max(memory_profile),
                'diff': max(memory_profile) - min(memory_profile),
                'leaked': memory_profile[-1] - memory_profile[0],
                'args': [arg for arg in args[1:]],  # exclude self
                'kwargs': kw
            })
            memory_log.info(message,
                            extra={
                                'controller_module': func.__module__,
                                'controller_class': controller_class,
                                'endpoint': func.__name__
                            })
        except Exception as e:
            thread_log.exception('Logger failed: {}'.format(e))
    else:
        output = profiled_method_wrapper(accept, args, func, kw)
    return output
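
The memory_usage() call above samples process RSS on an interval while the
wrapped endpoint runs, returning a list of MiB readings. The same
(func, args, kwargs) calling convention works standalone; a minimal sketch:

from memory_profiler import memory_usage

def work(n):
    return [bytearray(1024) for _ in range(n)]

samples = memory_usage((work, (100000,), {}), interval=0.1)
print("min %.1f MiB, max %.1f MiB, delta %.1f MiB"
      % (min(samples), max(samples), max(samples) - min(samples)))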
Example #54
0
    def process(self):
        #get printable version of indata
        self.infostr = ""
        for t in self.indata:
            if self.indata[t]:
                self.infostr = self.infostr + "|" + t + ":" + self.indata[t]
        for res in self.config:
            curt = time.time()
            self.linecount = 0
            #check for URL
            if "level" in res:
                #Check IGNORE option
                if "IGNORE" in DataContainer.optionsrec:
                    if res["level"]["path"] in DataContainer.optionsrec[
                            "IGNORE"]:
                        logging.debug("IGNORING LEVEL " + res["level"]["path"])
                        continue

                self.indata["type"] = res["level"]["path"]

                #TODO: not optimal solution: eg. type:instances appears after type:classes in logfile
                self.infostr = self.infostr + "|type:" + self.indata["type"]

                if "RESUME" in DataContainer.optionsrec:
                    #check skip -> We are at the lowest level
                    if not "resources" in res:
                        existing = False
                        if DataContainer.dbrec != None:
                            reddict = dict((k, v)
                                           for k, v in self.indata.iteritems()
                                           if k in DataContainer.fieldnames
                                           and v is not None)
                            logging.debug("Looking for existing resource " +
                                          repr(reddict))
                            #try:
                            #	key=raw_input("ENTER KEY :")
                            #except:
                            #	pass
                            existing = DataContainer.dbrec.findOne(
                                reddict, res["level"]["path"])
                        if existing:
                            logging.warning(
                                "Resume option set, Skipping existing resource "
                                + json.dumps(self.indata))
                            continue

                logging.info("GETTING LEVEL " + res["level"]["path"] + " " +
                             self.infostr)

            if "url" not in res:
                #No URL, we assume that there is exactly one "thing" (Here usually a Repository)
                outdata = copy.deepcopy(self.indata)
                self.processObject(res, outdata)
            else:
                #get response url
                self.zeroResCount = 0
                self.iterateUrl(res, res["url"]["path"])

                #after iteration, do some object counting
                all_objects = muppy.get_objects(include_frames=True)
                sum1 = summary.summarize(all_objects)
                #code.interact(local=locals())
                totalmem = 0
                counter = 0
                for entry in sorted(sum1, key=itemgetter(2), reverse=True):
                    out = ""
                    for data in entry:
                        out = out + "\t" + str(data)
                    totalmem += entry[2]
                    if counter < 15:
                        logging.debug(out)
                    counter += 1

                logging.debug("TOTAL MEM : " + str(totalmem))

                logging.debug(
                    "Process: Memory usage : %s (kb)" %
                    resource.getrusage(resource.RUSAGE_SELF).ru_maxrss)

            logging.info(
                str(self.linecount) + " terms written to DB in " +
                str(int(time.time() - curt)) + " sec. " + self.infostr)
Example #55
0
    def process_response(self, request, response):
        if not self.is_active:
            return False
        if self.track_usage:
            self.tracked = self.tracker.diff()
        self.summary = summary.summarize(muppy.get_objects())
Example #56
0
    def uc_init(self,
                input_file,
                wait: bool = False,
                trace: bool = False,
                verbose: bool = False) -> Tuple[Uc, int, List[int]]:
        """
        Initializes unicorn with the given params
        :param input_file: input file to drop into the emulator with config.init_func
        :param wait: block until state dir becomes available
        :param trace: if we should add trace hooks to unicorn
        :param verbose: enables some more logging
        :return: Tuple of (unicorn, entry_point, exits)
        """
        config = self.config
        uc = Uc(self.arch.unicorn_arch, self.arch.unicorn_mode)

        if trace:
            print("[+] Settings trace hooks")
            uc.hook_add(UC_HOOK_BLOCK, unicorn_debug_block)
            uc.hook_add(UC_HOOK_CODE, unicorn_debug_instruction, self)
            uc.hook_add(
                UC_HOOK_MEM_WRITE | UC_HOOK_MEM_READ | UC_HOOK_MEM_FETCH,
                unicorn_debug_mem_access,
            )

        if wait:
            self.wait_for_probe_wrapper()

        if verbose:
            print("[*] Reading from file {}".format(input_file))

        # we leave out gs_base and fs_base on x64 since they start the forkserver
        self.uc_load_registers(uc)

        # let's see if the user wants a change.
        config.init_func(self, uc)

        # get pc from unicorn state since init_func may have altered it.
        pc = self.uc_read_pc(uc)
        self.map_known_mem(uc)

        exits = self.calculate_exits(pc)
        if not exits:
            raise ValueError(
                "No exits found. Would run forever... Please set an exit address in config.py."
            )

        # On error: map memory, add exits.
        uc.hook_add(UC_HOOK_MEM_UNMAPPED, unicorn_debug_mem_invalid_access,
                    self)

        if os.getenv("UCF_DEBUG_MEMORY"):
            from pympler import muppy, summary

            all_objects = muppy.get_objects()
            sum1 = summary.summarize(all_objects)
            summary.print_(sum1)

        # Last chance to hook before forkserver starts (if running as afl child)
        fork_sleep = os.getenv("UCF_DEBUG_SLEEP_BEFORE_FORK")
        if fork_sleep:
            print("[d] Sleeping. Forkserver will start in {} seconds.".format(
                fork_sleep))
            time.sleep(float(fork_sleep))

        return uc, pc, exits
Example #57
0
    def _muppy_object_summary(self):
        all_objects = muppy.get_objects()
        sum1 = summary.summarize(all_objects)
        summary.print_(sum1)
        sleep(5)
Example #58
0
    def create_instance(self,
                        filename=None,
                        data=None,
                        name=None,
                        namespace=None,
                        namespaces=None,
                        preprocess=False,
                        profile_memory=0,
                        report_timing=False,
                        clone=None):
        """
        Create a concrete instance of an abstract model, possibly using data
        read in from a file.

        Optional:
            filename:           The name of a Pyomo Data File that will be used to load
                                    data into the model.
            data:               A dictionary containing initialization data for the model
                                    to be used if there is no filename
            name:               The name given to the model.
            namespace:          A namespace used to select data.
            namespaces:         A list of namespaces used to select data.
            preprocess:         If True, then preprocess the constructed model.
            profile_memory:     A number that indicates the profiling level.
            report_timing:      Report timing statistics during construction.
            clone:              Force a clone of the model if this is True.
        """
        if self._constructed:
            logger.warning(
                "DEPRECATION WARNING: Cannot call Model.create_instance() on a concrete model."
            )
            return self

        if name is None:
            name = self.name
        if filename is not None:
            data = filename
        if data is None:
            data = {}
        #
        # Generate a warning if this is a concrete model but the filename is specified.
        # A concrete model is already constructed, so passing in a data file is a waste
        # of time.
        #
        if self.is_constructed() and isinstance(filename, basestring):
            msg = "The filename=%s will not be loaded - supplied as an argument to the create_instance() method of a ConcreteModel instance with name=%s." % (
                filename, name)
            logger.warning(msg)
        #
        # If construction is deferred, then clone the model and load the data
        #
        if not self._constructed:
            instance = self.clone()

            if namespaces is None or len(namespaces) == 0:
                instance.load(data,
                              namespaces=[None],
                              profile_memory=profile_memory,
                              report_timing=report_timing)
            else:
                instance.load(data,
                              namespaces=list(namespaces) + [None],
                              profile_memory=profile_memory,
                              report_timing=report_timing)
        else:
            if clone:
                instance = self.clone()
            else:
                instance = self
        #
        # Preprocess the new model
        #
        if preprocess is True:
            print(
                "      Model preprocessing during construction has been deprecated."
            )

        if False and preprocess is True:

            if report_timing is True:
                start_time = time.time()

            instance.preprocess()

            if report_timing is True:
                total_time = time.time() - start_time
                print("      %6.2f seconds required for preprocessing" %
                      total_time)

            if (pympler_available is True) and (profile_memory >= 2):
                mem_used = muppy.get_size(muppy.get_objects())
                print(
                    "      Total memory = %d bytes following instance preprocessing"
                    % mem_used)
                print("")

            if (pympler_available is True) and (profile_memory >= 2):
                print("")
                print(
                    "      Summary of objects following instance preprocessing"
                )
                post_preprocessing_summary = summary.summarize(
                    muppy.get_objects())
                summary.print_(post_preprocessing_summary, limit=100)

        if name is not None:
            instance.name = name
        #
        # Indicate that the model is concrete/constructed
        #
        instance._constructed = True
        return instance
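
A hedged usage sketch of create_instance() on a small abstract model, using
Pyomo's {None: {...}} data-dictionary convention (the model and data values
are illustrative, not taken from the example above):

from pyomo.environ import AbstractModel, Param, Var, Objective, NonNegativeReals

model = AbstractModel()
model.p = Param()
model.x = Var(within=NonNegativeReals)
model.obj = Objective(rule=lambda m: (m.x - m.p) ** 2)

# outer None key selects the default namespace
instance = model.create_instance(data={None: {'p': {None: 3.0}}},
                                 report_timing=True)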
Example #59
0
    def print_memory(self):
        all_objects = muppy.get_objects()
        sum1 = summary.summarize(all_objects)
        # Prints out a summary of the large objects
        summary.print_(sum1)
Example #60
0
def mem():
    objs = muppy.get_objects()
    summ = summary.summarize(objs)
    return '\n'.join(summary.format_(summ)) + '\n'
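
Since mem() returns the whole table as one newline-terminated string, it can
be appended verbatim to a file or log; for example (the path is hypothetical):

with open("memory_snapshots.log", "a") as fh:
    fh.write(mem())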