Esempio n. 1
0
def file_test(rows=500000, cols=50):
    """Create a LAS file, read it back, and report per-read memory usage."""
    print("Creating file with {} rows and {} columns".format(rows, cols))
    path = create_file(rows, cols)
    print("Size of the file: {:.2f} MiB".format(getsize(path) / (1024 * 1024)))
    print("Reading file")
    snapshot_before = summarize(get_objects())
    las = read(path)
    snapshot_after = summarize(get_objects())
    print_(get_diff(snapshot_before, snapshot_after))

    for curve in las.curves:
        print("Name: {}, Min: {:.2f}, Mean: {:.2f}, Max: {:.2f}".format(
            curve.mnemonic, nanmin(curve.data), nanmean(curve.data),
            nanmax(curve.data)))

    del las
    # Re-read and drop the parse three more times to expose leaks
    # across repeated reads of the same file.
    for _ in range(3):
        las = read(path)
        del las
    print("Happy end")
Esempio n. 2
0
    def __exit__(self, exc_type, exc_val, exc_tb):
        """On context exit, warn when memory usage looks excessive.

        Active only when ``self.debug`` is set; measurement failures are
        downgraded to a warning rather than raised.
        """
        if not self.debug:
            return
        try:
            gc.collect()
            end_memory = self.process.memory_info().rss
            net_memory = end_memory - self.start_memory
            gained_too_much = net_memory > 100 * 1000 * 1000
            total_too_big = end_memory > 1000 * 1000 * 1000
            if gained_too_much:
                Log.warning(
                    "MEMORY WARNING (additional {{net_memory|comma}}bytes): " + self.description,
                    default_params=self.params,
                    net_memory=net_memory,
                )
            elif total_too_big:
                Log.warning(
                    "MEMORY WARNING (over {{end_memory|comma}}bytes): " + self.description,
                    default_params=self.params,
                    end_memory=end_memory,
                )
            if gained_too_much or total_too_big:
                # Dump the 30 biggest object categories by total size
                # (index 2 of a pympler summary row).
                from pympler import muppy
                from pympler import summary
                top_rows = sorted(summary.summarize(muppy.get_objects()),
                                  key=lambda row: -row[2])[:30]
                Log.warning("{{data}}", data=top_rows)
        except Exception as e:
            Log.warning("problem in memory measure", cause=e)
Esempio n. 3
0
def func5():
    """Demonstrate the pympler module.

    muppy.get_objects() returns every object currently alive in the
    Python process; calling it at two points in time lets us see which
    objects were created between the two snapshots.

    summary.get_diff() computes the difference between two snapshot
    summaries.

    summary.print_() pretty-prints the objects and the memory they
    occupy.
    """
    # Unused `asizeof` import removed; only muppy and summary are used.
    from pympler import muppy
    from pympler import summary

    print('\nИспользование muppy :')
    all_obj_before = muppy.get_objects()

    # Deliberately allocate objects so the second snapshot differs.
    data = list(range(1000))

    all_obj_after = muppy.get_objects()

    sum1 = summary.summarize(all_obj_before)
    sum2 = summary.summarize(all_obj_after)

    summary.print_(summary.get_diff(sum1, sum2))
Esempio n. 4
0
    def __exit__(self, exc_type, exc_val, exc_tb):
        """Log a warning (plus a pympler object dump) when memory grew.

        Only active when ``self.debug`` is set; any error during the
        measurement itself is downgraded to a warning.
        """
        if self.debug:
            try:
                gc.collect()
                end_memory = self.process.memory_info().rss
                net_memory = end_memory - self.start_memory
                # More than ~100 MB gained while this context was open.
                if net_memory > 100 * 1000 * 1000:
                    Log.warning(
                        "MEMORY WARNING (additional {{net_memory|comma}}bytes): "
                        + self.description,
                        default_params=self.params,
                        net_memory=net_memory)

                    # Dump the 30 biggest object categories by total size
                    # (index 2 of a pympler summary row).
                    from pympler import summary
                    from pympler import muppy
                    sum1 = sorted(summary.summarize(muppy.get_objects()),
                                  key=lambda r: -r[2])[:30]
                    Log.warning("{{data}}", data=sum1)
                # Over ~1 GB resident in total, even if little was gained here.
                elif end_memory > 1000 * 1000 * 1000:
                    Log.warning(
                        "MEMORY WARNING (over {{end_memory|comma}}bytes): " +
                        self.description,
                        default_params=self.params,
                        end_memory=end_memory)

                    from pympler import summary
                    from pympler import muppy
                    sum1 = sorted(summary.summarize(muppy.get_objects()),
                                  key=lambda r: -r[2])[:30]
                    Log.warning("{{data}}", data=sum1)

            except Exception as e:
                Log.warning("problem in memory measure", cause=e)
Esempio n. 5
0
def test_smarts_fast_reset_memory_cleanup(agent_id, seed,
                                          social_agent_scenarios):
    """Many short fast-reset episodes must not grow memory unbounded."""
    agent_type = AgentType.Buddha
    # Warm-up run: initialize globals and confirm smarts works at all.
    _memory_buildup(agent_id, seed, social_agent_scenarios, 1, None,
                    agent_type)

    summary_tracker = tracker.SummaryTracker()
    gc.collect()
    initial_size = muppy.get_size(muppy.get_objects())

    for _ in range(100):
        _memory_buildup(agent_id, seed, social_agent_scenarios, 1, None,
                        agent_type, max_episode_steps=2)

    gc.collect()
    end_size = muppy.get_size(muppy.get_objects())
    gc.collect()
    summary_tracker.print_diff()

    # Fail loudly on a major leak.
    growth = end_size - initial_size
    assert growth < SMARTS_MEMORY_GROWTH_LIMIT, f"End size delta {growth}"
Esempio n. 6
0
def test_env_memory_cleanup(agent_id, seed, primative_scenarios):
    """A second env build-up must stay under the episode memory growth limit."""
    # Run once to initialize globals
    _, action, agent_type = (100, None, AgentType.Buddha)
    _env_memory_buildup(agent_id, seed, primative_scenarios, action,
                        agent_type)
    gc.collect()

    # Memory size check
    size = muppy.get_size(muppy.get_objects())
    gc.collect()
    _env_memory_buildup(agent_id, seed, primative_scenarios, action,
                        agent_type)
    end_size = muppy.get_size(muppy.get_objects())
    gc.collect()

    def success_condition():
        # NOTE: closes over size/end_size measured above; calling it again
        # later does NOT re-measure memory.
        return end_size - size < EPISODE_MEMORY_GROWTH_LIMIT

    if not success_condition():
        # Get a diff for failure case
        tr = tracker.SummaryTracker()
        tr.print_diff()
        _env_memory_buildup(agent_id, seed, primative_scenarios, action,
                            agent_type)
        diff = tr.diff()
        summary.print_(diff)
        diff = None
        gc.collect()
        # This assert always fails here (the closed-over values are
        # unchanged); it exists to attach the delta to the failure message.
        assert success_condition(), f"Size diff {end_size - size}"
Esempio n. 7
0
    def _initialize_component(self, modeldata, namespaces, component_name,
                              profile_memory):
        """Construct one model component from ``modeldata``.

        Applies the declared default (if any), pulls the component's data
        from the first namespace that provides it, then constructs the
        component.  With ``profile_memory`` >= 2 (and pympler available)
        the total object memory is printed after construction, and again
        after a garbage collection at level >= 3.
        """
        declaration = self.component(component_name)

        if component_name in modeldata._default:
            # Set components take their default through the data itself.
            if declaration.type() is not Set:
                declaration.set_default(modeldata._default[component_name])
        data = None

        # First namespace that supplies data for this component wins.
        for namespace in namespaces:
            if component_name in modeldata._data.get(namespace, {}):
                if declaration.type() is Set:
                    data = self._tuplize(
                        modeldata._data[namespace][component_name],
                        declaration)
                else:
                    data = modeldata._data[namespace][component_name]
            if not data is None:
                break

        if __debug__ and logger.isEnabledFor(logging.DEBUG):
            _blockName = "Model" if self.parent_block() is None \
                else "Block '%s'" % self.name
            logger.debug("Constructing %s '%s' on %s from data=%s",
                         declaration.__class__.__name__, declaration.name,
                         _blockName, str(data))
        try:
            declaration.construct(data)
        except:
            # Log context for the failure, then let it propagate.
            err = sys.exc_info()[1]
            logger.error(
                "Constructing component '%s' from data=%s failed:\n%s: %s",
                str(declaration.name),
                str(data).strip(),
                type(err).__name__, err)
            raise

        if __debug__ and logger.isEnabledFor(logging.DEBUG):
            _out = StringIO()
            declaration.pprint(ostream=_out)
            logger.debug("Constructed component '%s':\n%s" %
                         (declaration.name, _out.getvalue()))

        if (pympler_available is True) and (profile_memory >= 2):
            mem_used = muppy.get_size(muppy.get_objects())
            print(
                "      Total memory = %d bytes following construction of component=%s"
                % (mem_used, component_name))

        if (pympler_available is True) and (profile_memory >= 3):
            gc.collect()
            mem_used = muppy.get_size(muppy.get_objects())
            print(
                "      Total memory = %d bytes following construction of component=%s (after garbage collection)"
                % (mem_used, component_name))
Esempio n. 8
0
    def create_summary(self):
        """Return a summary.

        See also the notes on ignore_self in the class as well as the
        initializer documentation.

        """
        if not self.ignore_self:
            res = summary.summarize(muppy.get_objects())
        else:
            # If the user requested the data required to store summaries to be
            # ignored in the summaries, we need to identify all objects which
            # are related to each summary stored.
            # Thus we build a list of all objects used for summary storage as
            # well as a dictionary which tells us how often an object is
            # referenced by the summaries.
            # During this identification process, more objects are referenced,
            # namely int objects identifying referenced objects as well as the
            # corresponding count.
            # For all these objects it will be checked whether they are
            # referenced from outside the monitor's scope. If not, they will be
            # subtracted from the snapshot summary, otherwise they are
            # included (as this indicates that they are relevant to the
            # application).

            all_of_them = []  # every single object
            ref_counter = {}  # how often it is referenced; (id(o), o) pairs

            def store_info(o):
                # Record o and bump its per-id reference count.
                all_of_them.append(o)
                if id(o) in ref_counter:
                    ref_counter[id(o)] += 1
                else:
                    ref_counter[id(o)] = 1

            # store infos on every single object related to the summaries
            store_info(self.summaries)
            for k, v in self.summaries.items():
                store_info(k)
                summary._traverse(v, store_info)

            # do the summary
            res = summary.summarize(muppy.get_objects())

            # remove ids stored in the ref_counter
            for _id in ref_counter:
                # referenced in frame, ref_counter, ref_counter.keys()
                if len(gc.get_referrers(_id)) == (3):
                    summary._subtract(res, _id)
            for o in all_of_them:
                # referenced in frame, summary, all_of_them
                if len(gc.get_referrers(o)) == (ref_counter[id(o)] + 2):
                    summary._subtract(res, o)

        return res
Esempio n. 9
0
    def create_summary(self):
        """Return a summary.

        See also the notes on ignore_self in the class as well as the
        initializer documentation.

        """
        if not self.ignore_self:
            res = summary.summarize(muppy.get_objects())
        else:
            # If the user requested the data required to store summaries to be
            # ignored in the summaries, we need to identify all objects which
            # are related to each summary stored.
            # Thus we build a list of all objects used for summary storage as
            # well as a dictionary which tells us how often an object is
            # referenced by the summaries.
            # During this identification process, more objects are referenced,
            # namely int objects identifying referenced objects as well as the
            # corresponding count.
            # For all these objects it will be checked whether they are
            # referenced from outside the monitor's scope. If not, they will be
            # subtracted from the snapshot summary, otherwise they are
            # included (as this indicates that they are relevant to the
            # application).

            all_of_them = []  # every single object
            ref_counter = {}  # how often it is referenced; (id(o), o) pairs

            def store_info(o):
                # Record o and bump its per-id reference count.
                all_of_them.append(o)
                if id(o) in ref_counter:
                    ref_counter[id(o)] += 1
                else:
                    ref_counter[id(o)] = 1

            # store infos on every single object related to the summaries
            store_info(self.summaries)
            for k, v in self.summaries.items():
                store_info(k)
                summary._traverse(v, store_info)

            # do the summary
            res = summary.summarize(muppy.get_objects())

            # remove ids stored in the ref_counter
            for _id in ref_counter:
                # referenced in frame, ref_counter, ref_counter.keys()
                if len(gc.get_referrers(_id)) == (3):
                    summary._subtract(res, _id)
            for o in all_of_them:
                # referenced in frame, summary, all_of_them
                if len(gc.get_referrers(o)) == (ref_counter[id(o)] + 2):
                    summary._subtract(res, o)

        return res
Esempio n. 10
0
 def test_print_diff(self):
     """A summary diff can be printed without raising."""
     try:
         # Redirect stdout to a sink while printing; restore afterwards.
         self._stdout = sys.stdout
         sys.stdout = self.DevNull()
         first = summary.summarize(muppy.get_objects())
         second = summary.summarize(muppy.get_objects())
         summary.print_(summary.get_diff(first, second))
     finally:
         sys.stdout = self._stdout
Esempio n. 11
0
 def test_print_diff(self):
     """Test summary can be printed."""
     try:
         # Silence stdout while print_ runs; restore it in finally.
         self._stdout = sys.stdout
         sys.stdout = self.DevNull()
         sum1 = summary.summarize(muppy.get_objects())
         sum2 = summary.summarize(muppy.get_objects())
         sumdiff = summary.get_diff(sum1, sum2)
         summary.print_(sumdiff)
     finally:
         sys.stdout = self._stdout
Esempio n. 12
0
 def test_ignore_frame(self):
     """Test that get_objects() creates no reference cycles.

     After dropping the returned list, a full collection should find
     nothing to free — both with and without frame objects included.
     """
     gc.collect()
     gc.disable()
     try:
         objs = muppy.get_objects()
         del objs
         self.assertEqual(gc.collect(), 0)
         objs = muppy.get_objects(include_frames=True)
         del objs
         self.assertEqual(gc.collect(), 0)
     finally:
         # Re-enable automatic collection even if an assertion fails;
         # otherwise a failure here poisons every later test.
         gc.enable()
Esempio n. 13
0
 def test_ignore_frame(self):
     """Test that get_objects() creates no reference cycles.

     After dropping the returned list, a full collection should find
     nothing to free — both with and without frame objects included.
     """
     gc.collect()
     gc.disable()
     try:
         objs = muppy.get_objects()
         del objs
         self.assertEqual(gc.collect(), 0)
         objs = muppy.get_objects(include_frames=True)
         del objs
         self.assertEqual(gc.collect(), 0)
     finally:
         # Re-enable automatic collection even if an assertion fails;
         # otherwise a failure here poisons every later test.
         gc.enable()
Esempio n. 14
0
def test_smarts_basic_memory_cleanup(agent_id, seed, primative_scenarios,
                                     agent_params):
    """A repeat smarts run must not leak a large amount of memory."""
    # Run once to initialize globals and test to see if smarts is working
    _memory_buildup(agent_id, seed, primative_scenarios, 100, agent_params[1],
                    agent_params[2])

    gc.collect()
    initial_size = muppy.get_size(muppy.get_objects())

    _memory_buildup(agent_id, seed, primative_scenarios, *agent_params)
    # Collect garbage before measuring, as the sibling *_memory_cleanup
    # tests do; otherwise uncollected cycles inflate the measurement.
    gc.collect()
    end_size = muppy.get_size(muppy.get_objects())
    # Check for a major leak
    assert (end_size - initial_size < SMARTS_MEMORY_GROWTH_LIMIT
            ), f"End size delta {end_size - initial_size}"
Esempio n. 15
0
 def test_print_diff(self):
     """Printed diffs show plain type names, not "<class 'str" reprs."""
     try:
         self._stdout = sys.stdout
         buffer = StringIO()
         sys.stdout = buffer
         before = summary.summarize(muppy.get_objects())
         after = summary.summarize(muppy.get_objects())
         summary.print_(summary.get_diff(before, after))
         printed = buffer.getvalue()
         self.assertIn('str', printed)
         self.assertNotIn("<class 'str", printed)
     finally:
         sys.stdout = self._stdout
Esempio n. 16
0
def memusage_before_n_after(fun, *args, **kwargs):
    """Run *fun* and report its memory and time usage.

    Summarizes all live objects before and after the call, prints the
    wall-clock execution time and the object-summary diff, and returns
    the function's result together with that diff.

    Returns:
        tuple: (fun's return value, summary diff)
    """
    from pympler import muppy
    from pympler import summary
    from datetime import datetime

    before = summary.summarize(muppy.get_objects())
    before_time = datetime.now()
    fun_ret = fun(*args, **kwargs)
    after_time = datetime.now()
    after = summary.summarize(muppy.get_objects())
    diff = summary.get_diff(before, after)
    # Was a Python 2 print statement (a syntax error on Python 3);
    # the function form produces the same output.
    print("execution time: ", after_time - before_time)
    summary.print_(diff)

    return fun_ret, diff
Esempio n. 17
0
    def _initialize_component(self, modeldata, namespaces, component_name, profile_memory):
        """Construct one model component from ``modeldata``.

        Applies the declared default (if any), pulls the component's data
        from the first namespace that provides it, then constructs the
        component.  With ``profile_memory`` >= 2 (and pympler available)
        the total object memory is printed after construction, and again
        after a garbage collection at level >= 3.
        """
        declaration = self.component(component_name)

        if component_name in modeldata._default:
            # Set components take their default through the data itself.
            if declaration.type() is not Set:
                declaration.set_default(modeldata._default[component_name])
        data = None

        # First namespace that supplies data for this component wins.
        for namespace in namespaces:
            if component_name in modeldata._data.get(namespace,{}):
                if declaration.type() is Set:
                    data = self._tuplize(modeldata._data[namespace][component_name],
                                         declaration)
                else:
                    data = modeldata._data[namespace][component_name]
            if not data is None:
                break

        if __debug__ and logger.isEnabledFor(logging.DEBUG):
            _blockName = "Model" if self.parent_block() is None \
                else "Block '%s'" % self.name
            logger.debug( "Constructing %s '%s' on %s from data=%s",
                          declaration.__class__.__name__,
                          declaration.name, _blockName, str(data) )
        try:
            declaration.construct(data)
        except:
            # Log context for the failure, then let it propagate.
            err = sys.exc_info()[1]
            logger.error(
                "Constructing component '%s' from data=%s failed:\n    %s: %s",
                str(declaration.name), str(data).strip(),
                type(err).__name__, err )
            raise

        if __debug__ and logger.isEnabledFor(logging.DEBUG):
                _out = StringIO()
                declaration.pprint(ostream=_out)
                logger.debug("Constructed component '%s':\n    %s"
                             % ( declaration.name, _out.getvalue()))

        if (pympler_available is True) and (profile_memory >= 2):
            mem_used = muppy.get_size(muppy.get_objects())
            print("      Total memory = %d bytes following construction of component=%s" % (mem_used, component_name))

        if (pympler_available is True) and (profile_memory >= 3):
            gc.collect()
            mem_used = muppy.get_size(muppy.get_objects())
            print("      Total memory = %d bytes following construction of component=%s (after garbage collection)" % (mem_used, component_name))
def memory_summary():
    """Log a one-shot summary of all live objects and their memory use."""
    from pympler import muppy, summary

    snapshot = summary.summarize(muppy.get_objects())
    logger.info("\n".join(summary.format_(snapshot)))
Esempio n. 19
0
def dump_state():
    """Print an object-memory summary now and reschedule in 10 seconds."""
    loop = get_event_loop()
    print(datetime.now())
    snapshot = summary.summarize(muppy.get_objects())
    summary.print_(snapshot, limit=100)
    loop.call_later(10, dump_state)
Esempio n. 20
0
 def summary(cls):
     """Return (and log) a formatted total-memory stats string.

     Note: the bare name ``summary`` inside this body resolves to the
     pympler module at global scope, not to this method.
     """
     lg = InsightLogger.InsightLogger.get_logger('MemTracker',
                                                 'MemTracker.log')
     report = '=====Total Memory Stats=====\n' + cls.str_print_(
         summary.summarize(muppy.get_objects()), limit=25)
     lg.info("Admin requested memory summary\n\n{}".format(report))
     return report
Esempio n. 21
0
            def consumer(inQ, outQ):
                """Worker loop: read values from inQ, apply f, put results on outQ.

                A ``None`` message is the termination ('TERM') signal.
                Closes over ``f`` and ``args`` from the enclosing scope.
                """
                while True:
                    try:
                        # get a new message
                        val = inQ.get()
                        # this is the 'TERM' signal
                        if val is None:
                            break
                        # process the data
                        ret = f(val)

                        if args.debug:
                            # Dump a full object summary plus a tracker diff
                            # after each processed message.
                            from pympler import summary
                            from pympler import muppy
                            all_objects = muppy.get_objects()
                            sum1 = summary.summarize(all_objects)
                            print("summary:")
                            summary.print_(sum1)
                            from pympler import tracker
                            tr = tracker.SummaryTracker()
                            print("diff:")
                            tr.print_diff()

                        outQ.put(ret)
                    except Exception as e:
                        # Any failure terminates this worker.
                        print("error!", e)
                        break
Esempio n. 22
0
def fileCloseEnd(cntlrWinMain, filename):
    """Record object-count growth after a file close (memory-leak check)."""
    if __testContext.checkMemoryOnClose:
        gc.collect()
        try:
            # Probe for the counters; the except branch initializes them
            # on the first call.
            xx = cntlrWinMain.prevNumObjects
            cntlrWinMain.dumpIdx
        except:
            cntlrWinMain.prevNumObjects = 0
            cntlrWinMain.dumpIdx = 0
        cntlrWinMain.dumpIdx += 1
        all_objects = muppy.get_objects()
        numObjects = len(all_objects)
        diffObjects = numObjects - cntlrWinMain.prevNumObjects
        cntlrWinMain.prevNumObjects = numObjects
        print("numObjects=" + str(numObjects) + " (" + str(diffObjects) + " more)")
        # Disabled debugging aid: dump every live object to a numbered file.
        if False:
            with open(__testContext.dumpFilePrefix + str(cntlrWinMain.dumpIdx) + ".txt", "w") as text_file:
                idx = 0
                for o in all_objects:
                    idx += 1
                    otype = ""
                    try:
                        otype = str(type(o))
                    except:
                        pass
                    try:
                        # Some objects raise from __str__; skip those.
                        print("type=" + otype + " " + str(o), file=text_file)
                    except:
                        pass
            all_objects =  None
            gc.collect()
            print(numObjects)
            print("End of close " + filename)
        __testContext.diffNumObjects = diffObjects
Esempio n. 23
0
def test_smarts_episode_memory_cleanup(agent_id, seed, primative_scenarios,
                                       agent_params):
    """Memory must not grow beyond the limit between sampled episodes."""
    MAX_EPISODE_STEPS = 100
    EPISODE_COUNT = 100
    STEPS_PER_YIELD = 10

    _, action, agent_type = agent_params

    env_and_agent_spec = env_and_spec(action, agent_type, MAX_EPISODE_STEPS,
                                      primative_scenarios, seed, agent_id)

    size = 0
    last_size = 0
    gc.collect()
    tr = tracker.SummaryTracker()
    try:
        # The generator yields every STEPS_PER_YIELD-th episode.
        for current_episode in _every_nth_episode(
                agent_id,
                EPISODE_COUNT,
                env_and_agent_spec,
                steps_per_yield=STEPS_PER_YIELD):
            gc.collect()
            all_objects = muppy.get_objects()
            size = muppy.get_size(all_objects)
            tr.print_diff(summary.summarize(all_objects))
            print(flush=True)
            # Drop the snapshot so it does not count in the next one.
            all_objects = None
            # Skip the first yield: startup allocations are expected.
            if current_episode > STEPS_PER_YIELD:
                assert (size - last_size < EPISODE_MEMORY_GROWTH_LIMIT
                        ), f"End size delta {size - last_size}"
            last_size = size
    finally:
        # Always close the env, even on assertion failure.
        env_and_agent_spec[0].close()
Esempio n. 24
0
def dump_objs():
    """Append a muppy object summary and a tracker diff to obj_log.txt."""
    global TRACKER
    if TRACKER is None:
        TRACKER = tracker.SummaryTracker()

    with open("obj_log.txt", "a") as fp:
        fp.write("Memory at {}\n".format(str(datetime.datetime.now())))
        try:
            lines = summary.format_(summary.summarize(muppy.get_objects()))
            fp.write("Summary:\n")
            for line in lines:
                fp.write("\t{}\n".format(line))
        except Exception:
            fp.write("Error: \n")
            fp.write(traceback.format_exc())

        try:
            diff_lines = TRACKER.format_diff()
            fp.write("Diff:\n")
            for line in diff_lines:
                fp.write("\t{}\n".format(line))
        except Exception:
            fp.write("Error: \n")
            fp.write(traceback.format_exc())

        fp.write("\n")
Esempio n. 25
0
 def memory_profiler(self):
     """Return a dict with a formatted summary of all live objects.

     The summary is truncated to LIMIT_OBJECTS_FOR_PROFILER rows.
     """
     all_objects = muppy.get_objects()
     stats = summary.summarize(all_objects)
     # list(...) instead of a copying comprehension over the iterable.
     return {
         'Memory_profiler':
         list(summary.format_(stats, LIMIT_OBJECTS_FOR_PROFILER))
     }
Esempio n. 26
0
def test_smarts_social_agent_scenario_memory_cleanup(agent_id, seed,
                                                     social_agent_scenarios,
                                                     agent_type):
    """A longer social-agent run must stay under the growth limit."""
    # Warm-up: initialize globals and verify smarts runs at all.
    _memory_buildup(agent_id, seed, social_agent_scenarios, 1, *agent_type)

    gc.collect()
    initial_size = muppy.get_size(muppy.get_objects())

    _memory_buildup(agent_id, seed, social_agent_scenarios, 100, *agent_type)

    gc.collect()
    end_size = muppy.get_size(muppy.get_objects())

    growth = end_size - initial_size
    assert growth < SMARTS_MEMORY_GROWTH_LIMIT, f"End size delta {growth}"
Esempio n. 27
0
 def process_response(self, request, response):
     """Log memory growth and object sizes for non-static requests.

     Ported from Python 2 ``print`` statements (a syntax error on
     Python 3) to the function form; output is unchanged.  Dead
     commented-out refbrowser code was removed.
     """
     req = request.META['PATH_INFO']
     # Only profile real page views, not static assets or media files.
     if req.find('static') == -1 and req.find('media') == -1:
         print(req)
         self.end_objects = muppy.get_objects()
         sum_start = summary.summarize(self.start_objects)
         sum_end = summary.summarize(self.end_objects)
         diff = summary.get_diff(sum_start, sum_end)
         summary.print_(diff)
         print('~~~~~~~~~')
         a = asizeof(response)
         print('Total size of response object in kB: %s' %
               str(a / 1024.0))
         print('~~~~~~~~~')
         a = asizeof(self.end_objects)
         print('Total size of end_objects in MB: %s' %
               str(a / 1048576.0))
         b = asizeof(self.start_objects)
         print('Total size of start_objects in MB: %s' %
               str(b / 1048576.0))
         print('~~~~~~~~~')
     return response
Esempio n. 28
0
    def on_epoch_end(self, epoch, log=None):
        """Dump memory diagnostics when resident set size grows too large.

        ``log`` is accepted for callback-signature compatibility but is
        unused; the mutable default ``log={}`` was replaced with ``None``
        so one dict is not shared across calls.
        """
        x = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
        web_browser_debug = True
        print(x)

        if x > 40000:
            if web_browser_debug:
                if epoch == 0:
                    start_in_background()
                    tr = tracker.SummaryTracker()
                    tr.print_diff()
            else:
                global memlist
                all_objects = muppy.get_objects(include_frames=True)
                sum1 = summary.summarize(all_objects)
                memlist.append(sum1)
                summary.print_(sum1)
                if len(memlist) > 1:
                    # compare with last - prints the difference per epoch
                    diff = summary.get_diff(memlist[-2], memlist[-1])
                    summary.print_(diff)
                # NOTE(review): types.ClassType exists only on Python 2;
                # on Python 3 this line raises AttributeError — confirm
                # the intended interpreter before relying on this branch.
                my_types = muppy.filter(all_objects, Type=types.ClassType)

                for t in my_types:
                    print(t)
Esempio n. 29
0
def check_leakage():
    """Print a summary of all live objects, then force a collection."""
    snapshot = summary.summarize(muppy.get_objects())
    # Prints out a summary of the large objects
    summary.print_(snapshot)
    gc.collect()
Esempio n. 30
0
    def _capture_snapshot(self):
        # type: () -> None
        """Record one aggregated object summary plus a diff since last call."""
        capture_time = int(time.time())

        # 1. Aggregate view of all (filtered) live objects.
        objects = self._filter_muppy_objects(muppy.get_objects())
        formatted = summary.format_(summary.summarize(objects), limit=50)
        self._profiling_data.append({
            "timestamp": capture_time,
            "data": list(formatted),
            "type": "aggregated",
        })

        # 2. Delta against the previous tracker snapshot.
        self._profiling_data.append({
            "timestamp": capture_time,
            "data": list(self._tracker.format_diff()),
            "type": "diff",
        })
Esempio n. 31
0
def apply_postprocessing(data, instance=None, results=None):
    """
    Apply post-processing steps.

    Required:
        instance:   Problem instance.
        results:    Optimization results object.
    """
    if data.options.runtime.logging != 'quiet':
        sys.stdout.write('[%8.2f] Applying Pyomo postprocessing actions\n' % (time.time()-start_time))
        sys.stdout.flush()

    # options are of type ConfigValue, not raw strings / atomics.
    for config_value in data.options.postprocess:
        module = pyutilib.misc.import_file(config_value, clear_cache=True)
        hook = getattr(module, "pyomo_postprocess", None)
        if hook is not None:
            hook(data.options, instance, results)

    for ep in ExtensionPoint(IPyomoScriptPostprocess):
        ep.apply(options=data.options, instance=instance, results=results)

    # Track the peak memory footprint when memory profiling is enabled.
    if (pympler_available is True) and (data.options.runtime.profile_memory >= 1):
        mem_used = muppy.get_size(muppy.get_objects())
        if mem_used > data.local.max_memory:
            data.local.max_memory = mem_used
        print("   Total memory = %d bytes upon termination" % mem_used)
Esempio n. 32
0
def apply_postprocessing(data, instance=None, results=None):
    """
    Apply post-processing steps.

    Required:
        instance:   Problem instance.
        results:    Optimization results object.
    """
    quiet = data.options.runtime.logging == 'quiet'
    if not quiet:
        elapsed = time.time() - start_time
        sys.stdout.write('[%8.2f] Applying Pyomo postprocessing actions\n' % elapsed)
        sys.stdout.flush()

    # options are of type ConfigValue, not raw strings / atomics.
    for config_value in data.options.postprocess:
        mod = pyutilib.misc.import_file(config_value, clear_cache=True)
        if "pyomo_postprocess" in dir(mod):
            mod.pyomo_postprocess(data.options, instance, results)

    for ep in ExtensionPoint(IPyomoScriptPostprocess):
        ep.apply(options=data.options, instance=instance, results=results)

    # Report and record peak memory usage if profiling is enabled.
    profiling = (pympler_available is True
                 and data.options.runtime.profile_memory >= 1)
    if profiling:
        mem_used = muppy.get_size(muppy.get_objects())
        data.local.max_memory = max(data.local.max_memory, mem_used)
        print("   Total memory = %d bytes upon termination" % mem_used)
Esempio n. 33
0
    def printMemorySummary(self):
        """Write a memory report: total object count, all class-type
        objects, and the tracker's diff since the last call.

        NOTE: Python 2 only (`types.ClassType` and the `print` statement
        do not exist in Python 3).
        """
        all_objects = muppy.get_objects()
        out = "Total_NumObjects=" + str(len(all_objects)) + "\n"
        self.write(out)

        # Filter out certain types of objects (old-style classes only).
        out = None
        types1 = muppy.filter(all_objects, Type=types.ClassType)
        out = "Num_Type=" + str(len(types1)) + "\n"
        for t in types1:
            out += str(t)
            out += "\n"

        self.write(out)
        out = None

        # Compare summary of memory (disabled: summary-diff variant kept
        # for reference; the tracker below provides the same information).
        #         sumCurr = summary.summarize(all_objects)
        #         if (self.sumPrev):
        #             diff = summary.get_diff(sumCurr, self.sumPrev)
        #             summary.print_(diff)
        #             #self.write(str(diff))
        #         self.sumPrev = sumCurr
        #

        # Print object-count changes since the previous invocation.
        self.tr.print_diff()

        print "memory.summary.done"
Esempio n. 34
0
    def generate_loop(dump_mem):
        """Endlessly yield (input_patch, output_patch) training pairs.

        Samples random crops from randomly scaled/rotated image pairs,
        drops patches with empty labels, balances batches toward
        informative outputs, and optionally dumps a pympler memory
        summary each cycle (``dump_mem``). On MemoryError the main
        thread is interrupted.

        Closure dependencies (defined in the enclosing scope):
        classes, prep_in/prep_out, load_classes, size, shutdown,
        out_channels, every_flip, test_angle, rotate_f, rescale,
        gen_pos.
        """
        try:
            #raise MemoryError()
            # Allow a single class spec to stand for both input and output.
            class_tuple = classes if isinstance(classes, tuple) else (classes,
                                                                      classes)
            inputs = list(map(prep_in, load_classes('input/', class_tuple[0])))
            outputs = list(
                map(prep_out, load_classes('output/', class_tuple[1])))
            # Fixed seed for reproducible sampling order.
            random.seed(1)
            area = size[0] * size[1]
            while not shutdown:
                results = []
                metrics = []
                # msum accumulates per-channel label mass for balancing.
                msum = np.ones(out_channels)
                while len(results) < 20:
                    n = random.randint(0, len(inputs) - 1)
                    i = inputs[n]
                    o = outputs[n]
                    # Random scale in roughly [3^-0.6, 3^0.4].
                    scale = 3.0**(0.4 - random.random())
                    if scale * i.shape[0] > size[0] and scale * i.shape[
                            1] > size[1] and random.random() > 0.5:
                        i = rescale(i, scale)
                        o = rescale(o, scale)
                    i_shape = i.shape
                    # Random rotation; shrink the angle until crops still fit.
                    a = -30 + 60.0 * random.random()
                    while not test_angle(a, i_shape, size):
                        a *= 0.5
                    i = rotate_f(i, a)
                    o = rotate_f(o, a)
                    count = int(i_shape[0] * i_shape[1] / area * 3)
                    for p in gen_pos(a, i_shape, size, count):
                        x, y = p[1], p[0]
                        ip = i[y:y + size[0], x:x + size[1], :]
                        op = o[y:y + size[0], x:x + size[1], :]
                        # Random horizontal flip (unless both flips are
                        # yielded explicitly below).
                        if not every_flip and random.randint(0, 10) > 5:
                            ip = np.flip(ip, 1)
                            op = np.flip(op, 1)
                        m = np.sum(op, axis=(0, 1))
                        # Skip patches whose label is entirely empty.
                        if m.sum() == 0:
                            continue
                        msum += m
                        metrics.append((len(results), m))
                        results.append((ip, op))
                # Prefer patches with the most (relative) foreground mass.
                metrics = sorted(metrics,
                                 key=lambda m: -np.sum(
                                     (m[1] / msum)[:out_channels - 1]))
                metrics = metrics[:int(len(metrics) *
                                       0.5)]  # Reduce number of empty outputs
                random.shuffle(metrics)
                for a in metrics:
                    r = results[a[0]]
                    yield r
                    if every_flip:
                        yield np.flip(r[0], 1), np.flip(r[1], 1)

                if dump_mem:
                    summary.print_(summary.summarize(muppy.get_objects()))
        except MemoryError:
            print('Memory error...')
            _thread.interrupt_main()
Esempio n. 35
0
def memory_summary():
    """Return a pympler object summary as one newline-joined string."""
    # Only import Pympler when we need it. We don't want it to
    # affect our process if we never call memory_summary.
    from pympler import summary, muppy
    lines = summary.format_(summary.summarize(muppy.get_objects()))
    return '\n'.join(lines)
Esempio n. 36
0
 def printListingUsage(self, args):
     """Print a full pympler object summary followed by a tracker diff."""
     summary.print_(summary.summarize(muppy.get_objects()))
     print(" ")
     print("Summary: ")
     # A freshly-created tracker diffs against its construction snapshot.
     diff_tracker = tracker.SummaryTracker()
     diff_tracker.print_diff()
async def log_object_summary(interval: float):
    """Every ``interval`` seconds, log the 20 largest object types."""
    # Import lazily so pympler is only loaded when profiling is enabled.
    from pympler import muppy, summary

    while True:
        await asyncio.sleep(interval)
        report = summary.summarize(muppy.get_objects())
        rows = summary.format_(report, limit=20)
        logging.info('top objects:\n%s', '\n'.join(rows))
 def _get_stats():
     """Return a plain-text memory report as a WSGI-style response tuple.

     Returns:
         (body, 200, headers) where *body* contains the human-readable
         pympler summary of the 20 largest object types followed by the
         same rows serialized as JSON.
     """
     from pympler import muppy, summary
     top_rows = summary.summarize(muppy.get_objects())[0:20]
     # Fix: the original bound the formatted text to a local named
     # ``summary``, shadowing the pympler module; use a distinct name.
     text = '\n'.join(summary.format_(top_rows))
     result = '%s\n\n%s' % (text, json.dumps(top_rows))
     return result, 200, {'content-type': 'text/plain'}
Esempio n. 39
0
def test_smarts_repeated_runs_memory_cleanup(agent_id, seed,
                                             primative_scenarios, agent_type):
    """Check that 100 repeated SMARTS runs do not grow memory beyond the limit."""
    # Warm-up run: initialize globals and verify smarts works at all.
    _memory_buildup(agent_id, seed, primative_scenarios, 1, *agent_type)

    gc.collect()
    baseline = muppy.get_size(muppy.get_objects())

    for _ in range(100):
        _memory_buildup(agent_id, seed, primative_scenarios, 1, *agent_type)

    gc.collect()
    final = muppy.get_size(muppy.get_objects())
    growth = final - baseline

    # This "should" be roughly the same as `test_smarts_basic_memory_cleanup`
    assert growth < SMARTS_MEMORY_GROWTH_LIMIT, f"End size delta {growth}"
Esempio n. 40
0
def memory_usage(where):
    """
    Print out a basic summary of memory usage.

    ``where`` is a free-form label identifying the call site.
    """
    print("Memory summary:", where)
    summary.print_(summary.summarize(muppy.get_objects()), limit=2)
    print("VM: %.2fMb" % (get_virtual_memory_usage_kb() / 1024.0))
Esempio n. 41
0
 def handle_signal_abort(self, signum, frame):
     """On abort signal: log the current usage and the delta since last time."""
     Log.warn("Someone want to kill me! But I'll not die now! Hahahaha!")
     current = summary.summarize(muppy.get_objects())
     Log.debug("Current memory usage:")
     summary.print_(current)
     Log.debug("New memory usage:")
     summary.print_(summary.get_diff(self.mem_sum, current))
     # Remember this snapshot as the baseline for the next signal.
     self.mem_sum = current
Esempio n. 42
0
 def test_untracked_containers(self):
     """Test whether untracked container objects are detected.
     """
     inner = {}
     outer = {'untracked': inner}
     # The empty dict is not GC-tracked, but its container is.
     self.assertTrue(gc.is_tracked(outer))
     self.assertFalse(gc.is_tracked(inner))
     found_ids = [id(obj) for obj in muppy.get_objects()]
     self.assertTrue(id(inner) in found_ids)
Esempio n. 43
0
    def sig_usr(self, a, b):
        """Signal handler: dump per-thread stacks, held locks, and (if
        pympler is available) a memory summary to the log.

        ``a``/``b`` are the (signum, frame) pair passed by the signal
        machinery; both are ignored.
        """
        import threading
        import gc

        held_locks = {}
        code = {}
        curthreads = threading.enumerate()

        # Map each frame's thread id to a human-readable thread name.
        for threadId, stack in sys._current_frames().items():
            name = str(threadId)
            for ct in curthreads:
                if ct.ident == threadId:
                    name = ct.name

            # Collect a formatted traceback for this thread.
            code[name] = ["NAME: %s" % name]
            for filename, lineno, fname, line in traceback.extract_stack(
                    stack):
                code[name].append('FILE: "%s", line %d, in %s' %
                                  (filename, lineno, fname))
                if line:
                    code[name].append("  %s" % (line.strip()))

            # Record which global locks this thread holds (writer or reader).
            held_locks[name] = ""
            for lock in alllocks:
                if lock.writer_id == threadId:
                    held_locks[name] += ("%s(w)" % lock.name)
                    continue
                for reader_id, reader_stack in lock.reader_stacks:
                    if reader_id == threadId:
                        held_locks[name] += ("%s(r)" % lock.name)

        for k in code:
            log.info('\n\nLOCKS: %s \n%s' %
                     (held_locks[k], '\n'.join(code[k])))

        # Dump the stack captured at each lock acquisition.
        log.info("\n\nSTACKS:")
        for lock in alllocks:
            for (reader_id, reader_stack) in lock.reader_stacks:
                log.info("Lock %s (%s readers)" % (lock.name, lock.readers))
                log.info("Lock reader (thread %s):" % (reader_id, ))
                log.info(''.join(reader_stack))

            for writer_stack in lock.writer_stacks:
                log.info("Lock %s (%s readers)" % (lock.name, lock.readers))
                log.info("Lock writer (thread %s):" % (lock.writer_id, ))
                log.info(''.join(writer_stack))

        # Flush persistent state and collect garbage before summarizing.
        self.shelf.sync()
        gc.collect()

        # If we've got pympler installed, output a summary of memory usage.

        try:
            from pympler import summary, muppy
            summary.print_(summary.summarize(muppy.get_objects()))
        except:
            pass
Esempio n. 44
0
	def memory_summary(self):
		"""Log a pympler memory summary together with the caller's name."""
		# Only import Pympler when we need it. We don't want it to
		# affect our process if we never call memory_summary.
		from pympler import summary, muppy

		caller = sys._getframe(1).f_code.co_name  # So we can reference the caller
		rows = summary.format_(summary.summarize(muppy.get_objects()))
		indigo.server.log('\n\nCALLED BY: ' + caller + '\n\n' + '\n'.join(rows))
Esempio n. 45
0
    def sig_usr(self, a, b):
        """Signal handler: dump per-thread stacks, held locks, and (if
        pympler is available) a memory summary to the log.

        ``a``/``b`` are the (signum, frame) pair passed by the signal
        machinery; both are ignored.
        """
        import threading
        import gc

        held_locks = {}
        code = {}
        curthreads = threading.enumerate()

        # Map each frame's thread id to a human-readable thread name.
        for threadId, stack in sys._current_frames().items():
            name = str(threadId)
            for ct in curthreads:
                if ct.ident == threadId:
                    name = ct.name

            # Collect a formatted traceback for this thread.
            code[name] = ["NAME: %s" % name]
            for filename, lineno, fname, line in traceback.extract_stack(stack):
                code[name].append('FILE: "%s", line %d, in %s' % (filename, lineno, fname))
                if line:
                    code[name].append("  %s" % (line.strip()))

            # Record which global locks this thread holds (writer or reader).
            held_locks[name] = ""
            for lock in alllocks:
                if lock.writer_id == threadId:
                    held_locks[name] += ("%s(w)" % lock.name)
                    continue
                for reader_id, reader_stack in lock.reader_stacks:
                    if reader_id == threadId:
                        held_locks[name] += ("%s(r)" % lock.name)

        for k in code:
            log.info('\n\nLOCKS: %s \n%s' % (held_locks[k], '\n'.join(code[k])))

        # Dump the stack captured at each lock acquisition.
        log.info("\n\nSTACKS:")
        for lock in alllocks:
            for (reader_id, reader_stack) in lock.reader_stacks:
                log.info("Lock %s (%s readers)" % (lock.name, lock.readers))
                log.info("Lock reader (thread %s):" % (reader_id,))
                log.info(''.join(reader_stack))

            for writer_stack in lock.writer_stacks:
                log.info("Lock %s (%s readers)" % (lock.name, lock.readers))
                log.info("Lock writer (thread %s):" % (lock.writer_id,))
                log.info(''.join(writer_stack))

        # Flush persistent state and collect garbage before summarizing.
        self.shelf.sync()
        gc.collect()

        # If we've got pympler installed, output a summary of memory usage.

        try:
            from pympler import summary, muppy
            from pympler.asizeof import asizeof
            summary.print_(summary.summarize(muppy.get_objects()))
        except:
            pass
Esempio n. 46
0
def profile_expose_method(profiled_method_wrapper, accept, args, func, kw, exclude_from_memory_profiling):
    """
    Targeted to profile a specific method that wraps HTTP request processing endpoints into database context.

    :param profiled_method_wrapper: method wrapped around profiled call to be passed in to memory profiler
    :param accept: param specific to profiled call
    :param args: args of a function that is being wrapped by a profiled method
    :param func: function that is being wrapped by a profiled method
    :param kw: kwargs of a function that is being wrapped by a profiled method
    :param exclude_from_memory_profiling: when True, skip all profiling and call through directly
    :return: output of a profiled method without modification
    """
    if not exclude_from_memory_profiling and get_memory_profile_logging_on() and \
            check_memory_profile_package_wide_disable(func):
        controller_class = args[0].__class__.__name__ if args else ''
        end_point_name_parts = [s for s in [func.__module__, controller_class, func.__name__] if s != '']
        end_point_name = ".".join(end_point_name_parts)
        is_pympler_on = _is_pympler_profiling_value_on(end_point_name)
        profile_output = {'output': {}}
        if is_pympler_on:
            # Summarize immediately and drop the object list: keeping it
            # alive through the profiled call would inflate the readings.
            all_objects_summary_before = summary.summarize(muppy.get_objects())
        memory_profile = memory_usage((_profile_me,
                                       (profile_output, profiled_method_wrapper, func, accept, args, kw),
                                       {}),
                                      interval=0.1)
        output = profile_output['output']
        if is_pympler_on:
            # BUG FIX: take a fresh snapshot here. The original code
            # re-summarized the pre-call object list, so the diff was
            # always empty and the pympler report showed no growth.
            all_objects_summary_after = summary.summarize(muppy.get_objects())
            diff = summary.get_diff(all_objects_summary_before, all_objects_summary_after)
            diff_out = ''
            for s in summary.format_(diff):
                diff_out += s + '\n'
            thread_log.info("================ PYMPLER OUTPUT <{}> ==============\n{}".format(end_point_name, diff_out))
        try:
            # Compute extrema once; memory_profile is a list of MiB samples.
            mem_min = min(memory_profile)
            mem_max = max(memory_profile)
            message = json.dumps({'log_type': 'memory_profile',
                                  'proc_id': os.getpid(),
                                  'name': func.__name__,
                                  'module': func.__module__,
                                  'mem_profile': memory_profile,
                                  'min': mem_min,
                                  'max': mem_max,
                                  'diff': mem_max - mem_min,
                                  'leaked': memory_profile[-1] - memory_profile[0],
                                  'args': [arg for arg in args[1:]],  # exclude self
                                  'kwargs': kw})
            memory_log.info(message,
                            extra={'controller_module': func.__module__,
                                   'controller_class': controller_class,
                                   'endpoint': func.__name__})
        except Exception as e:
            thread_log.exception('Logger failed: {}'.format(e))
    else:
        output = profiled_method_wrapper(accept, args, func, kw)
    return output
Esempio n. 47
0
 def memory_summary(self, summarize=True):
     "Using pympler summarize module to view memory summary."

     if not summarize:
         # Tracker mode: show object-count changes since the last diff.
         Logger.info("ENV: \nMemory Tracker:")
         Logger.info("---------------")
         self.mem_tracker.print_diff()
         return

     Logger.info("ENV: \nMemory Footprint:")
     Logger.info("-----------------")
     return summary.print_(summary.summarize(muppy.get_objects()), limit=50)
    def get_report(self):
        """Return (total_size, rows): heap size plus a sorted, optionally
        truncated pympler summary, driven by the form's cleaned data."""
        objects = muppy.get_objects()
        total_size = get_size(objects)
        rows = summary.summarize(objects)

        sort_index = self.cleaned_data['sort_by']
        rows.sort(key=lambda row: row[sort_index], reverse=True)

        limit = self.cleaned_data['limit']
        return total_size, (rows[:limit] if limit else rows)
Esempio n. 49
0
def print_memory(count=30):
    '''
    Print the statistics of the objects in the memory.
    Need pympler to use.

    ``count`` limits the number of summary rows printed.
    '''
    from pympler import muppy, summary

    gc.collect()
    # Restrict the report to wx widgets/objects only.
    wx_objects = muppy.filter(muppy.get_objects(), Type=wx.Object)
    summary.print_(summary.summarize(wx_objects), limit=count)
Esempio n. 50
0
    def __init__(self, **kwargs):
        """Initialize the container widget, keyboard handling, and (in
        debug builds) pympler-based leak tracking."""
        super(GlobalContainer, self).__init__(**kwargs)
        self._keyboard = None
        self.request_keyboard()
        # Default working directory for file operations.
        self.working_dir = './'
        self.tutorial = None
        # Stack of currently open popups (last opened on top).
        self.popup_stack = []
        window.Window.bind(on_resize=self.on_resize)

        if DEBUG:
            # Track MenuButton instances and keep a baseline object list
            # so leaks can be diagnosed later.
            self.tracker = ClassTracker()
            self.tracker.track_object(MenuButton)
            self.all_objects = muppy.get_objects()
Esempio n. 51
0
    def __init__(self, ignore_self=True):
        """Constructor.

        The number of summaries managed by the tracker has a performance
        impact on new summaries, iff you decide to exclude them from further
        summaries. Therefore it is suggested to use them economically.

        Keyword arguments:
        ignore_self -- summaries managed by this object will be ignored.
        """
        # Baseline snapshot taken at construction time.
        self.s0 = summary.summarize(muppy.get_objects())
        # Named summaries stored by the caller, keyed by label.
        self.summaries = {}
        self.ignore_self = ignore_self
Esempio n. 52
0
def print_muppy_sumary():
    """Print a pympler summary; first call sets a baseline, later calls
    print the diff against it. No-op (with a warning) without pympler.

    See http://pythonhosted.org/Pympler/index.html
    """
    global all_objects, obj_summary, class_tracker
    try:
        from pympler import muppy, summary
    except ImportError:
        print("WARNING: pympler not installed")
        return

    if all_objects is None:
        # First call: capture and print the baseline snapshot.
        all_objects = muppy.get_objects()
        obj_summary = summary.summarize(all_objects)
        summary.print_(obj_summary)
    else:
        # Later calls: print what changed relative to the baseline.
        latest = summary.summarize(muppy.get_objects())
        summary.print_(summary.get_diff(obj_summary, latest), limit=200)
Esempio n. 53
0
def request(ctx, flow):
    """mitmproxy request hook: on the first request grab a live
    SSL.Connection object, on the second open an interactive reference
    browser rooted at it (for hunting connection leaks).
    """
    global step, ssl
    print("==========")
    print("GC: {}".format(gc.collect()))
    print("Threads: {}".format(threading.active_count()))

    step += 1
    if step == 1:
        # Keep the first live SSL connection found in the global `ssl`.
        all_objects = muppy.get_objects()
        ssl = muppy.filter(all_objects, SSL.Connection)[0]
    if step == 2:
        # Browse referrers of the captured connection (depth 2).
        ib = refbrowser.InteractiveBrowser(ssl, 2, str_fun, repeat=False)
        del ssl  # do this to unpollute view
        ib.main(True)
Esempio n. 54
0
def analyzeAllMFCC():
    """Compute MFCC coefficients for every grain that lacks them and
    persist each coefficient back to MongoDB; finish with a memory dump."""
    client = MongoClient()
    grain_collection = client.audiograins.grains

    query = grain_collection.find({ "mfcc00" : { "$exists": False }})
    print("Analyzing MFCC for " + str(query.count()) + " grains")

    for grain in tqdm(query):
        mfccs = analyzeMFCC(grain)
        # One update per coefficient, keyed mfcc00..mfccNN.
        for index, coefficient in enumerate(mfccs):
            update = {"mfcc" + format(index, '02') : coefficient}
            grain_collection.update_one({"_id": grain["_id"]}, {"$set" : update})

    summary.print_(summary.summarize(muppy.get_objects()))
    client.close()
Esempio n. 55
0
    def muppy_loop(self):
        """Generator method for looping over the iterations and writing out the muppy output."""

        for i in range(self.num):
            # Emit a muppy heap summary on every 100th iteration only.
            if i % 100 == 0:
                self.file.write("Iteration %i\n" % i)
                self.file.write("Muppy heap:\n")
                heap = muppy.summary.summarize(muppy.get_objects())
                for line in muppy.summary.format_(heap):
                    self.file.write("%s\n" % line)
                self.file.write("\n\n\n")
                self.file.flush()

            # Yield the loop index.
            yield i
    def collect_and_dump_root(self):
        """Profile live OmsRoot objects and log, for each, its referrers
        and shallow size. Returns a fired Deferred (success or failure).

        NOTE: Python 2 only (`except Exception, e` syntax).
        """
        log.msg('Profiling memory for OmsRoot objects...', system=self.__name__)
        try:
            import inspect
            from sys import getsizeof
            from BTrees.OOBTree import OOBucket
            from ZEO.Exceptions import ClientDisconnected
            from opennode.oms.model.model.root import OmsRoot

            data = []
            all_objects = muppy.get_objects()
            roots = muppy.filter(all_objects, Type=OmsRoot)
            logger.info('Root profile follows (%s rows)' % len(roots))

            gc.collect()

            for ue in roots:
                # Describe every referrer keeping this root alive.
                referrers = []
                for ref in gc.get_referrers(ue):
                    try:
                        if inspect.isframe(ref):
                            continue  # local object ref
                        elif isinstance(ref, list):
                            referrers.append('list len=%s id=%x' % (len(ref), id(ref)))
                        elif isinstance(ref, OOBucket):
                            referrers.append('OOBucket len=%s id=%x' % (len(ref), id(ref)))
                        else:
                            # repr() may hit ZEO and raise if disconnected.
                            sref = repr(ref)
                            referrers.append(sref)
                    except ClientDisconnected:
                        referrers.append('ClientDisconnected')

                # getsizeof is shallow: it does not include referenced objects.
                data.append((referrers, str(ue), repr(ue), str(getsizeof(ue))))

            # NOTE(review): the header order ('object', 'raw', 'size',
            # 'referrers') does not match the data tuples, which are
            # (referrers, str, repr, size) — verify intended column order.
            rrows = [('object', 'raw', 'size', 'referrers')] + data
            rows = _format_table(rrows)
            for row in rows:
                logger.info(row)

            log.msg('Profiling Omsroot memory done', system=self.__name__)
            # Drop the big object list before collecting again.
            del all_objects
            gc.collect()
            return defer.succeed(None)
        except Exception, e:
            import traceback
            logger.error(traceback.format_exc(e))
            return defer.fail(None)
Esempio n. 57
0
 def test(self):
     """Run Arelle three times over the same XBRL instance and assert the
     live-object count stabilizes (i.e. repeated runs do not leak)."""
     tr = tracker.SummaryTracker()
     testDir = os.path.dirname(os.path.abspath(sys.modules[__name__].__file__))
     testFileSmall = testDir + "/solvency/2.0/random/spv_20_instance.xbrl"
     logFile = testDir + "/tmp/test.log"
     dumpFilePrefix = testDir + "/tmp/dump_"
     
     prevNumObjects = 0
     for idx in range(3): # increase this range for testing
         print("\nIteration " + str(idx))
         # NOTE(review): both '--logFile logToStdErr' and '--logfile <path>'
         # are passed — confirm which one Arelle actually honors.
         arelleRunArgs = ['--keepOpen', '--logFile', 'logToStdErr', '--logfile', logFile, '--file', testFileSmall]
         cntlr = parseAndRun(arelleRunArgs)
         cntlr.modelManager.close()
         cntlr.close()
         del cntlr
     
         # Count live objects after a full collection.
         gc.collect()
         all_objects = muppy.get_objects()
         numObjects = len(all_objects)
         diffObjects = numObjects - prevNumObjects
         prevNumObjects = numObjects
         print(str(numObjects) + " (" + str(diffObjects) + " more)")
         browserRoot = None
         # Debug toggle: dump every live object to a per-iteration file.
         if False:  # <<<--- set this to get object dump file
             with open(dumpFilePrefix + str(idx) + ".txt", "w") as text_file:
                 idx = 0
                 for o in all_objects:
                     if browserRoot is None and isinstance(o, arelle.ModelValue.QName):
                         browserRoot = o
                     idx += 1
                     otype = ""
                     try:
                         otype = str(type(o))
                     except:
                         pass
                     try:
                         print("type=" + otype + " " + str(o), file=text_file)
                     except:
                         pass
         # Debug toggle: interactively browse referrers of a QName object.
         if False:
             ibrowser = refbrowser.InteractiveBrowser(browserRoot)
             ibrowser.main()
         # Release the object list before the next measurement.
         all_objects= None
         gc.collect()
         tr.print_diff()
         # Skip the first runs, which legitimately populate caches.
         if idx > 1:
             assert diffObjects < 50, "Check for new objects leak"  
Esempio n. 58
0
 def getDebugInfo(self, itemname):
     """Give debug info about a particular item."""
     global profile
     out = StringIO()
     if itemname == "":
         out.write("the item was empty")
         if profile:
             # With profiling on, also dump memory and open a ref browser.
             summary.print_(summary.summarize(muppy.get_objects()), 100)
             browser = refbrowser.InteractiveBrowser(self)
             browser.main()
         return out.getvalue()
     fixed_name = keywords.fixID(itemname)
     item = self.getSubValue(vtype.parseItemList(fixed_name))
     item.writeDebug(out)
     return out.getvalue()
Esempio n. 59
0
 def profile(self, frame, event, arg):  # arg req to match signature
     """Profiling method used to profile matching codepoints and events."""
     if (self.events is None) or (event in self.events):
         frame_info = inspect.getframeinfo(frame)
         cp = (frame_info[0], frame_info[2], frame_info[1])
         if self.codepoint_included(cp):
             objects = muppy.get_objects()
             size = muppy.get_size(objects)
             if cp not in self.memories:
                 self.memories[cp] = [0, 0, 0, 0]
                 self.memories[cp][0] = 1
                 self.memories[cp][1] = size
                 self.memories[cp][2] = size
             else:
                 self.memories[cp][0] += 1
                 if self.memories[cp][1] > size:
                     self.memories[cp][1] = size
                 if self.memories[cp][2] < size:
                     self.memories[cp][2] = size