Пример #1
0
def func5():
    """
    Demonstrate the pympler muppy/summary workflow.

    muppy.get_objects() returns every object currently tracked by the
    Python runtime; calling it twice at different points in time lets us
    see which objects were created between the two snapshots.

    summary.get_diff() computes the difference between two snapshots.

    summary.print_() pretty-prints the objects and the memory they use.
    """
    # Imported lazily so the module loads even without pympler installed.
    # (The original also imported pympler.asizeof, which was never used.)
    from pympler import muppy
    from pympler import summary

    print('\nИспользование muppy :')
    objects_before = muppy.get_objects()

    # Allocate something measurable between the two snapshots.
    data = list(range(1000))

    objects_after = muppy.get_objects()

    summary_before = summary.summarize(objects_before)
    summary_after = summary.summarize(objects_after)

    summary.print_(summary.get_diff(summary_before, summary_after))
Пример #2
0
def check_leakage():
    """Print a pympler summary of all live objects, then force a GC pass."""
    snapshot = summary.summarize(muppy.get_objects())
    # Prints out a summary of the large objects
    summary.print_(snapshot)
    gc.collect()
Пример #3
0
    def generate_loop(dump_mem):
        """Yield augmented (input, output) training crops indefinitely.

        dump_mem -- when true, print a pympler summary of all live
        objects after each batch of ~20 crops (leak hunting).

        NOTE(review): closes over many names from the enclosing scope
        (classes, prep_in, prep_out, load_classes, size, shutdown,
        out_channels, every_flip, rescale, rotate_f, test_angle,
        gen_pos) that are not visible in this chunk.
        """
        try:
            #raise MemoryError()
            # Normalize `classes` to an (input_class, output_class) pair.
            class_tuple = classes if isinstance(classes, tuple) else (classes,
                                                                      classes)
            inputs = list(map(prep_in, load_classes('input/', class_tuple[0])))
            outputs = list(
                map(prep_out, load_classes('output/', class_tuple[1])))
            random.seed(1)
            area = size[0] * size[1]
            while not shutdown:
                results = []
                metrics = []
                # Running per-channel sum of output mass; used below to
                # normalize each crop's score.
                msum = np.ones(out_channels)
                while len(results) < 20:
                    # Pick a random (input, output) sample pair.
                    n = random.randint(0, len(inputs) - 1)
                    i = inputs[n]
                    o = outputs[n]
                    # Random zoom 3**(0.4 - r), r in [0, 1): roughly 0.52-1.55.
                    scale = 3.0**(0.4 - random.random())
                    if scale * i.shape[0] > size[0] and scale * i.shape[
                            1] > size[1] and random.random() > 0.5:
                        i = rescale(i, scale)
                        o = rescale(o, scale)
                    i_shape = i.shape
                    # Random rotation in [-30, 30); halve the angle until a
                    # valid crop window still fits the rotated image.
                    a = -30 + 60.0 * random.random()
                    while not test_angle(a, i_shape, size):
                        a *= 0.5
                    i = rotate_f(i, a)
                    o = rotate_f(o, a)
                    # Number of crops scales with image area.
                    count = int(i_shape[0] * i_shape[1] / area * 3)
                    for p in gen_pos(a, i_shape, size, count):
                        x, y = p[1], p[0]
                        ip = i[y:y + size[0], x:x + size[1], :]
                        op = o[y:y + size[0], x:x + size[1], :]
                        # Random horizontal flip (when every_flip, both
                        # orientations are yielded below instead).
                        if not every_flip and random.randint(0, 10) > 5:
                            ip = np.flip(ip, 1)
                            op = np.flip(op, 1)
                        m = np.sum(op, axis=(0, 1))
                        if m.sum() == 0:
                            # Skip crops whose output is entirely empty.
                            continue
                        msum += m
                        metrics.append((len(results), m))
                        results.append((ip, op))
                # Rank crops by normalized mass in the non-last channels and
                # drop the emptier half.
                metrics = sorted(metrics,
                                 key=lambda m: -np.sum(
                                     (m[1] / msum)[:out_channels - 1]))
                metrics = metrics[:int(len(metrics) *
                                       0.5)]  # Reduce number of empty outputs
                random.shuffle(metrics)
                for a in metrics:
                    r = results[a[0]]
                    yield r
                    if every_flip:
                        yield np.flip(r[0], 1), np.flip(r[1], 1)

                if dump_mem:
                    summary.print_(summary.summarize(muppy.get_objects()))
        except MemoryError:
            print('Memory error...')
            _thread.interrupt_main()
Пример #4
0
    def on_epoch_end(self, epoch, log=None):
        """Keras-style callback: dump pympler diagnostics once memory is high.

        epoch -- zero-based epoch index.
        log -- metrics dict (unused here). The original used a mutable
               default (log={}), which is shared across calls and unsafe;
               it now defaults to None.
        """
        if log is None:
            log = {}
        # Peak resident set size so far (platform-dependent units; KB on Linux).
        x = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
        web_browser_debug = True
        print(x)

        if x > 40000:
            if web_browser_debug:
                if epoch == 0:
                    start_in_background()
                    tr = tracker.SummaryTracker()
                    tr.print_diff()
            else:
                global memlist
                all_objects = muppy.get_objects(include_frames=True)
                sum1 = summary.summarize(all_objects)
                memlist.append(sum1)
                summary.print_(sum1)
                if len(memlist) > 1:
                    # Compare with the previous epoch's summary and print
                    # the per-epoch difference.
                    diff = summary.get_diff(memlist[-2], memlist[-1])
                    summary.print_(diff)
                # NOTE(review): types.ClassType exists only in Python 2;
                # under Python 3 this line raises AttributeError. `type`
                # would be the closest equivalent -- confirm intent.
                my_types = muppy.filter(all_objects, Type=types.ClassType)

                for t in my_types:
                    print(t)
Пример #5
0
def file_test(rows=500000, cols=50):
    "File test"
    print("Creating file with {} rows and {} columns".format(rows, cols))
    path = create_file(rows, cols)
    print("Size of the file: {:.2f} MiB".format(getsize(path) / (1024 * 1024)))
    print("Reading file")
    # Snapshot live objects before and after reading to see what it allocates.
    before = summarize(get_objects())
    las = read(path)
    after = summarize(get_objects())
    print_(get_diff(before, after))

    for curve in las.curves:
        print("Name: {}, Min: {:.2f}, Mean: {:.2f}, Max: {:.2f}"
              .format(curve.mnemonic, nanmin(curve.data), nanmean(curve.data),
                      nanmax(curve.data)))

    # Re-read and immediately discard the file a few more times.
    del las
    for _ in range(3):
        las = read(path)
        del las
    print("Happy end")
Пример #6
0
def test_env_memory_cleanup(agent_id, seed, primative_scenarios):
    """Verify repeated env build-ups do not grow memory past the limit."""
    # First run initializes module-level globals so they don't count as growth.
    _, action, agent_type = (100, None, AgentType.Buddha)
    _env_memory_buildup(agent_id, seed, primative_scenarios, action,
                        agent_type)
    gc.collect()

    # Measure total object size before and after one more build-up.
    size = muppy.get_size(muppy.get_objects())
    gc.collect()
    _env_memory_buildup(agent_id, seed, primative_scenarios, action,
                        agent_type)
    end_size = muppy.get_size(muppy.get_objects())
    gc.collect()

    def within_limit():
        return end_size - size < EPISODE_MEMORY_GROWTH_LIMIT

    if not within_limit():
        # Failure path: run once more under a tracker and print the diff
        # to help locate the leak.
        tr = tracker.SummaryTracker()
        tr.print_diff()
        _env_memory_buildup(agent_id, seed, primative_scenarios, action,
                            agent_type)
        summary.print_(tr.diff())
        gc.collect()
        assert within_limit(), f"Size diff {end_size - size}"
Пример #7
0
def dump_state():
    """Print a timestamped memory summary, then reschedule itself in 10 s."""
    loop = get_event_loop()
    print(datetime.now())
    live_objects = muppy.get_objects()
    summary.print_(summary.summarize(live_objects), limit=100)
    loop.call_later(10, dump_state)
Пример #8
0
 def process_response(self, request, response):
         """Log a pympler diff and object sizes for non-static requests.

         Converted from Python 2 print statements (a SyntaxError under
         Python 3) to the print() function; output text is unchanged.
         Returns the response unmodified.
         """
         req = request.META['PATH_INFO']
         # Skip static assets and media files.
         if req.find('static') == -1 and req.find('media') == -1:
                 print(req)
                 self.end_objects = muppy.get_objects()
                 sum_start = summary.summarize(self.start_objects)
                 sum_end = summary.summarize(self.end_objects)
                 diff = summary.get_diff(sum_start, sum_end)
                 summary.print_(diff)
                 print('~~~~~~~~~')
                 a = asizeof(response)
                 print('Total size of response object in kB: %s' %
                       str(a / 1024.0))
                 print('~~~~~~~~~')
                 a = asizeof(self.end_objects)
                 print('Total size of end_objects in MB: %s' %
                       str(a / 1048576.0))
                 b = asizeof(self.start_objects)
                 print('Total size of start_objects in MB: %s' %
                       str(b / 1048576.0))
                 print('~~~~~~~~~')
         return response
Пример #9
0
            def consumer(inQ, outQ):
                """Worker loop: apply f to items from inQ, push results to outQ.

                A None message is the termination sentinel.
                """
                while True:
                    try:
                        message = inQ.get()
                        if message is None:
                            # 'TERM' signal: stop this worker.
                            break
                        result = f(message)

                        if args.debug:
                            # Dump a full object summary plus a tracker diff
                            # after every processed item.
                            from pympler import summary
                            from pympler import muppy
                            sum1 = summary.summarize(muppy.get_objects())
                            print("summary:")
                            summary.print_(sum1)
                            from pympler import tracker
                            tr = tracker.SummaryTracker()
                            print("diff:")
                            tr.print_diff()

                        outQ.put(result)
                    except Exception as e:
                        print("error!", e)
                        break
Пример #10
0
def memory_usage(where):
    """
    Print out a basic summary of memory usage.
    """
    snapshot = summary.summarize(muppy.get_objects())
    print("Memory summary:", where)
    summary.print_(snapshot, limit=2)
    print("VM: %.2fMb" % (get_virtual_memory_usage_kb() / 1024.0))
Пример #11
0
 def printListingUsage(self, args):
     """Print a full object summary followed by a tracker diff."""
     summary.print_(summary.summarize(muppy.get_objects()))
     print(" ")
     print("Summary: ")
     tracker.SummaryTracker().print_diff()
Пример #12
0
    def print_diff(self, summary1=None, summary2=None):
        """Print the diff between two summaries.

        With no arguments, diff the last summary against the current one.
        With only summary1, diff it against the current summary. With
        both, diff summary1 against summary2.
        """
        diff = self.diff(summary1=summary1, summary2=summary2)
        summary.print_(diff)
Пример #13
0
 def handle_signal_abort(self, signum, frame):
     """Abort-signal handler: log current usage and the delta since last time."""
     Log.warn("Someone want to kill me! But I'll not die now! Hahahaha!")
     current = summary.summarize(muppy.get_objects())
     Log.debug("Current memory usage:")
     summary.print_(current)
     delta = summary.get_diff(self.mem_sum, current)
     # Remember this snapshot as the baseline for the next signal.
     self.mem_sum = current
     Log.debug("New memory usage:")
     summary.print_(delta)
Пример #14
0
    def print_diff(self, summary1=None, summary2=None):
        """Compute and print a summary diff.

        Defaults: no arguments diffs the previous summary against the
        current one; summary1 alone is diffed against the current
        summary; summary1 and summary2 together are diffed directly.
        """
        result = self.diff(summary1=summary1, summary2=summary2)
        summary.print_(result)
Пример #15
0
    def sig_usr(self, a, b):
        """Signal handler: log every thread's stack and held locks, dump
        each lock's reader/writer acquisition stacks, sync the shelf, and
        (if pympler is available) print a memory summary.

        a, b -- signal number and current frame (unused).
        """
        import threading
        import gc

        held_locks = {}
        code = {}
        curthreads = threading.enumerate()

        # Map each running thread to a readable name and its current stack.
        for threadId, stack in sys._current_frames().items():
            name = str(threadId)
            for ct in curthreads:
                if ct.ident == threadId:
                    name = ct.name

            code[name] = ["NAME: %s" % name]
            for filename, lineno, fname, line in traceback.extract_stack(
                    stack):
                code[name].append('FILE: "%s", line %d, in %s' %
                                  (filename, lineno, fname))
                if line:
                    code[name].append("  %s" % (line.strip()))

            # Record which locks this thread currently holds, as writer (w)
            # or reader (r).
            held_locks[name] = ""
            for lock in alllocks:
                if lock.writer_id == threadId:
                    held_locks[name] += ("%s(w)" % lock.name)
                    continue
                for reader_id, reader_stack in lock.reader_stacks:
                    if reader_id == threadId:
                        held_locks[name] += ("%s(r)" % lock.name)

        for k in code:
            log.info('\n\nLOCKS: %s \n%s' %
                     (held_locks[k], '\n'.join(code[k])))

        log.info("\n\nSTACKS:")
        # Dump the acquisition stacks each lock recorded for its readers
        # and writers.
        for lock in alllocks:
            for (reader_id, reader_stack) in lock.reader_stacks:
                log.info("Lock %s (%s readers)" % (lock.name, lock.readers))
                log.info("Lock reader (thread %s):" % (reader_id, ))
                log.info(''.join(reader_stack))

            for writer_stack in lock.writer_stacks:
                log.info("Lock %s (%s readers)" % (lock.name, lock.readers))
                log.info("Lock writer (thread %s):" % (lock.writer_id, ))
                log.info(''.join(writer_stack))

        self.shelf.sync()
        gc.collect()

        # If we've got pympler installed, output a summary of memory usage.

        try:
            from pympler import summary, muppy
            summary.print_(summary.summarize(muppy.get_objects()))
        except:
            # Best effort: pympler is an optional dependency.
            pass
Пример #16
0
    def memusage(line):
        """Print process memory counters and a pympler object summary.

        line -- optional row limit for the summary (defaults to 10).
        """
        limit = int(line or 10)

        info = psutil.Process().memory_info()
        print("VMS {:.2f}M, RSS {:.2f}M, SHR {:.2f}M, objects:".format(
            float(info.vms) / 1024.0 / 1024.0,
            float(info.rss) / 1024.0 / 1024.0,
            float(info.shared) / 1024.0 / 1024.0))
        snapshot = summary.summarize(muppy.get_objects())
        summary.print_(snapshot, limit=limit)
Пример #17
0
    def sig_usr(self, a, b):
        """Signal handler: log per-thread stacks and held locks, dump each
        lock's reader/writer stacks, sync the shelf, collect garbage, and
        print a pympler memory summary when available.

        a, b -- signal number and current frame (unused).
        """
        import threading
        import gc

        held_locks = {}
        code = {}
        curthreads = threading.enumerate()

        # Resolve each thread id to its name and capture its stack.
        for threadId, stack in sys._current_frames().items():
            name = str(threadId)
            for ct in curthreads:
                if ct.ident == threadId:
                    name = ct.name

            code[name] = ["NAME: %s" % name]
            for filename, lineno, fname, line in traceback.extract_stack(stack):
                code[name].append('FILE: "%s", line %d, in %s' % (filename, lineno, fname))
                if line:
                    code[name].append("  %s" % (line.strip()))

            # Which locks does this thread hold, and in what role (w/r)?
            held_locks[name] = ""
            for lock in alllocks:
                if lock.writer_id == threadId:
                    held_locks[name] += ("%s(w)" % lock.name)
                    continue
                for reader_id, reader_stack in lock.reader_stacks:
                    if reader_id == threadId:
                        held_locks[name] += ("%s(r)" % lock.name)

        for k in code:
            log.info('\n\nLOCKS: %s \n%s' % (held_locks[k], '\n'.join(code[k])))

        log.info("\n\nSTACKS:")
        # Dump the acquisition stacks recorded by each lock.
        for lock in alllocks:
            for (reader_id, reader_stack) in lock.reader_stacks:
                log.info("Lock %s (%s readers)" % (lock.name, lock.readers))
                log.info("Lock reader (thread %s):" % (reader_id,))
                log.info(''.join(reader_stack))

            for writer_stack in lock.writer_stacks:
                log.info("Lock %s (%s readers)" % (lock.name, lock.readers))
                log.info("Lock writer (thread %s):" % (lock.writer_id,))
                log.info(''.join(writer_stack))

        self.shelf.sync()
        gc.collect()

        # If we've got pympler installed, output a summary of memory usage.

        try:
            from pympler import summary, muppy
            from pympler.asizeof import asizeof
            summary.print_(summary.summarize(muppy.get_objects()))
        except:
            # Best effort: pympler is an optional dependency.
            pass
Пример #18
0
def memory_profile_print():
    """Print a pympler summary, then the columns and row counts of every
    live pandas DataFrame."""
    from pympler import muppy, summary
    live_objects = muppy.get_objects()
    summary.print_(summary.summarize(live_objects))
    # Inspect every DataFrame currently alive in the process.
    for frame in (obj for obj in live_objects if isinstance(obj, pd.DataFrame)):
        print(frame.columns.values)
        print(len(frame))
Пример #19
0
 def test_print_diff(self):
     """Test summary can be printed."""
     try:
         # Silence output while exercising the print path.
         self._stdout = sys.stdout
         sys.stdout = self.DevNull()
         first = summary.summarize(muppy.get_objects())
         second = summary.summarize(muppy.get_objects())
         summary.print_(summary.get_diff(first, second))
     finally:
         sys.stdout = self._stdout
Пример #20
0
def run_history_cleaner():
    """Print a memory summary, remove stale files, and reschedule in 60 s."""
    gc.collect()
    # Prints out a summary of the large objects
    summary.print_(summary.summarize(muppy.get_objects()))

    for extension in (".png", ".rcg"):
        remove_files(extension)

    threading.Timer(60, run_history_cleaner).start()
Пример #21
0
 def test_print_diff(self):
     """Test summary can be printed."""
     try:
         self._stdout = sys.stdout
         # Discard everything printed during the check.
         sys.stdout = self.DevNull()
         before = summary.summarize(muppy.get_objects())
         after = summary.summarize(muppy.get_objects())
         delta = summary.get_diff(before, after)
         summary.print_(delta)
     finally:
         sys.stdout = self._stdout
Пример #22
0
def dosomething():
    """Print total memory usage and the tracker diff before crawling.

    The original contained a broken 2to3 artifact: a bare ``print``
    followed by an orphan string literal on the next line, so the labels
    were never printed. The Python 3 twin of this function confirms the
    intended output.
    """
    print("memory total")
    all_objects = muppy.get_objects()
    sum1 = summary.summarize(all_objects)
    summary.print_(sum1)

    print("memory difference")
    tr.print_diff()
    # ........ crawl task goes here
Пример #23
0
def print_memory(count=30):
    '''
    Print the statistics of the objects in the memory.
    Need pympler to use.
    '''
    from pympler import muppy, summary

    gc.collect()
    # Only summarize wx GUI objects (not the full object graph).
    wx_objects = muppy.filter(muppy.get_objects(), Type=wx.Object)
    summary.print_(summary.summarize(wx_objects), limit=count)
Пример #24
0
def profile(diff=False):  # type: (bool) -> None
    """A simple profiler using stuff from pympler"""
    if not _DO_PROFILE:
        return
    if _MEM_PROFILE:
        if diff:
            TRACKER.print_diff()
        else:
            summary.print_(summary.summarize(muppy.get_objects()))
    else:
        # Profiling requested but pympler is missing.
        logging.error(
            "Could not find 'pympler' module, please install with pip and run again"
        )
Пример #25
0
def print_memory(count=30):
    '''
    Print the statistics of the objects in the memory.
    Need pympler to use.
    '''
    from pympler import muppy, summary

    gc.collect()
    all_objects = muppy.get_objects()
    # Restrict the report to wx GUI objects.
    filtered = muppy.filter(all_objects, Type=wx.Object)
    report = summary.summarize(filtered)
    summary.print_(report, limit=count)
Пример #26
0
def analyze_memory(tracker):
    """Print the live-object count, a full summary, and the tracker diff,
    then drop into pdb.

    Converted from Python 2 print statements (a SyntaxError under
    Python 3) to print() calls; output text is unchanged.
    """
    all_objects = muppy.get_objects()
    num = len(all_objects)
    print('number of objects:', num)

    sum1 = summary.summarize(all_objects)
    print('sumary of all objects')  # sic: typo kept to preserve output
    summary.print_(sum1)

    print('difference: ')
    tracker.print_diff()

    # Interactive inspection point.
    pdb.set_trace()
Пример #27
0
def dosomething():
    """Print total memory usage and the tracker diff, then run the crawl."""
    print("memory total")
    summary.print_(summary.summarize(muppy.get_objects()))

    print("memory difference")
    tr.print_diff()
    # ........ crawl task: slowly grow a dict to observe memory behavior
    holder = {}
    for index in range(10000):
        holder[index] = index
        time.sleep(0.5)
    return
Пример #28
0
def memory_details(task, method=None):
    """Log memory details via pympler, mem_top, or `ps` (the default)."""
    if method == 'pympler':
        from pympler import muppy, summary
        summary.print_(summary.summarize(muppy.get_objects()))
    elif method == 'mem_top':
        from mem_top import mem_top
        task.log(mem_top())
    else:
        # Fall back to the system process table, sorted by %mem.
        import subprocess
        output = subprocess.check_output(
            'ps --no-headers -eo pmem,vsize,rss,pid,cmd | sort -k 1 -nr',
            shell=True)
        task.log('\n' + output.decode('utf8'))
Пример #29
0
 def test_print_diff(self):
     """Test summary can be printed."""
     try:
         self._stdout = sys.stdout
         # Capture output so we can assert on what was printed.
         buffer = StringIO()
         sys.stdout = buffer
         first = summary.summarize(muppy.get_objects())
         second = summary.summarize(muppy.get_objects())
         summary.print_(summary.get_diff(first, second))
         printed = buffer.getvalue()
         self.assertIn('str', printed)
         self.assertNotIn("<class 'str", printed)
     finally:
         sys.stdout = self._stdout
Пример #30
0
    def print_diff(self, ignore=None):
        """Print the diff to the last time the state of objects was measured.

        keyword arguments
        ignore -- list of objects to ignore

        The original used a mutable default argument (ignore=[]), which is
        shared across calls; it now defaults to None and a fresh list is
        created per call. Callers passing their own list see identical
        behavior (the list is appended to and cleared, as before).
        """
        if ignore is None:
            ignore = []
        # Ignore this frame and the caller's frame.
        ignore.append(inspect.currentframe())  #PYCHOK change ignore
        diff = self.get_diff(ignore)
        print("Added objects:")
        summary.print_(summary.summarize(diff['+']))
        print("Removed objects:")
        summary.print_(summary.summarize(diff['-']))
        # Manual cleanup: drop frame references so they can be collected.
        del ignore[:]
Пример #31
0
    def print_diff(self, ignore=None):
        """Print the diff to the last time the state of objects was measured.

        keyword arguments
        ignore -- list of objects to ignore

        Fixed: ignore previously defaulted to a shared mutable list
        (ignore=[]); a fresh list is now created per call. A caller's
        explicit list is still appended to and cleared as before.
        """
        if ignore is None:
            ignore = []
        # Ignore this frame and the caller's frame.
        ignore.append(inspect.currentframe())  #PYCHOK change ignore
        diff = self.get_diff(ignore)
        print("Added objects:")
        summary.print_(summary.summarize(diff['+']))
        print("Removed objects:")
        summary.print_(summary.summarize(diff['-']))
        # Manual cleanup: release the frame references held in `ignore`.
        del ignore[:]
Пример #32
0
def memusage_before_n_after(fun, *args, **kwargs):
    """Run fun(*args, **kwargs), print its execution time and a pympler
    diff of objects created during the call.

    Returns (fun's return value, the summary diff).
    """
    from pympler import muppy
    from pympler import summary
    from datetime import datetime

    before = summary.summarize(muppy.get_objects())
    before_time = datetime.now()
    fun_ret = fun(*args, **kwargs)
    after_time = datetime.now()
    after = summary.summarize(muppy.get_objects())
    diff = summary.get_diff(before, after)
    # Python 3 print() (the original used the Python 2 print statement,
    # a SyntaxError under Python 3); output is unchanged.
    print("execution time: ", after_time - before_time)
    summary.print_(diff)

    return fun_ret, diff
Пример #33
0
def analyzeAllMFCC():
    """Compute and store MFCCs for every grain missing them, then print
    a pympler memory summary."""
    client = MongoClient()
    grains = client.audiograins.grains

    query = grains.find({"mfcc00": {"$exists": False}})
    print("Analyzing MFCC for " + str(query.count()) + " grains")

    for grain in tqdm(query):
        coefficients = analyzeMFCC(grain)
        for index in range(0, len(coefficients)):
            # Field names are zero-padded: mfcc00, mfcc01, ...
            field = {"mfcc" + format(index, '02'): coefficients[index]}
            grains.update_one({"_id": grain["_id"]}, {"$set": field})

    summary.print_(summary.summarize(muppy.get_objects()))
    client.close()
Пример #34
0
    def finished_level(self, level, name):
        """Emit a memory-delta report when a reporting level finishes.

        level -- numeric level being finished; compared to the configured
                 reporting level.
        name -- human-readable name of the finished unit.
        """
        if level <= int(self.reporting_level):
            self.check_for_leaks_before_next_test = True

        if level > int(self.reporting_level):
            return

        if self.report_delta:
            color = self.REPORT_DETAILS[level].color
            report = u'Memory Delta Report for %s: %s\n' % (
                self.REPORT_DETAILS[level].title.upper(), name)
            memory_usage = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss

            print('Peak memory usage for %s: %s' % (name, memory_usage))

            old_summary = self.previous_summaries[level]

            if not self.current_summary:
                self.current_summary = self.get_summary()

            diff = self._fast_get_summary_diff(old_summary, self.current_summary)
            # Keep only rows with a nonzero count or size delta.
            filtered_diff = [row for row in diff if row[1] or row[2]]
            if filtered_diff:
                print(termcolor.colored(report, color))
                # Fixed: summary.print_ prints and returns None, so wrapping
                # it in print() emitted a spurious "None" line.
                summary.print_(filtered_diff)
            else:
                report += 'No changes\n'
                report += 'Peak memory usage: %s' % memory_usage
                print(termcolor.colored(report, color))

            self.previous_summaries[level] = self.current_summary
Пример #35
0
    def finished_level(self, level, name):
        """Emit a memory-delta report for a finished reporting level.

        level -- numeric level being finished.
        name -- human-readable name of the finished unit.
        """
        if level <= int(self.reporting_level):
            self.check_for_leaks_before_next_test = True

        if level > int(self.reporting_level):
            return

        if self.report_delta:
            color = self.REPORT_DETAILS[level].color
            report = u'Memory Delta Report for %s: %s\n' % (
                self.REPORT_DETAILS[level].title.upper(), name)
            memory_usage = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss

            print('Peak memory usage for %s: %s' % (name, memory_usage))

            old_summary = self.previous_summaries[level]

            if not self.current_summary:
                self.current_summary = self.get_summary()

            diff = self._fast_get_summary_diff(old_summary,
                                               self.current_summary)
            # Drop rows whose count and size deltas are both zero.
            filtered_diff = [row for row in diff if row[1] or row[2]]
            if filtered_diff:
                print(termcolor.colored(report, color))
                # Fixed: the original wrapped summary.print_ (which returns
                # None) in print(), printing an extra "None" line.
                summary.print_(filtered_diff)
            else:
                report += 'No changes\n'
                report += 'Peak memory usage: %s' % memory_usage
                print(termcolor.colored(report, color))

            self.previous_summaries[level] = self.current_summary
Пример #36
0
def epoch(model, train_seq, val_seq, noaction=False, step=16):
    """Train `model` over `train_seq` in mini-batches of `step` buildings.

    noaction -- if true, only access the data (smoke test) without fitting.
    val_seq -- unused in this function.
    """
    MEMPROFILE = False
    import gc
    for j in range(len(train_seq)):
        try:
            (_, _), klasses, _, buildings = train_seq[j]
        except Exception as exc:
            logger.error(str(exc))
            continue

        buildings = np.array(buildings)
        klasses = np.array(klasses)
        for i in range(0, len(buildings), step):
            if i + step > len(buildings):
                # Final, short batch.
                # NOTE(review): without a continue after this fit, control
                # falls through and fits the same tail slice again below --
                # kept as-is to preserve behavior, but confirm intent.
                if noaction:
                    buf1, buf2 = buildings[i:], klasses[i:]
                    logger.info(
                        f"{j}:{i}: {len(buf1)} samples accessed successfully.")
                    continue
                model.fit(buildings[i:],
                          klasses[i:],
                          verbose=2,
                          shuffle=False,
                          use_multiprocessing=False)
            if noaction:
                buf1, buf2 = buildings[i:i + step], klasses[i:i + step]
                logger.info(
                    f"{j}:{i}: {len(buf1)} samples accessed successfully.")
                continue
            model.fit(buildings[i:i + step],
                      klasses[i:i + step],
                      verbose=2,
                      shuffle=False,
                      use_multiprocessing=False)

        if j % 100 == 0:
            num_uncollectable = gc.collect(2)
            logger.info("Uncollectable objects: %d" % num_uncollectable)
        if MEMPROFILE is True and j > 100:
            from pympler import muppy, summary
            objs = muppy.get_objects()
            # Renamed from `sum`, which shadowed the builtin.
            mem_summary = summary.summarize(objs)
            summary.print_(mem_summary)
            sys.exit()
Пример #37
0
    def collection_post(self):
        """Diff two stored object summaries and return the printed diff."""
        print("REQUEST {}".format(self.request.json))
        sums = [self.request.json[key] for key in ('sum1', 'sum2')]
        with transaction.manager:
            query = (self.request.dbsession.query(ObjectSummary)
                     .filter(ObjectSummary.id.in_(sums))
                     .order_by(ObjectSummary.created))
            dbsums = query.all()
        # Diff the two stored summaries (oldest first).
        diff = summary.get_diff(*[m.content for m in dbsums])
        # summary.print_ writes to stdout, so capture it into a buffer.
        output = io.StringIO()
        with contextlib.redirect_stdout(output):
            summary.print_(diff)

        return dict(result='testing', output=output.getvalue())
Пример #38
0
 def getDebugInfo(self, itemname):
     """Give debug info about a particular item."""
     global profile
     buf = StringIO()
     if itemname == "":
         buf.write("the item was empty")
         if profile:
             # Dump a capped object summary and open the reference browser.
             summary.print_(summary.summarize(muppy.get_objects()), 100)
             browser = refbrowser.InteractiveBrowser(self)
             browser.main()
         return buf.getvalue()
     fixed_name = keywords.fixID(itemname)
     item = self.getSubValue(vtype.parseItemList(fixed_name))
     item.writeDebug(buf)
     return buf.getvalue()
Пример #39
0
    async def seememory(self, ctx: Context):
        '''
        Shows you the number of each created object in the program
        '''
        # Collect objects in an executor thread: muppy.get_objects can
        # take a while and would otherwise block the event loop.
        pending = self.bot.loop.run_in_executor(None, muppy.get_objects)
        try:
            objects = await wait_for(pending, timeout=10.0,
                                     loop=self.bot.loop)
        except Exception as e:
            await ctx.send(f"{e!s}")
            return
        summary.print_(summary.summarize(objects))
        await ctx.send("Printed to console.")
Пример #40
0
 def getDebugInfo(self, itemname):
     """Give debug info about a particular item."""
     global profile
     out = StringIO()
     if itemname == "":
         out.write("the item was empty")
         if profile:
             # Print a capped summary and launch the reference browser.
             all_objects = muppy.get_objects()
             summary.print_(summary.summarize(all_objects), 100)
             refbrowser.InteractiveBrowser(self).main()
         return out.getvalue()
     normalized = keywords.fixID(itemname)
     target = self.getSubValue(vtype.parseItemList(normalized))
     target.writeDebug(out)
     return out.getvalue()
Пример #41
0
    def check_memory():
        """Log live-object counts by type, a backref chain for the newest
        Future, and a full pympler summary."""
        logger.debug('Checking memory.')
        for type_name in ('SpiderTask', 'TaskExecutor', 'Future',
                          'PeriodicCallback', 'Workspace',
                          'MultipartRequestBodyProducer', 'HTTPRequest'):
            logger.debug(objgraph.by_type(type_name))
        future_objs = objgraph.by_type('Future')
        if future_objs:
            # Render who is keeping the newest Future alive.
            objgraph.show_chain(
                objgraph.find_backref_chain(future_objs[-1],
                                            objgraph.is_proper_module),
                filename='chain.png'
            )

        # Prints out a summary of the large objects
        summary.print_(summary.summarize(muppy.get_objects()))
Пример #42
0
 def memory_summary(self, summarize=True):
     "Using pympler summarize module to view memory summary."

     if summarize:
         live_objects = muppy.get_objects()
         Logger.info("ENV: \nMemory Footprint:")
         Logger.info("-----------------")
         return summary.print_(summary.summarize(live_objects), limit=50)
     # Otherwise print the incremental diff from the tracker.
     Logger.info("ENV: \nMemory Tracker:")
     Logger.info("---------------")
     self.mem_tracker.print_diff()
Пример #43
0
async def pympler_summary(request: web.Request) -> web.StreamResponse:
    """ Build and return a Pympler summary: https://pympler.readthedocs.io/en/latest/muppy.html#the-summary-module

    Example:

    curl -v -X POST 'localhost:9999/pympler/summary'
    """
    try:
        from pympler import muppy, summary
    except ImportError:
        return web.Response(status=500, text='pympler not installed\n')
    log = request.app[_LOG]
    sum_ = summary.summarize(muppy.get_objects())
    if 'print' in request.query:
        # ?print -> dump to the server log instead of the response body.
        log.info('pympler.summary.print')
        summary.print_(sum_)
        return web.Response(status=204)
    log.info('pympler.summary')
    return web.json_response(sum_)
Пример #44
0
def print_muppy_sumary():
    """Print a pympler memory summary.

    First call: take a baseline snapshot of all live objects and print the
    full summary.  Later calls: print the diff against that baseline.
    See http://pythonhosted.org/Pympler/index.html
    """
    try:
        from pympler import muppy, summary
    except ImportError:
        print("WARNING: pympler not installed")
        return
    global all_objects, obj_summary, class_tracker
    if all_objects is not None:
        # Subsequent calls: report what changed relative to the baseline.
        latest = summary.summarize(muppy.get_objects())
        summary.print_(summary.get_diff(obj_summary, latest), limit=200)
    else:
        # First call: record and print the baseline snapshot.
        all_objects = muppy.get_objects()
        obj_summary = summary.summarize(all_objects)
        summary.print_(obj_summary)
Пример #45
0
    def create_instance(self, filename=None,
                data=None, name=None, namespace=None, namespaces=None,
                preprocess=False, profile_memory=0, report_timing=False, clone=None):
        """
        Create a concrete instance of an abstract model, possibly using data
        read in from a file.

        Optional:
            filename:           The name of a Pyomo Data File that will be used to load
                                    data into the model.
            data:               A dictionary containing initialization data for the model
                                    to be used if there is no filename
            name:               The name given to the model.
            namespace:          A namespace used to select data.
            namespaces:         A list of namespaces used to select data.
            preprocess:         If True, then preprocess the constructed model.
            profile_memory:     A number that indicates the profiling level.
            report_timing:      Report timing statistics during construction.
            clone:              Force a clone of the model if this is True.

        Returns the constructed model instance (``self`` when called on an
        already-constructed model).
        """
        # Calling this on an already-constructed model is deprecated; it is
        # a no-op that returns the model itself.
        if self._constructed:
            logger.warning("DEPRECATION WARNING: Cannot call Model.create_instance() on a concrete model.")
            return self

        if name is None:
            name = self.name
        # A filename, when given, takes precedence as the data source.
        if not filename is None:
            data = filename
        if data is None:
            data = {}
        #
        # Generate a warning if this is a concrete model but the filename is specified.
        # A concrete model is already constructed, so passing in a data file is a waste
        # of time.
        #
        # NOTE(review): `basestring` is Python 2 only; this method predates
        # the Python 3 port.
        if self.is_constructed() and isinstance(filename, basestring):
            msg = "The filename=%s will not be loaded - supplied as an argument to the create_instance() method of a ConcreteModel instance with name=%s." % (filename, name)
            logger.warning(msg)
        #
        # If construction is deferred, then clone the model and
        #
        if not self._constructed:
            instance = self.clone()

            # A trailing None namespace is always appended as the fallback.
            if namespaces is None or len(namespaces) == 0:
                instance.load(data, namespaces=[None], profile_memory=profile_memory, report_timing=report_timing)
            else:
                instance.load(data, namespaces=list(namespaces)+[None], profile_memory=profile_memory, report_timing=report_timing)
        else:
            if clone:
                instance = self.clone()
            else:
                instance = self
        #
        # Preprocess the new model
        #
        if preprocess is True:
            print("      Model preprocessing during construction has been deprecated.")

        # NOTE(review): dead code -- the `False and` guard means this branch
        # can never execute; it is kept only as a record of the old
        # preprocessing/profiling behavior.
        if False and preprocess is True:

            if report_timing is True:
                start_time = time.time()

            instance.preprocess()

            if report_timing is True:
                total_time = time.time() - start_time
                print("      %6.2f seconds required for preprocessing" % total_time)

            if (pympler_available is True) and (profile_memory >= 2):
                mem_used = muppy.get_size(muppy.get_objects())
                print("      Total memory = %d bytes following instance preprocessing" % mem_used)
                print("")

            if (pympler_available is True) and (profile_memory >= 2):
                print("")
                print("      Summary of objects following instance preprocessing")
                post_preprocessing_summary = summary.summarize(muppy.get_objects())
                summary.print_(post_preprocessing_summary, limit=100)

        if not name is None:
            instance.name=name
        #
        # Indicate that the model is concrete/constructed
        #
        instance._constructed = True
        return instance
Пример #46
0
    # NOTE(review): fragment -- the enclosing `def` line (presumably a
    # find_longest_anagram(words) function) and the `histograms = {}`
    # initialization were lost from this snippet; `words`, `histograms`
    # and `get_histogram` are defined outside the visible span.
    longest_anagrams = None # tuple 2 words
    
    # Group words by character histogram: words sharing a histogram are
    # anagrams of each other.
    for w in words:
        key = get_histogram(w)
        if key not in histograms:
            histograms[key] = [w]
        else:
            histograms[key].append(w)
            # Only a group that just gained a second-or-later member can be
            # the anagram set; track the one with the longest words.
            if longest_anagrams is None or len(w) > len(longest_anagrams[0]):
                longest_anagrams = histograms[key]
    
    return longest_anagrams

# pympler provides the live-object summary printed at the end of the run.
from pympler import summary, muppy

if __name__ == "__main__":
    # Driver for the SOWPODS word-list analyses defined earlier in the file.
    print('SOWPODS analyzer')
    words = read_swopods()
    print('Total: %s words' % len(words))

    print('%s is the longest palindrome' % (find_longest_palindrome(words)))

    char, frequency, word = find_most_repeated_char(words)
    print("'%s' is the most repeated character in one word (in '%s', occurs %s times)" % (char, word, frequency))

    print("%s are the longest anagrams" % (find_longest_anagram(words)))

    # Dump a summary of every live object to show what the analyses left
    # in memory.
    all_objects = muppy.get_objects()
    sum1 = summary.summarize(all_objects)
    summary.print_(sum1) 
Пример #47
0
def before_after_each_function(request):
    """Pytest fixture: print process/virtual memory stats around each test.

    When DEBUG_MEMORY_INFO is set and the test grew RSS by more than 10MB,
    print a pympler diff of the objects created during the test.  Also
    reports any OS processes spawned during the test that are still alive
    afterwards (leaked subprocesses).
    """
    global _global_collect_info
    import psutil
    # Snapshot the set of existing PIDs so leaked processes can be spotted.
    current_pids = set(proc.pid for proc in psutil.process_iter())
    before_curr_proc_memory_info = psutil.Process().memory_info()

    # NOTE(review): sum1 is only bound when the *previous* test enabled
    # collection; the diff below relies on _global_collect_info staying True
    # between the two points.
    if _global_collect_info and DEBUG_MEMORY_INFO:
        try:
            from pympler import summary, muppy
            sum1 = summary.summarize(muppy.get_objects())
        except:
            pydev_log.exception()

    sys.stdout.write(
'''
===============================================================================
Memory before: %s
%s
===============================================================================
''' % (request.function, format_memory_info(psutil.virtual_memory(), before_curr_proc_memory_info)))
    yield

    # Collect any processes that appeared during the test and still exist.
    processes_info = []
    for proc in psutil.process_iter():
        if proc.pid not in current_pids:
            try:
                processes_info.append(
                    'New Process: %s(%s) - %s' % (
                        proc.name(),
                        proc.pid,
                        format_process_memory_info(proc.memory_info())
                    )
                )
            except psutil.NoSuchProcess:
                pass  # The process could've died in the meanwhile

    after_curr_proc_memory_info = psutil.Process().memory_info()

    if DEBUG_MEMORY_INFO:
        try:
            if after_curr_proc_memory_info.rss - before_curr_proc_memory_info.rss > 10 * 1000 * 1000:
                # 10 MB leak
                if _global_collect_info:
                    sum2 = summary.summarize(muppy.get_objects())
                    diff = summary.get_diff(sum1, sum2)
                    sys.stdout.write('===============================================================================\n')
                    sys.stdout.write('Leak info:\n')
                    sys.stdout.write('===============================================================================\n')
                    summary.print_(diff)
                    sys.stdout.write('===============================================================================\n')

                _global_collect_info = True
                # We'll only really collect the info on the next test (i.e.: if at one test
                # we used too much memory, the next one will start collecting)
            else:
                _global_collect_info = False
        except:
            pydev_log.exception()

    sys.stdout.write(
'''
===============================================================================
Memory after: %s
%s%s
===============================================================================


''' % (
    request.function,
    format_memory_info(psutil.virtual_memory(), after_curr_proc_memory_info),
    '' if not processes_info else '\nLeaked processes:\n' + '\n'.join(processes_info)),
    )
Пример #48
0
def main():
	"""Endless posting loop.

	Repeatedly: load accounts, sign users in (phase 1), create groups
	(phase 2), then run posting rounds against the target list (phase 3),
	pruning banned users and consumed targets between rounds.  Runs
	forever; any exception is printed and re-raised.
	"""
	adv_messages = rawdata.load_dict_from_json(ADV_MESSAGES_PATH)

	banned_users = []
	
	while True:
		accounts = rawdata.load_dict_from_json(ACCOUNTS_PATH)
		users = init(accounts)
		accounts.clear()
		try:
			# Phase 1: sign every not-yet-banned user in, one worker thread
			# per user; signing() fills users_signed and banned_users.
			queue = Queue()

			users_signed = []
			for user in users:
				if not user in banned_users:
					queue.put(user)

			for i in range(len(users) - len(banned_users)):
				thread = Thread(target=signing, args=(queue, users_signed, banned_users))
				thread.daemon = True
				thread.start()
			queue.join()
			# Keep only the successfully signed-in users.
			del users[:]
			users = users_signed.copy()
			del users_signed[:]

			# Phase 2: create groups using a small worker pool.
			queue = Queue()
			for user in users:
				if not user in banned_users:
					queue.put(user)
			theard_count = 3 if len(users) >= 5 else 1
			for i in range(theard_count):
				thread = Thread(target=group_creating_desire_c, args=(queue, COUNT_OF_GROUPS, NAME_OF_GROUPS))
				thread.daemon = True
				thread.start()
			queue.join()

			# Shared counters mutated by the posting worker threads.
			arg_list = {"counter":0, "adv_counter":0, "iteration":0}
			
			# Phase 3: posting rounds, one iteration per round.
			while True:
				queue = Queue()
				del_targ = []
				targets = rawdata.load_dict_from_json(TARGETS_PATH)
				len_targets = len(targets)
				time_start = time.time()
				for user in users:
					if not user in banned_users:
						queue.put(user)
				# Prints: "non-banned users X/Y | banned [...]"
				print("незабаненных пользователей %s/%s | забаненны %s" % (len(users) - len(banned_users), len(users), banned_users))
				for i in range(len(users) - len(banned_users)):
					thread = Thread(target=posting_for_all_users, args=(queue, targets, adv_messages, arg_list, del_targ, banned_users))
					thread.daemon = True
					thread.start()

				queue.join()

				time_stop = time.time()
				time_dif = time_stop - time_start
				# Pace rounds to roughly 24-29 minutes total each.
				wait_time = 60*(24 + random.uniform(0, 5)) - time_dif

				# Persist results: drop used targets and banned accounts
				# from their JSON stores, then free the big structures.
				u_banned_users = [user.username for user in banned_users]
				rawdata.delete_from_json(data=del_targ, path_to_data=TARGETS_PATH)
				rawdata.delete_from_json(data=u_banned_users, path_to_data=ACCOUNTS_PATH)
				del del_targ[:]
				targets.clear()
				del targets
				
				# Collect garbage and dump a pympler summary to watch for leaks.
				gc.collect()
				summary.print_(summary.summarize(muppy.get_objects()))


				# Prints: "targets left N" and "total/advertising X/Y in N min"
				print("Осталось целей %s" % len_targets)
				print("Всего/рекламных %s/%s за %s мин" % (arg_list["counter"], arg_list["adv_counter"], str(time_dif / 60)))
				
				if wait_time > 0:
					# Prints: "pause N minutes"
					print("Пауза %s минут\n" % str(wait_time / 60))
					time.sleep(wait_time)
				else:
					# Prints: "continuing without pause, N min behind"
					print("Продолжаем без паузы %s мин запаздывания\n" % str(-1 * wait_time / 60))
				arg_list["iteration"] += 1
		except:
			# Prints: "error in posting.py/main <exc type>"
			print("Ошибка в posting.py/main %s" % sys.exc_info()[0])
			raise
    def process_response(self, request, response):
        """Django middleware hook (Python 2): dump pympler memory diagnostics
        for every non-ignored request.

        Prints (and, when SHOW_ON_DJANGO_DEBUG_TOOLBAR_LOGGIN_PANEL is set,
        logs) summaries of the objects alive at request start
        (self.start_objects -- presumably captured in process_request, TODO
        confirm) and at response time, their diff, and deep sizes via
        asizeof, each section gated by the SHOW settings dict.
        """
        req = request.META['PATH_INFO']
        if not self.is_ignored(req):
            if SHOW_ON_DJANGO_DEBUG_TOOLBAR_LOGGIN_PANEL:
                logger.info(
                    '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
                    '~~~~~~~~~~~~~~'
                )
            print '\n\n'
            print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'\
                '~~~~~~~~~~~~~~~~~~~~~~~~~~'
            print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'\
                '~~~~~~~~~~~~~~~~~~~~~~~~~~'
            print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'\
                '~~~~~~~~~~~~~~~~~~~~~~~~~~'
            if SHOW_ON_DJANGO_DEBUG_TOOLBAR_LOGGIN_PANEL:
                logger.info(u'REQUESTED URL: {}'.format(req))
            print u'REQUESTED URL: {}'.format(req)
            # Snapshot all live objects at response time.
            self.end_objects = muppy.get_objects()
            if SHOW['request_summary']:
                sum_start = summary.summarize(self.start_objects)
                if SHOW_ON_DJANGO_DEBUG_TOOLBAR_LOGGIN_PANEL:
                    logger.info(
                        '~~~~~~~~~ SUMMARIZE REQUEST OBJECTS ~~~~~~~~~')
                    # Log only the 15 largest rows, sorted by total size.
                    for row in sorted(
                            sum_start, key=lambda i: i[2], reverse=True)[:15]:
                        logger.info(
                            "type: %60s , # objects: %10d, total size: %s",
                            *(row[0], row[1], filesizeformat(row[2]))
                        )
                print '~~~~~~~~~ SUMMARIZE REQUEST OBJECTS ~~~~~~~~~'
                summary.print_(sum_start)

            if SHOW['response_summary']:
                sum_end = summary.summarize(self.end_objects)
                if SHOW_ON_DJANGO_DEBUG_TOOLBAR_LOGGIN_PANEL:
                    logger.info(
                        '~~~~~~~~~ SUMMARIZE RESPONSE OBJECTS ~~~~~~~~~')
                    for row in sorted(
                            sum_end, key=lambda i: i[2], reverse=True)[:15]:
                        logger.info(
                            "type: %60s , # objects: %10d, total size: %s",
                            *(row[0], row[1], filesizeformat(row[2]))
                        )
                print '~~~~~~~~~ SUMMARIZE RESPONSE OBJECTS ~~~~~~~~~'
                summary.print_(sum_end)

            # NOTE(review): this branch assumes 'request_summary' and
            # 'response_summary' were also enabled -- otherwise sum_start /
            # sum_end are unbound here and this raises NameError.
            if SHOW['compared_request_response_summaries']:
                diff = summary.get_diff(sum_start, sum_end)
                if SHOW_ON_DJANGO_DEBUG_TOOLBAR_LOGGIN_PANEL:
                    logger.info(
                        '~~~~~~~~~ COMPARED REQUEST & RESPONSE SUMMARIES '
                        '~~~~~~~~~'
                    )
                    for row in sorted(
                            diff, key=lambda i: i[2], reverse=True)[:15]:
                        logger.info(
                            "type: %60s , # objects: %10d, total size: %s",
                            *(row[0], row[1], filesizeformat(row[2]))
                        )
                print \
                    '~~~~~~~~~ COMPARED REQUEST & RESPONSE SUMMARIES ~~~~~~~~~'
                summary.print_(diff)

            # print '~~~~~~~~~'
            # cb = refbrowser.ConsoleBrowser(
            #     response, maxdepth=2, str_func=output_function)
            # cb.print_tree()

            # Deep (recursive) sizes via pympler.asizeof -- can be slow on
            # large object graphs.
            a = asizeof(response)
            a_string = 'Total size of response object in kB: %s' % \
                str(a/1024.0)
            b = asizeof(self.end_objects)
            b_string = 'Total size of end_objects in MB: %s' % str(b/1048576.0)
            c = asizeof(self.start_objects)
            c_string = 'Total size of start_objects in MB: %s' % \
                str(c/1048576.0)

            if SHOW_ON_DJANGO_DEBUG_TOOLBAR_LOGGIN_PANEL:
                logger.info(
                    '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
                    '~~~~~~~~~~~~~~'
                )
                logger.info(a_string)
                logger.info(b_string)
                logger.info(c_string)
                logger.info(
                    '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
                    '~~~~~~~~~~~~~~'
                )

            print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'\
                '~~~~~~~~~~~~~~~~~~~~~~~~~~'
            print a_string
            print b_string
            print c_string
            print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'\
                '~~~~~~~~~~~~~~~~~~~~~~~~~~'
            print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'\
                '~~~~~~~~~~~~~~~~~~~~~~~~~~'
            print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'\
                '~~~~~~~~~~~~~~~~~~~~~~~~~~'

        return response
Пример #50
0
    def _load_model_data(self, modeldata, namespaces, **kwds):
        """
        Load declarations from a DataPortal object.

        Arguments:
            modeldata:  DataPortal holding the data used to construct each
                        component.
            namespaces: Iterable of namespace names used to select data;
                        every non-None entry must exist in modeldata._data,
                        otherwise IOError is raised.

        Keyword arguments:
            profile_memory: int profiling level (default 0); >= 2 prints
                        pympler memory statistics when pympler is available,
                        >= 3 additionally forces a GC first.
        """
        #
        # As we are primarily generating objects here (and acyclic ones
        # at that), there is no need to run the GC until the entire
        # model is created.  Simple reference-counting should be
        # sufficient to keep memory use under control.
        #
        with PauseGC() as pgc:

            #
            # Unlike the standard method in the pympler summary
            # module, the tracker doesn't print 0-byte entries to pad
            # out the limit.
            #
            profile_memory = kwds.get('profile_memory', 0)

            if (pympler_available is True) and (profile_memory >= 2):
                mem_used = muppy.get_size(muppy.get_objects())
                print("")
                print("      Total memory = %d bytes prior to model "
                      "construction" % mem_used)

            if (pympler_available is True) and (profile_memory >= 3):
                gc.collect()
                mem_used = muppy.get_size(muppy.get_objects())
                print("      Total memory = %d bytes prior to model "
                      "construction (after garbage collection)" % mem_used)

            #
            # Do some error checking
            #
            for namespace in namespaces:
                if not namespace is None and not namespace in modeldata._data:
                    msg = "Cannot access undefined namespace: '%s'"
                    raise IOError(msg % namespace)

            #
            # Initialize each component in order.
            #
            # (A dead `if False:` timing/clone-reporting block that used to
            # live in this loop referenced undefined names -- start_time and
            # the misspelled `clone_counters` -- and has been removed.)
            #
            for component_name, component in iteritems(self.component_map()):

                # The Model component itself is not constructed here.
                if component.type() is Model:
                    continue

                self._initialize_component(modeldata, namespaces, component_name, profile_memory)

            # Note: As is, connectors are expanded when using command-line pyomo but not calling model.create(...) in a Python script.
            # John says this has to do with extension points which are called from commandline but not when writing scripts.
            # Uncommenting the next two lines switches this (command-line fails because it tries to expand connectors twice)
            #connector_expander = ConnectorExpander()
            #connector_expander.apply(instance=self)

            if (pympler_available is True) and (profile_memory >= 2):
                print("")
                print("      Summary of objects following instance construction")
                post_construction_summary = summary.summarize(muppy.get_objects())
                summary.print_(post_construction_summary, limit=100)
                print("")
Пример #51
0
    def create_instance( self, filename=None, data=None, name=None,
                         namespace=None, namespaces=None,
                         profile_memory=0, report_timing=False,
                         **kwds ):
        """
        Create a concrete instance of an abstract model, possibly using data
        read in from a file.

        Parameters
        ----------
        filename: `str`, optional           
            The name of a Pyomo Data File that will be used to load data into 
            the model.
        data: `dict`, optional
            A dictionary containing initialization data for the model to be 
            used if there is no filename
        name: `str`, optional
            The name given to the model.
        namespace: `str`, optional          
            A namespace used to select data.
        namespaces: `list`, optional   
            A list of namespaces used to select data.
        profile_memory: `int`, optional    
            A number that indicates the profiling level.
        report_timing: `bool`, optional     
            Report timing statistics during construction.

        """
        #
        # Generate a warning if this is a concrete model but the
        # filename is specified.  A concrete model is already
        # constructed, so passing in a data file is a waste of time.
        #
        if self.is_constructed() and isinstance(filename, string_types):
            msg = "The filename=%s will not be loaded - supplied as an " \
                  "argument to the create_instance() method of a "\
                  "concrete instance with name=%s." % (filename, name)
            logger.warning(msg)

        if 'clone' in kwds:
            kwds.pop('clone')
            deprecation_warning(
                "Model.create_instance() no longer accepts the 'clone' "
                "argument: the base abstract model is always cloned.")
        if 'preprocess' in kwds:
            kwds.pop('preprocess')
            deprecation_warning(
                "Model.create_instance() no longer accepts the preprocess' "
                "argument: preprocessing is always deferred to when the "
                "model is sent to the solver")
        if kwds:
            msg = \
"""Model.create_instance() passed the following unrecognized keyword
arguments (which have been ignored):"""
            for k in kwds:
                msg = msg + "\n    '%s'" % (k,)
            logger.error(msg)

        if self.is_constructed():
            deprecation_warning(
                "Cannot call Model.create_instance() on a constructed "
                "model; returning a clone of the current model instance.")
            return self.clone()

        if report_timing:
            pyomo.common.timing.report_timing()

        if name is None:
            name = self.name
        if filename is not None:
            if data is not None:
                logger.warning("Model.create_instance() passed both 'filename' "
                               "and 'data' keyword arguments.  Ignoring the "
                               "'data' argument")
            data = filename
        if data is None:
            data = {}

        #
        # Clone the model and load the data
        #
        instance = self.clone()

        if name is not None:
            instance._name = name

        # If someone passed a rule for creating the instance, fire the
        # rule before constructing the components.
        if instance._rule is not None:
            instance._rule(instance)

        if namespaces:
            _namespaces = list(namespaces)
        else:
            _namespaces = []
        if namespace is not None:
            _namespaces.append(namespace)
        if None not in _namespaces:
            _namespaces.append(None)

        instance.load( data,
                       namespaces=_namespaces,
                       profile_memory=profile_memory )

        #
        # Preprocess the new model
        #

        if False and preprocess is True:

            if report_timing is True:
                start_time = time.time()

            instance.preprocess()

            if report_timing is True:
                total_time = time.time() - start_time
                print("      %6.2f seconds required for preprocessing" % total_time)

            if (pympler_available is True) and (profile_memory >= 2):
                mem_used = muppy.get_size(muppy.get_objects())
                print("      Total memory = %d bytes following instance preprocessing" % mem_used)
                print("")

            if (pympler_available is True) and (profile_memory >= 2):
                print("")
                print("      Summary of objects following instance preprocessing")
                post_preprocessing_summary = summary.summarize(muppy.get_objects())
                summary.print_(post_preprocessing_summary, limit=100)

        #
        # Indicate that the model is concrete/constructed
        #
        instance._constructed = True
        #
        # Change this class from "Abstract" to "Concrete".  It is
        # absolutely crazy that this is allowed in Python, but since the
        # AbstractModel and ConcreteModel are basically identical, we
        # can "reassign" the new concrete instance to be an instance of
        # ConcreteModel
        #
        instance.__class__ = ConcreteModel
        return instance
Пример #52
0
def log_mem(mes=''):
    """In debug mode, log the live-object count (tagged with *mes*) and
    print a full pympler summary; otherwise do nothing."""
    if not options.debug:
        return
    live_objects = muppy.get_objects()
    overview = summary.summarize(live_objects)
    app_log.log(logging.DEBUG, 'mem (%s): %d' % (mes, len(live_objects)))
    summary.print_(overview)
Пример #53
0
def memory_usage(where):
    """Log a short memory report tagged with *where*: the top-2 pympler
    summary rows plus the process virtual-memory size in Mb."""
    snapshot = summary.summarize(muppy.get_objects())
    log.debug("Memory summary: {}".format(where))
    summary.print_(snapshot, limit=2)
    log.debug("VM: {:2f}Mb".format(get_virtual_memory_usage_kb() / 1024.0))
Пример #54
0
def create_model(data):
    """
    Create instance of Pyomo model.

    Return:
        model:      Model object.
        instance:   Problem instance.
        symbol_map: Symbol map created when writing model to a file.
        filename:    Filename that a model instance was written to.
    """
    #
    if not data.options.runtime.logging == 'quiet':
        sys.stdout.write('[%8.2f] Creating model\n' % (time.time()-start_time))
        sys.stdout.flush()
    #
    if (pympler_available is True) and (data.options.runtime.profile_memory >= 1):
        global memory_data
        mem_used = muppy.get_size(muppy.get_objects())
        data.local.max_memory = mem_used
        print("   Total memory = %d bytes prior to model construction" % mem_used)
    #
    # Create Model
    #
    ep = ExtensionPoint(IPyomoScriptCreateModel)
    model_name = 'model'
    if data.options.model.object_name is not None: model_name = data.options.model.object_name

    if model_name in dir(data.local.usermodel):
        if len(ep) > 0:
            msg = "Model construction function 'create_model' defined in "    \
                  "file '%s', but model is already constructed!"
            raise SystemExit(msg % data.options.model.filename)
        model = getattr(data.local.usermodel, data.options.model.object_name)

        if model is None:
            msg = "'%s' object is 'None' in module %s"
            raise SystemExit(msg % (model_name, data.options.model.filename))

    else:
        if len(ep) == 0:
            msg = "Neither '%s' nor 'pyomo_create_model' are available in "    \
                  'module %s'
            raise SystemExit(msg % ( model_name, data.options.model.filename ))
        elif len(ep) > 1:
            msg = 'Multiple model construction plugins have been registered!'
            raise SystemExit(msg)
        else:
            model_options = data.options.model.options.value()
            #if model_options is None:
                #model_options = []
            model = ep.service().apply( options = pyutilib.misc.Container(*data.options), model_options=pyutilib.misc.Container(*model_options) )
    #
    for ep in ExtensionPoint(IPyomoScriptPrintModel):
        ep.apply( options=data.options, model=model )

    #
    # Create Problem Instance
    #
    ep = ExtensionPoint(IPyomoScriptCreateDataPortal)
    if len(ep) > 1:
        msg = 'Multiple model data construction plugins have been registered!'
        raise SystemExit(msg)

    if len(ep) == 1:
        modeldata = ep.service().apply( options=data.options, model=model )
    else:
        modeldata = DataPortal()


    if model._constructed:
        #
        # TODO: use a better test for ConcreteModel
        #
        instance = model

    elif len(data.options.data.files) > 1:
        #
        # Load a list of *.dat files
        #
        for file in data.options.data.files:
            suffix = (file).split(".")[-1]
            if suffix != "dat":
                msg = 'When specifiying multiple data files, they must all '  \
                      'be *.dat files.  File specified: %s'
                raise SystemExit(msg % str( file ))

            modeldata.load(filename=file, model=model)

        instance = model.create_instance(modeldata,
                                         namespaces=data.options.data.namespaces,
                                         profile_memory=data.options.runtime.profile_memory,
                                         report_timing=data.options.runtime.report_timing)

    elif len(data.options.data.files) == 1:
        #
        # Load a *.dat file or process a *.py data file
        #
        suffix = (data.options.data.files[0]).split(".")[-1].lower()
        if suffix == "dat":
            instance = model.create_instance(data.options.data.files[0],
                                             namespaces=data.options.data.namespaces,
                                             profile_memory=data.options.runtime.profile_memory,
                                             report_timing=data.options.runtime.report_timing)
        elif suffix == "py":
            userdata = pyutilib.misc.import_file(data.options.data.files[0], clear_cache=True)
            if "modeldata" in dir(userdata):
                if len(ep) == 1:
                    msg = "Cannot apply 'pyomo_create_modeldata' and use the" \
                          " 'modeldata' object that is provided in the model"
                    raise SystemExit(msg)

                if userdata.modeldata is None:
                    msg = "'modeldata' object is 'None' in module %s"
                    raise SystemExit(msg % str( data.options.data.files[0] ))

                modeldata=userdata.modeldata

            else:
                if len(ep) == 0:
                    msg = "Neither 'modeldata' nor 'pyomo_create_dataportal' "  \
                          'is defined in module %s'
                    raise SystemExit(msg % str( data.options.data.files[0] ))

            modeldata.read(model)
            instance = model.create_instance(modeldata,
                                             namespaces=data.options.data.namespaces,
                                             profile_memory=data.options.runtime.profile_memory,
                                             report_timing=data.options.runtime.report_timing)
        elif suffix == "yml" or suffix == 'yaml':
            try:
                import yaml
            except:
                msg = "Cannot apply load data from a YAML file: PyYaml is not installed"
                raise SystemExit(msg)

            modeldata = yaml.load(open(data.options.data.files[0]))
            instance = model.create_instance(modeldata,
                                             namespaces=data.options.data.namespaces,
                                             profile_memory=data.options.runtime.profile_memory,
                                             report_timing=data.options.runtime.report_timing)
        else:
            raise ValueError("Unknown data file type: "+data.options.data.files[0])
    else:
        instance = model.create_instance(modeldata,
                                         namespaces=data.options.data.namespaces,
                                         profile_memory=data.options.runtime.profile_memory,
                                         report_timing=data.options.runtime.report_timing)

    #
    modify_start_time = time.time()
    for ep in ExtensionPoint(IPyomoScriptModifyInstance):
        if data.options.runtime.report_timing is True:
            tick = time.time()
        ep.apply( options=data.options, model=model, instance=instance )
        if data.options.runtime.report_timing is True:
            print("      %6.2f seconds to apply %s" % (time.time() - tick, type(ep)))
            tick = time.time()
    #
    for transformation in data.options.transform:
        with TransformationFactory(transformation) as xfrm:
            instance = xfrm.create_using(instance)
            if instance is None:
                raise SystemExit("Unexpected error while applying "
                                 "transformation '%s'" % transformation)
    #
    if data.options.runtime.report_timing is True:
        total_time = time.time() - modify_start_time
        print("      %6.2f seconds required for problem transformations" % total_time)

    if logger.isEnabledFor(logging.DEBUG):
        print("MODEL INSTANCE")
        instance.pprint()
        print("")

    for ep in ExtensionPoint(IPyomoScriptPrintInstance):
        ep.apply( options=data.options, instance=instance )

    fname=None
    smap_id=None
    if not data.options.model.save_file is None:

        if data.options.runtime.report_timing is True:
            write_start_time = time.time()

        if data.options.model.save_file == True:
            if data.local.model_format in (ProblemFormat.cpxlp, ProblemFormat.lpxlp):
                fname = (data.options.data.files[0])[:-3]+'lp'
            else:
                fname = (data.options.data.files[0])[:-3]+str(data.local.model_format)
            format=data.local.model_format
        else:
            fname = data.options.model.save_file
            format= data.options.model.save_format

        io_options = {}
        if data.options.model.symbolic_solver_labels:
            io_options['symbolic_solver_labels'] = True
        if data.options.model.file_determinism != 1:
            io_options['file_determinism'] = data.options.model.file_determinism
        (fname, smap_id) = instance.write(filename=fname,
                                          format=format,
                                          io_options=io_options)

        if not data.options.runtime.logging == 'quiet':
            if not os.path.exists(fname):
                print("ERROR: file "+fname+" has not been created!")
            else:
                print("Model written to file '"+str(fname)+"'")

        if data.options.runtime.report_timing is True:
            total_time = time.time() - write_start_time
            print("      %6.2f seconds required to write file" % total_time)

        if (pympler_available is True) and (data.options.runtime.profile_memory >= 2):
            print("")
            print("      Summary of objects following file output")
            post_file_output_summary = summary.summarize(muppy.get_objects())
            summary.print_(post_file_output_summary, limit=100)

            print("")

    for ep in ExtensionPoint(IPyomoScriptSaveInstance):
        ep.apply( options=data.options, instance=instance )

    if (pympler_available is True) and (data.options.runtime.profile_memory >= 1):
        mem_used = muppy.get_size(muppy.get_objects())
        if mem_used > data.local.max_memory:
            data.local.max_memory = mem_used
        print("   Total memory = %d bytes following Pyomo instance creation" % mem_used)

    return pyutilib.misc.Options(
                    model=model, instance=instance,
                    smap_id=smap_id, filename=fname, local=data.local )
Пример #55
0
 def summarize_memory():
     """Print this process's virtual memory size (in MB) and a one-line
     pympler summary of the largest object category.

     Requires ``psutil`` and ``pympler`` (``summary``/``muppy``) to be
     importable in the enclosing module.
     """
     # psutil.Process.memory_info_ex() was deprecated in psutil 4.0 and has
     # since been removed; memory_info() exposes the same .vms field.
     vms_mb = psutil.Process().memory_info().vms / (1024 * 1024)
     print("Virtual machine: {:.2f}Mb".format(vms_mb))
     # limit=1 keeps the report to the single biggest object type.
     summary.print_(summary.summarize(muppy.get_objects()), limit=1)
Пример #56
0
def memusage(o):
    """Summarize the memory footprint of the objects in *o* and print it.

    *o* is any iterable of objects accepted by ``pympler.summary.summarize``.
    """
    obj_summary = summary.summarize(o)
    summary.print_(obj_summary)
Пример #57
0
def print_summary():
    """Summarize every tracked object and print the resulting table."""
    tracked = get_objects()
    summary.print_(summary.summarize(tracked))
Пример #58
0
import random
import hashlib
from pympler import summary, muppy, asizeof
from time import time

hs = set()
q = 1000000
def rand_string(l):
    """Return a random string of *l* lowercase ASCII letters (codes 97..122)."""
    letters = [chr(random.randint(97, 122)) for _ in range(l)]
    return ''.join(letters)
# Benchmark: build q records of twelve random 8-char fields, hash each with
# MD5, and collect the digests in `hs` to count distinct records.  Every
# 100000 records, print a progress counter and the elapsed time per batch.
x = 1
ti = time()
for i in range(q):
    fields = (rand_string(8) for _ in range(12))
    md = hashlib.md5()
    # hashlib.update() requires bytes on Python 3; the original Python 2
    # code passed the joined str directly.
    md.update('*'.join(fields).encode('ascii'))
    h = md.digest()
    hs.add(h)
    if x % 100000 == 0:
        # Python 3 print function (original used Python 2 `print x`).
        print(x)
        print(time() - ti)
        ti = time()
    x += 1
all_objects = muppy.get_objects()
suml = summary.summarize(all_objects)
summary.print_(suml)
# The original bare `len(hs)` expression was interactive-session residue
# whose value was silently discarded in a script; print it instead.
print(len(hs))