def print_summary(self):
        """
        Print a per-class summary for each snapshot to ``self.stream``.

        For every snapshot, writes one header line (description and total
        asizeof size) followed by one row per tracked class showing active
        instance count, total size, average size and percentage.
        """
        classlist = self.tracked_classes

        fobj = self.stream

        fobj.write('---- SUMMARY '+'-'*66+'\n')
        for snapshot in self.snapshots:
            self.annotate_snapshot(snapshot)
            fobj.write('%-35s %11s %12s %12s %5s\n' % (
                trunc(snapshot.desc, 35),
                'active',
                pp(snapshot.asizeof_total),
                'average',
                'pct'
            ))
            for classname in classlist:
                info = snapshot.classes.get(classname)
                # A class may be missing from an individual snapshot (e.g.
                # registered after the snapshot was taken); previously this
                # raised TypeError on the None subscript below. Skip it,
                # matching the sibling print_summary implementation.
                if info:
                    fobj.write('  %-33s %11d %12s %12s %4d%%\n' % (
                        trunc(classname, 33),
                        info['active'],
                        pp(info['sum']),
                        pp(info['avg']),
                        info['pct']
                    ))
        fobj.write('-'*79+'\n')
Exemple #2
0
    def render_vars(self, req):
        """
        Build the template context for the memory panel: process-level
        rows, per-class statistics, and request/process history.
        """
        stats = self.stats
        before, after = stats['before'], stats['after']
        class_stats = stats['class_stats']

        # Absolute values first, then deltas, then OS-specific entries
        # (the latter are already pretty-printed).
        raw_rows = [
            ('Resident set size', after.rss),
            ('Virtual size', after.vsz),
        ]
        raw_rows.extend(after - before)
        rows = [(label, pp(amount)) for label, amount in raw_rows]
        rows.extend(after.os_specific)

        # Only report classes that still have live instances.
        latest = class_stats.snapshots[-1]
        classes = []
        for model in class_stats.tracked_classes:
            counts = [n for _, n in class_stats.history[model]]
            if counts and counts[-1] > 0:
                total = latest.classes.get(model, {}).get('sum', 0)
                classes.append((model, counts, pp(total)))

        return {
            'rows': rows,
            'classes': classes,
            'process_history': process_history,
            'req_before': stats['req_before'],
            'req_after': stats['req_after'],
        }
    def print_summary(self):
        """
        Write a per-class summary of every recorded footprint to the
        output stream.
        """
        classlist = sorted(self.index.keys())

        out = self.stream
        out.write('---- SUMMARY '+'-'*66+'\n')
        for footprint in self.footprint:
            self.annotate_snapshot(footprint)
            out.write('%-35s %11s %12s %12s %5s\n' % (
                trunc(footprint.desc, 35),
                'active',
                pp(footprint.asizeof_total),
                'average',
                'pct'
            ))
            for classname in classlist:
                # 'classes' may lack entries for classes annotated only in
                # other snapshots (print_stats can be called repeatedly).
                info = footprint.classes.get(classname)
                if not info:
                    continue
                out.write('  %-33s %11d %12s %12s %4d%%\n' % (
                    trunc(classname, 33),
                    info['active'],
                    pp(info['sum']),
                    pp(info['avg']),
                    info['pct']
                ))
        out.write('-'*79+'\n')
    def print_summary(self):
        """
        Print a per-class summary for each snapshot to ``self.stream``.

        Each snapshot gets a header line with its description and total
        asizeof size, then one row per tracked class (active count, total,
        average, percentage).
        """
        classlist = self.tracked_classes

        fobj = self.stream

        fobj.write('---- SUMMARY ' + '-' * 66 + '\n')
        for snapshot in self.snapshots:
            self.annotate_snapshot(snapshot)
            fobj.write('%-35s %11s %12s %12s %5s\n' % (
                trunc(snapshot.desc, 35),
                'active',
                pp(snapshot.asizeof_total),
                'average',
                'pct'
            ))
            for classname in classlist:
                info = snapshot.classes.get(classname)
                # Guard against classes absent from this snapshot;
                # subscripting the resulting None previously raised
                # TypeError (see the guarded sibling implementation).
                if info:
                    fobj.write('  %-33s %11d %12s %12s %4d%%\n' % (
                        trunc(classname, 33),
                        info['active'],
                        pp(info['sum']),
                        pp(info['avg']),
                        info['pct']
                    ))
        fobj.write('-' * 79 + '\n')
Exemple #5
0
 def nav_subtitle(self):
     """Short RSS summary (with positive growth, if any) for the nav bar."""
     stats = self.get_stats()
     rss = stats['after'].rss
     growth = rss - stats['before'].rss
     suffix = ('(+%s)' % pp(growth)) if growth > 0 else ''
     return "%s %s" % (pp(rss), suffix)
Exemple #6
0
 def nav_subtitle(self):
     """Return the current RSS and, when it grew, a '(+delta)' marker."""
     ctx = self.get_stats()
     current = ctx['after'].rss
     diff = current - ctx['before'].rss
     marker = ('(+%s)' % pp(diff)) if diff > 0 else ''
     return "%s %s" % (pp(current), marker)
 def print_object(self, tobj):
     """
     Print the gathered information of object `tobj` in human-readable
     format.

     Emits a header line (free marker or id), the instantiation trace if
     recorded, one line per size snapshot with its referents, and a
     finalize line if the object has died.
     """
     # Use an explicit None test so this branch agrees with the
     # `is not None` finalize check below; previously a falsy death
     # timestamp would print a live-format header AND a finalize line.
     if tobj.death is not None:
         self.stream.write('%-32s ( free )   %-35s\n' % (
             trunc(tobj.name, 32, left=True), trunc(tobj.repr, 35)))
     else:
         self.stream.write('%-32s 0x%08x %-35s\n' % (
             trunc(tobj.name, 32, left=True),
             tobj.id,
             trunc(tobj.repr, 35)
         ))
     if tobj.trace:
         self.stream.write(_format_trace(tobj.trace))
     for (timestamp, size) in tobj.snapshots:
         self.stream.write('  %-30s %s\n' % (
             pp_timestamp(timestamp), pp(size.size)
         ))
         self._print_refs(size.refs, size.size)
     if tobj.death is not None:
         self.stream.write('  %-30s finalize\n' % (
             pp_timestamp(tobj.death),
         ))
Exemple #8
0
    def update(self):
        """
        Get virtual size of current process by reading the process' stat
        and status files. This should work for Linux.

        Returns True on success, False when the proc files cannot be
        opened (e.g. on non-Linux platforms).
        """
        def size_in_bytes(value):
            # /proc/self/status reports sizes in kB.
            return int(value.split()[0]) * 1024

        try:
            stat = open('/proc/self/stat')
        except IOError:  # pragma: no cover
            return False
        # 'with' guarantees both descriptors are released even if parsing
        # raises part-way through (the original leaked them on error, and
        # leaked 'stat' if opening 'status' failed).
        with stat:
            try:
                status = open('/proc/self/status')
            except IOError:  # pragma: no cover
                return False
            with status:
                stats = stat.read().split()
                self.vsz = int(stats[22])
                self.rss = int(stats[23]) * self.pagesize
                self.pagefaults = int(stats[11])

                for entry in status.readlines():
                    key, value = entry.split(':', 1)
                    if key == 'VmData':
                        self.data_segment = size_in_bytes(value)
                    elif key == 'VmExe':
                        self.code_segment = size_in_bytes(value)
                    elif key == 'VmLib':
                        self.shared_segment = size_in_bytes(value)
                    elif key == 'VmStk':
                        self.stack_segment = size_in_bytes(value)
                    key = self.key_map.get(key)
                    if key:
                        self.os_specific.append((key, pp(size_in_bytes(value))))
        return True
Exemple #9
0
    def _process_cache(c):
        """Summarise each entry of cache ``c`` plus totals, for display."""
        items = []
        for key, (expiration, insert_time, data) in c._cache.items():
            items.append({
                'key': ', '.join(filter(None, (key or [[]])[0] or [])),
                'expiration': expiration,
                'saved': insert_time,
                'size': stringutils.pp(asizeof(data)),
            })

        return {
            'total_size': stringutils.pp(asizeof(c._cache)),
            'total_items': len(c._cache),
            'items': items,
        }
Exemple #10
0
 def _print_refs(self,
                 fobj: IO,
                 refs: Iterable[Asized],
                 total: int,
                 level: int = 1,
                 minsize: int = 0,
                 minpct: float = 0.1) -> None:
     """
     Recursively render referents as HTML table rows, largest first.
     """
     # Sort ascending then reverse, so ties keep the same relative order
     # as the original sort-and-reverse idiom.
     ordered = sorted(refs, key=lambda r: r.size)[::-1]
     if level == 1:
         fobj.write('<table>\n')
     for ref in ordered:
         share = ref.size * 100.0 / total
         # Skip referents too small to be interesting.
         if ref.size > minsize and share > minpct:
             fobj.write(self.refrow % dict(level=level,
                                           name=trunc(str(ref.name), 128),
                                           size=pp(ref.size),
                                           pct=share))
             self._print_refs(fobj, ref.refs, total, level=level + 1)
     if level == 1:
         fobj.write("</table>\n")
Exemple #11
0
    def update(self):
        """
        Get virtual size of current process by reading the process' stat
        and status files. This should work for Linux.

        Returns True on success, False when the proc files cannot be
        opened (e.g. on non-Linux platforms).
        """
        def size_in_bytes(value):
            # /proc/self/status reports sizes in kB.
            return int(value.split()[0]) * 1024

        try:
            stat = open('/proc/self/stat')
        except IOError:  # pragma: no cover
            return False
        # 'with' releases the descriptors even if parsing raises (the
        # original leaked them on error).
        with stat:
            try:
                status = open('/proc/self/status')
            except IOError:  # pragma: no cover
                return False
            with status:
                stats = stat.read().split()
                self.vsz = int(stats[22])
                self.rss = int(stats[23]) * self.pagesize
                self.pagefaults = int(stats[11])

                for entry in status.readlines():
                    # maxsplit=1: a value containing ':' previously made
                    # the 2-tuple unpacking raise ValueError (the sibling
                    # implementation already used split(':', 1)).
                    key, value = entry.split(':', 1)
                    if key == 'VmData':
                        self.data_segment = size_in_bytes(value)
                    elif key == 'VmExe':
                        self.code_segment = size_in_bytes(value)
                    elif key == 'VmLib':
                        self.shared_segment = size_in_bytes(value)
                    elif key == 'VmStk':
                        self.stack_segment = size_in_bytes(value)
                    key = self.key_map.get(key)
                    if key:
                        self.os_specific.append((key, pp(size_in_bytes(value))))
        return True
Exemple #12
0
    def content(self):
        """
        Render the memory panel: process-level rows plus per-class
        statistics from the tracker's most recent snapshot.
        """
        stats = self._tracker.stats
        stats.annotate()
        context = self.context.copy()
        rows = [('Resident set size', self._after.rss),
                ('Virtual size', self._after.vsz),
                ]
        rows.extend(self._after - self._before)
        rows = [(key, pp(value)) for key, value in rows]
        rows.extend(self._after.os_specific)

        classes = []
        snapshot = stats.snapshots[-1]
        for model in stats.tracked_classes:
            history = [cnt for _, cnt in stats.history[model]]
            size = snapshot.classes.get(model, {}).get('sum', 0)
            # Bug fix: the original tested the undefined name `cnt` here,
            # raising NameError on Python 3. Include a class only when its
            # latest instance count is positive (matches the sibling
            # implementations in this file).
            if history and history[-1] > 0:
                classes.append((model, history, pp(size)))
        context.update({'rows': rows, 'classes': classes})
        return render_to_string(self.template, context)
 def print_stats(self, fobj=sys.stdout):
     """
     Log annotated garbage objects to console or file.
     """
     # Largest objects first (sort ascending, then reverse, preserving
     # the original tie ordering).
     self.metadata.sort(key=lambda meta: meta.size)
     self.metadata.reverse()
     fobj.write('%-10s %8s %-12s %-46s\n' %
                ('id', 'size', 'type', 'representation'))
     for meta in self.metadata:
         fobj.write('0x%08x %8d %-12s %-46s\n' %
                    (meta.id, meta.size, trunc(meta.type, 12),
                     trunc(meta.str, 46)))
     fobj.write('Garbage: %8d collected objects (%6d in cycles): %12s\n' %
                (self.count, self.count_in_cycles, pp(self.total_size)))
Exemple #14
0
    def content(self):
        """Render the panel body from the tracker's latest snapshot."""
        stats = self._tracker.stats
        stats.annotate()
        context = self.context.copy()

        process_rows = [
            ('Resident set size', self._after.rss),
            ('Virtual size', self._after.vsz),
        ]
        process_rows.extend(self._after - self._before)
        rows = [(label, pp(amount)) for label, amount in process_rows]
        rows.extend(self._after.os_specific)

        snapshot = stats.snapshots[-1]
        classes = []
        for model in stats.tracked_classes:
            details = snapshot.classes.get(model, {})
            active = details.get('active', 0)
            # Only list classes with live instances in the final snapshot.
            if active > 0:
                classes.append((model, active, pp(details.get('sum', 0))))
        context.update({'rows': rows, 'classes': classes})
        return render_to_string(self.template, context)
Exemple #15
0
    def content(self):
        """Render panel content from stats gathered around the request."""
        context = self.get_stats()
        before, after = context['before'], context['after']
        stats = context['stats']

        raw = [('Resident set size', after.rss),
               ('Virtual size', after.vsz)]
        raw.extend(after - before)
        rows = [(key, pp(value)) for key, value in raw]
        rows.extend(after.os_specific)

        snapshot = stats.snapshots[-1]
        classes = []
        for model in stats.tracked_classes:
            counts = [n for _, n in stats.history[model]]
            # Report a class only if it still has live instances.
            if counts and counts[-1] > 0:
                total = snapshot.classes.get(model, {}).get('sum', 0)
                classes.append((model, counts, pp(total)))
        context.update({'rows': rows, 'classes': classes})
        return render_to_string(self.template, context)
Exemple #16
0
    def content(self):
        """Build and render the panel template context."""
        context = self.get_stats()
        stats = context['stats']
        after = context['after']

        rows = [('Resident set size', after.rss),
                ('Virtual size', after.vsz)]
        rows.extend(after - context['before'])
        rows = [(key, pp(value)) for key, value in rows]
        rows.extend(after.os_specific)

        classes = []
        last = stats.snapshots[-1]
        for model in stats.tracked_classes:
            timeline = [n for _, n in stats.history[model]]
            if timeline and timeline[-1] > 0:
                total = last.classes.get(model, {}).get('sum', 0)
                classes.append((model, timeline, pp(total)))
        context.update({'rows': rows, 'classes': classes})
        return render_to_string(self.template, context)
    def print_stats(self, stream=None):
        """
        Log annotated garbage objects to console or file.

        :param stream: open file, uses sys.stdout if not given
        """
        out = stream or sys.stdout  # pragma: no cover (stdout fallback)
        # Largest first via negated key.
        self.metadata.sort(key=lambda meta: -meta.size)
        out.write('%-10s %8s %-12s %-46s\n' % ('id', 'size', 'type', 'representation'))
        for meta in self.metadata:
            out.write('0x%08x %8d %-12s %-46s\n' %
                      (meta.id, meta.size, trunc(meta.type, 12),
                       trunc(meta.str, 46)))
        out.write('Garbage: %8d collected objects (%s in cycles): %12s\n' %
                  (self.count, self.num_in_cycles, pp(self.total_size)))
    def print_class_details(self, fname, classname):
        """
        Print detailed statistics and instances for the class `classname`.
        All data will be written to the file `fname`.
        """
        sizes = [tobj.get_max_size() for tobj in self.index[classname]]

        # 'with' ensures the report file is closed even if a formatting
        # step below raises (the original leaked the handle on error).
        with open(fname, "w") as fobj:
            fobj.write(self.header % (classname, self.style))

            fobj.write("<h1>%s</h1>\n" % (classname))

            data = {'cnt': len(self.index[classname]), 'cls': classname}
            # builtin sum() replaces the manual accumulation loop
            data['avg'] = pp(sum(sizes) / len(sizes))
            data['max'] = pp(max(sizes))
            data['min'] = pp(min(sizes))
            fobj.write(self.class_summary % data)

            fobj.write(self.charts[classname])

            fobj.write("<h2>Coalesced Referents per Snapshot</h2>\n")
            for snapshot in self.snapshots:
                if classname in snapshot.classes:
                    merged = snapshot.classes[classname]['merged']
                    fobj.write(self.class_snapshot % {
                        'name': snapshot.desc, 'cls': classname,
                        'total': pp(merged.size)
                    })
                    if merged.refs:
                        self._print_refs(fobj, merged.refs, merged.size)
                    else:
                        fobj.write('<p>No per-referent sizes recorded.</p>\n')

            fobj.write("<h2>Instances</h2>\n")
            for tobj in self.index[classname]:
                fobj.write('<table id="tl" width="100%" rules="rows">\n')
                fobj.write('<tr><td id="hl" width="140px">Instance</td><td id="hl">%s at 0x%08x</td></tr>\n' % (tobj.name, tobj.id))
                if tobj.repr:
                    fobj.write("<tr><td>Representation</td><td>%s&nbsp;</td></tr>\n" % tobj.repr)
                fobj.write("<tr><td>Lifetime</td><td>%s - %s</td></tr>\n" % (pp_timestamp(tobj.birth), pp_timestamp(tobj.death)))
                if tobj.trace:
                    trace = "<pre>%s</pre>" % (_format_trace(tobj.trace))
                    fobj.write("<tr><td>Instantiation</td><td>%s</td></tr>\n" % trace)
                for (timestamp, size) in tobj.snapshots:
                    fobj.write("<tr><td>%s</td>" % pp_timestamp(timestamp))
                    if not size.refs:
                        fobj.write("<td>%s</td></tr>\n" % pp(size.size))
                    else:
                        fobj.write("<td>%s" % pp(size.size))
                        self._print_refs(fobj, size.refs, size.size)
                        fobj.write("</td></tr>\n")
                fobj.write("</table>\n")

            fobj.write(self.footer)
Exemple #19
0
    def print_stats(self, stream=None):
        """
        Log annotated garbage objects to console or file.

        :param stream: open file, uses sys.stdout if not given
        """
        if not stream:  # pragma: no cover
            stream = sys.stdout
        write = stream.write
        # Negated key sorts largest first.
        self.metadata.sort(key=lambda item: -item.size)
        write('%-10s %8s %-12s %-46s\n' %
              ('id', 'size', 'type', 'representation'))
        for item in self.metadata:
            write('0x%08x %8d %-12s %-46s\n' %
                  (item.id, item.size, trunc(item.type, 12), trunc(item.str, 46)))
        write('Garbage: %8d collected objects (%s in cycles): %12s\n' %
              (self.count, self.num_in_cycles, pp(self.total_size)))
 def _print_refs(self, refs, total, prefix='    ',
                 level=1, minsize=0, minpct=0.1):
     """
     Print individual referents recursively, largest first.
     """
     # sorted()[::-1] matches the original sort-then-reverse tie order.
     ordered = sorted(refs, key=lambda r: r.size)[::-1]
     for ref in ordered:
         share = ref.size * 100.0 / total
         if ref.size > minsize and share > minpct:
             self.stream.write('%-50s %-14s %3d%% [%d]\n' % (
                 trunc(prefix + str(ref.name), 50),
                 pp(ref.size),
                 int(share),
                 level
             ))
             self._print_refs(ref.refs, total, prefix=prefix + '  ',
                              level=level + 1)
 def _print_refs(self, refs, total, prefix='    ',
                 level=1, minsize=0, minpct=0.1):
     """
     Recursively write one line per referent, descending by size.
     """
     by_size = list(refs)
     by_size.sort(key=lambda entry: entry.size)
     by_size.reverse()
     for entry in by_size:
         pct = entry.size * 100.0 / total
         # Suppress referents below the absolute and relative thresholds.
         if entry.size > minsize and pct > minpct:
             line = '%-50s %-14s %3d%% [%d]\n' % (
                 trunc(prefix + str(entry.name), 50),
                 pp(entry.size), int(pct), level)
             self.stream.write(line)
             self._print_refs(entry.refs, total,
                              prefix=prefix + '  ', level=level + 1)
Exemple #22
0
    def create_title_page(self, filename: str, title: str = '') -> None:
        """
        Output the title page: overall memory chart plus one statistics
        table (and chart) per snapshot.
        """
        # 'with' ensures the file handle is released even if chart or
        # template expansion raises mid-way.
        with open(filename, "w") as fobj:
            fobj.write(self.header % (title, self.style))

            fobj.write("<h1>%s</h1>\n" % title)
            fobj.write("<h2>Memory distribution over time</h2>\n")
            fobj.write(self.charts['snapshots'])

            fobj.write("<h2>Snapshots statistics</h2>\n")
            fobj.write('<table id="nb">\n')

            classlist = sorted(self.index.keys())

            for snapshot in self.snapshots:
                fobj.write('<tr><td>\n')
                fobj.write('<table id="tl" rules="rows">\n')
                fobj.write("<h3>%s snapshot at %s</h3>\n" %
                           (snapshot.desc
                            or 'Untitled', pp_timestamp(snapshot.timestamp)))

                data = {}
                data['sys'] = pp(snapshot.system_total.vsz)
                data['tracked'] = pp(snapshot.tracked_total)
                data['asizeof'] = pp(snapshot.asizeof_total)
                data['overhead'] = pp(getattr(snapshot, 'overhead', 0))

                fobj.write(self.snapshot_summary % data)

                if snapshot.tracked_total:
                    fobj.write(self.snapshot_cls_header)
                    for classname in classlist:
                        # Membership test instead of truthiness of the
                        # whole dict: the old check still raised KeyError
                        # for classes absent from this snapshot.
                        if snapshot.classes and classname in snapshot.classes:
                            info = snapshot.classes[classname].copy()
                            path = self.relative_path(self.links[classname])
                            info['cls'] = '<a href="%s">%s</a>' % (path, classname)
                            info['sum'] = pp(info['sum'])
                            info['avg'] = pp(info['avg'])
                            fobj.write(self.snapshot_cls % info)
                fobj.write('</table>')
                fobj.write('</td><td>\n')
                if snapshot.tracked_total:
                    fobj.write(self.charts[snapshot])
                fobj.write('</td></tr>\n')

            fobj.write("</table>\n")
            fobj.write(self.footer)
    def create_title_page(self, filename, title=''):
        """
        Output the title page: overall memory chart plus one statistics
        table (and chart) per snapshot.
        """
        # 'with' guarantees the handle is closed even on error (the
        # original leaked it if any write raised).
        with open(filename, "w") as fobj:
            fobj.write(self.header % (title, self.style))

            fobj.write("<h1>%s</h1>\n" % title)
            fobj.write("<h2>Memory distribution over time</h2>\n")
            fobj.write(self.charts['snapshots'])

            fobj.write("<h2>Snapshots statistics</h2>\n")
            fobj.write('<table id="nb">\n')

            classlist = sorted(self.index.keys())

            for snapshot in self.snapshots:
                fobj.write('<tr><td>\n')
                fobj.write('<table id="tl" rules="rows">\n')
                fobj.write("<h3>%s snapshot at %s</h3>\n" % (
                    snapshot.desc or 'Untitled',
                    pp_timestamp(snapshot.timestamp)
                ))

                data = {}
                data['sys'] = pp(snapshot.system_total.vsz)
                data['tracked'] = pp(snapshot.tracked_total)
                data['asizeof'] = pp(snapshot.asizeof_total)
                data['overhead'] = pp(getattr(snapshot, 'overhead', 0))

                fobj.write(self.snapshot_summary % data)

                if snapshot.tracked_total:
                    fobj.write(self.snapshot_cls_header)
                    for classname in classlist:
                        # Guard added: the original indexed
                        # snapshot.classes[classname] unconditionally and
                        # raised KeyError for classes absent from this
                        # snapshot.
                        info = snapshot.classes.get(classname)
                        if info is None:
                            continue
                        info = info.copy()
                        path = self.relative_path(self.links[classname])
                        info['cls'] = '<a href="%s">%s</a>' % (path, classname)
                        info['sum'] = pp(info['sum'])
                        info['avg'] = pp(info['avg'])
                        fobj.write(self.snapshot_cls % info)
                fobj.write('</table>')
                fobj.write('</td><td>\n')
                if snapshot.tracked_total:
                    fobj.write(self.charts[snapshot])
                fobj.write('</td></tr>\n')

            fobj.write("</table>\n")
            fobj.write(self.footer)
 def _print_refs(self, fobj, refs, total, level=1, minsize=0, minpct=0.1):
     """
     Emit referents as rows of an HTML table, recursing into children.
     """
     # sorted()[::-1] preserves the original sort-then-reverse tie order.
     descending = sorted(refs, key=lambda r: r.size)[::-1]
     if level == 1:
         fobj.write('<table>\n')
     for ref in descending:
         percent = ref.size * 100.0 / total
         if ref.size > minsize and percent > minpct:
             fobj.write(self.refrow % dict(level=level,
                                           name=trunc(str(ref.name), 128),
                                           size=pp(ref.size),
                                           pct=percent))
             self._print_refs(fobj, ref.refs, total, level=level + 1)
     if level == 1:
         fobj.write("</table>\n")
Exemple #25
0
 def print_object(self, tobj: 'TrackedObject') -> None:
     """
     Print the gathered information of object `tobj` in human-readable
     format.

     Emits a header line (free marker or id), the instantiation trace if
     recorded, one line per size snapshot with its referents, and a
     finalize line if the object has died.
     """
     # Explicit None test keeps this branch consistent with the
     # `is not None` finalize check below; previously a falsy death
     # timestamp printed both a live-format header and a finalize line.
     if tobj.death is not None:
         self.stream.write(
             '%-32s ( free )   %-35s\n' %
             (trunc(tobj.name, 32, left=True), trunc(tobj.repr, 35)))
     else:
         self.stream.write('%-32s 0x%08x %-35s\n' % (trunc(
             tobj.name, 32, left=True), tobj.id, trunc(tobj.repr, 35)))
     if tobj.trace:
         self.stream.write(_format_trace(tobj.trace))
     for (timestamp, size) in tobj.snapshots:
         self.stream.write('  %-30s %s\n' %
                           (pp_timestamp(timestamp), pp(size.size)))
         self._print_refs(size.refs, size.size)
     if tobj.death is not None:
         self.stream.write('  %-30s finalize\n' %
                           (pp_timestamp(tobj.death), ))
def _log_summary(objects, limit=15):
    """Log (DEBUG3) a summary of the given list of objects based on type and size."""
    if not isinstance(limit, int):
        raise RuntimeError("limit must be an integer")
    if not objects:
        return
    # pylint:disable=undefined-variable
    rows = summary.summarize(objects)
    try:
        # order by total size (column 2), biggest first
        rows.sort(key=lambda entry: entry[2], reverse=True)
        LOG.debug3("{0: >45}{1: >10}{2: >15}".format('object type', 'count',
                                                     'total size'))
        for row in rows[:limit]:
            LOG.debug3("{0: >45}{1: >10}{2: >15}".format(
                row[0][-45:], row[1], stringutils.pp(row[2])))
    finally:
        # break cyclic references to the current frame
        del rows
Exemple #27
0
def pympler_snapshot(rows=None, limit=15, sort="size", order="descending"):
    """Print the rows as a summary.

    Keyword arguments:
    limit -- the maximum number of elements to be listed
    sort  -- sort elements by 'size', 'type', or '#'
    order -- sort 'ascending' or 'descending'
    """
    if not rows:
        rows = summary.summarize(muppy.get_objects())

    localrows = [list(row) for row in rows]

    # input validation
    sortby = ['type', '#', 'size']
    if sort not in sortby:
        raise ValueError("invalid sort, should be one of" + str(sortby))
    orders = ['ascending', 'descending']
    if order not in orders:
        raise ValueError("invalid order, should be one of" + str(orders))

    # sort rows; the 'type' column sorts on its canonical representation
    descending = order == "descending"
    if sort == 'type':
        localrows.sort(key=lambda row: _repr(row[0]), reverse=descending)
    else:
        column = sortby.index(sort)
        localrows.sort(key=lambda row: row[column], reverse=descending)

    # limit rows and pretty-print the size column
    localrows = localrows[0:limit]
    for row in localrows:
        row[2] = stringutils.pp(row[2])
    localrows.insert(0, ["types", "# objects", "total size"])
    return pympler_prepare(localrows)
def format_(rows, limit=15, sort='size', order='descending'):
    """Format the rows as a summary.

    Keyword arguments:
    limit -- the maximum number of elements to be listed
    sort  -- sort elements by 'size', 'type', or '#'
    order -- sort 'ascending' or 'descending'

    Heavily based on pympler.summary.print_
    """
    localrows = [list(row) for row in rows]

    # input validation
    sortby = ['type', '#', 'size']
    if sort not in sortby:
        raise ValueError("invalid sort, should be one of" + str(sortby))
    orders = ['ascending', 'descending']
    if order not in orders:
        raise ValueError("invalid order, should be one of" + str(orders))

    # sort rows: the type column compares by canonical representation,
    # the numeric columns by raw value
    reverse = order == "descending"
    if sortby.index(sort) == 0:
        localrows.sort(key=lambda row: summary._repr(row[0]), reverse=reverse)
    else:
        localrows.sort(key=lambda row: row[sortby.index(sort)], reverse=reverse)

    # limit rows, then humanize the size column
    localrows = localrows[0:limit]
    for row in localrows:
        row[2] = stringutils.pp(row[2])
    # prepend the header row
    localrows.insert(0, ["types", "# objects", "total size"])
    return _format_table(localrows)
Exemple #29
0
def format_(rows, limit=15, sort='size', order='descending'):
    """Format the rows as a summary.

    Keyword arguments:
    limit -- the maximum number of elements to be listed
    sort  -- sort elements by 'size', 'type', or '#'
    order -- sort 'ascending' or 'descending'

    Heavily based on pympler.summary.print_
    """
    table = []
    for original in rows:
        table.append(list(original))

    # validate the keyword arguments before touching the data
    sortby = ['type', '#', 'size']
    if sort not in sortby:
        raise ValueError("invalid sort, should be one of" + str(sortby))
    orders = ['ascending', 'descending']
    if order not in orders:
        raise ValueError("invalid order, should be one of" + str(orders))

    # choose the sort key: canonical repr for the type column, raw value
    # for the count/size columns
    col = sortby.index(sort)
    if col == 0:
        sort_key = lambda row: summary._repr(row[0])
    else:
        sort_key = lambda row: row[col]
    table.sort(key=sort_key, reverse=(order == "descending"))

    # truncate and humanize the size column
    table = table[0:limit]
    for row in table:
        row[2] = stringutils.pp(row[2])
    table.insert(0, ["types", "# objects", "total size"])
    return _format_table(table)
def print_(rows, limit=15, sort='size', order='descending'):
    """Print the rows as a summary.

    Keyword arguments:
    limit -- the maximum number of elements to be listed
    sort  -- sort elements by 'size', 'type', or '#'
    order -- sort 'ascending' or 'descending'
    """
    localrows = []
    for row in rows:
        localrows.append(list(row))
    # input validation
    sortby = ['type', '#', 'size']
    if sort not in sortby:
        raise ValueError("invalid sort, should be one of" + str(sortby))
    orders = ['ascending', 'descending']
    if order not in orders:
        raise ValueError("invalid order, should be one of" + str(orders))
    # sort rows -- list.sort() lost its `cmp` argument in Python 3, so the
    # original comparator lambdas raised TypeError there; use key= with
    # reverse= instead (the sibling format_ implementations already do).
    reverse = order == "descending"
    if sortby.index(sort) == 0:
        localrows.sort(key=lambda r: _repr(r[0]), reverse=reverse)
    else:
        localrows.sort(key=lambda r: r[sortby.index(sort)], reverse=reverse)
    # limit rows
    localrows = localrows[0:limit]
    for row in localrows:
        row[2] = stringutils.pp(row[2])
    # print rows
    localrows.insert(0, ["types", "# objects", "total size"])
    _print_table(localrows)
Exemple #31
0
 def nav_subtitle(self):
     """RSS figure (with positive growth) prefixed by the metric name."""
     current = self._after.rss
     grew = current - self._before.rss
     extra = ('(+%s)' % pp(grew)) if grew > 0 else ''
     return "RSS: %s %s" % (pp(current), extra)
Exemple #32
0
 def nav_subtitle(self):
     """Return current RSS plus a '(+delta)' marker when it grew."""
     now = self._after.rss
     increase = now - self._before.rss
     marker = ('(+%s)' % pp(increase)) if increase > 0 else ''
     return "%s %s" % (pp(now), marker)
Exemple #33
0
        def track_memory_wrapper(*args, **kwargs):
            """
            Run ``fn`` while tracking process memory and instances of all
            registered model classes, then dump a report to ``path``
            (stdout when no path is configured).
            """
            memory_info = {}
            tracker = ClassTracker()
            for cls in apps.get_models() + [Context, Template]:
                # track all models from registered apps, plus some standard Django ones
                tracker.track_class(cls)

            try:
                tracker.create_snapshot("before")
                memory_info["before"] = ProcessMemoryInfo()
                result = fn(*args, **kwargs)
                memory_info["after"] = ProcessMemoryInfo()
                tracker.create_snapshot("after")
                memory_info["stats"] = tracker.stats
                memory_info["stats"].annotate()
                return result

            finally:
                # If fn() raised, the "after"/"stats" entries were never
                # recorded; the original then raised KeyError here and
                # masked the real exception. Only report when complete.
                if "after" in memory_info and "stats" in memory_info:

                    # record a whole bunch of memory statistics...
                    resources = [
                        ("resident set size", memory_info["after"].rss),
                        ("virtual size", memory_info["after"].vsz),
                    ]
                    resources.extend(memory_info["after"] - memory_info["before"])
                    resources = [(k, pp(v)) for k, v in resources]
                    resources.extend(memory_info["after"].os_specific)

                    # record each tracked class as of the final snapshot...
                    classes_stats = []
                    snapshot = memory_info["stats"].snapshots[-1]
                    for class_name in memory_info["stats"].tracked_classes:
                        # history is a list of (timestamp, n_instances)
                        # tuples updated on every creation/deletion
                        history = [
                            n for _, n in memory_info["stats"].history[class_name]
                        ]
                        if history:
                            classes_stats.append({
                                "name": class_name,
                                "n_instances": len(history),
                                "min_instances": min(history),
                                "max_instances": max(history),
                                "size": pp(
                                    snapshot.classes.get(class_name,
                                                         {}).get("sum", 0)),
                            })

                    stream = sys.stdout if not path else open(path, "w")
                    try:
                        print("\nRESOURCES", file=stream)
                        for k, v in resources:
                            print(f"{k:<26}: {v:>10}", file=stream)
                        print("\nCLASSES", file=stream)
                        for class_stats in classes_stats:
                            print(
                                "{name}: created/deleted {n_instances} times for a min/max of {min_instances}/{max_instances} instances: {size:>10}"
                                .format(**class_stats),
                                file=stream,
                            )
                    finally:
                        # Bug fix: the original evaluated `stream.closed`
                        # (a no-op attribute read) instead of calling
                        # close(), leaking the file; also never close
                        # sys.stdout.
                        if stream is not sys.stdout:
                            stream.close()
                tracker.detach_all_classes()