def print_report(self, top=None, alloc=False, collate=True, start=None, end=None):
    """analyze and report on ttop files"""
    parser = TTopParser(start=start, end=end)
    print("ttop version %s" % VERSION)
    print()
    for file in self.files:
        # context manager closes the log even if parsing raises
        with open(file, "r") as log:
            if env.DEBUG:
                print("parsing", file)
            for total, threads in parser.parse(log):
                if alloc:
                    print(
                        "{0:<40} {1:<10} {2:<10} {3:<10}".format(
                            total["date"].strftime("%Y-%m-%d %H:%M:%S"),
                            "Threads",
                            "Alloc/s",
                            "Total: " + format_bytes(total["heap_rate"]),
                        )
                    )
                else:
                    print(
                        "{0:<40} {1:<10} {2:<10} {3:<10}".format(
                            total["date"].strftime("%Y-%m-%d %H:%M:%S"),
                            "Threads",
                            "CPU%",
                            "Total: " + str(total["app_cpu"]) + "%",
                        )
                    )
                print("=" * 80)
                combined = threads
                if collate:
                    combined = self.collate_threads(threads)
                if alloc:
                    ordered = sorted(
                        combined.items(), key=lambda k: k[1]["heap_rate"], reverse=True
                    )
                else:
                    ordered = sorted(
                        combined.items(), key=lambda k: k[1]["total_cpu"], reverse=True
                    )
                if top:
                    ordered = ordered[0:top]
                for name, value in ordered:
                    count = 1
                    if collate:
                        count = int(value["thread_count"])
                    if alloc:
                        print(
                            "{0:<40} {1:<10} {2:<10} {3:<10}".format(
                                name,
                                count,
                                format_bytes(value["heap_rate"]),
                                textbar(total["heap_rate"], value["heap_rate"]),
                            )
                        )
                    else:
                        print(
                            "{0:<40} {1:<10} {2:<10} {3:<10}".format(
                                name,
                                count,
                                value["total_cpu"],
                                textbar(total["app_cpu"], value["total_cpu"]),
                            )
                        )
                print()
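
# `collate_threads` is not defined in this section. The sketch below is a
# hypothetical reconstruction inferred only from how print_report consumes
# its result: thread names differing by a trailing numeric id are merged,
# their rates summed, and a `thread_count` recorded for the count column.
# The name `collate_threads_sketch` and the suffix regex are assumptions.
import re
from collections import defaultdict


def collate_threads_sketch(threads):
    combined = defaultdict(
        lambda: {"total_cpu": 0.0, "heap_rate": 0, "thread_count": 0}
    )
    for name, value in threads.items():
        # 'GossipStage-3' and 'GossipStage-7' collapse to 'GossipStage'
        base = re.sub(r"[-:]?\d+$", "", name)
        combined[base]["total_cpu"] += value.get("total_cpu", 0.0)
        combined[base]["heap_rate"] += value.get("heap_rate", 0)
        combined[base]["thread_count"] += 1
    return combined
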
def generate(self, parsed):
    """generates a time series report for a tarball"""
    table = []
    table.append("")
    table.append("filter cache evictions by hour")
    table.append("------------------------------")
    events_by_datetime = OrderedDefaultDict(list)
    # start/end are initialized inverted so the first event narrows both
    start = dates.max_utc_time()
    end = dates.min_utc_time()
    for node, events in parsed["nodes"].items():
        # default to an empty list so nodes without evictions don't raise
        for info in events.get("evictions", []):
            # put into a structure we can bucketize, tracking the
            # overall observed window as we go
            for value in info.values():
                if value.time_stamp > end:
                    end = value.time_stamp
                if value.time_stamp < start:
                    start = value.time_stamp
                events_by_datetime[value.time_stamp].append(value)
    buckets = sorted(
        util.bucketize(events_by_datetime, start, end, 3600).items(),
        key=lambda t: t[0],
    )
    maxval = len(max(buckets, key=lambda t: len(t[1]))[1])
    for time, matches in buckets:
        pad = " " * (len(str(maxval)) - len(str(len(matches))))
        table.append(
            "%s %s %s"
            % (
                time.strftime("%Y-%m-%d %H:%M:%S") + pad,
                len(matches),
                util.textbar(maxval, len(matches)),
            )
        )
    return "\n".join(table)
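
# `util.bucketize` is defined elsewhere; this is a minimal sketch under the
# contract its call sites imply: a {datetime: [event, ...]} mapping folded
# into fixed-width windows of `seconds`, keyed by window start. The function
# name and internals here are assumptions, not the library's implementation.
from datetime import timedelta


def bucketize_sketch(events, start, end, seconds=3600):
    # pre-create every window so quiet periods still appear as empty buckets
    buckets = {}
    window = start
    while window <= end:
        buckets[window] = []
        window += timedelta(seconds=seconds)
    for stamp, matched in events.items():
        offset = int((stamp - start).total_seconds() // seconds)
        bucket = start + timedelta(seconds=offset * seconds)
        buckets.setdefault(bucket, []).extend(matched)
    return buckets
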
def print_report(self, interval=3600):
    """print bucketized result counts"""
    print("bucketgrep version %s" % VERSION)
    print("search: '%s'" % self.supplied_regex)
    print()
    if not self.analyzed:
        self.analyze()
    if not self.matches:
        print("No matches found")
        if self.unknown:
            print(self.unknown, "matches without timestamp")
        return
    buckets = sorted(
        bucketize(
            self.matches, start=self.start, end=self.end, seconds=interval
        ).items(),
        key=lambda t: t[0],
    )
    maxval = len(max(buckets, key=lambda t: len(t[1]))[1])
    for time, matches in buckets:
        # right-align the counts against the widest bucket count
        pad = " " * (len(str(maxval)) - len(str(len(matches))))
        print(
            time.strftime("%Y-%m-%d %H:%M:%S") + pad,
            len(matches),
            textbar(maxval, len(matches)),
        )
    if self.unknown:
        print(self.unknown, "matches without timestamp")
def print_report(self, interval=3600):
    """print bucketized result counts"""
    print()
    if not self.analyzed:
        self.analyze()
    if not self.matches:
        print("No matches found")
        if self.unknown:
            print(self.unknown, "matches without timestamp")
        return
    if self.report == "summary":
        print()
        print("cluster wide")
        print("------------")
        buckets = sorted(
            bucketize(
                self.matches, start=self.start, end=self.end, seconds=interval
            ).items(),
            key=lambda t: t[0],
        )
        maxval = len(max(buckets, key=lambda t: len(t[1]))[1])
        for time, matches in buckets:
            pad = " " * (len(str(maxval)) - len(str(len(matches))))
            print(
                time.strftime("%Y-%m-%d %H:%M:%S") + pad,
                len(matches),
                textbar(maxval, len(matches)),
            )
    else:
        print()
        print()
        print("per node numbers")
        print("----------------")
        for node in sorted(self.node_matches.keys()):
            print()
            print("node: %s" % node)
            print("--------")
            if not len(self.node_matches[node]):
                print("No matches for %s found" % node)
                continue
            buckets = sorted(
                bucketize(
                    self.node_matches[node],
                    start=self.start,
                    end=self.end,
                    seconds=interval,
                ).items(),
                key=lambda t: t[0],
            )
            maxval = len(max(buckets, key=lambda t: len(t[1]))[1])
            for time, matches in buckets:
                pad = " " * (len(str(maxval)) - len(str(len(matches))))
                print(
                    time.strftime("%Y-%m-%d %H:%M:%S") + pad,
                    len(matches),
                    textbar(maxval, len(matches)),
                )
    if self.unknown:
        print(self.unknown, "matches without timestamp")
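
# Hypothetical usage of the two report modes above. The class name and
# constructor are assumptions (they do not appear in this section); only
# the `report` switch and the `interval` parameter come from the code.
#
#   grepper = BucketGrep(regex, files)   # assumed constructor
#   grepper.report = "summary"
#   grepper.print_report(interval=3600)  # one cluster-wide hourly histogram
#   grepper.report = "pernode"
#   grepper.print_report(interval=900)   # 15-minute buckets, one per node
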
def print_report(self, top=None, alloc=False, collate=True, start=None, end=None):
    """analyze and report on ttop files"""
    parser = TTopParser(start=start, end=end)
    print("ttop version %s" % VERSION)
    print()
    table = []
    for file in self.files:
        with diag.FileWithProgress(file) as log:
            table.append([])
            if env.DEBUG:
                print("parsing", file)
            for total, threads in parser.parse(log):
                if alloc:
                    table.append([
                        total["date"].strftime("%Y-%m-%d %H:%M:%S"),
                        "Threads",
                        "Alloc/s",
                        "Total: " + format_bytes(total["heap_rate"]),
                    ])
                else:
                    table.append([
                        total["date"].strftime("%Y-%m-%d %H:%M:%S"),
                        "Threads",
                        "CPU%",
                        "Total: " + str(total["app_cpu"]) + "%",
                    ])
                table.append(["=" * 80])
                combined = threads
                if collate:
                    combined = self.collate_threads(threads)
                if alloc:
                    ordered = sorted(
                        combined.items(),
                        key=lambda k: k[1]["heap_rate"],
                        reverse=True,
                    )
                else:
                    ordered = sorted(
                        combined.items(),
                        key=lambda k: k[1]["total_cpu"],
                        reverse=True,
                    )
                if top:
                    ordered = ordered[0:top]
                for name, value in ordered:
                    count = 1
                    if collate:
                        count = int(value["thread_count"])
                    if alloc:
                        table.append([
                            name,
                            str(count),
                            format_bytes(value["heap_rate"]),
                            textbar(total["heap_rate"], value["heap_rate"]),
                        ])
                    else:
                        table.append([
                            name,
                            str(count),
                            "{:.2f}".format(value["total_cpu"]),
                            textbar(total["app_cpu"], value["total_cpu"]),
                        ])
                table.append([])
    pad_table(table, extra_pad=1)
    for row in table:
        print("".join(row))
    print()
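
# `pad_table` comes from a shared formatting module; this is a minimal
# sketch of the behaviour the rewritten print_report relies on: each cell
# padded in place to its column's maximum width plus `extra_pad`, so that
# "".join(row) yields aligned columns. The name and internals are assumed.
def pad_table_sketch(table, extra_pad=1):
    widths = {}
    for row in table:
        for i, cell in enumerate(row):
            widths[i] = max(widths.get(i, 0), len(cell))
    for row in table:
        for i, cell in enumerate(row):
            row[i] = cell.ljust(widths[i] + extra_pad)
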