def print_debug_data(self):
    """Log mean/stddev/min/max statistics of weights, biases and their
    gradients; emitted only when DEBUG logging is enabled."""
    if not self.logger.isEnabledFor(logging.DEBUG):
        return
    # Sync device buffers to host memory before reading them.
    for vec in (self.weights, self.bias,
                self.gradient_bias, self.gradient_weights):
        vec.map_read()
    stats_table = PrettyTable("TYPE", "Mean", "StdDev", "Min", "Max")
    stats_table.float_format = ".10"
    named_arrays = (
        ("Weight", self.weights.mem),
        ("Bias", self.bias.mem),
        ("Grad Weight", self.gradient_weights.mem),
        ("Grad Bias", self.gradient_bias.mem))
    for row_name, arr in named_arrays:
        if arr is not None and arr.size > 0:
            row = (numpy.mean(arr), numpy.std(arr),
                   numpy.min(arr), numpy.max(arr))
        else:
            # Empty/unset arrays get blank statistics.
            row = (None, None, None, None)
        stats_table.add_row(row_name, *row)
    self.debug("\n" + stats_table.get_string())
def print_table(diffs):
    """Pretty-print diff records: first two columns left-aligned,
    numeric columns centered. Each record's leading element is skipped."""
    table = PrettyTable("Unit", "Attribute", "Average Relative Diff",
                        "Average Diff", "Max Diff")
    names = table.field_names
    for text_col in names[:2]:
        table.align[text_col] = "l"
    for num_col in names[2:]:
        table.align[num_col] = "c"
    for record in diffs:
        table.add_row(*record[1:])
    print(table)
def run(self):
    """Rank the values of the first input vector and report the top ones."""
    self.input.map_read()
    values = self.input.mem[0]
    # Sort (value, index) pairs descending by value.
    ranked = sorted(((v, i) for i, v in enumerate(values)), reverse=True)
    table = PrettyTable("label", "value")
    table.float_format = ".5"
    for value, index in ranked[:self.top_number]:
        table.add_row(index, value)
    self.info("Results:\n%s", table)
    self.info("Max to mean ratio: %.1f",
              numpy.max(values) / numpy.mean(values))
def finished(response):
    """Handle the server reply: decode the JSON payload and print the
    resulting table; stop with a Failure on a malformed response."""
    self.debug("Received %s", response)
    try:
        parsed = json.loads(response.decode('UTF-8'))
    except ValueError as e:
        # json.loads raises ValueError (JSONDecodeError) on bad input.
        self.exception("Failed to parse the response from server: %s",
                       response)
        self.stop(Failure(e), False)
        return
    table = PrettyTable("Name", "Description", "Author", "Version", "Date")
    table.align["Name"] = table.align["Description"] = 'l'
    for item in parsed:
        table.add_row(*item)
    print(table)
    sys.stdout.flush()
    self.stop()
def _print_label_stats(self, stats, set_name):
    """Log cardinality statistics for a label -> count mapping.

    Prints min/max/avg/σ summary (colorized when the distribution is
    unbalanced) and, at DEBUG level, a per-label histogram table.

    Args:
        stats: mapping of label -> number of samples.
        set_name: human readable name of the label set ("train", etc.).
    """
    values = list(stats.values())
    if sum(values) == 0:
        self.info("No %s labels specified", set_name)
        return
    keys = list(stats.keys())
    mean = int(numpy.mean(values))
    stddev = int(numpy.std(values))
    lmin = numpy.min(values)
    amin = keys[numpy.argmin(values)]
    lmax = numpy.max(values)
    amax = keys[numpy.argmax(values)]
    # Highlight unbalanced label sets: yellow when σ exceeds 10% of the
    # mean, red when it exceeds half of the mean.
    if has_colors() and stddev > mean / 10:
        endstr = "\033[0m"
        openstr = "\033[1;31m" if stddev > mean / 2 else "\033[1;33m"
    else:
        openstr = endstr = ""
    # max(mean, 1) fixes a ZeroDivisionError: the truncated integer mean
    # can be 0 while the total is nonzero (e.g. counts [0, 1]).
    self.info(
        u"%s label cardinalities: min: %d (\"%s\"), max: %d (\"%s\"), avg:"
        u" %d, %sσ: %d (%d%%)%s",
        set_name, lmin, amin, lmax, amax, mean, openstr, stddev,
        stddev * 100 // max(mean, 1), endstr)
    if not self.logger.isEnabledFor(logging.DEBUG):
        return
    total = sum(values)
    table = PrettyTable("Label", "Cardinality", "%", "Histogram")
    table.align["Cardinality"] = "r"
    table.align["%"] = "r"
    table.align["Histogram"] = "l"
    for k, v in stats.items():
        table.add_row(k, v, "%.1f" % (v * 100 / total),
                      "*" * (v * 25 // lmax))
    self.debug("Detailed %s label stats:\n%s", set_name, table)
def print_debug_data(self):
    """Dump weights/bias/gradient summary statistics to the debug log."""
    if not self.logger.isEnabledFor(logging.DEBUG):
        return
    # Make host copies of the device buffers readable.
    self.weights.map_read()
    self.bias.map_read()
    self.gradient_bias.map_read()
    self.gradient_weights.map_read()

    def describe(arr):
        # (mean, stddev, min, max) tuple, or Nones for missing/empty data.
        if arr is None or arr.size == 0:
            return None, None, None, None
        return (numpy.mean(arr), numpy.std(arr),
                numpy.min(arr), numpy.max(arr))

    table = PrettyTable("TYPE", "Mean", "StdDev", "Min", "Max")
    table.float_format = ".10"
    table.add_row("Weight", *describe(self.weights.mem))
    table.add_row("Bias", *describe(self.bias.mem))
    table.add_row("Grad Weight", *describe(self.gradient_weights.mem))
    table.add_row("Grad Bias", *describe(self.gradient_bias.mem))
    self.debug("\n" + table.get_string())
def check_snapshot_size(self, size):
    """Warn (once) when the snapshot exceeds SIZE_WARNING_THRESHOLD and
    list the top 5 heaviest units by stripped-pickle size.

    Args:
        size: snapshot size in bytes.
    """
    if size <= self.SIZE_WARNING_THRESHOLD or not self._warn_about_size:
        return
    self._warn_about_size = False
    psizes = []
    try:
        for unit in self.workflow:
            unit.stripped_pickle = True
            try:
                psize = len(pickle.dumps(unit, protocol=4))
            finally:
                # Always restore the flag, even if pickling this unit fails
                # (the original left it True on the failing unit).
                unit.stripped_pickle = False
            psizes.append((psize, unit))
    except Exception:
        # Was a bare "except:", which also swallowed SystemExit and
        # KeyboardInterrupt. Best effort: fall back to the plain warning.
        self.warning("The snapshot size looks too big: %d bytes", size)
        return
    import gc
    gc.collect()
    # Sort by size only: comparing (psize, unit) tuples would fall through
    # to comparing unit objects on ties, which may raise TypeError.
    psizes.sort(key=lambda pu: pu[0], reverse=True)
    pstable = PrettyTable("Unit", "Size")
    pstable.align["Unit"] = "l"
    # Do not reuse "size" as the loop variable: the original shadowed the
    # argument, so the final warning reported the last unit's pickle size
    # instead of the snapshot size.
    for psize, unit in psizes[:5]:
        pstable.add_row(str(unit), psize)
    self.warning(
        "The snapshot size looks too big: %d bytes. Here are top 5 "
        "big units:\n%s", size, pstable)
def print_stats(self):
    """Log total and average receive/send timings of the ZeroMQ link."""
    conn = self.zmq_connection
    if conn is None:
        return
    table = PrettyTable("", "receive", "send")
    table.align[""] = "r"
    table.add_row(
        "all",
        datetime.timedelta(seconds=conn.total_receive_time),
        datetime.timedelta(seconds=conn.total_request_time))
    try:
        avg_send = conn.request_timings["update"]
    except KeyError:
        # No "update" requests were timed - skip the averages row.
        pass
    else:
        table.add_row(
            "avg",
            datetime.timedelta(seconds=conn.receive_timing),
            datetime.timedelta(seconds=avg_send))
    self.info("Timings:\n%s", table)
def print_stats(self, by_name=False, top_number=5):
    """Outputs various time statistics gathered with run_timed and
    method_timed.

    Args:
        by_name: aggregate unit timings by name instead of per instance
            (passed through to get_unit_run_time_stats).
        top_number: how many of the slowest units to print.
    """
    stats = self.get_unit_run_time_stats(by_name)
    time_all = sum(s[1] for s in stats)
    if time_all > 0:
        table = PrettyTable("#", "%", "time", "unit")
        table.align["unit"] = "l"
        top_time = 0
        top_count = min(top_number, len(stats))
        for i in range(1, top_count + 1):
            unit_name, unit_time = stats[i - 1][0], stats[i - 1][1]
            top_time += unit_time
            table.add_row(i, int(unit_time * 100 / time_all),
                          datetime.timedelta(seconds=unit_time), unit_name)
        # Label reflects the number actually printed; it was hard-coded
        # "Top 5" regardless of the top_number argument.
        table.add_row(u"Σ", int(top_time * 100 / time_all),
                      datetime.timedelta(seconds=top_time),
                      "Top %d" % top_count)
        self.info(u"Unit run time statistics top:\n%s", table)
        table = PrettyTable("units", "real", u"η,%")
        table.add_row(datetime.timedelta(seconds=time_all),
                      datetime.timedelta(seconds=self._run_time_),
                      int(time_all * 100 / (self._run_time_ or 1)))
        self.info(u"Total run time:\n%s", table)
    table = PrettyTable("method", "%", "time")
    table.align["method"] = "l"
    time_all = 0
    for method_name, method_time in sorted(self._method_time_.items()):
        if method_name == "run":
            # "run" is covered by the unit statistics above.
            continue
        time_all += method_time
        if self._run_time_ > 0:
            table.add_row(method_name,
                          int(method_time * 100 / self._run_time_),
                          datetime.timedelta(seconds=method_time))
    if self.is_slave:
        # Guard the division like the total-run-time row above does;
        # the original raised ZeroDivisionError when _run_time_ was 0.
        table.add_row(u"Σ", int(time_all * 100 / (self._run_time_ or 1)),
                      datetime.timedelta(seconds=time_all))
    if time_all > 0:
        self.info(u"Workflow methods run time:\n%s", table)