def do_astats(self, args: argparse.Namespace):
    """ Calculate activation statistics on one or more input files.

    Collects activation stats over every matched input file and prints an
    ActivationReporter table via output_table.
    """
    self._check_graph()
    input_args = self._get_input_args(args)
    stats_collector = ActivationStatsCollector()
    # Normalize --step: a single-element list collapses to a scalar,
    # a longer list becomes a tuple. The tuple case triggers fusion
    # reporting below (isinstance(step_idx, tuple)).
    step_idx = args.step
    if step_idx is not None:
        if len(step_idx) == 1:
            step_idx = step_idx[0]
        else:
            step_idx = tuple(step_idx)
    # Idiomatic emptiness test (was `len(args.input_files) == 0`).
    if not args.input_files:
        self.perror("You must enter some files to process")
        return
    for file_per_input in glob_input_files(args.input_files, self.G.num_inputs):
        LOG.info("input file %s", file_per_input)
        data = [import_data(input_file, **input_args)
                for input_file in file_per_input]
        stats_collector.collect_stats(self.G, data)
    fmt = ('tab' if args.output is None else args.output['fmt'])
    tab = ActivationReporter(
        do_totals=(fmt != "csv"),
        threshold=args.qsnr,
        yield_fusions=args.detail or isinstance(step_idx, tuple)
    ).report(self.G, stats_collector.reduce_stats())
    output_table(tab, args)
def do_qerror(self, args):
    """ Show quantization error introduced by processing one or more input files."""
    self._check_graph()
    self._check_quantized()
    fmt = 'tab' if args.output is None else args.output['fmt']
    input_args = self._get_input_args(args)
    # Per-step collector when a step filter is given, whole-graph otherwise.
    collector_cls = StepErrorStatsCollector if args.step else ErrorStatsCollector
    stats_collector = collector_cls(quant_compare=args.compare_quantized)
    file_count = 0
    for file_per_input in glob_input_files(args.input_files, self.G.num_inputs):
        file_count += 1
        data = [import_data(input_file, **input_args)
                for input_file in file_per_input]
        stat = stats_collector.collect_stats(self.G, data)
        if args.report_lowest is not None:
            lowest = min(elem['qsnr'] for elem in stat.values())
            if lowest < args.report_lowest:
                self.pfeedback("{} had QSNR below threshold".format(file_per_input))
    if not file_count:
        self.perror("no files to process")
        return
    reporter = ErrorReporter(do_totals=(fmt != "csv"),
                             one_input=file_count <= 1,
                             with_chan=args.step)
    tab = reporter.report(self.G, stats_collector.reduce_stats())
    output_table(tab, args)
def do_qshow(self, args):
    """ Show current quantization settings."""
    self._check_graph()
    self._check_quantized()
    reporter = QuantizationReporter(step=args.step)
    tab = reporter.report(self.G, self.G.quantization)
    output_table(tab, args)
def do_temps(self, args):
    """ Show statistics on activations."""
    self._check_graph()
    fmt = 'tab' if args.output is None else args.output['fmt']
    stats = TempsStatsCollector().collect_stats(self.G)
    reporter = TempsReporter(do_totals=(fmt != "csv"))
    output_table(reporter.report(self.G, stats), args)
def do_show(self, args: argparse.Namespace):
    """ Display the structure of the graph"""
    self._check_graph()
    fmt = 'tab' if args.output is None else args.output['fmt']
    # xls output keeps dimensions split; csv output drops the totals row.
    reporter = GraphReporter(split_dims=(fmt == "xls"),
                             do_totals=(fmt != "csv"),
                             step=args.step)
    output_table(reporter.report(self.G, None), args)
def do_qshow(self, args):
    """ Show current quantization settings."""
    self._check_graph()
    self._check_quantized()
    nodes = None
    if args.step:
        nodes, _ = self.get_node_step_or_name(args.step)
        if not nodes:
            # Step lookup failed; get_node_step_or_name reports the problem.
            return
    tab = QuantizationReporter().report(self.G, self.G.quantization, nodes)
    output_table(tab, args)
def do_show(self, args: argparse.Namespace):
    """ Display the structure of the graph"""
    self._check_graph()
    nodes = None
    if args.step:
        nodes, _ = self.get_node_step_or_name(args.step, allow_comma=True)
        if not nodes:
            self.do_help('show')
            return
    fmt = 'tab' if args.output is None else args.output['fmt']
    # Constants are always shown when a specific step was requested.
    show_constants = True if args.step is not None else args.show_constants
    reporter = GraphReporter(do_totals=(fmt != "csv"),
                             split_dims=(fmt == "xls"),
                             show_constants=show_constants)
    output_table(reporter.report(self.G, nodes=nodes), args)
def do_stats(self, args: argparse.Namespace):
    """ Display statistics on weights and biases"""
    self._check_graph()
    fmt = 'tab' if args.output is None else args.output['fmt']
    if args.detailed:
        stats = FilterDetailedStatsCollector().collect_stats(self.G)
        tab = FilterDetailedStatsReporter().report(self.G, stats)
    else:
        # Collapse a single-element --step list to a scalar, longer lists to a tuple.
        step_idx = args.step
        if step_idx is not None:
            step_idx = step_idx[0] if len(step_idx) == 1 else tuple(step_idx)
        stats = FilterStatsCollector().collect_stats(self.G, step_idx=step_idx)
        reporter = FilterStatsReporter(do_totals=(fmt != "csv"),
                                       threshold=args.qsnr,
                                       step_idx=step_idx)
        tab = reporter.report(self.G, stats)
    output_table(tab, args)
def do_fquant(self, args: argparse.Namespace):
    """ Attempt to calculate a fake quantization for graph using random tensors
    and parameters. This is intended to allow code generation for performance
    testing even if no real weights and input data are available."""
    self._check_graph()
    self.G.constant_store.fake = True
    # try/finally guarantees the fake-constant flag is cleared even if
    # stats collection or quantization raises (previously a failure left
    # the graph stuck in fake mode).
    try:
        stats_collector = ActivationStatsCollector()
        # Random normal tensors stand in for real calibration inputs.
        # `inp` avoids shadowing the builtin `input`.
        input_tensors = [np.random.normal(0, 0.2, inp.dims.shape)
                         for inp in self.G.input_nodes()]
        stats_collector.collect_stats(self.G, input_tensors)
        astats = stats_collector.reduce_stats()
        fstats = FakeFilterStatsCollector().collect_stats(self.G)
        quantizer = SymmetricQuantizer(astats, fstats,
                                       force_width=args.force_width)
        qrecs = quantizer.quantize(self.G)
        self.G.quantization = qrecs
        tab = QuantizationReporter().report(self.G, qrecs)
        output_table(tab, args)
    finally:
        self.G.constant_store.fake = False