Code example #1
File: qshow.py  Project: mfkiwl/gap_sdk
class QshowCommand(NNToolShellBase):
    # QSHOW COMMAND
    parser_qshow = NNToolArguementParser()
    table_options(parser_qshow)
    parser_qshow.add_argument(
        'step',
        nargs=(0, 1),
        help='step to tune. ' + NODE_SELECTOR_HELP,
        completer_method=NNToolShellBase.node_step_or_name_completer)

    @with_argparser(parser_qshow)
    @no_history
    def do_qshow(self, args):
        """
Show current quantization settings."""
        self._check_graph()
        self._check_quantized()
        if args.step:
            nodes, _ = self.get_node_step_or_name(args.step)
            if not nodes:
                return
        else:
            nodes = None
        tab = QuantizationReporter().report(self.G, self.G.quantization, nodes)
        output_table(tab, args)
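
Note: the positional 'step' above uses cmd2's ranged nargs extension, a (min, max) tuple that plain argparse does not accept, together with gap_sdk's NNToolArguementParser subclass. A minimal standalone sketch of how such a ranged positional behaves (it assumes only that the cmd2 package is installed and that the matched values come back as a list, which is how the handlers in these examples treat them):

from cmd2 import Cmd2ArgumentParser

sketch_parser = Cmd2ArgumentParser()
# A (min, max) tuple for nargs is a cmd2 extension: 'step' may be omitted
# entirely or given at most once, as in the qshow parser above.
sketch_parser.add_argument('step', nargs=(0, 1), help='optional step selector')

print(sketch_parser.parse_args([]).step)          # no step given
print(sketch_parser.parse_args(['conv_0']).step)  # a single step name or number
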
Code example #2
File: qerror.py  Project: mfkiwl/gap_sdk
class QerrorCommand(NNToolShellBase):
    # QERROR COMMAND
    parser_qerror = Cmd2ArgumentParser()
    parser_qerror.add_argument('-s',
                               '--step',
                               action='store_true',
                               help='evaluate quantization per step, '
                                    'i.e. individually quantize each layer')
    parser_qerror.add_argument('--compare_quantized',
                               action='store_true',
                               help='quantize and dequantize the float output '
                                    'to give it the same error as the quantized output of the layer')
    parser_qerror.add_argument(
        '-r',
        '--report_lowest',
        type=int,
        help='QSNR threshold below which to report filename')
    table_options(parser_qerror, default_width=140)
    input_options(parser_qerror)

    @with_argparser(parser_qerror)
    @no_history
    def do_qerror(self, args):
        """
Show quantization error introduced by processing one or more input files."""
        self._check_graph()
        self._check_quantized()
        fmt = ('tab' if args.output is None else args.output['fmt'])
        input_args = self._get_input_args(args)
        if args.step:
            stats_collector = StepErrorStatsCollector(
                quant_compare=args.compare_quantized)
        else:
            stats_collector = ErrorStatsCollector(
                quant_compare=args.compare_quantized)
        cnt = 0
        for file_per_input in glob_input_files(args.input_files,
                                               self.G.num_inputs):
            cnt += 1

            data = [
                import_data(input_file, **input_args)
                for input_file in file_per_input
            ]
            stat = stats_collector.collect_stats(self.G, data)
            if args.report_lowest is not None:
                lowest = min((elem['qsnr'] for elem in stat.values()))
                if lowest < args.report_lowest:
                    self.pfeedback(
                        "{} had QSNR below threshold".format(file_per_input))
        if not cnt:
            self.perror("no files to process")
            return
        tab = ErrorReporter(do_totals=(fmt != "csv"), one_input=cnt <= 1, with_chan=args.step)\
            .report(self.G, stats_collector.reduce_stats())
        output_table(tab, args)
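
The --report_lowest handling above scans the per-node records returned by collect_stats and flags any input file whose worst QSNR falls below the threshold. A tiny self-contained repro of that check with made-up numbers (in the real command the records come from ErrorStatsCollector):

# Made-up per-node records standing in for stats_collector.collect_stats() output.
stat = {'conv_0': {'qsnr': 42.1}, 'conv_1': {'qsnr': 18.7}}
report_lowest = 25.0
lowest = min(elem['qsnr'] for elem in stat.values())
if lowest < report_lowest:
    # mirrors the pfeedback message in do_qerror above
    print("input had QSNR below threshold:", lowest)
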
Code example #3
File: astats.py  Project: mfkiwl/gap_sdk
class AstatsCommand(NNToolShellBase):
    # ASTATS COMMAND
    parser_astats = Cmd2ArgumentParser()
    parser_astats.add_argument('-q',
                               '--qsnr',
                               type=float,
                               default=30.0,
                               help='QSNR threshold')
    parser_astats.add_argument('-d',
                               '--detail',
                               action="store_true",
                               help='Show fusions detail')
    parser_astats.add_argument(
        '-s',
        '--step',
        type=int,
        nargs=(1, 2),
        help='display information by channel for a step. A fusion step can be selected '
             'with two values: the step_idx and the idx of the node in the fusion.')
    table_options(parser_astats, default_width=180)
    input_options(parser_astats)

    @with_argparser(parser_astats)
    @no_history
    def do_astats(self, args: argparse.Namespace):
        """
Calculate activation statistics on one or more input files."""
        self._check_graph()
        input_args = self._get_input_args(args)
        stats_collector = ActivationStatsCollector()
        step_idx = args.step
        if step_idx is not None:
            if len(step_idx) == 1:
                step_idx = step_idx[0]
            else:
                step_idx = tuple(step_idx)
        if len(args.input_files) == 0:
            self.perror("You must enter some files to process")
            return
        for file_per_input in glob_input_files(args.input_files,
                                               self.G.num_inputs):
            LOG.info("input file %s", file_per_input)
            data = [
                import_data(input_file, **input_args)
                for input_file in file_per_input
            ]
            stats_collector.collect_stats(self.G, data)

        fmt = ('tab' if args.output is None else args.output['fmt'])
        tab = ActivationReporter(do_totals=(fmt != "csv"),
                                 threshold=args.qsnr,
                                 yield_fusions=args.detail
                                 or isinstance(step_idx, tuple)).report(
                                     self.G, stats_collector.reduce_stats())
        output_table(tab, args)
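
Both do_astats above and do_stats in a later example normalise the ranged --step value the same way: one value selects a plain step, two values select a node inside a fusion. A minimal standalone repro of that normalisation:

def normalize_step(step):
    # None: no --step given; [n]: plain step n; [n, m]: node m inside fusion step n.
    if step is None:
        return None
    return step[0] if len(step) == 1 else tuple(step)

print(normalize_step(None))    # None
print(normalize_step([4]))     # 4
print(normalize_step([4, 1]))  # (4, 1)
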
Code example #4
File: temps.py  Project: dilawar/gap_sdk
class TempsCommand(NNToolShellBase):
    # TEMPS COMMAND
    parser_temps = Cmd2ArgumentParser()
    table_options(parser_temps, default_width=140)

    @with_argparser(parser_temps)
    def do_temps(self, args):
        """
Show statistics on activations."""
        self._check_graph()
        fmt = ('tab' if args.output is None else args.output['fmt'])
        stats_collector = TempsStatsCollector()
        stats = stats_collector.collect_stats(self.G)
        tab = TempsReporter(do_totals=(fmt != "csv")).report(self.G, stats)
        output_table(tab, args)
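
Every command in this collection follows the same basic shape: run a stats collector over the graph (and, for some commands, over input data), hand the result to a reporter, and print the table with output_table. A dependency-free sketch of that shape; Collector and Reporter here are placeholders, not the gap_sdk classes:

class Collector:
    def collect_stats(self, graph):
        # a real collector walks the graph's nodes; this one fakes a single record
        return {'node_0': {'size': 1024}}

class Reporter:
    def report(self, graph, stats):
        # a real reporter builds a table object for output_table(); this one returns rows
        return [(name, rec['size']) for name, rec in stats.items()]

stats = Collector().collect_stats(None)
for row in Reporter().report(None, stats):
    print(row)
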
Code example #5
File: qshow.py  Project: dilawar/gap_sdk
class QshowCommand(NNToolShellBase):
    # QSHOW COMMAND
    parser_qshow = Cmd2ArgumentParser()
    table_options(parser_qshow)
    parser_qshow.add_argument('step', type=int, nargs=(0, 1), help='Limit to step number')
    parser_qshow.add_argument('-s', '--show_wrapped',
                              action='store_true',
                              help='show original quantization parameters on multiplicative quantization')

    @with_argparser(parser_qshow)
    def do_qshow(self, args):
        """
Show current quantization settings."""
        self._check_graph()
        self._check_quantized()
        tab = QuantizationReporter(step=args.step,
                                   emit_wrapped=args.show_wrapped).report(self.G,
                                                                          self.G.quantization)
        output_table(tab, args)
Code example #6
class QshowCommand(NNToolShellBase):
    # QSHOW COMMAND
    parser_qshow = Cmd2ArgumentParser()
    table_options(parser_qshow)
    parser_qshow.add_argument('step',
                              type=int,
                              nargs=(0, 1),
                              help='Limit to step number')

    @with_argparser(parser_qshow)
    @no_history
    def do_qshow(self, args):
        """
Show current quantization settings."""
        self._check_graph()
        self._check_quantized()
        tab = QuantizationReporter(step=args.step).report(
            self.G, self.G.quantization)
        output_table(tab, args)
Code example #7
class StatsCommand(NNToolShellBase):
    # STATS COMMAND
    parser_stats = Cmd2ArgumentParser("display statistics on globals")
    parser_stats.add_argument('-d',
                              '--detailed',
                              action="store_true",
                              help='Dump detailed statistics')
    parser_stats.add_argument('-q',
                              '--qsnr',
                              type=float,
                              default=30.0,
                              help='QSNR threshold')
    parser_stats.add_argument('-s',
                              '--step',
                              type=int,
                              nargs=(1, 2),
                              help='display information by channel for step')
    table_options(parser_stats, default_width=180)

    @with_argparser(parser_stats)
    @no_history
    def do_stats(self, args: argparse.Namespace):
        """
Display statistics on weights and biases"""
        self._check_graph()
        fmt = ('tab' if args.output is None else args.output['fmt'])
        if args.detailed:
            stats_collector = FilterDetailedStatsCollector()
            stats = stats_collector.collect_stats(self.G)
            tab = FilterDetailedStatsReporter().report(self.G, stats)
        else:
            step_idx = args.step
            if step_idx is not None:
                if len(step_idx) == 1:
                    step_idx = step_idx[0]
                else:
                    step_idx = tuple(step_idx)
            stats_collector = FilterStatsCollector()
            stats = stats_collector.collect_stats(self.G, step_idx=step_idx)
            tab = FilterStatsReporter(do_totals=(fmt != "csv"), threshold=args.qsnr, step_idx=step_idx)\
                .report(self.G, stats)
        output_table(tab, args)
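
The fmt selection repeated in do_stats and most other handlers defaults to a console table and only switches when the --output option (presumably added by table_options) carries an explicit format; the CSV case drops the totals row. A short repro with a stand-in for args.output:

args_output = None                                   # stand-in for args.output
fmt = 'tab' if args_output is None else args_output['fmt']
do_totals = fmt != "csv"                             # totals row only for non-CSV output
print(fmt, do_totals)                                # -> tab True
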
Code example #8
class FquantCommand(NNToolShellBase):
    # FQUANT COMMAND
    parser_fquant = Cmd2ArgumentParser()
    parser_fquant.add_argument('-f', '--force_width',
                               choices=STATS_BITS, default=8, type=int, help='force all layers to this width')
    parser_fquant.add_argument('-s', '--scheme',
                               type=str, choices=QUANTIZATION_SCHEMES, default='SQ8',
                               help='quantize with scaling factors (TFlite quantization-like) [default] or POW2')
    table_options(parser_fquant, default_width=140)

    @with_argparser(parser_fquant)
    def do_fquant(self, args: argparse.Namespace):
        """
Attempt to calculate a fake quantization for the graph using random tensors and parameters.
This is intended to allow code generation for performance testing even if no real
weights and input data are available."""
        self._check_graph()
        self.G.constant_store.fake = True
        stats_collector = ACTIVATION_STATS[args.scheme]()
        input_tensors = [np.random.normal(0, 0.2, input.dims.shape)
                         for input in self.G.input_nodes()]
        stats_collector.collect_stats(self.G, input_tensors)
        if args.scheme == 'SQ8':
            astats = stats_collector.stats
            quantizer = MultQuantizer(astats, 8)
        else:
            astats = stats_collector.reduce_stats()
            stats_collector = FakeFilterStatsCollector()
            fstats = stats_collector.collect_stats(self.G)
            quantizer = SymmetricQuantizer(astats, fstats,
                                           force_width=args.force_width)
        qrecs = quantizer.quantize(self.G)
        self.G.quantization = qrecs
        if args.scheme == 'SQ8':
            concats_matcher = EqualizeSymmetricMultiplicativeQuantivedConcats()
            concats_matcher.match(self.G, set_identity=False)
            softmax_qrec_matcher = PropagateSoftmaxSymQrec()
            softmax_qrec_matcher.match(self.G, set_identity=False)
        self.G.constant_store.fake = False
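
In this variant the collector class is picked by scheme through the ACTIVATION_STATS mapping. A hypothetical standalone illustration of that dispatch; the two collector classes below are placeholders, not the gap_sdk ones:

class SQ8StatsCollectorSketch:
    pass

class POW2StatsCollectorSketch:
    pass

# mirrors ACTIVATION_STATS[args.scheme]() in do_fquant above
ACTIVATION_STATS_SKETCH = {'SQ8': SQ8StatsCollectorSketch, 'POW2': POW2StatsCollectorSketch}
scheme = 'SQ8'
collector = ACTIVATION_STATS_SKETCH[scheme]()
print(type(collector).__name__)
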
Code example #9
File: fquant.py  Project: dilawar/gap_sdk
class FquantCommand(NNToolShellBase):
    # FQUANT COMMAND
    parser_fquant = Cmd2ArgumentParser()
    parser_fquant.add_argument('-f',
                               '--force_width',
                               choices=STATS_BITS,
                               default=8,
                               type=int,
                               help='force all layers to this width')
    table_options(parser_fquant, default_width=140)

    @with_argparser(parser_fquant)
    def do_fquant(self, args: argparse.Namespace):
        """
Attempt to calculate a fake quantization for the graph using random tensors and parameters.
This is intended to allow code generation for performance testing even if no real
weights and input data are available."""
        self._check_graph()
        self.G.constant_store.fake = True
        stats_collector = ActivationStatsCollector()
        input_tensors = [
            np.random.normal(0, 0.2, input.dims.shape)
            for input in self.G.input_nodes()
        ]
        stats_collector.collect_stats(self.G, input_tensors)
        astats = stats_collector.reduce_stats()
        stats_collector = FakeFilterStatsCollector()
        fstats = stats_collector.collect_stats(self.G)
        quantizer = SymmetricQuantizer(astats,
                                       fstats,
                                       force_width=args.force_width)
        qrecs = quantizer.quantize(self.G)
        self.G.quantization = qrecs
        tab = QuantizationReporter().report(self.G, qrecs)
        output_table(tab, args)
        self.G.constant_store.fake = False
Code example #10
class GraphCommand(NNToolShellBase):
    # GRAPH COMMAND

    def other_open_graphs(self, only_open=False):
        items = []
        for graph_idx, graph in enumerate(self._graphs):
            if graph_idx == self._graph_idx:
                continue
            if graph['G'] is None:
                if only_open:
                    continue
                name = "No Graph"
            else:
                name = graph['G'].name
            items.append(CompletionItem(graph_idx, name))
        return items

    parser_graph = Cmd2ArgumentParser("display graph")
    parser_graph.add_argument(
        'graph_number',
        nargs=argparse.OPTIONAL,
        type=int,
        choices_method=other_open_graphs,
        help='graph to select or nothing to show open graphs')

    @with_argparser(parser_graph)
    @no_history
    def do_graph(self, args: argparse.Namespace):
        """
Select active graphs"""
        if args.graph_number is not None:
            if args.graph_number < 0 or args.graph_number >= len(self._graphs):
                self.perror("graph number is invalid")
                return
            self._graph_idx = args.graph_number
            self.pfeedback("selected graph {}".format(self._graph_idx))
            self._update_prompt()
            self.py_locals['G'] = self.G
        else:
            for idx, rec in enumerate(self._graphs):
                self.poutput("{:d} - {}".format(idx, rec['graph_file']))

    # SHOW COMMAND
    parser_show = NNToolArguementParser("display graph")
    table_options(parser_show, default_width=180)
    parser_show.add_argument(
        'step',
        nargs=(0, 1),
        help='step to show or nothing to show all.' + NODE_SELECTOR_HELP,
        completer_method=NNToolShellBase.node_step_or_name_completer(
            allow_comma=True))
    parser_show.add_argument('-s',
                             '--show_constants',
                             action='store_true',
                             help='Show constant parameters nodes')

    @with_argparser(parser_show)
    @no_history
    def do_show(self, args: argparse.Namespace):
        """
Display the structure of the graph"""
        self._check_graph()
        if args.step:
            nodes, _ = self.get_node_step_or_name(args.step, allow_comma=True)
            if not nodes:
                self.do_help('show')
                return
        else:
            nodes = None
        fmt = ('tab' if args.output is None else args.output['fmt'])
        split_dims = fmt == "xls"
        do_totals = fmt != "csv"
        show_constants = args.show_constants if args.step is None else True
        tab = GraphReporter(do_totals=do_totals,
                            split_dims=split_dims,
                            show_constants=show_constants).report(self.G,
                                                                  nodes=nodes)
        output_table(tab, args)
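
other_open_graphs above feeds tab completion for the graph_number positional: it pairs each selectable graph index with a readable description via cmd2's CompletionItem. A small standalone sketch of that pairing with dummy graph records (assuming CompletionItem(value, description) behaves as it is used above):

from types import SimpleNamespace
from cmd2 import CompletionItem

# Dummy records standing in for self._graphs: a slot holds a loaded graph or None.
graphs = [{'G': None}, {'G': SimpleNamespace(name='mobilenet_v1')}]

items = [CompletionItem(idx, graph['G'].name if graph['G'] else "No Graph")
         for idx, graph in enumerate(graphs)]
print([str(item) for item in items])   # completion values; descriptions show in the shell
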
Code example #11
File: graph.py  Project: dilawar/gap_sdk
class GraphCommand(NNToolShellBase):
    # GRAPH COMMAND

    def other_open_graphs(self, only_open=False):
        items = []
        for graph_idx, graph in enumerate(self._graphs):
            if graph_idx == self._graph_idx:
                continue
            if graph['G'] is None:
                if only_open:
                    continue
                name = "No Graph"
            else:
                name = graph['G'].name
            items.append(CompletionItem(graph_idx, name))
        return items

    parser_graph = Cmd2ArgumentParser("display graph")
    parser_graph.add_argument(
        'graph_number',
        nargs=argparse.OPTIONAL,
        type=int,
        choices_method=other_open_graphs,
        help='graph to select or nothing to show open graphs')

    @with_argparser(parser_graph)
    def do_graph(self, args: argparse.Namespace):
        """
Select active graphs"""
        if args.graph_number is not None:
            if args.graph_number < 0 or args.graph_number >= len(self._graphs):
                self.perror("graph number is invalid")
                return
            self._graph_idx = args.graph_number
            self.pfeedback("selected graph {}".format(self._graph_idx))
            self._update_prompt()
            self.py_locals['G'] = self.G
        else:
            for idx, rec in enumerate(self._graphs):
                self.poutput("{:d} - {}".format(idx, rec['graph_file']))

    # SHOW COMMAND
    parser_show = Cmd2ArgumentParser("display graph")
    table_options(parser_show, default_width=180)
    parser_show.add_argument('step',
                             type=int,
                             nargs=(0, 1),
                             help='Limit to step number')

    @with_argparser(parser_show)
    def do_show(self, args: argparse.Namespace):
        """
Display the structure of the graph"""
        self._check_graph()
        fmt = ('tab' if args.output is None else args.output['fmt'])
        split_dims = fmt == "xls"
        do_totals = fmt != "csv"
        tab = GraphReporter(split_dims=split_dims,
                            do_totals=do_totals,
                            step=args.step).report(self.G, None)
        output_table(tab, args)
Code example #12
File: fquant.py  Project: brupa9/gap_sdk
class FquantCommand(NNToolShellBase):
    # FQUANT COMMAND
    parser_fquant = Cmd2ArgumentParser()
    parser_fquant.add_argument('-f',
                               '--force_width',
                               choices=STATS_BITS,
                               default=8,
                               type=int,
                               help='force all layers to this width')
    parser_fquant.add_argument(
        '-s',
        '--scheme',
        type=str,
        choices=QUANTIZATION_SCHEMES,
        default='SQ8',
        help='quantize with scaling factors (TFlite quantization-like) [default] or POW2')
    parser_fquant.add_argument('-d',
                               '--quant_dimension',
                               choices=['tensor', 'channel'],
                               default='channel')
    parser_fquant.add_argument(
        '--uniform',
        type=float,
        default=0.0,
        help='Use uniform distribution for input with the specified max value')
    parser_fquant.add_argument('--num_inference',
                               type=int,
                               default=1,
                               help='How many inferences')
    parser_fquant.add_argument(
        '-n',
        '--no_narrow_weights',
        action='store_true',
        help='Don\'t quantize weights uniformly over negative/positive ' +
        'range. i.e. Avoid -128 vs 127')
    table_options(parser_fquant, default_width=140)

    @with_argparser(parser_fquant)
    def do_fquant(self, args: argparse.Namespace):
        """
Attempt to calculate a fake quantization for the graph using random tensors and parameters.
This is intended to allow code generation for performance testing even if no real
weights and input data are available."""
        self._check_graph()
        self.G.constant_store.fake = True
        stats_collector = ActivationRangesCollector()
        for _ in range(args.num_inference):
            if args.uniform:
                input_tensors = [
                    np.random.uniform(-args.uniform, args.uniform,
                                      inp.dims.shape)
                    for inp in self.G.input_nodes()
                ]
            else:
                input_tensors = [
                    np.random.normal(0, 0.2, inp.dims.shape)
                    for inp in self.G.input_nodes()
                ]
            stats_collector.collect_stats(self.G, input_tensors)
        if args.scheme == 'SQ8':
            bits = 8
        else:
            bits = args.force_width
        astats = stats_collector.stats

        quantizer = UnifiedQuantizer(args.scheme,
                                     astats,
                                     quantized_dimension=args.quant_dimension,
                                     narrow_weights=not args.no_narrow_weights,
                                     bits=bits)
        self._record_stats(astats)
        qrecs = quantizer.quantize(self.G)
        self.G.quantization = qrecs
        if args.scheme == 'SQ8':
            concats_matcher = EqualizeSymmetricMultiplicativeQuantivedConcats()
            concats_matcher.match(self.G, set_identity=False)
            softmax_qrec_matcher = PropagateSoftmaxSymQrec()
            softmax_qrec_matcher.match(self.G, set_identity=False)
        self.G.constant_store.fake = False
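
The fake input tensors above come straight from NumPy: a normal distribution by default, or a symmetric uniform one when --uniform supplies a maximum value. A standalone illustration with a made-up input shape (the real shapes come from inp.dims.shape):

import numpy as np

shape = (1, 16, 16)                                  # made-up stand-in for inp.dims.shape
normal_input = np.random.normal(0, 0.2, shape)       # default path in do_fquant
uniform_input = np.random.uniform(-1.0, 1.0, shape)  # path taken for --uniform 1.0
print(normal_input.shape, uniform_input.shape)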