Code example #1
def test_graph_imu_auto_quant_and_execute_quant():
    G = create_graph("tests/graph/imu.tflite", opts={"load_tensors": True})
    G.add_dimensions()
    G.adjust_order()
    get_pow2_match_group().match(G)
    G.add_dimensions()
    stats_collector = ActivationStatsCollector()
    for input_file in ['tests/images/imu0.pgm']:
        input_tensor = import_data(input_file,
                                   offset=0,
                                   divisor=256,
                                   nptype='int16')
        stats_collector.collect_stats(G, [input_tensor])
    astats = stats_collector.reduce_stats()
    stats_collector = FilterStatsCollector()
    fstats = stats_collector.collect_stats(G)
    quantizer = SymmetricQuantizer(astats, fstats, force_width=16)
    qrecs = quantizer.quantize(G)
    G.quantization = qrecs
    executer = GraphExecuter(G, qrecs=qrecs)
    for input_file in ['tests/images/imu0.pgm']:
        input_tensor = import_data(input_file,
                                   offset=0,
                                   divisor=256,
                                   nptype='int16')
        output_ = executer.execute([input_tensor],
                                   qmode=QuantizationMode.all())
Code example #2
def test_validate_mn1_quantized1(mn1q_graph, mn1f_graph):
    tfi = TfliteImporter()
    Gf = tfi.create_graph(mn1f_graph, {'load_tensors': True})
    Gf.add_dimensions()
    Gf.adjust_order()
    matcher = get_pow2_match_group()
    matcher.match(Gf)
    Gf.add_dimensions()

    tfi = TfliteImporter()
    G = tfi.create_graph(mn1q_graph, {
        'load_tensors': True,
        'load_quantization': True
    })
    G.add_dimensions()
    G.adjust_order()
    matcher = get_pow2_match_group()
    matcher.match(G)
    G.add_dimensions()

    fpnode = Gf.graph_state.steps[2]['node']
    fpcnode = fpnode.contained_filters()[0]
    qpnode = G.graph_state.steps[2]['node']
    qpcnode = qpnode.contained_filters()[0]
    nid = NodeId(qpnode, qpcnode)
    qrec = G.quantization[nid]
    dqbiases = qrec.biases_q.get_dequantized(qpcnode.biases)
    assert np.max(np.abs(fpcnode.biases - dqbiases)) < 0.1
    input_tensor = np.load('tests/mobv1_valid/COCO_val2014_000000362331_0.npy')
    input_tensor = input_tensor.reshape((224, 224, 3)).transpose((2, 0, 1))

    executer = GraphExecuter(Gf)
    foutput_tensors = executer.execute([input_tensor])
    foutput_tensor = np.load(
        'tests/mobv1_valid/output_COCO_val2014_000000362331_0_float.npy')
    assert np.max(np.abs(foutput_tensors[-1][0] - foutput_tensor[0])) < 0.0001

    executer = GraphExecuter(G, qrecs=G.quantization)
    qfroutput_tensors = executer.execute([input_tensor],
                                         qmode=QuantizationMode.none())
    assert np.max(np.abs(qfroutput_tensors[-1][0] - foutput_tensor[0])) < 0.2

    executer = GraphExecuter(G, qrecs=G.quantization)
    qroutput_tensors = executer.execute(
        [input_tensor], qmode=QuantizationMode.all_dequantize())

    output_tensor = np.load(
        'tests/mobv1_valid/output_COCO_val2014_000000362331_0_quant.npy')
    # assert np.max(np.abs(qroutput_tensors[-1][0] - output_tensor[0])) < 0.16
    assert np.max(np.abs(qroutput_tensors[-1][0] - output_tensor[0])) < 0.28
Code example #3
def test_fake_values_concat(concat_test_graph):
    G = create_graph(concat_test_graph, opts={"load_tensors": True})
    G.add_dimensions()
    G.adjust_order()
    matcher = get_pow2_match_group()
    matcher.match(G)
    G.add_dimensions()
    G.constant_store.fake = True
    stats_collector = ActivationStatsCollector()
    stats_collector.collect_stats(
        G, [np.random.rand(*node.dims.shape) for node in G.input_nodes()])
    astats = stats_collector.reduce_stats()
    stats_collector = FilterStatsCollector()
    fstats = stats_collector.collect_stats(G)
    quantizer = SymmetricQuantizer(astats, fstats, force_width=8)
    qrecs = quantizer.quantize(G)
    G.quantization = qrecs
    with tempfile.TemporaryDirectory() as tempdir:
        opts = {
            'default_input_location': 'ARG_LOC_L2',
            'default_output_location': 'ARG_LOC_L2',
            'default_global_location': 'ARG_LOC_L3_HFLASH',
            'default_local_location': 'AT_MEM_UNDEF',
            'at_ver': 3,
            'tensor_directory': tempdir
        }
        code_gen = CodeGenerator(G, DefaultNamingConvension(G), opts)
        print(default_template(G, code_generator=code_gen))
        code_gen.write_constants()
Code example #4
def test_graph_kws_auto_quant(kws_graph, kws_sounds):
    G = create_graph(kws_graph, opts={"load_tensors": True})
    G.add_dimensions()
    G.adjust_order()
    get_pow2_match_group().match(G)
    G.add_dimensions()
    stats_collector = ActivationStatsCollector()
    for input_file in kws_sounds:
        data = import_data(input_file, offset=0, divisor=256, nptype='int16')
        stats_collector.collect_stats(G, [data])
    astats = stats_collector.reduce_stats()
    stats_collector = FilterStatsCollector()
    fstats = stats_collector.collect_stats(G)
    quantizer = SymmetricQuantizer(astats, fstats, force_width=16)
    qrecs = quantizer.quantize(G)
    G.quantization = qrecs
Code example #5
File: fusions.py  Project: dilawar/gap_sdk
    def do_fusions(self, args):
        """
Carry out the default set of fusions on the graph"""
        self._check_graph()
        if args.list:
            self.ppaged("\n".join(
                ["%s - %s" % (name, desc) for name, desc in get_fusions()]))
            return
        if args.apply:
            fusions = [get_fusion(name) for name in args.apply]
            if not fusions:
                self.perror('fusion %s not found' % args.apply)
                return
        elif args.pow2:
            fusions = [get_pow2_match_group()]
        elif args.scale8:
            fusions = [get_scale8_match_group()]
        else:
            self.perror(
                "No fusion set selected. Nothing to do. Select --pow2 or --scale8."
            )
            return
        for fusion in fusions:
            fusion.match(self.G)
        self.G.add_dimensions()
        if self.G.quantization and not self.G.quantization.verify_quantization(
                self.G):
            self.G.quantization = None
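
In the nntool command shell this method backs the fusions command (cmd2 maps do_fusions to fusions), so the logic above is what runs for invocations such as fusions --list, fusions --pow2, fusions --scale8 or fusions --apply <name>.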
Code example #6
def test_fusions4(ssd_graph):
    tfi = TfliteImporter()
    G = tfi.create_graph(ssd_graph, {})
    G.add_dimensions()
    matcher = get_pow2_match_group()
    matcher.match(G)
    G.add_dimensions()
Code example #7
def test_validate_mn1_quantized2(mn1q_graph):
    tfi = TfliteImporter()
    G = tfi.create_graph(mn1q_graph, {
        'load_tensors': True,
        'load_quantization': True
    })
    G.add_dimensions()
    G.adjust_order()
    matcher = get_pow2_match_group()
    matcher.match(G)
    G.add_dimensions()
Code example #8
def test_validate_mn1_dequant_quantfloat(mn1q_graph):
    # load dequantized graph same results as quant graph and float execution
    tfi = TfliteImporter()
    G = tfi.create_graph(mn1q_graph, {
        'load_tensors': True,
        'load_quantization': True
    })
    G.add_dimensions()
    G.adjust_order()
    matcher = get_pow2_match_group()
    matcher.match(G)
    G.add_dimensions()

    Gdq = tfi.create_graph(mn1q_graph, {
        'load_tensors': True,
        'load_dequantized': True
    })
    Gdq.add_dimensions()
    Gdq.adjust_order()
    matcher = get_pow2_match_group()
    matcher.match(Gdq)
    Gdq.add_dimensions()

    input_tensor = np.load('tests/mobv1_valid/COCO_val2014_000000362331_0.npy')
    input_tensor = input_tensor.reshape((224, 224, 3)).transpose((2, 0, 1))

    executer = GraphExecuter(G, qrecs=G.quantization)
    qfoutput_tensors = executer.execute([input_tensor],
                                        qmode=QuantizationMode.none())

    executer = GraphExecuter(Gdq)
    dfoutput_tensors = executer.execute([input_tensor])

    diff_list = [
        np.abs(df[0] - qf[0])
        for df, qf in zip(dfoutput_tensors, qfoutput_tensors)
    ]
    max_diff = [np.max(elem) for elem in diff_list]
    assert max(max_diff) < 0.003
Code example #9
File: conftest.py  Project: VishalSharma0309/gap_sdk
def save_state(temp_dir, width, fusions=False, adjust=False):
    file_name = os.path.join(temp_dir, "state_file")
    G = create_graph(MNIST_GRAPH, opts={"load_tensors": True})
    G.add_dimensions()
    if adjust:
        G.adjust_order()
    if fusions:
        get_pow2_match_group().match(G)
        G.add_dimensions()
    stats_collector = ActivationStatsCollector()
    for input_file in MNIST_IMAGES:
        data = import_data(input_file, offset=0, divisor=255)
        if not adjust:
            data = data.reshape((28, 28, 1))
        stats_collector.collect_stats(G, [data])
    astats = stats_collector.reduce_stats()
    stats_collector = FilterStatsCollector()
    fstats = stats_collector.collect_stats(G)
    quantizer = SymmetricQuantizer(astats, fstats, force_width=width)
    qrecs = quantizer.quantize(G)
    G.quantization = qrecs
    dump_state(G, include_parameters=True, state_path=file_name)
    return file_name
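
Examples #1, #3, #4 and #9 quantize the prepared graph with the same sequence: collect activation statistics over sample inputs, collect filter statistics from the weights, then build symmetric POW2 quantization records. The sketch below factors that recurring sequence into a helper; the name quantize_pow2_graph is hypothetical, but the calls are exactly those used in the examples above, with imports omitted just as in the originals.

def quantize_pow2_graph(G, sample_inputs, force_width=8):
    # Gather activation ranges by running each sample input through the graph.
    stats_collector = ActivationStatsCollector()
    for sample in sample_inputs:
        stats_collector.collect_stats(G, [sample])
    astats = stats_collector.reduce_stats()
    # Gather weight/bias statistics directly from the graph's filter nodes.
    fstats = FilterStatsCollector().collect_stats(G)
    # Build symmetric power-of-two quantization records at the requested width.
    quantizer = SymmetricQuantizer(astats, fstats, force_width=force_width)
    G.quantization = quantizer.quantize(G)
    return G.quantization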
Code example #10
def test_validate_mn1_float(mn1f_graph):
    tfi = TfliteImporter()
    G = tfi.create_graph(mn1f_graph, {'load_tensors': True})
    G.add_dimensions()
    matcher = get_pow2_match_group()
    matcher.match(G)
    G.add_dimensions()
    input_tensor = np.load('tests/mobv1_valid/COCO_val2014_000000362331_0.npy')
    input_tensor = input_tensor.reshape((224, 224, 3))
    executer = GraphExecuter(G, qrecs=G.quantization)
    routput_tensors = executer.execute([input_tensor])
    output_tensor = np.load(
        'tests/mobv1_valid/output_COCO_val2014_000000362331_0_float.npy')
    assert np.max(np.abs(routput_tensors[-1][0] - output_tensor[0])) < 0.0001
Code example #11
def test_adjust7(concat_test_graph):
    tfi = TfliteImporter()
    G = tfi.create_graph(concat_test_graph, {'load_tensors': True})
    G.node('input_1').fixed_order = True
    G.node('output_1').fixed_order = True
    G.node('output_2').fixed_order = True
    G.add_dimensions()
    G.adjust_order()
    matcher = get_pow2_match_group()
    matcher.match(G)
    G.add_dimensions()
    report = GraphReporter().report(G, None)
    renderer = TextTableRenderer(maxwidth=200)
    print(report.render(renderer))
    report = GraphReporter(split_dims=True).report(G, None)
Code example #12
    def do_fusions(self, args):
        """
Carry out the default set of fusions on the graph"""
        if args.list:
            table = texttable.Texttable()
            table.set_cols_align(['l', 'l'])
            table.set_max_width(120)
            table.add_rows([['Name', 'Description']] + get_fusions())
            self.ppaged(table.draw())
            return
        self._check_graph()
        state = ConstantInputParameters.save_compression_state(self.G)
        try:
            if args.apply:
                fusions = [get_fusion(name) for name in args.apply]
                invalid_names = [
                    args.apply[idx] for idx, fusion in enumerate(fusions)
                    if fusion is None
                ]
                if invalid_names:
                    self.perror(
                        f'fusion{"s" if len(invalid_names) > 1 else ""} {", ".join(invalid_names)} not found'
                    )
                    return
            elif args.pow2:
                fusions = [get_pow2_match_group()]
            elif args.scale8:
                fusions = [get_scale8_match_group()]
            else:
                self.perror(
                    "No fusion set selected. Nothing to do. Select --pow2 or --scale8."
                )
                return
            for fusion in fusions:
                fusion.match(self.G)
            self.G.add_dimensions()
            if self.G.quantization and verify_quantization(self.G):
                quantizer = NewQuantizer(self.G)
                quantizer.quantize()
                problems = verify_quantization(self.G)
                if problems:
                    self.perror('quantization issue after fusions')
                    for problem in problems:
                        self.perror(problem)
        finally:
            ConstantInputParameters.restore_compression_state(self.G, state)
Code example #13
    def do_fusions(self, args):
        """
Carry out the default set of fusions on the graph"""
        if args.list:
            table = texttable.Texttable()
            table.set_cols_align(['l', 'l'])
            table.set_max_width(120)
            table.add_rows([['Name', 'Description']] + get_fusions())
            self.ppaged(table.draw())
            return
        self._check_graph()
        if args.apply:
            fusions = [get_fusion(name) for name in args.apply]
            invalid_names = [
                args.apply[idx] for idx, fusion in enumerate(fusions)
                if fusion is None
            ]
            if invalid_names:
                self.perror(
                    f'fusion{"s" if len(invalid_names) > 1 else ""} {", ".join(invalid_names)} not found'
                )
                return
        elif args.pow2:
            fusions = [get_pow2_match_group()]
        elif args.scale8:
            fusions = [get_scale8_match_group()]
        else:
            self.perror(
                "No fusion set selected. Nothing to do. Select --pow2 or --scale8."
            )
            return
        for fusion in fusions:
            fusion.match(self.G)
        self.G.add_dimensions()
        if self.G.quantization and not self.G.quantization.verify_quantization(
                self.G):
            self.G.quantization = None
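
Most of the examples above repeat the same preparation sequence before any statistics are collected, quantization is applied or code is generated: import the graph, compute dimensions, adjust the tensor order, run the POW2 match group, then recompute dimensions. A minimal sketch of that shared pattern follows; the helper name prepare_pow2_graph is hypothetical, only calls shown in the examples are used, and imports are again omitted.

def prepare_pow2_graph(graph_file, opts=None):
    # Import the TFLite model into an nntool graph.
    G = create_graph(graph_file, opts=opts or {"load_tensors": True})
    G.add_dimensions()                # compute tensor dimensions for every node
    G.adjust_order()                  # adjust dimension order for the target layout
    get_pow2_match_group().match(G)   # fuse node patterns suited to POW2 kernels
    G.add_dimensions()                # dimensions must be recomputed after the fusions
    return G

From there the examples go on to quantize with SymmetricQuantizer, execute with GraphExecuter, generate code with CodeGenerator, or simply report on the fused graph.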