Example #1
0
    def _collect(self, G, input_tensors) -> Mapping[NodeId, Mapping]:
        """Compare a float execution of *G* against a fully-quantized one.

        Runs the graph twice over *input_tensors* (float, then quantized with
        per-step details collected) and returns an OrderedDict keyed by
        NodeId with per-step error statistics: average/max/min absolute
        error, QSNR, and any overflow flags reported by the quantized run.
        """
        LOG.debug("gather quantization statistics")
        float_outputs = execute(G, input_tensors, limit=self._limit)
        step_details = []
        quant_outputs = execute(G,
                                input_tensors,
                                limit=self._limit,
                                qrecs=G.quantization,
                                qmode=QuantizationMode.all(),
                                all_details=step_details)
        stats = OrderedDict()
        for step_idx, float_out in enumerate(float_outputs):
            quant_out = quant_outputs[step_idx]
            abs_error = np.abs(float_out[0] - quant_out[0])
            node = G.graph_state.steps[step_idx]['node']
            # Details may be empty for steps that report nothing; fall back
            # to empty-string flags in that case (matches table formatting).
            detail = step_details[step_idx]
            overflow_dot = detail['overflow_dot'] if detail else ""
            overflow_acc = detail['overflow_acc'] if detail else ""

            stats[NodeId(node, None)] = {
                'name': node.name,
                'op_name': node.op_name,
                'step': step_idx,
                'av_err': np.mean(abs_error),
                'max_err': np.max(abs_error),
                'min_err': np.min(abs_error),
                'qsnr': qsnr(float_out[0], quant_out[0]),
                'overflow_dot': overflow_dot,
                'overflow_acc': overflow_acc,
            }

        return stats
Example #2
0
def test_cross_mini(two_conv_graph):
    """Cross-layer equalization must not change the graph's final output."""
    G = two_conv_graph
    before = execute(G, [np.full([10, 10, 2], 1)])
    groups, neurons = cl.discover_groups(G)
    assert groups and neurons, "Nothing discovered"
    cl.process_groups(groups)
    cl.update_parameters(neurons)
    after = execute(G, [np.full([10, 10, 2], 1)])
    # Step 3 output before vs after equalization should agree to ~1e-5.
    assert np.max(np.abs(before[3][0] - after[3][0])) < 0.00001
Example #3
0
def test_graph_calc_quantize_one_2(value_cache, mnist_unfused_16bit_state, mnist_images):
    """Quantizing only step 4 keeps its output within +/-2 of the float run at step 7."""
    G = load_state(mnist_unfused_16bit_state, value_cache=value_cache)

    def fresh_input():
        # Re-import each time so both runs see an identical, untouched tensor.
        tensor = import_data(mnist_images[0], height=28, width=28, offset=0, divisor=255)
        return tensor.reshape((28, 28, 1))

    float_run = execute(G, [fresh_input()])
    quant_run = execute(G, [fresh_input()],
                        qmode=QuantizationMode.step(4),
                        qrecs=G.quantization)
    diffs = [out[0] - quant_run[i][0] for i, out in enumerate(float_run)]
    assert np.min(diffs[7]) > -2 and np.max(diffs[7]) < 2
Example #4
0
def test_graph_calc_quantized8(value_cache, mnist_unfused_8bit_state, mnist_images):
    """Dequantized 8-bit execution tracks the float run within an error bound."""
    G = load_state(mnist_unfused_8bit_state, value_cache=value_cache)

    def fresh_input():
        # Re-import each time so both runs see an identical, untouched tensor.
        tensor = import_data(mnist_images[0], height=28, width=28, offset=0, divisor=255)
        return tensor.reshape((28, 28, 1))

    float_run = execute(G, [fresh_input()], limit=7)
    quant_run = execute(G, [fresh_input()], qrecs=G.quantization, limit=7, dequantize=True)
    diffs = [float_run[i][0] - quant_run[i][0] for i in range(8)]
    # Bound of 9 on the final step's absolute error — tolerance chosen by the
    # original author for 8-bit quantization.
    assert np.max(np.abs(diffs[7])) < 9
Example #5
0
def test_cross_large(vww_graph, vww_images):
    """Cross-layer equalization on the vww graph preserves outputs at group boundaries."""
    G = create_graph(vww_graph, opts={"load_tensors": True})
    G.add_dimensions()
    image = import_data(vww_images[4], offset=0, divisor=255)
    before = execute(G, [image])
    groups, neurons = cl.discover_groups(G, do_relun=True)
    # Step indices feeding into / produced by each discovered group.
    input_steps = [
        G.in_edges(grp[0][0]['name'])[0].from_node.step_idx for grp in groups
    ]
    output_steps = [grp[-1][-1]['node'].step_idx for grp in groups]
    assert groups and neurons, "Nothing discovered"
    cl.process_groups(groups, threshold=0.0001)
    cl.update_parameters(neurons)
    after = execute(G, [image])

    def worst_delta(steps):
        # Largest absolute output change across the given step indices.
        return max(np.max(np.abs(before[i][0] - after[i][0])) for i in steps)

    assert worst_delta(input_steps) < 0.0001
    assert worst_delta(output_steps) < 0.0001
    assert np.max(np.abs(before[-1][0] - after[-1][0])) < 0.0001
Example #6
0
def test_equivalence(mnist_graph, mnist_images):
    """Graph weights and execution outputs match pickled reference values."""
    G = create_graph(mnist_graph, opts={"load_tensors": True})
    G.add_dimensions()
    G.adjust_order()
    G.add_dimensions()
    input_tensor = import_data(mnist_images[0],
                               height=28,
                               width=28,
                               divisor=255,
                               offset=0,
                               transpose=False)
    output_ = execute(G, [input_tensor])
    with open("tests/h5_pickles/weights.pickle", 'rb') as fp:
        verif_weights = pickle.load(fp)
    # (reference index, graph step index) pairs for the layers with parameters.
    for ref_idx, step_idx in ((0, 1), (3, 4), (7, 7)):
        node = G.graph_state.steps[step_idx]['node']
        assert np.array_equal(verif_weights[ref_idx]['weights'], node.weights)
        assert np.array_equal(verif_weights[ref_idx]['biases'], node.biases)
    outputs_pickle = os.path.join("tests/h5_pickles",
                                  os.path.basename(mnist_images[0]) + '.pickle')
    with open(outputs_pickle, 'rb') as fp:
        verif = pickle.load(fp)
    assert all(np.max(np.abs(verif[idx][0] - output_[idx][0])) < 0.00001
               for idx in range(7))
    # checking the Flatten layer doesn't work because the layout was not changed in the run tool
    # the layout for the output of the linear layer is a little different
    assert np.max(np.abs(verif[8][0] - output_[7][0].flatten())) < 0.00001
    assert np.array_equal(np.round(output_[-1][0].flatten()),
                          [1, 0, 0, 0, 0, 0, 0, 0, 0, 0])
Example #7
0
def test_graph_execute_complex(ir_graph, ir_images):
    """Smoke test: the ir graph executes end-to-end without raising."""
    G = create_graph(ir_graph, opts={"load_tensors": True})
    G.add_dimensions()
    image = import_data(ir_images[0], offset=0, divisor=255).reshape((80, 80, 1))
    execute(G, [image])