Example #1
File: run.py Project: marph91/pocket-cnn
def create_stimuli(root, ksize, stride, bitwidth_data_in, bitwidth_data_out,
                   bitwidth_weights, channel_in, channel_out, width, height):
    a_rand = random_fixed_array((1, channel_in, height, width),
                                bitwidth_data_in)
    a_in = v_to_fixedint(a_rand)
    np.savetxt(join(root, "gen", f"input_{ksize}_{stride}_{channel_in}.csv"),
               flatten(a_in),
               delimiter=", ",
               fmt="%3d")

    a_weights_rand = random_fixed_array(
        (channel_out, channel_in, ksize, ksize), bitwidth_weights)
    a_bias_rand = random_fixed_array((channel_out, ), bitwidth_weights)

    # weights and bias to txt
    weights_to_files(a_weights_rand, a_bias_rand,
                     f"conv_{ksize}_{stride}_{channel_in}", join(root, "gen"))

    # assign the outputs
    conv_out = v_to_fixedint(
        conv(a_rand, a_weights_rand, a_bias_rand, (ksize, stride),
             bitwidth_data_out.as_tuple))
    filename = join(root, "gen", f"output_{ksize}_{stride}_{channel_in}.csv")
    with open(filename, "w") as outfile:
        np.savetxt(outfile, flatten(conv_out), delimiter=", ", fmt="%3d")
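These excerpts omit their import headers; judging from the names used they need at least numpy (as np), os.path.join, math and onnx, plus pocket-cnn's own helpers (random_fixed_array, v_to_fixedint, flatten, conv, weights_to_files, ...). The conv() call above computes the golden output for the testbench. As a rough orientation only, and not the project's conv (which additionally applies the fixed-point rounding controlled by bitwidth_data_out), an unpadded integer convolution reference in plain NumPy could look like this:

import numpy as np

def conv_ref(x, w, b, ksize, stride):
    """Direct convolution; layouts (batch, ch_in, h, w) and (ch_out, ch_in, k, k)."""
    batch, ch_in, height, width = x.shape
    ch_out = w.shape[0]
    out_h = (height - ksize) // stride + 1
    out_w = (width - ksize) // stride + 1
    out = np.zeros((batch, ch_out, out_h, out_w), dtype=np.int64)
    for co in range(ch_out):
        for row in range(out_h):
            for col in range(out_w):
                window = x[:, :, row * stride:row * stride + ksize,
                           col * stride:col * stride + ksize]
                out[:, co, row, col] = np.sum(window * w[co], axis=(1, 2, 3)) + b[co]
    return out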
Example #2
File: run.py Project: marph91/pocket-cnn
def create_stimuli(root, model_name):
    model = onnx.load(join(root, model_name))
    shape = cnn_onnx.parse_param.get_input_shape(model)

    a_rand = random_fixed_array(shape, Bitwidth(8, 8, 0), signed=False)
    a_in = v_to_fixedint(a_rand)
    a_out = v_to_fixedint(cnn_onnx.inference.numpy_inference(model, a_rand))

    np.savetxt(join(root, "input.csv"),
               flatten(a_in),
               delimiter=", ",
               fmt="%3d")
    np.savetxt(join(root, "output.csv"), a_out, delimiter=", ", fmt="%3d")
Example #3
File: run.py Project: marph91/pocket-cnn
def create_stimuli(root, ksize, stride, bitwidth, shape):
    a_rand = random_fixed_array(shape, bitwidth)
    a_in = v_to_fixedint(a_rand)
    np.savetxt(join(root, "src", "input_%d_%d.csv" % (ksize, stride)),
               flatten(a_in),
               delimiter=", ",
               fmt="%3d")

    # assign the outputs
    filename = join(root, "src", "output_%d_%d.csv" % (ksize, stride))
    max_out = v_to_fixedint(max_pool(a_rand, ksize, stride))
    with open(filename, "w") as outfile:
        np.savetxt(outfile, flatten(max_out), delimiter=", ", fmt="%3d")
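The max_pool() above is the project's fixed-point golden model. A stripped-down NumPy version of the same idea (plain floats, no fixed-point types), useful for sanity-checking the window and stride handling, is:

import numpy as np

def max_pool_ref(x, ksize, stride):
    """2D max pooling over the last two axes of (batch, channel, h, w)."""
    batch, channel, height, width = x.shape
    out_h = (height - ksize) // stride + 1
    out_w = (width - ksize) // stride + 1
    out = np.empty((batch, channel, out_h, out_w), dtype=x.dtype)
    for row in range(out_h):
        for col in range(out_w):
            window = x[:, :, row * stride:row * stride + ksize,
                       col * stride:col * stride + ksize]
            out[:, :, row, col] = window.max(axis=(2, 3))
    return out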
Example #4
def create_stimuli(root, shape, bitwidth):
    a_rand = random_fixed_array(shape, bitwidth)
    a_in = v_to_fixedint(a_rand)
    np.savetxt(join(root, "src", "input.csv"),
               flatten(a_in),
               delimiter=", ",
               fmt="%3d")

    a_out = v_to_fixedint(avg_pool(a_rand))
    np.savetxt(join(root, "src", "output.csv"),
               a_out,
               delimiter=", ",
               fmt="%3d")
Example #5
File: run.py Project: marph91/pocket-cnn
def create_stimuli(root, bitwidth, leaky, sample_cnt: int = 1):
    a_rand = random_fixed_array((sample_cnt, ), bitwidth)
    a_in = v_to_fixedint(a_rand)
    np.savetxt(join(root, "src", "input_" + "leaky" * leaky + ".csv"),
               a_in,
               delimiter=", ",
               fmt="%3d")

    a_out = (relu(a_rand) if not leaky else leaky_relu(
        a_rand, FpBinary(int_bits=0, frac_bits=3, value=0.125)))
    np.savetxt(join(root, "src", "output_" + "leaky" * leaky + ".csv"),
               v_to_fixedint(a_out),
               delimiter=", ",
               fmt="%3d")
Example #6
def analyze_and_quantize(original_weights,
                         original_bias,
                         aggressive: bool = False) -> dict:
    """Analyze and quantize the weights."""
    max_val = max(np.amax(original_weights), np.amax(original_bias))
    min_val = min(np.amin(original_weights), np.amin(original_bias))
    highest_val = max(abs(max_val), abs(min_val))
    int_width = get_integer_width(highest_val)
    print("weight quantization: ", int_width, 8 - int_width)
    print("stats: ", max_val, min_val, highest_val)

    # quantize the weights
    quantized_weights = to_fixed_point_array(original_weights,
                                             int_bits=int_width,
                                             frac_bits=8 - int_width,
                                             aggressive=aggressive)
    quantized_bias = to_fixed_point_array(original_bias,
                                          int_bits=int_width,
                                          frac_bits=8 - int_width,
                                          aggressive=aggressive)
    quantized_weights_int = v_to_fixedint(quantized_weights)
    quantized_bias_int = v_to_fixedint(quantized_bias)
    print("average error per weight:",
          np.mean(np.abs(original_weights - quantized_weights)))
    avg_val = np.mean(np.abs(quantized_weights))
    print("average absolute weight value:", avg_val)

    # print the weight stats (bias is omitted for now)
    count = {"total": quantized_weights.size}
    count["zeros"] = count["total"] - np.count_nonzero(quantized_weights)
    count["power_of_two"] = np.count_nonzero(
        v_is_power_of_two(quantized_weights))
    count["other"] = count["total"] - count["zeros"] - count["power_of_two"]
    print("total weights:", count["total"])
    print("zero weights:", count["zeros"], count["zeros"] / count["total"])
    print("power of two weights:", count["power_of_two"],
          count["power_of_two"] / count["total"])
    print("left weights:", count["other"], count["other"] / count["total"])

    if aggressive and count["other"]:
        Warning("At aggressive quantization all weights should be"
                "0 or power of two.")

    return {
        "weights": quantized_weights_int,
        "bias": quantized_bias_int,
        "quant": (int_width, 8 - int_width),
        "avg_val": avg_val,
    }
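For intuition, the same analysis can be done with plain NumPy and a straightforward symmetric fixed-point grid. This is an independent sketch; to_fixed_point_array, get_integer_width and v_is_power_of_two are project helpers and may behave differently, e.g. regarding rounding and the aggressive mode:

import numpy as np

def quantize_fixed(values, int_bits, frac_bits):
    """Round to a signed fixed-point grid with the given integer/fractional bits."""
    scale = 2.0 ** frac_bits
    low = -(2.0 ** int_bits)
    high = (2.0 ** int_bits) - 1.0 / scale
    return np.clip(np.round(values * scale) / scale, low, high)

weights = np.random.uniform(-1.5, 1.5, size=(8, 3, 3, 3))
highest = np.max(np.abs(weights))
int_bits = max(0, int(np.floor(np.log2(highest))) + 1)   # rough integer width
quant = quantize_fixed(weights, int_bits, 8 - int_bits)

print("average error per weight:", np.mean(np.abs(weights - quant)))
zeros = quant.size - np.count_nonzero(quant)
power_of_two = np.count_nonzero(np.isin(np.abs(quant), 2.0 ** np.arange(-8, 8)))
print("zeros:", zeros, "powers of two:", power_of_two,
      "other:", quant.size - zeros - power_of_two)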
Example #7
def create_stimuli(root, model_name):
    model = onnx.load(join(root, model_name))
    shape = cnn_onnx.parse_param.get_input_shape(model)

    a_rand = random_fixed_array(shape, Bitwidth(8, 8, 0), signed=False)
    a_in = v_to_fixedint(a_rand)
    a_out = v_to_fixedint(cnn_onnx.inference.numpy_inference(model, a_rand))

    # ONNX runtime prediction, TODO: doesn't work right now
    # https://github.com/microsoft/onnxruntime/issues/2964
    # sess = rt.InferenceSession(join(root, model_name))
    # input_name = sess.get_inputs()[0].name
    # pred_onnx = sess.run(None, {input_name: in_.astype(np.float32)})[0]
    # print(pred_onnx)

    np.savetxt(join(root, "input.csv"), flatten(a_in),
               delimiter=", ", fmt="%3d")
    np.savetxt(join(root, "output.csv"), a_out,
               delimiter=", ", fmt="%3d")
Example #8
def create_stimuli(root, pool_dim, bitwidth):
    a_rand = random_fixed_array((pool_dim, pool_dim), bitwidth)
    a_in = v_to_fixedint(a_rand)
    np.savetxt(join(root, "src", "input%d.csv" % pool_dim),
               a_in,
               delimiter=", ",
               fmt="%3d")

    # use atleast_1d to fulfill 1d requirement of savetxt
    a_out = np.atleast_1d(to_fixedint(np.max(a_rand)))
    np.savetxt(join(root, "src", "output%d.csv" % pool_dim),
               a_out,
               delimiter=", ",
               fmt="%3d")
Example #9
def create_stimuli(root, stage, ksize, bitwidth_data, bitwidth_weights):
    # VUnit's CSV import can only handle the integer datatype, so the random
    # fixed-point values have to be converted to their corresponding
    # integer representation.
    a_rand = random_fixed_array((ksize, ) * 2,
                                bitwidth_data,
                                signed=stage != 1)
    # manually extend the bitwidth to implicitly create unsigned values
    sign_bit = 1 if stage == 1 else 0
    a_in = v_to_fixedint(a_rand)
    name = "input_data%s.csv" % ("_stage1" if stage == 1 else str(ksize))
    np.savetxt(join(root, "src", name), a_in, delimiter=", ", fmt="%3d")

    a_weights_rand = random_fixed_array((ksize, ksize), bitwidth_weights)
    a_weights_in = v_to_fixedint(a_weights_rand)
    name = "input_weights%s.csv" % ("_stage1" if stage == 1 else str(ksize))
    np.savetxt(join(root, "src", name),
               a_weights_in,
               delimiter=", ",
               fmt="%3d")

    product = a_rand * a_weights_rand
    additions = 0 if ksize == 1 else int(math.log2(ksize - 1) * 2)
    # TODO: replace for loop
    for value in product.flat:
        # No rounding needed for resize.
        # The range is covered by "additions + 1 + sign_bit"
        value.resize(
            (value.format[0] + additions + 1 + sign_bit, value.format[1]),
            OverflowEnum.excep)
    sum_ = np.sum(product)

    # use atleast_1d to fulfill 1d requirement of savetxt
    a_out = np.atleast_1d(to_fixedint(sum_))
    name = "output%s.csv" % ("_stage1" if stage == 1 else str(ksize))
    np.savetxt(join(root, "src", name), a_out, delimiter=", ", fmt="%d")
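The product/sum above is a multiply-accumulate over the ksize x ksize window, with the fpbinary resize() calls growing the result width (by additions + 1 + sign_bit bits) so the accumulation cannot overflow; OverflowEnum.excep would raise otherwise. The golden value itself is nothing more than a dot product; with plain integers:

import numpy as np

data = np.array([[1, -2], [3, 4]], dtype=np.int64)     # fixed-point integers
weights = np.array([[2, 1], [-1, 2]], dtype=np.int64)
print(np.sum(data * weights))                          # 1*2 - 2*1 - 3*1 + 4*2 = 5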