Example #1
0
def test_layer(layer, data):
    """
    Run a single layer on its recorded input and compare its output
    against the recorded expected activation.
    """
    # (layer class, input key, expected-output key), checked in order
    io_keys = [
        (Layer1, "input", "layer1_activ"),
        (Layer2, "layer1_activ", "layer2_activ"),
        (Layer3, "layer2_activ", "layer3_activ"),
        (Layer4, "layer3_activ", "layer4_activ"),
        (Layer5, "layer4_activ", "output_quant"),
    ]
    for cls, in_key, out_key in io_keys:
        if isinstance(layer, cls):
            x = data[in_key]
            y = data[out_key]
            break
    else:
        raise TypeError("Argument layer must be a Layer")

    # bring the recorded data into the layer's shape and integer domain
    x = np.reshape(x, layer.input_shape)
    y = np.reshape(y, layer.output_shape)
    x = F.quantize_to_int(x, layer.input_scale)
    y = F.quantize_to_int(y, layer.output_scale)

    y_hat = layer(x)
    return _compare_result(y_hat, y)
Example #2
0
def test_model(model, data):
    """
    Run the entire Golden Model layer by layer, comparing each layer's
    output against the recorded activations.
    """
    assert isinstance(model, GoldenModel)

    x = np.reshape(data["input"], model.input_shape)
    x = F.quantize_to_int(x, model.input_scale)

    test_names = [
        "layer2_activ", "layer3_activ", "layer4_activ", "output_quant"
    ]

    result = {}

    for i, (layer, test_name) in enumerate(zip(model.layers, test_names), start=1):
        y_exp = np.reshape(data[test_name], layer.output_shape)
        y_exp = F.quantize_to_int(y_exp, layer.output_scale)

        x = layer(x)

        # the first comparison covers the fused layers 1 and 2
        test_index = "1+2" if i == 1 else i + 1

        result.update(_compare_result(x, y_exp, test_index))

    return result
Example #3
0
def inq_conv2d(net, layer_name, num_levels=255, store_reversed=False):
    """
    Convert an INQConv2d layer into an integer-quantized weight array.

    Parameters
    - net: dict, contains all network parameters
    - layer_name: str, name of the layer inside net
    - num_levels: int, number of quantization levels
    - store_reversed: bool, if True keep the stored (cross-correlation)
      weight order instead of flipping to convolution order

    Returns: (weights, scale_factor)
     - weights: np.array(dtype=int)
     - scale_factor: float
    """
    scale_factor = net["{}.sParam".format(layer_name)][0]
    weights = net["{}.weightFrozen".format(layer_name)]

    if not store_reversed:
        # Torch implements conv2d as a cross-correlation, so flip the last
        # two (spatial) dimensions to obtain true convolution kernels.
        weights = np.flip(weights, (-2, -1))

    # quantize the weights with the layer's scale factor
    weights = F.quantize_to_int(weights, scale_factor, num_levels)

    return weights, scale_factor
Example #4
0
def gen_stimuli(random_input):
    """
    Generate the stimuli (input and expected output) for the test of the
    first layer of the golden model.
    """
    model = GoldenModel(CONFIG_FILENAME, NET_FILENAME, clip_balanced=False)
    if random_input:
        x = np.random.randint(-60, 60, (model.C, model.T))
    else:
        # load the recorded input and quantize it to integers
        x = np.load(INPUT_FILENAME)["input"][0, :, :]
        x = F.quantize_to_int(x, model.input_scale)
    return x, model.layers[0](x)
Example #5
0
def gen_stimuli(random_input, no_div=False, pad_data=False):
    """
    Generate the stimuli (input and expected output) for the test.

    Parameters
    - random_input: bool, if True draw a random integer input instead of
      loading (and quantizing) the recorded one
    - no_div: bool, if True run only layer 1 with the scale between
      layer 1 and layer 2 disabled; otherwise run layers 1 and 2 chained
    - pad_data: bool, if True return a zero-padded copy of the input
      instead of an aligned copy

    Returns: (x, x_pad_or_align, y_exp, y_exp_align)
    """
    if no_div:
        model = GoldenModel(CONFIG_FILENAME,
                            NET_FILENAME,
                            clip_balanced=False,
                            no_scale_between_l1_l2=True)
        layer = model.layers[0]
        x = _gen_input_data(model, random_input, layer.input_scale)
        y_exp = layer(x)
    else:
        model = GoldenModel(CONFIG_FILENAME, NET_FILENAME, clip_balanced=False)
        layer1 = model.layers[0]
        layer2 = model.layers[1]
        x = _gen_input_data(model, random_input, layer1.input_scale)
        y_exp = layer2(layer1(x))

    y_exp_align = align_array(y_exp)

    if pad_data:
        C, T = x.shape
        # 31 zeros in front and 32 behind; the padded length must be a
        # multiple of 4
        T_pad = T + 63
        assert T_pad % 4 == 0
        # BUGFIX: np.int was removed in NumPy 1.24; the builtin int is the
        # equivalent dtype (np.int was a plain alias for it)
        x_pad = np.zeros((C, T_pad), dtype=int)
        x_pad[:, 31:31 + T] = x
        return x, x_pad, y_exp, y_exp_align
    else:
        x_align = align_array(x)
        return x, x_align, y_exp, y_exp_align


def _gen_input_data(model, random_input, input_scale):
    """Draw a random integer input or load and quantize the recorded one."""
    if random_input:
        return np.random.randint(-60, 60, (model.C, model.T))
    x = np.load(INPUT_FILENAME)["input"][0, :, :]
    return F.quantize_to_int(x, input_scale)
Example #6
0
def inq_linear(net, layer_name, num_levels=255):
    """
    Convert an INQLinear layer into integer-quantized arrays.

    Parameters
    - net: dict, contains all network parameters
    - layer_name: str, name of the layer inside net
    - num_levels: int, number of quantization levels

    Returns: (weights, bias, scale_factor)
     - weights: np.array(dtype=int)
     - bias: np.array(dtype=int)
     - scale_factor: float
    """
    scale_factor = net["{}.sParam".format(layer_name)][0]

    # quantize both parameter tensors with the same scale factor
    weights = F.quantize_to_int(net["{}.weightFrozen".format(layer_name)],
                                scale_factor, num_levels)
    bias = F.quantize_to_int(net["{}.bias".format(layer_name)],
                             scale_factor, num_levels)

    return weights, bias, scale_factor
Example #7
0
def gen_stimuli(random_input):
    """
    Generate the stimuli (input and expected output) for the test of
    layer 3 of the golden model.
    """
    model = GoldenModel(CONFIG_FILENAME, NET_FILENAME, clip_balanced=False)
    layer = model.layers[2]

    if random_input:
        x = np.random.randint(-60, 60, (model.F2, model.T // 8))
    else:
        # load the recorded layer-2 activation and quantize it to integers
        x = np.load(INPUT_FILENAME)["layer2_activ"][0, :, 0, :]
        x = F.quantize_to_int(x, layer.input_scale)

    y_exp = layer(x)
    return x, align_array(x), y_exp, align_array(y_exp)
Example #8
0
def gen_stimuli(random_input, flip, reorder_bn):
    """
    Generate the stimuli (input and expected output) for the test of
    layer 2 of the golden model.
    """
    model = GoldenModel(CONFIG_FILENAME, NET_FILENAME, clip_balanced=False, reorder_bn=reorder_bn)
    layer = model.layers[1]

    if random_input:
        x = np.random.randint(-60, 60, (model.F1, model.C, model.T))
    else:
        # load the recorded layer-1 activation and quantize it to integers
        x = np.load(INPUT_FILENAME)["layer1_activ"][0, :, :, :]
        x = F.quantize_to_int(x, layer.input_scale)

    y_exp = layer(x)

    if flip:
        # swap the C and T axes, then zero-pad both trailing dims up to
        # their aligned sizes
        x_align = np.zeros((model.F1, align_array_size(model.T), align_array_size(model.C)), dtype=int)
        x_align[:, :model.T, :model.C] = np.transpose(x, (0, 2, 1))
    else:
        x_align = align_array(x)

    return x, x_align, y_exp, align_array(y_exp)
def gen_input_header(net, net_params, data, output_file):
    """
    Generate the C header file containing the quantized input data, both
    aligned and zero-padded.

    Parameters
    - net: dict, contains all network parameters
    - net_params: dict, network hyper-parameters (quantization levels)
    - data: np.array, raw (unquantized) input data; quantized shape is
      assumed to be (1, C, T) -- see the unpacking of input_quant.shape
    - output_file: str, filename of the header file to write
    """
    # only allow nets with 255 levels
    assert net_params["weightInqNumLevels"] == 255
    assert net_params["actSTENumLevels"] == 255

    # extract and prepare the input data
    scale_factor = convert.ste_quant(net, "quant1")
    input_quant = F.quantize_to_int(data, scale_factor)
    input_quant_align = align_array(input_quant)

    # also generate the padded input vector: 31 zeros in front, 32 behind,
    # so that the padded length is a multiple of 4
    _, C, T = input_quant.shape
    T_pad = T + 63
    assert T_pad % 4 == 0
    # BUGFIX: np.int was removed in NumPy 1.24; the builtin int is the
    # equivalent dtype (np.int was a plain alias for it)
    input_pad = np.zeros((C, T_pad), dtype=int)
    input_pad[:, 31:31 + T] = input_quant[0]

    # generate the header file
    header = HeaderFile(output_file, "__INPUT_H__", with_c=True)
    header.add(HeaderArray("input_data", "int8_t", input_quant_align.ravel()))
    header.add(HeaderArray("input_data_pad", "int8_t", input_pad.ravel()))
    header.write()