Example #1
    def __init__(self, dtype=np.float32, reshape_torch=False):
        """
        Initializes a new LeNet inspired network
        Args:
            dtype: Datatype to be used
            reshape_torch: set this, if the training parameters came from Pytorch which requires a custom reshape
        """
        self.reshape_torch = reshape_torch
        r1 = EggNet.ReshapeLayer(newshape=[-1, 28, 28, 1])
        cn1 = EggNet.Conv2dLayer(in_channels=1, out_channels=16, kernel_size=3, activation='relu',
                                 dtype=dtype)  # [? 28 28 16]
        mp1 = EggNet.MaxPool2dLayer(size=2)  # [? 14 14 16]
        cn2 = EggNet.Conv2dLayer(in_channels=16, out_channels=32, kernel_size=3, activation='relu')  # [? 14 14 32]
        mp2 = EggNet.MaxPool2dLayer(size=2)  # [?  7  7 32]
        r2 = EggNet.FlattenLayer()
        fc1 = EggNet.FullyConnectedLayer(input_size=32 * 7 * 7, output_size=32, activation='relu', dtype=dtype)
        fc2 = EggNet.FullyConnectedLayer(input_size=32, output_size=10, activation='softmax')

        # Store a reference to each layer
        self.r1 = r1
        self.cn1 = cn1
        self.mp1 = mp1
        self.cn2 = cn2
        self.mp2 = mp2
        self.r2 = r2
        self.fc1 = fc1
        self.fc2 = fc2
        self.lenet_layers = [r1, cn1, mp1, cn2, mp2, r2, fc1, fc2]

        super(LeNet, self).__init__(self.lenet_layers)
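
A minimal usage sketch, assuming the LeNet class above and its EggNet base class are importable; the base-class forward API is not shown in the snippet, so the call is left commented:

import numpy as np

net = LeNet(dtype=np.float32, reshape_torch=False)
# A batch of four flattened MNIST-sized images, matching the ReshapeLayer
x = np.random.rand(4, 28 * 28).astype(np.float32)
# y = net(x)  # exact forward call depends on the EggNet base class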
Example #2
    def __init__(self, weights, options, shifts, real_quant=False):
        # Check input

        r1 = EggNet.ReshapeLayer(newshape=[-1, 28, 28, 1])
        cn1 = EggNet.Conv2dLayer(in_channels=1, out_channels=16, kernel_size=3, activation='relu',
                                 dtype=np.float32)  # [? 28 28 16]
        mp1 = EggNet.MaxPool2dLayer(size=2)  # [? 14 14 16]
        cn2 = EggNet.Conv2dLayer(in_channels=16, out_channels=32, kernel_size=3, activation='relu')  # [? 14 14 32]
        mp2 = EggNet.MaxPool2dLayer(size=2)  # [?  7  7 32]
        r2 = EggNet.FlattenLayer()
        fc1 = EggNet.FullyConnectedLayer(input_size=32 * 7 * 7, output_size=32, activation='relu', dtype=np.float32)

        if real_quant:
            fc2 = EggNet.FullyConnectedLayer(input_size=32, output_size=10, activation=None)
            rs1 = EggNet.SimpleShiftLayer(shift=shifts[0], a_min=options['out_min'][0], a_max=options['out_max'][0])
            rs2 = EggNet.SimpleShiftLayer(shift=shifts[1], a_min=options['out_min'][1], a_max=options['out_max'][1])
            rs3 = EggNet.SimpleShiftLayer(shift=shifts[2], a_min=options['out_min'][2], a_max=options['out_max'][2])
            rs4 = EggNet.SimpleShiftLayer(shift=shifts[3], a_min=options['out_min'][3], a_max=options['out_max'][3])
        else:
            fc2 = EggNet.FullyConnectedLayer(input_size=32, output_size=10, activation='softmax')
            # scales = 2.0 ** (-shifts)
            scales = np.ones(shape=(4,))  # unit scales: the ScaleLayers then only apply clipping
            rs1 = EggNet.ScaleLayer(scale=scales[0], a_min=options['out_min_f'][0], a_max=options['out_max_f'][0])
            rs2 = EggNet.ScaleLayer(scale=scales[1], a_min=options['out_min_f'][1], a_max=options['out_max_f'][1])
            rs3 = EggNet.ScaleLayer(scale=scales[2], a_min=options['out_min_f'][2], a_max=options['out_max_f'][2])
            rs4 = EggNet.ScaleLayer(scale=scales[3], a_min=options['out_min_f'][3], a_max=options['out_max_f'][3])
        self.rs1 = rs1
        self.rs2 = rs2
        self.rs3 = rs3
        self.rs4 = rs4

        # Store a reference to each layer
        self.r1 = r1
        self.cn1 = cn1
        self.mp1 = mp1
        self.cn2 = cn2
        self.mp2 = mp2
        self.r2 = r2
        self.fc1 = fc1
        self.fc2 = fc2

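        # Load the provided pretrained parameters into the layers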
        self.cn1.weights = weights['cn1.k']
        self.cn1.bias = weights['cn1.b']
        self.cn2.weights = weights['cn2.k']
        self.cn2.bias = weights['cn2.b']
        self.fc1.weights = weights['fc1.w']
        self.fc1.bias = weights['fc1.b']
        self.fc2.weights = weights['fc2.w']
        self.fc2.bias = weights['fc2.b']

        self.lenet_layers = [r1,
                             cn1, mp1, rs1,
                             cn2, mp2, rs2,
                             r2,
                             fc1, rs3,
                             fc2, rs4]

        super(FpiLeNet, self).__init__(self.lenet_layers)
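
A sketch of how this constructor might be called. The weight keys follow the assignment block above; the kernel layout (HWIO) and the contents of options and shifts are assumptions inferred from how they are indexed:

import numpy as np

# Placeholder arrays standing in for trained (quantized) parameters
weights = {
    'cn1.k': np.zeros((3, 3, 1, 16)), 'cn1.b': np.zeros(16),
    'cn2.k': np.zeros((3, 3, 16, 32)), 'cn2.b': np.zeros(32),
    'fc1.w': np.zeros((32 * 7 * 7, 32)), 'fc1.b': np.zeros(32),
    'fc2.w': np.zeros((32, 10)), 'fc2.b': np.zeros(10),
}
# One entry per shift/scale layer, indexed [0]..[3] in the constructor
options = {
    'out_min': [-128] * 4, 'out_max': [127] * 4,      # integer clip limits
    'out_min_f': [-1.0] * 4, 'out_max_f': [1.0] * 4,  # float clip limits
}
shifts = [0, 0, 0, 0]

net = FpiLeNet(weights, options=options, shifts=shifts, real_quant=True)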
Example #3
def _get_layers(weights_dict, target_bits, fraction_bits):
    assert target_bits > fraction_bits

    value_bits = target_bits - fraction_bits

    a_max = 2 ** (value_bits - 1) - 1
    a_min = -2 ** (value_bits - 1)
    scale = 1 / 2 ** value_bits
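    # a_min / a_max span the range of a signed value_bits-bit integer;
    # dividing by scale (i.e. multiplying by 2 ** value_bits) maps the float
    # parameters onto that integer grid before clipping below.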

    c1_k1 = weights_dict['conv1_k']
    c1_b1 = weights_dict['conv1_b']
    c2_k2 = weights_dict['conv2_k']
    c2_b2 = weights_dict['conv2_b']

    fc1_w = weights_dict['fc1_w']
    fc1_b = weights_dict['fc1_b']
    fc2_w = weights_dict['fc2_w']
    fc2_b = weights_dict['fc2_b']

    # Derive the fully connected layer sizes from the trained weight shapes
    ni3, no3 = fc1_w.shape
    ni4, no4 = fc2_w.shape

    qk1 = np.clip(c1_k1 / scale, a_max=a_max, a_min=a_min).astype(np.int8)
    qb1 = np.clip(c1_b1 / scale, a_max=a_max, a_min=a_min).astype(np.int8)
    qk2 = np.clip(c2_k2 / scale, a_max=a_max, a_min=a_min).astype(np.int8)
    qb2 = np.clip(c2_b2 / scale, a_max=a_max, a_min=a_min).astype(np.int8)

    qw3 = np.clip(fc1_w / scale, a_max=a_max, a_min=a_min).astype(np.int8)
    qb3 = np.clip(fc1_b / scale, a_max=a_max, a_min=a_min).astype(np.int8)
    qw4 = np.clip(fc2_w / scale, a_max=a_max, a_min=a_min).astype(np.int8)
    qb4 = np.clip(fc2_b / scale, a_max=a_max, a_min=a_min).astype(np.int8)

    # A product of two fixed-point values with fraction_bits fractional bits
    # carries twice as many fractional bits
    dfrac_bits = 2 * fraction_bits

    layers = [
        EggNet.ReshapeLayer(newshape=(-1, 28, 28, 1)),
        EggNet.Conv2dLayer(in_channels=1, out_channels=3, kernel_size=3, kernel_init_weights=qk1,
                           bias_init_weights=qb1, use_bias=True),
        EggNet.ShiftLayer(target_bits=target_bits, target_frac_bits=fraction_bits, source_bits=16,
                          source_frac_bits=dfrac_bits),
        EggNet.ReluActivationLayer(),
        EggNet.MaxPool2dLayer(),
        EggNet.Conv2dLayer(in_channels=3, out_channels=9, kernel_size=3, kernel_init_weights=qk2,
                           bias_init_weights=qb2, use_bias=True),
        EggNet.ShiftLayer(target_bits=target_bits, target_frac_bits=fraction_bits, source_bits=16,
                          source_frac_bits=dfrac_bits),
        EggNet.ReluActivationLayer(),
        EggNet.MaxPool2dLayer(),
        EggNet.FlattenLayer(),
        EggNet.BreakpointLayer(enabled=False),
        EggNet.FullyConnectedLayer(input_size=ni3, output_size=no3, dtype=np.int16, weights=qw3, bias=qb3),
        EggNet.ShiftLayer(target_bits=target_bits, target_frac_bits=fraction_bits, source_bits=16,
                          source_frac_bits=dfrac_bits),
        EggNet.ReluActivationLayer(),
        EggNet.FullyConnectedLayer(input_size=ni4, output_size=no4, dtype=np.int16, weights=qw4, bias=qb4),
        EggNet.ShiftLayer(target_bits=target_bits, target_frac_bits=fraction_bits, source_bits=16,
                          source_frac_bits=dfrac_bits),
        EggNet.SoftmaxLayer()
    ]

    return layers
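
A sketch of calling _get_layers. The shapes are assumptions consistent with the layers above (3 and 9 output channels, 3x3 kernels; two max-pools reduce 28x28 to 7x7, so fc1 sees 9 * 7 * 7 inputs), and the bit widths are example values that satisfy the assert:

import numpy as np

# Placeholder float parameters in an assumed HWIO kernel layout
weights_dict = {
    'conv1_k': np.random.randn(3, 3, 1, 3), 'conv1_b': np.random.randn(3),
    'conv2_k': np.random.randn(3, 3, 3, 9), 'conv2_b': np.random.randn(9),
    'fc1_w': np.random.randn(9 * 7 * 7, 32), 'fc1_b': np.random.randn(32),
    'fc2_w': np.random.randn(32, 10), 'fc2_b': np.random.randn(10),
}

# 8-bit fixed point with 4 fractional bits (example values only)
layers = _get_layers(weights_dict, target_bits=8, fraction_bits=4)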