def __init__(self, dtype=np.float32, reshape_torch=False):
    """
    Initializes a new LeNet-inspired network.

    Args:
        dtype: Datatype to be used
        reshape_torch: Set this if the training parameters came from PyTorch,
            which requires a custom reshape
    """
    self.reshape_torch = reshape_torch
    r1 = EggNet.ReshapeLayer(newshape=[-1, 28, 28, 1])
    cn1 = EggNet.Conv2dLayer(in_channels=1, out_channels=16, kernel_size=3,
                             activation='relu', dtype=dtype)     # [? 28 28 16]
    mp1 = EggNet.MaxPool2dLayer(size=2)                          # [? 14 14 16]
    cn2 = EggNet.Conv2dLayer(in_channels=16, out_channels=32, kernel_size=3,
                             activation='relu')                  # [? 14 14 32]
    mp2 = EggNet.MaxPool2dLayer(size=2)                          # [?  7  7 32]
    r2 = EggNet.FlattenLayer()
    fc1 = EggNet.FullyConnectedLayer(input_size=32 * 7 * 7, output_size=32,
                                     activation='relu', dtype=dtype)
    fc2 = EggNet.FullyConnectedLayer(input_size=32, output_size=10, activation='softmax')

    # Store a reference to each layer
    self.r1 = r1
    self.cn1 = cn1
    self.mp1 = mp1
    self.cn2 = cn2
    self.mp2 = mp2
    self.r2 = r2
    self.fc1 = fc1
    self.fc2 = fc2

    self.lenet_layers = [r1, cn1, mp1, cn2, mp2, r2, fc1, fc2]
    super(LeNet, self).__init__(self.lenet_layers)
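# Usage sketch (not from the original source): builds the float LeNet defined
# above and runs a forward pass on random data. It assumes the network inherits
# forward() from EggNet.Network, as the tests further below do.
import numpy as np
import EggNet

net = EggNet.LeNet()
x = np.random.rand(4, 28, 28)   # four dummy MNIST-sized inputs
y = net.forward(x)              # expected shape: (4, 10) class scores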
def test_conv(self):
    cl = EggNet.Conv2dLayer(in_channels=1, out_channels=3, kernel_size=5)
    test_img = np.random.rand(4, 28, 28, 1)  # create 4 test images
    cl_out = cl(test_img)

    # Check shape
    self.assertEqual(cl_out.shape, (4, 28, 28, 3))
def backprop(self, x, y_):
    y, zs = self.forward_intermediate(x)
    loss = EggNet.mean_squared_error(y, y_)
    delta = y - y_
    deltas = []
    # Propagate the error from the last layer back to the first one
    for layer in reversed(self.layers):
        delta = layer.backprop(delta)
        deltas.append(delta)
        layer.update_weights(delta)
    return loss
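# Hedged sketch (not from the original source): a minimal training loop around
# backprop() above, which updates the weights in place. x_train is assumed to
# hold normalized images and y_train one-hot labels matching the network output.
import numpy as np

def train(net, x_train, y_train, epochs=1, batch_size=32):
    n_samples = x_train.shape[0]
    for _ in range(epochs):
        order = np.random.permutation(n_samples)          # reshuffle every epoch
        for start in range(0, n_samples, batch_size):
            batch = order[start:start + batch_size]
            net.backprop(x_train[batch], y_train[batch])  # forward pass + weight update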
def init_network_from_weights(qweights, from_torch):
    our_net = EggNet.LeNet(reshape_torch=from_torch)
    our_net.cn1.weights = qweights['cn1.k']
    our_net.cn1.bias = qweights['cn1.b']
    our_net.cn2.weights = qweights['cn2.k']
    our_net.cn2.bias = qweights['cn2.b']
    our_net.fc1.weights = qweights['fc1.w']
    our_net.fc1.bias = qweights['fc1.b']
    our_net.fc2.weights = qweights['fc2.w']
    our_net.fc2.bias = qweights['fc2.b']
    return our_net
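# Hedged sketch (not from the original source): the qweights dict is assumed to
# come from a NumPy archive (np.savez) whose keys match the 'cn1.k', 'cn1.b', ...
# names used above; from_torch selects the PyTorch-compatible reshape in LeNet.
import numpy as np

def load_network_from_npz(npz_path, from_torch=False):
    with np.load(npz_path) as data:
        qweights = {key: data[key] for key in data.files}
    return init_network_from_weights(qweights, from_torch)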
def test_blur(self):
    k = EggNet.make_gauss_kernel()
    cl = EggNet.Conv2dLayer(in_channels=1, out_channels=1, kernel_size=5)
    loader = MnistDataDownloader("../../test/MNIST/")
    path_img, path_lbl = loader.get_path(DataSetType.TRAIN)
    reader = MnistDataReader(path_img, path_lbl)
    for lbl, img in reader.get_next(4):
        img = img.astype(np.float32) / 255.0
        img = np.reshape(img, newshape=[-1, 28, 28, 1])
        k = np.reshape(k, newshape=[k.shape[0], k.shape[1], 1, 1])
        cl.kernel = k
        img_out = cl(img)

        # Check the dimensions
        self.assertEqual(img_out.shape, (4, 28, 28, 1))

        # Uncomment to see the image
        # img_out = np.reshape(img_out, newshape=[1, 4 * 28, 28, 1])
        # img_out = np.squeeze(img_out)
        # plt.imshow(img_out, cmap='gray', vmin=0.0, vmax=1.0)
        # plt.show()
        break
def test_tf_compare(self):
    test_img = np.random.rand(10, 128, 128, 3)  # create 10 test images
    y_tf = keras.backend.numpy_backend.pool2d(test_img, pool_size=(2, 2), strides=(2, 2),
                                              padding=None, data_format='channels_last',
                                              pool_mode='max')
    # y_tf = max_pool2d(test_img, ksize=2, strides=2, padding='same', data_format='NHWC')
    y = EggNet.pooling_max(data_in=test_img, pool_size=2, stride=2)

    self.assertEqual(y_tf.shape, y.shape)
    for v1, v2 in zip(y_tf.flatten(), y.flatten()):
        self.assertAlmostEqual(v1, v2, delta=0.01)
def testDataShapeCheck(self):
    # Mismatched layer sizes (10 -> 20) must raise a ValueError
    layers = [
        EggNet.FullyConnectedLayer(input_size=1, output_size=10),
        EggNet.FullyConnectedLayer(input_size=20, output_size=20),
        EggNet.FullyConnectedLayer(input_size=20, output_size=10),
        EggNet.FullyConnectedLayer(input_size=10, output_size=1),
    ]
    self.assertRaises(ValueError, check_layers, layers)

    # Consistent layer sizes must pass the check
    layers = [
        EggNet.FullyConnectedLayer(input_size=1, output_size=10),
        EggNet.FullyConnectedLayer(input_size=10, output_size=20),
        EggNet.FullyConnectedLayer(input_size=20, output_size=10),
        EggNet.FullyConnectedLayer(input_size=10, output_size=1),
    ]
    try:
        check_layers(layers)
    except ValueError:
        self.fail(msg="Layers check failed")
def test_tf_compare_zero(self):
    kernel = np.zeros(shape=(5, 5, 8, 16))
    b = np.zeros(16)
    x = np.random.rand(5, 28, 28, 8)  # create 5 test images
    y_tf = keras.backend.numpy_backend.conv2d(x, kernel, padding='same', data_format='channels_last')

    cl = EggNet.Conv2dLayer(in_channels=8, out_channels=16, kernel_size=5)
    cl.kernel = kernel
    cl.b = b
    y = cl(x)

    self.assertEqual(y_tf.shape, y.shape)
    self.assertTrue(np.allclose(y_tf, y))
    for v1, v2 in zip(y_tf.flatten(), y.flatten()):
        self.assertAlmostEqual(v1, 0, delta=0.0001)
        self.assertAlmostEqual(v2, 0, delta=0.0001)
def test_tf_compare3(self):
    # kernel = test_kernel_gauss(size=5, sigma=1.6)
    # kernel = kernel[..., np.newaxis, np.newaxis]
    kernel = 2.0 * (np.random.rand(5, 5, 8, 16) - 0.5)
    b = np.zeros(16)
    x = np.random.rand(30, 28, 28, 8)  # create 30 test images
    y_tf = keras.backend.numpy_backend.conv2d(x, kernel, padding='same', data_format='channels_last')
    y_tf = keras.backend.numpy_backend.relu(y_tf)

    cl = EggNet.Conv2dLayer(in_channels=8, out_channels=16, kernel_size=5, activation='relu')
    cl.kernel = kernel
    cl.b = b
    y = cl(x)

    self.assertEqual(y_tf.shape, y.shape)
    self.assertTrue(np.allclose(y_tf, y))
    for v1, v2 in zip(y_tf.flatten(), y.flatten()):
        self.assertAlmostEqual(v1, v2, delta=0.001)
def test_tf_compare1(self):
    # kernel = test_kernel_gauss(size=5, sigma=1.6)
    # kernel = kernel[..., np.newaxis, np.newaxis]
    kernel = np.random.rand(5, 5, 1, 3)
    # kernel = np.zeros(shape=(5, 5, 1, 3))
    b = np.zeros(3)
    x = np.random.rand(5, 28, 28, 1)  # create 5 test images
    y_tf = keras.backend.numpy_backend.conv2d(x, kernel, padding='same', data_format='channels_last')

    cl = EggNet.Conv2dLayer(in_channels=1, out_channels=3, kernel_size=5)
    cl.kernel = kernel
    cl.b = b
    y = cl(x)

    self.assertEqual(y_tf.shape, y.shape)
    self.assertTrue(np.allclose(y_tf, y))
    for v1, v2 in zip(y_tf.flatten(), y.flatten()):
        # self.assertAlmostEqual(v1, 0, delta=0.0001)
        # self.assertAlmostEqual(v2, 0, delta=0.0001)
        self.assertAlmostEqual(v1, v2, delta=0.001)
def test_forward_prop(self):
    layers = [
        EggNet.ReshapeLayer(newshape=[-1, 28, 28, 1]),
        EggNet.Conv2dLayer(in_channels=1, out_channels=16, kernel_size=3),   # [? 28 28 16]
        EggNet.MaxPool2dLayer(size=2),                                       # [? 14 14 16]
        EggNet.Conv2dLayer(in_channels=16, out_channels=32, kernel_size=3),  # [? 14 14 32]
        EggNet.MaxPool2dLayer(size=2),                                       # [?  7  7 32]
        # ConvLayer(in_channels=32, out_channels=64, kernel_size=3),         # [?  7  7 64]
        # MaxPool2dLayer(size=2),
        EggNet.ReshapeLayer(newshape=[-1, 32 * 7 * 7]),
        EggNet.FullyConnectedLayer(input_size=32 * 7 * 7, output_size=64),
        EggNet.FullyConnectedLayer(input_size=64, output_size=10),
    ]
    n = Network(layers)

    # Create test data and check that the forward pass yields one 10-class
    # output per input image
    x = np.random.rand(10, 28, 28)
    y = n.forward(x)
    self.assertEqual(y.shape, (10, 10))
def test_pool(self):
    pl = EggNet.MaxPool2dLayer(size=2)
    img = np.array([
        [1, 2, 1, 1],
        [1, 1, 3, 1],
        [1, 4, 1, 1],
        [4.3, 1, 1, 5],
    ])
    img = np.reshape(img, newshape=(1, 4, 4, 1))
    exp_img = np.array([
        [2, 3],
        [4.3, 5],
    ])
    exp_img = np.reshape(exp_img, newshape=(1, 2, 2, 1))
    p_img = pl(img)

    self.assertEqual(p_img.shape, (1, 2, 2, 1))
    self.assertTrue(np.array_equal(p_img, exp_img))
def test_keras_compare(self):
    mnist = tf.keras.datasets.mnist
    IMG_HEIGHT = 28
    IMG_WIDTH = 28
    (x_train, y_train), (x_test, y_test) = mnist.load_data()
    x_train, x_test = x_train / 255.0, x_test / 255.0
    x = x_train[0:10, :, :]
    xr = np.reshape(x, newshape=(-1, IMG_HEIGHT, IMG_WIDTH, 1))

    cl = EggNet.Conv2dLayer(in_channels=1, out_channels=16, kernel_size=3, activation='relu')
    model = tf.keras.models.Sequential([
        keras.layers.Reshape((IMG_HEIGHT, IMG_WIDTH, 1), input_shape=(IMG_HEIGHT, IMG_WIDTH)),
        keras.layers.Conv2D(16, 3, padding='same', activation='relu'),
    ])
    keras_conv_layer = model.layers[1]
    model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])

    y_keras = model.predict(x)
    # y_keras = y_keras.numpy()

    cl.kernel = keras_conv_layer.kernel.numpy()
    cl.b = keras_conv_layer.bias.numpy()
    y = cl(xr)

    err = np.abs(y - y_keras)
    err_flat = err.flatten()
    ix = np.array(indices(err_flat, lambda x: x > 0.5))
    subs = ind2sub(ix, err.shape)
    eq = np.allclose(y, y_keras, atol=0.1)
    self.assertTrue(eq)
def test_tensorflow_parameter_0(self):
    r1 = EggNet.ReshapeLayer(newshape=[-1, 28, 28, 1])
    cn1 = EggNet.Conv2dLayer(in_channels=1, out_channels=16, kernel_size=3, activation='relu')  # [? 28 28 16]

    checkpoint_path = "test/training_1/cp.ckpt"
    checkpoint_dir = os.path.abspath(os.path.dirname(checkpoint_path))
    if not os.path.exists(checkpoint_dir):
        raise RuntimeError("There is no trained model data!")

    # Reload the model from the 2 files we saved
    with open(os.path.join(checkpoint_dir, "model_config.json")) as json_file:
        json_config = json_file.read()
    model = keras.models.model_from_json(json_config)
    model.load_weights(os.path.join(checkpoint_dir, "weights.h5"))

    Ws = model.get_weights()

    # See Keras Documentation
    # https://www.tensorflow.org/api_docs/python/tf/keras/layers/Conv2D
    #
    # Default ordering of weights for Conv: (kernel_height, kernel_width, in_channels, out_channels)
    # Default ordering of weights for Dense: (input_size, output_size)
    self.assertEqual(cn1.kernel.shape, Ws[0].shape)
    self.assertEqual(cn1.b.shape, Ws[1].shape)

    # Assign values
    cn1.kernel = Ws[0]
    cn1.b = Ws[1]

    layers = [r1, cn1]
    interesting_layers = [1]  # don't care about reshape layers

    n = EggNet.Network(layers)

    loader = MnistDataDownloader("../../test/MNIST/")
    path_img, path_lbl = loader.get_path(DataSetType.TRAIN)
    reader = MnistDataReader(path_img, path_lbl)
    for lbls, imgs in reader.get_next(10):
        imgs = imgs.astype(np.float32) / 255.0
        imgs_r = np.reshape(imgs, newshape=[-1, 28, 28, 1])

        # Check the tensorflow model
        y_keras = model.predict(imgs)

        # Keras Model Debug
        inp = model.input                                          # input placeholder
        outputs = [layer.output for layer in model.layers]         # all layer outputs
        outputs = [outputs[i] for i in (1, 2, 3, 4, 6, 8)]         # remove dropout, reshape
        functors = [K.function([inp], [out]) for out in outputs]   # evaluation functions
        layer_outs = [func([imgs, 1.]) for func in functors]
        # print(layer_outs)

        # Check the results of the own made NN
        y, zs = n.forward_intermediate(imgs_r)
        zs = [zs[i] for i in interesting_layers]  # remove reshape layers
        eps = 0.1
        index = 0
        for l_keras_out, l_out in zip(layer_outs, zs):
            err = np.abs((l_keras_out - l_out).flatten())
            # print(l_keras_out - l_out)
            # err_image = 1.0 * (np.abs(l_keras_out - l_out) > eps)
            # err_image = np.squeeze(err_image[0, :, :, 0])
            # plt.imshow(err_image, vmin=0.0, vmax=1.0, cmap='gray')
            # plt.show()

            right_indices = indices(err < eps, lambda b: b)
            false_indices = indices(err > eps, lambda b: b)
            wrong_values = err[false_indices]
            # print(wrong_values)

            if not np.all(right_indices):
                print("error in layer ", index)
            index += 1

        lbls_pred_keras = y_keras.argmax(axis=1)
        lbls_pred = y.argmax(axis=1)
        print("Original: ", lbls.reshape(-1))
        print("Keras: ", lbls_pred_keras.reshape(-1))
        print("Our Model: ", lbls_pred.reshape(-1))
        break
def test_tensorflow_parameter(self):
    r1 = EggNet.ReshapeLayer(newshape=[-1, 28, 28, 1])
    cn1 = EggNet.Conv2dLayer(in_channels=1, out_channels=16, kernel_size=3, activation='relu')   # [? 28 28 16]
    mp1 = EggNet.MaxPool2dLayer(size=2)                                                          # [? 14 14 16]
    cn2 = EggNet.Conv2dLayer(in_channels=16, out_channels=32, kernel_size=3, activation='relu')  # [? 14 14 32]
    mp2 = EggNet.MaxPool2dLayer(size=2)                                                          # [?  7  7 32]
    r2 = EggNet.ReshapeLayer(newshape=[-1, 32 * 7 * 7])
    fc1 = EggNet.FullyConnectedLayer(input_size=32 * 7 * 7, output_size=64, activation='relu')
    fc2 = EggNet.FullyConnectedLayer(input_size=64, output_size=10, activation='softmax')

    checkpoint_path = "test/training_1/cp.ckpt"
    checkpoint_dir = os.path.abspath(os.path.dirname(checkpoint_path))
    if not os.path.exists(checkpoint_dir):
        raise RuntimeError("There is no trained model data!")

    # Reload the model from the 2 files we saved
    with open(os.path.join(checkpoint_dir, "model_config.json")) as json_file:
        json_config = json_file.read()
    model = keras.models.model_from_json(json_config)
    model.load_weights(os.path.join(checkpoint_dir, "weights.h5"))

    Ws = model.get_weights()

    # K0 = Ws[0]
    # plt.imshow(K0[:, :, 1, 1], vmin=0.0, vmax=1.0, cmap='gray')
    # plt.show()

    # See Keras Documentation
    # https://www.tensorflow.org/api_docs/python/tf/keras/layers/Conv2D
    #
    # Default ordering of weights for Conv: (kernel_height, kernel_width, in_channels, out_channels)
    # Default ordering of weights for Dense: (input_size, output_size)
    self.assertEqual(cn1.kernel.shape, Ws[0].shape)
    self.assertEqual(cn1.b.shape, Ws[1].shape)
    self.assertEqual(cn2.kernel.shape, Ws[2].shape)
    self.assertEqual(cn2.b.shape, Ws[3].shape)
    self.assertEqual(fc1.W.shape, Ws[4].shape)
    self.assertEqual(fc1.b.shape, Ws[5].shape)
    self.assertEqual(fc2.W.shape, Ws[6].shape)
    self.assertEqual(fc2.b.shape, Ws[7].shape)

    # Assign values
    cn1.kernel = Ws[0]
    cn1.b = Ws[1]
    cn2.kernel = Ws[2]
    cn2.b = Ws[3]
    fc1.W = Ws[4]
    fc1.b = Ws[5]
    fc2.W = Ws[6]
    fc2.b = Ws[7]

    layers = [r1, cn1, mp1, cn2, mp2, r2, fc1, fc2]
    interesting_layers = [1, 2, 3, 4, 6, 7]  # don't care about reshape layers

    net = EggNet.Network(layers)

    loader = MnistDataDownloader("../../test/MNIST/")
    path_img, path_lbl = loader.get_path(DataSetType.TRAIN)
    reader = MnistDataReader(path_img, path_lbl)
    for lbls, imgs in reader.get_next(20):
        imgs = imgs.astype(np.float32) / 255.0
        imgs_r = np.reshape(imgs, newshape=[-1, 28, 28, 1])

        # Check the tensorflow model
        y_keras = model.predict(imgs)

        # Keras Model Debug
        inp = model.input                                          # input placeholder
        outputs = [layer.output for layer in model.layers]         # all layer outputs
        outputs = [outputs[i] for i in (1, 2, 3, 4, 6, 8)]         # remove dropout, reshape
        functors = [K.function([inp], [out]) for out in outputs]   # evaluation functions
        layer_outs = [func([imgs, 1.]) for func in functors]
        # print(layer_outs)

        # Check the results of the own made NN
        y, zs = net.forward_intermediate(imgs_r)
        zs = [zs[i] for i in interesting_layers]  # remove reshape layers
        eps = 0.1
        index = 0
        for l_keras_out, l_out in zip(layer_outs, zs):
            l_keras_out = l_keras_out[0]  # K.function returns a single-element list
            err = np.abs(l_keras_out - l_out)
            # err_subs = ind2sub(indices(err.flatten(), lambda x: x > 1), l_out.shape)
            # self.assertTrue(np.allclose(l_out, l_keras_out))
            # print(l_keras_out - l_out)

            # err_image = 1.0 * (np.abs(l_keras_out - l_out) > eps)

            # Test: Shift l_out
            # l_out[:, 0:-1, 0:-1, :] = l_out[:, 1:, 1:, :]
            # err_image = l_keras_out - l_out
            # err_image = np.reshape(err_image, newshape=(1, 28, 28, 1))
            # err_image = np.squeeze(err_image[0, :, :, 0])
            # err_image = np.concatenate([np.squeeze(err_image[0, :, :, i]) for i in range(4)], axis=1)
            # img_keras = np.concatenate([np.squeeze(l_keras_out[0, :, :, i]) for i in range(4)], axis=1)
            # img_nn = np.concatenate([np.squeeze(l_out[0, :, :, i]) for i in range(4)], axis=1)
            # img = np.concatenate([img_nn, img_keras, err_image], axis=0)
            #
            # fig, ax = plt.subplots()
            # _im = ax.imshow(img, cmap='gray')
            # ax.set_title('Computation Layer {}'.format(index))
            # ax.set_yticks([14, 14 + 28, 14 + 2 * 28])
            # ax.set_yticklabels(['Our NN', 'Keras', 'Difference'])
            # fig.colorbar(_im)
            # plt.show()

            # right_indices = indices(err < eps, lambda b: b)
            # false_indices = indices(err > eps, lambda b: b)
            # wrong_values = err[false_indices]
            # print(wrong_values)

            if not np.allclose(l_out, l_keras_out, atol=0.0001):
                print("error in layer ", index)
                breakpoint()
            index += 1

        lbls_pred_keras = y_keras.argmax(axis=1)
        lbls_pred = y.argmax(axis=1)
        print("Original: ", lbls.reshape(-1))
        print("Keras: ", lbls_pred_keras.reshape(-1))
        print("Our Model: ", lbls_pred.reshape(-1))
        break
def fpi_conv2D(data_in, kernel_in, dtype_out=np.int8, stride=1):
    # Note: stride is currently not forwarded to nn.conv2d
    return nn.conv2d(
        data_in=data_in.astype(dtype_out),
        kernel=kernel_in.astype(dtype_out),
    )
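# Illustration in plain NumPy (not the EggNet API): after a fixed-point
# convolution the accumulator is wider than the operands, so the result is
# shifted right and clipped back to the narrow output type. The int32
# accumulator and the shift value below are example assumptions.
import numpy as np

def requantize(acc, shift, out_dtype=np.int8):
    info = np.iinfo(out_dtype)
    shifted = acc >> shift                                # drop the extra fraction bits
    return np.clip(shifted, info.min, info.max).astype(out_dtype)

# Example: products of int8-range values accumulated in int32, shifted back to int8
a = np.random.randint(-128, 128, size=(4, 28, 28, 16)).astype(np.int32)
b = np.random.randint(-128, 128, size=(4, 28, 28, 16)).astype(np.int32)
out = requantize(a * b, shift=7)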
def can_kernel_overflow(qkernel, kernel_bits, kernel_frac_bits, qinputs, input_bits, input_frac_bits,
                        output_bits, output_frac_bits):
    qout = nn.conv2d(qinputs, qkernel)
    # Assumed completion -- the original body ended after the convolution:
    # flag an overflow if the accumulator, rescaled to the output fixed-point
    # format, leaves the signed range of output_bits.
    shift = (input_frac_bits + kernel_frac_bits) - output_frac_bits
    qout = qout / 2.0 ** shift
    return bool(np.any(qout > 2 ** (output_bits - 1) - 1) or np.any(qout < -2 ** (output_bits - 1)))
def init_quant_network_from_weights(qweights, shift, options):
    our_net = EggNet.FpiLeNet(qweights, shifts=shift, options=options, real_quant=True)
    return our_net
def _get_layers(weights_dict, target_bits, fraction_bits):
    assert target_bits > fraction_bits
    value_bits = target_bits - fraction_bits

    a_max = 2 ** (value_bits - 1) - 1
    a_min = -2 ** (value_bits - 1)
    scale = 1 / 2 ** value_bits

    c1_k1 = weights_dict['conv1_k']
    c1_b1 = weights_dict['conv1_b']
    c2_k2 = weights_dict['conv2_k']
    c2_b2 = weights_dict['conv2_b']
    fc1_w = weights_dict['fc1_w']
    fc1_b = weights_dict['fc1_b']
    fc2_w = weights_dict['fc2_w']
    fc2_b = weights_dict['fc2_b']

    # ni3 = nn_lenet_f64.fc1.input_size
    # no3 = nn_lenet_f64.fc1.output_size
    # ni4 = nn_lenet_f64.fc2.input_size
    # no4 = nn_lenet_f64.fc2.output_size
    ni3, no3 = fc1_w.shape
    ni4, no4 = fc2_w.shape

    qk1 = np.clip(c1_k1 / scale, a_max=a_max, a_min=a_min).astype(np.int8)
    qb1 = np.clip(c1_b1 / scale, a_max=a_max, a_min=a_min).astype(np.int8)
    qk2 = np.clip(c2_k2 / scale, a_max=a_max, a_min=a_min).astype(np.int8)
    qb2 = np.clip(c2_b2 / scale, a_max=a_max, a_min=a_min).astype(np.int8)
    qw3 = np.clip(fc1_w / scale, a_max=a_max, a_min=a_min).astype(np.int8)
    qb3 = np.clip(fc1_b / scale, a_max=a_max, a_min=a_min).astype(np.int8)
    qw4 = np.clip(fc2_w / scale, a_max=a_max, a_min=a_min).astype(np.int8)
    qb4 = np.clip(fc2_b / scale, a_max=a_max, a_min=a_min).astype(np.int8)

    dfrac_bits = 2 * fraction_bits

    layers = [
        EggNet.ReshapeLayer(newshape=(-1, 28, 28, 1)),
        EggNet.Conv2dLayer(in_channels=1, out_channels=3, kernel_size=3,
                           kernel_init_weights=qk1, bias_init_weights=qb1, use_bias=True),
        EggNet.ShiftLayer(target_bits=target_bits, target_frac_bits=fraction_bits,
                          source_bits=16, source_frac_bits=dfrac_bits),
        EggNet.ReluActivationLayer(),
        EggNet.MaxPool2dLayer(),
        EggNet.Conv2dLayer(in_channels=3, out_channels=9, kernel_size=3,
                           kernel_init_weights=qk2, bias_init_weights=qb2, use_bias=True),
        EggNet.ShiftLayer(target_bits=target_bits, target_frac_bits=fraction_bits,
                          source_bits=16, source_frac_bits=dfrac_bits),
        EggNet.ReluActivationLayer(),
        EggNet.MaxPool2dLayer(),
        EggNet.FlattenLayer(),
        EggNet.BreakpointLayer(enabled=False),
        EggNet.FullyConnectedLayer(input_size=ni3, output_size=no3, dtype=np.int16,
                                   weights=qw3, bias=qb3),
        EggNet.ShiftLayer(target_bits=target_bits, target_frac_bits=fraction_bits,
                          source_bits=16, source_frac_bits=dfrac_bits),
        EggNet.ReluActivationLayer(),
        EggNet.FullyConnectedLayer(input_size=ni4, output_size=no4, dtype=np.int16,
                                   weights=qw4, bias=qb4),
        EggNet.ShiftLayer(target_bits=target_bits, target_frac_bits=fraction_bits,
                          source_bits=16, source_frac_bits=dfrac_bits),
        EggNet.SoftmaxLayer(),
    ]
    return layers
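# Hedged usage sketch (not from the original source): wraps the quantized layer
# list in an EggNet.Network and runs integer inference. The 8/4 bit split, the
# archive path, and the dummy batch are placeholders; the weight key names
# follow _get_layers above.
import numpy as np
import EggNet

with np.load("float_weights.npz") as data:                # placeholder archive
    weights_dict = {key: data[key] for key in data.files}

q_layers = _get_layers(weights_dict, target_bits=8, fraction_bits=4)
q_net = EggNet.Network(q_layers)
y = q_net.forward(np.random.randint(0, 256, size=(2, 28, 28)))  # dummy batch of 2 images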
def __init__(self, weights, options, shifts, real_quant=False):
    # Check input
    r1 = EggNet.ReshapeLayer(newshape=[-1, 28, 28, 1])
    cn1 = EggNet.Conv2dLayer(in_channels=1, out_channels=16, kernel_size=3,
                             activation='relu', dtype=np.float32)                            # [? 28 28 16]
    mp1 = EggNet.MaxPool2dLayer(size=2)                                                      # [? 14 14 16]
    cn2 = EggNet.Conv2dLayer(in_channels=16, out_channels=32, kernel_size=3, activation='relu')  # [? 14 14 32]
    mp2 = EggNet.MaxPool2dLayer(size=2)                                                      # [?  7  7 32]
    r2 = EggNet.FlattenLayer()
    fc1 = EggNet.FullyConnectedLayer(input_size=32 * 7 * 7, output_size=32,
                                     activation='relu', dtype=np.float32)
    if real_quant:
        fc2 = EggNet.FullyConnectedLayer(input_size=32, output_size=10, activation=None)
    else:
        fc2 = EggNet.FullyConnectedLayer(input_size=32, output_size=10, activation='softmax')

    if real_quant:
        rs1 = EggNet.SimpleShiftLayer(shift=shifts[0], a_min=options['out_min'][0], a_max=options['out_max'][0])
        rs2 = EggNet.SimpleShiftLayer(shift=shifts[1], a_min=options['out_min'][1], a_max=options['out_max'][1])
        rs3 = EggNet.SimpleShiftLayer(shift=shifts[2], a_min=options['out_min'][2], a_max=options['out_max'][2])
        rs4 = EggNet.SimpleShiftLayer(shift=shifts[3], a_min=options['out_min'][3], a_max=options['out_max'][3])
    else:
        # scales = 2.0 ** (-shifts)
        scales = np.ones(shape=(4,))
        rs1 = EggNet.ScaleLayer(scale=scales[0], a_min=options['out_min_f'][0], a_max=options['out_max_f'][0])
        rs2 = EggNet.ScaleLayer(scale=scales[1], a_min=options['out_min_f'][1], a_max=options['out_max_f'][1])
        rs3 = EggNet.ScaleLayer(scale=scales[2], a_min=options['out_min_f'][2], a_max=options['out_max_f'][2])
        rs4 = EggNet.ScaleLayer(scale=scales[3], a_min=options['out_min_f'][3], a_max=options['out_max_f'][3])

    self.rs1 = rs1
    self.rs2 = rs2
    self.rs3 = rs3
    self.rs4 = rs4

    # Store a reference to each layer
    self.r1 = r1
    self.cn1 = cn1
    self.mp1 = mp1
    self.cn2 = cn2
    self.mp2 = mp2
    self.r2 = r2
    self.fc1 = fc1
    self.fc2 = fc2

    self.cn1.weights = weights['cn1.k']
    self.cn1.bias = weights['cn1.b']
    self.cn2.weights = weights['cn2.k']
    self.cn2.bias = weights['cn2.b']
    self.fc1.weights = weights['fc1.w']
    self.fc1.bias = weights['fc1.b']
    self.fc2.weights = weights['fc2.w']
    self.fc2.bias = weights['fc2.b']

    self.lenet_layers = [r1, cn1, mp1, rs1, cn2, mp2, rs2, r2, fc1, rs3, fc2, rs4]
    super(FpiLeNet, self).__init__(self.lenet_layers)
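# Hedged usage sketch (not from the original source): shows the structure the
# options/shifts arguments above expect -- four per-layer entries each. The
# numeric values are placeholders, not values taken from the source.
import numpy as np
import EggNet

shifts = np.array([4, 4, 4, 4])                       # right-shift per requantization step
options = {
    'out_min':   [-128, -128, -128, -128],            # integer clip bounds (real_quant=True)
    'out_max':   [127, 127, 127, 127],
    'out_min_f': [-1.0, -1.0, -1.0, -1.0],            # float clip bounds (real_quant=False)
    'out_max_f': [1.0, 1.0, 1.0, 1.0],
}
# weights: dict with keys 'cn1.k', 'cn1.b', 'cn2.k', 'cn2.b', 'fc1.w', 'fc1.b', 'fc2.w', 'fc2.b'
# net = EggNet.FpiLeNet(weights, options=options, shifts=shifts, real_quant=True)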