Example #1
def conv_bn_conv_bn_pool2x2(inp_layer, conv_filters, conv_shapes, res_shape,
                            training_name):
    assert conv_shapes[0][1] == conv_shapes[0][2]
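    # Half-kernel padding keeps the spatial size unchanged for stride-1, odd, square kernels.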
    pad1 = conv_shapes[0][1] // 2
    conv1 = layers.Conv((conv_filters[0], ) + conv_shapes[0], {
        'stride': 1,
        'pad': pad1
    }, inp_layer)
    conv1 = layers.SpatialBatchnorm((conv_filters[0], ) + res_shape,
                                    training_name, conv1)
    conv1 = layers.Relu(conv1)
    conv1 = layers.Dropout(0.6, training_name, conv1)

    assert conv_shapes[1][0] == conv_shapes[1][1]
    pad2 = conv_shapes[1][1] // 2
    conv2 = layers.Conv((conv_filters[1], conv_filters[0]) + conv_shapes[1], {
        'stride': 1,
        'pad': pad2
    }, conv1)
    conv2 = layers.SpatialBatchnorm((conv_filters[1], ) + res_shape,
                                    training_name, conv2)
    conv2 = layers.Relu(conv2)
    conv2 = layers.Dropout(0.6, training_name, conv2)

    pool = layers.MaxPool((2, 2), 2, conv2)

    return pool
Example #2
def fc_bn_dropout(inp_layer, size, training_name):
    fc = layers.Affine(size, inp_layer)
    fc = layers.Batchnorm(size[1], training_name, fc)
    fc = layers.Relu(fc)
    fc = layers.Dropout(0.8, training_name, fc)

    return fc
Example #3
	def load(self, file_path="./saved_dlmb_model.json"):
		"""
			Goes to the file where the model has been saved and retrieves the data.

			Arguments:
				file_path : str : A file path where the .json file has been saved.

		"""


		layers = {
					"Dense":la.Dense(0, 0),
					"Batchnorm":la.Batchnorm(0),
					"Dropout":la.Dropout()
				 }

		# Try to open the file at file_path.
		try:
			with open(file_path, "r") as json_file:
				model_layers = json.loads(json_file.read())

				for i in range(len(model_layers)):
					layer_data = model_layers["layer: %s" % i]

					new_layer = copy.copy(layers[layer_data["name"]])
					new_layer.load(layer_data)
					self.model.append(new_layer)

		# Gets called if the program can't find the file_path.
		except FileNotFoundError:
			raise FileNotFoundError("Can't find file path %s. Try saving the model or entering a correct file path." % file_path)
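A short usage sketch for the load() method above; the surrounding Model class, its save() counterpart, and the construction details are assumed from context, so the exact API may differ:

# Hypothetical round trip (assumed API, for illustration only):
#   model = Model()                           # the class that defines load() above
#   model.save("./saved_dlmb_model.json")     # assumed counterpart that wrote the JSON file
#   restored = Model()
#   restored.load("./saved_dlmb_model.json")  # rebuilds restored.model layer by layer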
Example #4
    def __init__(self, input_size, mid_size, out_size, sig=True):
        mag = 1 if sig else 2
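        # Gaussian init with std = mag / sqrt(fan_in); the ReLU branch uses a larger scale than the sigmoid one.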
        self.weights = {
            'W1':
            np.random.normal(0, mag / np.sqrt(input_size),
                             (input_size, mid_size)),
            'b1':
            np.random.normal(0, mag / np.sqrt(input_size), (mid_size, )),
            'W2':
            np.random.normal(0, mag / np.sqrt(mid_size), (mid_size, out_size)),
            'b2':
            np.random.normal(0, mag / np.sqrt(mid_size), (out_size, ))
        }

        self.layers = OrderedDict()
        self.layers['Affine1'] = layers.Affine(self.weights['W1'],
                                               self.weights['b1'])
        if sig:
            self.layers['Sig'] = layers.Sigmoid()
        else:
            self.layers['ReLU'] = layers.ReLU()
        self.layers['Dropout'] = layers.Dropout()
        self.layers['Affine2'] = layers.Affine(self.weights['W2'],
                                               self.weights['b2'])
        self.last_layer = layers.SmLo()
Example #5
    def model(self):
        block0 = layers.gate_block(
            inputs=self.embed, k_size=3, filters=100, scope_name='block0')

        pool0 = layers.one_maxpool(
            inputs=block0, padding='VALID', scope_name='pool0')

        flatten0 = layers.flatten(pool0, scope_name='flatten0')

        block1 = layers.gate_block(
            inputs=self.embed, k_size=4, filters=100, scope_name='block1')

        pool1 = layers.one_maxpool(
            inputs=block1, padding='VALID', scope_name='pool1')

        flatten1 = layers.flatten(pool1, scope_name='flatten1')

        block2 = layers.gate_block(
            inputs=self.embed, k_size=5, filters=100, scope_name='block2')

        pool2 = layers.one_maxpool(
            inputs=block2, padding='VALID', scope_name='pool2')

        flatten2 = layers.flatten(pool2, scope_name='flatten2')

        concat0 = layers.concatinate(
            inputs=[flatten0, flatten1, flatten2], scope_name='concat0')

        dropout0 = layers.Dropout(
            inputs=concat0, rate=1 - self.keep_prob, scope_name='dropout0')

        self.logits = layers.fully_connected(
            inputs=dropout0, out_dim=self.n_classes, scope_name='fc0')
Example #6
    def test_dropout_after_training(self):
        n = core.FeedForward(momentum=0.1, learn_rate=0.1)
        drop = layers.Dropout(layers.Tanh(2, 2), percentage=0.5)
        n += layers.Linear(2, 2)
        n += drop
        n += layers.Linear(2, 1)

        s = [
            ([0, 0], [0]),
            ([0, 1], [1]),
            ([1, 0], [1]),
            ([1, 1], [0]),
        ]

        n.fit(*s[1])
        n.fit(*s[0])
        n.fit(*s[2])
        n.fit(*s[0])
        n.fit(*s[1])

        zeros = 0
        for row in drop.y:
            if row[0] == 0:
                zeros += 1
        self.assertEqual(zeros, len(drop.w) // 2)
Example #7
    def test_dropout_drop(self):
        l = layers.Dropout(layers.Linear(10, 6), percentage=0.5)
        zeros = 0
        for row in l.D:
            if row[0] == 0:
                zeros += 1

        self.assertEqual(zeros, len(l.D) // 2)
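The two dropout tests above check that the wrapper zeroes exactly half of its mask rows. A minimal NumPy sketch of that behaviour, assuming an inverted-dropout convention (the 1/(1-p) rescaling is an assumption, not necessarily what this library does):

import numpy as np

def half_dropout_mask(n_rows, n_cols, p=0.5):
    # Zero out exactly int(p * n_rows) rows and scale the surviving rows by 1/(1-p),
    # so the expected activation magnitude is unchanged (inverted dropout).
    mask = np.ones((n_rows, n_cols)) / (1.0 - p)
    dropped = np.random.choice(n_rows, size=int(n_rows * p), replace=False)
    mask[dropped] = 0.0
    return mask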
Example #8
    def model(self):

        conv0 = layers.conv1d(
            inputs=self.embed,
            filters=100,
            k_size=3,
            stride=1,
            padding="SAME",
            scope_name="conv0",
        )
        relu0 = layers.relu(inputs=conv0, scope_name="relu0")
        pool0 = layers.one_maxpool(inputs=relu0,
                                   padding="VALID",
                                   scope_name="pool0")

        flatten0 = layers.flatten(inputs=pool0, scope_name="flatten0")

        conv1 = layers.conv1d(
            inputs=self.embed,
            filters=100,
            k_size=4,
            stride=1,
            padding="SAME",
            scope_name="conv1",
        )
        relu1 = layers.relu(inputs=conv1, scope_name="relu1")
        pool1 = layers.one_maxpool(inputs=relu1,
                                   padding="VALID",
                                   scope_name="pool1")

        flatten1 = layers.flatten(inputs=pool1, scope_name="flatten1")

        conv2 = layers.conv1d(
            inputs=self.embed,
            filters=100,
            k_size=5,
            stride=1,
            padding="SAME",
            scope_name="conv2",
        )
        relu2 = layers.relu(inputs=conv2, scope_name="relu2")
        pool2 = layers.one_maxpool(inputs=relu2,
                                   padding="VALID",
                                   scope_name="pool2")

        flatten2 = layers.flatten(inputs=pool2, scope_name="flatten2")

        concat0 = layers.concatinate([flatten0, flatten1, flatten2],
                                     scope_name="concat0")

        dropout0 = layers.Dropout(inputs=concat0,
                                  rate=1 - self.keep_prob,
                                  scope_name="dropout0")

        self.logits = layers.fully_connected(inputs=dropout0,
                                             out_dim=self.n_classes,
                                             scope_name="fc0")
Example #9
    def __init__(self, lossfunc, optimizer, batch_size):
        super().__init__(lossfunc, optimizer, batch_size)

        self.conv0 = L.Convolution_(n_filter=8, filter_size=(3, 3), stride=1)
        self.conv1 = L.Convolution_(n_filter=16, filter_size=(3, 3), stride=1)

        self.fc0 = L.Linear_(output_size=1024)
        self.fc1 = L.Linear_(output_size=10)

        self.bn0 = L.BatchNormalization_()
        self.bn1 = L.BatchNormalization_()
        self.bn4 = L.BatchNormalization_()

        self.acti0 = L.ELU()
        self.acti1 = L.ELU()
        self.acti4 = L.ELU()

        self.pool0 = L.MaxPooling(7, 7)
        self.pool1 = L.MaxPooling(5, 5)

        self.flat = L.Flatten()

        self.drop0 = L.Dropout(0.5)
        self.drop1 = L.Dropout(0.5)

        self.layers = [
            self.conv0,
            self.acti0,
            self.pool0,
            self.bn0,
            #self.drop0,
            self.conv1,
            self.acti1,
            self.pool1,
            self.bn1,
            #self.drop1,
            self.flat,
            self.fc0,
            self.acti4,
            self.bn4,
            self.fc1,
        ]
model.add(layers.Dense(1, activation='sigmoid'))

# Make the model larger
model =models.Sequential()
model.add(layers.Dense(512, activation='relu', input_shape=(10000,)))
model.add(layers.Dense(512, activation='relu'))
model.add(layers.Dense(1, activation='sigmoid'))

#4.4.2 Regularizing the weights
# Add L2 regularization to the model
from keras import regularizers

model = models.Sequential()
model.add(layers.Dense(16, kernel_regularizer = regularizers.l2(0.001),
                       activation='relu', input_shape=(10000,)))
model.add(layers.Dense(16, kernel_regularizer = regularizers.l2(0.001),
                       activation='relu', input_shape=(10000,)))
model.add(layers.Dense(1, activation='sigmoid'))

# Various regularization terms in Keras
from keras import regularizers
# L1 regularization
regularizers.l1(0.001)
regularizers.l1_l2(l1=0.001, l2=0.001)

#4.4.3 Dropout
model = models.Sequential()
model.add(layers.Dense(16, activation='relu', input_shape=(10000,)))
model.add(layers.Dropout(0.5))
model.add(layers.Dense(16, activation='relu'))
model.add(layers.Dropout(0.5))
model.add(layers.Dense(1, activation='sigmoid'))
    plt.plot(X_test, y_test)
    plt.plot(X_test, predictions)
    plt.show()


    EPOCHS = 10001
    LEARNING_RATE = 0.05

    X_train, y_train = spiral_data(samples=100, classes=3)
    X_val, y_val = spiral_data(samples=100, classes=3)

    model = network.NeuralNetwork()

    model.add_layer(
        layers.Dense(2,
                     64,
                     weight_regularizer_l2=0.000005,
                     bias_regularizer_l2=0.000005))
    model.add_layer(activations.ReLU())
    model.add_layer(layers.Dropout(rate=0.2))
    model.add_layer(layers.Dense(64, 3))
    model.add_layer(activations.Softmax())

    model.set(loss=losses.CategoricalCrossentropy(),
              optimizier=optimizers.Adam(learning_rate=LEARNING_RATE),
              accuracy=metrics.CategoricalAccuracy())

    model.fit(X_train, y_train, epochs=EPOCHS, validation_data=(X_val, y_val))
Example #12
    def build(self, X):
        """
        Build the graph of network:
        ----------
        Args:
            X: Tensor, [1, height, width, 3]
        Returns:
            logits: Tensor, predicted annotated image flattened 
                                  [1 * height * width,  num_classes]
        """

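        # tf.where with a constant True condition always selects 0.2 here; the
        # hard-coded True presumably stands in for an is_training flag.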
        dropout_prob = tf.where(True, 0.2, 1.0)

        # Left Side
        down_1_conv_1 = layers.Conv2d(X, [3, 3], 64, 'down_1_conv_1')
        down_1_conv_2 = layers.Conv2d(down_1_conv_1, [3, 3], 64,
                                      'down_1_conv_2')
        down_1_pool = layers.Maxpool(down_1_conv_2, [2, 2], 'down_1_pool')

        down_2_conv_1 = layers.Conv2d(down_1_pool, [3, 3], 128,
                                      'down_2_conv_1')
        down_2_conv_2 = layers.Conv2d(down_2_conv_1, [3, 3], 128,
                                      'down_2_conv_2')
        down_2_pool = layers.Maxpool(down_2_conv_2, [2, 2], 'down_2_pool')

        down_3_conv_1 = layers.Conv2d(down_2_pool, [3, 3], 256,
                                      'down_3_conv_1')
        down_3_conv_2 = layers.Conv2d(down_3_conv_1, [3, 3], 256,
                                      'down_3_conv_2')
        down_3_pool = layers.Maxpool(down_3_conv_2, [2, 2], 'down_3_pool')
        down_3_drop = layers.Dropout(down_3_pool, dropout_prob, 'down_3_drop')

        down_4_conv_1 = layers.Conv2d(down_3_drop, [3, 3], 512,
                                      'down_4_conv_1')
        down_4_conv_2 = layers.Conv2d(down_4_conv_1, [3, 3], 512,
                                      'down_4_conv_2')
        down_4_pool = layers.Maxpool(down_4_conv_2, [2, 2], 'down_4_pool')
        down_4_drop = layers.Dropout(down_4_pool, dropout_prob, 'down_4_drop')

        down_5_conv_1 = layers.Conv2d(down_4_drop, [3, 3], 1024,
                                      'down_5_conv_1')
        down_5_conv_2 = layers.Conv2d(down_5_conv_1, [3, 3], 1024,
                                      'down_5_conv_2')
        down_5_drop = layers.Dropout(down_5_conv_2, dropout_prob,
                                     'down_5_drop')

        # Right Side
        up_6_deconv = layers.Deconv2d(down_5_drop, 2, 'up_6_deconv')
        up_6_concat = layers.Concat(up_6_deconv, down_4_conv_2, 'up_6_concat')
        up_6_conv_1 = layers.Conv2d(up_6_concat, [3, 3], 512, 'up_6_conv_1')
        up_6_conv_2 = layers.Conv2d(up_6_conv_1, [3, 3], 512, 'up_6_conv_2')
        up_6_drop = layers.Dropout(up_6_conv_2, dropout_prob, 'up_6_drop')

        up_7_deconv = layers.Deconv2d(up_6_drop, 2, 'up_7_deconv')
        up_7_concat = layers.Concat(up_7_deconv, down_3_conv_2, 'up_7_concat')
        up_7_conv_1 = layers.Conv2d(up_7_concat, [3, 3], 256, 'up_7_conv_1')
        up_7_conv_2 = layers.Conv2d(up_7_conv_1, [3, 3], 256, 'up_7_conv_2')
        up_7_drop = layers.Dropout(up_7_conv_2, dropout_prob, 'up_7_drop')

        up_8_deconv = layers.Deconv2d(up_7_drop, 2, 'up_8_deconv')
        up_8_concat = layers.Concat(up_8_deconv, down_2_conv_2, 'up_8_concat')
        up_8_conv_1 = layers.Conv2d(up_8_concat, [3, 3], 128, 'up_8_conv_1')
        up_8_conv_2 = layers.Conv2d(up_8_conv_1, [3, 3], 128, 'up_8_conv_2')

        up_9_deconv = layers.Deconv2d(up_8_conv_2, 2, 'up_9_deconv')
        up_9_concat = layers.Concat(up_9_deconv, down_1_conv_2, 'up_9_concat')
        up_9_conv_1 = layers.Conv2d(up_9_concat, [3, 3], 64, 'up_9_conv_1')
        up_9_conv_2 = layers.Conv2d(up_9_conv_1, [3, 3], 64, 'up_9_conv_2')

        score = layers.Conv2d(up_9_conv_2, [1, 1], 1, 'score')
        logits = tf.reshape(score, (-1, 1))

        return logits
Example #13
    def __init__(
        self,
        lossfunc,
        optimizer,
        batch_size=32,
    ):
        self.lossfunc = lossfunc
        self.optimizer = optimizer
        self.batch_size = batch_size

        input_size = 64
        hidden_size = 3136
        output_size = 10

        # self.lr = 0.001
        # self.alpha = 0.9
        self.l1 = 1e-4
        self.l2 = 1e-4
        self.optimizer = optimizers.Adam(l1=self.l1, l2=self.l2)

        self.conv0 = L.Convolution_(n_filter=8, filter_size=(3, 3), stride=1)
        self.conv1 = L.Convolution_(n_filter=16, filter_size=(3, 3), stride=1)
        self.conv2 = L.Convolution_(n_filter=32, filter_size=(5, 5), stride=1)
        self.conv3 = L.Convolution_(n_filter=64, filter_size=(5, 5), stride=1)

        self.fc0 = L.Linear_(output_size=1024)
        self.fc1 = L.Linear_(output_size=10)

        self.bn0 = L.BatchNormalization_()
        self.bn1 = L.BatchNormalization_()
        self.bn2 = L.BatchNormalization_()
        self.bn3 = L.BatchNormalization_()
        self.bn4 = L.BatchNormalization_()

        self.acti0 = L.ELU()
        self.acti1 = L.ELU()
        self.acti2 = L.ELU()
        self.acti3 = L.ELU()
        self.acti4 = L.ELU()

        self.pool0 = L.MaxPooling(7, 7)
        self.pool1 = L.MaxPooling(5, 5)
        self.pool2 = L.MaxPooling(3, 3)
        self.pool3 = L.MaxPooling(3, 3)

        self.flat = L.Flatten()

        self.drop0 = L.Dropout(0.5)
        self.drop1 = L.Dropout(0.5)
        self.drop2 = L.Dropout(0.5)
        self.drop3 = L.Dropout(0.25)

        self.layers = [
            self.conv0,
            self.acti0,
            self.pool0,
            self.bn0,
            #self.drop0,
            self.conv1,
            self.acti1,
            self.pool1,
            self.bn1,
            #self.drop1,

            #self.conv2,
            #self.acti2,
            #self.pool2,
            #self.bn2,
            #self.drop2,

            #self.conv3,
            #self.acti3,
            #self.pool3,
            #self.bn3,
            #self.drop3,
            self.flat,
            self.fc0,
            self.acti4,
            self.bn4,
            self.fc1,
        ]
Example #14
    def __init__(self, p, input=None):
        SyntaxOp.__init__(self, input)
        self.layer = layers.Dropout(p)
Example #15

if __name__ == '__main__':
    torch.random.manual_seed(1234)
    np.random.seed(1234)

    epochs = 10
    lr = 0.01
    batch_size = 32

    optimizer = optimizers.SGD(learning_rate=lr)
    criterion = loss.CrossEntropy()
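    # Note: this list rebinds the name `layers`, shadowing the imported layers module;
    # the right-hand side is evaluated before the assignment, so the constructors below still resolve.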
    layers = [
        layers.LinearLayer(784, 512),
        layers.ReLU(),
        layers.Dropout(keep_rate=0.8),
        layers.LinearLayer(512, 512),
        layers.ReLU(),
        layers.Dropout(keep_rate=0.8),
        layers.LinearLayer(512, 10)
    ]
    model = Model(layers, optimizer, criterion)

    train_loader, test_loader = get_dataset(batch_size)
    for epoch_id in range(epochs):
        model.train()
        total = 0
        correct = 0
        for i, (x, y) in enumerate(train_loader):
            x = x.numpy().reshape(y.shape[0], -1, 1)
            y = y.numpy()
Example #16

def normal(arr):
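    # L1-normalize: return |arr| / sum(|arr|), rounded to two decimal places.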
    s = numpy.sum(numpy.abs(arr))
    return numpy.round(numpy.abs(arr) / s, decimals=2)


training, validation = separate_data(
    from_csv("D:\\DELETE\\Дипломмо\\output.csv"), 0.15)

# noise(training, from_range=(0, 2), axis=0)
# noise(training, from_range=(-0.05, 0.05), axis=1)

ff1 = FeedForward(learn_rate=0.05, momentum=0.2, weight_decay=0.5)
ff1 += layers.Tanh(6, 23)
ff1 += layers.Dropout(layers.Tanh(23, 28), percentage=0.3)
ff1 += layers.Dropout(layers.Tanh(28, 28), percentage=0.3)
ff1 += layers.Dropout(layers.Tanh(28, 28), percentage=0.3)
ff1 += layers.Dropout(layers.Tanh(28, 28), percentage=0.3)
ff1 += layers.Linear(28, 8)

ff2 = FeedForward(learn_rate=0.07, momentum=0.2, weight_decay=0.23)
ff2 += layers.Tanh(6, 23)
ff2 += layers.Dropout(layers.Tanh(23, 28), percentage=0.3)
ff2 += layers.Dropout(layers.Tanh(28, 28), percentage=0.3)
ff2 += layers.Dropout(layers.Tanh(28, 28), percentage=0.3)
ff2 += layers.Dropout(layers.Tanh(28, 28), percentage=0.3)
ff2 += layers.Linear(28, 8)

ff3 = FeedForward(learn_rate=0.04, momentum=0.6, weight_decay=0.4)
ff3 += layers.Tanh(6, 23)
Example #17
error = compute_nb_errors(model, x_test, y_test)
print('Test error: {:.2f}%'.format(error * 100))

plt.figure()
plt.plot(list(range(n_epochs)), losses_dict['train'])
plt.plot(list(range(n_epochs)), losses_dict['val'])
plt.xlabel('Epoch')
plt.ylabel('MSE Loss')
plt.legend(['Training', 'Validation'], loc='upper right')
plt.show()

### Testing with Dropout ###

# Defining the model architecture
model = containers.Sequential(layers.Linear(2, 500, with_bias=True),
                              activations.ReLU(), layers.Dropout(0.5),
                              layers.Linear(500, 500, with_bias=True),
                              activations.ReLU(), layers.Dropout(0.5),
                              layers.Linear(500, 500, with_bias=True),
                              activations.ReLU(), layers.Dropout(0.5),
                              layers.Linear(500, 500, with_bias=True),
                              activations.ReLU(), layers.Dropout(0.5),
                              layers.Linear(500, 2, with_bias=True),
                              activations.Tanh())

criterion = losses.LossMSE()
optimizer = optimizers.Adam(model.param(),
                            learning_rate=0.001,
                            p1=0.9,
                            p2=0.999)
Example #18
    embed_dim = 32
    # number of attention heads
    num_heads = 2
    # hidden layer size in feed forward network
    ff_dim = 32
    # maximum length of each sentence
    max_len = splitting.max_len

    # implementing layers
    inputs = keras.Input(shape=(max_len, ), )
    embedding_layer = TokenAndPositionEmbedding(max_len, len(preprocess.vocab)+1, embed_dim)
    x = embedding_layer(inputs)
    transformer_block = TransformerBlock(embed_dim, num_heads, ff_dim)
    x = transformer_block(x)
    x = layers.GlobalAveragePooling1D()(x)
    x = layers.Dropout(0.1)(x)
    x = layers.Dense(20, activation='relu')(x)
    x = layers.Dropout(0.1)(x)
    outputs = layers.Dense(7, activation='softmax')(x)

    # transformer model
    model = keras.Model(inputs=inputs, outputs=outputs)

    # summary of the model
    model.summary()

    # training and fitting the model to the data
    early_stopping = keras.callbacks.EarlyStopping()
    model.compile('adam', 'sparse_categorical_crossentropy', metrics=['accuracy'])
    history = model.fit(
        x_train, y_train, batch_size=32, epochs=10, validation_data=(x_val, y_val), callbacks=[early_stopping]
    )
def upperBlock(x, conv_size=[128, 256, 512, 256, 128], n_channels=3):
    x = L.GaussianNoise(x)
    x = L.Conv2D(x,
                 filter_size=3,
                 n_channels=n_channels,
                 n_filters=conv_size[0],
                 padding='SAME',
                 name='1a')
    x = L.LeakyReLU(x)
    x = L.Conv2D(x,
                 filter_size=3,
                 n_channels=conv_size[0],
                 n_filters=conv_size[0],
                 name='1b')
    x = L.LeakyReLU(x)
    x = L.Conv2D(x,
                 filter_size=3,
                 n_channels=conv_size[0],
                 n_filters=conv_size[0],
                 name='1c')
    x = L.MaxPooling(x, ksize=2, stride_length=2)
    x = L.Dropout(x, probability=0.5)

    x = L.Conv2D(x,
                 filter_size=3,
                 n_channels=conv_size[0],
                 n_filters=conv_size[1],
                 name='2a')
    x = L.LeakyReLU(x)
    x = L.Conv2D(x,
                 filter_size=3,
                 n_channels=conv_size[1],
                 n_filters=conv_size[1],
                 name='2b')
    x = L.LeakyReLU(x)
    x = L.Conv2D(x,
                 filter_size=3,
                 n_channels=conv_size[1],
                 n_filters=conv_size[1],
                 name='2c')
    x = L.LeakyReLU(x)
    x = L.MaxPooling(x, ksize=2, stride_length=2)

    x = L.Conv2D(x,
                 filter_size=3,
                 n_channels=conv_size[1],
                 n_filters=conv_size[2],
                 padding='VALID',
                 name='3a')
    x = L.LeakyReLU(x)
    x = L.Conv2D(x,
                 filter_size=1,
                 n_channels=conv_size[2],
                 n_filters=conv_size[3],
                 name='3b')
    x = L.LeakyReLU(x)
    x = L.Conv2D(x,
                 filter_size=1,
                 n_channels=conv_size[3],
                 n_filters=conv_size[4],
                 name='3c')
    x = L.LeakyReLU(x)
    x = L.GlobalAveragePooling(x)

    # x = L.Dropout(x, probability=0.5)
    # x = L.Dense(x, conv_size[4], 10)
    # x = L.SoftMax(x)

    return x