Example #1
    def createSimpleDNN(self):
        print("Createing the NERModel[{}]...".format(self.model_name))
        self.buildTrainingData()

        # Building the network: an embedding followed by fully-connected layers
        net = input_data(shape=[None, self.config.sent_size], name='input')
        #net = tflearn.input_data([None, self.config.window*2+1])
        net = tflearn.embedding(net,
                                input_dim=self.vocab.dict_size,
                                weights_init=self.config.w_initializer,
                                output_dim=self.config.wv_size)
        net = tflearn.fully_connected(net,
                                      200,
                                      activation=self.config.activation_1)
        net = tflearn.dropout(net, self.config.drop_prob)
        net = tflearn.fully_connected(net,
                                      self.no_classes,
                                      activation='softmax')

        # With TFLearn estimators
        adam = Adam(learning_rate=self.config.lrate, beta1=0.99)

        net = tflearn.regression(
            net,
            #optimizer = self.config.optimizer,
            optimizer=adam,
            # learning_rate is ignored when an optimizer instance is passed;
            # the Adam object above already carries the rate.
            learning_rate=self.config.lrate,
            loss='categorical_crossentropy')

        # Define model
        self.model = tflearn.DNN(net)
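A training call would typically follow; a minimal sketch, assuming buildTrainingData() fills self.train_X and self.train_Y (hypothetical attribute names, not shown in the source):

        # Hypothetical continuation (train_X/train_Y are assumed attributes):
        self.model.fit(self.train_X, self.train_Y,
                       n_epoch=10, batch_size=64, show_metric=True)
        self.model.save(self.model_name + '.tfl')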
Example #2
def _nn_creation(first_layer_width, last_layer_width, depth, epsilon=1e-8, learning_rate=0.001, dropout_val=0.99,
                stddev_init=0.001, hidden_act='relu', outlayer_act='linear', weight_decay=0.001,
                validation_fun=_rmse_valid, cost_fun=_rmse, gpu_mem_prop=1):
    """
    Creates a neural network with the given parameters.
    :param first_layer_width:
    :param last_layer_width:
    :param depth:
    :param epsilon:
    :param learning_rate:
    :param dropout_val:
    :param stddev_init:
    :param hidden_act:
    :param outlayer_act:
    :param weight_decay:
    :param validation_fun:
    :param cost_fun:
    :param gpu_mem_prop:
    :return: created neural network
    """

    # Weights initialization
    winit = tfl.initializations.truncated_normal(stddev=stddev_init, dtype=tf.float32, seed=None)

    # GPU memory utilisation proportion
    tfl.init_graph(num_cores=16, gpu_memory_fraction=gpu_mem_prop, soft_placement=True)

    # Creating NN input
    network = input_data(shape=[None, first_layer_width], name='input')

    # Calculating width coef
    width_coef = (last_layer_width - first_layer_width) / (depth + 1)

    # Creating hidden layers
    for i in range(1, depth+1):

        # Computing current width
        curr_width = math.floor(width_coef * (i+1) + first_layer_width)

        # Creating current layer
        network = fully_connected(network, curr_width, activation=hidden_act, name='fc' + str(i-1), weights_init=winit,
                                  weight_decay=weight_decay)

        print("size : " + str(curr_width))

        # Applying dropout
        network = dropout(network, dropout_val)

    # Adding outlayer
    network = fully_connected(network, 1, activation=outlayer_act, name='outlayer', weights_init=winit)

    # Adam optimizer creation
    adam = Adam(learning_rate=learning_rate, epsilon=epsilon)

    # Model evaluation layer creation
    network = regression(network, optimizer=adam,
                         loss=cost_fun, metric=validation_fun, name='target')

    return network
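A usage sketch for _nn_creation, wrapping the returned graph in a trainable model (X, y, and the hyperparameter values are assumptions):

# Hypothetical usage; X has first_layer_width columns, y has one column.
import tflearn

net = _nn_creation(first_layer_width=128, last_layer_width=16, depth=3)
model = tflearn.DNN(net, tensorboard_verbose=0)
model.fit(X, y, n_epoch=20, batch_size=32, show_metric=True)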
Example #3
def main(argv=None):
    print("Reading image dataset...")
    train_images = flowers.read_dataset("./Images")
    batch_reader = dataset.BatchDatset(train_images)
    network = Network()
    adam = Adam(learning_rate=3e-3, beta1=0.9, beta2=0.99, epsilon=1.0)
    reg = regression(network, optimizer=adam)
    l_image, color_images = batch_reader.next_batch(16)
    model = tflearn.DNN(reg, checkpoint_path='model.tfl.ckpt')
    model.fit(l_image, color_images[:, :, :, 1:3])
    model.save("model.tfl")
    for itr in range(MAX_ITERATION):  # was xrange (Python 2) in the original
        # A fresh DNN is built each iteration, so the load() below applies to
        # the previous model object; logic preserved from the original.
        model.load("model.tfl")
        l_image, color_images = batch_reader.next_batch(16)
        model = tflearn.DNN(reg, checkpoint_path='model.tfl.ckpt')
        model.fit(l_image, color_images[:, :, :, 1:3], show_metric=False)
        model.save("model.tfl")
Example #4
    def cnn(self):

        images = input_data(shape=[None, 48, 48, 1], name='input')
        images = conv_2d(images, 64, 3, padding='same', activation=self.activation)
        images = batch_normalization(images)
        # max pooling
        images = max_pool_2d(images, 3, strides=2)
        images = conv_2d(images, 128, 3, padding='same', activation=self.activation)
        images = max_pool_2d(images, 3, strides=2)
        images = conv_2d(images, 256, 3, padding='same', activation=self.activation)
        # max pooling
        images = max_pool_2d(images, 3, strides=2)
        images = dropout(images, keep_prob=self.dropout)
        images = fully_connected(images, 4096, activation=self.activation)
        images = dropout(images, keep_prob=self.dropout)
        # fully connected layers
        images = fully_connected(images, 1024, activation=self.activation)
        images = fully_connected(images, 7, activation='softmax')

        if self.optimizer == 'momentum':

            optimizers = Momentum(
                learning_rate=self.learning_rate,
                momentum=self.optimizer_param,
                lr_decay=self.learning_rate_decay,
                decay_step=self.decay_step
            )
        elif self.optimizer == 'adam':

            optimizers = Adam(
                learning_rate=self.learning_rate,
                beta1=self.optimizer_param,
                beta2=self.learning_rate_decay,
            )
        else:
            # Fail fast: falling through would hit an undefined 'optimizers' below.
            raise ValueError("Unknown optimizer: {}".format(self.optimizer))

        network = regression(
            images,
            optimizer=optimizers,
            loss='categorical_crossentropy',
            learning_rate=self.learning_rate,
            name='output'
        )

        return network
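A sketch of driving this method from outside the class (m, X, and Y are hypothetical):

# Hypothetical usage; m is an instance of the class defining cnn().
network = m.cnn()
model = tflearn.DNN(network, tensorboard_verbose=0)
model.fit(X, Y, n_epoch=50, batch_size=64, show_metric=True)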
Example #5
    def conv_nn(self):
        convnet = input_data(shape=[None, self.img_size, self.img_size, self.image_channels], name='input')
        # TODO: each loop iteration adds two conv/pool blocks, so the effective depth is 2x self.layers
        for _ in range(self.layers):
            convnet = conv_2d(convnet, 32, 2, activation='relu')
            convnet = max_pool_2d(convnet, 2)

            convnet = conv_2d(convnet, 64, 2, activation='relu')
            convnet = max_pool_2d(convnet, 2)

        convnet = fully_connected(convnet, 1024, activation='relu')
        convnet = dropout(convnet, 0.5)
        adam = Adam(learning_rate=self.learning_rate, beta1=self.beta)
        convnet = fully_connected(convnet, len(self.label_folders), activation='softmax')
        convnet = regression(convnet, optimizer=adam, shuffle_batches=False, learning_rate=self.learning_rate,
                             loss='categorical_crossentropy', batch_size=self.batch_size,
                             name='targets')

        if not os.path.isdir(self.data_folder + 'checkpoints/' + self.model_name + '/'):
            os.makedirs(self.data_folder + 'checkpoints/' + self.model_name + '/')
        self.model = tflearn.DNN(convnet, tensorboard_dir='log',
                                 checkpoint_path=self.data_folder + 'checkpoints/' + self.model_name + '/')
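A matching training call, using the 'input' and 'targets' names defined above (data variables and epoch count are assumptions):

        # Hypothetical continuation inside the same class:
        self.model.fit({'input': X}, {'targets': Y}, n_epoch=10,
                       validation_set=({'input': test_X}, {'targets': test_Y}),
                       snapshot_step=500, show_metric=True, run_id=self.model_name)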
Example #6
    def build_DCGAN(self):
        gen_input = input_data(shape=[None, self.z_dim], name='input_gen_noise')
        input_disc_noise = input_data(shape=[None, self.z_dim], name='input_disc_noise')

        input_disc_real = input_data(shape=[None, self.img_size, self.img_size, 1], name='input_disc_real')

        disc_fake = self.discriminator(self.generator(input_disc_noise))
        disc_real = self.discriminator(input_disc_real, reuse=True)
        disc_net = tf.concat([disc_fake, disc_real], axis=0)

        gen_net = self.generator(gen_input, reuse=True)
        stacked_gan_net = self.discriminator(gen_net, reuse=True)

        disc_vars = tflearn.get_layer_variables_by_scope('Discriminator')

        disc_target = tflearn.multi_target_data(['target_disc_fake', 'target_disc_real'],
                                                shape=[None, 2])

        adam = Adam(learning_rate=self.learning_rate, beta1=self.beta)
        disc_model = regression(disc_net, optimizer=adam,
                                placeholder=disc_target,
                                loss='categorical_crossentropy',
                                trainable_vars=disc_vars,
                                name='target_disc', batch_size=self.batch_size,
                                op_name='DISC')

        gen_vars = tflearn.get_layer_variables_by_scope('Generator')
        gan_model = regression(stacked_gan_net, optimizer=adam,
                               loss='categorical_crossentropy',
                               trainable_vars=gen_vars,
                               name='target_gen', batch_size=self.batch_size,
                               op_name='GEN')

        self.model = tflearn.DNN(gan_model, tensorboard_dir='log',
                                 checkpoint_path=self.data_folder + 'checkpoints/' + self.model_name + '/')
        self.gen_net = gen_net
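Training this DCGAN feeds the noise inputs and real images by layer name, following TFLearn's DCGAN example; a sketch (n, X, and the epoch count are assumptions, and numpy is assumed imported as np):

        # Hypothetical training call; X holds n real images.
        z_disc = np.random.uniform(-1., 1., size=[n, self.z_dim])
        z_gen = np.random.uniform(-1., 1., size=[n, self.z_dim])
        y_fake = tflearn.data_utils.to_categorical(np.zeros(n), 2)
        y_real = tflearn.data_utils.to_categorical(np.ones(n), 2)
        self.model.fit(X_inputs={'input_gen_noise': z_gen,
                                 'input_disc_noise': z_disc,
                                 'input_disc_real': X},
                       Y_targets={'target_gen': y_real,
                                  'target_disc_fake': y_fake,
                                  'target_disc_real': y_real},
                       n_epoch=10)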
Example #7
# The snippet is truncated here: the input layer and first conv block are
# missing from the source. An input along these lines is assumed (shape is
# hypothetical, but the name must be 'input' to match the fit() call below):
#   network = input_data(shape=[None, height, width, channels], name='input')
network = max_pool_2d(network, 2)
network = local_response_normalization(network)
network = conv_2d(network, 32, 3, activation='relu')
network = max_pool_2d(network, 2)
network = local_response_normalization(network)
network = conv_2d(network, 64, 3, activation='relu')
network = max_pool_2d(network, 2)
network = local_response_normalization(network)
network = conv_2d(network, 128, 3, activation='relu')
network = max_pool_2d(network, 2)
network = local_response_normalization(network)

network = fully_connected(network, 256, activation='relu')
network = dropout(network, 0.5)
network = fully_connected(network, 256, activation='relu')
network = dropout(network, 0.5)
network = fully_connected(network, 24, activation='softmax')
adam = Adam(learning_rate=0.001)
network = regression(network, optimizer=adam,
                     loss='categorical_crossentropy', name='target')

# Training
model = tflearn.DNN(network)

model.fit({'input': X}, {'target': Y}, n_epoch=100,
          validation_set=({'input': testX}, {'target': testY}),
          show_metric=True, run_id='convnet', batch_size=100)

model.save('convnet.tflearn')
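After training, the saved model can be restored for inference (testX reused from above):

# Hypothetical inference step:
model.load('convnet.tflearn')
predictions = model.predict(testX)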

Example #8
def build_modelB(optimizer=HYPERPARAMS.optimizer,
                 optimizer_param=HYPERPARAMS.optimizer_param,
                 learning_rate=HYPERPARAMS.learning_rate,
                 keep_prob=HYPERPARAMS.keep_prob,
                 learning_rate_decay=HYPERPARAMS.learning_rate_decay,
                 decay_step=HYPERPARAMS.decay_step):

    images_network = input_data(
        shape=[None, NETWORK.input_size, NETWORK.input_size, 1], name='input1')
    images_network = conv_2d(images_network,
                             64,
                             3,
                             activation=NETWORK.activation)
    #images_network = local_response_normalization(images_network)
    if NETWORK.use_batchnorm_after_conv_layers:
        images_network = batch_normalization(images_network)
    images_network = max_pool_2d(images_network, 3, strides=2)
    images_network = conv_2d(images_network,
                             128,
                             3,
                             activation=NETWORK.activation)
    if NETWORK.use_batchnorm_after_conv_layers:
        images_network = batch_normalization(images_network)
    images_network = max_pool_2d(images_network, 3, strides=2)
    images_network = conv_2d(images_network,
                             256,
                             3,
                             activation=NETWORK.activation)
    if NETWORK.use_batchnorm_after_conv_layers:
        images_network = batch_normalization(images_network)
    images_network = max_pool_2d(images_network, 3, strides=2)
    images_network = dropout(images_network, keep_prob=keep_prob)
    images_network = fully_connected(images_network,
                                     4096,
                                     activation=NETWORK.activation)
    images_network = dropout(images_network, keep_prob=keep_prob)
    images_network = fully_connected(images_network,
                                     1024,
                                     activation=NETWORK.activation)
    if NETWORK.use_batchnorm_after_fully_connected_layers:
        images_network = batch_normalization(images_network)

    if NETWORK.use_landmarks or NETWORK.use_hog_and_landmarks:
        if NETWORK.use_hog_sliding_window_and_landmarks:
            landmarks_network = input_data(shape=[None, 2728], name='input2')
        elif NETWORK.use_hog_and_landmarks:
            landmarks_network = input_data(shape=[None, 208], name='input2')
        else:
            landmarks_network = input_data(shape=[None, 68, 2], name='input2')
        landmarks_network = fully_connected(landmarks_network,
                                            1024,
                                            activation=NETWORK.activation)
        if NETWORK.use_batchnorm_after_fully_connected_layers:
            landmarks_network = batch_normalization(landmarks_network)
        landmarks_network = fully_connected(landmarks_network,
                                            128,
                                            activation=NETWORK.activation)
        if NETWORK.use_batchnorm_after_fully_connected_layers:
            landmarks_network = batch_normalization(landmarks_network)
        images_network = fully_connected(images_network,
                                         128,
                                         activation=NETWORK.activation)
        network = merge([images_network, landmarks_network], 'concat', axis=1)
    else:
        network = images_network
    network = fully_connected(network,
                              NETWORK.output_size,
                              activation='softmax')

    if optimizer == 'momentum':
        optimizer = Momentum(learning_rate=learning_rate,
                             momentum=optimizer_param,
                             lr_decay=learning_rate_decay,
                             decay_step=decay_step)
    elif optimizer == 'adam':
        optimizer = Adam(learning_rate=learning_rate,
                         beta1=optimizer_param,
                         beta2=learning_rate_decay)
    else:
        print("Unknown optimizer: {}".format(optimizer))
    network = regression(network,
                         optimizer=optimizer,
                         loss=NETWORK.loss,
                         learning_rate=learning_rate,
                         name='output')

    return network
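A usage sketch for build_modelB; when landmark features are enabled, both inputs are fed by name (data variables and counts are assumptions):

# Hypothetical usage of build_modelB:
network = build_modelB(optimizer='adam')
model = tflearn.DNN(network, tensorboard_verbose=0)
model.fit({'input1': images, 'input2': landmarks}, {'output': labels},
          n_epoch=20, batch_size=128, show_metric=True)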
Example #9
File: model.py Project: mr-cloud/fer-nando
def build_model(optimizer=HYPERPARAMS.optimizer, optimizer_param=HYPERPARAMS.optimizer_param, 
    learning_rate=HYPERPARAMS.learning_rate, keep_prob=HYPERPARAMS.keep_prob,
    learning_rate_decay=HYPERPARAMS.learning_rate_decay, decay_step=HYPERPARAMS.decay_step):

    images_input = input_data(shape=[None, NETWORK.input_size, NETWORK.input_size, 1], name='input1')
    
    images_network = conv_2d(images_input, 16, 3, activation='relu')
    if NETWORK.use_batchnorm_after_conv_layers:
        images_network = batch_normalization(images_network)
    images_network = conv_2d(images_network, 16, 3, activation='relu')
    if NETWORK.use_batchnorm_after_conv_layers:
        images_network = batch_normalization(images_network)
    images_network = max_pool_2d(images_network, 2, strides=2)  #24*24*16
    
    
    images_network = conv_2d(images_network, 32, 3, activation='relu')
    if NETWORK.use_batchnorm_after_conv_layers:
        images_network = batch_normalization(images_network)
    images_network = conv_2d(images_network, 32, 3, activation='relu')
    if NETWORK.use_batchnorm_after_conv_layers:
        images_network = batch_normalization(images_network)
    images_network = max_pool_2d(images_network, 2, strides=2)    #12*12*32
    
    images_network = tf.pad(images_network, [[0, 0], [18, 18], [18, 18], [0, 0]], 'CONSTANT')
    images_network = merge([images_network, images_input], 'concat', axis=3)  # 48*48*33
    
    images_network = conv_2d(images_network, 64, 3, activation='relu')
    if NETWORK.use_batchnorm_after_conv_layers:
        images_network = batch_normalization(images_network)
    images_network = conv_2d(images_network, 64, 3, activation='relu')
    if NETWORK.use_batchnorm_after_conv_layers:
        images_network = batch_normalization(images_network)
    images_network = conv_2d(images_network, 64, 3, activation='relu')
    if NETWORK.use_batchnorm_after_conv_layers:
        images_network = batch_normalization(images_network)
    images_network = max_pool_2d(images_network, 2, strides=2)       #24*24*64
    
    
    images_network = conv_2d(images_network, 128, 3, activation='relu')
    if NETWORK.use_batchnorm_after_conv_layers:
        images_network = batch_normalization(images_network)
    images_network = conv_2d(images_network, 128, 3, activation='relu')
    if NETWORK.use_batchnorm_after_conv_layers:
        images_network = batch_normalization(images_network)
    images_network = conv_2d(images_network, 128, 3, activation='relu')
    if NETWORK.use_batchnorm_after_conv_layers:
        images_network = batch_normalization(images_network)
    images_network = max_pool_2d(images_network, 2, strides=2)      #12*12*128
    images_network = conv_2d(images_network, 128, 3, activation='relu')
    if NETWORK.use_batchnorm_after_conv_layers:
        images_network = batch_normalization(images_network)
    images_network = conv_2d(images_network, 128, 3, activation='relu')
    if NETWORK.use_batchnorm_after_conv_layers:
        images_network = batch_normalization(images_network)
    images_network = conv_2d(images_network, 128, 3, activation='relu')
    if NETWORK.use_batchnorm_after_conv_layers:
        images_network = batch_normalization(images_network)
    images_network = max_pool_2d(images_network, 2, strides=2)     #6*6*128
     
    images_network = fully_connected(images_network, 1024, activation='relu')
    images_network = dropout(images_network, keep_prob=keep_prob)
    if NETWORK.use_batchnorm_after_fully_connected_layers:
        images_network = batch_normalization(images_network)
    images_network = fully_connected(images_network, 1024, activation='relu')
    images_network = dropout(images_network, keep_prob=keep_prob)
    if NETWORK.use_batchnorm_after_fully_connected_layers:
        images_network = batch_normalization(images_network)

    if NETWORK.use_landmarks or NETWORK.use_hog_and_landmarks:
        if NETWORK.use_hog_sliding_window_and_landmarks:
            landmarks_network = input_data(shape=[None, 2728], name='input2')
        elif NETWORK.use_hog_and_landmarks:
            landmarks_network = input_data(shape=[None, 208], name='input2')
        else:
            landmarks_network = input_data(shape=[None, 68, 2], name='input2')
        landmarks_network = fully_connected(landmarks_network, 1024, activation=NETWORK.activation)
        if NETWORK.use_batchnorm_after_fully_connected_layers:
            landmarks_network = batch_normalization(landmarks_network)
        landmarks_network = fully_connected(landmarks_network, 40, activation=NETWORK.activation)
        if NETWORK.use_batchnorm_after_fully_connected_layers:
            landmarks_network = batch_normalization(landmarks_network)
        images_network = fully_connected(images_network, 40, activation=NETWORK.activation)
        network = merge([images_network, landmarks_network], 'concat', axis=1)
    else:
        network = images_network
    network = fully_connected(network, NETWORK.output_size, activation='softmax')

    if optimizer == 'momentum':
        # FIXME base_lr * (1 - iter/max_iter)^0.5, base_lr = 0.01
        optimizer = Momentum(learning_rate=learning_rate, momentum=optimizer_param,
                    lr_decay=learning_rate_decay, decay_step=decay_step)
    elif optimizer == 'adam':
        optimizer = Adam(learning_rate=learning_rate, beta1=optimizer_param, beta2=learning_rate_decay)
    else:
        print("Unknown optimizer: {}".format(optimizer))
    network = regression(network, optimizer=optimizer, loss=NETWORK.loss, learning_rate=learning_rate, name='output')

    return network
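The tf.pad/merge pair above implements a skip connection: the 12x12x32 feature map is zero-padded back to 48x48 and concatenated with the 48x48x1 input along the channel axis, giving 48x48x33. A standalone sketch of the same trick:

# Minimal sketch of the pad-and-concat skip connection (shapes follow the
# comments in the original code).
import tensorflow as tf

x = tf.zeros([1, 12, 12, 32])    # downsampled feature map
inp = tf.zeros([1, 48, 48, 1])   # original input
padded = tf.pad(x, [[0, 0], [18, 18], [18, 18], [0, 0]], 'CONSTANT')  # -> 48x48x32
skip = tf.concat([padded, inp], axis=3)                               # -> 48x48x33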
Example #10
#network = dropout(network, 0.25)
#network = conv_3d(network, 128, 3,3, activation='relu')
#network = conv_3d(network, 128, 2,2, activation='relu')
#network = max_pool_3d(network, 2,2)
#network = dropout(network, 0.25)
#network = conv_3d(network, 256, 3,3, activation='relu')
#network = conv_3d(network, 256, 1,1, activation='relu')
#network = max_pool_3d(network, 2,2)
#network = dropout(network, 0.25)
#network = conv_3d(network, 512, 1,1, activation='relu')
#network = conv_3d(network, 128, 2,2, activation='relu')
#network = max_pool_3d(network, 1,1)
network = fully_connected(network, 512, activation='relu')
network = dropout(network, 0.8)
softmax = fully_connected(network, num_classes, activation='softmax')
adam = Adam(learning_rate=0.0001, beta1=0.98, beta2=0.9999)
# Renamed to 'net' to avoid shadowing the regression() function; the
# learning_rate argument is superseded by the Adam instance's own rate.
net = regression(softmax, optimizer=adam, loss='categorical_crossentropy', learning_rate=0.001)
model = tflearn.DNN(net, tensorboard_verbose=3, tensorboard_dir='log')
model.fit(X, y1, n_epoch=1, shuffle=True, validation_set=(v1, v2),
          show_metric=True, batch_size=1, snapshot_step=500)
#score=model.evaluate(np.array(v1),np.array(v2))
#print('Test score:',score[0],'\nTest loss:',score[1])
#print('Test loss:',score[1])
with open('D:/prob.csv','w') as f:
    f.write('probability\n')
with open('D:/prob.csv','a') as f:
    for data in tqdm(test_data):
        img_num = data[1]
        img_data = data[0]
        data = img_data.reshape(20,50,50,1)
        model_out = model.predict([data])[0]
        #model_out=model.predict_label([data])[0]
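The loop is truncated in the source; presumably each prediction is appended to the CSV, e.g. (the class index is an assumption):

        # Hypothetical completion of the truncated loop:
        f.write('{}\n'.format(model_out[1]))  # probability of class 1 (assumed)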
Example #11
def get_network_architecture(image_width, image_height, number_of_classes, learning_rate):

    number_of_channels = 1

    network = input_data(
        shape=[None, image_width, image_height, number_of_channels],
        data_preprocessing=img_prep,
        data_augmentation=img_aug,
        name='InputData'
    )

    """
        def conv_2d(incoming, nb_filters, filter_size, strides=1, padding='same',
                    activation='linear', bias=True, weights_init='uniform_scaling',
                    bias_init='zeros', regularizer=None, weight_decay=0.001,
                    trainable=True, restore=True, reuse=False, scope=None,
                    name='Conv2D')

        network = conv_2d(network, 32, (3, 3), strides=1, padding='same', activation='relu', regularizer='L2', name='Conv2D_1')
        network = max_pool_2d(network, (2, 2), strides=2, padding='same', name='MaxPool2D_1')
        network = avg_pool_2d(network, (2, 2), strides=2, padding='same', name='AvgPool2D_1')
        network = dropout(network, 0.5, name='Dropout_1')
        network = batch_normalization(network, name='BatchNormalization')
        network = flatten(network, name='Flatten')
        network = fully_connected(network, 512, activation='relu', name='FullyConnected_1')
        network = fully_connected(network, number_of_classes, activation='softmax', name='FullyConnected_Final')

        print('  {}: {}'.format('Conv2D................', network.shape))
        print('  {}: {}'.format('MaxPool2D.............', network.shape))
        print('  {}: {}'.format('Dropout...............', network.shape))
        print('  {}: {}'.format('BatchNormalization....', network.shape))
        print('  {}: {}'.format('Flatten...............', network.shape))
        print('  {}: {}'.format('FullyConnected........', network.shape))
        print('  {}: {}'.format('FullyConnected_Final..', network.shape))

        CONV / FC -> Dropout -> BN -> activation function -> ...

        Convolutional filters: { 32, 64, 128 }
        Convolutional filter sizes: { 1, 3, 5, 11 }
        Convolutional strides: 1
        Activation: ReLu

        Pooling kernel sizes: { 2, 3, 4, 5 }
        Pooling kernel strides: 2

        Dropout probability: 0.5
            - Higher probability of keeping in earlier stages
            - Lower probability of keeping in later stages
    """

    print('\nNetwork architecture:')
    print('  {}: {}'.format('InputData.............', network.shape))

    network = conv_2d(network, 16, (7, 7), strides=1, padding='same', activation='relu', regularizer='L2', name='Conv2D_1')
    print('  {}: {}'.format('Conv2D................', network.shape))
    network = batch_normalization(network, name='BatchNormalization_1')
    print('  {}: {}'.format('BatchNormalization....', network.shape))
    network = conv_2d(network, 16, (7, 7), strides=1, padding='same', activation='relu', regularizer='L2', name='Conv2D_2')
    print('  {}: {}'.format('Conv2D................', network.shape))
    network = batch_normalization(network, name='BatchNormalization_2')
    print('  {}: {}'.format('BatchNormalization....', network.shape))
    network = avg_pool_2d(network, (2, 2), strides=2, padding='same', name='AvgPool2D_1')
    print('  {}: {}'.format('AvgPool2D.............', network.shape))
    network = dropout(network, 0.5, name='Dropout_1')
    print('  {}: {}'.format('Dropout...............', network.shape))


    network = conv_2d(network, 32, (5, 5), strides=1, padding='same', activation='relu', regularizer='L2', name='Conv2D_3')
    print('  {}: {}'.format('Conv2D................', network.shape))
    network = batch_normalization(network, name='BatchNormalization_3')
    print('  {}: {}'.format('BatchNormalization....', network.shape))
    network = conv_2d(network, 32, (5, 5), strides=1, padding='same', activation='relu', regularizer='L2', name='Conv2D_4')
    print('  {}: {}'.format('Conv2D................', network.shape))
    network = batch_normalization(network, name='BatchNormalization_4')
    print('  {}: {}'.format('BatchNormalization....', network.shape))
    network = avg_pool_2d(network, (2, 2), strides=2, padding='same', name='AvgPool2D_2')
    print('  {}: {}'.format('AvgPool2D.............', network.shape))
    network = dropout(network, 0.5, name='Dropout_2')
    print('  {}: {}'.format('Dropout...............', network.shape))


    network = conv_2d(network, 64, (3, 3), strides=1, padding='same', activation='relu', regularizer='L2', name='Conv2D_5')
    print('  {}: {}'.format('Conv2D................', network.shape))
    network = batch_normalization(network, name='BatchNormalization_5')
    print('  {}: {}'.format('BatchNormalization....', network.shape))
    network = conv_2d(network, 64, (3, 3), strides=1, padding='same', activation='relu', regularizer='L2', name='Conv2D_6')
    print('  {}: {}'.format('Conv2D................', network.shape))
    network = batch_normalization(network, name='BatchNormalization_6')
    print('  {}: {}'.format('BatchNormalization....', network.shape))
    network = avg_pool_2d(network, (2, 2), strides=2, padding='same', name='AvgPool2D_3')
    print('  {}: {}'.format('AvgPool2D.............', network.shape))
    network = dropout(network, 0.5, name='Dropout_3')
    print('  {}: {}'.format('Dropout...............', network.shape))


    network = conv_2d(network, 128, (3, 3), strides=1, padding='same', activation='relu', regularizer='L2', name='Conv2D_7')
    print('  {}: {}'.format('Conv2D................', network.shape))
    network = batch_normalization(network, name='BatchNormalization_7')
    print('  {}: {}'.format('BatchNormalization....', network.shape))
    network = conv_2d(network, 128, (3, 3), strides=1, padding='same', activation='relu', regularizer='L2', name='Conv2D_8')
    print('  {}: {}'.format('Conv2D................', network.shape))
    network = batch_normalization(network, name='BatchNormalization_8')
    print('  {}: {}'.format('BatchNormalization....', network.shape))
    network = avg_pool_2d(network, (2, 2), strides=2, padding='same', name='AvgPool2D_4')
    print('  {}: {}'.format('AvgPool2D.............', network.shape))
    network = dropout(network, 0.5, name='Dropout_4')
    print('  {}: {}'.format('Dropout...............', network.shape))


    network = conv_2d(network, 256, (3, 3), strides=1, padding='same', activation='relu', regularizer='L2', name='Conv2D_9')
    print('  {}: {}'.format('Conv2D................', network.shape))
    network = batch_normalization(network, name='BatchNormalization_9')
    print('  {}: {}'.format('BatchNormalization....', network.shape))
    network = conv_2d(network, 256, (3, 3), strides=1, padding='same', activation='relu', regularizer='L2', name='Conv2D_10')
    print('  {}: {}'.format('Conv2D................', network.shape))
    network = batch_normalization(network, name='BatchNormalization_10')
    print('  {}: {}'.format('BatchNormalization....', network.shape))
    network = avg_pool_2d(network, (2, 2), strides=2, padding='same', name='AvgPool2D_5')
    print('  {}: {}'.format('AvgPool2D.............', network.shape))
    network = dropout(network, 0.5, name='Dropout_5')
    print('  {}: {}'.format('Dropout...............', network.shape))


    network = flatten(network, name='Flatten')
    print('  {}: {}'.format('Flatten...............', network.shape))


    network = fully_connected(network, 512, activation='relu', name='FullyConnected_1')
    print('  {}: {}'.format('FullyConnected........', network.shape))
    network = dropout(network, 0.5, name='Dropout_6')
    print('  {}: {}'.format('Dropout...............', network.shape))


    network = fully_connected(network, number_of_classes, activation='softmax', name="FullyConnected_Final")
    print('  {}: {}'.format('FullyConnected_Final..', network.shape))


    optimizer = Adam(learning_rate=learning_rate, beta1=0.9, beta2=0.999, epsilon=1e-08, use_locking=False, name='Adam')
    # optimizer = SGD(learning_rate=learning_rate, lr_decay=0.01, decay_step=100, staircase=False, use_locking=False, name='SGD')
    # optimizer = RMSProp(learning_rate=learning_rate, decay=0.9, momentum=0.9, epsilon=1e-10, use_locking=False, name='RMSProp')
    # optimizer = Momentum(learning_rate=learning_rate, momentum=0.9, lr_decay=0.01, decay_step=100, staircase=False, use_locking=False, name='Momentum')

    metric = Accuracy(name='Accuracy')
    # metric = R2(name='Standard Error')
    # metric = WeightedR2(name='Weighted Standard Error')
    # metric = Top_k(k=6, name='Top K')


    network = regression(
        network,
        optimizer=optimizer,
        loss='categorical_crossentropy',
        metric=metric,
        learning_rate=learning_rate,
        name='Regression'
    )

    return network
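A usage sketch for get_network_architecture (img_prep and img_aug must be defined beforehand; data and sizes are assumptions):

# Hypothetical usage:
network = get_network_architecture(64, 64, 10, 0.001)
model = tflearn.DNN(network, tensorboard_verbose=0)
model.fit(X, Y, n_epoch=30, batch_size=96, validation_set=0.1,
          show_metric=True, run_id='cnn_example')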