Example #1
x = small_basic_block(nr_filters=128)(x)
x = Reshape(
    (x.shape[1], x.shape[2], x.shape[3],
     1))(x)  # add a temporary last dim so pooling can run over the filter axis
# pool_size=(3, 3, 1) pools spatially; the stride along the filter axis does the channel reduction
x = MaxPooling3D(padding='valid', pool_size=(3, 3, 1),
                 strides=(2, 1, 2))(x)  # stride 2 along the filter axis: 128 filters -> 64
x = Reshape((x.shape[1], x.shape[2], x.shape[3]))(x)  # drop the temporary dim
x = small_basic_block(nr_filters=256)(x)
x = small_basic_block(nr_filters=256)(x)
x = Reshape(
    (x.shape[1], x.shape[2], x.shape[3], 1))(x)  # same trick as before
x = MaxPooling3D(padding='valid', pool_size=(3, 3, 1),
                 strides=(2, 1, 4))(x)  # stride 4 along the filter axis: 256 filters -> 64
x = Reshape((x.shape[1], x.shape[2], x.shape[3]))(x)
x = Dropout(rate=0.5)(x)
x = Conv2D(filters=256, kernel_size=(4, 1), kernel_initializer=hu,
           strides=1)(x)
x = BatchNormalization()(x)
x = Activation('relu')(x)
x = Dropout(rate=0.5)(x)
x = Conv2D(filters=class_number,
           kernel_size=(1, 13),
           kernel_initializer=hu,
           strides=1)(x)
y_pred = BatchNormalization()(x)
y_pred = Activation('relu')(y_pred)
y_pred = Reshape((y_pred.shape[2], y_pred.shape[3]))(y_pred)

dim_of_samples = y_pred.shape[1].value
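As a quick sanity check of the Reshape + MaxPooling3D trick used above, here is a minimal, self-contained sketch (assuming TensorFlow 2.x; the 24x94 spatial size is only a placeholder, not taken from the model above). Striding along the axis that temporarily holds the filters is what divides the filter count.

import tensorflow as tf
from tensorflow.keras.layers import Input, Reshape, MaxPooling3D

inp = Input(shape=(24, 94, 128))                     # (H, W, 128 filters); placeholder sizes
t = Reshape((24, 94, 128, 1))(inp)                   # the filter axis now looks like a spatial axis
t = MaxPooling3D(pool_size=(3, 3, 1),
                 strides=(2, 1, 2), padding='valid')(t)
print(t.shape)                                       # (None, 11, 92, 64, 1): 128 filters -> 64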
vgg_model.trainable = True

set_trainable = False
for layer in vgg_model.layers:
    if layer.name in ['block5_conv1', 'block4_conv1']:
        set_trainable = True
    if set_trainable:
        layer.trainable = True
    else:
        layer.trainable = False

print("Model architecture")
model = Sequential()
model.add(vgg_model)
model.add(Dense(512, activation='relu', input_dim=input_shape))
model.add(Dropout(0.3))
model.add(Dense(512, activation='relu'))
model.add(Dropout(0.3))
model.add(Dense(1, activation='sigmoid'))

model.compile(loss='binary_crossentropy',
            optimizer=optimizers.RMSprop(lr=1e-5),
            metrics=['accuracy'])


datagen = ImageDataGenerator(
        rotation_range=25,
        width_shift_range=0.2,
        height_shift_range=0.2,
        shear_range=0.2,
        zoom_range=0.3,
Example #3
def Network():

    branch_one_dense_outputs = []
    branch_two_dense_outputs = []

    ### Head-1:
    input1 = Input(shape=X2[0].shape)

    x = Conv1D(filters=25,
               kernel_size=3,
               padding='same',
               activation='relu',
               kernel_regularizer=l2(l=0.03))(input1)
    temp_B = Dropout(rate=0.25)(x)
    x = Dropout(rate=0.65)(x)
    temp_A = x

    #Type A feature connection
    dense_output_from_branch_one_filters_combined_one = Dense(
        units=20, activation=tf.nn.relu)(Flatten()(temp_A))

    #Type B feature connection
    #temp = MaxPooling1D(pool_size=2,strides=2)(temp)
    for filter_index in range(temp_B.shape[-1]):
        branch_one_dense_outputs.append(
            Dense(units=2, activation=tf.nn.relu)(temp_B[:, :, filter_index]))

    x = Conv1D(filters=50,
               kernel_size=3,
               padding='same',
               activation='relu',
               kernel_regularizer=l2(l=0.03))(x)
    temp_B = Dropout(rate=0.25)(x)
    x = Dropout(rate=0.65)(x)
    temp_A = x

    #Type A feature connection
    dense_output_from_branch_one_filters_combined_two = Dense(
        units=20, activation=tf.nn.relu)(Flatten()(temp_A))

    #Type B feature connection
    #temp = MaxPooling1D(pool_size=2,strides=2)(temp)
    for filter_index in range(temp_B.shape[-1]):
        branch_one_dense_outputs.append(
            Dense(units=2, activation=tf.nn.relu)(temp_B[:, :, filter_index]))

    ### Head-2:
    input2 = Input(shape=X4[0].shape)

    x = Conv1D(filters=25,
               kernel_size=3,
               padding='same',
               activation='relu',
               kernel_regularizer=l2(l=0.03))(input2)
    temp_B = Dropout(rate=0.25)(x)
    x = Dropout(rate=0.65)(x)
    temp_A = x

    #Type A feature connection
    dense_output_from_branch_two_filters_combined_one = Dense(
        units=20, activation=tf.nn.relu)(Flatten()(temp_A))

    #Type B feature connection
    #temp = MaxPooling1D(pool_size=2,strides=2)(temp)
    for filter_index in range(temp_B.shape[-1]):
        branch_two_dense_outputs.append(
            Dense(units=2, activation=tf.nn.relu)(temp_B[:, :, filter_index]))

    x = Conv1D(filters=50,
               kernel_size=3,
               padding='same',
               activation='relu',
               kernel_regularizer=l2(l=0.03))(x)
    temp_B = Dropout(rate=0.25)(x)
    x = Dropout(rate=0.65)(x)
    temp_A = x

    #Type A feature connection
    dense_output_from_branch_two_filters_combined_two = Dense(
        units=20, activation=tf.nn.relu)(Flatten()(temp_A))

    #Type B feature connection
    #temp = MaxPooling1D(pool_size=2,strides=2)(temp)
    for filter_index in range(temp_B.shape[-1]):
        branch_two_dense_outputs.append(
            Dense(units=2, activation=tf.nn.relu)(temp_B[:, :, filter_index]))

    #Type B feature connection
    branch_one_dense_outputs = Concatenate()(branch_one_dense_outputs)
    # branch_one_dense_outputs = Dropout(rate=0.45)(branch_one_dense_outputs)
    # branch_one_dense_outputs = Dense(units = 20, activation=tf.nn.relu)(branch_one_dense_outputs)

    branch_two_dense_outputs = Concatenate()(branch_two_dense_outputs)
    # branch_two_dense_outputs = Dropout(rate=0.45)(branch_two_dense_outputs)
    # branch_two_dense_outputs = Dense(units = 20, activation=tf.nn.relu)(branch_two_dense_outputs)

    merge = Concatenate()([branch_one_dense_outputs, branch_two_dense_outputs])

    output = Dropout(rate=0.50)(merge)

    output = Dense(units=1, activation='sigmoid')(output)

    return Model(inputs=[input1, input2], outputs=output)
Example #4
    :param model_filepath: path to where model will be saved
    """
    model.save_weights(weights_filepath)
    model_json = model.to_json()
    with open(model_filepath, "w") as json_file:
        json_file.write(model_json)


# Using ResNet architecture initialized with ImageNet weights and default fully connected layer removed
base_model = ResNet50V2(include_top=False, weights='imagenet')

# Adding custom layers at the end of the network
x = base_model.output
x = GlobalAveragePooling2D()(x)
x = Dense(1024, activation='relu')(x)
x = Dropout(0.4)(x)
predictions = Dense(5, activation='softmax')(x)  # star rating of 1 - 5

# Creating a trainable model
model = Model(inputs=base_model.input, outputs=predictions)

# Freezing the base_model's layers
# for layer in base_model.layers:
#     layer.trainable = False

# Compiling the model
model.compile(optimizer=SGD(lr=0.0001, momentum=0.9),
              loss='categorical_crossentropy',
              metrics=['accuracy'])

# Build the ResNet50 transfer-learning model
def generate_resnet_model(classes_len: int):
    """
    Function to create a ResNet50 model pre-trained on ImageNet with custom FC layers.
    :param classes_len: The number of classes (labels).
    :return: The ResNet50 model.
    """
    # Reconfigure single channel input into a greyscale 3 channel input
    img_input = Input(shape=(config.VGG_IMG_SIZE['HEIGHT'],
                             config.VGG_IMG_SIZE['WIDTH'], 1))
    img_conc = Concatenate()([img_input, img_input, img_input])

    # Generate a ResNet50 model with pre-trained ImageNet weights, input as given above, excluded fully connected layers.
    model_base = ResNet50(include_top=False,
                          weights='imagenet',
                          input_tensor=img_conc)

    # Add fully connected layers
    model = Sequential()
    # Start with base model consisting of convolutional layers
    model.add(model_base)

    # Flatten layer to convert each input into a 1D array (no parameters in this layer, just simple pre-processing).
    model.add(Flatten())

    # Possible dropout for regularisation can be added later and experimented with:
    if config.DROPOUT != 0:
        model.add(Dropout(config.DROPOUT, name='Dropout_Regularization_1'))

    # Add fully connected hidden layers.
    model.add(
        Dense(units=512,
              activation='relu',
              kernel_initializer='random_uniform',
              name='Dense_Intermediate_1'))

    model.add(
        Dense(units=32,
              activation='relu',
              kernel_initializer='random_uniform',
              name='Dense_Intermediate_2'))

    # Final output layer: a single sigmoid unit for binary problems, otherwise softmax (the classes are mutually exclusive).
    if classes_len == 2:
        model.add(
            Dense(1,
                  activation='sigmoid',
                  kernel_initializer='random_uniform',
                  name='Output'))
    else:
        model.add(
            Dense(classes_len,
                  kernel_initializer='random_uniform',
                  activation='softmax',
                  name='Output'))

    # Print model details if running in debug mode.
    if config.verbose_mode:
        print(model.summary())

    return model
Example #6
    X_test = X_test.reshape(list(X_test.shape) + [1])

#Converting the labels into one hot encoding
y_train = to_categorical(y_train, classes)
y_test = to_categorical(y_test, classes)

#Building the model
model = Sequential()
model.add(
    Conv2D(filters=32,
           kernel_size=(5, 5),
           activation='relu',
           input_shape=X_train.shape[1:]))
model.add(Conv2D(filters=32, kernel_size=(5, 5), activation='relu'))
model.add(MaxPool2D(pool_size=(2, 2)))
model.add(Dropout(rate=0.25))
model.add(Conv2D(filters=64, kernel_size=(3, 3), activation='relu'))
model.add(Conv2D(filters=64, kernel_size=(3, 3), activation='relu'))
model.add(MaxPool2D(pool_size=(2, 2)))
model.add(Dropout(rate=0.25))
model.add(Flatten())
model.add(Dense(256, activation='relu'))
model.add(Dropout(rate=0.5))
model.add(Dense(classes, activation='softmax'))

#Compilation of the model
model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
epochs = 10
history = model.fit(X_train,
Example #7
    def __init__(self):
        super(VGGNet, self).__init__()
        self.c1 = Conv2D(filters=64, kernel_size=(3, 3), strides=1, padding='same', input_shape=(32, 32, 3))
        self.b1 = BatchNormalization()
        self.a1 = Activation('relu')
        self.c2 = Conv2D(filters=64, kernel_size=(3, 3), strides=1, padding='same', input_shape=(32, 32, 3))
        self.b2 = BatchNormalization()
        self.a2 = Activation('relu')
        self.p1 = MaxPool2D(pool_size=(2, 2), strides=2, padding='same')
        self.d1 = Dropout(0.05)

        self.c3 = Conv2D(filters=128, kernel_size=(3, 3), strides=1, padding='same')
        self.b3 = BatchNormalization()
        self.a3 = Activation('relu')
        self.c4 = Conv2D(filters=128, kernel_size=(3, 3), strides=1, padding='same')
        self.b4 = BatchNormalization()
        self.a4 = Activation('relu')
        self.p2 = MaxPool2D(pool_size=(2, 2), strides=2, padding='same')
        self.d2 = Dropout(0.05)

        self.c5 = Conv2D(filters=256, kernel_size=(3, 3), strides=1, padding='same')
        self.b5 = BatchNormalization()
        self.a5 = Activation('relu')
        self.c6 = Conv2D(filters=256, kernel_size=(3, 3), strides=1, padding='same')
        self.b6 = BatchNormalization()
        self.a6 = Activation('relu')
        self.c7 = Conv2D(filters=256, kernel_size=(3, 3), strides=1, padding='same')
        self.b7 = BatchNormalization()
        self.a7 = Activation('relu')
        self.p3 = MaxPool2D(pool_size=(2, 2), strides=2, padding='same')
        self.d3 = Dropout(0.05)

        self.c8 = Conv2D(filters=512, kernel_size=(3, 3), strides=1, padding='same')
        self.b8 = BatchNormalization()
        self.a8 = Activation('relu')
        self.c9 = Conv2D(filters=512, kernel_size=(3, 3), strides=1, padding='same')
        self.b9 = BatchNormalization()
        self.a9 = Activation('relu')
        self.c10 = Conv2D(filters=512, kernel_size=(3, 3), strides=1, padding='same')
        self.b10 = BatchNormalization()
        self.a10 = Activation('relu')
        self.p4 = MaxPool2D(pool_size=(2, 2), strides=2, padding='same')
        self.d4 = Dropout(0.05)

        self.c11 = Conv2D(filters=512, kernel_size=(3, 3), strides=1, padding='same')
        self.b11 = BatchNormalization()
        self.a11 = Activation('relu')
        self.c12 = Conv2D(filters=512, kernel_size=(3, 3), strides=1, padding='same')
        self.b12 = BatchNormalization()
        self.a12 = Activation('relu')
        self.c13 = Conv2D(filters=512, kernel_size=(3, 3), strides=1, padding='same')
        self.b13 = BatchNormalization()
        self.a13 = Activation('relu')
        self.p5 = MaxPool2D(pool_size=(2, 2), strides=2, padding='same')
        self.d5 = Dropout(0.05)

        self.flatten = Flatten()
        self.f1 = Dense(4096, activation='relu')
        self.d6 = Dropout(0.05)
        self.f2 = Dense(4096, activation='relu')
        self.d7 = Dropout(0.05)
        self.f3 = Dense(10, activation='softmax')
def classifier_model():
    #Prepare Training Data
    vgg_face=loadModel()
    x_train=[]
    y_train=[]
    person_folders=os.listdir('./Images_crop/')
    person_rep=dict()
    for i,person in enumerate(person_folders):
        person_rep[i]=person
        image_names=os.listdir('./Images_crop/'+person+'/')
        for image_name in image_names:
            img=load_img('./Images_crop/'+person+'/'+image_name,target_size=(224,224))
            img=img_to_array(img)
            img=np.expand_dims(img,axis=0)
            img=preprocess_input(img)
            img_encode=vgg_face(img)
            x_train.append(np.squeeze(K.eval(img_encode)).tolist())
            y_train.append(i)

    x_train=np.array(x_train)
    y_train=np.array(y_train)
    np.save('train_data',x_train)
    np.save('train_labels',y_train)

    #Prepare Test Data
    x_test=[]
    y_test=[]
    person_folders=os.listdir('./Test_Images_crop/')
    for i,person in enumerate(person_folders):
        image_names=os.listdir('./Test_Images_crop/'+person+'/')
        for image_name in image_names:
            img=load_img('./Test_Images_crop/'+person+'/'+image_name,target_size=(224,224))
            img=img_to_array(img)
            img=np.expand_dims(img,axis=0)
            img=preprocess_input(img)
            img_encode=vgg_face(img)
            x_test.append(np.squeeze(K.eval(img_encode)).tolist())
            y_test.append(i)
    print(x_train)
    print(y_train)

    x_test=np.array(x_test)
    y_test=np.array(y_test)
    np.save('test_data',x_test)
    np.save('test_labels',y_test)
    # x_train=np.load('train_data.npy')
    # y_train=np.load('train_labels.npy')

    # Softmax regressor to classify images based on encoding 
    classifier_model=Sequential()
    classifier_model.add(Dense(units=100,input_dim=x_train.shape[1],kernel_initializer='glorot_uniform'))
    classifier_model.add(BatchNormalization())
    classifier_model.add(Activation('tanh'))
    classifier_model.add(Dropout(0.3))
    classifier_model.add(Dense(units=10,kernel_initializer='glorot_uniform'))
    classifier_model.add(BatchNormalization())
    classifier_model.add(Activation('tanh'))
    classifier_model.add(Dropout(0.2))
    classifier_model.add(Dense(units=7,kernel_initializer='he_uniform'))
    classifier_model.add(Activation('softmax'))
    classifier_model.compile(loss=tf.keras.losses.SparseCategoricalCrossentropy(),optimizer='nadam',metrics=['accuracy'])

    classifier_model.fit(x_train,y_train,epochs=100,validation_data=(x_test,y_test))
    tf.keras.models.save_model(classifier_model,'face_classifier_model.h5')

# classifier_model()
Example #9
#include_top=False removes the fully connected classifier head (the 1000-class ImageNet output); we only have 2 categories
vgg = VGG16(input_shape=Image_size, weights='imagenet', include_top=False)
#vgg.summary()

#to prevent training existing weights
for layer in vgg.layers:
    layer.trainable = False

#build the model
x = vgg.input
y = vgg.output  # feature maps from the VGG16 convolutional base; the 2-class probabilities come from the new head below
y = Flatten(name='flatten')(y)
#normalize the flattened features to speed up training of the new head
y = BatchNormalization()(y)
y = Dense(1024, activation='relu', name='FC1')(y)
y = Dropout(0.5)(y)
y = Dense(64, name='FC2')(y)
#the default for leaky relu in keras is 0.3
y = LeakyReLU()(y)
y = Dropout(0.2)(y)
y = Dense(2, activation='softmax', name='prediction')(y)
model = Model(inputs=x, outputs=y)
#model.summary()
#plot_model(model, to_file='/home/marmia/snic2020-6-41/Mariam/Mariam_Thesis/Notebooks_Master/train1/model1.png')

model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
#save results to a csv
csv_logger = tf.keras.callbacks.CSVLogger('train_log.csv',
                                          append=True,
Example #10
                                      min_lr=1e-8,
                                      verbose=1)

# Let's define a different architecture (brain) for each head :)
# Let each hydra head think differently.

# You can play with the configurations - I just randomly created 4 NN architectures

# ANCHOR MODEL

h_model_1 = [
    Dense(64,
          input_dim=NUM_FEATURES,
          activation='relu',
          kernel_initializer='he_uniform'),  # TODO 05281336 he_uniform, relu
    Dropout(0.3),
    Dense(32, activation='relu', kernel_initializer='he_uniform'),
    Dropout(0.2),
    Dense(128, activation='softmax', kernel_initializer='he_uniform'),
    Dropout(0.2),
    Dense(NUM_CLASS, activation='softmax', kernel_initializer='he_uniform')
]

h_model_1A = [
    Dense(10,
          input_dim=NUM_FEATURES,
          activation='relu',
          kernel_initializer='he_uniform'),
    Dropout(0.3),
    Dense(20, activation='relu', kernel_initializer='he_uniform'),
    Dropout(0.2),
test_generator = test_datagen.flow_from_directory(r'Dataset\Test',
                                                  target_size=(224, 224),
                                                  batch_size=32,
                                                  class_mode='categorical')


# In[9]:


bModel = VGG16(weights="imagenet", include_top=False,
               input_tensor=Input(shape=(224, 224, 3)))  # base_Model
hModel = bModel.output #head_Model
hModel = AveragePooling2D(pool_size=(4, 4))(hModel)
hModel = Flatten(name="flatten")(hModel)
hModel = Dense(64, activation="relu")(hModel)
hModel = Dropout(0.5)(hModel)
hModel = Dense(2, activation="softmax")(hModel)
model = Model(inputs=bModel.input, outputs=hModel)
for layer in bModel.layers:
    layer.trainable = False


# In[10]:


from tensorflow.keras.callbacks import ModelCheckpoint,EarlyStopping

checkpoint = ModelCheckpoint(r"models\model2.h5",
                             monitor="val_loss",
                             mode="min",
                             save_best_only = True,
Example #12
                         shear_range=0.15,
                         horizontal_flip=True,
                         fill_mode="nearest")

# loading the VGG16 network, ensuring the head FC layer sets are left
# off
baseModel = VGG16(weights="imagenet",
                  include_top=False,
                  input_tensor=Input(shape=(224, 224, 3)))

# constructing the head of the model that will be placed on top of
# the base model
headModel = baseModel.output
headModel = Flatten(name="flatten")(headModel)
headModel = Dense(512, activation="relu")(headModel)
headModel = Dropout(0.5)(headModel)
headModel = Dense(len(config.CLASSES), activation="softmax")(headModel)

# placing the head FC model on top of the base model - this will become
# the actual model we will train
model = Model(inputs=baseModel.input, outputs=headModel)

# looping over all layers in the base model and freeze them so they will
# NOT be updated during the first training process
for layer in baseModel.layers:
    layer.trainable = False

# compiling our model (this needs to be done after setting our
# layers to non-trainable)
print("[INFO] compiling model...")
opt = SGD(lr=config.MIN_LR, momentum=0.9)
           input_shape=(IMG_ROWS, IMG_COLS, 1),
           name='conv2d_1'))

model.add(BatchNormalization(name='batchnorm_1'))

model.add(
    Conv2D(64, (3, 3),
           activation='elu',
           padding='same',
           kernel_initializer='he_normal',
           name='conv2d_2'))

model.add(BatchNormalization(name='batchnorm_2'))

model.add(MaxPooling2D(pool_size=(2, 2), name='maxpool2d_1'))
model.add(Dropout(0.3, name='dropout_1'))

model.add(
    Conv2D(128, (3, 3),
           activation='elu',
           padding='same',
           kernel_initializer='he_normal',
           name='conv2d_3'))

model.add(BatchNormalization(name='batchnorm_3'))

model.add(
    Conv2D(128, (3, 3),
           activation='elu',
           padding='same',
           kernel_initializer='he_normal',
Example #14
def create_model(input_shape, config, is_training=True):

    weight_decay = 0.001

    model = Sequential()

    model.add(
        Convolution2D(16, (7, 7),
                      kernel_regularizer=l2(weight_decay),
                      activation="relu",
                      input_shape=input_shape))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))

    model.add(
        Convolution2D(32, (5, 5),
                      kernel_regularizer=l2(weight_decay),
                      activation="relu"))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))

    model.add(
        Convolution2D(64, (3, 3),
                      kernel_regularizer=l2(weight_decay),
                      activation="relu"))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))

    model.add(
        Convolution2D(128, (3, 3),
                      kernel_regularizer=l2(weight_decay),
                      activation="relu"))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))

    model.add(
        Convolution2D(256, (3, 3),
                      kernel_regularizer=l2(weight_decay),
                      activation="relu"))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))

    model.add(Dropout(0.5))

    model.add(Flatten())
    model.add(Dense(512, kernel_regularizer=l2(weight_decay), activation="relu"))

    model.add(Dense(config["num_classes"], activation="softmax"))

    ref_model = load_model("logs/2016-12-08-15-14-06/weights.20.model")
    for ref_layer in ref_model.layers[:-2]:
        layer = model.get_layer(ref_layer.name)
        if layer:
            print(ref_layer.name)
            layer.set_weights(ref_layer.get_weights())
            layer.trainable = False

    return model
Example #15
    def build_vgg16_like(self, data):
        # data must have size nb_pictures x height x width x nb_channels
        assert type(data) is tf.Tensor and len(data.shape) == 4
        with tf.variable_scope(self.name):
            (nb_pics, height, width, nb_channels) = data.get_shape().as_list()

            # VGG-16 encoder
            ### conv3 - 64
            self.input_layer = tf.cast(data, dtype=tf.float32)
            self.conv1_1 = Conv2D(filters=64,
                                  kernel_size=(3, 3),
                                  strides=(1, 1),
                                  padding='same',
                                  activation='relu',
                                  input_shape=(height, width,
                                               nb_channels))(self.input_layer)
            self.conv1_1 = BatchNormalization()(self.conv1_1)
            self.conv1_2 = Conv2D(filters=64,
                                  kernel_size=(3, 3),
                                  strides=(1, 1),
                                  padding='same',
                                  activation='relu')(self.conv1_1)
            self.conv1_2 = BatchNormalization()(self.conv1_2)
            self.pool1 = MaxPooling2D(pool_size=(2, 2),
                                      strides=(2, 2),
                                      padding='same')(self.conv1_2)
            ### conv3 - 128
            self.conv2_1 = Conv2D(filters=128,
                                  kernel_size=(3, 3),
                                  strides=(1, 1),
                                  padding='same',
                                  activation='relu')(self.pool1)
            self.conv2_1 = BatchNormalization()(self.conv2_1)
            self.conv2_2 = Conv2D(filters=128,
                                  kernel_size=(3, 3),
                                  strides=(1, 1),
                                  padding='same',
                                  activation='relu')(self.conv2_1)
            self.conv2_2 = BatchNormalization()(self.conv2_2)
            self.pool2 = MaxPooling2D(pool_size=(2, 2),
                                      strides=(2, 2),
                                      padding='same')(self.conv2_2)
            ### conv3 - 256
            self.conv3_1 = Conv2D(filters=256,
                                  kernel_size=(3, 3),
                                  strides=(1, 1),
                                  padding='same',
                                  activation='relu')(self.pool2)
            self.conv3_1 = BatchNormalization()(self.conv3_1)
            self.conv3_2 = Conv2D(filters=256,
                                  kernel_size=(3, 3),
                                  strides=(1, 1),
                                  padding='same',
                                  activation='relu')(self.conv3_1)
            self.conv3_2 = BatchNormalization()(self.conv3_2)
            self.conv3_3 = Conv2D(filters=256,
                                  kernel_size=(3, 3),
                                  strides=(1, 1),
                                  padding='same',
                                  activation='relu')(self.conv3_2)
            self.conv3_3 = BatchNormalization()(self.conv3_3)
            self.pool3 = MaxPooling2D(pool_size=(2, 2),
                                      strides=(2, 2),
                                      padding='same')(self.conv3_3)
            ### conv3 - 512
            self.conv4_1 = Conv2D(filters=512,
                                  kernel_size=(3, 3),
                                  strides=(1, 1),
                                  padding='same',
                                  activation='relu')(self.pool3)
            self.conv4_1 = BatchNormalization()(self.conv4_1)
            self.conv4_2 = Conv2D(filters=512,
                                  kernel_size=(3, 3),
                                  strides=(1, 1),
                                  padding='same',
                                  activation='relu')(self.conv4_1)
            self.conv4_2 = BatchNormalization()(self.conv4_2)
            self.conv4_3 = Conv2D(filters=512,
                                  kernel_size=(3, 3),
                                  strides=(1, 1),
                                  padding='same',
                                  activation='relu')(self.conv4_2)
            self.conv4_3 = BatchNormalization()(self.conv4_3)
            self.pool4 = MaxPooling2D(pool_size=(2, 2),
                                      strides=(2, 2),
                                      padding='same')(self.conv4_3)
            ### conv3 - 512
            self.conv5_1 = Conv2D(filters=512,
                                  kernel_size=(3, 3),
                                  strides=(1, 1),
                                  padding='same',
                                  activation='relu')(self.pool4)
            self.conv5_1 = BatchNormalization()(self.conv5_1)
            self.conv5_2 = Conv2D(filters=512,
                                  kernel_size=(3, 3),
                                  strides=(1, 1),
                                  padding='same',
                                  activation='relu')(self.conv5_1)
            self.conv5_2 = BatchNormalization()(self.conv5_2)
            self.conv5_3 = Conv2D(filters=512,
                                  kernel_size=(3, 3),
                                  strides=(1, 1),
                                  padding='same',
                                  activation='relu')(self.conv5_2)
            self.conv5_3 = BatchNormalization()(self.conv5_3)
            self.pool5 = MaxPooling2D(pool_size=(2, 2),
                                      strides=(2, 2),
                                      padding='same')(self.conv5_3)

            ### SPP [4, 2, 1]
            self.spp = self.spp_layer(self.pool5, [4, 2, 1],
                                      'spp',
                                      pooling='TV')
            ### FC-1024
            self.dense1 = Dense(1024, activation='relu')(self.spp)
            if (self.training_mode):
                self.dense1 = Dropout(self.dropout_prob)(self.dense1)
            ### FC-1024
            self.dense2 = Dense(1024)(self.dense1)
            if (self.training_mode):
                self.dense2 = Dropout(self.dropout_prob)(self.dense2)
            ### FC-32
            self.dense3 = Dense(32, activation='relu')(self.dense2)
            if (self.training_mode):
                self.dense3 = Dropout(self.dropout_prob)(self.dense3)
            ### output
            if (self.pb_kind == 'classification'):
                self.output = Dense(self.nb_classes)(self.dense3)
            elif (self.pb_kind == 'regression'):
                self.output = Dense(1)(self.dense3)
            else:
                print('Illegal kind of problem for VGG-16 model: {}'.format(
                    self.pb_kind))

            self.weights_summary(
                tf.get_variable('conv1_1/kernel',
                                shape=[3, 3, nb_channels, 64]),
                'first_conv_weights')
            #self.weights_summary(tf.get_variable('conv5_3/kernel',shape=[3,3,512,512]), 'last_conv_weights')
            #self.weights_summary(self.dense2, '1024_fc_layer')
            self.weights_summary(self.output, 'last_fc_layer')
            #self.prob_summary(nb_pics)
            self.model_built = 'VGG_16'
            return self.output
Example #16
              trainable=False)(model_input)

conv_blocks = []

for sz in filter_sizes:
    conv = Conv1D(filters=num_filters,
                  kernel_size=sz,
                  padding="valid",
                  activation="relu",
                  strides=1)(z)
    conv = GlobalMaxPooling1D()(conv)
    conv = Flatten()(conv)
    conv_blocks.append(conv)

z = Concatenate()(conv_blocks) if len(conv_blocks) > 1 else conv_blocks[0]
z = Dropout(drop)(z)
model_output = Dense(len(label_idx), activation='softmax')(z)

model = Model(model_input, model_output)

model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['acc'])

print(model.summary())

history = model.fit(
    X_train,
    y_train,
    batch_size=100,  # number of samples fed per update step
    epochs=20,  # pass over the entire training data 20 times
Example #17
def sublayer_connection(inputs, sublayer, dropout=0.2):
    outputs = layer_norm(inputs + Dropout(dropout)(sublayer))
    return outputs
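For context, a minimal usage sketch of the sublayer_connection helper defined above (assumptions: layer_norm stands in for tf.keras.layers.LayerNormalization, and the Dense block is just a hypothetical sub-layer such as a Transformer feed-forward block):

import tensorflow as tf
from tensorflow.keras.layers import Input, Dense, Dropout, LayerNormalization

def layer_norm(x):
    # assumed implementation of the helper used by sublayer_connection
    return LayerNormalization(epsilon=1e-6)(x)

seq = Input(shape=(10, 64))                            # (timesteps, features)
ffn_out = Dense(64, activation='relu')(seq)            # stand-in sub-layer output
out = sublayer_connection(seq, ffn_out, dropout=0.2)   # residual add + dropout + layer norm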
Example #18
# Another technique for reducing overfitting is adding dropout to the network
# It is a form of regularization: during training a random fraction of the
# activations is set to zero, which keeps the network from relying too much
# on any single neuron and reduces overfitting when training on small samples

# Create a new network with dropout layers
# this randomly sets 20% of the neurons' activations to 0
model_new = Sequential([
    Conv2D(16,
           3,
           padding='same',
           activation='relu',
           input_shape=(IMG_HEIGHT, IMG_WIDTH, 3)),
    MaxPooling2D(),
    Dropout(0.2),
    Conv2D(32, 3, padding='same', activation='relu'),
    MaxPooling2D(),
    Conv2D(64, 3, padding='same', activation='relu'),
    MaxPooling2D(),
    Dropout(0.2),
    Flatten(),
    Dense(512, activation='relu'),
    Dense(1, activation='sigmoid')
])

# Compile the new model
model_new.compile(optimizer='adam',
                  loss='binary_crossentropy',
                  metrics=['accuracy'])
from tensorflow.keras.layers import Dense, Dropout, Flatten
from tensorflow.keras.layers import Conv2D
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.layers import MaxPooling2D
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

mode = "display"

# Create the model
model = Sequential()

model.add(Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=(48,48,1)))
model.add(Conv2D(64, kernel_size=(3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))

model.add(Conv2D(128, kernel_size=(3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(128, kernel_size=(3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))

model.add(Flatten())
model.add(Dense(1024, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(7, activation='softmax'))


def emotion_recog(frame):
    model.load_weights('model.h5')
Example #20
# Split into training data and test data.
x_train, x_test, y_train, y_test = train_test_split(x_data,
                                                    y_data,
                                                    test_size=0.2)
y_train = np.array(y_train).reshape(-1, 1)
y_test = np.array(y_test).reshape(-1, 1)

x_train.shape, y_train.shape, x_test.shape, y_test.shape

# Build the NPLM model.
EMB_SIZE = 32
VOCAB_SIZE = len(word2idx) + 1
x_input = Input(batch_shape=(None, x_train.shape[1]))
x_embed = Embedding(input_dim=VOCAB_SIZE, output_dim=EMB_SIZE)(
    x_input)  # a pre-trained C matrix can be supplied via the weights option
x_embed = Dropout(0.5)(x_embed)
x_lstm = LSTM(64, dropout=0.5)(x_embed)
y_output = Dense(n_topic, activation='softmax')(x_lstm)

model = Model(x_input, y_output)  # model for training and prediction
model.compile(loss='sparse_categorical_crossentropy',
              optimizer=optimizers.Adam(learning_rate=0.01))
model.summary()

# Train the model.
hist = model.fit(x_train,
                 y_train,
                 validation_data=(x_test, y_test),
                 batch_size=512,
                 epochs=30)  # fine-tuning after pre-training
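Regarding the comment above about supplying a pre-trained C matrix to the Embedding layer: a small hedged sketch of one way to do it with an initializer (the matrix below is a random stand-in, not actual pre-trained weights):

import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import Embedding

C = np.random.rand(VOCAB_SIZE, EMB_SIZE).astype('float32')  # stand-in for a pre-trained C matrix
pretrained_embedding = Embedding(input_dim=VOCAB_SIZE, output_dim=EMB_SIZE,
                                 embeddings_initializer=tf.keras.initializers.Constant(C),
                                 trainable=True)  # set trainable=False to keep C frozen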
Example #21
    def build(self,
              word_length,
              num_labels,
              num_intent_labels,
              word_vocab_size,
              char_vocab_size,
              word_emb_dims=100,
              char_emb_dims=30,
              char_lstm_dims=30,
              tagger_lstm_dims=100,
              dropout=0.2):

        self.word_length = word_length
        self.num_labels = num_labels
        self.num_intent_labels = num_intent_labels
        self.word_vocab_size = word_vocab_size
        self.char_vocab_size = char_vocab_size

        words_input = Input(shape=(None, ), name='words_input')
        embedding_layer = Embedding(word_vocab_size,
                                    word_emb_dims,
                                    name='word_embedding')
        word_embeddings = embedding_layer(words_input)
        word_embeddings = Dropout(dropout)(word_embeddings)

        word_chars_input = Input(shape=(None, word_length),
                                 name='word_chars_input')
        char_embedding_layer = Embedding(char_vocab_size,
                                         char_emb_dims,
                                         input_length=word_length,
                                         name='char_embedding')
        char_embeddings = char_embedding_layer(word_chars_input)
        char_embeddings = TimeDistributed(Bidirectional(
            LSTM(char_lstm_dims)))(char_embeddings)
        char_embeddings = Dropout(dropout)(char_embeddings)

        # first BiLSTM layer (used for intent classification)
        first_bilstm_layer = Bidirectional(
            LSTM(tagger_lstm_dims, return_sequences=True, return_state=True))
        first_lstm_out = first_bilstm_layer(word_embeddings)

        lstm_y_sequence = first_lstm_out[:1][
            0]  # save y states of the LSTM layer
        states = first_lstm_out[1:]
        hf, _, hb, _ = states  # extract last hidden states
        h_state = concatenate([hf, hb], axis=-1)
        intents = Dense(num_intent_labels,
                        activation='softmax',
                        name='intent_classifier_output')(h_state)
        # create the 2nd feature vectors
        combined_features = concatenate([lstm_y_sequence, char_embeddings],
                                        axis=-1)

        # 2nd BiLSTM layer (used for entity/slots classification)
        second_bilstm_layer = Bidirectional(
            LSTM(tagger_lstm_dims, return_sequences=True))(combined_features)
        second_bilstm_layer = Dropout(dropout)(second_bilstm_layer)
        bilstm_out = Dense(num_labels)(second_bilstm_layer)

        # feed BiLSTM vectors into CRF
        crf = CRF(num_labels, name='intent_slot_crf')
        entities = crf(bilstm_out)

        model = Model(inputs=[words_input, word_chars_input],
                      outputs=[intents, entities])

        loss_f = {
            'intent_classifier_output': 'categorical_crossentropy',
            'intent_slot_crf': crf.loss
        }
        metrics = {
            'intent_classifier_output': 'categorical_accuracy',
            'intent_slot_crf': crf.viterbi_accuracy
        }
        model.compile(loss=loss_f, optimizer=AdamOptimizer(), metrics=metrics)
        self.model = model
Example #22
    def E4(self, x, y, trails=8, epochs=30):

        # Fit params
        print('shape features ---', x.shape)
        print('shape labels -----', y.shape)
        print('epochs -----------', epochs, end='\n\n')

        # Trails
        for trail in range(trails):
            timestamp = str(int(time.time()))
            model = Sequential()
            D = trail * 0.1

            NAME = f'E4 [32C5-P2-] - D{D} - [64C5-P2] - D{D}'
            NAME = f'{NAME} - 128 - D{D} - 10 @{int(time.time())}'

            # 1st conv
            model.add(
                Conv2D(32,
                       kernel_size=5,
                       activation=self.activation,
                       input_shape=self.input_shape))
            model.add(MaxPooling2D(pool_size=2, strides=2))
            model.add(Dropout(D))

            # 2nd conv
            model.add(Conv2D(64, kernel_size=5, activation=self.activation))
            model.add(MaxPooling2D(pool_size=2, strides=2))
            model.add(Dropout(D))

            # Classification layer
            model.add(Flatten())
            model.add(Dense(128, activation=self.activation))
            model.add(Dropout(D))

            # Output layer
            model.add(Dense(10, activation='softmax'))
            model.compile(optimizer=self.optimizer,
                          loss=self.loss,
                          metrics=self.metrics)

            # Tensor Board
            self.printStart(NAME[:2], trail, trails, timestamp)
            tensorboard = TensorBoard(log_dir=f'{self.path_logs}\\{NAME}',
                                      profile_batch=0)

            # Fitting
            model.fit(x,
                      y,
                      epochs=epochs,
                      callbacks=[tensorboard],
                      validation_split=self.val_split,
                      verbose=self.verbose)

            # Saving
            model.save(f'{self.path_models}/{NAME}.model')

            # Appending
            date = datetime.datetime.fromtimestamp(int(timestamp))
            date = date.strftime('%Y-%m-%d %H:%M:%S')
            self.models['date'].append(date)
            self.models['timestamp'].append(int(timestamp))
            self.models['name'].append(NAME[:NAME.find(' @')])
            self.models['model'].append(model)
Example #23
    conv1 = Conv2D(6, kernel_size=3, padding='same')
    bn1 = BatchNormalization()
    a1 = Activation('relu')
    conv2 = Conv2D(12, kernel_size=3, padding='same')
    bn2 = BatchNormalization()
    a2 = Activation('relu')
    mp1 = MaxPool2D()

    conv3 = Conv2D(24, kernel_size=3, padding='same')
    bn3 = BatchNormalization()
    a3 = Activation('relu')
    conv4 = Conv2D(48, kernel_size=3, padding='same')
    bn4 = BatchNormalization()
    a4 = Activation('relu')
    mp2 = MaxPool2D()
    drop1 = Dropout(0.25)

    conv11 = Conv2D(6, kernel_size=3, padding='same')
    bn11 = BatchNormalization()
    a11 = Activation('relu')
    conv22 = Conv2D(12, kernel_size=3, padding='same')
    bn22 = BatchNormalization()
    a22 = Activation('relu')
    mp11 = MaxPool2D()

    conv33 = Conv2D(24, kernel_size=3, padding='same')
    bn33 = BatchNormalization()
    a33 = Activation('relu')
    conv44 = Conv2D(48, kernel_size=3, padding='same')
    bn44 = BatchNormalization()
    a44 = Activation('relu')
Example #24
    def modelArchitectureNew(self, x, y, val_set, epochs):
        timestamp = str(int(time.time()))
        model = Sequential()

        # 1st conv replacement
        model.add(
            Conv2D(32,
                   kernel_size=3,
                   activation=self.activation,
                   input_shape=self.input_shape))
        model.add(BatchNormalization())
        model.add(Conv2D(32, kernel_size=3, activation=self.activation))
        model.add(BatchNormalization())
        model.add(
            Conv2D(32,
                   kernel_size=5,
                   strides=2,
                   padding='same',
                   activation=self.activation))
        model.add(BatchNormalization())
        model.add(Dropout(0.4))

        # 2nd conv replacement
        model.add(Conv2D(64, kernel_size=3, activation=self.activation))
        model.add(BatchNormalization())
        model.add(Conv2D(64, kernel_size=3, activation=self.activation))
        model.add(BatchNormalization())
        model.add(
            Conv2D(64,
                   kernel_size=5,
                   strides=2,
                   padding='same',
                   activation=self.activation))
        model.add(BatchNormalization())
        model.add(Dropout(0.4))

        # Classification layer
        model.add(Flatten())
        model.add(Dense(128, activation=self.activation))
        model.add(BatchNormalization())
        model.add(Dropout(0.4))

        # Output and compile
        model.add(Dense(10, activation='softmax'))
        model.compile(optimizer=self.optimizer,
                      loss=self.loss,
                      metrics=self.metrics)

        # TensorBoard
        NAME = 'E5 [32C3n-32C3n-32C5S2n] - D0.4 - [64C3n-64C3n-64C5S2n] - D0.4'
        NAME = f'{NAME} - 128n - D0.4 - 10 @{timestamp}'
        self.printStart(NAME[:2], 1, 2, timestamp)
        tensorboard = TensorBoard(log_dir=f'{self.path_logs}\\{NAME}',
                                  profile_batch=0)

        # Learning rate scheduler
        annealer = LearningRateScheduler(self.exponentialDecay)

        # Fitting
        model.fit(x,
                  y,
                  epochs=epochs,
                  callbacks=[tensorboard, annealer],
                  validation_data=val_set,
                  verbose=self.verbose)
        model.save(f'{self.path_models}/{NAME}.model')

        # Appending
        date = datetime.datetime.fromtimestamp(int(timestamp))
        date = date.strftime('%Y-%m-%d %H:%M:%S')
        self.models['date'].append(date)
        self.models['timestamp'].append(int(timestamp))
        self.models['name'].append(NAME[:NAME.find(' @')])
        self.models['model'].append(model)
Example #25
x_train = x_train.reshape(60000, 784)
x_test = x_test.reshape(10000, 784)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')

# convert class vectors to binary class matrices
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)

model = Sequential()
model.add(Dense(512, activation='relu', input_shape=(784,)))
model.add(Dropout(0.2))
model.add(Dense(512, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(num_classes, activation='softmax'))

model.summary()

model.compile(loss='categorical_crossentropy',
              optimizer=RMSprop(),
              metrics=['accuracy'])

history = model.fit(x_train, y_train,
                    batch_size=batch_size,
                    epochs=epochs,
                    verbose=1,
                    validation_data=(x_test, y_test))
Example #26
# this script builds and trains a simple feed-forward neural network using the Keras API of TensorFlow

data = read_data('train')
y = data["Survived"].to_numpy()
X = data.drop(columns="Survived").to_numpy()

# Definition of the network's hyper-parameters
input_dim = len(X[0])
epochs = 100
hidden_sizes = [10, 20, 10]

# Definition of the model

input_placeholder = Input(shape=(input_dim, ))
layer = input_placeholder
while len(hidden_sizes) > 0:
    dim = hidden_sizes.pop(0)
    layer = Dense(dim, activation='sigmoid')(layer)
    layer = Dropout(0.1)(layer)
output = Dense(1, activation='sigmoid')(layer)
model = Model(input_placeholder, output)

# Compile model
opt = Adam(lr=1e-2, decay=1e-2 / epochs)
model.compile(loss='binary_crossentropy', optimizer=opt, metrics=['accuracy'])

# Training of the model
model.fit(X, y, epochs=epochs, batch_size=20, shuffle=True)
# save the model
model.save("Models/FeedForward.hdf5")
X_train = []
y_train = []

for i in range(3, trainingData.shape[0]):
    X_train.append(trainingData[i - 3:i])
    y_train.append(trainingData[i, 0])

X_train, y_train = np.array(X_train), np.array(y_train)

regressor = Sequential()

regressor.add(
    LSTM(units=50,
         activation="relu",
         return_sequences=True,
         input_shape=(X_train.shape[1], 2)))
regressor.add(Dropout(0.2))

regressor.add(LSTM(units=60, activation="relu", return_sequences=True))
regressor.add(Dropout(0.3))

regressor.add(LSTM(units=80, activation="relu", return_sequences=True))
regressor.add(Dropout(0.4))

regressor.add(LSTM(units=80, activation="relu"))
regressor.add(Dropout(0.5))

regressor.add(Dense(units=1))

#Adam is a variant of stochastic gradient descent and I use mean squared error as the loss function
regressor.compile(optimizer="adam", loss=tensorflow.keras.losses.MSE)
regressor.fit(X_train, y_train, epochs=5, batch_size=16)
Example #28
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler()
scaler.fit(x_train)
x_train = scaler.transform(x_train)
x_test = scaler.transform(x_test)
x_val = scaler.transform(x_val)

print(np.max(x), np.min(x))
print(np.max(x[0]))

#2 Build the model

input1 = Input(shape=(13, ))
dense1 = Dense(120, activation='relu')(input1)
dense1 = Dropout(0.4)(dense1)
dense1 = Dense(85)(dense1)
dense1 = Dropout(0.3)(dense1)
dense1 = Dense(70)(dense1)
dense1 = Dropout(0.3)(dense1)
dense1 = Dense(60)(dense1)
dense1 = Dropout(0.2)(dense1)
dense1 = Dense(30)(dense1)
dense1 = Dropout(0.2)(dense1)
dense1 = Dense(20)(dense1)
dense1 = Dropout(0.2)(dense1)
dense1 = Dense(4)(dense1)
output1 = Dense(1)(dense1)
model = Model(inputs=input1, outputs=output1)

#3 Compile and train
Example #29
#from  keras.datasets import cifar10

(x_train, y_train), (x_test, y_test) = tf.keras.datasets.cifar10.load_data()

# split off the known classes (labels 0-4) from the novel classes (labels 5-9)
y_train_known = y_train[np.squeeze(y_train) < 5]
y_train_novel = y_train[np.squeeze(y_train) >= 5]
x_train_known = x_train[np.squeeze(y_train) < 5] / 255
x_train_novel = x_train[np.squeeze(y_train) >= 5] / 255

y_train_known_cat = to_categorical(y_train_known)
y_test_cat = to_categorical(y_test)
img_inputs = keras.Input(shape=x_train[0].shape)

x = Conv2D(16, 3, activation="relu")(img_inputs)
x = Conv2D(32, 3, activation="relu")(x)
x = MaxPooling2D(3)(x)
x = Conv2D(32, 3, activation="relu")(x)
x = Conv2D(16, 3, activation="relu")(x)

x = Flatten()(x)
x = Dropout(0.5)(x)
x = Dense(2048)(x)
x = Dropout(0.5)(x)
x = Dense(5, activation="softmax")(x)

model = Model(img_inputs, x)
model.summary()
model.compile("adam", loss="categorical_crossentropy", metrics=["acc"])
model.fit(x_train_known, y_train_known_cat, epochs=20)
Example #30
def create_model(seed, epochs, batch_size):
    train_generator2 = train_datagen2.flow_from_directory(
        'data/train',
        target_size=img_size,
        batch_size=batch_size,
        class_mode='categorical',
        seed=seed)
    validation_generator2 = test_datagen.flow_from_directory(
        'data/validation',
        target_size=img_size,
        batch_size=batch_size,
        class_mode='categorical',
        seed=seed)

    reset_random_seeds(seed)
    model = Sequential([
        Conv2D(baseMapNum, (3, 3), padding='same', kernel_regularizer=regularizers.l2(weight_decay),
               input_shape=(32, 32, 3)),
        Activation('relu'),
        BatchNormalization(),
        Conv2D(baseMapNum, (3, 3), padding='same', kernel_regularizer=regularizers.l2(weight_decay)),
        Activation('relu'),
        BatchNormalization(),
        MaxPool2D(pool_size=(2, 2)),
        Dropout(0.2),

        Conv2D(2 * baseMapNum, (3, 3), padding='same', kernel_regularizer=regularizers.l2(weight_decay)),
        Activation('relu'),
        BatchNormalization(),
        Conv2D(2 * baseMapNum, (3, 3), padding='same', kernel_regularizer=regularizers.l2(weight_decay)),
        Activation('relu'),
        BatchNormalization(),
        MaxPool2D(pool_size=(2, 2)),
        Dropout(0.3),

        Conv2D(4 * baseMapNum, (3, 3), padding='same', kernel_regularizer=regularizers.l2(weight_decay)),
        Activation('relu'),
        BatchNormalization(),
        Conv2D(4 * baseMapNum, (3, 3), padding='same', kernel_regularizer=regularizers.l2(weight_decay)),
        Activation('relu'),
        BatchNormalization(),
        MaxPool2D(pool_size=(2, 2)),
        Dropout(0.4),
        Flatten(),
        Dense(128, activation='relu'),
        BatchNormalization(),
        Dropout(0.4),
        Dense(num_classes, activation='softmax')
    ])

    lrr = ReduceLROnPlateau(
        monitor='val_accuracy',
        factor=.5,
        patience=8,
        min_lr=1e-4,
        verbose=1)
    opt_adam = Adam(learning_rate=0.002, beta_1=0.9, beta_2=0.999)
    model.compile(loss='categorical_crossentropy',
                  optimizer=opt_adam,
                  metrics=['accuracy'])
    history = model.fit(train_generator2, epochs=epochs, validation_data=validation_generator2, callbacks=[lrr])
    loss, acc = model.evaluate(validation_generator2)
    return model, history, loss, acc