Example no. 1
# Assumed imports for this excerpt; data(...) is a project helper (defined
# elsewhere) that returns the train, test and validation generators.
import json

import matplotlib.pyplot as plt
from tensorflow.keras.layers import Dense, Flatten
from tensorflow.keras.models import Sequential


def main():
    batch_size = 54
    epochs = 100
    img_height = 50
    img_width = 50
    train_set, test_set, valid_set = data(batch_size, img_height, img_width)

    model = Sequential([
        Flatten(),
        Dense(1250, activation='sigmoid'),
        Dense(512, activation='sigmoid'),
        Dense(10, activation='sigmoid'),
        Dense(1, activation='sigmoid')
    ])
    print("Built model successfully!~")
    model.compile(optimizer='adam',
                  loss='binary_crossentropy',
                  metrics=['accuracy'])
    history = model.fit_generator(
        train_set,
        steps_per_epoch=5003 // batch_size,
        epochs=epochs,
        validation_data=valid_set,
        validation_steps=2001 // batch_size
    )

    json_str = model.to_json()
    with open(r'C:\Users\user1\PycharmProjects\gender-classification-1\Multilayer Perceptron\models\MLP_model.json',
              'w') as outfile:
        json.dump(json.loads(json_str), outfile, indent=4)
        model.save_weights(
            r"C:\Users\user1\PycharmProjects\gender-classification-1\Multilayer Perceptron\models\weights_MLP_model.h5",
            save_format="h5")
    print("Saved model to disk")
    print('\n# Evaluate on test data')
    results_test = model.evaluate_generator(test_set)
    print('test loss, test acc:', results_test)
    acc = history.history['accuracy']
    val_acc = history.history['val_accuracy']
    loss = history.history['loss']
    val_loss = history.history['val_loss']
    epochs_range = range(epochs)
    plt.figure(figsize=(6, 6))
    plt.subplot(1, 2, 1)
    plt.plot(epochs_range, acc, label='Training Accuracy')
    plt.plot(epochs_range, val_acc, label='Validation Accuracy')
    plt.legend(loc='lower right')
    plt.title('Training and Validation Accuracy')
    plt.subplot(1, 2, 2)
    plt.plot(epochs_range, loss, label='Training Loss')
    plt.plot(epochs_range, val_loss, label='Validation Loss')
    plt.legend(loc='upper right')
    plt.title('Training and Validation Loss')
    plt.show()
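
A minimal sketch of how the model saved above could be reloaded, assuming the
same JSON/H5 pair (the relative paths here are illustrative):

from tensorflow.keras.models import model_from_json

# Rebuild the architecture from the saved JSON, then restore the weights.
with open('MLP_model.json') as infile:
    model = model_from_json(infile.read())
model.load_weights('weights_MLP_model.h5')
# Re-compile with the original settings before evaluating further.
model.compile(optimizer='adam',
              loss='binary_crossentropy',
              metrics=['accuracy'])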
Example no. 2
model.add(MaxPool2D(pool_size=3, strides=2))

model.add(
    Conv2D(256, kernel_size=3, strides=2, padding='SAME', activation='relu'))
model.add(
    Conv2D(256, kernel_size=3, strides=1, padding='SAME', activation='relu'))
model.add(
    Conv2D(256, kernel_size=3, strides=1, padding='SAME', activation='relu'))
model.add(MaxPool2D(pool_size=3, strides=2))
model.add(Dropout(rate=0.3))

model.add(Flatten())
model.add(Dense(256, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
print(model.summary())
opt = tf.keras.optimizers.Adam()
ES = EarlyStopping(monitor='val_loss',
                   patience=3,
                   verbose=1,
                   restore_best_weights=True)
LRR = ReduceLROnPlateau(monitor='val_loss', factor=0.2, verbose=1)
model.compile(optimizer=opt, loss='binary_crossentropy', metrics=['accuracy'])
history = model.fit_generator(train_gen,
                              steps_per_epoch=steps,
                              epochs=15,
                              callbacks=[ES, LRR],
                              validation_data=val_gen)

score = model.evaluate_generator(test_gen)
print(score)
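
steps is not defined in this excerpt; a common choice, assuming train_gen is a
Keras Sequence/DirectoryIterator, is one full pass over the data per epoch:

# len() of a Keras data generator is ceil(samples / batch_size),
# i.e. the number of batches per epoch.
steps = len(train_gen)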
Example no. 3
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(4, activation='softmax'))

model.compile(loss="categorical_crossentropy",
              optimizer='adam',
              metrics=['accuracy'])

history = model.fit_generator(
    train_generator,
    workers=100,
    steps_per_epoch=63,  # ceil(2000 images / batch size 32) = 63
    validation_data=valid_generator,
    epochs=20)

scores = model.evaluate_generator(valid_generator)
print("\n\n1 - Accuracy 1st test: " + str(scores[1]))
"""scores = model.evaluate_generator(valid_generator2)
print("\n\n1 - Accuracy 2nd test: "+str(scores[1]))
scores = model.evaluate_generator(valid_generator3)
print("\n\n1 - Accuracy 3rd test: "+str(scores[1])) """

loss1 = history.history['loss']
val_loss1 = history.history['val_loss']

plt.plot(loss1, color="red")
plt.plot(val_loss1, color="blue")
red_patch = mpatches.Patch(color="red", label="loss")
blue_patch = mpatches.Patch(color="blue", label="val_loss")
plt.legend(handles=[red_patch, blue_patch])
plt.show()
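
As an aside, the manual mpatches legend above can be avoided by labeling the
plots directly; a sketch:

plt.plot(loss1, color="red", label="loss")
plt.plot(val_loss1, color="blue", label="val_loss")
plt.legend()
plt.show()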
Example no. 4
# Assumed imports for this excerpt.
import json

import matplotlib.pyplot as plt
from tensorflow.keras.layers import (Conv2D, Dense, Dropout, Flatten,
                                     MaxPooling2D)
from tensorflow.keras.models import Sequential
from tensorflow.keras.preprocessing.image import ImageDataGenerator


def main():
    ###predeclared hyperparameters for training
    batch_size = 64
    epochs = 250
    IMG_HEIGHT = 200
    IMG_WIDTH = 200
    ###data generators for the train, validation and test sets
    train_image_generator = ImageDataGenerator(
        rescale=1. / 255,
        rotation_range=45,
        width_shift_range=.15,
        height_shift_range=.15,
        horizontal_flip=True,
        zoom_range=0.5)  # Generator for our training data
    validation_image_generator = ImageDataGenerator(
        rescale=1. / 255)  # Generator for our validation data
    test_image_generator = ImageDataGenerator(rescale=1. / 255)
    train_data_gen = train_image_generator.flow_from_directory(
        batch_size=batch_size,
        directory=
        r"C:\Users\evgen\Desktop\DL_Git_forAllProj\CNN\dataset\train_set",
        shuffle=True,
        target_size=(IMG_HEIGHT, IMG_WIDTH),
        class_mode='binary')
    val_data_gen = validation_image_generator.flow_from_directory(
        batch_size=batch_size,
        directory=
        r"C:\Users\evgen\Desktop\DL_Git_forAllProj\CNN\dataset\val_set",
        target_size=(IMG_HEIGHT, IMG_WIDTH),
        class_mode='binary')
    test_data_gen = test_image_generator.flow_from_directory(
        batch_size=batch_size,
        directory=
        r"C:\Users\evgen\Desktop\DL_Git_forAllProj\CNN\dataset\train_set",
        target_size=(IMG_HEIGHT, IMG_WIDTH),
        class_mode='binary')
    ###building the model
    model = Sequential([
        Conv2D(16,
               3,
               padding='same',
               activation='relu',
               input_shape=(IMG_HEIGHT, IMG_WIDTH, 3)),
        MaxPooling2D(),
        Dropout(0.2),
        Conv2D(32, 3, padding='same', activation='relu'),
        MaxPooling2D(),
        Conv2D(64, 3, padding='same', activation='relu'),
        MaxPooling2D(),
        Conv2D(128, 3, padding='same', activation='relu'),
        MaxPooling2D(),
        Dropout(0.2),
        Flatten(),
        Dense(512, activation='relu'),
        Dense(1, activation='sigmoid')
    ])
    ###compiling the model
    model.compile(optimizer='adam',
                  loss='binary_crossentropy',
                  metrics=['accuracy'])
    history = model.fit_generator(train_data_gen,
                                  steps_per_epoch=6003 // batch_size,
                                  epochs=epochs,
                                  validation_data=val_data_gen,
                                  validation_steps=2001 // batch_size)

    ###summary of the model after training
    print('\nhistory dict:', history.history)
    ###saving the model and weights as json and h5 files
    json_str = model.to_json()
    with open(
            r'C:\Users\evgen\Desktop\n_models\saved_model_250ep_w_dropout_data_rich.json',
            'w') as outfile:
        json.dump(json.loads(json_str), outfile,
                  indent=4)  # Save the json on a file
        model.save_weights(
            r"C:\Users\evgen\Desktop\n_models\weights_250ep_w_dropout_data_rich.h5",
            save_format="h5")
    print("Saved model to disk")
    ###evaluating the model on the test data
    print('\n# Evaluate on test data')
    results_test = model.evaluate_generator(test_data_gen)
    print('test loss, test acc:', results_test)
    ###plotting the training and validation curves
    acc = history.history['accuracy']
    val_acc = history.history['val_accuracy']
    loss = history.history['loss']
    val_loss = history.history['val_loss']
    epochs_range = range(epochs)
    plt.figure(figsize=(6, 6))
    plt.subplot(1, 2, 1)
    plt.plot(epochs_range, acc, label='Training Accuracy')
    plt.plot(epochs_range, val_acc, label='Validation Accuracy')
    plt.legend(loc='lower right')
    plt.title('Training and Validation Accuracy')
    plt.subplot(1, 2, 2)
    plt.plot(epochs_range, loss, label='Training Loss')
    plt.plot(epochs_range, val_loss, label='Validation Loss')
    plt.legend(loc='upper right')
    plt.title('Training and Validation Loss')
    plt.show()
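
Rather than hard-coding the dataset sizes (6003 and 2001 above), the step
counts could be read from the generators; a sketch, assuming the
flow_from_directory iterators created earlier:

# A DirectoryIterator exposes the number of discovered images as .samples.
steps_per_epoch = train_data_gen.samples // batch_size
validation_steps = val_data_gen.samples // batch_size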
Example no. 5
# Assumed imports; load_datasets, ToBinaryGenerator, evaluate and
# TRANSFORMED_DATASET_NAMES are project-specific helpers.
import os
from typing import List, Union

import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import models
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.layers import (Activation, BatchNormalization, Conv2D,
                                     Dense, Dropout, Flatten, MaxPooling2D)
from tensorflow.keras.models import Sequential


def train_binary_model(path,
                       epochs=100,
                       ft_epochs=100,
                       learning_rate=0.01,
                       classes_to_match: Union[int, List[int]] = 0,
                       classes_to_drop: Union[int, List[int]] = None):
    """
    Train a smaller binary model for empty/not empty classification and save it under the given path. The method first
    loads the datasets using :py:doc:`generate_datasets.py <training.generate_datasets.py>` methods. Then the model is
    trained, saved and finally evaluated.

    Training is run in two steps: the model is first trained with synthetic data and then finetuned with real data. Early
    stopping is used to prevent overfitting.

    Args:
        path(str): The directory to save the trained model to.
        epochs(int): The number of epochs. (Default value = 100)
        ft_epochs(int): The number of finetuning epochs. (Default value = 100)
        learning_rate(float): The learning rate for the Adadelta optimizer. (Default value = 0.01)
        classes_to_match(Union[int, List[int]]): The classes to match as class 1. (Default value = 0)
        classes_to_drop(Union[int, List[int]]): The classes to drop from the dataset. (Default value = None)

    Returns:
        None

    """
    os.makedirs(path, exist_ok=True)
    concat_machine, concat_hand, concat_out, real_training, real_validation = load_datasets(
        TRANSFORMED_DATASET_NAMES)

    batch_size = 192
    train_generator = ToBinaryGenerator(concat_machine.train,
                                        concat_hand.train,
                                        concat_out.train,
                                        classes_to_match=classes_to_match,
                                        classes_to_drop=classes_to_drop,
                                        batch_size=batch_size,
                                        shuffle=True,
                                        truncate=True)

    dev_generator = ToBinaryGenerator(concat_machine.test,
                                      concat_hand.test,
                                      concat_out.test,
                                      classes_to_match=classes_to_match,
                                      classes_to_drop=classes_to_drop,
                                      batch_size=batch_size,
                                      shuffle=True,
                                      truncate=True)

    ft_train_generator = ToBinaryGenerator(real_training.train,
                                           classes_to_match=classes_to_match,
                                           classes_to_drop=classes_to_drop,
                                           batch_size=batch_size,
                                           shuffle=True,
                                           truncate=True)

    ft_dev_generator = ToBinaryGenerator(real_training.test,
                                         classes_to_match=classes_to_match,
                                         classes_to_drop=classes_to_drop,
                                         batch_size=batch_size,
                                         shuffle=True,
                                         truncate=True)

    test_generator = ToBinaryGenerator(real_validation.test,
                                       classes_to_match=classes_to_match,
                                       classes_to_drop=classes_to_drop,
                                       batch_size=batch_size,
                                       shuffle=False)

    # Run training on the GPU
    with tf.device('/GPU:0'):
        # Keras Model
        print("Creating model..")
        model = Sequential()
        model.add(Conv2D(16, (5, 5), strides=2, input_shape=(28, 28, 1)))
        model.add(BatchNormalization())
        model.add(Activation('relu'))
        model.add(MaxPooling2D(pool_size=(4, 4)))
        model.add(Conv2D(32, (2, 2)))
        model.add(BatchNormalization())
        model.add(Activation('relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Flatten())  # 32
        model.add(Dense(64, activation='relu'))
        model.add(Dropout(0.25))
        model.add(Dense(64, activation='relu'))
        model.add(Dense(1, activation='sigmoid'))

        # def mean_pred(_, y):
        #     return keras.backend.mean(y)

        print("Compiling model..")
        model.compile(
            # The final layer already applies a sigmoid, so the loss receives
            # probabilities, not logits.
            loss=keras.losses.BinaryCrossentropy(from_logits=False),
            optimizer=keras.optimizers.Adadelta(learning_rate),
            metrics=[keras.metrics.binary_accuracy, 'mse'],
        )
        print(model.summary())

        print("Training model")
        model.fit_generator(train_generator,
                            validation_data=dev_generator,
                            epochs=epochs,
                            callbacks=[
                                # binary_accuracy is the tracked metric, so
                                # the logged key is val_binary_accuracy.
                                EarlyStopping(monitor='val_binary_accuracy',
                                              restore_best_weights=True,
                                              patience=3,
                                              min_delta=0.0001),
                            ])

        print("Finetuning model")
        model.fit_generator(ft_train_generator,
                            # Validate on the held-out finetuning split, not
                            # on the training generator itself.
                            validation_data=ft_dev_generator,
                            epochs=ft_epochs,
                            callbacks=[
                                EarlyStopping(monitor='val_binary_accuracy',
                                              restore_best_weights=True,
                                              patience=3,
                                              min_delta=0.0001),
                            ])

        models.save_model(model,
                          os.path.join(path, "model.h5"),
                          save_format='h5')

        print("Evaluating")
        print(
            "Training dev",
            list(
                zip(model.metrics_names,
                    model.evaluate_generator(dev_generator))))
        print(
            "Finetuning dev",
            list(
                zip(model.metrics_names,
                    model.evaluate_generator(ft_dev_generator))))
        print(
            "Test",
            list(
                zip(model.metrics_names,
                    model.evaluate_generator(test_generator))))
        evaluate(model, test_generator, binary=True)
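
A hypothetical invocation of train_binary_model (the path and class indices
below are illustrative, not from the original project):

# Match class 0 as the positive class, drop classes 10 and 11.
train_binary_model("models/binary/",
                   epochs=50,
                   ft_epochs=20,
                   classes_to_match=0,
                   classes_to_drop=[10, 11])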
Example no. 6
    RNN(HIDDEN_SIZE),
    layers.RepeatVector(3),
    RNN(128, return_sequences=True),
    layers.TimeDistributed(layers.Dense(len(CHARS), activation='softmax'))
])

model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
model.summary()

train_generator = encode_generator(training_generator, BATCH_SIZE)

hist = model.fit_generator(train_generator,
                           steps_per_epoch=STEPS_PER_EPOCH,
                           epochs=EPOCHS,
                           verbose=1,
                           use_multiprocessing=True,
                           workers=2,  # Keras expects a non-negative worker count
                           callbacks=callbacks,
                           validation_data=train_generator, validation_steps=30)

score = model.evaluate_generator(encode_generator(
    test_generator, BATCH_SIZE), steps=STEPS_PER_EPOCH)
print(score)

config = build_config(MODEL_NAME, LEARNING_RATE, BATCH_SIZE,
                      EPOCHS, STEPS_PER_EPOCH, score[0], score[1])
wrapper = ModelWrapper(model, config=config)
wrapper.save_model()
Example no. 7
                                          class_mode="categorical", batch_size=32, subset="validation")

mobilenet = MobileNetV2(weights="imagenet", include_top=False,
                        input_shape=(150, 150, 3))
for layer in mobilenet.layers:
    layer.trainable = False

model = Sequential()
model.add(mobilenet)
model.add(Flatten())
# A 2-unit softmax output pairs with the categorical_crossentropy loss below.
model.add(Dense(2, activation="softmax"))
model.compile(optimizer="adam", loss="categorical_crossentropy",
              metrics=["accuracy"])
checkpoint = ModelCheckpoint("mobilenet_facemask.h5", monitor="val_accuracy",
                             save_best_only=True, verbose=1)
earlystop = EarlyStopping(monitor="val_accuracy", patience=5, verbose=1)
# len() of a Keras generator already equals the number of batches per epoch.
history = model.fit_generator(generator=train, steps_per_epoch=len(train),
                              validation_data=valid,
                              validation_steps=len(valid),
                              callbacks=[checkpoint, earlystop], epochs=15)
model.evaluate_generator(valid)
model.save("face_mask.h5")
pred = model.predict_classes(valid)
pred[:15]
#check

#without mask
mask = "../input/with-and-without-mask/"
plt.figure(figsize=(8, 7))
label = {0: "With Mask", 1: "Without Mask"}
color_label = {0: (0, 255, 0), 1: (0, 0, 255)}
cascade = cv2.CascadeClassifier("../input/frontalface/haarcascade_frontalface_default.xml")
count = 0
i = "../input/with-and-without-mask/mask9.jpg"

frame = cv2.imread(i)
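
The excerpt ends right after reading the frame. A sketch of the likely next
steps, assuming the 150x150 input size and the label/color maps defined above:

gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
faces = cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=4)
for (x, y, w, h) in faces:
    # Crop each detected face, rescale like the training data, classify.
    face = cv2.resize(frame[y:y + h, x:x + w], (150, 150)) / 255.0
    cls = int(model.predict(face.reshape(1, 150, 150, 3)).argmax())
    cv2.rectangle(frame, (x, y), (x + w, y + h), color_label[cls], 2)
    cv2.putText(frame, label[cls], (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX,
                0.8, color_label[cls], 2)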
Example no. 8
              metrics=[tf.keras.metrics.MeanAbsolutePercentageError()])
model.summary()

##### TRAINING #####
history = model.fit_generator(train_gen,
                              epochs=100,
                              verbose=2,
                              shuffle=False,
                              validation_data=test_gen)

##### PLOTTING LOSS ######
plt.plot(history.history['loss'], label='train')
plt.plot(history.history['val_loss'], label='test')
plt.legend()
plt.show()
score = model.evaluate_generator(test_gen, verbose=1)
print()
print('Test loss:', score[0])
# The compiled metric is MeanAbsolutePercentageError, so score[1] is MAPE.
print('Test MAPE:', score[1])
print()

###### RESHAPE ACTUAL DATA #######
actual_train = reshape_actual(train_gen)
predictions_train = model.predict_generator(train_gen, verbose=0)

##### RMSE FOR TRAIN #####
rmse_train = math.sqrt(mean_squared_error(actual_train, predictions_train))
print()
print(rmse_train)
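
As a design note, scikit-learn 0.22+ can return the RMSE directly, which
avoids the explicit square root:

# Equivalent to math.sqrt(mean_squared_error(...)).
rmse_train = mean_squared_error(actual_train, predictions_train,
                                squared=False)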
Example no. 9
# Assumed imports for this snippet.
from tensorflow.keras.layers import (Activation, AveragePooling3D,
                                     BatchNormalization, Conv3D, Dense,
                                     Dropout, Flatten, MaxPooling3D,
                                     ZeroPadding3D)
from tensorflow.keras.models import Sequential
from tensorflow.keras.optimizers import Adam


class C3d_Model():
    def __init__(self, learning_rate=0.0001, num_features=4096, L=16):
        self.model = Sequential()

        # Downsample the 224x224 input frames to 112x112. The input shape is
        # declared on this first layer; Keras only honors input_shape on the
        # first layer of a Sequential model.
        self.model.add(
            AveragePooling3D((1, 2, 2),
                             strides=(1, 2, 2),
                             input_shape=(L, 224, 224, 3)))

        self.model.add(ZeroPadding3D((1, 1, 1)))
        self.model.add(Conv3D(64, (3, 3, 3), activation='relu', name='conv1a'))
        self.model.add(MaxPooling3D((1, 2, 2), strides=(1, 2, 2)))

        self.model.add(ZeroPadding3D((1, 1, 1)))
        self.model.add(Conv3D(128, (3, 3, 3), activation='relu',
                              name='conv2a'))
        self.model.add(MaxPooling3D((2, 2, 2), strides=(2, 2, 2)))

        self.model.add(ZeroPadding3D((1, 1, 1)))
        self.model.add(Conv3D(256, (3, 3, 3), activation='relu',
                              name='conv3a'))
        self.model.add(ZeroPadding3D((1, 1, 1)))
        self.model.add(Conv3D(256, (3, 3, 3), activation='relu',
                              name='conv3b'))
        self.model.add(MaxPooling3D((2, 2, 2), strides=(2, 2, 2)))

        self.model.add(ZeroPadding3D((1, 1, 1)))
        self.model.add(Conv3D(512, (3, 3, 3), activation='relu',
                              name='conv4a'))
        self.model.add(ZeroPadding3D((1, 1, 1)))
        self.model.add(Conv3D(512, (3, 3, 3), activation='relu',
                              name='conv4b'))
        self.model.add(MaxPooling3D((2, 2, 2), strides=(2, 2, 2)))

        self.model.add(ZeroPadding3D((1, 1, 1)))
        self.model.add(Conv3D(512, (3, 3, 3), activation='relu',
                              name='conv5a'))
        self.model.add(ZeroPadding3D((1, 1, 1)))
        self.model.add(Conv3D(512, (3, 3, 3), activation='relu',
                              name='conv5b'))
        self.model.add(MaxPooling3D((2, 2, 2), strides=(2, 2, 2)))

        self.model.add(Flatten())
        self.model.add(
            Dense(num_features,
                  name='fc6',
                  kernel_initializer='glorot_uniform'))

        self.model.add(
            BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001))
        self.model.add(Activation('relu'))
        self.model.add(Dropout(0.1))

        self.model.add(
            Dense(num_features,
                  name='fc7',
                  kernel_initializer='glorot_uniform'))
        self.model.add(
            BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001))
        self.model.add(Activation('relu'))
        self.model.add(Dropout(0.2))

        self.model.add(
            Dense(1, name='predictions', kernel_initializer='glorot_uniform'))
        self.model.add(Activation('sigmoid'))

        adam = Adam(lr=learning_rate, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
        self.model.compile(optimizer=adam,
                           loss='binary_crossentropy',
                           metrics=['accuracy'])

        # self.model.build((None,L,224,224,3))
    def summary(self):
        self.model.summary()  # print the model architecture

    def get_model(self):
        return self.model

    def train(self,
              train_generator,
              valid_generator,
              train_file,
              valid_file,
              batch_size,
              callbacks_list,
              epoch=50,
              verbose=2):
        self.model.fit(train_generator,
                       steps_per_epoch=len(train_file) // batch_size,
                       epochs=epoch,
                       validation_data=valid_generator,
                       validation_steps=len(valid_file) // batch_size,
                       callbacks=callbacks_list,
                       verbose=2)

    def load_weights(self, h5_path):
        self.model.load_weights(r'%s' % h5_path)

    def test(self, test_generator, steps, verbose=2):
        return self.model.evaluate_generator(test_generator,
                                             steps=steps,
                                             verbose=verbose)
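
A hypothetical usage of the class, assuming 16-frame 224x224 RGB clips:

c3d = C3d_Model(learning_rate=1e-4, L=16)
c3d.summary()
model = c3d.get_model()  # the underlying Keras model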
Example no. 10
    Dense(2, activation=tf.nn.softmax)
])

model.summary()

model.compile(optimizer=Adam(lr=0.001),
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])

def scheduler(epoch):
    if epoch <= 2:
        return 0.001
    elif epoch <= 15:
        return 0.0001
    else:
        return 0.00001

lr_callbacks = tf.keras.callbacks.LearningRateScheduler(scheduler)

# from keras.models import load_model
# model = load_model('model.h5')

model.fit_generator(train,
                    epochs=50,
                    callbacks=[lr_callbacks])

model.save("model.h5")

model.evaluate_generator(train)

model.predict_generator(train)
Example no. 11
# Assumed imports for this snippet.
from tensorflow.keras.layers import (Activation, BatchNormalization, Conv2D,
                                     Dense, Dropout, Flatten, MaxPooling2D,
                                     ZeroPadding2D)
from tensorflow.keras.models import Sequential
from tensorflow.keras.optimizers import Adam


class VGG_Model():
    def __init__(self, learning_rate=0.0001, num_features=4096, L=10):
        self.model = Sequential()
        # self.model = multi_gpu_model(self.model,gpus=2)
        self.model.add(ZeroPadding2D((1, 1), input_shape=(224, 224, 2 * L)))
        self.model.add(Conv2D(64, (3, 3), activation='relu', name='conv1_1'))
        self.model.add(ZeroPadding2D((1, 1)))
        self.model.add(Conv2D(64, (3, 3), activation='relu', name='conv1_2'))
        self.model.add(MaxPooling2D((2, 2), strides=(2, 2)))

        self.model.add(ZeroPadding2D((1, 1)))
        self.model.add(Conv2D(128, (3, 3), activation='relu', name='conv2_1'))
        self.model.add(ZeroPadding2D((1, 1)))
        self.model.add(Conv2D(128, (3, 3), activation='relu', name='conv2_2'))
        self.model.add(MaxPooling2D((2, 2), strides=(2, 2)))

        self.model.add(ZeroPadding2D((1, 1)))
        self.model.add(Conv2D(256, (3, 3), activation='relu', name='conv3_1'))
        self.model.add(ZeroPadding2D((1, 1)))
        self.model.add(Conv2D(256, (3, 3), activation='relu', name='conv3_2'))
        self.model.add(ZeroPadding2D((1, 1)))
        self.model.add(Conv2D(256, (3, 3), activation='relu', name='conv3_3'))
        self.model.add(MaxPooling2D((2, 2), strides=(2, 2)))

        self.model.add(ZeroPadding2D((1, 1)))
        self.model.add(Conv2D(512, (3, 3), activation='relu', name='conv4_1'))
        self.model.add(ZeroPadding2D((1, 1)))
        self.model.add(Conv2D(512, (3, 3), activation='relu', name='conv4_2'))
        self.model.add(ZeroPadding2D((1, 1)))
        self.model.add(Conv2D(512, (3, 3), activation='relu', name='conv4_3'))
        self.model.add(MaxPooling2D((2, 2), strides=(2, 2)))

        self.model.add(ZeroPadding2D((1, 1)))
        self.model.add(Conv2D(512, (3, 3), activation='relu', name='conv5_1'))
        self.model.add(ZeroPadding2D((1, 1)))
        self.model.add(Conv2D(512, (3, 3), activation='relu', name='conv5_2'))
        self.model.add(ZeroPadding2D((1, 1)))
        self.model.add(Conv2D(512, (3, 3), activation='relu', name='conv5_3'))
        self.model.add(MaxPooling2D((2, 2), strides=(2, 2)))

        self.model.add(Flatten())

        # self.model.add(LSTM(4096, input_shape=(train_X.shape[1], train_X.shape[2])))
        # self.model.add(Dense(1))
        # self.model.compile(loss='mae', optimizer='adam')

        self.model.add(
            Dense(4096, name='fc6', kernel_initializer='glorot_uniform'))

        self.model.add(
            BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001))
        self.model.add(Activation('relu'))
        self.model.add(Dropout(0.1))
        self.model.add(
            Dense(4096, name='fc7', kernel_initializer='glorot_uniform'))
        self.model.add(
            BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001))
        self.model.add(Activation('relu'))
        self.model.add(Dropout(0.2))
        self.model.add(
            Dense(1, name='predictions', kernel_initializer='glorot_uniform'))
        self.model.add(Activation('sigmoid'))

        adam = Adam(lr=learning_rate, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
        self.model.compile(optimizer=adam,
                           loss='binary_crossentropy',
                           metrics=['accuracy'])

    def summary(self):
        self.model.summary()  # print the model architecture

    def get_model(self):
        return self.model

    def train(self,
              train_generator,
              valid_generator,
              train_file,
              valid_file,
              batch_size,
              callbacks_list,
              epoch=50,
              verbose=2):
        self.model.fit(train_generator,
                       steps_per_epoch=len(train_file) // batch_size,
                       epochs=epoch,
                       validation_data=valid_generator,
                       validation_steps=len(valid_file) // batch_size,
                       callbacks=callbacks_list,
                       verbose=2)

    def load_weights(self, h5_path):
        self.model.load_weights(r'%s' % h5_path)

    def test(self, test_generator, steps, verbose=2):
        return self.model.evaluate_generator(test_generator,
                                             steps=steps,
                                             verbose=verbose)
Example no. 12
        batch_size=b,
        class_mode='sparse',
        shuffle=True,
        subset='validation')
    model.compile(loss='sparse_categorical_crossentropy',
                  optimizer='Adam',
                  metrics=['accuracy'])

    step_size_train = train_generator.n // train_generator.batch_size

    model.fit_generator(generator=train_generator,
                        steps_per_epoch=step_size_train,
                        epochs=eps,
                        verbose=1)

    scores = model.evaluate_generator(generator=validation_generator,
                                      verbose=1)

    print("%s: %.2f%%" % (model.metrics_names[1], scores[1] * 100))

    cvscores.append(scores[1] * 100)

print("%.2f%% (+/- %.2f%%)" % (np.mean(cvscores), np.std(cvscores)))

print("%s: %.2f%%" % (model.metrics_names[1], scores[1] * 100))

cvscores.append(scores[1] * 100)

print("Starting loading test data and testing")

images = []
labels = []