Example #1
import numpy as np
import tensorflow as tf
from sklearn.utils import class_weight as cw
from tensorflow.keras.callbacks import EarlyStopping

# Compute class weights to deal with imbalanced data. compute_class_weight
# returns an array, but Keras's fit() expects a dict mapping class index to
# weight, so convert it (and avoid shadowing the sklearn module).
weights = cw.compute_class_weight(class_weight='balanced',
                                  classes=np.unique(trainY),
                                  y=trainY)
class_weight = dict(enumerate(weights))

BATCH_SIZE = 64
learning_rate = 0.005
nb_epochs = 400
dropout_rate = 0.7

optimizer = tf.keras.optimizers.Adamax(learning_rate)
loss_object = tf.keras.losses.SparseCategoricalCrossentropy()

model = CNN(dropout_rate=dropout_rate)
model.compile(optimizer=optimizer,
              loss=loss_object,
              metrics=['sparse_categorical_accuracy'])

# Use early stopping to avoid overfitting
earlystopping = EarlyStopping(monitor='val_loss', min_delta=1e-4, patience=10)


# Train the model on the original dataset
def train():
    model.fit(trainX,
              trainY,
              batch_size=BATCH_SIZE,
              epochs=nb_epochs,
              validation_data=(valX, valY),
              class_weight=class_weight,
              callbacks=[earlystopping])
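
Both this example and the next compile a `CNN` model whose definition is not part of the excerpt. As a point of reference only, here is a minimal hypothetical stand-in, assuming a small subclassed `tf.keras.Model` image classifier (the project's real architecture is not shown):

class CNN(tf.keras.Model):
    """Hypothetical stand-in; the original architecture is not shown."""
    def __init__(self, dropout_rate=0.5, n_classes=10):
        super().__init__()
        self.conv1 = tf.keras.layers.Conv2D(32, 3, activation='relu')
        self.pool = tf.keras.layers.MaxPooling2D()
        self.flatten = tf.keras.layers.Flatten()
        self.dropout = tf.keras.layers.Dropout(dropout_rate)
        # Softmax output pairs with SparseCategoricalCrossentropy(from_logits=False)
        self.dense = tf.keras.layers.Dense(n_classes, activation='softmax')

    def call(self, x, training=False):
        x = self.pool(self.conv1(x))
        x = self.dropout(self.flatten(x), training=training)
        return self.dense(x)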
Example #2
def create_model_optimizer(optimizer=None):
    # Default arguments are evaluated only once, so a default optimizer
    # instance would be shared (and its state mutated) across every model
    # built by this factory. Create a fresh optimizer per call instead.
    if optimizer is None:
        optimizer = tf.keras.optimizers.SGD(0.001)
    model = CNN()
    loss_object = tf.keras.losses.SparseCategoricalCrossentropy()
    model.compile(optimizer=optimizer, loss=loss_object, metrics=['accuracy'])
    return model
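
A short usage sketch (hypothetical calls, not from the original project): with the fix above, each invocation builds its own optimizer, so models trained side by side do not share optimizer state.

model_a = create_model_optimizer()                                 # fresh default SGD
model_b = create_model_optimizer(tf.keras.optimizers.Adam(1e-3))   # caller-supplied optimizer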
Example #3
File: main.py Project: hck0821/ML2017FALL
    )

    num_batches = (train_x.shape[0] // args.batch) + 1
    csvlogger = CSVLogger(loggerfile)
    earlystopping = EarlyStopping(monitor='val_acc',
                                  patience=10,
                                  verbose=1,
                                  mode='max')
    checkpoint = ModelCheckpoint(paramfile,
                                 monitor='val_acc',
                                 save_best_only=True,
                                 save_weights_only=True,
                                 verbose=0,
                                 mode='max')
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    print('Start training...')
    if args.model != 'DNN':
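        # steps_per_epoch = 5 * num_batches: each epoch draws roughly five
        # augmented passes over the training set from the generator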
        model.fit_generator(datagen.flow(train_x,
                                         train_y,
                                         batch_size=args.batch),
                            steps_per_epoch=5 * num_batches,
                            epochs=args.epoch,
                            verbose=1,
                            validation_data=(valid_x, valid_y),
                            workers=8,
                            callbacks=[checkpoint, earlystopping, csvlogger])
    else:
        model.fit(train_x,
                  train_y,
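
Example #3 feeds `datagen.flow(...)` into `fit_generator`, but the excerpt cuts off before showing how `datagen` was built. A minimal sketch of an augmentation generator that fits that call, assuming Keras's `ImageDataGenerator` (the project's actual augmentation settings are not shown):

from keras.preprocessing.image import ImageDataGenerator

# Hypothetical augmentation settings; the originals are not in the excerpt.
datagen = ImageDataGenerator(rotation_range=15,
                             width_shift_range=0.1,
                             height_shift_range=0.1,
                             horizontal_flip=True)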
Example #4
    _, x_valid, y_valid = Fer2013().gen_valid()
    _, x_test, y_test = Fer2013().gen_test()
    # one-hot encode the targets
    y_train = to_categorical(y_train).reshape(y_train.shape[0], -1)
    y_valid = to_categorical(y_valid).reshape(y_valid.shape[0], -1)
    # to keep several datasets consistent, append an all-zero extra column
    y_train = np.hstack((y_train, np.zeros((y_train.shape[0], 1))))
    y_valid = np.hstack((y_valid, np.zeros((y_valid.shape[0], 1))))
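    # Assumed shapes: FER2013 has 7 emotion classes, so to_categorical yields
    # (N, 7) labels; the appended zero column pads them to (N, 8), matching
    # the n_classes=8 model head below.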
    print(
        "loaded fer2013 dataset successfully: {} train images and {} valid images"
        .format(y_train.shape[0], y_valid.shape[0]))

    model = CNN(input_shape=(48, 48, 1), n_classes=8)
    sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(optimizer=sgd,
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    callback = [
        #     EarlyStopping(monitor='val_loss', patience=50, verbose=True),
        #     ReduceLROnPlateau(monitor='lr', factor=0.1, patience=20, verbose=True),
        ModelCheckpoint('./models/cnn_best_weights.h5',
                        monitor='val_acc',
                        verbose=True,
                        save_best_only=True,
                        save_weights_only=True)
    ]

    train_generator = ImageDataGenerator(rotation_range=10,
                                         width_shift_range=0.05,
                                         height_shift_range=0.05,
                                         horizontal_flip=True,