示例#1
0
def demo_cnn_aug():
    """Train the CNN with horizontal-flip augmentation and per-sample
    weights, save the model, and write a submission for the test set.

    NOTE(review): depends on module-level helpers (load2d,
    flatten_except_1dim, train_test_split, CNN,
    FlippedImageDataGenerator, predict_single, prepare_submission)
    and the EPOCH constant defined elsewhere in the project.
    """
    images, targets, weights = load2d()
    targets = flatten_except_1dim(targets, ndim=3)
    weights = flatten_except_1dim(weights, ndim=2)
    (img_train, img_val,
     tgt_train, tgt_val,
     wt_train, wt_val) = train_test_split(images, targets, weights,
                                          test_size=0.2, random_state=42)

    model = CNN()
    # Keypoint index pairs that exchange places under a horizontal flip.
    flip_indices = [
        (0, 2), (1, 3),
        (4, 8), (5, 9), (6, 10), (7, 11),
        (12, 16), (13, 17), (14, 18), (15, 19),
        (22, 24), (23, 25),
    ]
    history = model.fit_generator(
        FlippedImageDataGenerator(img_train, tgt_train, wt_train, 32,
                                  flip_indices=flip_indices),
        epochs=EPOCH,
        validation_data=(img_val, tgt_val, wt_val),
    )
    model.save('./models/cnn_aug_weighted_model.h5')
    img_test, _, _ = load2d(test=True)
    predictions = predict_single(model, img_test)
    prepare_submission(predictions, "cnn_aug_weighted")
示例#2
0
    def train_CNN(data_portion=X.shape[0], epochs=20, ensemble=False):
        """Build and train a CNN on the first `data_portion` samples.

        Uses the augmenting generator when the enclosing `augment` flag is
        set; otherwise trains directly on the arrays with a 30% validation
        split.  Returns the trained model.

        NOTE(review): relies on closure variables (X, im_shape, augment,
        datagen, train_X/train_y, val_X/val_y, annealer, plt) defined
        outside this view; the default for `data_portion` is evaluated
        once, at definition time.
        """
        net = CNN(im_shape)
        batch_size = 100

        if augment:
            hist = net.fit_generator(datagen.flow(train_X, train_y),
                                     epochs=epochs,
                                     steps_per_epoch=data_portion // 32,
                                     verbose=2,
                                     validation_data=[val_X, val_y],
                                     callbacks=[annealer])
        else:
            hist = net.fit(train_X[:data_portion],
                           train_y[:data_portion],
                           batch_size,
                           epochs,
                           verbose=2,
                           validation_split=0.3,
                           callbacks=[annealer])

        if not ensemble:
            # For a single (non-ensemble) run, plot the validation-accuracy
            # curve, titled with its maximum, and save it to disk.
            fig = plt.figure(figsize=(15, 5))
            plt.plot(hist.history['val_acc'])
            plt.xlabel('epoch')
            plt.ylabel('val_accuracy')
            plt.xlim(0, epochs)
            plt.grid()
            plt.ylim(.97, 1)
            plt.title('Max val_acc: ' + str(max(hist.history['val_acc'])))
            fig.savefig('plots/testing.png')

        return net
示例#3
0
    # Keep only the best weights seen so far, judged by validation
    # accuracy (mode='max' — higher is better); weights-only, quiet.
    checkpoint = ModelCheckpoint(paramfile,
                                 monitor='val_acc',
                                 save_best_only=True,
                                 save_weights_only=True,
                                 verbose=0,
                                 mode='max')
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    print('Start training...')
    # Non-DNN models train from the augmenting generator; the plain DNN
    # trains directly on the in-memory arrays.
    if args.model != 'DNN':
        model.fit_generator(datagen.flow(train_x,
                                         train_y,
                                         batch_size=args.batch),
                            # presumably 5x the dataset's batch count per
                            # epoch — verify against how num_batches is set
                            steps_per_epoch=5 * num_batches,
                            epochs=args.epoch,
                            verbose=1,
                            validation_data=(valid_x, valid_y),
                            workers=8,
                            callbacks=[checkpoint, earlystopping, csvlogger])
    else:
        model.fit(train_x,
                  train_y,
                  batch_size=args.batch,
                  epochs=args.epoch,
                  verbose=1,
                  validation_data=(valid_x, valid_y),
                  shuffle=True,
                  callbacks=[checkpoint, earlystopping, csvlogger])

if args.test == True:
示例#4
0
                             monitor='val_acc',
                             verbose=1,
                             save_best_only=True,
                             mode='max')
# Shrink the learning rate (x0.35) after 3 epochs without val_loss improvement.
lr_reducer = ReduceLROnPlateau(monitor='val_loss',
                               factor=0.35,
                               patience=3,
                               verbose=1)

# lr_finder = LRFinder(min_lr=1e-6, max_lr=1e-2, steps_per_epoch=data_portion//batch_size, epochs=epochs)

model = CNN(input_shape)
# 80/20 train/validation split of `data_portion` samples, in whole batches.
# NOTE(review): `checkpoint` is defined above this chunk — its callback is
# reused here alongside the LR reducer.
history = model.fit_generator(
    train_generator,
    steps_per_epoch=int(data_portion * 0.8) // batch_size,
    epochs=epochs,
    validation_data=val_generator,
    validation_steps=int(data_portion * 0.2) // batch_size,
    callbacks=[lr_reducer, checkpoint],
    verbose=1)

print(model.summary())
model.save('my_model2.h5')
# lr_finder.plot_loss()

# Left panel: validation accuracy over training, titled with its maximum.
plt.subplot(1, 2, 1)
plt.plot(history.history['val_acc'])
plt.xlim(0, epochs)
plt.ylim(0.5, 1)
plt.xlabel('Epoch')
plt.ylabel('Validation Accuracy')
plt.title(max(history.history['val_acc']))
示例#5
0
    # Augment training images with small rotations, shifts, horizontal
    # flips, shear and zoom; validation images pass through unmodified.
    train_generator = ImageDataGenerator(rotation_range=10,
                                         width_shift_range=0.05,
                                         height_shift_range=0.05,
                                         horizontal_flip=True,
                                         shear_range=0.2,
                                         zoom_range=0.2).flow(
                                             x_train,
                                             y_train,
                                             batch_size=opt.batch_size)
    valid_generator = ImageDataGenerator().flow(x_valid,
                                                y_valid,
                                                batch_size=opt.batch_size)
    history_fer2013 = model.fit_generator(
        train_generator,
        steps_per_epoch=len(y_train) // opt.batch_size,
        epochs=opt.epochs,
        validation_data=valid_generator,
        validation_steps=len(y_valid) // opt.batch_size,
        callbacks=callback)
    his = history_fer2013

    # test: report top-1 accuracy on the held-out test set.
    # NOTE(review): assumes y_test holds integer class labels comparable
    # to argmax output — confirm against the dataset loader.
    pred = model.predict(x_test)
    pred = np.argmax(pred, axis=1)
    # Fixed misspelled output label ("accuacy" -> "accuracy").
    print("test accuracy",
          np.sum(pred.reshape(-1) == y_test.reshape(-1)) / y_test.shape[0])

elif opt.dataset == "jaffe":
    expressions, x, y = Jaffe().gen_train()
    y = to_categorical(y).reshape(y.shape[0], -1)
    # 为了统一几个数据集,必须增加一列为0的