Esempio n. 1
0
def demo_cnn():
    """Train the sample-weighted CNN on the 2-D keypoint data, persist it,
    and write a submission file from predictions on the test set."""
    features, targets, weights = load2d()
    # Collapse targets/weights so each sample becomes a flat row for Keras.
    targets = flatten_except_1dim(targets, ndim=3)
    weights = flatten_except_1dim(weights, ndim=2)
    print(features.shape)
    print(targets.shape)
    print(weights.shape)

    model = CNN()
    hist = model.fit(
        features,
        targets,
        sample_weight=weights,
        epochs=EPOCH,
        batch_size=128,
        validation_split=0.2,
    )
    # Loss plotting intentionally disabled (see plot_loss helper).
    model.save('./models/cnn_weighted_model.h5')

    X_test, _, _ = load2d(test=True)
    # Per-sample plotting of predictions intentionally disabled.
    df_y_pred = predict_single(model, X_test)
    prepare_submission(df_y_pred, "cnn_weighted")
Esempio n. 2
0
    def train_CNN(data_portion=X.shape[0], epochs=20, ensemble=False):
        """Train a CNN on the closed-over dataset and return the model.

        When `augment` is set, trains from the augmenting generator;
        otherwise fits directly on the first `data_portion` samples.
        Unless building an ensemble member, also saves a val_acc plot.
        """
        model = CNN(im_shape)
        batch_size = 100

        if augment:
            history = model.fit_generator(
                datagen.flow(train_X, train_y),
                epochs=epochs,
                steps_per_epoch=data_portion // 32,
                verbose=2,
                validation_data=[val_X, val_y],
                callbacks=[annealer])
        else:
            history = model.fit(
                train_X[:data_portion],
                train_y[:data_portion],
                batch_size=batch_size,
                epochs=epochs,
                verbose=2,
                validation_split=0.3,
                callbacks=[annealer])

        if not ensemble:
            # Plot validation accuracy per epoch and save it for inspection.
            val_acc_curve = history.history['val_acc']
            fig = plt.figure(figsize=(15, 5))
            plt.plot(val_acc_curve)
            plt.xlabel('epoch')
            plt.ylabel('val_accuracy')
            plt.xlim(0, epochs)
            plt.grid()
            plt.ylim(.97, 1)
            plt.title('Max val_acc: ' + str(max(val_acc_curve)))
            fig.savefig('plots/testing.png')

        return model
Esempio n. 3
0
    # Training entry point: branch on the selected architecture.
    print('Start training...')
    if args.model != 'DNN':
        # Non-DNN path: train from the augmenting generator.
        # NOTE(review): fit_generator is deprecated in TF2 Keras (Model.fit
        # accepts generators now) — kept as-is here.
        model.fit_generator(datagen.flow(train_x,
                                         train_y,
                                         batch_size=args.batch),
                            # 5x the natural number of batches per epoch —
                            # presumably to amplify augmentation; confirm intent.
                            steps_per_epoch=5 * num_batches,
                            epochs=args.epoch,
                            verbose=1,
                            validation_data=(valid_x, valid_y),
                            workers=8,
                            callbacks=[checkpoint, earlystopping, csvlogger])
    else:
        # DNN path: plain fit on the in-memory arrays.
        model.fit(train_x,
                  train_y,
                  batch_size=args.batch,
                  epochs=args.epoch,
                  verbose=1,
                  validation_data=(valid_x, valid_y),
                  shuffle=True,
                  callbacks=[checkpoint, earlystopping, csvlogger])

if args.test == True:  # NOTE(review): `if args.test:` is the idiomatic form
    print('\nLoad model parameters...\n')
    # Restore the weights saved during training (by the checkpoint callback,
    # presumably — confirm where `paramfile` is written).
    model.load_weights(paramfile)

    print('Start testing...\n')
    pred = model.predict(test_x)
    # Class probabilities -> predicted class index per sample.
    pred = pred.argmax(axis=-1)

    print('Save prediction...\n')
    # NOTE(review): the file is opened here; the per-row writes and close are
    # below this excerpt — a `with open(...)` block would be safer.
    outfile = open(args.output_file, 'w')
    print('id,label', file=outfile)
Esempio n. 4
0
    # NOTE(review): variable is named `sgd` but the optimizer is Adam; `lr`
    # is the legacy argument name (newer Keras uses `learning_rate`).
    sgd = Adam(lr=0.0001)
    model.compile(optimizer=sgd,
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    callback = [
        # EarlyStopping(monitor='val_loss', patience=50, verbose=True),
        # ReduceLROnPlateau(monitor='lr', factor=0.1, patience=15, verbose=True),
        # Keep only the best weights (by validation accuracy) on disk.
        ModelCheckpoint('./models/cnn_best_weights.h5',
                        monitor='val_accuracy',
                        verbose=True,
                        save_best_only=True,
                        save_weights_only=True)
    ]
    # Train from the generators; floor division means a partial trailing
    # batch is dropped from each epoch.
    history_jaffe = model.fit(train_generator,
                              steps_per_epoch=len(y_train) // opt.batch_size,
                              epochs=opt.epochs,
                              validation_data=valid_generator,
                              validation_steps=len(y_valid) // opt.batch_size,
                              callbacks=callback)
    his = history_jaffe
else:
    # CK+ branch: load the dataset and one-hot encode the labels.
    expr, x, y = CK().gen_train()
    y = to_categorical(y).reshape(y.shape[0], -1)
    # Split into training and validation sets.
    x_train, x_valid, y_train, y_valid = train_test_split(x,
                                                          y,
                                                          test_size=0.2,
                                                          random_state=2019)
    # NOTE(review): "iamges" typo below is in a runtime string — preserved here.
    print(
        "load CK+ dataset successfully, it has {} train images and {} valid iamges"
        .format(y_train.shape[0], y_valid.shape[0]))
    # Augmentation pipeline (the call continues beyond this excerpt).
    train_generator = ImageDataGenerator(rotation_range=10,
Esempio n. 5
0
from model import CNN
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score

import pandas as pd
import numpy as np

# Hyper-parameters for the CNN wrapper.
params = {'epoch': 100, 'batch_size': 32}

models = CNN(**params)


def _split_frame(frame):
    """Return (labels, images) from a payload dataframe.

    Pixel columns are scaled by 1/256 and reshaped into 64x64
    single-channel images.
    """
    labels = frame['label'].values
    pixels = frame.drop('label', axis=1).values / 256
    return labels, pixels.reshape(-1, 64, 64, 1)


train = pd.read_csv('../data/payload_train.csv')
val = pd.read_csv('../data/payload_val.csv')

y_train, x_train = _split_frame(train)
y_val, x_val = _split_frame(val)

models.fit(x_train, y_train, x_val, y_val)