Example No. 1
                        help='save patterns images')

    args = parser.parse_args()

    n_classes = 7
    n_epochs = 200
    learning_rate = 0.001
    batch_size = 64

    current_working_dir = os.getcwd()
    print('current_working_dir: ', current_working_dir)
    pre = Preprocessing('fer2013', root_dir=current_working_dir)
    pre.load_data(filename='DatasetD.csv', name='train')
    pre.load_data(filename='test_public_norm.csv', name='validate')

    X_train_df = pre.get(name='train').drop(columns=['emotion'])
    y_train_df = pre.get(name='train')['emotion']
    X_val_df = pre.get(name='validate').drop(columns=['emotion'])
    y_val_df = pre.get(name='validate')['emotion']

    dtype = torch.float

    model_name = f'cnn_triple_layer_D_bs_{learning_rate}_{batch_size}_{n_epochs}_{n_classes}'
    model = CnnTripleLayer(model_name, d_out=n_classes)
    model.train()

    train_classifier = TrainClassifier2(model,
                                        X_train_df,
                                        y_train_df,
                                        X_val_df,
                                        y_val_df,
Example No. 2
    print('train transfer learning VGG16 DatasetA')

    parser = argparse.ArgumentParser(description='fer2013')
    parser.add_argument('--s_model', default=True, help='save trained model')
    parser.add_argument('--s_patterns', default=False, help='save patterns images')

    args = parser.parse_args()

    current_working_dir = os.getcwd()
    print('current_working_dir: ', current_working_dir)
    pre = Preprocessing('fer2013', root_dir=current_working_dir)

    pre.load_data('train_reduced_norm.csv.gz', name='train')
    pre.load_data('test_public_norm.csv.gz', name='val')

    X = pre.get('val').drop(columns=['emotion'])
    y = pre.get('val')['emotion']
    # carve a held-out validation set out of the public test split
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.20, random_state=42)
    val = pd.DataFrame(X_test)
    val['emotion'] = y_test
    pre.set(name='val', value=val)

    print(pre.get(name='val').head())

    train_pixels = pre.get(name='train').drop(columns=['emotion'])
    val_pixels = pre.get(name='val').drop(columns=['emotion'])

    print('data loaded')

    img_conv = ImageConverter()
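
The excerpt stops right after the ImageConverter is created, and its API is not shown here. For VGG16 transfer learning the flat fer2013 pixel columns typically have to become 48x48 images replicated across three channels; the sketch below only illustrates that step with plain NumPy, and the reshape size and channel handling are assumptions, not the repo's actual ImageConverter.

import numpy as np

def to_vgg_input(pixel_df, side=48):
    """Reshape flattened grayscale rows into (N, side, side, 3) arrays.

    Assumes each row of pixel_df holds side*side normalized pixel values;
    VGG16 expects three channels, so the grayscale plane is repeated.
    """
    imgs = pixel_df.to_numpy(dtype=np.float32).reshape(-1, side, side)
    return np.repeat(imgs[..., np.newaxis], 3, axis=-1)

# hypothetical usage with the frames loaded above
# train_imgs = to_vgg_input(train_pixels)
# val_imgs = to_vgg_input(val_pixels)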
Example No. 3
    parser.add_argument('--s_model', default=True, help='save trained model')
    parser.add_argument('--s_patterns',
                        default=False,
                        help='save patterns images')

    args = parser.parse_args()

    n_classes = 7
    n_epochs = 300
    learning_rate = 0.0001
    batch_size = 32

    pre = Preprocessing('fer2013')
    pre.load_data(filename='DatasetD.csv', name='train')

    X_df = pre.get(name='train').drop(columns=['emotion'])
    y_df = pre.get(name='train')['emotion']

    dtype = torch.float
    device = torch.device("cpu")

    model_name = f'cnn_double_layer_D_bs_{learning_rate}_{batch_size}_{n_epochs}_{n_classes}'
    model = CnnDoubleLayer(model_name, d_out=n_classes)
    model.train()

    train_classifier = TrainClassifier2(model, X_df, y_df)
    t = time.time()
    trained_model, optimizer, criterion, loss_hist, loss_val_hist, f1_val_hist = train_classifier.run_train(
        n_epochs=n_epochs, lr=learning_rate, batch_size=batch_size)
    print(f'trained in {time.time() - t} sec')
    pre.save_results(loss_hist, loss_val_hist, f1_val_hist, f'{model_name}')
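
The `--s_model` flag is parsed above, but the saving step falls outside the excerpt. Continuing the snippet, a minimal sketch of persisting the trained weights with PyTorch's standard API could look like this; gating on `args.s_model` and the output file name are assumptions, not the repo's actual code.

    if args.s_model:
        # persist only the learned parameters; assumed path/name
        torch.save(trained_model.state_dict(), f'{model_name}.pt')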
Example No. 4
import matplotlib.pyplot as plt

if __name__ == '__main__':

    parser = argparse.ArgumentParser(description='fer2013_DatasetA')
    parser.add_argument('--s_model', default=True, help='save trained model')

    args = parser.parse_args()

    n_epochs = 100

    pre = Preprocessing('fer2013_DatasetA')
    pre.load_data(filename='DatasetA.csv', name='train')

    X_train_df = pre.get(name='train').drop(columns=['emotion'])
    y_train_df = pre.get(name='train')['emotion']

    dtype = torch.float
    device = torch.device("cpu")

    H1 = 1764
    n_features = len(X_train_df.columns)
    n_features_encoded = 1296
    print(f'features {n_features}')
    print(f'H1 {H1}')
    print(f'n_features_encoded {n_features_encoded}')

    model_name = 'ann_encoder'
    model = AnnAutoencoder(model_name, n_features, H1, n_features_encoded)
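
The excerpt ends at model construction and AnnAutoencoder's internals are not shown. As an illustration only, a plain fully connected autoencoder with the same layer sizes and an MSE reconstruction loop might look like the sketch below; the layer layout, optimizer, and training details are assumptions rather than the repo's implementation.

import torch
import torch.nn as nn

class SimpleAutoencoder(nn.Module):
    """Illustrative stand-in for AnnAutoencoder: features -> H1 -> code -> H1 -> features."""
    def __init__(self, n_features, h1, n_code):
        super().__init__()
        self.encoder = nn.Sequential(nn.Linear(n_features, h1), nn.ReLU(),
                                     nn.Linear(h1, n_code), nn.ReLU())
        self.decoder = nn.Sequential(nn.Linear(n_code, h1), nn.ReLU(),
                                     nn.Linear(h1, n_features))

    def forward(self, x):
        return self.decoder(self.encoder(x))

# hypothetical reconstruction training on the pixel columns from the snippet above
# X = torch.tensor(X_train_df.values, dtype=torch.float)
# model = SimpleAutoencoder(n_features, H1, n_features_encoded)
# optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
# criterion = nn.MSELoss()
# for epoch in range(n_epochs):
#     optimizer.zero_grad()
#     loss = criterion(model(X), X)
#     loss.backward()
#     optimizer.step()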