Example #1
# RandomForestClassifier is from scikit-learn; LoadDataset, FeatureReduction
# and TestModel are assumed to be project-local helpers.
from sklearn.ensemble import RandomForestClassifier


def main():
    # prepare data
    print('Start loading data...')
    train_data, train_label, val_data, val_label, test_data, test_label = LoadDataset()
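    # scale pixel values from [0, 255] into roughly [-1, 1) and flatten each 48x48 image into a 2304-dim vector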
    train_data = (train_data * 1.0 / 128.0) - 1
    val_data = (val_data * 1.0 / 128.0) - 1
    test_data = (test_data * 1.0 / 128.0) - 1
    train_data = train_data.reshape(-1, 48 * 48)
    val_data = val_data.reshape(-1, 48 * 48)
    test_data = test_data.reshape(-1, 48 * 48)
    print('Finish loading data.')
    print('train data shape', train_data.shape)
    print('val data shape', val_data.shape)
    print('test data shape', test_data.shape)

    # project the 2304-dim pixel vectors down to 256 components (PCA)
    train_data, val_data, test_data = FeatureReduction(train_data, val_data, test_data, 256)

    print('After PCA')
    print('train data shape', train_data.shape)
    print('val data shape', val_data.shape)
    print('test data shape', test_data.shape)

    # train_model
    print('Start training...')
    model = RandomForestClassifier(n_estimators=25, max_depth=20)
    model.fit(train_data, train_label)
    print('Training finished.')
    TestModel(model, val_data, val_label, 'val_set')
    TestModel(model, test_data, test_label, 'test_set')
Example #2
def main():
    # prepare data
    print('Start loading data...')
    train_data, train_label, val_data, val_label, test_data, test_label = LoadDataset()
    train_data = (train_data * 1.0 / 128.0) - 1
    val_data = (val_data * 1.0 / 128.0) - 1
    test_data = (test_data * 1.0 / 128.0) - 1
    train_data = train_data.reshape(-1, 48 * 48)
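    # keep only the first 18000 training samples, presumably to keep SVM training time manageable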
    train_data = train_data[:18000, :]
    train_label = train_label[:18000]
    val_data = val_data.reshape(-1, 48 * 48)
    test_data = test_data.reshape(-1, 48 * 48)
    print('Finish loading data.')
    print('train data shape', train_data.shape)
    print('val data shape', val_data.shape)
    print('test data shape', test_data.shape)

    train_data, val_data, test_data = FeatureReduction(train_data, val_data,
                                                       test_data, 256)

    print('After PCA')
    print('train data shape', train_data.shape)
    print('val data shape', val_data.shape)
    print('test data shape', test_data.shape)

    # train_model
    print('Start training...')
    model = TrainSVM(train_data, train_label, 'poly')
    print('Training finished.')
    TestModel(model, val_data, val_label, 'val_set')
    TestModel(model, test_data, test_label, 'test_set')
Example #3
from sklearn import neighbors


def main():
    # prepare data
    print('Start loading data...')
    train_data, train_label, val_data, val_label, test_data, test_label = LoadDataset()
    #train_data = (train_data * 1.0 / 128.0) - 1
    #val_data = (val_data * 1.0 / 128.0) - 1
    #test_data = (test_data * 1.0 / 128.0) - 1
    train_data = train_data.reshape(-1, 48 * 48)
    #train_data = train_data[:18000, :]
    #train_label = train_label[:18000]
    val_data = val_data.reshape(-1, 48 * 48)
    test_data = test_data.reshape(-1, 48 * 48)
    print('Finish loading data.')
    print('train data shape', train_data.shape)
    print('val data shape', val_data.shape)
    print('test data shape', test_data.shape)

    train_data, val_data, test_data = FeatureReduction(train_data, val_data,
                                                       test_data, 256)

    print('After PCA')
    print('train data shape', train_data.shape)
    print('val data shape', val_data.shape)
    print('test data shape', test_data.shape)

    # train_model
    print('Start training...')
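    # k-nearest-neighbours classifier (k=5) on the PCA-reduced features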
    KNN = neighbors.KNeighborsClassifier(n_neighbors=5)
    KNN.fit(train_data, train_label)
    print('Training finished.')
    TestModel(KNN, val_data, val_label, 'val_set')
    TestModel(KNN, test_data, test_label, 'test_set')
Example #4
from sklearn.neural_network import MLPClassifier


def main():
    # prepare data
    print('Start loading data...')
    train_data, train_label, val_data, val_label, test_data, test_label = LoadDataset()
    train_data = (train_data * 1.0 / 128.0) - 1
    val_data = (val_data * 1.0 / 128.0) - 1
    test_data = (test_data * 1.0 / 128.0) - 1
    train_data = train_data.reshape(-1, 48 * 48)
    val_data = val_data.reshape(-1, 48 * 48)
    test_data = test_data.reshape(-1, 48 * 48)
    print('Finish loading data.')
    print('train data shape', train_data.shape)
    print('val data shape', val_data.shape)
    print('test data shape', test_data.shape)

    train_data, val_data, test_data = FeatureReduction(train_data, val_data,
                                                       test_data, 256)

    print('After PCA')
    print('train data shape', train_data.shape)
    print('val data shape', val_data.shape)
    print('test data shape', test_data.shape)

    # train_model
    print('Start training...')
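    # MLP with two hidden layers of 256 units each, trained with plain SGD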
    model = MLPClassifier(solver='sgd',
                          activation='relu',
                          alpha=1e-4,
                          hidden_layer_sizes=(256, 256),
                          random_state=1,
                          max_iter=100,
                          verbose=10,
                          learning_rate_init=.05)
    model.fit(train_data, train_label)
    print('Training finished.')
    TestModel(model, val_data, val_label, 'val_set')
    TestModel(model, test_data, test_label, 'test_set')
Example #5
import argparse

import torch
import torch.utils.data

parser = argparse.ArgumentParser(description='PyTorch Fer2013 Training')
parser.add_argument('--lr', default=0.1, type=float, help='learning rate')
parser.add_argument('--resume', '-r', action='store_true', help='resume from checkpoint')
args = parser.parse_args()

device = 'cuda' if torch.cuda.is_available() else 'cpu'
best_acc = 0  # best test accuracy
start_epoch = 0  # start from epoch 0 or last checkpoint epoch

# Load Data
print('==> Preparing data..')

from data_loader import LoadDataset

train_data, train_label, val_data, val_label, test_data, test_label = LoadDataset()

train_data = torch.from_numpy(train_data)
train_label = torch.from_numpy(train_label)

val_data = torch.from_numpy(val_data)
val_label = torch.from_numpy(val_label)

test_data = torch.from_numpy(test_data)
test_label = torch.from_numpy(test_label)

# wrap the tensors in TensorDataset objects and batch them with DataLoader
trainset = torch.utils.data.TensorDataset(train_data, train_label)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=128, shuffle=True, num_workers=2)

valset = torch.utils.data.TensorDataset(val_data, val_label)
valloader = torch.utils.data.DataLoader(valset, batch_size=100, shuffle=False, num_workers=2)
Example #6
# torch imports; init, get_transcript_speech and LoadDataset are assumed to be
# project-local helpers.
import torch
import torch.nn as nn
import torch.optim
from torch.utils.data import DataLoader


def train(config):
    encoder, decoder_m, decoder_f, dis_m, dis_f = init(config)
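    # shared encoder with separate male/female decoders, each paired with its
    # own discriminator and trained with least-squares (MSE) GAN losses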

    criterion_mse = nn.MSELoss()
    criterion_l1 = nn.L1Loss()

    optim_dis_f = torch.optim.Adam(dis_f.parameters(), lr=0.0002)
    optim_dis_m = torch.optim.Adam(dis_m.parameters(), lr=0.0002)

    optim = torch.optim.Adam(list(decoder_m.parameters()) +
                             list(encoder.parameters()) +
                             list(decoder_f.parameters()),
                             lr=config["lr"])

    transcript_male_speech, transcript_female_speech = get_transcript_speech(
        config)
    train_loader = DataLoader(dataset=LoadDataset(transcript_male_speech,
                                                  transcript_female_speech),
                              batch_size=config["batch_size"],
                              shuffle=True,
                              num_workers=config["num_workers"])

    for i in range(config["epoch"]):
        decoder_f.train()
        decoder_m.train()
        encoder.train()

        for male, female in train_loader:
            male = male.to(config["device"])
            female = female.to(config["device"])

            # discriminator female
            optim_dis_f.zero_grad()

            d_real_preds = dis_f(female)
            labels = torch.ones_like(d_real_preds).to(config["device"])
            d_real_f_loss = criterion_mse(d_real_preds, labels)

            preds_female = decoder_f(encoder(female))

            d_fake_f_preds = dis_f(preds_female)
            labels = torch.zeros_like(d_fake_f_preds).to(config["device"])
            d_fake_f_loss = criterion_mse(d_fake_f_preds, labels)
            d_loss_f = (d_real_f_loss + d_fake_f_loss) / 2
            d_loss_f.backward()
            optim_dis_f.step()

            # discriminator male
            optim_dis_m.zero_grad()

            d_real_preds = dis_m(male)
            labels = torch.ones_like(d_real_preds).to(config["device"])
            d_real_m_loss = criterion_mse(d_real_preds, labels)

            preds_male = decoder_m(encoder(male))

            d_fake_m_preds = dis_m(preds_male)
            labels = torch.zeros_like(d_fake_m_preds).to(config["device"])
            d_fake_m_loss = criterion_mse(d_fake_m_preds, labels)
            d_loss_m = (d_real_m_loss + d_fake_m_loss) / 2
            d_loss_m.backward()
            optim_dis_m.step()

            # encoder backward

            optim.zero_grad()

            preds_male = decoder_m(encoder(male))
            preds_female = decoder_f(encoder(female))

            preds_real_fake_m = dis_m(preds_male)
            preds_real_fake_f = dis_f(preds_female)
            # generator objective: both discriminators should classify the reconstructions as real
            labels = torch.ones_like(preds_real_fake_m).to(config["device"])

            loss_male = config["l1_wight"] * criterion_l1(
                male, preds_male) + criterion_mse(preds_real_fake_m, labels)
            loss_female = config["l1_wight"] * criterion_l1(
                female, preds_female) + criterion_mse(preds_real_fake_f,
                                                      labels)
            loss = (loss_female + loss_male) / 2
            loss.backward()
            optim.step()

    return encoder, decoder_m, decoder_f
Example #7
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
from torch.utils.data import DataLoader

from model import *
from data_loader import LoadDataset

data_cfgs = {"name": "DL20", "num_classes": 20, "dir": "./data/DL20"}
train_cfgs = {"batch_size": 32, "lr": 0.0002, "total_epoch": 20}

### load a small version of ResNet
model = Small_ResNet(BasicBlock, [3, 3, 3],
                     num_classes=data_cfgs['num_classes']).to('cuda')

### load train/valid/test dataset
train_dataset = LoadDataset(data_cfgs["dir"], mode="train", random_flip=True)
valid_dataset = LoadDataset(data_cfgs["dir"], mode="valid", random_flip=False)
# test_dataset = LoadDataset(data_cfgs["dir"], mode="test", random_flip=False)

### wrap the datasets with DataLoader
train_dataloader = DataLoader(train_dataset,
                              batch_size=train_cfgs["batch_size"],
                              shuffle=True,
                              pin_memory=True,
                              drop_last=True,
                              num_workers=64)
valid_dataloader = DataLoader(valid_dataset,
                              batch_size=train_cfgs["batch_size"],
                              shuffle=False,
                              pin_memory=True,
                              drop_last=False,