Code Example #1
import os

import torch
import torch.nn as nn
from torch.utils.data import DataLoader, random_split
from tqdm import tqdm

# AE, AEDataset, test and clean_file are project-local helpers and are assumed
# to be importable from the surrounding project.


def data_aug(data, lr=0.001, epoch=800, batch_size=128):
    folder = 'data_aug'
    save_path = f'{folder}/data_augment.csv'
    clean_file(save_path)
    # epochs at which checkpoints are stored and augmentation is run
    store_e = [700, 750, 800]
    if not os.path.exists(folder):
        os.makedirs(folder)
    else:
        # checkpoints already exist: skip training and just run augmentation
        for i in store_e:
            result = test(data, folder, i)
        return result

    train_loss_curve = []
    valid_loss_curve = []
    # build the model and move it to GPU if available
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model = AE()
    model = model.to(device)
    model.train()

    dataset = AEDataset(data)
    train_size = int(0.85 * len(dataset))
    valid_size = len(dataset) - train_size
    train_dataset, valid_dataset = random_split(dataset,
                                                [train_size, valid_size])
    train_dataloader = DataLoader(dataset=train_dataset,
                                  batch_size=batch_size,
                                  shuffle=True)
    valid_dataloader = DataLoader(dataset=valid_dataset,
                                  batch_size=batch_size,
                                  shuffle=False)  # no need to shuffle validation data

    # loss function and optimizer
    # swap in a different loss function or optimizer here if desired
    criterion = nn.MSELoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=lr)
    best = 100  # used by the (commented-out) best-model tracking below
    result = None  # avoid returning an undefined name if no epoch in store_e is reached
    # start training
    for e in range(epoch):
        train_loss = 0.0
        valid_loss = 0.0

        print(f'\nEpoch: {e+1}/{epoch}')
        print('-' * len(f'Epoch: {e+1}/{epoch}'))
        # tqdm displays a progress bar
        for inputs in tqdm(train_dataloader):
            # data from data_loader
            inputs = inputs.float().to(device)
            outputs = model(inputs, device)
            loss = criterion(outputs, inputs)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            train_loss += loss.item()

        # validation pass: gradients are not needed here
        model.eval()
        with torch.no_grad():
            for inputs in tqdm(valid_dataloader):
                # data from data_loader
                inputs = inputs.float().to(device)
                outputs = model(inputs, device)
                # MSE loss
                loss = criterion(outputs, inputs)
                # accumulate the validation loss
                valid_loss += loss.item()
        model.train()
        # average the accumulated per-batch losses over the number of batches
        loss_epoch = train_loss / len(train_dataloader)
        valid_loss_epoch = valid_loss / len(valid_dataloader)
        # optionally save the best model weights as a .pth file
        # if valid_loss_epoch < best:
        #     best = valid_loss_epoch
        #     torch.save(model.state_dict(), 'data_aug.pth')
        if e in store_e:
            torch.save(model.state_dict(), f'{folder}/ep{e}data_aug.pth')
            print(f"training in epoch  {e},start augment data!!")
            result = test(data, folder, e)
        print(f'Training loss: {loss_epoch:.4f}')
        print(f'Valid loss: {valid_loss_epoch:.4f}')
        # record the loss of every epoch
        train_loss_curve.append(loss_epoch)
        valid_loss_curve.append(valid_loss_epoch)
    # generate training curve
    # visualize(train_loss_curve,valid_loss_curve, 'Data Augmentation')
    return result
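
# A minimal usage sketch (assumed, not part of the original script): `data`
# is expected to be an array-like of feature rows that AEDataset accepts,
# e.g. values loaded from a CSV with pandas.
#
#     import pandas as pd
#     data = pd.read_csv('train.csv').values   # 'train.csv' is a hypothetical file
#     augmented = data_aug(data, lr=0.001, epoch=800, batch_size=128)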

if __name__ == "__main__":
    device = 'cuda' if torch.cuda.is_available() else 'cpu'

    # build the pretrained autoencoder and refinement generator
    from models import AE, RefineGenerator_art, RefineGenerator_face
    net_ae = AE()
    net_ae.style_encoder.reset_cls()
    net_ig = RefineGenerator_face()

    # restore both networks from the checkpoint
    ckpt = torch.load('./models/16.pth', map_location=device)

    net_ae.load_state_dict(ckpt['ae'])
    net_ig.load_state_dict(ckpt['ig'])

    net_ae.to(device)
    net_ig.to(device)

    net_ae.eval()
    #net_ig.eval()

    data_root_colorful = './data/rgb/'
    #data_root_colorful = '/media/bingchen/database/images/celebaMask/CelebA_1024'

    data_root_sketch = './data/skt/'
    #data_root_sketch = './data/face_skt/'

    BATCH_SIZE = 3
    IM_SIZE = 512
    DATALOADER_WORKERS = 8
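
# A minimal sketch of how the constants above could feed an image DataLoader.
# The project's own Dataset classes are not shown in this snippet, so the
# torchvision ImageFolder/transform pipeline below is an assumption, not the
# actual loading code:
#
#     from torchvision import transforms
#     from torchvision.datasets import ImageFolder
#     from torch.utils.data import DataLoader
#
#     transform = transforms.Compose([
#         transforms.Resize((IM_SIZE, IM_SIZE)),
#         transforms.ToTensor(),
#         transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5]),
#     ])
#     rgb_loader = DataLoader(ImageFolder(data_root_colorful, transform),
#                             batch_size=BATCH_SIZE, shuffle=False,
#                             num_workers=DATALOADER_WORKERS)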