Code Example #1
    return total_loss / total_examples


def save_model():
    state = {
        'state_dict': model.state_dict(),
        'optimizer': optimizer.state_dict()
    }
    torch.save(state, os.path.join(folder, 'bestmodel'))
    print('Model saved!')


# Train, and save the model whenever the test loss improves
if __name__ == "__main__":
    model = MVAE()
    optimizer = torch.optim.Adam(model.parameters(), lr=L_RATE)

    # Directory to save results
    folder = './models'
    if not os.path.isdir(folder):
        os.mkdir(folder)

    # Train
    train(1)
    test_loss = test(1)
    best_loss = test_loss
    save_model()
    for epoch in range(2, EPOCHS + 1):
        train(epoch)
        test_loss = test(epoch)
        if test_loss < best_loss:
            best_loss = test_loss
            save_model()
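
Restoring the checkpoint written by save_model() is the reverse operation: load the file and feed each saved state dict back into a freshly constructed model and optimizer. Below is a minimal sketch; the 'bestmodel' filename and checkpoint keys match save_model() above, but load_model itself is a hypothetical helper that is not part of the original script.

import os
import torch

def load_model(model, optimizer, folder='./models', filename='bestmodel'):
    # Read the checkpoint dict written by save_model() and restore both state dicts.
    state = torch.load(os.path.join(folder, filename))
    model.load_state_dict(state['state_dict'])
    optimizer.load_state_dict(state['optimizer'])
    return model, optimizer

Passing the model and optimizer in explicitly keeps this helper usable outside the __main__ block, unlike save_model(), which relies on module-level globals.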
Code Example #2
        transforms.CenterCrop(64),
        transforms.ToTensor()
    ])

    train_loader = torch.utils.data.DataLoader(CelebAttributes(
        partition='train', data_dir='./data', image_transform=preprocess_data),
                                               batch_size=args.batch_size,
                                               shuffle=True)
    N_mini_batches = len(train_loader)
    test_loader = torch.utils.data.DataLoader(CelebAttributes(
        partition='val', data_dir='./data', image_transform=preprocess_data),
                                              batch_size=args.batch_size,
                                              shuffle=False)

    model = MVAE(args.n_latents)
    optimizer = optim.Adam(model.parameters(), lr=args.lr)

    if args.cuda:
        model.cuda()

    def train(epoch):
        model.train()
        train_loss_meter = AverageMeter()

        # each mini-batch yields an image tensor and its attribute vector
        for batch_idx, (image, attrs) in enumerate(train_loader):
            if epoch < args.annealing_epochs:
                # compute the KL annealing factor for the current mini-batch in the current epoch
                annealing_factor = (
                    float(batch_idx + (epoch - 1) * N_mini_batches + 1) /
                    float(args.annealing_epochs * N_mini_batches))
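                # The factor above grows linearly with the number of mini-batches seen,
                # from roughly 1 / (annealing_epochs * N_mini_batches) at the very first
                # batch towards 1.0 over the first `annealing_epochs` epochs. It is
                # typically multiplied into the KL term of the ELBO so that reconstruction
                # dominates early training; the exact usage depends on the loss function
                # defined elsewhere in this script.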