Exemple #1
0
def part1(device):
    """Train a VAE on binarized MNIST and save its weights to disk.

    Runs 20 epochs of Adam optimization, maximizing the ELBO. Every 100
    training batches the ELBO on the validation set is printed. The final
    weights are written to 'vae_save.pth'.
    """
    train, valid, test = get_data_loader("binarized_mnist", 64)

    model = VAE().to(device)
    optimizer = optim.Adam(model.parameters(), lr=3e-4)

    for epoch in range(20):
        print(f"------- EPOCH {epoch} --------")

        for batch_idx, batch in enumerate(train):
            model.train()
            optimizer.zero_grad()

            batch = batch.to(device)
            recon, mu, logvar = model(batch)

            # Maximizing the ELBO == minimizing its negation.
            loss = -ELBO(recon, batch, mu, logvar)
            loss.backward()
            optimizer.step()

            # Periodic validation pass (gradients disabled).
            if (batch_idx + 1) % 100 == 0:
                with torch.no_grad():
                    valid_elbo = validate(model, valid, device)
                print(
                    f"Training example {batch_idx + 1} / {len(train)}. Validation ELBO: {valid_elbo}"
                )

    torch.save(model.state_dict(), 'vae_save.pth')
Exemple #2
0
def part2(device):
    """Evaluate a trained VAE on the validation and test sets.

    Loads the weights saved by part1 ('vae_save.pth'), then reports the
    metric returned by validate() for each combination of evaluation
    method ('elbo', 'importance') and dataset (validation, test).
    """
    train, valid, test = get_data_loader("binarized_mnist", 64)

    vae = VAE()
    vae.load_state_dict(torch.load('vae_save.pth', map_location=device))
    vae.eval()
    vae = vae.to(device)

    for method in ['elbo', 'importance']:
        for dataset in [valid, test]:
            with torch.no_grad():
                metric = validate(vae, dataset, device, method=method)
                # Identity check, not `==`: DataLoader defines no value
                # equality, so the original `==` silently fell back to
                # identity anyway — `is` states the intent explicitly.
                dataset_name = "validation" if dataset is valid else "test"
                print(
                    f"Evaluation of the trained model on the {dataset_name} set using the {method} method resulted in an evaluation of {metric}."
                )
Exemple #3
0
def _estimate_logpx(model, loader, use_cuda):
    """Importance-sampling estimate of log p(x), averaged over `loader`.

    Moves each batch to the GPU only when `use_cuda` is True, so the
    estimate also works on CPU-only machines.
    """
    values = []
    for data in loader:
        if use_cuda:
            data = data.cuda()
        values.append(importance_sampling(model, data))
    return np.mean(np.concatenate(values))


def train_valid_cycle(epochs=DEFAULT_NUM_EPOCHS, load_model=False):
    """Train a VAE on binarized MNIST, validating after every epoch.

    After training, evaluates on the test set and prints an
    importance-sampling estimate of log p(x) on both the validation and
    test sets.

    Args:
        epochs: number of training epochs to run.
        load_model: if True, resume from the checkpoint written at the
            last default epoch instead of training from scratch.
    """
    # Gets the datasets
    train_loader, valid_loader, test_loader = get_data_loader("binarized_mnist", DEFAULT_BATCH_SIZE)

    # Creates VAE Model
    model = VAE()

    if load_model:
        # Match the 'model_{epoch}.pt' pattern used by torch.save below;
        # the original path 'model19.pt' is never written by this function.
        model.load_state_dict(torch.load('{}/model_{}.pt'.format(DEFAULT_SAVE_PATH, DEFAULT_NUM_EPOCHS - 1)))

    # Decide once whether CUDA is usable; reused for batch transfers below.
    use_cuda = torch.cuda.is_available()
    if use_cuda:
        model = model.cuda()

    # Creates Optimizer
    optimizer = torch.optim.Adam(model.parameters(), lr=DEFAULT_LR)

    # Defines loss function (sum over elements; replaces the deprecated
    # `size_average=False`, which is equivalent to reduction='sum')
    bce_loss = nn.BCELoss(reduction='sum')

    # Runs Train/Validation loop
    for epoch in range(epochs):
        train(epoch, model, optimizer, loss_fn=bce_loss, dataset=train_loader)
        # Saves the model
        torch.save(model.state_dict(), '{}/model_{}.pt'.format(DEFAULT_SAVE_PATH, epoch))
        valid(epoch, model, loss_fn=bce_loss, dataset=valid_loader)

    # Runs on Test Set
    test(model, loss_fn=bce_loss, dataset=test_loader)

    # Importance-sampling log p(x) estimates. The original code called
    # data.cuda() unconditionally, which crashes on CPU-only machines.
    print('log p(x) ~= {}'.format(_estimate_logpx(model, valid_loader, use_cuda)))
    print('log p(x) ~= {}'.format(_estimate_logpx(model, test_loader, use_cuda)))
Exemple #4
0
            batchX = batchX.to(device)
            importance_samples.append(
                model.importance_sampling(batchX, k).mean().detach().cpu())

        return torch.stack(importance_samples).mean()


if __name__ == "__main__":
    # Seed every RNG source used (NumPy, CUDA, CPU torch) for reproducibility.
    np.random.seed(0)
    torch.cuda.manual_seed_all(0)
    torch.manual_seed(0)
    # Prefer the GPU when one is available, otherwise run on CPU.
    device = torch.device("cpu")
    if cuda.is_available():
        device = torch.device("cuda")

    train_loader, valid_loader, test_loader = get_data_loader(
        "binarized_mnist", 64)

    model = VAE()
    # NOTE(review): checkpoint is always None here, so the resume branch of
    # the range() below can never fire — confirm whether checkpoint loading
    # was meant to be implemented.
    checkpoint = None
    model.to(device)
    optimizer = Adam(params=model.parameters(), lr=3 * 10**(-4))
    # NOTE(review): num_epochs is set to 20 but never used; the loop below is
    # hard-coded to stop at epoch 2 — likely a debugging leftover, verify.
    num_epochs = 20
    trainLosses = []
    validLosses = []
    for epoch in range(checkpoint["epoch"] if checkpoint else 0, 2):
        print("-------------- Epoch # " + str(epoch + 1) + " --------------")

        trainLoss = train(model, train_loader, optimizer, device)
        trainLosses.append(trainLoss)
        print("Epoch train loss: {:.4f}".format(trainLoss))
Exemple #5
0
    return logp_x


if __name__ == "__main__":

    # Run on GPU when available, otherwise fall back to CPU.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    model = VAE().to(device)
    optimizer = optim.Adam(model.parameters(), lr=3e-4)

    ###Training

    #n_epochs = 20

    #Load data loaders
    train_loader, val_loader, test_loader = mnist_loader.get_data_loader(
        "binarized_mnist", 64)

    # NOTE(review): everything below — the train/eval loop, the image
    # sampling, and the weight saving — is commented out, so this script
    # currently only constructs the model, optimizer and data loaders.
    # Confirm whether that is intentional before relying on it.
    #Train + val
    #for epoch in range(n_epochs):
    #	train(epoch, train_loader)
    #eval(epoch, val_loader)

    #	with torch.no_grad():
    #Generate a batch of images using current parameters
    #		sample = torch.randn(64, 100).to(device)
    #		sample = model.decode(sample).cpu()
    #		save_image(sample.view(64, 1, 28, 28),
    #				   'results/sample_' + str(epoch) + '.png')

    #Saving the model weights
    #torch.save(model.state_dict(), 'weights/weights.h5')