Example #1
import torch
import torch.optim as optim
from torch.optim.lr_scheduler import StepLR

# Net, get_mnist_dataloaders, train, and validate are helpers defined elsewhere
# in the surrounding project.


def objective(options=None):
    # Track the best (lowest) validation loss seen so far; this is the value
    # the objective returns for minimization.
    best_val_loss = float("inf")

    # Define hyperparameters
    lr = 0.001
    dropout = 0.3
    batch_size = 128
    print(f"Learning rate: {lr}")
    print(f"Dropout: {dropout}")
    print(f"Batch size: {batch_size}")

    # Use CUDA if GPU is available, else CPU
    use_cuda = options["use_cuda"] and torch.cuda.is_available()
    device = torch.device("cuda" if use_cuda else "cpu")
    print(f"Using device {device}")

    # Obtain the MNIST train and validation loaders using a helper function
    train_loader, val_loader = get_mnist_dataloaders(options["data_path"], batch_size)

    # Initialize network
    model = Net(dropout=dropout).to(device)

    # Optimizer and step-based learning-rate scheduler
    optimizer = optim.Adam(model.parameters(), lr=lr)
    scheduler = StepLR(optimizer, step_size=1, gamma=0.7)

    # Network training & validation loop
    for epoch in range(options["epochs"]):
        avg_train_loss = train(
            options, model, device, train_loader, optimizer, epoch
        )
        avg_val_loss = validate(model, device, val_loader)

        if avg_val_loss <= best_val_loss:
            best_val_loss = avg_val_loss

        # Print intermediate validation & training loss
        print(f"Epoch {epoch + 1} of {options['epochs']} --- average train loss: {avg_train_loss} --- average validation loss: {avg_val_loss}")

        scheduler.step()

    # Return the best validation loss
    return best_val_loss
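
A minimal usage sketch for the objective above, assuming an options dict with the keys the function reads (use_cuda, data_path, epochs); the values below are placeholders, and the project's train helper may read additional keys:

options = {
    "use_cuda": True,       # falls back to CPU if no GPU is available
    "data_path": "./data",  # placeholder; point this at the MNIST data directory
    "epochs": 5,
}

best_loss = objective(options)
print(f"Best validation loss: {best_loss}")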
Example #2
import torch
import torch.optim as optim
from dataloaders import get_mnist_dataloaders, get_fashion_mnist_dataloaders, get_lsun_dataloader
from models import Generator, Discriminator
from training import Trainer

# Load training data (MNIST by default; alternative datasets are commented out)
data_loader, _ = get_mnist_dataloaders(batch_size=64)
# data_loader, _ = get_fashion_mnist_dataloaders(batch_size=64)
# data_loader = get_lsun_dataloader(path_to_data="/ubc/cs/research/plai-scratch/saeid/datasets/lsun", batch_size=64)
img_size = (32, 32, 1)

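# Build the generator and discriminator for the given image size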
generator = Generator(img_size=img_size, latent_dim=100, dim=16)
discriminator = Discriminator(img_size=img_size, dim=16)

print(generator)
print(discriminator)

# Initialize optimizers
lr = 1e-4
betas = (.5, .9)
G_optimizer = optim.Adam(generator.parameters(), lr=lr, betas=betas)
D_optimizer = optim.Adam(discriminator.parameters(), lr=lr, betas=betas)

# Train model
epochs = 200
trainer = Trainer(generator,
                  discriminator,
                  G_optimizer,
                  D_optimizer,
                  use_cuda=torch.cuda.is_available())
trainer.train(data_loader, epochs, save_training_gif=True)