Example #1
0
    batch_size=args.batch_size, shuffle=True)

# Denoising autoencoder, optionally moved onto the GPU.
model = DAE()
if args.cuda:
    model.cuda()

# Tensorboard summary writer, one log directory per run.
log_dir = './.logs/{0}'.format(args.output_folder)
writer = SummaryWriter(log_dir)

# Adam optimizer over every model parameter.
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)

# Grab one fixed batch and log it once as the reference image grid.
fixed_x, _ = next(iter(data_loader))
fixed_grid = torchvision.utils.make_grid(fixed_x,
                                         normalize=True,
                                         scale_each=True)
writer.add_image('original', fixed_grid, 0)
fixed_x = to_var(fixed_x, args.cuda)
# Training loop: iterate over the data until `num_steps` optimizer updates.
# NOTE(review): the loop body continues past this excerpt -- `steps` is
# presumably incremented below the visible lines; confirm in the full file.
steps = 0
while steps < args.num_steps:
    for noisy_imgs, true_imgs in data_loader:
        noisy_imgs = to_var(noisy_imgs, args.cuda)
        true_imgs = to_var(true_imgs, args.cuda)
        # Forward pass: the model outputs pre-sigmoid logits.
        logits = model(noisy_imgs)

        # Reconstruction loss between sigmoid(logits) and the clean images.
        # NOTE(review): `F.sigmoid` is deprecated in PyTorch >= 1.0
        # (use `torch.sigmoid`); kept as-is because the file targets a
        # pre-0.4 API (see `loss.data[0]` below).
        loss = F.mse_loss(F.sigmoid(logits), true_imgs)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # `loss.data[0]` is the pre-0.4 scalar access (`loss.item()` in >=0.4).
        writer.add_scalar('loss', loss.data[0], steps)
Example #2
0
    vae = VAE64(num_channels=1, zdim=10)
elif args.dataset == 'celeba':
    dataset = CelebA(root='./data/celeba', transform=transforms.ToTensor())
    vae = VAE64(num_channels=3, zdim=32)
    args.obs = 'normal'
else:
    raise ValueError(
        'The `dataset` argument must be fashion-mnist, mnist, dsprites or celeba'
    )

# Data loader yielding one batch of `num_images` samples for visualization.
data_loader = torch.utils.data.DataLoader(dataset=dataset,
                                          batch_size=args.num_images,
                                          shuffle=True)

# One fixed batch of inputs (volatile: pre-0.4 inference mode, no gradients).
fixed_x, _ = next(iter(data_loader))
fixed_x = to_var(fixed_x, args.cuda, volatile=True)

# Resolve the checkpoint to load: explicit file, or latest in save_dir.
if args.save_file is not None:
    filename = args.save_file
else:
    filename = get_latest_checkpoint(args.save_dir)
print(filename)

# Remap GPU-saved tensors onto the CPU when CUDA is unavailable; without
# map_location, loading a GPU checkpoint on a CPU-only machine fails.
if args.cuda:
    ckpt = torch.load(filename)
else:
    ckpt = torch.load(filename, map_location=lambda storage, loc: storage)

if args.cuda:
    vae.cuda()
Example #3
0
    with open(get_latest_checkpoint(args.pretrained), 'r') as f:
        state_dict = torch.load(f)
        state_dict = state_dict['model']
    model.load(state_dict)
# Tensorboard writer logging under a per-run directory.
writer = SummaryWriter('./.logs/{0}'.format(args.output_folder))

# Adam optimizer over all model parameters.
optimizer = torch.optim.Adam(model.parameters(), lr=5e-4)

# One fixed batch, logged once to Tensorboard as the reference grid.
fixed_x, fixed_label = next(iter(data_loader))
fixed_grid = torchvision.utils.make_grid(
    fixed_x, normalize=True, scale_each=True)
writer.add_image('original', fixed_grid, 0)
fixed_x = to_var(fixed_x, args.cuda)
# Training loop until `num_steps` updates.
# NOTE(review): the loop body is truncated in this excerpt -- the
# `binary_cross_entropy_with_logits` call and the rest of the step
# (backward, optimizer, `steps` increment) continue past the visible lines.
steps = 0
while steps < args.num_steps:
    for images, _ in data_loader:
        images = to_var(images, args.cuda)
        # VAE forward pass: reconstruction logits, posterior parameters
        # (mu, log_var) and the sampled latent code z.
        logits, mu, log_var, z = model(images)
        if args.anirudh:
            # Optional second pass: re-encode the (detached) reconstruction
            # through `vae`; the detach stops gradients into the first pass.
            logits, mu, log_var, z = vae(logits.detach())
        if args.obs == 'normal':
            # QKFIX: We assume here that the image is in B&W
            # NOTE(review): `F.sigmoid` and `size_average` are deprecated in
            # modern PyTorch; kept since the file targets a pre-0.4 API.
            reconst_loss = F.mse_loss(F.sigmoid(logits),
                                      images,
                                      size_average=False)
        elif args.obs == 'bernoulli':
            reconst_loss = F.binary_cross_entropy_with_logits(
Example #4
0
    state_dict = torch.load(f)
    state_dict = state_dict['model']
# Restore the trained VAE weights and freeze it in evaluation mode.
vae.load_state_dict(state_dict)
vae.eval()

# Tensorboard writer for this run.
writer = SummaryWriter('./.logs/{0}'.format(args.output_folder))

# Linear probe: predict the ground-truth factor index from the latent code.
model = nn.Linear(vae.zdim, batch_sampler.num_factors)
if args.cuda:
    model.cuda()

# Cross-entropy classification loss trained with Adam.
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
# Accumulated per-pair latent differences and their sampled factor indices.
diffs, factors = [], []
steps = 0
# NOTE(review): the loop is truncated in this excerpt -- the `to_var(` call
# at the bottom and the probe training step continue past the visible lines.
for images, targets in data_loader:
    # Encode a batch without gradients (volatile: pre-0.4 inference mode).
    images = to_var(images, args.cuda, volatile=True)
    latents = vae.encode(images)

    # Split the batch into two halves and average the absolute latent
    # difference over the resulting pairs.
    z1, z2 = torch.chunk(latents, 2, dim=0)
    diff = torch.mean(torch.abs(z1 - z2), dim=0)
    diffs.append(diff.data)

    # Find which generative factors are shared by every sample in the
    # batch, then sample one shared factor uniformly.
    targets_np = targets.numpy()
    common_factors = np.all(targets_np == targets_np[0], axis=0)
    # NOTE(review): if no factor is common, np.sum(common_factors) is 0 and
    # this divides by zero -- presumably the batch sampler guarantees at
    # least one shared factor per batch; confirm against the sampler.
    p = common_factors.astype(np.float32) / np.sum(common_factors)
    factor = np.random.choice(len(common_factors), p=p)
    factors.append(factor)

    # Once `batch_size` pairs are collected, build the probe training batch.
    if len(diffs) == args.batch_size:
        diffs = to_var(torch.stack(diffs, dim=0), args.cuda)
        factors = to_var(
Example #5
0
    vae = VAE64(num_channels=1, zdim=10)
elif args.dataset == 'celeba':
    dataset = CelebA(root='./data/celeba', transform=transforms.ToTensor())
    vae = VAE64(num_channels=3, zdim=32)
    args.obs = 'normal'
else:
    raise ValueError(
        'The `dataset` argument must be fashion-mnist, mnist, dsprites or celeba'
    )

# Data loader yielding one batch of `num_images` samples for visualization.
data_loader = torch.utils.data.DataLoader(dataset=dataset,
                                          batch_size=args.num_images,
                                          shuffle=True)

# One fixed batch of inputs (volatile: pre-0.4 inference mode, no gradients).
fixed_x, _ = next(iter(data_loader))
fixed_x = to_var(fixed_x, args.cuda, volatile=True)

# Resolve the checkpoint to load: explicit file, or latest in save_dir.
if args.save_file is not None:
    filename = args.save_file
else:
    filename = get_latest_checkpoint(args.save_dir)

# Checkpoints are binary -- open in 'rb'; text mode ('r') breaks
# torch.load on Python 3.
with open(filename, 'rb') as f:
    if args.cuda:
        ckpt = torch.load(f)
    else:
        # Remap GPU-saved tensors onto the CPU when CUDA is unavailable.
        ckpt = torch.load(f, map_location=lambda storage, loc: storage)

if args.cuda:
    vae.cuda()
vae.load_state_dict(ckpt['model'])