# Example 1 — scraped snippet marker (original text: "Esempio n. 1")
# 0
##########
## Test ##
##########

# Evaluate the model on the test set: running mean of negative log-likelihood
# expressed in bits per dimension, printed as a carriage-return progress line.
print('Testing...')
with torch.no_grad():
    bpd_sum = 0.0
    num_batches = len(test_loader)
    for batch_idx, batch in enumerate(test_loader):
        # NLL summed over the batch, normalized to bits/dim by its element count.
        nll = -model.log_prob(batch.to(device)).sum()
        bpd = nll / (math.log(2) * batch.numel())
        bpd_sum += bpd.detach().cpu().item()
        print('Iter: {}/{}, Bits/dim: {:.3f}'.format(batch_idx + 1, num_batches,
                                                     bpd_sum / (batch_idx + 1)),
              end='\r')
    print('')

############
## Sample ##
############

# Save a grid of real CIFAR-10 test images next to a grid of model samples,
# then write the test log-likelihood value to a text file.
print('Sampling...')
n_show = 64
img = torch.from_numpy(data.test.data[:n_show]).permute([0, 3, 1, 2])  # NHWC -> NCHW
samples = model.sample(n_show)

data_path = './examples/results/cifar10_data.png'
samples_path = './examples/results/cifar10_aug_flow.png'
vutils.save_image(img.cpu().float() / 255, data_path, nrow=8)
vutils.save_image(samples.cpu().float() / 255, samples_path, nrow=8)

# args.iwbo_k (importance-weighted bound, presumably — confirm) is appended
# to the filename when set; otherwise the suffix is empty.
suffix = args.iwbo_k if args.iwbo_k else ''
loglik_path = 'results/{}_test_loglik{}.txt'.format(run_name, suffix)
with open(loglik_path, 'w') as f:
    f.write(str(test_ppll))

##############
## Sampling ##
##############

# Plot a rowcol x rowcol grid of point-cloud samples drawn from the model.
if args.dataset == 'spatial_mnist':
    # spatial_mnist point clouds live in the 28x28 pixel square.
    bounds = [[0, 28], [0, 28]]
else:
    raise NotImplementedError()

model = model.eval()
n_plots = args.rowcol**2
samples = model.sample(n_plots).cpu().numpy()

side = args.pixels / args.dpi
fig, ax = plt.subplots(args.rowcol, args.rowcol,
                       figsize=(side, side), dpi=args.dpi)
for row in range(args.rowcol):
    for col in range(args.rowcol):
        # Grid is filled column-major: sample index = row + rowcol * col.
        panel = ax[row][col]
        idx = row + args.rowcol * col
        panel.scatter(samples[idx, :, 0], samples[idx, :, 1])
        panel.set_xlim(bounds[0])
        panel.set_ylim(bounds[1])
        panel.axis('off')
# NOTE(review): this savefig call is truncated in the scraped snippet — the
# remaining keyword arguments and the closing parenthesis are missing.
plt.savefig('figures/{}.png'.format(run_name),
            bbox_inches='tight',
# Example 3 — scraped snippet marker (original text: "Esempio n. 3")
# 0
        # NOTE(review): snippet starts mid-loop — the enclosing
        # `for epoch ...` / `for i, x in enumerate(...)` headers (and any
        # optimizer.zero_grad() call) are cut off above this line.
        # Maximum-likelihood step: minimize the mean negative log-likelihood.
        loss = -model.log_prob(x).mean()
        loss.backward()
        optimizer.step()
        # Running sum of per-batch losses; printed as a running mean below.
        # (Label says "Loglik" but the value is the NLL running mean.)
        l += loss.detach().cpu().item()
        print('Epoch: {}/{}, Loglik: {:.3f}'.format(epoch + 1, 10,
                                                    l / (i + 1)),
              end='\r')
    print('')

############
## Sample ##
############

# Side-by-side 2D histograms: ground-truth data vs. 100k model samples,
# both rendered over the square [-4, 4]^2.
print('Sampling...')
data = test.data.numpy()
samples = model.sample(100000).numpy()

fig, ax = plt.subplots(1, 2, figsize=(12, 6))
lim = [-4, 4]
panels = [('Data', data), ('Samples', samples)]
for axis, (title, points) in zip(ax, panels):
    axis.set_title(title)
    axis.hist2d(points[..., 0], points[..., 1], bins=256, range=[lim, lim])
    axis.set_xlim(lim)
    axis.set_ylim(lim)
    axis.axis('off')
# Example 4 — scraped snippet marker (original text: "Esempio n. 4")
# 0
              end='\r')
    print('')
# NOTE(review): the loop that accumulates loss_sum is cut off above this
# snippet. Mean test loss in nats — assumes loss_sum is a loss summed over
# len(test_loader) batches; confirm against the missing loop body.
final_test_nats = loss_sum / len(test_loader)

##############
## Sampling ##
##############

# Render a 2D histogram of model samples and save it as a borderless PNG.
print('Sampling...')
# face_einstein lives in the unit square; the other 2D datasets use [-4, 4]^2.
bounds = [[0, 1], [0, 1]] if args.dataset == 'face_einstein' else [[-4, 4], [-4, 4]]

# Plot samples
samples = model.sample(args.num_samples).detach().cpu().numpy()
inches = args.pixels / args.dpi
plt.figure(figsize=(inches, inches), dpi=args.dpi)
plt.hist2d(samples[..., 0], samples[..., 1], bins=256, range=bounds)
plt.xlim(bounds[0])
plt.ylim(bounds[1])
plt.axis('off')
out_path = 'figures/{}_aug_flow_samples.png'.format(args.dataset)
plt.savefig(out_path, bbox_inches='tight', pad_inches=0)

# Plot density
# NOTE(review): this torch.meshgrid call is truncated in the scraped snippet —
# the closing bracket/parenthesis (and any further code) is missing. It builds
# an args.grid_size x args.grid_size evaluation grid spanning `bounds`.
xv, yv = torch.meshgrid([
    torch.linspace(bounds[0][0], bounds[0][1], args.grid_size),
    torch.linspace(bounds[1][0], bounds[1][1], args.grid_size)
# Example 5 — scraped snippet marker (original text: "Esempio n. 5")
# 0
# Smoke-test the model on a single batch: score it, then draw samples.
x = next(iter(train_loader))

x = x['X']
x = x.unsqueeze(1)
# Pad the last dimension with 101 zero "channels" shaped like channel 0.
zero_pad = torch.zeros_like(x[..., [0] * 101])
x = torch.cat([x, zero_pad], dim=-1)
print(model.log_prob(x))

# Debug markers left in by the author — printed verbatim, eight times.
for _ in range(8):
    print('back')
# The sample shape is computed and discarded, exactly as in the original.
_ = model.sample(64).shape
#%%
###########
## Optim ##
###########

# Adam over all model parameters with a fixed learning rate.
learning_rate = 1e-3
optimizer = Adam(model.parameters(), lr=learning_rate)

###########
## Train ##
###########

print('Training...')
for epoch in range(1000):
    # NOTE(review): `l` is a *list* here (unlike the scalar accumulator in the
    # earlier snippets) — presumably batch losses are appended/concatenated,
    # but the loop body is cut off below so this cannot be confirmed here.
    l = [0.0]
    for i, x in enumerate(train_loader):