Example #1
import math
import torch
from torch.optim import Adam
# Assumes `model`, `train_loader`, `test_loader`, and `device`
# are defined earlier in the full script.

###########
## Optim ##
###########

optimizer = Adam(model.parameters(), lr=1e-3)

###########
## Train ##
###########

print('Training...')
for epoch in range(10):
    l = 0.0
    for i, x in enumerate(train_loader):
        optimizer.zero_grad()
        loss = -model.log_prob(x.to(device)).sum() / (math.log(2) * x.numel())
        loss.backward()
        optimizer.step()
        l += loss.detach().cpu().item()
        print('Epoch: {}/{}, Iter: {}/{}, Bits/dim: {:.3f}'.format(
            epoch + 1, 10, i + 1, len(train_loader), l / (i + 1)),
              end='\r')
    print('')
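
The training loss above is the negative log-likelihood rescaled to bits per dimension: the summed nats are divided by ln 2 (nats to bits) and by the number of tensor elements in the batch (per-dimension average). A minimal sketch of that conversion (the helper name is hypothetical):

import math

def bits_per_dim(log_prob_sum, numel):
    # Convert a summed log-likelihood in nats to bits per dimension.
    # log_prob_sum: sum of log p(x) over the batch (nats); numel: batch_size * dims.
    return -log_prob_sum / (math.log(2) * numel)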

##########
## Test ##
##########

print('Testing...')
with torch.no_grad():
    l = 0.0
    for i, x in enumerate(test_loader):
        loss = -model.log_prob(x.to(device)).sum() / (math.log(2) * x.numel())
        l += loss.cpu().item()
        print('Iter: {}/{}, Bits/dim: {:.3f}'.format(i + 1, len(test_loader), l / (i + 1)), end='\r')
    print('')
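
All of these excerpts assume a density model exposing `log_prob` and `parameters`, plus data loaders and a device, defined elsewhere in the script. A self-contained stand-in (all names hypothetical) that the loops above would accept:

import torch
import torch.nn as nn
from torch.utils.data import DataLoader

class DiagonalGaussian(nn.Module):
    # Toy trainable density standing in for `model`.
    def __init__(self, dim):
        super().__init__()
        self.mean = nn.Parameter(torch.zeros(dim))
        self.log_std = nn.Parameter(torch.zeros(dim))

    def log_prob(self, x):
        dist = torch.distributions.Normal(self.mean, self.log_std.exp())
        return dist.log_prob(x).sum(dim=-1)  # one log-density per example

device = 'cpu'
model = DiagonalGaussian(dim=2).to(device)
# A raw tensor is indexable, so DataLoader yields plain tensor batches.
train_loader = DataLoader(torch.randn(512, 2), batch_size=64, shuffle=True)
test_loader = DataLoader(torch.randn(128, 2), batch_size=64)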
Example #2
import torch
from torch.optim import Adam
# Assumes `model`, `train_loader`, `test_loader`, `device`, and the
# `iwbo_nats` helper are imported/defined earlier in the full script.

###########
## Optim ##
###########

optimizer = Adam(model.parameters(), lr=1e-3)

###########
## Train ##
###########

print('Training...')
for epoch in range(20):
    l = 0.0
    for i, x in enumerate(train_loader):
        optimizer.zero_grad()
        loss = -model.log_prob(x.to(device)).mean()
        loss.backward()
        optimizer.step()
        l += loss.detach().cpu().item()
        print('Epoch: {}/{}, Iter: {}/{}, Nats: {:.3f}'.format(epoch+1, 20, i+1, len(train_loader), l/(i+1)), end='\r')
    print('')

##########
## Test ##
##########

print('Testing...')
with torch.no_grad():
    l = 0.0
    for i, x in enumerate(test_loader):
        loss = iwbo_nats(model, x.to(device), k=10)
        l += loss.cpu().item()
        print('Iter: {}/{}, Nats: {:.3f}'.format(i + 1, len(test_loader), l / (i + 1)), end='\r')
    print('')
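
`iwbo_nats` is a helper from the surrounding library and is not shown here. Assuming `model.log_prob` returns a stochastic lower-bound estimate per data point (e.g. a VAE's ELBO), an IWAE-style importance-weighted bound in nats could be sketched as:

import math
import torch

def iwbo_nats_sketch(model, x, k):
    # Log-mean-exp of k independent bound estimates (tighter than the
    # single-sample bound), negated and averaged to nats per data point.
    estimates = torch.stack([model.log_prob(x) for _ in range(k)], dim=0)  # (k, batch)
    iwbo = torch.logsumexp(estimates, dim=0) - math.log(k)
    return -iwbo.mean()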
Example #3
from torch.optim import Adam, Adamax
# Assumes `args` (parsed CLI flags), the target density `p`, and the
# trainable density `pi` are defined earlier in the full script.

###############
## Optimizer ##
###############

if args.optimizer == 'adam':
    optimizer = Adam(pi.parameters(), lr=args.lr)
elif args.optimizer == 'adamax':
    optimizer = Adamax(pi.parameters(), lr=args.lr)
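
The `args` namespace comes from CLI parsing elsewhere; a hypothetical argparse block covering exactly the flags this excerpt reads (defaults are made up) might look like:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--optimizer', type=str, default='adam', choices=['adam', 'adamax'])
parser.add_argument('--lr', type=float, default=1e-3)
parser.add_argument('--iter', type=int, default=10000)
parser.add_argument('--batch_size', type=int, default=128)
parser.add_argument('--print_every', type=int, default=100)
args = parser.parse_args()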

##############
## Training ##
##############

print('Training...')
loss_sum = 0.0
for i in range(args.iter):
    z, log_p_z = p.sample_with_log_prob(args.batch_size)
    log_pi_z = pi.log_prob(z)
    KL = (log_p_z - log_pi_z).mean()  # Monte Carlo estimate of KL(p || pi)
    optimizer.zero_grad()
    loss = KL
    loss.backward()
    optimizer.step()
    loss_sum += loss.detach().cpu().item()
    d = 1 + (i % args.print_every)  # iterations since the running sum was last reset
    print('Iter: {}/{}, KL: {:.3f}'.format(i+1, args.iter, loss_sum/d), end='\r')
    if (i+1) % args.print_every == 0:
        final_loss = loss_sum / args.print_every
        loss_sum = 0.0
        print('')
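
The loop above fits `pi` to `p` by gradient descent on a Monte Carlo estimate of KL(p || pi): draw z ~ p, score it under both densities, and average log p(z) - log pi(z). The `sample_with_log_prob` method belongs to the surrounding library; for a torch.distributions object it could be emulated as:

def sample_with_log_prob(dist, num_samples):
    # Draw samples and score them under the same distribution.
    z = dist.sample((num_samples,))
    return z, dist.log_prob(z)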

################
## Save model ##
################

Example #4
from torch.optim import Adam, Adamax
# Assumes `args`, `model`, `train_loader`, and `test_loader`
# are defined earlier in the full script.

if args.optimizer == 'adam':
    optimizer = Adam(model.parameters(), lr=args.lr)
elif args.optimizer == 'adamax':
    optimizer = Adamax(model.parameters(), lr=args.lr)

##############
## Training ##
##############

print('Training...')
for epoch in range(args.epochs):
    loss_sum = 0.0
    for i, x in enumerate(train_loader):
        optimizer.zero_grad()
        loss = -model.log_prob(x.to(args.device)).mean()
        loss.backward()
        optimizer.step()
        loss_sum += loss.detach().cpu().item()
        print('Epoch: {}/{}, Iter: {}/{}, Nats: {:.3f}'.format(epoch+1, args.epochs, i+1, len(train_loader), loss_sum/(i+1)), end='\r')
    print('')
final_train_nats = loss_sum / len(train_loader)  # average training loss (nats) from the final epoch

#############
## Testing ##
#############

print('Testing...')
with torch.no_grad():
    loss_sum = 0.0
    for i, x in enumerate(test_loader):
        loss = -model.log_prob(x.to(args.device)).mean()
        loss_sum += loss.cpu().item()
        print('Iter: {}/{}, Nats: {:.3f}'.format(i + 1, len(test_loader), loss_sum / (i + 1)), end='\r')
    print('')

Example #5
import torch
from torch.optim import Adam
# Assumes `model`, `train_loader`, and the `test` dataset
# are defined earlier in the full script.

###########
## Optim ##
###########

optimizer = Adam(model.parameters(), lr=1e-3)

###########
## Train ##
###########

print('Training...')
for epoch in range(10):
    l = 0.0
    for i, x in enumerate(train_loader):
        optimizer.zero_grad()
        loss = -model.log_prob(x).mean()
        loss.backward()
        optimizer.step()
        l += loss.detach().cpu().item()
        print('Epoch: {}/{}, NLL: {:.3f}'.format(epoch + 1, 10, l / (i + 1)), end='\r')
    print('')

############
## Sample ##
############

print('Sampling...')
data = test.data.numpy()
with torch.no_grad():  # sampling needs no gradients; also allows .numpy() on the result
    samples = model.sample(100000).numpy()
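
The two arrays are presumably collected to compare the learned density against the data. Assuming two-dimensional data, a minimal matplotlib sketch (binning and filename are arbitrary):

import matplotlib.pyplot as plt

fig, axes = plt.subplots(1, 2, figsize=(8, 4))
axes[0].hist2d(data[:, 0], data[:, 1], bins=100)
axes[0].set_title('Data')
axes[1].hist2d(samples[:, 0], samples[:, 1], bins=100)
axes[1].set_title('Model samples')
plt.savefig('samples.png')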