        writer.add_scalar('train_ppll', train_ppll, global_step=epoch + 1)
        if (epoch + 1) % args.valid_every == 0:
            valid_ppll = evaluate(model, valid_loader)
            writer.add_scalar('valid_ppll', valid_ppll, global_step=epoch + 1)

    # Save log-likelihood
    with open('results/{}_valid_loglik.txt'.format(run_name), 'w') as f:
        f.write(str(valid_ppll))

    # Save args
    args_table = get_args_table(vars(args))
    with open('results/{}_args.txt'.format(run_name), 'w') as f:
        f.write(str(args_table))
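
    # For reference: a minimal sketch of what the `get_args_table` helper could
    # look like, mirroring the PrettyTable pattern used in Example 2 below.
    # `get_args_table_sketch` is a hypothetical name; the real helper is
    # imported from elsewhere in this repo.
    from prettytable import PrettyTable

    def get_args_table_sketch(args_dict):
        table = PrettyTable(['Arg', 'Value'])
        for arg, val in args_dict.items():
            table.add_row([arg, val])
        return table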

    # Save model
    state_dict = model.state_dict()
    torch.save(state_dict, 'models/{}.pt'.format(run_name))

##########
## Test ##
##########

# Test
if args.iwbo_k is None:
    test_ppll = evaluate(model, test_loader)
else:
    # Pass the number of importance samples through; this assumes evaluate_iwbo
    # accepts k, since args.iwbo_k would otherwise be unused here.
    test_ppll = evaluate_iwbo(model, test_loader, k=args.iwbo_k)
if args.train:
    writer.add_scalar('test_ppll', test_ppll, global_step=epoch + 1)
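
# For reference: a minimal sketch of what an `evaluate`-style helper could look
# like, assuming `model.log_prob(x)` returns per-example log-likelihoods and
# the loader yields plain tensors; the real `evaluate` used above is defined
# elsewhere, and `evaluate_sketch` is a hypothetical name.
@torch.no_grad()
def evaluate_sketch(model, loader):
    model.eval()
    total, count = 0.0, 0
    for x in loader:
        total += model.log_prob(x).sum().item()
        count += x.shape[0]
    return total / count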

# Save log-likelihood
with open('results/{}_test_loglik.txt'.format(run_name), 'w') as f:
    f.write(str(test_ppll))

###############
## Example 2 ##
###############

import os
import pickle

import torch
from prettytable import PrettyTable

# Save args
with open('log/{}_args.pkl'.format(args.target), 'wb') as f:
    pickle.dump(args, f)
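
# Hedged usage note: the pickled Namespace can be restored later, e.g.
#   with open('log/{}_args.pkl'.format(args.target), 'rb') as f:
#       args = pickle.load(f)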
table = PrettyTable(['Arg', 'Value'])
for arg, val in vars(args).items():
    table.add_row([arg, val])
with open('log/{}_args.txt'.format(args.target), 'w') as f:
    f.write(str(table))

# Save result
with open('log/{}_loss.txt'.format(args.target), 'w') as f:
    f.write(str(final_loss))

# Save model
torch.save(pi.state_dict(), 'log/{}.pt'.format(args.target))
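
# Hedged usage note: to reload the weights, rebuild the model and restore the
# state dict; `build_pi` is a placeholder for however `pi` was constructed.
#   pi = build_pi(args)
#   pi.load_state_dict(torch.load('log/{}.pt'.format(args.target)))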


##############
## Sampling ##
##############

if args.num_dims == 2:

    print('Sampling...')

    # Make dir
    if not os.path.exists('figures'):
        os.mkdir('figures')

    # Learned distribution
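    # A minimal sketch of how the learned 2-D distribution could be plotted,
    # assuming the model exposes `pi.sample(num_samples)`; the original
    # plotting code is cut off here, so this is illustrative only.
    import matplotlib.pyplot as plt

    samples = pi.sample(10000).detach().cpu().numpy()
    plt.figure(figsize=(4, 4))
    plt.scatter(samples[:, 0], samples[:, 1], s=1, alpha=0.3)
    plt.title('Learned distribution')
    plt.savefig('figures/{}_samples.png'.format(args.target))
    plt.close()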