# Fashion-MNIST train/test pair, downloaded on first use.
train_set = datasets.FashionMNIST(root='../MNIST-data',
                                  train=True,
                                  download=True,
                                  transform=transforms.Compose(
                                      [transforms.ToTensor()]))

# BUG FIX: the test split was loaded with datasets.MNIST (handwritten
# digits) while training used FashionMNIST — the model would have been
# evaluated on a dataset it was never trained on. Both splits now use
# FashionMNIST.
test_set = datasets.FashionMNIST(root='../MNIST-data',
                                 train=False,
                                 download=True,
                                 transform=transforms.Compose(
                                     [transforms.ToTensor()]))

# Flat (non-convolutional) input pipeline; presumably True would keep
# image-shaped tensors for a CNN encoder — TODO confirm against ut.get_mnist_data.
CNN = False

train_loader, labeled_subset, test_set = ut.get_mnist_data(
    device, train_set, test_set, use_test_subset=True, CNN=CNN)

# Assemble the hierarchical VAE; every loss-term weight comes straight
# from the parsed CLI arguments.
_hkvae_kwargs = dict(
    rec_weight=args.recw,
    kl_xy_x_weight=args.kl_xy_xw,
    kl_xy_y_weight=args.kl_xy_yw,
    gen_weight=args.gw,
    class_weight=args.cw,
    name=model_name,
    CNN=CNN,
)
hkvae = HKVAE(**_hkvae_kwargs).to(device)

# Toggle for running the training loop; flip to False to skip training.
Train = True
if Train:
    # Summary writer for this run; any previous logs under the same
    # model name are overwritten.
    writer = ut.prepare_writer(model_name, overwrite_existing=True)
    # NOTE(review): this train(...) call is truncated in this chunk —
    # its remaining keyword arguments lie past the visible source.
    train(model=hkvae,
          train_loader=train_loader,
          labeled_subset=labeled_subset,
# ---- Esempio n. 2 (snippet-separator artifact from the aggregation source; not code) ----
# Remaining CLI flags for the GMVAE run (the parser itself is created
# earlier in the file, outside this chunk).
parser.add_argument('--k', type=int, default=500,
                    help="Number mixture components in MoG prior")
parser.add_argument('--iter_max', type=int, default=20000,
                    help="Number of training iterations")
parser.add_argument('--iter_save', type=int, default=10000,
                    help="Save model every n iterations")
parser.add_argument('--run', type=int, default=0,
                    help="Run ID. In case you want to run replicates")
args = parser.parse_args()

# Human-readable run identifier, e.g. "model=gmvae_z=10_k=500_run=0000".
name_parts = [
    ('model={:s}', 'gmvae'),
    ('z={:02d}', args.z),
    ('k={:03d}', args.k),
    ('run={:04d}', args.run),
]
model_name = '_'.join(template.format(value) for template, value in name_parts)
pprint(vars(args))
print('Model name:', model_name)

# Prefer the GPU when one is available.
if torch.cuda.is_available():
    device = torch.device('cuda')
else:
    device = torch.device('cpu')

train_loader, labeled_subset, _ = ut.get_mnist_data(device, use_test_subset=True)
gmvae = GMVAE(z_dim=args.z, k=args.k, name=model_name).to(device)

# Restore the final checkpoint and report the ELBO on the labeled subset.
ut.load_model_by_name(gmvae, global_step=args.iter_max)
ut.evaluate_lower_bound(gmvae, labeled_subset, run_iwae=False)

# Draw 200 samples and arrange them as a 10x20 grid of 28x28 images.
samples = gmvae.sample_x(200).reshape(10, 20, 28, 28)

# Render the 10x20 sample grid, one image per subplot, axes hidden.
f, axarr = plt.subplots(10, 20)

for i in range(samples.shape[0]):
    for j in range(samples.shape[1]):
        # BUG FIX: Tensor.numpy() raises a TypeError on CUDA tensors, and
        # `device` may be 'cuda' above. .cpu() moves the data to host
        # memory first and is a no-op when the tensor is already on CPU.
        axarr[i, j].imshow(samples[i, j].detach().cpu().numpy())
        axarr[i, j].axis('off')

plt.show()
# ---- Esempio n. 3 (snippet-separator artifact from the aggregation source; not code) ----
                    default=10000,
                    help="Save model every n iterations")
# Remaining CLI flags for the SSVAE run (the parser is created earlier,
# outside this chunk).
parser.add_argument('--run', type=int, default=0,
                    help="Run ID. In case you want to run replicates")
parser.add_argument('--train', type=int, default=1, help="Flag for training")
args = parser.parse_args()

# Readable run identifier built from the hyperparameter values.
layout = [
    ('model={:s}', 'ssvae'),
    ('gw={:03d}', args.gw),
    ('cw={:03d}', args.cw),
    ('run={:04d}', args.run),
]
model_name = '_'.join(fmt.format(val) for fmt, val in layout)
pprint(vars(args))
print('Model name:', model_name)

# Device selection, data loading (full test set), and model construction.
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
train_loader, labeled_subset, test_set = ut.get_mnist_data(device, use_test_subset=False)
ssvae = SSVAE(
    gen_weight=args.gw,
    class_weight=args.cw,
    name=model_name,
).to(device)

# Semi-supervised training loop, gated by the --train flag.
if args.train:
    writer = ut.prepare_writer(model_name, overwrite_existing=True)
    _train_kwargs = dict(
        model=ssvae,
        train_loader=train_loader,
        labeled_subset=labeled_subset,
        device=device,
        y_status='semisup',
        tqdm=tqdm.tqdm,
        writer=writer,
        iter_max=args.iter_max,
        iter_save=args.iter_save,
    )
    train(**_train_kwargs)