def main():
    """Entry point: load a trained checkpoint, draw samples from the model,
    then scan the dataset to find the closest real image to each sample.

    Writes the winning (sample, image) pairs to ``args.out`` via ``np.save``
    and prints the final per-sample distances.
    """
    parser = _build_parser()
    args = parser.parse_args()
    logging.basicConfig(format="%(levelname)s: %(message)s", level=logging.DEBUG)

    # Prefer the GPU when one is available.
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    if args.model == 'vae':
        model = VAE().double().to(device)
    else:
        # Lazy %-style args: logging formats the message only if it is emitted.
        logging.critical('model unimplemented: %s', args.model)
        return

    # Create the whole output directory tree in one call. exist_ok avoids the
    # TOCTOU race of an exists()/mkdir() pair, and parents=True handles a
    # missing grandparent directory.
    args.out.parent.mkdir(parents=True, exist_ok=True)

    # train_test_split=1 routes every image into the "test" split, so the
    # nearest-neighbor scan below covers the full dataset.
    _, test_ds = build_datasets(args.im_path, train_test_split=1)

    # map_location lets a CUDA-trained checkpoint load on a CPU-only host.
    ckpt = torch.load(args.save_path, map_location=device)
    model.load_state_dict(ckpt['model_state_dict'])
    model.eval()

    # No gradients needed for sampling; reshape to (N, H, W, 3) image layout.
    # NOTE(review): assumes IM_DIMS is (H, W) — confirm against its definition.
    with torch.no_grad():
        samps = model.sample(args.samples).reshape(-1, *IM_DIMS, 3)

    # pin_memory only helps (and only applies) when copying to a GPU.
    loader = DataLoader(test_ds, batch_size=args.batch, num_workers=args.workers,
                        pin_memory=torch.cuda.is_available())

    # record tracks, per sample, the best-so-far match and its distance.
    record = _init_record(samps)
    with tqdm(total=TOTAL_IMAGES) as pbar:
        for chunk in loader:
            _update_winner(chunk.reshape(-1, *IM_DIMS, 3), record, pbar)

    np.save(args.out, record['pair'])
    print('final distances:', record['distance'])
# Periodic training report: print current losses, then decode one validation
# sample through the VAE to eyeball reconstruction quality.
# NOTE(review): all names here (iteration, cross_entropy, kld, vae,
# batch_loader, parameters, t, Variable, args) are bound earlier in the
# enclosing training loop, which is outside this view.
print(iteration)
print('|--------ce------aux-ce-----kld--------|')
print('|----------------train-----------------|')
# Losses are normalized per character: 210 is presumably the sequence
# length, so (210 * batch_size) is the token count per batch — TODO confirm.
print(
    cross_entropy.data.cpu().numpy()[0] / (210 * args.batch_size),
    aux_cross_entropy.data.cpu().numpy()[0] / (210 * args.batch_size),
    kld.data.cpu().numpy()[0])
print('|----------------valid-----------------|')
print(
    valid_cross_entropy.data.cpu().numpy()[0] / (210 * args.batch_size),
    valid_aux_cross_entropy.data.cpu().numpy()[0] / (210 * args.batch_size),
    valid_kld.data.cpu().numpy()[0])
print('|--------------------------------------|')
# Grab a small validation batch; only the input ids are needed here.
# (NB: `input` shadows the builtin of the same name.)
input, _, _ = batch_loader.next_batch(2, 'valid', args.use_cuda)
# Encode the first sequence to its posterior parameters.
mu, logvar = vae.inference(input[0].unsqueeze(1))
std = t.exp(0.5 * logvar)
# Reparameterization trick: z = mu + std * eps, eps ~ N(0, I).
z = Variable(t.randn([1, parameters.latent_size]))
if args.use_cuda:
    z = z.cuda()
z = z * std + mu
# Print the original character sequence followed by the model's sample
# decoded from the posterior draw, framed by separator rules.
print(''.join([
    batch_loader.idx_to_char[idx]
    for idx in input.data.cpu().numpy()[0]
]))
print(vae.sample(batch_loader, args.use_cuda, z))
print('|--------------------------------------|')