from pathlib import Path

import torch


def main():
    args = parse_args()
    # Model input is (channels, width, height) of the downscaled images.
    input_size = (settings.reduced_image_channels,
                  settings.reduced_image_width,
                  settings.reduced_image_height)
    vae = VAE(input_size=input_size,
              latent_dim=settings.vae_latent_dim).to(settings.device)
    savefile = Path(args.savefile)
    if savefile.exists():
        # Restore trained weights; map_location keeps this working when the
        # checkpoint was saved on a different device.
        vae.load_state_dict(torch.load(savefile, map_location=settings.device))
    vae.eval()
    run(vae, savefile)
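# parse_args is not shown here; a minimal argparse sketch of what it might
# look like, assuming a single positional checkpoint path (the argument
# names and help strings are hypothetical, inferred from the attributes
# accessed in main()):
import argparse


def parse_args():
    parser = argparse.ArgumentParser(description='Run a trained VAE.')
    parser.add_argument('savefile', type=str,
                        help='path to a saved state_dict checkpoint')
    return parser.parse_args()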
import os
import pickle

import numpy as np
from scipy.special import logsumexp

NUM_Z_SAMPLES = 100  # latent samples drawn per test point


def eval(config, testloader, args):
    # `args` is passed in explicitly; the original body referenced it as a
    # global, which is undefined in this scope.
    storage = {
        # 'll_precision': None, 'll_recall': None,
        'log_densities': None,
        'params': None,
        'ground_truth': None,
    }
    input_dim = testloader.dataset.input_dim_
    vae = VAE(input_dim, config, checkpoint_directory=None)
    vae.to(config['model']['device'])
    if args.restore_filename is not None:
        vae.restore_model(args.restore_filename, epoch=None)
    vae.eval()

    precisions, recalls, all_log_densities = [], [], []
    for i in range(NUM_Z_SAMPLES):
        print("evaluation round {}".format(i))
        _, _, precision, recall, log_densities, ground_truth = vae.evaluate(
            testloader)
        precisions.append(precision)
        recalls.append(recall)
        all_log_densities.append(np.expand_dims(log_densities, axis=1))

    print(mean_confidence_interval(precisions))
    print(mean_confidence_interval(recalls))

    all_log_densities = np.concatenate(all_log_densities, axis=1)
    # Log-mean-exp over the sample axis: logsumexp(x) - log(K) is a
    # numerically stable log of the average density across K latent draws.
    storage['log_densities'] = (logsumexp(all_log_densities, axis=1)
                                - np.log(NUM_Z_SAMPLES))
    storage['ground_truth'] = ground_truth
    # storage['ll_precision'] = mean_confidence_interval(precisions)
    # storage['ll_recall'] = mean_confidence_interval(recalls)
    # storage['params'] = self._get_parameters(testloader)

    pkl_filename = './results/test/{}{}/{}.pkl'.format(
        config['model']['name'],
        config['model']['config_id'],
        args.restore_filename)
    os.makedirs(os.path.dirname(pkl_filename), exist_ok=True)
    with open(pkl_filename, 'wb') as _f:
        pickle.dump(storage, _f, pickle.HIGHEST_PROTOCOL)
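# mean_confidence_interval is not defined in this file; a minimal sketch of
# the conventional Student-t recipe it presumably follows (the 95%
# confidence level and the (mean, half_width) return format are
# assumptions):
import numpy as np
import scipy.stats


def mean_confidence_interval(data, confidence=0.95):
    # Sample mean plus the half-width of a t-based confidence interval.
    a = np.asarray(data, dtype=float)
    mean = np.mean(a)
    sem = scipy.stats.sem(a)  # standard error of the mean
    half_width = sem * scipy.stats.t.ppf((1 + confidence) / 2.0, len(a) - 1)
    return mean, half_width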