'NoImproveEpochs {:02d}/{:02d}'.format( itr, val_loss.avg, val_nfe.avg, n_vals_without_improvement, args.early_stopping)) logger.info(log_message) model.train() logger.info('Training has finished.') model = restore_model(model, os.path.join(args.save, 'checkpt.pth')).to(device) set_cnf_options(args, model) logger.info('Evaluating model on test set.') model.eval() override_divergence_fn(model, "brute_force") with torch.no_grad(): test_loss = utils.AverageMeter() test_nfe = utils.AverageMeter() for itr, x in enumerate( batch_iter(data.tst.x, batch_size=test_batch_size)): x = cvt(x) test_loss.update(compute_loss(x, model).item(), x.shape[0]) test_nfe.update(count_nfe(model)) logger.info('Progress: {:.2f}%'.format( 100. * itr / (data.tst.x.shape[0] / test_batch_size))) log_message = '[TEST] Iter {:06d} | Test Loss {:.6f} | NFE {:.0f}'.format( itr, test_loss.avg, test_nfe.avg) logger.info(log_message)
def evaluate(data_loader, model, args, logger, testing=False, epoch=0):
    """Run one evaluation pass over ``data_loader``.

    Averages the batch losses (ELBO, or bpd for non-binary inputs) over the
    loader.  When ``testing`` is True and the flow is not a CNF, additionally
    estimates the test-set log-likelihood by importance sampling.

    Args:
        data_loader: iterable of ``(data, target)`` batches; when ``testing``
            its ``.dataset.tensors[0]`` is used for likelihood estimation.
        model: generative model; calling it returns
            ``(x_mean, z_mu, z_var, ldj, z0, zk)``.
        args: experiment configuration (``input_type``, ``flow``, ``cuda``,
            ``input_size``, ``dataset``, ``divergence_fn``, ...).
        logger: forwarded to ``calculate_likelihood``.
        testing: if True, use brute-force divergence for CNFs, compute the
            log-likelihood for non-CNFs, and skip reconstruction plots.
        epoch: epoch index, forwarded to ``plot_reconstructions``.

    Returns:
        ``(loss, bpd)`` when ``testing`` is False, otherwise
        ``(log_likelihood, nll_bpd)`` (both ``None`` for CNF flows).
    """
    model.eval()
    loss = 0.
    bpd = 0.

    # loss_type only affects the annotation on the reconstruction plot.
    loss_type = 'elbo' if args.input_type == 'binary' else 'bpd'

    if testing and 'cnf' in args.flow:
        # Exact trace computation for evaluation instead of the stochastic
        # Hutchinson estimator.
        override_divergence_fn(model, "brute_force")

    for batch_idx, (data, _) in enumerate(data_loader, start=1):
        if args.cuda:
            data = data.cuda()

        with torch.no_grad():
            data = data.view(-1, *args.input_size)
            x_mean, z_mu, z_var, ldj, z0, zk = model(data)
            batch_loss, rec, kl, batch_bpd = calculate_loss(
                x_mean, data, z_mu, z_var, z0, zk, ldj, args)

            bpd += batch_bpd
            loss += batch_loss.item()

            # Plot reconstructions of the first batch only (validation runs).
            if batch_idx == 1 and testing is False:
                plot_reconstructions(data, x_mean, batch_loss, loss_type,
                                     epoch, args)

    loss /= len(data_loader)
    bpd /= len(data_loader)

    # Importance-sampled log-likelihood; too expensive for CNF models.
    if testing and 'cnf' not in args.flow:
        with torch.no_grad():
            test_data = data_loader.dataset.tensors[0]
            if args.cuda:
                test_data = test_data.cuda()

            model.eval()

            # caltech is small enough that fewer importance samples suffice.
            S = 2000 if args.dataset == 'caltech' else 5000
            log_likelihood, nll_bpd = calculate_likelihood(
                test_data, model, args, logger, S=S, MB=500)
    else:
        log_likelihood = None
        nll_bpd = None

    if testing and 'cnf' in args.flow:
        # Restore the stochastic divergence estimator overridden above.
        # (The original restore call sat inside the non-CNF branch and was
        # unreachable, leaving CNF models in brute-force mode after testing.)
        override_divergence_fn(model, args.divergence_fn)

    if args.input_type in ['multinomial']:
        bpd = loss / (np.prod(args.input_size) * np.log(2.))

    if not testing:
        return loss, bpd
    return log_likelihood, nll_bpd
def _first_batch_visualizations(data, x_mean, batch_loss, loss_type, epoch,
                                model, args):
    """Render the per-epoch diagnostics produced for the loader's first batch."""
    if args.input_type == 'synthetic':
        # Scatter-plot decoder samples: `sample_size` draws per label.
        sample_size = 500
        noise = torch.FloatTensor(
            sample_size * args.num_labels * args.z_size).normal_().reshape(
                sample_size * args.num_labels, -1).to(args.device)
        if args.conditional:
            tgt = torch.tensor(
                list(range(args.num_labels)) * sample_size).to(args.device)
            sample = model.decode(noise, tgt)
            labels = tgt.cpu().numpy()
        else:
            sample = model.decode(noise, None)
            # NOTE(review): the original referenced an undefined `tgt` here
            # (NameError on unconditional models); passing None instead —
            # confirm visualize_synthetic_data accepts label-less input.
            labels = None
        visualize_synthetic_data(sample.cpu().numpy(), labels,
                                 args.num_labels, 'rec')
    elif not args.evaluate:
        plot_reconstructions(data, x_mean, batch_loss, loss_type, epoch, args)
        # One decoder sample per label (num_labels - 1, matching the
        # original grid layout).
        n_rows = args.num_labels - 1
        noise = torch.FloatTensor(n_rows * args.z_size).normal_().reshape(
            n_rows, -1).to(args.device)
        if args.conditional:
            tgt = torch.tensor(list(range(n_rows))).to(args.device)
            sample = model.decode(noise, tgt)
        else:
            sample = model.decode(noise, None)
        plot_images(args, sample.data.cpu().numpy(),
                    args.snap_dir + 'reconstruction/',
                    'sample_of_1_e_' + str(epoch))
    else:
        _dump_conditional_samples(model, args)


def _dump_conditional_samples(model, args, sample_size=100):
    """Decode ``sample_size`` images per label and write them out as JPEGs.

    No-op for unconditional models (the original dumped samples only in the
    conditional branch).
    """
    if not args.conditional:
        return

    import cv2  # local import: heavy optional dependency, only needed here

    noise = torch.FloatTensor(
        sample_size * args.num_labels * args.z_size).normal_().reshape(
            sample_size * args.num_labels, -1).to(args.device)
    labels = [i for i in range(args.num_labels) for _ in range(sample_size)]
    tgt = torch.tensor(labels).to(args.device)

    samples = model.decode(noise, tgt).data.cpu().numpy()
    labels_np = tgt.data.cpu().numpy()

    out_root = args.snap_dir + 'samples/'
    os.makedirs(out_root, exist_ok=True)
    for i, sample in enumerate(samples):
        label_dir = out_root + str(labels_np[i])
        os.makedirs(label_dir, exist_ok=True)
        # CHW -> HWC, rescale to 0..255, expand gray to BGR for imwrite.
        img = sample.swapaxes(0, 2).swapaxes(0, 1) * 255
        img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
        cv2.imwrite(label_dir + '/' + str(i) + '.jpg', img)


def evaluate(data_loader, model, args, logger, testing=False, epoch=0):
    """Run one evaluation pass over ``data_loader`` (conditional variant).

    Averages the batch losses (ELBO, or bpd for non-binary inputs) over the
    loader.  When ``testing`` is True and the flow is not a CNF, additionally
    estimates the test-set log-likelihood by importance sampling.

    Args:
        data_loader: iterable of ``(data, target)`` batches; when ``testing``
            its ``.dataset.tensors[0]`` is used for likelihood estimation.
        model: generative model; called as ``model(data, target)`` when
            ``args.conditional`` else ``model(data)``; returns
            ``(x_mean, z_mu, z_var, ldj, z0, zk)``.
        args: experiment configuration (``input_type``, ``flow``, ``cuda``,
            ``conditional``, ``input_size``, ``dataset``, ``evaluate``, ...).
        logger: progress/result logging plus ``calculate_likelihood``.
        testing: if True, use brute-force divergence for CNFs, compute the
            log-likelihood for non-CNFs, and skip first-batch visualizations.
        epoch: epoch index, forwarded to the visualization helpers.

    Returns:
        ``(loss, bpd)`` when ``testing`` is False, otherwise
        ``(log_likelihood, nll_bpd)`` (both ``None`` for CNF flows).
    """
    model.eval()
    loss = 0.
    bpd = 0.

    # loss_type only affects the annotation on the reconstruction plot.
    loss_type = 'elbo' if args.input_type == 'binary' else 'bpd'

    if testing and 'cnf' in args.flow:
        # Exact trace computation for evaluation instead of the stochastic
        # Hutchinson estimator.
        override_divergence_fn(model, "brute_force")

    for batch_idx, (data, target) in enumerate(data_loader, start=1):
        with torch.no_grad():
            # Single host->device transfer per batch (the original moved the
            # same tensor up to three times per iteration).
            data = data.view(-1, *args.input_size).to('cuda')
            if args.conditional:
                x_mean, z_mu, z_var, ldj, z0, zk = model(
                    data, target.to('cuda'))
            else:
                x_mean, z_mu, z_var, ldj, z0, zk = model(data)

            batch_loss, rec, kl, batch_bpd = calculate_loss(
                x_mean, data, z_mu, z_var, z0, zk, ldj, args)

            bpd += batch_bpd
            loss += batch_loss.item()

            # Visualize the first batch only (validation runs).
            if batch_idx == 1 and testing is False:
                _first_batch_visualizations(data, x_mean, batch_loss,
                                            loss_type, epoch, model, args)

    loss /= len(data_loader)
    bpd /= len(data_loader)

    if testing:
        logger.info('====> Test set loss: {:.4f}'.format(loss))

    # Importance-sampled log-likelihood; too expensive for CNF models.
    if testing and 'cnf' not in args.flow:
        with torch.no_grad():
            test_data = data_loader.dataset.tensors[0]
            if args.cuda:
                test_data = test_data.cuda()

            logger.info('Computing log-likelihood on test set')
            model.eval()

            # caltech is small enough that fewer importance samples suffice.
            S = 2000 if args.dataset == 'caltech' else 5000
            log_likelihood, nll_bpd = calculate_likelihood(
                test_data, model, args, logger, S=S, MB=500)
    else:
        log_likelihood = None
        nll_bpd = None

    if testing and 'cnf' in args.flow:
        # Restore the stochastic divergence estimator overridden above.
        # (The original restore call sat inside the non-CNF branch and was
        # unreachable, leaving CNF models in brute-force mode after testing.)
        override_divergence_fn(model, args.divergence_fn)

    if args.input_type in ['multinomial']:
        bpd = loss / (np.prod(args.input_size) * np.log(2.))

    if testing and 'cnf' not in args.flow:
        logger.info(
            '====> Test set log-likelihood: {:.4f}'.format(log_likelihood))
        if args.input_type != 'binary':
            logger.info('====> Test set bpd (elbo): {:.4f}'.format(bpd))
            logger.info('====> Test set bpd (log-likelihood): {:.4f}'.format(
                log_likelihood / (np.prod(args.input_size) * np.log(2.))))

    if not testing:
        return loss, bpd
    return log_likelihood, nll_bpd
itr, val_loss.avg, val_nfe.avg, n_vals_without_improvement, args.early_stopping)) logger.info(log_message) model.train() logger.info('Training has finished.') model = restore_model(model, os.path.join(args.save, 'checkpt.pth')).to(device) set_cnf_options(args, model) logger.info('Evaluating model on test set.') model.eval() override_divergence_fn( model, "brute_force") # brute forces the testing data trace computation with torch.no_grad(): test_loss = utils.AverageMeter() test_nfe = utils.AverageMeter() for itr, x in enumerate( batch_iter(data.tst.x, batch_size=test_batch_size)): x = cvt(x) test_loss.update(compute_loss(x, model).item(), x.shape[0]) test_nfe.update(count_nfe(model)) logger.info('Progress: {:.2f}%'.format( 100. * itr / (data.tst.x.shape[0] / test_batch_size))) log_message = '[TEST] Iter {:06d} | Test Loss {:.6f} | NFE {:.0f}'.format( itr, test_loss.avg, test_nfe.avg) logger.info(log_message)