def log_gen_data(gen, device, epoch, n_labels, log_dir):
    """Generate a class-ordered batch of samples and save a plot for this epoch.

    Builds a label vector with `n_labels` consecutive samples per class,
    generates matching samples, and writes a 10 x n_labels image grid to
    `log_dir + f'samples_ep{epoch}'`.

    Args:
      gen: generator exposing `get_code(n, device, labels=...)` and `__call__`.
      device: torch device for label and code tensors.
      epoch: epoch index, used only in the output filename.
      n_labels: number of classes.
      log_dir: directory prefix for the saved plot.
    """
    ordered_labels = pt.repeat_interleave(pt.arange(n_labels), n_labels)[:, None].to(device)
    # sample count must match len(ordered_labels); was hard-coded to 100,
    # which only agreed with the label tensor when n_labels == 10
    n_samples = n_labels ** 2
    gen_code, _ = gen.get_code(n_samples, device, labels=ordered_labels)
    gen_samples = gen(gen_code).detach()
    # the plot grid holds 10 rows of n_labels columns
    plot_samples = gen_samples[:10 * n_labels, ...].cpu().numpy()
    plot_mnist_batch(plot_samples, 10, n_labels, log_dir + f'samples_ep{epoch}', denorm=False)
def test(gen, device, test_loader, rff_mmd_loss, epoch, batch_size, do_gen_labels,
         uniform_labels, log_dir):
    """Evaluate the generator's RFF-MMD loss on the test set and log sample plots.

    Args:
      gen: generator exposing `get_code` and `__call__`.
      device: torch device for all tensors.
      test_loader: DataLoader yielding (data, labels) batches.
      rff_mmd_loss: MMD loss; called with 2 args (data, samples) in the
        unlabeled case and 4 args (data, one-hots, samples, gen_labels) otherwise.
      epoch: current epoch index, used in output filenames.
      batch_size: nominal batch size used to normalize the summed loss.
      do_gen_labels: if False, evaluate without any label information.
      uniform_labels: if True, the generator accepts explicit label codes;
        otherwise it returns (samples, labels) jointly.
      log_dir: directory prefix for saved plots/labels.
    """
    test_loss = 0
    gen_labels, ordered_labels = None, None
    with pt.no_grad():
        for data, labels in test_loader:
            # move both tensors to the device once
            # (original moved `data` twice and `labels` per-branch)
            data, labels = data.to(device), labels.to(device)
            data = flat_data(data, labels, device, n_labels=10, add_label=False)

            bs = labels.shape[0]
            if not do_gen_labels:
                gen_samples = gen(gen.get_code(bs, device))
                gen_labels = None
                loss = rff_mmd_loss(data, gen_samples)
            elif uniform_labels:
                one_hots = pt.zeros(bs, 10, device=device)
                one_hots.scatter_(1, labels[:, None], 1)
                gen_code, gen_labels = gen.get_code(bs, device)
                gen_samples = gen(gen_code)
                loss = rff_mmd_loss(data, one_hots, gen_samples, gen_labels)
            else:
                one_hots = pt.zeros(bs, 10, device=device)
                one_hots.scatter_(1, labels[:, None], 1)
                gen_samples, gen_labels = gen(gen.get_code(bs, device))
                loss = rff_mmd_loss(data, one_hots, gen_samples, gen_labels)
            test_loss += loss.item()  # sum up batch loss
    test_loss /= (len(test_loader.dataset) / batch_size)

    # report the median-distance sigma heuristic on the last encoded batch
    data_enc_batch = data.cpu().numpy()
    med_dist = meddistance(data_enc_batch)
    print(f'med distance for encodings is {med_dist}, heuristic suggests sigma={med_dist ** 2}')

    if uniform_labels:
        # regenerate a class-ordered 10x10 batch for plotting
        ordered_labels = pt.repeat_interleave(pt.arange(10), 10)[:, None].to(device)
        gen_code, gen_labels = gen.get_code(100, device, labels=ordered_labels)
        gen_samples = gen(gen_code).detach()
    plot_samples = gen_samples[:100, ...].cpu().numpy()
    plot_mnist_batch(plot_samples, 10, 10, log_dir + f'samples_ep{epoch}', denorm=False)
    # generated labels are only saved when they came from the generator itself
    # (i.e. not the explicit ordered labels used for plotting)
    if gen_labels is not None and ordered_labels is None:
        save_gen_labels(gen_labels[:100, ...].cpu().numpy(), 10, 10, log_dir + f'labels_ep{epoch}')
    print('Test set: Average loss: {:.4f}'.format(test_loss))
def vis_dpgan():
    """Plot class-ordered sample grids for each saved DP-GAN synthetic-data run.

    For every data key and run index, loads the stored synthetic MNIST archive,
    picks the first ten samples of each digit class, and writes a 10x10 grid
    plot under 'vis/'.
    """
    os.makedirs('vis', exist_ok=True)
    for data_key in ('d', 'f'):
        for run in range(5):
            archive = np.load(f'synth_data/apr19_sig1.41_{data_key}{run}/synthetic_mnist.npz')
            images = np.reshape(archive['data'], (-1, 784))
            onehot_targets = archive['labels']
            # first ten samples of each digit class, stacked class by class
            per_class = [images[onehot_targets[:, k] == 1.][:10] for k in range(10)]
            plot_mnist_batch(np.concatenate(per_class), 10, 10,
                             save_path=f'vis/plot_{data_key}{run}',
                             denorm=False, save_raw=False)
def test(gen, device, test_loader, rff_mmd_loss, epoch, batch_size, log_dir):
    """Evaluate the generator's RFF-MMD loss on the test set and plot samples.

    Prints the batch-size-normalized average loss and the median-distance
    sigma heuristic for the last encoded batch, then saves a 10x10 grid of
    class-ordered generated samples under `log_dir`.
    """
    loss_sum = 0
    with pt.no_grad():
        for batch, targets in test_loader:
            batch, targets = batch.to(device), targets.to(device)
            batch = flat_data(batch, targets, device, n_labels=10, add_label=False)
            loss_sum += compute_rff_loss(gen, batch, targets, rff_mmd_loss, device).item()  # sum up batch loss
    test_loss = loss_sum / (len(test_loader.dataset) / batch_size)

    # sigma heuristic from the last encoded batch
    encodings = batch.cpu().numpy()
    med_dist = meddistance(encodings)
    print(f'med distance for encodings is {med_dist}, heuristic suggests sigma={med_dist ** 2}')

    # one row of ten generated samples per digit class
    ordered_labels = pt.repeat_interleave(pt.arange(10), 10)[:, None].to(device)
    gen_code, gen_labels = gen.get_code(100, device, labels=ordered_labels)
    samples = gen(gen_code).detach()
    plot_mnist_batch(samples[:100, ...].cpu().numpy(), 10, 10,
                     log_dir + f'samples_ep{epoch}', denorm=False)
    print('Test set: Average loss: {:.4f}'.format(test_loss))
def first_look_dpgan():
    """Inspect a DP-GAN synthetic MNIST dump and save three diagnostic plots.

    Prints basic shape/range statistics, then plots the first hundred samples,
    a slice from deeper in the file, and a class-balanced 10x10 selection.
    """
    archive = np.load('synth_data/apr18_sigma1.5_baseline/synthetic_mnist.npz')
    data, targets = archive['data'], archive['labels']
    print(data.shape)
    print(targets.shape)
    print(np.max(data), np.min(data))
    data = np.reshape(data, (-1, 784))
    print(data.shape)
    # first hundred samples in stored order
    plot_mnist_batch(data[:100, :], 10, 10,
                     save_path='dpgan_test_plot', denorm=False, save_raw=False)
    # a hundred samples from deeper in the file
    plot_mnist_batch(data[6000:6100, :], 10, 10,
                     save_path='dpgan_test_zeros', denorm=False, save_raw=False)
    # first ten samples of each class (labels are one-hot columns)
    balanced = np.concatenate([data[targets[:, k] == 1.][:10] for k in range(10)])
    plot_mnist_batch(balanced, 10, 10,
                     save_path='dpgan_test_classes', denorm=False, save_raw=False)
def test(enc, dec, device, test_loader, epoch, losses, label_ae, conv_ae, log_spec,
         last_epoch, data_is_normed):
    """Evaluate the autoencoder on the test set and plot reconstructions.

    Computes the per-sample average reconstruction loss (binary cross-entropy
    when `losses.do_ce`, else MSE) plus an optional weighted siamese embedding
    loss, then plots a class-balanced 10x10 batch of reconstructions taken
    from the last test batch. On the last epoch the plot is additionally
    saved into the shared overview directory.

    Args:
      enc, dec: encoder / decoder modules (switched to eval mode here).
      device: torch device.
      test_loader: DataLoader yielding (data, labels) batches.
      epoch: current epoch index, used in output filenames.
      losses: config object with fields `do_ce`, `wsiam`, `msiam`.
      label_ae: if True, flat data carries label columns after index 784.
      conv_ae: if True, data stays in image shape (no flattening).
      log_spec: logging spec with `log_dir`, `base_dir`, `log_name`.
      last_epoch: if True, also copy the plot to the overview directory.
      data_is_normed: forwarded to `plot_mnist_batch` as `denorm`.
    """
    enc.eval()
    dec.eval()
    rec_loss_agg = 0
    siam_loss_agg = 0
    with pt.no_grad():
        for data, labels in test_loader:
            bs = data.shape[0]
            data = data.to(device)
            labels = labels.to(device)
            if not conv_ae:
                data = flat_data(data, labels, device, add_label=label_ae)
            data_enc = enc(data)
            reconstruction = dec(data_enc)
            rec_loss = bin_ce_loss(reconstruction, data) if losses.do_ce else mse_loss(reconstruction, data)
            rec_loss_agg += rec_loss.item() * bs
            if losses.wsiam > 0.:
                siam_loss = losses.wsiam * siamese_loss(data_enc, labels, losses.msiam)
                siam_loss_agg += siam_loss.item() * bs

    n_data = len(test_loader.dataset)
    rec_loss_agg /= n_data
    siam_loss_agg /= n_data
    full_loss = rec_loss_agg + siam_loss_agg

    # plot a class-balanced selection of reconstructions from the last batch
    reconstruction = reconstruction.cpu().numpy()
    labels = labels.cpu().numpy()
    reconstruction, labels = select_balaned_plot_batch(reconstruction, labels,
                                                       n_classes=10, n_samples_per_class=10)
    if label_ae:
        # split off the appended label columns and restore image shape
        # (removed a dead `else: reconstruction = reconstruction` no-op branch)
        rec_labels = reconstruction[:, 784:]
        save_gen_labels(rec_labels, 10, 10, log_spec.log_dir + f'rec_ep{epoch}_labels', save_raw=False)
        reconstruction = reconstruction[:, :784].reshape(-1, 28, 28)
    plot_mnist_batch(reconstruction, 10, 10, log_spec.log_dir + f'rec_ep{epoch}',
                     denorm=data_is_normed)

    if last_epoch:
        # copy the final reconstruction plot into the shared overview directory;
        # exist_ok avoids the check-then-create race of the original code
        save_dir = log_spec.base_dir + '/overview/'
        os.makedirs(save_dir, exist_ok=True)
        save_path = save_dir + log_spec.log_name + f'_rec_ep{epoch}'
        plot_mnist_batch(reconstruction, 10, 10, save_path,
                         denorm=data_is_normed, save_raw=False)

    print('Test ep {}: Average loss: full {:.4f}, rec {:.4f}, siam {:.4f}'.format(
        epoch, full_loss, rec_loss_agg, siam_loss_agg))