cvae = CVAE(
    input_size=INPUT_SIZE,
    output_size=OUTPUT_SIZE,
    latent_size=LATENT_SIZE,
    encoder_layer_sizes=(200, 200),
    decoder_layer_sizes=(200, 200),
    dataset_name='mnist'
)

# Training loop: the label batch is the condition (x), the image batch is the target (y).
time_st = time.time()
for it in range(int(2e4)):
    y, x = mnist.train.next_batch(BATCH_SIZE)
    loss = cvae.update(x, y)

    if it % int(1e3) == 0:
        print('Iter-{}; Loss: {:.4f}, fps:{}'.format(
            it, loss.data, (it + 1) // (time.time() - time_st)))

# Sample 16 digits, all conditioned on the same randomly chosen one-hot label.
x = np.zeros(shape=[BATCH_SIZE, INPUT_SIZE], dtype=np.float32)
x[:, np.random.randint(0, 10)] = 1.
samples = cvae.sample(x).data.numpy()[:16]

# Plot the samples on a 4x4 grid.
fig = plt.figure(figsize=(4, 4))
gs = gridspec.GridSpec(4, 4)
gs.update(wspace=0.05, hspace=0.05)
for i, sample in enumerate(samples):
    ax = plt.subplot(gs[i])
    plt.axis('off')
    ax.set_xticklabels([])
    ax.set_yticklabels([])
    ax.set_aspect('equal')
    plt.imshow(sample.reshape(28, 28), cmap='Greys_r')  # assumes 28x28 MNIST outputs
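The `mnist` handle used above is assumed to come from the legacy TensorFlow tutorial loader, which returns one-hot labels that can serve directly as the CVAE condition; a minimal sketch of that setup, where the data directory is only a placeholder:

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

# Legacy TF tutorial loader; one_hot=True yields 10-dim label vectors.
mnist = input_data.read_data_sets('../MNIST_data', one_hot=True)
images, labels = mnist.train.next_batch(BATCH_SIZE)  # images: (B, 784), labels: (B, 10)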
    decoder_layer_sizes=(64, 64),
    dataset_name='gmm',
    alpha=ALPHA,
)

time_st = time.time()

# Output directory encodes the run's hyperparameters.
os.makedirs('out/gmm', exist_ok=True)
save_dir = 'out/gmm/%d_%d_%d_%d_%.2f' % (
    INPUT_SIZE, OUTPUT_SIZE, LATENT_SIZE, N_INFERENCE, ALPHA)
os.makedirs(save_dir, exist_ok=True)
file = open(os.path.join(save_dir, 'progress.csv'), 'wt')
csv_writer = None

for it in range(int(2e4)):
    # Train on a random minibatch.
    inds = np.random.choice(nb_train, BATCH_SIZE, replace=False)
    x, y, prob = xs_train[inds], ys_train[inds], probs_train[inds]
    loss = cvae.update(x[:, None], y[:, None])

    if it % int(1e3) == 0:
        # Evaluate: average the predicted density over N_INFERENCE latent samples
        # and compare it with the stored reference density (probs_train).
        inds = np.random.choice(nb_train, BATCH_SIZE, replace=False)
        x, y, prob = xs_train[inds], ys_train[inds], probs_train[inds]

        prob_pred = []
        for _ in range(N_INFERENCE):
            mean, log_var = cvae.sample(x[:, None])
            _log_prob = gaussian_log_density(
                mean, log_var, torch.tensor(y[:, None], dtype=torch.float32))
            _prob = torch.exp(_log_prob).detach().numpy()
            prob_pred.append(_prob)
        prob_pred = np.mean(np.asarray(prob_pred), axis=0)

        test_loss = np.mean((prob - prob_pred)**2)
        print('Iter-{}; Train Loss: {:.4f}, Test Loss:{:.4f} fps:{}'.format(
            it, loss.data, test_loss, (it + 1) // (time.time() - time_st)))
        print(np.round(prob[:3], 3), np.round(prob_pred[:3], 3))
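The evaluation above calls `gaussian_log_density`, which is defined elsewhere; a minimal sketch of what a helper with that signature presumably computes, namely the log-density of `y` under a diagonal Gaussian with parameters `mean` and `log_var` (an assumption, not the project's actual implementation):

import math

import torch

def gaussian_log_density(mean, log_var, y):
    # Element-wise log N(y | mean, exp(log_var)), summed over the feature axis.
    log_prob = -0.5 * (math.log(2.0 * math.pi) + log_var
                       + (y - mean) ** 2 / torch.exp(log_var))
    return log_prob.sum(dim=-1)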