def sample_data():
    """Draw one training minibatch as (x, one-hot y, label-id) Variables.

    Device placement (CPU/GPU) follows the model configuration.
    """
    return util.sample_x_and_label_variables(
        batchsize,
        conf.ndim_x,
        conf.ndim_y,
        dataset,
        labels,
        gpu_enabled=conf.gpu_enabled)
def forward_one_step(num_images):
    """Encode a batch of images and reconstruct them with the M2 VAE.

    Samples `num_images` labeled examples on the CPU, optionally moves
    them to the GPU, infers labels y, encodes to latent z, and decodes
    back to pixel space.

    Returns:
        (z, _x, label_ids): latent code Variable (on CPU when GPU was
        used), reconstructed pixel array (`.data`), and the integer
        class ids of the sampled images.
    """
    x, y_labeled, label_ids = util.sample_x_and_label_variables(
        num_images, conf.ndim_x, conf.ndim_y, dataset, labels,
        gpu_enabled=False)
    # BUG FIX: the original called x.to_gpu() unconditionally, which
    # fails when CUDA is unavailable; mirror the guarded to_cpu() below.
    if conf.gpu_enabled:
        x.to_gpu()
    y = vae.sample_x_y(x, test=True)
    z = vae.encoder_xy_z(x, y, test=True)
    _x = vae.decode_zy_x(z, y, test=True)
    if conf.gpu_enabled:
        z.to_cpu()
        _x.to_cpu()
    _x = _x.data
    return z, _x, label_ids
def sample_labeled_data():
    """Sample a labeled minibatch: (x, one-hot y, integer label ids)."""
    x, y_onehot, y_id = util.sample_x_and_label_variables(
        batchsize, conf.ndim_x, conf.ndim_y,
        labeled_dataset, labels,
        gpu_enabled=conf.gpu_enabled)
    # FIX: the original drew Gaussian noise every call but never applied
    # it (the "x.data += noise.data" line was commented out), wasting a
    # sampler call per batch. To re-enable input corruption, restore:
    # noise = sampler.gaussian(batchsize, conf.ndim_x, mean=0, var=0.3,
    #                          gpu_enabled=conf.gpu_enabled)
    # x.data += noise.data
    return x, y_onehot, y_id
def forward_one_step(num_images):
    """Encode a batch of images and reconstruct them with the M2 VAE.

    Samples `num_images` labeled examples on the CPU, optionally moves
    them to the GPU, infers labels y, encodes to latent z, and decodes
    back to pixel space.

    Returns:
        (z, _x, label_ids): latent code Variable (on CPU when GPU was
        used), reconstructed pixel array (`.data`), and the integer
        class ids of the sampled images.
    """
    x, y_labeled, label_ids = util.sample_x_and_label_variables(
        num_images, conf.ndim_x, conf.ndim_y, dataset, labels,
        gpu_enabled=False)
    # BUG FIX: the original called x.to_gpu() unconditionally, which
    # fails when CUDA is unavailable; mirror the guarded to_cpu() below.
    if conf.gpu_enabled:
        x.to_gpu()
    y = vae.sample_x_y(x, test=True)
    z = vae.encoder_xy_z(x, y, test=True)
    _x = vae.decode_zy_x(z, y, test=True)
    if conf.gpu_enabled:
        z.to_cpu()
        _x.to_cpu()
    _x = _x.data
    return z, _x, label_ids
import util
from args import args
from model import conf, aae

# Make sure the output directory exists (ignore "already exists").
try:
    os.mkdir(args.vis_dir)
except OSError:
    pass

dataset, labels = util.load_labeled_images(args.test_image_dir)

n_analogies = 10
n_image_channels = 1
image_width = 28
image_height = 28

# Sample the source images and obtain their style code z.
x, y, label_ids = util.sample_x_and_label_variables(
    n_analogies, conf.ndim_x, conf.ndim_y, dataset, labels,
    gpu_enabled=conf.gpu_enabled)
_, z = aae.generator_x_yz(x, test=True, apply_f=True)

if n_image_channels == 1:
    pylab.gray()

xp = np
if conf.gpu_enabled:
    x.to_cpu()
    xp = cuda.cupy

# One row per analogy: the source image in column 1.
# The grid has conf.ndim_y + 2 columns.
for m in xrange(n_analogies):
    # BUG FIX: the row stride was hard-coded as 12, which is only
    # correct when conf.ndim_y == 10; derive it from the grid width.
    pylab.subplot(n_analogies, conf.ndim_y + 2, m * (conf.ndim_y + 2) + 1)
    if n_image_channels == 1:
        pylab.imshow(x.data[m].reshape((image_width, image_height)),
                     interpolation="none")
    elif n_image_channels == 3:
        pylab.imshow(x.data[m].reshape((n_image_channels, image_width,
                                        image_height)),
                     interpolation="none")
    pylab.axis("off")
import numpy as np
from chainer import cuda, Variable
import matplotlib.patches as mpatches
sys.path.append(os.path.abspath(os.path.join(os.getcwd(), "../../")))
import util
import sampler
import visualizer
from args import args
from model import conf, aae

# Visualize AAE reconstructions and the labeled latent space for the
# whole test set.
try:
    os.mkdir(args.vis_dir)
except:
    pass

dataset, labels = util.load_labeled_images(args.test_image_dir)
num_images = len(dataset)

# Sample every test image on the CPU first, then transfer if enabled.
x, y, label_ids = util.sample_x_and_label_variables(num_images, conf.ndim_x, 10, dataset, labels, gpu_enabled=False)
if conf.gpu_enabled:
    x.to_gpu()

# Encode to latent z, then decode back to pixel space.
z = aae.generator_x_z(x, test=True, apply_f=True)
_x = aae.decoder_z_x(z, test=True, apply_f=True)
if conf.gpu_enabled:
    z.to_cpu()
    _x.to_cpu()

visualizer.tile_x(_x.data, dir=args.vis_dir)
visualizer.plot_labeled_z(z.data, label_ids.data, dir=args.vis_dir)
from args import args
from model import conf, vae
from vae_m1 import GaussianM1VAE
from chainer import functions as F
from PIL import Image

# Visualize M1 VAE reconstructions and the latent space for 5000 test
# images, writing figures into args.vis_dir.
try:
    os.mkdir(args.vis_dir)
except:
    pass

# GaussianM1VAE models pixels with a Gaussian likelihood; other variants
# use Bernoulli. The loader normalizes the images accordingly.
dist = "bernoulli"
if isinstance(vae, GaussianM1VAE):
    dist = "gaussian"
dataset, labels = util.load_labeled_images(args.test_image_dir, dist=dist)

num_images = 5000
x, y_labeled, label_ids = util.sample_x_and_label_variables(num_images, conf.ndim_x, 10, dataset, labels, gpu_enabled=False)
if conf.gpu_enabled:
    x.to_gpu()
z = vae.encoder(x, test=True)
# NOTE(review): positional booleans — presumably (test, apply_f) as in
# the keyword-style decoder calls elsewhere; confirm against the
# signature of vae.decoder.
_x = vae.decoder(z, True, True)
if conf.gpu_enabled:
    z.to_cpu()
    _x.to_cpu()

util.visualize_x(_x.data, dir=args.vis_dir)
print "visualizing x"
util.visualize_z(z.data, dir=args.vis_dir)
print "visualizing z"
util.visualize_labeled_z(z.data, label_ids.data, dir=args.vis_dir)
print "visualizing labeled z"
# -*- coding: utf-8 -*- import os, sys, time import numpy as np from chainer import cuda, Variable sys.path.append(os.path.split(os.getcwd())[0]) import util from args import args from model import conf1, vae1, conf2, vae2 from vae_m1 import GaussianM1VAE dist = "bernoulli" if isinstance(vae1, GaussianM1VAE): dist = "gaussian" dataset, labels = util.load_labeled_images(args.test_image_dir, dist=dist) num_data = len(dataset) x_labeled, _, label_ids = util.sample_x_and_label_variables(num_data, conf1.ndim_x, conf2.ndim_y, dataset, labels, gpu_enabled=False) if conf1.gpu_enabled: x_labeled.to_gpu() z_labeled = vae1.encoder(x_labeled, test=True) prediction = vae2.sample_x_label(z_labeled, test=True, argmax=True) correct = 0 for i in xrange(num_data): if prediction[i] == label_ids.data[i]: correct += 1 print "test:: classification accuracy: {:.3f}".format(correct / float(num_data))
def sample_validation_data():
    """Return the full validation split as (x, one-hot y, label id) Variables.

    Always sampled on the CPU (gpu_enabled=False).
    """
    return util.sample_x_and_label_variables(
        n_validation_data,
        conf.ndim_x,
        conf.ndim_y,
        validation_dataset,
        validation_labels,
        gpu_enabled=False)
# sys.stdout.write("\rTraining M1 in progress...(%d / %d)" % (t, vae1_num_trains_per_epoch)) # sys.stdout.flush() # epoch_time = time.time() - epoch_time # total_time += epoch_time # sys.stdout.write("\r") # print "[M1] epoch:", epoch, "loss: {:.3f}".format(sum_loss / vae1_num_trains_per_epoch), "time: {:d} min".format(int(epoch_time / 60)), "total: {:d} min".format(int(total_time / 60)) # sys.stdout.flush() # vae1.save(args.model_dir) # Train M2 sum_loss_labeled = 0 sum_loss_unlabeled = 0 sum_loss_classifier = 0 epoch_time = time.time() for t in xrange(vae2_num_trains_per_epoch): x_labeled, y_labeled, label_ids = util.sample_x_and_label_variables(batchsize, conf1.ndim_x, conf2.ndim_y, labeled_dataset, labels, gpu_enabled=conf2.gpu_enabled) x_unlabeled = util.sample_x_variable(batchsize, conf1.ndim_x, unlabeled_dataset, gpu_enabled=conf2.gpu_enabled) z_labeled = Variable(vae1.encoder(x_labeled, test=True, apply_f=True).data) z_unlabeled = Variable(vae1.encoder(x_unlabeled, test=True, apply_f=True).data) # train # loss_labeled, loss_unlabeled, loss_classifier = vae2.train_jointly(z_labeled, y_labeled, label_ids, z_unlabeled, alpha=alpha, test=False) # train loss_labeled, loss_unlabeled = vae2.train(z_labeled, y_labeled, label_ids, z_unlabeled) loss_classifier = vae2.train_classification(z_labeled, label_ids, alpha=alpha) sum_loss_labeled += loss_labeled sum_loss_unlabeled += loss_unlabeled sum_loss_classifier += loss_classifier if t % 10 == 0:
from chainer import cuda, Variable
sys.path.append(os.path.split(os.getcwd())[0])
import util
from args import args
from model import conf1, vae1, conf2, vae2
from vae_m1 import GaussianM1VAE

# Evaluate the stacked M1+M2 model on the test set: M1 encodes images
# to latent z, M2 predicts the class label from z (argmax). Prints
# top-1 accuracy.
dist = "bernoulli"
if isinstance(vae1, GaussianM1VAE):
    dist = "gaussian"
dataset, labels = util.load_labeled_images(args.test_image_dir, dist=dist)
num_data = len(dataset)

# Sample the whole test set on the CPU, then transfer if enabled.
x_labeled, _, label_ids = util.sample_x_and_label_variables(num_data, conf1.ndim_x, conf2.ndim_y, dataset, labels, gpu_enabled=False)
if conf1.gpu_enabled:
    x_labeled.to_gpu()
z_labeled = vae1.encoder(x_labeled, test=True)
prediction = vae2.sample_x_label(z_labeled, test=True, argmax=True)

# Count exact matches between predicted and true label ids.
correct = 0
for i in xrange(num_data):
    if prediction[i] == label_ids.data[i]:
        correct += 1
print "test:: classification accuracy: {:.3f}".format(correct / float(num_data))
# from PIL import Image # for i in xrange(len(labeled_dataset)): # image = Image.fromarray(np.uint8(labeled_dataset[i].reshape(28, 28) * 255)) # image.save("labeled_images/{:d}.bmp".format(i)) # Export result to csv csv_epoch = [] total_time = 0 for epoch in xrange(max_epoch): sum_loss_labeled = 0 sum_loss_unlabeled = 0 sum_loss_classifier = 0 epoch_time = time.time() for t in xrange(num_trains_per_epoch): x_labeled, y_labeled, label_ids = util.sample_x_and_label_variables(batchsize_l, conf.ndim_x, conf.ndim_y, labeled_dataset, labels, gpu_enabled=conf.gpu_enabled) x_unlabeled = util.sample_x_variable(batchsize_u, conf.ndim_x, unlabeled_dataset, gpu_enabled=conf.gpu_enabled) # train loss_labeled, loss_unlabeled = vae.train(x_labeled, y_labeled, label_ids, x_unlabeled) loss_classifier = vae.train_classification(x_labeled, label_ids, alpha=alpha) sum_loss_labeled += loss_labeled sum_loss_unlabeled += loss_unlabeled sum_loss_classifier += loss_classifier if t % 10 == 0: sys.stdout.write("\rTraining in progress...({:d} / {:d})".format(t, num_trains_per_epoch)) sys.stdout.flush() epoch_time = time.time() - epoch_time total_time += epoch_time sys.stdout.write("\r")