Example #1
def main():
    images, labels = dataset.load_test_images()
    num_scatter = len(images)
    _images, _, label_id = dataset.sample_labeled_data(images, labels,
                                                       num_scatter)
    with tf.device(config.device):
        t = build_graph(is_test=True)

    with tf.Session(config=tf.ConfigProto(allow_soft_placement=True,
                                          log_device_placement=True)) as sess:
        saver = tf.train.Saver()
        saver.restore(sess, tf.train.latest_checkpoint(config.ckpt_dir))
        z, _x = sess.run([t.z_r, t.x_r], feed_dict={t.x: _images})

        plot.scatter_labeled_z(z, label_id, dir=config.ckpt_dir)
        plot.tile_images(_x[:100], dir=config.ckpt_dir)
        plot.plot_loss_tendency(config.ckpt_dir)

    hist_value, hist_head = plot.load_pickle_to_data(config.ckpt_dir)
    for loss_name in ['reconstruction']:
        plot.plot_loss_trace(hist_value[loss_name], loss_name, config.ckpt_dir)

    plot.plot_adversarial_trace(hist_value['discriminator'],
                                hist_value['generator'], 'total',
                                config.ckpt_dir)
    plot.plot_adversarial_trace(hist_value['discriminator_z'],
                                hist_value['generator_z'], 'z',
                                config.ckpt_dir)
    plot.plot_adversarial_trace(hist_value['discriminator_img'],
                                hist_value['generator_img'], 'img',
                                config.ckpt_dir)
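
For reference: the plot module used throughout these examples is never shown in this listing. Below is a minimal sketch of what a helper like plot.scatter_labeled_z could look like; the signature mirrors the calls above, but the body is an assumption, not the project's actual code.

import os
import numpy as np
import matplotlib.pyplot as plt

def scatter_labeled_z(z, label_ids, dir='.', filename='z_scatter'):
    # sketch (assumed API): scatter 2-D latent codes, one color per class label
    z = np.asarray(z)
    label_ids = np.asarray(label_ids)
    plt.figure(figsize=(8, 8))
    for label in np.unique(label_ids):
        mask = label_ids == label
        plt.scatter(z[mask, 0], z[mask, 1], s=4, label=str(label))
    plt.legend(markerscale=3, loc='upper right')
    plt.savefig(os.path.join(dir, '{}.png'.format(filename)))
    plt.close()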
Example #2
def main():
    # load MNIST images
    images, labels = dataset.load_test_images()

    # Settings
    num_scatter = len(images)
    _images, _, label_id = dataset.sample_labeled_data(images, labels,
                                                       num_scatter)

    with tf.device(config.device):
        t = build_graph(is_test=True)

    with tf.Session(config=tf.ConfigProto(allow_soft_placement=True,
                                          log_device_placement=True)) as sess:
        saver = tf.train.Saver()
        saver.restore(sess, tf.train.latest_checkpoint(config.ckpt_dir))
        representation, x_reconstruction = sess.run([t.yz, t.x_r],
                                                    feed_dict={t.x: _images})
        plot.scatter_labeled_z(representation, label_id, dir=config.ckpt_dir)
        plot.tile_images(x_reconstruction[:100], dir=config.ckpt_dir)

        # z distributed plot
        num_segments = 20
        limit = (-5, 5)
        x_values = np.linspace(limit[0], limit[1], num_segments)
        y_values = np.linspace(limit[0], limit[1], num_segments)
        vacant = np.zeros((28 * num_segments, 28 * num_segments))
        for i, x_element in enumerate(x_values):
            for j, y_element in enumerate(y_values):
                x_reconstruction = sess.run(
                    t.x_r,
                    feed_dict={
                        t.yz: np.reshape([x_element, y_element], [1, 2])
                    })
                vacant[(num_segments - 1 - i) * 28:(num_segments - i) * 28,
                       j * 28:(j + 1) * 28] = x_reconstruction.reshape(28, 28)

        vacant = (vacant + 1) / 2
        pylab.figure(figsize=(10, 10), dpi=400, facecolor='white')
        pylab.imshow(vacant, cmap='gray', origin='upper')
        pylab.tight_layout()
        pylab.axis('off')
        pylab.savefig("{}/clusters.png".format(config.ckpt_dir))

        # loss part
        hist_value, hist_head = plot.load_pickle_to_data(config.ckpt_dir)
        for loss_name in ['reconstruction', 'supervised']:
            plot.plot_loss_trace(hist_value[loss_name], loss_name,
                                 config.ckpt_dir)

        plot.plot_adversarial_trace(hist_value['discriminator_y'],
                                    hist_value['generator_y'], 'y',
                                    config.ckpt_dir)
        plot.plot_adversarial_trace(hist_value['discriminator_z'],
                                    hist_value['generator_z'], 'z',
                                    config.ckpt_dir)
        plot.plot_adversarial_trace(hist_value['validation_accuracy'],
                                    hist_value['transform'],
                                    'validation_accuracy', config.ckpt_dir)
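
The nested loop in the middle of this example decodes a uniform grid of 2-D latent points into a single mosaic image; the same block reappears verbatim in Example #9. As a sketch, here is the idea factored into a reusable helper: decode_fn stands in for sess.run(t.x_r, feed_dict={t.yz: ...}), and the [-1, 1] decoder output range (hence the final rescale) is an assumption carried over from the (vacant + 1) / 2 step above.

import numpy as np

def latent_grid_mosaic(decode_fn, limit=(-5, 5), num_segments=20, side=28):
    # sketch: decode a num_segments x num_segments grid of 2-D latent codes
    xs = np.linspace(limit[0], limit[1], num_segments)
    ys = np.linspace(limit[0], limit[1], num_segments)
    mosaic = np.zeros((side * num_segments, side * num_segments))
    for i, xv in enumerate(xs):
        for j, yv in enumerate(ys):
            img = decode_fn(np.array([[xv, yv]], dtype=np.float32))
            # invert the row index so larger first-coordinate values sit higher
            mosaic[(num_segments - 1 - i) * side:(num_segments - i) * side,
                   j * side:(j + 1) * side] = img.reshape(side, side)
    return (mosaic + 1) / 2  # assumed tanh output: map [-1, 1] to [0, 1]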
Example #3
def main():
    # load MNIST images
    images, labels = dataset.load_test_images()

    # Settings
    num_analogies = 10
    pylab.gray()

    # generate style vector z
    x = dataset.sample_unlabeled_data(images, num_analogies)
    x = (x + 1) / 2

    with tf.device(config.device):
        x_input, img_y, img_z, reconstruction = build_graph(is_test=True)

    with tf.Session(config=tf.ConfigProto(allow_soft_placement=True,
                                          log_device_placement=True)) as sess:
        saver = tf.train.Saver()
        saver.restore(sess, tf.train.latest_checkpoint(config.ckpt_dir))
        z = sess.run(img_z, feed_dict={x_input: x})

        for m in range(num_analogies):
            pylab.subplot(num_analogies, config.ndim_y + 2, m * 12 + 1)
            pylab.imshow(x[m].reshape((28, 28)), interpolation='none')
            pylab.axis('off')

        all_y = np.identity(config.ndim_y, dtype=np.float32)
        for m in range(num_analogies):
            fixed_z = np.repeat(z[m].reshape(1, -1), config.ndim_y, axis=0)
            gen_x = sess.run(reconstruction,
                             feed_dict={
                                 img_z: fixed_z,
                                 img_y: all_y
                             })
            gen_x = (gen_x + 1) / 2

            for n in range(config.ndim_y):
                pylab.subplot(num_analogies, config.ndim_y + 2, m * 12 + 3 + n)
                pylab.imshow(gen_x[n].reshape((28, 28)), interpolation='none')
                pylab.axis('off')

        fig = pylab.gcf()
        fig.set_size_inches(num_analogies, config.ndim_y)
        pylab.savefig('{}/analogy.png'.format(config.ckpt_dir))

        hist_value, hist_head = plot.load_pickle_to_data(config.ckpt_dir)
        for loss_name in [
                'reconstruction', 'validation_accuracy', 'supervised'
        ]:
            plot.plot_loss_trace(hist_value[loss_name], loss_name,
                                 config.ckpt_dir)

        plot.plot_adversarial_trace(hist_value['discriminator_y'],
                                    hist_value['generator_y'], 'y',
                                    config.ckpt_dir)
        plot.plot_adversarial_trace(hist_value['discriminator_z'],
                                    hist_value['generator_z'], 'z',
                                    config.ckpt_dir)
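
A note on the subplot indices above: m * 12 + 1 and m * 12 + 3 + n hardcode a row stride of 12, which only agrees with the declared grid (num_analogies, config.ndim_y + 2) when config.ndim_y == 10 (the MNIST case). A more general form would be m * (config.ndim_y + 2) + 1 and m * (config.ndim_y + 2) + 3 + n; the same pattern appears again in Example #8.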
Example #4
def main():
	images, labels = dataset.load_test_images()
	num_scatter = len(images)

	y_distribution, z = aae.encode_x_yz(images, apply_softmax=False, test=True)
	y = aae.argmax_onehot_from_unnormalized_distribution(y_distribution)
	representation = aae.to_numpy(aae.encode_yz_representation(y, z, test=True))

	plot.scatter_labeled_z(representation, labels, dir=args.plot_dir)
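
The aae helpers here are external to the snippet. As a rough numpy illustration of what argmax_onehot_from_unnormalized_distribution plausibly computes (an assumption; the real implementation is not shown): take the argmax over each row of unnormalized class scores and emit one-hot vectors.

import numpy as np

def argmax_onehot(unnormalized_y):
    # sketch: one-hot rows selecting each sample's highest-scoring class
    y = np.asarray(unnormalized_y)
    onehot = np.zeros_like(y)
    onehot[np.arange(len(y)), y.argmax(axis=1)] = 1.0
    return onehot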
Example #5
def main():
	# load MNIST images
	images, labels = dataset.load_test_images()

	# config
	config = aae.config
	num_scatter = len(images)

	x, _, label_ids = dataset.sample_labeled_data(images, labels, num_scatter, config.ndim_x, config.ndim_y)
	z = aae.to_numpy(aae.encode_x_z(x, test=True))
	visualizer.plot_labeled_z(z, label_ids, dir=args.plot_dir)
Example #6
def main():
    # load MNIST images
    images, labels = dataset.load_test_images()

    # config
    config = aae.config
    num_scatter = len(images)

    x, _, labels = dataset.sample_labeled_data(images, labels, num_scatter,
                                               config.ndim_x, config.ndim_y)
    y_distribution, z = aae.encode_x_yz(x, apply_softmax=False, test=True)
    y = aae.argmax_onehot_from_unnormalized_distribution(y_distribution)
    representation = aae.to_numpy(aae.encode_yz_representation(y, z,
                                                               test=True))

    visualizer.plot_labeled_z(representation, labels, dir=args.plot_dir)
Example #7
def main():
    images, labels = dataset.load_test_images()
    num_scatter = len(images)
    _images, _, label_id = dataset.sample_labeled_data(images, labels,
                                                       num_scatter)
    with tf.device(config.device):
        x, z_representation, x_construction = build_graph(is_test=True)

    with tf.Session(config=tf.ConfigProto(allow_soft_placement=True,
                                          log_device_placement=True)) as sess:
        saver = tf.train.Saver()
        saver.restore(sess, tf.train.latest_checkpoint(config.ckpt_dir))
        z, _x = sess.run([z_representation, x_construction],
                         feed_dict={x: _images})

        scatter_labeled_z(z, label_id, dir=config.ckpt_dir)
        tile_images(_x[:100], dir=config.ckpt_dir)
        plot_loss_tendency(config.ckpt_dir)
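
Here scatter_labeled_z, tile_images and plot_loss_tendency are called without a module prefix, presumably star-imported from the same plot module as the other examples. A minimal sketch of a tile_images-style helper; layout, filename and signature are assumptions:

import numpy as np
import pylab

def tile_images(x, dir='.', filename='tile', side=28, cols=10):
    # sketch: lay the given images out on a cols-wide grid and save as one figure
    rows = int(np.ceil(len(x) / float(cols)))
    pylab.figure(figsize=(cols, rows))
    for k, img in enumerate(x):
        pylab.subplot(rows, cols, k + 1)
        pylab.imshow(img.reshape(side, side), cmap='gray', interpolation='none')
        pylab.axis('off')
    pylab.savefig('{}/{}.png'.format(dir, filename))
    pylab.close()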
Example #8
def main():
    # load MNIST images
    images, labels = dataset.load_test_images()

    # config
    config = aae.config

    # settings
    num_analogies = 10
    pylab.gray()

    # generate style vector z
    x = dataset.sample_unlabeled_data(images,
                                      num_analogies,
                                      config.ndim_x,
                                      binarize=False)
    _, z = aae.encode_x_yz(x, apply_softmax=True)
    z = aae.to_numpy(z)

    # plot original image on the left
    for m in xrange(num_analogies):
        pylab.subplot(num_analogies, config.ndim_y + 2, m * 12 + 1)
        pylab.imshow(x[m].reshape((28, 28)), interpolation="none")
        pylab.axis("off")

    all_y = np.identity(config.ndim_y, dtype=np.float32)
    for m in xrange(num_analogies):
        # copy z as many as the number of classes
        fixed_z = np.repeat(z[m].reshape(1, -1), config.ndim_y, axis=0)
        gen_x = aae.to_numpy(aae.decode_yz_x(all_y, fixed_z))
        # plot images generated from each label
        for n in xrange(config.ndim_y):
            pylab.subplot(num_analogies, config.ndim_y + 2, m * 12 + 3 + n)
            pylab.imshow(gen_x[n].reshape((28, 28)), interpolation="none")
            pylab.axis("off")

    fig = pylab.gcf()
    fig.set_size_inches(num_analogies, config.ndim_y)
    pylab.savefig("{}/analogy.png".format(args.plot_dir))
Example #9
def main():
    # load MNIST images
    images, labels = dataset.load_test_images()

    # Settings
    num_scatter = len(images)
    _images, _, label_id = dataset.sample_labeled_data(images, labels,
                                                       num_scatter)

    with tf.device(config.device):
        t = build_graph(is_test=True)

    with tf.Session(config=tf.ConfigProto(allow_soft_placement=True,
                                          log_device_placement=True)) as sess:
        saver = tf.train.Saver()
        saver.restore(sess, tf.train.latest_checkpoint(config.ckpt_dir))
        # z distributed plot
        num_segments = 20
        limit = (-5, 5)
        x_values = np.linspace(limit[0], limit[1], num_segments)
        y_values = np.linspace(limit[0], limit[1], num_segments)
        vacant = np.zeros((28 * num_segments, 28 * num_segments))
        for i, x_element in enumerate(x_values):
            for j, y_element in enumerate(y_values):
                x_reconstruction = sess.run(
                    t.x_r,
                    feed_dict={
                        t.yz: np.reshape([x_element, y_element], [1, 2])
                    })
                vacant[(num_segments - 1 - i) * 28:(num_segments - i) * 28,
                       j * 28:(j + 1) * 28] = x_reconstruction.reshape(28, 28)

        vacant = (vacant + 1) / 2
        pylab.figure(figsize=(10, 10), dpi=400, facecolor='white')
        pylab.imshow(vacant, cmap='gray', origin='upper')
        pylab.tight_layout()
        pylab.axis('off')
        pylab.savefig("{}/clusters.png".format(config.ckpt_dir))
Example #10
# -*- coding: utf-8 -*-
import os, sys, time, pylab
import numpy as np
import matplotlib.patches as mpatches
sys.path.append(os.path.abspath(os.path.join(os.getcwd(), "../../")))
import dataset
from args import args
from model import aae

try:
	os.mkdir(args.plot_dir)
except:
	pass

images, labels = dataset.load_test_images()
config = aae.config
num_clusters = config.ndim_y
num_plots_per_cluster = 11
image_width = 28
image_height = 28
ndim_x = image_width * image_height
pylab.gray()

# plot cluster head
head_y = np.identity(config.ndim_y, dtype=np.float32)
zero_z = np.zeros((config.ndim_y, config.ndim_z), dtype=np.float32)
head_x = aae.to_numpy(aae.decode_yz_x(head_y, zero_z, test=True))
head_x = (head_x + 1.0) / 2.0
for n in xrange(num_clusters):
	pylab.subplot(num_clusters, num_plots_per_cluster + 2, n * (num_plots_per_cluster + 2) + 1)
	pylab.imshow(head_x[n].reshape((image_width, image_height)), interpolation="none")
Example #11
File: analogy.py  Project: musyoku/adgm
import os, sys, time, pylab
import numpy as np
from chainer import cuda, Variable
import matplotlib.patches as mpatches
sys.path.append(os.path.split(os.getcwd())[0])
import dataset
from model import adgm
from args import args

try:
	os.mkdir(args.plot_dir)
except:
	pass

# load test images
images, labels = dataset.load_test_images()

# config
config = adgm.config
num_analogies = 10
xp = np
if args.gpu_device != -1:
	xp = cuda.cupy

# sample data
x = dataset.sample_unlabeled_data(images, num_analogies, config.ndim_x, binarize=False)
z = adgm.encode_x_z(x, argmax_y=True, test=True)

# plot
fig = pylab.gcf()
fig.set_size_inches(16.0, 16.0)
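
This listing is also truncated: it samples x, encodes the style vectors z, and sizes a 16 x 16 inch figure, but the analogy-drawing loop and the save that presumably follow (compare Examples #3 and #8) are not shown.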
Example #12
def main(run_load_from_file=False):
    # load MNIST images
    images, labels = dataset.load_test_images()

    # config
    opt = Operation()
    opt.check_dir(config.ckpt_dir, is_restart=False)
    opt.check_dir(config.log_dir, is_restart=True)

    max_epoch = 510
    num_trains_per_epoch = 500
    batch_size_u = 100

    # training
    with tf.device(config.device):
        h = build_graph()

    sess_config = tf.ConfigProto(allow_soft_placement=True,
                                 log_device_placement=True)
    sess_config.gpu_options.allow_growth = True
    sess_config.gpu_options.per_process_gpu_memory_fraction = 0.9
    saver = tf.train.Saver(max_to_keep=2)

    with tf.Session(config=sess_config) as sess:
        # Load from checkpoint or start a new session
        if run_load_from_file:
            saver.restore(sess, tf.train.latest_checkpoint(config.ckpt_dir))
            training_epoch_loss, _ = pickle.load(
                open(config.ckpt_dir + '/pickle.pkl', 'rb'))
        else:
            sess.run(tf.global_variables_initializer())
            training_epoch_loss = []

        # Recording loss per epoch
        process = Process()
        for epoch in range(max_epoch):
            process.start_epoch(epoch, max_epoch)
            # learning rate (held constant across epochs here)
            learning_rate = 0.0001

            # Recording loss per iteration
            sum_loss_reconstruction = 0
            sum_loss_discriminator_z = 0
            sum_loss_discriminator_img = 0
            sum_loss_generator_z = 0
            sum_loss_generator_img = 0
            process_iteration = Process()
            for i in range(num_trains_per_epoch):
                process_iteration.start_epoch(i, num_trains_per_epoch)
                # inputs (suffix _l -> labeled, _u -> unlabeled)
                images_u = dataset.sample_unlabeled_data(images, batch_size_u)
                if config.distribution_sampler == 'swiss_roll':
                    z_true_u = sampler.swiss_roll(batch_size_u, config.ndim_z,
                                                  config.num_types_of_label)
                elif config.distribution_sampler == 'gaussian_mixture':
                    z_true_u = sampler.gaussian_mixture(
                        batch_size_u, config.ndim_z, config.num_types_of_label)
                elif config.distribution_sampler == 'uniform_desk':
                    z_true_u = sampler.uniform_desk(batch_size_u,
                                                    config.ndim_z,
                                                    radius=2)
                elif config.distribution_sampler == 'gaussian':
                    z_true_u = sampler.gaussian(batch_size_u,
                                                config.ndim_z,
                                                var=1)
                elif config.distribution_sampler == 'uniform':
                    z_true_u = sampler.uniform(batch_size_u,
                                               config.ndim_z,
                                               minv=-1,
                                               maxv=1)
                else:
                    raise ValueError('unknown distribution_sampler: {}'.format(
                        config.distribution_sampler))

                # reconstruction_phase
                _, loss_reconstruction = sess.run([h.opt_r, h.loss_r],
                                                  feed_dict={
                                                      h.x: images_u,
                                                      h.lr: learning_rate
                                                  })

                # adversarial phase for discriminator_z
                images_u_s = dataset.sample_unlabeled_data(
                    images, batch_size_u)
                _, loss_discriminator_z = sess.run([h.opt_dz, h.loss_dz],
                                                   feed_dict={
                                                       h.x: images_u,
                                                       h.z: z_true_u,
                                                       h.lr: learning_rate
                                                   })

                _, loss_discriminator_img = sess.run([h.opt_dimg, h.loss_dimg],
                                                     feed_dict={
                                                         h.x: images_u,
                                                         h.x_s: images_u_s,
                                                         h.lr: learning_rate
                                                     })

                # adversarial phase for generator
                _, loss_generator_z = sess.run([h.opt_e, h.loss_e],
                                               feed_dict={
                                                   h.x: images_u,
                                                   h.lr: learning_rate
                                               })

                _, loss_generator_img = sess.run([h.opt_d, h.loss_d],
                                                 feed_dict={
                                                     h.x: images_u,
                                                     h.lr: learning_rate
                                                 })

                sum_loss_reconstruction += loss_reconstruction
                sum_loss_discriminator_z += loss_discriminator_z
                sum_loss_discriminator_img += loss_discriminator_img
                sum_loss_generator_z += loss_generator_z
                sum_loss_generator_img += loss_generator_img

                if i % 1000 == 0:
                    process_iteration.show_table_2d(
                        i, num_trains_per_epoch, {
                            'reconstruction':
                            sum_loss_reconstruction / (i + 1),
                            'discriminator_z':
                            sum_loss_discriminator_z / (i + 1),
                            'discriminator_img':
                            sum_loss_discriminator_img / (i + 1),
                            'generator_z':
                            sum_loss_generator_z / (i + 1),
                            'generator_img':
                            sum_loss_generator_img / (i + 1),
                        })

            average_loss_per_epoch = [
                sum_loss_reconstruction / num_trains_per_epoch,
                sum_loss_discriminator_z / num_trains_per_epoch,
                sum_loss_discriminator_img / num_trains_per_epoch,
                sum_loss_generator_z / num_trains_per_epoch,
                sum_loss_generator_img / num_trains_per_epoch,
                (sum_loss_discriminator_z + sum_loss_discriminator_img) /
                num_trains_per_epoch,
                (sum_loss_generator_z + sum_loss_generator_img) /
                num_trains_per_epoch
            ]
            training_epoch_loss.append(average_loss_per_epoch)
            training_loss_name = [
                'reconstruction', 'discriminator_z', 'discriminator_img',
                'generator_z', 'generator_img', 'discriminator', 'generator'
            ]

            if epoch % 1 == 0:
                process.show_bar(
                    epoch, max_epoch, {
                        'loss_r': average_loss_per_epoch[0],
                        'loss_d': average_loss_per_epoch[5],
                        'loss_g': average_loss_per_epoch[6]
                    })

                plt.scatter_labeled_z(
                    sess.run(h.z_r, feed_dict={h.x: images[:1000]}),
                    [int(var) for var in labels[:1000]],
                    dir=config.log_dir,
                    filename='z_representation-{}'.format(epoch))

            if epoch % 10 == 0:
                saver.save(sess,
                           os.path.join(config.ckpt_dir, 'model_checkpoint'),
                           global_step=epoch)
                pickle.dump((training_epoch_loss, training_loss_name),
                            open(config.ckpt_dir + '/pickle.pkl', 'wb'))
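
The adversarial prior samples z_true_u come from a sampler module that is not shown. For orientation, a minimal numpy sketch of a 2-D gaussian_mixture prior in the spirit of the adversarial-autoencoder setup; the signature mirrors the call above, but the defaults and circle geometry are assumptions.

import numpy as np

def gaussian_mixture(batch_size, ndim_z, num_labels, var=0.5, radius=4.0):
    # sketch: num_labels Gaussian blobs with means spaced evenly on a circle
    assert ndim_z == 2, 'this sketch only covers a 2-D latent space'
    labels = np.random.randint(0, num_labels, size=batch_size)
    angles = 2.0 * np.pi * labels / num_labels
    means = radius * np.stack([np.cos(angles), np.sin(angles)], axis=1)
    noise = np.random.normal(0, var, size=(batch_size, 2))
    return (means + noise).astype(np.float32)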
Example #13
def main():
	images, labels = dataset.load_test_images()
	num_scatter = len(images)
	x, _, label_ids = dataset.sample_labeled_data(images, labels, num_scatter)
	z = aae.to_numpy(aae.encode_x_z(x, test=True))
	plot.scatter_labeled_z(z, label_ids, dir=args.plot_dir)