Example 1
def main():
    images, labels = dataset.load_test_images()
    num_scatter = len(images)
    _images, _, label_id = dataset.sample_labeled_data(images, labels,
                                                       num_scatter)
    with tf.device(config.device):
        t = build_graph(is_test=True)

    with tf.Session(config=tf.ConfigProto(allow_soft_placement=True,
                                          log_device_placement=True)) as sess:
        saver = tf.train.Saver()
        saver.restore(sess, tf.train.latest_checkpoint(config.ckpt_dir))
        z, _x = sess.run([t.z_r, t.x_r], feed_dict={t.x: _images})

        plot.scatter_labeled_z(z, label_id, dir=config.ckpt_dir)
        plot.tile_images(_x[:100], dir=config.ckpt_dir)
        plot.plot_loss_tendency(config.ckpt_dir)

    hist_value, hist_head = plot.load_pickle_to_data(config.ckpt_dir)
    for loss_name in ['reconstruction']:
        plot.plot_loss_trace(hist_value[loss_name], loss_name, config.ckpt_dir)

    # give each trace a distinct label so the three plots do not
    # overwrite one another
    plot.plot_adversarial_trace(hist_value['discriminator'],
                                hist_value['generator'], 'total',
                                config.ckpt_dir)
    plot.plot_adversarial_trace(hist_value['discriminator_z'],
                                hist_value['generator_z'], 'z',
                                config.ckpt_dir)
    plot.plot_adversarial_trace(hist_value['discriminator_img'],
                                hist_value['generator_img'], 'img',
                                config.ckpt_dir)
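
Example 1, like most of the snippets below, funnels a 2-D latent code into plot.scatter_labeled_z. A minimal sketch of what such a helper might look like, assuming matplotlib and integer class labels; only the name and call signature come from the examples, the body is an assumption:

import matplotlib
matplotlib.use('Agg')  # assumption: render off-screen, since the examples save to disk
import matplotlib.pyplot as plt


def scatter_labeled_z(z, label_ids, dir='.', filename='z_scatter'):
    # color each 2-D latent point by its integer class label
    fig, ax = plt.subplots(figsize=(8, 8))
    points = ax.scatter(z[:, 0], z[:, 1], c=label_ids, cmap='tab10', s=4)
    fig.colorbar(points, ax=ax, label='class id')
    fig.savefig('{}/{}.png'.format(dir, filename))
    plt.close(fig)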
Example 2
def main():
    # load MNIST images
    images, labels = dataset.load_test_images()

    # Settings
    num_scatter = len(images)
    _images, _, label_id = dataset.sample_labeled_data(images, labels,
                                                       num_scatter)

    with tf.device(config.device):
        t = build_graph(is_test=True)

    with tf.Session(config=tf.ConfigProto(allow_soft_placement=True,
                                          log_device_placement=True)) as sess:
        saver = tf.train.Saver()
        saver.restore(sess, tf.train.latest_checkpoint(config.ckpt_dir))
        representation, x_reconstruction = sess.run([t.yz, t.x_r],
                                                    feed_dict={t.x: _images})
        plot.scatter_labeled_z(representation, label_id, dir=config.ckpt_dir)
        plot.tile_images(x_reconstruction[:100], dir=config.ckpt_dir)

        # decode a grid of latent points to visualize z-space
        num_segments = 20
        limit = (-5, 5)
        x_values = np.linspace(limit[0], limit[1], num_segments)
        y_values = np.linspace(limit[0], limit[1], num_segments)
        vacant = np.zeros((28 * num_segments, 28 * num_segments))
        for i, x_element in enumerate(x_values):
            for j, y_element in enumerate(y_values):
                x_decoded = sess.run(
                    t.x_r,
                    feed_dict={
                        t.yz: np.reshape([x_element, y_element], [1, 2])
                    })
                # avoid shadowing the x_reconstruction batch plotted above
                vacant[(num_segments - 1 - i) * 28:(num_segments - i) * 28,
                       j * 28:(j + 1) * 28] = x_decoded.reshape(28, 28)

        vacant = (vacant + 1) / 2
        pylab.figure(figsize=(10, 10), dpi=400, facecolor='white')
        pylab.imshow(vacant, cmap='gray', origin='upper')
        pylab.tight_layout()
        pylab.axis('off')
        pylab.savefig("{}/clusters.png".format(config.ckpt_dir))

        # loss part
        hist_value, hist_head = plot.load_pickle_to_data(config.ckpt_dir)
        for loss_name in ['reconstruction', 'supervised']:
            plot.plot_loss_trace(hist_value[loss_name], loss_name,
                                 config.ckpt_dir)

        plot.plot_adversarial_trace(hist_value['discriminator_y'],
                                    hist_value['generator_y'], 'y',
                                    config.ckpt_dir)
        plot.plot_adversarial_trace(hist_value['discriminator_z'],
                                    hist_value['generator_z'], 'z',
                                    config.ckpt_dir)
        plot.plot_adversarial_trace(hist_value['validation_accuracy'],
                                    hist_value['transform'],
                                    'validation_accuracy', config.ckpt_dir)
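
Example 2 decodes a 20 x 20 lattice of latent points by hand; the plot.tile_images call presumably does the analogous tiling for a batch of reconstructions. A minimal sketch of such a tiler, assuming 28 x 28 images flattened to vectors with pixel values in [-1, 1] (the same rescaling the grid plot applies); the helper name and dir keyword come from the calls above, everything else is an assumption:

import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt


def tile_images(x, dir='.', filename='tiled', side=28):
    # arrange a batch of flattened images into the smallest square grid
    n = int(np.ceil(np.sqrt(len(x))))
    canvas = np.ones((n * side, n * side))
    for k, image in enumerate(x):
        i, j = divmod(k, n)
        canvas[i * side:(i + 1) * side,
               j * side:(j + 1) * side] = image.reshape(side, side)
    canvas = (canvas + 1) / 2  # rescale from [-1, 1] to [0, 1]
    plt.figure(figsize=(8, 8))
    plt.imshow(canvas, cmap='gray')
    plt.axis('off')
    plt.savefig('{}/{}.png'.format(dir, filename))
    plt.close()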
Example 3
def main():
	images, labels = dataset.load_test_images()
	num_scatter = len(images)

	y_distribution, z = aae.encode_x_yz(images, apply_softmax=False, test=True)
	y = aae.argmax_onehot_from_unnormalized_distribution(y_distribution)
	representation = aae.to_numpy(aae.encode_yz_representation(y, z, test=True))

	plot.scatter_labeled_z(representation, labels, dir=args.plot_dir)
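
Example 3's argmax_onehot_from_unnormalized_distribution step picks the most likely cluster head per sample and re-encodes it as a one-hot vector. A minimal numpy sketch of that operation, assuming a (batch, num_classes) array of logits; the function name comes from the call above, the implementation is an assumption:

import numpy as np


def argmax_onehot_from_unnormalized_distribution(y_distribution):
    # softmax is monotonic, so taking the argmax of the raw logits gives
    # the same class as the argmax of the normalized distribution
    y_distribution = np.asarray(y_distribution)
    onehot = np.zeros_like(y_distribution, dtype=np.float32)
    onehot[np.arange(len(y_distribution)), y_distribution.argmax(axis=1)] = 1.0
    return onehot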
Example 4
def main():
    images, labels = dataset.load_test_images()
    num_scatter = len(images)
    _images, _, label_id = dataset.sample_labeled_data(images, labels,
                                                       num_scatter)
    with tf.device(config.device):
        x, z_representation, x_construction = build_graph(is_test=True)

    with tf.Session(config=tf.ConfigProto(allow_soft_placement=True,
                                          log_device_placement=True)) as sess:
        saver = tf.train.Saver()
        saver.restore(sess, tf.train.latest_checkpoint(config.ckpt_dir))
        z, _x = sess.run([z_representation, x_construction],
                         feed_dict={x: _images})

        scatter_labeled_z(z, label_id, dir=config.ckpt_dir)
        tile_images(_x[:100], dir=config.ckpt_dir)
        plot_loss_tendency(config.ckpt_dir)
Example 5
def main(run_load_from_file=False):
    # load MNIST images
    images, labels = dataset.load_test_images()

    # config
    opt = Operation()
    opt.check_dir(config.ckpt_dir, is_restart=False)
    opt.check_dir(config.log_dir, is_restart=True)

    max_epoch = 510
    num_trains_per_epoch = 500
    batch_size_u = 100

    # training
    with tf.device(config.device):
        h = build_graph()

    sess_config = tf.ConfigProto(allow_soft_placement=True,
                                 log_device_placement=True)
    sess_config.gpu_options.allow_growth = True
    sess_config.gpu_options.per_process_gpu_memory_fraction = 0.9
    saver = tf.train.Saver(max_to_keep=2)

    with tf.Session(config=sess_config) as sess:
        # Load from checkpoint or start a new session
        if run_load_from_file:
            saver.restore(sess, tf.train.latest_checkpoint(config.ckpt_dir))
            training_epoch_loss, _ = pickle.load(
                open(config.ckpt_dir + '/pickle.pkl', 'rb'))
        else:
            sess.run(tf.global_variables_initializer())
            training_epoch_loss = []

        # Recording loss per epoch
        process = Process()
        for epoch in range(max_epoch):
            process.start_epoch(epoch, max_epoch)
            # learning rate (kept constant here)
            learning_rate = 0.0001

            # Recording loss per iteration
            sum_loss_reconstruction = 0
            sum_loss_discriminator_z = 0
            sum_loss_discriminator_img = 0
            sum_loss_generator_z = 0
            sum_loss_generator_img = 0
            process_iteration = Process()
            for i in range(num_trains_per_epoch):
                process_iteration.start_epoch(i, num_trains_per_epoch)
                # Inputs
                # _l -> labeled, _u -> unlabeled
                images_u = dataset.sample_unlabeled_data(images, batch_size_u)
                if config.distribution_sampler == 'swiss_roll':
                    z_true_u = sampler.swiss_roll(batch_size_u, config.ndim_z,
                                                  config.num_types_of_label)
                elif config.distribution_sampler == 'gaussian_mixture':
                    z_true_u = sampler.gaussian_mixture(
                        batch_size_u, config.ndim_z, config.num_types_of_label)
                elif config.distribution_sampler == 'uniform_desk':
                    z_true_u = sampler.uniform_desk(batch_size_u,
                                                    config.ndim_z,
                                                    radius=2)
                elif config.distribution_sampler == 'gaussian':
                    z_true_u = sampler.gaussian(batch_size_u,
                                                config.ndim_z,
                                                var=1)
                elif config.distribution_sampler == 'uniform':
                    z_true_u = sampler.uniform(batch_size_u,
                                               config.ndim_z,
                                               minv=-1,
                                               maxv=1)
                else:
                    # fail fast instead of hitting a NameError on z_true_u below
                    raise ValueError('unknown distribution_sampler: {}'.format(
                        config.distribution_sampler))

                # reconstruction_phase
                _, loss_reconstruction = sess.run([h.opt_r, h.loss_r],
                                                  feed_dict={
                                                      h.x: images_u,
                                                      h.lr: learning_rate
                                                  })

                # adversarial phase for discriminator_z; also sample a second
                # unlabeled batch for the image discriminator below
                images_u_s = dataset.sample_unlabeled_data(
                    images, batch_size_u)
                _, loss_discriminator_z = sess.run([h.opt_dz, h.loss_dz],
                                                   feed_dict={
                                                       h.x: images_u,
                                                       h.z: z_true_u,
                                                       h.lr: learning_rate
                                                   })

                _, loss_discriminator_img = sess.run([h.opt_dimg, h.loss_dimg],
                                                     feed_dict={
                                                         h.x: images_u,
                                                         h.x_s: images_u_s,
                                                         h.lr: learning_rate
                                                     })

                # adversarial phase for generator
                _, loss_generator_z = sess.run([h.opt_e, h.loss_e],
                                               feed_dict={
                                                   h.x: images_u,
                                                   h.lr: learning_rate
                                               })

                _, loss_generator_img = sess.run([h.opt_d, h.loss_d],
                                                 feed_dict={
                                                     h.x: images_u,
                                                     h.lr: learning_rate
                                                 })

                sum_loss_reconstruction += loss_reconstruction
                sum_loss_discriminator_z += loss_discriminator_z
                sum_loss_discriminator_img += loss_discriminator_img
                sum_loss_generator_z += loss_generator_z
                sum_loss_generator_img += loss_generator_img

                if i % 1000 == 0:
                    process_iteration.show_table_2d(
                        i, num_trains_per_epoch, {
                            'reconstruction': sum_loss_reconstruction / (i + 1),
                            'discriminator_z': sum_loss_discriminator_z / (i + 1),
                            'discriminator_img': sum_loss_discriminator_img / (i + 1),
                            'generator_z': sum_loss_generator_z / (i + 1),
                            'generator_img': sum_loss_generator_img / (i + 1),
                        })

            average_loss_per_epoch = [
                sum_loss_reconstruction / num_trains_per_epoch,
                sum_loss_discriminator_z / num_trains_per_epoch,
                sum_loss_discriminator_img / num_trains_per_epoch,
                sum_loss_generator_z / num_trains_per_epoch,
                sum_loss_generator_img / num_trains_per_epoch,
                (sum_loss_discriminator_z + sum_loss_discriminator_img) /
                num_trains_per_epoch,
                (sum_loss_generator_z + sum_loss_generator_img) /
                num_trains_per_epoch
            ]
            training_epoch_loss.append(average_loss_per_epoch)
            training_loss_name = [
                'reconstruction', 'discriminator_z', 'discriminator_img',
                'generator_z', 'generator_img', 'discriminator', 'generator'
            ]

            if epoch % 1 == 0:
                process.show_bar(
                    epoch, max_epoch, {
                        'loss_r': average_loss_per_epoch[0],
                        'loss_d': average_loss_per_epoch[5],
                        'loss_g': average_loss_per_epoch[6]
                    })

                plt.scatter_labeled_z(
                    sess.run(h.z_r, feed_dict={h.x: images[:1000]}),
                    [int(var) for var in labels[:1000]],
                    dir=config.log_dir,
                    filename='z_representation-{}'.format(epoch))

            if epoch % 10 == 0:
                saver.save(sess,
                           os.path.join(config.ckpt_dir, 'model_checkpoint'),
                           global_step=epoch)
                pickle.dump((training_epoch_loss, training_loss_name),
                            open(config.ckpt_dir + '/pickle.pkl', 'wb'))
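
Example 5 draws its target prior from helpers such as sampler.swiss_roll and sampler.gaussian_mixture. A minimal numpy sketch of a 2-D swiss-roll prior in the spirit of the adversarial-autoencoder paper, assuming ndim_z == 2; the function name and argument order come from the calls above, the body is an assumption:

import numpy as np


def swiss_roll(batch_size, ndim_z, num_labels):
    assert ndim_z == 2, 'this sketch only draws a 2-D swiss roll'
    # stratify the position along the roll so each label owns one segment
    labels = np.random.randint(0, num_labels, batch_size)
    uni = np.random.uniform(0.0, 1.0, batch_size) / num_labels \
        + labels / float(num_labels)
    r = np.sqrt(uni) * 3.0
    rad = np.pi * 4.0 * np.sqrt(uni)
    return np.stack([r * np.cos(rad), r * np.sin(rad)], axis=1).astype(np.float32)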
Example 6
if __name__ == "__main__":
    # main
    main()

    # Load data
    file_path = '{}/{}-metric_plus.pkl'.format(config.logs_path,
                                               config.description)
    [q_errors, r_adjs, z_adjs, z_true], name = pickle_load(file_path)

    # latent space
    label = np.tile(np.array(dp.test.e), [test_times, 1])
    scatter_labeled_z(z_adjs,
                      label,
                      dir=config.latent_path,
                      filename='latent-Q2')
    vis.kdeplot(z_adjs[:, 0],
                z_adjs[:, 1],
                config.latent_path,
                name='Kde_Latent_Space')
    vis.jointplot(z_adjs[:, 0],
                  z_adjs[:, 1],
                  config.latent_path,
                  name='Scatter_latent')
    # use a distinct filename so the prior-sample plot is not overwritten
    vis.jointplot(z_true[:, 0],
                  z_true[:, 1],
                  config.latent_path,
                  name='Scatter_latent_true')
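
The vis.kdeplot and vis.jointplot calls look like thin wrappers over seaborn. A minimal sketch of such wrappers, assuming seaborn >= 0.11 and matplotlib; the names and argument order come from the calls above, the bodies are assumptions:

import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import seaborn as sns


def kdeplot(x, y, out_dir, name='kde'):
    # 2-D kernel density estimate of the latent cloud
    fig, ax = plt.subplots(figsize=(6, 6))
    sns.kdeplot(x=x, y=y, fill=True, ax=ax)
    fig.savefig('{}/{}.png'.format(out_dir, name))
    plt.close(fig)


def jointplot(x, y, out_dir, name='joint'):
    # scatter with marginal histograms
    grid = sns.jointplot(x=x, y=y, kind='scatter', s=4)
    grid.savefig('{}/{}.png'.format(out_dir, name))
    plt.close('all')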
Example 7
def main():
	images, labels = dataset.load_test_images()
	num_scatter = len(images)
	x, _, label_ids = dataset.sample_labeled_data(images, labels, num_scatter)
	z = aae.to_numpy(aae.encode_x_z(x, test=True))
	plot.scatter_labeled_z(z, label_ids, dir=args.plot_dir)
Example 8
def main(run_load_from_file=False):
    # config
    opt = Operation()
    opt.check_dir(config.ckpt_dir, is_restart=False)
    opt.check_dir(config.log_dir, is_restart=True)

    max_epoch = 510
    num_trains_per_epoch = 500
    batch_size_l = 100
    batch_size_u = 100

    # create a semi-supervised split of the MNIST training images
    images, labels = dataset.load_train_images()
    num_labeled_data = 10000
    num_types_of_label = 11  # additional label corresponds to unlabeled data
    training_images_l, training_labels_l, training_images_u, _, _ = dataset.create_semisupervised(images, labels, 0, num_labeled_data, num_types_of_label)

    # training
    with tf.device(config.device):
        h = build_graph()

    sess_config = tf.ConfigProto(allow_soft_placement=True, log_device_placement=True)
    sess_config.gpu_options.allow_growth = True
    sess_config.gpu_options.per_process_gpu_memory_fraction = 0.9
    saver = tf.train.Saver(max_to_keep=2)

    with tf.Session(config=sess_config) as sess:
        # Load from checkpoint or start a new session
        if run_load_from_file:
            saver.restore(sess, tf.train.latest_checkpoint(config.ckpt_dir))
            training_epoch_loss, _ = pickle.load(open(config.ckpt_dir + '/pickle.pkl', 'rb'))
        else:
            sess.run(tf.global_variables_initializer())
            training_epoch_loss = []

        # Recording loss per epoch
        process = Process()
        for epoch in range(max_epoch):
            process.start_epoch(epoch, max_epoch)

            # learning-rate schedule
            learning_rate = opt.ladder_learning_rate(epoch + len(training_epoch_loss))

            # Recording loss per iteration
            sum_loss_reconstruction = 0
            sum_loss_discriminator = 0
            sum_loss_generator = 0
            process_iteration = Process()
            for i in range(num_trains_per_epoch):
                process_iteration.start_epoch(i, num_trains_per_epoch)
                # Inputs
                # _l -> labeled, _u -> unlabeled
                images_l, label_onehot_l, label_id_l = dataset.sample_labeled_data(training_images_l, training_labels_l, batch_size_l, ndim_y=num_types_of_label)
                images_u = dataset.sample_unlabeled_data(training_images_u, batch_size_u)
                onehot = np.zeros((1, num_types_of_label), dtype=np.float32)
                onehot[0, -1] = 1  # the extra class marks "unlabeled"
                label_onehot_u = np.repeat(onehot, batch_size_u, axis=0)
                z_true_l = sampler.supervised_swiss_roll(batch_size_l, config.ndim_z, label_id_l, num_types_of_label - 1)
                z_true_u = sampler.swiss_roll(batch_size_u, config.ndim_z, num_types_of_label - 1)
                # z_true_l = sampler.supervised_gaussian_mixture(batch_size_l, config.ndim_z, label_id_l, num_types_of_label - 1)
                # z_true_u = sampler.gaussian_mixture(batch_size_u, config.ndim_z, num_types_of_label - 1)

                # reconstruction_phase
                _, loss_reconstruction = sess.run([h.opt_r, h.loss_r], feed_dict={
                    h.x: images_u,
                    h.lr: learning_rate
                })

                # adversarial phase for discriminator
                _, loss_discriminator_l = sess.run([h.opt_d, h.loss_d], feed_dict={
                    h.x: images_l,
                    h.label: label_onehot_l,
                    h.z: z_true_l,
                    h.lr: learning_rate
                })

                _, loss_discriminator_u = sess.run([h.opt_d, h.loss_d], feed_dict={
                    h.x: images_u,
                    h.label: label_onehot_u,
                    h.z: z_true_u,
                    h.lr: learning_rate
                })

                loss_discriminator = loss_discriminator_l + loss_discriminator_u

                # adversarial phase for generator
                _, loss_generator_l = sess.run([h.opt_e, h.loss_e], feed_dict={
                    h.x: images_l,
                    h.label: label_onehot_l,
                    h.lr: learning_rate
                })

                _, loss_generator_u = sess.run([h.opt_e, h.loss_e], feed_dict={
                    h.x: images_u,
                    h.label: label_onehot_u,
                    h.lr: learning_rate
                })
                loss_generator = loss_generator_l + loss_generator_u

                sum_loss_reconstruction += loss_reconstruction / batch_size_u
                sum_loss_discriminator += loss_discriminator
                sum_loss_generator += loss_generator

                if i % 1000 == 0:
                    process_iteration.show_table_2d(i, num_trains_per_epoch, {
                        'reconstruction': sum_loss_reconstruction / (i + 1),
                        'discriminator': sum_loss_discriminator / (i + 1),
                        'generator': sum_loss_generator / (i + 1),
                    })

            average_loss_per_epoch = [
                sum_loss_reconstruction / num_trains_per_epoch,
                sum_loss_discriminator / num_trains_per_epoch,
                sum_loss_generator / num_trains_per_epoch,
            ]
            training_epoch_loss.append(average_loss_per_epoch)
            training_loss_name = [
                'reconstruction',
                'discriminator',
                'generator'
            ]

            if epoch % 1 == 0:
                process.show_bar(epoch, max_epoch, {
                    'loss_r': average_loss_per_epoch[0],
                    'loss_d': average_loss_per_epoch[1],
                    'loss_g': average_loss_per_epoch[2]
                })

                plt.tile_images(sess.run(h.x_, feed_dict={h.x: images_u}),
                                dir=config.log_dir,
                                filename='x_rec_epoch_{}'.format(str(epoch).zfill(3)))

                plt.scatter_labeled_z(sess.run(h.z_r, feed_dict={h.x: images[:1000]}), [int(var) for var in labels[:1000]],
                                      dir=config.log_dir,
                                      filename='z_representation-{}'.format(epoch))

            if epoch % 10 == 0:
                saver.save(sess, os.path.join(config.ckpt_dir, 'model_checkpoint'), global_step=epoch)
                pickle.dump((training_epoch_loss, training_loss_name), open(config.ckpt_dir + '/pickle.pkl', 'wb'))
                plt.plot_double_scale_trend(config.ckpt_dir)
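
Example 8 pins each labeled sample to a class-specific region of the prior via sampler.supervised_swiss_roll, with a Gaussian-mixture variant left commented out. A minimal numpy sketch of the supervised Gaussian-mixture idea, one 2-D component per class with means spaced on a circle; the name and argument order come from the commented call above, the body is an assumption:

import numpy as np


def supervised_gaussian_mixture(batch_size, ndim_z, label_ids, num_labels):
    assert ndim_z == 2, 'this sketch only draws a 2-D mixture'
    # one Gaussian per class; len(label_ids) must equal batch_size
    angles = 2.0 * np.pi * np.asarray(label_ids) / num_labels
    means = 4.0 * np.stack([np.cos(angles), np.sin(angles)], axis=1)
    return (means + 0.5 * np.random.randn(batch_size, 2)).astype(np.float32)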