Example #1
# Imports assumed by this example (from the CADL course libs):
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import IPython.display as ipyd
from libs.utils import montage
from libs import gif
from libs.datasets import MNIST

def create_vanilla_auto_encoder(n_features, dimensions):
    ds = MNIST()
    # ds.X holds all the images in the MNIST dataset, one flattened vector per row
    imgs = ds.X[:1000].reshape((-1, 28, 28))
    # Then create a montage and draw the montage
    plt.imshow(montage(imgs), cmap='gray')
    plt.show()
    mean_img = np.mean(ds.X, axis=0)
    std_img = np.std(imgs, axis=0)
    X = tf.placeholder(tf.float32, [None, n_features])
    current_input = X
    n_input = n_features
    Ws = []
    for layer_i, dimension_i in enumerate(dimensions, start=1):
        with tf.variable_scope("encoder/layer/{}".format(layer_i)):
            w = tf.get_variable(name='W',
                                shape=[n_input, dimension_i],
                                initializer=tf.random_normal_initializer(
                                    mean=0.0, stddev=2.0))
            b = tf.get_variable(name='b',
                                shape=[dimension_i],
                                dtype=tf.float32,
                                initializer=tf.constant_initializer(0.0))

            h = tf.nn.bias_add(name='h',
                               value=tf.matmul(current_input, w),
                               bias=b)
            current_input = tf.nn.relu(h)
            Ws.append(w)
            n_input = dimension_i

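    # Build the decoder with "tied weights": reuse the encoder's weight
    # matrices in reverse order, transposing each one, so the decoder adds
    # no new weight parameters of its own (only new biases).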
    Ws = Ws[::-1]
    dimensions = dimensions[::-1][1:] + [n_features]
    print('dimensions=', dimensions)
    for layer_i, dimension_i in enumerate(dimensions):
        with tf.variable_scope("decoder/layer/{}".format(layer_i)):
            w = tf.transpose(Ws[layer_i])
            b = tf.get_variable(name='b',
                                shape=[dimension_i],
                                dtype=tf.float32,
                                initializer=tf.constant_initializer(0.0))

            print('current_input= ', current_input)
            print('w = ', w)

            h = tf.nn.bias_add(name='h',
                               value=tf.matmul(current_input, w),
                               bias=b)
            current_input = tf.nn.relu(h)
            n_input = dimension_i

    Y = current_input
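    # Reconstruction cost: squared error between the input X and the
    # reconstruction Y, averaged first over pixels (axis 1), then over the batch.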
    cost = tf.reduce_mean(tf.squared_difference(X, Y), 1)
    print('cost.getshape', cost.get_shape())
    cost = tf.reduce_mean(cost)
    learning_rate = 0.01
    optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost)
    sess = tf.Session()
    sess.run(tf.global_variables_initializer())
    batch_size = 100
    n_epochs = 60
    # We'll try to reconstruct the same first 100 images and show how the
    # network does over the course of training.
    examples = ds.X[:100]
    mean_img = np.mean(examples, axis=0)
    #recon0 = np.clip(examples.reshape((-1, 28, 28)), 0, 255)
    #img_or = montage(recon0).astype(np.uint8)
    #img_or.append('0')
    #gif.build_gif(img_or, saveto='example.{}.gif'.format(np.random.rand()), cmap='gray')
    #plt.show()
    # We'll store the reconstructions in a list
    imgs = []
    fig, ax = plt.subplots(1, 1)
    for epoch_i in range(n_epochs):
        for batch_X, _ in ds.train.next_batch(batch_size=batch_size):
            sess.run(optimizer, feed_dict={X: batch_X - mean_img})
        recon = sess.run(Y, feed_dict={X: examples - mean_img})
        recon = np.clip((recon + mean_img).reshape((-1, 28, 28)), 0, 255)
        img_i = montage(recon).astype(np.uint8)
        imgs.append(img_i)
        ax.imshow(img_i, cmap='gray')
        fig.canvas.draw()
        #plt.imshow(img_i, cmap='gray')
        #plt.show()
        print(epoch_i, sess.run(cost, feed_dict={X: batch_X - mean_img}))
    # Save the per-epoch reconstructions as an animated gif and display it.
    gif_name = 'ae6-learning0.{}.gif'.format(np.random.rand())
    gif.build_gif(imgs, saveto=gif_name, cmap='gray')
    ipyd.Image(url='{}?{}'.format(gif_name, np.random.rand()), height=500, width=500)
    return Y
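# Hypothetical usage sketch (the layer sizes below are illustrative, not taken
# from the source); guarded so importing this module does not start training:
if __name__ == '__main__':
    create_vanilla_auto_encoder(n_features=784, dimensions=[512, 256, 128, 64])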
Example #2
# This example assumes the CADL course libs are importable, e.g.:
#   from libs.vae import VAE
#   from libs import utils
#   from libs.datasets import MNIST
def test_mnist():
    """Train an autoencoder on MNIST.
    This function will train an autoencoder on MNIST and also
    save many image files during the training process, demonstrating
    the latent space of the innermost dimension of the encoder,
    as well as reconstructions of the decoder.
    """

    # load MNIST
    n_code = 2
    mnist = MNIST(split=[0.8, 0.1, 0.1])
    ae = VAE(input_shape=[None, 784],
             n_filters=[512, 256],
             n_hidden=64,
             n_code=n_code,
             activation=tf.nn.sigmoid,
             convolutional=False,
             variational=True)

    n_examples = 100
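    # Sample four corner points in the n_code-dimensional latent space and
    # interpolate between them into a grid of n_examples codes; decoding this
    # grid visualizes the learned latent manifold during training.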
    zs = np.random.uniform(-1.0, 1.0, [4, n_code]).astype(np.float32)
    zs = utils.make_latent_manifold(zs, n_examples)

    learning_rate = 0.02
    optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(
        ae['cost'])

    # We create a session to use the graph
    sess = tf.Session()
    sess.run(tf.global_variables_initializer())

    # Fit all training data
    t_i = 0
    batch_i = 0
    batch_size = 200
    n_epochs = 10
    test_xs = mnist.test.images[:n_examples]
    utils.montage(test_xs.reshape((-1, 28, 28)), 'test_xs.png')
    for epoch_i in range(n_epochs):
        train_i = 0
        train_cost = 0
        for batch_xs, _ in mnist.train.next_batch(batch_size):
            train_cost += sess.run([ae['cost'], optimizer],
                                   feed_dict={
                                       ae['x']: batch_xs,
                                       ae['train']: True,
                                       ae['keep_prob']: 1.0
                                   })[0]
            train_i += 1
            if batch_i % 10 == 0:
                # Plot example reconstructions from latent layer
                recon = sess.run(ae['y'],
                                 feed_dict={
                                     ae['z']: zs,
                                     ae['train']: False,
                                     ae['keep_prob']: 1.0
                                 })
                utils.montage(recon.reshape((-1, 28, 28)),
                              'manifold_%08d.png' % t_i)
                # Plot example reconstructions
                recon = sess.run(ae['y'],
                                 feed_dict={
                                     ae['x']: test_xs,
                                     ae['train']: False,
                                     ae['keep_prob']: 1.0
                                 })
                utils.montage(recon.reshape((-1, 28, 28)),
                              'reconstruction_%08d.png' % t_i)
                t_i += 1
            batch_i += 1

        valid_i = 0
        valid_cost = 0
        for batch_xs, _ in mnist.valid.next_batch(batch_size):
            valid_cost += sess.run([ae['cost']],
                                   feed_dict={
                                       ae['x']: batch_xs,
                                       ae['train']: False,
                                       ae['keep_prob']: 1.0
                                   })[0]
            valid_i += 1
        print('train:', train_cost / train_i, 'valid:', valid_cost / valid_i)
Example #3
"""
# %%
import tensorflow as tf
from libs.batch_norm import batch_norm
from libs.activations import lrelu
from libs.connections import conv2d, linear
from libs.datasets import MNIST
#import  tf.train.ExponentialMovingAverage
'''
This file was originally broken; see
https://github.com/pkmital/tensorflow_tutorials/issues/43
The libs.batch_norm in the project was wrong (it was missing one line);
I have already added it.
'''
# %% Setup input to the network and true output label.  These are
# simply placeholders which we'll fill in later.
mnist = MNIST()
x = tf.placeholder(tf.float32, [None, 784])
y = tf.placeholder(tf.float32, [None, 10])

# %% We add a new type of placeholder to denote when we are training.
# This will be used to change the way we compute the network during
# training/testing.
is_training = tf.placeholder(tf.bool, name='is_training')

# %% We'll convert our MNIST vector data to a 4-D tensor:
# N x W x H x C
x_tensor = tf.reshape(x, [-1, 28, 28, 1])
#ema.apply([batch_mean, batch_var])
# %% We'll use a new method called batch normalization.
# This process attempts to "reduce internal covariate shift",
# which is a fancy way of saying that it will normalize updates for each
# batch of data.
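# Example #3's snippet is cut off at this point.  As a rough sketch (an
# assumption, not the repo's own code) of how the is_training placeholder is
# typically combined with batch normalization in TF 1.x, using the stock
# tf.layers API in place of the repo's libs.batch_norm helper:
h = tf.layers.conv2d(x_tensor, filters=32, kernel_size=3,
                     strides=2, padding='same')
h = tf.layers.batch_normalization(h, training=is_training)
h = tf.nn.relu(h)
# Note: with tf.layers.batch_normalization, the moving-average update ops in
# tf.GraphKeys.UPDATE_OPS must be run alongside the training op.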
Example #4
"""
Imports MNIST dataset
Convolutional autoencoder for the MNIST dataset
"""
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from libs.utils import montage
from libs import gif
from libs.datasets import MNIST

ds = MNIST()
mean_img = np.mean(ds.X, axis=0)

# Let's get the first 1000 images of the dataset and reshape them
imgs = ds.X[:1000].reshape((-1, 28, 28))
# So the number of features is the second dimension of our inputs matrix, 784
n_features = ds.X.shape[1]
# And we'll create a placeholder in the TensorFlow graph that can accept any number of inputs, each with n_features values.
X = tf.placeholder(tf.float32, [None, n_features])
X_tensor = tf.reshape(X, [-1, 28, 28, 1])
n_filters = [16, 16, 16]
filter_sizes = [4, 4, 4]
current_input = X_tensor
# notice instead of having 784 as our input features, we're going to have
# just 1, corresponding to the number of channels in the image.
# We're going to use convolution to find 16 filters, or 16 channels of information in each spatial location we perform convolution at.
n_input = 1
# We're going to keep every matrix we create so let's create a list to hold them all
Ws = []
shapes = []
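# The example stops before the encoder itself is built.  A minimal sketch of
# the convolutional encoder loop that these comments describe (an assumption,
# not the original code; stride and initializer values are illustrative):
for layer_i, n_output in enumerate(n_filters):
    with tf.variable_scope("encoder/layer/{}".format(layer_i)):
        # Remember the input shape so a decoder could later mirror it.
        shapes.append(current_input.get_shape().as_list())
        W = tf.get_variable(
            name='W',
            shape=[filter_sizes[layer_i], filter_sizes[layer_i],
                   n_input, n_output],
            initializer=tf.random_normal_initializer(mean=0.0, stddev=0.02))
        # Strided convolution halves the spatial resolution at each layer.
        h = tf.nn.conv2d(current_input, W,
                         strides=[1, 2, 2, 1], padding='SAME')
        current_input = tf.nn.relu(h)
        Ws.append(W)
        n_input = n_output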
Example #5
def load_data():
    mnist = MNIST()
    return mnist
Example #6
"""
Imports MNIST dataset
Predicts labels for the MNIST dataset using a CNN
"""
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from libs import utils
from libs.utils import montage
from libs import gif
from libs.datasets import MNIST

ds = MNIST(one_hot=True, split=[0.8, 0.1, 0.1])

X = tf.placeholder(tf.float32, [None, 784])
Y = tf.placeholder(tf.float32, [None, 10])
n_output = 10
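# Reshape the flat 784-dimensional inputs back into N x 28 x 28 x 1 images
# so they can be fed to 2-D convolutions.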
X_tensor = tf.reshape(X, [-1, 28, 28, 1])

filter_size = 5
n_filters_in = 1
n_filters_out = 32
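# First convolutional layer: 5x5 filters mapping the 1 input channel to 32 feature maps.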
W_1 = tf.get_variable(
    name='W',
    shape=[filter_size, filter_size, n_filters_in, n_filters_out],
    initializer=tf.random_normal_initializer())
b_1 = tf.get_variable(name='b',
                      shape=[n_filters_out],
                      initializer=tf.constant_initializer())
h_1 = tf.nn.relu(
    tf.nn.bias_add(