Example #1
from keras.datasets import mnist

def load_mnist(mode='training'):
    '''Return the requested MNIST split with a trailing channel axis.'''
    (train_x, train_y), (test_x, test_y) = mnist.load_data()

    # append a channel dimension: (N, 28, 28) -> (N, 28, 28, 1)
    if mode == 'training':
        return train_x.reshape(list(train_x.shape) + [1]), train_y
    else:
        return test_x.reshape(list(test_x.shape) + [1]), test_y
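A quick usage sketch for this loader:

x_train, y_train = load_mnist('training')  # (60000, 28, 28, 1), (60000,)
x_test, y_test = load_mnist('testing')     # (10000, 28, 28, 1), (10000,)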
Example #2
import keras
from keras import backend as K
from keras.datasets import mnist

# standard MNIST constants assumed by this snippet
img_rows, img_cols = 28, 28
num_classes = 10

def load_data():
    '''
    Load MNIST, reshape it for the backend's image data format, scale
    pixels to [0, 1], and one-hot encode the labels.

    :return: [x_train, y_train, x_test, y_test]
    '''
    # the data, shuffled and split between train and test sets
    (x_train, y_train), (x_test, y_test) = mnist.load_data()
    if K.image_data_format() == 'channels_first':
        x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
        x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)
        input_shape = (1, img_rows, img_cols)
    else:
        x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
        x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
        input_shape = (img_rows, img_cols, 1)
    x_train = x_train.astype('float32')
    x_test = x_test.astype('float32')
    x_train /= 255
    x_test /= 255
    print('x_train shape:', x_train.shape)
    print(x_train.shape[0], 'train samples')
    print(x_test.shape[0], 'test samples')
    # convert class vectors to binary class matrices
    y_train = keras.utils.to_categorical(y_train, num_classes)
    y_test = keras.utils.to_categorical(y_test, num_classes)
    return [x_train, y_train, x_test, y_test]
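A one-line usage sketch, unpacking the returned list:

x_train, y_train, x_test, y_test = load_data()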
Example #3
from keras.datasets import mnist
from keras.utils import np_utils

def load_mnist(classes=10):  # MNIST (for testing)
    # the data, shuffled and split between train and test sets
    (x_train, y_train), (x_test, y_test) = mnist.load_data()
    # at this point: x_train.shape=(60000,28,28), x_test.shape=(10000,28,28), y_train.shape=(60000,), y_test.shape=(10000,)
    x_train = x_train.reshape(-1, 28, 28, 1).astype('float32')  # -1 means infer this dimension from the others
    x_test = x_test.reshape(-1, 28, 28, 1).astype('float32')
    # normalization (rescale values from 0-255 to 0-1)
    x_train /= 255
    x_test /= 255
    print(x_train.shape[0], 'train samples')
    print(x_test.shape[0], 'test samples')

    # convert class vectors to binary class matrices (one-hot vectors of length `classes`)
    y_train = np_utils.to_categorical(y_train, classes)
    y_test = np_utils.to_categorical(y_test, classes)

    return x_train, y_train, x_test, y_test, 'mnist', classes, 28
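A usage sketch; the trailing return values are the dataset name, the class count, and the image side length:

x_train, y_train, x_test, y_test, name, n_classes, side = load_mnist(classes=10)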
Example #4
import numpy as np
from PIL import Image
from keras.datasets import mnist
from keras.optimizers import SGD

def train():
    # discriminator_model, generator_model, generator_containing_discriminator,
    # combine_images, and BATCH_SIZE are assumed to be defined elsewhere
    (X_train, y_train), (X_test, y_test) = mnist.load_data()
    # scale pixels to [-1, 1] and append a channel axis
    X_train = (X_train.astype(np.float32) - 127.5)/127.5
    X_train = X_train[:, :, :, None]
    X_test = X_test[:, :, :, None]
    d = discriminator_model()
    g = generator_model()
    d_on_g = generator_containing_discriminator(g, d)
    d_optim = SGD(lr=0.0005, momentum=0.9, nesterov=True)
    g_optim = SGD(lr=0.0005, momentum=0.9, nesterov=True)
    g.compile(loss='binary_crossentropy', optimizer="SGD")
    d_on_g.compile(loss='binary_crossentropy', optimizer=g_optim)
    d.trainable = True
    d.compile(loss='binary_crossentropy', optimizer=d_optim)
    for epoch in range(100):
        print("Epoch is", epoch)
        print("Number of batches", int(X_train.shape[0]/BATCH_SIZE))
        for index in range(int(X_train.shape[0]/BATCH_SIZE)):
            noise = np.random.uniform(-1, 1, size=(BATCH_SIZE, 100))
            image_batch = X_train[index*BATCH_SIZE:(index+1)*BATCH_SIZE]
            generated_images = g.predict(noise, verbose=0)
            if index % 20 == 0:
                image = combine_images(generated_images)
                image = image*127.5+127.5
                Image.fromarray(image.astype(np.uint8)).save(
                    str(epoch)+"_"+str(index)+".png")
            X = np.concatenate((image_batch, generated_images))
            y = [1] * BATCH_SIZE + [0] * BATCH_SIZE
            d_loss = d.train_on_batch(X, y)
            print("batch %d d_loss : %f" % (index, d_loss))
            noise = np.random.uniform(-1, 1, (BATCH_SIZE, 100))
            # freeze the discriminator so this update trains only the generator
            d.trainable = False
            g_loss = d_on_g.train_on_batch(noise, [1] * BATCH_SIZE)
            d.trainable = True
            print("batch %d g_loss : %f" % (index, g_loss))
            if index % 10 == 9:
                g.save_weights('generator', True)
                d.save_weights('discriminator', True)
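The generator_containing_discriminator helper is not shown in this snippet; a minimal sketch consistent with how it is used above (the generator stacked in front of the discriminator, whose weights the training loop freezes) could be:

from keras.models import Sequential

def generator_containing_discriminator(g, d):
    # feed generator output straight into the discriminator; the caller
    # toggles d.trainable so only the generator is updated through this model
    model = Sequential()
    model.add(g)
    d.trainable = False
    model.add(d)
    return model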
Example #5
''' TensorFlow implementation of a denoising autoencoder (dA)

This code is a TensorFlow implementation of a denoising autoencoder with the architecture described in
Convolution by Evolution (Fernando, et al.) from Google DeepMind. The purpose of this code is to
evaluate the performance of the DPPN-encoded version of dA with the same architecture.

'''

import argparse
import numpy as np
import tensorflow as tf

from tensorflow.contrib.keras.python.keras.datasets import mnist
(x_train, _), (x_test, _) = mnist.load_data()

# add channels to input data and normalize to [0, 1]
x_train = x_train[:, :, :, np.newaxis] / 255.0
x_test = x_test[:, :, :, np.newaxis] / 255.0


def weight_variable(shape):
    init_val = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(init_val, name='W')

def bias_variable(shape):
    init_val = tf.constant(0.1, shape=shape)
    return tf.Variable(init_val, name='bias')

def conv2d(x, w):
    return tf.nn.conv2d(x, w, strides=[1, 2, 2, 1], padding='VALID')
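As a hedged sketch of how these helpers compose (the kernel size and filter count here are assumptions, not from the original), one strided encoder convolution over the normalized input might look like:

x_in = tf.placeholder(tf.float32, shape=[None, 28, 28, 1], name='x_in')
w_conv = weight_variable([5, 5, 1, 8])  # 5x5 kernel, 1 input channel, 8 filters
b_conv = bias_variable([8])
h_conv = tf.nn.relu(conv2d(x_in, w_conv) + b_conv)  # VALID, stride 2 -> (None, 12, 12, 8)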
"""
Load keras dataset 
"""
from tensorflow.contrib.keras.python.keras.datasets import mnist
from tensorflow.contrib.keras.python.keras.utils import to_categorical

mnist_path = mnist.get_file(
    fname='mnist.npz',
    origin=None)  # the dataset is already downloaded, no origin is needed
"""
(['def get_file(fname,\n',
  '             origin,\n',
  '             untar=False,\n',
  '             md5_hash=None,\n',
  '             file_hash=None,\n',
  "             cache_subdir='datasets',\n",
  "             hash_algorithm='auto',\n",
  '             extract=False,\n',
  "             archive_format='auto',\n",
  '             cache_dir=None):\n',
"""
(train_img, train_lab), (test_img, test_lab) = mnist.load_data(path=mnist_path)

# Convert labels to categorical one-hot encoding
train_lab_hot = to_categorical(train_lab, num_classes=10)
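For symmetry, the held-out labels can be encoded the same way:

test_lab_hot = to_categorical(test_lab, num_classes=10)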
Example #7
from tensorflow.contrib.keras.python.keras.datasets import mnist
from tensorflow.contrib.keras.python.keras.engine import Input, Model
from tensorflow.contrib.keras.python.keras.layers import Dense, K, Lambda
from tensorflow.contrib.keras.python.keras.losses import binary_crossentropy

import numpy as np
import matplotlib.pyplot as plt

(x_train, y_train), (x_test, y_test) = mnist.load_data()

x_train = x_train.astype('float32') / 255.
x_test = x_test.astype('float32') / 255.
x_train = x_train.reshape((len(x_train), np.prod(x_train.shape[1:])))
x_test = x_test.reshape((len(x_test), np.prod(x_test.shape[1:])))

batch_size = 100
original_dim = 784
latent_dim = 2
intermediate_dim = 256
epochs = 50
epsilon_std = 1.0


def sampling(args):
    # reparameterization trick: z = mu + exp(log_sigma) * epsilon, epsilon ~ N(0, I)
    z_mean, z_log_sigma = args
    epsilon = K.random_normal(shape=(batch_size, latent_dim),
                              mean=0.,
                              stddev=epsilon_std)
    return z_mean + K.exp(z_log_sigma) * epsilon
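
The snippet stops before wiring sampling into a model; a minimal sketch of how it is typically attached with the imported Lambda layer (the Dense encoder layers here are assumptions based on the hyperparameters above):

x = Input(batch_shape=(batch_size, original_dim))
h = Dense(intermediate_dim, activation='relu')(x)
z_mean = Dense(latent_dim)(h)
z_log_sigma = Dense(latent_dim)(h)
z = Lambda(sampling, output_shape=(latent_dim,))([z_mean, z_log_sigma])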

Example #8
import numpy as np
from PIL import Image
from keras.datasets import mnist
from keras.optimizers import Adam

def train():
    # Assumes discriminator_model, generator_model, q_model,
    # generator_containing_discriminator, disc_mutual_info_loss, sample_zc,
    # make_image, BATCH_SIZE, LR, and EPSILON are defined elsewhere in the source.

    # Prepare Training Data
    (X_train, y_train), (X_test, y_test) = mnist.load_data()

    X_train = (X_train.astype(np.float32) - 127.5) / 127.5
    X_train = X_train[:, :, :, None]
    X_test = X_test[:, :, :, None]

    # Initialize Models
    d = discriminator_model()
    g = generator_model()
    q = q_model()
    d_on_g = generator_containing_discriminator(g, d)
    q_on_g = generator_containing_discriminator(g, q)

    # Initialize Optimizers
    d_optim = Adam(lr=LR, beta_1=0.5, beta_2=0.999, epsilon=EPSILON)
    g_optim = Adam(lr=LR, beta_1=0.5, beta_2=0.999, epsilon=EPSILON)
    q_optim = Adam(lr=LR, beta_1=0.5, beta_2=0.999, epsilon=EPSILON)

    # Compile Models with loss functions
    g.compile(loss='binary_crossentropy', optimizer="SGD")
    d_on_g.compile(loss='binary_crossentropy', optimizer=g_optim)
    q_on_g.compile(loss=disc_mutual_info_loss, optimizer=g_optim)

    d.trainable = True
    d.compile(loss='binary_crossentropy', optimizer=d_optim)

    q.trainable = True
    q.compile(loss=disc_mutual_info_loss, optimizer=q_optim)

    try:
        # Main Training Loop
        for epoch in range(100):
            print("Epoch is", epoch)
            print("Number of batches", int(X_train.shape[0] / BATCH_SIZE))

            for index in range(int(X_train.shape[0] / BATCH_SIZE)):
                # Get Real and Generated Images
                noise = sample_zc()

                real_images = X_train[index * BATCH_SIZE:(index + 1) *
                                      BATCH_SIZE]
                generated_images = g.predict(noise,
                                             batch_size=BATCH_SIZE,
                                             verbose=0)

                # Train Discriminator and Q network
                training_images = np.concatenate(
                    (real_images, generated_images))
                labels = [1] * BATCH_SIZE + [0] * BATCH_SIZE
                latent_code = np.concatenate(noise[1:], axis=1)

                d_loss = d.train_on_batch(training_images, labels)
                q_loss = q.train_on_batch(generated_images, latent_code)

                # Train Generator using Fake/Real Signal
                noise = sample_zc()

                d.trainable = False
                g_d_loss = d_on_g.train_on_batch(noise, [1] * BATCH_SIZE)
                d.trainable = True

                # Train Generator using Mutual Information Lower Bound
                noise = sample_zc()
                latent_code = np.concatenate(noise[1:], axis=1)

                q.trainable = False
                g_q_loss = q_on_g.train_on_batch(noise, latent_code)
                q.trainable = True

                print(
                    "batch %d d_loss : %.3f q_loss: %.3f g_loss_d: %.3f g_loss_q: %.3f"
                    % (index, d_loss, q_loss, g_d_loss, g_q_loss))

                # Generate Sample Images
                if index % 20 == 0:
                    image = make_image(g)

                    Image.fromarray(image.astype(np.uint8)).save(
                        str(epoch) + "_" + str(index) + ".png")

                # Save weights
                if index % 10 == 9:
                    g.save_weights('g.kerasweights', True)
                    d.save_weights('d.kerasweights', True)
                    q.save_weights('q.kerasweights', True)

    except KeyboardInterrupt:
        pass

    print("\rFinished training")
Example #9
''' TensorFlow implementation of MNIST classifier '''

__author__ = 'Jin Yeom'
__copyright__ = 'Copyright (C) 2017 Jin Yeom'
__license__ = 'GNU General Public License (GPL)'

import numpy as np
import tensorflow as tf

from tensorflow.contrib.keras.python.keras.datasets import mnist
(mnist_x_train, mnist_y_train), (mnist_x_test, mnist_y_test) = mnist.load_data()

# preprocess mnist data
mnist_x_train = mnist_x_train.reshape((60000, 784)) / 255.0
mnist_x_test = mnist_x_test.reshape((10000, 784)) / 255.0

# convert mnist labels to one hot vectors
y_train_one_hot = np.zeros((60000, 10))
y_train_one_hot[np.arange(60000), mnist_y_train] = 1.0
y_test_one_hot = np.zeros((10000, 10))
y_test_one_hot[np.arange(10000), mnist_y_test] = 1.0

n_iter = 10000  # number of training iterations
batch_size = 32  # training batch size

x = tf.placeholder(tf.float32, shape=[None, 784], name='data') 
y_ = tf.placeholder(tf.float32, shape=[None, 10], name='label')

# hidden layer 1
with tf.name_scope('hidden_layer_1'): 
    w_1 = tf.Variable(tf.random_normal([784, 256]), name='w_1')