Example #1
def test_load_data():
    data = load_data()
    assert len(data) == 3  # 3 data sets
    train_data = list(data[0])
    validation_data = list(data[1])
    test_data = list(data[2])

    assert len(train_data) == 50000  # 50,000 training images
    assert len(validation_data) == 10000  # 10,000 validation images
    assert len(test_data) == 10000  # 10,000 test images

    for data_set in [train_data, validation_data, test_data]:
        # feature is a vector of size 784: a 28x28 pixel image, flattened
        assert len(data_set[0][0]) == 784
        assert type(data_set[0][0]) == np.ndarray
        assert data_set[0][0].shape == (784, 1)

        # label is a one-hot encoded vector of size 10
        assert len(data_set[0][1]) == 10
        assert type(data_set[0][1]) == np.ndarray
        assert data_set[0][1].shape == (10, 1)
        # only one entry in the label vector is 1; the other 9 are 0
        assert sum(data_set[0][1]) == 1
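
The assertions above fully pin down the per-sample layout: each feature is a flattened 28x28 image stored as a (784, 1) column vector, paired with a (10, 1) one-hot label. As a minimal standalone sketch (not the load_data implementation itself), a pair with exactly that shape can be built like this:

import numpy as np

def one_hot(digit, n_classes=10):
    # (n_classes, 1) column vector with a single 1.0 at the digit's index
    label = np.zeros((n_classes, 1))
    label[digit] = 1.0
    return label

image = np.random.rand(28, 28)   # stand-in for one MNIST image
feature = image.reshape(784, 1)  # flattened, as the test expects
label = one_hot(4)               # e.g. the digit 4
assert feature.shape == (784, 1)
assert label.shape == (10, 1) and label.sum() == 1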
Example #2
def main():
    # can use sigmoid by inputting T.nnet.sigmoid
    rng = np.random.RandomState(1234)
    training_data, valid_data, test_data = load_data('mnist.pkl.gz')
    trainer = MLP(rng, [28 * 28, 500, 10], T.tanh)
    m_b_s = 100  # mini_batch_size
    n_epoch = 1000
    eta = 0.01
    L1_reg = 0.00
    L2_reg = 0.0001
    trainer.SGD(training_data, test_data, L1_reg, L2_reg, eta, n_epoch, m_b_s)
Example #3
def train_cvae_mnist():
    # load data
    dataset = load_mnist.load_data()
    X_train, y_train = dataset[0]
    X_valid, y_valid = dataset[1]
    y_train = onehot(y_train).astype(np.float32)
    y_valid = onehot(y_valid).astype(np.float32)
    y = onehot((np.arange(100) // 10) % 10).astype(np.float32)
    # set params
    batch_size = 100
    epoch = 200
    rng1 = np.random.RandomState(1)
    z_dim = 50

    # make encoder, decoder
    encoder = Sequential(n_in=X_train.shape[1]+y_train.shape[1], rng=rng1)
    encoder.add(Dense(600))
    encoder.add(BatchNormalization())
    encoder.add(Activation('softplus'))
    encoder.add(Dense(600))
    encoder.add(BatchNormalization())
    encoder.add(Activation('softplus'))
    encoder = Encoder(network=encoder, mean_layer=Dense(z_dim), logvar_layer=Dense(z_dim), rng=rng1)

    decoder = Sequential(n_in=z_dim+y_train.shape[1], rng=rng1)
    decoder.add(BatchNormalization())
    decoder.add(Activation('softplus'))
    decoder.add(Dense(600))
    decoder.add(BatchNormalization())
    decoder.add(Activation('softplus'))
    decoder = Decoder(rng=rng1, network=decoder, mean_layer=Dense(X_train.shape[1]))

    # concat encoder and decoder, and define loss
    cvae = CVAE(rng1, encoder=encoder, decoder=decoder)
    opt = Adam(lr=3e-4)
    cvae.compile(opt=opt)

    f_encode = encoder.function(variable(X_train), variable(y), mode='pred')
    f_decode = decoder.function(variable(X_train), variable(y), mode='pred')

    def binarize(x):
        return rng1.binomial(1, x).astype(np.float32)

    train_batches = BatchIterator([X_train, y_train], batch_size, aug=[binarize, None])
    # train
    for i in range(epoch // 10):
        print('epoch:{0}-{1}'.format(i*10, (i+1)*10))
        cvae.fit(train_batches, epoch=10)
        z = f_encode(np.concatenate((X_valid[:100], y_valid[:100]), axis=1))[0]
        z = np.tile(z[:10, :z_dim], (10, 1)).reshape(100, z_dim)
        reconstructed = f_decode(np.concatenate((z, y), axis=1))[0]
        plot = 255 * np.vstack((X_valid[:10], reconstructed))
        saveimg(plot.reshape(110, 28, 28).astype(np.uint8), (11, 10),
                'imgs/CVAE/CVAE_MNIST_analogy_epoch' + str((i+1)*10) + '.png')
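
The np.tile call above repeats the same ten latent codes once per label block, so each block of ten rows shares its codes while y varies the class; that is what makes the saved image an analogy grid. A tiny standalone check of that layout (toy z_dim, independent of the model code):

import numpy as np

z_dim = 3
codes = np.arange(10 * z_dim).reshape(10, z_dim)  # 10 latent codes
grid = np.tile(codes, (10, 1))                    # shape (100, z_dim)
# every block of 10 rows repeats the same 10 codes
assert (grid[:10] == codes).all() and (grid[90:] == codes).all()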
Example #4
def main():
    dataset = load_mnist.load_data()
    X_train, y_train = dataset[0]
    X_valid, _ = dataset[1]
    batch_size = 64
    z_dim = 50
    n_hidden = 500
    epoch = 200
    opt_gen = Adam(lr=1e-4, beta1=0.5)
    opt_dis = Adam(lr=1e-4, beta1=0.5)
    train_began_mnist(X_train, X_valid, z_dim, n_hidden, opt_gen, opt_dis, 'elu', epoch, batch_size)
Example #5
def main():
    dataset = load_mnist.load_data()
    X_train, y_train = dataset[0]
    batch_size = 64
    z_dim = 100
    n_hidden = 500
    k = 5
    epoch = 500
    opt_gen = RMSprop(lr=0.00005, rho=0.5)
    opt_dis = RMSprop(lr=0.00005, rho=0.5)
    train_wgan_mnist(X_train, z_dim, n_hidden, opt_gen, opt_dis, 'relu', epoch, batch_size, k)
Example #6
def main():
    # can use sigmoid by inputting T.nnet.sigmoid
    rng = np.random.RandomState(1234)
    training_data, valid_data, test_data = load_data('mnist.pkl.gz')
    trainer = MLP(rng, [28 * 28, 500, 10], T.tanh)
    m_b_s = 100  # mini_batch_size
    n_epoch = 1000
    eta = 0.01
    L1_reg = 0.00
    L2_reg = 0.0001
    trainer.SGD(training_data, test_data, L1_reg, L2_reg, eta, n_epoch, m_b_s)
Example #7
def main():
    dataset = load_mnist.load_data()
    X_train, y_train = dataset[0]
    batch_size = 100
    z_dim = 50
    n_hidden = 600
    k = 1
    epoch = 200*k
    opt_gen = Adam(lr=2e-4, beta1=0.5)
    opt_dis = Adam(lr=2e-4, beta1=0.5)
    train_ebgan_mnist(X_train, z_dim, n_hidden, opt_gen, opt_dis, 'elu', epoch, batch_size, k,
                      use_PT=True)
Example #8
def svm_baseline():
    train_data, _, test_data = load_data()
    train_features = [e[0].reshape(784, ) for e in train_data]
    train_labels = [np.argmax(e[1].reshape(10, )) for e in train_data]

    # train
    clf = svm.SVC()
    print("DEBUG 01 -> before fit")
    clf.fit(train_features, train_labels)
    print("DEBUG 02 -> after fit")
    # test
    test_features = [e[0].reshape(784, ) for e in test_data]
    test_labels = [np.argmax(e[1].reshape(10, )) for e in test_data]

    predictions = list(clf.predict(test_features))
    num_correct = sum(int(a == y) for a, y in zip(predictions, test_labels))
    print("Baseline classifier using an SVM.")
    print("%s of %s values correct." % (num_correct, len(test_labels)))
Example #9
def main():
    dataset = load_mnist.load_data()
    X_train, y_train = dataset[0]
    X_valid, y_valid = dataset[1]

    print('training M1 model...')

    vae = train_vae_mnist(X_train, X_valid, z_dim=50, n_hidden=600, lr=3e-4,
                          activation='softplus', epoch=1000, batch_size=100)

    n_l = 100

    X_l = np.zeros((n_l, X_train.shape[1]), dtype=np.float32)
    y_l = np.zeros((n_l, 10), dtype=np.float32)
    idx = np.zeros(49900, dtype=np.int32)
    start = 0

    for i in range(10):
        idx_i = np.where(y_train == i)[0]
        # take the first n_l/10 examples of each digit as labelled data
        X_l[i * n_l // 10:(i + 1) * n_l // 10] = X_train[idx_i[:n_l // 10]]
        # route this digit's remaining indices to the unlabelled pool
        idx[start:start + len(idx_i[n_l // 10:])] = idx_i[n_l // 10:]
        y_l[i * n_l // 10:(i + 1) * n_l // 10, i] = 1.
        start += len(idx_i[n_l // 10:])
    X_u = X_train[idx]
    epoch = 2000
    n_hidden = 500
    z_dim = 50
    dim = 50
    alpha = 1.
    mode = 'mc'
    filename = ('results/SSVAE/m1m2_valid_acc_' + str(mode) + '_alpha_'
                + str(alpha) + '_.txt')
    print('training M2 model...')

    encoder_function = vae.encoder.function(variable(X_l), mode='sampling')

    def sampler(x):
        return encoder_function(x)[0]

    X_valid = vae.encoder.predict(BatchIterator([X_valid], 100))
    train_ssvae_mnist(X_l, y_l, X_u, X_valid, y_valid, dim, z_dim, n_hidden,
                      alpha, 3e-4, 'softplus', epoch,
                      'gaussian', sampler, mode, filename)
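
With n_l = 100 labelled examples and 10 classes, the loop above keeps 10 labelled examples per digit and routes the remaining 49,900 training indices into the unlabelled pool X_u. A quick standalone check of that arithmetic:

n_l, n_classes, n_train = 100, 10, 50000
per_class = n_l // n_classes   # 10 labelled examples per digit
assert per_class * n_classes == n_l
assert n_train - n_l == 49900  # matches the size of the idx array above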
Example #10
import numpy as np
from ml.bo import bo
import load_mnist
from ml.deeplearning.layers import Dense, Activation
from ml.deeplearning.optimizers import SGD
from ml.deeplearning.objectives import MulticlassLogLoss, L2Regularization
from ml.deeplearning.models import Sequential

if __name__ == '__main__':
    dataset = load_mnist.load_data()
    x_train, y_train = dataset[0]
    x_test, y_test = dataset[1]


    def make(params):
        rng = np.random.RandomState(123)
        opt = SGD(lr=params[0], momentum=params[1])
        loss = [MulticlassLogLoss(), L2Regularization(params[2])]
        clf = Sequential(784, rng=rng, iprint=False)
        clf.add(Dense(10))
        clf.add(Activation('softmax'))
        clf.compile(loss=loss, opt=opt, batch_size=100, nb_epoch=100)

        return clf

    def eval(clf, train_x, train_y, valid_x, valid_y):
        clf.fit(train_x, train_y)
        score = clf.accuracy(valid_x, valid_y)
        return score

    intervals = [[0.001, 1.], [0.01, 1.], [0.001, 0.01]]
Example #11
import timeit

import theano.tensor as T
from load_mnist import load_data
from utils import tile_raster_images
from PIL import Image

CORRUPTION_LEVEL = 0.3
LEARNING_RATE = 0.1
TRAINING_EPOCHS = 5
BATCH_SIZE = 20
DATASET = '../Datasets/mnist.pkl.gz'

if __name__ == '__main__':

    datasets = load_data(DATASET)
    train_set_x, train_set_y = datasets[0]

    # compute number of minibatches for training, validation and testing
    n_train_batches = train_set_x.get_value(borrow=True).shape[0] // BATCH_SIZE
    np_data = train_set_x.get_value()

    ####################################
    # BUILDING THE MODEL NO CORRUPTION #
    ####################################
    
    da = DenoisingAutoencoder(n_visible=28 * 28, n_hidden=500, batch_size=BATCH_SIZE)
    start_time = timeit.default_timer()

    # go through training epochs
    for epoch in range(TRAINING_EPOCHS):
Example #12
def train_dcgan_mnist():
    # load data
    dataset = load_mnist.load_data()
    X_train, y_train = dataset[0]
    X_valid, y_valid = dataset[1]
    X_train = X_train.reshape((50000, 1, 28, 28)) * 2. - 1.
    X_valid = X_valid.reshape((10000, 1, 28, 28)) * 2. - 1.
    batch_size = 100
    k = 1
    n_epoch = 200

    filename = 'imgs/DCGAN/DCGAN_MNIST_'
    # make discriminator
    rng1 = np.random.RandomState(1)
    d = discriminator(rng1)

    # make generator
    rng2 = np.random.RandomState(1234)
    g = generator(rng2)

    # concat models for training generator
    concat_g = Sequential(100, rng2)
    concat_g.add(g)
    concat_g.add(d, add_params=False)
    concat_g.compile(train_loss=[CrossEntropy(),
                                 L2Regularization(weight=1e-5)],
                     opt=Adam(lr=0.0002, beta1=0.5))

    # make label
    ones = np.ones(batch_size).astype(np.int8)
    zeros = np.zeros(batch_size).astype(np.int8)

    # generate first fake
    for i, layer in enumerate(g.layers):
        if hasattr(layer, 'moving'):
            g.layers[i].moving = False
    z = np.random.uniform(low=-1, high=1, size=batch_size*100)
    z = z.reshape(batch_size, 100).astype(np.float32)
    fake = g.predict(z)
    for i, layer in enumerate(g.layers):
        if hasattr(layer, 'moving'):
            g.layers[i].moving = True
    g.pred_function = None

    z_plot = np.random.uniform(low=-1, high=1, size=100*100)
    z_plot = z_plot.reshape(batch_size, 100).astype(np.float32)

    # training
    for i in range(n_epoch):
        start = 0
        print('epoch:{0}'.format(i+1))
        X_train, y_train = utils.shuffle(X_train, y_train)
        s = timeit.default_timer()
        for j in range(50000 // batch_size):
            # train discriminator
            d.fit_on_batch(X_train[start:start+batch_size], ones)
            d.fit_on_batch(fake, zeros)

            # train generator
            if j % k == 0:
                z = np.random.uniform(low=-1, high=1, size=batch_size*100)
                z = z.reshape(batch_size, 100).astype(np.float32)
                concat_g.fit_on_batch(z, ones)
            # generate fake
            z = np.random.uniform(low=-1, high=1, size=batch_size*100)
            z = z.reshape(batch_size, 100).astype(np.float32)
            fake = g.predict(z)
            start += batch_size
            e1 = timeit.default_timer()
            utils.progbar(j+1, 50000 // batch_size, e1-s)

        # validation
        z = np.random.uniform(low=-1, high=1, size=10000*100)
        z = z.reshape(10000, 100).astype(np.float32)
        fake_valid = g.predict(z)
        acc_real = d.accuracy(X_valid, np.ones(10000).astype(np.int8))
        sys.stdout.write(' Real ACC:{0:.3f}'.format(acc_real))
        acc_fake = d.accuracy(fake_valid, np.zeros(10000).astype(np.int8))
        sys.stdout.write(' Gene ACC:{0:.3f}'.format(acc_fake))

        e = timeit.default_timer()
        sys.stdout.write(', {0:.2f}s'.format(e-s))
        sys.stdout.write('\n')
        print(g.layers[1].var_inf.get_value())
        if (i+1) % 10 == 0:
            print('generate fake...')
            generation = 255.0 * (g.predict(z_plot)+1) / 2.
            generation = generation.reshape(100, 28, 28)
            utils.saveimg(generation, (10, 10),
                          filename + 'epoch' + str(i+1) + '.png')

        if (i+1) % 10 == 0:
            z1 = np.random.uniform(low=-1, high=1, size=100)
            z2 = np.random.uniform(low=-1, high=1, size=100)
            z = np.zeros((100, 100))
            for j in range(100):
                z[j] = z1 + (-z1 + z2) * j / 99.
            generation = 255.0 * (g.predict(z.astype(np.float32))+1) / 2.
            generation = generation.reshape(100, 28, 28)

            utils.saveimg(generation, (10, 10),
                          filename + 'Analogy_epoch' + str(i+1) + '.png')
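
The analogy block above interpolates between two latent codes with an explicit loop over j. Assuming the same z1 and z2, a vectorized equivalent using np.linspace:

# 100 evenly spaced interpolation weights, one per row
alphas = np.linspace(0., 1., 100).reshape(-1, 1)
z = z1 + (z2 - z1) * alphas  # shape (100, 100), same values as the loop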
Example #13
# tag::test_setup[]
from load_mnist import load_data
from network import SequentialNetwork
from layers import DenseLayer, ActivationLayer

training_data, test_data = load_data()  # <1>
"""
print(type(training_data)) # list
print(len(training_data))  # 60 000
print(len(test_data))      # 10 000
"""

len_train_dat = len(training_data)
len_test_dat = len(test_data)
kilo = 1000
new_len1 = int(0.5 * kilo)
# to speed up on slow PCs ;-)
new_len2 = int(0.2 * kilo)

training_data, test_data = training_data[:new_len1], test_data[:new_len2]

net = SequentialNetwork()  # <2>

net.add(DenseLayer(784, 392))  # <3>
net.add(ActivationLayer(392))
net.add(DenseLayer(392, 196))
net.add(ActivationLayer(196))
net.add(DenseLayer(196, 10))
net.add(ActivationLayer(10))  # <4>

# <1> First, load training and test data.
# <2> Next, initialize a sequential neural network.
# <3> You can then add dense and activation layers one by one.
# <4> The final layer has size 10, the number of classes to predict.
# end::test_setup[]
Example #14
import network
from load_mnist import load_data

# net = network.Network([784, 196, 49, 10])
net = network.Network([784, 30, 10])

train_data, _, test_data = load_data()

net.sgd(train_data,
        epochs=30,
        mini_batch_size=10,
        eta=3.0,
        test_data=test_data)
Example #15
import numpy as np
from matplotlib import pyplot as plt
from load_mnist import load_data
from layers import sigmoid_double


def average_digit(data, digit):
    filtered_data = [x[0] for x in data if np.argmax(x[1]) == digit]
    filtered_array = np.asarray(filtered_data)
    return np.average(filtered_array, axis=0)


train, test = load_data()
avg_eight = average_digit(train, 8)

img = (np.reshape(avg_eight, (28, 28)))
plt.imshow(img)
# plt.show()

x_3 = train[2][0]  # 4
x_18 = train[17][0]  # 8

W = np.transpose(avg_eight)
np.dot(W, x_3)
np.dot(W, x_18)


def predict(x, W, b):
    return sigmoid_double(np.dot(W, x) + b)
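
A usage sketch for predict: the bias below is illustrative (hand-picked, not part of the original snippet), chosen so that the average-8 detector scores the 8 far higher than the 4:

b = -45  # illustrative hand-tuned bias
print(predict(x_3, W, b))   # close to 0: x_3 is a 4
print(predict(x_18, W, b))  # close to 1: x_18 is an 8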
Example #16
__author__ = 'sorensonderby'
import numpy as np
from load_mnist import load_data
from PIL import Image
import scipy.io as sio
import matplotlib
import matplotlib.pyplot as plt
import os
ORG_SHP = [28, 28]
OUT_SHP = [100, 100]
NUM_DISTORTIONS = 8
dist_size = (9, 9)  # should be odd?
NUM_DISTORTIONS_DB = 100000

mnist_data = load_data('MNIST/mnist.pkl.gz')
outfile = "mnist_sequence3_sample_8distortions9x9"

np.random.seed(1234)

### create list with distortions
all_digits = np.concatenate([mnist_data['X_train'], mnist_data['X_valid']],
                            axis=0)
all_digits = all_digits.reshape([-1] + ORG_SHP)
num_digits = all_digits.shape[0]

distortions = []
for i in range(NUM_DISTORTIONS_DB):
    rand_digit = np.random.randint(num_digits)
    rand_x = np.random.randint(ORG_SHP[1] - dist_size[1])
    rand_y = np.random.randint(ORG_SHP[0] - dist_size[0])
Example #17
__author__ = 'sorensonderby'
import numpy as np
from load_mnist import load_data
from PIL import Image
import scipy.io as sio
import matplotlib
import matplotlib.pyplot as plt
import os
ORG_SHP = [28, 28]
OUT_SHP = [100, 100]
NUM_DISTORTIONS = 8
dist_size = (9, 9)  # should be odd?
NUM_DISTORTIONS_DB = 100000

mnist_data = load_data('../MNIST/mnist.pkl.gz')
outfile = "mnist_sequence3_sample_8dsistortions9x9"

np.random.seed(1234)


### create list with distortions
all_digits = np.concatenate([mnist_data['X_train'],
                             mnist_data['X_valid'],
                             mnist_data['X_train']], axis=0)
all_digits = all_digits.reshape([-1] + ORG_SHP)
num_digits = all_digits.shape[0]


distortions = []
for i in range(NUM_DISTORTIONS_DB):
    rand_digit = np.random.randint(num_digits)
Example #18
import numpy as np
from load_mnist import load_data
from PIL import Image
import scipy.io as sio
import matplotlib
import matplotlib.pyplot as plt
import os

ORG_SHP = [28, 28]
OUT_SHP = [40, 40]
NUM_DISTORTIONS = 5
dist_size = (5, 5)  # should be odd?
NUM_DISTORTIONS_DB = 100000

mnist_data = load_data(
    './mnist.pkl.gz', "https://s3.amazonaws.com/img-datasets/mnist.pkl.gz")
outfile = "mnist_sequence3_sample_5dsistortions5x5"

np.random.seed(1234)


# create list with distortions
all_digits = np.concatenate([mnist_data['X_train'],
                             mnist_data['X_valid']], axis=0)
all_digits = all_digits.reshape([-1] + ORG_SHP)
num_digits = all_digits.shape[0]


distortions = []
for i in range(NUM_DISTORTIONS_DB):
    rand_digit = np.random.randint(num_digits)
Example #19
import numpy as np
from scipy.optimize import minimize
from initial_params import initial_params
from sparse_autoencoder_cost import sparse_autoencoder_cost, sigmoid, der_sigmoid
from load_mnist import generate_patch, load_data
from display_network import display_network


visible_size = 28 * 28
hidden_size = 196
sparsity_param = 0.1
lamda = 0.003
beta = 3

images = np.transpose(load_data())[:, 0:10000]

patches = generate_patch()
theta = initial_params(visible_size, hidden_size)

J = lambda th: sparse_autoencoder_cost(
    visible_size, hidden_size, th, lambda x: sigmoid(x), lambda x: der_sigmoid(x), lamda, beta, sparsity_param, images
)

options_ = {"maxiter": 800, "disp": True}
result = minimize(J, theta, method="L-BFGS-B", jac=True, options=options_)
opt_theta = result.x

print(result)

W1 = opt_theta[0 : hidden_size * visible_size].reshape(hidden_size, visible_size).transpose()
Example #20
    h = dropout(h, p_drop_hidden)
    h2 = rectify(T.dot(h, w_h2))

    h2 = dropout(h2, p_drop_hidden)
    h3 = rectify(T.dot(h2, w_h3))

    h3 = dropout(h3, p_drop_hidden)
    h4 = rectify(T.dot(h3, w_h4))

    h4 = dropout(h4, p_drop_hidden)
    py_x = softmax(T.dot(h4, w_o))
    return h, h2, h3, h4, py_x


(trX, trY), (teX, teY), num_of_class = load_mnist.load_data()
row, col = np.shape(trX)
# col = c * x * y
rank = num_of_class // 2
hidden = (col + num_of_class) * 2 // 3
print("Num of hidden neurons:", hidden)
print("Num of classes:", num_of_class)
print("Rank:", rank)
X = T.fmatrix()
Y = T.fmatrix()

w_h = init_weights((col, hidden))  # 784 <- input
w_h2 = init_weights((hidden, hidden))
w_h3 = init_weights((hidden, hidden))
w_h4 = init_weights((hidden, hidden))
w_o = init_weights((hidden, num_of_class))  # 10 <- output
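
For MNIST the sizing rules above give concrete values (col = 784 input pixels, num_of_class = 10); a quick standalone check of the arithmetic:

col, num_of_class = 784, 10
hidden = (col + num_of_class) * 2 // 3
rank = num_of_class // 2
assert (hidden, rank) == (529, 5)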
Example #21
import network2
from load_mnist import load_data

# net = network2.Network([784, 196, 49, 10])
net = network2.Network([784, 30, 10])

train_data, validation_data, test_data = load_data()

net.sgd(train_data,
        epochs=30,
        mini_batch_size=10,
        eta=0.5,
        lmbda=1.0E-4,
        evaluation_data=test_data,
        monitor_evaluation_cost=True,
        monitor_evaluation_accuracy=True,
        monitor_training_cost=True,
        monitor_training_accuracy=True,
        monitor_weight_stats=True)
Example #22
# tag::test_setup[]
import load_mnist
import network
from layers import DenseLayer, ActivationLayer

training_data, test_data = load_mnist.load_data()  # <1>

net = network.SequentialNetwork()  # <2>

net.add(DenseLayer(784, 392))  # <3>
net.add(ActivationLayer(392))
net.add(DenseLayer(392, 196))
net.add(ActivationLayer(196))
net.add(DenseLayer(196, 10))
net.add(ActivationLayer(10))  # <4>

# <1> First, load training and test data.
# <2> Next, initialize a sequential neural network.
# <3> You can then add dense and activation layers one by one.
# <4> The final layer has size 10, the number of classes to predict.
# end::test_setup[]

# tag::test_run[]
net.train(training_data,
          epochs=10,
          mini_batch_size=10,
          learning_rate=3.0,
          test_data=test_data)  # <1>

# <1> You can now easily train the model by specifying train and test data, the number of epochs, the mini-batch size and the learning rate.
# end::test_run[]
Example #23
File: AE.py Project: neonnnnn/ml
import numpy as np
from ml.deeplearning.models import Sequential
from ml.utils import visualize, BatchIterator
# NOTE: the original snippet omits the remaining imports; the paths below are
# inferred from the other neonnnnn/ml examples on this page
from ml.deeplearning.layers import Dense, Activation, Decoder
from ml.deeplearning.optimizers import SGD
from ml.deeplearning.objectives import SquaredError
from load_mnist import load_data


def autoencoder(n_in, n_hidden=500, rng=np.random.RandomState(1234),
                activations=['relu', 'sigmoid']):
    clf = Sequential(n_in, rng)
    clf.add(Dense(n_hidden))
    clf.add(Activation(activations[0]))
    clf.add(Decoder(clf.layers[0]))
    clf.add(Activation(activations[1]))
    
    return clf

if __name__ == '__main__':
    dataset = load_data()
    X_train, y_train = dataset[0]
    X_valid, y_valid = dataset[1]
    X_test, y_test = dataset[2]

    opt = SGD(lr=0.01, momentum=0.9)
    loss = SquaredError()

    clf = autoencoder(28*28)
    clf.compile(opt=opt, train_loss=loss)
    train_batches = BatchIterator([X_train, X_train], batch_size=128,
                                  shuffle=True)
    valid_batches = BatchIterator([X_valid, X_valid], batch_size=128,
                                  shuffle=False)
    clf.fit(train_batches, 100, valid_batches)
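
To inspect what the trained autoencoder has learned, here is a hedged follow-up sketch: assuming clf.predict returns the reconstructed 784-dimensional vectors (predict is used this way in the other examples on this page), originals and reconstructions can be compared side by side with matplotlib:

import matplotlib.pyplot as plt

recon = clf.predict(X_valid[:10])  # assumed shape: (10, 784)
fig, axes = plt.subplots(2, 10, figsize=(10, 2))
for i in range(10):
    axes[0, i].imshow(X_valid[i].reshape(28, 28), cmap='gray')  # original
    axes[1, i].imshow(recon[i].reshape(28, 28), cmap='gray')    # reconstruction
    axes[0, i].axis('off')
    axes[1, i].axis('off')
plt.show()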