Example #1
import tensorflow as tf


def load_MNIST_dataset():
    mnist = tf.keras.datasets.mnist
    (Xs_tr, Ys_tr), (Xs_te, Ys_te) = mnist.load_data()
    Xs_tr = Xs_tr / 255.0
    Xs_te = Xs_te / 255.0
    Xs_tr = Xs_tr.reshape(Xs_tr.shape[0], 28, 28, 1)  # 28 rows, 28 columns, 1 channel
    Xs_te = Xs_te.reshape(Xs_te.shape[0], 28, 28, 1)
    return (Xs_tr, Ys_tr, Xs_te, Ys_te)
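
A minimal usage sketch (assuming TensorFlow is installed; the expected shapes follow from the standard Keras MNIST split of 60,000 training and 10,000 test images):

Xs_tr, Ys_tr, Xs_te, Ys_te = load_MNIST_dataset()
print(Xs_tr.shape, Ys_tr.shape)  # (60000, 28, 28, 1) (60000,)
print(Xs_te.shape, Ys_te.shape)  # (10000, 28, 28, 1) (10000,)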
Example #2
import numpy as np
import mnist  # project-local MNIST loader
# Network is assumed to come from the project's own neural-network module


def main():

    x_train, y_train, x_test, y_test = mnist.load_data(reshape=[784, 1])
    # (50000, 784) (50000, 10) (10000, 784) (10000, 10)
    print('x_train, y_train, x_test, y_test:', x_train.shape, y_train.shape,
          x_test.shape, y_test.shape)

    np.random.seed(66)

    model = Network([784, 30, 10])
    data_train = list(zip(x_train, y_train))
    data_test = list(zip(x_test, y_test))
    model.SGD(data_train, 10000, 10, 0.1, data_test)
Example #3
import sys
import numpy as np
import mnist
import nets
from scipy.misc import imread  # assumption: imread is SciPy's; the snippet omits its import


def main():
    mode = sys.argv[1]  # '1': classify one image; otherwise: train and evaluate
    li = convert_to_list(sys.argv[2])
    li[-1] = 10
    epoch = int(sys.argv[3])
    lrate = float(sys.argv[4])

    if mode == '1':
        img_name = sys.argv[5]
        img = imread(img_name)
        img = np.reshape(img, (28 * 28, 1))

        print('loading...')
        li[0] = img.size
        training_data = mnist.load_data('data/train_images.idx3-ubyte',
                                        'data/train_labels.idx1-ubyte', 'rb',
                                        60000, 1)
        net = nets.Neural_nets(li)
        print('training...')
        net.training_model(training_data, epoch, lrate)
        val = np.argmax(net.learning_model(img))

        print('The number is %d.' % val)

    else:
        print('loading...')
        li[0] = 784
        training_data = mnist.load_data('data/train_images.idx3-ubyte',
                                        'data/train_labels.idx1-ubyte', 'rb',
                                        60000, 1)
        test_data = mnist.load_data('data/test_images.idx3-ubyte',
                                    'data/test_labels.idx1-ubyte', 'rb', 10000,
                                    0)
        net = nets.Neural_nets(li)
        print('training...')
        net.training_model(training_data, epoch, lrate, test_data)
Example #4
def main():
    if RE_TRAIN:
        if os.path.exists(CKPT_PATH):
            shutil.rmtree(CKPT_PATH)
        if os.path.exists(GRAPH_PATH):
            shutil.rmtree(GRAPH_PATH)
    if not os.path.exists(CKPT_PATH):
        os.mkdir(CKPT_PATH)

    model = VAE(code_size=CODE_SIZE, ckpt_path=CKPT_PATH)
    data = mnist.load_data().train

    with tf.Session() as sess:
        writer = tf.summary.FileWriter(GRAPH_PATH, sess.graph)
        model.train(sess, data, FINAL_STEP, LR, BATCH_SIZE, writer, CKPT_STEP)
        writer.close()
Example #5
def load_data():
    (x_train, y_train), (x_test, y_test) = mnist.load_data()
    number = 10000
    x_train = x_train[0:number]
    y_train = y_train[0:number]
    x_train = x_train.reshape(number, 28 * 28)  # MNIST images are 28x28, not 227x227
    x_test = x_test.reshape(x_test.shape[0], 28 * 28)
    x_train = x_train.astype('float32')
    x_test = x_test.astype('float32')
    y_train = np_utils.to_categorical(y_train, 10)
    y_test = np_utils.to_categorical(y_test, 10)
    x_train = x_train / 255
    x_test = x_test / 255
    return (x_train,y_train),(x_test,y_test)
Example #6
def keras_fmin_fnct(space):

    '''
    Data providing function:

    This function is separated from model() so that hyperopt
    won't reload data for each evaluation run.
    '''
    (X_train, y_train), (X_test, y_test) = mnist.load_data()
    X_train = X_train.reshape(60000, 784)
    X_test = X_test.reshape(10000, 784)
    X_train = X_train.astype('float32')
    X_test = X_test.astype('float32')
    X_train /= 255
    X_test /= 255
    nb_classes = 10
    Y_train = np_utils.to_categorical(y_train, nb_classes)
    Y_test = np_utils.to_categorical(y_test, nb_classes)
    return X_train, Y_train, X_test, Y_test
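
For context, keras_fmin_fnct is the function hyperas generates from a user-written data() function. A hedged sketch of how such a pair is typically driven (the model function below is a minimal placeholder, not part of the snippet, and {{uniform(...)}} is hyperas template syntax that optim.minimize rewrites before execution; `data` refers to the user-facing twin of the function above):

from hyperopt import Trials, STATUS_OK, tpe
from hyperas import optim
from hyperas.distributions import uniform

def model(X_train, Y_train, X_test, Y_test):
    # placeholder search space: only the dropout rate is tuned
    from keras.models import Sequential
    from keras.layers.core import Dense, Dropout
    net = Sequential()
    net.add(Dense(128, activation='relu', input_shape=(784,)))
    net.add(Dropout({{uniform(0, 0.5)}}))
    net.add(Dense(10, activation='softmax'))
    net.compile(loss='categorical_crossentropy', optimizer='adam',
                metrics=['accuracy'])
    net.fit(X_train, Y_train, batch_size=128, epochs=1, verbose=0)
    score = net.evaluate(X_test, Y_test, verbose=0)
    return {'loss': -score[1], 'status': STATUS_OK, 'model': net}

best_run, best_model = optim.minimize(model=model, data=data,
                                      algo=tpe.suggest, max_evals=5,
                                      trials=Trials())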
Example #7
def trainSizeModerator(train_set_size):
    np.random.seed(1)
    Xtrain, ytrain, Xtest, ytest = mnist.load_data()  #loading the dataset
    train_size = train_set_size
    test_size = 10000
    Xtrain = Xtrain[0:train_size]
    ytrain = ytrain[0:train_size]
    Xtest = Xtest[0:test_size]
    ytest = ytest[0:test_size]

    #  Precompute sum of squares term for speed
    XtrainSOS = np.sum(Xtrain**2, axis=1, keepdims=True)  # sum of squares of the elements in each row
    XtestSOS = np.sum(Xtest**2, axis=1, keepdims=True)

    #  the fully vectorized solution takes too much memory, so we classify in batches
    #  nbatches must be an even divisor of test_size; increase it if you run out of memory
    if test_size > 1000:
        nbatches = 50
    else:
        nbatches = 5

    batches = np.array_split(np.arange(test_size), nbatches)
    ypred = np.zeros_like(ytest)

    #  Classify
    for i in range(nbatches):
        # squared Euclidean distances between this test batch and all training points
        dst = sqDistance(Xtest[batches[i]], Xtrain, XtestSOS[batches[i]], XtrainSOS)
        closest = np.argmin(dst, axis=1)  # index of the nearest training point
        ypred[batches[i]] = ytrain[closest]  # predict the nearest neighbour's label

    #  Report
    errorRate = (ypred != ytest).mean()  # fraction of mismatches between predicted and actual labels
    # errorString = 'Error Rate: {:.2f}%\n'.format(100 * errorRate)
    return errorRate * 100
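
A hedged sweep over training-set sizes, mirroring what the script in Example #11 does (the sizes here are placeholder choices):

for n in [100, 1000, 5000, 10000]:
    print(n, trainSizeModerator(n))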
Example #8
def plot_error(n):
    train_size = n
    print("Training size", train_size)
    test_size = 10000
    Xtrain, ytrain, Xtest, ytest = mnist.load_data()
    Xtrain = Xtrain[0:train_size]
    ytrain = ytrain[0:train_size]

    Xtest = Xtest[0:test_size]
    ytest = ytest[0:test_size]

    #  Precompute sum of squares term for speed
    XtrainSOS = np.sum(Xtrain**2, axis=1, keepdims=True)
    XtestSOS = np.sum(Xtest**2, axis=1, keepdims=True)

    #  the fully vectorized solution takes too much memory, so we classify in batches
    #  nbatches must be an even divisor of test_size; increase it if you run out of memory
    if test_size > 1000:
        nbatches = 50
    else:
        nbatches = 5

    batches = np.array_split(np.arange(test_size), nbatches)
    ypred = np.zeros_like(ytest)

    #  Classify
    for i in range(nbatches):
        dst = sqDistance(Xtest[batches[i]], Xtrain, XtestSOS[batches[i]],
                         XtrainSOS)
        closest = np.argmin(dst, axis=1)
        ypred[batches[i]] = ytrain[closest]

    #  Report
    errorRate = (ypred != ytest).mean()
    print('Error Rate: {:.2f}%\n'.format(100 * errorRate))
    error.append(errorRate)
    #image plot
    #plt.imshow(Xtrain[0].reshape(28, 28), cmap='gray')
    # plt.show()
    return error
Example #9
def optical_character_recognition(train_size):
    Xtrain, ytrain, Xtest, ytest = mnist.load_data()
    # train_size = 10000
    test_size = 10000

    Xtrain = Xtrain[0:train_size]
    ytrain = ytrain[0:train_size]

    Xtest = Xtest[0:test_size]
    ytest = ytest[0:test_size]

    #  Precompute sum of squares term for speed
    XtrainSOS = np.sum(Xtrain**2, axis=1, keepdims=True)
    XtestSOS = np.sum(Xtest**2, axis=1, keepdims=True)

    #  the fully vectorized solution takes too much memory, so we classify in batches
    #  nbatches must be an even divisor of test_size; increase it if you run out of memory
    if test_size > 1000:
        nbatches = 50
    else:
        nbatches = 5

    batches = np.array_split(np.arange(test_size), nbatches)
    ypred = np.zeros_like(ytest)

    #  Classify
    for i in range(nbatches):
        dst = sqDistance(Xtest[batches[i]], Xtrain, XtestSOS[batches[i]],
                         XtrainSOS)
        closest = np.argmin(dst, axis=1)
        ypred[batches[i]] = ytrain[closest]

    #  Report
    errorRate = (ypred != ytest).mean()
    print('Error Rate: {:.2f}%\n'.format(100 * errorRate))

    return errorRate
Example #10
def nfold_cross_validation(k, train_size):
    n = train_size // k
    Xtrain, ytrain, Xtest, ytest = mnist.load_data()
    Xtrain = Xtrain[0:train_size]
    ytrain = ytrain[0:train_size]
    indices = list(range(train_size))
    np.random.shuffle(indices)
    partitions = [
        indices[i * n:(i + 1) * n] for i in range((len(indices) + n - 1) // n)
    ]
    if train_size % k != 0:  # drop the last, smaller partition
        del partitions[-1]

    error = 0.0
    for i in range(len(partitions)):
        validation_idx = partitions[i]
        training_idx = list(set(indices) - set(partitions[i]))
        X_training, X_validation = Xtrain[training_idx], Xtrain[validation_idx]
        y_training, y_validation = ytrain[training_idx], ytrain[validation_idx]

        #  Precompute sum of squares term for speed
        X_training_SOS = np.sum(X_training**2, axis=1, keepdims=True)
        X_validation_SOS = np.sum(X_validation**2, axis=1, keepdims=True)
        ypred = np.zeros_like(y_validation)

        #  Classify
        dst = sqDistance(X_validation, X_training, X_validation_SOS,
                         X_training_SOS)
        closest = np.argmin(dst, axis=1)
        ypred = y_training[closest]

        #  Report
        errorRate = (ypred != y_validation).mean()
        error = error + errorRate

    return (error / len(partitions))
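
A hedged usage sketch (k and train_size are placeholder choices; the function returns the error fraction averaged over the folds):

avg_error = nfold_cross_validation(10, 1000)
print('mean validation error: {:.2f}%'.format(100 * avg_error))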
Example #11
def sqDistance(p, q, pSOS, qSOS):
    #  Efficiently compute squared Euclidean distances between sets of vectors.

    #  Compute the squared Euclidean distance between every d-dimensional point
    #  in p and every d-dimensional point in q. Both p and q are
    #  npoints-by-ndimensions arrays:
    #      d[i, j] = sum((p[i, :] - q[j, :])**2)

    d = np.add(pSOS, qSOS.T) - 2 * np.dot(p, q.T)
    return d
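
# A quick numeric sanity check of the identity
#   d[i, j] = ||p_i||^2 + ||q_j||^2 - 2 * p_i . q_j
# (a hedged sketch; small random matrices stand in for the MNIST arrays):
#   p = np.random.rand(3, 4); q = np.random.rand(5, 4)
#   pSOS = (p**2).sum(1, keepdims=True); qSOS = (q**2).sum(1, keepdims=True)
#   brute = ((p[:, None, :] - q[None, :, :])**2).sum(-1)
#   assert np.allclose(sqDistance(p, q, pSOS, qSOS), brute)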


np.random.seed(1)

#  Set training & testing
Xtrain, ytrain, Xtest, ytest = mnist.load_data()

#train_size = 10000
test_size = 10000

train_sample_size = [100, 1000, 2500, 5000, 7500, 10000]
n_folds = [3, 10, 50, 100, 1000]
error_rate_sample = []
error_rate_n_fold = []
error_n_fold_means = []

Xtest = Xtest[0:test_size]
ytest = ytest[0:test_size]

#  Precompute sum of squares term for speed
#XtrainSOS = np.sum(Xtrain**2, axis=1, keepdims=True)
Example #12
import numpy
import mnist
from keras.models import Sequential
from keras.layers import Dense
from keras.utils import np_utils

# fix the random seed for reproducible results
numpy.random.seed(42)

# load data
(X_train, y_train), (X_test, y_test) = mnist.load_data()

# reshape the data
X_train = X_train.reshape(60000, 784)

# normalize data
X_train = X_train.astype('float32')
X_train /= 255

# convert labels to one-hot categories
y_train = np_utils.to_categorical(y_train, 10)

# 0 -> [1, 0, 0, 0, 0, 0, 0, 0, 0, 0]
# 2 -> [0, 0, 1, 0, 0, 0, 0, 0, 0, 0] # only the right element == 1

# create sequential model
model = Sequential()

# add network layers
model.add(Dense(800, input_dim=784, kernel_initializer="normal", activation="relu"))
model.add(Dense(10, kernel_initializer="normal", activation="softmax"))
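The snippet ends before compiling; a hedged completion using standard Keras calls (batch size, epoch count, and validation split are placeholder choices):

model.compile(loss="categorical_crossentropy", optimizer="SGD", metrics=["accuracy"])
model.fit(X_train, y_train, batch_size=200, epochs=25, validation_split=0.2, verbose=1)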
Example #13
"""Train a CNN model on MNIST."""

from __future__ import print_function
import keras
from keras.utils import np_utils
import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D

# training specific hyperparameters
batch_size = 128
epochs = 1

# Load the data, shuffled and split between train and test sets
data = mnist.load_data({'dataset': {}})
x_train = data['x_train']
y_train = data['y_train']
x_test = data['x_test']
y_test = data['y_test']

# Bring data into necessary format
x_train = mnist.preprocess(x_train, subtact_mean=False)
x_test = mnist.preprocess(x_test, subtact_mean=False)
y_train = np_utils.to_categorical(y_train, mnist.n_classes)
y_test = np_utils.to_categorical(y_test, mnist.n_classes)

# Define model
input_shape = (mnist.img_rows, mnist.img_cols, 1)
model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3), activation='relu',
                 input_shape=input_shape))
Example #14
import numpy as np
import mnist
# Model, Affine, Sigmoid, and Softmax are assumed to come from the same
# numpyDNN package as the losses import below
from numpyDNN.losses import *

# set hyperparameters
input_size = 28 * 28
output_size = 10

n_hid1 = 100
n_hid2 = 100
n_hid3 = 100

batch_size = 100
learning_rate = 0.01
training_epoch_num = 50

# load mnist data
train_x, train_y = mnist.load_data("mnist_train.csv", one_hot=True)
test_x, test_y = mnist.load_data("mnist_test.csv", one_hot=True)

# build model
model = Model([
    Affine(inputs=input_size, outputs=n_hid1, dtype=np.float32),
    Sigmoid(inputs=n_hid1, dtype=np.float32),
    Affine(inputs=n_hid1, outputs=n_hid2, dtype=np.float32),
    Sigmoid(inputs=n_hid2, dtype=np.float32),
    Affine(inputs=n_hid2, outputs=n_hid3, dtype=np.float32),
    Sigmoid(inputs=n_hid3, dtype=np.float32),
    Affine(inputs=n_hid3, outputs=output_size, dtype=np.float32),
    Softmax(inputs=output_size, dtype=np.float32)
])

# compile model
Example #15

import numpy as np
import mnist


def feed_forward(X, weights):
    a = [X]
    for w in weights:
        a.append(np.maximum(a[-1].dot(w), 0))
    return a


def grads(X, Y, weights):
    # backpropagation for a ReLU network with a linear output layer;
    # returns one gradient array per weight matrix (a list avoids numpy's
    # refusal to build ragged object arrays)
    a = feed_forward(X, weights)
    gs = [None] * len(weights)
    delta = a[-1] - Y
    gs[-1] = a[-2].T.dot(delta)
    for i in range(len(a) - 2, 0, -1):
        delta = (a[i] > 0) * delta.dot(weights[i].T)
        gs[i - 1] = a[i - 1].T.dot(delta)
    return [g / len(X) for g in gs]


trX, trY, teX, teY = mnist.load_data()
weights = [np.random.randn(*w) * 0.1 for w in [(784, 100), (100, 10)]]
num_epochs, batch_size, learn_rate = 30, 20, 0.1

for i in range(num_epochs):
    for j in range(0, len(trX), batch_size):
        X, Y = trX[j:j + batch_size], trY[j:j + batch_size]
        weights = [w - learn_rate * g for w, g in zip(weights, grads(X, Y, weights))]
    prediction = np.argmax(feed_forward(teX, weights)[-1], axis=1)
    print(i, np.mean(prediction == np.argmax(teY, axis=1)))
Example #16

import tensorflow as tf
import numpy as np
import mnist

def init_weights(shape):
    return tf.Variable(tf.random_normal(shape, stddev=0.01))

def feed_forward(X, w_h, w_o):
    h = tf.nn.sigmoid(tf.matmul(X, w_h))
    return tf.matmul(h, w_o)

(trX, trY), _, (teX, teY) = mnist.load_data(one_hot=True)

w_h, w_o = init_weights([784, 100]), init_weights([100, 10])
num_epochs, batch_size, learn_rate = 30, 10, 0.2

X = tf.placeholder("float", [None, 784])
Y = tf.placeholder("float", [None, 10])
out = feed_forward(X, w_h, w_o)
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=out, labels=Y))
train = tf.train.GradientDescentOptimizer(learn_rate).minimize(cost)

sess = tf.Session()
sess.run(tf.global_variables_initializer())
for i in range(num_epochs):
    for j in range(0, len(trX), batch_size):
        batch_x, batch_y = trX[j:j+batch_size], trY[j:j+batch_size]
        sess.run(train, feed_dict={X: batch_x, Y: batch_y})
    prediction = sess.run(tf.argmax(out, 1), feed_dict={X: teX, Y: teY})
    print(i, np.mean(prediction == np.argmax(teY, axis=1)))
Example #18

import keras
import mnist


def create_cnn(input_shape, n_outputs):
    model = keras.models.Sequential()

    model.add(keras.layers.InputLayer(input_shape))
    model.add(keras.layers.Conv2D(16, (7, 7), activation='relu'))
    model.add(keras.layers.Flatten())
    model.add(keras.layers.Dense(n_outputs, activation='softmax'))
    return model


(train_images, train_labels), (val_images, val_labels), (test_images, test_labels) = \
    mnist.load_data(nval=1000)

cnn = create_cnn(input_shape=(28, 28, 1), n_outputs=10)
sgd = keras.optimizers.SGD(lr=1e-2)

cnn.compile(loss='categorical_crossentropy',
            optimizer=sgd,
            metrics=['accuracy'])

logs = cnn.fit(train_images,
               train_labels,
               batch_size=32,
               epochs=20,
               verbose=1,
               validation_data=(val_images, val_labels))
Example #19
import theano
import theano.tensor as T
import numpy as np
import mnist


def init_weights(n_in, n_out):
    weights = np.random.randn(n_in, n_out) / np.sqrt(n_in)
    return theano.shared(np.asarray(weights, dtype=theano.config.floatX))


def feed_forward(X, w_h, w_o):
    h = T.nnet.sigmoid(T.dot(X, w_h))
    return T.nnet.softmax(T.dot(h, w_o))


trX, trY, teX, teY = mnist.load_data(one_hot=True)

w_h, w_o = init_weights(28 * 28, 100), init_weights(100, 10)
num_epochs, batch_size, learn_rate = 30, 10, 0.2

X, Y = T.fmatrices('X', 'Y')
y_ = feed_forward(X, w_h, w_o)

weights = [w_h, w_o]
grads = T.grad(cost=T.nnet.categorical_crossentropy(y_, Y).mean(), wrt=weights)
train = theano.function(inputs=[X, Y],
                        updates=[[w, w - g * learn_rate]
                                 for w, g in zip(weights, grads)],
                        allow_input_downcast=True)
predict = theano.function(inputs=[X], outputs=T.argmax(y_, axis=1))
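
The snippet stops after defining predict; a training loop in the style of the neighbouring examples (reusing the epoch and batch variables above) might look like:

for i in range(num_epochs):
    for j in range(0, len(trX), batch_size):
        train(trX[j:j + batch_size], trY[j:j + batch_size])
    print(i, np.mean(predict(teX) == np.argmax(teY, axis=1)))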
Example #20
#!/usr/bin/env python
"""Train MNIST classifier."""

import mnist
import numpy as np
import tensorflow as tf

# training specific hyperparameters
batch_size = 128
epochs = 1

# Load the data, shuffled and split between train and test sets
data = mnist.load_data({'dataset': {}})
x_train = data['x_train']
y_train = data['y_train']
x_test = data['x_test']
y_test = data['y_test']

feature_columns = [tf.contrib.layers.real_valued_column("", dimension=784)]  # 28*28 pixels per image

# Build a 3-layer DNN with 10, 20, 10 units respectively.
classifier = tf.contrib.learn.DNNClassifier(feature_columns=feature_columns,
                                            hidden_units=[10, 20, 10],
                                            n_classes=10,
                                            model_dir="/tmp/mnist_model")


# Define the training inputs
def get_train_inputs():
    x = tf.constant(x_train)
    y = tf.constant(y_train)
    return x, y
Example #21
import numpy as np
from keras.layers import Input
from keras.models import Model
from keras.optimizers import Adam
import mnist
import matplotlib.pyplot as plt
import keras.backend.tensorflow_backend as KTF
from gan import build_generator, build_discriminator, plot_images, make_trainable, get_session

log_dir = "."
# allows 2 jobs per GPU; please do not change this during the tutorial
KTF.set_session(get_session())

# prepare MNIST dataset
data = mnist.load_data()
X_train = data.train_images.reshape(-1, 28, 28, 1) / 255.
X_test = data.test_images.reshape(-1, 28, 28, 1) / 255.

# plot some real images
idx = np.random.choice(len(X_train), 16)
plot_images(X_train[idx], fname=log_dir + '/real_images.png')

# --------------------------------------------------
# Set up generator, discriminator and GAN (stacked generator + discriminator)
# Feel free to modify, e.g.:
# - the provided models (see gan.py)
# - the learning rate
# - the batchsize
# --------------------------------------------------

# Set up generator
Example #22
def test_mnist_load():
    mnist.load_data("mnist")
Example #23
import minimalist_nn
import numpy as np
import mnist

trX, trY, teX, teY = mnist.load_data(flatten=True)
weights = [
    np.random.randn(*w) * 0.1 for w in [(784, 100), (100, 100), (100, 10)]
]

minimalist_nn.train(trX, trY, teX, teY, weights)
Example #24
    def __init__(self, learning_rate):
        self.trX, self.trY, self.teX, self.teY = mnist.load_data(one_hot=False,
                                                                 flatten=False)
        self.NCLASSES = 10
        self.w = None
        self.learning_rate = learning_rate
Example #25
def compute_accuracy(X_test, Y_test, parameters):
    m = Y_test.shape[1]
    AL, _ = forward_propagation(X_test, parameters)
    Yp = AL.argmax(axis=0).reshape(1, -1)
    p = np.sum(Yp == Y_test) / m
    return p


########### start of the program ##############

output_channel = 10
num_iteration = 2000

# load the data and put it into the required shape
X, Y = mnist.load_data("training")
XX, YY = mnist.load_data("testing")

X_train = X[0:30000, :, :]
Y_train = Y[:, 0:30000]
X_test = XX[0:5000, :, :]
Y_test = YY[:, 0:5000]

X_train_flatten = X_train.reshape(X_train.shape[0], -1).T
X_test_flatten = X_test.reshape(X_test.shape[0], -1).T
Y_train_all = one_to_all_convertion(Y_train, output_channel)

# define the layer sizes
img_size = X_train_flatten.shape[0]  #28*28=784
hidden_layer_1 = 10
hidden_layer_2 = 5
Example #26
def run(hyperparams):
    ##########################################
    # Your DL start here. See mnist_mlp.py   #
    ##########################################
    '''Trains a simple deep NN on the MNIST dataset.

    Gets to 98.40% test accuracy after 20 epochs
    (there is *a lot* of margin for parameter tuning).
    2 seconds per epoch on a K520 GPU.
    '''

    # from __future__ import print_function

    import keras
    from keras.datasets import mnist
    from keras.models import Sequential
    from keras.layers import Dense, Dropout
    from keras.optimizers import RMSprop

    batch_size = hyperparams['batch_size']
    num_classes = 10
    epochs = hyperparams['epochs']

    activation = hyperparams['activation']
    optimizer = hyperparams['optimizer']

    # the data, split between train and test sets
    (x_train, y_train), (x_test, y_test) = mnist.load_data()

    x_train = x_train.reshape(60000, 784)
    x_test = x_test.reshape(10000, 784)
    x_train = x_train.astype('float32')
    x_test = x_test.astype('float32')
    x_train /= 255
    x_test /= 255
    print(x_train.shape[0], 'train samples')
    print(x_test.shape[0], 'test samples')

    # convert class vectors to binary class matrices
    y_train = keras.utils.to_categorical(y_train, num_classes)
    y_test = keras.utils.to_categorical(y_test, num_classes)

    model = Sequential()
    model.add(Dense(512, activation=activation, input_shape=(784,)))
    model.add(Dropout(0.2))
    model.add(Dense(512, activation=activation))
    model.add(Dropout(0.2))
    model.add(Dense(num_classes, activation='softmax'))

    model.summary()

    model.compile(loss='categorical_crossentropy',
                  optimizer=optimizer,
                  metrics=['accuracy'])

    history = model.fit(x_train, y_train,
                        batch_size=batch_size,
                        epochs=epochs,
                        verbose=1,
                        validation_data=(x_test, y_test))
    score = model.evaluate(x_test, y_test, verbose=0)
    print('Test loss:', score[0])
    print('Test accuracy:', score[1])
    ##########################################
    # End of mnist_mlp.py ####################
    ##########################################
    return history
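
A hedged invocation sketch (the dictionary keys match the lookups at the top of run; the values are placeholders):

history = run({'batch_size': 128, 'epochs': 1,
               'activation': 'relu', 'optimizer': 'rmsprop'})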
Example #27

import tensorflow as tf
import numpy as np
import os
import mnist  # note: rebound to tf.keras.datasets.mnist below
import matplotlib.pyplot as plt
from keras.models import Sequential # ann architecture
from keras.layers import Dense # Layer in ann
from keras.utils import to_categorical
from keras.models import load_model

# load dataset from mnist
mnist = tf.keras.datasets.mnist

# normalize the images to [-0.5, 0.5]
(train_images, train_labels), (test_images, test_labels) = mnist.load_data()
train_images, test_images = (train_images / 255) - 0.5, (test_images / 255) - 0.5

train_images = train_images.reshape((-1, 784))
test_images = test_images.reshape((-1, 784))

# Define the model
def baselineK_model():
    # create model
    model = Sequential()
    model.add( Dense(64, activation='relu', input_dim=784))
    model.add( Dense(64, activation='relu'))
    model.add( Dense(10, activation='softmax'))

    # Compile model
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model
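
The snippet ends at the model builder; a hedged completion using the imports already present above (epoch count and batch size are placeholders):

model = baselineK_model()
model.fit(train_images, to_categorical(train_labels), epochs=5, batch_size=32)
print(model.evaluate(test_images, to_categorical(test_labels)))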
Example #28
import mnist
from keras.models import Sequential
from keras.layers.core import Dense, Activation, Flatten
from keras.layers.convolutional import Convolution2D, MaxPooling2D, ZeroPadding2D
from keras.layers.normalization import BatchNormalization
from keras.optimizers import SGD

trX, trY, teX, teY = mnist.load_data(one_hot=True, reshape=(-1, 1, 28, 28))

model = Sequential()
model.add(ZeroPadding2D((2, 2), input_shape=trX.shape[1:]))

model.add(Convolution2D(16, 3, 3, border_mode='same', activation='relu'))
model.add(BatchNormalization(axis=1))
model.add(Convolution2D(16, 3, 3, border_mode='same', activation='relu'))
model.add(BatchNormalization(axis=1))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Convolution2D(32, 3, 3, border_mode='same', activation='relu'))
model.add(BatchNormalization(axis=1))
model.add(Convolution2D(32, 3, 3, border_mode='same', activation='relu'))
model.add(BatchNormalization(axis=1))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Convolution2D(64, 3, 3, border_mode='same', activation='relu'))
model.add(BatchNormalization(axis=1))
model.add(Convolution2D(64, 3, 3, border_mode='same', activation='relu'))
model.add(BatchNormalization(axis=1))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Flatten())
Example #29
import numpy as np 
import optimise
from nn import NeuralNetwork
import loadData as ld
import mnist

# ds = ld.loadPickle('data/mnist.pkl.gz')


# mnist = ld.loadMNIST('data/mnist.pkl.gz')
mnist = mnist.load_data()
print(mnist)

# exit()

rng = np.random.RandomState(1111)

nn = NeuralNetwork(rng, 28*28, 10, [500, 500, 500])
optimise.bsgd(nn, mnist)
Example #30
def test_dots(t, dots):
    i = int(t / presentation_time) % len(test_images)
    j = np.argmax(dots)
    return test_labels[i] == vocab_labels[j]

# --- load the RBM data
rbm_file = 'rbm.npz'
if not os.path.exists(rbm_file):
    urllib.request.urlretrieve("http://files.figshare.com/1448053/rbm.npz", rbm_file)

rbm = np.load(rbm_file)
weights = rbm['weights']
biases = rbm['biases']

# --- load the testing data
[train_set, test_set] = mnist.load_data(train=True, valid=False, test=True)
train_images, train_labels = train_set
test_images, test_labels = test_set

# shuffle
rng = np.random.RandomState(92)
inds = rng.permutation(len(test_images))
test_images = test_images[inds]
test_labels = test_labels[inds]

# --- find average semantic pointers (codes) for each label
train_codes = forward(train_images, weights, biases)
vocab_labels = np.unique(train_labels)
vocab_codes = np.zeros((len(vocab_labels), train_codes.shape[-1]))
for i, label in enumerate(vocab_labels):
    vocab_codes[i] = train_codes[train_labels.flatten() == label].mean(0)
Example #31
import mnist
from keras.models import Sequential
from keras.layers.core import Dense, Activation, Flatten
from keras.layers.convolutional import Convolution2D, MaxPooling2D
from keras.optimizers import SGD

trX, trY, teX, teY = mnist.load_data(one_hot=True, reshape=(-1, 1, 28, 28))

model = Sequential()
model.add(Convolution2D(8, 5, 5, input_shape=trX.shape[1:]))
model.add(Activation('sigmoid'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(100))
model.add(Activation('sigmoid'))
model.add(Dense(10))
model.add(Activation('softmax'))

num_epochs, batch_size, learn_rate = 30, 10, 0.2

model.compile(SGD(learn_rate), 'categorical_crossentropy', metrics=['accuracy'])
model.fit(trX, trY, batch_size, num_epochs, verbose=1, validation_data=(teX, teY))
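
As a hedged follow-up (the fit above already reports validation metrics; this re-checks the test set with a standard evaluate call):

loss, acc = model.evaluate(teX, teY, batch_size=batch_size, verbose=0)
print('test accuracy: %.4f' % acc)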
Example #32
    def savenetwork(self, filename):
        """Pickle the network weights to a file."""
        with open(filename, "wb") as f:
            pickle.dump(self.W, f)

    def loadnetwork(self, filename):
        """Restore pickled network weights from a file."""
        with open(filename, "rb") as f:
            self.W = pickle.load(f)


if __name__ == "__main__":
    Mnist = mnist.load_data()
    margin, pool = 2, 4
    data, target = mnist.data_process2D(Mnist, margin, pool, "cosine")
    Dbond, d, Dout = 6, 2, 10
    lamb = 0
    tnn = tnn_classifier(Dbond, d, Dout)
    trainend, testend = 40000, 50000
    train_data, train_target = data[0:trainend], target[0:trainend]
    test_data, test_target = data[trainend:testend], target[trainend:testend]
    tnn.initialize(train_data.shape[1], train_data.shape[2])
    tnn.isometrize()
    trainlost, testlost, trainprecision, testprecision = tnn.sweep(
        train_data, train_target, test_data, test_target, lamb)
    tnn.savenetwork("WtrainedWithLinearE.txt")
    plt.figure("Precision")
    plt.plot(trainprecision)
    plt.plot(testprecision)
Example #33
import numpy as np
import mnist


def feed_forward(X, weights):
    a = [X]
    for w in weights:
        a.append(sigmoid(a[-1].dot(w)))
    return a

def grads(X, Y, weights):
    # backpropagation for a sigmoid network trained with cross-entropy;
    # returns one gradient array per weight matrix
    a = feed_forward(X, weights)
    gs = [None] * len(weights)
    delta = a[-1] - Y  # cross-entropy gradient at the output
    gs[-1] = np.dot(a[-2].T, delta)
    for i in range(len(a) - 2, 0, -1):
        delta = np.dot(delta, weights[i].T) * d_sigmoid(a[i])
        gs[i - 1] = np.dot(a[i - 1].T, delta)
    return [g / len(X) for g in gs]

sigmoid = lambda x: 1 / (1 + np.exp(-x))
d_sigmoid = lambda y: y * (1 - y)

(trX, trY), _, (teX, teY) = mnist.load_data()
trY = mnist.to_one_hot(trY)

weights = [
    np.random.randn(784, 100) / np.sqrt(784),
    np.random.randn(100, 10) / np.sqrt(100)]
num_epochs, batch_size, learn_rate = 30, 10, 0.2

for i in range(num_epochs):
    for j in range(0, len(trX), batch_size):
        X, Y = trX[j:j + batch_size], trY[j:j + batch_size]
        weights = [w - learn_rate * g for w, g in zip(weights, grads(X, Y, weights))]
    out = feed_forward(teX, weights)[-1]
    print(i, np.mean(np.argmax(out, axis=1) == teY))
Example #34
def run(gParameters):
    ##########################################
    # Your DL start here. See mnist_cnn.py   #
    ##########################################
    '''Trains a simple convnet on the MNIST dataset.

    Gets to 99.25% test accuracy after 12 epochs
    (there is still a lot of margin for parameter tuning).
    16 seconds per epoch on a GRID K520 GPU.
    '''

    # from __future__ import print_function

    import keras
    from keras.datasets import mnist
    from keras.models import Sequential
    from keras.layers import Dense, Dropout, Flatten
    from keras.layers import Conv2D, MaxPooling2D
    from keras import backend as K

    batch_size = gParameters['batch_size']
    num_classes = 10
    epochs = gParameters['epochs']

    activation = gParameters['activation']
    optimizer = gParameters['optimizer']

    # input image dimensions
    img_rows, img_cols = 28, 28

    # the data, split between train and test sets
    (x_train, y_train), (x_test, y_test) = mnist.load_data()

    if K.image_data_format() == 'channels_first':
        x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
        x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)
        input_shape = (1, img_rows, img_cols)
    else:
        x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
        x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
        input_shape = (img_rows, img_cols, 1)

    x_train = x_train.astype('float32')
    x_test = x_test.astype('float32')
    x_train /= 255
    x_test /= 255
    print('x_train shape:', x_train.shape)
    print(x_train.shape[0], 'train samples')
    print(x_test.shape[0], 'test samples')

    # convert class vectors to binary class matrices
    y_train = keras.utils.to_categorical(y_train, num_classes)
    y_test = keras.utils.to_categorical(y_test, num_classes)

    model = Sequential()
    model.add(
        Conv2D(32,
               kernel_size=(3, 3),
               activation='relu',
               input_shape=input_shape))
    model.add(Conv2D(64, (3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    model.add(Flatten())
    model.add(Dense(128, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(num_classes, activation='softmax'))

    model.summary()

    model.compile(loss=keras.losses.categorical_crossentropy,
                  optimizer=optimizer,
                  metrics=['accuracy'])

    history = model.fit(x_train,
                        y_train,
                        batch_size=batch_size,
                        epochs=epochs,
                        verbose=1,
                        validation_data=(x_test, y_test))
    score = model.evaluate(x_test, y_test, verbose=0)
    print('Test loss:', score[0])
    print('Test accuracy:', score[1])
    ##########################################
    # End of mnist_cnn.py ####################
    ##########################################

    return history
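
A hedged invocation sketch, parallel to the MLP example above (the keys match the lookups at the top of run; the values are placeholders):

history = run({'batch_size': 128, 'epochs': 1,
               'activation': 'relu', 'optimizer': 'adadelta'})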
Example #35
import numpy as np
import mnist


def feed_forward(X, weights):
    a = [X]
    for w in weights:
        a.append(sigmoid(a[-1].dot(w)))
    return a

def grads(X, Y, weights):
    # backpropagation for a sigmoid network trained with cross-entropy;
    # returns one gradient array per weight matrix
    a = feed_forward(X, weights)
    gs = [None] * len(weights)
    delta = a[-1] - Y  # cross-entropy gradient at the output
    gs[-1] = np.dot(a[-2].T, delta)
    for i in range(len(a) - 2, 0, -1):
        delta = np.dot(delta, weights[i].T) * d_sigmoid(a[i])
        gs[i - 1] = np.dot(a[i - 1].T, delta)
    return [g / len(X) for g in gs]

sigmoid = lambda x: 1 / (1 + np.exp(-x))
d_sigmoid = lambda y: y * (1 - y)

trX, trY, teX, teY = mnist.load_data(one_hot=True)

weights = [
    np.random.randn(28*28, 100) / np.sqrt(28*28),
    np.random.randn(100, 10) / np.sqrt(100)]
num_epochs, batch_size, learn_rate = 30, 10, 0.2

for i in range(num_epochs):
    for j in range(0, len(trX), batch_size):
        X, Y = trX[j:j + batch_size], trY[j:j + batch_size]
        weights = [w - learn_rate * g for w, g in zip(weights, grads(X, Y, weights))]
    prediction = np.argmax(feed_forward(teX, weights)[-1], axis=1)
    print(i, np.mean(prediction == np.argmax(teY, axis=1)))
Example #36

def generate_false_labels(Y):
    """
    Y: one-hot labels of shape (num_examples, 10).
    Returns a random integer class label for each example.
    """
    return np.random.randint(0, NUM_CLASSES, Y.shape[0])


if __name__ == "__main__":
    # Load classifier parameters
    W = np.load("./params/W.npy")
    b = np.load("./params/b.npy")

    # Load data
    train_X, train_Y, test_X, test_Y = load_data()

    # Generate a random batch on *test data*
    X, Y = get_batch(test_X, test_Y)

    # Perform adversarial attacks; for each of these, you should also keep
    # score of the classifier's accuracy during each type of attack to compare
    # afterwards

    # First compute gradients
    grad = gradients(W, b, X, Y)

    Y = np.argmax(Y, axis=1)

    # 0. original example (not an attack!)
    Y_hat_original = np.argmax(forward(W, b, X), axis=1)
Example #37
# -*- coding: utf-8 -*-
import pickle
import sys
from mnist import load_data
# f = open('mnist.pkl', 'rb')
# train_set, valid_set, test_set = pickle.load(f)
train_set, test_set = load_data()
train_set_x, train_set_y = train_set
i = 7
for y in range(28):
    for x in range(28):
        if train_set_x[i][y][x] < 0.5:
            sys.stdout.write(" ")
        elif train_set_x[i][y][x] < 0.8:
            sys.stdout.write("+")
        else:
            sys.stdout.write("*")
    sys.stdout.write("\n")
print("this is labeled ", train_set_y[i])
Example #38
        output = self.forward_pass(x.reshape(-1, 1))
        pred = np.argmax(output)
        predictions.append(pred == np.argmax(y))

    return np.mean(predictions)


  def total_loss(loss_list):
    size = loss_list.size
    return np.sum(loss_list)/size

if __name__ == "__main__":
    #load the data
    train_data_size = 6000  # only a subset of the training data is used; don't change for submission
    test_data_size = 1000
    train_data = mnist.load_data("train-images-idx3-ubyte.gz")[0:train_data_size]
    train_labels = mnist.load_labels("train-labels-idx1-ubyte.gz")[0:train_data_size]
    test_data = mnist.load_data("t10k-images-idx3-ubyte.gz")[train_data_size:train_data_size + test_data_size]
    test_labels = mnist.load_labels("t10k-labels-idx1-ubyte.gz")[train_data_size:train_data_size + test_data_size]

    y_train = NeuralNetwork.prep_labels(train_labels)
    y_test = NeuralNetwork.prep_labels(test_labels)
    # Initialise the Neural Network
    hparams = {"l_rate": 0.001}
    np.random.seed(42)

    network_params = NeuralNetwork.init_params(num_hidden=160, input_size=784, output_size=10)
    feedforwardnn = NeuralNetwork(hparams, train_data, y_train, test_data, y_test, network_params)
    # Train
    feedforwardnn.train(train_data, y_train, test_data, y_test, 10)
    # Evaluate
Example #39

from hyperopt import Trials, STATUS_OK, tpe
from hyperas import optim
from hyperas.distributions import choice, uniform, conditional
from keras.datasets import mnist
from keras.utils import np_utils
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation


def data():
    '''
    Data providing function:

    This function is separated from model() so that hyperopt
    won't reload data for each evaluation run.
    '''
    (X_train, y_train), (X_test, y_test) = mnist.load_data()
    X_train = X_train.reshape(60000, 784)
    X_test = X_test.reshape(10000, 784)
    X_train = X_train.astype('float32')
    X_test = X_test.astype('float32')
    X_train /= 255
    X_test /= 255
    nb_classes = 10
    Y_train = np_utils.to_categorical(y_train, nb_classes)
    Y_test = np_utils.to_categorical(y_test, nb_classes)
    return X_train, Y_train, X_test, Y_test


def keras_fmin_fnct(space):

    '''
    Data providing function: