Example #1

import numpy as np

from common import load_mnist


def KNN_predict(x, y, predx, K=5, L=None):
    n, dim = x.shape
    if L is None:
        L = np.eye(dim)

    Lx = np.dot(x, L)
    Lpredx = np.dot(predx, L)

    print('Predicting ...')
    predict = []
    for ind, ele in enumerate(Lpredx):
        dist = ((ele - Lx)**2).sum(1)
        pred = y[dist.argsort()[1:K+1]]
        bincount = np.bincount(pred)
        maxcount = bincount.max()
        candidates = [predy for predy in pred if bincount[predy] == maxcount]
        predict.append( np.random.choice(candidates) )
    return np.array(predict)
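
# A minimal sketch of the majority vote above on a toy neighbour-label array, assuming
# integer class labels (toy values, not from MNIST):
#   pred = np.array([3, 1, 3, 1, 7])
#   bincount = np.bincount(pred)      # -> [0, 2, 0, 2, 0, 0, 0, 1]
#   maxcount = bincount.max()         # -> 2, so classes 1 and 3 are tied
#   candidates = [p for p in pred if bincount[p] == maxcount]   # -> [3, 1, 3, 1]
#   np.random.choice(candidates)      # the tie is broken at random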

if __name__ == '__main__':
    train_data, test_data = load_mnist(percentage=0.1, skip_valid=True)
    train_x, train_y = train_data
    test_x, test_y = test_data

    predict_y = KNN_predict(train_x, train_y, test_x, K=5)
    correct = (predict_y==test_y).sum()
    print('acc = {}/{} = {}%'.format(correct,
            predict_y.shape[0],
            float(correct) / predict_y.shape[0] * 100))

Example #2

    # ------------------------------------------------------------------------------
    # TRAINING

    learning_rate = 0.001  # in my experience, larger learning rates lead to divergence

    # number of training epochs, i.e., passes over the training set.
    # There are 50,000 samples in the training set, so 200 epochs means training on 10,000,000
    # samples. In the paper, they keep training for much longer (around 100,000,000 samples).
    epoch_count = 10

    # report the log-likelihood bound on the training set after this many training samples
    report_interval = 20000

    # load the training data
    (tx, ty), (vx, vy), (_, _) = load_mnist(path='../datasets')
    train_n = tx.shape[0]
    val_n = vx.shape[0]
    # we load the data into shared variables; this is recommended when training on a GPU.
    train_x = theano.shared(np.asarray(tx, dtype=np.float32))
    val_x = theano.shared(np.asarray(vx, dtype=np.float32))

    # index of the first sample in batch
    batch_start = T.lscalar()

    # Theano function for training
    # This function feeds the current batch to the model and applies simple gradient-descent updates to the model parameters
    train_model = theano.function(
        [batch_start],
        ll_bound,
        updates=((w1, w1 - learning_rate * dw1),
                 (b1, b1 - learning_rate * db1)),
        # the remaining arguments of this call (e.g. a givens mapping from batch_start
        # to a batch of train_x) are not shown in this snippet
    )
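
    # A minimal sketch of the loop the configuration above is set up for; batch_size and
    # the reporting details below are assumptions, not part of the original snippet.
    batch_size = 100
    for epoch in range(epoch_count):
        for start in range(0, train_n, batch_size):
            # one gradient step on the current batch; returns the log-likelihood bound
            bound = train_model(start)
            if start % report_interval == 0:
                print('epoch {}, sample {}: ll bound = {}'.format(epoch, start, bound))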
Example #3

"""
Adventures in Deep Learning

Multilayer Perceptron for MNIST Handwritten Digit Recognition

Goker Erdogan
https://github.com/gokererdogan
"""
import numpy as np
from keras.models import Sequential
from keras.layers.core import Dense, Activation
from keras.utils.np_utils import to_categorical

from common import load_mnist

# load data
(x_train, y_train), (x_val, y_val), (x_test, y_test) = load_mnist()
y_train = to_categorical(y_train)
y_val = to_categorical(y_val)

# build model
model = Sequential()
model.add(Dense(input_dim=784, output_dim=400))
model.add(Activation("sigmoid"))
model.add(Dense(input_dim=400, output_dim=10))
model.add(Activation("softmax"))

model.compile(optimizer="sgd", loss="categorical_crossentropy")

# fit model
model.fit(x=x_train, y=y_train, batch_size=128, nb_epoch=5, verbose=1,
          validation_data=(x_val, y_val))
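
# A minimal sketch of scoring the held-out test split loaded above; this usage is an
# assumption and not part of the original script (y_test holds integer class labels).
test_probs = model.predict(x_test, verbose=0)
test_pred = np.argmax(test_probs, axis=1)
test_acc = float((test_pred == y_test).sum()) / y_test.shape[0]
print('test acc = {}%'.format(test_acc * 100))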
Example #4

"""
Multilayer Perceptron for MNIST Handwritten Digit Recognition

07 July 2018
Balavivek Sivanantham
https://github.com/bsivanantham
"""
import numpy as np
from keras.models import Sequential
from keras.layers.core import Dense, Activation
from keras.utils.np_utils import to_categorical

from common import load_mnist

# load data
(x_train, y_train), (x_val, y_val), (x_test, y_test) = load_mnist()
y_train = to_categorical(y_train)
y_val = to_categorical(y_val)

# build model
model = Sequential()
model.add(Dense(input_dim=784, output_dim=400))
model.add(Activation("sigmoid"))
model.add(Dense(input_dim=400, output_dim=10))
model.add(Activation("softmax"))

model.compile(optimizer="sgd", loss="categorical_crossentropy")

# fit model
model.fit(x=x_train,
          y=y_train,
Example #5

            active_set = new_active_set
            violate_set = new_violate_set
            gradient = new_gradient
            self.M = newM
            t += 1

        return self


if __name__ == '__main__':
#   iris_data = load_iris()
#   X = iris_data['data']
#   Y = iris_data['target']
#    trainx, testx, trainy, testy = train_test_split(X, Y, test_size=0.66)

    (trainx, trainy), (testx, testy) = load_mnist(percentage=0.01, skip_valid=True)

    pca = PCA(whiten=True)
    pca.fit(trainx)
    components, variance = 0, 0.0
    for components, ele in enumerate(pca.explained_variance_ratio_):
        variance += ele
        if variance > 0.90: break
    components += 1
    print('n_components=%d' % components)
    pca.set_params(n_components=components)
    pca.fit(trainx)

    trainx = pca.transform(trainx)
    testx = pca.transform(testx)
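
    # A shorter equivalent, depending on the scikit-learn version: PCA accepts a float
    # n_components as an explained-variance target, e.g.
    #   pca = PCA(n_components=0.90, whiten=True)
    #   trainx = pca.fit_transform(trainx)
    #   testx = pca.transform(testx)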
Example #6

import numpy as np
from common import load_mnist
from knn import KNN_predict
from sklearn.decomposition import PCA
from sklearn.neighbors import KNeighborsClassifier, DistanceMetric  

from metric_learn import LMNN

def knn(train_x, train_y, test_x, test_y):
    neigh = KNeighborsClassifier(n_neighbors=5)
    neigh.fit(train_x, train_y)
    acc = (neigh.predict(test_x) == test_y).sum()
    return float(acc) / test_y.shape[0]

if __name__ == '__main__':
    (train_x, train_y), (test_x, test_y) = load_mnist(percentage=0.01, skip_valid=True)

    pca = PCA(whiten=True)
    pca.fit(train_x)
    components, variance = 0, 0.0
    for components, ele in enumerate(pca.explained_variance_ratio_):
        variance += ele
        if variance > 0.90: break
    components += 1
    print('n_components=%d' % components)
    pca.set_params(n_components=components)
    pca.fit(train_x)

    train_x = pca.transform(train_x)
    test_x = pca.transform(test_x)
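
    # A minimal sketch of how the imports above are typically combined; LMNN's constructor
    # arguments differ between metric_learn releases, so k=5 here is an assumption.
    print('baseline knn acc = {}'.format(knn(train_x, train_y, test_x, test_y)))

    lmnn = LMNN(k=5)
    lmnn.fit(train_x, train_y)
    train_lx = lmnn.transform(train_x)
    test_lx = lmnn.transform(test_x)
    print('knn acc after LMNN = {}'.format(knn(train_lx, train_y, test_lx, test_y)))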