import theano
import theano.tensor as T
import numpy as np
import layers
import load_mnist
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams

# Load MNIST and fold the validation split into the training set, since this
# script trains on train+valid and evaluates on the held-out test set.
x_test, t_test, x_valid, t_valid, x_train, t_train = load_mnist.load()

x_train = np.concatenate((x_train, x_valid))  # axis=0 is the default
t_train = np.concatenate((t_train, t_valid))

# Symbolic Theano variables: input batch, targets, learning rate, and the
# quantization bit-width B.
x, t = T.matrix(), T.matrix()
lr, B = T.scalar(), T.scalar()

# Pretrained weights for the BC architecture:
# 2x128C3 - MP2 - 2x256C3 - MP2 - 2x512C3 - MP2 - 2x1024FC - 10
params = layers.loadMNIST('mnist_pretrained_plain.save')
def feedForward(x, params,B):
    """Forward pass of the quantized network.

    x: symbolic input matrix; params: list of per-layer [W, b] pairs loaded
    above; B: quantization bit-width for activations.
    NOTE(review): only the first layer is visible — the rest of this
    definition is cut off by the fragment boundary below.
    """
    # Quantize the input activations to B bits.
    x = layers.quantizeAct(x,B)
    l=0
    current_params = params[l]
    # Weights get 2 extra bits of precision relative to activations (B+2).
    # NOTE(review): this mutates the shared `params` list in place — confirm
    # that is intended (repeated calls would re-quantize already-quantized
    # weights).
    current_params[0] = layers.quantizeWeight(current_params[0],B+2)
    current_params[1] = layers.quantizeWeight(current_params[1],B+2)
    c1 = layers.linOutermost(x,current_params)
    c1 = layers.slopedClipping(c1)
    c1 = layers.quantizeAct(c1,B)

    l+=1
# ---- Пример #2 (Example #2, score 0) — start of an unrelated scraped code fragment ----
# SGD update rule applied to both trainable parameters: p <- p - lr * grad(p).
updates = [(param, param - learning_rate * grad)
           for param, grad in ((classifier.W, g_W), (classifier.b, g_b))]


def _minibatch(data, idx, size):
    # Symbolic slice selecting minibatch number `idx` of `size` rows.
    return data[idx * size:(idx + 1) * size]


# Compiled Theano functions: `train_model` performs one SGD step on a
# minibatch; `test_model` / `validate_model` report the classification error
# on their respective sets.
train_model = theano.function(
    inputs=[index],
    outputs=cost,
    updates=updates,
    givens={
        x: _minibatch(train_set_x, index, batch_size),
        y: _minibatch(train_set_y, index, batch_size),
    })

test_model = theano.function(
    inputs=[index],
    outputs=classifier.errors(y),
    givens={
        x: _minibatch(test_set_x, index, batch_size),
        y: _minibatch(test_set_y, index, batch_size),
    })

validate_model = theano.function(
    inputs=[index],
    outputs=classifier.errors(y),
    givens={
        x: _minibatch(valid_set_x, index, batch_size),
        y: _minibatch(valid_set_y, index, batch_size),
    })

train_set, valid_set, test_set = load_mnist.load(mnist_file)
# ---- Пример #3 (Example #3, score 0) — start of an unrelated scraped code fragment ----
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_validate
from sklearn.metrics import accuracy_score
from sklearn.neighbors import KNeighborsClassifier
from sklearn.decomposition import PCA

#Load our implimentations
import freeman_code as fc
import knn
import load_mnist as lm
import smtplib
import pickle

#%%
# Load the MNIST dataset (first 2000 rows of the CSV).
images, labels, labels_vector = lm.load('Data/train.csv', 2000)
#images,labels,labels_vector = lm.load('train.csv',2000)

# Split into training and validation subsets: index 0 holds the training
# portion, index 1 the validation portion.
train_images, val_images = images[0], images[1]
train_labels_vect, val_labels_vect = labels_vector[0], labels_vector[1]

print('load complete')


#%%
#Preprocess and convert into binary images
def preprocess_convert(train_images):
    """Preprocess each image in *train_images* into a binary image.

    NOTE(review): the loop body is cut off by the fragment boundary below —
    the actual binarization logic is not visible here.
    """
    for i in range(len(train_images)):
# ---- Пример #4 (Example #4, score 0) — start of an unrelated scraped code fragment ----
        # NOTE(review): this fragment starts mid-method; `c1`, `c2` and
        # `sims` presumably come from the (unseen) enclosing signature —
        # confirm against the original file.
        # Encode both inputs with the shared encoder.
        x1 = self.enc(c1)
        x2 = self.enc(c2)

        # Cosine similarity of the two embeddings, regressed onto the target
        # similarities with an MSE loss (Chainer-style API, judging by
        # Variable/F.mean_squared_error/zerograds).
        ys = cos(x1, x2)
        sims = Variable(np.array(sims, dtype=np.float32))
        loss = F.mean_squared_error(ys, sims)

        # Clear accumulated gradients, backpropagate, apply one optimizer step.
        self.enc.zerograds()
        loss.backward()
        self.opt.update()

        return loss

net = Network()
# Load the dataset; the two 100s are presumably per-split sample counts —
# TODO confirm against load()'s definition.
data, test_data = load(100, 100)

for _ in range(1000):  # training iterations

    pairs = []
    sims = []

    for __ in range(70):  # batch

        # Pick two class indices in [0, 10).
        # NOTE(review): random.randrange(9) can be 0, so idx2 may equal
        # idx1 — if a distinct class was intended, this should likely be
        # (idx1 + 1 + random.randrange(9)) % 10.
        idx1 = random.randrange(10)
        idx2 = (idx1 + random.randrange(9)) % 10
        # Two samples from class idx1 and one from class idx2.
        i1 = random.randrange(len(data[idx1]))
        i2 = random.randrange(len(data[idx1]))
        i3 = random.randrange(len(data[idx2]))

        # Same-class pair (loop body continues past this fragment).
        pairs.append((data[idx1][i1], data[idx1][i2]))