def __init__(self, Model_Class, NF):
    # load train and test features as numpy arrays
    self.Model_Class = Model_Class
    self.NF = NF
    self.net = Network(NF)

def reset(self):
    self.net = Network(self.NF)
Example #3
import network3
from network3 import Network, ConvPoolLayer, FullyConnectedLayer, SoftmaxLayer

training_data, validation_data, test_data = network3.load_data_shared()
mini_batch_size = 10
net = Network([
    FullyConnectedLayer(n_in=784, n_out=100),
    SoftmaxLayer(n_in=100, n_out=10)
], mini_batch_size)
# train for 60 epochs with learning rate 0.1
net.stochastic_gradient_descent(training_data, 60, mini_batch_size, 0.1,
                                validation_data, test_data)
# assumed imports for the class below; Network comes from the project's own module
import numpy as np
from scipy import stats
from scipy.stats import randint as sp_randint, norm as sp_norm
from sklearn.model_selection import RandomizedSearchCV


class NeuralNetwork:
    def __init__(self, Model_Class, NF):
        # load train and test features as numpy arrays
        self.Model_Class = Model_Class
        self.NF = NF
        self.net = Network(NF)

    def reset(self):
        self.net = Network(self.NF)

    def train(self, X_train, Y_train):
        # SGD inputs: training_data, epochs, mini_batch_size, learning_rate
        #self.net.set_params(verbose=True)
        self.net.fit(X_train, Y_train)
        Y_pred_train = self.net.predict(X_train)
        slope, intercept, r_value, p_value, std_err = stats.linregress(
            Y_train, Y_pred_train)

        return Y_train, Y_pred_train, r_value

    def test(self, X_train, X_test, Y_train, Y_test, plot=True):
        # Determine labels for training and test data
        Y_pred_train = self.net.predict(X_train)

        m_train, intercept, r_value, p_value, std_err = stats.linregress(
            Y_train, Y_pred_train)
        Y_pred_test = self.net.predict(X_test)  #/ m_train

        r_value, p_value, std_err = self.Model_Class.plot(Y_train,
                                                          Y_pred_train,
                                                          Y_test,
                                                          Y_pred_test,
                                                          plot=plot)
        return Y_test, Y_pred_test, r_value

    def predict(self, X):
        # make predictions on unlabeled data
        return self.net.predict(X)

    def tune(self, X_train, Y_train):
        # define distributions of candidate hyperparameter values;
        # RandomizedSearchCV samples from each by calling its .rvs() method
        param_dist = {
            'nodes': sp_randint(10, 50),
            'eta': sp_norm(.035, .005),
            'lmbda': sp_norm(0, 1),
            'patience': sp_norm(15, 5)
        }

        self.net.set_params(verbose=False)
        random_search = RandomizedSearchCV(self.net,
                                           scoring='neg_mean_squared_error',
                                           param_distributions=param_dist,
                                           cv=5)  # the deprecated iid= argument was removed in scikit-learn 0.24
        random_search.fit(X_train, Y_train)
        self.params = random_search.best_params_
        self.net = Network(self.NF,
                           nodes=self.params['nodes'],
                           eta=self.params['eta'],
                           lmbda=self.params['lmbda'],
                           patience=self.params['patience'])
        return self.params

    def select_features(self, X_train, Y_train, keep='all', iterations=50):
        # perform feature ranking with subsets of training data

        # define percentage of training data to keep when bootstrapping
        p_bootstrap = .75
        NS, NF = X_train.shape
        feature_importances = np.zeros([iterations, NF])

        if keep == 'all':
            keep = NF
        elif keep > NF:
            keep = NF

        # silence the network's training output during the repeated fits below
        self.net.set_params(verbose=False)

        for i in range(iterations):
            # keep p_bootstrap % of training sample set after shuffling
            rand_inds = np.random.permutation(NS)
            X_train_sample = X_train[rand_inds, :][:int(p_bootstrap * NS), :]
            Y_train_sample = Y_train[rand_inds][:int(p_bootstrap * NS)]
            # fit NN model to bootstrap sample of training data
            feature_importances[i, :] = self.net.fit(X_train_sample,
                                                     Y_train_sample,
                                                     FS=True)

        # record mean feature importances
        mean_feature_importances = np.mean(feature_importances, 0)

        # return X_train with kept features
        selected_features = np.argsort(np.abs(mean_feature_importances))[::-1]
        sorted_feature_importances = mean_feature_importances[
            selected_features][:keep]
        X_train_select = X_train[:, selected_features[:keep]]

        # check whether hyperparameters were already optimized before re-instantiating
        try:
            self.NF = keep
            self.net = Network(self.NF,
                               nodes=self.params['nodes'],
                               eta=self.params['eta'],
                               lmbda=self.params['lmbda'])
        except AttributeError:
            # tune() has not been run yet, so fall back to default hyperparameters
            self.NF = keep
            self.net = Network(self.NF)

        return (X_train_select, selected_features[:keep],
                sorted_feature_importances)

    def set_model_params(self, params):
        self.net.set_params(**params)
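
# A minimal usage sketch of the class above (illustrative only; the data
# arrays, the Model_Class helper object, and the Network module are
# assumptions drawn from the surrounding code, not part of the original file):
#
#     nn = NeuralNetwork(Model_Class=my_model_helper, NF=X_train.shape[1])
#     best_params = nn.tune(X_train, Y_train)            # random hyperparameter search
#     X_sel, feats, imps = nn.select_features(X_train, Y_train, keep=20)
#     nn.train(X_sel, Y_train)                           # fit and report train r_value
#     nn.test(X_sel, X_test[:, feats], Y_train, Y_test)  # evaluate on held-out data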
Example #5
def make_shared_GPU(data):
    # load the arrays into Theano shared variables so they can live on the GPU
    shared_x = theano.shared(
        np.asarray(data[0], dtype=theano.config.floatX), borrow=True)
    shared_y = theano.shared(
        np.asarray(data[1], dtype=theano.config.floatX), borrow=True)
    return shared_x, T.cast(shared_y, "int32")
    #return shared_x, T.cast(shared_y, "int64")
training_data = make_shared_GPU([train_features_normed, train_labels])
validation_data = make_shared_GPU([test_features_normed, test_labels])
test_data = validation_data


# In[36]:

train_features_normed.shape, len(train_labels), test_features_normed.shape, len(test_labels)


# ### Learn neural network

# In[ ]:

## adding a conv layer
#THEANO_FLAGS="exception_verbosity=high"
mini_batch_size = 83
net = Network([
        ConvPoolLayer(image_shape=(mini_batch_size, 1, 64, 64), 
                      filter_shape=(20, 1, 5, 5), 
                      poolsize=(2, 2),
                      activation_fn=ReLU),
        FullyConnectedLayer(n_in=20*30*30, n_out=100, activation_fn=ReLU),
        SoftmaxLayer(n_in=100, n_out=16141)], mini_batch_size)
net.SGD(training_data, 60, mini_batch_size, 0.1, 
            validation_data, test_data)
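
# Why n_in=20*30*30 in the FullyConnectedLayer above: a 5x5 valid convolution
# maps each 64x64 image to 60x60, and 2x2 max-pooling halves that to 30x30,
# over 20 feature maps. A quick sketch of that arithmetic (the helper name is
# ours for illustration, not part of network3):
def conv_pool_output_size(image, filt, pool):
    conv = image - filt + 1  # valid convolution shrinks the map
    return conv // pool      # non-overlapping pooling downsamples it
assert conv_pool_output_size(64, 5, 2) == 30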

Example #6
def main():
    training_data, validation_data, test_data = load_data_wrapper()

    net = Network([784, 50, 30, 10], 10)
    net.SGD(list(training_data), 50, 10, 4.0, list(validation_data),
            list(test_data))
Example #7
testD = genfromtxt('D:\\Study\\eaglabay\\DeelLData2\\testD.csv', delimiter=',')
trainD = genfromtxt('D:\\Study\\eaglabay\\DeelLData2\\trainD.csv', delimiter=',')
validD = genfromtxt('D:\\Study\\eaglabay\\DeelLData2\\validD.csv', delimiter=',')
testt = (testD[:, 0:3072], testD[:, 3072])
traint = (trainD[:, 0:3072], trainD[:, 3072])
#np.random.shuffle(traint)
validt = (validD[:, 0:3072], validD[:, 3072])
train_d, valid_d, test_d = network3.load_data_shared2(traint, validt, testt)
#train_d,valid_d,test_d=network3.load_data_shared()
mini_batch_size = 50
net = Network([
        ConvPoolLayer(image_shape=(mini_batch_size, 3, 32, 32), 
                      filter_shape=(20, 3, 5, 5), 
                      poolsize=(2, 2)),
        FullyConnectedLayer(n_in=20*14*14, n_out=100),
        SoftmaxLayer(n_in=100, n_out=10)], mini_batch_size)
net.SGD(train_d, 5, mini_batch_size, 0.005,
        valid_d, test_d)

a = net.testResult(test_d, 50)
#net = Network([
#        ConvPoolLayer(image_shape=(mini_batch_size, 1, 28, 28), 
#                      filter_shape=(20, 1, 5, 5), 
#                      poolsize=(2, 2)),
#        ConvPoolLayer(image_shape=(mini_batch_size, 20, 12, 12), 
#                      filter_shape=(20, 20, 5, 5), 
#                      poolsize=(2, 2)),
#        FullyConnectedLayer(n_in=20*4*4, n_out=100),
#        SoftmaxLayer(n_in=100, n_out=10)], mini_batch_size)
Example #8
                  poolsize=(2, 2)),
    ConvPoolLayer(image_shape=(mini_batch_size, 20, 12, 12),
                  filter_shape=(40, 20, 5, 5),
                  poolsize=(2, 2)),
    FullyConnectedLayer(n_in=40*4*4, n_out=100),
    SoftmaxLayer(n_in=100, n_out=10)], mini_batch_size)
net.SGD(training_data, 60, mini_batch_size, 0.1, validation_data, test_data)
'''

# chapter 6 - rectified linear units and some L2 regularization (lmbda=0.1) => even better accuracy
from network3 import ReLU
net = Network([
    ConvPoolLayer(image_shape=(mini_batch_size, 1, 28, 28),
                  filter_shape=(20, 1, 5, 5),
                  poolsize=(2, 2),
                  activation_fn=ReLU),
    ConvPoolLayer(image_shape=(mini_batch_size, 20, 12, 12),
                  filter_shape=(40, 20, 5, 5),
                  poolsize=(2, 2),
                  activation_fn=ReLU),
    FullyConnectedLayer(n_in=40 * 4 * 4, n_out=100, activation_fn=ReLU),
    SoftmaxLayer(n_in=100, n_out=10)
], mini_batch_size)
net.SGD(training_data,
        60,
        mini_batch_size,
        0.03,
        validation_data,
        test_data,
        lmbda=0.1)
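
# How lmbda enters training: network3-style SGD adds an L2 penalty on the
# weights to the cost. Roughly (a sketch of the idea, not the exact source):
#
#     l2_norm_squared = sum((layer.w ** 2).sum() for layer in self.layers)
#     cost = self.layers[-1].cost(self) \
#            + 0.5 * lmbda * l2_norm_squared / num_training_batches
#
# so lmbda=0.1 applies mild weight decay on top of the ReLU layers.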
Example #9
from random import randint
from network3 import Node, Network
from math import isnan
nodes = []
nodes.append(Node('out', [], {1: 2, 2: -2, 3: 0.1}))
nodes.append(Node(1, ['out'], {11: 2}))
nodes.append(Node(2, ['out'], {11: 4, 22: 3}))
nodes.append(Node(3, ['out'], {22: -1}))
nodes.append(Node(11, [1, 2], {}))
nodes.append(Node(12, [2, 3], {}))
nodes.append(Node(13, [2, 3], {}))
nodes.append(Node(14, [2, 3], {}))
nodes.append(Node(15, [2, 3], {}))
nodes.append(Node(16, [2, 3], {}))

net = Network({node.id: node for node in nodes})
# the target simplifies to -X[0] + 4*X[1]
targetFunc = lambda X: 3 * X[0] - 2 * (2 * X[0] + 3 * X[1]) + 10 * X[1]
X = [[randint(0, 9), randint(0, 9)] for x in range(1, 500)]
lRate, oscillations, lwp, mse = 1, 0, True, 101
iters = 0
while mse > 100 and iters < 10:
    print("ITERATION: ", iters)
    sum_e = 0
    nans = 0
    for i in range(0, len(X)):
        inputs = {11: X[i][0], 22: X[i][1]}
        lRate, oscillations, lwp, e = net.interation(inputs, targetFunc(X[i]),
                                                     lRate, oscillations, lwp)
        if isnan(e):
            nans += 1
        else:
Example #10
lmbda = 0
p_dropout = 1.0 / 6  # drop each unit with probability 1/6

# Set seed to facilitate reproducibility
random.seed(12345678)
np.random.seed(12345678)

# Build network
net = Network([
    ConvPoolLayer(image_shape=(mini_batch_size, 1, 28, 28),
                  filter_shape=(20, 1, 5, 5),
                  poolsize=(2, 2),
                  activation_fn=ReLU),
    ConvPoolLayer(image_shape=(mini_batch_size, 20, 12, 12),
                  filter_shape=(20, 20, 5, 5),
                  poolsize=(2, 2),
                  activation_fn=ReLU),
    FullyConnectedLayer(
        n_in=20 * 4 * 4, n_out=30, activation_fn=ReLU, p_dropout=p_dropout),
    FullyConnectedLayer(
        n_in=30, n_out=30, activation_fn=ReLU, p_dropout=p_dropout),
    SoftmaxLayer(n_in=30, n_out=10, p_dropout=p_dropout)
], mini_batch_size)
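
# p_dropout = 1/6 means each unit in the tagged layers is zeroed with
# probability 1/6 on every training pass. A generic sketch of such a mask
# (our illustration; network3 implements its own dropout_layer internally):
def dropout_mask(shape, p_drop, rng=np.random):
    # each entry is 1 (kept) with probability 1 - p_drop, else 0 (dropped)
    return rng.binomial(1, 1 - p_drop, size=shape)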

# Call SGD
training_accuracy, validation_accuracy = net.SGD(training_data,
                                                 num_epochs,
                                                 mini_batch_size,
                                                 eta,
                                                 validation_data,
                                                 test_data,
Example #11
import network3
from network3 import Network, ConvPoolLayer, FullyConnectedLayer, SoftmaxLayer
import theano.tensor as T  # conventional alias; "tf" in the original invites confusion with TensorFlow

training_data, validation_data, test_data = network3.load_data_shared()

# PARAMETERS
mini_batch_size = 10
epochs = 60
eta = 0.1

# LAYER 1: CONVOLUTIONAL POOL LAYER PARAMETERS
image_shape = (mini_batch_size, 1, 28, 28)
filter_shape = (20, 1, 5, 5)
poolsize = (2, 2)

# LAYER 2: FULLY CONNECTED LAYER PARAMETERS
input_cells_2 = 20*12*12
output_cells_2 = 100

# LAYER 3: SOFTMAX LAYER PARAMETERS
output_cells_3 = 10

# NETWORK
n = Network([
    ConvPoolLayer(image_shape=image_shape, filter_shape=filter_shape, poolsize=poolsize), 
    FullyConnectedLayer(n_in=input_cells_2, n_out=output_cells_2), 
    SoftmaxLayer(n_in=output_cells_2, n_out=output_cells_3)
], mini_batch_size)

# EXECUTION
n.SGD(training_data, epochs, mini_batch_size, eta, validation_data, test_data)  
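
# Sanity check for input_cells_2 above: a 5x5 valid filter maps 28x28 to
# 24x24 and 2x2 pooling gives 12x12, so the conv layer emits 20*12*12 values
# per image (our own check, derived from the parameters above):
assert (image_shape[2] - filter_shape[2] + 1) // poolsize[0] == 12
assert filter_shape[0] * 12 * 12 == input_cells_2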
Example #12
"""
Created on Mon Oct 29 11:36:48 2018

@author: roohollah
"""

import network3
from network3 import Network
from network3 import ConvPoolLayer, FullyConnectedLayer, SoftmaxLayer
import time

start = time.time()
training_data, validation_data, test_data = network3.load_data_shared()
mini_batch_size = 10
net = Network(
    [
        ConvPoolLayer(
            image_shape=(mini_batch_size, 1, 28, 28),
            filter_shape=(20, 1, 5, 5),  # Each conv 1 map is 24x24
            poolsize=(2, 2)),  # Each pool 1 map is 12x12
        FullyConnectedLayer(n_in=20 * 12 * 12, n_out=100),
        SoftmaxLayer(n_in=100, n_out=47)
    ],
    mini_batch_size)

net.SGD(training_data, 60, mini_batch_size, 0.1, validation_data,
        test_data)  # λ = 0.

end = time.time()

print('time needed to run program:', end - start)
Example #13
import network3
from network3 import Network, ReLU
from network3 import ConvPoolLayer, FullyConnectedLayer, SoftmaxLayer

training_data, validation_data, test_data = network3.load_data_shared()

mini_batch_size = 10
net = Network([
    ConvPoolLayer(image_shape=(mini_batch_size, 1, 28, 28),
                  filter_shape=(20, 1, 5, 5),
                  poolsize=(2, 2),
                  activation_fn=ReLU),
    FullyConnectedLayer(
        n_in=20 * 12 * 12, n_out=100, activation_fn=ReLU, p_dropout=0.0),
    SoftmaxLayer(n_in=100, n_out=10, p_dropout=0.5)
], mini_batch_size)

n_epochs, eta = 30, 0.1
net.SGD(training_data, n_epochs, mini_batch_size, eta, validation_data,
        test_data)
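
# Note on p_dropout=0.5 above: dropout is applied during training only; at
# evaluation time network3-style layers compensate by scaling the layer's
# weighted input by (1 - p_dropout), so no mask is needed at test time.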
Example #14
    voters = 1
    vote_box = []
    vote_prob_box = []
    for vote in range(voters):  # xrange in the original Python 2 source
        #expanded_training_data, validation_data, test_data = \
        #                        network3.load_data_shared(expanded_time=10)
        # from book chap6
        #'''
        net = Network([
            ConvPoolLayer(image_shape=(mini_batch_size, 1, 29, 29),
                          filter_shape=(6, 1, 17, 17),
                          poolsize=(1, 1),
                          activation_fn=ReLU),
            ConvPoolLayer(image_shape=(mini_batch_size, 6, 13, 13),
                          filter_shape=(50, 6, 9, 9),
                          poolsize=(1, 1),
                          activation_fn=ReLU),
            FullyConnectedLayer(
                n_in=50 * 5 * 5, n_out=1000, activation_fn=ReLU,
                p_dropout=0.5),
            FullyConnectedLayer(
                n_in=1000, n_out=500, activation_fn=ReLU, p_dropout=0.5),
            SoftmaxLayer(n_in=500, n_out=10, p_dropout=0.5)
        ], mini_batch_size)

        print "=========Currently Calculating Voter number %s=========" % vote
        k, p = net.SGD(expanded_training_data,
                       120,
                       mini_batch_size,
                       0.001,
                       validation_data,
                       test_data,
Example #15
File: run3.py  Project: roachsinai/books
# use right random seed and dropout
net = Network([
    ConvPoolLayer(input_shape=(mini_batch_size, 1, 28, 28),
                  filter_shape=(20, 1, 5, 5),
                  poolsize=(2, 2)),
    FullyConnectedLayer(n_in=20*12*12, n_out=100, p_dropout=0.5),
    SoftmaxLayer(n_in=100, n_out=10, p_dropout=0.5)], mini_batch_size)
net.SGD(training_data, 60, mini_batch_size, 0.1,
        validation_data, test_data)
98.77 vs 98.78 (no dropout)
"""

net = Network([
    ConvPoolLayer(input_shape=(mini_batch_size, 1, 28, 28),
                  filter_shape=(20, 1, 5, 5),
                  poolsize=(2, 2),
                  activation_fn=ReLU),
    ConvPoolLayer(input_shape=(mini_batch_size, 20, 12, 12),
                  filter_shape=(40, 20, 5, 5),
                  poolsize=(2, 2),
                  activation_fn=ReLU),
    FullyConnectedLayer(
        n_in=40*4*4, n_out=1000, activation_fn=ReLU, p_dropout=0.5),
    FullyConnectedLayer(
        n_in=1000, n_out=1000, activation_fn=ReLU, p_dropout=0.5),
    SoftmaxLayer(n_in=1000, n_out=10, p_dropout=0.5)],
    mini_batch_size)
net.SGD(expanded_training_data, 40, mini_batch_size, 0.03,
        validation_data, test_data)
# 99.52
learning_rate = 0.03
regularization_factor = 0.1
topology = [
    ConvPoolLayer(image_shape=(mini_batch_size, 1, 28, 28),
                  filter_shape=(20, 1, 5, 5),
                  poolsize=(2, 2),
                  activation_fn=ReLU),
    ConvPoolLayer(image_shape=(mini_batch_size, 20, 12, 12),
                  filter_shape=(40, 20, 5, 5),
                  poolsize=(2, 2),
                  activation_fn=ReLU),
    FullyConnectedLayer(n_in=40*4*4, n_out=100, activation_fn=ReLU),
    SoftmaxLayer(n_in=100, n_out=10)]


net = Network(topology, mini_batch_size)
result.append(net.SGD(training_data, epochs, mini_batch_size, learning_rate,
                      validation_data, test_data, lmbda=regularization_factor))
dump_file(result, "result_pickle" + str(filter_size))


filter_size = 1

mini_batch_size = 10
epochs = 30
learning_rate = 0.03
regularization_factor = 0.1
topology = [
    ConvPoolLayer(image_shape=(mini_batch_size, 1, 28, 28),
                  filter_shape=(20, 1, filter_size, filter_size),
Example #17
#                       poolsize=(2, 2),
#                       activation_fn=ReLU),
#         FullyConnectedLayer(
#             n_in=40*6*6, n_out=1000, activation_fn=ReLU, p_dropout=0.5),
#         FullyConnectedLayer(
#             n_in=1000, n_out=1000, activation_fn=ReLU, p_dropout=0.5),
#         SoftmaxLayer(n_in=1000, n_out=36, p_dropout=0.5)],
#         mini_batch_size)

net = Network([
    ConvPoolLayer(image_shape=(mini_batch_size, 1, 36, 36),
                  filter_shape=(20, 1, 5, 5),
                  poolsize=(2, 2),
                  activation_fn=ReLU),
    ConvPoolLayer(image_shape=(mini_batch_size, 20, 16, 16),
                  filter_shape=(40, 20, 5, 5),
                  poolsize=(2, 2),
                  activation_fn=ReLU),
    FullyConnectedLayer(
        n_in=40 * 6 * 6, n_out=500, activation_fn=ReLU, p_dropout=0.5),
    SoftmaxLayer(n_in=500, n_out=34)
], mini_batch_size)

# net = Network([
#         ConvPoolLayer(image_shape=(mini_batch_size, 1, 48, 48),
#                       filter_shape=(25, 1, 5, 5),
#                       poolsize=(2, 2),
#                       activation_fn=ReLU),
#         ConvPoolLayer(image_shape=(mini_batch_size, 25, 22, 22),
#                       filter_shape=(16, 25, 5, 5),
#                       poolsize=(2, 2),
Example #18
import network3
from network3 import Network
from network3 import ConvPoolLayer, FullyConnectedLayer, SoftmaxLayer
training_data, validation_data, test_data = network3.load_data_shared()

mini_batch_size = 10
net = Network([
        FullyConnectedLayer(n_in=784, n_out=100),
        SoftmaxLayer(n_in=100, n_out=10)], mini_batch_size)
net.SGD(training_data, 60, mini_batch_size, 0.1,
            validation_data, test_data)
Example #19
#!/Users/jblue/anaconda2/bin/python
# export DYLD_FALLBACK_LIBRARY_PATH=/Users/jblue/anaconda2/lib/
## note: I had the best luck running Theano under Anaconda; the export statement above was necessary to get my code to run
import datetime
import network3
from network3 import Network
from network3 import ConvPoolLayer, FullyConnectedLayer, SoftmaxLayer
training_data, validation_data, test_data = network3.load_data_shared()
mini_batch_size = 10

print "starting: ", datetime.datetime.now()

net = Network([
    ConvPoolLayer(image_shape=(mini_batch_size, 1, 28, 28),
                  filter_shape=(20, 1, 5, 5),
                  poolsize=(2, 2)),
    FullyConnectedLayer(n_in=20 * 12 * 12, n_out=100),
    SoftmaxLayer(n_in=100, n_out=10)
], mini_batch_size)

net.SGD(training_data, 60, mini_batch_size, 0.1, validation_data, test_data)

print "finished: ", datetime.datetime.now()
Example #20

import network3
from network3 import Network
from network3 import ConvPoolLayer, FullyConnectedLayer, SoftmaxLayer
from network3 import ReLU

training_data, validation_data, test_data = network3.load_data_shared()
#expanded_training_data, _, _ = network3.load_data_shared("../data/mnist_expanded.pkl.gz")
mini_batch_size = 10

net = Network.load("out.txt")
print(net.accuracy(test_data, mini_batch_size))
Example #21
# expanded_training_data, _, _ = network3.load_data_shared(
#          "../data/mnist_expanded.pkl.gz")
# net = Network([
#         FullyConnectedLayer(n_in=784, n_out=100),
#         SoftmaxLayer(n_in=100, n_out=10)], mini_batch_size)
# net.SGD(training_data, 60, mini_batch_size, 0.1,
#             validation_data, test_data)

net = Network([
    ConvPoolLayer(image_shape=(mini_batch_size, 1, 28, 28),
                  filter_shape=(20, 1, 5, 5),
                  poolsize=(2, 2),
                  activation_fn=ReLU),
    ConvPoolLayer(image_shape=(mini_batch_size, 20, 12, 12),
                  filter_shape=(40, 20, 5, 5),
                  poolsize=(2, 2),
                  activation_fn=ReLU),
    FullyConnectedLayer(
        n_in=40 * 4 * 4, n_out=1000, activation_fn=ReLU, p_dropout=0.5),
    FullyConnectedLayer(
        n_in=1000, n_out=1000, activation_fn=ReLU, p_dropout=0.5),
    SoftmaxLayer(n_in=1000, n_out=10, p_dropout=0.5)
], mini_batch_size)
net.SGD(training_data, 60, mini_batch_size, 0.03, validation_data, test_data)

########################################
import numpy
import csv
# unsigned char
dt = numpy.dtype('B')
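
# 'B' is NumPy's unsigned 8-bit integer type; a quick illustration of the
# declaration above (our example, not part of the original file):
#
#     >>> numpy.frombuffer(b'\x00\x7f\xff', dtype=dt)
#     array([  0, 127, 255], dtype=uint8)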