Example No. 1
def lossless_decoding(population):

    # dictionary that maps each individual's index to its decoded net
    nets = {}

    for i, individual in enumerate(population):

        # recover each component of the net by splitting the string
        #  that represents it
        el = individual.split('[')
        input_size = int(el[0].replace(',', ''))

        el = el[1].split(']')
        net_layers = el[0].split(',')

        el = el[1].split(',')[1:]
        net_loss = el[0]
        net_l_rate = el[1]

        # create the net
        layers = np.array([[int(net_layers[l]), net_layers[l + 1]]
                           for l in range(0,
                                          len(net_layers) - 1, 2)])
        net = dn.DeepNet(input_size, layers, net_loss)
        net.learning_rate = float(net_l_rate)  # cast back from the string encoding

        # add the net to the vocabulary
        nets[i] = net

    return nets
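
Tracing the split calls above, each individual appears to be encoded as a flat string of the form "<input_size>,[n1,act1,n2,act2,...],<loss>,<learning_rate>". A minimal usage sketch (the sample values below are illustrative, not taken from the repository):

encoded = ['784,[35,relu,10,sigmoid],CrossEntropy,0.0001']
nets = lossless_decoding(encoded)
# nets[0] is a dn.DeepNet with input size 784, layers [[35, relu], [10, sigmoid]],
#  CrossEntropy loss and learning rate 1e-4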
Example No. 2
# module-level imports assumed by this demo: numpy as np, pickle, deepnet,
#  backprop, and unroll_network (defined alongside it)
def demo_autoencoder():
    #load and norm the data
    data = np.load('scaled_images.npy')
    data = np.asarray(data, dtype='float32')
    data /= 255.0
    #set up and train the initial deepnet
    dnn = deepnet.DeepNet(
        [data.shape[1], data.shape[1], data.shape[1], data.shape[1] * 2],
        ['gaussian', 'sigmoid', 'sigmoid', 'sigmoid'])
    dnn.train(data, [5, 5, 5], 0.0025)
    #save the trained deepnet
    pickle.dump(dnn, open('pretrained.pkl', 'wb'))
    #unroll the deepnet into an autoencoder
    autoenc = unroll_network(dnn.network)
    #print out the sizes of the autoenc layers
    for layer in autoenc:
        print(layer.W.shape)
        print(layer.hbias.shape)
    #fine-tune with backprop
    mlp = backprop.NeuralNet(network=autoenc)
    trained = mlp.train(mlp.network,
                        data,
                        data,
                        max_iter=10,
                        validErrFunc='reconstruction',
                        targetCost='linSquaredErr')
    #save
    pickle.dump(trained, open('network.pkl', 'wb'))
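
The demos on this page call unroll_network to turn the greedily pretrained stack into an autoencoder, but its source is not shown here. A minimal sketch of the standard unrolling idea (Hinton & Salakhutdinov, 2006), assuming each layer exposes W and hbias as printed above plus a visible bias vbias (an assumption):

class _Layer:
    # stand-in for the real layer class; W and hbias appear in the demo
    #  above, vbias is assumed
    def __init__(self, W, hbias, vbias):
        self.W, self.hbias, self.vbias = W, hbias, vbias

def unroll_network_sketch(network):
    # encoder half: the pretrained layers as-is
    encoder = [_Layer(l.W, l.hbias, l.vbias) for l in network]
    # decoder half: the same weights transposed, with the visible biases
    #  serving as the output biases
    decoder = [_Layer(l.W.T, l.vbias, l.hbias) for l in reversed(network)]
    return encoder + decoder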
Example No. 3
def demo_autoencoder():

    #load the data (binary features, so no normalization is needed)
    import GwData
    xs = GwData.GwData.as_binary()

    #OLD - MNIST training
    #xs = np.load('scaled_images.npy')
    #xs = np.asarray(xs, dtype='float32')
    #xs /= 255.0

    #set up and train the initial deepnet
    dnn = deepnet.DeepNet([xs.shape[1], 300, 150],
                          ['sigmoid', 'sigmoid', 'sigmoid'])
    dnn.train(xs, [500, 500], [0.25, 0.1])
    #save the trained deepnet
    pickle.dump(dnn, open('pretrained.pkl', 'wb'))
    #unroll the deepnet into an autoencoder
    autoenc = unroll_network(dnn.network)
    #fine-tune with backprop
    mlp = backprop.NeuralNet(network=autoenc)

    trained = mlp.train(mlp.network,
                        xs,
                        xs,
                        max_iter=30,
                        validErrFunc='reconstruction',
                        targetCost='linSquaredErr')
    #save
    pickle.dump(trained, open('network.pkl', 'wb'))
Example No. 4
    def train(self):
        # data loading is handled by loadData.py; the old in-place MNIST
        #  loading is kept below for reference
        #data = np.load('scaled_images.npy')
        #data = np.asarray(data, dtype='float32')
        #data /= 255.0
        
        l = loadData.Loader(str(self.dataDir), stream=self.stream)
        layer1_sigmoid = self.layer_types[0] == 'sigmoid'
        l.loadData(layer1_sigmoid)
        data = l.XC
        
        if self.limit:
            inds = np.arange(data.shape[0])
            np.random.shuffle(inds)
            data = data[inds[:self.limit_num],:]
        self.stream.write(str(data.shape))

        # parse the layer sizes
        sizes = []
        for i in self.layer_sizes:
            if i == -1:
                sizes.append(data.shape[1])
            else:
                sizes.append(i)
        
        #set up and train the initial deepnet
        dnn = deepnet.DeepNet(sizes, self.layer_types, stream=self.stream)
        dnn.train(data, self.pretrain_iter, self.pretrain_lr)

        #save the trained deepnet
        #pickle.dump(dnn, file('pretrained.pkl','wb')) # Looks like pickle won't work with Qt :(
        self.save(dnn.network, 'pretrained.mat')
        
        #unroll the deepnet into an autoencoder
        autoenc = autoencoder.unroll_network(dnn.network)
        
        #fine-tune with backprop
        mlp = backprop.NeuralNet(network=autoenc, stream=self.stream)
        trained = mlp.train(mlp.network, data, data, max_iter=self.backprop_iter, 
                validErrFunc='reconstruction', targetCost='linSquaredErr')
        
        #save
        #pickle.dump(trained, file('network.pkl','wb'))
        self.save(trained, 'network.mat')
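
The -1 sentinel in layer_sizes above is worth calling out: it is replaced by the data dimensionality at train time. A standalone illustration of that convention (resolve_sizes is a name introduced here for illustration, not part of the trainer):

def resolve_sizes(layer_sizes, n_features):
    # replace the -1 sentinel with the input dimensionality
    return [n_features if s == -1 else s for s in layer_sizes]

print(resolve_sizes([-1, 300, 150], 784))  # -> [784, 300, 150]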
Example No. 5
def rand_population(i_size, o_size, pop_size, connection_percentage=1.):

    # list that contains the final population of nets
    net_population = list()

    # number of available activation functions in the deepnet module
    dict_act_size = len(dn.activations_dict)

    # number of available loss functions in the deepnet module
    loss_dict_size = len(dn.loss_dict)

    for n in range(pop_size):

        n_layers = np.random.randint(MIN_NUM_LAYERS, MAX_NUM_LAYERS)
        layer = np.array([])

        # append n_layers - 1 random hidden layers
        for i in range(n_layers - 1):

            neurons = np.random.randint(MIN_NUM_NEURONS, MAX_NUM_NEURONS)
            activation = list(dn.activations_dict.keys())[np.random.randint(
                0, dict_act_size)]
            layer = np.append(layer, np.array([neurons, activation]))

        # append the last layer
        activation = list(dn.activations_dict.keys())[np.random.randint(
            0, dict_act_size)]
        layer = np.append(layer, np.array([o_size, activation]))

        # generate the parameters for the net
        loss = list(dn.loss_dict.keys())[np.random.randint(0, loss_dict_size)]

        # params tuple: (verbose, fully_connected, connection_percentage)
        fully_connected = connection_percentage == 1.
        params = (True, fully_connected, connection_percentage)

        # create the net and append it to the population
        net = dn.DeepNet(i_size, layer.reshape(n_layers, 2), loss, *params)
        net_population.append(net)

    return net_population
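
A quick usage sketch of the generator above (MIN_NUM_LAYERS, MAX_NUM_LAYERS, MIN_NUM_NEURONS and MAX_NUM_NEURONS are module-level constants not shown in this excerpt; the argument values are illustrative):

population = rand_population(i_size=784, o_size=10, pop_size=20,
                             connection_percentage=.5)
# 20 random nets, each ending in a 10-neuron output layer and wired with
#  50% of the possible connections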
Example No. 6
def demo_autoencoder():
    #load and norm the data
    data = np.load('scaled_images.npy')
    data = np.asarray(data, dtype='float32')
    data /= 255.0
    #set up and train the initial deepnet
    dnn = deepnet.DeepNet([data.shape[1], data.shape[1], data.shape[1], 42],
                          ['sigmoid', 'sigmoid', 'sigmoid', 'sigmoid'])
    dnn.train(data, [225, 75, 75], 0.0025)
    #save the trained deepnet
    pickle.dump(dnn, open('pretrained.pkl', 'wb'))
    #unroll the deepnet into an autoencoder
    autoenc = unroll_network(dnn.network)
    #fine-tune with backprop
    mlp = backprop.NeuralNet(network=autoenc)
    trained = mlp.train(mlp.network,
                        data,
                        data,
                        max_iter=30,
                        validErrFunc='reconstruction',
                        targetCost='linSquaredErr')
    #save
    pickle.dump(trained, open('network.pkl', 'wb'))
Example No. 7
"""
MNIST test script
"""

# solve the relative import issues
import sys
import os
sys.path.append(os.path.dirname(os.path.realpath(__file__)) + "/../deepnet/")
import deepnet as dn
import utils.utils_digit_recognition as drec
import numpy as np

if __name__ == "__main__":

    # create the net
    net = dn.DeepNet(input_size=784,
                     layers=np.array([[35, "relu"], [10, "sigmoid"]]),
                     loss="CrossEntropy",
                     verbose=True)

    net.learning_rate = 1e-4

    # initialize train, test, validation
    train_percentage = 90
    train_digits = np.loadtxt('data/mnist/mnist_train.csv',
                              delimiter=',',
                              skiprows=40000)

    test_digits = np.loadtxt('data/mnist/mnist_test.csv',
                             delimiter=',',
                             skiprows=0)

    train_size = len(train_digits)
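
The excerpt stops right after loading the CSVs. A hedged continuation, reusing helpers that appear in other examples on this page (drec.data_split, drec.binarization); the CSV layout (label in column 0, the 784 pixel values after it) is the usual MNIST-CSV convention and is assumed here:

    train, validation = drec.data_split(train_digits, train_percentage)
    X_train = train[:, 1:] / 255.0                        # scale pixels to [0, 1]
    y_train = drec.binarization(train[:, 0].astype(int))  # one-hot labels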
Example No. 8
# solve the relative import issues
import sys
import os
sys.path.append(os.path.dirname(os.path.realpath(__file__)) + "/../deepnet/")

import deepnet as dn
import numpy as np
import utils.utils_digit_recognition as drec

from sklearn import datasets

if __name__ == "__main__":

    # create the net
    net = dn.DeepNet(4, np.array([[21, "relu"], [3, "sigmoid"]]), "CrossEntropy", verbose=True)
    net.learning_rate = 1e-4  # set the learning rate

    # import data
    iris = datasets.load_iris()
    X = iris.data
    y = drec.binarization(iris.target)[:, :3]

    # split train and test (70% train, 30% test); both splits must use the
    #  same percentage so inputs and targets stay aligned
    train, test = drec.data_split(X, 70)
    train_Y, test_Y = drec.data_split(y, 70)

    # initialize the weights
    for i in range(len(net.weights)):
        net.weights[i] = dn.weights_dict['lecun'](net.weights[i])
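
The excerpt ends at weight initialization. A possible next step, reusing net.batch_backpropagation exactly as it appears in Example No. 11 (the epoch count and the column-vector reshape are assumptions, not taken from the repository):

    # train for a fixed number of epochs, one sample at a time
    for epoch in range(100):
        for x, t in zip(train, train_Y):
            net.batch_backpropagation(x.reshape(-1, 1), t.reshape(-1, 1),
                                      batch_size=1)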
Example No. 9
# solve the relative import issues
import sys
import os
sys.path.append(os.path.dirname(os.path.realpath(__file__)) + "/../deepnet/")

import deepnet as dn
import parallelization.dispatcher as disp
import parallelization.dprediction as dpred
import numpy as np

if __name__ == '__main__':
    # without this instruction it does not work in the IPython console
    __spec__ = "ModuleSpec(name='builtins', loader=<class '_frozen_importlib.BuiltinImporter'>)"
    # create four identical nets and collect them in a list
    nets = [
        dn.DeepNet(2, np.array([[3, "linear"], [2, "exp"]]), "L2", verbose=True)
        for _ in range(4)
    ]
Example No. 10
        elif re.match(':', chunk):
            r1 = 1
            r2 = layer_shape

        else:
            print('error while parsing string', chunk)
            return

        return '[' + str(r1) + ',' + str(r2) + ']'
    

""" 
    In place test: take this code as a small demonstration of the code 
"""

import numpy as np

import deepnet as dn
import deepplot.netplot as nep

verbose = False

if verbose:

    example_str = 'layer(1): 1|1, 2|2:3, 3|4:5 layer(2): :|:'

    net = dn.DeepNet(3, np.array([[5, "tanh"], [6, "linear"]]), "L2", True)
    mask = Mask(net, example_str)
    net.fully_connected = False
    net.set_mask(mask.weights)

    nep.NetPlot(net)
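
Reading the parser branch together with example_str, the mask grammar appears to be (an inference from this excerpt, not a documented spec): each layer(k) section lists rows|cols chunks, where a:b is an inclusive range and a bare ':' expands to the full index range. A self-contained check of the ':' branch:

import re

# how the ':' branch above expands a bare colon to the full range
layer_shape = 6
chunk = ':'
if re.match(':', chunk):
    r1, r2 = 1, layer_shape
print('[' + str(r1) + ',' + str(r2) + ']')  # prints [1,6]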
Example No. 11
    sample_size = 3
    # input/output size
    i_size = 100
    o_size = 1

    X = np.zeros((sample_size, i_size, 1))
    # turn on 3 distinct bits in each sample, at random positions
    for x in X:
        for i in np.random.choice(i_size, sample_size, replace=False):
            x[i] = 1

    # target: the number of active bits in each sample (always 3)
    Y = 3 * np.ones(X.shape[:1]).reshape(sample_size, 1, 1)

    # create the perceptron
    net = dn.DeepNet(i_size, np.array([[1, "linear"]]), "L2")
    net.learning_rate = 3.75e-1

    # initialize the parameters
    for i in range(len(net.weights)):
        net.weights[i] = dn.weights_dict['lecun'](net.weights[i])
        net.bias[i] = dn.weights_dict['lecun'](net.bias[i])

    # train
    for i in range(sample_size):
        net.batch_backpropagation(X[i], Y[i], batch_size=1)

    test_size = 10
    X_test = np.zeros((test_size, i_size, 1))

    # test on 10 samples
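
The excerpt is cut off at the test block. A hedged completion: build the test samples the same way as the training ones and inspect the net's output; net.activation is a hypothetical forward-pass name, since this excerpt does not show deepnet's prediction API:

    # fill the test samples like the training ones (3 random bits each)
    for x in X_test:
        for i in np.random.choice(i_size, sample_size, replace=False):
            x[i] = 1

    for i in range(test_size):
        print(net.activation(X_test[i]))  # hypothetical method; should approach 3.0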
Example No. 12
"""

# solve the relative import issues
import sys
import os
sys.path.append(os.path.dirname(os.path.realpath(__file__)) + "/../deepnet/")

import matplotlib.pyplot as plt
import numpy as np
import utils.utils_digit_recognition as drec
import deepnet as dn

if __name__ == "__main__":

    # initialize the net
    net = dn.DeepNet(64, np.array([[35, "leakyrelu"], [10, "leakyrelu"]]),
                     "L2")
    net.learning_rate = 2e-3

    # uncomment the following two lines to try a non-fully-connected topology
    # net = dn.DeepNet(64, np.array([[35, "sigmoid"]]), "CrossEntropy")
    # net.add_block(10, "sigmoid", fully_connected=False, connection_percentage=.5)

    # initialize train, test, validation
    train_percentage = 70
    # the validation set is carved directly out of the test set (for the sake
    #  of simplicity), so this percentage is relative to the test portion
    validation_percentage = 50

    # import the dataset
    digits = drec.load_digits()
    # shuffle together inputs and supervised outputs
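
The excerpt stops at the shuffling step. A hedged continuation: one shared permutation keeps inputs and targets aligned; the digits.images / digits.target attribute names and the /16.0 pixel scaling follow sklearn's load_digits convention and are assumptions here, as is reusing drec.data_split and drec.binarization from the other examples:

    perm = np.random.permutation(len(digits.target))
    X = digits.images.reshape(len(digits.target), -1)[perm] / 16.0
    y = drec.binarization(digits.target[perm])

    # split according to the percentages above
    train, test = drec.data_split(X, train_percentage)
    train_Y, test_Y = drec.data_split(y, train_percentage)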