Example #1
    def __init__(self, n_in, n_hidden, n_out, scale=0.5):
        Layer.__init__(self)

        self.n_in = n_in
        self.n_hidden = n_hidden
        self.n_out = n_out
        self.scale = sqrt(1.0 / n_in) or scale
        self.scale_out = sqrt(1.0 / n_hidden) or scale

        self.W_hi = shared_normal((n_hidden, n_in), scale=self.scale)
        self.W_ci = shared_normal((n_hidden, n_hidden), scale=self.scale)
        self.b_i = shared_zeros((n_hidden, ))
        self.W_hf = shared_normal((n_hidden, n_in), scale=self.scale)
        self.W_cf = shared_normal((n_hidden, n_hidden), scale=self.scale)
        self.b_f = shared_zeros((n_hidden, ))
        self.W_hc = shared_normal((n_hidden, n_in), scale=self.scale)
        self.b_c = shared_zeros((n_hidden, ))
        self.W_ho = shared_normal((n_hidden, n_in), scale=self.scale)
        self.W_co = shared_normal((n_hidden, n_hidden), scale=self.scale)
        self.b_o = shared_zeros((n_hidden, ))
        self.W_od = shared_normal((n_out, n_hidden),
                                  scale=self.scale_out)  #output decoder
        self.b_od = shared_zeros((n_out, ))  # decoder bias sized to match W_od's output

        self.params = [
            self.W_hi, self.W_ci, self.b_i, self.W_hf, self.W_cf, self.b_f,
            self.W_hc, self.b_c, self.W_ho, self.W_co, self.b_o, self.W_od,
            self.b_od
        ]
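
A minimal sketch of how the shared_normal and shared_zeros helpers used above
might be defined for a Theano backend. These helpers are not shown in the
example; their names and behaviour are inferred from the call sites, so treat
this as an assumption rather than the original implementation:

import numpy as np
import theano

def shared_normal(shape, scale=0.05):
    # Hypothetical helper: Gaussian-initialized shared variable.
    value = np.random.normal(loc=0.0, scale=scale, size=shape)
    return theano.shared(value.astype(theano.config.floatX), borrow=True)

def shared_zeros(shape):
    # Hypothetical helper: zero-initialized shared variable for biases.
    return theano.shared(np.zeros(shape, dtype=theano.config.floatX),
                         borrow=True)
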
Example #2
    def __init__ (self, layerID, inputSize, kernelSize,
                  downsampleFactor, learningRate=0.001, momentumRate=0.9,
                  dropout=None, initialWeights=None, initialThresholds=None,
                  activation=tanh, randomNumGen=None) :
        Layer.__init__(self, layerID, learningRate, momentumRate, dropout,
                       activation)

        if inputSize[1] != kernelSize[1] :
            raise ValueError('ConvolutionalLayer Error: ' +
                             'Number of Channels must match in ' +
                             'inputSize and kernelSize')

        # NOTE: use None instead of the batch size to allow variable batch
        #       sizes during deployment.
        self._inputSize = tuple([None] + list(inputSize[1:]))
        self._kernelSize = tuple(kernelSize)
        self._downsampleFactor = tuple(downsampleFactor)

        # create weights based on the optimal distribution for the activation
        if initialWeights is None or initialThresholds is None :
            self._initializeWeights(
                size=self._kernelSize,
                fanIn=np.prod(self._inputSize[1:]),
                fanOut=self._kernelSize[0],
                randomNumGen=randomNumGen)
Example #3
    def __init__(self,
                 layerID,
                 inputSize,
                 kernelSize,
                 downsampleFactor,
                 learningRate=0.001,
                 momentumRate=0.9,
                 dropout=None,
                 initialWeights=None,
                 initialThresholds=None,
                 activation=tanh,
                 randomNumGen=None):
        Layer.__init__(self, layerID, learningRate, momentumRate, dropout,
                       activation)

        # TODO: this check is likely unnecessary
        if inputSize[2] == kernelSize[2] or inputSize[3] == kernelSize[3]:
            raise ValueError('ConvolutionalLayer Error: ' +
                             'inputSize cannot equal kernelSize')
        if inputSize[1] != kernelSize[1]:
            raise ValueError('ConvolutionalLayer Error: ' +
                             'Number of Channels must match in ' +
                             'inputSize and kernelSize')

        self._inputSize = inputSize
        self._kernelSize = kernelSize
        self._downsampleFactor = downsampleFactor

        # create weights based on the optimal distribution for the activation
        if initialWeights is None or initialThresholds is None:
            self._initializeWeights(size=self._kernelSize,
                                    fanIn=np.prod(self._inputSize[1:]),
                                    fanOut=self._kernelSize[0],
                                    randomNumGen=randomNumGen)
Example #4
    def __init__(self, n_in, n_out):
        Layer.__init__(self)

        self.n_in = n_in
        self.n_out = n_out

        scale = sqrt(1.0 / n_in)
        self.W = shared_normal((self.n_in, self.n_out), scale=scale)
        self.b = shared_zeros((self.n_out, ))
        self.params = [self.W, self.b]
Example #5
    def __init__(self, nodes, learning_rate=0.01):
        """
        Params:\n
        nodes []int:array of the number of neurons each layer has\n
        Inicial la red con los datos ingresados
        """
        # Layers
        self.hiddenLayers = []

        self.learningRate = learning_rate

        for i in range(1, len(nodes) - 1):
            self.hiddenLayers.append(Layer(nodes[i - 1], nodes[i]))

        self.outputLayer = Layer(nodes[-2], nodes[-1])
Example #6
 def __str__(self) :
     '''Output Layer to String.'''
     from nn.layer import Layer
     s = ''
     s += '\tLayer Type         : ConvolutionalAutoEncoder\n'
     s += Layer.__str__(self)
     return s
Example #7
 def _decode(self, input) :
     from nn.layer import Layer
     weightsBack = self._getWeightsBack()
     deconvolve = conv2d(input, weightsBack, self.getFeatureSize(),
                         tuple(weightsBack.shape.eval()),
                         border_mode='full')
     out = deconvolve + self._thresholdsBack.dimshuffle('x', 0, 'x', 'x')
     return Layer._setActivation(self, out)
Example #8
    def __init__(self, n_hidden, n_out, reg_exp_size, ae_size, id_to_reg_exp,
                 id_to_word, word_lookup_table, auto_encoder, L2_reg=0.0001):

        self.n_hidden = n_hidden
        self.n_out = n_out
        self.L2_reg = L2_reg
        self.activation = tf.tanh
        self.auto_encoder = auto_encoder
        self.word_lookup_table = word_lookup_table
        self.id_to_word = id_to_word
        self.id_to_reg_exp = id_to_reg_exp
        rng = np.random.RandomState(random.randint(1, 2 ** 30))

        # Adapting learning rate
        self.learning_rate = OrderedDict({})
        self.batch_grad = OrderedDict({})

        # word dict size and ner dict size and reg_exp_dict size
        self.ae_size = ae_size
        self.reg_V = reg_exp_size

        self.x_in = tf.placeholder(tf.float32, shape=(None, 20, 200))  # batch * sequence
        self.reg_x = tf.placeholder(tf.int32, shape=(None,))
        self.y = tf.placeholder(tf.int32)
        self.i = 0

        # Skip layer for the encoder
        # (the detailed TensorFlow structure is defined in the Layer class)

        self.skip_layer_ae = Layer(rng, ae_size, n_out, "tanh", self.learning_rate, self.batch_grad)
        # Skip Layer for reg,
        self.skip_layer_re = Layer(rng, self.reg_V, n_out, "tanh", self.learning_rate, self.batch_grad)
        # Hidden Layer, ae_size=n_hidden=200
        self.hiddenLayer = Layer(rng, ae_size, n_hidden, "tanh", self.learning_rate, self.batch_grad)
        # Output Layer
        self.outputLayer = Layer(rng, n_hidden, n_out, "tanh", self.learning_rate, self.batch_grad)


        reg_lookup_table_value = rng.uniform(low=-0.01, high=0.01, size=(self.reg_V, n_hidden))
        self.reg_lookup_table = tf.Variable(np.asarray(reg_lookup_table_value), dtype=tf.float64, name='rlt')
        self.learning_rate[self.reg_lookup_table] = tf.Variable(np.ones(reg_lookup_table_value.shape),
                                                                 dtype=tf.float64, name='learnrate')

        print(reg_lookup_table_value.shape)
        self.batch_grad[self.reg_lookup_table] = tf.Variable(np.zeros(reg_lookup_table_value.shape),
                                                             dtype=tf.float64, name='batchgrad')
        self.params = (self.hiddenLayer.params + self.outputLayer.params +
                       self.skip_layer_ae.params + self.skip_layer_re.params +
                       [self.reg_lookup_table])
Example #9
    def __init__ (self, layerID, inputSize, numNeurons,
                  learningRate=0.001, momentumRate=0.9, dropout=None,
                  initialWeights=None, initialThresholds=None, activation=tanh,
                  randomNumGen=None) :
        Layer.__init__(self, layerID, learningRate, momentumRate,
                       dropout, activation)

        self._inputSize = (None, inputSize[1])
        self._numNeurons = numNeurons

        # create weights based on the optimal distribution for the activation
        if initialWeights is None or initialThresholds is None :
            self._initializeWeights(
                size=(self._inputSize[1], self._numNeurons),
                fanIn=self._inputSize[1],
                fanOut=self._numNeurons,
                randomNumGen=randomNumGen)
Example #10
    def __init__ (self, layerID, inputSize, numNeurons,
                  learningRate=0.001, momentumRate=0.9, dropout=None,
                  initialWeights=None, initialThresholds=None, activation=tanh,
                  randomNumGen=None) :
        Layer.__init__(self, layerID, learningRate, momentumRate, 
                       dropout, activation)

        self._inputSize = inputSize
        if isinstance(self._inputSize, six.integer_types) or \
           len(self._inputSize) != 2 :
            self._inputSize = (1, inputSize)
        self._numNeurons = numNeurons

        # create weights based on the optimal distribution for the activation
        if initialWeights is None or initialThresholds is None :
            self._initializeWeights(
                size=(self._inputSize[1], self._numNeurons), 
                fanIn=self._inputSize[1],
                fanOut=self._numNeurons,
                randomNumGen=randomNumGen)
Example #11
    def __init__(self, shape, config, dropout_probability=0.0):
        """
        Simple neural network object
        :param shape: dictionary of layers. Contains information required to build network
        :param config: configuration for the neural network
        """
        self.l_rate_bound = config['learning_rate_bounds']
        self.l_rate = self.l_rate_bound[1]
        self.decay_rate = config['decay_rate']
        self.default_dropout_chance = dropout_probability
        self.dropout_probability = self.default_dropout_chance
        self.momentum_parameter = config['momentum_parameter']

        self.epochs = config['epochs']
        self.loss_function = m.select_loss(config['loss'])
        self.batch_size = config['batch_size']

        self.batch_loss = 0.0

        # create input and output layers
        input_layer = InputLayer(shape["input"], self.l_rate)
        output_layer = OutputLayer(shape["output"],
                                   self.l_rate,
                                   loss=self.loss_function)

        # predictions
        self.predicts = []
        self.hit_count = 0.0

        # create hidden layers
        self.network = [input_layer]
        for layer in range(1, len(shape) - 1):
            self.network.append(
                Layer(shape["hidden_" + str(layer)], self.l_rate))
        self.network.append(output_layer)

        self.in_layer = self.network[0]
        self.out_layer = self.network[-1]

        # attach input and output
        self.in_layer.attach(None, self.network[1])
        self.out_layer.attach(self.network[-2], None)

        # attach the hidden layers
        for layer in range(1, len(self.network) - 1):
            self.network[layer].attach(self.network[layer - 1],
                                       self.network[layer + 1])
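
For reference, a hypothetical shape/config pair consistent with the keys this
constructor reads (the concrete values, the enclosing class name NeuralNetwork,
and the loss identifier passed to m.select_loss are assumptions):

shape = {
    "input": 784,       # input layer size
    "hidden_1": 128,    # one "hidden_<n>" entry per hidden layer
    "output": 10,       # output layer size
}
config = {
    "learning_rate_bounds": (0.0001, 0.01),  # index [1] is used as the starting rate
    "decay_rate": 0.95,
    "momentum_parameter": 0.9,
    "epochs": 20,
    "loss": "mse",                           # resolved through m.select_loss
    "batch_size": 32,
}
net = NeuralNetwork(shape, config, dropout_probability=0.2)
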
Example #12
    def __init__ (self, layerID, input, inputSize, kernelSize, 
                  downsampleFactor, learningRate=0.001, momentumRate=0.9,
                  dropout=None, initialWeights=None, initialThresholds=None,
                  activation=tanh, randomNumGen=None) :
        Layer.__init__(self, layerID, learningRate, momentumRate, dropout)

        # TODO: this check is likely unnecessary
        if inputSize[2] == kernelSize[2] or inputSize[3] == kernelSize[3] :
            raise ValueError('ConvolutionalLayer Error: ' +
                             'inputSize cannot equal kernelSize')
        if inputSize[1] != kernelSize[1] :
            raise ValueError('ConvolutionalLayer Error: ' +
                             'Number of Channels must match in ' +
                             'inputSize and kernelSize')
        from theano.tensor.nnet.conv import conv2d
        from theano.tensor.signal.downsample import max_pool_2d

        # theano variables don't actually preserve buffer sizing
        self.input = input if isinstance(input, tuple) else (input, input)

        self._inputSize = inputSize
        self._kernelSize = kernelSize
        self._downsampleFactor = downsampleFactor

        # setup initial values for the weights -- if necessary
        if initialWeights is None :
            # create an RNG if one is needed
            if randomNumGen is None :
                from numpy.random import RandomState
                from time import time
                randomNumGen = RandomState(int(time()))

            # this creates optimal initial weights by randomizing them
            # to an appropriate range around zero, which leads to better
            # convergence.
            downRate = np.prod(self._downsampleFactor)
            fanIn = np.prod(self._kernelSize[1:])
            fanOut = self._kernelSize[0] * \
                     np.prod(self._kernelSize[2:]) / downRate
            scaleFactor = np.sqrt(6. / (fanIn + fanOut))
            initialWeights = np.asarray(randomNumGen.uniform(
                    low=-scaleFactor, high=scaleFactor, size=self._kernelSize),
                    dtype=config.floatX)
        self._weights = shared(value=initialWeights, borrow=True)

        # setup initial values for the thresholds -- if necessary
        if initialThresholds is None :
            initialThresholds = np.zeros((self._kernelSize[0],),
                                         dtype=config.floatX)
        self._thresholds = shared(value=initialThresholds, borrow=True)

        def findLogits(input, weights, 
                       inputSize, kernelSize, downsampleFactor, thresholds) :
            # create a function to perform the convolution
            convolve = conv2d(input, weights, inputSize, kernelSize)

            # create a function to perform the max pooling
            pooling = max_pool_2d(convolve, downsampleFactor, True)

            # the output buffer is now connected to a sequence of operations
            return pooling + thresholds.dimshuffle('x', 0, 'x', 'x')

        outClass = findLogits(self.input[0], self._weights,
                              self._inputSize, self._kernelSize,
                              self._downsampleFactor, self._thresholds)
        outTrain = findLogits(self.input[1], self._weights,
                              self._inputSize, self._kernelSize,
                              self._downsampleFactor, self._thresholds)

        # determine dropout if requested
        if self._dropout is not None :
            # here there are two possible paths --
            # outClass : path of execution intended for classification. Here
            #            all neurons are present and weights must be scaled by
            #            the dropout factor. This ensures resultant 
            #            probabilities fall within intended bounds when all
            #            neurons are present.
            # outTrain : path of execution for training with dropout. Here each
            #            neuron's output goes through a Bernoulli Trial. This
            #            retains a neuron with the probability specified by the
            #            dropout factor.
            outClass = outClass / self._dropout
            outTrain = switch(self._randStream.binomial(
                size=self.getOutputSize()[1:], p=self._dropout), outTrain, 0)

        # activate the layer --
        # output is a tuple to represent two possible paths through the
        # computation graph. 
        self.output = (outClass, outTrain) if activation is None else \
                      (activation(outClass), activation(outTrain))

        # we can call this method to activate the layer
        self.activate = function([self.input[0]], self.output[0])
Example #13
class NN:
    def __init__(self, nodes, learning_rate=0.01):
        """
        Params:\n
        nodes []int:array of the number of neurons each layer has\n
        Inicial la red con los datos ingresados
        """
        # Layers
        self.hiddenLayers = []

        self.learningRate = learning_rate

        for i in range(1, len(nodes) - 1):
            self.hiddenLayers.append(Layer(nodes[i - 1], nodes[i]))

        self.outputLayer = Layer(nodes[-2], nodes[-1])

    def update(self, input):
        """
        Performs a prediction from the given input.
        """
        self.input = input
        self.hiddenLayers[0].predict(input)
        for i in range(1, len(self.hiddenLayers)):
            self.hiddenLayers[i].predict(self.hiddenLayers[i - 1].output())
        return self.outputLayer.predict(self.hiddenLayers[-1].output())

    def backPropagate(self, action, value):
        """
        Performs backpropagation through the network.
        """
        # Compute the error for the output layer
        error = value - self.outputLayer.output()[action]
        self.outputLayer.neurons[action].calculate_error(error)

        # Compute the error for the last hidden layer
        for i in range(self.hiddenLayers[-1].size()):
            error = self.outputLayer.neurons[action].error * \
                self.outputLayer.neurons[action].weights[i]
            self.hiddenLayers[-1].neurons[i].calculate_error(error)

        for i in range(len(self.hiddenLayers) - 2, -1, -1):
            for j in range(self.hiddenLayers[i].size()):
                error = 0.0
                for k in range(self.hiddenLayers[i + 1].size()):
                    error = error + \
                        self.hiddenLayers[i+1].neurons[k].error * \
                        self.hiddenLayers[i+1].neurons[k].weights[j]
                self.hiddenLayers[i].neurons[j].calculate_error(error)
                # hidden_deltas[i][j] = dsigmoid(self.ah[i][k]) * error

        for i in range(1, len(self.hiddenLayers)):
            for neuron in self.hiddenLayers[i].neurons:
                neuron.update_weights(self.learningRate,
                                      self.hiddenLayers[i - 1].output())
        for neuron in self.hiddenLayers[0].neurons:
            neuron.update_weights(self.learningRate, self.input)

    def output(self):
        """
        Returns the result.
        """
        return self.outputLayer.output()
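
A minimal usage sketch for this NN class, assuming a Layer/Neuron
implementation exposing the predict, output, neurons, calculate_error and
update_weights members referenced above (input values and target are
illustrative only):

# Network with 4 inputs, one hidden layer of 8 neurons and 2 outputs.
net = NN([4, 8, 2], learning_rate=0.01)

state = [0.1, 0.4, 0.3, 0.2]      # illustrative input vector
prediction = net.update(state)    # forward pass through all layers

action = 0                        # index of the output neuron to correct
target = 1.0                      # illustrative target value for that action
net.backPropagate(action, target)

print(net.output())
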
Example #14
 def __init__(self, f):
     Layer.__init__(self)
     self.f = f
Example #15
 def add(self, layer: Layer):
     self.layers.append(layer)
     layer.describe()
     if len(self.layers) > 1:
         self.layers[-1].connect(self.layers[-2])
Example #16
from nn.neuralnetwork import NeuralNetwork
from nn.dataset import Dataset
from nn.layer import Layer
from nn.utils import kfold_cv_generator

import numpy as np

if __name__ == '__main__':
    dataset = Dataset('iris.csv')
    data = dataset.data
    output_real = dataset.attr

    nn = NeuralNetwork()
    nn.add(Layer(4))
    nn.add(Layer(32, activation='relu'))
    nn.add(Layer(3, activation='sigmoid'))

    id_train, id_test = kfold_cv_generator(data, n_splits=8)

    kf = 1
    acc = []
    for train_idx, test_idx in zip(id_train, id_test):
        train = data.iloc[train_idx]
        test = data.iloc[test_idx]

        print("#FOLD: ", kf)
        score = nn.learn(train,
                         test,
                         output_real,
                         kf,
                         epochs=100,
Example #17
 def __str__(self) :
     '''Output Layer to String.'''
     s = ''
     s += '\tLayer Type         : ConvolutionalLayer\n'
     s += Layer.__str__(self)
     return s
Example #18
 def _setActivation(self, out) :
     from nn.layer import Layer
     from theano.tensor import round
     act = Layer._setActivation(self, out)
     return round(act, mode='half_away_from_zero') \
            if self._forceSparse else act
Example #19
 def __init__(self, shape):
     Layer.__init__(self)
     self.shape = shape
Example #20
 def __init__(self, p=0.5):
     Layer.__init__(self)
     self.p = p
     self.training = True
     self.rng = RandomStreams()
Example #21
 def __init__(self, pattern):
     Layer.__init__(self)
     self.pattern = pattern
Example #22
 def __setstate__(self, dict):
     '''Load layer pickle'''
     if hasattr(self, '_prePoolingInput'):
         delattr(self, '_prePoolingInput')
     Layer.__setstate__(self, dict)
Example #23
 def __getstate__(self):
     '''Save layer pickle'''
     dict = Layer.__getstate__(self)
     dict['_prePoolingInput'] = None
     return dict
Example #24
class Identity(Layer):
    __init__ = lambda self: Layer.__init__(self)
    forward = lambda self, input: input
Example #25
 def __str__(self) :
     '''Output Layer to String.'''
     s = ''
     s += '\tLayer Type         : ContiguousLayer\n'
     s += Layer.__str__(self)
     return s
Example #26
    def __init__ (self, layerID, input, inputSize, numNeurons,
                  learningRate=0.001, momentumRate=0.9, dropout=None,
                  initialWeights=None, initialThresholds=None, activation=tanh,
                  randomNumGen=None) :
        Layer.__init__(self, layerID, learningRate, momentumRate, dropout)

        # adjust the input for the correct number of dimensions        
        if isinstance(input, tuple) :
            if input[1].ndim > 2 : input = input[0].flatten(2), \
                                           input[1].flatten(2)
        else :
            if input.ndim > 2 : input = input.flatten(2)

        # store the input buffer -- this can either be a tuple or scalar
        # The input layer will only have a scalar, so it's duplicated here
        self.input = input if isinstance(input, tuple) else (input, input)
        self._inputSize = inputSize
        if isinstance(self._inputSize, six.integer_types) or \
           len(self._inputSize) != 2 :
            self._inputSize = (1, inputSize)
        self._numNeurons = numNeurons

        # setup initial values for the weights
        if initialWeights is None :
            # create an RNG if one is needed
            if randomNumGen is None :
               from numpy.random import RandomState
               from time import time
               randomNumGen = RandomState(int(time()))

            initialWeights = np.asarray(randomNumGen.uniform(
                low=-np.sqrt(6. / (self._inputSize[1] + self._numNeurons)),
                high=np.sqrt(6. / (self._inputSize[1] + self._numNeurons)),
                size=(self._inputSize[1], self._numNeurons)),
                dtype=config.floatX)
            if activation == sigmoid :
                initialWeights *= 4.
        self._weights = shared(value=initialWeights, borrow=True)

        # setup initial values for the thresholds
        if initialThresholds is None :
            initialThresholds = np.zeros((self._numNeurons,),
                                         dtype=config.floatX)
        self._thresholds = shared(value=initialThresholds, borrow=True)

        # create the logits
        def findLogit(input, weights, thresholds) :
            return dot(input, weights) + thresholds
        outClass = findLogit(self.input[0], self._weights, self._thresholds)
        outTrain = findLogit(self.input[1], self._weights, self._thresholds)

        # determine dropout if requested
        if self._dropout is not None :
            # here there are two possible paths --
            # outClass : path of execution intended for classification. Here
            #            all neurons are present and weights must be scaled by
            #            the dropout factor. This ensures resultant 
            #            probabilities fall within intended bounds when all
            #            neurons are present.
            # outTrain : path of execution for training with dropout. Here each
            #            neuron's output goes through a Bernoulli Trial. This
            #            retains a neuron with the probability specified by the
            #            dropout factor.
            outClass = outClass / self._dropout
            outTrain = switch(self._randStream.binomial(
                size=(self._numNeurons,), p=self._dropout), outTrain, 0)

        # activate the layer --
        # output is a tuple to represent two possible paths through the
        # computation graph. 
        self.output = (outClass, outTrain) if activation is None else \
                      (activation(outClass), activation(outTrain))

        # create a convenience function
        self.activate = function([self.input[0]], self.output[0])
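
A hypothetical call pattern for this layer, assuming it is the ContiguousLayer
referenced in Example #25 and that theano.config.floatX is float32 (the layer
ID, sizes and data below are illustrative only):

import numpy as np
from theano import tensor

x = tensor.matrix('x')
# Fully-connected layer of 10 neurons on 784 inputs; batch size left variable.
layer = ContiguousLayer('fc1', x, (1, 784), numNeurons=10)

batch = np.random.rand(5, 784).astype('float32')
print(layer.activate(batch).shape)    # expected: (5, 10)
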