def __init__(self, layers_shape, bias_unit=True):
    """ initialisation of the feed-forward neural network """
    # check minimum layer shape
    if len(layers_shape) < 2:
        raise FeedForwardNeuralNetworkError("can't init a neural net without at least the input and output sizes")
    # save the layer shape
    self._layers_shape = layers_shape
    self._layers_count = len(layers_shape)
    self._bias_unit = bias_unit
    # init layer input/output memory
    self._layer_input = list()
    self._layer_output = list()
    # init the neural layers: one weight matrix per pair of consecutive layers
    self._weights = []
    for s_in, s_out in zip(layers_shape[:-1], layers_shape[1:]):
        # random init with mean 0 (one extra input row when a bias unit is required)
        self._weights.append(2 * Matrix.random(s_in + bias_unit, s_out) - 1)
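# Minimal usage sketch for the constructor above. The enclosing class name
# FeedForwardNeuralNetwork and the Exception subclass below are assumptions
# (the constructor only shows those names exist), so treat this as an
# illustration rather than the module's actual API.
from MatrixVector import Matrix

class FeedForwardNeuralNetworkError(Exception):
    """ assumed: the error type raised by the constructor """
    pass

# with the default bias unit, [3, 4, 1] builds two weight matrices,
# shaped (3 + 1) x 4 and (4 + 1) x 1, with entries drawn from [-1, 1)
net = FeedForwardNeuralNetwork([3, 4, 1])
print len(net._weights)   # -> 2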
from MatrixVector import Matrix, Vector

##### 3 Layer Neural Net

# input data
X = Matrix([Vector([0., 0., 1.]),
            Vector([0., 1., 1.]),
            Vector([1., 0., 1.]),
            Vector([1., 1., 1.])])
y = Matrix([Vector([0., 1., 1., 0.])]).transpose()

# randomly initialize our weights with mean 0
syn0 = 2 * Matrix.random(3, 4) - 1
syn1 = 2 * Matrix.random(4, 1) - 1

for j in xrange(60000):
    # Feed forward through layers 0, 1, and 2
    l0 = X
    l1 = l0.dotProduct(syn0).nonlin()
    l2 = l1.dotProduct(syn1).nonlin()

    # how much did we miss the target value?
    l2_error = y - l2
    if (j % 10000) == 0:
        print "Error:" + str(l2_error.abs().mean())

    # in what direction is the target value?
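    # (continuation sketch: the backward pass is truncated above, so the lines
    #  below follow the same update rule as the single-synapse example in this
    #  repo — error times the sigmoid slope, propagated back layer by layer)
    l2_delta = l2_error * l2.nonlin(True)

    # how much did each l1 value contribute to the l2 error?
    l1_error = l2_delta.dotProduct(syn1.transpose())

    # in what direction is the target l1?
    l1_delta = l1_error * l1.nonlin(True)

    # weight updates
    syn1 = syn1 + l1.transpose().dotProduct(l2_delta)
    syn0 = syn0 + l0.transpose().dotProduct(l1_delta)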
from MatrixVector import Matrix, Vector

##### 3 Layer Neural Net (two output units)

# input data
X = Matrix([Vector([0., 0., 1.]),
            Vector([0., 1., 1.]),
            Vector([1., 0., 1.]),
            Vector([1., 1., 1.])])
y = Matrix([Vector([0., 1., 1., 0.]),
            Vector([0., 1., 0., 0.])]).transpose()

# randomly initialize our weights with mean 0
syn0 = 2 * Matrix.random(3, 4) - 1
syn1 = 2 * Matrix.random(4, 2) - 1

for j in xrange(60000):
    # Feed forward through layers 0, 1, and 2
    l0 = X
    l1 = l0.dotProduct(syn0).nonlin()
    l2 = l1.dotProduct(syn1).nonlin()

    # how much did we miss the target value?
    l2_error = y - l2
    if (j % 10000) == 0:
        print "Error:" + str(l2_error.abs().mean())

    # in what direction is the target value?
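    # (continuation sketch: with two output units the backward pass is
    #  line-for-line the same as the single-output version above; only the
    #  matrix shapes change)
    l2_delta = l2_error * l2.nonlin(True)
    l1_error = l2_delta.dotProduct(syn1.transpose())
    l1_delta = l1_error * l1.nonlin(True)
    syn1 = syn1 + l1.transpose().dotProduct(l2_delta)
    syn0 = syn0 + l0.transpose().dotProduct(l1_delta)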
from MatrixVector import Matrix, Vector

##### 3 Layer Neural Net (with bias units and a learning rate)

# input data
X = Matrix([Vector([0.0, 0.0, 1.0]),
            Vector([0.0, 1.0, 1.0]),
            Vector([1.0, 0.0, 1.0]),
            Vector([1.0, 1.0, 1.0])])
# prepend a column of ones to X: the bias unit for the input layer
Xones = Matrix(Matrix.ones(1, X.getRowLen())._matrix + X.transpose()._matrix).transpose()
y = Matrix([Vector([0.0, 1.0, 1.0, 0.0])]).transpose()

# randomly initialize our weights with mean 0 (input size + 1 for the bias unit)
syn0 = 2 * Matrix.random(4, 4) - 1
syn1 = 2 * Matrix.random(5, 1) - 1

# set the learning rate
alpha = 10

for j in xrange(1000):
    # Feed forward through layers 0, 1, and 2
    l0 = Xones
    l1 = l0.dotProduct(syn0).nonlin()
    # prepend the bias unit to the hidden layer as well
    l1 = Matrix(Matrix.ones(1, l0.getRowLen())._matrix + l1.transpose()._matrix).transpose()
    l2 = l1.dotProduct(syn1).nonlin()

    # how much did we miss the target value?
    l2_error = y - l2
    if (j % 100) == 0:
        print "Error:" + str(l2_error.abs().mean())
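    # (continuation sketch: the backward pass is truncated above; this follows
    #  the usual pattern, except that the bias column's delta must be dropped
    #  before updating syn0, since no weight in syn0 feeds the bias unit)
    l2_delta = l2_error * l2.nonlin(True)
    l1_error = l2_delta.dotProduct(syn1.transpose())
    l1_delta = l1_error * l1.nonlin(True)
    # strip the first (bias) column of the delta, mirroring how it was prepended
    l1_delta = Matrix(l1_delta.transpose()._matrix[1:]).transpose()

    # weight updates, scaled by the learning rate
    syn1 = syn1 + alpha * l1.transpose().dotProduct(l2_delta)
    syn0 = syn0 + alpha * l0.transpose().dotProduct(l1_delta)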
from MatrixVector import Matrix, Vector

##### 2 Layer Neural Net

# input data
X = Matrix([Vector([0., 0., 1.]),
            Vector([0., 1., 1.]),
            Vector([1., 0., 1.]),
            Vector([1., 1., 1.])])
y = Matrix([Vector([0., 0., 1., 1.])]).transpose()

# randomly initialize our weights with mean 0
syn0 = 2 * Matrix.random(3, 1) - 1
print syn0

for iter in xrange(10000):
    # forward propagation
    l0 = X
    l1 = (l0.dotProduct(syn0)).nonlin()

    # error eval
    l1_error = y - l1

    # multiply how much we missed by the
    # slope of the sigmoid at the values in l1
    l1_delta = l1_error * l1.nonlin(True)

    # weight update
    syn0 = syn0 + (l0.transpose()).dotProduct(l1_delta)
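# (after training, a quick sanity check — run the forward pass once more and
#  compare against y; the outputs should be close to 0, 0, 1, 1)
print "Output after training:"
print (X.dotProduct(syn0)).nonlin()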
from MatrixVector import Matrix, Vector


def append_bias(mtx):
    # prepend a row of ones to the transposed matrix, i.e. add a bias
    # column of ones as the first column of mtx
    return Matrix(Matrix.ones(1, mtx.getRowLen())._matrix + mtx.transpose()._matrix).transpose()
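# (usage sketch: for the 4 x 3 input matrix X used throughout these examples,
#  append_bias(X) yields a 4 x 4 matrix whose first column is all ones)
X = Matrix([Vector([0., 0., 1.]),
            Vector([0., 1., 1.]),
            Vector([1., 0., 1.]),
            Vector([1., 1., 1.])])
print append_bias(X)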
from MatrixVector import Matrix, Vector


def append_bias(mtx):
    # add a bias column of ones as the first column of mtx
    return Matrix(Matrix.ones(1, mtx.getRowLen())._matrix + mtx.transpose()._matrix).transpose()


def remove_bias(mtx):
    # drop the first (bias) column of mtx
    return Matrix(mtx.transpose()._matrix[1:]).transpose()


##### 4 Layer Neural Net

# input data
X = Matrix([Vector([0.0, 0.0, 1.0]),
            Vector([0.0, 1.0, 1.0]),
            Vector([1.0, 0.0, 1.0]),
            Vector([1.0, 1.0, 1.0])])
Xones = append_bias(X)
y = Matrix([Vector([0.0, 1.0, 1.0, 0.0])]).transpose()

# randomly initialize our weights with mean 0 (input size + 1 for the bias unit)
syn0 = 2 * Matrix.random(4, 4) - 1
syn1 = 2 * Matrix.random(5, 4) - 1
syn2 = 2 * Matrix.random(5, 1) - 1

# set the learning rate
alpha = 0.07

for j in xrange(10000):
    # Feed forward through layers 0, 1, 2 and 3
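    # (continuation sketch: the body of the loop is truncated above; this
    #  mirrors the 3-layer bias example — append the bias unit after each
    #  hidden activation, and remove it from the deltas on the way back)
    l0 = Xones
    l1 = append_bias(l0.dotProduct(syn0).nonlin())
    l2 = append_bias(l1.dotProduct(syn1).nonlin())
    l3 = l2.dotProduct(syn2).nonlin()

    # how much did we miss the target value?
    l3_error = y - l3
    if (j % 1000) == 0:
        print "Error:" + str(l3_error.abs().mean())

    # backward pass: error times sigmoid slope, layer by layer
    l3_delta = l3_error * l3.nonlin(True)
    l2_delta = remove_bias(l3_delta.dotProduct(syn2.transpose()) * l2.nonlin(True))
    l1_delta = remove_bias(l2_delta.dotProduct(syn1.transpose()) * l1.nonlin(True))

    # weight updates, scaled by the learning rate
    syn2 = syn2 + alpha * l2.transpose().dotProduct(l3_delta)
    syn1 = syn1 + alpha * l1.transpose().dotProduct(l2_delta)
    syn0 = syn0 + alpha * l0.transpose().dotProduct(l1_delta)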