def create_regressor(rng=np.random, batchsize=1, window=240, input=1, dropout=0.25):
    print('inside create_regressor')
    return Network(
        #DropoutLayer(amount=dropout, rng=rng),
        Conv1DLayer(filter_shape=(64, input, 45), input_shape=(batchsize, input, window), rng=rng),
        BiasLayer(shape=(64, 1)),
        ActivationLayer(),

        #DropoutLayer(amount=dropout, rng=rng),
        Conv1DLayer(filter_shape=(128, 64, 25), input_shape=(batchsize, 64, window), rng=rng),
        BiasLayer(shape=(128, 1)),
        ActivationLayer(),

        #DropoutLayer(amount=dropout, rng=rng),
        Conv1DLayer(filter_shape=(256, 128, 15), input_shape=(batchsize, 128, window), rng=rng),
        BiasLayer(shape=(256, 1)),
        ActivationLayer(),

        Pool1DLayer(input_shape=(batchsize, 256, window)))
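
# Network/Conv1DLayer/Pool1DLayer above are external classes whose exact semantics
# are assumed. A minimal pure-NumPy sketch of the shape flow, assuming 'same'-padded
# convolutions (so the window length 240 is preserved and only channels change):
import numpy as np

def conv1d_same(x, W):
    # x: (in_channels, T), W: (out_channels, in_channels, width) -> (out_channels, T)
    out_ch, in_ch, width = W.shape
    xp = np.pad(x, ((0, 0), (width // 2, width // 2)))  # zero-pad so output length == T
    y = np.zeros((out_ch, x.shape[1]))
    for t in range(x.shape[1]):
        y[:, t] = np.tensordot(W, xp[:, t:t + width], axes=([1, 2], [0, 1]))
    return y

x = np.random.randn(1, 240)
for f in [np.random.randn(64, 1, 45), np.random.randn(128, 64, 25), np.random.randn(256, 128, 15)]:
    x = np.maximum(conv1d_same(x, f), 0)  # ReLU stands in for ActivationLayer()
print(x.shape)  # (256, 240): each 'same' convolution keeps the window length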
def create_footstepper(rng=np.random, batchsize=1, window=250, dropout=0.25):
    return Network(
        DropoutLayer(amount=dropout, rng=rng),
        Conv1DLayer(filter_shape=(64, 3, 65), input_shape=(batchsize, 3, window), rng=rng),
        BiasLayer(shape=(64, 1)),
        ActivationLayer(),

        DropoutLayer(amount=dropout, rng=rng),
        Conv1DLayer(filter_shape=(5, 64, 45), input_shape=(batchsize, 64, window), rng=rng),
        BiasLayer(shape=(5, 1)),
    )
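
# DropoutLayer's implementation isn't shown in this file. A common 'inverted
# dropout' sketch, assuming `amount` is the probability of dropping a unit at
# train time (an assumption about this library's convention):
import numpy as np

def dropout(x, amount=0.25, rng=np.random, train=True):
    if not train or amount == 0.0:
        return x
    mask = rng.binomial(1, 1.0 - amount, size=x.shape)
    return x * mask / (1.0 - amount)  # rescale so the expected activation is unchanged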
def __init__(self, rng=rng, input_shape=1, output_shape=1, dropout=0.7):

    self.nslices = 4
    self.dropout0 = DropoutLayer(dropout, rng=rng)
    self.dropout1 = DropoutLayer(dropout, rng=rng)
    self.dropout2 = DropoutLayer(dropout, rng=rng)
    self.activation = ActivationLayer('ELU')

    self.W0 = HiddenLayer((self.nslices, 512, input_shape - 1), rng=rng, gamma=0.01)
    self.W1 = HiddenLayer((self.nslices, 512, 512), rng=rng, gamma=0.01)
    self.W2 = HiddenLayer((self.nslices, output_shape, 512), rng=rng, gamma=0.01)

    self.b0 = BiasLayer((self.nslices, 512))
    self.b1 = BiasLayer((self.nslices, 512))
    self.b2 = BiasLayer((self.nslices, output_shape))

    self.layers = [
        self.W0, self.W1, self.W2,
        self.b0, self.b1, self.b2]

    self.params = sum([layer.params for layer in self.layers], [])
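
# With nslices = 4 this resembles a phase-functioned network (Holden et al.),
# where the four weight slices are blended by a Catmull-Rom spline over a phase
# variable. That is an assumption about this code; `cubic` and `blend_slices`
# below are hypothetical helpers sketching the idea:
import numpy as np

def cubic(y0, y1, y2, y3, mu):
    # Catmull-Rom interpolation between y1 and y2, with mu in [0, 1]
    return ((-0.5 * y0 + 1.5 * y1 - 1.5 * y2 + 0.5 * y3) * mu ** 3
            + (y0 - 2.5 * y1 + 2.0 * y2 - 0.5 * y3) * mu ** 2
            + (-0.5 * y0 + 0.5 * y2) * mu + y1)

def blend_slices(W, phase):
    # W: (4, rows, cols), phase in [0, 2*pi) -> one interpolated weight matrix
    pscale = 4 * phase / (2 * np.pi)
    k, mu = int(pscale) % 4, pscale % 1.0
    return cubic(W[(k - 1) % 4], W[k], W[(k + 1) % 4], W[(k + 2) % 4], mu)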
def createcore_rightleg(rng=np.random, batchsize=1, window=240, dropout=0.25, depooler='random'):
    return Network(
        Network(
            DropoutLayer(amount=dropout, rng=rng),
            Conv1DLayer(filter_shape=(256, 12, 25), input_shape=(batchsize, 12, window), rng=rng),
            BiasLayer(shape=(256, 1)),
            ActivationLayer(),
            Pool1DLayer(input_shape=(batchsize, 256, window)),
        ),

        Network(
            # pass the parameter through instead of hard-coding 'random'
            Depool1DLayer(output_shape=(batchsize, 256, window), depooler=depooler, rng=rng),
            DropoutLayer(amount=dropout, rng=rng),
            Conv1DLayer(filter_shape=(12, 256, 25), input_shape=(batchsize, 256, window), rng=rng),
            BiasLayer(shape=(12, 1))))
def create_core(rng=np.random, batchsize=1, window=240, dropout=0.25, depooler='random'):
    print('inside create_core')
    return Network(
        Network(
            DropoutLayer(amount=dropout, rng=rng),
            Conv1DLayer(filter_shape=(256, 73, 25), input_shape=(batchsize, 73, window), rng=rng),
            BiasLayer(shape=(256, 1)),
            ActivationLayer(),
            Pool1DLayer(input_shape=(batchsize, 256, window)),
        ),

        Network(
            # pass the parameter through instead of hard-coding 'random'
            Depool1DLayer(output_shape=(batchsize, 256, window), depooler=depooler, rng=rng),
            DropoutLayer(amount=dropout, rng=rng),
            Conv1DLayer(filter_shape=(73, 256, 25), input_shape=(batchsize, 256, window), rng=rng),
            BiasLayer(shape=(73, 1))))
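
# The Pool1DLayer/Depool1DLayer pair isn't defined here. A plausible NumPy sketch,
# assuming max-pooling by a factor of 2 over an even-length time axis and a
# 'random' depooler that routes each pooled value back to one of the two
# positions it came from (both assumptions about this library):
import numpy as np

def pool1d(x):  # x: (channels, T) -> (channels, T // 2), T assumed even
    return x.reshape(x.shape[0], -1, 2).max(axis=2)

def depool1d_random(h, rng=np.random):  # h: (channels, T // 2) -> (channels, T)
    choice = rng.binomial(1, 0.5, size=h.shape)  # pick slot 0 or 1 per value
    up = np.stack([h * (1 - choice), h * choice], axis=2)
    return up.reshape(h.shape[0], -1)

x = np.random.randn(256, 240)
print(depool1d_random(pool1d(x)).shape)  # (256, 240)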
# Assumed setup (not shown in the original snippet): MNIST loaded via Keras and
# the from-scratch layer/loss modules imported; module paths are assumptions.
# from keras.datasets import mnist
# from keras.utils import np_utils
# (x_train, y_train), (x_test, y_test) = mnist.load_data()

# Flatten images to (n, 1, 784), scale to [0, 1], one-hot encode labels
x_train = x_train.reshape(x_train.shape[0], 1, 28 * 28)
x_train = x_train.astype('float32')
x_train /= 255
y_train = np_utils.to_categorical(y_train)

x_test = x_test.reshape(x_test.shape[0], 1, 28 * 28)
x_test = x_test.astype('float32')
x_test /= 255
y_test = np_utils.to_categorical(y_test)

# Neural network
net = Network()
net.add(FCLayer(28 * 28, 100))  # input_shape=(1, 28*28) ; output_shape=(1, 100)
net.add(ActivationLayer(tanh, tanh_prime))
net.add(FCLayer(100, 50))       # input_shape=(1, 100) ; output_shape=(1, 50)
net.add(ActivationLayer(tanh, tanh_prime))
net.add(FCLayer(50, 10))        # input_shape=(1, 50) ; output_shape=(1, 10)
net.add(ActivationLayer(tanh, tanh_prime))

net.use(mse, mse_prime)
net.fit(x_train[0:1000], y_train[0:1000], epochs=35, learning_rate=0.1)

# Test on 3 samples
out = net.predict(x_test[0:3])
net.print_error()
print("\n")
print("predicted values : ")
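
# Hypothetical continuation (the original snippet is truncated right after the
# "predicted values : " print): show each predicted digit next to the true label.
import numpy as np

for i in range(3):
    print("sample %d: predicted %d, true %d" % (i, np.argmax(out[i]), np.argmax(y_test[i])))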
def __init__(self, rng=rng, input_shape=1, output_shape=1, dropout=0.7):

    self.nslices = 4
    self.dropout0 = DropoutLayer(dropout, rng=rng)
    self.dropout1 = DropoutLayer(dropout, rng=rng)
    self.dropout2 = DropoutLayer(dropout, rng=rng)
    self.activation = ActivationLayer('ELU')

    self.W0 = HiddenLayer((self.nslices, 512, input_shape - 1), rng=rng, gamma=0.01)
    self.W1 = HiddenLayer((self.nslices, 512, 512), rng=rng, gamma=0.01)
    self.W2 = HiddenLayer((self.nslices, output_shape, 512), rng=rng, gamma=0.01)

    self.b0 = BiasLayer((self.nslices, 512))
    self.b1 = BiasLayer((self.nslices, 512))
    self.b2 = BiasLayer((self.nslices, output_shape))

    # One diagonal (residual-adapter) layer and bias per style category
    self.ang_W = DiagLayer((self.nslices, 1, 512), rng=rng, gamma=0.01)
    self.ang_b = BiasLayer((self.nslices, 512))
    self.chi_W = DiagLayer((self.nslices, 1, 512), rng=rng, gamma=0.01)
    self.chi_b = BiasLayer((self.nslices, 512))
    self.dep_W = DiagLayer((self.nslices, 1, 512), rng=rng, gamma=0.01)
    self.dep_b = BiasLayer((self.nslices, 512))
    self.neu_W = DiagLayer((self.nslices, 1, 512), rng=rng, gamma=0.01)
    self.neu_b = BiasLayer((self.nslices, 512))
    self.old_W = DiagLayer((self.nslices, 1, 512), rng=rng, gamma=0.01)
    self.old_b = BiasLayer((self.nslices, 512))
    self.pro_W = DiagLayer((self.nslices, 1, 512), rng=rng, gamma=0.01)
    self.pro_b = BiasLayer((self.nslices, 512))
    self.sex_W = DiagLayer((self.nslices, 1, 512), rng=rng, gamma=0.01)
    self.sex_b = BiasLayer((self.nslices, 512))
    self.str_W = DiagLayer((self.nslices, 1, 512), rng=rng, gamma=0.01)
    self.str_b = BiasLayer((self.nslices, 512))

    self.layers = [
        self.W0, self.W1, self.W2,
        self.b0, self.b1, self.b2,
        self.ang_W, self.ang_b,
        self.chi_W, self.chi_b,
        self.dep_W, self.dep_b,
        self.neu_W, self.neu_b,
        self.old_W, self.old_b,
        self.pro_W, self.pro_b,
        self.sex_W, self.sex_b,
        self.str_W, self.str_b]

    self.params = sum([layer.params for layer in self.layers], [])

    # Binary frame masks over the columns of the module-level data matrix L:
    # one contiguous block of w frames per style, in the order ang..str
    ang_label = np.zeros(L.shape[1])
    ang_label[w * 0:w * 1] = 1
    chi_label = np.zeros(L.shape[1])
    chi_label[w * 1:w * 2] = 1
    dep_label = np.zeros(L.shape[1])
    dep_label[w * 2:w * 3] = 1
    neu_label = np.zeros(L.shape[1])
    neu_label[w * 3:w * 4] = 1
    old_label = np.zeros(L.shape[1])
    old_label[w * 4:w * 5] = 1
    pro_label = np.zeros(L.shape[1])
    pro_label[w * 5:w * 6] = 1
    sex_label = np.zeros(L.shape[1])
    sex_label[w * 6:w * 7] = 1
    str_label = np.zeros(L.shape[1])
    str_label[w * 7:w * 8] = 1

    self.ang_label = theano.shared(ang_label, borrow=True)
    self.chi_label = theano.shared(chi_label, borrow=True)
    self.dep_label = theano.shared(dep_label, borrow=True)
    self.neu_label = theano.shared(neu_label, borrow=True)
    self.old_label = theano.shared(old_label, borrow=True)
    self.pro_label = theano.shared(pro_label, borrow=True)
    self.sex_label = theano.shared(sex_label, borrow=True)
    self.str_label = theano.shared(str_label, borrow=True)

    zeros = np.zeros((1, output_shape))
    self.zeros = T.addbroadcast(theano.shared(zeros, borrow=True), 0)
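
# A minimal sketch of how those per-style frame masks behave, with hypothetical
# stand-ins for the module-level L (data matrix, frames in columns) and w
# (frames per style block):
import numpy as np

w = 3
L = np.zeros((10, 8 * w))        # 8 style blocks of w frames each
ang_label = np.zeros(L.shape[1])
ang_label[w * 0:w * 1] = 1       # 1 exactly on the first style's frames
print(ang_label)                 # [1. 1. 1. 0. 0. 0. ... 0.]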
""" import numpy as np from Network import Network from FCLayer import FullConectedLayer from ActivationLayer import ActivationLayer from activation import tanh, tanh_prime from loss import mse, mse_prime # Donnée d'apprentissage x_train = np.array([[[0, 0]], [[0, 1]], [[1, 0]], [[1, 1]]]) y_train = np.array([[[0]], [[1]], [[1]], [[0]]]) # Création du réseau model = Network() model.add(FullConectedLayer(2, 3)) model.add(ActivationLayer(tanh, tanh_prime)) model.add(FullConectedLayer(3, 1)) model.add(ActivationLayer(tanh, tanh_prime)) # Entrainement model.use(mse, mse_prime) model.fit(x_train, y_train, epochs=1000, learning_rate=0.1) # Test model = model.predict(x_train) model.print_error() print(model)
def __init__(self, rng=rng, input_shape=1, output_shape=1, dropout=0.7,
             dropout_res=0.5, style='Balance', batchsize=20):

    self.style = style
    self.batchsize = batchsize
    self.nslices = 4
    self.dropout0 = DropoutLayer(dropout, rng=rng)
    self.dropout1 = DropoutLayer(dropout, rng=rng)
    self.dropout_res = DropoutLayer(dropout_res, rng=rng)
    self.dropout2 = DropoutLayer(dropout, rng=rng)
    self.activation = ActivationLayer('ELU')

    W0_load = np.empty((self.nslices, 512, input_shape - 1), dtype=np.float32)
    W1_load = np.empty((self.nslices, 512, 512), dtype=np.float32)
    W2_load = np.empty((self.nslices, output_shape, 512), dtype=np.float32)
    b0_load = np.empty((self.nslices, 512), dtype=np.float32)
    b1_load = np.empty((self.nslices, 512), dtype=np.float32)
    b2_load = np.empty((self.nslices, output_shape), dtype=np.float32)

    # Pretrained weights for the four slices are stored as raw float32 files,
    # named by phase percentage: int(i * 12.5) gives 000, 012, 025, 037
    for i in range(4):
        W0_load[i] = np.fromfile('./Parameters/' + mname + '/W0_%03i.bin' % int(i * 12.5),
                                 dtype=np.float32).reshape(512, input_shape - 1)
        W1_load[i] = np.fromfile('./Parameters/' + mname + '/W1_%03i.bin' % int(i * 12.5),
                                 dtype=np.float32).reshape(512, 512)
        W2_load[i] = np.fromfile('./Parameters/' + mname + '/W2_%03i.bin' % int(i * 12.5),
                                 dtype=np.float32).reshape(output_shape, 512)
        b0_load[i] = np.fromfile('./Parameters/' + mname + '/b0_%03i.bin' % int(i * 12.5), dtype=np.float32)
        b1_load[i] = np.fromfile('./Parameters/' + mname + '/b1_%03i.bin' % int(i * 12.5), dtype=np.float32)
        b2_load[i] = np.fromfile('./Parameters/' + mname + '/b2_%03i.bin' % int(i * 12.5), dtype=np.float32)

    self.W0 = HiddenLayer((self.nslices, 512, input_shape - 1), rng=rng, gamma=0.01)
    self.W1 = HiddenLayer((self.nslices, 512, 512), rng=rng, gamma=0.01)
    self.W2 = HiddenLayer((self.nslices, output_shape, 512), rng=rng, gamma=0.01)
    self.b0 = BiasLayer((self.nslices, 512))
    self.b1 = BiasLayer((self.nslices, 512))
    self.b2 = BiasLayer((self.nslices, output_shape))

    self.W0.W.set_value(W0_load)
    self.W1.W.set_value(W1_load)
    self.W2.W.set_value(W2_load)
    self.b0.b.set_value(b0_load)
    self.b1.b.set_value(b1_load)
    self.b2.b.set_value(b2_load)

    self.style_W0 = DiagLayer((self.nslices, 1, 512), rng=rng, gamma=0.01)
    self.style_b = BiasLayer((self.nslices, 512))

    # The only parameters we want to update are the residual adapter ones
    self.layers = [self.style_W0, self.style_b]
    self.params = sum([layer.params for layer in self.layers], [])

    # Frame mask selecting this style's block of w columns in the data matrix L
    style_label = np.zeros(L.shape[1])
    style_label[w * styletransfer_styles.index(self.style):
                w * (styletransfer_styles.index(self.style) + 1)] = 1
    self.style_label = theano.shared(style_label, borrow=True)

    zeros = np.zeros((1, output_shape))
    self.zeros = T.addbroadcast(theano.shared(zeros, borrow=True), 0)
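
# Reading the parameters with np.fromfile implies the .bin files are raw float32
# arrays with no header. A round-trip sketch with a hypothetical file name:
import numpy as np

W = np.random.randn(512, 31).astype(np.float32)
W.tofile('W0_demo.bin')                   # flat float32 bytes, no shape metadata
W2 = np.fromfile('W0_demo.bin', dtype=np.float32).reshape(512, 31)
assert np.array_equal(W, W2)              # the reshape must supply the shape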
import numpy as np

from NeuralNetwork import NeuralNetwork
from FullyConnectedLayer import FullyConnectedLayer
from ActivationLayer import ActivationLayer
from ActivationFunctions import tanh, tanhDerivative
from LossFunction import meanSquaredError, meanSquaredErrorDerivative

# Sample training data (XOR)
inputData = np.array([[[0, 0]], [[0, 1]], [[1, 0]], [[1, 1]]])
expectedOutput = np.array([[[0]], [[1]], [[1]], [[0]]])

# A network with 3 nodes in the first hidden layer and 1 node in the final layer,
# with an activation function after each layer
network = NeuralNetwork()
network.add(FullyConnectedLayer(2, 3))
network.add(ActivationLayer(tanh, tanhDerivative))
network.add(FullyConnectedLayer(3, 1))
network.add(ActivationLayer(tanh, tanhDerivative))

# Training network
network.setLoss(meanSquaredError, meanSquaredErrorDerivative)
network.train(inputData, expectedOutput, epochs=1000, learningRate=.1)

# Test the network (loop variable renamed from `set`, which shadows the builtin)
output = network.predict(inputData)
for i in range(len(inputData)):
    print("For set {} my prediction is {}. The correct value is {}".format(
        inputData[i], output[i], expectedOutput[i]))