def test_forward(self):
    input_ = np.random.random((5, 3))
    nn = NN.NN(loss_func=loss_fs.MeanSquaredLoss())
    nn.add_layer(
        layers.DenseLayer(n_neurons=5, activation_func=af.Sigmoid()))
    nn.add_layer(
        layers.DenseLayer(n_neurons=7, activation_func=af.Sigmoid()))
    rv = nn.forward(input_)
    assert rv.shape == (5, 7)
def test_backward(self):
    input_ = np.random.random((5, 3))
    output_grad = np.random.random((5, 7))
    nn = NN.NN(loss_func=loss_fs.MeanSquaredLoss())
    nn.add_layer(
        layers.DenseLayer(n_neurons=4, activation_func=af.Sigmoid()))
    nn.add_layer(
        layers.DenseLayer(n_neurons=7, activation_func=af.Sigmoid()))
    nn.forward(input_)
    nn.backward(output_grad)
def test_eval(self):
    input_ = np.random.random((5, 3))
    true = np.random.random((5, 4))
    nn = NN.NN(loss_func=loss_fs.MeanSquaredLoss())
    nn.add_layer(
        layers.DenseLayer(n_neurons=8, activation_func=af.Sigmoid()))
    nn.add_layer(
        layers.DenseLayer(n_neurons=4, activation_func=af.Sigmoid()))
    loss = nn.eval(input_, true)
    assert loss > 0
    assert list(nn.parameters) != []
    assert list(nn.parameter_gradients) != []
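# A hedged extension of test_eval: a finite-difference spot check of
# nn.parameter_gradients. It assumes (beyond what the tests above show) that
# nn.parameters yields writable numpy arrays viewing the layer weights, and
# that nn.eval returns the loss while refreshing nn.parameter_gradients.
def check_parameter_gradients(self, nn, input_, true, eps=1e-6, atol=1e-4):
    nn.eval(input_, true)                                   # populate gradients
    grads = [np.array(g) for g in nn.parameter_gradients]   # snapshot them
    for p, g in zip(nn.parameters, grads):
        idx = (0,) * p.ndim                                 # probe one entry
        old = p[idx]
        p[idx] = old + eps
        loss_plus = nn.eval(input_, true)
        p[idx] = old - eps
        loss_minus = nn.eval(input_, true)
        p[idx] = old                                        # restore the weight
        approx = (loss_plus - loss_minus) / (2 * eps)
        assert np.isclose(g[idx], approx, atol=atol)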
def test_Sigmoid_backward(self):
    input_ = np.random.random((5, 4))
    output_grad = np.random.random((5, 4))
    op = af.Sigmoid()
    op.forward(input_)
    rv = op.backward(output_grad)
    assert rv.shape == input_.shape
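# A hedged value check to complement the shape test above, assuming
# op.backward(output_grad) returns output_grad * sigmoid'(x) for the x last
# seen by op.forward (the tests only pin down shapes, not values):
def check_Sigmoid_gradient_values(self, eps=1e-6):
    x = np.random.random((5, 4))
    op = af.Sigmoid()
    op.forward(x)
    analytic = op.backward(np.ones_like(x))              # unit upstream gradient
    sig = lambda z: 1.0 / (1.0 + np.exp(-z))
    numeric = (sig(x + eps) - sig(x - eps)) / (2 * eps)  # central difference
    assert np.allclose(analytic, numeric, atol=1e-6)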
def test_nbr_neuron(list_test):
    """Train one network per neuron count in list_test and plot the
    training round error of each run."""
    color_list = ['r', 'g', 'b', 'k', 'm', 'c', 'y']
    color_list *= 3
    k = 0
    for i in list_test:
        my_layer1 = Layer.Linear(6, i)
        my_layer2 = ActivationFunctions.Tanh()
        my_layer3 = Layer.Linear(i, i)
        my_layer4 = ActivationFunctions.Tanh()
        my_layer5 = Layer.Linear(i, 1)
        my_layer6 = ActivationFunctions.Sigmoid()
        my_NN = Neural_network.NeuralNet([my_layer1, my_layer2, my_layer3,
                                          my_layer4, my_layer5, my_layer6])
        chi2_list, error_list = User.train(my_NN,
                                           data_train_input,
                                           data_train_target,
                                           num_epochs=num_epoch_max,
                                           optimizer=Optimizer.SGD(lr=my_lr),
                                           batch_size=my_batch_size)
        data_test_prediction = User.prediction(my_NN, data_test_input)
        error_final = Error_round.error_round(data_test_prediction,
                                              data_test_target)
        plt.plot(range(num_epoch_max), error_list, label=str(i),
                 c=color_list[k])
        plt.plot([num_epoch_max], [error_final], marker='o', c=color_list[k])
        plt.xlabel('Epoch')
        plt.ylabel('Training round error')
        k += 1
    plt.legend(title='Neurons')
    plt.title('Optimisation of the number of neurons')
    plt.show()
def test_DenseLayer_backward(self):
    input_ = np.random.random((5, 4))
    valid_output_grad = np.random.random((5, 3))
    layer = layers.DenseLayer(3, activation_func=af.Sigmoid())
    layer.forward(input_)
    rv = layer.backward(valid_output_grad)
    assert rv.shape == input_.shape
def test_nbr_layer(list_test, n_neuron):
    """Train one network per hidden-layer count in list_test and plot the
    training round error of each run."""
    color_list = ['r', 'g', 'b', 'k', 'm', 'c', 'y']
    color_list *= 3
    k = 0
    for i in list_test:
        # Build fresh layers for every network: reusing the same Layer
        # objects across iterations would carry trained weights over from
        # one run to the next and bias the comparison.
        layers_new = [Layer.Linear(6, n_neuron), ActivationFunctions.Tanh()]
        for j in range(i):
            layers_new += [Layer.Linear(n_neuron, n_neuron),
                           ActivationFunctions.Tanh()]
        layers_new += [Layer.Linear(n_neuron, 1),
                       ActivationFunctions.Sigmoid()]
        my_NN = Neural_network.NeuralNet(layers_new)
        chi2_list, error_list = User.train(my_NN,
                                           data_train_input,
                                           data_train_target,
                                           num_epochs=num_epoch_max,
                                           optimizer=Optimizer.SGD(lr=my_lr),
                                           batch_size=my_batch_size)
        data_test_prediction = User.prediction(my_NN, data_test_input)
        error_final = Error_round.error_round(data_test_prediction,
                                              data_test_target)
        plt.plot(range(num_epoch_max), error_list, label=str(i),
                 c=color_list[k])
        plt.plot([num_epoch_max], [error_final], marker='o', c=color_list[k])
        plt.xlabel('Epoch')
        plt.ylabel('Training round error')
        k += 1
    plt.legend(title='Hidden layers')
    plt.title('Optimisation of the number of hidden layers')
    plt.show()
def gate_transform(self, affine_gates):
    """Apply the gate non-linearities in place.

    The rows of affine_gates hold the four stacked gate pre-activations,
    each block hl_size rows tall, in the order [i; f; o; g]. The input,
    forget, and output gates use a sigmoid; the cell candidate uses tanh.
    """
    h = self.hl_size
    affine_gates[0:h, :] = activation_functions.Sigmoid().transform(
        self.i(affine_gates))
    affine_gates[h:2 * h, :] = activation_functions.Sigmoid().transform(
        self.f(affine_gates))
    affine_gates[2 * h:3 * h, :] = activation_functions.Sigmoid().transform(
        self.o(affine_gates))
    affine_gates[3 * h:, :] = activation_functions.Tanh().transform(
        self.g(affine_gates))
    return affine_gates
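# For context, a minimal numpy sketch of how gates transformed this way feed
# the standard LSTM state update. The names below (lstm_step, c_prev) are
# illustrative, not part of this class; only the [i; f; o; g] row layout is
# taken from gate_transform above.
import numpy as np

def lstm_step(transformed_gates, c_prev, h):
    i = transformed_gates[0:h, :]          # input gate, sigmoid
    f = transformed_gates[h:2 * h, :]      # forget gate, sigmoid
    o = transformed_gates[2 * h:3 * h, :]  # output gate, sigmoid
    g = transformed_gates[3 * h:, :]       # cell candidate, tanh
    c = f * c_prev + i * g                 # new cell state
    h_out = o * np.tanh(c)                 # new hidden state
    return h_out, c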
def _create_activations(self):
    """Assign activation functions neuron by neuron: Sigmoid for
    even-indexed neurons, Tanh for odd-indexed ones."""
    funs = []
    for n in self.n_neurons_per_layer:
        F = []
        for i in range(n):
            f = af.Tanh()
            if i % 2 == 0:
                f = af.Sigmoid()
            F.append(f)
        funs.append(np.array(F))
    # dtype=object keeps the ragged per-layer arrays; a plain np.array()
    # call raises on ragged input in recent numpy versions.
    self.activation_funs = np.array(funs, dtype=object)
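# A standalone check of the alternation rule used above: even-indexed neurons
# get a Sigmoid, odd-indexed ones a Tanh (a worked example, not library code).
pattern = ['Sigmoid' if i % 2 == 0 else 'Tanh' for i in range(5)]
print(pattern)  # ['Sigmoid', 'Tanh', 'Sigmoid', 'Tanh', 'Sigmoid']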
def test_XOR():
    my_layer1 = Layer.Linear(2, 3)
    my_layer2 = ActivationFunctions.Tanh()
    my_layer3 = Layer.Linear(3, 1)
    my_layer4 = ActivationFunctions.Sigmoid()
    # my_layer3 = lib2.Arondi()
    my_NN = Neural_network.NeuralNet([my_layer1, my_layer2, my_layer3,
                                      my_layer4])
    input_ = np.array([[0, 0], [1, 0], [0, 1], [1, 1]])
    target = np.array([[0], [1], [1], [0]])
    # Be careful: size_training must equal the number of rows in our data.
    User.train(my_NN, input_, target, batch_size=1, num_epochs=1000)
    input_predict = np.array([[0, 0], [1, 0], [0, 1], [1, 1]])
    print(User.prediction(my_NN, input_predict))
    # OK
def test_Sigmoid_forward(self):
    input_ = np.random.random((5, 4))
    op = af.Sigmoid()
    rv = op.forward(input_)
    assert rv.shape == input_.shape
def test_DenseLayer_forward(self):
    input_ = np.random.random((5, 4))
    layer = layers.DenseLayer(3, activation_func=af.Sigmoid())
    rv = layer.forward(input_)
    assert rv.shape == (5, 3)
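# A minimal numpy sketch of what DenseLayer.forward presumably computes in
# this test, assuming a standard fully connected layer (the tests only pin
# down shapes, not internals): an affine map followed by the activation.
import numpy as np

def dense_forward(x, W, b):
    """x: (batch, n_in), W: (n_in, n_neurons), b: (n_neurons,)."""
    z = x @ W + b                     # affine pre-activation
    return 1.0 / (1.0 + np.exp(-z))   # Sigmoid, matching the test's activation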
def train_simultaneousNN(
        inputs_train: Tensor,
        targets_train: Tensor,
        loss: Loss.Loss = Loss.MeanSquareError(),
        optimizer: OptimizerClass.Optimizer = OptimizerClass.SGD(),
        num_epochs: int = 5000,
        batch_size: int = 32) -> tuple:
    size_training = inputs_train.shape[0]
    Result_chi2 = [[], [], [], [], [], [], [], [], []]
    # Epochs at which the weakest network is eliminated (10% to 45% of the
    # total number of epochs, in 5% steps).
    list_epoch = np.array(range(10, 50, 5)) / 100 * num_epochs

    '''Initialisation of the 9 networks'''
    # TODO: check the question of the random seed().
    list_net = []
    for i in range(9):
        layers = []
        layers.append(Layer.Linear(6, 4))
        layers.append(ActivationFunctions.Tanh())
        layers.append(Layer.Linear(4, 2))
        layers.append(ActivationFunctions.Tanh())
        layers.append(Layer.Linear(2, 1))
        layers.append(ActivationFunctions.Sigmoid())
        list_net.append(Neural_network.NeuralNet(layers))
    destroyed_NN = []
    nbr_batch = size_training // batch_size

    '''Training of the 9 networks'''
    for epoch in range(num_epochs):
        for k in range(9):
            if k not in destroyed_NN:
                Chi2_train = 0
                for i in range(0, size_training, batch_size):
                    # 1) feed forward
                    y_actual = list_net[k].forward(inputs_train[i:i + batch_size])
                    # 2) compute the loss and the gradients
                    Chi2_train += loss.loss(targets_train[i:i + batch_size], y_actual)
                    grad_ini = loss.grad(targets_train[i:i + batch_size], y_actual)
                    # 3) feed backwards
                    grad_fini = list_net[k].backward(grad_ini)
                    # 4) update the net
                    optimizer.step(list_net[k], n_epoch=epoch)
                Chi2_train = Chi2_train / nbr_batch
                Result_chi2[k].append(Chi2_train)

        '''Removal of the least efficient network'''
        # The worst candidate is the one with the largest mixed error: the
        # current chi2 weighted by the slope of its recent trend.
        if epoch in list_epoch:
            Comparaison = [[], []]
            for k in range(9):
                if k not in destroyed_NN:
                    ErrorSlope = np.polyfit(np.array(range(epoch - 49, epoch)),
                                            Result_chi2[k][-50:-1], 1)[0]
                    MixedError = Result_chi2[k][-1] * (1 - np.arctan(ErrorSlope) / (np.pi / 2))
                    Comparaison[0].append(k)
                    Comparaison[1].append(MixedError)
            k = Comparaison[0][Comparaison[1].index(max(Comparaison[1]))]
            destroyed_NN.append(k)
        if epoch % 100 == 0:
            print('epoch : ' + str(epoch) + "/" + str(num_epochs) + "\r", end="")

    # Return the last surviving network and the chi2 history of all nine.
    for k in range(9):
        if k not in destroyed_NN:
            my_NN = list_net[k]
    return my_NN, Result_chi2
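# A hedged usage sketch: train the nine competing networks on an existing
# training set and plot each chi2 history. The names data_train_input and
# data_train_target mirror the other scripts in this repo and are assumed to
# be numpy arrays with 6 input features and 1 target column.
best_NN, chi2_histories = train_simultaneousNN(
    data_train_input, data_train_target,
    loss=Loss.MeanSquareError(),
    optimizer=OptimizerClass.SGD(),
    num_epochs=5000, batch_size=32)
for k, history in enumerate(chi2_histories):
    plt.plot(range(len(history)), history, label='net ' + str(k))
plt.xlabel('Epoch')
plt.ylabel('Chi2 (training)')
plt.legend()
plt.show()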
''' Learning rate '''
my_lr = 0.001
my_initial_lr = 0.1
my_decay_coeff = 1 / 200

'''Construction of the neural network '''
my_layer1 = Layer.Linear(6, 5)
my_layer2 = ActivationFunctions.Tanh()
my_layer3 = Layer.Linear(5, 4)
my_layer4 = ActivationFunctions.Tanh()
my_layer5 = Layer.Linear(4, 3)
my_layer6 = ActivationFunctions.Tanh()
my_layer7 = Layer.Linear(3, 2)
my_layer8 = ActivationFunctions.Tanh()
my_layer9 = Layer.Linear(2, 1)
my_layer10 = ActivationFunctions.Sigmoid()
my_NN = Neural_network.NeuralNet([
    my_layer1, my_layer2, my_layer3, my_layer4, my_layer5, my_layer6,
    my_layer7, my_layer8, my_layer9, my_layer10
])

## Import of the training and test data
os.chdir(path_ini[:-4])
data_training_path = 'Data/data_train.csv'
data_test_path = 'Data/data_test.csv'
Data_train = pd.read_csv(data_training_path)
Data_test = pd.read_csv(data_test_path)
param = ['cosTBz', 'R2', 'chiProb', 'Ks_distChi', 'm_KSpipi_norm', 'Mbc_norm']
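# my_initial_lr and my_decay_coeff suggest a decaying learning-rate schedule.
# The exact rule lives in the Optimizer module; a common choice consistent
# with these names (an assumption, not the repo's confirmed formula) is
# exponential decay: lr(epoch) = my_initial_lr * exp(-my_decay_coeff * epoch).
import numpy as np

def decayed_lr(epoch, initial_lr=0.1, decay_coeff=1 / 200):
    return initial_lr * np.exp(-decay_coeff * epoch)

# e.g. decayed_lr(0) == 0.1, decayed_lr(200) ~= 0.0368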
n_iter = 100
weights = []
functions = []
# Loop creating random weights in the range (-2, 2) and activation functions
# alternating between sigmoid and tanh from one layer to the next.
for i in range(n_layers):
    n = n_neurons_per_layer[i]
    ins = inputs_per_layer[i]
    layer_w = []
    layer_f = []
    for j in range(n):
        layer_w.append(np.random.uniform(-2, 2, ins))
        if i % 2 == 0:
            layer_f.append(af.Sigmoid())
        else:
            layer_f.append(af.Tanh())
    functions.append(np.array(layer_f))
    weights.append(np.array(layer_w))
# dtype=object keeps the ragged per-layer arrays; a plain np.array() call
# raises on ragged input in recent numpy versions.
weights = np.array(weights, dtype=object)
functions = np.array(functions, dtype=object)

#%%
network = nn.NeuralNetwork(n_layers, n_neurons_per_layer, n_in, n_out)

# Set network parameters
network.set_iterations(n_iter)