def training_ANN(self):
    """Train the network on the module-level datasets and plot the cost curve.

    Side effects:
      - rescales ``init.INPUT_DATASETs`` (module-level; note that calling
        this method twice rescales twice — presumably it is called once),
      - stores the trained network and trainer on ``self.NN`` / ``self.T``,
      - opens a matplotlib window showing cost vs. epoch.
    """
    # Scale inputs down: collective signals carry a very high E, so the
    # raw magnitudes would dominate training.
    init.INPUT_DATASETs = init.INPUT_DATASETs / 500

    self.NN = ann.Neural_Network(Lambda = 0.0001)
    self.T = ann.trainer(self.NN)
    self.T.train(init.INPUT_DATASETs, init.OUTPUT_DATASETs)

    # Draw the training curve: relation between epoch T and error E.
    plt.figure(1)
    plt.plot(self.T.E, label = 'Train line', linewidth = 2.0)
    plt.legend()
    plt.grid(True)
    plt.xlabel('Epochs')
    plt.ylabel('Cost')
    plt.show()
temp_accuracy = [] count1 = 0 count2 = 0 count3 = 0 count4 = 0 count = 0 temp_accuracy_test = [] count1_test = 0 count2_test = 0 count3_test = 0 count4_test = 0 count_test = 0 ''' Training ANN ''' NN = ANN.Neural_Network(Lambda = 0.0001) T = ANN.trainer(NN) T.train(init.INPUT_DATASETs, init.OUTPUT_DATASETs) endTime = time.clock() # Get end time # Calculate processing time processing_time = startTime - endTime test = NN.foward(init.INPUT_DATASETs) test_test = NN.foward(INPUT_DATASETs_test) # Accuracy of UP state for t in range(0, test.shape[0]/4): if test[t][0] == np.max(test[t]): count1 += 1 for t in range(0, test_test.shape[0]/4): if test_test[t][0] == np.max(test_test[t]):
# Accuracy bookkeeping: per-class hit counters for the training set
# (count1..count4, count) and the test set (count*_test).
temp_accuracy = []
count1 = 0
count2 = 0
count3 = 0
count4 = 0
count = 0
count1_test = 0
count2_test = 0
count3_test = 0
count4_test = 0
count_test = 0

startTime1 = time.clock()  # start of the training timer

''' Training ANN '''
# One network per direction class, each trained on its own slice of the
# dataset (presumably a one-vs-rest scheme — confirm against init's
# dataset construction). All use the same regularization strength.
# UP Neural Nets
NN_UP = ANN.Neural_Network(Lambda=0.0001)
T_UP = ANN.trainer(NN_UP)
T_UP.train(init.UP_INPUT, init.UP_OUTPUT)
# RIGHT Neural Nets
NN_RIGHT = ANN.Neural_Network(Lambda=0.0001)
T_RIGHT = ANN.trainer(NN_RIGHT)
T_RIGHT.train(init.RIGHT_INPUT, init.RIGHT_OUTPUT)
# DOWN Neural Nets
NN_DOWN = ANN.Neural_Network(Lambda=0.0001)
T_DOWN = ANN.trainer(NN_DOWN)
T_DOWN.train(init.DOWN_INPUT, init.DOWN_OUTPUT)
# LEFT Neural Nets
NN_LEFT = ANN.Neural_Network(Lambda=0.0001)