def testNetwork(self, test_data, test_labels):
    """Evaluate the network on a labelled test set.

    Normalizes the batch, predicts an orientation quaternion per sample,
    converts both prediction and ground truth to Euler angles in degrees
    (mapped to [0, 360)), and returns the mean absolute angular distance
    averaged over the three axes and over all samples.
    """
    test_data = batch_functions.normalize_batch(test_data)
    prediction = self.f_predict(test_data)

    error_sum = 0.0
    n_samples = prediction.shape[0]
    for idx in range(n_samples):
        true_quat = [test_labels[idx, 0], test_labels[idx, 1],
                     test_labels[idx, 2], test_labels[idx, 3]]

        # Euler angles in degrees, wrapped into [0, 360).
        pred_deg = np.array(euler.quat2euler(prediction[idx, :])) * 180 / np.pi
        pred_deg = np.where(pred_deg < 0, pred_deg + 360.0, pred_deg)
        true_deg = np.array(euler.quat2euler(true_quat)) * 180 / np.pi
        true_deg = np.where(true_deg < 0, true_deg + 360.0, true_deg)

        # Minimal angular distance per axis: fold differences above 180.
        diff = np.abs(pred_deg - true_deg) % 360
        diff = np.where(diff > 180, 360.0 - diff, diff)
        error_sum += np.mean(diff)

    return error_sum / n_samples
def trainNetwork(self, train_data, train_labels, valid_data, valid_labels,
                 trigger, batchSize=20, epochs=100, learningRate=0.001,
                 penalty=0.001):
    """Train the network with minibatch SGD and per-epoch validation.

    Each epoch: iterate shuffled minibatches, accumulate the training loss,
    measure validation error, decay the learning rate (divide by 1.005),
    append ``epoch;train_loss;valid_error`` to ``<name>_data.txt`` and
    notify the first registered observer.

    Args:
        train_data, train_labels: training set (normalized per-batch).
        valid_data, valid_labels: validation set (normalized once up front).
        trigger: accepted for interface compatibility — not used here.
        batchSize: minibatch size.
        epochs: number of passes over the training data.
        learningRate: initial SGD learning rate (decayed each epoch).
        penalty: regularization weight forwarded to ``f_train``.

    Returns:
        1 on completion; the best epoch index is only printed.
    """
    print("TRAINING STARTS")
    epoch_kept = 0
    # inf (not an arbitrary 10000) so the best epoch is tracked even when
    # every validation error exceeds 10000.
    min_valid_error = float('inf')
    valid_data = batch_functions.normalize_batch(valid_data)
    for e in range(epochs):
        train_err = 0
        train_batches = 0
        for batch in batch_functions.iterate_minibatches(
                train_data, train_labels, batchSize, shuffle=True):
            inputs, targets = batch
            inputs = batch_functions.normalize_batch(inputs)
            train_err += self.f_train(inputs, targets, learningRate, penalty)
            train_batches += 1
        valid_error = self.f_accuracy(valid_data, valid_labels)
        learningRate = learningRate / 1.005
        if valid_error < min_valid_error:
            min_valid_error = valid_error
            epoch_kept = e
        # with-block guarantees the log file is closed even if a later
        # statement (e.g. the observer callback) raises.
        with open(self.name + '_data.txt', 'a') as f:
            f.write(str(e) + ';' + str(train_err) + ';'
                    + str(valid_error) + '\n')
        self._observers[0]()
        print("EPOCH : " + str(e) + ' LOSS: ' + str(train_err)
              + ' ERROR: ' + str(valid_error) + ' RATE: ' + str(learningRate))
    print('END - EPOCH KEPT : ' + str(epoch_kept))
    return 1
def trainNetwork(self, train_data, train_labels, valid_data, valid_labels,
                 trigger, batchSize=20, epochs=100, learningRate=0.001,
                 penalty=0.001):
    """Train with minibatch SGD and return a fresh Network holding the
    parameters of the best-validation epoch.

    Each epoch: iterate shuffled minibatches, accumulate the training loss,
    measure validation error, decay the learning rate (divide by 1.01) and
    snapshot the layer parameters whenever validation improves.

    Args:
        train_data, train_labels: training set (normalized per-batch).
        valid_data, valid_labels: validation set (normalized once up front).
        trigger: accepted for interface compatibility — not used here.
        batchSize: minibatch size (previously ignored: 20 was hard-coded).
        epochs: number of training epochs.
        learningRate: initial learning rate (decayed each epoch).
        penalty: accepted for interface compatibility — this variant's
            ``f_train`` takes no penalty argument, so it is unused.

    Returns:
        Network: a new instance built from the best snapshot.
    """
    epoch_kept = 0
    # inf (not an arbitrary 10000) so the first epoch always records a
    # snapshot; also take an initial one so `params` can never be unbound
    # at the return statement.
    min_valid_error = float('inf')
    params = lasagne.layers.get_all_param_values(self.last_layer)
    valid_data = batch_functions.normalize_batch(valid_data)
    for e in range(epochs):
        train_err = 0
        train_batches = 0
        # pass batchSize through instead of the former hard-coded 20
        for batch in batch_functions.iterate_minibatches(
                train_data, train_labels, batchSize, shuffle=True):
            inputs, targets = batch
            inputs = batch_functions.normalize_batch(inputs)
            train_err += self.f_train(inputs, targets, learningRate)
            train_batches += 1
        valid_error = self.f_accuracy(valid_data, valid_labels)
        learningRate = learningRate / 1.01
        if valid_error < min_valid_error:
            min_valid_error = valid_error
            epoch_kept = e
            params = lasagne.layers.get_all_param_values(self.last_layer)
        self.set_loss(train_err)
        print("EPOCH : " + str(e) + ' LOSS: ' + str(train_err)
              + ' ERROR: ' + str(valid_error) + ' RATE: ' + str(learningRate))
    print('END - EPOCH KEPT : ' + str(epoch_kept))
    return Network(self.dimChannels, self.dimFeatures, self.dimOutput,
                   self.name, paramsImport=params)
TEST = False [train_data, train_labels] = datasetImportConv(MODEL_PATH, DATASET, MODEL_OBJECT, DIMENSION, 'TRAIN', DEPTH) [valid_data, valid_labels] = datasetImportConv(MODEL_PATH, DATASET, MODEL_OBJECT, DIMENSION, 'VALIDATION', DEPTH) [test_data, test_labels] = datasetImportConv(MODEL_PATH, DATASET, MODEL_OBJECT, DIMENSION, 'TEST', DEPTH) #[test_dataV,test_labelsV]=datasetImportConv(MODEL_PATH,DATASET,MODEL_OBJECT,'VRAC','TEST',DEPTH) if TEST: f = open('network1.dat', 'rb') networkF = pickle.load(f) test_data = batch_functions.normalize_batch(test_data) res = networkF.testNetwork(test_data, test_labels) print(res) else: network1 = Network(train_data.shape[1], train_data.shape[2], train_labels.shape[1], 'network', networkFile='networks/network1.txt') network1.trainNetwork(train_data, train_labels, valid_data, valid_labels, batchSize=10, epochs=100,
def predict(self, test_data):
    """Return the network's predictions for a normalized input batch.

    Bug fix: the return value of ``normalize_batch`` was previously
    discarded, so prediction ran on the raw input — every other method in
    this file assigns the result (cf. ``testNetwork``), so ``normalize_batch``
    returns the normalized batch rather than mutating in place.
    """
    test_data = batch_functions.normalize_batch(test_data)
    return self.f_predict(test_data)