def subSetMLP(self, filename, percentage, layer_width, layer_height, learning_rate, dropout_rate, fitness_threshold, batch_size, display_step, save_option=True):
    """Train an MLP on a random subset of a static controller's state space.

    Reads the controller from *filename*, builds both the full data set and a
    *percentage*-sized subset, trains an Adam-optimized sigmoid MLP on the
    subset, and evaluates fitness against the full set.

    Args:
        filename: Controller file passed to the importer.
        percentage: Fraction of the full set used for training.
        layer_width, layer_height: Rectangular hidden-layer dimensions.
        learning_rate: Optimizer learning rate.
        dropout_rate: Dropout rate applied during training.
        fitness_threshold: Training stops once this fitness is reached.
        batch_size: Mini-batch size.
        display_step: Interval (in steps) between progress reports.
        save_option: When True, persist the network, the wrongly classified
            states, and a MATLAB export of the MLP.
    """
    self.staticController = self.importer.readStaticController(filename)

    # Full set: used only for the final fitness check.
    full_set = DataSet()
    full_set.readSetFromController(self.staticController)
    full_set.formatToBinary()

    # Subset: the actual training data.
    sub_set = DataSet()
    sub_set.readSubsetFromController(self.staticController, percentage)
    sub_set.formatToBinary()

    # Configure the network manager.
    self.nnm.setDebugMode(True)
    self.nnm.setType(NNTypes.MLP)
    self.nnm.setTrainingMethod(NNOptimizer.Adam)
    self.nnm.setActivationFunction(NNActivationFunction.Sigmoid)
    self.nnm.setDataSet(sub_set)
    self.nnm.setDropoutRate(dropout_rate)
    self.nnm.rectangularHiddenLayers(layer_width, layer_height)
    # -1 / 5000 mirror the original call's trailing initialize() arguments
    # (presumably epoch limit and a step cap — TODO confirm against initialize()).
    self.nnm.initialize(learning_rate, fitness_threshold, batch_size, display_step, -1, 5000)
    self.nnm.getDataSize()

    # Train model and visualize performance.
    self.nnm.train()
    self.nnm.plot()

    # Evaluate against the FULL set, not just the training subset.
    fitness, wrong_states = self.nnm.checkFitness(full_set)
    self.nnm.randomCheck(full_set)

    if save_option:
        self.exporter.saveNetwork(self.nnm)
        self.exporter.saveWrongStates(wrong_states)
        self.exporter.saveMatlabMLP(self.staticController, self.nnm)

    self.nnm.close()
    self.cleanMemory()
def importMLP(self, import_path, filename, layer_width, layer_height, learning_rate, dropout_rate, fitness_threshold, batch_size, display_step, save_option=True):
    """Restore a previously saved MLP and continue training it.

    Rebuilds the full data set from the controller in *filename*, configures
    the network manager with the given hyper-parameters, restores the saved
    network from *import_path*, then resumes training and evaluates fitness.

    Args:
        import_path: Path of the saved network to restore.
        filename: Controller file passed to the importer.
        layer_width, layer_height: Rectangular hidden-layer dimensions
            (must match the saved network's topology — TODO confirm).
        learning_rate: Optimizer learning rate for the resumed session.
        dropout_rate: Dropout rate applied during training.
        fitness_threshold: Training stops once this fitness is reached.
        batch_size: Mini-batch size.
        display_step: Interval (in steps) between progress reports.
        save_option: When True, persist the network, the wrongly classified
            states, and a MATLAB export of the MLP.
    """
    self.staticController = self.importer.readStaticController(filename)

    training_set = DataSet()
    training_set.readSetFromController(self.staticController)
    training_set.formatToBinary()

    # Configure the network manager; parameters may differ from the
    # original training session.
    self.nnm.setDebugMode(True)
    self.nnm.setType(NNTypes.MLP)
    self.nnm.setTrainingMethod(NNOptimizer.Adam)
    self.nnm.setActivationFunction(NNActivationFunction.Sigmoid)
    self.nnm.setDataSet(training_set)
    self.nnm.setDropoutRate(dropout_rate)
    self.nnm.rectangularHiddenLayers(layer_width, layer_height)
    self.nnm.initialize(learning_rate, fitness_threshold, batch_size, display_step)
    self.nnm.getDataSize()

    # Restore network weights from the saved file.
    self.importer.restoreNetwork(self.nnm, import_path)

    # Resume training and visualize performance.
    self.nnm.train()
    self.nnm.plot()

    fitness, wrong_states = self.nnm.checkFitness(training_set)
    self.nnm.randomCheck(training_set)

    # Save network and/or variables.
    if save_option:
        self.exporter.saveNetwork(self.nnm)
        self.exporter.saveWrongStates(wrong_states)
        self.exporter.saveMatlabMLP(self.staticController, self.nnm)

    self.nnm.close()
def scoutLearningRateConvergence(self, filename, layer_width, layer_height, epoch_threshold, rates, batch_size, display_step):
    """Sweep learning rates and plot the resulting fitness of each run.

    For every rate in *rates*, trains a fresh Adam-optimized sigmoid MLP on
    the full data set for up to *epoch_threshold* epochs, records the fitness
    on the training set, and finally plots fitness vs. learning rate on a
    semilog-x axis.

    Args:
        filename: Controller file passed to the importer.
        layer_width, layer_height: Rectangular hidden-layer dimensions.
        epoch_threshold: Epoch limit per training run.
        rates: Iterable of learning rates to evaluate.
        batch_size: Mini-batch size.
        display_step: Interval (in steps) between progress reports.
    """
    self.staticController = self.importer.readStaticController(filename)

    dataSet = DataSet()
    dataSet.readSetFromController(self.staticController)
    dataSet.formatToBinary()

    self.nnm.setDebugMode(False)

    fitnesses = []
    for r in rates:
        print("\nLearning rate: " + str(r))

        # Fresh configuration per rate so runs don't influence each other.
        self.nnm.setType(NNTypes.MLP)
        self.nnm.setTrainingMethod(NNOptimizer.Adam)
        self.nnm.setActivationFunction(NNActivationFunction.Sigmoid)
        self.nnm.setDataSet(dataSet)
        self.nnm.rectangularHiddenLayers(layer_width, layer_height)
        self.nnm.initializeNeuralNetwork()
        # Fitness threshold 1.0 so the run is bounded by epoch_threshold only.
        self.nnm.initializeTraining(r, 1.0, batch_size, display_step, epoch_threshold)

        self.nnm.train()

        fitness, wrong_states = self.nnm.checkFitness(dataSet)
        # BUGFIX: was `self.fitnesses.append(fitness)`, which raised
        # AttributeError (no such instance attribute) and left the local
        # `fitnesses` list empty for the plot below.
        fitnesses.append(fitness)

        self.nnm.close()

    # Plot fitness as a function of learning rate.
    plt.semilogx(rates, fitnesses, 'r-')
    plt.xlabel("Rates")
    plt.ylabel("Fitness")
    plt.grid()
    (x1, x2, y1, y2) = plt.axis()
    plt.axis((min(rates), max(rates), 0.0, y2 + 0.1))
    plt.show()

    self.cleanMemory()