def train(self, verbose=True):
    """Train the Multi-layer Perceptrons

    Parameters
    ----------
    verbose : boolean
        Print logging messages with validation accuracy if verbose is True.
    """
    ####
    # Evaluation is copy-pasted from Run.py ...
    evaluator = Evaluator()
    if verbose:
        print("epoch-nr. & Result of the Multi-layer Perceptron recognizer (on test set):")
    for epoch in range(self.epochs):
        self._train_one_epoch()
        if verbose:
            print(epoch)
            evaluator.printAccuracy(self.test_set, self.evaluate())
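# Hedged sketch (not part of the original file): one way the _train_one_epoch() helper
# called by train() above could look. The attribute names (trainingSet, learningRate) and
# the helper methods (_feed_forward, _compute_error, _update_weights) are assumptions
# about the surrounding MultilayerPerceptron class, not confirmed by this snippet.
def _train_one_epoch(self):
    for img, label in zip(self.trainingSet.input, self.trainingSet.label):
        self._feed_forward(img)                   # forward pass through all layers
        self._compute_error(label)                # deltas at the output layer
        self._update_weights(self.learningRate)   # one gradient step per sample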
def main():
    data = MNISTSeven("../data/mnist_seven.csv", 3000, 1000, 1000)
    myPerceptronClassifier = Perceptron(data.trainingSet, data.validationSet, data.testSet,
                                        learningRate=0.01, epochs=10)

    # Train the classifiers
    print("=========================")
    print("Training..")

    print("\nTraining the Perceptron..")
    myPerceptronClassifier.train()
    print("Done..")

    # Do the recognizer
    perceptronPred = myPerceptronClassifier.evaluate()

    # Report the result
    print("=========================")
    evaluator = Evaluator()

    print("\nResult of the Perceptron recognizer:")
    # evaluator.printComparison(data.testSet, perceptronPred)
    evaluator.printAccuracy(data.testSet, perceptronPred)
def main():
    data = MNISTSeven("../data/mnist_seven.csv", 3000, 1000, 1000)
    myStupidClassifier = StupidRecognizer(data.trainingSet, data.validationSet, data.testSet)
    myPerceptronClassifier = Perceptron(data.trainingSet, data.validationSet, data.testSet,
                                        epochs=10)

    # Train the classifiers
    print("=========================")
    print("Training..")
    myStupidClassifier.train()
    myPerceptronClassifier.train()

    # Do the recognizer
    stupidPred = myStupidClassifier.evaluate()
    perceptronPred = myPerceptronClassifier.evaluate()

    # Report the result
    print("=========================")
    evaluator = Evaluator()

    print("Result of the stupid recognizer:")
    # evaluator.printComparison(data.testSet, stupidPred)
    evaluator.printAccuracy(data.testSet, stupidPred)

    print("\nResult of the perceptron recognizer:")
    # evaluator.printComparison(data.testSet, perceptronPred)
    evaluator.printAccuracy(data.testSet, perceptronPred)
def main():
    data = MNISTSeven("../data/mnist_seven.csv", 3000, 1000, 1000, oneHot=False)
    myMLP = MultilayerPerceptron(data.trainingSet, data.validationSet, data.testSet,
                                 learningRate=0.005, epochs=30)
    # myMLP.fortest()
    myMLP.train()

    # Do the recognizer
    # Explicitly specify the test set to be evaluated
    MLPPred = myMLP.evaluate()

    # Report the result
    print("=========================")
    evaluator = Evaluator()

    print("\nResult of the MLP:")
    evaluator.printAccuracy(data.testSet, MLPPred)

    # Draw
    plot = PerformancePlot("MLP validation")
    plot.draw_performance_epoch(myMLP.performances, myMLP.epochs)
def main():
    data = MNISTSeven("../data/mnist_seven.csv", 3000, 1000, 1000)
    myStupidClassifier = StupidRecognizer(data.trainingSet, data.validationSet, data.testSet)
    myPerceptronClassifier = Perceptron(data.trainingSet, data.validationSet, data.testSet,
                                        learningRate=0.005, epochs=30)
    myLogisticRegressionClassifier = LogisticRegression(data.trainingSet, data.validationSet,
                                                        data.testSet, learningRate=0.001,
                                                        epochs=50, error='mse')

    # Train the classifiers
    print("=========================")
    print("Training..")

    # print("\nStupid Classifier has been training..")
    # myStupidClassifier.train()
    # print("Done..")

    # print("\nPerceptron has been training..")
    # myPerceptronClassifier.train()
    # print("Done..")

    print("\nLogistic Regression Classifier has been training..")
    myLogisticRegressionClassifier.train()
    print("Done..")

    # Do the recognizer
    # Explicitly specify the test set to be evaluated
    # stupidPred = myStupidClassifier.evaluate()
    # perceptronPred = myPerceptronClassifier.evaluate()
    LogisticRegressionPred = myLogisticRegressionClassifier.evaluate()

    # Report the result
    print("=========================")
    evaluator = Evaluator()

    # print("Result of the stupid recognizer:")
    # evaluator.printComparison(data.testSet, stupidPred)
    # evaluator.printAccuracy(data.testSet, stupidPred)

    # print("\nResult of the Perceptron recognizer:")
    # evaluator.printComparison(data.testSet, perceptronPred)
    # evaluator.printAccuracy(data.testSet, perceptronPred)

    print("Result of the Logistic Regression:")
    evaluator.printAccuracy(data.testSet, LogisticRegressionPred)
def main():
    data = MNISTSeven("../data/mnist_seven.csv", 3000, 1000, 1000)
    # myStupidClassifier = StupidRecognizer(data.trainingSet, data.validationSet, data.testSet)

    # Uncomment this to make your Perceptron evaluated
    # myPerceptronClassifier = Perceptron(data.trainingSet, data.validationSet, data.testSet,
    #                                     learningRate=0.005, epochs=30)

    myNeuralNetwork = LogisticRegression(data.trainingSet, data.validationSet, data.testSet)

    # Train the classifiers
    print("=========================")
    print("Training..")

    # print("\nStupid Classifier has been training..")
    # myStupidClassifier.train()
    # print("Done..")

    print("\nNeural Network training..")
    # myPerceptronClassifier.train()
    myNeuralNetwork.train()
    print("Done..")

    # Do the recognizer
    # Explicitly specify the test set to be evaluated
    # stupidPred = myStupidClassifier.evaluate()

    # Uncomment this to make your Perceptron evaluated
    # perceptronPred = myPerceptronClassifier.evaluate()
    neuralPred = myNeuralNetwork.evaluate()

    # Report the result
    print("=========================")
    evaluator = Evaluator()

    # print("Result of the stupid recognizer:")
    # evaluator.printComparison(data.testSet, stupidPred)
    # evaluator.printAccuracy(data.testSet, stupidPred)

    print("\nResult of the Perceptron recognizer:")
    # evaluator.printComparison(data.testSet, perceptronPred)
    # Uncomment this to make your Perceptron evaluated
    # evaluator.printAccuracy(data.testSet, perceptronPred)
    evaluator.printAccuracy(data.testSet, neuralPred)

    # evaluator.printConfusionMatrix(data.testSet, perceptronPred)
    evaluator.printConfusionMatrix(data.testSet, neuralPred)
def main():
    data = MNISTSeven("../data/mnist_seven.csv", 3000, 1000, 1000)
    myStupidClassifier = StupidRecognizer(data.trainingSet, data.validationSet, data.testSet)
    myPerceptronClassifier = Perceptron(data.trainingSet, data.validationSet, data.testSet,
                                        learningRate=0.005, epochs=30)
    myLRClassifier = LogisticRegression(data.trainingSet, data.validationSet, data.testSet,
                                        learningRate=0.005, epochs=30)

    # Train the classifiers
    print("=========================")
    print("Training..")

    print("\nStupid Classifier has been training..")
    myStupidClassifier.train()
    print("Done..")

    print("\nPerceptron has been training..")
    myPerceptronClassifier.train()
    print("Done..")

    print("\nLogistic Regression has been training..")
    myLRClassifier.train()
    print("Done..")

    # Do the recognizer
    # Explicitly specify the test set to be evaluated
    stupidPred = myStupidClassifier.evaluate()
    perceptronPred = myPerceptronClassifier.evaluate()
    lrPred = myLRClassifier.evaluate()

    # Report the result
    print("=========================")
    evaluator = Evaluator()

    print("Result of the stupid recognizer:")
    # evaluator.printComparison(data.testSet, stupidPred)
    evaluator.printAccuracy(data.testSet, stupidPred)

    print("\nResult of the Perceptron recognizer:")
    # evaluator.printComparison(data.testSet, perceptronPred)
    evaluator.printAccuracy(data.testSet, perceptronPred)
def main():
    data = MNISTSeven("../data/mnist_seven.csv", 3000, 1000, 1000)
    myStupidClassifier = StupidRecognizer(data.trainingSet, data.validationSet, data.testSet)

    # Uncomment this to make your Perceptron evaluated
    myPerceptronClassifier = Perceptron(data.trainingSet, data.validationSet, data.testSet,
                                        learningRate=1.0,  # 0.005,
                                        epochs=1)  # 30

    # Train the classifiers
    print("=========================")
    print("Training..")

    print("\nStupid Classifier has been training..")
    myStupidClassifier.train()
    print("Done..")

    print("\nPerceptron has been training..")
    myPerceptronClassifier.train()
    print("Done..")

    # Do the recognizer
    # Explicitly specify the test set to be evaluated
    stupidPred = myStupidClassifier.evaluate()

    # Uncomment this to make your Perceptron evaluated
    perceptronPred = myPerceptronClassifier.evaluate()

    # Report the result
    print("=========================")
    evaluator = Evaluator()

    print("Result of the stupid recognizer:")
    # evaluator.printComparison(data.testSet, stupidPred)
    evaluator.printAccuracy(data.testSet, stupidPred)

    print("\nResult of the Perceptron recognizer:")
    # evaluator.printComparison(data.testSet, perceptronPred)
    # Uncomment this to make your Perceptron evaluated
    evaluator.printAccuracy(data.testSet, perceptronPred)
    evaluator.printConfusionMatrix(data.testSet, perceptronPred)
    evaluator.printClassificationResult(data.testSet, perceptronPred,
                                        ['class 0', 'class 1'])  # target_names
def main():
    # ------------ NOTE --------------
    # oneHot = False, as the framework provided implements binary one-hot encoding
    # (is it a 7 = True/False).
    # Our targets are one-of-k encoded (e.g. 1 = (0,1,0,0,0,0,0,0,0,0)); the network
    # predicts the exact digit on the picture, not just 7 = True/False.
    data = MNISTSeven("../data/mnist_seven.csv", 3000, 1000, 1000, oneHot=False)

    myMLPClassifier = MultilayerPerceptron(
        data.trainingSet, data.validationSet, data.testSet,
        hiddenLayerSizes=[65, 30],  # sizes of the hidden layers; input and output layer sizes are constant
        learningRate=0.028,         # learning rate
        epochs=50)                  # epochs

    # Train the classifiers
    print("=========================")
    print("Training..")

    print("\nMLP has been training..")
    myMLPClassifier.train()
    print("Done..")

    # Do the recognizer
    # Explicitly specify the test set to be evaluated
    mlpPred = myMLPClassifier.evaluate()

    # Report the result
    print("=========================")
    evaluator = Evaluator()

    # print("Result of the stupid recognizer:")  # leftover from the template
    print("\nResult of the MLP:")
    # evaluator.printComparison(data.testSet, lrPred)
    evaluator.printAccuracy(data.testSet, mlpPred)

    # Draw
    plot = PerformancePlot("MLP validation")
    plot.draw_performance_epoch(myMLPClassifier.performances, myMLPClassifier.epochs)
def classify_all():
    data = MNISTSeven("../data/mnist_seven.csv", 4000, 500, 500, one_hot=False)

    mlp = MultilayerPerceptron(data.training_set, data.validation_set, data.test_set,
                               output_task="classify_all", cost='crossentropy',
                               output_activation='softmax', learning_rate=0.30, epochs=50)
    mlp.train()
    pred = mlp.evaluate()

    evaluator = Evaluator()
    evaluator.printAccuracy(data.test_set, pred)
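# Minimal usage sketch for classify_all() above, assuming it lives in a script that is
# run directly (the entry-point guard is not shown in the original excerpt).
if __name__ == '__main__':
    classify_all()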
def trainAndEvaluateClassifier(classifier, test_set, verbose=False, graph=False):
    # Train
    print("Train " + classifier.__class__.__name__ + "..")
    classifier.train(verbose=verbose, graph=graph)
    print("Done..")
    print("")

    # Evaluate
    print("Evaluate..")
    pred = classifier.evaluate()
    print("Done..")
    print("")

    # Results
    print("Result:")
    evaluator = Evaluator()
    # evaluator.printComparison(data.test_set, stupidPred)
    evaluator.printAccuracy(test_set, pred)
    print("")
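# Hedged usage sketch for trainAndEvaluateClassifier(). The MNISTSeven and Perceptron
# constructors are taken from the other scripts in this collection; the exact keyword
# arguments, and the assumption that the classifier's train() accepts verbose/graph
# keywords (as this helper requires), are illustrative only.
def run_example():
    data = MNISTSeven("../data/mnist_seven.csv", 3000, 1000, 1000, one_hot=True)
    classifier = Perceptron(data.training_set, data.validation_set, data.test_set,
                            learning_rate=0.005, epochs=10)
    trainAndEvaluateClassifier(classifier, data.test_set, verbose=True, graph=False)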
def train(self, verbose=True):
    """Train the perceptron with the perceptron learning algorithm.

    Parameters
    ----------
    verbose : boolean
        Print logging messages with validation accuracy if verbose is True.
    """
    evaluator = Evaluator()
    for i in range(self.epochs):
        for input, label in zip(self.trainingSet.input, self.trainingSet.label):
            input = self.augmentedInput(input)
            output = 1 if self.fire(input) else 0
            error = label - output
            self.updateWeights(input, error)
        if verbose:
            print("Epoch: " + str(i + 1))
            evaluator.printAccuracy(self.validationSet, self.evaluate(self.validationSet))
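# Hedged sketch of the helpers the training loop above relies on (augmentedInput, fire,
# updateWeights), written as methods of a Perceptron-like class. Illustrative only,
# assuming self.weight is a NumPy weight vector with the bias weight in front and
# self.learningRate is the step size; the framework's real implementations may differ.
import numpy as np

class PerceptronHelpersSketch(object):
    def augmentedInput(self, input):
        # prepend the constant bias input "1"
        return np.insert(np.asarray(input), 0, 1)

    def fire(self, input):
        # Heaviside activation on the weighted sum
        return np.dot(np.asarray(input), self.weight) > 0

    def updateWeights(self, input, error):
        # perceptron learning rule: w <- w + eta * (t - y) * x
        self.weight += self.learningRate * error * np.asarray(input)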
def main():
    data = MNISTSeven("../data/mnist_seven.csv", 3000, 1000, 1000, oneHot=False)
    myMLPClassifier = MultilayerPerceptron(data.trainingSet, data.validationSet, data.testSet,
                                           learningRate=0.05,
                                           epochs=20)  # no more changes after 20 epochs

    # Removed old stuff

    # Report the result
    # print("=========================")
    evaluator = Evaluator()

    # Train the classifiers
    print("=========================")
    print("Training..")

    print("\nMLP has been training..")
    myMLPClassifier.train()
    print("Done..")

    # Do the recognizer
    # Explicitly specify the test set to be evaluated
    mlpPred = myMLPClassifier.evaluate()

    # Report the result
    print("=========================")
    evaluator = Evaluator()

    print("\nResult of the MLP recognizer:")
    # evaluator.printComparison(data.testSet, lrPred)
    evaluator.printAccuracy(data.testSet, mlpPred)

    # Draw
    plot = PerformancePlot("MLP validation")
    plot.draw_performance_epoch(myMLPClassifier.performances, myMLPClassifier.epochs)
def train(self, verbose=True):
    """Train the perceptron with the perceptron learning algorithm.

    Parameters
    ----------
    verbose : boolean
        Print logging messages with validation accuracy if verbose is True.
    """
    evaluator = Evaluator()
    for i in range(self.epochs):
        if verbose:
            logging.debug("Epoch: " + str(i))
            evaluator.printAccuracy(self.validationSet, self.evaluate(self.validationSet))
        for j in range(len(self.trainingSet.input)):
            input = self.trainingSet.input[j]
            label = self.trainingSet.label[j]
            predictedClass = self.classify(input)
            error = label - predictedClass
            self.updateWeights(input, error)
def main():
    # ------------ NOTE --------------
    # oneHot does not work: it makes no sense to have binary labeled data
    # (targetDigit or notTargetDigit) while using an MLP with 10 output nodes and a
    # softmax function. It needs to be trained with digit labels, not binary ones!
    data = MNISTSeven("../data/mnist_seven.csv", 3000, 1000, 1000, oneHot=False)

    myMLPClassifier = MultilayerPerceptron(
        data.trainingSet, data.validationSet, data.testSet,
        hiddenLayerSizes=[65, 30],  # sizes of the hidden layers; input and output layer sizes are constant
        learningRate=0.028,         # learning rate
        epochs=50)                  # epochs

    # Train the classifiers
    print("=========================")
    print("Training..")

    print("\nMLP has been training..")
    myMLPClassifier.train()
    print("Done..")

    # Do the recognizer
    # Explicitly specify the test set to be evaluated
    mlpPred = myMLPClassifier.evaluate()

    # Report the result
    print("=========================")
    evaluator = Evaluator()

    # print("Result of the stupid recognizer:")  # leftover from the template
    print("\nResult of the MLP:")
    # evaluator.printComparison(data.testSet, lrPred)
    evaluator.printAccuracy(data.testSet, mlpPred)

    # Draw
    plot = PerformancePlot("MLP validation")
    plot.draw_performance_epoch(myMLPClassifier.performances, myMLPClassifier.epochs)
def main():
    data = MNISTSeven("../data/mnist_seven.csv", 3000, 1000, 1000, oneHot=False)
    myMLPClassifier = MultilayerPerceptron(data.trainingSet, data.validationSet, data.testSet,
                                           loss='ce', layers=[128, 150],
                                           learningRate=0.005, epochs=10)

    # Report the result #

    # Train the classifiers
    print("=========================")
    print("Training..")

    print("\nMultilayer Perceptron has been training..")
    myMLPClassifier.train()
    print("Done..")

    # Do the recognizer
    # Explicitly specify the test set to be evaluated
    mlpPred = myMLPClassifier.evaluate()

    # Report the result
    print("=========================")
    evaluator = Evaluator()

    print("\nResult of the Multi Layer Perceptron recognizer:")
    evaluator.printComparison(data.testSet, mlpPred)
    evaluator.printAccuracy(data.testSet, mlpPred)

    # Draw
    plot = PerformancePlot("Multi Layer Perceptron validation")
    plot.draw_performance_epoch(myMLPClassifier.performances, myMLPClassifier.epochs)
def train(self, verbose=True):
    """Train the Multi-layer Perceptrons

    Parameters
    ----------
    verbose : boolean
        Print logging messages with validation accuracy if verbose is True.
    """
    evaluator = Evaluator()
    accuracies = []

    for epoch in range(self.epochs):
        print("Training Epoch", epoch)

        for i, input, label in zip(range(len(self.training_set.input)),
                                   self.training_set.input,
                                   self.training_set.label):
            # one-of-k encode the target digit
            vec = np.zeros(self.layers[-1].n_out)
            vec[label] = 1

            out = self._feed_forward(input)
            error = self._compute_error(vec)

            # Backpropagate the error from the output layer towards the input layer
            for layer, nextLayer in zip(reversed(self.layers[:-1]), reversed(self.layers[1:])):
                weights = nextLayer.weights[1:].T
                layer.computeDerivative(error, weights)
                error = layer.deltas

            for layer in self.layers:
                layer.updateWeights(self.learning_rate)

        # Evaluate once per epoch and keep the accuracy for later plotting
        pred = self.evaluate()
        if verbose:
            evaluator.printAccuracy(self.test_set, pred)
        accuracies.append(evaluator.getAccuracy(self.test_set, pred))

    return accuracies
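# Hedged sketch of the per-layer interface the backward pass above assumes: each layer
# exposes weights (with a bias row), deltas, computeDerivative() and updateWeights().
# This illustrates one possible sigmoid layer, not the framework's actual LogisticLayer.
import numpy as np

class SigmoidLayerSketch(object):
    def __init__(self, n_in, n_out):
        self.n_out = n_out
        self.weights = np.random.uniform(-0.1, 0.1, (n_in + 1, n_out))  # row 0 is the bias row
        self.inp = np.zeros(n_in + 1)
        self.outp = np.zeros(n_out)
        self.deltas = np.zeros(n_out)

    def forward(self, inp):
        self.inp = np.insert(np.asarray(inp, dtype=float), 0, 1)   # add bias input
        self.outp = 1.0 / (1.0 + np.exp(-np.dot(self.inp, self.weights)))
        return self.outp

    def computeDerivative(self, next_deltas, next_weights):
        # next_weights is expected transposed and without the bias row, matching the call
        # layer.computeDerivative(error, nextLayer.weights[1:].T) in the train() above
        self.deltas = self.outp * (1 - self.outp) * np.dot(next_deltas, next_weights)

    def updateWeights(self, learning_rate):
        # gradient step; assumes deltas already carry the (target - output) sign convention
        self.weights += learning_rate * np.outer(self.inp, self.deltas)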
def main():
    # data = MNISTSeven("../data/mnist_seven.csv", 3000, 1000, 1000,
    #                   one_hot=True, target_digit='7')

    # NOTE:
    # Comment out the MNISTSeven instantiation above and
    # uncomment the following to work with full MNIST task
    data = MNISTSeven("../data/mnist_seven.csv", 3000, 1000, 1000, one_hot=False)

    # NOTE:
    # Other 1-digit classifiers do not make sense now for comparison purpose
    # So you should comment them out, let alone the MLP training and evaluation

    # Train the classifiers
    # print("=========================")
    print("Training..")

    # Stupid Classifier
    # myStupidClassifier = StupidRecognizer(data.training_set, data.validation_set, data.test_set)
    # print("\nStupid Classifier has been training..")
    # myStupidClassifier.train()
    # print("Done..")
    # Do the recognizer
    # Explicitly specify the test set to be evaluated
    # stupidPred = myStupidClassifier.evaluate()

    # Perceptron
    # myPerceptronClassifier = Perceptron(data.training_set, data.validation_set, data.test_set,
    #                                     learning_rate=0.005, epochs=10)
    # print("\nPerceptron has been training..")
    # myPerceptronClassifier.train()
    # print("Done..")
    # Do the recognizer
    # Explicitly specify the test set to be evaluated
    # perceptronPred = myPerceptronClassifier.evaluate()

    # Logistic Regression
    # myLRClassifier = LogisticRegression(data.training_set, data.validation_set, data.test_set,
    #                                     learning_rate=0.005, epochs=30)
    # print("\nLogistic Regression has been training..")
    # myLRClassifier.train()
    # print("Done..")
    # Do the recognizer
    # Explicitly specify the test set to be evaluated
    # lrPred = myLRClassifier.evaluate()

    # Multi-layer Perceptron
    myMLPClassifier = MultilayerPerceptron(data.training_set, data.validation_set, data.test_set,
                                           learning_rate=0.05, epochs=30)

    print("\nMulti-layer Perceptron has been training..")
    myMLPClassifier.train()
    print("Done..")

    # Do the recognizer
    # Explicitly specify the test set to be evaluated
    mlpPred = myMLPClassifier.evaluate()

    # Report the result
    # print("=========================")
    evaluator = Evaluator()

    # print("Result of the stupid recognizer:")
    # evaluator.printComparison(data.testSet, stupidPred)
    # evaluator.printAccuracy(data.test_set, stupidPred)

    # print("\nResult of the Perceptron recognizer (on test set):")
    # evaluator.printComparison(data.testSet, perceptronPred)
    # evaluator.printAccuracy(data.test_set, perceptronPred)

    # print("\nResult of the Logistic Regression recognizer (on test set):")
    # evaluator.printComparison(data.testSet, perceptronPred)
    # evaluator.printAccuracy(data.test_set, lrPred)

    print("\nResult of the Multi-layer Perceptron recognizer (on test set):")
    # evaluator.printComparison(data.testSet, perceptronPred)
    evaluator.printAccuracy(data.test_set, mlpPred)

    # Draw
    plot = PerformancePlot("Multi-layer Perceptron on MNIST task")
    plot.draw_performance_epoch(myMLPClassifier.performances, myMLPClassifier.epochs)
def main():
    data = MNISTSeven("../data/mnist_seven.csv", 3000, 1000, 1000)
    myStupidClassifier = StupidRecognizer(data.trainingSet, data.validationSet, data.testSet)
    myPerceptronClassifier = Perceptron(data.trainingSet, data.validationSet, data.testSet,
                                        learningRate=0.005, epochs=30)
    myLRClassifier = LogisticRegression(data.trainingSet, data.validationSet, data.testSet,
                                        learningRate=0.005, epochs=30)
    MlpClassifier = MultilayerPerceptron(data.trainingSet, data.validationSet, data.testSet,
                                         learningRate=0.1, epochs=30)

    # Train the classifiers
    print("=========================")
    print("Training..")

    print("\nStupid Classifier has been training..")
    myStupidClassifier.train()
    print("Done..")

    # print("\nPerceptron has been training..")
    # myPerceptronClassifier.train()
    # print("Done..")

    # print("\nLogistic Regression has been training..")
    # myLRClassifier.train()
    # print("Done..")

    print("\nStarting Backpropagation MLP training...")
    MlpClassifier.train()
    print("Done..")

    # Do the recognizer
    # Explicitly specify the test set to be evaluated
    stupidPred = myStupidClassifier.evaluate()
    # perceptronPred = myPerceptronClassifier.evaluate()
    # lrPred = myLRClassifier.evaluate()
    mlpPred = MlpClassifier.evaluate()

    # Report the result
    print("=========================")
    evaluator = Evaluator()

    print("Result of the stupid recognizer:")
    # evaluator.printComparison(data.testSet, stupidPred)
    evaluator.printAccuracy(data.testSet, stupidPred)

    # print("\nResult of the Perceptron recognizer:")
    # evaluator.printComparison(data.testSet, perceptronPred)
    # evaluator.printAccuracy(data.testSet, perceptronPred)

    # print("\nResult of the Logistic Regression recognizer:")
    # evaluator.printComparison(data.testSet, perceptronPred)
    # evaluator.printAccuracy(data.testSet, lrPred)

    print("\nResult of the MLP recognizer:")
    # evaluator.printComparison(data.testSet, perceptronPred)
    evaluator.printAccuracy(data.testSet, mlpPred)

    # eval.printConfusionMatrix(data.testSet, pred)
    # eval.printClassificationResult(data.testSet, pred, target_names)
    print("=========================")
def main():
    data = MNISTSeven("../data/mnist_seven.csv", 3000, 1000, 1000, oneHot=False)

    # myLRClassifier = LogisticRegression(data.trainingSet, data.validationSet, data.testSet,
    #                                     learningRate=0.005, epochs=30)

    hidden_layers = [LogisticLayer(128, 32, isClassifierLayer=True) for layer in range(1)]
    mlp = MultilayerPerceptron(data.trainingSet, data.validationSet, data.testSet,
                               hidden_layers, learningRate=0.005, epochs=30)

    # Train the classifiers
    # print("=========================")
    print("Training...")

    # print("\nLogistic Regression has been training..")
    # myLRClassifier.train()
    # print("Done..")

    print("Training MLP...")
    mlp.train()
    print("Done.")

    # Do the recognizer
    # Explicitly specify the test set to be evaluated
    # stupidPred = myStupidClassifier.evaluate()
    # perceptronPred = myPerceptronClassifier.evaluate()
    # lrPred = myLRClassifier.evaluate()
    mlpPred = mlp.evaluate()

    # Report the result
    print("=========================")
    evaluator = Evaluator()

    # print("Result of the stupid recognizer:")
    # evaluator.printComparison(data.testSet, stupidPred)
    # evaluator.printAccuracy(data.testSet, stupidPred)

    # print("\nResult of the Perceptron recognizer:")
    # evaluator.printComparison(data.testSet, perceptronPred)
    # evaluator.printAccuracy(data.testSet, perceptronPred)

    # print("\nResult of the Logistic Regression recognizer:")
    # evaluator.printComparison(data.testSet, lrPred)
    # evaluator.printAccuracy(data.testSet, lrPred)

    print("Result of the MLP recognizer:")
    evaluator.printComparison(data.testSet, mlpPred)
    evaluator.printAccuracy(data.testSet, mlpPred)

    # Draw
    plot = PerformancePlot("Logistic Regression validation")
    # plot.draw_performance_epoch(myLRClassifier.performances, myLRClassifier.epochs)
    plot.draw_performance_epoch(mlp.performances, mlp.epochs)
def main():
    data = MNISTSeven("../data/mnist_seven.csv", 3000, 1000, 1000, oneHot=False)

    data.trainingSet.input = np.insert(data.trainingSet.input, 0, 1, axis=1)
    data.validationSet.input = np.insert(data.validationSet.input, 0, 1, axis=1)
    data.testSet.input = np.insert(data.testSet.input, 0, 1, axis=1)

    myStupidClassifier = StupidRecognizer(data.trainingSet, data.validationSet, data.testSet)

    # myPerceptronClassifier = Perceptron(data.trainingSet, data.validationSet, data.testSet,
    #                                     learningRate=0.005, epochs=30)

    # myLRClassifier = LogisticRegression(data.trainingSet, data.validationSet, data.testSet,
    #                                     learningRate=0.005, epochs=30)

    MLPClassifier = MultilayerPerceptron(data.trainingSet, data.validationSet, data.testSet,
                                         netStruct=[800, 100, 10],
                                         actFunc=['relu', 'relu', 'softmax'],
                                         dropout=True,
                                         loss='crossentropy',
                                         learningRate=0.001,
                                         epochs=300)

    # Report the result
    # print("=========================")
    evaluator = Evaluator()

    # Train the classifiers
    print("=========================")
    print("Training..")

    # print("\nStupid Classifier has been training..")
    # myStupidClassifier.train()
    # print("Done..")

    # print("\nPerceptron has been training..")
    # myPerceptronClassifier.train()
    # print("Done..")

    # print("\nLogistic Regression has been training..")
    # myLRClassifier.train()
    # print("Done..")

    print("\nMLP has been training..")
    MLPClassifier.train()
    print("Done..")

    # Do the recognizer
    # Explicitly specify the test set to be evaluated
    # stupidPred = myStupidClassifier.evaluate()
    # perceptronPred = myPerceptronClassifier.evaluate()
    # lrPred = myLRClassifier.evaluate()
    mlpPred = MLPClassifier.evaluate()

    # Report the result
    print("=========================")
    evaluator = Evaluator()

    # print("Result of the stupid recognizer:")
    # evaluator.printComparison(data.testSet, stupidPred)
    # evaluator.printAccuracy(data.testSet, stupidPred)

    # print("\nResult of the Perceptron recognizer:")
    # evaluator.printComparison(data.testSet, perceptronPred)
    # evaluator.printAccuracy(data.testSet, perceptronPred)

    # print("\nResult of the Logistic Regression recognizer:")
    # evaluator.printComparison(data.testSet, lrPred)
    # evaluator.printAccuracy(data.testSet, lrPred)

    print("\nResult of the MLP recognizer:")
    # evaluator.printComparison(data.testSet, lrPred)
    evaluator.printAccuracy(data.testSet, mlpPred)

    # Draw
    # plot = PerformancePlot("MLP validation")
    # plot.draw_performance_epoch(MLPClassifier.performances, MLPClassifier.epochs)
    plt.plot(range(MLPClassifier.epochs), MLPClassifier.performances, 'r--')
    plt.show()
def main():
    data = MNISTSeven("../data/mnist_seven.csv", 3000, 1000, 1000, oneHot=False)

    # myStupidClassifier = StupidRecognizer(data.trainingSet, data.validationSet, data.testSet)

    # myPerceptronClassifier = Perceptron(data.trainingSet, data.validationSet, data.testSet,
    #                                     learningRate=0.005, epochs=30)

    # myLRClassifier = LogisticRegression(data.trainingSet, data.validationSet, data.testSet,
    #                                     learningRate=0.005, epochs=30)

    myMLPClassifier = MultilayerPerceptron(data.trainingSet, data.validationSet, data.testSet,
                                           learningRate=0.005, epochs=30)

    # Report the result
    # print("=========================")
    evaluator = Evaluator()

    # Train the classifiers
    print("=========================")
    print("Training..")

    # print("\nStupid Classifier has been training..")
    # myStupidClassifier.train()
    # print("Done..")

    # print("\nPerceptron has been training..")
    # myPerceptronClassifier.train()
    # print("Done..")

    # print("\nLogistic Regression has been training..")
    # myLRClassifier.train()
    # print("Done..")

    print("\nMLP has been training..")
    myMLPClassifier.train()
    print("Done..")

    # Do the recognizer
    # Explicitly specify the test set to be evaluated
    # stupidPred = myStupidClassifier.evaluate()
    # perceptronPred = myPerceptronClassifier.evaluate()
    # lrPred = myLRClassifier.evaluate()
    mlpPred = myMLPClassifier.evaluate()

    # Report the result
    print("=========================")
    evaluator = Evaluator()

    # print("Result of the stupid recognizer:")
    # evaluator.printComparison(data.testSet, stupidPred)
    # evaluator.printAccuracy(data.testSet, stupidPred)

    # print("\nResult of the Perceptron recognizer:")
    # evaluator.printComparison(data.testSet, perceptronPred)
    # evaluator.printAccuracy(data.testSet, perceptronPred)

    # print("\nResult of the Logistic Regression recognizer:")
    # evaluator.printComparison(data.testSet, lrPred)
    # evaluator.printAccuracy(data.testSet, lrPred)

    print("Result of the MLP recognizer:")
    # evaluator.printComparison(data.testSet, stupidPred)
    evaluator.printAccuracy(data.testSet, mlpPred)

    # Draw
    plot = PerformancePlot("MLP validation")
    plot.draw_performance_epoch(myMLPClassifier.performances, myMLPClassifier.epochs)
def classify_one():
    data = MNISTSeven("../data/mnist_seven.csv", 3000, 1000, 1000,
                      one_hot=True, target_digit='7')

    # NOTE:
    # Comment out the MNISTSeven instantiation above and
    # uncomment the following to work with full MNIST task
    # data = MNISTSeven("../data/mnist_seven.csv", 3000, 1000, 1000, one_hot=False)

    # NOTE:
    # Other 1-digit classifiers do not make sense now for comparison purpose
    # So you should comment them out, let alone the MLP training and evaluation

    # Train the classifiers
    # print("=========================")
    print("Training..")

    # Stupid Classifier
    myStupidClassifier = StupidRecognizer(data.training_set, data.validation_set, data.test_set)

    print("\nStupid Classifier has been training..")
    myStupidClassifier.train()
    print("Done..")
    # Do the recognizer
    # Explicitly specify the test set to be evaluated
    stupidPred = myStupidClassifier.evaluate()

    # Perceptron
    myPerceptronClassifier = Perceptron(data.training_set, data.validation_set, data.test_set,
                                        learning_rate=0.005, epochs=10)

    print("\nPerceptron has been training..")
    myPerceptronClassifier.train()
    print("Done..")
    # Do the recognizer
    # Explicitly specify the test set to be evaluated
    perceptronPred = myPerceptronClassifier.evaluate()

    # Logistic Regression
    myLRClassifier = LogisticRegression(data.training_set, data.validation_set, data.test_set,
                                        learning_rate=0.20, epochs=30)

    print("\nLogistic Regression has been training..")
    myLRClassifier.train()
    print("Done..")
    # Do the recognizer
    # Explicitly specify the test set to be evaluated
    lrPred = myLRClassifier.evaluate()

    # Multilayer Perceptron
    myMLPClassifier = MultilayerPerceptron(data.training_set, data.validation_set, data.test_set,
                                           learning_rate=0.30, epochs=50)

    print("\nMultilayer Perceptron has been training..")
    myMLPClassifier.train()
    print("Done..")
    # Do the recognizer
    # Explicitly specify the test set to be evaluated
    mlpPred = myMLPClassifier.evaluate()

    # Report the result
    # print("=========================")
    evaluator = Evaluator()

    print("Result of the stupid recognizer:")
    # evaluator.printComparison(data.testSet, stupidPred)
    evaluator.printAccuracy(data.test_set, stupidPred)

    print("\nResult of the Perceptron recognizer (on test set):")
    # evaluator.printComparison(data.testSet, perceptronPred)
    evaluator.printAccuracy(data.test_set, perceptronPred)

    print("\nResult of the Logistic Regression recognizer (on test set):")
    # evaluator.printComparison(data.testSet, perceptronPred)
    evaluator.printAccuracy(data.test_set, lrPred)

    print("\nResult of the Multi-layer Perceptron recognizer (on test set):")
    # evaluator.printComparison(data.testSet, perceptronPred)
    evaluator.printAccuracy(data.test_set, mlpPred)

    # Draw
    plot = PerformancePlot("Logistic Regression")
    plot.draw_performance_epoch(myLRClassifier.performances, myLRClassifier.epochs)
def main():
    data = MNISTSeven("../data/mnist_seven.csv", 3000, 1000, 1000, one_hot=False)

    # Train

    # Denoising Auto Encoder
    n_encoder_neurons = 100
    myDAE = DenoisingAutoEncoder(data.training_set, data.validation_set, data.test_set,
                                 n_hidden_neurons=n_encoder_neurons, noise_ratio=0.2,
                                 learning_rate=0.05, epochs=5)
    print("Train autoencoder..")
    myDAE.train(verbose=True)
    print("Done..")

    # Multilayer Perceptron
    layers = []

    # Add autoencoder hidden layer.
    layers.append(LogisticLayer(data.training_set.input.shape[1], n_encoder_neurons,
                                weights=myDAE.get_weights(), cost="mse",
                                activation="sigmoid", learning_rate=0.05))

    # Add another hidden layer just like in the previous exercise.
    n_second_hidden_neurons = 100
    layers.append(LogisticLayer(n_encoder_neurons, n_second_hidden_neurons,
                                cost="mse", activation="sigmoid", learning_rate=0.05))

    # Add output classifier layer with one neuron per digit.
    n_out_neurons = 10
    layers.append(LogisticLayer(n_second_hidden_neurons, n_out_neurons,
                                cost="crossentropy", activation="softmax", learning_rate=0.05))

    myMLPClassifier = MultilayerPerceptron(data.training_set, data.validation_set, data.test_set,
                                           layers=layers, epochs=15)
    print("Train MLP..")
    myMLPClassifier.train(verbose=True)
    print("Done..")
    print("")

    # Evaluate
    print("Evaluate..")
    mlpPred = myMLPClassifier.evaluate()
    print("Done..")
    print("")

    print("Results:")
    evaluator = Evaluator()
    print("")

    # print("Result of the stupid recognizer:")
    # evaluator.printComparison(data.testSet, stupidPred)
    # evaluator.printAccuracy(data.test_set, stupidPred)

    # print("\nResult of the Perceptron recognizer (on test set):")
    # evaluator.printComparison(data.testSet, perceptronPred)
    # evaluator.printAccuracy(data.test_set, perceptronPred)

    # print("\nResult of the Logistic Regression recognizer (on test set):")
    # evaluator.printComparison(data.testSet, perceptronPred)
    # evaluator.printAccuracy(data.test_set, lrPred)

    # evaluator.printComparison(data.testSet, perceptronPred)
    evaluator.printAccuracy(data.test_set, mlpPred)

    # Draw
    plot = PerformancePlot("DAE + MLP on MNIST task")
    plot.draw_performance_epoch(myMLPClassifier.performances, myMLPClassifier.epochs)
def main():
    data = MNISTSeven("../data/mnist_seven.csv", 3000, 1000, 1000)
    myStupidClassifier = StupidRecognizer(data.trainingSet, data.validationSet, data.testSet)
    mylogisticClassifier = LogisticRegression(data.trainingSet, data.validationSet, data.testSet,
                                              learningRate=0.005, epochs=30)

    # Train the classifiers
    print("=========================")
    print("Training..")

    print("\nStupid Classifier has been training..")
    myStupidClassifier.train()
    print("Done..")

    print("\nLogistic Regression has been training..")
    mylogisticClassifier.train()
    print("Done..")

    # Do the recognizer
    # Explicitly specify the test set to be evaluated
    stupidPred = myStupidClassifier.evaluate()
    lrPred = mylogisticClassifier.evaluate()

    # Report the result
    print("=========================")
    evaluator = Evaluator()

    print("Result of the stupid recognizer:")
    # evaluator.printComparison(data.testSet, stupidPred)
    evaluator.printAccuracy(data.testSet, stupidPred)

    print("\nResult of the Logistic Regression recognizer:")
    # evaluator.printComparison(data.testSet, lrPred)
    evaluator.printAccuracy(data.testSet, lrPred)

    # Small grid search over learning rate and number of epochs
    for i in range(2):
        for j in range(2):
            learningRate = (i + 1) * 0.002
            epochs = (j + 1) * 20
            mylogisticClassifier = LogisticRegression(data.trainingSet, data.validationSet,
                                                      data.testSet, learningRate=learningRate,
                                                      epochs=epochs)

            # Train the classifiers
            print("=========================")
            print("learning rate :" + str(learningRate))
            print("epoch :" + str(epochs))
            print("Training..")

            print("\nLogistic Regression has been training..")
            mylogisticClassifier.train()
            print("Done..")

            # Do the recognizer
            # Explicitly specify the test set to be evaluated
            lrPred = mylogisticClassifier.evaluate()

            # Report the result
            print("=========================")
            evaluator = Evaluator()

            print("\nResult of the Logistic Regression recognizer:")
            # evaluator.printComparison(data.testSet, lrPred)
            evaluator.printAccuracy(data.testSet, lrPred)
def main():
    # data = MNISTSeven("../data/mnist_seven.csv", 3000, 1000, 1000,
    #                   one_hot=True, target_digit='7')

    # NOTE:
    # Comment out the MNISTSeven instantiation above and
    # uncomment the following to work with full MNIST task
    data = MNISTSeven("../data/mnist_seven.csv", 3000, 1000, 1000, one_hot=False)

    # NOTE:
    # Other 1-digit classifiers do not make sense now for comparison purpose
    # So you should comment them out, let alone the MLP training and evaluation

    # Train the classifiers
    # print("=========================")
    print("Training..")

    # Stupid Classifier
    # myStupidClassifier = StupidRecognizer(data.training_set, data.validation_set, data.test_set)
    # print("\nStupid Classifier has been training..")
    # myStupidClassifier.train()
    # print("Done..")
    # Do the recognizer
    # Explicitly specify the test set to be evaluated
    # stupidPred = myStupidClassifier.evaluate()

    # Perceptron
    # myPerceptronClassifier = Perceptron(data.training_set, data.validation_set, data.test_set,
    #                                     learning_rate=0.005, epochs=10)
    # print("\nPerceptron has been training..")
    # myPerceptronClassifier.train()
    # print("Done..")
    # Do the recognizer
    # Explicitly specify the test set to be evaluated
    # perceptronPred = myPerceptronClassifier.evaluate()

    # Logistic Regression
    # myLRClassifier = LogisticRegression(data.training_set, data.validation_set, data.test_set,
    #                                     learning_rate=0.005, epochs=30)
    # print("\nLogistic Regression has been training..")
    # myLRClassifier.train()
    # print("Done..")
    # Do the recognizer
    # Explicitly specify the test set to be evaluated
    # lrPred = myLRClassifier.evaluate()

    # Multi layer perceptron
    layers = [LogisticLayer(data.training_set.input.shape[1], 30, None, 'sigmoid', False),
              LogisticLayer(30, 10, None, 'softmax', True)]

    myMLPClassifier = MultilayerPerceptron(data.training_set, data.validation_set, data.test_set,
                                           layers=layers, learning_rate=0.005, epochs=30)

    print("\nMulti-layer Perceptron has been training..")
    myMLPClassifier.train()
    print("Done..")
    # Do the recognizer
    # Explicitly specify the test set to be evaluated
    mlpPred = myMLPClassifier.evaluate()

    # Report the result
    # print("=========================")
    evaluator = Evaluator()

    # print("Result of the stupid recognizer:")
    # evaluator.printComparison(data.testSet, stupidPred)
    # evaluator.printAccuracy(data.test_set, stupidPred)

    # print("\nResult of the Perceptron recognizer (on test set):")
    # evaluator.printComparison(data.testSet, perceptronPred)
    # evaluator.printAccuracy(data.test_set, perceptronPred)

    # print("\nResult of the Logistic Regression recognizer (on test set):")
    # evaluator.printComparison(data.testSet, perceptronPred)
    # evaluator.printAccuracy(data.test_set, lrPred)

    print("\nResult of the Multi-layer Perceptron recognizer (on test set):")
    evaluator.printComparison(data.test_set, mlpPred)
    evaluator.printAccuracy(data.test_set, mlpPred)
def main():
    # data = MNISTSeven("../data/mnist_seven.csv", 3000, 1000, 1000,
    #                   one_hot=True, target_digit='7')

    # NOTE:
    # Comment out the MNISTSeven instantiation above and
    # uncomment the following to work with full MNIST task
    data = MNISTSeven("../data/mnist_seven.csv", 3000, 1000, 1000, one_hot=False)

    # NOTE:
    # Other 1-digit classifiers do not make sense now for comparison purpose
    # So you should comment them out, let alone the MLP training and evaluation

    # Train the classifiers
    # print("=========================")
    # print("Training..")

    # Stupid Classifier
    # myStupidClassifier = StupidRecognizer(data.training_set, data.validation_set, data.test_set)
    # print("\nStupid Classifier has been training..")
    # myStupidClassifier.train()
    # print("Done..")
    # Do the recognizer
    # Explicitly specify the test set to be evaluated
    # stupidPred = myStupidClassifier.evaluate()

    # Perceptron
    # myPerceptronClassifier = Perceptron(data.training_set, data.validation_set, data.test_set,
    #                                     learning_rate=0.005, epochs=10)
    # print("\nPerceptron has been training..")
    # myPerceptronClassifier.train()
    # print("Done..")
    # Do the recognizer
    # Explicitly specify the test set to be evaluated
    # perceptronPred = myPerceptronClassifier.evaluate()

    # Logistic Regression
    # myLRClassifier = LogisticRegression(data.training_set, data.validation_set, data.test_set,
    #                                     learning_rate=0.005, epochs=30)
    # print("\nLogistic Regression has been training..")
    # myLRClassifier.train()
    # print("Done..")
    # Do the recognizer
    # Explicitly specify the test set to be evaluated
    # lrPred = myLRClassifier.evaluate()

    # Build up the network from specific layers
    # Here is an example of a MLP acting like the Logistic Regression
    layers = []
    layers.append(LogisticLayer(784, 5, None, "sigmoid", True))
    layers.append(LogisticLayer(5, 10, None, "softmax", False))

    myMLPClassifier = MultilayerPerceptron(data.training_set, data.validation_set, data.test_set,
                                           learning_rate=0.5, epochs=30, layers=layers)

    print("\nLogistic Regression has been training..")
    myMLPClassifier.train()
    print("Done..")
    # Do the recognizer
    # Explicitly specify the test set to be evaluated
    mlpPred = myMLPClassifier.evaluate()

    # Report the result
    # print("=========================")
    evaluator = Evaluator()

    # print("Result of the stupid recognizer:")
    # evaluator.printComparison(data.testSet, stupidPred)
    # evaluator.printAccuracy(data.test_set, stupidPred)

    # print("\nResult of the Perceptron recognizer (on test set):")
    # evaluator.printComparison(data.testSet, perceptronPred)
    # evaluator.printAccuracy(data.test_set, perceptronPred)

    # print("\nResult of the Logistic Regression recognizer (on test set):")
    # evaluator.printComparison(data.testSet, perceptronPred)
    # evaluator.printAccuracy(data.test_set, lrPred)

    print("\nResult of the Multi-layer Perceptron recognizer (on test set):")
    # evaluator.printComparison(data.testSet, perceptronPred)
    evaluator.printAccuracy(data.test_set, mlpPred)

    # Draw
    # plot = PerformancePlot("Logistic Regression")
    # plot.draw_performance_epoch(myLRClassifier.performances, myLRClassifier.epochs)

    # 3D Plot: learning_rates + epochs -> accuracies
    print("Creating 3D plot. This may take some minutes...")
    learning_rate_sample_count = 5
    epochs_sample_count = 20
    xticks = np.logspace(-10.0, 0, base=10, num=learning_rate_sample_count, endpoint=False)

    accuracies = []
    learning_rates = []
    epoch_values = []
    for i in range(learning_rate_sample_count):
        learning_rate = 100 / np.exp(i)
        print("Calculating accuracy for: learning rate = %s" % learning_rate)

        myMLPClassifier = MultilayerPerceptron(data.training_set, data.validation_set,
                                               data.test_set, learning_rate=learning_rate,
                                               epochs=epochs_sample_count, layers=layers)
        epoch_accuracies = myMLPClassifier.train(False)
        lrPred = myMLPClassifier.evaluate()

        epoch_values.append([e for e in range(epochs_sample_count)])
        learning_rates.append([learning_rate for _ in range(epochs_sample_count)])
        accuracies.append(epoch_accuracies)

    accuracies_merged = list(itertools.chain(*accuracies))
    epochs_merged = list(itertools.chain(*epoch_values))
    learning_rates_merged = list(itertools.chain(*learning_rates))

    print(accuracies_merged)
    print(epochs_merged)
    print(learning_rates)

    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    ax.scatter(np.log10(learning_rates_merged), epochs_merged, accuracies_merged)
    ax.set_xlabel("Learning Rate")
    ax.set_xticks(np.log10(xticks))
    ax.set_xticklabels(xticks)
    ax.set_ylabel('Epochs')
    ax.set_zlabel('Accuracy')
    plt.show()
def main():
    data = MNISTSeven("../data/mnist_seven.csv", 3000, 1000, 1000, oneHot=False)
    myStupidClassifier = StupidRecognizer(data.trainingSet, data.validationSet, data.testSet)

    # myPerceptronClassifier = Perceptron(data.trainingSet, data.validationSet, data.testSet,
    #                                     learningRate=0.005, epochs=30)

    # myLRClassifier = LogisticRegression(data.trainingSet, data.validationSet, data.testSet,
    #                                     learningRate=0.005, epochs=30)

    mlp = MultilayerPerceptron(data.trainingSet, data.validationSet, data.testSet,
                               layers=None, inputWeights=None,
                               outputTask='classification', outputActivation='softmax',
                               loss='cee', learningRate=0.01, epochs=50)

    # Report the result
    # print("=========================")
    evaluator = Evaluator()

    # Train the classifiers
    print("=========================")
    print("Training..")

    print("\nStupid Classifier has been training..")
    myStupidClassifier.train()
    print("Done..")

    print("\nPerceptron has been training..")
    # myPerceptronClassifier.train()
    print("Done..")

    print("\nLogistic Regression has been training..")
    # myLRClassifier.train()
    print("Done..")

    print("\nmlp has been training..")
    mlp.train()
    print("Done..")

    # Do the recognizer
    # Explicitly specify the test set to be evaluated
    stupidPred = myStupidClassifier.evaluate()
    # perceptronPred = myPerceptronClassifier.evaluate()
    # lrPred = myLRClassifier.evaluate()
    mlppred = mlp.evaluate()

    # Report the result
    print("=========================")
    evaluator = Evaluator()

    print("Result of the stupid recognizer:")
    # evaluator.printComparison(data.testSet, stupidPred)
    evaluator.printAccuracy(data.testSet, stupidPred)

    print("\nResult of the Perceptron recognizer:")
    # evaluator.printComparison(data.testSet, perceptronPred)
    # evaluator.printAccuracy(data.testSet, perceptronPred)

    print("\nResult of the Logistic Regression recognizer:")
    # evaluator.printComparison(data.testSet, lrPred)
    # evaluator.printAccuracy(data.testSet, lrPred)

    print("Result of the mlp:")
    evaluator.printAccuracy(data.testSet, mlppred)

    # Draw
    # plot = PerformancePlot("Logistic Regression validation")
    # plot.draw_performance_epoch(myLRClassifier.performances, myLRClassifier.epochs)

    #### This part may be problematic
    plot = PerformancePlot("mlp validation")
    plot.draw_performance_epoch(mlp.performances, mlp.epochs)
def main():
    data = MNISTSeven("../data/mnist_seven.csv", 3000, 1000, 1000)
    myStupidClassifier = StupidRecognizer(data.trainingSet, data.validationSet, data.testSet)

    # parameters
    learnRate = 0.005
    maxEpochs = 20
    # epochNumber = 30

    xEpochs = []
    yAccuracyPerceptron = []
    yAccuracyLogistic = []

    # loop for gathering data for graph plotting
    for epochNumber in range(1, maxEpochs + 1):
        myPerceptronClassifier = Perceptron(data.trainingSet, data.validationSet, data.testSet,
                                            learningRate=learnRate,  # 0.005
                                            epochs=epochNumber)

        # Uncomment this to run Logistic Neuron Layer
        myLRClassifier = LogisticRegression(data.trainingSet, data.validationSet, data.testSet,
                                            learningRate=learnRate,  # 0.005
                                            epochs=epochNumber)  # 30

        # Train the classifiers
        print("=========================")
        print("Training..")

        print("\nStupid Classifier has been training..")
        myStupidClassifier.train()
        print("Done..")

        print("\nPerceptron has been training..")
        myPerceptronClassifier.train()
        print("Done..")

        print("\nLogistic Regression has been training..")
        myLRClassifier.train()
        print("Done..")

        # Do the recognizer
        # Explicitly specify the test set to be evaluated
        stupidPred = myStupidClassifier.evaluate()
        perceptronPred = myPerceptronClassifier.evaluate()
        lrPred = myLRClassifier.evaluate()

        # Report the result
        print("=========================")
        evaluator = Evaluator()

        print("Result of the stupid recognizer:")
        # evaluator.printComparison(data.testSet, stupidPred)
        evaluator.printAccuracy(data.testSet, stupidPred)

        print("\nResult of the Perceptron recognizer:")
        # evaluator.printComparison(data.testSet, perceptronPred)
        evaluator.printAccuracy(data.testSet, perceptronPred)

        print("\nResult of the Logistic Regression recognizer:")
        # evaluator.printComparison(data.testSet, lrPred)
        evaluator.printAccuracy(data.testSet, lrPred)

        # accumulate plotting data
        xEpochs.append(epochNumber)
        yAccuracyPerceptron.append(accuracy_score(data.testSet.label, perceptronPred) * 100)
        yAccuracyLogistic.append(accuracy_score(data.testSet.label, lrPred) * 100)
    # === end of for loop ===

    # plot the graph
    plt.plot(xEpochs, yAccuracyPerceptron, marker='o', label='Perceptron')
    plt.plot(xEpochs, yAccuracyLogistic, marker='o', color='r', label='Logistic Neuron')
    plt.xlabel('Number of epochs')
    plt.ylabel('Accuracy [%]')
    plt.title('Performance on different epochs\n(using: testSet | learningRate: '
              + str(learnRate) + ')')
    # plt.legend()
    plt.legend(loc=4)
    # plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3, ncol=2, mode="expand", borderaxespad=0.)
    # plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
    plt.show()
def main():
    # data = MNISTSeven("../data/mnist_seven.csv", 3000, 1000, 1000,
    #                   one_hot=True, target_digit='7')

    # NOTE:
    # Comment out the MNISTSeven instantiation above and
    # uncomment the following to work with full MNIST task
    data = MNISTSeven("../data/mnist_seven.csv", 3000, 1000, 1000, one_hot=False)

    # NOTE:
    # Other 1-digit classifiers do not make sense now for comparison purpose
    # So you should comment them out, let alone the MLP training and evaluation

    # Train the classifiers
    # print("=========================")
    print("Training the autoencoder..")

    myDAE = DenoisingAutoEncoder(data.training_set, data.validation_set, data.test_set,
                                 learning_rate=0.05, epochs=10)

    print("\nAutoencoder has been training..")
    myDAE.train()
    print("Done..")

    # Multi-layer Perceptron
    # NOTES:
    # Now take the trained weights (layer) from the Autoencoder
    # Feed it to be a hidden layer of the MLP, continue training (fine-tuning)
    # And do the classification

    # Correct the code here
    myMLPClassifier = MultilayerPerceptron(data.training_set, data.validation_set, data.test_set,
                                           learning_rate=0.05, epochs=30,
                                           input_weights=myDAE._get_weights())

    print("\nMulti-layer Perceptron has been training..")
    myMLPClassifier.train()
    print("Done..")

    # Do the recognizer
    # Explicitly specify the test set to be evaluated
    mlpPred = myMLPClassifier.evaluate()

    # Report the result
    # print("=========================")
    evaluator = Evaluator()

    # print("Result of the stupid recognizer:")
    # evaluator.printComparison(data.testSet, stupidPred)
    # evaluator.printAccuracy(data.test_set, stupidPred)

    # print("\nResult of the Perceptron recognizer (on test set):")
    # evaluator.printComparison(data.testSet, perceptronPred)
    # evaluator.printAccuracy(data.test_set, perceptronPred)

    # print("\nResult of the Logistic Regression recognizer (on test set):")
    # evaluator.printComparison(data.testSet, perceptronPred)
    # evaluator.printAccuracy(data.test_set, lrPred)

    print("\nResult of the DAE + MLP recognizer (on test set):")
    # evaluator.printComparison(data.testSet, perceptronPred)
    evaluator.printAccuracy(data.test_set, mlpPred)

    # Draw
    # plot = PerformancePlot("DAE + MLP on MNIST task")
    # plot.draw_performance_epoch(myMLPClassifier.performances, myMLPClassifier.epochs)

    # print myDAE._get_weights().shape[1]
    weights = 0.5 * myDAE._get_weights() + 0.5
    wplot = WeightVisualizationPlot(weights)
    wplot.draw_weights()
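# Note on the "0.5 * w + 0.5" rescaling above: it only maps the weights into [0, 1] if
# they already lie in [-1, 1]. Below is a hedged alternative sketch using min-max
# normalisation per weight matrix; rescale_for_display is a hypothetical helper and not
# part of the framework.
import numpy as np

def rescale_for_display(weights):
    w = np.asarray(weights, dtype=float)
    return (w - w.min()) / (w.max() - w.min() + 1e-12)  # map to [0, 1] for plotting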
def main():
    # data = MNISTSeven("../data/mnist_seven.csv", 3000, 1000, 1000,
    #                   one_hot=True, target_digit='7')

    # NOTE:
    # Comment out the MNISTSeven instantiation above and
    # uncomment the following to work with full MNIST task
    data = MNISTSeven("../data/mnist_seven.csv", 3000, 1000, 1000, one_hot=False)

    # NOTE:
    # Other 1-digit classifiers do not make sense now for comparison purpose
    # So you should comment them out, let alone the MLP training and evaluation

    # Train the classifiers
    # print("=========================")
    print("Training the autoencoder..")

    myDAE = DenoisingAutoEncoder(data.training_set, data.validation_set, data.test_set,
                                 learning_rate=0.05, epochs=30)

    print("\nAutoencoder has been training..")
    myDAE.train()
    print("Done..")

    # Multi-layer Perceptron
    # NOTES:
    # Now take the trained weights (layer) from the Autoencoder
    # Feed it to be a hidden layer of the MLP, continue training (fine-tuning)
    # And do the classification

    # Correct the code here
    myMLPClassifier = MultilayerPerceptron(data.training_set, data.validation_set, data.test_set,
                                           learning_rate=0.05, epochs=30)

    print("\nMulti-layer Perceptron has been training..")
    myMLPClassifier.train()
    print("Done..")

    # Do the recognizer
    # Explicitly specify the test set to be evaluated
    mlpPred = myMLPClassifier.evaluate()

    # Report the result
    # print("=========================")
    evaluator = Evaluator()

    # print("Result of the stupid recognizer:")
    # evaluator.printComparison(data.testSet, stupidPred)
    # evaluator.printAccuracy(data.test_set, stupidPred)

    # print("\nResult of the Perceptron recognizer (on test set):")
    # evaluator.printComparison(data.testSet, perceptronPred)
    # evaluator.printAccuracy(data.test_set, perceptronPred)

    # print("\nResult of the Logistic Regression recognizer (on test set):")
    # evaluator.printComparison(data.testSet, perceptronPred)
    # evaluator.printAccuracy(data.test_set, lrPred)

    print("\nResult of the DAE + MLP recognizer (on test set):")
    # evaluator.printComparison(data.testSet, perceptronPred)
    evaluator.printAccuracy(data.test_set, mlpPred)

    # Draw
    plot = PerformancePlot("DAE + MLP on MNIST task")
    plot.draw_performance_epoch(myMLPClassifier.performances, myMLPClassifier.epochs)
def main():
    # data = MNISTSeven("../data/mnist_seven.csv", 3000, 1000, 1000,
    #                   one_hot=True, target_digit='7')

    # NOTE:
    # Comment out the MNISTSeven instantiation above and
    # uncomment the following to work with full MNIST task
    data = MNISTSeven("../data/mnist_seven.csv", 3000, 1000, 1000, one_hot=False)

    # NOTE:
    # Other 1-digit classifiers do not make sense now for comparison purpose
    # So you should comment them out, let alone the MLP training and evaluation

    # Train the classifiers
    # print("=========================")
    print("Training the autoencoder..")

    myDAE = DenoisingAutoEncoder(data.training_set, data.validation_set, data.test_set,
                                 learning_rate=dae_lr, noiseRatio=dae_nr,
                                 hiddenLayerNeurons=hiddenLayerNeurons, epochs=dae_epochs)

    print("\nAutoencoder has been training..")
    myDAE.train()
    print("Done..")

    # Multi-layer Perceptron
    # NOTES:
    # Now take the trained weights (layer) from the Autoencoder
    # Feed it to be a hidden layer of the MLP, continue training (fine-tuning)
    # And do the classification
    myMLPLayers = []

    # First hidden layer
    number_of_1st_hidden_layer = hiddenLayerNeurons
    myMLPLayers.append(LogisticLayer(data.training_set.input.shape[1] - 1,  # bias "1" already added, so remove one
                                     number_of_1st_hidden_layer,
                                     weights=myDAE._get_weights(),
                                     activation="sigmoid",
                                     is_classifier_layer=False))

    # Output layer
    number_of_output_layer = 10
    myMLPLayers.append(LogisticLayer(number_of_1st_hidden_layer, number_of_output_layer, None,
                                     activation="softmax", is_classifier_layer=True))

    # Correct the code here
    myMLPClassifier = MultilayerPerceptron(data.training_set, data.validation_set, data.test_set,
                                           layers=myMLPLayers, learning_rate=mlp_lr,
                                           epochs=mlp_epochs)

    # remove double added bias "1"
    myMLPClassifier.__del__()

    print("\nMulti-layer Perceptron has been training..")
    myMLPClassifier.train()
    print("Done..")

    # Do the recognizer
    # Explicitly specify the test set to be evaluated
    mlpPred = myMLPClassifier.evaluate()

    # Report the result
    # print("=========================")
    evaluator = Evaluator()

    # print("Result of the stupid recognizer:")
    # evaluator.printComparison(data.testSet, stupidPred)
    # evaluator.printAccuracy(data.test_set, stupidPred)

    # print("\nResult of the Perceptron recognizer (on test set):")
    # evaluator.printComparison(data.testSet, perceptronPred)
    # evaluator.printAccuracy(data.test_set, perceptronPred)

    # print("\nResult of the Logistic Regression recognizer (on test set):")
    # evaluator.printComparison(data.testSet, perceptronPred)
    # evaluator.printAccuracy(data.test_set, lrPred)

    print("\nResult of the DAE + MLP recognizer (on test set):")
    # evaluator.printComparison(data.testSet, perceptronPred)
    evaluator.printAccuracy(data.test_set, mlpPred)

    os.chdir("..")

    # Draw
    plot = PerformancePlot("DAE + MLP on MNIST task on validation set")
    plot.draw_performance_epoch(myMLPClassifier.performances, myMLPClassifier.epochs,
                                "plots", filename)

    print("drawing weights of auto encoder mlp input…")
    weight_plotter = WeightVisualizationPlot(myDAE.autoencMLP)
    weight_plotter.plot()