def test_network_and_plot_results(self):
        """Train and test the network for every (training-iterations,
        hidden-layer-size) combination and plot the results.

        For each configuration the network is retrained ``num_trials`` times
        on fresh train/test splits; the mean accuracy and its standard error
        are plotted per iteration count, and a combined plot compares all
        iteration counts at the end.

        Reads: self.training_iterations_list, self.hidden_layer_sizes,
        self.filepath, self.baseline_accuracy.
        """
        num_trials = 10  # repetitions per configuration (was hard-coded in 3 places)
        accuracy_results = []
        # Guard: if training_iterations_list is empty, `plotter` would
        # otherwise be unbound at the final check (NameError in original).
        plotter = None
        for training_iterations in self.training_iterations_list:
            # print(...) with a single argument is valid Python 2 and 3
            print("Testing network for " + str(training_iterations) +
                  " iterations")

            # Mean accuracy and standard error per hidden-layer size
            mean_accuracies = []
            standard_errors = []

            # Iterate through the hidden layer sizes specified
            for hidden_layer_size in self.hidden_layer_sizes:
                accuracy_cache = []

                print("Testing network with hidden layer size: " +
                      str(hidden_layer_size))
                # Train/test num_trials times for this configuration
                for _ in range(num_trials):
                    # Reinitialise the dataset importer so every trial gets a
                    # fresh train/test split
                    csv_delegate = CSVFileDelegate(self.filepath)

                    # Classifier built from this split's data and targets
                    neural_network_classifier = NeuralNetworkClassifier(
                        csv_delegate.training_data,
                        csv_delegate.training_target)

                    # Build and train network with <hidden_layer_size> nodes
                    # in the hidden layer
                    neural_network_classifier.build_network(
                        hidden_layer_size, training_iterations)

                    # Use classifier to classify testing data
                    results = neural_network_classifier.classify_set(
                        csv_delegate.testing_data)

                    # Compare results to testing target
                    accuracy = self.compare_result_to_target(
                        results, csv_delegate.testing_target)
                    accuracy_cache.append(accuracy)
                    print(accuracy)

                # Mean over the trials actually run (replaces hard-coded /10)
                mean_accuracies.append(
                    float(sum(accuracy_cache)) / len(accuracy_cache))
                standard_errors.append(scipy.stats.sem(accuracy_cache))

            # Plot accuracy vs number of hidden nodes with the standard error
            plotter = ResultPlotter(self.hidden_layer_sizes, mean_accuracies,
                                    standard_errors, training_iterations,
                                    self.baseline_accuracy,
                                    ntpath.basename(self.filepath))
            plotter.generate_plot_with_errors()
            accuracy_results.append(mean_accuracies)
        if plotter is not None:
            plotter.generate_combined_plot(self.hidden_layer_sizes,
                                           accuracy_results,
                                           self.training_iterations_list)
    def test_learning_rates(self):
        """Sweep self.learning_rates, retraining the network num_trials times
        per rate (3 hidden nodes, 2000 iterations fixed), report the best
        mean accuracy and plot mean accuracy with standard-error bars.

        Reads: self.learning_rates. Uses the owls15 dataset.
        """
        num_trials = 10  # repetitions per learning rate (was hard-coded in 2 places)
        dataset_path = "Datasets/owls15.csv"  # single source for the duplicated literal
        mean_accuracies = []
        standard_errors = []
        best_mean_accuracy = 0.0
        best_learning_rate = 0.0
        for learning_rate in self.learning_rates:
            accuracy_cache = []

            # print(...) with a single argument is valid Python 2 and 3
            print("Testing network with learning rate: " + str(learning_rate))
            # Train/test num_trials times for this learning rate
            for _ in range(num_trials):
                # Reinitialise the dataset importer so every trial gets a
                # fresh train/test split
                csv_delegate = CSVFileDelegate(dataset_path)

                # Classifier built from this split's data and targets
                neural_network_classifier = NeuralNetworkClassifier(
                    csv_delegate.training_data, csv_delegate.training_target)

                # 3 hidden nodes and 2000 iterations are held fixed while the
                # learning rate varies
                neural_network_classifier.build_network(3, 2000, learning_rate)

                # Use classifier to classify testing data
                results = neural_network_classifier.classify_set(
                    csv_delegate.testing_data)

                # Compare results to testing target
                accuracy = self.compare_result_to_target(
                    results, csv_delegate.testing_target)
                accuracy_cache.append(accuracy)
                print(accuracy)

            # Mean over the trials actually run (replaces hard-coded /10);
            # track the best-performing learning rate
            mean_accuracy = float(sum(accuracy_cache)) / len(accuracy_cache)
            if mean_accuracy > best_mean_accuracy:
                best_mean_accuracy = mean_accuracy
                best_learning_rate = learning_rate

            mean_accuracies.append(mean_accuracy)
            standard_errors.append(scipy.stats.sem(accuracy_cache))

        print("Best learning rate = " + str(best_learning_rate))
        plotter = ResultPlotter(self.learning_rates, mean_accuracies,
                                standard_errors, 0, 0, dataset_path)
        plotter.generate_learning_rate_plot_with_errors(
            self.learning_rates, mean_accuracies, standard_errors)
# --- Example #3 (separator from the original code-example scrape) ---
    # loop of different calibration methods
    # NOTE(review): this is the interior of a larger function/loop -- `config`,
    # `myDataCreator`, `Results` and `run` come from an enclosing scope that is
    # not visible here.
    for m in config['calibrationMethods']:
        # Resolve the method name string to a callable via locals().
        # NOTE(review): fragile -- assumes a function named `m` exists in the
        # enclosing function's local scope; confirm against the full file.
        calMethod = locals()[m]
        calB = []  # calibration result (matrix-like, supports .dot) per hop
        calTestData = []  # calibrated ("virtual") test outputs per hop
        calStats = []  # CalibrationStatistics object per hop
        # loop over the hops, i.e. perform calibration hop-by-hop
        for hop in range(config['numHops']):
            # get the data for calibration
            X, Y = myDataCreator.get_train_data(hop)

            if hop == 0:
                # in the first hop we calibrate the sensor array to the true values
                calB.append(calMethod(X, Y))
            else:
                # in the following hops (hop > 0) we calibrate the array to the previously calibrated one
                # (virtY holds the previous hop's calibrated test outputs)
                calB.append(calMethod(X, virtY))
            Xt, GT = myDataCreator.get_test_data(hop)

            # Apply this hop's calibration to the test data; the result also
            # becomes the calibration target for the next hop.
            virtY = calB[hop].dot(Xt)
            calTestData.append(virtY)
            trueB = myDataCreator.get_true_B(hop)

            # Statistics compare calibrated output vs ground truth, and the
            # estimated calibration vs the true one.
            calStats.append(CalibrationStatistics(virtY, GT, calB[hop], trueB))

        # Record this method's per-hop statistics under "<method>_run_<run>".
        Results.update({m + '_run_' + str(run): calStats})

# Render the collected statistics, if any were requested in the config.
if config['statsToPlot']:
    # ResultPlotter receives all runs' statistics plus the plotting
    # configuration (which stats, which methods, run/hop counts).
    ResultPlotter(
        Results,
        config['statsToPlot'],
        config['calibrationMethods'],
        config['numRuns'],
        config['numHops'],
    )
# --- Example #4 (separator from the original code-example scrape) ---
 def __init__(self, plotterTerminal="png"):
     """Initialise plotting and metrics collaborators with no results loaded.

     plotterTerminal: output format passed to ResultPlotter; presumably a
         gnuplot-style terminal name (defaults to "png") -- TODO confirm
         against ResultPlotter.
     """
     self.plotter = ResultPlotter(plotterTerminal)
     self.metrics = MetricsCalc()

     # No pareto front computed yet; start with an empty set of result
     # directories.
     self.pareto = None
     self.setResultDirectories([])