Example #1
0
 def test_predict(self):
     """
     Test prediction over a NumericalDataSet of identical observations.

     Builds a 2-2-1 perceptron with all-ones weights, feeds it ``nrObs``
     copies of the input vector ``[2, 2]``, and checks that ``predict``
     returns one np.array per observation with the expected activation.
     """
     # Define a 2-input, 2-hidden, 1-output network.
     layerSizes = [2, 2, 1]
     nn = MultilayerPerceptron(layerSizes)

     # All-ones weight matrices; the extra row holds the bias weights
     # (3x2 maps 2 inputs + bias -> 2 hidden, 3x1 maps 2 hidden + bias -> 1 output).
     parameters = [np.ones((3, 2)), np.ones((3, 1))]
     nn.set_params(parameters)

     # nrObs identical observations of [2, 2]; np.tile builds the matrix in
     # one allocation instead of the O(n^2) repeated-vstack loop.
     nrObs = 10
     inputSet = np.tile(np.array([[2, 2]]), (nrObs, 1))
     dataSet = NumericalDataSet(inputSet, None)

     # Run prediction on the whole data set.
     predictions = nn.predict(dataSet)

     # One prediction per observation, each equal to the known activation value.
     self.assertEqual(len(predictions), nrObs, "number of observations mismatch")
     for prediction in predictions:
         assert_equal(prediction, np.array([[2.9866142981514305]]), "wrong output")
Example #2
0
    def test_digits_prediction(self):
        """
        End-to-end training/prediction test on the pen-digits data set.

        Trains a 16-16-10 rectifier MLP with Rprop on (the first 500 rows of)
        the training file, predicts the testing file, and prints the detection
        rate together with a labeled 10x10 confusion matrix.
        """
        training_data = np.loadtxt('../../data/pendigits-training.txt')[:500, :]
        testing_data = np.loadtxt('../../data/pendigits-testing.txt')

        layer_sizes = [16, 16, 10]
        update_method = Rprop(layer_sizes, init_step=0.01)
        nn = MultilayerPerceptron(layer_sizes, iterations=100, do_classification=True,
                           update_method=update_method,
                           batch_update_size=30,
                           activation_function=nputils.rectifier,
                           deriv_activation_function=nputils.rectifier_deriv)

        # One-hot targets; min-max normalize each feature column to [0, 1].
        # Broadcasting replaces the original per-row list comprehension.
        training_targets = nputils.convert_targets(training_data[:, -1], 10)
        training_input = training_data[:, 0:-1]
        maxs = np.max(training_input, axis=0)
        mins = np.min(training_input, axis=0)
        normalized_training_input = (training_input - mins) / (maxs - mins)

        training_data_set = NumericalDataSet(normalized_training_input,
                                              training_targets)

        # NOTE(review): the test set is normalized with its OWN min/max rather
        # than the training set's — confirm this is intended.
        testing_targets = nputils.convert_targets(testing_data[:, -1], 10)
        testing_input = testing_data[:, 0:-1]
        maxs = np.max(testing_input, axis=0)
        mins = np.min(testing_input, axis=0)
        normalized_testing_input = (testing_input - mins) / (maxs - mins)

        testing_data_set = NumericalDataSet(normalized_testing_input, testing_targets)

        nn.train(training_data_set)
        predictions = nn.predict(testing_data_set)

        # Collapse each output vector to its most likely class index.
        predictions = [np.argmax(p) for p in predictions]

        # Confusion matrix with a header row/column of class labels
        # (top-left corner is the -1 placeholder).
        conf_matrix = np.zeros((10, 10))
        conf_matrix = np.concatenate(([np.arange(0, 10)], conf_matrix), axis=0)
        conf_matrix = np.concatenate((np.transpose([np.arange(-1, 10)]), conf_matrix), axis=1)
        targets = testing_data[:, -1]
        # loadtxt yields floats; cast to int — float indices raise IndexError
        # on modern NumPy.
        for target, prediction in zip(targets, predictions):
            conf_matrix[int(target) + 1, prediction + 1] += 1

        print("Detection rate: " + str(np.sum(np.diagonal(conf_matrix[1:, 1:])) / len(targets)))
        print(str(conf_matrix))