Пример #1
0
 def test_numerical_accuracy(self):
     """Accuracy of argmax predictions against integer class labels."""
     probabilities = np.array([[0.3, 0.7], [0.6, 0.4]])
     # Argmax of the rows is [1, 0]: one of two matches [1, 1] -> 0.5.
     self.assertAlmostEqual(numerical_accuracy(probabilities, [1, 1]),
                            0.5)
     # Both argmax predictions match [1, 0] -> perfect accuracy.
     self.assertAlmostEqual(numerical_accuracy(probabilities, [1, 0]),
                            1.0)
Пример #2
0
 def test_xor(self):
     """A single hidden layer of 2 units should learn XOR exactly."""
     # Same data as np.array([0,0,1,1,0,1,1,0]).reshape(4, 2), spelled out.
     features = np.array([[0, 0], [1, 1], [0, 1], [1, 0]], dtype=np.float32)
     labels = np.array([0, 0, 1, 1], dtype=np.float32)
     model = MLP(hidden_layer_sizes=(2, ), epoch_num=1600, learning_rate=0.22)
     # Seed after construction so weight initialization order is unchanged.
     np.random.seed(2020)
     model.train(features, labels)
     self.assertAlmostEqual(numerical_accuracy(model.predict(features), labels),
                            1.0)
Пример #3
0
 def test_multiple_layer_with_regulation(self):
     # test on UCI ML hand-written digits datasets
     model = MLP(hidden_layer_sizes=(30, ),
                 epoch_num=600,
                 batch_size=32,
                 learning_rate=0.2,
                 _lambda=0.05)
     digits = load_digits()
     # First half of the dataset trains the network, second half tests it.
     half = len(digits.images) // 2
     x_train, y_train = digits.data[:half], digits.target[:half]
     np.random.seed(2020)
     model.train(x_train, y_train)
     # Regularized network should still fit the training split near-perfectly.
     self.assertTrue(
         numerical_accuracy(model.predict(x_train), y_train) > 0.99)
     x_test, y_test = digits.data[half:], digits.target[half:]
     self.assertTrue(numerical_accuracy(model.predict(x_test), y_test) > 0.93)
Пример #4
0
 def test_iris(self):
     # test softmax on Iris dataset
     iris = load_iris()
     samples, targets = iris.data, iris.target
     # One third of the dataset per mini-batch.
     third = int(samples.shape[0] / 3)
     model = MLP(epoch_num=400, batch_size=third, learning_rate=0.1)
     # Seed after construction so weight initialization order is unchanged.
     np.random.seed(2020)
     model.train(samples, targets)
     predictions = model.predict(samples)
     self.assertTrue(numerical_accuracy(predictions, targets) > 0.95)