def test_backprop_1500_iteration(self):
    """After 1500 backprop iterations, weights match known reference values.

    Fix: floating-point results are compared with assertAlmostEqual
    instead of exact assertEqual — exact equality on accumulated float
    arithmetic is brittle across platforms/BLAS builds.
    """
    X = np.array([[0.05, 0.10]])
    y = np.array([[0.01, 0.99]])
    nn = MyNeuralNet()
    # Input is stored as (sample, target) pairs — mirrors the other tests.
    nn.input = np.array(list(zip(X, y)))
    self.setup_test_conf(nn)
    nn.learn(1500, 0.5, 1)
    self.assertAlmostEqual(nn.weights[0][0][0], 0.1747576240731027)
    self.assertAlmostEqual(nn.weights[0][0][1], 0.24951524814620638)
    self.assertAlmostEqual(nn.weights[0][1][0], 0.2740253089080296)
    self.assertAlmostEqual(nn.weights[0][1][1], 0.3480506178160598)
    # Shape is an exact structural property — exact equality is correct here.
    self.assertEqual(nn.output.shape, (1, 2))
def test_backprop_1_iteration(self):
    """A single backprop iteration reproduces hand-computed reference weights.

    Fix: float weights are checked with assertAlmostEqual rather than
    exact assertEqual, which is fragile for floating-point results.
    """
    X = np.array([[0.05, 0.10]])
    y = np.array([[0.01, 0.99]])
    nn = MyNeuralNet()
    nn.input = np.array(list(zip(X, y)))
    self.setup_test_conf(nn)
    nn.learn(1, 0.5, 1)
    # Layer 0 weights.
    self.assertAlmostEqual(nn.weights[0][0][0], 0.14978071613276281)
    self.assertAlmostEqual(nn.weights[0][0][1], 0.19956143226552567)
    self.assertAlmostEqual(nn.weights[0][1][0], 0.24975114363236958)
    self.assertAlmostEqual(nn.weights[0][1][1], 0.29950228726473915)
    # Layer 1 weights.
    self.assertAlmostEqual(nn.weights[1][0][0], 0.35891647971788465)
    self.assertAlmostEqual(nn.weights[1][0][1], 0.4086661860762334)
    self.assertAlmostEqual(nn.weights[1][1][0], 0.5113012702387375)
    self.assertAlmostEqual(nn.weights[1][1][1], 0.5613701211079891)
def test_relu(self):
    """XOR (per-class, two-output encoding) is learned with ReLU activation.

    Fixes: removed a leftover debug print(res); the original asserted
    only the first two rows of the truth table, so the remaining two
    rows are now asserted as well.
    """
    # XOR truth table; y is one-hot per class: [0, 1] = class 0, [1, 0] = class 1.
    X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
    y = np.array([[0, 1], [1, 0], [1, 0], [0, 1]])
    nn = MyNeuralNet()
    nn.input = np.array(list(zip(X, y)))
    nn.add_layer(100)
    nn.add_layer(100)
    nn.add_layer(2)
    nn.learn(2000, 0.01, 1, activation='relu')
    res = nn.classify(X)
    # Round each output to the nearest class indicator and compare
    # against the full expected truth table.
    predicted = [[int(round(v)) for v in row] for row in res]
    self.assertEqual(predicted, [[0, 1], [1, 0], [1, 0], [0, 1]])
def test_classify(self):
    """Train on XOR (single-output encoding) and classify two fresh samples."""
    samples = np.array([[0, 0], [0, 1], [1, 0], [1, 1], [0, 0]])
    targets = np.array([[0], [1], [1], [0], [0]])
    nn = MyNeuralNet()
    nn.input = np.array(list(zip(samples, targets)))
    nn.add_layer(4, 0)
    nn.add_layer(1, 0)
    nn.learn(1500, 10, 1)
    predictions = nn.classify(np.array([[0, 0], [1, 0]]))
    # Round the raw outputs to the nearest integer class label.
    self.assertEqual(int(round(predictions[0][0])), 0)
    self.assertEqual(int(round(predictions[1][0])), 1)
def test_multiple_input_50_neuron_2_layer(self):
    """XOR is learned with batch size 5 and classified over all samples.

    Fix: classify() is given an np.array, matching how every sibling
    test invokes it, instead of a plain nested list.
    """
    X = np.array([[0, 0], [0, 1], [1, 0], [1, 1], [0, 0]])
    y = np.array([[0], [1], [1], [0], [0]])
    nn = MyNeuralNet()
    nn.input = np.array(list(zip(X, y)))
    nn.add_layer(4, 0)
    nn.add_layer(1, 0)
    # Batch size 5 — the whole training set per update.
    nn.learn(1500, 1, 5)
    res = nn.classify(np.array([[0, 0], [0, 1], [1, 0], [1, 1], [0, 0]]))
    self.assertEqual(list(map(lambda x: int(round(x[0])), res)), [0, 1, 1, 0, 0])
def test_raise_illegal_output(self):
    """learn() raises ValueError when the output layer width mismatches y."""
    features = np.array([[0, 0, 1], [0, 1, 1], [1, 0, 1], [1, 1, 1], [0, 0, 0]])
    labels = np.array([[0, 0], [1, 1], [1, 1], [0, 0], [0, 0]])
    nn = MyNeuralNet()
    nn.input = np.array(list(zip(features, labels)))
    # Single-neuron output layer, but the targets are two-dimensional.
    nn.add_layer(1, 0)
    with self.assertRaises(ValueError):
        nn.learn(1, 1, 1)
def test_simple_net(self):
    """A one-layer, one-neuron net trains to a known reference weight.

    Fix: the trained weight (a float) is compared with
    assertAlmostEqual instead of exact assertEqual.
    """
    X = np.array([[0.05]])
    y = np.array([[0.01]])
    nn = MyNeuralNet()
    nn.input = np.array(list(zip(X, y)))
    nn.add_layer(1, 0)
    nn.learn(1500, 0.3, 1)
    # Structural properties — exact equality is appropriate.
    self.assertEqual(len(nn.weights), 1)
    self.assertEqual(nn.weights[0].shape, (1, 2))
    # Trained value — floating point, so compare approximately.
    self.assertAlmostEqual(nn.weights[0][0][0], 1.60050105653771)
def test_gradient_checking(self):
    """Analytic backprop gradients agree with numerical approximations."""
    X = np.array([[0.05, 0.10]])
    y = np.array([[0.01, 0.99]])
    nn = MyNeuralNet()
    nn.activation = 'relu'
    nn.input = np.array(list(zip(X, y)))
    self.setup_test_conf(nn)
    # Numerical (finite-difference) gradients from the test helper.
    approx = np.array(self.calc_gradients(nn))
    # Analytic gradients from one forward/backward pass.
    nn.feed_forward(nn.input[:, 0])
    analytic = np.array(nn.backward(nn.input[:, 1]))
    self.assertTrue(np.allclose(analytic, approx))
def test_raise_no_layers(self):
    # learn() must raise ValueError when no layers have been added.
    nn = MyNeuralNet()
    with self.assertRaises(ValueError):
        nn.learn(1, 0.3, 1)
    # NOTE(review): bare attribute access with no assertion — presumably a
    # smoke check that the attribute exists (would raise AttributeError
    # otherwise), or leftover debug code. Confirm intent; consider an
    # explicit assert or removal.
    nn.last_layer_neuron_count