Example #1
    def test_backpropagation(self):
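        # XOR with the weight-elimination penalty: fixed initial weights
        # (bias rows included) make the run deterministic, and the training
        # error should round to zero after 350 epochs.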
        weight1 = np.array([
            [0.22667075,  0.38116981,  0.62686969],
            [1.13062085,  0.40836474, -0.50492125],
            [-0.22645265,  1.13541005, -2.7876409]
        ])
        weight2 = np.array([
            [0.63547163],
            [0.63347214],
            [-1.3669957],
            [-0.42770718]
        ])

        input_layer = TanhLayer(2, weight=weight1)
        hidden_layer = TanhLayer(3, weight=weight2)
        output = StepOutputLayer(1, output_bounds=(-1, 1))

        network = Backpropagation(
            input_layer > hidden_layer > output,
            step=0.3,
            zero_weight=20,
            optimizations=[WeightElimination]
        )
        network.train(xor_input_train, xor_target_train, epochs=350)
        self.assertEqual(round(network.last_error_in(), 2), 0)
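
All of these examples assume imports and shared XOR fixtures defined in the
enclosing test module, which the excerpts do not show. Below is a minimal
sketch of those definitions, assuming the old NeuPy-style API the tests call
into and bipolar (-1, 1) XOR patterns to match the StepOutputLayer bounds;
the module paths and fixture values are assumptions, not confirmed by the
excerpts:

import numpy as np

# Assumed import locations; the excerpts never show them.
from neupy.algorithms import (Backpropagation, WeightDecay,
                              WeightElimination)
from neupy.layers import (TanhLayer, SigmoidLayer, StepOutputLayer,
                          OutputLayer)

# Hypothetical reconstruction of the shared fixtures: the four XOR
# patterns in bipolar form, matching the (-1, 1) output bounds above.
xor_input_train = np.array([
    [-1, -1],
    [-1,  1],
    [ 1, -1],
    [ 1,  1],
])
xor_target_train = np.array([[-1], [1], [1], [-1]])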
Example #2
    def test_backpropagation(self):
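        # Plain backpropagation on XOR with fixed starting weights; the
        # 2-3-1 tanh network should converge within 1000 epochs.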
        output = StepOutputLayer(1, output_bounds=(-1, 1))

        weight1 = np.array([
            [0.31319847, -1.17858149, 0.71556407],
            [1.60798015, 0.16304449, -0.22483005],
            [-0.90144173, 0.58500625, -0.01724167]
        ])
        weight2 = np.array([
            [-1.34351428],
            [0.45506056],
            [0.24790366],
            [-0.74360389]
        ])

        input_layer = TanhLayer(2, weight=weight1)
        hidden_layer = TanhLayer(3, weight=weight2)

        network = Backpropagation(
            (input_layer > hidden_layer > output),
            step=0.3,
            verbose=False
        )

        network.train(xor_input_train, xor_target_train, epochs=1000)
        self.assertEqual(round(network.last_error_in(), 2), 0)
Example #3
    def test_first_step_updates(self):
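        # Verify the exact weight values after a single training step,
        # using a custom squared-error function whose gradient is attached
        # through the with_derivative decorator.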
        def square_error_deriv(output_train, target_train):
            return output_train - target_train

        @with_derivative(square_error_deriv)
        def square_error(output_train, target_train):
            return np.sum((target_train - output_train) ** 2) / 2

        weight1 = np.array([[0.1, 0.2], [0.5, 0.5], [0.5, 0.5]])
        weight2 = np.array([[0.3, 0.5, 0.5]]).T

        input_layer = SigmoidLayer(2, weight=weight1)
        hidden_layer = SigmoidLayer(2, weight=weight2)
        output = OutputLayer(1)

        network = Backpropagation(
            (input_layer > hidden_layer > output),
            error=square_error,
            step=1,
            verbose=False
        )

        test_input = np.array([[1, 1]])
        test_target = np.array([[1]])
        network.train(test_input, test_target, epochs=1)

        np.testing.assert_array_almost_equal(
            network.train_layers[0].weight_without_bias,
            np.array([[0.50461013, 0.50437699],
                      [0.50461013, 0.50437699]]),
        )
        np.testing.assert_array_almost_equal(
            network.train_layers[1].weight_without_bias,
            np.array([[0.53691945, 0.53781823]]).T,
        )
Example #4
    def test_backpropagation(self):
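        # XOR with the weight-decay penalty; decay_rate controls how
        # strongly large weights are penalized during training.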
        weight1 = np.array([
            [-0.53980522, -0.64724144, -0.92496063],
            [-0.04144865, -0.60458235,  0.25735483],
            [0.08818209, -0.10212516, -1.46030816]
        ])
        weight2 = np.array([
            [0.54230442],
            [0.1393251],
            [1.59479241],
            [0.1479949]
        ])

        input_layer = TanhLayer(2, weight=weight1)
        hidden_layer = TanhLayer(3, weight=weight2)
        output = StepOutputLayer(1, output_bounds=(-1, 1))

        network = Backpropagation(
            input_layer > hidden_layer > output,
            step=0.3,
            decay_rate=0.0001,
            optimizations=[WeightDecay]
        )
        network.train(xor_input_train, xor_target_train, epochs=500)
        self.assertEqual(round(network.last_error_in(), 2), 0)
Example #5
    def test_train_state(self):
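        # Both handlers bump the same counter, so the epoch-end signal
        # should fire once per epoch and the train-end signal once.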
        global triggered_times
        triggered_times = 0
        epochs = 4

        def print_message(network):
            global triggered_times
            triggered_times += 1

        def print_message2(network):
            global triggered_times
            triggered_times += 1

        network = Backpropagation(
            connection=(2, 2, 1),
            train_epoch_end_signal=print_message,
            train_end_signal=print_message2,
        )
        network.train(xor_input_train, xor_target_train, epochs=epochs)

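        # `epochs` epoch-end calls plus a single train-end call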
        self.assertEqual(triggered_times, epochs + 1)
Example #6
    def test_first_step_updates(self):
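        # Same single-step check as Example #3, but comparing rounded
        # weight matrices directly instead of numpy's almost-equal helper.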
        def square_error_deriv(output_train, target_train):
            return output_train - target_train

        @with_derivative(square_error_deriv)
        def square_error(output_train, target_train):
            return np.sum((target_train - output_train) ** 2) / 2

        weight1 = np.array([[0.1, 0.2], [0.5, 0.5], [0.5, 0.5]])
        weight2 = np.array([[0.3, 0.5, 0.5]]).T

        weight1_new = np.array([[0.50461013, 0.50437699],
                                [0.50461013, 0.50437699]])
        weight2_new = np.array([[0.53691945, 0.53781823]]).T

        input_layer = SigmoidLayer(2, weight=weight1)
        hidden_layer = SigmoidLayer(2, weight=weight2)
        output = OutputLayer(1)

        network = Backpropagation(
            (input_layer > hidden_layer > output),
            error=square_error,
            step=1,
        )

        network.train(np.array([[1, 1]]), np.array([[1]]), epochs=1)

        trained_weight1 = network.train_layers[0].weight_without_bias
        trained_weight2 = network.train_layers[1].weight_without_bias

        self.assertTrue(np.all(
            np.round(trained_weight1, 8) == np.round(weight1_new, 8))
        )
        self.assertTrue(np.all(
            np.round(trained_weight2, 8) == np.round(weight2_new, 8))
        )