Example #1
    def test_backpropagation(self):
        output = StepOutputLayer(1, output_bounds=(-1, 1))

        weight1 = np.array([
            [0.31319847, -1.17858149, 0.71556407],
            [1.60798015, 0.16304449, -0.22483005],
            [-0.90144173, 0.58500625, -0.01724167]
        ])
        weight2 = np.array([
            [-1.34351428],
            [0.45506056],
            [0.24790366],
            [-0.74360389]
        ])

        input_layer = TanhLayer(2, weight=weight1)
        hidden_layer = TanhLayer(3, weight=weight2)

        network = Backpropagation(
            (input_layer > hidden_layer > output),
            step=0.3,
            verbose=False
        )

        network.train(xor_input_train, xor_target_train, epochs=1000)
        self.assertEqual(round(network.last_error_in(), 2), 0)
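
These snippets are test methods lifted out of their classes, so the imports and the XOR fixtures they rely on are not shown. A minimal sketch of the assumed context, targeting the old neupy API these tests use; the import paths and the exact XOR arrays are assumptions, not taken from the original files:

import numpy as np

from neupy import algorithms, ensemble
from neupy.algorithms import (Backpropagation, Momentum,
                              WeightDecay, WeightElimination)
from neupy.layers import (TanhLayer, SigmoidLayer, StepOutputLayer,
                          OutputLayer)

# Bipolar XOR data consistent with the (-1, 1) output bounds used in
# the tests; the exact arrays are an assumption, the originals live in
# a shared test base class.
xor_input_train = np.array([[-1, -1], [-1, 1], [1, -1], [1, 1]])
xor_target_train = np.array([[-1], [1], [1], [-1]])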
Example #2
File: test_dan.py  Project: zhdbeng/neupy
    def test_handle_errors(self):
        data, target = datasets.make_classification(300, n_features=4,
                                                    n_classes=2)
        x_train, x_test, y_train, y_test = cross_validation.train_test_split(
            data, target, train_size=0.7
        )

        with self.assertRaises(ValueError):
            # The first network has two output units and the
            # second has just one.
            ensemble.DynamicallyAveragedNetwork([
                algorithms.RPROP((4, 10, 2), step=0.1),
                algorithms.Backpropagation((4, 10, 1), step=0.1)
            ])

        with self.assertRaises(ValueError):
            # Ensemble built with fewer than two networks
            ensemble.DynamicallyAveragedNetwork([
                algorithms.Backpropagation((4, 10, 1), step=0.1)
            ])

        with self.assertRaises(ValueError):
            # Output falls between -1 and 1, which the ensemble rejects
            dan = ensemble.DynamicallyAveragedNetwork([
                algorithms.Backpropagation(
                    SigmoidLayer(4) > TanhLayer(10) > OutputLayer(1),
                    step=0.01
                ),
                algorithms.RPROP((4, 10, 1), step=0.1)
            ])
            dan.train(x_train, y_train, epochs=10)
            dan.predict(x_test)
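
For contrast, a configuration that passes all three checks above keeps one output unit per member, has at least two members, and produces predictions inside [0, 1]. A hedged sketch along those lines; the layer choices are illustrative, not from the original test:

dan = ensemble.DynamicallyAveragedNetwork([
    algorithms.Backpropagation(
        # A sigmoid output keeps predictions inside (0, 1), which the
        # ensemble accepts (assumption about the acceptance check).
        SigmoidLayer(4) > SigmoidLayer(10) > OutputLayer(1),
        step=0.01
    ),
    algorithms.RPROP((4, 10, 1), step=0.1),
])
dan.train(x_train, y_train, epochs=10)
y_predicted = dan.predict(x_test)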
Example #3
    def test_backpropagation(self):
        weight1 = np.array([[-0.53980522, -0.64724144, -0.92496063],
                            [-0.04144865, -0.60458235, 0.25735483],
                            [0.08818209, -0.10212516, -1.46030816]])
        weight2 = np.array([[0.54230442], [0.1393251], [1.59479241],
                            [0.1479949]])

        input_layer = TanhLayer(2, weight=weight1)
        hidden_layer = TanhLayer(3, weight=weight2)
        output = StepOutputLayer(1, output_bounds=(-1, 1))

        network = Backpropagation(input_layer > hidden_layer > output,
                                  step=0.3,
                                  decay_rate=0.0001,
                                  optimizations=[WeightDecay])
        network.train(xor_input_train, xor_target_train, epochs=500)
        self.assertEqual(round(network.last_error_in(), 2), 0)
Example #4
    def test_backpropagation(self):
        weight1 = np.array([[0.22667075, 0.38116981, 0.62686969],
                            [1.13062085, 0.40836474, -0.50492125],
                            [-0.22645265, 1.13541005, -2.7876409]])
        weight2 = np.array([[0.63547163], [0.63347214], [-1.3669957],
                            [-0.42770718]])

        input_layer = TanhLayer(2, weight=weight1)
        hidden_layer = TanhLayer(3, weight=weight2)
        output = StepOutputLayer(1, output_bounds=(-1, 1))

        network = Backpropagation(input_layer > hidden_layer > output,
                                  step=0.3,
                                  zero_weight=20,
                                  optimizations=[WeightElimination])
        network.train(xor_input_train, xor_target_train, epochs=350)
        self.assertEqual(round(network.last_error_in(), 2), 0)
Example #5
    def setUp(self):
        super(LearningRateUpdatesTestCase, self).setUp()
        self.first_step = 0.3
        # Weights
        self.weight1 = np.array([
            [0.57030714, 0.64724479, 0.74482306],
            [0.12310346, 0.26571213, 0.74472318],
            [0.5642351, 0.52127089, 0.57070108],
        ])
        self.weight2 = np.array([
            [0.2343891],
            [0.70945912],
            [0.46677056],
            [0.83986252],
        ])
        # Layers
        input_layer = TanhLayer(2, weight=self.weight1)
        hidden_layer = TanhLayer(3, weight=self.weight2)
        output = StepOutputLayer(1, output_bounds=(-1, 1))
        self.connection = input_layer > hidden_layer > output
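
The setUp above only prepares fixed weights and a layer connection; the learning-rate tests themselves are not shown. A hedged sketch of how such a test might consume this fixture; the method name, algorithm, and epoch count are illustrative assumptions:

    def test_fixture_usage(self):
        # Train a plain Backpropagation network on the shared connection
        # using the step size prepared in setUp.
        network = Backpropagation(self.connection, step=self.first_step)
        network.train(xor_input_train, xor_target_train, epochs=100)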
Example #6
    def test_backpropagation(self):
        weight1 = np.array([[-0.53980522, -0.64724144, -0.92496063],
                            [-0.04144865, -0.60458235, 0.25735483],
                            [0.08818209, -0.10212516, -1.46030816]])
        weight2 = np.array([[0.54230442], [0.1393251], [1.59479241],
                            [0.1479949]])

        input_layer = TanhLayer(2, weight=weight1)
        hidden_layer = TanhLayer(3, weight=weight2)
        output = StepOutputLayer(1, output_bounds=(-1, 1))

        network2 = Momentum(
            (input_layer > hidden_layer > output),
            step=0.1,
            momentum=0.1,
            use_raw_predict_at_error=True,
        )

        network2.train(xor_input_train, xor_target_train, epochs=300)
        self.assertEqual(round(network2.last_error_in(), 2), 0)
Example #7
    def setUp(self):
        super(GradientDescentTestCase, self).setUp()
        output = StepOutputLayer(1, output_bounds=(-1, 1))
        self.connection = TanhLayer(2) > TanhLayer(5) > output