Example #1
    def test_network_convergence(self):
        with catch_stdout() as out:
            bpnet = algorithms.Backpropagation((2, 3, 1),
                                               step=0.1,
                                               verbose=True,
                                               show_epoch=100)
            bpnet.train(xor_zero_input_train,
                        xor_zero_target_train,
                        epochs=3,
                        epsilon=1e-5)
            terminal_output = out.getvalue()
        self.assertEqual(1, terminal_output.count("Network didn't converge"))

        with catch_stdout() as out:
            bpnet = algorithms.Backpropagation((2, 3, 1),
                                               step=0.1,
                                               verbose=True,
                                               show_epoch=100)
            bpnet.train(xor_zero_input_train,
                        xor_zero_target_train,
                        epochs=1e3,
                        epsilon=1e-3)
            terminal_output = out.getvalue()

        self.assertEqual(1, terminal_output.count("Network converged"))
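In both runs, epsilon acts as the convergence threshold: training reports success once the error improvement between epochs drops below it, and failure if the epoch budget runs out first. A schematic sketch of that stopping rule (an illustration of the idea, not neupy's actual source):

    def train_until_converged(train_epoch, max_epochs, epsilon):
        # train_epoch() runs one epoch and returns the training error.
        previous_error = None
        for epoch in range(1, max_epochs + 1):
            error = train_epoch()
            if previous_error is not None and abs(previous_error - error) < epsilon:
                print("Network converged")  # improvement dropped below epsilon
                return epoch
            previous_error = error
        print("Network didn't converge")  # epoch budget exhausted first
        return max_epochs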
Example #2
File: test_dan.py  Project: zhdbeng/neupy
    def test_handle_errors(self):
        data, target = datasets.make_classification(300, n_features=4,
                                                    n_classes=2)
        x_train, x_test, y_train, y_test = cross_validation.train_test_split(
            data, target, train_size=0.7
        )

        with self.assertRaises(ValueError):
            # The first network's output layer has two units,
            # while the second's has just one.
            ensemble.DynamicallyAveragedNetwork([
                algorithms.RPROP((4, 10, 2), step=0.1),
                algorithms.Backpropagation((4, 10, 1), step=0.1)
            ])

        with self.assertRaises(ValueError):
            # Ensemble built from fewer than two networks
            ensemble.DynamicallyAveragedNetwork([
                algorithms.Backpropagation((4, 10, 1), step=0.1)
            ])

        with self.assertRaises(ValueError):
            # Network output falls between -1 and 1,
            # outside the [0, 1] range that DAN requires
            dan = ensemble.DynamicallyAveragedNetwork([
                algorithms.Backpropagation(
                    SigmoidLayer(4) > TanhLayer(10) > OutputLayer(1),
                    step=0.01
                ),
                algorithms.RPROP((4, 10, 1), step=0.1)
            ])
            dan.train(x_train, y_train, epochs=10)
            dan.predict(x_test)
Example #3
    def test_pandas_for_bp(self):
        dataset = datasets.load_diabetes()

        input_scaler = preprocessing.MinMaxScaler()
        target_scaler = preprocessing.MinMaxScaler()

        n_features = dataset.data.shape[1]
        input_columns = ['column_' + str(i) for i in range(n_features)]

        pandas_data = pd.DataFrame(dataset.data, columns=input_columns)
        pandas_data['target'] = target_scaler.fit_transform(dataset.target)
        pandas_data[input_columns] = input_scaler.fit_transform(
            pandas_data[input_columns])

        x_train, x_test, y_train, y_test = train_test_split(
            pandas_data[input_columns], pandas_data['target'], train_size=0.85)

        bpnet = algorithms.Backpropagation(connection=[
            layers.SigmoidLayer(10),
            layers.SigmoidLayer(40),
            layers.OutputLayer(1),
        ],
                                           use_bias=True,
                                           show_epoch=100)
        bpnet.train(x_train, y_train, epochs=1000)
        y_predict = bpnet.predict(x_test)

        error = rmsle(target_scaler.inverse_transform(y_test),
                      target_scaler.inverse_transform(y_predict).round())
        self.assertAlmostEqual(0.4477, error, places=4)
Example #4
    def test_show_epoch_valid_cases(self):
        Case = namedtuple("Case", "show_epoch should_be_n_times n_epochs")
        cases = (
            # Shows 10 epochs plus the final one, 11 in total
            Case(show_epoch='10 times', should_be_n_times=11, n_epochs=100),
            Case(show_epoch='1 time', should_be_n_times=2, n_epochs=10),
            Case(show_epoch='1 times', should_be_n_times=2, n_epochs=10),
            # Can't show output more often than once per epoch,
            # so it caps at the number of epochs
            Case(show_epoch='100 times', should_be_n_times=10, n_epochs=10),
            Case(show_epoch=5, should_be_n_times=3, n_epochs=10),
            Case(show_epoch=100, should_be_n_times=2, n_epochs=10),
        )

        for case in cases:
            with catch_stdout() as out:
                bpnet = algorithms.Backpropagation((2, 3, 1),
                                                   step=0.1,
                                                   verbose=True,
                                                   show_epoch=case.show_epoch)
                bpnet.train(xor_zero_input_train,
                            xor_zero_target_train,
                            epochs=case.n_epochs)
                terminal_output = out.getvalue()

            self.assertEqual(case.should_be_n_times,
                             terminal_output.count("Train error"))
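The cases above pin down the logging schedule: a string such as '10 times' spreads roughly that many "Train error" lines across the run, an integer acts as a fixed epoch period, and the final epoch is always reported. A reconstruction consistent with all six cases (illustrative only, not neupy's actual source):

    def epochs_to_log(show_epoch, n_epochs):
        # 'N times' spreads about N log lines across training;
        # an integer means "log every N-th epoch".
        if isinstance(show_epoch, str):
            n_times = int(show_epoch.split()[0])
            period = max(1, n_epochs // n_times)
        else:
            period = show_epoch
        logged = set(range(1, n_epochs + 1, period))
        logged.add(n_epochs)  # the final epoch is always reported
        return sorted(logged)

For example, epochs_to_log('10 times', 100) yields epochs 1, 11, ..., 91 plus the final epoch 100, matching the expected 11 outputs.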
Example #5
    def test_pipeline(self):
        dataset = datasets.load_diabetes()
        target_scaler = preprocessing.MinMaxScaler()

        x_train, x_test, y_train, y_test = train_test_split(
            dataset.data,
            target_scaler.fit_transform(dataset.target),
            train_size=0.85)

        network = algorithms.Backpropagation(
            connection=[
                layers.SigmoidLayer(10),
                layers.SigmoidLayer(40),
                layers.OutputLayer(1),
            ],
            use_bias=True,
            show_epoch=100,
            verbose=False,
        )
        pipeline = Pipeline([
            ('min_max_scaler', preprocessing.MinMaxScaler()),
            ('backpropagation', network),
        ])
        pipeline.fit(x_train, y_train, backpropagation__epochs=1000)
        y_predict = pipeline.predict(x_test)

        error = rmsle(target_scaler.inverse_transform(y_test),
                      target_scaler.inverse_transform(y_predict).round())
        self.assertAlmostEqual(0.4481, error, places=4)
Example #6
    def test_mixture_of_experts(self):
        dataset = datasets.load_diabetes()
        data, target = dataset.data, dataset.target
        insize, outsize = data.shape[1], 1

        input_scaler = preprocessing.MinMaxScaler((-1, 1))
        output_scaler = preprocessing.MinMaxScaler()
        x_train, x_test, y_train, y_test = cross_validation.train_test_split(
            input_scaler.fit_transform(data),
            output_scaler.fit_transform(target.reshape(-1, 1)),
            train_size=0.8)

        n_epochs = 300
        scaled_y_test = output_scaler.inverse_transform(y_test).reshape(
            (y_test.size, 1))

        # -------------- Train single Backpropagation -------------- #

        bpnet = algorithms.Backpropagation((insize, 20, outsize),
                                           step=0.1,
                                           verbose=False)
        bpnet.train(x_train, y_train, epochs=n_epochs)
        network_output = bpnet.predict(x_test)
        network_error = rmsle(output_scaler.inverse_transform(network_output),
                              scaled_y_test)

        # -------------- Train ensemble -------------- #

        moe = ensemble.MixtureOfExperts(
            networks=[
                algorithms.Backpropagation((insize, 20, outsize),
                                           step=0.1,
                                           verbose=False),
                algorithms.Backpropagation((insize, 20, outsize),
                                           step=0.1,
                                           verbose=False),
            ],
            gating_network=algorithms.Backpropagation(
                layers.SoftmaxLayer(insize) > layers.OutputLayer(2),
                step=0.1,
                verbose=False))
        moe.train(x_train, y_train, epochs=n_epochs)
        ensemble_output = moe.predict(x_test)
        ensemble_error = rmsle(
            output_scaler.inverse_transform(ensemble_output), scaled_y_test)

        self.assertGreater(network_error, ensemble_error)
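A mixture of experts forms its prediction as a per-sample weighted average of the expert networks' outputs, with the weights supplied by the gating network's softmax layer (one output per expert). A schematic numpy sketch of that combination step (illustrative; neupy performs this internally):

    import numpy as np

    def combine_experts(expert_outputs, gating_probs):
        # expert_outputs: list of (n_samples, 1) expert predictions.
        # gating_probs: (n_samples, n_experts) softmax weights, rows sum to 1.
        stacked = np.hstack(expert_outputs)  # (n_samples, n_experts)
        return np.sum(stacked * gating_probs, axis=1, keepdims=True)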
Example #7
    def test_terminal_output_frequency(self):
        with catch_stdout() as out:
            data = np.random.random((1000, 2))
            target = np.random.random((1000, 1))
            bpnet = algorithms.Backpropagation((2, 1, 1),
                                               verbose=True,
                                               show_epoch=1)
            bpnet.train(data, target, epochs=100)
            terminal_output = out.getvalue()

        self.assertEqual(1, terminal_output.count("Too many outputs"))
Example #8
File: test_dan.py  Project: zhdbeng/neupy
    def test_dan(self):
        data, target = datasets.make_classification(300, n_features=4,
                                                    n_classes=2)
        x_train, x_test, y_train, y_test = cross_validation.train_test_split(
            data, target, train_size=0.7
        )

        dan = ensemble.DynamicallyAveragedNetwork([
            algorithms.RPROP((4, 100, 1), step=0.1, maximum_step=1),
            algorithms.Backpropagation((4, 5, 1), step=0.1),
            algorithms.ConjugateGradient((4, 5, 1), step=0.01),
        ])

        dan.train(x_train, y_train, epochs=500)
        result = dan.predict(x_test)
        ensemble_result = metrics.accuracy_score(y_test, result)
        self.assertAlmostEqual(0.9333, ensemble_result, places=4)
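The dynamically averaged network weights each member's vote by how confident that vote is, which is why every member must emit probabilities in the [0, 1] range (see the error-handling test in Example #2). A schematic numpy sketch of per-sample dynamic averaging (an assumption about the idea, not neupy's actual source):

    import numpy as np

    def dynamic_average(outputs):
        # outputs: (n_samples, n_networks) probabilities in [0, 1].
        # Certainty grows with the distance from the undecided midpoint 0.5.
        certainty = np.where(outputs >= 0.5, outputs, 1 - outputs)
        weighted = np.sum(outputs * certainty, axis=1)
        return weighted / np.sum(certainty, axis=1)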
Example #9
    def test_dynamic_classes(self):
        optimization_classes = [
            algorithms.WeightDecay, algorithms.SearchThenConverge
        ]
        bpnet = algorithms.Backpropagation(
            (3, 5, 1),
            optimizations=optimization_classes,
            verbose=False,
        )
        data, target = datasets.make_regression(n_features=3, n_targets=1)

        data = preprocessing.MinMaxScaler().fit_transform(data)
        target = preprocessing.MinMaxScaler().fit_transform(target)

        with tempfile.NamedTemporaryFile() as temp:
            valid_class_name = bpnet.__class__.__name__
            dill.dump(bpnet, temp)
            temp.file.seek(0)

            restored_bpnet = dill.load(temp)
            restored_class_name = restored_bpnet.__class__.__name__
            temp.file.seek(0)

            self.assertEqual(valid_class_name, restored_class_name)
            self.assertEqual(optimization_classes,
                             restored_bpnet.optimizations)

            bpnet.train(data, target, epochs=10)
            real_bpnet_error = bpnet.error(bpnet.predict(data), target)
            updated_input_weight = bpnet.input_layer.weight.copy()

            dill.dump(bpnet, temp)
            temp.file.seek(0)

            restored_bpnet2 = dill.load(temp)
            temp.file.seek(0)
            actual = restored_bpnet2.predict(data)
            restored_bpnet_error = restored_bpnet2.error(actual, target)

            np.testing.assert_array_equal(updated_input_weight,
                                          restored_bpnet2.input_layer.weight)
            # The restored network must reproduce the original error exactly
            self.assertEqual(real_bpnet_error, restored_bpnet_error)
Example #10
    def test_show_epoch_invalid_cases(self):
        wrong_input_values = (
            'time 10',
            'good time',
            '100',
            'super power',
            '0 times',
            '-1 times',
            0,
            -100,
        )

        for wrong_input_value in wrong_input_values:
            with self.assertRaises(ValueError):
                bpnet = algorithms.Backpropagation(
                    (2, 3, 1),
                    step=0.1,
                    verbose=False,
                    show_epoch=wrong_input_value)
Example #11
    def test_simple_storage(self):
        bpnet = algorithms.Backpropagation((2, 3, 1), step=0.25, verbose=False)
        data, target = datasets.make_regression(n_features=2, n_targets=1)

        data = preprocessing.MinMaxScaler().fit_transform(data)
        target = preprocessing.MinMaxScaler().fit_transform(target)

        with tempfile.NamedTemporaryFile() as temp:
            test_layer_weights = bpnet.input_layer.weight.copy()
            dill.dump(bpnet, temp)
            temp.file.seek(0)

            restored_bpnet = dill.load(temp)
            temp.file.seek(0)
            layers_sizes = [
                layer.input_size for layer in restored_bpnet.layers
            ]

            self.assertEqual(0.25, restored_bpnet.step)
            self.assertEqual([2, 3, 1], layers_sizes)
            np.testing.assert_array_equal(test_layer_weights,
                                          restored_bpnet.input_layer.weight)

            bpnet.train(data, target, epochs=5)
            real_bpnet_error = bpnet.error(bpnet.predict(data), target)
            updated_input_weight = bpnet.input_layer.weight.copy()

            dill.dump(bpnet, temp)
            temp.file.seek(0)

            restored_bpnet2 = dill.load(temp)
            temp.file.seek(0)
            actual = restored_bpnet2.predict(data)
            restored_bpnet_error = restored_bpnet2.error(actual, target)

            np.testing.assert_array_equal(updated_input_weight,
                                          restored_bpnet2.input_layer.weight)
            # The restored network must reproduce the original error exactly
            self.assertEqual(real_bpnet_error, restored_bpnet_error)
Example #12
    def test_ensemble(self):
        data, target = datasets.make_classification(300,
                                                    n_features=4,
                                                    n_classes=2)
        x_train, x_test, y_train, y_test = train_test_split(data,
                                                            target,
                                                            train_size=0.7)

        dan = ensemble.DynamicallyAveragedNetwork([
            algorithms.RPROP((4, 100, 1), step=0.1, maximum_step=1),
            algorithms.Backpropagation((4, 5, 1), step=0.1),
            algorithms.ConjugateGradient((4, 5, 1), step=0.01),
        ])

        pipeline = Pipeline([
            ('min_max_scaler', preprocessing.StandardScaler()),
            ('dan', dan),
        ])
        pipeline.fit(x_train, y_train, dan__epochs=500)

        result = pipeline.predict(x_test)
        ensemble_result = metrics.accuracy_score(y_test, result)
        self.assertAlmostEqual(0.9222, ensemble_result, places=4)
Example #13
    bpn.train(input_data, target_data, epsilon=0.125)
    weights = weights[:, :bpn.epoch + 1]
    weight_quiver(weights, color=color)

    label = "{name} ({n} steps)".format(name=name, n=bpn.epoch)
    return mpatches.Patch(color=color, label=label)


def target_function(network, x, y):
    network.input_layer.weight = np.array([[x], [y]])
    predicted = network.predict(input_data)
    return network.error(predicted, target_data)


# Get data for contour plot
bp_network = algorithms.Backpropagation(get_connection(), **network_settings)
network_target_function = partial(target_function, bp_network)

plt.figure()
plt.title("Approximation function contour plot")
plt.xlabel("First weight")
plt.ylabel("Second weight")

draw_countour(np.linspace(-4.5, 4, 50), np.linspace(-4.5, 4, 50),
              network_target_function)

cgnet_class = partial(algorithms.ConjugateGradient,
                      optimizations=[algorithms.LinearSearch])

algorithms = (
    (algorithms.Backpropagation, 'Gradient Descent', 'k'),
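This fragment comes from a plotting script and depends on helpers defined earlier in it: weight_quiver, get_connection, network_settings, input_data, target_data, and draw_countour. A minimal sketch of what draw_countour plausibly does, assuming it evaluates the error surface over a weight grid and draws a filled contour (a hypothetical reconstruction, not the original helper):

    import matplotlib.pyplot as plt
    import numpy as np

    def draw_countour(xgrid, ygrid, target_function):
        # Evaluate the error surface at every (x, y) weight pair.
        output = np.zeros((len(ygrid), len(xgrid)))
        for i, y in enumerate(ygrid):
            for j, x in enumerate(xgrid):
                output[i, j] = target_function(x, y)
        xmesh, ymesh = np.meshgrid(xgrid, ygrid)
        plt.contourf(xmesh, ymesh, output, 20, alpha=0.9, cmap='Blues')
        plt.colorbar()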
Example #14
    def test_handle_errors(self):
        networks = [
            algorithms.Backpropagation((1, 20, 1), step=0.2),
            algorithms.Backpropagation((1, 20, 1), step=0.2),
        ]

        with self.assertRaises(ValueError):
            # Invalid network (not Backpropagation)
            ensemble.MixtureOfExperts(
                networks=networks + [algorithms.GRNN()],
                gating_network=algorithms.Backpropagation(
                    layers.SigmoidLayer(1) > layers.OutputLayer(3), ))

        with self.assertRaises(ValueError):
            # Invalid number of outputs in the third network
            ensemble.MixtureOfExperts(
                networks=networks +
                [algorithms.Backpropagation((1, 20, 2), step=0.2)],
                gating_network=algorithms.Backpropagation(
                    layers.SigmoidLayer(1) > layers.OutputLayer(3), ))

        with self.assertRaises(ValueError):
            # Invalid gating network output layer size
            ensemble.MixtureOfExperts(
                networks=networks,
                gating_network=algorithms.Backpropagation(
                    layers.SoftmaxLayer(1) > layers.OutputLayer(1), ))

        with self.assertRaises(ValueError):
            # Invalid gating network input layer
            ensemble.MixtureOfExperts(
                networks=networks,
                gating_network=algorithms.Backpropagation(
                    layers.SigmoidLayer(1) > layers.OutputLayer(2), ))

        with self.assertRaises(ValueError):
            # Invalid gating network output layer type
            ensemble.MixtureOfExperts(
                networks=networks,
                gating_network=algorithms.Backpropagation(
                    layers.SoftmaxLayer(1) > layers.RoundOutputLayer(2)))

        with self.assertRaises(ValueError):
            # Invalid network error function
            ensemble.MixtureOfExperts(
                networks=networks + [
                    algorithms.Backpropagation(
                        (1, 20, 1), step=0.2, error=rmsle)
                ],
                gating_network=algorithms.Backpropagation(
                    layers.SigmoidLayer(1) > layers.OutputLayer(3), ))

        with self.assertRaises(ValueError):
            moe = ensemble.MixtureOfExperts(
                # Invalid gating network error function
                networks=networks,
                gating_network=algorithms.Backpropagation(
                    layers.SoftmaxLayer(1) > layers.OutputLayer(2),
                    error=rmsle))

        moe = ensemble.MixtureOfExperts(
            # Valid configuration: one gating output per network
            networks=networks,
            gating_network=algorithms.Backpropagation(
                layers.SoftmaxLayer(1) > layers.OutputLayer(2)))
        with self.assertRaises(ValueError):
            # Wrong number of train input features
            moe.train(np.array([[1, 2]]), np.array([[0]]))

        with self.assertRaises(ValueError):
            # Wrong number of train output features
            moe.train(np.array([[1]]), np.array([[0, 0]]))