Example #1
    def test_handle_errors(self):
        data, target = datasets.make_classification(300,
                                                    n_features=4,
                                                    n_classes=2)
        x_train, x_test, y_train, y_test = model_selection.train_test_split(
            data, target, test_size=0.3)

        with self.assertRaises(ValueError):
            # First network has two output layers and the second
            # just one.
            algorithms.DynamicallyAveragedNetwork([
                algorithms.RPROP((4, 10, 2), step=0.1),
                algorithms.GradientDescent((4, 10, 1), step=0.1)
            ])

        with self.assertRaises(ValueError):
            # Use ensemble with less than one network
            algorithms.DynamicallyAveragedNetwork(
                [algorithms.GradientDescent((4, 10, 1), step=0.1)])

        with self.assertRaises(ValueError):
            # Output greater than 1
            dan = algorithms.DynamicallyAveragedNetwork([
                algorithms.GradientDescent([
                    Input(4),
                    Sigmoid(10),
                    Relu(1, weight=init.Uniform(), bias=init.Uniform()),
                ], step=0.01),
                algorithms.RPROP((4, 10, 1), step=0.01),
            ])
            dan.train(x_train, y_train, epochs=10)
            dan.predict(x_test)
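For contrast with the failure cases above, here is a minimal sketch of an ensemble that should construct and train without raising, reusing the same training data and the API shown in this example; it assumes, as the third case suggests, that tuple-defined networks ending in a single sigmoid output keep predictions in the [0, 1] range that DynamicallyAveragedNetwork expects:

    # hedged sketch: both member networks have one output unit, unlike the
    # mismatched pair in the first assertRaises block above
    dan = algorithms.DynamicallyAveragedNetwork([
        algorithms.RPROP((4, 10, 1), step=0.1),
        algorithms.GradientDescent((4, 10, 1), step=0.1),
    ])
    dan.train(x_train, y_train, epochs=10)
    y_predicted = dan.predict(x_test)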
Example #2
    def test_irpropplus(self):
        options = dict(minstep=0.001,
                       maxstep=1,
                       increase_factor=1.1,
                       decrease_factor=0.1,
                       step=1,
                       verbose=False)

        uniform = init.Uniform()
        params1 = dict(
            weight=uniform.sample((3, 10), return_array=True),
            bias=uniform.sample((10, ), return_array=True),
        )
        params2 = dict(
            weight=uniform.sample((10, 2), return_array=True),
            bias=uniform.sample((2, ), return_array=True),
        )

        network = layers.join(
            Input(3),
            Sigmoid(10, **params1),
            Sigmoid(2, **params2),
        )

        nw = algorithms.IRPROPPlus(copy.deepcopy(network), **options)
        nw.train(simple_x_train, simple_y_train, epochs=100)
        irprop_plus_error = nw.errors.train[-1]
        self.assertGreater(1e-4, nw.errors.train[-1])

        nw = algorithms.RPROP(copy.deepcopy(network), **options)
        nw.train(simple_x_train, simple_y_train, epochs=100)
        rprop_error = nw.errors.train[-1]
        self.assertGreater(rprop_error, irprop_plus_error)
Example #3
    def test_uniformal_initializer(self):
        uniform = init.Uniform(minval=-10, maxval=10)
        weight = uniform.sample((30, 30))

        self.assertUniformlyDistributed(weight)
        self.assertAlmostEqual(-10, np.min(weight), places=1)
        self.assertAlmostEqual(10, np.max(weight), places=1)
Example #4
    def test_hessdiag(self):
        x_train, x_test, y_train, y_test = simple_classification()
        params = dict(weight=init.Uniform(-0.1, 0.1),
                      bias=init.Uniform(-0.1, 0.1))

        nw = algorithms.HessianDiagonal(
            network=[
                layers.Input(10),
                layers.Sigmoid(20, **params),
                layers.Sigmoid(1, **params),
            ],
            step=0.1,
            shuffle_data=False,
            verbose=False,
            min_eigval=0.1,
        )
        nw.train(x_train, y_train, epochs=50)
        self.assertGreater(0.2, nw.errors.train[-1])
Example #5
    def test_uniform_reprodusible_with_outside_seed(self):
        uniform = init.Uniform(minval=-10, maxval=10)

        np.random.seed(0)
        weight1 = uniform.sample((10, 4), return_array=True)

        np.random.seed(0)
        weight2 = uniform.sample((10, 4), return_array=True)

        np.testing.assert_array_almost_equal(weight1, weight2)
Example #6
    def test_hessdiag(self):
        x_train, x_test, y_train, y_test = simple_classification()
        nw = algorithms.HessianDiagonal(
            connection=[
                layers.Input(10),
                layers.Sigmoid(20,
                               weight=init.Uniform(-1, 1),
                               bias=init.Uniform(-1, 1)),
                layers.Sigmoid(1,
                               weight=init.Uniform(-1, 1),
                               bias=init.Uniform(-1, 1)),
            ],
            step=0.1,
            shuffle_data=False,
            verbose=False,
            min_eigval=0.01,
        )
        nw.train(x_train / 2, y_train, epochs=10)
        self.assertAlmostEqual(0.10, nw.errors.last(), places=2)
Example #7
    def initialize(self):
        self.network = algorithms.Momentum(
            [
                layers.Input(20),
                layers.Linear(20, weight=init.Uniform(-0.5, 0.5)),
                layers.LeakyRelu(15, weight=init.Uniform(-0.5, 0.5)),
                layers.LeakyRelu(15, weight=init.Uniform(-0.5, 0.5)),
                layers.LeakyRelu(12, weight=init.Uniform(-0.5, 0.5)),
                layers.Linear(9, weight=init.Uniform(-0.5, 0.5)),
            ],

            error='categorical_crossentropy',
            step=0.01,
            verbose=False,
            shuffle_data=True,

            momentum=0.99,
            nesterov=True,

        )
        self.network.architecture()
Example #8
    def test_irpropplus(self):
        options = dict(minstep=0.001,
                       maxstep=1,
                       increase_factor=1.1,
                       decrease_factor=0.1,
                       step=1,
                       verbose=False)
        connection = [
            Input(3),
            Sigmoid(10, weight=init.Uniform(), bias=init.Uniform()),
            Sigmoid(2, weight=init.Uniform(), bias=init.Uniform()),
        ]

        nw = algorithms.IRPROPPlus(copy.deepcopy(connection), **options)
        nw.train(simple_input_train, simple_target_train, epochs=100)
        irprop_plus_error = nw.errors.last()
        self.assertGreater(1e-4, nw.errors.last())

        nw = algorithms.RPROP(copy.deepcopy(connection), **options)
        nw.train(simple_input_train, simple_target_train, epochs=100)
        rprop_error = nw.errors.last()
        self.assertGreater(rprop_error, irprop_plus_error)
Example #9
    def initialize(self):
        self.network = algorithms.Momentum(
            [
                layers.Input(20),
                layers.Relu(30, weight=init.Uniform(-1, 1)),
                layers.Tanh(40, weight=init.Uniform(-1, 1)),
                # layers.Embedding(40, 1),
                # layers.GRU(40),
                layers.Relu(25, weight=init.Uniform(-1, 1)),
                layers.Linear(9, weight=init.Uniform(-1, 1)),
            ],

            error='categorical_crossentropy',
            step=0.01,
            verbose=False,
            shuffle_data=True,

            momentum=0.99,
            nesterov=True,

        )
        self.network.architecture()
Example #10
    def test_compare_bp_and_hessian(self):
        x_train, _, y_train, _ = simple_classification()
        params = dict(weight=init.Uniform(-0.1, 0.1),
                      bias=init.Uniform(-0.1, 0.1))

        compare_networks(
            # Test classes
            partial(algorithms.GradientDescent, batch_size=None),
            partial(algorithms.HessianDiagonal, min_eigval=0.1),
            # Test data
            (x_train, y_train),
            # Network configurations
            network=[
                layers.Input(10),
                layers.Sigmoid(20, **params),
                layers.Sigmoid(1, **params),
            ],
            step=0.1,
            shuffle_data=True,
            verbose=False,
            # Test configurations
            epochs=50,
            show_comparison_plot=False)
Example #11
    def test_uniform_initializer_repr(self):
        uniform_initializer = init.Uniform(minval=0, maxval=1)
        self.assertEqual("Uniform(0, 1)", str(uniform_initializer))
Example #12
import matplotlib.pyplot as plt

from neupy import init, utils

from utils import plot_2d_grid, make_circle, make_elipse, make_square

plt.style.use('ggplot')
utils.reproducible()

if __name__ == '__main__':
    GRID_WIDTH = 4
    GRID_HEIGHT = 4

    datasets = [
        make_square(),
        make_circle(),
        make_elipse(corr=0.7),
    ]
    configurations = [{
        'weight_init': init.Uniform(0, 1),
        'title': 'Random uniform initialization',
    }, {
        'weight_init': 'sample_from_data',
        'title': 'Sampled from the data',
    }, {
        'weight_init': 'init_pca',
        'title': 'Initialize with PCA',
    }]

    plt.figure(figsize=(15, 15))
    plt.title("Compare weight initialization methods for SOFM")

    red, blue = ('#E24A33', '#348ABD')
    n_columns = len(configurations)
    n_rows = len(datasets)