Example #1
0
def reproducible_network_train(seed=0, epochs=500, **additional_params):
    """Train a GradientDescent network on the XOR problem reproducibly.

    Seeds NumPy's global RNG before construction so repeated calls with
    the same arguments produce the same trained weights.

    Parameters
    ----------
    seed : int
        Seed for NumPy's global random state. Defaults to ``0``.
    epochs : int
        Number of training epochs. Defaults to ``500``.
    **additional_params
        Extra keyword arguments forwarded to the network constructor.

    Returns
    -------
    GradientDescent instance
        The trained network.
    """
    np.random.seed(seed)
    # Small 2-5-1 architecture with a step output bounded to (-1, 1).
    topology = [
        layers.Tanh(2),
        layers.Tanh(5),
        layers.StepOutput(1, output_bounds=(-1, 1)),
    ]
    network = algorithms.GradientDescent(connection=topology,
                                         **additional_params)
    network.train(xor_input_train, xor_target_train, epochs=epochs)
    return network
Example #2
0
 def setUp(self):
     """Prepare shared fixtures: the base step size and a small XOR topology."""
     super(LearningRateUpdatesTestCase, self).setUp()
     # 2-3-1 architecture reused across the learning-rate update tests.
     self.connection = [
         layers.Tanh(2),
         layers.Tanh(3),
         layers.StepOutput(1, output_bounds=(-1, 1)),
     ]
     self.first_step = 0.3
Example #3
0
 def test_that_alg_works(self):
     """Gradient descent with the WeightDecay addon should solve XOR."""
     topology = [
         layers.Tanh(2),
         layers.Tanh(3),
         layers.StepOutput(1, output_bounds=(-1, 1)),
     ]
     network = algorithms.GradientDescent(
         topology,
         step=0.3,
         decay_rate=0.0001,
         addons=[algorithms.WeightDecay],
     )
     network.train(xor_input_train, xor_target_train, epochs=500)
     # After 500 epochs the last recorded error should be ~0.
     self.assertAlmostEqual(network.errors.last(), 0, places=2)
Example #4
0
 def test_that_alg_works(self):
     """Gradient descent with the WeightElimination addon should solve XOR."""
     topology = [
         layers.Tanh(2),
         layers.Tanh(3),
         layers.StepOutput(1, output_bounds=(-1, 1)),
     ]
     network = algorithms.GradientDescent(
         topology,
         step=0.3,
         zero_weight=20,
         addons=[algorithms.WeightElimination],
     )
     network.train(xor_input_train, xor_target_train, epochs=350)
     # After 350 epochs the last recorded error should be ~0.
     self.assertAlmostEqual(network.errors.last(), 0, places=2)
Example #5
0
 def test_errdiff(self):
     """ErrDiffStepUpdate addon should let gradient descent solve XOR."""
     topology = [
         layers.Tanh(2),
         layers.Tanh(3),
         layers.StepOutput(1, output_bounds=(-1, 1)),
     ]
     network = algorithms.GradientDescent(
         topology,
         step=0.3,
         # Step is scaled up on smaller errors, down on bigger ones.
         update_for_smaller_error=1.05,
         update_for_bigger_error=0.7,
         error_difference=1.04,
         addons=[algorithms.ErrDiffStepUpdate],
     )
     network.train(xor_input_train, xor_target_train, epochs=200)
     # After 200 epochs the last recorded error should be ~0.
     self.assertAlmostEqual(network.errors.last(), 0, places=5)
Example #6
0
 def test_search_then_converge(self):
     """SearchThenConverge addon should shrink the step to the expected value."""
     topology = [
         layers.Tanh(2),
         layers.Tanh(3),
         layers.StepOutput(1, output_bounds=(-1, 1)),
     ]
     network = algorithms.GradientDescent(
         topology,
         step=0.3,
         epochs_step_minimizator=50,
         rate_coefitient=0.2,
         addons=[algorithms.SearchThenConverge],
     )
     network.train(xor_input_train, xor_target_train, epochs=6)
     # With these hyperparameters the step after 6 epochs should be 0.18.
     self.assertAlmostEqual(
         network.variables.step.get_value(),
         0.18,
         places=5,
     )