Example #1
    def test___init__(self):
        sys.stdout.write('FNN_layer -> Performing init test ... ')
        sys.stdout.flush()
        # 'AUTO' initialization with a local 2D connection pattern.
        l = FullConnLayer(input_dim=9,
                          output_dim=4,
                          activation_function=AFct.Sigmoid,
                          initial_weights='AUTO',
                          initial_bias='AUTO',
                          initial_offset='AUTO',
                          connections=generate_2d_connection_matrix(
                              3, 3, 2, 2, 1, 1, False))
        assert numx.all(l.weights.shape == (9, 4))
        assert numx.all(l.bias.shape == (1, 4))
        assert numx.all(l.offset.shape == (1, 9))
        assert numx.all(l.connections.shape == (9, 4))
        assert numx.all(l.activation_function == AFct.Sigmoid)
        # Scalar initial values and no connection mask.
        l = FullConnLayer(input_dim=9,
                          output_dim=4,
                          activation_function=AFct.Rectifier,
                          initial_weights=1.0,
                          initial_bias=2.0,
                          initial_offset=3.0,
                          connections=None)
        assert numx.all(l.weights.shape == (9, 4))
        assert numx.all(
            numx.abs(l.bias - numx.ones((1, 4)) * 2.0) < self.epsilon)
        assert numx.all(
            numx.abs(l.offset - numx.ones((1, 9)) * 3.0) < self.epsilon)
        assert numx.all(l.connections is None)
        assert numx.all(l.activation_function == AFct.Rectifier)
        # Explicit arrays as initial values.
        l = FullConnLayer(input_dim=9,
                          output_dim=4,
                          activation_function=AFct.SoftMax,
                          initial_weights=numx.ones((9, 4)) * 1.0,
                          initial_bias=numx.ones((1, 4)) * 2.0,
                          initial_offset=numx.ones((1, 9)) * 3.0,
                          connections=None)
        assert numx.all(
            numx.abs(l.weights - numx.ones((9, 4)) * 1.0) < self.epsilon)
        assert numx.all(
            numx.abs(l.bias - numx.ones((1, 4)) * 2.0) < self.epsilon)
        assert numx.all(
            numx.abs(l.offset - numx.ones((1, 9)) * 3.0) < self.epsilon)
        assert numx.all(l.activation_function == AFct.SoftMax)
        print('successfully passed!')
        sys.stdout.flush()
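These snippets appear to be methods of a unittest class, so self.epsilon refers to a small numeric tolerance defined on that class. Below is a minimal sketch of the imports they rely on, using PyDeep's usual aliases; the exact module paths are an assumption and may differ between PyDeep versions:

# Assumed imports for the snippets on this page (module paths are a best guess).
import sys
import numpy as numx
import pydeep.base.activationfunction as AFct
from pydeep.base.numpyextension import generate_2d_connection_matrix
from pydeep.fnn.layer import FullConnLayer
import pydeep.fnn.model as MODEL
import pydeep.fnn.trainer as TRAINER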
Example #2
    def test_get_parameters(self):
        sys.stdout.write('FNN_layer -> Performing get_parameters test ... ')
        sys.stdout.flush()
        l = FullConnLayer(input_dim=9,
                          output_dim=4,
                          activation_function=AFct.Sigmoid,
                          initial_weights='AUTO',
                          initial_bias='AUTO',
                          initial_offset='AUTO',
                          connections=generate_2d_connection_matrix(
                              3, 3, 2, 2, 1, 1, False))
        # get_parameters() returns the weight matrix and the bias in that order.
        w, b = l.get_parameters()
        assert numx.all(l.weights.shape == (9, 4))
        assert numx.all(l.bias.shape == (1, 4))
        assert numx.all(numx.abs(l.weights - w) < self.epsilon)
        assert numx.all(numx.abs(l.bias - b) < self.epsilon)
        print('successfully passed!')
        sys.stdout.flush()
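A minimal usage sketch built from the same constructor call as above; it only illustrates that get_parameters() hands back the weight matrix and the bias row vector in that order (the shapes follow the assertions in the test):

layer = FullConnLayer(input_dim=9,
                      output_dim=4,
                      activation_function=AFct.Sigmoid,
                      initial_weights='AUTO',
                      initial_bias='AUTO',
                      initial_offset='AUTO',
                      connections=None)
weights, bias = layer.get_parameters()
print(weights.shape, bias.shape)  # (9, 4) (1, 4)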
Example #3
    def test_update_parameters(self):
        sys.stdout.write('FNN_layer -> Performing update_parameters test ... ')
        sys.stdout.flush()
        l = FullConnLayer(input_dim=9,
                          output_dim=4,
                          activation_function=AFct.Sigmoid,
                          initial_weights=0,
                          initial_bias=0,
                          initial_offset=0,
                          connections=generate_2d_connection_matrix(
                              3, 3, 2, 2, 1, 1, False))
        assert numx.all(
            numx.abs(l.weights - numx.zeros((9, 4))) < self.epsilon)
        assert numx.all(numx.abs(l.bias - numx.zeros((1, 4))) < self.epsilon)
        # update_parameters subtracts the updates: starting from zeros,
        # the -1 updates below leave weights and bias at +1.
        l.update_parameters([-numx.ones((9, 4)), -numx.ones((1, 4))])
        assert numx.all(numx.abs(l.weights - numx.ones((9, 4))) < self.epsilon)
        assert numx.all(numx.abs(l.bias - numx.ones((1, 4))) < self.epsilon)
        print('successfully passed!')
        sys.stdout.flush()
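Under the sign convention the assertions above establish (updates are subtracted from the parameters), a plain gradient-descent step scales the gradients by a learning rate and passes them straight to update_parameters. A sketch of that step; the gradients below are placeholders, not values produced by PyDeep:

layer = FullConnLayer(input_dim=9,
                      output_dim=4,
                      activation_function=AFct.Sigmoid,
                      initial_weights=0,
                      initial_bias=0,
                      initial_offset=0,
                      connections=None)
learning_rate = 0.1           # hypothetical step size
grad_w = numx.ones((9, 4))    # placeholder weight gradient
grad_b = numx.ones((1, 4))    # placeholder bias gradient
layer.update_parameters([learning_rate * grad_w,
                         learning_rate * grad_b])  # parameters -= updates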
Example #4
    def check(self, data, delta, act1, act2, act3, reg_sparseness,
              desired_sparseness, cost_sparseness, reg_targets,
              desired_targets, cost_targets, full):

        # Use a local 2D connection pattern for the first layer unless a fully
        # connected layer is requested.
        connections = None
        if full is False:
            connections = generate_2d_connection_matrix(
                6, 6, 3, 3, 2, 2, False)

        model1 = FullConnLayer(6 * 6,
                               4 * 4,
                               activation_function=act1,
                               initial_weights='AUTO',
                               initial_bias=0.0,
                               initial_offset=0.0,
                               connections=connections,
                               dtype=numx.float64)

        model2 = FullConnLayer(4 * 4,
                               5 * 5,
                               activation_function=act2,
                               initial_weights='AUTO',
                               initial_bias=0.0,
                               initial_offset=0.5,
                               dtype=numx.float64)

        model3 = FullConnLayer(5 * 5,
                               6 * 6,
                               activation_function=act3,
                               initial_weights='AUTO',
                               initial_bias=0.0,
                               initial_offset=0.5,
                               dtype=numx.float64)

        model = MODEL.Model([model1, model2, model3])

        trainer = TRAINER.GDTrainer(model)

        # Finite-difference gradient check on the assembled model.
        _, _, maxw, maxb = model.finit_differences(
            delta, data, desired_targets, cost_targets, reg_targets,
            desired_sparseness, cost_sparseness, reg_sparseness)
        return numx.max([maxw, maxb])
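check() stacks three FullConnLayer instances into a MODEL.Model and runs the model's finit_differences routine with the given step size delta, target settings, and sparseness regularization; maxw and maxb are presumably the largest discrepancies it reports for the weight and bias gradients, and the method returns the larger of the two. A calling test would typically evaluate check() for several combinations of activation functions and connection patterns and assert that the returned value stays below a small tolerance.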