Example 1
    def test_prelu_param_updates(self):
        x_train, _, y_train, _ = simple_classification()
        prelu_layer1 = layers.PRelu(20, alpha=0.25)
        prelu_layer2 = layers.PRelu(1, alpha=0.25)

        gdnet = algorithms.GradientDescent([
            layers.Input(10),
            prelu_layer1,
            prelu_layer2,
        ])

        prelu1_alpha_before_training = prelu_layer1.alpha.get_value()
        prelu2_alpha_before_training = prelu_layer2.alpha.get_value()

        gdnet.train(x_train, y_train, epochs=10)

        prelu1_alpha_after_training = prelu_layer1.alpha.get_value()
        prelu2_alpha_after_training = prelu_layer2.alpha.get_value()

        self.assertTrue(
            all(
                np.not_equal(
                    prelu1_alpha_before_training,
                    prelu1_alpha_after_training,
                )))
        self.assertTrue(
            all(
                np.not_equal(
                    prelu2_alpha_before_training,
                    prelu2_alpha_after_training,
                )))
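Note: the alphas are expected to move because PReLU's output depends on alpha wherever the input is negative. A minimal NumPy sketch of that dependence (illustrative only, not NeuPy code):

import numpy as np

# For x >= 0 the output is x and does not depend on alpha; for x < 0 it
# is alpha * x, so d(output)/d(alpha) = x there. Any batch that contains
# negative pre-activations therefore produces a non-zero alpha gradient,
# which is why the test expects both alphas to change after training.
x = np.array([1.0, -2.0, 0.5, -0.1])
alpha = 0.25
output = np.where(x >= 0, x, alpha * x)   # [ 1.    -0.5    0.5   -0.025]
grad_alpha = np.where(x >= 0, 0.0, x)     # [ 0.    -2.     0.    -0.1  ]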
Example 2
    def test_invalid_alpha_axes_parameter(self):
        prelu_layer = layers.PRelu(10, alpha_axes=2)
        with self.assertRaises(ValueError):
            # cannot specify axis 2, because the
            # 2D input only has axes 0 and 1
            layers.Input(10) > prelu_layer

        with self.assertRaises(ValueError):
            # axis 0 is not allowed
            layers.PRelu(10, alpha_axes=0)
Example 3
    def test_invalid_alpha_axes_parameter(self):
        # axis 1 can be specified here, but not axis 2
        prelu_layer = layers.PRelu(10, alpha_axes=2)
        connection = layers.Input(10) > prelu_layer
        with self.assertRaises(ValueError):
            prelu_layer.initialize()

        # cannot specify alpha per input sample
        prelu_layer = layers.PRelu(10, alpha_axes=0)
        connection = layers.Input(10) > prelu_layer
        with self.assertRaises(ValueError):
            prelu_layer.initialize()
Example 4
    def test_invalid_alpha_axes_parameter(self):
        network = layers.join(
            layers.PRelu(10, alpha_axes=2),
            layers.Relu(),
        )
        with self.assertRaises(LayerConnectionError):
            # cannot specify axis 2, because the
            # 2D input only has axes 0 and 1
            layers.join(layers.Input(10), network)

        with self.assertRaises(ValueError):
            # axis 0 is not allowed
            layers.PRelu(10, alpha_axes=0)
Example 5
    def test_prelu_alpha_init_constant_value(self):
        prelu_layer = layers.PRelu(10, alpha=0.25)
        prelu_layer.create_variables((None, 5))

        alpha = self.eval(prelu_layer.alpha)
        self.assertEqual(alpha.shape, (10, ))
        np.testing.assert_array_almost_equal(alpha, np.ones(10) * 0.25)
Example 6
    def test_prelu_random_params(self):
        prelu_layer = layers.PRelu(10, alpha=None)
        connection = layers.Input(10) > prelu_layer
        prelu_layer.initialize()

        alpha = prelu_layer.alpha.get_value()
        self.assertEqual(10, np.unique(alpha).size)
Example 7
    def test_prelu_layer_param_dense(self):
        prelu_layer = layers.PRelu(10, alpha=0.25)
        layers.Input(10) > prelu_layer

        alpha = self.eval(prelu_layer.alpha)

        self.assertEqual(alpha.shape, (10,))
        np.testing.assert_array_almost_equal(alpha, np.ones(10) * 0.25)
Example 8
    def test_prelu_layer_param_dense(self):
        prelu_layer = layers.PRelu(10, alpha=0.25)
        connection = layers.Input(10) > prelu_layer
        prelu_layer.initialize()

        alpha = prelu_layer.alpha.get_value()

        self.assertEqual(alpha.shape, (10, ))
        np.testing.assert_array_almost_equal(alpha, np.ones(10) * 0.25)
Example 9
    def test_prelu_output_by_dense_input(self):
        prelu_layer = layers.PRelu(1, alpha=0.25)
        layers.Input(1) > prelu_layer

        input_data = np.array([[10, 1, 0.1, 0, -0.1, -1]]).T
        expected_output = np.array([[10, 1, 0.1, 0, -0.025, -0.25]]).T
        actual_output = self.eval(prelu_layer.activation_function(input_data))

        np.testing.assert_array_almost_equal(expected_output, actual_output)
Example 10
    def test_prelu_output_by_dense_input(self):
        prelu_layer = layers.PRelu(alpha=0.25)
        prelu_layer.create_variables((None, 1))

        X = np.array([[10, 1, 0.1, 0, -0.1, -1]]).T
        expected_output = np.array([[10, 1, 0.1, 0, -0.025, -0.25]]).T
        actual_output = self.eval(prelu_layer.activation_function(X))

        np.testing.assert_array_almost_equal(expected_output, actual_output)
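Both variants above check the same element-wise definition: f(x) = x for x >= 0 and f(x) = alpha * x otherwise. A standalone NumPy sketch that reproduces the expected values (prelu here is a hypothetical helper, not the NeuPy API):

import numpy as np

def prelu(x, alpha=0.25):
    # Identity for non-negative inputs, alpha-scaled for negative ones
    return np.where(x >= 0, x, alpha * x)

x = np.array([10, 1, 0.1, 0, -0.1, -1])
print(prelu(x))  # [10.     1.     0.1    0.    -0.025 -0.25 ]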
Example 11
    def test_prelu_output_by_spatial_input(self):
        network = layers.join(
            layers.Input((10, 10, 3)),
            layers.Convolution((3, 3, 5)),
            layers.PRelu(alpha=0.25, alpha_axes=(1, 3)),
        )

        X = asfloat(np.random.random((1, 10, 10, 3)))
        actual_output = self.eval(network.output(X))
        self.assertEqual(actual_output.shape, (1, 8, 8, 5))
Example 12
    def test_prelu_layer_param_conv(self):
        input_layer = layers.Input((10, 10, 3))
        conv_layer = layers.Convolution((3, 3, 5))
        prelu_layer = layers.PRelu(alpha=0.25, alpha_axes=(1, 3))

        input_layer > conv_layer > prelu_layer

        alpha = self.eval(prelu_layer.alpha)
        expected_alpha = np.ones((8, 5)) * 0.25

        self.assertEqual(alpha.shape, (8, 5))
        np.testing.assert_array_almost_equal(alpha, expected_alpha)
Example 13
    def test_prelu_layer_param_conv(self):
        network = layers.join(
            layers.Input((10, 10, 3)),
            layers.Convolution((3, 3, 5)),
            layers.PRelu(alpha=0.25, alpha_axes=(1, 3), name='prelu'),
        )
        network.create_variables()

        alpha = self.eval(network.layer('prelu').alpha)
        expected_alpha = np.ones((8, 5)) * 0.25

        self.assertEqual(alpha.shape, (8, 5))
        np.testing.assert_array_almost_equal(alpha, expected_alpha)
Example 14
    def test_prelu_layer_param_conv(self):
        input_layer = layers.Input((3, 10, 10))
        conv_layer = layers.Convolution((5, 3, 3))
        prelu_layer = layers.PRelu(alpha=0.25, alpha_axes=(1, 3))
        connection = input_layer > conv_layer > prelu_layer

        conv_layer.initialize()
        prelu_layer.initialize()

        alpha = prelu_layer.alpha.get_value()
        expected_alpha = np.ones((5, 8)) * 0.25

        self.assertEqual(alpha.shape, (5, 8))
        np.testing.assert_array_almost_equal(alpha, expected_alpha)
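In each layout the alpha shape follows directly from the layer's output shape: alpha_axes=(1, 3) selects axes 1 and 3 of the 4D output, so the channels-last output (None, 8, 8, 5) gives (8, 5) while the channels-first output (None, 5, 8, 8) gives (5, 8). A sketch of that rule (prelu_alpha_shape is a hypothetical helper):

def prelu_alpha_shape(output_shape, alpha_axes):
    # One alpha per position along each chosen axis; the batch axis (0)
    # is never included, so alpha is shared across samples.
    return tuple(output_shape[axis] for axis in alpha_axes)

assert prelu_alpha_shape((None, 8, 8, 5), alpha_axes=(1, 3)) == (8, 5)
assert prelu_alpha_shape((None, 5, 8, 8), alpha_axes=(1, 3)) == (5, 8)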
Example 15
    def test_prelu_output_by_spatial_input(self):
        input_data = asfloat(np.random.random((1, 10, 10, 3)))

        input_layer = layers.Input((10, 10, 3))
        conv_layer = layers.Convolution((3, 3, 5))
        prelu_layer = layers.PRelu(alpha=0.25, alpha_axes=(1, 3))

        connection = input_layer > conv_layer > prelu_layer

        actual_output = input_data
        for layer in connection:
            actual_output = layer.output(actual_output)

        actual_output = self.eval(actual_output)
        self.assertEqual(actual_output.shape, (1, 8, 8, 5))
Example 16
    def test_prelu_variables(self):
        network = layers.join(
            layers.Input(2),
            layers.PRelu(3, name='prelu'),
        )
        self.assertDictEqual(network.layer('prelu').variables, {})

        network.create_variables()
        variables = network.layer('prelu').variables
        self.assertSequenceEqual(sorted(variables.keys()),
                                 ['alpha', 'bias', 'weight'])

        self.assertShapesEqual(variables['bias'].shape, (3, ))
        self.assertShapesEqual(variables['weight'].shape, (2, 3))
        self.assertShapesEqual(variables['alpha'].shape, (3, ))
Example 17
	def model_network(self, algorithm='LevenbergMarquardt', model=None, opt=None):

		model = self.decode_model(model)
		if model is None:
			model = [
				[1, 'hidden', 15, 'Linear'],
				[2, 'hidden', 10, 'Linear'],
				[3, 'output', self.output_classes, 'Elu']
			]
			# [Input(4), Elu(1)]
			# [Input(4), Elu(6), Elu(1)] EP: 100
		layer_model = [layers.Input(self.input_features)]
		for layer in model:
			if layer[3] == 'Linear':
				layer_model.append(layers.Linear(layer[2]))
			if layer[3] == 'Relu':
				layer_model.append(layers.Relu(layer[2]))
			if layer[3] == 'Sigmoid':
				layer_model.append(layers.Sigmoid(layer[2]))
			if layer[3] == 'HardSigmoid':
				layer_model.append(layers.HardSigmoid(layer[2]))
			if layer[3] == 'Step':
				layer_model.append(layers.Step(layer[2]))
			if layer[3] == 'Tanh':
				layer_model.append(layers.Tanh(layer[2]))
			if layer[3] == 'Softplus':
				layer_model.append(layers.Softplus(layer[2]))
			if layer[3] == 'Softmax':
				layer_model.append(layers.Softmax(layer[2]))
			if layer[3] == 'Elu':
				layer_model.append(layers.Elu(layer[2]))
			if layer[3] == 'PRelu':
				layer_model.append(layers.PRelu(layer[2]))
			if layer[3] == 'LeakyRelu':
				layer_model.append(layers.LeakyRelu(layer[2]))

		print('Layer model: ' + str(layer_model))

		self.layers = layer_model
		self.select_algorithm(algorithm, options=opt)
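The if-chain above dispatches on the activation name stored in each model row. The same mapping can be written as a lookup table; a compact sketch (build_layers is a hypothetical standalone helper, not part of the original class):

from neupy import layers

LAYER_TYPES = {
    'Linear': layers.Linear, 'Relu': layers.Relu, 'Sigmoid': layers.Sigmoid,
    'HardSigmoid': layers.HardSigmoid, 'Step': layers.Step,
    'Tanh': layers.Tanh, 'Softplus': layers.Softplus,
    'Softmax': layers.Softmax, 'Elu': layers.Elu,
    'PRelu': layers.PRelu, 'LeakyRelu': layers.LeakyRelu,
}

def build_layers(model, input_features):
    # Each model row is [index, role, size, activation_name]
    layer_model = [layers.Input(input_features)]
    for _, _, size, activation in model:
        layer_model.append(LAYER_TYPES[activation](size))
    return layer_model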
Example 18
mean = x_train.mean(axis=(0, 2, 3)).reshape((1, -1, 1, 1))
std = x_train.std(axis=(0, 2, 3)).reshape((1, -1, 1, 1))

x_train -= mean
x_train /= std
x_test -= mean
x_test /= std

target_scaler = OneHotEncoder()
y_train = target_scaler.fit_transform(y_train.reshape((-1, 1))).todense()
y_test = target_scaler.transform(y_test.reshape((-1, 1))).todense()

network = algorithms.Adadelta(
    [
        layers.Input((3, 32, 32)),
        layers.Convolution((64, 3, 3)) > layers.BatchNorm() > layers.PRelu(),
        layers.Convolution((64, 3, 3)) > layers.BatchNorm() > layers.PRelu(),
        layers.MaxPooling((2, 2)),
        layers.Convolution((128, 3, 3)) > layers.BatchNorm() > layers.PRelu(),
        layers.Convolution((128, 3, 3)) > layers.BatchNorm() > layers.PRelu(),
        layers.MaxPooling((2, 2)),
        layers.Reshape(),
        layers.Linear(1024) > layers.BatchNorm() > layers.PRelu(),
        layers.Linear(1024) > layers.BatchNorm() > layers.PRelu(),
        layers.Softmax(10),
    ],
    error='categorical_crossentropy',
    step=0.25,
    shuffle_data=True,
    batch_size=128,
    verbose=True,
)
Example 19
    def test_prelu_alpha_init_random_params(self):
        prelu_layer = layers.PRelu(10, alpha=init.XavierNormal())
        prelu_layer.create_variables((None, 5))

        alpha = self.eval(prelu_layer.alpha)
        self.assertEqual(10, np.unique(alpha).size)
Example 20
    def test_repr_with_size(self):
        self.assertEqual(str(layers.PRelu(10)),
                         ("PRelu(10, alpha_axes=(-1,), alpha=Constant(0.25), "
                          "weight=HeNormal(gain=2), bias=Constant(0), "
                          "name='p-relu-1')"))
Example 21
    def test_prelu_random_params(self):
        prelu_layer = layers.PRelu(10, alpha=init.XavierNormal())
        layers.Input(10) > prelu_layer

        alpha = self.eval(prelu_layer.alpha)
        self.assertEqual(10, np.unique(alpha).size)
Example 22
    verbose=True,
    step=0.1,
    momentum=0.99,
    shuffle_data=True,
    batch_size=64,
    error='binary_crossentropy',
)
conv_autoencoder.architecture()
conv_autoencoder.train(x_unlabeled_4d, x_unlabeled,
                       x_labeled_4d, x_labeled, epochs=10)

x_labeled_encoded = encoder.output(x_labeled_4d).eval()
x_unlabeled_encoded = encoder.output(x_unlabeled_4d).eval()

classifier_network = layers.join(
    layers.PRelu(512),
    layers.Dropout(0.25),
    layers.Softmax(10),
)

encoder_classifier = algorithms.Adadelta(
    layers.Input(encoder.output_shape) > classifier_network,
    verbose=True,
    step=0.05,
    shuffle_data=True,
    batch_size=64,
    error='categorical_crossentropy',
)
encoder_classifier.architecture()
encoder_classifier.train(x_labeled_encoded, y_labeled,
                         x_unlabeled_encoded, y_unlabeled, epochs=100)
Example 23
    def test_storage_save_dict(self):
        network = layers.join(
            layers.parallel([
                layers.Input(2, name='input-1'),
                layers.PRelu(1, name='prelu')
            ], [
                layers.Input(1, name='input-2'),
                layers.Sigmoid(4, name='sigmoid'),
                layers.BatchNorm(name='batch-norm'),
            ]),
            layers.Concatenate(name='concatenate'),
            layers.Softmax(3, name='softmax'),
        )
        dict_network = storage.save_dict(network)

        expected_keys = ('metadata', 'layers', 'graph')
        self.assertItemsEqual(expected_keys, dict_network.keys())

        expected_metadata_keys = ('created', 'language', 'library', 'version')
        actual_metadata_keys = dict_network['metadata'].keys()
        self.assertItemsEqual(expected_metadata_keys, actual_metadata_keys)

        self.assertEqual(len(dict_network['layers']), 7)

        expected_layers = [{
            'class_name': 'Input',
            'configs': {
                'name': 'input-1',
                'shape': (2, )
            },
            'name': 'input-1',
        }, {
            'class_name': 'PRelu',
            'configs': {
                'alpha_axes': (-1, ),
                'name': 'prelu',
                'n_units': 1
            },
            'name': 'prelu',
        }, {
            'class_name': 'Input',
            'configs': {
                'name': 'input-2',
                'shape': (1, )
            },
            'name': 'input-2',
        }, {
            'class_name': 'Sigmoid',
            'configs': {
                'name': 'sigmoid',
                'n_units': 4
            },
            'name': 'sigmoid',
        }, {
            'class_name': 'BatchNorm',
            'configs': {
                'alpha': 0.1,
                'axes': (0, ),
                'epsilon': 1e-05,
                'name': 'batch-norm'
            },
            'name': 'batch-norm',
        }, {
            'class_name': 'Concatenate',
            'configs': {
                'axis': -1,
                'name': 'concatenate'
            },
            'name': 'concatenate',
        }, {
            'class_name': 'Softmax',
            'configs': {
                'name': 'softmax',
                'n_units': 3
            },
            'name': 'softmax',
        }]
        actual_layers = []
        for i, layer in enumerate(dict_network['layers']):
            self.assertIn('parameters', layer, msg="Layer #" + str(i))

            layer = copy.deepcopy(layer)
            del layer['parameters']
            actual_layers.append(layer)

        self.assertEqual(actual_layers, expected_layers)
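The dictionary produced by storage.save_dict can be restored into a network that shares the same layer graph; a round-trip sketch (assuming storage.load_dict, the counterpart of save_dict in NeuPy's storage module):

from neupy import layers, storage

def make_network():
    return layers.join(
        layers.Input(2, name='input-1'),
        layers.PRelu(1, name='prelu'),
    )

network = make_network()
dict_network = storage.save_dict(network)

# Restore the saved parameters into a freshly built copy of the graph
restored = make_network()
storage.load_dict(restored, dict_network)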
Example 24
    def test_repr_without_size(self):
        self.assertEqual(
            "PRelu(alpha_axes=(-1,), alpha=Constant(0.25), name='p-relu-1')",
            str(layers.PRelu()))