def test_transfer_learning_using_names(self):
    network_pretrained = layers.join(
        layers.Input(10),
        layers.Elu(5, name='elu-a'),
        layers.Elu(2, name='elu-b'),
        layers.Sigmoid(1),
    )
    network_new = layers.join(
        layers.Input(10),
        layers.Elu(5, name='elu-a'),
        layers.Elu(2, name='elu-b'),
        layers.Elu(8, name='elu-c'),  # new layer
    )
    pretrained_layers_stored = storage.save_dict(network_pretrained)

    storage.load_dict(
        network_new,
        pretrained_layers_stored,
        load_by='names',
        skip_validation=False,
        ignore_missing=True)

    random_input = asfloat(np.random.random((12, 10)))

    pretrained_output = self.eval(
        network_pretrained.end('elu-b').output(random_input))
    new_network_output = self.eval(
        network_new.end('elu-b').output(random_input))

    np.testing.assert_array_almost_equal(
        pretrained_output, new_network_output)

    pred = self.eval(network_new.output(random_input))
    self.assertEqual(pred.shape, (12, 8))
def test_transfer_learning_using_position(self):
    network_pretrained = layers.join(
        layers.Input(10),
        layers.Elu(5),
        layers.Elu(2, name='elu'),
        layers.Sigmoid(1),
    )
    network_new = layers.join(
        layers.Input(10),
        layers.Elu(5),
        layers.Elu(2),
    )
    pretrained_layers_stored = storage.save_dict(network_pretrained)

    with self.assertRaises(ParameterLoaderError):
        storage.load_dict(
            network_new,
            pretrained_layers_stored,
            load_by='names_or_order',
            ignore_missing=False)

    storage.load_dict(
        network_new,
        pretrained_layers_stored,
        load_by='names_or_order',
        ignore_missing=True)

    random_input = asfloat(np.random.random((12, 10)))

    new_network_output = self.eval(network_new.output(random_input))
    pretrained_output = self.eval(
        network_pretrained.end('elu').output(random_input))

    np.testing.assert_array_almost_equal(
        pretrained_output, new_network_output)
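# A minimal sketch (not NeuPy's actual implementation) of the matching rule
# the two tests above exercise: `load_by='names'` copies stored parameters
# into layers whose names appear in the saved dict, and `ignore_missing=True`
# leaves unmatched layers (like 'elu-c') at their random initialization,
# while `ignore_missing=False` raises instead. The helper name and the
# {name: ...} dict shapes below are assumptions for illustration only.
def load_by_names_sketch(network_layers, stored, ignore_missing=True):
    # network_layers: {layer_name: layer}, stored: {layer_name: parameters}
    for name, layer in network_layers.items():
        if name in stored:
            layer.parameters = stored[name]
        elif not ignore_missing:
            raise ParameterLoaderError(
                "No stored parameters for layer {}".format(name))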
def test_elu_layer(self):
    test_input = asfloat(np.array([[10, 1, 0.1, 0, -1]]).T)
    expected_output = np.array(
        [[10, 1, 0.1, 0, -0.6321205588285577]]).T

    layer = layers.Elu()
    actual_output = self.eval(layer.activation_function(test_input))

    np.testing.assert_array_almost_equal(
        expected_output, actual_output)
def test_elu_layer_custom_alpha(self):
    # Renamed from the duplicate `test_elu_layer` so both tests actually run;
    # input wrapped in asfloat and evaluated via self.eval for consistency
    # with the test above.
    test_input = asfloat(np.array([[10, 1, 0.1, 0, -1]]).T)
    expected_output = np.array(
        [[10, 1, 0.1, 0, 0.1 * math.exp(-1) - 0.1]]).T

    layer = layers.Elu(alpha=0.1)
    actual_output = self.eval(layer.activation_function(test_input))

    np.testing.assert_array_almost_equal(
        expected_output, actual_output)
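# Reference check, a plain-NumPy sketch independent of the library:
# ELU(x) = x for x >= 0 and alpha * (exp(x) - 1) for x < 0, which is where
# the hard-coded constants above come from, e.g.
# elu_reference(-1) == -0.6321205588285577 and
# elu_reference(-1, alpha=0.1) == 0.1 * math.exp(-1) - 0.1.
def elu_reference(x, alpha=1.0):
    x = np.asarray(x, dtype='float64')
    # np.expm1 computes exp(x) - 1 with better precision near zero.
    return np.where(x >= 0, x, alpha * np.expm1(x))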
def model_network(self, algorithm='LevenbergMarquardt', model=None, opt=None):
    model = self.decode_model(model)

    if model is None:
        model = [
            [1, 'hidden', 15, 'Linear'],
            [2, 'hidden', 10, 'Linear'],
            [3, 'output', self.output_classes, 'Elu'],
        ]

    # [Input(4), Elu(1)]
    # [Input(4), Elu(6), Elu(1)] EP: 100

    # Map the activation name (column 3 of each row) to its layer class,
    # replacing the long chain of repeated `if` statements.
    layer_types = {
        'Linear': layers.Linear,
        'Relu': layers.Relu,
        'Sigmoid': layers.Sigmoid,
        'HardSigmoid': layers.HardSigmoid,
        'Step': layers.Step,
        'Tanh': layers.Tanh,
        'Softplus': layers.Softplus,
        'Softmax': layers.Softmax,
        'Elu': layers.Elu,
        'PRelu': layers.PRelu,
        'LeakyRelu': layers.LeakyRelu,
    }

    layer_model = [layers.Input(self.input_features)]
    for layer in model:
        # layer = [index, role, units, activation]
        layer_model.append(layer_types[layer[3]](layer[2]))

    print('Layer model: ' + str(layer_model))
    self.layers = layer_model
    self.select_algorithm(algorithm, options=opt)
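# Hypothetical usage sketch: the surrounding class is not shown, so the
# class name `NetworkWrapper`, its constructor arguments, and the assumption
# that decode_model passes an already-decoded list through unchanged are all
# illustrative, not part of the source. Rows follow the
# [index, role, units, activation] layout used by the default model above.
net = NetworkWrapper(input_features=4, output_classes=1)
net.model_network(
    algorithm='LevenbergMarquardt',
    model=[
        [1, 'hidden', 6, 'Elu'],
        [2, 'output', 1, 'Elu'],  # mirrors the commented [Input(4), Elu(6), Elu(1)] spec
    ])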