Example #1
    def test_nesterov_momentum(self):
        x_train, _, y_train, _ = simple_classification()
        compare_networks(
            # Test classes
            partial(algorithms.Momentum, nesterov=False),
            partial(algorithms.Momentum, nesterov=True),
            # Test data
            (x_train, y_train),
            # Network configurations
            network=[
                layers.Input(10),
                layers.Sigmoid(20),
                layers.Sigmoid(1)
            ],
            batch_size=None,
            step=0.25,
            momentum=0.9,
            shuffle_data=True,
            verbose=False,
            # Test configurations
            epochs=10,
            show_comparison_plot=False,
        )
Example #2
    def test_sequential_partial_definitions(self):
        # Tree structure:
        #
        #                       Sigmoid(10)
        #                      /
        # Input(10) - Sigmoid(5)
        #                      \
        #                       Softmax(20)
        #
        input_layer = layers.Input(10)
        minimized = input_layer >> layers.Sigmoid(5)
        reconstructed = minimized >> layers.Sigmoid(10)
        classifier = minimized >> layers.Softmax(20)

        x_matrix = asfloat(np.random.random((3, 10)))
        minimized_output = self.eval(minimized.output(x_matrix))
        self.assertEqual((3, 5), minimized_output.shape)

        reconstructed_output = self.eval(reconstructed.output(x_matrix))
        self.assertEqual((3, 10), reconstructed_output.shape)

        classifier_output = self.eval(classifier.output(x_matrix))
        self.assertEqual((3, 20), classifier_output.shape)
Example #3
    def test_storage_pickle_save_and_load_during_the_training(self):
        tempdir = tempfile.mkdtemp()
        x_train, x_test, y_train, y_test = simple_classification()

        errors = {}

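        # Save the parameters after every epoch. At epoch 4, roll back to
        # the epoch-2 snapshot and stop training, so the final validation
        # score matches errors[2] (verified by the assertions below).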
        def on_epoch_end(network):
            epoch = network.last_epoch
            errors[epoch] = network.score(x_test, y_test)

            if epoch == 4:
                storage.load_pickle(
                    network.network,
                    os.path.join(tempdir, 'training-epoch-2'))
                raise StopTraining('Stop training process after 4th epoch')
            else:
                storage.save_pickle(
                    network.network,
                    os.path.join(tempdir, 'training-epoch-{}'.format(epoch)))

        gdnet = algorithms.GradientDescent(
            network=[
                layers.Input(10),
                layers.Sigmoid(4),
                layers.Sigmoid(1)
            ],
            signals=on_epoch_end,
            batch_size=None,
            step=0.5
        )
        gdnet.train(x_train, y_train)

        validation_error = gdnet.score(x_test, y_test)

        self.assertGreater(errors[2], errors[4])
        self.assertAlmostEqual(validation_error, errors[2])
        self.assertNotAlmostEqual(validation_error, errors[4])
Example #4
    def test_pandas_for_bp(self):
        dataset = datasets.load_diabetes()
        target = dataset.target.reshape(-1, 1)

        input_scaler = preprocessing.MinMaxScaler()
        target_scaler = preprocessing.MinMaxScaler()

        n_features = dataset.data.shape[1]
        input_columns = ['column_' + str(i) for i in range(n_features)]

        pandas_data = pd.DataFrame(dataset.data, columns=input_columns)
        pandas_data['target'] = target_scaler.fit_transform(target)
        pandas_data[input_columns] = input_scaler.fit_transform(
            pandas_data[input_columns])

        x_train, x_test, y_train, y_test = train_test_split(
            asfloat(pandas_data[input_columns]),
            asfloat(pandas_data['target']),
            test_size=0.15)

        bpnet = algorithms.GradientDescent(
            [
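                # load_diabetes provides 10 input features, hence Input(10)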
                layers.Input(10),
                layers.Sigmoid(30),
                layers.Sigmoid(1),
            ],
            batch_size=None,
        )
        bpnet.train(x_train, y_train, epochs=50)
        y_predict = bpnet.predict(x_test).reshape(-1, 1)
        y_test = y_test.reshape(-1, 1)

        error = objectives.rmsle(
            target_scaler.inverse_transform(y_test),
            target_scaler.inverse_transform(y_predict).round())
        error = self.eval(error)
        self.assertGreater(0.5, error)
Example #5
    def test_failed_load_parameter_invalid_type(self):
        sigmoid = layers.Sigmoid(1, bias=None)
        network = layers.join(layers.Input(2), sigmoid)
        network.create_variables()

        with self.assertRaisesRegexp(ParameterLoaderError, "equal to None"):
            load_layer_parameter(
                sigmoid, {
                    'parameters': {
                        'bias': {
                            'value': np.array([[0]]),
                            'trainable': True,
                        },
                    },
                })
Example #6
    def test_compare_bp_and_cg(self):
        x_train, x_test, y_train, y_test = simple_classification()

        compare_networks(
            # Test classes
            partial(
                algorithms.GradientDescent,
                batch_size=None,
                step=1.0,
            ),
            partial(algorithms.ConjugateGradient,
                    update_function='fletcher_reeves'),
            # Test data
            (asfloat(x_train), asfloat(y_train)),
            # Network configurations
            network=layers.join(
                layers.Input(10),
                layers.Sigmoid(5),
                layers.Sigmoid(1),
            ),
            loss='mse',
            shuffle_data=True,
            # Test configurations
            epochs=50,
            show_comparison_plot=False)
Example #7
    def test_compare_bp_and_hessian(self):
        x_train, _, y_train, _ = simple_classification()
        compare_networks(
            # Test classes
            algorithms.GradientDescent,
            partial(algorithms.HessianDiagonal, min_eigval=0.01),
            # Test data
            (x_train, y_train),
            # Network configurations
            connection=[
                layers.Input(10),
                layers.Sigmoid(20,
                               weight=init.Uniform(-1, 1),
                               bias=init.Uniform(-1, 1)),
                layers.Sigmoid(1,
                               weight=init.Uniform(-1, 1),
                               bias=init.Uniform(-1, 1)),
            ],
            step=0.1,
            shuffle_data=True,
            verbose=False,
            # Test configurations
            epochs=50,
            show_comparison_plot=False)
Example #8
    def test_repeated_inline_connections_with_list(self):
        input_layer_1 = layers.Input(1)
        input_layer_2 = layers.Input(1)
        hd1 = layers.Relu(4)
        hd2 = layers.Sigmoid(4)
        output_layer = layers.Softmax(5)

        connection_1 = input_layer_1 > [hd1, hd2] > output_layer
        connection_2 = input_layer_2 > [hd1, hd2] > output_layer

        self.assertListEqual(list(connection_1),
                             [input_layer_1, hd1, hd2, output_layer])

        self.assertListEqual(list(connection_2),
                             [input_layer_2, hd1, hd2, output_layer])
Example #9
    def assert_invalid_step_values(self, step, initial_value,
                                   final_value, epochs):

        x_train, x_test, y_train, y_test = simple_classification()
        optimizer = algorithms.Momentum(
            [
                layers.Input(10),
                layers.Sigmoid(5),
                layers.Sigmoid(1),
            ],
            step=step,
            momentum=0.99,
            batch_size=None,
            verbose=False,
            nesterov=True,
        )

        step = self.eval(optimizer.step)
        self.assertAlmostEqual(step, initial_value)

        optimizer.train(x_train, y_train, x_test, y_test, epochs=epochs)

        step = self.eval(optimizer.step)
        self.assertAlmostEqual(step, final_value)
Example #10
    def test_parallel_many_to_many_connection(self):
        relu_layer_1 = layers.Relu(1)
        sigmoid_layer_1 = layers.Sigmoid(1)

        relu_layer_2 = layers.Relu(2)
        sigmoid_layer_2 = layers.Sigmoid(2)

        connection = layers.join(
            [
                sigmoid_layer_1,
                relu_layer_1,
            ],
            [
                sigmoid_layer_2,
                relu_layer_2,
            ],
        )

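        # Joining two lists connects every layer in the first group to
        # every layer in the second group (many-to-many), as the
        # connection counts below confirm.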
        self.assertEqual(connection.input_shape, [None, None])
        self.assertEqual(connection.output_shape, [(2, ), (2, )])

        graph = connection.graph

        for layer in [relu_layer_1, sigmoid_layer_1]:
            n_forward_connections = len(graph.forward_graph[layer])
            n_backward_connections = len(graph.backward_graph[layer])

            self.assertEqual(n_forward_connections, 2)
            self.assertEqual(n_backward_connections, 0)

        for layer in [relu_layer_2, sigmoid_layer_2]:
            n_forward_connections = len(graph.forward_graph[layer])
            n_backward_connections = len(graph.backward_graph[layer])

            self.assertEqual(n_forward_connections, 0)
            self.assertEqual(n_backward_connections, 2)
Example #11
    def test_progressbar_signal(self):
        x_train = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
        y_train = np.array([[1, 0, 0, 1]]).T

        class BatchStartSignal(object):
            def update_end(self, network):
                progressbar_signal = network.events.signals[0]
                bar = progressbar_signal.bar

                if isinstance(bar, progressbar.NullBar):
                    network.nullbar += 1
                else:
                    network.otherbar += 1

        network = algorithms.GradientDescent(
            layers.join(
                layers.Input(2),
                layers.Sigmoid(1),
            ),
            verbose=False,
            batch_size=2,
            signals=BatchStartSignal)

        network.nullbar = 0
        network.otherbar = 0

        network.verbose = True
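        # 4 samples with batch_size=2 -> 2 batches per epoch; 10 epochs
        # trigger update_end 20 times, all with a visible progress bar.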
        network.train(x_train, y_train, epochs=10)

        self.assertEqual(network.nullbar, 0)
        self.assertEqual(network.otherbar, 20)

        network.verbose = True
        network.batch_size = 10
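        # batch_size exceeds the 4 training samples -> one batch per epoch;
        # the assertions below show that a NullBar is used in this case,
        # adding 10 null updates over 10 epochs.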
        network.train(x_train, y_train, epochs=10)

        self.assertEqual(network.nullbar, 10)
        self.assertEqual(network.otherbar, 20)

        network.verbose = False
        network.batch_size = 1
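        # verbose=False -> NullBar on every update: 4 batches x 10 epochs
        # adds 40 more null updates (50 in total).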
        network.train(x_train, y_train, epochs=10)

        self.assertEqual(network.nullbar, 50)
        self.assertEqual(network.otherbar, 20)
Example #12
    def train_lstm(self, data, **lstm_options):
        x_train, x_test, y_train, y_test = data
        network = algorithms.RMSProp(
            [
                layers.Input(self.n_time_steps),
                layers.Embedding(self.n_categories, 10),
                layers.LSTM(20, **lstm_options),
                layers.Sigmoid(1),
            ],
            step=0.05,
            verbose=False,
            batch_size=16,
            error='binary_crossentropy',
        )
        network.train(x_train, y_train, x_test, y_test, epochs=20)

        y_predicted = network.predict(x_test).round()
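        # predict() returns an (n, 1) column; transpose it so it lines up
        # with y_test for the elementwise comparison.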
        accuracy = (y_predicted.T == y_test).mean()
        return accuracy
Example #13
    def test_storage_data_layer_compatibility(self):
        connection = layers.Input(2) > layers.Sigmoid(3, name='sigm')
        sigmoid = connection.layer('sigm')

        with self.assertRaises(ParameterLoaderError):
            validate_layer_compatibility(sigmoid, {
                'name': 'sigm',
                'class_name': 'Sigmoid',
                'input_shape': (3,),  # wrong input shape
                'output_shape': (3,),
                'configs': {},
                'parameters': {
                    'weight': {'trainable': True, 'value': np.ones((2, 3))},
                    'bias': {'trainable': True, 'value': np.ones((3,))},
                }
            })

        with self.assertRaises(ParameterLoaderError):
            validate_layer_compatibility(sigmoid, {
                'name': 'sigm',
                'class_name': 'Sigmoid',
                'input_shape': (2,),
                'output_shape': (2,),  # wrong output shape
                'configs': {},
                'parameters': {
                    'weight': {'trainable': True, 'value': np.ones((2, 3))},
                    'bias': {'trainable': True, 'value': np.ones((3,))},
                }
            })

        result = validate_layer_compatibility(sigmoid, {
            'name': 'sigm',
            'class_name': 'Sigmoid',
            'input_shape': (2,),
            'output_shape': (3,),
            'configs': {},
            'parameters': {
                'weight': {'trainable': True, 'value': np.ones((2, 3))},
                'bias': {'trainable': True, 'value': np.ones((3,))},
            }
        })
        self.assertIsNone(result)
Example #14
    def test_stacked_gru(self):
        x_train, x_test, y_train, y_test = self.data
        network = algorithms.RMSProp(
            [
                layers.Input(self.n_time_steps),
                layers.Embedding(self.n_categories, 10),
                layers.GRU(10, only_return_final=False),
                layers.GRU(1),
                layers.Sigmoid(1),
            ],
            step=0.01,
            verbose=False,
            batch_size=1,
            loss='binary_crossentropy',
        )
        network.train(x_train, y_train, x_test, y_test, epochs=10)

        y_predicted = network.predict(x_test).round()
        accuracy = (y_predicted.T == y_test).mean()

        self.assertGreaterEqual(accuracy, 0.8)
Example #15
    def test_layer_structure_ignore_layers_attr(self):
        input_layer = layers.Input(10)
        connection = input_layer > layers.Sigmoid(1)

        with tempfile.NamedTemporaryFile() as temp:
            plots.layer_structure(connection,
                                  filepath=temp.name,
                                  show=False,
                                  ignore_layers=[])
            filesize_first = os.path.getsize(temp.name)

        with tempfile.NamedTemporaryFile() as temp:
            plots.layer_structure(connection,
                                  filepath=temp.name,
                                  show=False,
                                  ignore_layers=[layers.Sigmoid])
            filesize_second = os.path.getsize(temp.name)

        # First one should have more layers to draw
        # than the second one
        self.assertGreater(filesize_first, filesize_second)
Example #16
    def test_connection_initializations(self):
        possible_connections = (
            (2, 3, 1),

            # as a list
            [layers.Input(2),
             layers.Sigmoid(3),
             layers.Tanh(1)],

            # as forward sequence with inline operators
            layers.Input(2) > layers.Relu(10) > layers.Tanh(1),
            layers.Input(2) >> layers.Relu(10) >> layers.Tanh(1),

            # as backward sequence with inline operators
            layers.Tanh(1) < layers.Relu(10) < layers.Input(2),
            layers.Tanh(1) << layers.Relu(10) << layers.Input(2),
        )

        for connection in possible_connections:
            network = algorithms.GradientDescent(connection)
            self.assertEqual(len(network.layers), 3, msg=connection)
Example #17
	def model_network(self, algorithm='LevenbergMarquardt', model=None, opt=None):

		model = self.decode_model(model)
		if model is None:
			model = [
				[1, 'hidden', 15, 'Linear'],
				[2, 'hidden', 10, 'Linear'],
				[3, 'output', self.output_classes, 'Elu']
			]
			# [Input(4), Elu(1)]
			# [Input(4), Elu(6), Elu(1)] EP: 100
		layer_model = [layers.Input(self.input_features)]
		for layer in model:
			if layer[3] == 'Linear':
				layer_model.append(layers.Linear(layer[2]))
			elif layer[3] == 'Relu':
				layer_model.append(layers.Relu(layer[2]))
			elif layer[3] == 'Sigmoid':
				layer_model.append(layers.Sigmoid(layer[2]))
			elif layer[3] == 'HardSigmoid':
				layer_model.append(layers.HardSigmoid(layer[2]))
			elif layer[3] == 'Step':
				layer_model.append(layers.Step(layer[2]))
			elif layer[3] == 'Tanh':
				layer_model.append(layers.Tanh(layer[2]))
			elif layer[3] == 'Softplus':
				layer_model.append(layers.Softplus(layer[2]))
			elif layer[3] == 'Softmax':
				layer_model.append(layers.Softmax(layer[2]))
			elif layer[3] == 'Elu':
				layer_model.append(layers.Elu(layer[2]))
			elif layer[3] == 'PRelu':
				layer_model.append(layers.PRelu(layer[2]))
			elif layer[3] == 'LeakyRelu':
				layer_model.append(layers.LeakyRelu(layer[2]))

		print('Layer model: ' + str(layer_model))

		self.layers = layer_model
		self.select_algorithm(algorithm, options=opt)
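A more compact alternative to the if/elif chain above is a name-to-class
lookup table. A minimal sketch, not part of the original code, assuming the
same `layers` module and the `[index, kind, size, activation]` row format;
unknown activation names are skipped, just as the chain skips them:

    ACTIVATION_LAYERS = {
        'Linear': layers.Linear, 'Relu': layers.Relu,
        'Sigmoid': layers.Sigmoid, 'HardSigmoid': layers.HardSigmoid,
        'Step': layers.Step, 'Tanh': layers.Tanh,
        'Softplus': layers.Softplus, 'Softmax': layers.Softmax,
        'Elu': layers.Elu, 'PRelu': layers.PRelu,
        'LeakyRelu': layers.LeakyRelu,
    }

    layer_model = [layers.Input(self.input_features)]
    for _, _, size, activation in model:
        if activation in ACTIVATION_LAYERS:
            layer_model.append(ACTIVATION_LAYERS[activation](size))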
Example #18
    def assertCanNetworkOverfit(self,
                                network_class,
                                epochs=100,
                                min_accepted_loss=0.001):

        x_train = 2 * np.random.random((10, 2)) - 1  # zero centered
        y_train = np.random.random((10, 1))

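        # Sample the weights once as plain arrays so every network created
        # through this helper starts from identical initial parameters.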
        relu_xavier_normal = init.XavierNormal(gain=4)
        relu_weight = relu_xavier_normal.sample((2, 20), return_array=True)

        xavier_normal = init.XavierNormal(gain=2)
        sigmoid_weight = xavier_normal.sample((20, 1), return_array=True)

        optimizer = network_class([
            layers.Input(2),
            layers.Relu(20, weight=relu_weight),
            layers.Sigmoid(1, weight=sigmoid_weight),
        ])

        optimizer.train(x_train, y_train, epochs=epochs)
        self.assertLess(optimizer.errors.train[-1], min_accepted_loss)
Example #19
    def test_connect_cutted_layers_to_other_layers(self):
        network = layers.join(
            layers.Input(10, name='input-1'),
            layers.Relu(8, name='relu-0'),
            layers.Relu(5, name='relu-1'),
            layers.Relu(2, name='relu-2'),
            layers.Relu(1, name='relu-3'),
        )

        self.assertShapesEqual(network.input_shape, (None, 10))
        self.assertShapesEqual(network.output_shape, (None, 1))
        self.assertEqual(len(network), 5)

        cutted_network = network.start('relu-1').end('relu-2')
        self.assertEqual(cutted_network.input_shape, None)
        self.assertShapesEqual(cutted_network.output_shape, (None, 2))
        self.assertEqual(len(cutted_network), 2)
        self.assertDictEqual(
            cutted_network.forward_graph, {
                network.layer('relu-1'): [network.layer('relu-2')],
                network.layer('relu-2'): [],
            })

        new_network = layers.join(
            layers.Input(8),
            cutted_network,
            layers.Sigmoid(11),
        )
        self.assertShapesEqual(new_network.input_shape, (None, 8))
        self.assertShapesEqual(new_network.output_shape, (None, 11))
        self.assertEqual(len(new_network), 4)

        x_test = asfloat(np.ones((7, 10)))
        y_predicted = self.eval(network.output(x_test))
        self.assertEqual(y_predicted.shape, (7, 1))

        x_test = asfloat(np.ones((7, 8)))
        y_predicted = self.eval(new_network.output(x_test))
        self.assertEqual(y_predicted.shape, (7, 11))
Example #20
    def test_connection_initializations(self):
        possible_connections = (
            (2, 3, 1),

            # as a list
            [layers.Input(2),
             layers.Sigmoid(3),
             layers.Tanh(1)],

            # as forward sequence with inline operators
            layers.Input(2) > layers.Relu(10) > layers.Tanh(1),
            layers.Input(2) >> layers.Relu(10) >> layers.Tanh(1),

            # as backward sequence with inline operators
            layers.Tanh(1) < layers.Relu(10) < layers.Input(2),
            layers.Tanh(1) << layers.Relu(10) << layers.Input(2),
        )

        for i, connection in enumerate(possible_connections, start=1):
            network = algorithms.GradientDescent(connection)
            message = "[Test #{}] Connection: {}".format(i, connection)
            self.assertEqual(len(network.layers), 3, msg=message)
Example #21
    def test_optimizer_with_bad_shape_input_passed(self):
        optimizer = algorithms.GradientDescent(
            [
                layers.Input((10, 10, 3)),
                layers.Convolution((3, 3, 7)),
                layers.Reshape(),
                layers.Sigmoid(1),
            ],
            batch_size=None,
            verbose=False,
            loss='mse',
        )

        image = np.random.random((10, 10, 3))
        optimizer.train(image, [1], epochs=1)

        retrieved_score = optimizer.score(image, [1])
        self.assertLessEqual(0, retrieved_score)
        self.assertGreaterEqual(1, retrieved_score)

        prediction = optimizer.predict(image)
        self.assertEqual(prediction.ndim, 2)
Example #22
    def test_gru_with_4d_input(self):
        x_train, x_test, y_train, y_test = self.data
        network = algorithms.RMSProp(
            [
                layers.Input(self.n_time_steps),
                layers.Embedding(self.n_categories, 10),
                # Make 4D input
                layers.Reshape((self.n_time_steps, 5, 2), name='reshape'),
                layers.GRU(10),
                layers.Sigmoid(1),
            ],
            step=0.1,
            verbose=False,
            batch_size=1,
            error='binary_crossentropy',
        )
        network.train(x_train, y_train, x_test, y_test, epochs=2)

        reshape = network.connection.end('reshape')
        # +1 for batch size
        output_dimension = len(reshape.output_shape) + 1
        self.assertEqual(4, output_dimension)
Example #23
    def test_tree_graph(self):
        l0 = layers.Input(1)
        l1 = layers.Sigmoid(10)
        l2 = layers.Sigmoid(20)
        l3 = layers.Sigmoid(30)
        l4 = layers.Sigmoid(40)
        l5 = layers.Sigmoid(50)
        l6 = layers.Sigmoid(60)

        # Tree Structure:
        #
        # l0 - l1 - l5 - l6
        #        \
        #         l2 - l4
        #           \
        #            -- l3
        graph = LayerGraph()
        # Connection #1
        graph.connect_layers(l0, l1)
        graph.connect_layers(l1, l5)
        graph.connect_layers(l5, l6)
        # Connection #2
        graph.connect_layers(l1, l2)
        graph.connect_layers(l2, l3)
        # Connection #3
        graph.connect_layers(l2, l4)

        for layer in graph.forward_graph:
            layer.initialize()

        subgraph = graph.subgraph_for_output(l6)
        self.assertEqual(1, len(subgraph.output_layers))
        self.assertIs(l6, subgraph.output_layers[0])
        self.assertEqual(1, len(subgraph.input_layers))
        self.assertIs(l0, subgraph.input_layers[0])

        x = T.matrix()
        outputs = graph.propagate_forward(x)

        test_input = asfloat(np.array([[1]]))
        expected_shapes = [(1, 30), (1, 40), (1, 60)]

        for output in outputs:
            output_value = output.eval({x: test_input})
            self.assertIn(output_value.shape, expected_shapes)
Example #24
    def test_mixture_of_experts_init_gating_network_exceptions(self):
        networks = self.networks

        with self.assertRaises(ValueError):
            # Invalid gating error function
            algorithms.MixtureOfExperts(
                networks=networks,
                gating_network=algorithms.GradientDescent(
                    layers.Input(1) > layers.Softmax(2),
                    error='rmsle',
                    verbose=False),
            )

        with self.assertRaises(ValueError):
            # Invalid gating network algorithm
            algorithms.MixtureOfExperts(
                networks=networks,
                gating_network=algorithms.PNN(),
            )

        with self.assertRaises(ValueError):
            # Invalid gating network output layer
            algorithms.MixtureOfExperts(
                networks=networks,
                gating_network=algorithms.GradientDescent(
                    layers.Input(1) > layers.Sigmoid(2),
                    verbose=False,
                ))

        with self.assertRaises(ValueError):
            # Invalid gating network output layer size
            algorithms.MixtureOfExperts(
                networks=networks,
                gating_network=algorithms.GradientDescent(
                    layers.Input(1) > layers.Softmax(1),
                    verbose=False,
                ))
Example #25
    def test_graph_connect_layer_missed_input_shapes(self):
        # input_layer_1 -> concatenate
        #                    /
        #              hidden_layer
        #                  /
        #         input_layer_2
        input_layer_1 = layers.Input(10)
        hidden_layer = layers.Sigmoid(10)
        input_layer_2 = layers.Input(2)
        merge_layer = layers.Concatenate()

        graph = LayerGraph()

        # First we join layers that don't have input shapes
        graph.connect_layers(hidden_layer, merge_layer)
        self.assertEqual(merge_layer.output_shape, None)

        # Now we can join a layer that has a known input shape
        graph.connect_layers(input_layer_1, merge_layer)
        self.assertEqual(merge_layer.output_shape, None)

        # At this point the connection is fully constructed
        graph.connect_layers(input_layer_2, hidden_layer)
        self.assertEqual(merge_layer.output_shape, (20,))
Example #26
    def test_stacked_gru_with_enabled_backwards_option(self):
        x_train, x_test, y_train, y_test = self.data
        x_train = x_train[:, ::-1]
        x_test = x_test[:, ::-1]

        network = algorithms.RMSProp(
            [
                layers.Input(self.n_time_steps),
                layers.Embedding(self.n_categories, 10),
                layers.GRU(10, only_return_final=False, backwards=True),
                layers.GRU(2, backwards=True),
                layers.Sigmoid(1),
            ],
            step=0.1,
            verbose=False,
            batch_size=1,
            error='binary_crossentropy',
        )
        network.train(x_train, y_train, x_test, y_test, epochs=20)

        y_predicted = network.predict(x_test).round()
        accuracy = (y_predicted.T == y_test).mean()

        self.assertGreaterEqual(accuracy, 0.9)
Example #27
    def test_gd_custom_target(self):
        def custom_loss(actual, predicted):
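            # The custom target placeholder below is 3-D: (None, 1, 1).
            # Collapse it to an (n, 1) column so it matches the network's
            # prediction shape before computing RMSE.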
            actual_shape = tf.shape(actual)
            n_samples = actual_shape[0]
            actual = tf.reshape(actual, (n_samples, 1))
            return objectives.rmse(actual, predicted)

        optimizer = algorithms.GradientDescent(
            layers.Input(10) >> layers.Sigmoid(1),

            step=0.2,
            shuffle_data=True,
            batch_size=None,

            loss=custom_loss,
            target=tf.placeholder(tf.float32, shape=(None, 1, 1)),
        )
        x_train, _, y_train, _ = simple_classification()

        error_message = r"Cannot feed value of shape \(60, 1\) for Tensor"
        with self.assertRaisesRegexp(ValueError, error_message):
            optimizer.train(x_train, y_train, epochs=1)

        optimizer.train(x_train, y_train.reshape(-1, 1, 1), epochs=1)
Example #28
    def test_connect_cutted_layers_to_other_layers(self):
        network = layers.join(
            layers.Input(10, name='input-1'),
            layers.Relu(8, name='relu-0'),
            layers.Relu(5, name='relu-1'),
            layers.Relu(2, name='relu-2'),
            layers.Relu(1, name='relu-3'),
        )

        self.assertEqual(network.input_shape, (10, ))
        self.assertEqual(network.output_shape, (1, ))
        self.assertEqual(len(network), 5)

        cutted_network = network.start('relu-1').end('relu-2')
        self.assertEqual(cutted_network.input_shape, (8, ))
        self.assertEqual(cutted_network.output_shape, (2, ))
        self.assertEqual(len(cutted_network), 2)

        new_network = layers.join(
            layers.Input(8),
            cutted_network,
            layers.Sigmoid(11),
        )
        self.assertEqual(new_network.input_shape, (8, ))
        self.assertEqual(new_network.output_shape, (11, ))
        self.assertEqual(len(new_network), 4)

        predict = network.compile()
        x_test = asfloat(np.ones((7, 10)))
        y_predicted = predict(x_test)
        self.assertEqual(y_predicted.shape, (7, 1))

        predict = new_network.compile()
        x_test = asfloat(np.ones((7, 8)))
        y_predicted = predict(x_test)
        self.assertEqual(y_predicted.shape, (7, 11))
Example #29
    def test_tree_connection_structure(self):
        l0 = layers.Input(1)
        l1 = layers.Sigmoid(10)
        l2 = layers.Sigmoid(20)
        l3 = layers.Sigmoid(30)
        l4 = layers.Sigmoid(40)
        l5 = layers.Sigmoid(50)
        l6 = layers.Sigmoid(60)

        # Tree Structure:
        #
        # l0 - l1 - l5 - l6
        #        \
        #         l2 - l4
        #           \
        #            -- l3
        conn1 = layers.join(l0, l1, l5, l6)
        conn2 = layers.join(l0, l1, l2, l3)
        conn3 = layers.join(l0, l1, l2, l4)

        self.assertEqual(conn1.output_shape, as_tuple(60))
        self.assertEqual(conn2.output_shape, as_tuple(30))
        self.assertEqual(conn3.output_shape, as_tuple(40))
Example #30
# Assumed setup (not shown in the original snippet): a 13-feature
# regression dataset plus the imports this script relies on.
from sklearn import datasets, preprocessing
from sklearn.model_selection import train_test_split
from neupy import algorithms, layers, estimators

dataset = datasets.load_boston()  # 13 input features, matching Sigmoid(13)

data = dataset.data
target = dataset.target.reshape((-1, 1))

data_scaler = preprocessing.MinMaxScaler((-3, 3))
target_scaler = preprocessing.MinMaxScaler()

data = data_scaler.fit_transform(data)
target = target_scaler.fit_transform(target)

x_train, x_test, y_train, y_test = train_test_split(data,
                                                    target,
                                                    train_size=0.85)

cgnet = algorithms.Hessian(
    connection=[
        layers.Sigmoid(13),
        layers.Sigmoid(50),
        layers.Sigmoid(10),
        layers.Output(1),
    ],
    verbose=True,
)

cgnet.train(x_train, y_train, x_test, y_test, epochs=3)
y_predict = cgnet.predict(x_test)

y_test = target_scaler.inverse_transform(y_test.reshape((-1, 1)))
y_predict = target_scaler.inverse_transform(y_predict).T.round(1)
error = estimators.rmsle(y_predict, y_test)
print("RMSLE = {}".format(error))