示例#1
0
    def test_upscale_layer_shape(self):
        """Upscale multiplies each spatial dimension by its scale factor."""
        Case = namedtuple("Case", "scale expected_shape")
        testcases = (
            Case(scale=(2, 2), expected_shape=(None, 28, 28, 1)),
            Case(scale=(2, 1), expected_shape=(None, 28, 14, 1)),
            Case(scale=(1, 2), expected_shape=(None, 14, 28, 1)),
            Case(scale=(1, 1), expected_shape=(None, 14, 14, 1)),
            Case(scale=(1, 10), expected_shape=(None, 14, 140, 1)),
        )

        for case in testcases:
            # Fresh 14x14x1 input network for every scale factor.
            network = layers.join(
                layers.Input((14, 14, 1)),
                layers.Upscale(case.scale),
            )
            self.assertShapesEqual(
                network.output_shape,
                case.expected_shape,
                msg="scale: {}".format(case.scale),
            )
示例#2
0
    def test_dilated_convolution(self):
        """A 3x3 convolution with dilation=2 on a 6x6 input yields 2x2."""
        network = layers.join(
            layers.Input((6, 6, 1)),
            layers.Convolution((3, 3, 1), dilation=2, weight=1, bias=None),
        )

        x = asfloat(np.arange(36).reshape(1, 6, 6, 1))
        output = self.eval(network.output(x))

        self.assertShapesEqual(output.shape, (1, 2, 2, 1))
        self.assertShapesEqual(output.shape[1:], network.output_shape[1:])

        # With all-ones weights each output is the sum of 9 dilated taps.
        observed = output[0, :, :, 0]
        expected = np.array([
            [126, 135],  # every row value adds +1 per filter value (+9)
            [180, 189],  # every col value adds +6 per filter value (+54)
        ])
        np.testing.assert_array_almost_equal(observed, expected)
示例#3
0
    def test_tree_graph(self):
        """Forward propagation through a tree-shaped layer graph.

        The graph has one input (l0) and three leaf outputs (l3, l4, l6),
        so propagation must yield exactly three output tensors.
        """
        l0 = layers.Input(1)
        l1 = layers.Sigmoid(10)
        l2 = layers.Sigmoid(20)
        l3 = layers.Sigmoid(30)
        l4 = layers.Sigmoid(40)
        l5 = layers.Sigmoid(50)
        l6 = layers.Sigmoid(60)

        # Tree Structure:
        #
        # l0 - l1 - l5 - l6
        #        \
        #         l2 - l4
        #           \
        #            -- l3
        graph = LayerGraph()
        # Connection #1
        graph.connect_layers(l0, l1)
        graph.connect_layers(l1, l5)
        graph.connect_layers(l5, l6)
        # Connection #2
        graph.connect_layers(l1, l2)
        graph.connect_layers(l2, l3)
        # Connection #3
        graph.connect_layers(l2, l4)

        for layer in graph.forward_graph:
            layer.initialize()

        # Sub-graph ending at l6 must reduce to the single l0 -> l6 chain.
        subgraph = graph.subgraph_for_output(l6)
        self.assertEqual(1, len(subgraph.output_layers))
        self.assertIs(l6, subgraph.output_layers[0])
        self.assertEqual(1, len(subgraph.input_layers))
        self.assertIs(l0, subgraph.input_layers[0])

        test_input = asfloat(np.array([[1]]))
        expected_shapes = [(1, 30), (1, 40), (1, 60)]
        outputs = graph.propagate_forward(test_input)

        # Bug fix: the original zipped outputs with expected_shapes but
        # never used the paired value, so extra/missing outputs went
        # unnoticed. Output order is not guaranteed here, so assert the
        # count and membership instead.
        self.assertEqual(len(outputs), len(expected_shapes))
        for output in outputs:
            self.assertIn(self.eval(output).shape, expected_shapes)
示例#4
0
    def test_errdiff(self):
        """ErrDiffStepUpdate addon adapts the step during XOR training."""
        step_at_start = 0.3
        network = algorithms.GradientDescent(
            [
                layers.Input(2),
                layers.Tanh(3),
                layers.Tanh(1),
            ],
            batch_size='all',
            step=step_at_start,
            update_for_smaller_error=1.05,
            update_for_bigger_error=0.7,
            error_difference=1.04,
            addons=[algorithms.ErrDiffStepUpdate],
        )
        network.train(xor_input_train, xor_target_train, epochs=200)

        # The addon must have moved the step away from its initial value
        # while still converging on XOR.
        final_step = self.eval(network.variables.step)
        self.assertNotEqual(final_step, step_at_start)
        self.assertAlmostEqual(network.errors.last(), 0, places=4)
示例#5
0
    def test_quasi_newton_dfp(self):
        """Quasi-Newton training with the DFP update rule reaches ~0.92 AUC."""
        x_train, x_test, y_train, y_test = simple_classification()

        qnnet = algorithms.QuasiNewton(
            connection=[
                layers.Input(10),
                layers.Sigmoid(30, weight=init.Orthogonal()),
                layers.Sigmoid(1, weight=init.Orthogonal()),
            ],
            shuffle_data=True,
            verbose=False,
            update_function='dfp',
            h0_scale=2,
        )
        qnnet.train(x_train, y_train, x_test, y_test, epochs=10)
        result = qnnet.predict(x_test).round()

        # Bug fix: sklearn's roc_auc_score signature is
        # (y_true, y_score); the original call passed the predictions
        # as the first argument.
        roc_curve_score = metrics.roc_auc_score(y_test, result)
        self.assertAlmostEqual(0.92, roc_curve_score, places=2)
示例#6
0
    def test_max_pooling(self):
        """MaxPooling (2, 2) keeps the largest value of every 2x2 patch."""
        grid = asfloat(np.array([
            [1, 2, 3, -1],
            [4, -6, 3, 1],
            [0, 0, 1, 0],
            [0, -1, 0, 0],
        ])).reshape(1, 4, 4, 1)
        # Maxima of the four non-overlapping 2x2 quadrants.
        expected = asfloat(np.array([
            [4, 3],
            [0, 1],
        ])).reshape(1, 2, 2, 1)

        network = layers.join(
            layers.Input((4, 4, 1)),
            layers.MaxPooling((2, 2)),
        )
        result = self.eval(network.output(grid))
        np.testing.assert_array_almost_equal(result, expected)
示例#7
0
    def test_quasi_newton_bfgs(self):
        """Quasi-Newton training with the BFGS update rule reaches ~0.92 AUC."""
        x_train, x_test, y_train, y_test = simple_classification()

        qnnet = algorithms.QuasiNewton(
            connection=[
                layers.Input(10),
                layers.Sigmoid(30, weight=init.Orthogonal()),
                layers.Sigmoid(1, weight=init.Orthogonal()),
            ],
            shuffle_data=True,
            show_epoch='20 times',
            update_function='bfgs',
        )

        qnnet.train(x_train, y_train, x_test, y_test, epochs=50)
        result = qnnet.predict(x_test).round().astype(int)

        # Bug fix: sklearn's roc_auc_score signature is
        # (y_true, y_score); the original call passed the predictions
        # as the first argument.
        roc_curve_score = metrics.roc_auc_score(y_test, result)
        self.assertAlmostEqual(0.92, roc_curve_score, places=2)
示例#8
0
    def test_fail_many_to_many_connection(self):
        """Joining two multi-branch graphs cannot be resolved unambiguously."""
        # Left graph ends with two parallel branches...
        left = layers.join(
            layers.Input(10),
            layers.parallel(
                layers.Relu(5),
                layers.Relu(4),
            ),
        )
        # ...and the right graph also starts with two parallel branches.
        right = layers.join(
            layers.parallel(
                layers.Relu(5),
                layers.Relu(4),
            ),
            layers.Concatenate(),
        )

        error_message = "Cannot make many to many connection between graphs"
        with self.assertRaisesRegexp(LayerConnectionError, error_message):
            layers.join(left, right)
 def test_hessdiag(self):
     """Hessian-diagonal training should reach ~0.10 error in 10 epochs."""
     x_train, x_test, y_train, y_test = simple_classification()
     nw = algorithms.HessianDiagonal(
         connection=[
             layers.Input(10),
             layers.Sigmoid(20,
                            weight=init.Uniform(-1, 1),
                            bias=init.Uniform(-1, 1)),
             layers.Sigmoid(1,
                            weight=init.Uniform(-1, 1),
                            bias=init.Uniform(-1, 1)),
         ],
         step=0.1,
         shuffle_data=False,
         verbose=False,
         # NOTE(review): presumably clips tiny Hessian eigenvalues to keep
         # the update numerically stable -- confirm against the algorithm.
         min_eigval=0.01,
     )
     # Inputs are halved before training; likely to keep the sigmoids out
     # of saturation -- TODO confirm.
     nw.train(x_train / 2, y_train, epochs=10)
     self.assertAlmostEqual(0.10, nw.errors.last(), places=2)
示例#10
0
    def train_lstm(self, data, verbose=False, **lstm_options):
        """Train a small LSTM classifier on *data*; return test accuracy.

        ``data`` is the (x_train, x_test, y_train, y_test) tuple and any
        extra keyword arguments are forwarded to the LSTM layer.
        """
        x_train, x_test, y_train, y_test = data
        optimizer = algorithms.RMSProp(
            [
                layers.Input(self.n_time_steps),
                layers.Embedding(self.n_categories, 10),
                layers.LSTM(20, **lstm_options),
                layers.Sigmoid(1),
            ],
            step=0.05,
            verbose=verbose,
            batch_size=16,
            loss='binary_crossentropy',
        )
        optimizer.train(x_train, y_train, x_test, y_test, epochs=20)

        predictions = optimizer.predict(x_test).round()
        return (predictions.T == y_test).mean()
示例#11
0
File: playground.py  Project: altek42/inz
    def run(self):
        """Prepare data, train a Levenberg-Marquardt network, plot results."""
        self.prepareData()
        self.showData()

        model = algorithms.LevenbergMarquardt(
            [
                layers.Input(1),
                layers.Sigmoid(8),
                layers.Sigmoid(2),
            ],
            verbose=True,
            shuffle_data=True,
        )

        # Visualize the untrained network first, then train and re-plot.
        self.showNetData(model, 'dumb')
        inputs = np.array(self.input)
        targets = np.array(self.output)
        model.fit(inputs, targets, epochs=100)
        self.showNetData(model, 'trained')
        plt.show()
示例#12
0
    def test_embedding_layer(self):
        """Embedding maps integer indices to rows of its weight matrix."""
        weight = np.arange(10).reshape((5, 2))

        embedding = layers.Embedding(5, 2, weight=weight)
        connection = layers.join(layers.Input(1), embedding)
        connection.initialize()

        # Column vector of lookup indices: rows 0, 1 and 4 of the matrix.
        indices = asfloat(np.array([[0, 1, 4]]).T)
        expected = np.array([
            [[0, 1]],
            [[2, 3]],
            [[8, 9]],
        ])
        produced = connection.output(indices).eval()

        self.assertEqual(embedding.output_shape, (1, 2))
        np.testing.assert_array_equal(expected, produced)
示例#13
0
    def test_conv_with_custom_int_padding(self):
        """Integer padding=2 grows a 5x5 input to a 7x7 convolution output."""
        network = layers.join(
            layers.Input((5, 5, 1)),
            layers.Convolution((3, 3, 1), bias=0, weight=1, padding=2),
        )

        ones = asfloat(np.ones((1, 5, 5, 1)))
        # With unit weights each output counts how many real (non-padding)
        # input cells the 3x3 window overlaps.
        expected = np.array([
            [1, 2, 3, 3, 3, 2, 1],
            [2, 4, 6, 6, 6, 4, 2],
            [3, 6, 9, 9, 9, 6, 3],
            [3, 6, 9, 9, 9, 6, 3],
            [3, 6, 9, 9, 9, 6, 3],
            [2, 4, 6, 6, 6, 4, 2],
            [1, 2, 3, 3, 3, 2, 1],
        ]).reshape((1, 7, 7, 1))

        produced = self.eval(network.output(ones))
        np.testing.assert_array_almost_equal(expected, produced)
示例#14
0
    def test_change_output_layer(self):
        """``end`` yields a sub-network that stops at the named layer."""
        network = layers.join(
            layers.Input(10, name='input-1'),
            layers.Relu(5, name='relu-1'),
            layers.Relu(1, name='relu-2'),
        )

        self.assertShapesEqual(network.input_shape, (None, 10))
        self.assertShapesEqual(network.output_shape, (None, 1))
        self.assertEqual(len(network), 3)

        # Cutting at relu-1 keeps the input layer and drops relu-2.
        subnetwork = network.end('relu-1')
        self.assertShapesEqual(subnetwork.input_shape, (None, 10))
        self.assertShapesEqual(subnetwork.output_shape, (None, 5))
        self.assertEqual(len(subnetwork.layers), 2)

        x = asfloat(np.ones((7, 10)))
        prediction = self.eval(subnetwork.output(x))
        self.assertEqual(prediction.shape, (7, 5))
示例#15
0
    def test_cut_output_layers_in_sequence(self):
        """Chained ``end`` calls cut the network down to its input layer."""
        network = layers.join(
            layers.Input(10, name='input-1'),
            layers.Relu(5, name='relu-1'),
            layers.Relu(1, name='relu-2'),
        )

        self.assertShapesEqual(network.input_shape, (None, 10))
        self.assertShapesEqual(network.output_shape, (None, 1))
        self.assertEqual(len(network), 3)

        cutted_network = network.end('relu-1').end('input-1')
        self.assertShapesEqual(cutted_network.input_shape, (None, 10))
        self.assertShapesEqual(cutted_network.output_shape, (None, 10))
        self.assertEqual(len(cutted_network), 1)

        x_test = asfloat(np.ones((7, 10)))
        # Consistency fix: evaluate the symbolic output before checking
        # its numpy shape, as the sibling sub-network tests do; the
        # original compared the shape of an unevaluated tensor.
        y_predicted = self.eval(cutted_network.output(x_test))
        self.assertEqual(y_predicted.shape, (7, 10))
示例#16
0
    def test_storage_data_layer_compatibility(self):
        """Stored parameters load only when layer shapes match exactly.

        A mismatched input or output shape must raise
        ``ParameterLoaderError``; a fully matching spec returns ``None``.
        """
        connection = layers.Input(2) > layers.Sigmoid(3, name='sigm')
        sigmoid = connection.layer('sigm')

        with self.assertRaises(ParameterLoaderError):
            validate_layer_compatibility(sigmoid, {
                'name': 'sigm',
                'class_name': 'Sigmoid',
                'input_shape': (3,),  # wrong input shape
                'output_shape': (3,),
                'configs': {},
                'parameters': {
                    'weight': {'trainable': True, 'value': np.ones((2, 3))},
                    'bias': {'trainable': True, 'value': np.ones((3,))},
                }
            })

        with self.assertRaises(ParameterLoaderError):
            validate_layer_compatibility(sigmoid, {
                'name': 'sigm',
                'class_name': 'Sigmoid',
                'input_shape': (2,),
                'output_shape': (2,),  # wrong output shape
                'configs': {},
                'parameters': {
                    'weight': {'trainable': True, 'value': np.ones((2, 3))},
                    'bias': {'trainable': True, 'value': np.ones((3,))},
                }
            })

        # Matching shapes: validation succeeds and returns nothing.
        result = validate_layer_compatibility(sigmoid, {
            'name': 'sigm',
            'class_name': 'Sigmoid',
            'input_shape': (2,),
            'output_shape': (3,),
            'configs': {},
            'parameters': {
                'weight': {'trainable': True, 'value': np.ones((2, 3))},
                'bias': {'trainable': True, 'value': np.ones((3,))},
            }
        })
        self.assertIsNone(result)
示例#17
0
    def test_linear_search(self):
        """LinearSearch addon with golden-section and Brent line searches.

        Trains on the Boston housing data with a fixed random seed and
        checks that each search method reproduces a known RMSLE value.
        """
        # (search method name, expected RMSLE for this seed)
        methods = [
            ('golden', 0.36517048),
            ('brent', 0.36848962),
        ]

        for method_name, valid_error in methods:
            # Re-seed per method so both runs start from identical weights.
            np.random.seed(self.random_seed)

            # NOTE(review): load_boston is deprecated/removed in recent
            # scikit-learn releases -- confirm the pinned version.
            dataset = datasets.load_boston()
            data, target = dataset.data, dataset.target

            data_scaler = preprocessing.MinMaxScaler()
            target_scaler = preprocessing.MinMaxScaler()

            x_train, x_test, y_train, y_test = train_test_split(
                data_scaler.fit_transform(data),
                target_scaler.fit_transform(target.reshape(-1, 1)),
                test_size=0.15)

            cgnet = algorithms.GradientDescent(
                connection=[
                    layers.Input(13),
                    layers.Sigmoid(50),
                    layers.Sigmoid(1),
                ],
                batch_size='all',
                show_epoch=1,
                verbose=False,
                search_method=method_name,
                tol=0.1,
                addons=[algorithms.LinearSearch],
            )
            cgnet.train(x_train, y_train, x_test, y_test, epochs=10)
            y_predict = cgnet.predict(x_test)

            # Compare errors in the original (un-scaled) target units.
            error = errors.rmsle(
                asfloat(target_scaler.inverse_transform(y_test)),
                asfloat(target_scaler.inverse_transform(y_predict)),
            )
            error = self.eval(error)
            self.assertAlmostEqual(valid_error, error, places=5)
示例#18
0
    def test_custom_layer(self):
        """A custom layer's shape stays unknown until variables are created."""
        class NewLayer(layers.BaseLayer):
            # Minimal identity layer: the input shape starts fully unknown
            # and is only pinned down in create_variables.
            def __init__(self, *args, **kwargs):
                super(NewLayer, self).__init__(*args, **kwargs)
                # Rank-3 shape with every dimension undefined.
                self._input_shape = tf.TensorShape((None, None, None))

            def create_variables(self, input_shape):
                # Record the concrete shape once the network resolves it.
                self.input_shape = input_shape

            def output(self, input):
                # Identity: pass the tensor through untouched.
                return input

        new_layer = NewLayer()
        network = layers.join(layers.Input((10, 5)), new_layer)
        # Before create_variables: output shape unknown, input shape still
        # the fully-undefined placeholder.
        self.assertShapesEqual(network.output_shape, None)
        self.assertShapesEqual(new_layer.input_shape, (None, None, None))

        network.create_variables()
        # Output shape remains unknown (the layer never defines it), but
        # the input shape is now the concrete (None, 10, 5).
        self.assertShapesEqual(network.output_shape, None)
        self.assertShapesEqual(new_layer.input_shape, (None, 10, 5))
示例#19
0
    def test_deconvolution_tuple_padding(self):
        """Deconvolution with matching tuple padding restores the conv shape."""
        network = layers.join(
            layers.Input((10, 10, 3)),
            layers.Convolution((3, 3, 7), padding=(9, 3)),
            layers.Deconvolution((3, 3, 4), padding=(9, 3)),
        )

        # Per-layer symbolic shapes, normalized to plain tuples.
        per_layer = {
            layer: shape_to_tuple(shape)
            for layer, shape in network.output_shapes_per_layer.items()
        }
        self.assertSequenceEqual(
            per_layer, {
                network.layers[0]: (None, 10, 10, 3),
                network.layers[1]: (None, 26, 14, 7),
                network.layers[2]: (None, 10, 10, 4),
            })

        random_input = asfloat(np.random.random((1, 10, 10, 3)))
        produced = self.eval(network.output(random_input))

        self.assertEqual(produced.shape, (1, 10, 10, 4))
示例#20
0
    def test_graph_repr(self):
        """Graph repr lists every layer with its forward connections."""
        l1 = layers.Input(1)
        l2 = layers.Sigmoid(2)
        l3 = layers.Sigmoid(3)
        l4 = layers.Sigmoid(4)

        graph = LayerGraph()

        # Simple chain: l1 -> l2 -> l3 -> l4.
        graph.connect_layers(l1, l2)
        graph.connect_layers(l2, l3)
        graph.connect_layers(l3, l4)

        # Expected repr: (layer, [successors]) pairs; the last layer has
        # no outgoing connections.
        expected_output = textwrap.dedent("""
        [(Input(1), [Sigmoid(2)]),
         (Sigmoid(2), [Sigmoid(3)]),
         (Sigmoid(3), [Sigmoid(4)]),
         (Sigmoid(4), [])]
        """).strip()

        self.assertEqual(expected_output, repr(graph).strip())
示例#21
0
    def test_get_layer_by_name_from_connection(self):
        """``layer`` retrieves layers by name and validates its argument."""
        network = layers.join(
            layers.Input(10, name='input-1'),
            layers.Relu(8, name='relu-0'),
            layers.Relu(5, name='relu-1'),
        )

        first_relu = network.layer('relu-0')
        self.assertShapesEqual(first_relu.output_shape, (None, 8))

        second_relu = network.layer('relu-1')
        self.assertShapesEqual(second_relu.output_shape, (None, 5))

        # Unknown names raise NameError...
        message = "Cannot find layer with name 'some-layer-name'"
        with self.assertRaisesRegexp(NameError, message):
            network.layer('some-layer-name')

        # ...and non-string arguments raise ValueError.
        message = "Layer name expected to be a string"
        with self.assertRaisesRegexp(ValueError, message):
            network.layer(object)
示例#22
0
    def test_conv_shapes(self):
        """Predicted convolution output shapes match the evaluated outputs.

        Exercises every combination of stride and border mode (string,
        int and tuple forms).
        """
        border_modes = ['valid', 'full', 'half', 4, 5, (6, 3), (4, 4), (1, 1)]
        strides = [(1, 1), (2, 1), (2, 2)]
        x = asfloat(np.random.random((20, 2, 12, 11)))

        for stride, border_mode in product(strides, border_modes):
            input_layer = layers.Input((2, 12, 11))
            conv_layer = layers.Convolution((5, 3, 4),
                                            border_mode=border_mode,
                                            stride_size=stride)

            # NOTE(review): the `>` operator connects the layers as a side
            # effect; the `connection` variable itself is never used.
            connection = input_layer > conv_layer
            conv_layer.initialize()

            y = conv_layer.output(x).eval()
            actual_output_shape = as_tuple(y.shape[1:])

            self.assertEqual(actual_output_shape,
                             conv_layer.output_shape,
                             msg='border_mode={}'.format(border_mode))
示例#23
0
def train(X, Y):
    """Train a Momentum classifier on (X, Y) and return the fitted network.

    X is the feature matrix (one sample per row) and Y the one-hot encoded
    targets; the output layer width follows Y's column count.
    """
    environment.reproducible()

    n_features = X.shape[1]
    model = algorithms.Momentum(
        [
            layers.Input(n_features),
            layers.Relu(100),
            layers.Softmax(Y.shape[1]),
        ],
        error='categorical_crossentropy',
        step=0.01,
        verbose=True,
        shuffle_data=True,
        momentum=0.9,
        nesterov=True,
    )
    model.architecture()
    model.train(X, Y, epochs=20)
    return model
    def test_conv_shapes(self):
        """Predicted convolution output shapes match the evaluated outputs.

        Exercises every combination of stride and padding (string, int
        and tuple forms).
        """
        paddings = ['valid', 'full', 'half', 4, 5, (6, 3), (4, 4), (1, 1)]
        strides = [(1, 1), (2, 1), (2, 2)]
        x = asfloat(np.random.random((20, 2, 12, 11)))

        for stride, padding in product(strides, paddings):
            input_layer = layers.Input((2, 12, 11))
            conv_layer = layers.Convolution((5, 3, 4),
                                            padding=padding,
                                            stride=stride)

            # The `>` operator connects the two layers as a side effect;
            # the resulting connection object is intentionally discarded.
            input_layer > conv_layer
            conv_layer.initialize()

            y = conv_layer.output(x).eval()
            actual_output_shape = as_tuple(y.shape[1:])

            self.assertEqual(actual_output_shape,
                             conv_layer.output_shape,
                             msg='padding={}'.format(padding))
示例#25
0
    def test_cut_along_lines_basic(self):
        """CutLine markers isolate the middle section of the network."""
        network = algorithms.GradientDescent([
            layers.Input(5),

            surgery.CutLine(),

            layers.Sigmoid(10),
            layers.Sigmoid(20),
            layers.Sigmoid(30),

            surgery.CutLine(),

            layers.Sigmoid(1),
        ])

        # Cutting works both on the algorithm object and on its raw
        # connection; the middle segment is the three sigmoid layers.
        for connection in (network, network.connection):
            _, middle_layers, _ = surgery.cut_along_lines(connection)
            shapes = [layer.output_shape for layer in middle_layers]
            self.assertEqual(as_tuple(*shapes), (10, 20, 30))
示例#26
0
    def test_conv_with_custom_int_padding(self):
        """Integer padding=2 grows a 5x5 input to a 7x7 convolution output."""
        conv = layers.Convolution((3, 3, 1), bias=0, weight=1, padding=2)
        connection = layers.Input((5, 5, 1)) > conv
        connection.initialize()

        ones = asfloat(np.ones((1, 5, 5, 1)))
        # With unit weights each output counts how many real (non-padding)
        # input cells the 3x3 window overlaps.
        expected = np.array([
            [1, 2, 3, 3, 3, 2, 1],
            [2, 4, 6, 6, 6, 4, 2],
            [3, 6, 9, 9, 9, 6, 3],
            [3, 6, 9, 9, 9, 6, 3],
            [3, 6, 9, 9, 9, 6, 3],
            [2, 4, 6, 6, 6, 4, 2],
            [1, 2, 3, 3, 3, 2, 1],
        ]).reshape((1, 7, 7, 1))

        produced = self.eval(connection.output(ones))
        np.testing.assert_array_almost_equal(expected, produced)
示例#27
0
    def test_change_input_layer(self):
        """``start`` keeps the named layer and everything after it."""
        network = layers.join(
            layers.Input(10, name='input-1'),
            layers.Relu(5, name='relu-1'),
            layers.Relu(1, name='relu-2'),
        )

        self.assertEqual(network.input_shape, (10, ))
        self.assertEqual(network.output_shape, (1, ))
        self.assertEqual(len(network), 3)

        # Starting at relu-1 drops the explicit input layer; relu-1's own
        # input shape (10) becomes the network input.
        subnetwork = network.start('relu-1')
        self.assertEqual(subnetwork.input_shape, (10, ))
        self.assertEqual(subnetwork.output_shape, (1, ))
        self.assertEqual(len(subnetwork), 2)

        predict = subnetwork.compile()
        prediction = predict(asfloat(np.ones((7, 10))))
        self.assertEqual(prediction.shape, (7, 1))
示例#28
0
    def test_cut_input_layers_in_sequence(self):
        """Chained ``start`` calls leave only the final layer."""
        network = layers.join(
            layers.Input(10, name='input-1'),
            layers.Relu(5, name='relu-1'),
            layers.Relu(1, name='relu-2'),
        )

        self.assertEqual(network.input_shape, (10, ))
        self.assertEqual(network.output_shape, (1, ))
        self.assertEqual(len(network), 3)

        # Two consecutive cuts reduce the chain to relu-2 alone, whose
        # input shape (5) becomes the network input.
        tail = network.start('relu-1').start('relu-2')
        self.assertEqual(tail.input_shape, (5, ))
        self.assertEqual(tail.output_shape, (1, ))
        self.assertEqual(len(tail), 1)

        predict = tail.compile()
        prediction = predict(asfloat(np.ones((7, 5))))
        self.assertEqual(prediction.shape, (7, 1))
示例#29
0
def run_neural_net():
    """Train a conjugate-gradient network on the Boston housing data.

    Returns the RMSLE of the predictions on the held-out test set,
    computed in the original (un-scaled) target units.
    """
    import_modules()

    # NOTE(review): load_boston is deprecated/removed in recent
    # scikit-learn releases -- confirm the pinned version.
    dataset = datasets.load_boston()
    data, target = dataset.data, dataset.target

    data_scalar = preprocessing.MinMaxScaler()
    target_scalar = preprocessing.MinMaxScaler()

    data = data_scalar.fit_transform(data)
    target = target_scalar.fit_transform(target.reshape(-1, 1))

    environment.reproducible()

    x_train, x_test, y_train, y_test = train_test_split(data,
                                                        target,
                                                        train_size=0.85)

    cgnet = algorithms.ConjugateGradient(
        connection=[
            layers.Input(13),
            layers.Sigmoid(75),
            layers.Sigmoid(25),
            layers.Sigmoid(1),
        ],
        search_method='golden',
        show_epoch=1,
        verbose=True,
        addons=[algorithms.LinearSearch],
    )

    cgnet.train(x_train, y_train, x_test, y_test, epochs=30)

    plots.error_plot(cgnet)

    y_predict = cgnet.predict(x_test).round(1)
    # Bug fix: MinMaxScaler has no ``invers_transform`` method -- the
    # original call would raise AttributeError at runtime. The correct
    # name is ``inverse_transform``.
    error = rmsle(target_scalar.inverse_transform(y_test),
                  target_scalar.inverse_transform(y_predict))

    return error
示例#30
0
def ann_fs_fitness(solution):
    """Fitness function for hyper-parameter search over an ANN forecaster.

    ``solution`` is a (learning_rate, hidden_neuron_count) pair. Returns
    ``(fitness, finished)`` where fitness is 1/RMSE on the test split and
    finished flags whether RMSE dropped below the global ``err_threshold``.

    NOTE(review): relies on module-level globals (``X``, ``Y``,
    ``err_threshold``) and project helpers whose behavior is not visible
    here -- the comments below describe only the visible data flow.
    """
    learning_rate, neuron = solution

    # Feature selection and scaling pipeline (project helpers).
    fs_model = FeatureSelectionModel(X, Y)
    X_reduced = FeatureSelectionTransform(fs_model, X)
    xs, ys = BuildDataScale(X_reduced, Y)
    x_scale = DataScaleTransform(xs, X_reduced)
    y_scale = DataScaleTransform(ys, Y)
    # Input width follows the reduced feature count; hidden width is the
    # candidate neuron count; 5 outputs.
    layer = [
        layers.Input(len(X_reduced[0])),
        layers.Sigmoid(neuron),
        layers.Sigmoid(5),
    ]
    net = ANNForecastBuild(layer, learning_rate)
    start_time = time.time()
    x_train, x_test, y_train, y_test = TrainANN(net, x_scale, y_scale, e=100)
    elapsed_time = time.time() - start_time
    print(time.strftime("%H:%M:%S", time.gmtime(elapsed_time)))

    # Evaluation: undo the target scaling before computing metrics.
    y_pred = net.predict(x_test)
    y_pred = DataScaleInverse(ys, y_pred)
    y_true = DataScaleInverse(ys, y_test)
    #Evaluation(y_true, y_pred)
    r2 = r2_score(y_true, y_pred)
    # Per-output RMSE vector alongside the aggregate RMSE.
    m_rmse = np.sqrt(
        mean_squared_error(y_true, y_pred, multioutput='raw_values'))
    rmse = np.sqrt(mean_squared_error(y_true, y_pred))
    print('m_rmse : ' + str(m_rmse))
    print('r2 : ' + str(r2))
    print('rmse : ' + str(rmse))
    print('lr : ' + str(learning_rate) + ', hidden neuron : ' + str(neuron))
    output = rmse

    # Search stops once the RMSE falls below the global threshold.
    finished = output <= err_threshold
    #finished = output <= 0.01
    # Lower RMSE -> higher fitness.
    fitness = 1 / output
    print(finished)
    print(fitness)
    return fitness, finished