Example #1
    def test_json_storage(self):
        connection_1 = layers.join(
            layers.Input(10),
            [
                layers.Sigmoid(5),
                layers.Relu(5),
            ],
            layers.Elementwise(),
        )
        predict_1 = connection_1.compile()

        connection_2 = layers.join(
            layers.Input(10),
            [
                layers.Sigmoid(5),
                layers.Relu(5),
            ],
            layers.Elementwise(),
        )
        predict_2 = connection_2.compile()

        random_input = asfloat(np.random.random((13, 10)))
        random_output_1 = predict_1(random_input)
        random_output_2_1 = predict_2(random_input)

        # Outputs have to be different
        self.assertFalse(np.any(random_output_1 == random_output_2_1))

        with tempfile.NamedTemporaryFile() as temp:
            storage.save_json(connection_1, temp.name)
            storage.load_json(connection_2, temp.name)
            random_output_2_2 = predict_2(random_input)

            np.testing.assert_array_almost_equal(random_output_1,
                                                 random_output_2_2)
Example #2
    def test_simple_storage_hdf5(self):
        network_1 = layers.join(
            layers.Input(10),
            layers.parallel(
                layers.Sigmoid(5),
                layers.Relu(5),
            ),
            layers.Elementwise(),
        )
        network_2 = layers.join(
            layers.Input(10),
            layers.parallel(
                layers.Sigmoid(5),
                layers.Relu(5),
            ),
            layers.Elementwise(),
        )

        random_input = asfloat(np.random.random((13, 10)))
        random_output_1 = self.eval(network_1.output(random_input))
        random_output_2_1 = self.eval(network_2.output(random_input))

        # Outputs have to be different
        self.assertFalse(np.any(random_output_1 == random_output_2_1))

        with tempfile.NamedTemporaryFile() as temp:
            storage.save_hdf5(network_1, temp.name)
            storage.load_hdf5(network_2, temp.name)

            random_output_2_2 = self.eval(network_2.output(random_input))

            np.testing.assert_array_almost_equal(random_output_1,
                                                 random_output_2_2)
Example #3
    def test_many_to_many_graph(self):
        l0 = layers.Input(1)
        l11 = layers.Sigmoid(10)
        le = layers.Elementwise()
        l3 = layers.Sigmoid(30)
        l4 = layers.Sigmoid(40)
        l5 = layers.Sigmoid(50)
        l6 = layers.Sigmoid(60)
        l12 = layers.Sigmoid(70)

        # Graph Structure:
        # [l0, l12] -> [l6, l4, l3]
        #
        # l0 - l11 - l5 - l6
        #        \
        #   l12 - le - l4
        #           \
        #            -- l3
        graph = LayerGraph()

        # Connection #1
        graph.connect_layers(l0, l11)
        graph.connect_layers(l11, l5)
        graph.connect_layers(l5, l6)

        # Connection #2
        graph.connect_layers(l11, le)
        graph.connect_layers(le, l4)

        # Connection #3
        graph.connect_layers(le, l3)

        # Connection #4
        graph.connect_layers(l12, le)
Example #4
    def test_elementwise_in_connections(self):
        input_layer = layers.Input(2)
        hidden_layer_1 = layers.Relu(1,
                                     weight=init.Constant(1),
                                     bias=init.Constant(0))
        hidden_layer_2 = layers.Relu(1,
                                     weight=init.Constant(2),
                                     bias=init.Constant(0))
        elem_layer = layers.Elementwise(merge_function=tf.add)

        connection = layers.join(input_layer, hidden_layer_1, elem_layer)
        connection = layers.join(input_layer, hidden_layer_2, elem_layer)
        connection.initialize()

        self.assertEqual(elem_layer.output_shape, (1, ))

        test_input = asfloat(np.array([
            [0, 1],
            [-1, -1],
        ]))
        actual_output = self.eval(connection.output(test_input))
        expected_output = np.array([
            [3],
            [0],
        ])
        np.testing.assert_array_almost_equal(expected_output, actual_output)
Example #5
    def test_compilation_multiple_inputs(self):
        input_matrix = asfloat(np.ones((7, 10)))
        expected_output = np.ones((7, 5))

        network = layers.join(
            [[layers.Input(10)], [layers.Input(10)]],
            layers.Elementwise(),
            layers.Linear(5, weight=init.Constant(0.1), bias=None),
        )

        # Generated input variables
        predict = network.compile()
        actual_output = predict(input_matrix * 0.7, input_matrix * 0.3)
        np.testing.assert_array_almost_equal(actual_output, expected_output)

        # Pre-defined input variables
        input_variable_1 = T.matrix('x1')
        input_variable_2 = T.matrix('x2')

        predict = network.compile(input_variable_1, input_variable_2)
        actual_output = predict(input_matrix * 0.7, input_matrix * 0.3)
        np.testing.assert_array_almost_equal(actual_output, expected_output)
Example #6
    def test_elementwise_init_error(self):
        input_layer_1 = layers.Input(10)
        input_layer_2 = layers.Input(20)
        elem_layer = layers.Elementwise()

        layers.join(input_layer_1, elem_layer)
        with self.assertRaises(LayerConnectionError):
            layers.join(input_layer_2, elem_layer)
Example #7
    def test_elementwise_basic(self):
        elem_layer = layers.Elementwise(merge_function=tf.add)

        x1_matrix = asfloat(np.random.random((10, 2)))
        x2_matrix = asfloat(np.random.random((10, 2)))

        expected_output = x1_matrix + x2_matrix
        actual_output = self.eval(elem_layer.output(x1_matrix, x2_matrix))
        np.testing.assert_array_almost_equal(expected_output, actual_output)
Example #8
    def test_elementwise_exceptions(self):
        with self.assertRaises(ValueError):
            not_callable_object = (1, 2, 3)
            layers.Elementwise(merge_function=not_callable_object)

        with self.assertRaises(ValueError):
            layers.Elementwise(merge_function='wrong-func-name')

        message = "expected multiple inputs"
        with self.assertRaisesRegexp(LayerConnectionError, message):
            layers.join(layers.Input(5), layers.Elementwise('multiply'))

        inputs = layers.parallel(
            layers.Input(2),
            layers.Input(1),
        )
        error_message = "layer have incompatible shapes"
        with self.assertRaisesRegexp(LayerConnectionError, error_message):
            layers.join(inputs, layers.Elementwise('add'))
Example #9
def create_VIN(input_image_shape=(8, 8, 2), n_hidden_filters=150,
               n_state_filters=10, k=10):

    SamePadConvolution = partial(layers.Convolution, padding='SAME', bias=None)

    R = layers.join(
        layers.Input(input_image_shape, name='grid-input'),
        layers.Convolution((3, 3, n_hidden_filters),
                           padding='SAME',
                           weight=init.Normal(),
                           bias=init.Normal()),
        SamePadConvolution((1, 1, 1), weight=init.Normal()),
    )

    # Create shared weights
    q_weight = random_weight((3, 3, 1, n_state_filters))
    fb_weight = random_weight((3, 3, 1, n_state_filters))

    Q = R > SamePadConvolution((3, 3, n_state_filters), weight=q_weight)

    for i in range(k):
        V = Q > ChannelGlobalMaxPooling()
        Q = layers.join(
            # Convolve R and V separately and then add outputs together with
            # the Elementwise layer. This part of the code looks different
            # from the one that was used in the original VIN repo, but
            # it does the same operation.
            #
            # conv(x, w) == (conv(x1, w1) + conv(x2, w2))
            # where, x = concat(x1, x2)
            #        w = concat(w1, w2)
            #
            # See code sample from Github Gist: https://bit.ly/2zm3ntN
            [[
                R,
                SamePadConvolution((3, 3, n_state_filters), weight=q_weight)
            ], [
                V,
                SamePadConvolution((3, 3, n_state_filters), weight=fb_weight)
            ]],
            layers.Elementwise(merge_function=tf.add),
        )

    input_state_1 = layers.Input(UNKNOWN, name='state-1-input')
    input_state_2 = layers.Input(UNKNOWN, name='state-2-input')

    # Select the conv-net channels at the state position (S1, S2)
    VIN = [Q, input_state_1, input_state_2] > SelectValueAtStatePosition()

    # Set up the softmax layer that predicts actions based on the
    # (S1, S2) position. Each action encodes a specific direction:
    # N, S, E, W, NE, NW, SE, SW (in the same order)
    VIN = VIN > layers.Softmax(8, bias=None, weight=init.Normal())

    return VIN
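
The comments in the loop above rely on the identity conv(concat(x1, x2), concat(w1, w2)) == conv(x1, w1) + conv(x2, w2), where concatenation runs over the input-channel axis. A minimal sketch that checks this numerically (assuming TensorFlow 2 with eager execution; the array shapes below are made up for illustration):

import numpy as np
import tensorflow as tf

x1 = np.random.random((1, 8, 8, 1)).astype('float32')
x2 = np.random.random((1, 8, 8, 1)).astype('float32')
w1 = np.random.random((3, 3, 1, 4)).astype('float32')
w2 = np.random.random((3, 3, 1, 4)).astype('float32')

# Concatenate the inputs over the channel axis (NHWC) and
# the filters over the input-channel axis (HWIO)
x = np.concatenate([x1, x2], axis=3)
w = np.concatenate([w1, w2], axis=2)

joint = tf.nn.conv2d(x, w, strides=1, padding='SAME')
split = (tf.nn.conv2d(x1, w1, strides=1, padding='SAME')
         + tf.nn.conv2d(x2, w2, strides=1, padding='SAME'))

# The convolution sums over input channels, so both paths agree
np.testing.assert_array_almost_equal(joint.numpy(), split.numpy(), decimal=4)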
Example #10
    def test_one_to_one_graph(self):
        l0 = layers.Input(1)
        l1 = layers.Sigmoid(10)
        l2 = layers.Sigmoid(20)
        l3 = layers.Sigmoid(30)
        l41 = layers.Sigmoid(40)
        l42 = layers.Sigmoid(40)
        le = layers.Elementwise()

        # Graph Structure:
        # l0 -> le
        #
        # l0 - l1 - l41 -- le
        #        \        /
        #         l2 - l42
        #           \
        #            -- l3
        graph = LayerGraph()

        # Connection #1
        graph.connect_layers(l0, l1)
        graph.connect_layers(l1, l41)
        graph.connect_layers(l41, le)

        graph.connect_layers(l1, l2)
        graph.connect_layers(l2, l42)
        graph.connect_layers(l42, le)

        # Connection #2
        graph.connect_layers(l2, l3)

        for layer in graph.forward_graph:
            layer.initialize()

        subgraph = graph.subgraph_for_output(le)
        self.assertNotIn(l3, subgraph.forward_graph)
        self.assertEqual(6, len(subgraph))

        # Input layers
        self.assertEqual(1, len(subgraph.input_layers))
        self.assertEqual([l0], subgraph.input_layers)

        # Output layers
        self.assertEqual(1, len(subgraph.output_layers))
        self.assertEqual([le], subgraph.output_layers)

        x = T.matrix()
        y = subgraph.propagate_forward(x)
        test_input = asfloat(np.array([[1]]))
        output = y.eval({x: test_input})

        self.assertEqual((1, 40), output.shape)
Example #11
    def test_elementwise_basic(self):
        elem_layer = layers.Elementwise(merge_function=T.add)

        x1 = T.matrix()
        x2 = T.matrix()
        y = theano.function([x1, x2], elem_layer.output(x1, x2))

        x1_matrix = asfloat(np.random.random((10, 2)))
        x2_matrix = asfloat(np.random.random((10, 2)))

        expected_output = x1_matrix + x2_matrix
        actual_output = y(x1_matrix, x2_matrix)
        np.testing.assert_array_almost_equal(expected_output, actual_output)
Example #12
    def test_elementwise_in_network(self):
        network = layers.join(
            layers.Input(2),
            layers.parallel(
                layers.Relu(1, weight=1, bias=0),
                layers.Relu(1, weight=2, bias=0),
            ),
            layers.Elementwise('add'),
        )
        self.assertShapesEqual(network.input_shape, (None, 2))
        self.assertShapesEqual(network.output_shape, (None, 1))

        test_input = asfloat(np.array([[0, 1], [-1, -1]]))
        actual_output = self.eval(network.output(test_input))
        expected_output = np.array([[3, 0]]).T
        np.testing.assert_array_almost_equal(expected_output, actual_output)
Example #13
def create_VIN(input_image_shape=(2, 8, 8), n_hidden_filters=150,
               n_state_filters=10, k=10):

    HalfPaddingConv = partial(layers.Convolution, padding='half', bias=None)

    R = layers.join(
        layers.Input(input_image_shape, name='grid-input'),
        layers.Convolution((n_hidden_filters, 3, 3),
                           padding='half',
                           weight=init.Normal(),
                           bias=init.Normal()),
        HalfPaddingConv((1, 1, 1), weight=init.Normal()),
    )

    # Create shared weights
    q_weight = random_weight((n_state_filters, 1, 3, 3))
    fb_weight = random_weight((n_state_filters, 1, 3, 3))

    Q = R > HalfPaddingConv((n_state_filters, 3, 3), weight=q_weight)

    for i in range(k):
        V = Q > GlobalMaxPooling()
        Q = layers.join(
            # Convolve R and V separately and then add
            # outputs together with the Elementwise layer
            [[
                R,
                HalfPaddingConv((n_state_filters, 3, 3), weight=q_weight)
            ], [
                V,
                HalfPaddingConv((n_state_filters, 3, 3), weight=fb_weight)
            ]],
            layers.Elementwise(merge_function=T.add),
        )

    input_state_1 = layers.Input(10, name='state-1-input')
    input_state_2 = layers.Input(10, name='state-2-input')

    # Select the conv-net channels at the state position (S1, S2)
    VIN = [Q, input_state_1, input_state_2] > SelectValueAtStatePosition()

    # Set up the softmax layer that predicts actions based on the
    # (S1, S2) position. Each action encodes a specific direction:
    # N, S, E, W, NE, NW, SE, SW (in the same order)
    VIN = VIN > layers.Softmax(8, bias=None, weight=init.Normal())

    return VIN
Example #14
    def test_many_to_one_graph(self):
        l0 = layers.Input(1)
        l11 = layers.Sigmoid(10)
        le = layers.Elementwise()
        l3 = layers.Sigmoid(30)
        l4 = layers.Sigmoid(40)
        l5 = layers.Input(50)
        l6 = layers.Sigmoid(60)
        l12 = layers.Sigmoid(10)

        # Graph Structure:
        # [l0, l5] -> l6
        #
        # l0 - l11 - le - l6
        #           /
        #    l5 - l12 - l4
        #           \
        #            -- l3
        graph = LayerGraph()

        # Connection #1
        graph.connect_layers(l0, l11)
        graph.connect_layers(l11, le)
        graph.connect_layers(le, l6)

        graph.connect_layers(l5, l12)
        graph.connect_layers(l12, le)

        # Connection #2
        graph.connect_layers(l12, l4)

        # Connection #3
        graph.connect_layers(l12, l3)

        subgraph = graph.subgraph_for_output(l6)
        self.assertNotIn(l4, subgraph.forward_graph)
        self.assertNotIn(l3, subgraph.forward_graph)
        self.assertEqual(6, len(subgraph))

        # Input layers
        self.assertEqual(2, len(subgraph.input_layers))
        self.assertIn(l0, subgraph.input_layers)
        self.assertIn(l5, subgraph.input_layers)

        # Output layers
        self.assertEqual(1, len(subgraph.output_layers))
        self.assertEqual([l6], subgraph.output_layers)
Example #15
    def test_inline_connections_after_exception(self):
        # One possible solution is to reset all states in
        # connections/inline.py and, when we assign a new shape in
        # connections/graph.py:connect_layers, catch the error if it
        # happens and destroy the connection between the layers
        input_layer = layers.Input(2)

        with self.assertRaises(LayerConnectionError):
            # It is supposed to fail, because the layers in the parallel
            # connection are specified with different output shapes.
            input_layer > [layers.Sigmoid(20),
                           layers.Sigmoid(10)] > layers.Elementwise()

        # Issue #181. Bug present in NeuPy versions <= 0.7.2
        network = input_layer > layers.Softmax(5)
        self.assertEqual(network.input_shape, (2, ))
        self.assertEqual(network.output_shape, (5, ))
Example #16
    def test_elementwise_custom_function(self):
        def weighted_sum(a, b):
            return 0.2 * a + 0.8 * b

        network = layers.join(
            layers.Input(2),
            layers.parallel(
                layers.Relu(1, weight=1, bias=0),
                layers.Relu(1, weight=2, bias=0),
            ),
            layers.Elementwise(weighted_sum),
        )
        self.assertShapesEqual(network.input_shape, (None, 2))
        self.assertShapesEqual(network.output_shape, (None, 1))

        test_input = asfloat(np.array([[0, 1], [-1, -1]]))
        actual_output = self.eval(network.output(test_input))
        expected_output = np.array([[1.8, 0]]).T
        np.testing.assert_array_almost_equal(expected_output, actual_output)
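
The expected values above can be verified by hand with plain NumPy (a standalone sketch, independent of NeuPy, assuming that an integer weight stands for a constant initializer, which is how the Relu layers above are configured):

import numpy as np

x = np.array([[0, 1], [-1, -1]], dtype='float32')

# Relu(1, weight=1, bias=0): one output unit, every weight equal to 1
a = np.maximum(x.sum(axis=1, keepdims=True) * 1, 0)
# Relu(1, weight=2, bias=0): same unit, but every weight equal to 2
b = np.maximum(x.sum(axis=1, keepdims=True) * 2, 0)

print(0.2 * a + 0.8 * b)  # [[1.8], [0.]]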
Example #17
def ResidualUnit(n_in_filters, n_out_filters, stride, has_branch=False):
    main_branch = layers.join(
        layers.Convolution((n_in_filters, 1, 1), stride=stride, bias=None),
        layers.BatchNorm(),
        layers.Relu(),
        layers.Convolution((n_in_filters, 3, 3), padding=1, bias=None),
        layers.BatchNorm(),
        layers.Relu(),
        layers.Convolution((n_out_filters, 1, 1), bias=None),
        layers.BatchNorm(),
    )

    residual_branch = []
    if has_branch:
        residual_branch = layers.join(
            layers.Convolution((n_out_filters, 1, 1), stride=stride,
                               bias=None),
            layers.BatchNorm(),
        )

    return layers.join(
        [main_branch, residual_branch],
        layers.Elementwise() > layers.Relu(),
    )
Example #18
    def test_elementwise_output_shape_no_connection(self):
        elem_layer = layers.Elementwise()
        self.assertEqual(elem_layer.output_shape, None)
Example #19
    def test_elementwise_not_function(self):
        with self.assertRaises(ValueError):
            not_callable_object = (1, 2, 3)
            layers.Elementwise(merge_function=not_callable_object)
Example #20
def ResidualUnit(n_input_filters, stride=1, rate=1, has_branch=False,
                 name=None):

    def bn_name(index):
        return 'bn' + name + '_branch' + index

    def conv_name(index):
        return 'res' + name + '_branch' + index

    n_output_filters = 4 * n_input_filters
    main_branch = layers.join(
        # The main purpose of this 1x1 convolution layer is to
        # reduce the number of filters. For instance, a tensor with
        # 256 filters can be reduced to 64. This trick reduces the
        # amount of computation by a factor of 4.
        layers.Convolution(
            size=(1, 1, n_input_filters),
            stride=stride,
            bias=None,
            name=conv_name('2a'),
        ),
        layers.BatchNorm(name=bn_name('2a')),
        layers.Relu(),

        # This convolution layer applies a 3x3 filter in order
        # to extract features.
        layers.Convolution(
            (3, 3, n_input_filters),
            padding='same',
            dilation=rate,
            bias=None,
            name=conv_name('2b'),
        ),
        layers.BatchNorm(name=bn_name('2b')),
        layers.Relu(),

        # The last layer reverses the operation of the first one by
        # increasing the number of filters. For instance, the 64 filters
        # obtained earlier can be expanded back to 256 filters.
        layers.Convolution(
            (1, 1, n_output_filters),
            bias=None,
            name=conv_name('2c')
        ),
        layers.BatchNorm(name=bn_name('2c')),
    )

    if has_branch:
        residual_branch = layers.join(
            layers.Convolution(
                (1, 1, n_output_filters),
                stride=stride,
                bias=None,
                name=conv_name('1'),
            ),
            layers.BatchNorm(name=bn_name('1')),
        )
    else:
        # The identity layer defines the residual connection,
        # meaning that the output from this branch is equal
        # to its input

    return layers.join(
        # The outputs from the two branches are combined with a
        # simple elementwise sum. The main purpose of the
        # residual connection is to create shortcuts for the
        # gradient during backpropagation.
        (main_branch | residual_branch),
        layers.Elementwise(),
        layers.Relu(),
    )
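
A hypothetical usage sketch for the unit above: chaining several of these blocks, ResNet-style, into a small stem (the input shape, filter counts, and names below are made up for illustration):

resnet_stem = layers.join(
    layers.Input((224, 224, 3)),
    layers.Convolution((7, 7, 64), stride=2, bias=None, name='conv1'),
    layers.BatchNorm(name='bn_conv1'),
    layers.Relu(),

    # The first unit changes the number of output filters
    # (64 -> 256), so it needs the extra projection branch
    ResidualUnit(64, has_branch=True, name='2a'),
    ResidualUnit(64, name='2b'),
    ResidualUnit(64, name='2c'),
)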
Example #21
    def test_elementwise_single_input(self):
        elem_layer = layers.Elementwise()
        output = elem_layer.output(None)
        self.assertEqual(output, None)
Example #22
    def test_elementwise_initialize(self):
        # It is not supposed to fail when we initialize
        # it without a connection
        elem_layer = layers.Elementwise()
        elem_layer.initialize()