Example #1
    def test_apply_gradient_directly(self):
        """get_grad correctly fetches gradient Tensor from Variable"""
        w_0 = 6
        sgd = nn.optimizer.SGD(learning_rate=1.0)
        with nn.variable_scope(self.get_scope()):
            x = nn.Input(shape=(), name='x')
            w1 = nn.make_variable(
                name='w',
                shape=(),
                initializer=nn.initializer.ConstantInitializer(w_0),
            )
            y1 = w1 * x
            sgd.minimize(y1, w1)
            dy1dw1_1 = nn.get_tensor('{}_grad'.format(w1.name))
            dy1dw1_2 = nn.get_grad(w1)

            self.assertIs(dy1dw1_1, dy1dw1_2)

        with nn.variable_scope('{}/2'.format(self.get_scope())):
            w2 = nn.make_variable(
                name='w',
                shape=(),
                initializer=nn.initializer.ConstantInitializer(w_0),
            )
            y2 = w2 * x
            sgd.minimize(y2, w2)
            dy2dw2_1 = nn.get_tensor('{}_grad'.format(w2.name))
            dy2dw2_2 = nn.get_grad(w2)

            self.assertIs(dy2dw2_1, dy2dw2_2)
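
Note: the example above suggests that nn.get_grad is a thin convenience wrapper around nn.get_tensor, with a '_grad' suffix appended to the variable's scoped name. Below is a minimal, self-contained sketch of that convention using a plain dict in place of the library's real registry; every name here is an illustrative stand-in, not the nn library's actual internals.

# Illustrative stand-ins only; NOT the nn library's implementation.
_registry = {}

class FakeVariable(object):
    """Minimal stand-in exposing the `.name` attribute the test uses."""
    def __init__(self, name):
        self.name = name

def get_tensor(name):
    return _registry[name]

def get_grad(variable):
    # The same lookup the test performs manually via
    # nn.get_tensor('{}_grad'.format(w1.name))
    return get_tensor('{}_grad'.format(variable.name))

w = FakeVariable('scope/w')
_registry['scope/w_grad'] = grad = object()
assert get_grad(w) is grad is get_tensor('scope/w_grad')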
Example #2
    def test_fetch_output(self):
        """Output of Anonymous layer is fetched"""
        base_scope, scope = self.get_scope(), 'anon'
        with nn.variable_scope(base_scope):
            layer = nn.layer.Anonymous(exp='x', scope=scope)
            output_var = layer(nn.Input(shape=(3, 4)))
            _tensor = nn.get_tensor('{}/output'.format(scope))
            self.assertIs(output_var, _tensor)

        _tensor = nn.get_tensor('{}/{}/output'.format(base_scope, scope))
        self.assertIs(output_var, _tensor)
Example #3
    def test_get_tensor_from_current_scope(self):
        """get_tensor retrieve existing tensor"""
        scope, name = self.get_scope(), 'foo'
        with nn.variable_scope(scope):
            tensor = fixture.create_ones_tensor([3, 1], 'float32', name=name)
            self.assertIs(tensor, nn.get_tensor(name))

        self.assertIs(tensor, nn.get_tensor('{}/{}'.format(scope, name)))

        with self.assertRaises(ValueError):
            nn.get_tensor(name)
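
Note: the behavior pinned down here is the scope-aware lookup order: inside a scope a bare name resolves relative to the current scope, while outside the scope only the fully qualified 'scope/name' form works and the bare name raises ValueError. The following self-contained sketch reproduces that resolution order as inferred from the assertions; it is an assumption about the behavior, not the library's code.

# Assumed resolution order, inferred from the test above.
_tensors = {'my_scope/foo': 'tensor'}

def get_tensor(name, current_scope=None):
    # Try the name relative to the current scope first, then as a full path.
    if current_scope is not None:
        scoped = '{}/{}'.format(current_scope, name)
        if scoped in _tensors:
            return _tensors[scoped]
    if name in _tensors:
        return _tensors[name]
    raise ValueError('Tensor `{}` not found.'.format(name))

assert get_tensor('foo', current_scope='my_scope') == 'tensor'  # inside scope
assert get_tensor('my_scope/foo') == 'tensor'                   # full path
try:
    get_tensor('foo')  # bare name outside the scope
except ValueError:
    pass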
Example #4
    def test_conv2dtranspose(self):
        """Compnents consisting Conv2DTranspose layer are retrieved"""
        scope = self.get_scope()
        with nn.variable_scope(scope) as vs:
            input_ = nn.Input(shape=(32, 4, 8, 8), name='input')
            layer = nn.get_layer('Conv2D')(filter_height=4,
                                           filter_width=4,
                                           n_filters=4,
                                           strides=1,
                                           with_bias=True,
                                           name='Conv2D')
            output = layer(input_)
            layer = nn.get_layer('Conv2DTranspose')(filter_height=4,
                                                    filter_width=4,
                                                    n_filters=4,
                                                    strides=1,
                                                    with_bias=True,
                                                    output_shape=input_.shape,
                                                    name='Conv2DT')
            output = layer(output)
            filters = layer.get_parameter_variable('filter')
            bias = layer.get_parameter_variable('bias')

        with nn.variable_scope(vs, reuse=True):
            self.assertIs(filters, nn.get_variable('Conv2DT/filter'))
            self.assertIs(bias, nn.get_variable('Conv2DT/bias'))
            self.assertIs(output, nn.get_tensor('Conv2DT/output'))
            self.assertIs(input_, nn.get_input('input'))
Example #5
    def test_get_gradients(self):
        """gradients can be retrieved with get_tensor"""
        scope = self.get_scope()
        with nn.variable_scope(scope):
            xs = [nn.make_variable(
                name='x_{}'.format(i), shape=(), trainable=True,
            ) for i in range(5)]
            y = xs[0] + xs[1] + xs[2] + xs[3] + xs[4]
            grads_and_vars = nn.ops.compute_gradient(loss=y, wrt=xs)

            for i in range(5):
                grad = nn.get_tensor('{}_grad'.format(xs[i].name))
                self.assertIs(grads_and_vars[i][0], grad)

        for i in range(5):
            grad = nn.get_tensor('{}/{}_grad'.format(scope, xs[i].name))
            self.assertIs(grads_and_vars[i][0], grad)
Example #6
    def test_concat(self):
        """Components constituting the Concat layer are retrieved"""
        with nn.variable_scope(self.get_scope()):
            layer = nn.fetch_layer('Concat')(axis=1, scope='Concat')
            output = layer([
                nn.Input(shape=(32, 4), name='input_1'),
                nn.Input(shape=(32, 5), name='input_2'),
            ])
            self.assertIs(output, nn.get_tensor('Concat/output'))
Example #7
    def test_mean(self):
        """Compnents consisting Mean layer are retrieved"""
        scope = self.get_scope()
        with nn.variable_scope(scope):
            input_ = nn.Input(shape=(32, 4, 8, 8), name='input')
            layer = nn.get_layer('Mean')(axis=[1, 2], name='Mean')
            output = layer(input_)

            self.assertIs(output, nn.get_tensor('Mean/output'))
            self.assertIs(input_, nn.get_input('input'))
Example #8
    def test_tile(self):
        """Compnents consisting Tile layer are retrieved"""
        scope = self.get_scope()
        with nn.variable_scope(scope):
            input_ = nn.Input(shape=(32, ), name='input')
            layer = nn.get_layer('Tile')(pattern=(1, 2), name='Tile')
            output = layer(input_)

            self.assertIs(output, nn.get_tensor('Tile/output'))
            self.assertIs(input_, nn.get_input('input'))
Example #9
    def test_true_div(self):
        """Compnents consisting truediv layer are retrieved"""
        scope = self.get_scope()
        with nn.variable_scope(scope):
            input_ = nn.Input(shape=(32, 4, 8, 8), name='input')
            layer = nn.fetch_layer('TrueDiv')(denom=1.0, scope='TrueDiv')
            output = layer(input_)

            self.assertIs(output, nn.get_tensor('TrueDiv/output'))
            self.assertIs(input_, nn.get_input('input'))
Example #10
    def _test_layer_io(self, layer_name, input_shape):
        scope = '{}/{}'.format(self.get_scope(), layer_name)
        with nn.variable_scope(scope) as vs:
            input_ = nn.Input(shape=input_shape, name='input')
            layer = nn.fetch_layer(layer_name)(scope=layer_name)
            output = layer(input_)

        with nn.variable_scope(vs, reuse=True):
            output_tensor_name = '{}/output'.format(layer_name)
            self.assertIs(input_, nn.get_input('input'))
            self.assertIs(output, nn.get_tensor(output_tensor_name))
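
Note: this helper is presumably shared by several single-input, single-output layer tests. A hypothetical invocation is shown below; 'Flatten' is an assumed layer name used purely for illustration and is not confirmed to exist in the library.

# Hypothetical usage; 'Flatten' is an assumed layer name.
self._test_layer_io('Flatten', input_shape=(32, 4, 8, 8))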
Example #11
    def test_concat(self):
        """Components constituting the Concat layer are retrieved"""
        scope = self.get_scope()
        with nn.variable_scope(scope):
            input_ = [
                nn.Input(shape=(32, 4), name='input_1'),
                nn.Input(shape=(32, 5), name='input_2'),
            ]
            layer = nn.get_layer('Concat')(axis=1, name='Concat')
            output = layer(input_)
            self.assertIs(output, nn.get_tensor('Concat/output'))
Example #12
    def test_sub(self):
        """Components constituting the Sub layer are retrieved"""
        scope = self.get_scope()
        with nn.variable_scope(scope):
            input_ = [
                nn.Input(shape=(32, 4), name='input_1'),
                nn.Input(shape=(32, 4), name='input_2'),
            ]
            layer = nn.get_layer('Sub')(name='Sub')
            output = layer(input_)
            self.assertIs(output, nn.get_tensor('Sub/output'))
Example #13
    def test_dense(self):
        """Compnents consisting Dense layer are retrieved"""
        with nn.variable_scope(self.get_scope()) as vs:
            input_ = nn.Input(shape=(32, 5), name='input')
            layer = nn.fetch_layer('Dense')(
                n_nodes=4, with_bias=True, scope='Dense')
            output = layer(input_)
            weight = layer.get_parameter_variable('weight')
            bias = layer.get_parameter_variable('bias')

        with nn.variable_scope(vs, reuse=True):
            self.assertIs(weight, nn.get_variable('Dense/weight'))
            self.assertIs(bias, nn.get_variable('Dense/bias'))
            self.assertIs(output, nn.get_tensor('Dense/output'))
            self.assertIs(input_, nn.get_input('input'))
Example #14
    def test_conv2d(self):
        """Compnents consisting Conv2D layer are retrieved"""
        scope = self.get_scope()
        with nn.variable_scope(scope) as vs:
            input_ = nn.Input(shape=(32, 4, 8, 8), name='input')
            layer = nn.fetch_layer('Conv2D')(
                filter_height=4, filter_width=4, n_filters=4,
                strides=1, with_bias=True, name='Conv2D')
            output = layer(input_)
            filters = layer.get_parameter_variable('filter')
            bias = layer.get_parameter_variable('bias')

        with nn.variable_scope(vs, reuse=True):
            self.assertIs(filters, nn.get_variable('Conv2D/filter'))
            self.assertIs(bias, nn.get_variable('Conv2D/bias'))
            self.assertIs(output, nn.get_tensor('Conv2D/output'))
            self.assertIs(input_, nn.get_input('input'))
Example #15
    def test_bn(self):
        """Compnents consisting BatchNormalization layer are retrieved"""
        base_scope, scope = self.get_scope(), 'BN'
        with nn.variable_scope(base_scope) as vs:
            input_ = nn.Input(shape=(32, 4), name='input')
            layer = nn.fetch_layer('BatchNormalization')(scope=scope)
            output = layer(input_)
            mean = layer.get_parameter_variable('mean')
            var = layer.get_parameter_variable('var')
            scale = layer.get_parameter_variable('scale')
            offset = layer.get_parameter_variable('offset')
            updates = layer.get_update_operations()

        with nn.variable_scope(vs, reuse=True):
            self.assertIs(mean, nn.get_variable('BN/mean'))
            self.assertIs(var, nn.get_variable('BN/var'))
            self.assertIs(scale, nn.get_variable('BN/scale'))
            self.assertIs(offset, nn.get_variable('BN/offset'))
            self.assertIs(output, nn.get_tensor('BN/output'))
            self.assertIs(updates[0], nn.get_operation('BN/update_mean'))
            self.assertIs(updates[1], nn.get_operation('BN/update_var'))
Example #16
    def test_check_optimizer_slot(self):
        """Slot variables are updated when applying gradient directly"""
        name, b1_0, b2_0 = 'Adam', 0.5, 0.4
        opt = nn.optimizer.Adam(
            learning_rate=1.0, name=name, beta1=b1_0, beta2=b2_0)
        with nn.variable_scope(self.get_scope()) as vs:
            x = nn.Input(shape=(), name='x')
            w = nn.get_variable(shape=(), name='w')
            update_op = opt.minimize(w * x, w)

            vs.reuse_variables()
            dw = nn.get_tensor('{}_grad'.format(w.name))
            b1 = nn.get_variable('{}/beta1_power'.format(name))
            b2 = nn.get_variable('{}/beta2_power'.format(name))

        session = nn.Session()
        session.initialize()

        for i in range(10):
            b1_val, b2_val = session.run(outputs=[b1, b2])
            np.testing.assert_almost_equal(b1_val, b1_0 ** (i + 1))
            np.testing.assert_almost_equal(b2_val, b2_0 ** (i + 1))
            session.run(updates=update_op, givens={dw: 1.0})
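
Note: the expected values in the loop follow from Adam's bookkeeping: the beta1_power/beta2_power slot variables hold the running products beta**t, starting at beta**1, and are multiplied by beta once per update, so after i prior updates they read beta ** (i + 1). A standalone check of that arithmetic (no nn involved):

# The slots start at beta**1 and gain one factor of beta per update.
b1_0, b2_0 = 0.5, 0.4
b1_power, b2_power = b1_0, b2_0
for i in range(10):
    assert abs(b1_power - b1_0 ** (i + 1)) < 1e-12
    assert abs(b2_power - b2_0 ** (i + 1)) < 1e-12
    b1_power *= b1_0  # one optimizer step
    b2_power *= b2_0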
Example #17
    def test_apply_gradient_directly(self):
        """Variables can be updated by applying gradient directly"""
        w_0 = 6
        with nn.variable_scope(self.get_scope()):
            x = nn.Input(shape=(), name='x')
            w = nn.get_variable(
                name='w', shape=(),
                initializer=nn.initializer.ConstantInitializer(w_0),
            )
            y = w * x

            sgd = nn.optimizer.SGD(learning_rate=1.0)
            update_op = sgd.minimize(y, w)
            dw = nn.get_tensor('{}_grad'.format(w.name))

            session = nn.Session()
            session.initialize()

            val0 = 3.
            session.run(updates=update_op, givens={dw: val0})
            val_w = session.run(outputs=w)

            np.testing.assert_almost_equal(val_w, w_0 - val0)
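
Note: the final assertion is plain vanilla-SGD arithmetic: one update applies w <- w - learning_rate * grad, and the gradient is injected directly through givens, so the expected value is w_0 - val0.

# With w_0 = 6, learning_rate = 1.0 and an injected gradient of 3.0:
assert 6.0 - 1.0 * 3.0 == 3.0  # hence val_w == w_0 - val0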