Example #1
    def test_output_shape_format_nhwc(self):
        """`output_shape` is converted correctly"""
        h, w, c = 7, 5, 3
        strides, padding = 3, 'valid'
        input_shape_nhwc, input_shape_nchw = (32, 84, 84, 4), (32, 4, 84, 84)
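        # Use a channel-last input shape only for the TensorFlow backend in
        # NHWC mode; otherwise fall back to channel-first (NCHW)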
        if _FMT == 'NHWC' and _BE == 'tensorflow':
            input_shape = input_shape_nhwc
        else:
            input_shape = input_shape_nchw

        conv2d = nn.layer.Conv2D(
            filter_height=h, filter_width=w, n_filters=c,
            strides=strides, padding=padding)
        input_var = nn.Input(shape=input_shape, name='original_input')

        with nn.variable_scope(self.get_scope('convolution')):
            conv_output = conv2d(input_var)

        conv2d_t = nn.layer.Conv2DTranspose(
            filter_height=h, filter_width=w, n_filters=c,
            strides=strides, padding=padding,
            output_shape=input_shape_nhwc, output_shape_format='NHWC')

        with nn.variable_scope(self.get_scope('transpose')):
            conv_t_output = conv2d_t(conv_output)

        self._check(input_var, conv_t_output)
        self.assertIsNot(
            conv2d.get_parameter_variable('filter'),
            conv2d_t.get_parameter_variable('filter'),
        )
Example #2
    def test_get_grad(self):
        """get_grad correctly fetches gradient Tensor from Variable"""
        w_0 = 6
        sgd = nn.optimizer.SGD(learning_rate=1.0)
        with nn.variable_scope(self.get_scope()):
            x = nn.Input(shape=(), name='x')
            w1 = nn.make_variable(
                name='w',
                shape=(),
                initializer=nn.initializer.ConstantInitializer(w_0),
            )
            y1 = w1 * x
            sgd.minimize(y1, w1)
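            # minimize() registers the gradient tensor under the name
            # '<variable name>_grad', which get_grad() looks up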
            dy1dw1_1 = nn.get_tensor('{}_grad'.format(w1.name))
            dy1dw1_2 = nn.get_grad(w1)

            self.assertIs(dy1dw1_1, dy1dw1_2)

        with nn.variable_scope('{}/2'.format(self.get_scope())):
            w2 = nn.make_variable(
                name='w',
                shape=(),
                initializer=nn.initializer.ConstantInitializer(w_0),
            )
            y2 = w2 * x
            sgd.minimize(y2, w2)
            dy2dw2_1 = nn.get_tensor('{}_grad'.format(w2.name))
            dy2dw2_2 = nn.get_grad(w2)

            self.assertIs(dy2dw2_1, dy2dw2_2)
Example #3
    def test_original_filter(self):
        """Conv2DTranspose layer is built with provided original_filter"""
        h, w, c = 7, 5, 3
        strides, padding = 3, 'valid'
        if _FMT == 'NHWC' and _BE == 'tensorflow':
            input_shape = (32, 84, 84, 4)
        else:
            input_shape = (32, 4, 84, 84)

        conv2d = nn.layer.Conv2D(
            filter_height=h, filter_width=w, n_filters=c,
            strides=strides, padding=padding)

        input_var = nn.Input(shape=input_shape, name='original_input')

        with nn.variable_scope(self.get_scope('convolution')):
            conv_output = conv2d(input_var)

        original_filter = conv2d.get_parameter_variable('filter')
        conv2d_t = nn.layer.Conv2DTranspose(
            filter_height=h, filter_width=w, n_filters=c,
            strides=strides, padding=padding)
        conv2d_t.set_parameter_variables(
            original_input=input_var, original_filter=original_filter)

        with nn.variable_scope(self.get_scope('transpose')):
            conv_t_output = conv2d_t(conv_output)

        self._check(input_var, conv_t_output)
        self.assertIsNot(
            conv2d.get_parameter_variable('filter'),
            conv2d_t.get_parameter_variable('filter'),
        )
Example #4
    def test_conv2dtranspose(self):
        """Compnents consisting Conv2DTranspose layer are retrieved"""
        scope = self.get_scope()
        with nn.variable_scope(scope) as vs:
            input_ = nn.Input(shape=(32, 4, 8, 8), name='input')
            layer = nn.get_layer('Conv2D')(filter_height=4,
                                           filter_width=4,
                                           n_filters=4,
                                           strides=1,
                                           with_bias=True,
                                           name='Conv2D')
            output = layer(input_)
            layer = nn.get_layer('Conv2DTranspose')(filter_height=4,
                                                    filter_width=4,
                                                    n_filters=4,
                                                    strides=1,
                                                    with_bias=True,
                                                    output_shape=input_.shape,
                                                    name='Conv2DT')
            output = layer(output)
            filters = layer.get_parameter_variable('filter')
            bias = layer.get_parameter_variable('bias')

        with nn.variable_scope(vs, reuse=True):
            self.assertIs(filters, nn.get_variable('Conv2DT/filter'))
            self.assertIs(bias, nn.get_variable('Conv2DT/bias'))
            self.assertIs(output, nn.get_tensor('Conv2DT/output'))
            self.assertIs(input_, nn.get_input('input'))
Example #5
def _create_variables(shape=(3, 4)):
    init = nn.initializer.ConstantInitializer
    with nn.variable_scope('source'):
        src = nn.get_variable('source', shape=shape, initializer=init(value=1))
    with nn.variable_scope('target'):
        tgt = nn.get_variable('target', shape=shape, initializer=init(value=0))
    return src, tgt
Example #6
    def test_original_input(self):
        """Conv2DTranspose layer is built with provided original_input"""
        h, w, c = 7, 5, 3
        strides, padding = 3, 'valid'
        if _FMT == 'NHWC' and _BE == 'tensorflow':
            input_shape = (32, 84, 84, 4)
        else:
            input_shape = (32, 4, 84, 84)

        conv2d = nn.layer.Conv2D(filter_height=h,
                                 filter_width=w,
                                 n_filters=c,
                                 strides=strides,
                                 padding=padding)
        input_var = nn.Input(shape=input_shape, name='original_input')

        with nn.variable_scope(self.get_scope('convolution')):
            conv_output = conv2d(input_var)

        conv2d_t = nn.layer.Conv2DTranspose(filter_height=h,
                                            filter_width=w,
                                            n_filters=c,
                                            strides=strides,
                                            padding=padding)
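        # The registered original input provides the shape that the
        # transposed convolution output must match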
        conv2d_t.set_parameter_variables(original_input=input_var)

        with nn.variable_scope(self.get_scope('transpose')):
            conv_t_output = conv2d_t(conv_output)

        self._check(input_var, conv_t_output)
        self.assertIsNot(
            conv2d.get_parameter_variable('filter'),
            conv2d_t.get_parameter_variable('filter'),
        )
Example #7
    def test_variable_scope(self):
        """variable_scope stacks new scope on the current scope"""
        scopes = [self.get_scope('aaa'), 'bbb', 'ccc']
        with nn.variable_scope(scopes[0]):
            self._check_scope(scopes[0])
            with nn.variable_scope(scopes[1]):
                self._check_scope('/'.join(scopes[:2]))
                with nn.variable_scope(scopes[2]):
                    self._check_scope('/'.join(scopes[:3]))
            with nn.variable_scope(scopes[2]):
                self._check_scope('/'.join([scopes[0], scopes[2]]))
Example #8
    def _test_layer_io(self, layer_name, input_shape):
        scope = '{}/{}'.format(self.get_scope(), layer_name)
        with nn.variable_scope(scope) as vs:
            input_ = nn.Input(shape=input_shape, name='input')
            layer = nn.fetch_layer(layer_name)(scope=layer_name)
            output = layer(input_)

        with nn.variable_scope(vs, reuse=True):
            output_tensor_name = '{}/output'.format(layer_name)
            self.assertIs(input_, nn.get_input('input'))
            self.assertIs(output, nn.get_tensor(output_tensor_name))
Example #9
    def test_get_variable_scope(self):
        """get_variable_scope retrieves the current scope and reuse flag"""
        scope, reuse = self.get_scope(), False
        with nn.variable_scope(scope, reuse=reuse):
            vs = be.get_variable_scope()
            self._check_scope(expected=scope, found=vs.name)
            self._check_reuse(expected=reuse, found=vs.reuse)

        reuse = True
        with nn.variable_scope(scope, reuse=reuse):
            vs = be.get_variable_scope()
            self._check_scope(expected=scope, found=vs.name)
            self._check_reuse(expected=reuse, found=vs.reuse)
Example #10
    def test_get_variable_scope_stack(self):
        """get_variable_scope retrieves the current scope and reuse flag"""
        scopes, reuses = [self.get_scope(), 'aaa'], [False, False]
        with nn.variable_scope(scopes[0], reuse=reuses[0]):
            with nn.variable_scope(scopes[1], reuse=reuses[1]):
                vs = be.get_variable_scope()
                self._check_scope(expected='/'.join(scopes), found=vs.name)
                self._check_reuse(expected=reuses[1], found=vs.reuse)

        scopes, reuses = [self.get_scope(), 'bbb'], [False, True]
        with nn.variable_scope(scopes[0], reuse=reuses[0]):
            with nn.variable_scope(scopes[1], reuse=reuses[1]):
                vs = be.get_variable_scope()
                self._check_scope(expected='/'.join(scopes), found=vs.name)
                self._check_reuse(expected=reuses[1], found=vs.reuse)

        scopes, reuses = [self.get_scope(), 'ccc'], [True, False]
        with nn.variable_scope(scopes[0], reuse=reuses[0]):
            with nn.variable_scope(scopes[1], reuse=reuses[1]):
                vs = be.get_variable_scope()
                self._check_scope(expected='/'.join(scopes), found=vs.name)
                self._check_reuse(expected=reuses[1], found=vs.reuse)

        scopes, reuses = [self.get_scope(), 'ddd'], [True, True]
        with nn.variable_scope(scopes[0], reuse=reuses[0]):
            with nn.variable_scope(scopes[1], reuse=reuses[1]):
                vs = be.get_variable_scope()
                self._check_scope(expected='/'.join(scopes), found=vs.name)
                self._check_reuse(expected=reuses[1], found=vs.reuse)
Example #11
    def test_dense(self):
        """Compnents consisting Dense layer are retrieved"""
        with nn.variable_scope(self.get_scope()) as vs:
            input_ = nn.Input(shape=(32, 5), name='input')
            layer = nn.fetch_layer('Dense')(
                n_nodes=4, with_bias=True, scope='Dense')
            output = layer(input_)
            weight = layer.get_parameter_variable('weight')
            bias = layer.get_parameter_variable('bias')

        with nn.variable_scope(vs, reuse=True):
            self.assertIs(weight, nn.get_variable('Dense/weight'))
            self.assertIs(bias, nn.get_variable('Dense/bias'))
            self.assertIs(output, nn.get_tensor('Dense/output'))
            self.assertIs(input_, nn.get_input('input'))
Example #12
    def test_dynamic_initializer(self):
        """Initializers are correctly selected"""
        n_in, n_nodes, weight_val, bias_val = 4, 5, 13, 7
        with nn.variable_scope(self.get_scope()):
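            # Map each parameter name to an initializer configuration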
            dense = nn.layer.Dense(n_nodes=n_nodes,
                                   initializers={
                                       'weight': {
                                           'typename': 'ConstantInitializer',
                                           'args': {
                                               'value': weight_val,
                                           },
                                       },
                                       'bias': {
                                           'typename': 'ConstantInitializer',
                                           'args': {
                                               'value': bias_val,
                                           }
                                       }
                                   })
            dense(nn.Input(shape=(3, n_in)))

        session = nn.Session()
        session.initialize()

        weight, bias = session.run(outputs=[
            dense.get_parameter_variable('weight'),
            dense.get_parameter_variable('bias'),
        ])

        np.testing.assert_almost_equal(weight,
                                       weight_val * np.ones((n_in, n_nodes)))
        np.testing.assert_almost_equal(bias, bias_val * np.ones((n_nodes, )))
Example #13
    def test_list(self):
        """Anonymous layer can handle list inputs"""
        shape, dtype = (3, 4), 'float32'

        exp = 'x[0] + x[1]'
        input_vars = [
            nn.Input(shape=shape, dtype=dtype, name='input1'),
            nn.Input(shape=shape, dtype=dtype, name='input2')
        ]
        input_vals = [
            np.random.rand(*shape).astype(dtype),
            np.random.rand(*shape).astype(dtype),
        ]
        output_val = sum(input_vals)
        with nn.variable_scope(self.get_scope()):
            layer = nn.layer.Anonymous(exp)
            output_var = layer(*input_vars)

        session = nn.Session()
        output_val_ = session.run(outputs=output_var,
                                  inputs={
                                      input_vars[0]: input_vals[0],
                                      input_vars[1]: input_vals[1]
                                  })
        np.testing.assert_almost_equal(output_val, output_val_)
Example #14
    def test_retrieval(self):
        """Model is correctly retrieved"""
        scope1 = '{}/foo'.format(self.get_scope())
        scope2 = '{}/bar'.format(self.get_scope())

        name = 'baz'
        with nn.variable_scope(scope1):
            model1 = nn.model.Graph(name=name)
            self.assertIs(nn.get_model(name), model1)

        with nn.variable_scope(scope2):
            model2 = nn.model.Graph(name=name)
            self.assertIs(nn.get_model(name), model2)

        self.assertIs(nn.get_model('{}/{}'.format(scope1, name)), model1)
        self.assertIs(nn.get_model('{}/{}'.format(scope2, name)), model2)
Example #15
    def test_parameter_reuse_dense(self):
        """Dense layer is built using existing Variables"""
        shape = (3, 5)
        with nn.variable_scope(self.get_scope()):
            layer1 = nn.layer.Dense(n_nodes=5)
            layer2 = nn.layer.Dense(n_nodes=5)

            tensor = nn.Input(shape=shape)
            out1 = layer1(tensor)
            layer2.set_parameter_variables(
                weight=layer1.get_parameter_variable('weight'),
                bias=layer1.get_parameter_variable('bias'),
            )
            out2 = layer2(tensor)

        for key in ['weight', 'bias']:
            var1 = layer1.get_parameter_variable(key)
            var2 = layer2.get_parameter_variable(key)
            self.assertIs(var1, var2)

        session = nn.Session()
        session.initialize()

        input_val = np.random.rand(*shape)
        out1, out2 = session.run(outputs=[out1, out2],
                                 inputs={tensor: input_val})

        np.testing.assert_almost_equal(out1, out2)
Example #16
    def test_sync(self):
        """Sync operation copy model0 network parameters to model1"""
        shape = [None, 4, 84, 84] if _CONV == 'NCHW' else [None, 84, 84, 4]
        model_def = util.get_model_config('vanilla_dqn',
                                          input_shape=shape,
                                          n_actions=5)
        # Skip biases as they are deterministically initialized
        for cfg in model_def['args']['layer_configs']:
            if cfg['typename'] in ['Conv2D', 'Dense']:
                cfg['args']['with_bias'] = False

        with nn.variable_scope(self.get_scope()):
            dqn = _make_dqn(model_def=model_def)

        params0 = dqn.models['model_0'].get_parameters_to_train()
        params1 = dqn.models['model_1'].get_parameters_to_train()

        # check that variables are different before sync
        vars0 = dqn.session.run(outputs=params0)
        vars1 = dqn.session.run(outputs=params1)
        for var0, var1 in zip(vars0, vars1):
            with self.assertRaises(AssertionError):
                np.testing.assert_almost_equal(var0, var1)

        dqn.sync_network()

        # check that variables are equal after sync
        vars0 = dqn.session.run(outputs=params0)
        vars1 = dqn.session.run(outputs=params1)
        for var0, var1 in zip(vars0, vars1):
            np.testing.assert_almost_equal(var0, var1)
Example #17
    def test_parameter_reuse_conv2d(self):
        """Conv2D layer is built using existing Variables"""
        shape = (10, 11, 12, 13)
        with nn.variable_scope(self.get_scope()):
            layer1 = nn.layer.Conv2D(filter_width=5,
                                     filter_height=3,
                                     n_filters=4,
                                     strides=1,
                                     padding='VALID')
            layer2 = nn.layer.Conv2D(filter_width=5,
                                     filter_height=3,
                                     n_filters=4,
                                     strides=1,
                                     padding='VALID')

            tensor = nn.Input(shape=shape)
            out1 = layer1(tensor)
            layer2.set_parameter_variables(
                filter=layer1.get_parameter_variable('filter'),
                bias=layer1.get_parameter_variable('bias'))
            out2 = layer2(tensor)

        for key in ['filter', 'bias']:
            var1 = layer1.get_parameter_variable(key)
            var2 = layer2.get_parameter_variable(key)
            self.assertIs(var1, var2)

        session = nn.Session()
        session.initialize()

        input_val = np.random.rand(*shape)
        out1, out2 = session.run(outputs=[out1, out2],
                                 inputs={tensor: input_val})

        np.testing.assert_almost_equal(out1, out2)
Example #18
    def test_target_q(self):
        """Target Q value is correct"""
        discount, batch, n_actions = 0.9, 32, 5
        scale_reward, min_reward, max_reward = 3.0, -1, 1

        with nn.variable_scope(self.get_scope()):
            dqn = _make_dqn(discount_rate=discount,
                            n_actions=n_actions,
                            scale_reward=scale_reward)

        action_value_1 = np.random.randn(batch, n_actions)
        rewards = np.random.randn(batch, )
        terminal = np.random.randint(low=0, high=2, size=(batch, ))

        target_q = dqn.session.run(outputs=dqn.vars['target_q'],
                                   inputs={
                                       dqn.vars['action_value_1']:
                                       action_value_1,
                                       dqn.vars['reward']: rewards,
                                       dqn.vars['terminal']: terminal,
                                   })

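        # Reference computation: rewards are scaled, clipped to
        # [min_reward, max_reward], and the Bellman target
        # reward + discount * max_a Q(s', a) is applied to non-terminal steps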
        rewards = np.clip(rewards / scale_reward, min_reward, max_reward)
        max_action = np.max(action_value_1, axis=1)
        target_q_ = rewards + (1.0 - terminal) * discount * max_action
        target_q_ = np.tile(target_q_.reshape(-1, 1), (1, n_actions))

        np.testing.assert_almost_equal(target_q, target_q_, decimal=4)
Example #19
    def test_clip_gradients(self):
        """Gradients are clipped"""
        sgd = nn.optimizer.SGD(learning_rate=1.0)
        shape = (32, 1)
        with nn.variable_scope(self.get_scope()):
            initializer = nn.fetch_initializer(
                'UniformInitializer')(min_value=-3, max_value=3)
            x = nn.make_variable(
                name='x', shape=shape, initializer=initializer)
            y = nn.ops.reduce_sum(x * x / 2)
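            # dy/dx = x, so clipping the gradient to [-1, 1] bounds each SGD
            # step; entries already within [-1, 1] are driven exactly to zero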
            grads_and_vars = [
                (nn.ops.clip_by_value(grad, max_value=1, min_value=-1), var)
                for grad, var in nn.ops.compute_gradient(loss=y, wrt=x)
            ]
            op = sgd.apply_gradients(grads_and_vars)

        session = nn.Session()
        session.initialize()

        val_0 = session.run(outputs=x)
        session.run(updates=op)
        val_1_be = session.run(outputs=x)

        val_1_np = np.zeros(shape)
        val_1_np[val_0 > 1] = val_0[val_0 > 1] - 1
        val_1_np[val_0 < -1] = val_0[val_0 < -1] + 1
        np.testing.assert_almost_equal(val_1_be, val_1_np)
Example #20
    def test_concatenate_2d_axis_1_3(self):
        """Concatenate 3 2D tensors"""
        axis, shape1, shape2, shape3 = 1, (2, 5), (2, 3), (2, 4)
        with nn.variable_scope(self.get_scope(), reuse=False):
            var1 = nn.get_variable(name='var1', shape=shape1)
            var2 = nn.get_variable(name='var2', shape=shape2)
            var3 = nn.get_variable(name='var3', shape=shape3)
            conc_var = nn.layer.Concat(axis=axis).build([var1, var2, var3])

        session = nn.Session()
        val1, val2 = np.random.rand(*shape1), np.random.rand(*shape2)
        val3 = np.random.rand(*shape3)
        conc_val = session.run(outputs=conc_var,
                               givens={
                                   var1: val1,
                                   var2: val2,
                                   var3: val3
                               })
        expected = conc_val.shape
        found = conc_var.shape
        self.assertEqual(found, expected)

        expected = np.concatenate((val1, val2, val3), axis=axis)
        found = conc_val
        np.testing.assert_almost_equal(found, expected)
Example #21
    def test_dict(self):
        """Anonymous layer can handle dict inputs"""
        shape, dtype = (3, 4), 'float32'

        exp = 'x["0"] + x["1"]'
        input_vars = {
            '0': nn.Input(shape=shape, dtype=dtype, name='input1'),
            '1': nn.Input(shape=shape, dtype=dtype, name='input2')
        }
        input_vals = {
            '0': np.random.rand(*shape).astype(dtype),
            '1': np.random.rand(*shape).astype(dtype),
        }
        output_val = sum(input_vals.values())
        with nn.variable_scope(self.get_scope()):
            layer = nn.layer.Anonymous(exp)
            output_var = layer(**input_vars)

        session = nn.Session()
        output_val_ = session.run(outputs=output_var,
                                  inputs={
                                      input_vars['0']: input_vals['0'],
                                      input_vars['1']: input_vals['1']
                                  })
        np.testing.assert_almost_equal(output_val, output_val_)
Example #22
def _get_y_equals_x_squared(scope, x_init):
    with nn.variable_scope(scope):
        x = nn.make_variable(
            name='x', shape=(), trainable=True,
            initializer=nn.initializer.ConstantInitializer(x_init))
        y = x * x
    return x, y
Example #23
    def test_clip_variable_by_norm(self):
        """Test clip_by_norm with Variable"""
        shape, clip_norm = (3, 4), np.asarray(15, dtype='float32')
        with nn.variable_scope(self.get_scope()):
            input_ = nn.Input(shape, dtype='float32')
            clip_var = nn.Input(shape=[], dtype='float32')
            output = nn.ops.clip_by_norm(input_, clip_norm=clip_var)

        session = nn.Session()

        in_val = np.random.rand(*shape).astype('float32')
        out_val = session.run(
            outputs=output,
            givens={input_: in_val, clip_var: clip_norm}
        )
        np.testing.assert_almost_equal(out_val, in_val)

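        # Push the L2 norm above clip_norm; the output should be rescaled
        # to clip_norm * input / ||input||_2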
        in_val += 10.0
        out_val = session.run(
            outputs=output,
            givens={input_: in_val, clip_var: clip_norm}
        )
        l2_norm = np.sqrt(np.sum(in_val ** 2))
        np.testing.assert_almost_equal(
            out_val, clip_norm * in_val / l2_norm, decimal=3)
Example #24
    def test_normalization_2d(self):
        """Output of normalization layer is normalized on 2D array"""
        offset, scale, shape = 10.0, 1.0, (64, 16)

        with nn.variable_scope(self.id().replace('.', '/')):
            output_value = _normalize_batch(shape, offset, scale)

        self.assertEqual(output_value.shape, shape)

        for c in range(shape[1]):
            column = output_value[:, c]

            expected = offset
            found = column.mean()
            diff = abs(expected - found) / expected
            threshold = 0.01
            self.assertTrue(
                diff < threshold,
                'The mean value of column {} must be close enough to '
                'the target offset value. Expected: {}, Found: {}'
                .format(c, expected, found)
            )

            expected = scale
            found = column.std()
            diff = abs(expected - found) / expected
            threshold = 0.01
            self.assertTrue(
                diff < threshold,
                'The standard deviation of column {} must be close enough to '
                'the target scale value. Expected: {}, Found: {}'
                .format(c, expected, found)
            )
Example #25
    def test_normalization_4d_NCHW(self):
        """Output of normalization layer is normalized on 4D array"""
        luchador.set_nn_conv_format('NCHW')
        offset, scale, shape = 3.0, 7.0, (32, 16, 8, 7)
        with nn.variable_scope(self.get_scope()):
            output_value = _normalize_batch(shape, offset, scale)

        self.assertEqual(output_value.shape, shape)

        for c in range(shape[1]):
            channel = output_value[:, c]

            expected = offset
            found = channel.mean()
            diff = abs(expected - found) / expected
            threshold = 0.01
            self.assertTrue(
                diff < threshold,
                'The mean value of channel {} must be close enough to '
                'the target offset value. Expected: {}, Found: {}'.format(
                    c, expected, found))

            expected = scale
            found = channel.std()
            diff = abs(expected - found) / expected
            threshold = 0.01
            self.assertTrue(
                diff < threshold,
                'The standard deviation of channel {} must be close enough to '
                'the target scale value. Expected: {}, Found: {}'.format(
                    c, expected, found))
Example #26
    def test_clip_gradients(self):
        """Gradients are clipped"""
        sgd = nn.optimizer.SGD(learning_rate=1.0)
        shape = (32, 1)
        with nn.variable_scope(self.get_scope()):
            initializer = nn.get_initializer('UniformInitializer')(minval=-3,
                                                                   maxval=3)
            x = nn.get_variable(name='x', shape=shape, initializer=initializer)
            y = x * x / 2
            grads_and_vars = [
                (nn.clip_by_value(grad, max_value=1.0, min_value=-1.0), var)
                for grad, var in sgd.compute_gradients(y.sum(), wrt=x)
            ]
            op = sgd.apply_gradients(grads_and_vars)

        session = nn.Session()
        session.initialize()

        val_0 = session.run(outputs=x)
        session.run(updates=op)
        val_1_be = session.run(outputs=x)

        val_1_np = np.zeros(shape)
        val_1_np[val_0 > 1] = val_0[val_0 > 1] - 1
        val_1_np[val_0 < -1] = val_0[val_0 < -1] + 1
        np.testing.assert_almost_equal(val_1_be, val_1_np)
Example #27
    def test_sync_with_tau(self):
        """sync op copies weighted sum of source and target variables"""
        tau = 0.1
        with nn.variable_scope(self.get_scope()):
            source_var, target_var = _create_variables()
            sync_op = nn.ops.build_sync_op(
                [source_var], [target_var], tau=tau)

        session = nn.Session()
        session.initialize()

        src_val, tgt_val = session.run([source_var, target_var])
        self.assertTrue((src_val == 1).all())
        self.assertTrue((tgt_val == 0).all())

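        # Each sync moves the target a fraction tau toward the source:
        # target <- tau * source + (1 - tau) * target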
        for _ in range(10):
            expected = tau * src_val + (1 - tau) * tgt_val
            session.run(updates=sync_op)
            src_val, found = session.run([source_var, target_var])
            self.assertTrue((src_val == 1).all())
            self.assertTrue(
                np.square(expected - found).sum() < 1e-10,
                '\nExpected: \n{}\nFound: \n{}'.format(expected, found)
            )
            tgt_val = found
Example #28
    def test_clip_number_by_norm_with_axes(self):
        """Test clip_by_norm with axis"""
        shape, clip_norm, axis = (3, 4), 15.0, 1
        with nn.variable_scope(self.get_scope()):
            input_ = nn.Input(shape, dtype='float32')
            output = nn.ops.clip_by_norm(
                input_, clip_norm=clip_norm, axes=axis)

        session = nn.Session()

        in_val = np.random.rand(*shape).astype('float32')
        out_val = session.run(
            outputs=output,
            givens={input_: in_val}
        )
        np.testing.assert_almost_equal(out_val, in_val)

        in_val += 10.0
        out_val = session.run(
            outputs=output,
            givens={input_: in_val}
        )
        l2_norm = np.sqrt(np.sum(in_val ** 2, axis=axis, keepdims=True))
        np.testing.assert_almost_equal(
            out_val, clip_norm * in_val / l2_norm, decimal=3)
Example #29
def _exe(exp, input_val, scope):
    input_var = nn.Input(shape=input_val.shape, dtype=input_val.dtype)
    with nn.variable_scope(scope):
        layer = nn.layer.Anonymous(exp)
        output_var = layer(input_var)

    session = nn.Session()
    return session.run(outputs=output_var, inputs={input_var: input_val})
Example #30
    def test_get_variable_creates_variable(self):
        """get_variable create variable"""
        scope, var_name = self.get_scope(), 'foo'
        full_name = '/'.join([scope, var_name])

        self.assertTrue(full_name not in nn.core.base.wrapper._VARIABLES)
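        # With reuse=True, get_variable must not create a missing variable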
        with nn.variable_scope(scope, reuse=True):
            with self.assertRaises(ValueError):
                nn.get_variable(var_name)

        with nn.variable_scope(scope, reuse=False):
            variable = nn.get_variable(var_name, shape=[3, 1])
        self.assertTrue(full_name in nn.core.base.wrapper._VARIABLES)

        self.assertIs(variable, nn.core.base.wrapper._VARIABLES[full_name])
        with nn.variable_scope(scope, reuse=True):
            self.assertIs(variable, nn.get_variable(var_name))
Example #31
    def test_concatenate_raise_when_inconsistent_shape(self):
        """Concat raises ValueError on inconsistent shapes"""
        axis, shape1, shape2 = 1, (3, 5), (4, 6)
        with nn.variable_scope(self.get_scope(), reuse=False):
            input1 = nn.Input(shape=shape1, dtype='float32', name='name1')
            input2 = nn.Input(shape=shape2, dtype='float32', name='name2')
            with self.assertRaises(ValueError):
                nn.layer.Concat(axis=axis).build([input1, input2])
Example #32
    def _test_sub(self, noise, shape, mean, std, scope):
        with nn.variable_scope(scope):
            in_var = nn.Input(shape=shape, name='original_input')
            out_var_1 = noise - in_var
            out_var_2 = in_var - noise
        in_val = 10 * np.ones(shape=in_var.shape, dtype=in_var.dtype)
        self._validate(in_var, in_val, mean - 10, std, out_var_1)
        self._validate(in_var, in_val, 10 - mean, std, out_var_2)
Example #33
    def _test_add(self, noise, shape, mean, std, scope):
        with nn.variable_scope(scope):
            in_var = nn.Input(shape=shape, name='original_input')
            out_var_1 = noise + in_var
            out_var_2 = in_var + noise
        in_val = np.zeros(shape=in_var.shape, dtype=in_var.dtype)
        self._validate(in_var, in_val, mean, std, out_var_1)
        self._validate(in_var, in_val, mean, std, out_var_2)
Example #34
    def test_get_variable_creates_variable(self):
        """get_variable create variable"""
        scope, var_name = self.get_scope(), 'foo'
        full_name = '/'.join([scope, var_name])

        self.assertTrue(full_name not in _VARIABLES)
        with nn.variable_scope(scope, reuse=True):
            with self.assertRaises(ValueError):
                nn.get_variable(var_name)

        with nn.variable_scope(scope, reuse=False):
            variable = nn.make_variable(var_name, shape=[3, 1])
        self.assertTrue(full_name in _VARIABLES)

        self.assertIs(variable, _VARIABLES[full_name])
        with nn.variable_scope(scope, reuse=True):
            self.assertIs(variable, nn.get_variable(var_name))
Example #35
    def test_conv2d(self):
        """Compnents consisting Conv2D layer are retrieved"""
        scope = self.get_scope()
        with nn.variable_scope(scope) as vs:
            input_ = nn.Input(shape=(32, 4, 8, 8), name='input')
            layer = nn.fetch_layer('Conv2D')(
                filter_height=4, filter_width=4, n_filters=4,
                strides=1, with_bias=True, name='Conv2D')
            output = layer(input_)
            filters = layer.get_parameter_variable('filter')
            bias = layer.get_parameter_variable('bias')

        with nn.variable_scope(vs, reuse=True):
            self.assertIs(filters, nn.get_variable('Conv2D/filter'))
            self.assertIs(bias, nn.get_variable('Conv2D/bias'))
            self.assertIs(output, nn.get_tensor('Conv2D/output'))
            self.assertIs(input_, nn.get_input('input'))
Example #36
    def test_concat(self):
        """Components of the Concat layer are retrieved"""
        with nn.variable_scope(self.get_scope()):
            layer = nn.fetch_layer('Concat')(axis=1, scope='Concat')
            output = layer([
                nn.Input(shape=(32, 4), name='input_1'),
                nn.Input(shape=(32, 5), name='input_2'),
            ])
            self.assertIs(output, nn.get_tensor('Concat/output'))
Example #37
def _get_y_equals_x_squared(scope, x_init):
    with nn.variable_scope(scope):
        x = nn.get_variable(
            name='x',
            shape=(),
            trainable=True,
            initializer=nn.initializer.ConstantInitializer(x_init))
        y = x * x
    return x, y
Example #38
    def test_lrelu_parameter(self):
        """Parameter retrieval failes when train=False"""
        base_scope, scope, alpha, shape = self.get_scope(), 'foo', 0.1, (3, 4)
        with nn.variable_scope(base_scope):
            in_var = nn.Input(shape=shape)
            layer = nn.layer.LeakyReLU(alpha=alpha, train=False, scope=scope)
            layer(in_var)

        with self.assertRaises(KeyError):
            layer.get_parameter_variable('alpha')
Example #39
    def test_NHWC2NCHW(self):
        """Test NHWC to NCHW conversion"""
        shape = (32, 8, 7, 4)
        with nn.variable_scope(self.get_scope()):
            output_value, output_tensor = _convert(
                nn.layer.NHWC2NCHW(), shape)

        expected = (shape[0], shape[3], shape[1], shape[2])
        self.assertEqual(expected, output_value.shape)
        self.assertEqual(expected, output_tensor.shape)
Example #40
    def test_NCHW2NHWC(self):
        """Test NCHW to NHWC conversion"""
        shape = (32, 4, 7, 8)
        with nn.variable_scope(self.get_scope()):
            output_value, output_tensor = _convert(
                nn.layer.NCHW2NHWC(), shape)

        expected = (shape[0], shape[2], shape[3], shape[1])
        self.assertEqual(expected, output_value.shape)
        self.assertEqual(expected, output_tensor.shape)
Example #41
    def test_true_div(self):
        """Compnents consisting truediv layer are retrieved"""
        scope = self.get_scope()
        with nn.variable_scope(scope):
            input_ = nn.Input(shape=(32, 4, 8, 8), name='input')
            layer = nn.fetch_layer('TrueDiv')(denom=1.0, scope='TrueDiv')
            output = layer(input_)

            self.assertIs(output, nn.get_tensor('TrueDiv/output'))
            self.assertIs(input_, nn.get_input('input'))
Example #42
    def test_get_operation_from_current_scope(self):
        """get_operation retrieve existing operation"""
        scope, name = self.get_scope(), 'foo'
        with nn.variable_scope(scope):
            op = nn.Operation(op=None, name=name)
            self.assertIs(op, nn.get_operation(name))

        self.assertIs(op, nn.get_operation('{}/{}'.format(scope, name)))

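        # Outside the defining scope, the bare name no longer resolves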
        with self.assertRaises(ValueError):
            nn.get_operation(name)
Example #43
    def test_get_input_from_current_scope(self):
        """get_input retrieve existing input"""
        scope, name = self.get_scope(), 'foo'
        with nn.variable_scope(scope):
            op = nn.Input(shape=[], name=name)
            self.assertIs(op, nn.get_input(name))

        self.assertIs(op, nn.get_input('{}/{}'.format(scope, name)))

        with self.assertRaises(ValueError):
            nn.get_input(name)