def test_list(self):
    """Anonymous layer can handle list inputs"""
    shape, dtype = (3, 4), 'float32'
    exp = 'x[0] + x[1]'
    input_vars = [
        nn.Input(shape=shape, dtype=dtype, name='input1'),
        nn.Input(shape=shape, dtype=dtype, name='input2'),
    ]
    input_vals = [
        np.random.rand(3, 4).astype(dtype),
        np.random.rand(3, 4).astype(dtype),
    ]
    output_val = sum(input_vals)
    with nn.variable_scope(self.get_scope()):
        layer = nn.layer.Anonymous(exp)
        output_var = layer(*input_vars)

    session = nn.Session()
    output_val_ = session.run(
        outputs=output_var,
        inputs={
            input_vars[0]: input_vals[0],
            input_vars[1]: input_vals[1],
        },
    )
    np.testing.assert_almost_equal(output_val, output_val_)
def test_dict(self):
    """Anonymous layer can handle dict inputs"""
    shape, dtype = (3, 4), 'float32'
    exp = 'x["0"] + x["1"]'
    input_vars = {
        '0': nn.Input(shape=shape, dtype=dtype, name='input1'),
        '1': nn.Input(shape=shape, dtype=dtype, name='input2'),
    }
    input_vals = {
        '0': np.random.rand(3, 4).astype(dtype),
        '1': np.random.rand(3, 4).astype(dtype),
    }
    output_val = sum(input_vals.values())
    with nn.variable_scope(self.get_scope()):
        layer = nn.layer.Anonymous(exp)
        output_var = layer(**input_vars)

    session = nn.Session()
    output_val_ = session.run(
        outputs=output_var,
        inputs={
            input_vars['0']: input_vals['0'],
            input_vars['1']: input_vals['1'],
        },
    )
    np.testing.assert_almost_equal(output_val, output_val_)
def test_clip_variable_by_norm(self):
    """Test clip_by_norm with Variable"""
    shape, clip_norm = (3, 4), np.asarray(15, dtype='float32')
    with nn.variable_scope(self.get_scope()):
        input_ = nn.Input(shape, dtype='float32')
        clip_var = nn.Input(shape=[], dtype='float32')
        output = nn.ops.clip_by_norm(input_, clip_norm=clip_var)

    session = nn.Session()

    # Norm below the threshold: input passes through unchanged
    in_val = np.random.rand(*shape).astype('float32')
    out_val = session.run(
        outputs=output,
        givens={input_: in_val, clip_var: clip_norm},
    )
    np.testing.assert_almost_equal(out_val, in_val)

    # Norm above the threshold: input is rescaled to clip_norm
    in_val += 10.0
    out_val = session.run(
        outputs=output,
        givens={input_: in_val, clip_var: clip_norm},
    )
    l2_norm = np.sqrt(np.sum(in_val ** 2))
    np.testing.assert_almost_equal(
        out_val, clip_norm * in_val / l2_norm, decimal=3)
def test_concatenate_raise_when_inconsistent_shape(self):
    """Concat raises ValueError when given inconsistent shapes"""
    axis, shape1, shape2 = 1, (3, 5), (4, 6)
    with nn.variable_scope(self.get_scope(), reuse=False):
        input1 = nn.Input(shape=shape1, dtype='float32', name='name1')
        input2 = nn.Input(shape=shape2, dtype='float32', name='name2')
        with self.assertRaises(ValueError):
            nn.layer.Concat(axis=axis).build([input1, input2])
def test_concat(self):
    """Components constituting Concat layer are retrieved"""
    with nn.variable_scope(self.get_scope()):
        layer = nn.fetch_layer('Concat')(axis=1, scope='Concat')
        output = layer([
            nn.Input(shape=(32, 4), name='input_1'),
            nn.Input(shape=(32, 5), name='input_2'),
        ])
        self.assertIs(output, nn.get_tensor('Concat/output'))
def test_concat(self):
    """Components constituting Concat layer are retrieved"""
    scope = self.get_scope()
    with nn.variable_scope(scope):
        input_ = [
            nn.Input(shape=(32, 4), name='input'),
            nn.Input(shape=(32, 5), name='input'),
        ]
        layer = nn.get_layer('Concat')(axis=1, name='Concat')
        output = layer(input_)
        self.assertIs(output, nn.get_tensor('Concat/output'))
def test_sub(self):
    """Components constituting Sub layer are retrieved"""
    scope = self.get_scope()
    with nn.variable_scope(scope):
        input_ = [
            nn.Input(shape=(32, 4), name='input'),
            nn.Input(shape=(32, 4), name='input'),
        ]
        layer = nn.get_layer('Sub')(name='Sub')
        output = layer(input_)
        self.assertIs(output, nn.get_tensor('Sub/output'))
def _compute_cost(cost, target, logit):
    """Build the given cost on target/logit Inputs and compute its value"""
    target_tensor = nn.Input(shape=target.shape)
    logit_tensor = nn.Input(shape=logit.shape)
    output_tensor = cost.build(target_tensor, logit_tensor)

    session = nn.Session()
    output_value = session.run(
        outputs=output_tensor,
        inputs={
            logit_tensor: logit,
            target_tensor: target,
        },
    )
    return output_value
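# Usage sketch for the helper above (hypothetical; ``SomeCost`` stands in for
# any cost object exposing the ``build(target, logit)`` interface used above):
#
#     target = np.zeros((3, 4), dtype='float32')
#     logit = np.random.rand(3, 4).astype('float32')
#     cost_value = _compute_cost(SomeCost(), target, logit)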
def test_parameter_reuse_conv2d(self):
    """Conv2D layer is built using existing Variables"""
    shape = (10, 11, 12, 13)
    with nn.variable_scope(self.get_scope()):
        layer1 = nn.layer.Conv2D(
            filter_width=5, filter_height=3, n_filters=4,
            strides=1, padding='VALID')
        layer2 = nn.layer.Conv2D(
            filter_width=5, filter_height=3, n_filters=4,
            strides=1, padding='VALID')
        tensor = nn.Input(shape=shape)
        out1 = layer1(tensor)
        layer2.set_parameter_variables(
            filter=layer1.get_parameter_variable('filter'),
            bias=layer1.get_parameter_variable('bias'))
        out2 = layer2(tensor)

    for key in ['filter', 'bias']:
        var1 = layer1.get_parameter_variable(key)
        var2 = layer2.get_parameter_variable(key)
        self.assertIs(var1, var2)

    session = nn.Session()
    session.initialize()
    input_val = np.random.rand(*shape)
    out1, out2 = session.run(
        outputs=[out1, out2], inputs={tensor: input_val})
    np.testing.assert_almost_equal(out1, out2)
def test_dynamic_initializer(self):
    """Initializers are correctly selected"""
    n_in, n_nodes, weight_val, bias_val = 4, 5, 13, 7
    with nn.variable_scope(self.get_scope()):
        dense = nn.layer.Dense(
            n_nodes=5,
            initializers={
                'weight': {
                    'typename': 'ConstantInitializer',
                    'args': {
                        'value': weight_val,
                    },
                },
                'bias': {
                    'typename': 'ConstantInitializer',
                    'args': {
                        'value': bias_val,
                    },
                },
            },
        )
        dense(nn.Input(shape=(3, n_in)))

    session = nn.Session()
    session.initialize()
    weight, bias = session.run(outputs=[
        dense.get_parameter_variable('weight'),
        dense.get_parameter_variable('bias'),
    ])
    np.testing.assert_almost_equal(
        weight, weight_val * np.ones((n_in, n_nodes)))
    np.testing.assert_almost_equal(
        bias, bias_val * np.ones((n_nodes,)))
def test_original_input(self):
    """Conv2DTranspose layer is built with provided original_input"""
    h, w, c = 7, 5, 3
    strides, padding = 3, 'valid'
    if _FMT == 'NHWC' and _BE == 'tensorflow':
        input_shape = (32, 84, 84, 4)
    else:
        input_shape = (32, 4, 84, 84)

    conv2d = nn.layer.Conv2D(
        filter_height=h, filter_width=w, n_filters=c,
        strides=strides, padding=padding)
    input_var = nn.Input(shape=input_shape, name='original_input')
    with nn.variable_scope(self.get_scope('convolution')):
        conv_output = conv2d(input_var)

    conv2d_t = nn.layer.Conv2DTranspose(
        filter_height=h, filter_width=w, n_filters=c,
        strides=strides, padding=padding)
    conv2d_t.set_parameter_variables(original_input=input_var)
    with nn.variable_scope(self.get_scope('transpose')):
        conv_t_output = conv2d_t(conv_output)

    self._check(input_var, conv_t_output)
    self.assertIsNot(
        conv2d.get_parameter_variable('filter'),
        conv2d_t.get_parameter_variable('filter'),
    )
def test_parameter_reuse_dense(self):
    """Dense layer is built using existing Variables"""
    shape = (3, 5)
    with nn.variable_scope(self.get_scope()):
        layer1 = nn.layer.Dense(n_nodes=5)
        layer2 = nn.layer.Dense(n_nodes=5)
        tensor = nn.Input(shape=shape)
        out1 = layer1(tensor)
        layer2.set_parameter_variables(
            weight=layer1.get_parameter_variable('weight'),
            bias=layer1.get_parameter_variable('bias'),
        )
        out2 = layer2(tensor)

    for key in ['weight', 'bias']:
        var1 = layer1.get_parameter_variable(key)
        var2 = layer2.get_parameter_variable(key)
        self.assertIs(var1, var2)

    session = nn.Session()
    session.initialize()
    input_val = np.random.rand(*shape)
    out1, out2 = session.run(
        outputs=[out1, out2], inputs={tensor: input_val})
    np.testing.assert_almost_equal(out1, out2)
def test_conv2dtranspose(self):
    """Components constituting Conv2DTranspose layer are retrieved"""
    scope = self.get_scope()
    with nn.variable_scope(scope) as vs:
        input_ = nn.Input(shape=(32, 4, 8, 8), name='input')
        layer = nn.get_layer('Conv2D')(
            filter_height=4, filter_width=4, n_filters=4,
            strides=1, with_bias=True, name='Conv2D')
        output = layer(input_)
        layer = nn.get_layer('Conv2DTranspose')(
            filter_height=4, filter_width=4, n_filters=4,
            strides=1, with_bias=True,
            output_shape=input_.shape, name='Conv2DT')
        output = layer(output)
        filters = layer.get_parameter_variable('filter')
        bias = layer.get_parameter_variable('bias')

    with nn.variable_scope(vs, reuse=True):
        self.assertIs(filters, nn.get_variable('Conv2DT/filter'))
        self.assertIs(bias, nn.get_variable('Conv2DT/bias'))
        self.assertIs(output, nn.get_tensor('Conv2DT/output'))
        self.assertIs(input_, nn.get_input('input'))
def test_apply_gradient_directly(self):
    """get_grad correctly fetches gradient Tensor from Variable"""
    w_0 = 6
    sgd = nn.optimizer.SGD(learning_rate=1.0)
    with nn.variable_scope(self.get_scope()):
        x = nn.Input(shape=(), name='x')
        w1 = nn.make_variable(
            name='w', shape=(),
            initializer=nn.initializer.ConstantInitializer(w_0),
        )
        y1 = w1 * x
        sgd.minimize(y1, w1)
        dy1dw1_1 = nn.get_tensor('{}_grad'.format(w1.name))
        dy1dw1_2 = nn.get_grad(w1)
        self.assertIs(dy1dw1_1, dy1dw1_2)

    with nn.variable_scope('{}/2'.format(self.get_scope())):
        w2 = nn.make_variable(
            name='w', shape=(),
            initializer=nn.initializer.ConstantInitializer(w_0),
        )
        y2 = w2 * x
        sgd.minimize(y2, w2)
        dy2dw2_1 = nn.get_tensor('{}_grad'.format(w2.name))
        dy2dw2_2 = nn.get_grad(w2)
        self.assertIs(dy2dw2_1, dy2dw2_2)
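# Note (inferred from the test above): ``nn.get_grad(var)`` is expected to
# return the same Tensor that ``minimize`` registers under the name
# ``<variable_name>_grad``, so both lookups should yield an identical object.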
def test_clip_number_by_norm_with_axes(self):
    """Test clip_by_norm with axis"""
    shape, clip_norm, axis = (3, 4), 15.0, 1
    with nn.variable_scope(self.get_scope()):
        input_ = nn.Input(shape, dtype='float32')
        output = nn.ops.clip_by_norm(
            input_, clip_norm=clip_norm, axes=axis)

    session = nn.Session()

    # Norm below the threshold: input passes through unchanged
    in_val = np.random.rand(*shape).astype('float32')
    out_val = session.run(outputs=output, givens={input_: in_val})
    np.testing.assert_almost_equal(out_val, in_val)

    # Norm above the threshold: input is rescaled along the given axis
    in_val += 10.0
    out_val = session.run(outputs=output, givens={input_: in_val})
    l2_norm = np.sqrt(np.sum(in_val ** 2, axis=axis, keepdims=True))
    np.testing.assert_almost_equal(
        out_val, clip_norm * in_val / l2_norm, decimal=3)
def _exe(exp, input_val, scope):
    """Evaluate an Anonymous layer built from the given expression"""
    input_var = nn.Input(shape=input_val.shape, dtype=input_val.dtype)
    with nn.variable_scope(scope):
        layer = nn.layer.Anonymous(exp)
        output_var = layer(input_var)
    session = nn.Session()
    return session.run(outputs=output_var, inputs={input_var: input_val})
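# Usage sketch for ``_exe`` (hypothetical expression and values; assumes ``x``
# denotes the layer input in the expression syntax of nn.layer.Anonymous):
#
#     in_val = np.random.rand(3, 4).astype('float32')
#     out_val = _exe('2 * x', in_val, 'anon_scope')
#     np.testing.assert_almost_equal(out_val, 2 * in_val)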
def _test_sub(self, noise, shape, mean, std, scope):
    """Noise and Input can be subtracted in either operand order"""
    with nn.variable_scope(scope):
        in_var = nn.Input(shape=shape, name='original_input')
        out_var_1 = noise - in_var
        out_var_2 = in_var - noise
    in_val = 10 * np.ones(shape=in_var.shape, dtype=in_var.dtype)
    self._validate(in_var, in_val, mean - 10, std, out_var_1)
    self._validate(in_var, in_val, 10 - mean, std, out_var_2)
def _test_add(self, noise, shape, mean, std, scope):
    """Noise and Input can be added in either operand order"""
    with nn.variable_scope(scope):
        in_var = nn.Input(shape=shape, name='original_input')
        out_var_1 = noise + in_var
        out_var_2 = in_var + noise
    in_val = np.zeros(shape=in_var.shape, dtype=in_var.dtype)
    self._validate(in_var, in_val, mean, std, out_var_1)
    self._validate(in_var, in_val, mean, std, out_var_2)
def _test_dot(self, shape0, shape1):
    with nn.variable_scope(self.get_scope()):
        in_var0 = nn.Input(shape=shape0)
        in_var1 = nn.Input(shape=shape1)
        out_var = nn.ops.dot(in_var0, in_var1)

    in_val0 = np.random.random(size=shape0)
    in_val1 = np.random.random(size=shape1)
    session = nn.Session()
    out_val = session.run(
        outputs=out_var,
        inputs={in_var0: in_val0, in_var1: in_val1},
    )
    np_val = np.dot(in_val0, in_val1)
    np.testing.assert_almost_equal(out_val, np_val, decimal=3)
    self._verify_shape(out_val.shape, out_var.shape)
def _test_minimum(self, value0, value1):
    with nn.variable_scope(self.get_scope()):
        input0 = nn.Input(shape=value0.shape, dtype=value0.dtype, name='0')
        input1 = nn.Input(shape=value1.shape, dtype=value1.dtype, name='1')
        output0 = nn.minimum(input0, input1)
        output1 = nn.minimum(input1, input0)

    session = nn.Session()
    val0, val1 = session.run(
        outputs=[output0, output1],
        inputs={
            input0: value0,
            input1: value1,
        },
    )
    np.testing.assert_almost_equal(val0, np.minimum(value0, value1))
    np.testing.assert_almost_equal(val1, np.minimum(value1, value0))
def test_plrelu_parameter(self):
    """Parameter retrieval succeeds when train=True"""
    base_scope, scope, alpha, shape = self.get_scope(), 'foo', 0.1, (3, 4)
    with nn.variable_scope(base_scope):
        in_var = nn.Input(shape=shape)
        layer = nn.layer.LeakyReLU(alpha=alpha, train=True, scope=scope)
        layer(in_var)
    self.assertIs(
        layer.get_parameter_variable('alpha'),
        nn.get_variable('{}/{}/alpha'.format(base_scope, scope)))
def test_mean(self):
    """Components constituting Mean layer are retrieved"""
    scope = self.get_scope()
    with nn.variable_scope(scope):
        input_ = nn.Input(shape=(32, 4, 8, 8), name='input')
        layer = nn.get_layer('Mean')(axis=[1, 2], name='Mean')
        output = layer(input_)
        self.assertIs(output, nn.get_tensor('Mean/output'))
        self.assertIs(input_, nn.get_input('input'))
def test_tile(self):
    """Components constituting Tile layer are retrieved"""
    scope = self.get_scope()
    with nn.variable_scope(scope):
        input_ = nn.Input(shape=(32,), name='input')
        layer = nn.get_layer('Tile')(pattern=(1, 2), name='Tile')
        output = layer(input_)
        self.assertIs(output, nn.get_tensor('Tile/output'))
        self.assertIs(input_, nn.get_input('input'))
def test_lrelu_parameter(self):
    """Parameter retrieval fails when train=False"""
    base_scope, scope, alpha, shape = self.get_scope(), 'foo', 0.1, (3, 4)
    with nn.variable_scope(base_scope):
        in_var = nn.Input(shape=shape)
        layer = nn.layer.LeakyReLU(alpha=alpha, train=False, scope=scope)
        layer(in_var)
    with self.assertRaises(KeyError):
        layer.get_parameter_variable('alpha')
def test_true_div(self):
    """Components constituting TrueDiv layer are retrieved"""
    scope = self.get_scope()
    with nn.variable_scope(scope):
        input_ = nn.Input(shape=(32, 4, 8, 8), name='input')
        layer = nn.fetch_layer('TrueDiv')(denom=1.0, scope='TrueDiv')
        output = layer(input_)
        self.assertIs(output, nn.get_tensor('TrueDiv/output'))
        self.assertIs(input_, nn.get_input('input'))
def test_fetch_output(self):
    """Output of Anonymous layer is fetched"""
    base_scope, scope = self.get_scope(), 'anon'
    with nn.variable_scope(base_scope):
        layer = nn.layer.Anonymous(exp='x', scope=scope)
        output_var = layer(nn.Input(shape=(3, 4)))
        # Relative name resolves inside the base scope
        _tensor = nn.get_tensor('{}/output'.format(scope))
        self.assertIs(output_var, _tensor)
    # Fully qualified name resolves outside the scope
    _tensor = nn.get_tensor('{}/{}/output'.format(base_scope, scope))
    self.assertIs(output_var, _tensor)
def test_get_input_from_current_scope(self):
    """get_input retrieves an existing Input"""
    scope, name = self.get_scope(), 'foo'
    with nn.variable_scope(scope):
        op = nn.Input(shape=[], name=name)
        self.assertIs(op, nn.get_input(name))
    self.assertIs(op, nn.get_input('{}/{}'.format(scope, name)))
    # The bare name is not resolvable outside the scope
    with self.assertRaises(ValueError):
        nn.get_input(name)
def _test_layer_io(self, layer_name, input_shape):
    """Input and output of a layer are retrievable within its scope"""
    scope = '{}/{}'.format(self.get_scope(), layer_name)
    with nn.variable_scope(scope) as vs:
        input_ = nn.Input(shape=input_shape, name='input')
        layer = nn.fetch_layer(layer_name)(scope=layer_name)
        output = layer(input_)

    with nn.variable_scope(vs, reuse=True):
        output_tensor_name = '{}/output'.format(layer_name)
        self.assertIs(input_, nn.get_input('input'))
        self.assertIs(output, nn.get_tensor(output_tensor_name))
def _test_cost(
        self, cost, target, prediction, expected, elementwise, decimal=5):
    with nn.variable_scope(self.get_scope()):
        target_var = nn.Input(shape=target.shape)
        pred_var = nn.Input(shape=prediction.shape)
        out_var = cost(target_var, pred_var)

    session = nn.Session()
    out_val = session.run(
        outputs=out_var,
        inputs={
            target_var: target,
            pred_var: prediction,
        },
    )
    if not elementwise:
        # Scalar costs are expected to average over the batch axis
        # and sum over the remaining axes.
        expected = np.sum(np.mean(expected, axis=0))
    np.testing.assert_almost_equal(out_val, expected, decimal=decimal)
    self.assertEqual(out_val.shape, out_var.shape)
def _convert(layer, shape):
    """Apply the layer to large negative random inputs and return the output"""
    input_tensor = nn.Input(shape=shape)
    input_value = np.random.randn(*shape) - 100
    session = nn.Session()
    output_tensor = layer(input_tensor)
    output_value = session.run(
        outputs=output_tensor,
        inputs={input_tensor: input_value},
    )
    return output_value, output_tensor
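# Usage sketch for ``_convert`` (hypothetical; inputs are shifted to around
# -100 so that activations can be checked on strictly negative values):
#
#     out_val, out_var = _convert(nn.layer.ReLU(), shape=(3, 4))
#     assert (out_val == 0).all()  # ReLU maps negative inputs to zero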