def test_basic(self):
    """Smoke test: WeightNormalization wrapping a Conv2D builds and runs."""
    wrapped = tf.keras.layers.Conv2D(5, (2, 2))
    test_utils.layer_test(
        wrappers.WeightNormalization,
        kwargs={'layer': wrapped},
        input_shape=(2, 4, 4, 3),
    )
def test_random(dtype):
    """GELU output matches precomputed reference values for the given dtype."""
    inputs = np.array([[0.5, 1.2, -0.3]]).astype(dtype)
    expected = np.array([[0.345714, 1.0617027, -0.11462909]]).astype(dtype)
    test_utils.layer_test(
        GELU, kwargs={"dtype": dtype}, input_data=inputs, expected_output=expected
    )
def test_simple(num_clusters):
    """NetVLAD output shape is (batch, num_clusters * feature_dim)."""
    feature_dim = 100
    test_utils.layer_test(
        NetVLAD,
        kwargs={"num_clusters": num_clusters},
        input_shape=(5, 4, feature_dim),
        expected_output_shape=(None, num_clusters * feature_dim),
    )
def test_max_3d():
    """AdaptiveMaxPooling3D reduces a 4x10x2 volume to 2x2x2 in both data formats."""
    flat = np.arange(start=0.0, stop=80.0, step=1.0).astype(np.float32)
    maxima = np.array(
        [[[28.0, 29.0], [38.0, 39.0]], [[68.0, 69.0], [78.0, 79.0]]]
    ).astype(np.float32)
    for data_format, in_shape, out_shape in (
        ("channels_last", (1, 4, 10, 2, 1), (1, 2, 2, 2, 1)),
        ("channels_first", (1, 1, 4, 10, 2), (1, 1, 2, 2, 2)),
    ):
        test_utils.layer_test(
            AdaptiveMaxPooling3D,
            kwargs={"output_size": (2, 2, 2), "data_format": data_format},
            input_data=np.reshape(flat, in_shape),
            expected_output=np.reshape(maxima, out_shape),
        )
def layer_test_esn(dtype):
    """ESN with all-0.5 constant initializers maps each input to known constants."""
    init = tf.constant_initializer(0.5)
    inputs = np.asanyarray(
        [[[1.0, 1.0, 1.0, 1.0]], [[2.0, 2.0, 2.0, 2.0]], [[3.0, 3.0, 3.0, 3.0]]]
    ).astype(dtype)
    expected = np.asarray(
        [[2.5, 2.5, 2.5], [4.5, 4.5, 4.5], [6.5, 6.5, 6.5]]
    ).astype(dtype)
    config = {
        "units": 3,
        "connectivity": 1,
        "leaky": 1,
        "spectral_radius": 0.9,
        "use_norm2": True,
        "use_bias": True,
        "activation": None,
        "kernel_initializer": init,
        "recurrent_initializer": init,
        "bias_initializer": init,
        "dtype": dtype,
    }
    test_utils.layer_test(ESN, kwargs=config, input_data=inputs, expected_output=expected)
def test_avg_2d():
    """AdaptiveAveragePooling2D averages a 4x10 grid to 2x2 in both data formats."""
    flat = np.arange(start=0.0, stop=40.0, step=1.0).astype(np.float32)
    means = np.array([[7.0, 12.0], [27.0, 32.0]]).astype(np.float32)
    for data_format, in_shape, out_shape in (
        ("channels_last", (1, 4, 10, 1), (1, 2, 2, 1)),
        ("channels_first", (1, 1, 4, 10), (1, 1, 2, 2)),
    ):
        test_utils.layer_test(
            adaptive_pooling.AdaptiveAveragePooling2D,
            kwargs={"output_size": (2, 2), "data_format": data_format},
            input_data=np.reshape(flat, in_shape),
            expected_output=np.reshape(means, out_shape),
        )
def stochastic_depth_test(seed, training):
    """Check StochasticDepth outputs for training (keep/drop/other seeds) and inference.

    Args:
        seed: RNG seed; _KEEP_SEED / _DROP_SEED select the two training branches.
        training: whether the layer runs in training mode.
    """
    np.random.seed(seed)
    tf.random.set_seed(seed)
    survival_probability = 0.5
    shortcut = np.asarray([[0.2, 0.1, 0.4]]).astype(np.float32)
    residual = np.asarray([[0.2, 0.4, 0.5]]).astype(np.float32)
    if training:
        if seed == _KEEP_SEED:
            # shortcut + residual
            expected_output = np.asarray([[0.4, 0.5, 0.9]]).astype(np.float32)
        elif seed == _DROP_SEED:
            # shortcut
            expected_output = np.asarray([[0.2, 0.1, 0.4]]).astype(np.float32)
        else:
            # shortcut + p_l * residual
            expected_output = np.asarray([[0.3, 0.3, 0.65]]).astype(np.float32)
    else:
        # Bug fix: expected_output was previously undefined on the inference
        # path, raising NameError below. At inference the layer output is the
        # deterministic expectation: shortcut + p_l * residual.
        expected_output = np.asarray([[0.3, 0.3, 0.65]]).astype(np.float32)
    test_utils.layer_test(
        StochasticDepth,
        kwargs={"survival_probability": survival_probability},
        input_data=[shortcut, residual],
        expected_output=expected_output,
    )
def test_max_1d():
    """AdaptiveMaxPooling1D pools 12 steps down to 4 in both data formats."""
    flat = np.arange(start=0.0, stop=12.0, step=1.0).astype(np.float32)
    maxima = np.array([2.0, 5.0, 8.0, 11.0]).astype(np.float32)
    for data_format, in_shape, out_shape in (
        ("channels_last", (1, 12, 1), (1, 4, 1)),
        ("channels_first", (1, 1, 12), (1, 1, 4)),
    ):
        test_utils.layer_test(
            adaptive_pooling.AdaptiveMaxPooling1D,
            kwargs={"output_size": 4, "data_format": data_format},
            input_data=np.reshape(flat, in_shape),
            expected_output=np.reshape(maxima, out_shape),
        )
def test_avg_3d():
    """AdaptiveAveragePooling3D reduces a 4x10x2 volume to 2x2x2 in both data formats."""
    flat = np.arange(start=0.0, stop=80.0, step=1.0).astype(np.float32)
    means = np.array(
        [[[14.0, 15.0], [24.0, 25.0]], [[54.0, 55.0], [64.0, 65.0]]]
    ).astype(np.float32)
    for data_format, in_shape, out_shape in (
        ("channels_last", (1, 4, 10, 2, 1), (1, 2, 2, 2, 1)),
        ("channels_first", (1, 1, 4, 10, 2), (1, 1, 2, 2, 2)),
    ):
        test_utils.layer_test(
            AdaptiveAveragePooling3D,
            kwargs={"output_size": (2, 2, 2), "data_format": data_format},
            input_data=np.reshape(flat, in_shape),
            expected_output=np.reshape(means, out_shape),
        )
def test_random(self, dtype):
    """TLU with default parameters matches precomputed reference values."""
    inputs = np.array([[-2.5, 0.0, 0.3]]).astype(dtype)
    expected = np.array([[0.0, 0.0, 0.3]]).astype(dtype)
    test_utils.layer_test(
        TLU, kwargs={"dtype": dtype}, input_data=inputs, expected_output=expected
    )
def test_no_bias(self):
    """WeightNormalization works for a Dense layer without a bias term."""
    biasless = tf.keras.layers.Dense(5, use_bias=False)
    test_utils.layer_test(
        wrappers.WeightNormalization,
        kwargs={'layer': biasless},
        input_shape=(2, 4),
    )
def test_weightnorm_keras(self):
    """WeightNormalization wrapping Dense runs through Keras layer_test."""
    data = np.random.random((10, 3, 4)).astype(np.float32)
    test_utils.layer_test(
        wrappers.WeightNormalization,
        kwargs={'layer': tf.keras.layers.Dense(2), 'input_shape': (3, 4)},
        input_data=data,
    )
def test_nchw():
    """Maxout over the channel axis accepts positive and negative axis indices."""
    for channel_axis in (1, -3):
        test_utils.layer_test(
            Maxout,
            kwargs={"num_units": 4, "axis": channel_axis},
            input_shape=(2, 20, 3, 6),
        )
def test_sparsemax_layer_against_numpy(self, dtype=None):
    """Check the Sparsemax layer against the NumPy reference implementation."""
    rng = np.random.RandomState(1)
    z = rng.uniform(low=-3, high=3, size=(test_obs, 10)).astype(dtype)
    # Pass the parametrized dtype through to the layer (consistent with the
    # sibling sparsemax test): previously the layer always ran in its default
    # dtype even when the input/reference were cast to another precision.
    # dtype=None keeps the layer default, so the fix is backward compatible.
    test_utils.layer_test(
        Sparsemax,
        kwargs={"dtype": dtype},
        input_data=z,
        expected_output=_np_sparsemax(z).astype(dtype),
    )
def test_unknown():
    """NetVLAD handles inputs with unknown batch and time dimensions."""
    features = np.random.random((5, 4, 100)).astype("float32")
    test_utils.layer_test(
        NetVLAD,
        kwargs={"num_clusters": 3},
        input_shape=(None, None, 100),
        input_data=features,
        expected_output_shape=(None, 3 * 100),
    )
def test_keras():
    """SpectralNormalization wrapping Dense runs through Keras layer_test."""
    data = np.random.random((10, 3, 4)).astype(np.float32)
    test_utils.layer_test(
        spectral_normalization.SpectralNormalization,
        kwargs={"layer": tf.keras.layers.Dense(2), "input_shape": (3, 4)},
        input_data=data,
    )
def test_unknown(self):
    """Maxout handles partially and fully unknown static input shapes."""
    data = np.random.random((5, 4, 2, 18)).astype('float32')
    for static_shape in ((5, 4, 2, None), (None, None, None, None)):
        test_utils.layer_test(
            Maxout,
            kwargs={'num_units': 3},
            input_shape=static_shape,
            input_data=data,
        )
def test_sparsemax_layer_against_numpy(self, dtype):
    """Check the Sparsemax layer against the NumPy reference implementation."""
    # Currently disabled pending an upstream fix.
    self.skipTest('Wait #33614 to be fixed')
    rng = np.random.RandomState(1)
    z = rng.uniform(low=-3, high=3, size=(test_obs, 10)).astype(dtype)
    test_utils.layer_test(
        Sparsemax,
        kwargs={'dtype': dtype},
        input_data=z,
        expected_output=_np_sparsemax(z).astype(dtype),
    )
def test_invalid_shape(self):
    """NetVLAD rejects num_clusters < 2 and inputs whose rank is not 3."""
    # assertRaisesRegexp is the deprecated alias of assertRaisesRegex.
    with self.assertRaisesRegex(
        ValueError, r"`num_clusters` must be greater than 1"
    ):
        test_utils.layer_test(
            NetVLAD, kwargs={"num_clusters": 0}, input_shape=(5, 4, 20)
        )
    with self.assertRaisesRegex(ValueError, r"must have rank 3"):
        test_utils.layer_test(
            NetVLAD, kwargs={"num_clusters": 2}, input_shape=(5, 4, 4, 20)
        )
def _check_data_init(self, data_init, input_data, expected_output):
    """Run layer_test on WeightNormalization wrapping an identity Dense layer."""
    identity_dense = tf.keras.layers.Dense(
        input_data.shape[-1],
        activation=None,
        kernel_initializer='identity',
        bias_initializer='zeros',
    )
    test_utils.layer_test(
        wrappers.WeightNormalization,
        kwargs={'layer': identity_dense, 'data_init': data_init},
        input_data=input_data,
        expected_output=expected_output,
    )
def test_invalid_shape():
    """NetVLAD raises ValueError for invalid num_clusters and wrong input rank."""
    with pytest.raises(ValueError, match="`num_clusters` must be greater than 1"):
        test_utils.layer_test(
            NetVLAD, kwargs={"num_clusters": 0}, input_shape=(5, 4, 20)
        )
    with pytest.raises(ValueError, match="must have rank 3"):
        test_utils.layer_test(
            NetVLAD, kwargs={"num_clusters": 2}, input_shape=(5, 4, 4, 20)
        )
def test_weightnorm_keras(self):
    """WeightNormalization runs through layer_test with training validation off."""
    data = np.random.random((10, 3, 4)).astype(np.float32)
    # TODO: Fix the bug thats causing layer test to run a
    # graph Tensor in eager mode.
    test_utils.layer_test(
        wrappers.WeightNormalization,
        kwargs={'layer': tf.keras.layers.Dense(2), 'input_shape': (3, 4)},
        input_data=data,
        validate_training=False,
    )
def test_affine(self, dtype):
    """Affine TLU with unit alpha/tau matches precomputed reference values."""
    inputs = np.array([[-2.5, 0., 0.3]]).astype(dtype)
    expected = np.array([[-1.5, 1.0, 1.3]]).astype(dtype)
    config = {
        'affine': True,
        'dtype': dtype,
        'alpha_initializer': 'ones',
        'tau_initializer': 'ones',
    }
    test_utils.layer_test(
        TLU, kwargs=config, input_data=inputs, expected_output=expected
    )
def test_unknown():
    """Maxout handles partially and fully unknown static input shapes."""
    data = np.random.random((5, 4, 2, 18)).astype("float32")
    for static_shape in ((5, 4, 2, None), (None, None, None, None)):
        test_utils.layer_test(
            Maxout,
            kwargs={"num_units": 3},
            input_shape=static_shape,
            input_data=data,
        )
def test_layer(dtype):
    """Snake layer matches the functional snake() reference for a random frequency."""
    data = np.random.rand(2, 5).astype(dtype)
    freq = np.random.randn()
    expected = snake(data, freq)
    test_utils.layer_test(
        Snake,
        kwargs={
            "frequency_initializer": tf.constant_initializer(freq),
            "dtype": dtype,
        },
        input_data=data,
        expected_output=expected,
    )
def _check_data_init(self, data_init, input_data, expected_output):
    """Run layer_test on WeightNormalization wrapping an identity Dense layer."""
    identity_dense = tf.keras.layers.Dense(
        input_data.shape[-1],
        activation=None,
        kernel_initializer="identity",
        bias_initializer="zeros",
    )
    test_utils.layer_test(
        wrappers.WeightNormalization,
        kwargs={"layer": identity_dense, "data_init": data_init},
        input_data=input_data,
        expected_output=expected_output,
    )
def test_affine(dtype):
    """Affine TLU with unit alpha/tau matches precomputed reference values."""
    inputs = np.array([[-2.5, 0.0, 0.3]]).astype(dtype)
    expected = np.array([[-1.5, 1.0, 1.3]]).astype(dtype)
    config = {
        "affine": True,
        "dtype": dtype,
        "alpha_initializer": "ones",
        "tau_initializer": "ones",
    }
    test_utils.layer_test(
        TLU, kwargs=config, input_data=inputs, expected_output=expected
    )
def test_nchw(self):
    """Maxout over the channel axis accepts positive and negative axis indices."""
    for channel_axis in (1, -3):
        test_utils.layer_test(
            Maxout,
            kwargs={'num_units': 4, 'axis': channel_axis},
            input_shape=(2, 20, 3, 6),
        )
def test_spp_output_2d():
    """SpatialPyramidPooling2D with 1x1 and 2x2 bins over a 4x4 grid, both formats."""
    flat = np.arange(start=0.0, stop=16.0, step=1.0).astype(np.float32)
    # channels_last: pooled values stack along the new "bin" axis.
    expected = np.array([[[7.5], [2.5], [4.5], [10.5], [12.5]]]).astype(np.float32)
    test_utils.layer_test(
        SpatialPyramidPooling2D,
        kwargs={"bins": [[1, 1], [2, 2]], "data_format": "channels_last"},
        input_data=np.reshape(flat, (1, 4, 4, 1)),
        expected_output=expected,
    )
    # channels_first: same pooled values, channel-major layout.
    expected = np.array([[[7.5, 2.5, 4.5, 10.5, 12.5]]]).astype(np.float32)
    test_utils.layer_test(
        SpatialPyramidPooling2D,
        kwargs={"bins": [[1, 1], [2, 2]], "data_format": "channels_first"},
        input_data=np.reshape(flat, (1, 1, 4, 4)),
        expected_output=expected,
    )
def test_poincare_normalize_dim_array():
    """PoincareNormalize over multiple axes keeps norms within 1 - epsilon."""
    epsilon = 1e-5
    tol = 1e-6
    axes = [1, 2]
    np.random.seed(1)
    inputs = np.random.random_sample([20, 7, 3]).astype(np.float32)
    expected = _poincare_normalize(inputs, axes, epsilon)
    outputs = test_utils.layer_test(
        PoincareNormalize,
        kwargs={"axis": axes, "epsilon": epsilon},
        input_data=inputs,
        expected_output=expected,
    )
    for result in (expected, outputs):
        norms = np.linalg.norm(result, axis=tuple(axes))
        assert norms.max() <= 1.0 - epsilon + tol