def test_layer(self):
    """StandardizedConv2D keeps shape/dtype in float32 and mixed_float16.

    Fix: the original set the global dtype policy to ``mixed_float16`` and
    never restored it, leaking the policy into every test that runs after
    this one. The mixed-precision phase is now wrapped in try/finally so
    the default ``float32`` policy is always restored.
    """
    test_utils.layer_test(
        StandardizedConv2D,
        kwargs={
            'filters': 4,
            'kernel_size': 1,
            'strides': 1,
            'padding': 'valid'
        },
        input_shape=[2, 16, 16, 8],
        input_dtype='float32',
        expected_output_shape=[None, 16, 16, 4],
        expected_output_dtype='float32')

    mixed_precision.set_global_policy('mixed_float16')
    try:
        result = test_utils.layer_test(
            StandardizedConv2D,
            kwargs={
                'filters': 4,
                'kernel_size': 3,
                'strides': 2,
                'padding': 'same'
            },
            input_shape=[2, 16, 16, 8],
            input_dtype='float16',
            expected_output_shape=[None, 8, 8, 4],
            expected_output_dtype='float16')
    finally:
        # Restore the default policy so the setting does not leak.
        mixed_precision.set_global_policy('float32')

    # Weight standardization divides by a variance estimate; make sure the
    # half-precision path did not overflow to inf/nan.
    self.assertTrue(np.all(np.isfinite(result)))
def test_global_average_pooling_2d(self):
    """GlobalAveragePooling2D builds and runs for both data formats."""
    for data_format, shape in (('channels_first', (3, 4, 5, 6)),
                               ('channels_last', (3, 5, 6, 4))):
        test_utils.layer_test(
            keras.layers.GlobalAveragePooling2D,
            kwargs={'data_format': data_format},
            input_shape=shape)
def test_layer(self):
    """UnionAttention collapses channels to a single float32 map."""
    test_utils.layer_test(
        UnionAttention,
        kwargs={'confidence': 0.1},
        input_shape=(2, 32, 32, 16),
        input_dtype='float32',
        expected_output_shape=(None, 32, 32, 1),
        expected_output_dtype='float32')
def test_basic_batchnorm_v2(self):
    """BatchNormalization runs with fused=True (4D input) and fused=None (3D)."""
    for fused, shape in ((True, (3, 3, 3, 3)), (None, (3, 3, 3))):
        test_utils.layer_test(
            batch_normalization.BatchNormalization,
            kwargs={'fused': fused},
            input_shape=shape)
def _run_test_extra_batch_dim(self, kwargs, expected_output_shape,
                              spatial_shape=(7, 6)):
    """Exercise Conv2D with an extra leading batch dimension (rank-5 input).

    When a spatial dimension is unknown (None), concrete input data with
    fallback sizes (7, 6) is generated so the layer can still execute.
    """
    leading_dims = (2, 11)
    channels = 3
    rows, cols = spatial_shape

    data = None
    if None in spatial_shape:
        concrete_shape = leading_dims + (rows or 7, cols or 6, channels)
        data = 10 * np.random.random(concrete_shape).astype(np.float32)

    with self.cached_session():
        if expected_output_shape is not None:
            # layer_test reports the batch dimension as None.
            expected_output_shape = (None,) + expected_output_shape
        test_utils.layer_test(
            keras.layers.Conv2D,
            kwargs=kwargs,
            input_shape=leading_dims + (rows, cols, channels),
            input_data=data,
            expected_output_shape=expected_output_shape,
        )
def test_upsampling_1d(self):
    """UpSampling1D with size=2 builds and runs on a rank-3 input."""
    with self.cached_session():
        test_utils.layer_test(
            keras.layers.UpSampling1D,
            kwargs={"size": 2},
            input_shape=(3, 5, 4),
        )
def test_layer(self):
    """SingleConvBlock maps 3 input channels to `out_features` channels."""
    test_utils.layer_test(
        SingleConvBlock,
        kwargs={'out_features': 10},
        input_shape=[2, 16, 16, 3],
        input_dtype='float32',
        expected_output_shape=[None, 16, 16, 10],
        expected_output_dtype='float32')
def test_thresholded_relu(self):
    """ThresholdedReLU with theta=0.5 builds, runs, and supports masking."""
    test_utils.layer_test(
        keras.layers.ThresholdedReLU,
        kwargs={"theta": 0.5},
        input_shape=(2, 3, 4),
        supports_masking=True,
    )
def test_conv2d_transpose_dilation(self):
    """Conv2DTranspose with dilation_rate=(2, 2): shape check + exact values."""
    # Shape-only smoke test.
    test_utils.layer_test(
        keras.layers.Conv2DTranspose,
        kwargs={
            'filters': 2,
            'kernel_size': 3,
            'padding': 'same',
            'data_format': 'channels_last',
            'dilation_rate': (2, 2)
        },
        input_shape=(2, 5, 6, 3))

    # With an all-ones kernel the dilated transpose output is known exactly.
    input_data = np.arange(48).reshape((1, 4, 4, 3)).astype(np.float32)
    # pylint: disable=too-many-function-args
    expected_output = np.float32([[192, 228, 192, 228],
                                  [336, 372, 336, 372],
                                  [192, 228, 192, 228],
                                  [336, 372, 336, 372]]).reshape((1, 4, 4, 1))
    test_utils.layer_test(
        keras.layers.Conv2DTranspose,
        input_data=input_data,
        kwargs={
            'filters': 1,
            'kernel_size': 3,
            'padding': 'same',
            'data_format': 'channels_last',
            'dilation_rate': (2, 2),
            'kernel_initializer': 'ones'
        },
        expected_output=expected_output)
def test_locallyconnected_2d_channels_first(self, data_format, padding,
                                            implementation):
    """LocallyConnected2D across parameterized data_format/padding/impl.

    implementation=1 rejects 'same' padding at construction time; all
    other combinations should build and run.
    NOTE(review): the input shape is ordered (rows, cols, channels) even
    though the test name says channels_first — data_format comes from the
    parameterization; confirm against the test decorator.
    """
    with self.cached_session():
        layer_kwargs = {
            "filters": 3,
            "kernel_size": 3,
            "data_format": data_format,
            "implementation": implementation,
            "padding": padding,
        }
        if padding == "same" and implementation == 1:
            self.assertRaises(ValueError, keras.layers.LocallyConnected2D,
                              **layer_kwargs)
        else:
            test_utils.layer_test(
                keras.layers.LocallyConnected2D,
                kwargs=layer_kwargs,
                input_shape=(8, 6, 10, 4),
            )
def test_locallyconnected_1d(self, data_format, padding, implementation):
    """LocallyConnected1D across parameterized data_format/padding/impl."""
    with self.cached_session():
        num_samples, num_steps, input_dim = 2, 8, 5
        for strides in [1]:
            # 'same' padding only supports unit strides.
            if padding == "same" and strides != 1:
                continue
            layer_kwargs = {
                "filters": 4,
                "kernel_size": 3,
                "padding": padding,
                "strides": strides,
                "data_format": data_format,
                "implementation": implementation,
            }
            if padding == "same" and implementation == 1:
                # implementation=1 rejects 'same' padding at build time.
                self.assertRaises(ValueError,
                                  keras.layers.LocallyConnected1D,
                                  **layer_kwargs)
            else:
                test_utils.layer_test(
                    keras.layers.LocallyConnected1D,
                    kwargs=layer_kwargs,
                    input_shape=(num_samples, num_steps, input_dim),
                )
def test_locallyconnected_2d(self, data_format, padding, implementation):
    """LocallyConnected2D with regularizers across strides and params."""
    with self.cached_session():
        num_samples, stack_size = 8, 4
        num_row, num_col = 6, 10
        for strides in [(1, 1), (2, 2)]:
            # 'same' padding only supports unit strides.
            if padding == "same" and strides != (1, 1):
                continue
            layer_kwargs = {
                "filters": 3,
                "kernel_size": 3,
                "padding": padding,
                "kernel_regularizer": "l2",
                "bias_regularizer": "l2",
                "strides": strides,
                "data_format": data_format,
                "implementation": implementation,
            }
            if padding == "same" and implementation == 1:
                # implementation=1 rejects 'same' padding at build time.
                self.assertRaises(ValueError,
                                  keras.layers.LocallyConnected2D,
                                  **layer_kwargs)
            else:
                test_utils.layer_test(
                    keras.layers.LocallyConnected2D,
                    kwargs=layer_kwargs,
                    input_shape=(num_samples, num_row, num_col, stack_size),
                )
def test_locallyconnected_1d(self, data_format, padding, implementation):
    """LocallyConnected1D across parameterized data_format/padding/impl."""
    with self.cached_session():
        batch, steps, features = 2, 8, 5
        for strides in [1]:
            # 'same' padding only supports unit strides.
            if padding == 'same' and strides != 1:
                continue
            layer_kwargs = {
                'filters': 4,
                'kernel_size': 3,
                'padding': padding,
                'strides': strides,
                'data_format': data_format,
                'implementation': implementation
            }
            if padding == 'same' and implementation == 1:
                # implementation=1 rejects 'same' padding at build time.
                self.assertRaises(ValueError,
                                  keras.layers.LocallyConnected1D,
                                  **layer_kwargs)
            else:
                test_utils.layer_test(
                    keras.layers.LocallyConnected1D,
                    kwargs=layer_kwargs,
                    input_shape=(batch, steps, features))
def test_locallyconnected_2d(self, data_format, padding, implementation):
    """LocallyConnected2D with regularizers across strides and params."""
    with self.cached_session():
        batch, channels = 8, 4
        rows, cols = 6, 10
        for strides in [(1, 1), (2, 2)]:
            # 'same' padding only supports unit strides.
            if padding == 'same' and strides != (1, 1):
                continue
            layer_kwargs = {
                'filters': 3,
                'kernel_size': 3,
                'padding': padding,
                'kernel_regularizer': 'l2',
                'bias_regularizer': 'l2',
                'strides': strides,
                'data_format': data_format,
                'implementation': implementation
            }
            if padding == 'same' and implementation == 1:
                # implementation=1 rejects 'same' padding at build time.
                self.assertRaises(ValueError,
                                  keras.layers.LocallyConnected2D,
                                  **layer_kwargs)
            else:
                test_utils.layer_test(
                    keras.layers.LocallyConnected2D,
                    kwargs=layer_kwargs,
                    input_shape=(batch, rows, cols, channels))
def test_lambda(self):
    """Lambda layer: plain call, extra arguments, and (de)serialization."""
    test_utils.layer_test(
        keras.layers.Lambda,
        kwargs={'function': lambda x: x + 1},
        input_shape=(3, 2))
    test_utils.layer_test(
        keras.layers.Lambda,
        kwargs={
            'function': lambda x, a, b: x * a + b,
            'arguments': {'a': 0.6, 'b': 0.4}
        },
        input_shape=(3, 2))

    # Serialization round-trip with a named function.
    def f(x):
        return x + 1

    layer = keras.layers.Lambda(f)
    config = layer.get_config()
    restored = keras.layers.deserialize({
        'class_name': 'Lambda',
        'config': config
    })
    self.assertEqual(restored.function(3), 4)

    # Serialization round-trip with an anonymous lambda.
    layer = keras.layers.Lambda(
        lambda x: keras.backend.concatenate([tf.square(x), x]))
    config = layer.get_config()
    restored = keras.layers.Lambda.from_config(config)
    self.assertAllEqual(self.evaluate(restored.function([3])), [9, 3])
def test_layer(self):
    """FeatureSelection maps 3 input channels to `filters` channels."""
    test_utils.layer_test(
        FeatureSelection,
        kwargs={'filters': 4},
        input_shape=[2, 16, 16, 3],
        input_dtype='float32',
        expected_output_shape=[None, 16, 16, 4],
        expected_output_dtype='float32')
def test_layer(self):
    """MINet emits per-class float32 maps in float32 and mixed policies.

    Fix: the original set the global dtype policy to ``mixed_float16`` and
    never restored it, leaking the policy into every test that runs after
    this one. The mixed-precision phase is now wrapped in try/finally so
    the default ``float32`` policy is always restored.
    """
    test_utils.layer_test(
        MINet,
        kwargs={
            'classes': 3,
            'bone_arch': 'resnet_50',
            'bone_init': 'imagenet',
            'bone_train': False
        },
        input_shape=[2, 62, 62, 3],
        input_dtype='uint8',
        expected_output_shape=[None, 62, 62, 3],
        expected_output_dtype='float32')

    mixed_precision.set_global_policy('mixed_float16')
    try:
        # Output stays float32 even under mixed_float16.
        test_utils.layer_test(
            MINet,
            kwargs={
                'classes': 1,
                'bone_arch': 'resnet_50',
                'bone_init': 'imagenet',
                'bone_train': False
            },
            input_shape=[2, 64, 64, 3],
            input_dtype='uint8',
            expected_output_shape=[None, 64, 64, 1],
            expected_output_dtype='float32')
    finally:
        # Restore the default policy so the setting does not leak.
        mixed_precision.set_global_policy('float32')
def test_basic_layernorm(self):
    """LayerNormalization across regularizers, initializers, axes, empty input."""
    # Regularized gamma/beta.
    test_utils.layer_test(
        keras.layers.LayerNormalization,
        kwargs={
            'gamma_regularizer': keras.regularizers.l2(0.01),
            'beta_regularizer': keras.regularizers.l2(0.01)
        },
        input_shape=(3, 4, 2))
    # Custom initializers.
    test_utils.layer_test(
        keras.layers.LayerNormalization,
        kwargs={
            'gamma_initializer': 'ones',
            'beta_initializer': 'ones',
        },
        input_shape=(3, 4, 2))
    # No scale/center parameters at all.
    test_utils.layer_test(
        keras.layers.LayerNormalization,
        kwargs={'scale': False, 'center': False},
        input_shape=(3, 3))
    # Normalization over multiple trailing axes.
    test_utils.layer_test(
        keras.layers.LayerNormalization,
        kwargs={'axis': (-3, -2, -1)},
        input_shape=(2, 8, 8, 3))
    # Zero-length dimension must not crash.
    test_utils.layer_test(
        keras.layers.LayerNormalization,
        input_shape=(1, 0, 10))
def test_basic_layernorm(self):
    """LayerNormalization across regularizers, initializers, axes, empty input."""
    # Regularized gamma/beta.
    test_utils.layer_test(
        keras.layers.LayerNormalization,
        kwargs={
            "gamma_regularizer": keras.regularizers.l2(0.01),
            "beta_regularizer": keras.regularizers.l2(0.01),
        },
        input_shape=(3, 4, 2),
    )
    # Custom initializers.
    test_utils.layer_test(
        keras.layers.LayerNormalization,
        kwargs={
            "gamma_initializer": "ones",
            "beta_initializer": "ones",
        },
        input_shape=(3, 4, 2),
    )
    # No scale/center parameters at all.
    test_utils.layer_test(
        keras.layers.LayerNormalization,
        kwargs={"scale": False, "center": False},
        input_shape=(3, 3),
    )
    # Normalization over multiple trailing axes.
    test_utils.layer_test(
        keras.layers.LayerNormalization,
        kwargs={"axis": (-3, -2, -1)},
        input_shape=(2, 8, 8, 3),
    )
    # Zero-length dimension must not crash.
    test_utils.layer_test(
        keras.layers.LayerNormalization, input_shape=(1, 0, 10)
    )
def test_prelu_share(self):
    """PReLU with shared_axes=1 builds, runs, and supports masking."""
    test_utils.layer_test(
        keras.layers.PReLU,
        kwargs={"shared_axes": 1},
        input_shape=(2, 3, 4),
        supports_masking=True,
    )
def test_conv3d_transpose_dynamic_shape(self):
    """Conv3DTranspose accepts fully dynamic spatial dimensions."""
    input_data = np.random.random((1, 3, 3, 3, 3)).astype(np.float32)
    with self.cached_session():
        # channels_last with all-unknown spatial dims must not raise.
        test_utils.layer_test(
            keras.layers.Conv3DTranspose,
            kwargs={
                "data_format": "channels_last",
                "filters": 3,
                "kernel_size": 3,
            },
            input_shape=(None, None, None, None, 3),
            input_data=input_data,
        )
        # channels_first is only exercised when a CUDA GPU is present.
        if tf.test.is_gpu_available(cuda_only=True):
            test_utils.layer_test(
                keras.layers.Conv3DTranspose,
                kwargs={
                    "data_format": "channels_first",
                    "filters": 3,
                    "kernel_size": 3,
                },
                input_shape=(None, 3, None, None, None),
                input_data=input_data,
            )
def test_global_maxpooling_3d(self):
    """GlobalMaxPooling3D builds and runs for both data formats."""
    for data_format in ('channels_first', 'channels_last'):
        test_utils.layer_test(
            keras.layers.GlobalMaxPooling3D,
            kwargs={'data_format': data_format},
            input_shape=(3, 4, 3, 4, 3))
def test_conv2d_transpose_dilation(self):
    """Conv2DTranspose with dilation_rate=(2, 2): shape check + exact values."""
    # Shape-only smoke test.
    test_utils.layer_test(
        keras.layers.Conv2DTranspose,
        kwargs={
            "filters": 2,
            "kernel_size": 3,
            "padding": "same",
            "data_format": "channels_last",
            "dilation_rate": (2, 2),
        },
        input_shape=(2, 5, 6, 3),
    )

    # With an all-ones kernel the dilated transpose output is known exactly.
    input_data = np.arange(48).reshape((1, 4, 4, 3)).astype(np.float32)
    # pylint: disable=too-many-function-args
    expected_output = np.float32([
        [192, 228, 192, 228],
        [336, 372, 336, 372],
        [192, 228, 192, 228],
        [336, 372, 336, 372],
    ]).reshape((1, 4, 4, 1))
    test_utils.layer_test(
        keras.layers.Conv2DTranspose,
        input_data=input_data,
        kwargs={
            "filters": 1,
            "kernel_size": 3,
            "padding": "same",
            "data_format": "channels_last",
            "dilation_rate": (2, 2),
            "kernel_initializer": "ones",
        },
        expected_output=expected_output,
    )
def test_softmax(self):
    """Softmax over axis=1 builds, runs, and supports masking."""
    test_utils.layer_test(
        keras.layers.Softmax,
        kwargs={"axis": 1},
        input_shape=(2, 3, 4),
        supports_masking=True,
    )
def test_layer(self):
    """ClassificationUncertainty drops the channel axis, keeping input dtype.

    Fix: the original set the global dtype policy to ``mixed_float16`` and
    never restored it, leaking the policy into every test that runs after
    this one. The mixed-precision phase is now wrapped in try/finally so
    the default ``float32`` policy is always restored.
    """
    # Probabilities input.
    test_utils.layer_test(
        ClassificationUncertainty,
        kwargs={},
        input_shape=[2, 16, 16, 10],
        input_dtype='float32',
        expected_output_shape=[None, 16, 16],
        expected_output_dtype='float32')
    # Logits input, multi-class.
    test_utils.layer_test(
        ClassificationUncertainty,
        kwargs={'from_logits': True},
        input_shape=[2, 16, 16, 10],
        input_dtype='float32',
        expected_output_shape=[None, 16, 16],
        expected_output_dtype='float32')
    # Logits input, single channel (binary case).
    test_utils.layer_test(
        ClassificationUncertainty,
        kwargs={'from_logits': True},
        input_shape=[2, 16, 16, 1],
        input_dtype='float32',
        expected_output_shape=[None, 16, 16],
        expected_output_dtype='float32')

    mixed_precision.set_global_policy('mixed_float16')
    try:
        # Under mixed policy the output dtype follows the input dtype.
        test_utils.layer_test(
            ClassificationUncertainty,
            kwargs={},
            input_shape=[2, 16, 16, 10],
            input_dtype='float16',
            expected_output_shape=[None, 16, 16],
            expected_output_dtype='float16')
        test_utils.layer_test(
            ClassificationUncertainty,
            kwargs={},
            input_shape=[2, 16, 16, 10],
            input_dtype='float32',
            expected_output_shape=[None, 16, 16],
            expected_output_dtype='float32')
    finally:
        # Restore the default policy so the setting does not leak.
        mixed_precision.set_global_policy('float32')
def test_layer(self):
    """BoxFilter with radius=3 preserves input shape and dtype."""
    test_utils.layer_test(
        BoxFilter,
        kwargs={'radius': 3},
        input_shape=[2, 8, 9, 1],
        input_dtype='float32',
        expected_output_shape=[None, 8, 9, 1],
        expected_output_dtype='float32')
def test_leaky_relu(self):
    """LeakyReLU builds, runs, and supports masking for several alphas."""
    for slope in (0.0, 0.5):
        test_utils.layer_test(
            keras.layers.LeakyReLU,
            kwargs={"alpha": slope},
            input_shape=(2, 3, 4),
            supports_masking=True,
        )
def test_permute_errors_on_invalid_set_of_dims_indices(self):
    """Permute rejects dims that are not a permutation of 1..rank-1."""
    # (1, 4, 2) references axis 4, which a rank-3 input does not have.
    with self.assertRaisesRegex(ValueError,
                                r"Invalid permutation .*dims.*"):
        test_utils.layer_test(
            keras.layers.Permute,
            kwargs={"dims": (1, 4, 2)},
            input_shape=(3, 2, 4),
        )
def test_global_average_pooling_1d(self):
    """GlobalAveragePooling1D runs with default and channels_first formats."""
    test_utils.layer_test(
        keras.layers.GlobalAveragePooling1D,
        input_shape=(3, 4, 5))
    test_utils.layer_test(
        keras.layers.GlobalAveragePooling1D,
        kwargs={"data_format": "channels_first"},
        input_shape=(3, 4, 5),
    )
def test_elu(self):
    """ELU builds, runs, and supports masking for several alphas."""
    for slope in (0.0, 0.5, -1.0):
        test_utils.layer_test(
            keras.layers.ELU,
            kwargs={"alpha": slope},
            input_shape=(2, 3, 4),
            supports_masking=True,
        )