def test_return_sequences(layer_class):
    """Check that the recurrent layer builds with return_sequences enabled."""
    # output_dim / nb_samples / timesteps / embedding_dim are module-level
    # constants shared by the recurrent-layer tests.
    config = dict(output_dim=output_dim, return_sequences=True)
    layer_test(layer_class,
               kwargs=config,
               input_shape=(nb_samples, timesteps, embedding_dim))
def test_swish(trainable):
    """Smoke-test the Swish activation with beta fixed at 1.0."""
    params = dict(beta=1.0, trainable=trainable)
    layer_test(Swish, kwargs=params, input_shape=(2, 3, 4))
def test_swish(trainable):
    """Build and run Swish for both trainable settings of beta."""
    layer_test(Swish,
               kwargs={'beta': 1.0, 'trainable': trainable},
               input_shape=(2, 3, 4))
def test_cosineconvolution_2d(border_mode, subsample, use_bias_mode, use_regularizer):
    """Exercise CosineConvolution2D across padding/stride/bias/regularizer combos."""
    num_samples, num_filter, stack_size = 2, 2, 3
    num_row, num_col = 10, 6
    # 'same' padding combined with strides other than (1, 1) is skipped.
    if border_mode == 'same' and subsample != (1, 1):
        return
    # NOTE(review): data_format is a free variable here — presumably set at
    # module level from the backend; confirm against the surrounding file.
    kwargs = {
        'filters': num_filter,
        'kernel_size': (3, 3),
        'padding': border_mode,
        'strides': subsample,
        'use_bias': use_bias_mode,
        'data_format': data_format,
    }
    if use_regularizer:
        kwargs.update({
            'kernel_regularizer': 'l2',
            'bias_regularizer': 'l2',
            'activity_regularizer': 'l2',
        })
    layer_test(CosineConvolution2D,
               kwargs=kwargs,
               input_shape=(num_samples, num_row, num_col, stack_size))
def test_cosineconvolution_2d(border_mode, subsample, use_bias_mode, use_regularizer):
    """CosineConvolution2D builds for every parametrized configuration."""
    num_samples = 2
    num_filter = 2
    stack_size = 3
    num_row = 10
    num_col = 6
    # Unsupported combination: strided convolution with "same" padding.
    if border_mode == "same" and subsample != (1, 1):
        return
    # NOTE(review): data_format is a free variable — presumably a module-level
    # constant chosen from the backend; verify in the enclosing file.
    config = {
        "filters": num_filter,
        "kernel_size": (3, 3),
        "padding": border_mode,
        "strides": subsample,
        "use_bias": use_bias_mode,
        "data_format": data_format,
    }
    if use_regularizer:
        for key in ("kernel_regularizer", "bias_regularizer", "activity_regularizer"):
            config[key] = "l2"
    layer_test(
        CosineConvolution2D,
        kwargs=config,
        input_shape=(num_samples, num_row, num_col, stack_size),
    )
def test_swish(trainable):
    """Smoke-test Swish from advanced_activations with beta = 1.0."""
    cfg = dict(beta=1.0, trainable=trainable)
    layer_test(advanced_activations.Swish, kwargs=cfg, input_shape=(2, 3, 4))
def test_implementation_mode(layer_class):
    """Run the recurrent layer under each consume_less implementation mode."""
    for mode in ('cpu', 'mem', 'gpu'):
        layer_test(layer_class,
                   kwargs={'output_dim': output_dim, 'consume_less': mode},
                   input_shape=(nb_samples, timesteps, embedding_dim))
def test_dropout(layer_class):
    """Check the recurrent layer accepts input (W) and recurrent (U) dropout."""
    dropout_cfg = dict(output_dim=output_dim, dropout_U=0.1, dropout_W=0.1)
    layer_test(layer_class,
               kwargs=dropout_cfg,
               input_shape=(nb_samples, timesteps, embedding_dim))
def basic_batchrenorm_test():
    """BatchRenormalization builds with defaults and with weight regularizers."""
    from keras import regularizers

    # Default configuration, no extra arguments.
    layer_test(normalization.BatchRenormalization, input_shape=(3, 4, 2))

    # With L2 penalties on the learned scale (gamma) and offset (beta).
    reg_kwargs = {
        'gamma_regularizer': regularizers.l2(0.01),
        'beta_regularizer': regularizers.l2(0.01),
    }
    layer_test(normalization.BatchRenormalization,
               kwargs=reg_kwargs,
               input_shape=(3, 4, 2))
def test_cosinedense_reg_constraint():
    """CosineDense with regularizers and max-norm constraints on all weights."""
    cfg = {
        'units': 3,
        'kernel_regularizer': regularizers.l2(0.01),
        'bias_regularizer': regularizers.l1(0.01),
        'activity_regularizer': regularizers.l2(0.01),
        'kernel_constraint': constraints.MaxNorm(1),
        'bias_constraint': constraints.MaxNorm(1),
    }
    layer_test(core.CosineDense, kwargs=cfg, input_shape=(3, 2))
def test_cdropout_valid_layer(layer, args, shape, n_data):
    """Original layer test with a variety of different valid parameters."""
    layer_test(ConcreteDropout,
               kwargs={'layer': layer(*args), 'n_data': n_data},
               input_shape=shape)
    # These backends keep global graph state between tests; reset it.
    if K.backend() in ('tensorflow', 'cntk'):
        K.clear_session()
def test_cosinedense_reg_constraint():
    """CosineDense accepts regularizers and constraints on every weight set."""
    options = dict(
        units=3,
        kernel_regularizer=regularizers.l2(0.01),
        bias_regularizer=regularizers.l1(0.01),
        activity_regularizer=regularizers.l2(0.01),
        kernel_constraint=constraints.MaxNorm(1),
        bias_constraint=constraints.MaxNorm(1),
    )
    layer_test(core.CosineDense, kwargs=options, input_shape=(3, 2))
def test_basic_groupnorm():
    """GroupNormalization builds under several configurations."""
    cases = [
        # Regularized scale/offset.
        ({"groups": 2, "epsilon": 0.1,
          "gamma_regularizer": regularizers.l2(0.01),
          "beta_regularizer": regularizers.l2(0.01)}, (3, 4, 2)),
        # Normalization along a non-default axis.
        ({"groups": 2, "epsilon": 0.1, "axis": 1}, (3, 4, 2)),
        # Custom initializers on a 4D input.
        ({"groups": 2, "gamma_initializer": "ones",
          "beta_initializer": "ones"}, (3, 4, 2, 4)),
    ]
    if K.backend() != "theano":
        # The scale/center-disabled case is skipped on theano.
        cases.append(
            ({"groups": 2, "axis": 1, "scale": False, "center": False},
             (3, 4, 2, 4)))
    for cfg, shape in cases:
        layer_test(GroupNormalization, kwargs=cfg, input_shape=shape)
def test_basic_groupnorm():
    """GroupNormalization builds with regularizers, custom axis/initializers,
    and (on non-theano backends) with scale and center disabled."""
    # Regularized scale/offset.
    layer_test(
        GroupNormalization,
        kwargs={
            'groups': 2,
            'epsilon': 0.1,
            'gamma_regularizer': regularizers.l2(0.01),
            'beta_regularizer': regularizers.l2(0.01),
        },
        input_shape=(3, 4, 2))
    # Non-default normalization axis.
    layer_test(
        GroupNormalization,
        kwargs={'groups': 2, 'epsilon': 0.1, 'axis': 1},
        input_shape=(3, 4, 2))
    # Custom initializers on 4D input.
    layer_test(
        GroupNormalization,
        kwargs={'groups': 2,
                'gamma_initializer': 'ones',
                'beta_initializer': 'ones'},
        input_shape=(3, 4, 2, 4))
    if K.backend() != 'theano':
        # Scale/center disabled — skipped on theano.
        layer_test(
            GroupNormalization,
            kwargs={'groups': 2, 'axis': 1, 'scale': False, 'center': False},
            input_shape=(3, 4, 2, 4))
def test_capsule(num_capsule, dim_capsule, routings, share_weights, activation):
    """Capsule layer builds for the parametrized configuration."""
    num_samples = 100
    num_rows = 256
    num_cols = 256
    layer_test(
        capsule.Capsule,
        kwargs={
            'num_capsule': num_capsule,
            'dim_capsule': dim_capsule,
            'routings': routings,
            'share_weights': share_weights,
            'activation': activation,
        },
        input_shape=(num_samples, num_rows, num_cols))
def test_sub_pixel_upscaling():
    """SubPixelUpscaling matches the backend's depth_to_space for several factors."""
    num_samples, num_row, num_col = 2, 16, 16
    for scale in (2, 3, 4):
        # channels_first layout: 4 * scale**2 channels fold down to 4.
        data = np.random.random((num_samples, 4 * scale ** 2, num_row, num_col))
        if K.image_data_format() == 'channels_last':
            data = data.transpose((0, 2, 3, 1))
        # Reference output from the backend's own depth_to_space.
        reference = K.eval(KC.depth_to_space(K.variable(data), scale=scale))
        layer_test(convolutional.SubPixelUpscaling,
                   kwargs={'scale_factor': scale},
                   input_data=data,
                   expected_output=reference,
                   expected_output_dtype=K.floatx())
def test_capsule(num_capsule, dim_capsule, routings, share_weights, activation):
    """Capsule layer builds for the parametrized configuration."""
    # TODO: remove this once issue #25546 in the Tensorflow repo is fixed.
    if is_tf_keras and not share_weights:
        return
    batch, rows, cols = 100, 256, 256
    config = dict(
        num_capsule=num_capsule,
        dim_capsule=dim_capsule,
        routings=routings,
        share_weights=share_weights,
        activation=activation,
    )
    layer_test(capsule.Capsule, kwargs=config, input_shape=(batch, rows, cols))
def test_deconvolution_3d():
    """Deconvolution3D builds across batch-size, padding, and stride options,
    with and without regularizers, and with/without a batch dim in output_shape."""
    num_samples = 6
    num_filter = 4
    stack_size = 2
    # Spatial extents of the input volume.
    kernel_dim1 = 12
    kernel_dim2 = 10
    kernel_dim3 = 8
    for batch_size in [None, num_samples]:
        for border_mode in _convolution_border_modes:
            for subsample in [(1, 1, 1), (2, 2, 2)]:
                # 'same' padding with non-unit strides is not exercised.
                if border_mode == 'same' and subsample != (1, 1, 1):
                    continue
                # Expected output extents for a (7, 5, 3) kernel.
                dim1 = conv_input_length(kernel_dim1, 7, border_mode, subsample[0])
                dim2 = conv_input_length(kernel_dim2, 5, border_mode, subsample[1])
                dim3 = conv_input_length(kernel_dim3, 3, border_mode, subsample[2])
                # Plain configuration; output_shape includes the batch dim.
                layer_test(convolutional.Deconvolution3D,
                           kwargs={'filters': num_filter,
                                   'kernel_size': (7, 5, 3),
                                   'output_shape': (batch_size, num_filter,
                                                    dim1, dim2, dim3),
                                   'padding': border_mode,
                                   'strides': subsample,
                                   'data_format': 'channels_first'},
                           input_shape=(num_samples, stack_size,
                                        kernel_dim1, kernel_dim2, kernel_dim3),
                           fixed_batch_size=True,
                           tolerance=None)
                # Same configuration with L2 regularizers added.
                layer_test(convolutional.Deconvolution3D,
                           kwargs={'filters': num_filter,
                                   'kernel_size': (7, 5, 3),
                                   'output_shape': (batch_size, num_filter,
                                                    dim1, dim2, dim3),
                                   'padding': border_mode,
                                   'strides': subsample,
                                   'data_format': 'channels_first',
                                   'kernel_regularizer': 'l2',
                                   'bias_regularizer': 'l2',
                                   'activity_regularizer': 'l2'},
                           input_shape=(num_samples, stack_size,
                                        kernel_dim1, kernel_dim2, kernel_dim3),
                           fixed_batch_size=True,
                           tolerance=None)
                # output_shape WITHOUT the batch dim; batch size not fixed.
                layer_test(convolutional.Deconvolution3D,
                           kwargs={'filters': num_filter,
                                   'kernel_size': (7, 5, 3),
                                   'output_shape': (num_filter,
                                                    dim1, dim2, dim3),
                                   'padding': border_mode,
                                   'strides': subsample,
                                   'data_format': 'channels_first',
                                   'kernel_regularizer': 'l2',
                                   'bias_regularizer': 'l2',
                                   'activity_regularizer': 'l2'},
                           input_shape=(num_samples, stack_size,
                                        kernel_dim1, kernel_dim2, kernel_dim3),
                           tolerance=None)
def test_sub_pixel_upscaling(scale_factor):
    """SubPixelUpscaling output equals the backend depth_to_space reference."""
    num_samples, num_row, num_col = 2, 16, 16
    # 4 * scale**2 input channels fold down to 4 output channels.
    nb_channels = 4 * (scale_factor ** 2)
    input_data = np.random.random(
        (num_samples, nb_channels, num_row, num_col)).astype(K.floatx())
    if K.image_data_format() == "channels_last":
        input_data = input_data.transpose((0, 2, 3, 1))
    # Reference output computed by the backend itself.
    expected = K.eval(
        KC.depth_to_space(K.variable(input_data), scale=scale_factor))
    layer_test(
        SubPixelUpscaling,
        kwargs={"scale_factor": scale_factor},
        input_data=input_data,
        expected_output=expected,
        expected_output_dtype=K.floatx(),
    )
def basic_instancenorm_test():
    """InstanceNormalization builds with regularizers, custom initializers,
    and with scale/center disabled."""
    from keras import regularizers

    # L2-regularized scale and offset.
    layer_test(normalization.InstanceNormalization,
               kwargs=dict(epsilon=0.1,
                           gamma_regularizer=regularizers.l2(0.01),
                           beta_regularizer=regularizers.l2(0.01)),
               input_shape=(3, 4, 2))
    # Custom initializers.
    layer_test(normalization.InstanceNormalization,
               kwargs=dict(gamma_initializer='ones', beta_initializer='ones'),
               input_shape=(3, 4, 2))
    # No learned scale or offset, 2D input.
    layer_test(normalization.InstanceNormalization,
               kwargs=dict(scale=False, center=False),
               input_shape=(3, 3))
def test_sine_relu(epsilon):
    """SineReLU builds and runs for the parametrized epsilon."""
    layer_test(SineReLU, kwargs=dict(epsilon=epsilon), input_shape=(2, 3, 4))
def test_pelu():
    """PELU builds and runs with default arguments."""
    no_args = {}
    layer_test(advanced_activations.PELU, kwargs=no_args, input_shape=(2, 3, 4))
def test_swish_constant():
    """Swish with a fixed, non-trainable beta."""
    layer_test(advanced_activations.Swish,
               kwargs=dict(beta=1.0, trainable=False),
               input_shape=(2, 3, 4))
def test_srelu_share():
    """SReLU with its parameters shared along axis 1."""
    layer_test(advanced_activations.SReLU,
               kwargs=dict(shared_axes=1),
               input_shape=(2, 3, 4))
def test_cosineconvolution_2d():
    """End-to-end checks for CosineConvolution2D.

    Builds the layer across padding/stride/bias combinations (with and
    without regularizers), then verifies the cosine-normalized response on
    hand-set weights: a kernel equal to the input gives +1, and a negatively
    scaled kernel gives -1.
    """
    num_samples = 2
    num_filter = 2
    stack_size = 3
    num_row = 10
    num_col = 6

    if K.backend() == 'theano':
        data_format = 'channels_first'
    elif K.backend() == 'tensorflow':
        data_format = 'channels_last'
    else:
        # Bug fix: data_format was previously left unassigned on any other
        # backend (e.g. CNTK), crashing later with UnboundLocalError.
        # Fall back to the backend's configured image data format.
        data_format = K.image_data_format()

    for border_mode in _convolution_border_modes:
        for subsample in [(1, 1), (2, 2)]:
            for use_bias_mode in [True, False]:
                # 'same' padding with non-unit strides is not supported here.
                if border_mode == 'same' and subsample != (1, 1):
                    continue
                # Plain configuration.
                layer_test(convolutional.CosineConvolution2D,
                           kwargs={'filters': num_filter,
                                   'kernel_size': (3, 3),
                                   'padding': border_mode,
                                   'strides': subsample,
                                   'use_bias': use_bias_mode,
                                   'data_format': data_format},
                           input_shape=(num_samples, num_row,
                                        num_col, stack_size))
                # Same configuration with L2 regularizers.
                layer_test(convolutional.CosineConvolution2D,
                           kwargs={'filters': num_filter,
                                   'kernel_size': (3, 3),
                                   'padding': border_mode,
                                   'strides': subsample,
                                   'use_bias': use_bias_mode,
                                   'data_format': data_format,
                                   'kernel_regularizer': 'l2',
                                   'bias_regularizer': 'l2',
                                   'activity_regularizer': 'l2'},
                           input_shape=(num_samples, num_row,
                                        num_col, stack_size))

    # Build a single-filter layer and plant weights proportional to the input
    # so the cosine similarity is exactly +/-1.
    if data_format == 'channels_first':
        X = np.random.randn(1, 3, 5, 5)
        input_dim = (3, 5, 5)
        W0 = X[:, :, ::-1, ::-1]
    elif data_format == 'channels_last':
        X = np.random.randn(1, 5, 5, 3)
        input_dim = (5, 5, 3)
        W0 = X[0, :, :, :, None]

    # Kernel == input (up to the layout transform above) -> output of +1.
    model = Sequential()
    model.add(
        convolutional.CosineConvolution2D(1, (5, 5),
                                          use_bias=True,
                                          input_shape=input_dim,
                                          data_format=data_format))
    model.compile(loss='mse', optimizer='rmsprop')
    W = model.get_weights()
    W[0] = W0
    W[1] = np.asarray([1.])
    model.set_weights(W)
    out = model.predict(X)
    assert_allclose(out, np.ones((1, 1, 1, 1), dtype=K.floatx()), atol=1e-5)

    # Negatively scaled kernel, no bias -> output of -1.
    model = Sequential()
    model.add(
        convolutional.CosineConvolution2D(1, (5, 5),
                                          use_bias=False,
                                          input_shape=input_dim,
                                          data_format=data_format))
    model.compile(loss='mse', optimizer='rmsprop')
    W = model.get_weights()
    W[0] = -2 * W0
    model.set_weights(W)
    out = model.predict(X)
    assert_allclose(out, -np.ones((1, 1, 1, 1), dtype=K.floatx()), atol=1e-5)
def test_cosinedense(input_shape):
    """CosineDense builds for the parametrized input shape."""
    layer_test(core.CosineDense, kwargs=dict(units=3), input_shape=input_shape)
def test_srelu(kwargs):
    """SReLU builds and runs for the parametrized kwargs."""
    shape = (2, 3, 4)
    layer_test(SReLU, kwargs=kwargs, input_shape=shape)
def test_ptrelu(kwargs):
    """PTReLU builds and runs for the parametrized kwargs."""
    shape = (2, 3, 4)
    layer_test(advanced_activations.PTReLU, kwargs=kwargs, input_shape=shape)
def test_pelu(kwargs):
    """PELU builds and runs for the parametrized kwargs."""
    shape = (2, 3, 4)
    layer_test(PELU, kwargs=kwargs, input_shape=shape)
def test_sine_relu(epsilon):
    """SineReLU builds and runs for the parametrized epsilon."""
    layer_test(advanced_activations.SineReLU,
               kwargs=dict(epsilon=epsilon),
               input_shape=(2, 3, 4))