def test_group_normalization(self, use_cpu_only, backend, rank, groups,
                             axis, epsilon, center, scale):
    tensorflow_addons = pytest.importorskip("tensorflow_addons")
    from tensorflow_addons.layers import GroupNormalization

    shape = np.random.randint(low=2, high=4, size=rank)
    shape[-1] = shape[-1] * groups  # number of channels must be divisible by groups
    model = tf.keras.Sequential([
        GroupNormalization(
            batch_input_shape=shape,
            groups=groups,
            axis=axis,
            epsilon=epsilon,
            center=center,
            scale=scale,
        )
    ])
    run_compare_tf_keras(
        model,
        [random_gen(shape, rand_min=-1, rand_max=1)],
        use_cpu_only=use_cpu_only,
        backend=backend,
        atol=1e-3,
        rtol=1e-4,
    )

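# The tests in this section rely on two helpers defined elsewhere in the test
# suite: random_gen and run_compare_tf_keras. As a minimal, hedged sketch of
# the contract assumed here (inferred from the call sites, not from the
# helpers' actual definitions): random_gen presumably returns uniform float
# samples of the given shape, and run_compare_tf_keras presumably converts
# the tf.keras model with coremltools, runs both models on the given inputs,
# and asserts the outputs agree within atol/rtol. The sketch below is named
# with a leading underscore so it neither shadows the real helper nor gets
# collected by pytest.
def _random_gen_sketch(shape, rand_min=0.0, rand_max=1.0):
    # Uniform float32 samples in [rand_min, rand_max) with the given shape.
    return np.random.uniform(rand_min, rand_max, size=tuple(shape)).astype(np.float32)
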
def test_lstm_dynamic_batch(self, use_cpu_only, backend):
    # Support dynamic elem_shape <rdar://problem/69522780>
    if backend != "nn_proto":
        return
    input_shape = (1, 1280)
    inp = tf.keras.layers.Input(shape=input_shape)
    h0 = tf.keras.layers.Input(shape=(512,))
    c0 = tf.keras.layers.Input(shape=(512,))
    # Feed the state tensors as the LSTM's initial state so that all three
    # model inputs are connected to the graph.
    out, hn, cn = tf.keras.layers.LSTM(
        512,
        return_sequences=True,
        return_state=True,
        recurrent_activation="sigmoid",
    )(inp, initial_state=[h0, c0])
    model = tf.keras.models.Model(inputs=[inp, h0, c0], outputs=[out, hn, cn])
    batch_size = 2
    run_compare_tf_keras(
        model,
        [
            random_gen((batch_size, 1, 1280), -1, 1),
            random_gen((batch_size, 512), -1, 1),
            random_gen((batch_size, 512), -1, 1),
        ],
        use_cpu_only=use_cpu_only,
        backend=backend,
    )

def test_lstm(
    self,
    use_cpu_only,
    backend,
    rank,
    units,
    activation,
    recurrent_activation,
    use_bias,
    return_sequences,
):
    shape = np.random.randint(low=1, high=4, size=rank)
    model = tf.keras.Sequential([
        tf.keras.layers.LSTM(
            batch_input_shape=shape,
            units=units,
            activation=activation,
            recurrent_activation=recurrent_activation,
            use_bias=use_bias,
            return_sequences=return_sequences,
        ),
    ])
    run_compare_tf_keras(
        model,
        [random_gen(shape, rand_min=-1, rand_max=1)],
        use_cpu_only=use_cpu_only,
        backend=backend,
    )

def test(self, use_cpu_only, backend, op, upsample_factor, data_format,
         interpolation):
    kwargs = {}
    shape = None
    if op == tf.keras.layers.UpSampling1D:
        shape = np.random.randint(low=2, high=4, size=3)
        upsample_factor = upsample_factor[2]
    elif op == tf.keras.layers.UpSampling2D:
        kwargs = {"data_format": data_format, "interpolation": interpolation}
        shape = np.random.randint(low=2, high=4, size=4)
        upsample_factor = (upsample_factor[1], upsample_factor[2])
    elif op == tf.keras.layers.UpSampling3D:
        kwargs = {"data_format": data_format}
        shape = np.random.randint(low=2, high=4, size=5)
    model = tf.keras.Sequential(
        [op(batch_input_shape=shape, size=upsample_factor, **kwargs)]
    )
    run_compare_tf_keras(
        model,
        [random_gen(shape, rand_min=-10, rand_max=10)],
        use_cpu_only=use_cpu_only,
        backend=backend,
    )

def test_pooling(self, use_cpu_only, backend, op, data_format, pool_size):
    shape = None
    if op in {tf.keras.layers.AveragePooling1D, tf.keras.layers.MaxPool1D}:
        shape = np.random.randint(low=3, high=9, size=3)
        pool_size = pool_size[2]
    elif op in {tf.keras.layers.AveragePooling2D, tf.keras.layers.MaxPool2D}:
        if data_format == "channels_first":
            return  # AvgPoolingOp only supports NHWC on CPU
        shape = np.random.randint(low=3, high=9, size=4)
        pool_size = pool_size[1:]
    elif op in {tf.keras.layers.AveragePooling3D, tf.keras.layers.MaxPool3D}:
        shape = np.random.randint(low=3, high=9, size=5)
    model = tf.keras.Sequential([
        op(batch_input_shape=shape, pool_size=pool_size,
           data_format=data_format)
    ])
    run_compare_tf_keras(
        model,
        [random_gen(shape, rand_min=-10, rand_max=10)],
        use_cpu_only=use_cpu_only,
        backend=backend,
    )

def test_global_pooling(self, use_cpu_only, backend, op, data_format):
    shape = None
    if op in {
            tf.keras.layers.GlobalAveragePooling1D,
            tf.keras.layers.GlobalMaxPool1D,
    }:
        shape = np.random.randint(low=2, high=4, size=3)
    elif op in {
            tf.keras.layers.GlobalAveragePooling2D,
            tf.keras.layers.GlobalMaxPool2D,
    }:
        shape = np.random.randint(low=2, high=4, size=4)
    elif op in {
            tf.keras.layers.GlobalAveragePooling3D,
            tf.keras.layers.GlobalMaxPool3D,
    }:
        shape = np.random.randint(low=2, high=4, size=5)
    model = tf.keras.Sequential(
        [op(batch_input_shape=shape, data_format=data_format)]
    )
    run_compare_tf_keras(
        model,
        [random_gen(shape, rand_min=-10, rand_max=10)],
        use_cpu_only=use_cpu_only,
        backend=backend,
    )

def test_layer(self, use_cpu_only, backend, rank, op):
    shape = np.random.randint(low=2, high=4, size=rank)
    model = tf.keras.Sequential([op(batch_input_shape=shape)])
    run_compare_tf_keras(
        model,
        [random_gen(shape, -10, 10)],
        use_cpu_only=use_cpu_only,
        backend=backend,
    )

def test(self, use_cpu_only, backend, rank_and_perm):
    rank, perm = rank_and_perm
    shape = np.random.randint(low=2, high=4, size=rank)
    model = tf.keras.Sequential(
        [tf.keras.layers.Permute(batch_input_shape=shape, dims=perm)]
    )
    run_compare_tf_keras(
        model,
        [random_gen(shape, rand_min=-10, rand_max=10)],
        use_cpu_only=use_cpu_only,
        backend=backend,
    )

def test_skip_noise(self, use_cpu_only, backend):
    shape = np.random.randint(low=1, high=4, size=5)
    model = tf.keras.Sequential([
        # GaussianNoise should do nothing in inference mode
        tf.keras.layers.GaussianNoise(batch_input_shape=shape, stddev=0.5)
    ])
    run_compare_tf_keras(
        model,
        [random_gen(shape, rand_min=-10, rand_max=10)],
        use_cpu_only=use_cpu_only,
        backend=backend,
    )

def test_cropping_2d(self, use_cpu_only, backend, begin_end1, begin_end2):
    shape = (1, 10, 10, 3)
    model = tf.keras.Sequential([
        tf.keras.layers.Cropping2D(
            batch_input_shape=shape, cropping=(begin_end1, begin_end2)
        )
    ])
    run_compare_tf_keras(
        model,
        [random_gen(shape, rand_min=-1, rand_max=1)],
        use_cpu_only=use_cpu_only,
        backend=backend,
    )

def test(self, use_cpu_only, backend, n):
    # input shape: 2D tensor (batch_size, features)
    # output shape: 3D tensor (batch_size, n, features)
    shape = np.random.randint(low=1, high=4, size=2)
    model = tf.keras.Sequential(
        [tf.keras.layers.RepeatVector(batch_input_shape=shape, n=n)]
    )
    run_compare_tf_keras(
        model,
        [random_gen(shape, rand_min=-10, rand_max=10)],
        use_cpu_only=use_cpu_only,
        backend=backend,
    )

def test_lstmcell(self, use_cpu_only, backend):
    shape = np.random.randint(low=1, high=4, size=3)
    model = tf.keras.Sequential([
        tf.keras.layers.RNN(
            batch_input_shape=shape, cell=tf.keras.layers.LSTMCell(units=3)
        )
    ])
    run_compare_tf_keras(
        model,
        [random_gen(shape, rand_min=-1, rand_max=1)],
        use_cpu_only=use_cpu_only,
        backend=backend,
    )

def test_skip_regularization(self, use_cpu_only, backend, rank, l1, l2):
    shape = np.random.randint(low=2, high=4, size=rank)
    model = tf.keras.Sequential([
        tf.keras.layers.ActivityRegularization(
            batch_input_shape=shape, l1=l1, l2=l2
        )
    ])
    run_compare_tf_keras(
        model,
        [random_gen(shape, rand_min=-10, rand_max=10)],
        use_cpu_only=use_cpu_only,
        backend=backend,
    )

def test_lstm_time_distributed_dense(self, use_cpu_only, backend):
    shape = list(np.random.randint(low=1, high=4, size=3))
    k_in = tf.keras.layers.Input(batch_size=shape[0], shape=shape[1:])
    lstm = tf.keras.layers.LSTM(units=32, return_sequences=True)(k_in)
    k_out = tf.keras.layers.TimeDistributed(tf.keras.layers.Dense(1))(lstm)
    model = tf.keras.Model(inputs=k_in, outputs=k_out)
    run_compare_tf_keras(
        model,
        [random_gen(shape, rand_min=-1, rand_max=1)],
        use_cpu_only=use_cpu_only,
        backend=backend,
    )

def test(self, use_cpu_only, backend, rank, op):
    shape = np.random.randint(low=1, high=4, size=rank)
    input_x = tf.keras.layers.Input(batch_input_shape=tuple(shape))
    input_y = tf.keras.layers.Input(batch_input_shape=tuple(shape))
    out = op()([input_x, input_y])
    model = tf.keras.Model(inputs=[input_x, input_y], outputs=out)
    run_compare_tf_keras(
        model,
        [random_gen(shape, -10, 10), random_gen(shape, -10, 10)],
        use_cpu_only=use_cpu_only,
        backend=backend,
    )

def test_skip_dropout(self, use_cpu_only, backend, skip_op):
    shape = np.random.randint(low=1, high=4, size=5)
    if skip_op == tf.keras.layers.SpatialDropout1D:
        shape = shape[:3]
    elif skip_op == tf.keras.layers.SpatialDropout2D:
        shape = shape[:4]
    model = tf.keras.Sequential(
        [skip_op(batch_input_shape=shape, rate=0.5)]
    )
    run_compare_tf_keras(
        model,
        [random_gen(shape, rand_min=-10, rand_max=10)],
        use_cpu_only=use_cpu_only,
        backend=backend,
    )

def test(self, use_cpu_only, backend, rank, axis):
    shape = np.random.randint(low=2, high=4, size=rank)
    inputs = []
    for _ in range(2):
        inputs.append(tf.keras.layers.Input(batch_input_shape=tuple(shape)))
    out = tf.keras.layers.Concatenate(axis=axis)(inputs)
    model = tf.keras.Model(inputs=inputs, outputs=out)
    run_compare_tf_keras(
        model,
        [random_gen(shape), random_gen(shape)],
        use_cpu_only=use_cpu_only,
        backend=backend,
    )

def test_dot(self, use_cpu_only, rank, backend, axes, normalize):
    shape = np.random.randint(low=2, high=6, size=rank)
    input_x = tf.keras.layers.Input(batch_input_shape=tuple(shape))
    input_y = tf.keras.layers.Input(batch_input_shape=tuple(shape))
    out = tf.keras.layers.Dot(axes=axes, normalize=normalize)(
        [input_x, input_y]
    )
    model = tf.keras.Model(inputs=[input_x, input_y], outputs=out)
    run_compare_tf_keras(
        model,
        [random_gen(shape, -10, 10), random_gen(shape, -10, 10)],
        use_cpu_only=use_cpu_only,
        backend=backend,
    )

def test_unary(self, use_cpu_only, backend, rank, function):
    shape = np.random.randint(low=2, high=4, size=rank)
    model = tf.keras.Sequential([
        tf.keras.layers.Lambda(
            batch_input_shape=shape,
            function=function,
        )
    ])
    run_compare_tf_keras(
        model,
        [random_gen(shape, rand_min=-5, rand_max=5)],
        use_cpu_only=use_cpu_only,
        backend=backend,
    )

def test(self, use_cpu_only, backend, rank, data_format):
    shape = np.random.randint(low=2, high=4, size=rank)
    model = tf.keras.Sequential([
        tf.keras.layers.Flatten(
            batch_input_shape=shape,
            data_format=data_format,
        )
    ])
    run_compare_tf_keras(
        model,
        [random_gen(shape, rand_min=-10, rand_max=10)],
        use_cpu_only=use_cpu_only,
        backend=backend,
    )

def test_layer_normalization(self, use_cpu_only, backend, rank, axis,
                             epsilon):
    shape = np.random.randint(low=2, high=4, size=rank)
    model = tf.keras.Sequential([
        tf.keras.layers.LayerNormalization(
            batch_input_shape=shape,
            axis=axis,
            epsilon=epsilon,
            trainable=False,
        )
    ])
    run_compare_tf_keras(
        model,
        [random_gen(shape, rand_min=-100, rand_max=100)],
        use_cpu_only=use_cpu_only,
        backend=backend,
    )

def test_activation(self, use_cpu_only, backend, rank, op):
    kwargs = (
        {"atol": 1e-3, "rtol": 1e-4}
        if op == tf.keras.activations.exponential and use_cpu_only is False
        else {}
    )
    if op == tf.keras.activations.softmax and rank == 1:
        return  # skip applying softmax to a 1-D tensor
    shape = np.random.randint(low=2, high=4, size=rank)
    model = tf.keras.Sequential(
        [tf.keras.layers.Activation(op, batch_input_shape=shape)]
    )
    run_compare_tf_keras(
        model,
        [random_gen(shape, -10, 10)],
        use_cpu_only=use_cpu_only,
        backend=backend,
        **kwargs
    )

def test(self, use_cpu_only, backend, rank, units, activation, use_bias):
    shape = np.random.randint(low=2, high=4, size=rank)
    model = tf.keras.Sequential([
        tf.keras.layers.Dense(
            batch_input_shape=shape,
            units=units,
            activation=activation,
            use_bias=use_bias,
        )
    ])
    run_compare_tf_keras(
        model,
        [random_gen(shape, rand_min=-10, rand_max=10)],
        use_cpu_only=use_cpu_only,
        backend=backend,
    )

def test(self, use_cpu_only, backend, rank, infer_shape):
    shape = np.random.randint(low=2, high=4, size=rank)
    # target shape does not include the batch dimension
    target_shape = random.sample(list(shape[1:]), len(shape[1:]))
    if len(target_shape) > 0 and infer_shape:
        target_shape[-1] = -1
    model = tf.keras.Sequential([
        tf.keras.layers.Reshape(
            batch_input_shape=shape, target_shape=target_shape
        )
    ])
    run_compare_tf_keras(
        model,
        [random_gen(shape, rand_min=-10, rand_max=10)],
        use_cpu_only=use_cpu_only,
        backend=backend,
    )

def test_conv_transpose(
    self,
    use_cpu_only,
    backend,
    op,
    padding,
    data_format,
    spatial_dim_and_ks,
    output_padding,
    strides,
    dilations,
    batch_size,
):
    s1, s2, s3, k1, k2, k3 = spatial_dim_and_ks
    c_in, c_out = 2, 3
    input_shape = None
    kernel_size = None
    if op == tf.keras.layers.Conv2DTranspose:
        input_shape = (batch_size, s2, s3, c_in)
        kernel_size = (k2, k3)
        strides = (strides[1], strides[2])
        dilations = dilations[1:]
        output_padding = (output_padding[1], output_padding[2])
    elif op == tf.keras.layers.Conv3DTranspose:
        input_shape = (batch_size, s1, s2, s3, c_in)
        kernel_size = (k1, k2, k3)
    model = tf.keras.Sequential([
        op(
            batch_input_shape=input_shape,
            filters=c_out,
            kernel_size=kernel_size,
            strides=strides,
            padding=padding.upper(),
            output_padding=output_padding,
            data_format=data_format,
            dilation_rate=dilations,
        )
    ])
    run_compare_tf_keras(
        model,
        [random_gen(input_shape, rand_min=-10, rand_max=10)],
        use_cpu_only=use_cpu_only,
        backend=backend,
    )

def test_batch_normalization(self, use_cpu_only, backend, rank, axis,
                             momentum, epsilon):
    shape = np.random.randint(low=2, high=5, size=rank)
    model = tf.keras.Sequential([
        tf.keras.layers.BatchNormalization(
            batch_input_shape=shape,
            axis=axis,
            momentum=momentum,
            epsilon=epsilon,
        )
    ])
    run_compare_tf_keras(
        model,
        [random_gen(shape, rand_min=-10, rand_max=10)],
        use_cpu_only=use_cpu_only,
        backend=backend,
    )

def test_separable_conv(
    self,
    use_cpu_only,
    backend,
    op,
    padding,
    data_format,
    spatial_dim_and_ks,
    strides,
    dilations,
    batch_size,
):
    s1, s2, k1, k2 = spatial_dim_and_ks
    c_in, c_out = 2, 3
    input_shape = None
    kernel_size = None
    if op == tf.keras.layers.SeparableConv1D:
        input_shape = (batch_size, s2, c_in)
        kernel_size = k2
        strides = strides[1]
        dilations = dilations[1]
    elif op == tf.keras.layers.SeparableConv2D:
        input_shape = (batch_size, s1, s2, c_in)
        kernel_size = (k1, k2)
    model = tf.keras.Sequential([
        op(
            batch_input_shape=input_shape,
            filters=c_out,
            kernel_size=kernel_size,
            strides=strides,
            padding=padding.upper(),
            data_format=data_format,
            dilation_rate=dilations,
        )
    ])
    run_compare_tf_keras(
        model,
        [random_gen(input_shape, rand_min=-10, rand_max=10)],
        use_cpu_only=use_cpu_only,
        backend=backend,
    )

def test(self, use_cpu_only, backend, dims, batch_size, input_length):
    # input shape: 2D tensor (batch_size, input_length)
    # output shape: 3D tensor (batch_size, input_length, output_dim)
    shape = (batch_size, input_length)
    model = tf.keras.Sequential([
        tf.keras.layers.Embedding(
            batch_input_shape=shape,
            input_dim=dims[0],
            output_dim=dims[1],
            input_length=input_length,
        )
    ])
    run_compare_tf_keras(
        model,
        [random_gen(shape, rand_min=0, rand_max=dims[0])],
        use_cpu_only=use_cpu_only,
        backend=backend,
        atol=1e-3,
        rtol=1e-4,
    )

def test(self, use_cpu_only, backend, op, data_format, padding):
    shape = None
    kwargs = {}
    if op == tf.keras.layers.ZeroPadding1D:
        padding = padding[-1]
        shape = np.random.randint(low=2, high=4, size=3)
    elif op == tf.keras.layers.ZeroPadding2D:
        padding = padding[1:]
        kwargs = {"data_format": data_format}
        shape = np.random.randint(low=2, high=4, size=4)
    elif op == tf.keras.layers.ZeroPadding3D:
        kwargs = {"data_format": data_format}
        shape = np.random.randint(low=2, high=4, size=5)
    model = tf.keras.Sequential(
        [op(batch_input_shape=shape, padding=padding, **kwargs)]
    )
    run_compare_tf_keras(
        model,
        [random_gen(shape, rand_min=-10, rand_max=10)],
        use_cpu_only=use_cpu_only,
        backend=backend,
    )

def test_depth_wise_conv(
    self,
    use_cpu_only,
    backend,
    op,
    padding,
    data_format,
    spatial_dim_and_ks,
    strides,
    dilations,
    batch_size,
):
    s1, s2, k1, k2 = spatial_dim_and_ks
    c_in, c_out = 2, 6
    if len(strides) != np.sum(strides) and len(dilations) != np.sum(dilations):
        # TF produces incorrect output for non-one strides + dilations
        return
    input_shape = (batch_size, s1, s2, c_in)
    model = tf.keras.Sequential([
        op(
            batch_input_shape=input_shape,
            kernel_size=(k1, k2),
            strides=strides,
            padding=padding.upper(),
            data_format=data_format,
            dilation_rate=dilations,
        )
    ])
    run_compare_tf_keras(
        model,
        [random_gen(input_shape, rand_min=-10, rand_max=10)],
        use_cpu_only=use_cpu_only,
        backend=backend,
    )
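
# A hedged, self-contained sketch of the convert-and-compare flow that
# run_compare_tf_keras is assumed to automate for each model above.
# ct.convert(model) is a documented coremltools entry point for tf.keras
# models; the input-name lookup and the tolerances are illustrative
# assumptions, and mlmodel.predict requires macOS. Not collected by pytest
# (leading underscore); included only to show the shape of the comparison.
def _example_convert_and_compare():
    import coremltools as ct

    model = tf.keras.Sequential(
        [tf.keras.layers.Dense(units=4, batch_input_shape=(1, 8))]
    )
    mlmodel = ct.convert(model)  # convert the tf.keras model to Core ML

    x = np.random.uniform(-1, 1, size=(1, 8)).astype(np.float32)
    ref = model.predict(x)

    # Feed the converted model by its declared input name and compare outputs.
    input_name = mlmodel.get_spec().description.input[0].name
    out = mlmodel.predict({input_name: x})
    coreml_out = list(out.values())[0]
    np.testing.assert_allclose(ref, coreml_out, atol=1e-3, rtol=1e-4)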