Example #1
def test_zero_padding_1d():
    num_samples = 2
    input_dim = 2
    num_steps = 5
    shape = (num_samples, num_steps, input_dim)
    input = np.ones(shape)

    # basic test
    layer_test(convolutional.ZeroPadding1D,
               kwargs={'padding': 2},
               input_shape=input.shape)
    layer_test(convolutional.ZeroPadding1D,
               kwargs={'padding': (1, 2)},
               input_shape=input.shape)

    # correctness test
    layer = convolutional.ZeroPadding1D(padding=2)
    layer.build(shape)
    output = layer(K.variable(input))
    np_output = K.eval(output)
    for offset in [0, 1, -1, -2]:
        assert_allclose(np_output[:, offset, :], 0.)
    assert_allclose(np_output[:, 2:-2, :], 1.)

    layer = convolutional.ZeroPadding1D(padding=(1, 2))
    layer.build(shape)
    output = layer(K.variable(input))
    np_output = K.eval(output)
    for left_offset in [0]:
        assert_allclose(np_output[:, left_offset, :], 0.)
    for right_offset in [-1, -2]:
        assert_allclose(np_output[:, right_offset, :], 0.)
    assert_allclose(np_output[:, 1:-2, :], 1.)
    layer.get_config()
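
Note: these snippets are excerpted from the Keras layer test suites and rely on module-level imports that the excerpts omit. A minimal preamble that should cover most of the examples below (assuming the Keras 2.x layout, where the layer_test helper lives in keras.utils.test_utils) would be:

import numpy as np
import pytest
from numpy.testing import assert_allclose

from keras import backend as K
from keras.layers import convolutional
from keras.utils.test_utils import layer_test

Individual examples also pull in other modules (core, local, pooling, noise, normalization, regularizers, constraints, and so on) in the same way. Examples that use nb_filter, border_mode, dim_ordering and friends target the older Keras 1.x API and will not run unchanged against Keras 2.x.
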
Example #2
def basic_batchnorm_test():
    layer_test(normalization.BatchNormalization,
               kwargs={'mode': 1},
               input_shape=(3, 4, 2))
    layer_test(normalization.BatchNormalization,
               kwargs={'mode': 0},
               input_shape=(3, 4, 2))
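
For readers unfamiliar with the helper, layer_test roughly does the following: build the layer from kwargs, push random (or supplied) input through a one-layer model, compare the output shape against the layer's own shape inference, optionally compare values against expected_output, and round-trip the layer config. The sketch below is a simplified, hypothetical rendition of that behavior, not the actual keras-team implementation:

def layer_test_sketch(layer_cls, kwargs=None, input_shape=None,
                      input_data=None, expected_output=None):
    """Loose approximation of keras.utils.test_utils.layer_test."""
    import numpy as np
    from keras.layers import Input
    from keras.models import Model

    kwargs = kwargs or {}
    if input_data is None:
        input_data = 10 * np.random.random(input_shape)
    layer = layer_cls(**kwargs)

    x = Input(batch_shape=input_data.shape)
    y = layer(x)
    model = Model(x, y)
    actual_output = model.predict(input_data)

    # The observed output shape must agree with the layer's shape inference.
    assert actual_output.shape == layer.compute_output_shape(input_data.shape)

    if expected_output is not None:
        np.testing.assert_allclose(actual_output,
                                   np.asarray(expected_output), atol=1e-4)

    # The layer must be reconstructible from its own config.
    layer_cls.from_config(layer.get_config())
    return actual_output
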
Example #3
def test_convolution_3d_additional_args():
    num_samples = 2
    filters = 2
    stack_size = 3
    padding = 'valid'
    strides = (2, 2, 2)

    input_len_dim1 = 9
    input_len_dim2 = 8
    input_len_dim3 = 8

    layer_test(convolutional.Convolution3D,
               kwargs={'filters': filters,
                       'kernel_size': (1, 2, 3),
                       'padding': padding,
                       'activation': None,
                       'kernel_regularizer': 'l2',
                       'bias_regularizer': 'l2',
                       'activity_regularizer': 'l2',
                       'kernel_constraint': 'max_norm',
                       'bias_constraint': 'max_norm',
                       'strides': strides},
               input_shape=(num_samples,
                            input_len_dim1, input_len_dim2, input_len_dim3,
                            stack_size))
Example #4
def test_upsampling_2d_bilinear(data_format):
    num_samples = 2
    stack_size = 2
    input_num_row = 11
    input_num_col = 12

    if data_format == 'channels_first':
        inputs = np.random.rand(num_samples, stack_size, input_num_row,
                                input_num_col)
    else:  # tf
        inputs = np.random.rand(num_samples, input_num_row, input_num_col,
                                stack_size)

    # basic test
    layer_test(convolutional.UpSampling2D,
               kwargs={'size': (2, 2),
                       'data_format': data_format,
                       'interpolation': 'bilinear'},
               input_shape=inputs.shape)

    for length_row in [2]:
        for length_col in [2, 3]:
            layer = convolutional.UpSampling2D(
                size=(length_row, length_col),
                data_format=data_format)
            layer.build(inputs.shape)
            outputs = layer(K.variable(inputs))
            np_output = K.eval(outputs)
            if data_format == 'channels_first':
                assert np_output.shape[2] == length_row * input_num_row
                assert np_output.shape[3] == length_col * input_num_col
            else:  # tf
                assert np_output.shape[1] == length_row * input_num_row
                assert np_output.shape[2] == length_col * input_num_col
Example #5
def test_separable_conv_2d_additional_args():
    num_samples = 2
    filters = 6
    stack_size = 3
    num_row = 7
    num_col = 6
    padding = 'valid'
    strides = (2, 2)
    multiplier = 2

    layer_test(convolutional.SeparableConv2D,
               kwargs={'filters': filters,
                       'kernel_size': 3,
                       'padding': padding,
                       'data_format': 'channels_first',
                       'activation': None,
                       'depthwise_regularizer': 'l2',
                       'pointwise_regularizer': 'l2',
                       'bias_regularizer': 'l2',
                       'activity_regularizer': 'l2',
                       'pointwise_constraint': 'unit_norm',
                       'depthwise_constraint': 'unit_norm',
                       'strides': strides,
                       'depth_multiplier': multiplier},
               input_shape=(num_samples, stack_size, num_row, num_col))
Example #6
def test_conv_1d(padding, strides):
    batch_size = 2
    steps = 8
    input_dim = 2
    kernel_size = 3
    filters = 3

    layer_test(convolutional.Conv1D,
               kwargs={'filters': filters,
                       'kernel_size': kernel_size,
                       'padding': padding,
                       'strides': strides},
               input_shape=(batch_size, steps, input_dim))

    layer_test(convolutional.Conv1D,
               kwargs={'filters': filters,
                       'kernel_size': kernel_size,
                       'padding': padding,
                       'kernel_regularizer': 'l2',
                       'bias_regularizer': 'l2',
                       'activity_regularizer': 'l2',
                       'kernel_constraint': 'max_norm',
                       'bias_constraint': 'max_norm',
                       'strides': strides},
               input_shape=(batch_size, steps, input_dim))
Example #7
def test_atrous_conv_2d():
    nb_samples = 2
    nb_filter = 2
    stack_size = 3
    nb_row = 10
    nb_col = 6

    for border_mode in _convolution_border_modes:
        for subsample in [(1, 1), (2, 2)]:
            for atrous_rate in [(1, 1), (2, 2)]:
                if border_mode == 'same' and subsample != (1, 1):
                    continue
                if subsample != (1, 1) and atrous_rate != (1, 1):
                    continue

                layer_test(convolutional.AtrousConv2D,
                           kwargs={'nb_filter': nb_filter,
                                   'nb_row': 3,
                                   'nb_col': 3,
                                   'border_mode': border_mode,
                                   'subsample': subsample,
                                   'atrous_rate': atrous_rate},
                           input_shape=(nb_samples, nb_row, nb_col, stack_size))

                layer_test(convolutional.AtrousConv2D,
                           kwargs={'nb_filter': nb_filter,
                                   'nb_row': 3,
                                   'nb_col': 3,
                                   'border_mode': border_mode,
                                   'W_regularizer': 'l2',
                                   'b_regularizer': 'l2',
                                   'activity_regularizer': 'activity_l2',
                                   'subsample': subsample,
                                   'atrous_rate': atrous_rate},
                           input_shape=(nb_samples, nb_row, nb_col, stack_size))
Example #8
def test_convolution_1d():
    nb_samples = 2
    nb_steps = 8
    input_dim = 2
    filter_length = 3
    nb_filter = 3

    for border_mode in ['valid', 'same']:
        for subsample_length in [1]:
            if border_mode == 'same' and subsample_length != 1:
                continue
            layer_test(convolutional.Convolution1D,
                       kwargs={'nb_filter': nb_filter,
                               'filter_length': filter_length,
                               'border_mode': border_mode,
                               'subsample_length': subsample_length},
                       input_shape=(nb_samples, nb_steps, input_dim))

            layer_test(convolutional.Convolution1D,
                       kwargs={'nb_filter': nb_filter,
                               'filter_length': filter_length,
                               'border_mode': border_mode,
                               'W_regularizer': 'l2',
                               'b_regularizer': 'l2',
                               'activity_regularizer': 'activity_l2',
                               'subsample_length': subsample_length},
                       input_shape=(nb_samples, nb_steps, input_dim))
Example #9
def test_zero_padding_3d():
    nb_samples = 2
    stack_size = 2
    input_len_dim1 = 4
    input_len_dim2 = 5
    input_len_dim3 = 3

    input = np.ones((nb_samples,
                     input_len_dim1, input_len_dim2, input_len_dim3,
                     stack_size))

    # basic test
    layer_test(convolutional.ZeroPadding3D,
               kwargs={'padding': (2, 2, 2)},
               input_shape=input.shape)

    # correctness test
    layer = convolutional.ZeroPadding3D(padding=(2, 2, 2))
    layer.build(input.shape)
    output = layer(K.variable(input))
    np_output = K.eval(output)
    for offset in [0, 1, -1, -2]:
        assert_allclose(np_output[:, offset, :, :, :], 0.)
        assert_allclose(np_output[:, :, offset, :, :], 0.)
        assert_allclose(np_output[:, :, :, offset, :], 0.)
    assert_allclose(np_output[:, 2:-2, 2:-2, 2:-2, :], 1.)
    layer.get_config()
Example #10
def test_zero_padding_3d():
    nb_samples = 9
    stack_size = 7
    input_len_dim1 = 10
    input_len_dim2 = 11
    input_len_dim3 = 12

    input = np.ones((nb_samples, stack_size, input_len_dim1,
                     input_len_dim2, input_len_dim3))

    # basic test
    layer_test(convolutional.ZeroPadding3D,
               kwargs={'padding': (2, 2, 2)},
               input_shape=input.shape)

    # correctness test
    layer = convolutional.ZeroPadding3D(padding=(2, 2, 2))
    layer.set_input(K.variable(input), shape=input.shape)
    out = K.eval(layer.output)
    for offset in [0, 1, -1, -2]:
        assert_allclose(out[:, :, offset, :, :], 0.)
        assert_allclose(out[:, :, :, offset, :], 0.)
        assert_allclose(out[:, :, :, :, offset], 0.)
    assert_allclose(out[:, :, 2:-2, 2:-2, 2:-2], 1.)
    layer.get_config()
Example #11
def test_maxpooling_2d(strides):
    pool_size = (3, 3)
    layer_test(convolutional.MaxPooling2D,
               kwargs={'strides': strides,
                       'padding': 'valid',
                       'pool_size': pool_size},
               input_shape=(3, 5, 6, 4))
Example #12
def test_averagepooling_2d(strides, padding, data_format, input_shape):
    layer_test(convolutional.AveragePooling2D,
               kwargs={'strides': strides,
                       'padding': padding,
                       'pool_size': (2, 2),
                       'data_format': data_format},
               input_shape=input_shape)
Example #13
def test_locallyconnected_1d():
    nb_samples = 2
    nb_steps = 8
    input_dim = 5
    filter_length = 3
    nb_filter = 4

    for border_mode in ['valid']:
        for subsample_length in [1]:
            if border_mode == 'same' and subsample_length != 1:
                continue
            layer_test(local.LocallyConnected1D,
                       kwargs={'nb_filter': nb_filter,
                               'filter_length': filter_length,
                               'border_mode': border_mode,
                               'subsample_length': subsample_length},
                       input_shape=(nb_samples, nb_steps, input_dim))

            layer_test(local.LocallyConnected1D,
                       kwargs={'nb_filter': nb_filter,
                               'filter_length': filter_length,
                               'border_mode': border_mode,
                               'W_regularizer': 'l2',
                               'b_regularizer': 'l2',
                               'activity_regularizer': 'activity_l2',
                               'subsample_length': subsample_length},
                       input_shape=(nb_samples, nb_steps, input_dim))
Example #14
def test_locallyconnected_2d():
    nb_samples = 8
    nb_filter = 3
    stack_size = 4
    nb_row = 6
    nb_col = 10

    for border_mode in ['valid']:
        for subsample in [(1, 1), (2, 2)]:
            if border_mode == 'same' and subsample != (1, 1):
                continue

            layer_test(local.LocallyConnected2D,
                       kwargs={'nb_filter': nb_filter,
                               'nb_row': 3,
                               'nb_col': 3,
                               'border_mode': border_mode,
                               'W_regularizer': 'l2',
                               'b_regularizer': 'l2',
                               'activity_regularizer': 'activity_l2',
                               'subsample': subsample,
                               'dim_ordering': 'tf'},
                       input_shape=(nb_samples, nb_row, nb_col, stack_size))

            layer_test(local.LocallyConnected2D,
                       kwargs={'nb_filter': nb_filter,
                               'nb_row': 3,
                               'nb_col': 3,
                               'border_mode': border_mode,
                               'W_regularizer': 'l2',
                               'b_regularizer': 'l2',
                               'activity_regularizer': 'activity_l2',
                               'subsample': subsample,
                               'dim_ordering': 'th'},
                       input_shape=(nb_samples, stack_size, nb_row, nb_col))
Example #15
def test_maxpooling_1d():
    for border_mode in ['valid', 'same']:
        for stride in [1, 2]:
            layer_test(convolutional.MaxPooling1D,
                       kwargs={'stride': stride,
                               'border_mode': border_mode},
                       input_shape=(3, 5, 4))
Example #16
def test_lambda():
    layer_test(layers.Lambda,
               kwargs={'function': lambda x: x + 1},
               input_shape=(3, 2))

    layer_test(layers.Lambda,
               kwargs={'function': lambda x, a, b: x * a + b,
                       'arguments': {'a': 0.6, 'b': 0.4}},
               input_shape=(3, 2))

    # test serialization with function
    def f(x):
        return x + 1

    ld = layers.Lambda(f)
    config = ld.get_config()
    ld = deserialize_layer({'class_name': 'Lambda', 'config': config})

    # test with lambda
    ld = layers.Lambda(
        lambda x: K.concatenate([K.square(x), x]),
        output_shape=lambda s: tuple(list(s)[:-1] + [2 * s[-1]]))
    config = ld.get_config()
    ld = layers.Lambda.from_config(config)

    # test serialization with output_shape function
    def f(x):
        return K.concatenate([K.square(x), x])

    def f_shape(s):
        return tuple(list(s)[:-1] + [2 * s[-1]])

    ld = layers.Lambda(f, output_shape=f_shape)
    config = ld.get_config()
    ld = deserialize_layer({'class_name': 'Lambda', 'config': config})
Example #17
def test_cropping_2d():
    nb_samples = 2
    stack_size = 2
    input_len_dim1 = 8
    input_len_dim2 = 8
    cropping = ((2, 2), (3, 3))
    dim_ordering = K.image_dim_ordering()

    if dim_ordering == 'th':
        input = np.random.rand(nb_samples, stack_size, input_len_dim1, input_len_dim2)
    else:
        input = np.random.rand(nb_samples, input_len_dim1, input_len_dim2, stack_size)
    # basic test
    layer_test(convolutional.Cropping2D,
               kwargs={'cropping': cropping,
                       'dim_ordering': dim_ordering},
               input_shape=input.shape)
    # correctness test
    layer = convolutional.Cropping2D(cropping=cropping, dim_ordering=dim_ordering)
    layer.set_input(K.variable(input), shape=input.shape)

    out = K.eval(layer.output)
    # compare with numpy
    if dim_ordering == 'th':
        expected_out = input[:,
                             :,
                             cropping[0][0]:-cropping[0][1],
                             cropping[1][0]:-cropping[1][1]]
    else:
        expected_out = input[:,
                             cropping[0][0]:-cropping[0][1],
                             cropping[1][0]:-cropping[1][1],
                             :]

    assert_allclose(out, expected_out)
Example #18
def test_zero_padding_3d():
    num_samples = 2
    stack_size = 2
    input_len_dim1 = 4
    input_len_dim2 = 5
    input_len_dim3 = 3

    inputs = np.ones((num_samples,
                     input_len_dim1, input_len_dim2, input_len_dim3,
                     stack_size))

    # basic test
    for data_format in ['channels_first', 'channels_last']:
        layer_test(convolutional.ZeroPadding3D,
                   kwargs={'padding': (2, 2, 2), 'data_format': data_format},
                   input_shape=inputs.shape)
        layer_test(convolutional.ZeroPadding3D,
                   kwargs={'padding': ((1, 2), (3, 4), (0, 2)), 'data_format': data_format},
                   input_shape=inputs.shape)

        # correctness test
        layer = convolutional.ZeroPadding3D(padding=(2, 2, 2),
                                            data_format=data_format)
        layer.build(inputs.shape)
        outputs = layer(K.variable(inputs))
        np_output = K.eval(outputs)
        if data_format == 'channels_last':
            for offset in [0, 1, -1, -2]:
                assert_allclose(np_output[:, offset, :, :, :], 0.)
                assert_allclose(np_output[:, :, offset, :, :], 0.)
                assert_allclose(np_output[:, :, :, offset, :], 0.)
            assert_allclose(np_output[:, 2:-2, 2:-2, 2:-2, :], 1.)
        elif data_format == 'channels_first':
            for offset in [0, 1, -1, -2]:
                assert_allclose(np_output[:, :, offset, :, :], 0.)
                assert_allclose(np_output[:, :, :, offset, :], 0.)
                assert_allclose(np_output[:, :, :, :, offset], 0.)
            assert_allclose(np_output[:, :, 2:-2, 2:-2, 2:-2], 1.)

        layer = convolutional.ZeroPadding3D(padding=((1, 2), (3, 4), (0, 2)),
                                            data_format=data_format)
        layer.build(inputs.shape)
        outputs = layer(K.variable(inputs))
        np_output = K.eval(outputs)
        if data_format == 'channels_last':
            for dim1_offset in [0, -1, -2]:
                assert_allclose(np_output[:, dim1_offset, :, :, :], 0.)
            for dim2_offset in [0, 1, 2, -1, -2, -3, -4]:
                assert_allclose(np_output[:, :, dim2_offset, :, :], 0.)
            for dim3_offset in [-1, -2]:
                assert_allclose(np_output[:, :, :, dim3_offset, :], 0.)
            assert_allclose(np_output[:, 1:-2, 3:-4, 0:-2, :], 1.)
        elif data_format == 'channels_first':
            for dim1_offset in [0, -1, -2]:
                assert_allclose(np_output[:, :, dim1_offset, :, :], 0.)
            for dim2_offset in [0, 1, 2, -1, -2, -3, -4]:
                assert_allclose(np_output[:, :, :, dim2_offset, :], 0.)
            for dim3_offset in [-1, -2]:
                assert_allclose(np_output[:, :, :, :, dim3_offset], 0.)
            assert_allclose(np_output[:, :, 1:-2, 3:-4, 0:-2], 1.)
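
The asymmetric case above can be cross-checked directly with np.pad; for the channels_last input of shape (2, 4, 5, 3, 2), padding ((1, 2), (3, 4), (0, 2)) on the three spatial axes gives:

import numpy as np

inputs = np.ones((2, 4, 5, 3, 2))
padded = np.pad(inputs, ((0, 0), (1, 2), (3, 4), (0, 2), (0, 0)),
                mode='constant')
print(padded.shape)                            # (2, 7, 12, 5, 2)
print(padded[:, 1:-2, 3:-4, 0:-2, :].min())    # 1.0 -- the interior is untouched

which matches the slices asserted in the correctness test.
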
Example #19
def test_dense():
    from keras import regularizers
    from keras import constraints

    layer_test(core.Dense,
               kwargs={'output_dim': 3},
               input_shape=(3, 2))

    layer_test(core.Dense,
               kwargs={'output_dim': 3},
               input_shape=(3, 4, 2))

    layer_test(core.Dense,
               kwargs={'output_dim': 3},
               input_shape=(None, None, 2))

    layer_test(core.Dense,
               kwargs={'output_dim': 3},
               input_shape=(3, 4, 5, 2))

    layer_test(core.Dense,
               kwargs={'output_dim': 3,
                       'W_regularizer': regularizers.l2(0.01),
                       'b_regularizer': regularizers.l1(0.01),
                       'activity_regularizer': regularizers.activity_l2(0.01),
                       'W_constraint': constraints.MaxNorm(1),
                       'b_constraint': constraints.MaxNorm(1)},
               input_shape=(3, 2))
Example #20
def test_atrous_conv_1d():
    nb_samples = 2
    nb_steps = 8
    input_dim = 2
    filter_length = 3
    nb_filter = 3

    for border_mode in _convolution_border_modes:
        for subsample_length in [1, 2]:
            for atrous_rate in [1, 2]:
                if border_mode == 'same' and subsample_length != 1:
                    continue
                if subsample_length != 1 and atrous_rate != 1:
                    continue

                layer_test(convolutional.AtrousConv1D,
                           kwargs={'nb_filter': nb_filter,
                                   'filter_length': filter_length,
                                   'border_mode': border_mode,
                                   'subsample_length': subsample_length,
                                   'atrous_rate': atrous_rate},
                           input_shape=(nb_samples, nb_steps, input_dim))

                layer_test(convolutional.AtrousConv1D,
                           kwargs={'nb_filter': nb_filter,
                                   'filter_length': filter_length,
                                   'border_mode': border_mode,
                                   'W_regularizer': 'l2',
                                   'b_regularizer': 'l2',
                                   'activity_regularizer': 'activity_l2',
                                   'subsample_length': subsample_length,
                                   'atrous_rate': atrous_rate},
                           input_shape=(nb_samples, nb_steps, input_dim))
Example #21
def test_conv2d_transpose_dilation():

    layer_test(convolutional.Conv2DTranspose,
               kwargs={'filters': 2,
                       'kernel_size': 3,
                       'padding': 'same',
                       'data_format': 'channels_last',
                       'dilation_rate': (2, 2)},
               input_shape=(2, 5, 6, 3))

    # Check dilated conv transpose returns expected output
    input_data = np.arange(48).reshape((1, 4, 4, 3)).astype(np.float32)
    expected_output = np.float32([[192, 228, 192, 228],
                                  [336, 372, 336, 372],
                                  [192, 228, 192, 228],
                                  [336, 372, 336, 372]]).reshape((1, 4, 4, 1))

    layer_test(convolutional.Conv2DTranspose,
               input_data=input_data,
               kwargs={'filters': 1,
                       'kernel_size': 3,
                       'padding': 'same',
                       'data_format': 'channels_last',
                       'dilation_rate': (2, 2),
                       'kernel_initializer': 'ones'},
               expected_output=expected_output)
Example #22
def test_convolution_2d():
    nb_samples = 2
    nb_filter = 2
    stack_size = 3
    nb_row = 10
    nb_col = 6

    for border_mode in ['valid', 'same']:
        for subsample in [(1, 1), (2, 2)]:
            if border_mode == 'same' and subsample != (1, 1):
                continue

            layer_test(convolutional.Convolution2D,
                       kwargs={'nb_filter': nb_filter,
                               'nb_row': 3,
                               'nb_col': 3,
                               'border_mode': border_mode,
                               'subsample': subsample},
                       input_shape=(nb_samples, stack_size, nb_row, nb_col))

            layer_test(convolutional.Convolution2D,
                       kwargs={'nb_filter': nb_filter,
                               'nb_row': 3,
                               'nb_col': 3,
                               'border_mode': border_mode,
                               'W_regularizer': 'l2',
                               'b_regularizer': 'l2',
                               'activity_regularizer': 'activity_l2',
                               'subsample': subsample},
                       input_shape=(nb_samples, stack_size, nb_row, nb_col))
Example #23
def test_dense():
    layer_test(layers.Dense,
               kwargs={'units': 3},
               input_shape=(3, 2))

    layer_test(layers.Dense,
               kwargs={'units': 3},
               input_shape=(3, 4, 2))

    layer_test(layers.Dense,
               kwargs={'units': 3},
               input_shape=(None, None, 2))

    layer_test(layers.Dense,
               kwargs={'units': 3},
               input_shape=(3, 4, 5, 2))

    layer_test(layers.Dense,
               kwargs={'units': 3,
                       'kernel_regularizer': regularizers.l2(0.01),
                       'bias_regularizer': regularizers.l1(0.01),
                       'activity_regularizer': regularizers.L1L2(l1=0.01, l2=0.01),
                       'kernel_constraint': constraints.MaxNorm(1),
                       'bias_constraint': constraints.max_norm(1)},
               input_shape=(3, 2))

    layer = layers.Dense(3,
                         kernel_regularizer=regularizers.l1(0.01),
                         bias_regularizer='l1')
    layer.build((None, 4))
    assert len(layer.losses) == 2
Example #24
def test_dropout(layer_class):
    for unroll in [True, False]:
        layer_test(layer_class,
                   kwargs={'units': units,
                           'dropout': 0.1,
                           'recurrent_dropout': 0.1,
                           'unroll': unroll},
                   input_shape=(num_samples, timesteps, embedding_dim))

        # Test that dropout is applied during training
        x = K.ones((num_samples, timesteps, embedding_dim))
        layer = layer_class(units, dropout=0.5, recurrent_dropout=0.5,
                            input_shape=(timesteps, embedding_dim))
        y = layer(x)
        assert y._uses_learning_phase

        y = layer(x, training=True)
        assert not getattr(y, '_uses_learning_phase')

        # Test that dropout is not applied during testing
        x = np.random.random((num_samples, timesteps, embedding_dim))
        layer = layer_class(units, dropout=0.5, recurrent_dropout=0.5,
                            unroll=unroll,
                            input_shape=(timesteps, embedding_dim))
        model = Sequential([layer])
        assert model.uses_learning_phase
        y1 = model.predict(x)
        y2 = model.predict(x)
        assert_allclose(y1, y2)
Example #25
def test_parametric_softplus():
    from keras.layers.advanced_activations import ParametricSoftplus
    for alpha in [0., .5, -1.]:
        layer_test(ParametricSoftplus,
                   kwargs={'alpha_init': 1.,
                           'beta_init': -1},
                   input_shape=(2, 3, 4))
Example #26
def test_separable_conv_2d():
    num_samples = 2
    filters = 6
    stack_size = 3
    num_row = 7
    num_col = 6

    for padding in _convolution_paddings:
        for strides in [(1, 1), (2, 2)]:
            for multiplier in [1, 2]:
                if padding == 'same' and strides != (1, 1):
                    continue

                layer_test(convolutional.SeparableConv2D,
                           kwargs={'filters': filters,
                                   'kernel_size': (3, 3),
                                   'padding': padding,
                                   'strides': strides,
                                   'depth_multiplier': multiplier},
                           input_shape=(num_samples, num_row, num_col, stack_size))

    layer_test(convolutional.SeparableConv2D,
               kwargs={'filters': filters,
                       'kernel_size': 3,
                       'padding': padding,
                       'depthwise_regularizer': 'l2',
                       'pointwise_regularizer': 'l2',
                       'bias_regularizer': 'l2',
                       'activity_regularizer': 'l2',
                       'pointwise_constraint': 'unit_norm',
                       'depthwise_constraint': 'unit_norm',
                       'strides': strides,
                       'depth_multiplier': multiplier},
               input_shape=(num_samples, num_row, num_col, stack_size))
Example #27
def test_conv2d_transpose():
    num_samples = 2
    filters = 2
    stack_size = 3
    num_row = 5
    num_col = 6

    for padding in _convolution_paddings:
        for strides in [(1, 1), (2, 2)]:
            if padding == 'same' and strides != (1, 1):
                continue
            layer_test(convolutional.Deconvolution2D,
                       kwargs={'filters': filters,
                               'kernel_size': 3,
                               'padding': padding,
                               'strides': strides,
                               'data_format': 'channels_last'},
                       input_shape=(num_samples, num_row, num_col, stack_size),
                       fixed_batch_size=True)

    layer_test(convolutional.Deconvolution2D,
               kwargs={'filters': filters,
                       'kernel_size': 3,
                       'padding': padding,
                       'data_format': 'channels_first',
                       'kernel_regularizer': 'l2',
                       'bias_regularizer': 'l2',
                       'activity_regularizer': 'l2',
                       'kernel_constraint': 'max_norm',
                       'bias_constraint': 'max_norm',
                       'strides': strides},
               input_shape=(num_samples, stack_size, num_row, num_col),
               fixed_batch_size=True)
Example #28
def test_averagepooling_1d():
    for padding in ['valid', 'same']:
        for stride in [1, 2]:
            layer_test(convolutional.AveragePooling1D,
                       kwargs={'strides': stride,
                               'padding': padding},
                       input_shape=(3, 5, 4))
Example #29
def test_dropout():
    layer_test(layers.Dropout,
               kwargs={'rate': 0.5},
               input_shape=(3, 2))

    layer_test(layers.Dropout,
               kwargs={'rate': 0.5, 'noise_shape': [3, 1]},
               input_shape=(3, 2))

    layer_test(layers.SpatialDropout1D,
               kwargs={'rate': 0.5},
               input_shape=(2, 3, 4))

    for data_format in ['channels_last', 'channels_first']:
        for shape in [(4, 5), (4, 5, 6)]:
            if data_format == 'channels_last':
                input_shape = (2,) + shape + (3,)
            else:
                input_shape = (2, 3) + shape
            layer_test(layers.SpatialDropout2D if len(shape) == 2 else layers.SpatialDropout3D,
                       kwargs={'rate': 0.5,
                               'data_format': data_format},
                       input_shape=input_shape)

            # Test invalid use cases
            with pytest.raises(ValueError):
                layer_test(layers.SpatialDropout2D if len(shape) == 2 else layers.SpatialDropout3D,
                           kwargs={'rate': 0.5,
                                   'data_format': 'channels_middle'},
                           input_shape=input_shape)
Example #30
def test_lambda():
    from keras.utils.layer_utils import layer_from_config
    Lambda = core.Lambda

    layer_test(Lambda,
               kwargs={'function': lambda x: x + 1},
               input_shape=(3, 2))

    # test serialization with function
    def f(x):
        return x + 1

    ld = Lambda(f)
    config = ld.get_config()
    ld = layer_from_config({'class_name': 'Lambda', 'config': config})

    ld = Lambda(lambda x: K.concatenate([K.square(x), x]),
                output_shape=lambda s: tuple(list(s)[:-1] + [2 * s[-1]]))
    config = ld.get_config()
    ld = Lambda.from_config(config)

    # test serialization with output_shape function
    def f(x):
        return K.concatenate([K.square(x), x])

    def f_shape(s):
        return tuple(list(s)[:-1] + [2 * s[-1]])

    ld = Lambda(f, output_shape=f_shape)
    config = ld.get_config()
    ld = layer_from_config({'class_name': 'Lambda', 'config': config})
Example #31
def test_reshape():
    layer_test(core.Reshape,
               kwargs={'target_shape': (8, 1)},
               input_shape=(3, 2, 4))

    layer_test(core.Reshape,
               kwargs={'target_shape': (-1, 1)},
               input_shape=(3, 2, 4))

    layer_test(core.Reshape,
               kwargs={'target_shape': (1, -1)},
               input_shape=(3, 2, 4))
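
As a quick sanity check of the -1 handling above: target_shape excludes the batch dimension, and -1 lets the layer infer the remaining size, so the three calls are equivalent to the following NumPy reshapes of a (3, 2, 4) batch:

import numpy as np

x = np.arange(3 * 2 * 4).reshape(3, 2, 4)   # batch of 3 samples, each (2, 4)
print(x.reshape(3, 8, 1).shape)             # target_shape (8, 1)  -> (3, 8, 1)
print(x.reshape(3, -1, 1).shape)            # target_shape (-1, 1) -> (3, 8, 1)
print(x.reshape(3, 1, -1).shape)            # target_shape (1, -1) -> (3, 1, 8)
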
Example #32
def DISABLED_test_convolutional_recurrent(data_format, return_sequences, use_mask):

    class Masking5D(Masking):
        """Regular masking layer returns wrong shape of mask for RNN"""
        def compute_mask(self, inputs, mask=None):
            return K.any(K.not_equal(inputs, 0.), axis=[2, 3, 4])

    if data_format == 'channels_first':
        inputs = np.random.rand(num_samples, sequence_len,
                                input_channel,
                                input_num_row, input_num_col)
    else:
        inputs = np.random.rand(num_samples, sequence_len,
                                input_num_row, input_num_col,
                                input_channel)

    # test for return state:
    x = Input(batch_shape=inputs.shape)
    kwargs = {'data_format': data_format,
              'return_sequences': return_sequences,
              'return_state': True,
              'stateful': True,
              'filters': filters,
              'kernel_size': (num_row, num_col),
              'padding': 'valid'}
    layer = convolutional_recurrent.ConvLSTM2D(**kwargs)
    layer.build(inputs.shape)
    if use_mask:
        outputs = layer(Masking5D()(x))
    else:
        outputs = layer(x)
    output, states = outputs[0], outputs[1:]
    assert len(states) == 2
    model = Model(x, states[0])
    state = model.predict(inputs)
    np.testing.assert_allclose(K.eval(layer.states[0]), state, atol=1e-4)

    # test for output shape:
    output = layer_test(convolutional_recurrent.ConvLSTM2D,
                        kwargs={'data_format': data_format,
                                'return_sequences': return_sequences,
                                'filters': filters,
                                'kernel_size': (num_row, num_col),
                                'padding': 'valid'},
                        input_shape=inputs.shape)
Example #33
def test_convolutional_recurrent():

    for data_format in ['channels_first', 'channels_last']:

        if data_format == 'channels_first':
            inputs = np.random.rand(num_samples, sequence_len, input_channel,
                                    input_num_row, input_num_col)
        else:
            inputs = np.random.rand(num_samples, sequence_len, input_num_row,
                                    input_num_col, input_channel)

        for return_sequences in [True, False]:

            # test for return state:
            x = Input(batch_shape=inputs.shape)
            kwargs = {
                'data_format': data_format,
                'return_sequences': return_sequences,
                'return_state': True,
                'stateful': True,
                'filters': filters,
                'kernel_size': (num_row, num_col),
                'padding': 'valid'
            }
            layer = convolutional_recurrent.ConvLSTM2D(**kwargs)
            layer.build(inputs.shape)
            outputs = layer(x)
            output, states = outputs[0], outputs[1:]
            assert len(states) == 2
            model = Model(x, states[0])
            state = model.predict(inputs)
            np.testing.assert_allclose(K.eval(layer.states[0]),
                                       state,
                                       atol=1e-4)

            # test for output shape:
            output = layer_test(convolutional_recurrent.ConvLSTM2D,
                                kwargs={
                                    'data_format': data_format,
                                    'return_sequences': return_sequences,
                                    'filters': filters,
                                    'kernel_size': (num_row, num_col),
                                    'padding': 'valid'
                                },
                                input_shape=inputs.shape)
Example #34
def test_dense():
    from keras import regularizers
    from keras import constraints

    layer_test(core.Dense, kwargs={'output_dim': 3}, input_shape=(3, 2))

    layer_test(core.Dense, kwargs={'output_dim': 3}, input_shape=(3, 4, 2))

    layer_test(core.Dense, kwargs={'output_dim': 3}, input_shape=(3, 4, 5, 2))

    layer_test(core.Dense,
               kwargs={
                   'output_dim': 3,
                   'W_regularizer': regularizers.l2(0.01),
                   'b_regularizer': regularizers.l1(0.01),
                   'activity_regularizer': regularizers.activity_l2(0.01),
                   'W_constraint': constraints.MaxNorm(1),
                   'b_constraint': constraints.MaxNorm(1)
               },
               input_shape=(3, 2))
Example #35
def test_convolution_2d():
    num_samples = 2
    filters = 2
    stack_size = 3
    kernel_size = (3, 2)
    num_row = 7
    num_col = 6

    for padding in _convolution_paddings:
        for strides in [(1, 1), (2, 2)]:
            if padding == 'same' and strides != (1, 1):
                continue

            layer_test(convolutional.Conv2D,
                       kwargs={'filters': filters,
                               'kernel_size': kernel_size,
                               'padding': padding,
                               'strides': strides,
                               'data_format': 'channels_first'},
                       input_shape=(num_samples, stack_size, num_row, num_col))

    layer_test(convolutional.Conv2D,
               kwargs={'filters': filters,
                       'kernel_size': 3,
                       'padding': padding,
                       'data_format': 'channels_last',
                       'activation': None,
                       'kernel_regularizer': 'l2',
                       'bias_regularizer': 'l2',
                       'activity_regularizer': 'l2',
                       'kernel_constraint': 'max_norm',
                       'bias_constraint': 'max_norm',
                       'strides': strides},
               input_shape=(num_samples, num_row, num_col, stack_size))

    # Test dilation
    layer_test(convolutional.Conv2D,
               kwargs={'filters': filters,
                       'kernel_size': kernel_size,
                       'padding': padding,
                       'dilation_rate': (2, 2)},
               input_shape=(num_samples, num_row, num_col, stack_size))

    # Test invalid use case
    with pytest.raises(ValueError):
        model = Sequential([convolutional.Conv2D(filters=filters,
                                                 kernel_size=kernel_size,
                                                 padding=padding,
                                                 batch_input_shape=(None, None, 5, None))])
Example #36
def test_conv_1d():
    batch_size = 2
    steps = 8
    input_dim = 2
    kernel_size = 3
    filters = 3

    for padding in _convolution_paddings:
        for strides in [1, 2]:
            if padding == 'same' and strides != 1:
                continue
            layer_test(convolutional.Conv1D,
                       kwargs={
                           'filters': filters,
                           'kernel_size': kernel_size,
                           'padding': padding,
                           'strides': strides
                       },
                       input_shape=(batch_size, steps, input_dim))

            layer_test(convolutional.Conv1D,
                       kwargs={
                           'filters': filters,
                           'kernel_size': kernel_size,
                           'padding': padding,
                           'kernel_regularizer': 'l2',
                           'bias_regularizer': 'l2',
                           'activity_regularizer': 'l2',
                           'kernel_constraint': 'max_norm',
                           'bias_constraint': 'max_norm',
                           'strides': strides
                       },
                       input_shape=(batch_size, steps, input_dim))

    # Test dilation
    layer_test(convolutional.Conv1D,
               kwargs={
                   'filters': filters,
                   'kernel_size': kernel_size,
                   'padding': padding,
                   'dilation_rate': 2,
                   'activation': None
               },
               input_shape=(batch_size, steps, input_dim))

    convolutional.Conv1D(filters=filters,
                         kernel_size=kernel_size,
                         padding=padding,
                         input_shape=(input_dim, ))
Example #37
def test_embedding():
    layer_test(Embedding,
               kwargs={
                   'output_dim': 4,
                   'input_dim': 10,
                   'input_length': 2
               },
               input_shape=(3, 2),
               input_dtype='int32',
               expected_output_dtype=K.floatx())
    layer_test(Embedding,
               kwargs={
                   'output_dim': 4,
                   'input_dim': 10,
                   'mask_zero': True
               },
               input_shape=(3, 2),
               input_dtype='int32',
               expected_output_dtype=K.floatx())
    layer_test(Embedding,
               kwargs={
                   'output_dim': 4,
                   'input_dim': 10,
                   'mask_zero': True
               },
               input_shape=(3, 2, 5),
               input_dtype='int32',
               expected_output_dtype=K.floatx())
    layer_test(Embedding,
               kwargs={
                   'output_dim': 4,
                   'input_dim': 10,
                   'mask_zero': True,
                   'input_length': (None, 5)
               },
               input_shape=(3, 2, 5),
               input_dtype='int32',
               expected_output_dtype=K.floatx())
Example #38
def basic_batchnorm_test():
    from keras import regularizers
    layer_test(normalization.BatchNormalization,
               kwargs={'momentum': 0.9,
                       'epsilon': 0.1,
                       'gamma_regularizer': regularizers.l2(0.01),
                       'beta_regularizer': regularizers.l2(0.01)},
               input_shape=(3, 4, 2))
    layer_test(normalization.BatchNormalization,
               kwargs={'gamma_initializer': 'ones',
                       'beta_initializer': 'ones',
                       'moving_mean_initializer': 'zeros',
                       'moving_variance_initializer': 'ones'},
               input_shape=(3, 4, 2))
    layer_test(normalization.BatchNormalization,
               kwargs={'scale': False, 'center': False},
               input_shape=(3, 3))
Example #39
def test_causal_dilated_conv():
    # Causal:
    # explicitly pass the channels_last data format,
    # since the default data format for Conv1D is now None
    layer_test(convolutional.Conv1D,
               input_data=np.reshape(np.arange(4, dtype='float32'), (1, 4, 1)),
               kwargs={
                   'filters': 1,
                   'kernel_size': 2,
                   'dilation_rate': 1,
                   'padding': 'causal',
                   'kernel_initializer': 'ones',
                   'use_bias': False,
                   'data_format': 'channels_last'
               },
               expected_output=[[[0], [1], [3], [5]]])

    # Non-causal:
    layer_test(convolutional.Conv1D,
               input_data=np.reshape(np.arange(4, dtype='float32'), (1, 4, 1)),
               kwargs={
                   'filters': 1,
                   'kernel_size': 2,
                   'dilation_rate': 1,
                   'padding': 'valid',
                   'kernel_initializer': 'ones',
                   'use_bias': False,
                   'data_format': 'channels_last'
               },
               expected_output=[[[1], [3], [5]]])

    # Causal dilated with larger kernel size:
    layer_test(convolutional.Conv1D,
               input_data=np.reshape(np.arange(10, dtype='float32'),
                                     (1, 10, 1)),
               kwargs={
                   'filters': 1,
                   'kernel_size': 3,
                   'dilation_rate': 2,
                   'padding': 'causal',
                   'kernel_initializer': 'ones',
                   'use_bias': False,
                   'data_format': 'channels_last'
               },
               expected_output=np.float32([[[0], [1], [2], [4], [6], [9], [12],
                                            [15], [18], [21]]]))
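
The expected outputs in this example can be reproduced by hand: with an all-ones kernel, no bias, and padding='causal', the input is left-padded with (kernel_size - 1) * dilation_rate zeros and each output is the sum of the kernel taps. A small NumPy helper (hypothetical, written only to verify the numbers) reproduces both causal cases:

import numpy as np

def causal_conv_ones(x, kernel_size, dilation):
    # 1D convolution with an all-ones kernel, no bias, causal (left-only) padding
    pad = (kernel_size - 1) * dilation
    xp = np.concatenate([np.zeros(pad, x.dtype), x])
    return np.array([sum(xp[i + k * dilation] for k in range(kernel_size))
                     for i in range(len(x))])

print(causal_conv_ones(np.arange(4, dtype='float32'), 2, 1))
# [0. 1. 3. 5.]
print(causal_conv_ones(np.arange(10, dtype='float32'), 3, 2))
# [ 0.  1.  2.  4.  6.  9. 12. 15. 18. 21.]
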
Example #40
def test_averagepooling_2d():
    layer_test(convolutional.AveragePooling2D,
               kwargs={'strides': (2, 2),
                       'padding': 'same',
                       'pool_size': (2, 2)},
               input_shape=(3, 5, 6, 4))
    layer_test(convolutional.AveragePooling2D,
               kwargs={'strides': (2, 2),
                       'padding': 'valid',
                       'pool_size': (3, 3)},
               input_shape=(3, 5, 6, 4))
    layer_test(convolutional.AveragePooling2D,
               kwargs={'strides': (1, 1),
                       'padding': 'valid',
                       'pool_size': (2, 2),
                       'data_format': 'channels_first'},
               input_shape=(3, 4, 5, 6))
Example #41
def test_convolution_2d():
    num_samples = 2
    filters = 2
    stack_size = 3
    kernel_size = (3, 2)
    num_row = 7
    num_col = 6

    for padding in _convolution_paddings:
        for strides in [(1, 1), (2, 2)]:
            if padding == 'same' and strides != (1, 1):
                continue

            layer_test(convolutional.Conv2D,
                       kwargs={
                           'filters': filters,
                           'kernel_size': kernel_size,
                           'padding': padding,
                           'strides': strides,
                           'data_format': 'channels_first'
                       },
                       input_shape=(num_samples, stack_size, num_row, num_col))

    layer_test(convolutional.Conv2D,
               kwargs={
                   'filters': filters,
                   'kernel_size': 3,
                   'padding': padding,
                   'kernel_regularizer': 'l2',
                   'bias_regularizer': 'l2',
                   'activity_regularizer': 'l2',
                   'kernel_constraint': 'max_norm',
                   'bias_constraint': 'max_norm',
                   'strides': strides
               },
               input_shape=(num_samples, num_row, num_col, stack_size))

    # Test dilation
    layer_test(convolutional.Conv2D,
               kwargs={
                   'filters': filters,
                   'kernel_size': kernel_size,
                   'padding': padding,
                   'dilation_rate': (2, 2)
               },
               input_shape=(num_samples, num_row, num_col, stack_size))
Example #42
def test_implementation_mode(layer_class):
    for mode in [1, 2]:
        # Without dropout
        layer_test(layer_class,
                   kwargs={'units': units,
                           'implementation': mode},
                   input_shape=(num_samples, timesteps, embedding_dim))
        # With dropout
        layer_test(layer_class,
                   kwargs={'units': units,
                           'implementation': mode,
                           'dropout': 0.1,
                           'recurrent_dropout': 0.1},
                   input_shape=(num_samples, timesteps, embedding_dim))
        # Without bias
        layer_test(layer_class,
                   kwargs={'units': units,
                           'implementation': mode,
                           'use_bias': False},
                   input_shape=(num_samples, timesteps, embedding_dim))
Example #43
def basic_instancenorm_test():
    from keras import regularizers
    layer_test(normalization.InstanceNormalization,
               kwargs={
                   'epsilon': 0.1,
                   'gamma_regularizer': regularizers.l2(0.01),
                   'beta_regularizer': regularizers.l2(0.01)
               },
               input_shape=(3, 4, 2))
    layer_test(normalization.InstanceNormalization,
               kwargs={
                   'gamma_initializer': 'ones',
                   'beta_initializer': 'ones'
               },
               input_shape=(3, 4, 2))
    layer_test(normalization.InstanceNormalization,
               kwargs={
                   'scale': False,
                   'center': False
               },
               input_shape=(3, 3))
Example #44
def test_zero_padding_1d():
    nb_samples = 2
    input_dim = 2
    nb_steps = 5
    shape = (nb_samples, nb_steps, input_dim)
    input = np.ones(shape)

    # basic test
    layer_test(convolutional.ZeroPadding1D,
               kwargs={'padding': 2},
               input_shape=input.shape)
    layer_test(convolutional.ZeroPadding1D,
               kwargs={'padding': (1, 2)},
               input_shape=input.shape)
    layer_test(convolutional.ZeroPadding1D,
               kwargs={'padding': {
                   'left_pad': 1,
                   'right_pad': 2
               }},
               input_shape=input.shape)

    # correctness test
    layer = convolutional.ZeroPadding1D(padding=2)
    layer.build(shape)
    output = layer(K.variable(input))
    np_output = K.eval(output)
    for offset in [0, 1, -1, -2]:
        assert_allclose(np_output[:, offset, :], 0.)
    assert_allclose(np_output[:, 2:-2, :], 1.)

    layer = convolutional.ZeroPadding1D(padding=(1, 2))
    layer.build(shape)
    output = layer(K.variable(input))
    np_output = K.eval(output)
    for left_offset in [0]:
        assert_allclose(np_output[:, left_offset, :], 0.)
    for right_offset in [-1, -2]:
        assert_allclose(np_output[:, right_offset, :], 0.)
    assert_allclose(np_output[:, 1:-2, :], 1.)
    layer.get_config()
Example #45
def test_masking():
    layer_test(core.Masking, kwargs={}, input_shape=(3, 2, 3))
Example #46
def test_repeat_vector():
    layer_test(core.RepeatVector, kwargs={'n': 3}, input_shape=(3, 2))
Example #47
def test_cropping_2d():
    num_samples = 2
    stack_size = 2
    input_len_dim1 = 9
    input_len_dim2 = 9
    cropping = ((2, 2), (3, 3))

    for data_format in ['channels_first', 'channels_last']:
        if data_format == 'channels_first':
            inputs = np.random.rand(num_samples, stack_size,
                                    input_len_dim1, input_len_dim2)
        else:
            inputs = np.random.rand(num_samples,
                                    input_len_dim1, input_len_dim2,
                                    stack_size)
        # basic test
        layer_test(convolutional.Cropping2D,
                   kwargs={'cropping': cropping,
                           'data_format': data_format},
                   input_shape=inputs.shape)
        # correctness test
        layer = convolutional.Cropping2D(cropping=cropping,
                                         data_format=data_format)
        layer.build(inputs.shape)
        outputs = layer(K.variable(inputs))
        np_output = K.eval(outputs)
        # compare with numpy
        if data_format == 'channels_first':
            expected_out = inputs[:,
                                  :,
                                  cropping[0][0]: -cropping[0][1],
                                  cropping[1][0]: -cropping[1][1]]
        else:
            expected_out = inputs[:,
                                  cropping[0][0]: -cropping[0][1],
                                  cropping[1][0]: -cropping[1][1],
                                  :]
        assert_allclose(np_output, expected_out)

    for data_format in ['channels_first', 'channels_last']:
        if data_format == 'channels_first':
            inputs = np.random.rand(num_samples, stack_size,
                                    input_len_dim1, input_len_dim2)
        else:
            inputs = np.random.rand(num_samples,
                                    input_len_dim1, input_len_dim2,
                                    stack_size)
        # another correctness test (no cropping)
        cropping = ((0, 0), (0, 0))
        layer = convolutional.Cropping2D(cropping=cropping,
                                         data_format=data_format)
        layer.build(inputs.shape)
        outputs = layer(K.variable(inputs))
        np_output = K.eval(outputs)
        # compare with input
        assert_allclose(np_output, inputs)

    # Test invalid use cases
    with pytest.raises(ValueError):
        layer = convolutional.Cropping2D(cropping=((1, 1),))
    with pytest.raises(ValueError):
        layer = convolutional.Cropping2D(cropping=lambda x: x)
Example #48
def test_causal_dilated_conv(layer_kwargs, input_length, expected_output):
    input_data = np.reshape(np.arange(input_length, dtype='float32'),
                            (1, input_length, 1))
    layer_test(convolutional.Conv1D, input_data=input_data,
               kwargs=layer_kwargs, expected_output=expected_output)
Example #49
def test_GaussianDropout():
    layer_test(noise.GaussianDropout, kwargs={'p': 0.5}, input_shape=(3, 2, 3))
Example #50
def test_dropout():
    layer_test(core.Dropout, kwargs={'p': 0.5}, input_shape=(3, 2))
Example #51
def test_averagepooling_1d():
    for stride in [1, 2]:
        layer_test(convolutional.AveragePooling1D,
                   kwargs={'stride': stride,
                           'border_mode': 'valid'},
                   input_shape=(3, 5, 4))
Example #52
def test_zero_padding_2d():
    nb_samples = 2
    stack_size = 2
    input_nb_row = 4
    input_nb_col = 5
    dim_ordering = K.image_dim_ordering()
    assert dim_ordering in {'tf', 'th'}, 'dim_ordering must be in {tf, th}'

    if dim_ordering == 'tf':
        input = np.ones((nb_samples, input_nb_row, input_nb_col, stack_size))
    elif dim_ordering == 'th':
        input = np.ones((nb_samples, stack_size, input_nb_row, input_nb_col))

    # basic test
    layer_test(convolutional.ZeroPadding2D,
               kwargs={'padding': (2, 2)},
               input_shape=input.shape)
    layer_test(convolutional.ZeroPadding2D,
               kwargs={'padding': (1, 2, 3, 4)},
               input_shape=input.shape)
    layer_test(convolutional.ZeroPadding2D,
               kwargs={'padding': {'top_pad': 1, 'bottom_pad': 2, 'left_pad': 3, 'right_pad': 4}},
               input_shape=input.shape)

    # correctness test
    layer = convolutional.ZeroPadding2D(padding=(2, 2))
    layer.build(input.shape)
    output = layer(K.variable(input))
    np_output = K.eval(output)
    if dim_ordering == 'tf':
        for offset in [0, 1, -1, -2]:
            assert_allclose(np_output[:, offset, :, :], 0.)
            assert_allclose(np_output[:, :, offset, :], 0.)
        assert_allclose(np_output[:, 2:-2, 2:-2, :], 1.)
    elif dim_ordering == 'th':
        for offset in [0, 1, -1, -2]:
            assert_allclose(np_output[:, :, offset, :], 0.)
            assert_allclose(np_output[:, :, :, offset], 0.)
        assert_allclose(np_output[:, 2:-2, 2:-2, :], 1.)

    layer = convolutional.ZeroPadding2D(padding=(1, 2, 3, 4))
    layer.build(input.shape)
    output = layer(K.variable(input))
    np_output = K.eval(output)
    if dim_ordering == 'tf':
        for top_offset in [0]:
            assert_allclose(np_output[:, top_offset, :, :], 0.)
        for bottom_offset in [-1, -2]:
            assert_allclose(np_output[:, bottom_offset, :, :], 0.)
        for left_offset in [0, 1, 2]:
            assert_allclose(np_output[:, :, left_offset, :], 0.)
        for right_offset in [-1, -2, -3, -4]:
            assert_allclose(np_output[:, :, right_offset, :], 0.)
        assert_allclose(np_output[:, 1:-2, 3:-4, :], 1.)
    elif dim_ordering == 'th':
        for top_offset in [0]:
            assert_allclose(np_output[:, :, top_offset, :], 0.)
        for bottom_offset in [-1, -2]:
            assert_allclose(np_output[:, :, bottom_offset, :], 0.)
        for left_offset in [0, 1, 2]:
            assert_allclose(np_output[:, :, :, left_offset], 0.)
        for right_offset in [-1, -2, -3, -4]:
            assert_allclose(np_output[:, :, :, right_offset], 0.)
        assert_allclose(np_output[:, :, 1:-2, 3:-4], 1.)
    layer.get_config()
Example #53
def test_globalpooling_1d():
    layer_test(pooling.GlobalMaxPooling1D,
               input_shape=(3, 4, 5))
    layer_test(pooling.GlobalAveragePooling1D,
               input_shape=(3, 4, 5))
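Both global pooling layers reduce the temporal axis of a `(batch, steps, features)` input to a `(batch, features)` output: GlobalMaxPooling1D takes the maximum over steps and GlobalAveragePooling1D the mean. A quick NumPy sanity check along those lines:

import numpy as np

x = np.random.random((3, 4, 5))   # (batch, steps, features)
global_max = x.max(axis=1)        # what GlobalMaxPooling1D computes
global_avg = x.mean(axis=1)       # what GlobalAveragePooling1D computes
assert global_max.shape == (3, 5)
assert global_avg.shape == (3, 5)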
Example #54
0
def test_GaussianNoise():
    layer_test(noise.GaussianNoise,
               kwargs={'sigma': 1.},
               input_shape=(3, 2, 3))
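`sigma` is the Keras 1.x keyword; newer versions name it `stddev`. The layer adds zero-mean Gaussian noise during training only and is the identity at inference. A minimal sketch of the training-time behaviour:

import numpy as np

def gaussian_noise_train(x, stddev):
    # Additive zero-centred Gaussian noise; applied only in training mode.
    return x + np.random.normal(loc=0.0, scale=stddev, size=x.shape)

x = np.zeros((3, 2, 3))
y = gaussian_noise_train(x, stddev=1.)
assert y.shape == x.shape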
Example #55
0
def test_upsampling_3d():
    num_samples = 2
    stack_size = 2
    input_len_dim1 = 10
    input_len_dim2 = 11
    input_len_dim3 = 12

    for data_format in ['channels_first', 'channels_last']:
        if data_format == 'channels_first':
            inputs = np.random.rand(num_samples, stack_size, input_len_dim1,
                                    input_len_dim2, input_len_dim3)
        else:  # tf
            inputs = np.random.rand(num_samples, input_len_dim1,
                                    input_len_dim2, input_len_dim3, stack_size)

        # basic test
        layer_test(convolutional.UpSampling3D,
                   kwargs={
                       'size': (2, 2, 2),
                       'data_format': data_format
                   },
                   input_shape=inputs.shape)

        for length_dim1 in [2, 3]:
            for length_dim2 in [2]:
                for length_dim3 in [3]:
                    layer = convolutional.UpSampling3D(size=(length_dim1,
                                                             length_dim2,
                                                             length_dim3),
                                                       data_format=data_format)
                    layer.build(inputs.shape)
                    outputs = layer(K.variable(inputs))
                    np_output = K.eval(outputs)
                    if data_format == 'channels_first':
                        assert np_output.shape[2] == length_dim1 * input_len_dim1
                        assert np_output.shape[3] == length_dim2 * input_len_dim2
                        assert np_output.shape[4] == length_dim3 * input_len_dim3
                    else:  # tf
                        assert np_output.shape[1] == length_dim1 * input_len_dim1
                        assert np_output.shape[2] == length_dim2 * input_len_dim2
                        assert np_output.shape[3] == length_dim3 * input_len_dim3

                    # compare with numpy
                    if data_format == 'channels_first':
                        expected_out = np.repeat(inputs, length_dim1, axis=2)
                        expected_out = np.repeat(expected_out,
                                                 length_dim2,
                                                 axis=3)
                        expected_out = np.repeat(expected_out,
                                                 length_dim3,
                                                 axis=4)
                    else:  # tf
                        expected_out = np.repeat(inputs, length_dim1, axis=1)
                        expected_out = np.repeat(expected_out,
                                                 length_dim2,
                                                 axis=2)
                        expected_out = np.repeat(expected_out,
                                                 length_dim3,
                                                 axis=3)

                    assert_allclose(np_output, expected_out)
Example #56
0
def test_permute():
    layer_test(core.Permute, kwargs={'dims': (2, 1)}, input_shape=(3, 2, 4))
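Permute's `dims` are 1-indexed and exclude the batch axis, so `dims=(2, 1)` swaps the two non-batch axes: a `(3, 2, 4)` input becomes `(3, 4, 2)`. In NumPy terms, roughly:

import numpy as np

x = np.random.random((3, 2, 4))
y = np.transpose(x, (0, 2, 1))   # batch axis stays first, other two swap
assert y.shape == (3, 4, 2)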
Example #57
0
def test_upsampling_1d():
    layer_test(convolutional.UpSampling1D,
               kwargs={'size': 2},
               input_shape=(3, 5, 4))
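UpSampling1D with `size=2` repeats each timestep twice along the temporal axis, so a `(3, 5, 4)` input becomes `(3, 10, 4)`. The NumPy analogue, mirroring the `np.repeat` comparison used in the UpSampling3D example above:

import numpy as np

x = np.random.random((3, 5, 4))
y = np.repeat(x, 2, axis=1)      # each timestep duplicated along axis 1
assert y.shape == (3, 10, 4)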
Example #58
0
def test_flatten():
    layer_test(core.Flatten, kwargs={}, input_shape=(3, 2, 4))
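Flatten keeps the batch axis and collapses everything else, so a `(3, 2, 4)` input becomes `(3, 8)`. The equivalent NumPy reshape:

import numpy as np

x = np.random.random((3, 2, 4))
y = x.reshape(x.shape[0], -1)    # (3, 8): batch axis preserved, rest flattened
assert y.shape == (3, 8)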
Example #59
0
def test_zero_padding_3d():
    num_samples = 2
    stack_size = 2
    input_len_dim1 = 4
    input_len_dim2 = 5
    input_len_dim3 = 3

    inputs = np.ones((num_samples, input_len_dim1, input_len_dim2,
                      input_len_dim3, stack_size))

    # basic test
    for data_format in ['channels_first', 'channels_last']:
        layer_test(convolutional.ZeroPadding3D,
                   kwargs={
                       'padding': (2, 2, 2),
                       'data_format': data_format
                   },
                   input_shape=inputs.shape)
        layer_test(convolutional.ZeroPadding3D,
                   kwargs={
                       'padding': ((1, 2), (3, 4), (0, 2)),
                       'data_format': data_format
                   },
                   input_shape=inputs.shape)

        # correctness test
        layer = convolutional.ZeroPadding3D(padding=(2, 2, 2),
                                            data_format=data_format)
        layer.build(inputs.shape)
        outputs = layer(K.variable(inputs))
        np_output = K.eval(outputs)
        if data_format == 'channels_last':
            for offset in [0, 1, -1, -2]:
                assert_allclose(np_output[:, offset, :, :, :], 0.)
                assert_allclose(np_output[:, :, offset, :, :], 0.)
                assert_allclose(np_output[:, :, :, offset, :], 0.)
            assert_allclose(np_output[:, 2:-2, 2:-2, 2:-2, :], 1.)
        elif data_format == 'channels_first':
            for offset in [0, 1, -1, -2]:
                assert_allclose(np_output[:, :, offset, :, :], 0.)
                assert_allclose(np_output[:, :, :, offset, :], 0.)
                assert_allclose(np_output[:, :, :, :, offset], 0.)
            assert_allclose(np_output[:, :, 2:-2, 2:-2, 2:-2], 1.)

        layer = convolutional.ZeroPadding3D(padding=((1, 2), (3, 4), (0, 2)),
                                            data_format=data_format)
        layer.build(inputs.shape)
        outputs = layer(K.variable(inputs))
        np_output = K.eval(outputs)
        if data_format == 'channels_last':
            for dim1_offset in [0, -1, -2]:
                assert_allclose(np_output[:, dim1_offset, :, :, :], 0.)
            for dim2_offset in [0, 1, 2, -1, -2, -3, -4]:
                assert_allclose(np_output[:, :, dim2_offset, :, :], 0.)
            for dim3_offset in [-1, -2]:
                assert_allclose(np_output[:, :, :, dim3_offset, :], 0.)
            assert_allclose(np_output[:, 1:-2, 3:-4, 0:-2, :], 1.)
        elif data_format == 'channels_first':
            for dim1_offset in [0, -1, -2]:
                assert_allclose(np_output[:, :, dim1_offset, :, :], 0.)
            for dim2_offset in [0, 1, 2, -1, -2, -3, -4]:
                assert_allclose(np_output[:, :, :, dim2_offset, :], 0.)
            for dim3_offset in [-1, -2]:
                assert_allclose(np_output[:, :, :, :, dim3_offset], 0.)
            assert_allclose(np_output[:, :, 1:-2, 3:-4, 0:-2], 1.)
Example #60
0
def test_lambda():
    layer_test(layers.Lambda,
               kwargs={'function': lambda x: x + 1},
               input_shape=(3, 2))

    layer_test(layers.Lambda,
               kwargs={'function': lambda x, a, b: x * a + b,
                       'arguments': {'a': 0.6, 'b': 0.4}},
               input_shape=(3, 2))

    def antirectifier(x):
        x -= K.mean(x, axis=1, keepdims=True)
        x = K.l2_normalize(x, axis=1)
        pos = K.relu(x)
        neg = K.relu(-x)
        return K.concatenate([pos, neg], axis=1)

    def antirectifier_output_shape(input_shape):
        shape = list(input_shape)
        assert len(shape) == 2  # only valid for 2D tensors
        shape[-1] *= 2
        return tuple(shape)

    layer_test(layers.Lambda,
               kwargs={'function': antirectifier,
                       'output_shape': antirectifier_output_shape},
               input_shape=(3, 2))

    # test layer with multiple outputs
    def test_multiple_outputs():
        def func(x):
            return [x * 0.2, x * 0.3]

        def output_shape(input_shape):
            return [input_shape, input_shape]

        def mask(inputs, mask=None):
            return [None, None]

        i = layers.Input(shape=(64, 64, 3))
        o = layers.Lambda(function=func,
                          output_shape=output_shape,
                          mask=mask)(i)

        o1, o2 = o
        assert o1._keras_shape == (None, 64, 64, 3)
        assert o2._keras_shape == (None, 64, 64, 3)

        model = Model(i, o)

        x = np.random.random((4, 64, 64, 3))
        out1, out2 = model.predict(x)
        assert out1.shape == (4, 64, 64, 3)
        assert out2.shape == (4, 64, 64, 3)
        assert_allclose(out1, x * 0.2, atol=1e-4)
        assert_allclose(out2, x * 0.3, atol=1e-4)

    test_multiple_outputs()

    # test serialization with function
    def f(x):
        return x + 1

    ld = layers.Lambda(f)
    config = ld.get_config()
    ld = deserialize_layer({'class_name': 'Lambda', 'config': config})

    # test with lambda
    ld = layers.Lambda(
        lambda x: K.concatenate([K.square(x), x]),
        output_shape=lambda s: tuple(list(s)[:-1] + [2 * s[-1]]))
    config = ld.get_config()
    ld = layers.Lambda.from_config(config)

    # test serialization with output_shape function
    def f(x):
        return K.concatenate([K.square(x), x])

    def f_shape(s):
        return tuple(list(s)[:-1] + [2 * s[-1]])

    ld = layers.Lambda(f, output_shape=f_shape)
    config = ld.get_config()
    ld = deserialize_layer({'class_name': 'Lambda', 'config': config})
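When a named Python function (rather than a lambda) is serialized, the Lambda config stores the function by name, so deserialization has to be able to find that function again; lambdas, by contrast, are serialized as marshalled byte code, which is not portable across Python versions. Passing the function through `custom_objects` is the usual way to make it visible when it is not importable. A hedged sketch, reusing the `layers` and `deserialize_layer` names from the example above and assuming `deserialize_layer` is `keras.layers.deserialize`, which accepts a `custom_objects` mapping:

def g(x):
    return x + 1

ld = layers.Lambda(g)
config = ld.get_config()
# Make the function available to the deserializer by name.
ld = deserialize_layer({'class_name': 'Lambda', 'config': config},
                       custom_objects={'g': g})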