Example #1
    def test_gradient(self):
        val = np.random.random((4, 2))
        xth = KTH.variable(val)
        xtf = KTF.variable(val)

        expth = xth * KTH.exp(xth)
        exptf = xtf * KTF.exp(xtf)
        lossth = KTH.sum(expth)
        losstf = KTF.sum(exptf)
        zero_lossth = KTH.stop_gradient(lossth)
        zero_losstf = KTF.stop_gradient(losstf)

        gradth = KTH.gradients(lossth, [expth])
        gradtf = KTF.gradients(losstf, [exptf])
        zero_gradth = KTH.gradients(lossth + zero_lossth, [expth])
        zero_gradtf = KTF.gradients(losstf + zero_losstf, [exptf])

        zth = KTH.eval(gradth[0])
        ztf = KTF.eval(gradtf[0])
        zero_zth = KTH.eval(zero_gradth[0])
        zero_ztf = KTF.eval(zero_gradtf[0])
        assert zth.shape == ztf.shape
        assert zero_zth.shape == zero_ztf.shape
        assert_allclose(zth, ztf, atol=1e-05)
        assert_allclose(zero_zth, zero_ztf, atol=1e-05)
        assert_allclose(zero_zth, zth, atol=1e-05)
        assert_allclose(zero_ztf, ztf, atol=1e-05)
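All of the examples in this listing share the same harness: NumPy inputs, numpy.testing.assert_allclose for comparisons, and the two Keras backends aliased as KTH (Theano) and KTF (TensorFlow). A minimal sketch of the header they presuppose, with module paths assumed to follow the Keras 1.x test layout rather than taken from the original file:

import numpy as np
from numpy.testing import assert_allclose
import pytest

from keras import backend as K
# Backend aliases used throughout: KTH = Theano backend, KTF = TensorFlow backend.
from keras.backend import theano_backend as KTH
from keras.backend import tensorflow_backend as KTF
# convert_kernel translates kernels between the two backends' layouts
# (module path is an assumption; it has moved between Keras versions).
from keras.utils.np_utils import convert_kernel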
Example #2
    def test_rnn(self):
        # implement a simple RNN
        input_dim = 8
        output_dim = 4
        timesteps = 5

        input_val = np.random.random((32, timesteps, input_dim))
        init_state_val = np.random.random((32, output_dim))
        W_i_val = np.random.random((input_dim, output_dim))
        W_o_val = np.random.random((output_dim, output_dim))

        def rnn_step_fn(input_dim, output_dim, K):
            W_i = K.variable(W_i_val)
            W_o = K.variable(W_o_val)

            def step_function(x, states):
                assert len(states) == 1
                prev_output = states[0]
                output = K.dot(x, W_i) + K.dot(prev_output, W_o)
                return output, [output]
            return step_function

        th_rnn_step_fn = rnn_step_fn(input_dim, output_dim, KTH)
        inputs = KTH.variable(input_val)
        initial_states = [KTH.variable(init_state_val)]
        last_output, outputs, new_states = KTH.rnn(th_rnn_step_fn, inputs,
                                                   initial_states,
                                                   go_backwards=False,
                                                   masking=False)
        th_last_output = KTH.eval(last_output)
        th_outputs = KTH.eval(outputs)
        assert len(new_states) == 1
        th_state = KTH.eval(new_states[0])

        tf_rnn_step_fn = rnn_step_fn(input_dim, output_dim, KTF)
        inputs = KTF.variable(input_val)
        initial_states = [KTF.variable(init_state_val)]
        last_output, outputs, new_states = KTF.rnn(tf_rnn_step_fn, inputs,
                                                   initial_states,
                                                   go_backwards=False,
                                                   masking=False)
        tf_last_output = KTF.eval(last_output)
        tf_outputs = KTF.eval(outputs)
        assert len(new_states) == 1
        tf_state = KTF.eval(new_states[0])

        assert_allclose(tf_last_output, th_last_output, atol=1e-04)
        assert_allclose(tf_outputs, th_outputs, atol=1e-04)
        assert_allclose(tf_state, th_state, atol=1e-04)
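The recurrence both backends are asked to reproduce above is simple enough to state directly in NumPy. The helper below is our own reference sketch (np_rnn_reference is not part of the test suite) and uses the same value arrays as the test:

def np_rnn_reference(input_val, init_state_val, W_i_val, W_o_val):
    # input_val has shape (samples, timesteps, input_dim); the step rule is
    # output_t = x_t . W_i + output_{t-1} . W_o, starting from init_state_val.
    state = init_state_val
    outputs = []
    for t in range(input_val.shape[1]):
        state = np.dot(input_val[:, t, :], W_i_val) + np.dot(state, W_o_val)
        outputs.append(state)
    outputs = np.stack(outputs, axis=1)       # (samples, timesteps, output_dim)
    return outputs[:, -1, :], outputs, state  # last_output, outputs, final state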
Example #3
    def test_repeat_elements(self):
        reps = 3
        for ndims in [1, 2, 3]:
            shape = np.arange(2, 2 + ndims)
            arr = np.arange(np.prod(shape)).reshape(shape)
            arr_th = KTH.variable(arr)
            arr_tf = KTF.variable(arr)

            for rep_axis in range(ndims):
                np_rep = np.repeat(arr, reps, axis=rep_axis)
                th_z = KTH.repeat_elements(arr_th, reps, axis=rep_axis)
                th_rep = KTH.eval(th_z)
                tf_rep = KTF.eval(
                    KTF.repeat_elements(arr_tf, reps, axis=rep_axis))

                assert th_rep.shape == np_rep.shape
                assert tf_rep.shape == np_rep.shape
                assert_allclose(np_rep, th_rep, atol=1e-05)
                assert_allclose(np_rep, tf_rep, atol=1e-05)
                if hasattr(th_z, '_keras_shape'):
                    assert th_z._keras_shape == th_rep.shape

                # test theano shape inference when
                # input shape has None entries
                if K.backend() == 'theano':
                    shape = list(shape)
                    shape[rep_axis] = None
                    x = K.placeholder(shape=shape)
                    y = K.repeat_elements(x, reps, axis=rep_axis)
                    assert y._keras_shape == tuple(shape)
Example #4
    def test_gather(self):
        shape = (10, 2, 3)
        ref = np.arange(np.prod(shape)).reshape(shape)
        ref_th = KTH.variable(ref)
        ref_tf = KTF.variable(ref)

        inds = [1, 3, 7, 9]
        inds_th = KTH.variable(inds, dtype='int32')
        inds_tf = KTF.variable(inds, dtype='int32')
        th_z = KTH.gather(ref_th, inds_th)
        th_result = KTH.eval(th_z)
        tf_result = KTF.eval(KTF.gather(ref_tf, inds_tf))

        assert_allclose(tf_result, th_result, atol=1e-05)

        if hasattr(th_z, '_keras_shape'):
            assert th_z._keras_shape == th_result.shape

        # test theano shape inference when
        # input shape has None entries
        if K.backend() == 'theano':
            x = K.placeholder(shape=(None, 3, 4))
            indices = K.placeholder(shape=(5, 6), dtype='int32')
            y = K.gather(x, indices)
            assert y._keras_shape == (5, 6, 3, 4)
Example #5
    def test_shape_operations(self):
        # concatenate
        xval = np.random.random((4, 3))
        xth = KTH.variable(xval)
        xtf = KTF.variable(xval)
        yval = np.random.random((4, 2))
        yth = KTH.variable(yval)
        ytf = KTF.variable(yval)
        zth = KTH.eval(KTH.concatenate([xth, yth], axis=-1))
        ztf = KTF.eval(KTF.concatenate([xtf, ytf], axis=-1))
        assert zth.shape == ztf.shape
        assert_allclose(zth, ztf, atol=1e-05)

        check_single_tensor_operation('reshape', (4, 2), shape=(8, 1))
        check_single_tensor_operation('permute_dimensions', (4, 2, 3),
                                      pattern=(2, 0, 1))
        check_single_tensor_operation('repeat', (4, 1), n=3)
        check_single_tensor_operation('flatten', (4, 1))
        check_single_tensor_operation('expand_dims', (4, 3), dim=-1)
        check_single_tensor_operation('expand_dims', (4, 3, 2), dim=1)
        check_single_tensor_operation('squeeze', (4, 3, 1), axis=2)
        check_single_tensor_operation('squeeze', (4, 1, 1), axis=1)
        check_composed_tensor_operations('reshape', {'shape': (4, 3, 1, 1)},
                                         'squeeze', {'axis': 2},
                                         (4, 3, 1, 1))
Example #6
    def test_nn_operations(self):
        check_single_tensor_operation('relu', (4, 2), alpha=0.1, max_value=0.5)
        check_single_tensor_operation('softmax', (4, 10))
        check_single_tensor_operation('softplus', (4, 10))

        check_single_tensor_operation('sigmoid', (4, 2))
        check_single_tensor_operation('hard_sigmoid', (4, 2))
        check_single_tensor_operation('tanh', (4, 2))

        # dropout
        val = np.random.random((100, 100))
        xth = KTH.variable(val)
        xtf = KTF.variable(val)
        zth = KTH.eval(KTH.dropout(xth, level=0.2))
        ztf = KTF.eval(KTF.dropout(xtf, level=0.2))
        assert zth.shape == ztf.shape
        # dropout patterns are different, only check mean
        assert np.abs(zth.mean() - ztf.mean()) < 0.05

        check_two_tensor_operation('binary_crossentropy', (4, 2), (4, 2), from_logits=True)
        check_two_tensor_operation('categorical_crossentropy', (4, 2), (4, 2), from_logits=True)
        check_two_tensor_operation('binary_crossentropy', (4, 2), (4, 2), from_logits=False)
        check_two_tensor_operation('categorical_crossentropy', (4, 2), (4, 2), from_logits=False)

        check_single_tensor_operation('l2_normalize', (4, 3), axis=-1)
        check_single_tensor_operation('l2_normalize', (4, 3), axis=1)
Example #7
    def test_conv3d(self):
        # TH input shape: (samples, input_depth, conv_dim1, conv_dim2, conv_dim3)
        # TF input shape: (samples, conv_dim1, conv_dim2, conv_dim3, input_depth)
        # TH kernel shape: (depth, input_depth, x, y, z)
        # TF kernel shape: (x, y, z, input_depth, depth)

        # test in dim_ordering = th
        for input_shape in [(2, 3, 4, 5, 4), (2, 3, 5, 4, 6)]:
            for kernel_shape in [(4, 3, 2, 2, 2), (4, 3, 3, 2, 4)]:
                xval = np.random.random(input_shape)

                xth = KTH.variable(xval)
                xtf = KTF.variable(xval)

                kernel_val = np.random.random(kernel_shape) - 0.5

                kernel_th = KTH.variable(convert_kernel(kernel_val))
                kernel_tf = KTF.variable(kernel_val)

                zth = KTH.eval(KTH.conv3d(xth, kernel_th))
                ztf = KTF.eval(KTF.conv3d(xtf, kernel_tf))

                assert zth.shape == ztf.shape
                assert_allclose(zth, ztf, atol=1e-05)

        # test in dim_ordering = tf
        input_shape = (1, 2, 2, 2, 1)
        kernel_shape = (2, 2, 2, 1, 1)

        xval = np.random.random(input_shape)

        xth = KTH.variable(xval)
        xtf = KTF.variable(xval)

        kernel_val = np.random.random(kernel_shape) - 0.5

        kernel_th = KTH.variable(convert_kernel(kernel_val, dim_ordering='tf'))
        kernel_tf = KTF.variable(kernel_val)

        zth = KTH.eval(KTH.conv3d(xth, kernel_th, dim_ordering='tf'))
        ztf = KTF.eval(KTF.conv3d(xtf, kernel_tf, dim_ordering='tf'))

        assert zth.shape == ztf.shape
        assert_allclose(zth, ztf, atol=1e-05)
Example #8
    def test_rnn_no_states(self):
        # implement a simple RNN without states
        input_dim = 8
        output_dim = 4
        timesteps = 5

        input_val = np.random.random((32, timesteps, input_dim))
        W_i_val = np.random.random((input_dim, output_dim))

        def rnn_step_fn(input_dim, output_dim, K):
            W_i = K.variable(W_i_val)

            def step_function(x, states):
                assert len(states) == 0
                output = K.dot(x, W_i)
                return output, []
            return step_function

        # test default setup
        th_rnn_step_fn = rnn_step_fn(input_dim, output_dim, KTH)
        th_inputs = KTH.variable(input_val)
        th_initial_states = []
        last_output, outputs, new_states = KTH.rnn(th_rnn_step_fn, th_inputs,
                                                   th_initial_states,
                                                   go_backwards=False,
                                                   mask=None)
        th_last_output = KTH.eval(last_output)
        th_outputs = KTH.eval(outputs)
        assert len(new_states) == 0

        tf_rnn_step_fn = rnn_step_fn(input_dim, output_dim, KTF)
        tf_inputs = KTF.variable(input_val)
        tf_initial_states = []
        last_output, outputs, new_states = KTF.rnn(tf_rnn_step_fn, tf_inputs,
                                                   tf_initial_states,
                                                   go_backwards=False,
                                                   mask=None)
        tf_last_output = KTF.eval(last_output)
        tf_outputs = KTF.eval(outputs)
        assert len(new_states) == 0

        assert_allclose(tf_last_output, th_last_output, atol=1e-04)
        assert_allclose(tf_outputs, th_outputs, atol=1e-04)
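Because no state is carried between steps here, the whole recurrence collapses to one projection per timestep; as a sanity check (our sketch, not part of the original test), the expected result is a single matrix product:

# Reference for the stateless case: apply W_i to every timestep at once.
expected_outputs = np.dot(input_val, W_i_val)       # (32, timesteps, output_dim)
expected_last_output = expected_outputs[:, -1, :]   # (32, output_dim)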
Example #9
def check_single_tensor_operation(function_name, input_shape, **kwargs):
    val = np.random.random(input_shape) - 0.5
    xth = KTH.variable(val)
    xtf = KTF.variable(val)

    zth = KTH.eval(getattr(KTH, function_name)(xth, **kwargs))
    ztf = KTF.eval(getattr(KTF, function_name)(xtf, **kwargs))

    assert zth.shape == ztf.shape
    assert_allclose(zth, ztf, atol=1e-05)
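A typical invocation, taken from the nn-operation tests elsewhere in this listing, names the backend function, gives the random input shape, and forwards any keyword arguments to both backends:

check_single_tensor_operation('relu', (4, 2), alpha=0.1, max_value=0.5)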
Example #10
    def test_tile(self):
        shape = (3, 4)
        arr = np.arange(np.prod(shape)).reshape(shape)
        arr_th = KTH.variable(arr)
        arr_tf = KTF.variable(arr)

        n = (2, 1)
        th_rep = KTH.eval(KTH.tile(arr_th, n))
        tf_rep = KTF.eval(KTF.tile(arr_tf, n))
        assert_allclose(tf_rep, th_rep, atol=1e-05)
Example #11
    def test_arange(self):
        for test_value in (-20, 0, 1, 10):
            t_a = KTF.arange(test_value)
            a = KTF.eval(t_a)
            assert np.array_equal(a, np.arange(test_value))
            t_b = KTH.arange(test_value)
            b = KTH.eval(t_b)
            assert np.array_equal(b, np.arange(test_value))
            assert np.array_equal(a, b)
            assert KTF.dtype(t_a) == KTH.dtype(t_b)
        for start, stop, step in ((0, 5, 1), (-5, 5, 2), (0, 1, 2)):
            a = KTF.eval(KTF.arange(start, stop, step))
            assert np.array_equal(a, np.arange(start, stop, step))
            b = KTH.eval(KTH.arange(start, stop, step))
            assert np.array_equal(b, np.arange(start, stop, step))
            assert np.array_equal(a, b)
        for dtype in ('int32', 'int64', 'float32', 'float64'):
            for backend in (KTF, KTH):
                t = backend.arange(10, dtype=dtype)
                assert backend.dtype(t) == dtype
Example #12
    def test_conv2d(self):
        # TF kernel shape: (rows, cols, input_depth, depth)

        # channels_first input shape: (n, input_depth, rows, cols)
        for input_shape in [(2, 3, 4, 5), (2, 3, 5, 6)]:
            for kernel_shape in [(2, 2, 3, 4), (4, 3, 3, 4)]:
                for padding in ['valid', 'same']:
                    xval = np.random.random(input_shape)

                    xth = KTH.variable(xval)
                    xtf = KTF.variable(xval)

                    kernel_val = np.random.random(kernel_shape) - 0.5

                    kernel_th = KTH.variable(convert_kernel(kernel_val))
                    kernel_tf = KTF.variable(kernel_val)

                    zth = KTH.eval(KTH.conv2d(xth, kernel_th, data_format='channels_first'))
                    ztf = KTF.eval(KTF.conv2d(xtf, kernel_tf, data_format='channels_first'))

                    assert zth.shape == ztf.shape
                    assert_allclose(zth, ztf, atol=1e-05)

        input_shape = (1, 6, 5, 3)
        kernel_shape = (3, 3, 3, 2)

        xval = np.random.random(input_shape)

        xth = KTH.variable(xval)
        xtf = KTF.variable(xval)

        kernel_val = np.random.random(kernel_shape) - 0.5

        kernel_th = KTH.variable(convert_kernel(kernel_val))
        kernel_tf = KTF.variable(kernel_val)

        zth = KTH.eval(KTH.conv2d(xth, kernel_th, data_format='channels_last'))
        ztf = KTF.eval(KTF.conv2d(xtf, kernel_tf, data_format='channels_last'))

        assert zth.shape == ztf.shape
        assert_allclose(zth, ztf, atol=1e-05)
Example #13
    def test_random_normal(self):
        mean = 0.
        std = 1.
        rand = KTF.eval(KTF.random_normal((1000, 1000), mean=mean, std=std))
        assert(rand.shape == (1000, 1000))
        assert(np.abs(np.mean(rand) - mean) < 0.01)
        assert(np.abs(np.std(rand) - std) < 0.01)

        rand = KTH.eval(KTH.random_normal((1000, 1000), mean=mean, std=std))
        assert(rand.shape == (1000, 1000))
        assert(np.abs(np.mean(rand) - mean) < 0.01)
        assert(np.abs(np.std(rand) - std) < 0.01)
Example #14
    def test_conv2d(self):
        # TH kernel shape: (depth, input_depth, rows, cols)
        # TF kernel shape: (rows, cols, input_depth, depth)

        for input_shape in [(2, 3, 4, 5), (2, 3, 5, 6)]:
            for kernel_shape in [(4, 3, 2, 2), (4, 3, 3, 4)]:
                xval = np.random.random(input_shape)

                xth = KTH.variable(xval)
                xtf = KTF.variable(xval)

                kernel_val = np.random.random(kernel_shape) - 0.5

                kernel_th = KTH.variable(convert_kernel(kernel_val))
                kernel_tf = KTF.variable(kernel_val)

                zth = KTH.eval(KTH.conv2d(xth, kernel_th))
                ztf = KTF.eval(KTF.conv2d(xtf, kernel_tf))

                assert zth.shape == ztf.shape
                assert_allclose(zth, ztf, atol=1e-05)

        input_shape = (1, 6, 5, 3)
        kernel_shape = (3, 3, 3, 2)

        xval = np.random.random(input_shape)

        xth = KTH.variable(xval)
        xtf = KTF.variable(xval)

        kernel_val = np.random.random(kernel_shape) - 0.5

        kernel_th = KTH.variable(convert_kernel(kernel_val, dim_ordering='tf'))
        kernel_tf = KTF.variable(kernel_val)

        zth = KTH.eval(KTH.conv2d(xth, kernel_th, dim_ordering='tf'))
        ztf = KTF.eval(KTF.conv2d(xtf, kernel_tf, dim_ordering='tf'))

        assert zth.shape == ztf.shape
        assert_allclose(zth, ztf, atol=1e-05)
Example #15
    def test_tile(self):
        shape = (3, 4)
        arr = np.arange(np.prod(shape)).reshape(shape)
        arr_th = KTH.variable(arr)
        arr_tf = KTF.variable(arr)

        n = (2, 1)
        th_z = KTH.tile(arr_th, n)
        th_rep = KTH.eval(th_z)
        tf_rep = KTF.eval(KTF.tile(arr_tf, n))
        assert_allclose(tf_rep, th_rep, atol=1e-05)
        if hasattr(th_z, '_keras_shape'):
            assert th_z._keras_shape == th_rep.shape
Example #16
    def test_random_binomial(self):
        p = 0.5
        rand = KTF.eval(KTF.random_binomial((1000, 1000), p))
        assert(rand.shape == (1000, 1000))
        assert(np.abs(np.mean(rand) - p) < 0.01)
        assert(np.max(rand) == 1)
        assert(np.min(rand) == 0)

        rand = KTH.eval(KTH.random_binomial((1000, 1000), p))
        assert(rand.shape == (1000, 1000))
        assert(np.abs(np.mean(rand) - p) < 0.01)
        assert(np.max(rand) == 1)
        assert(np.min(rand) == 0)
Example #17
    def test_switch(self):
        val = np.random.random()
        xth = KTH.variable(val)
        xth = KTH.switch(xth >= 0.5, xth * 0.1, xth * 0.2)

        xtf = KTF.variable(val)
        xtf = KTF.switch(xtf >= 0.5, xtf * 0.1, xtf * 0.2)

        zth = KTH.eval(xth)
        ztf = KTF.eval(xtf)

        assert zth.shape == ztf.shape
        assert_allclose(zth, ztf, atol=1e-05)
Example #18
    def test_in_top_k(self):
        batch_size = 20
        num_classes = 10

        # Random prediction test case
        predictions = np.random.random((batch_size, num_classes)).astype('float32')
        targets = np.random.randint(num_classes, size=batch_size, dtype='int32')

        predictions_th = KTH.variable(predictions, dtype='float32')
        targets_th = KTH.variable(targets, dtype='int32')
        predictions_tf = KTF.variable(predictions, dtype='float32')
        targets_tf = KTF.variable(targets, dtype='int32')

        for k in range(1, num_classes + 1):
            res_th = KTH.eval(KTH.in_top_k(predictions_th, targets_th, k))
            res_tf = KTF.eval(KTF.in_top_k(predictions_tf, targets_tf, k))

            assert res_th.shape == res_tf.shape
            assert_allclose(res_th, res_tf, atol=1e-05)

        # Identical prediction test case:
        # randomly set half of the predictions to an identical value
        num_identical = num_classes // 2
        for i in range(batch_size):
            idx_identical = np.random.choice(num_classes, size=num_identical, replace=False)
            predictions[i, idx_identical] = predictions[i, 0]
        targets = np.zeros(batch_size, dtype='int32')

        predictions_th = KTH.variable(predictions, dtype='float32')
        targets_th = KTH.variable(targets, dtype='int32')
        predictions_tf = KTF.variable(predictions, dtype='float32')
        targets_tf = KTF.variable(targets, dtype='int32')

        for k in range(1, num_classes + 1):
            res_th = KTH.eval(KTH.in_top_k(predictions_th, targets_th, k))
            res_tf = KTF.eval(KTF.in_top_k(predictions_tf, targets_tf, k))

            assert res_th.shape == res_tf.shape
            assert_allclose(res_th, res_tf, atol=1e-05)
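As a rough mental model for in_top_k (our NumPy sketch, not part of the test), the target class counts as a hit when fewer than k classes score strictly higher than it; ties at the boundary therefore count as hits, which is exactly what the identical-prediction case above exercises:

def np_in_top_k(predictions, targets, k):
    # predictions: (batch, classes) floats, targets: (batch,) int class indices.
    target_scores = predictions[np.arange(len(targets)), targets]
    strictly_better = np.sum(predictions > target_scores[:, None], axis=1)
    return strictly_better < k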
Example #19
    def test_random_uniform(self):
        min = -1.
        max = 1.
        rand = KTF.eval(KTF.random_uniform((1000, 1000), min, max))
        assert(rand.shape == (1000, 1000))
        assert(np.abs(np.mean(rand)) < 0.01)
        assert(np.max(rand) <= max)
        assert(np.min(rand) >= min)

        rand = KTH.eval(KTH.random_uniform((1000, 1000), min, max))
        assert(rand.shape == (1000, 1000))
        assert(np.abs(np.mean(rand)) < 0.01)
        assert(np.max(rand) <= max)
        assert(np.min(rand) >= min)
Example #20
def check_two_tensor_operation(function_name, x_input_shape, y_input_shape, **kwargs):
    xval = np.random.random(x_input_shape) - 0.5
    xth = KTH.variable(xval)
    xtf = KTF.variable(xval)

    yval = np.random.random(y_input_shape) - 0.5
    yth = KTH.variable(yval)
    ytf = KTF.variable(yval)

    zth = KTH.eval(getattr(KTH, function_name)(xth, yth, **kwargs))
    ztf = KTF.eval(getattr(KTF, function_name)(xtf, ytf, **kwargs))

    assert zth.shape == ztf.shape
    assert_allclose(zth, ztf, atol=1e-05)
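It is used the same way as the single-tensor checker; the nn-operation tests above call it, for example, as:

check_two_tensor_operation('binary_crossentropy', (4, 2), (4, 2), from_logits=True)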
Example #21
    def test_random_uniform(self):
        min_val = -1.
        max_val = 1.
        rand = KTF.eval(KTF.random_uniform((1000, 1000), min_val, max_val))
        assert rand.shape == (1000, 1000)
        assert np.abs(np.mean(rand)) < 0.01
        assert np.max(rand) <= max_val
        assert np.min(rand) >= min_val

        rand = KTH.eval(KTH.random_uniform((1000, 1000), min_val, max_val))
        assert rand.shape == (1000, 1000)
        assert np.abs(np.mean(rand)) < 0.01
        assert np.max(rand) <= max_val
        assert np.min(rand) >= min_val
Example #22
    def test_ctc(self):
        # simplified version of TensorFlow's test

        label_lens = np.expand_dims(np.asarray([5, 4]), 1)
        input_lens = np.expand_dims(np.asarray([5, 5]), 1)  # number of timesteps

        # The Theano and TensorFlow CTC implementations use different methods
        # to ensure numerical stability. The Theano code subtracts out the max
        # before the final log, so the results differ but scale identically
        # and still train properly.
        loss_log_probs_tf = [3.34211, 5.42262]
        loss_log_probs_th = [1.73308, 3.81351]

        # dimensions are batch x time x categories
        labels = np.asarray([[0, 1, 2, 1, 0], [0, 1, 1, 0, -1]])
        inputs = np.asarray(
            [
                [
                    [0.633766, 0.221185, 0.0917319, 0.0129757, 0.0142857, 0.0260553],
                    [0.111121, 0.588392, 0.278779, 0.0055756, 0.00569609, 0.010436],
                    [0.0357786, 0.633813, 0.321418, 0.00249248, 0.00272882, 0.0037688],
                    [0.0663296, 0.643849, 0.280111, 0.00283995, 0.0035545, 0.00331533],
                    [0.458235, 0.396634, 0.123377, 0.00648837, 0.00903441, 0.00623107],
                ],
                [
                    [0.30176, 0.28562, 0.0831517, 0.0862751, 0.0816851, 0.161508],
                    [0.24082, 0.397533, 0.0557226, 0.0546814, 0.0557528, 0.19549],
                    [0.230246, 0.450868, 0.0389607, 0.038309, 0.0391602, 0.202456],
                    [0.280884, 0.429522, 0.0326593, 0.0339046, 0.0326856, 0.190345],
                    [0.423286, 0.315517, 0.0338439, 0.0393744, 0.0339315, 0.154046],
                ],
            ],
            dtype=np.float32,
        )

        labels_tf = KTF.variable(labels, dtype="int32")
        inputs_tf = KTF.variable(inputs, dtype="float32")
        input_lens_tf = KTF.variable(input_lens, dtype="int32")
        label_lens_tf = KTF.variable(label_lens, dtype="int32")
        res = KTF.eval(KTF.ctc_batch_cost(labels_tf, inputs_tf, input_lens_tf, label_lens_tf))
        assert_allclose(res[:, 0], loss_log_probs_tf, atol=1e-05)

        labels_th = KTH.variable(labels, dtype="int32")
        inputs_th = KTH.variable(inputs, dtype="float32")
        input_lens_th = KTH.variable(input_lens, dtype="int32")
        label_lens_th = KTH.variable(label_lens, dtype="int32")
        res = KTH.eval(KTH.ctc_batch_cost(labels_th, inputs_th, input_lens_th, label_lens_th))
        assert_allclose(res[0, :], loss_log_probs_th, atol=1e-05)
Example #23
    def test_gather(self):
        shape = (10, 2, 3)
        ref = np.arange(np.prod(shape)).reshape(shape)
        ref_th = KTH.variable(ref)
        ref_tf = KTF.variable(ref)

        inds = [1, 3, 7, 9]
        inds_th = KTH.variable(inds, dtype='int32')
        inds_tf = KTF.variable(inds, dtype='int32')
        th_z = KTH.gather(ref_th, inds_th)
        th_result = KTH.eval(th_z)
        tf_result = KTF.eval(KTF.gather(ref_tf, inds_tf))

        assert_allclose(tf_result, th_result, atol=1e-05)

        if hasattr(th_z, '_keras_shape'):
            assert th_z._keras_shape == th_result.shape
Example #24
    def test_repeat_elements(self):
        reps = 3
        for ndims in [1, 2, 3]:
            shape = np.arange(2, 2 + ndims)
            arr = np.arange(np.prod(shape)).reshape(shape)
            arr_th = KTH.variable(arr)
            arr_tf = KTF.variable(arr)

            for rep_axis in range(ndims):
                np_rep = np.repeat(arr, reps, axis=rep_axis)
                th_rep = KTH.eval(KTH.repeat_elements(arr_th, reps, axis=rep_axis))
                tf_rep = KTF.eval(KTF.repeat_elements(arr_tf, reps, axis=rep_axis))

                assert th_rep.shape == np_rep.shape
                assert tf_rep.shape == np_rep.shape
                assert_allclose(np_rep, th_rep, atol=1e-05)
                assert_allclose(np_rep, tf_rep, atol=1e-05)
Example #25
    def test_shape_operations(self):
        # concatenate
        xval = np.random.random((4, 3))
        xth = KTH.variable(xval)
        xtf = KTF.variable(xval)
        yval = np.random.random((4, 2))
        yth = KTH.variable(yval)
        ytf = KTF.variable(yval)
        zth = KTH.eval(KTH.concatenate([xth, yth], axis=-1))
        ztf = KTF.eval(KTF.concatenate([xtf, ytf], axis=-1))
        assert zth.shape == ztf.shape
        assert_allclose(zth, ztf, atol=1e-05)

        check_single_tensor_operation("reshape", (4, 2), shape=(8, 1))
        check_single_tensor_operation("permute_dimensions", (4, 2, 3), pattern=(2, 0, 1))
        check_single_tensor_operation("repeat", (4, 1), n=3)
        check_single_tensor_operation("flatten", (4, 1))
        check_single_tensor_operation("expand_dims", (4, 3), dim=-1)
        check_single_tensor_operation("expand_dims", (4, 3, 2), dim=1)
        check_single_tensor_operation("squeeze", (4, 3, 1), axis=2)
Example #26
def check_composed_tensor_operations(first_function_name, first_function_args,
                                     second_function_name, second_function_args,
                                     input_shape):
    ''' Creates a random tensor t0 with shape input_shape and computes
                 t1 = first_function_name(t0, **first_function_args)
                 t2 = second_function_name(t1, **second_function_args)
        with both the Theano and TensorFlow backends and ensures the answers match.
    '''
    val = np.random.random(input_shape) - 0.5
    xth = KTH.variable(val)
    xtf = KTF.variable(val)

    yth = getattr(KTH, first_function_name)(xth, **first_function_args)
    ytf = getattr(KTF, first_function_name)(xtf, **first_function_args)

    zth = KTH.eval(getattr(KTH, second_function_name)(yth, **second_function_args))
    ztf = KTF.eval(getattr(KTF, second_function_name)(ytf, **second_function_args))

    assert zth.shape == ztf.shape
    assert_allclose(zth, ztf, atol=1e-05)
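The shape-operation tests above exercise this helper by composing reshape with squeeze on a (4, 3, 1, 1) input:

check_composed_tensor_operations('reshape', {'shape': (4, 3, 1, 1)},
                                 'squeeze', {'axis': 2},
                                 (4, 3, 1, 1))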
Example #27
    def test_repeat_elements(self):
        reps = 3
        for ndims in [1, 2, 3]:
            shape = np.arange(2, 2 + ndims)
            arr = np.arange(np.prod(shape)).reshape(shape)
            arr_th = KTH.variable(arr)
            arr_tf = KTF.variable(arr)

            for rep_axis in range(ndims):
                np_rep = np.repeat(arr, reps, axis=rep_axis)
                th_z = KTH.repeat_elements(arr_th, reps, axis=rep_axis)
                th_rep = KTH.eval(th_z)
                tf_rep = KTF.eval(
                    KTF.repeat_elements(arr_tf, reps, axis=rep_axis))

                assert th_rep.shape == np_rep.shape
                assert tf_rep.shape == np_rep.shape
                assert_allclose(np_rep, th_rep, atol=1e-05)
                assert_allclose(np_rep, tf_rep, atol=1e-05)
                if hasattr(th_z, '_keras_shape'):
                    assert th_z._keras_shape == th_rep.shape
Example #28
    def test_nn_operations(self):
        check_single_tensor_operation('relu', (4, 2), alpha=0.1, max_value=0.5)
        check_single_tensor_operation('softmax', (4, 10))
        # check_single_tensor_operation('softplus', (4, 10))
        check_single_tensor_operation('elu', (4, 10), alpha=0.5)

        check_single_tensor_operation('sigmoid', (4, 2))
        # check_single_tensor_operation('hard_sigmoid', (4, 2))
        check_single_tensor_operation('tanh', (4, 2))

        # dropout
        val = np.random.random((100, 100))
        xth = KTH.variable(val)
        xtf = KTF.variable(val)
        zth = KTH.eval(KTH.dropout(xth, level=0.2))
        ztf = KTF.eval(KTF.dropout(xtf, level=0.2))
        assert zth.shape == ztf.shape
        # dropout patterns are different, only check mean
        assert np.abs(zth.mean() - ztf.mean()) < 0.05

Example #29
    def test_shape_operations(self):
        # concatenate
        xval = np.random.random((4, 3))
        xth = KTH.variable(xval)
        xtf = KTF.variable(xval)
        yval = np.random.random((4, 2))
        yth = KTH.variable(yval)
        ytf = KTF.variable(yval)
        zth = KTH.eval(KTH.concatenate([xth, yth], axis=-1))
        ztf = KTF.eval(KTF.concatenate([xtf, ytf], axis=-1))
        assert zth.shape == ztf.shape
        assert_allclose(zth, ztf, atol=1e-05)

        check_single_tensor_operation('reshape', (4, 2), shape=(8, 1))
        check_single_tensor_operation('permute_dimensions', (4, 2, 3),
                                      pattern=(2, 0, 1))
        check_single_tensor_operation('repeat', (4, 1), n=3)
        check_single_tensor_operation('flatten', (4, 1))
        check_single_tensor_operation('expand_dims', (4, 3), dim=-1)
        check_single_tensor_operation('expand_dims', (4, 3, 2), dim=1)
        check_single_tensor_operation('squeeze', (4, 3, 1), axis=2)
Example #30
    def test_switch(self):
        val = np.random.random()
        xth = KTH.variable(val)
        xth = KTH.switch(xth >= 0.5, xth * 0.1, xth * 0.2)

        xtf = KTF.variable(val)
        xtf = KTF.switch(xtf >= 0.5, xtf * 0.1, xtf * 0.2)

        zth = KTH.eval(xth)
        ztf = KTF.eval(xtf)

        assert zth.shape == ztf.shape
        assert_allclose(zth, ztf, atol=1e-05)

        xth1 = KTH.variable(0.7)
        xth2 = KTH.variable([1, 0.2])
        with pytest.raises(ValueError):
            xth = KTH.switch(xth1 >= 0.5, xth2 * 0.1, xth2 * 0.2)

        xth = KTH.switch(xth2 >= 0.5, xth1 * 0.1, xth1 * 0.2)
        assert_allclose(xth, KTH.variable([0.1, 0.2 * 0.2]), atol=1e-05)
Example #31
    def test_extract2(self, input_shape, kernel_shape):

        xval = np.random.random(input_shape)

        kernel = [kernel_shape, kernel_shape]
        strides = [kernel_shape, kernel_shape]
        xth = KTH.variable(xval)
        xtf = KTF.variable(xval)
        ztf = KTF.eval(
            KCTF.extract_image_patches(xtf,
                                       kernel,
                                       strides,
                                       data_format="channels_last",
                                       padding="same"))
        zth = KTH.eval(
            KCTH.extract_image_patches(xth,
                                       kernel,
                                       strides,
                                       data_format="channels_last",
                                       padding="same"))
        assert zth.shape == ztf.shape
        assert_allclose(zth, ztf, atol=1e-02)
Example #32
def check_composed_tensor_operations(first_function_name, first_function_args,
                                     second_function_name,
                                     second_function_args, input_shape):
    ''' Creates a random tensor t0 with shape input_shape and computes
                 t1 = first_function_name(t0, **first_function_args)
                 t2 = second_function_name(t1, **second_function_args)
        with both the Theano and TensorFlow backends and ensures the answers match.
    '''
    val = np.random.random(input_shape) - 0.5
    xth = KTH.variable(val)
    xtf = KTF.variable(val)

    yth = getattr(KTH, first_function_name)(xth, **first_function_args)
    ytf = getattr(KTF, first_function_name)(xtf, **first_function_args)

    zth = KTH.eval(
        getattr(KTH, second_function_name)(yth, **second_function_args))
    ztf = KTF.eval(
        getattr(KTF, second_function_name)(ytf, **second_function_args))

    assert zth.shape == ztf.shape
    assert_allclose(zth, ztf, atol=1e-05)
Example #33
    def test_tile(self):
        shape = (3, 4)
        arr = np.arange(np.prod(shape)).reshape(shape)
        arr_th = KTH.variable(arr)
        arr_tf = KTF.variable(arr)

        n = (2, 1)
        th_z = KTH.tile(arr_th, n)
        th_rep = KTH.eval(th_z)
        tf_rep = KTF.eval(KTF.tile(arr_tf, n))
        assert_allclose(tf_rep, th_rep, atol=1e-05)
        if hasattr(th_z, '_keras_shape'):
            assert th_z._keras_shape == th_rep.shape

        # test theano shape inference when
        # input shape has None entries
        if K.backend() == 'theano':
            x = K.placeholder(shape=(None, 4))
            n = 2
            y = KTH.tile(x, n)
            assert y._keras_shape == (None, 8)
            n = (4, 3)
            y = K.tile(x, n)
            assert y._keras_shape == (None, 12)
Example #34
    def test_nn_operations(self):
        check_single_tensor_operation('relu', (4, 2), alpha=0.1, max_value=0.5)
        check_single_tensor_operation('softmax', (4, 10))
        check_single_tensor_operation('softplus', (4, 10))

        check_single_tensor_operation('sigmoid', (4, 2))
        check_single_tensor_operation('hard_sigmoid', (4, 2))
        check_single_tensor_operation('tanh', (4, 2))

        # dropout
        val = np.random.random((20, 20))
        xth = KTH.variable(val)
        xtf = KTF.variable(val)
        zth = KTH.eval(KTH.dropout(xth, level=0.2))
        ztf = KTF.eval(KTF.dropout(xtf, level=0.2))
        assert zth.shape == ztf.shape
        # dropout patterns are different, only check mean
        assert np.abs(zth.mean() - ztf.mean()) < 0.05

        check_two_tensor_operation('binary_crossentropy', (4, 2), (4, 2), from_logits=True)
        check_two_tensor_operation('categorical_crossentropy', (4, 2), (4, 2), from_logits=True)
        check_two_tensor_operation('binary_crossentropy', (4, 2), (4, 2), from_logits=False)

        check_two_tensor_operation('categorical_crossentropy', (4, 2), (4, 2), from_logits=False)
Example #35
    def test_tile(self):
        shape = (3, 4)
        arr = np.arange(np.prod(shape)).reshape(shape)
        arr_th = KTH.variable(arr)
        arr_tf = KTF.variable(arr)

        n = (2, 1)
        th_z = KTH.tile(arr_th, n)
        th_rep = KTH.eval(th_z)
        tf_rep = KTF.eval(KTF.tile(arr_tf, n))
        assert_allclose(tf_rep, th_rep, atol=1e-05)
        if hasattr(th_z, '_keras_shape'):
            assert th_z._keras_shape == th_rep.shape

        # test theano shape inference when
        # input shape has None entries
        if K.backend() == 'theano':
            x = K.placeholder(shape=(None, 4))
            n = 2
            y = KTH.tile(x, n)
            assert y._keras_shape == (None, 8)
            n = (4, 3)
            y = K.tile(x, n)
            assert y._keras_shape == (None, 12)
Example #36
    def test_rnn(self):
        # implement a simple RNN
        input_dim = 8
        output_dim = 4
        timesteps = 5

        input_val = np.random.random((32, timesteps, input_dim))
        init_state_val = np.random.random((32, output_dim))
        W_i_val = np.random.random((input_dim, output_dim))
        W_o_val = np.random.random((output_dim, output_dim))

        def rnn_step_fn(input_dim, output_dim, K):
            W_i = K.variable(W_i_val)
            W_o = K.variable(W_o_val)

            def step_function(x, states):
                assert len(states) == 1
                prev_output = states[0]
                output = K.dot(x, W_i) + K.dot(prev_output, W_o)
                return output, [output]

            return step_function

        # test default setup
        th_rnn_step_fn = rnn_step_fn(input_dim, output_dim, KTH)
        th_inputs = KTH.variable(input_val)
        th_initial_states = [KTH.variable(init_state_val)]
        last_output, outputs, new_states = KTH.rnn(th_rnn_step_fn,
                                                   th_inputs,
                                                   th_initial_states,
                                                   go_backwards=False,
                                                   mask=None)
        th_last_output = KTH.eval(last_output)
        th_outputs = KTH.eval(outputs)
        assert len(new_states) == 1
        th_state = KTH.eval(new_states[0])

        tf_rnn_step_fn = rnn_step_fn(input_dim, output_dim, KTF)
        tf_inputs = KTF.variable(input_val)
        tf_initial_states = [KTF.variable(init_state_val)]
        last_output, outputs, new_states = KTF.rnn(tf_rnn_step_fn,
                                                   tf_inputs,
                                                   tf_initial_states,
                                                   go_backwards=False,
                                                   mask=None)
        tf_last_output = KTF.eval(last_output)
        tf_outputs = KTF.eval(outputs)
        assert len(new_states) == 1
        tf_state = KTF.eval(new_states[0])

        assert_allclose(tf_last_output, th_last_output, atol=1e-04)
        assert_allclose(tf_outputs, th_outputs, atol=1e-04)
        assert_allclose(tf_state, th_state, atol=1e-04)

        # test unroll
        unrolled_last_output, unrolled_outputs, unrolled_new_states = KTH.rnn(
            th_rnn_step_fn,
            th_inputs,
            th_initial_states,
            go_backwards=False,
            mask=None,
            unroll=True,
            input_length=timesteps)

        unrolled_th_last_output = KTH.eval(unrolled_last_output)
        unrolled_th_outputs = KTH.eval(unrolled_outputs)
        assert len(unrolled_new_states) == 1
        unrolled_th_state = KTH.eval(unrolled_new_states[0])
        assert_allclose(th_last_output, unrolled_th_last_output, atol=1e-04)
        assert_allclose(th_outputs, unrolled_th_outputs, atol=1e-04)
        assert_allclose(th_state, unrolled_th_state, atol=1e-04)

        # test go_backwards
        th_rnn_step_fn = rnn_step_fn(input_dim, output_dim, KTH)
        th_inputs = KTH.variable(input_val)
        th_initial_states = [KTH.variable(init_state_val)]
        last_output, outputs, new_states = KTH.rnn(th_rnn_step_fn,
                                                   th_inputs,
                                                   th_initial_states,
                                                   go_backwards=True,
                                                   mask=None)
        th_last_output = KTH.eval(last_output)
        th_outputs = KTH.eval(outputs)
        assert len(new_states) == 1
        th_state = KTH.eval(new_states[0])

        tf_rnn_step_fn = rnn_step_fn(input_dim, output_dim, KTF)
        tf_inputs = KTF.variable(input_val)
        tf_initial_states = [KTF.variable(init_state_val)]
        last_output, outputs, new_states = KTF.rnn(tf_rnn_step_fn,
                                                   tf_inputs,
                                                   tf_initial_states,
                                                   go_backwards=True,
                                                   mask=None)
        tf_last_output = KTF.eval(last_output)
        tf_outputs = KTF.eval(outputs)
        assert len(new_states) == 1
        tf_state = KTF.eval(new_states[0])

        assert_allclose(tf_last_output, th_last_output, atol=1e-04)
        assert_allclose(tf_outputs, th_outputs, atol=1e-04)
        assert_allclose(tf_state, th_state, atol=1e-04)

        # test unroll with backwards = True
        bwd_last_output, bwd_outputs, bwd_new_states = KTH.rnn(
            th_rnn_step_fn,
            th_inputs,
            th_initial_states,
            go_backwards=True,
            mask=None)
        bwd_th_last_output = KTH.eval(bwd_last_output)
        bwd_th_outputs = KTH.eval(bwd_outputs)
        assert len(bwd_new_states) == 1
        bwd_th_state = KTH.eval(bwd_new_states[0])

        bwd_unrolled_last_output, bwd_unrolled_outputs, bwd_unrolled_new_states = KTH.rnn(
            th_rnn_step_fn,
            th_inputs,
            th_initial_states,
            go_backwards=True,
            mask=None,
            unroll=True,
            input_length=timesteps)

        bwd_unrolled_th_last_output = KTH.eval(bwd_unrolled_last_output)
        bwd_unrolled_th_outputs = KTH.eval(bwd_unrolled_outputs)
        assert len(bwd_unrolled_new_states) == 1
        bwd_unrolled_th_state = KTH.eval(bwd_unrolled_new_states[0])
        assert_allclose(bwd_th_last_output,
                        bwd_unrolled_th_last_output,
                        atol=1e-04)
        assert_allclose(bwd_th_outputs, bwd_unrolled_th_outputs, atol=1e-04)
        assert_allclose(bwd_th_state, bwd_unrolled_th_state, atol=1e-04)

        # test unroll with masking
        np_mask = np.random.randint(2, size=(32, timesteps))
        th_mask = KTH.variable(np_mask)

        masked_last_output, masked_outputs, masked_new_states = KTH.rnn(
            th_rnn_step_fn,
            th_inputs,
            th_initial_states,
            go_backwards=False,
            mask=th_mask)
        masked_th_last_output = KTH.eval(masked_last_output)
        masked_th_outputs = KTH.eval(masked_outputs)
        assert len(masked_new_states) == 1
        masked_th_state = KTH.eval(masked_new_states[0])

        unrolled_masked_last_output, unrolled_masked_outputs, unrolled_masked_new_states = KTH.rnn(
            th_rnn_step_fn,
            th_inputs,
            th_initial_states,
            go_backwards=False,
            mask=th_mask,
            unroll=True,
            input_length=timesteps)
        unrolled_masked_th_last_output = KTH.eval(unrolled_masked_last_output)
        unrolled_masked_th_outputs = KTH.eval(unrolled_masked_outputs)
        assert len(unrolled_masked_new_states) == 1
        unrolled_masked_th_state = KTH.eval(unrolled_masked_new_states[0])

        assert_allclose(unrolled_masked_th_last_output,
                        masked_th_last_output,
                        atol=1e-04)
        assert_allclose(unrolled_masked_th_outputs,
                        masked_th_outputs,
                        atol=1e-04)
        assert_allclose(unrolled_masked_th_state, masked_th_state, atol=1e-04)
Example #37
    def test_rnn(self):
        # implement a simple RNN
        input_dim = 8
        output_dim = 4
        timesteps = 5

        input_val = np.random.random((32, timesteps, input_dim))
        init_state_val = np.random.random((32, output_dim))
        W_i_val = np.random.random((input_dim, output_dim))
        W_o_val = np.random.random((output_dim, output_dim))

        def rnn_step_fn(input_dim, output_dim, K):
            W_i = K.variable(W_i_val)
            W_o = K.variable(W_o_val)

            def step_function(x, states):
                assert len(states) == 1
                prev_output = states[0]
                output = K.dot(x, W_i) + K.dot(prev_output, W_o)
                return output, [output]
            return step_function

        th_rnn_step_fn = rnn_step_fn(input_dim, output_dim, KTH)
        th_inputs = KTH.variable(input_val)
        th_initial_states = [KTH.variable(init_state_val)]
        last_output, outputs, new_states = KTH.rnn(th_rnn_step_fn, th_inputs,
                                                   th_initial_states,
                                                   go_backwards=False,
                                                   mask=None)
        th_last_output = KTH.eval(last_output)
        th_outputs = KTH.eval(outputs)
        assert len(new_states) == 1
        th_state = KTH.eval(new_states[0])

        tf_rnn_step_fn = rnn_step_fn(input_dim, output_dim, KTF)
        tf_inputs = KTF.variable(input_val)
        tf_initial_states = [KTF.variable(init_state_val)]
        last_output, outputs, new_states = KTF.rnn(tf_rnn_step_fn, tf_inputs,
                                                   tf_initial_states,
                                                   go_backwards=False,
                                                   mask=None)
        tf_last_output = KTF.eval(last_output)
        tf_outputs = KTF.eval(outputs)
        assert len(new_states) == 1
        tf_state = KTF.eval(new_states[0])

        assert_allclose(tf_last_output, th_last_output, atol=1e-04)
        assert_allclose(tf_outputs, th_outputs, atol=1e-04)
        assert_allclose(tf_state, th_state, atol=1e-04)

        # test unroll
        unrolled_last_output, unrolled_outputs, unrolled_new_states = KTH.rnn(
            th_rnn_step_fn, th_inputs,
            th_initial_states,
            go_backwards=False,
            mask=None,
            unroll=True,
            input_length=timesteps)

        unrolled_th_last_output = KTH.eval(unrolled_last_output)
        unrolled_th_outputs = KTH.eval(unrolled_outputs)
        assert len(unrolled_new_states) == 1
        unrolled_th_state = KTH.eval(unrolled_new_states[0])
        assert_allclose(th_last_output, unrolled_th_last_output, atol=1e-04)
        assert_allclose(th_outputs, unrolled_th_outputs, atol=1e-04)
        assert_allclose(th_state, unrolled_th_state, atol=1e-04)

        # test unroll with backwards = True
        bwd_last_output, bwd_outputs, bwd_new_states = KTH.rnn(
            th_rnn_step_fn, th_inputs,
            th_initial_states,
            go_backwards=True,
            mask=None)
        bwd_th_last_output = KTH.eval(bwd_last_output)
        bwd_th_outputs = KTH.eval(bwd_outputs)
        assert len(bwd_new_states) == 1
        bwd_th_state = KTH.eval(bwd_new_states[0])

        bwd_unrolled_last_output, bwd_unrolled_outputs, bwd_unrolled_new_states = KTH.rnn(
            th_rnn_step_fn, th_inputs,
            th_initial_states,
            go_backwards=True,
            mask=None,
            unroll=True,
            input_length=timesteps)

        bwd_unrolled_th_last_output = KTH.eval(bwd_unrolled_last_output)
        bwd_unrolled_th_outputs = KTH.eval(bwd_unrolled_outputs)
        assert len(bwd_unrolled_new_states) == 1
        bwd_unrolled_th_state = KTH.eval(bwd_unrolled_new_states[0])
        assert_allclose(bwd_th_last_output, bwd_unrolled_th_last_output, atol=1e-04)
        assert_allclose(bwd_th_outputs, bwd_unrolled_th_outputs, atol=1e-04)
        assert_allclose(bwd_th_state, bwd_unrolled_th_state, atol=1e-04)

        # test unroll with masking
        np_mask = np.random.randint(2, size=(32, timesteps))
        th_mask = KTH.variable(np_mask)

        masked_last_output, masked_outputs, masked_new_states = KTH.rnn(
            th_rnn_step_fn, th_inputs,
            th_initial_states,
            go_backwards=False,
            mask=th_mask)
        masked_th_last_output = KTH.eval(masked_last_output)
        masked_th_outputs = KTH.eval(masked_outputs)
        assert len(masked_new_states) == 1
        masked_th_state = KTH.eval(masked_new_states[0])

        unrolled_masked_last_output, unrolled_masked_outputs, unrolled_masked_new_states = KTH.rnn(
            th_rnn_step_fn, th_inputs,
            th_initial_states,
            go_backwards=False,
            mask=th_mask,
            unroll=True,
            input_length=timesteps)
        unrolled_masked_th_last_output = KTH.eval(unrolled_masked_last_output)
        unrolled_masked_th_outputs = KTH.eval(unrolled_masked_outputs)
        assert len(unrolled_masked_new_states) == 1
        unrolled_masked_th_state = KTH.eval(unrolled_masked_new_states[0])

        assert_allclose(unrolled_masked_th_last_output, masked_th_last_output, atol=1e-04)
        assert_allclose(unrolled_masked_th_outputs, masked_th_outputs, atol=1e-04)
        assert_allclose(unrolled_masked_th_state, masked_th_state, atol=1e-04)