Example #1
    def test_gradient(self):
        val = np.random.random((4, 2))
        xth = KTH.variable(val)
        xtf = KTF.variable(val)

        expth = xth * KTH.exp(xth)
        exptf = xtf * KTF.exp(xtf)
        lossth = KTH.sum(expth)
        losstf = KTF.sum(exptf)
        zero_lossth = KTH.stop_gradient(lossth)
        zero_losstf = KTF.stop_gradient(losstf)

        gradth = KTH.gradients(lossth, [expth])
        gradtf = KTF.gradients(losstf, [exptf])
        zero_gradth = KTH.gradients(lossth + zero_lossth, [expth])
        zero_gradtf = KTF.gradients(losstf + zero_losstf, [exptf])

        zth = KTH.eval(gradth[0])
        ztf = KTF.eval(gradtf[0])
        zero_zth = KTH.eval(zero_gradth[0])
        zero_ztf = KTF.eval(zero_gradtf[0])
        assert zth.shape == ztf.shape
        assert zero_zth.shape == zero_ztf.shape
        assert_allclose(zth, ztf, atol=1e-05)
        assert_allclose(zero_zth, zero_ztf, atol=1e-05)
        assert_allclose(zero_zth, zth, atol=1e-05)
        assert_allclose(zero_ztf, ztf, atol=1e-05)
Example #2
    def test_ctc_decode_greedy(self):
        # Test adapted from tensorflow
        """Test two batch entries - best path decoder."""
        max_time_steps = 6

        seq_len_0 = 4
        input_prob_matrix_0 = np.asarray(
            [
                [1.0, 0.0, 0.0, 0.0],  # t=0
                [0.0, 0.0, 0.4, 0.6],  # t=1
                [0.0, 0.0, 0.4, 0.6],  # t=2
                [0.0, 0.9, 0.1, 0.0],  # t=3
                [0.0, 0.0, 0.0, 0.0],  # t=4 (ignored)
                [0.0, 0.0, 0.0, 0.0],
            ],  # t=5 (ignored)
            dtype=np.float32,
        )
        input_log_prob_matrix_0 = np.log(input_prob_matrix_0)

        seq_len_1 = 5
        # dimensions are time x depth

        input_prob_matrix_1 = np.asarray(
            [
                [0.1, 0.9, 0.0, 0.0],  # t=0
                [0.0, 0.9, 0.1, 0.0],  # t=1
                [0.0, 0.0, 0.1, 0.9],  # t=2
                [0.0, 0.9, 0.1, 0.1],  # t=3
                [0.9, 0.1, 0.0, 0.0],  # t=4
                [0.0, 0.0, 0.0, 0.0],
            ],  # t=5 (ignored)
            dtype=np.float32,
        )

        # len max_time_steps array of batch_size x depth matrices
        inputs = [np.vstack([input_prob_matrix_0[t, :], input_prob_matrix_1[t, :]]) for t in range(max_time_steps)]

        # change tensorflow order to keras backend order
        inputs = KTF.variable(np.asarray(inputs).transpose((1, 0, 2)))
        # batch_size length vector of sequence_lengths
        input_length = KTF.variable(np.array([seq_len_0, seq_len_1], dtype=np.int32))

        # batch_size length vector of negative log probabilities
        log_prob_truth = np.array(
            [np.sum(-np.log([1.0, 0.6, 0.6, 0.9])), np.sum(-np.log([0.9, 0.9, 0.9, 0.9, 0.9]))], np.float32
        )[:, np.newaxis]

        # keras output, unlike tensorflow, is a dense (not sparse) tensor
        decode_truth = np.array([[0, 1, -1], [1, 1, 0]])

        decode_pred_tf, log_prob_pred_tf = KTF.ctc_decode(inputs, input_length, greedy=True)

        assert len(decode_pred_tf) == 1

        decode_pred = KTF.eval(decode_pred_tf[0])
        log_prob_pred = KTF.eval(log_prob_pred_tf)

        assert np.alltrue(decode_truth == decode_pred)
        assert np.allclose(log_prob_truth, log_prob_pred)
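
A minimal NumPy sketch of how the greedy best path for the first batch entry above reduces to decode_truth[0] (assuming, as in TensorFlow's greedy CTC decoder, that the blank label is the last class index):

# Hedged sketch: greedy CTC decoding is a per-timestep argmax, followed by
# collapsing repeated labels and dropping the blank (assumed to be class 3).
import numpy as np

probs = np.asarray([[1.0, 0.0, 0.0, 0.0],   # t=0
                    [0.0, 0.0, 0.4, 0.6],   # t=1
                    [0.0, 0.0, 0.4, 0.6],   # t=2
                    [0.0, 0.9, 0.1, 0.0]])  # t=3 (seq_len_0 = 4 valid steps)
blank = probs.shape[-1] - 1

best_path = np.argmax(probs, axis=-1)               # [0, 3, 3, 1]
collapsed = [k for i, k in enumerate(best_path)
             if i == 0 or k != best_path[i - 1]]    # [0, 3, 1]
decoded = [k for k in collapsed if k != blank]      # [0, 1]
# Matches decode_truth[0] = [0, 1, -1] once padded with -1 to the dense width,
# and the per-step maxima [1.0, 0.6, 0.6, 0.9] give log_prob_truth[0].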
Example #3
    def test_ctc_decode_beam_search(self):
        """Test one batch, two beams - hibernating beam search."""

        depth = 6

        seq_len_0 = 5
        input_prob_matrix_0 = np.asarray(
            [[0.30999, 0.309938, 0.0679938, 0.0673362, 0.0708352, 0.173908],
             [0.215136, 0.439699, 0.0370931, 0.0393967, 0.0381581, 0.230517],
             [0.199959, 0.489485, 0.0233221, 0.0251417, 0.0233289, 0.238763],
             [0.279611, 0.452966, 0.0204795, 0.0209126, 0.0194803, 0.20655],
             [0.51286, 0.288951, 0.0243026, 0.0220788, 0.0219297, 0.129878],
             # Random entry added in at time=5
             [0.155251, 0.164444, 0.173517, 0.176138, 0.169979, 0.160671]],
            dtype=np.float32)

        # len max_time_steps array of batch_size x depth matrices
        inputs = ([input_prob_matrix_0[t, :][np.newaxis, :]
                  for t in range(seq_len_0)] +  # Pad to max_time_steps = 8
                  2 * [np.zeros((1, depth), dtype=np.float32)])

        inputs = KTF.variable(np.asarray(inputs).transpose((1, 0, 2)))

        # batch_size length vector of sequence_lengths
        input_length = KTF.variable(np.array([seq_len_0], dtype=np.int32))
        # batch_size length vector of negative log probabilities
        log_prob_truth = np.array([
            0.584855,  # output beam 0
            0.389139  # output beam 1
        ], np.float32)[np.newaxis, :]

        decode_truth = [np.array([1, 0]), np.array([0, 1, 0])]

        beam_width = 2
        top_paths = 2

        decode_pred_tf, log_prob_pred_tf = KTF.ctc_decode(inputs,
                                                          input_length,
                                                          greedy=False,
                                                          beam_width=beam_width,
                                                          top_paths=top_paths)

        assert len(decode_pred_tf) == top_paths

        log_prob_pred = KTF.eval(log_prob_pred_tf)

        for i in range(top_paths):
            assert np.alltrue(decode_truth[i] == KTF.eval(decode_pred_tf[i]))

        assert np.allclose(log_prob_truth, log_prob_pred)
Example #4
 def test_batch_dot_shape(self):
     x_batch = KTF.ones(shape=(32, 20))
     y_batch = KTF.ones(shape=(32, 20))
     xy_batch_dot = KTF.batch_dot(x_batch, y_batch, axes=1)
     assert_allclose(KTF.eval(xy_batch_dot), np.ones((32, 1)) * 20, atol=1e-05)
     xy_batch_dot = KTF.batch_dot(x_batch, y_batch, axes=0)
     assert_allclose(KTF.eval(xy_batch_dot), np.ones((20, 1)) * 32, atol=1e-05)
     # making sure swapping axes when ndim == 2 works
     x_batch = KTF.ones(shape=(32, 20))
     y_batch = KTF.ones(shape=(20, 32))
     xy_batch_dot = KTF.batch_dot(x_batch, y_batch, axes=(0, 1))
     assert_allclose(KTF.eval(xy_batch_dot), np.ones((20, 1)) * 32, atol=1e-05)
     xy_batch_dot = KTF.batch_dot(x_batch, y_batch, axes=(1, 0))
     assert_allclose(KTF.eval(xy_batch_dot), np.ones((32, 1)) * 20, atol=1e-05)
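
For intuition, a small NumPy sketch of what the axes=1 case above computes for 2D inputs (this mirrors the assertion, not the backend implementation):

# Hedged sketch: for 2D inputs, batch_dot(x, y, axes=1) amounts to an
# elementwise product summed over axis 1, with the result kept two-dimensional.
import numpy as np

x = np.ones((32, 20))
y = np.ones((32, 20))
out = np.sum(x * y, axis=1, keepdims=True)  # shape (32, 1), every entry 20
assert out.shape == (32, 1) and np.allclose(out, 20)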
Example #5
    def test_rnn(self):
        # implement a simple RNN
        input_dim = 8
        output_dim = 4
        timesteps = 5

        input_val = np.random.random((32, timesteps, input_dim))
        init_state_val = np.random.random((32, output_dim))
        W_i_val = np.random.random((input_dim, output_dim))
        W_o_val = np.random.random((output_dim, output_dim))

        def rnn_step_fn(input_dim, output_dim, K):
            W_i = K.variable(W_i_val)
            W_o = K.variable(W_o_val)

            def step_function(x, states):
                assert len(states) == 1
                prev_output = states[0]
                output = K.dot(x, W_i) + K.dot(prev_output, W_o)
                return output, [output]
            return step_function

        th_rnn_step_fn = rnn_step_fn(input_dim, output_dim, KTH)
        inputs = KTH.variable(input_val)
        initial_states = [KTH.variable(init_state_val)]
        last_output, outputs, new_states = KTH.rnn(th_rnn_step_fn, inputs,
                                                   initial_states,
                                                   go_backwards=False,
                                                   masking=False)
        th_last_output = KTH.eval(last_output)
        th_outputs = KTH.eval(outputs)
        assert len(new_states) == 1
        th_state = KTH.eval(new_states[0])

        tf_rnn_step_fn = rnn_step_fn(input_dim, output_dim, KTF)
        inputs = KTF.variable(input_val)
        initial_states = [KTF.variable(init_state_val)]
        last_output, outputs, new_states = KTF.rnn(tf_rnn_step_fn, inputs,
                                                   initial_states,
                                                   go_backwards=False,
                                                   masking=False)
        tf_last_output = KTF.eval(last_output)
        tf_outputs = KTF.eval(outputs)
        assert len(new_states) == 1
        tf_state = KTF.eval(new_states[0])

        assert_allclose(tf_last_output, th_last_output, atol=1e-04)
        assert_allclose(tf_outputs, th_outputs, atol=1e-04)
        assert_allclose(tf_state, th_state, atol=1e-04)
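
A pure-NumPy reference of the recurrence the step function above encodes, i.e. what rnn() should unroll to under these settings (sketch only, not either backend's implementation):

# Hedged sketch: output_t = x_t . W_i + output_{t-1} . W_o, iterated over the
# timestep axis, returning all outputs plus the final state.
import numpy as np

def reference_rnn(input_val, init_state_val, W_i_val, W_o_val):
    prev = init_state_val
    outputs = []
    for t in range(input_val.shape[1]):  # (samples, timesteps, input_dim)
        prev = input_val[:, t, :].dot(W_i_val) + prev.dot(W_o_val)
        outputs.append(prev)
    return np.stack(outputs, axis=1), prev  # (samples, timesteps, output_dim)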
Example #6
    def test_shape_operations(self):
        # concatenate
        xval = np.random.random((4, 3))
        xth = KTH.variable(xval)
        xtf = KTF.variable(xval)
        yval = np.random.random((4, 2))
        yth = KTH.variable(yval)
        ytf = KTF.variable(yval)
        zth = KTH.eval(KTH.concatenate([xth, yth], axis=-1))
        ztf = KTF.eval(KTF.concatenate([xtf, ytf], axis=-1))
        assert zth.shape == ztf.shape
        assert_allclose(zth, ztf, atol=1e-05)

        check_single_tensor_operation('reshape', (4, 2), shape=(8, 1))
        check_single_tensor_operation('permute_dimensions', (4, 2, 3),
                                      pattern=(2, 0, 1))
        check_single_tensor_operation('repeat', (4, 1), n=3)
        check_single_tensor_operation('flatten', (4, 1))
        check_single_tensor_operation('expand_dims', (4, 3), dim=-1)
        check_single_tensor_operation('expand_dims', (4, 3, 2), dim=1)
        check_single_tensor_operation('squeeze', (4, 3, 1), axis=2)
        check_single_tensor_operation('squeeze', (4, 1, 1), axis=1)
        check_composed_tensor_operations('reshape', {'shape': (4, 3, 1, 1)},
                                         'squeeze', {'axis': 2},
                                         (4, 3, 1, 1))
Example #7
    def test_nn_operations(self):
        check_single_tensor_operation('relu', (4, 2), alpha=0.1, max_value=0.5)
        check_single_tensor_operation('softmax', (4, 10))
        check_single_tensor_operation('softplus', (4, 10))

        check_single_tensor_operation('sigmoid', (4, 2))
        check_single_tensor_operation('hard_sigmoid', (4, 2))
        check_single_tensor_operation('tanh', (4, 2))

        # dropout
        val = np.random.random((100, 100))
        xth = KTH.variable(val)
        xtf = KTF.variable(val)
        zth = KTH.eval(KTH.dropout(xth, level=0.2))
        ztf = KTF.eval(KTF.dropout(xtf, level=0.2))
        assert zth.shape == ztf.shape
        # dropout patterns are different, only check mean
        assert np.abs(zth.mean() - ztf.mean()) < 0.05

        check_two_tensor_operation('binary_crossentropy', (4, 2), (4, 2),
                                   from_logits=True)
        check_two_tensor_operation('categorical_crossentropy', (4, 2), (4, 2),
                                   from_logits=True)
        check_two_tensor_operation('binary_crossentropy', (4, 2), (4, 2),
                                   from_logits=False)
        check_two_tensor_operation('categorical_crossentropy', (4, 2), (4, 2),
                                   from_logits=False)

        check_single_tensor_operation('l2_normalize', (4, 3), axis=-1)
        check_single_tensor_operation('l2_normalize', (4, 3), axis=1)
Example #8
    def reset_states(self, states_value=None):
        if len(self.states) == 0:
            return
        if not self.stateful:
            raise AttributeError('Layer must be stateful.')
        if not hasattr(self, 'states') or self.states[0] is None:
            state_shapes = list(map(K.int_shape, self.model.input[1:]))
            self.states = list(map(K.zeros, state_shapes))

        if states_value is not None:
            if type(states_value) not in (list, tuple):
                states_value = [states_value] * len(self.states)
            assert len(states_value) == len(self.states), (
                'Your RNN has ' + str(len(self.states)) + ' states, but was '
                'provided ' + str(len(states_value)) + ' state values.')
            if 'numpy' not in str(type(states_value[0])):
                states_value = list(map(np.array, states_value))
            if states_value[0].shape == tuple():
                for state, val in zip(self.states, states_value):
                    K.set_value(state, K.get_value(state) * 0. + val)
            else:
                for state, val in zip(self.states, states_value):
                    K.set_value(state, val)
        else:
            if self.state_initializer:
                for state, init in zip(self.states, self.state_initializer):
                    if isinstance(init, initializers.Zeros):
                        K.set_value(state, 0 * K.get_value(state))
                    else:
                        K.set_value(state,
                                    K.eval(init(K.get_value(state).shape)))
            else:
                for state in self.states:
                    K.set_value(state, 0 * K.get_value(state))
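
A minimal sketch, separate from the layer above and using plain backend variables, of the two set_value branches the states_value handling reduces to:

# Hedged sketch: a scalar value is broadcast into the whole state, while a
# full array replaces the state outright.
import numpy as np
from keras import backend as K

state = K.zeros((2, 3))

# scalar states_value -> fill every entry with that value
K.set_value(state, K.get_value(state) * 0. + 5.)
assert np.allclose(K.get_value(state), 5.)

# array states_value -> overwrite the state wholesale
K.set_value(state, np.arange(6, dtype=K.floatx()).reshape((2, 3)))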
Example #9
    def test_gather(self):
        shape = (10, 2, 3)
        ref = np.arange(np.prod(shape)).reshape(shape)
        ref_th = KTH.variable(ref)
        ref_tf = KTF.variable(ref)

        inds = [1, 3, 7, 9]
        inds_th = KTH.variable(inds, dtype='int32')
        inds_tf = KTF.variable(inds, dtype='int32')
        th_z = KTH.gather(ref_th, inds_th)
        th_result = KTH.eval(th_z)
        tf_result = KTF.eval(KTF.gather(ref_tf, inds_tf))

        assert_allclose(tf_result, th_result, atol=1e-05)

        if hasattr(th_z, '_keras_shape'):
            assert th_z._keras_shape == th_result.shape

        # test theano shape inference when
        # input shape has None entries
        if K.backend() == 'theano':
            x = K.placeholder(shape=(None, 3, 4))
            indices = K.placeholder(shape=(5, 6), dtype='int32')
            y = K.gather(x, indices)
            assert y._keras_shape == (5, 6, 3, 4)
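
The NumPy counterpart of the gather call above, shown only for intuition (sketch):

# Hedged sketch: K.gather(ref, inds) indexes along the first axis, like
# ref[inds] or np.take(ref, inds, axis=0); the result shape is
# (len(inds),) + ref.shape[1:].
import numpy as np

shape = (10, 2, 3)
ref = np.arange(np.prod(shape)).reshape(shape)
inds = [1, 3, 7, 9]

expected = ref[inds]  # shape (4, 2, 3)
assert np.array_equal(expected, np.take(ref, inds, axis=0))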
Example #10
    def test_repeat_elements(self):
        reps = 3
        for ndims in [1, 2, 3]:
            shape = np.arange(2, 2 + ndims)
            arr = np.arange(np.prod(shape)).reshape(shape)
            arr_th = KTH.variable(arr)
            arr_tf = KTF.variable(arr)

            for rep_axis in range(ndims):
                np_rep = np.repeat(arr, reps, axis=rep_axis)
                th_z = KTH.repeat_elements(arr_th, reps, axis=rep_axis)
                th_rep = KTH.eval(th_z)
                tf_rep = KTF.eval(
                    KTF.repeat_elements(arr_tf, reps, axis=rep_axis))

                assert th_rep.shape == np_rep.shape
                assert tf_rep.shape == np_rep.shape
                assert_allclose(np_rep, th_rep, atol=1e-05)
                assert_allclose(np_rep, tf_rep, atol=1e-05)
                if hasattr(th_z, '_keras_shape'):
                    assert th_z._keras_shape == th_rep.shape

                # test theano shape inference when
                # input shape has None entries
                if K.backend() == 'theano':
                    shape = list(shape)
                    shape[rep_axis] = None
                    x = K.placeholder(shape=shape)
                    y = K.repeat_elements(x, reps, axis=rep_axis)
                    assert y._keras_shape == tuple(shape)
Example #12
    def test_depth_to_space(self, batch_size, scale, channels, rows, cols):
        if K.image_data_format() == 'channels_first':
            arr = np.arange(batch_size * channels * scale * scale * rows * cols)\
                .reshape((batch_size, channels * scale * scale, rows, cols))
        elif K.image_data_format() == 'channels_last':
            arr = np.arange(batch_size * rows * cols * scale * scale * channels) \
                .reshape((batch_size, rows, cols, channels * scale * scale))

        arr_tf = KTF.variable(arr)
        arr_th = KTH.variable(arr)

        if K.image_data_format() == 'channels_first':
            expected = arr.reshape((batch_size, scale, scale, channels, rows, cols))\
                .transpose((0, 3, 4, 1, 5, 2))\
                .reshape((batch_size, channels, rows * scale, cols * scale))
        elif K.image_data_format() == 'channels_last':
            expected = arr.reshape((batch_size, rows, cols, scale, scale, channels))\
                .transpose((0, 1, 3, 2, 4, 5))\
                .reshape((batch_size, rows * scale, cols * scale, channels))

        tf_ans = KTF.eval(KCTF.depth_to_space(arr_tf, scale))
        th_ans = KTH.eval(KCTH.depth_to_space(arr_th, scale))

        assert tf_ans.shape == expected.shape
        assert th_ans.shape == expected.shape
        assert_allclose(expected, tf_ans, atol=1e-05)
        assert_allclose(expected, th_ans, atol=1e-05)
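
A tiny channels_last instance of the same reshape/transpose formula used above for the expected output, to make the pixel shuffle concrete (sketch with scale=2 and a single output channel):

# Hedged sketch: one pixel of depth 4 becomes a 2x2 spatial block of depth 1.
import numpy as np

scale, channels = 2, 1
arr = np.arange(4).reshape((1, 1, 1, channels * scale * scale))  # [0, 1, 2, 3]
expected = arr.reshape((1, 1, 1, scale, scale, channels)) \
    .transpose((0, 1, 3, 2, 4, 5)) \
    .reshape((1, 1 * scale, 1 * scale, channels))
assert np.array_equal(expected[0, :, :, 0], [[0, 1], [2, 3]])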
Example #13
def binary_loss(y_true, y_pred, labels):
    y_p = to_categorical(y_pred, labels)
    y_t = to_categorical(y_true, labels)
    y_pred = tf.convert_to_tensor(y_p)
    y_true = tf.convert_to_tensor(y_t)
    loss = binary_crossentropy(y_true, y_pred)
    return K.eval(loss)


def check_composed_tensor_operations(
    first_function_name,
    first_function_args,
    second_function_name,
    second_function_args,
    input_shape,
):
    """ Creates a random tensor t0 with shape input_shape and compute
                 t1 = first_function_name(t0, **first_function_args)
                 t2 = second_function_name(t1, **second_function_args)
        with both Theano and TensorFlow backends and ensures the answers match.
    """
    val = np.random.random(input_shape) - 0.5
    xth = KTH.variable(val)
    xtf = KTF.variable(val)

    yth = getattr(KCTH, first_function_name)(xth, **first_function_args)
    ytf = getattr(KCTF, first_function_name)(xtf, **first_function_args)

    zth = KTH.eval(
        getattr(KCTH, second_function_name)(yth, **second_function_args))
    ztf = KTF.eval(
        getattr(KCTF, second_function_name)(ytf, **second_function_args))

    assert zth.shape == ztf.shape
    assert_allclose(zth, ztf, atol=1e-05)
Example #15
    def test_nn_operations(self):
        check_single_tensor_operation('relu', (4, 2), alpha=0.1, max_value=0.5)
        check_single_tensor_operation('softmax', (4, 10))
        check_single_tensor_operation('softplus', (4, 10))

        check_single_tensor_operation('sigmoid', (4, 2))
        check_single_tensor_operation('hard_sigmoid', (4, 2))
        check_single_tensor_operation('tanh', (4, 2))

        # dropout
        val = np.random.random((100, 100))
        xth = KTH.variable(val)
        xtf = KTF.variable(val)
        zth = KTH.eval(KTH.dropout(xth, level=0.2))
        ztf = KTF.eval(KTF.dropout(xtf, level=0.2))
        assert zth.shape == ztf.shape
        # dropout patterns are different, only check mean
        assert np.abs(zth.mean() - ztf.mean()) < 0.05

        check_two_tensor_operation('binary_crossentropy', (4, 2), (4, 2), from_logits=True)
        check_two_tensor_operation('categorical_crossentropy', (4, 2), (4, 2), from_logits=True)
        check_two_tensor_operation('binary_crossentropy', (4, 2), (4, 2), from_logits=False)
        check_two_tensor_operation('categorical_crossentropy', (4, 2), (4, 2), from_logits=False)

        check_single_tensor_operation('l2_normalize', (4, 3), axis=-1)
        check_single_tensor_operation('l2_normalize', (4, 3), axis=1)
Example #18
    def test_conv2d(self):
        # TF kernel shape: (rows, cols, input_depth, depth)

        # channels_first input shape: (n, input_depth, rows, cols)
        for input_shape in [(2, 3, 4, 5), (2, 3, 5, 6)]:
            for kernel_shape in [(2, 2, 3, 4), (4, 3, 3, 4)]:
                for padding in ['valid', 'same']:
                    xval = np.random.random(input_shape)

                    xth = KTH.variable(xval)
                    xtf = KTF.variable(xval)

                    kernel_val = np.random.random(kernel_shape) - 0.5

                    kernel_th = KTH.variable(convert_kernel(kernel_val))
                    kernel_tf = KTF.variable(kernel_val)

                    zth = KTH.eval(
                        KTH.conv2d(xth,
                                   kernel_th,
                                   data_format='channels_first'))
                    ztf = KTF.eval(
                        KTF.conv2d(xtf,
                                   kernel_tf,
                                   data_format='channels_first'))

                    assert zth.shape == ztf.shape
                    assert_allclose(zth, ztf, atol=1e-05)

        input_shape = (1, 6, 5, 3)
        kernel_shape = (3, 3, 3, 2)

        xval = np.random.random(input_shape)

        xth = KTH.variable(xval)
        xtf = KTF.variable(xval)

        kernel_val = np.random.random(kernel_shape) - 0.5

        kernel_th = KTH.variable(convert_kernel(kernel_val))
        kernel_tf = KTF.variable(kernel_val)

        zth = KTH.eval(KTH.conv2d(xth, kernel_th, data_format='channels_last'))
        ztf = KTF.eval(KTF.conv2d(xtf, kernel_tf, data_format='channels_last'))

        assert zth.shape == ztf.shape
        assert_allclose(zth, ztf, atol=1e-05)
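
convert_kernel is what keeps the two backends' kernels equivalent above; to my understanding (an assumption, not the library source) it flips the spatial axes of a kernel while leaving the channel axes alone. A sketch of that operation for a TF-format 4D kernel:

# Hedged sketch (assumption about convert_kernel's effect, not its code): for a
# kernel laid out as (rows, cols, input_depth, depth), converting between the
# Theano and TensorFlow conventions amounts to flipping the two spatial axes.
import numpy as np

kernel_val = np.random.random((3, 3, 3, 2)) - 0.5
flipped = kernel_val[::-1, ::-1, :, :]
assert flipped.shape == kernel_val.shape
# Flipping twice is the identity, so the conversion is its own inverse.
assert np.allclose(flipped[::-1, ::-1, :, :], kernel_val)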
Example #19
    def test_rnn_no_states(self):
        # implement a simple RNN without states
        input_dim = 8
        output_dim = 4
        timesteps = 5

        input_val = np.random.random((32, timesteps, input_dim))
        W_i_val = np.random.random((input_dim, output_dim))

        def rnn_step_fn(input_dim, output_dim, K):
            W_i = K.variable(W_i_val)

            def step_function(x, states):
                assert len(states) == 0
                output = K.dot(x, W_i)
                return output, []

            return step_function

        # test default setup
        th_rnn_step_fn = rnn_step_fn(input_dim, output_dim, KTH)
        th_inputs = KTH.variable(input_val)
        th_initial_states = []
        last_output, outputs, new_states = KTH.rnn(th_rnn_step_fn,
                                                   th_inputs,
                                                   th_initial_states,
                                                   go_backwards=False,
                                                   mask=None)
        th_last_output = KTH.eval(last_output)
        th_outputs = KTH.eval(outputs)
        assert len(new_states) == 0

        tf_rnn_step_fn = rnn_step_fn(input_dim, output_dim, KTF)
        tf_inputs = KTF.variable(input_val)
        tf_initial_states = []
        last_output, outputs, new_states = KTF.rnn(tf_rnn_step_fn,
                                                   tf_inputs,
                                                   tf_initial_states,
                                                   go_backwards=False,
                                                   mask=None)
        tf_last_output = KTF.eval(last_output)
        tf_outputs = KTF.eval(outputs)
        assert len(new_states) == 0

        assert_allclose(tf_last_output, th_last_output, atol=1e-04)
        assert_allclose(tf_outputs, th_outputs, atol=1e-04)
Example #20
    def test_conv3d(self):
        # TH input shape: (samples, input_depth, conv_dim1, conv_dim2, conv_dim3)
        # TF input shape: (samples, conv_dim1, conv_dim2, conv_dim3, input_depth)
        # TH kernel shape: (depth, input_depth, x, y, z)
        # TF kernel shape: (x, y, z, input_depth, depth)

        # test in data_format = channels_first
        for input_shape in [(2, 3, 4, 5, 4), (2, 3, 5, 4, 6)]:
            for kernel_shape in [(2, 2, 2, 3, 4), (3, 2, 4, 3, 4)]:
                xval = np.random.random(input_shape)

                xth = KTH.variable(xval)
                xtf = KTF.variable(xval)

                kernel_val = np.random.random(kernel_shape) - 0.5

                kernel_th = KTH.variable(convert_kernel(kernel_val))
                kernel_tf = KTF.variable(kernel_val)

                zth = KTH.eval(
                    KTH.conv3d(xth, kernel_th, data_format='channels_first'))
                ztf = KTF.eval(
                    KTF.conv3d(xtf, kernel_tf, data_format='channels_first'))

                assert zth.shape == ztf.shape
                assert_allclose(zth, ztf, atol=1e-05)

        # test in data_format = channels_last
        input_shape = (1, 2, 2, 2, 1)
        kernel_shape = (2, 2, 2, 1, 1)

        xval = np.random.random(input_shape)

        xth = KTH.variable(xval)
        xtf = KTF.variable(xval)

        kernel_val = np.random.random(kernel_shape) - 0.5

        kernel_th = KTH.variable(convert_kernel(kernel_val))
        kernel_tf = KTF.variable(kernel_val)

        zth = KTH.eval(KTH.conv3d(xth, kernel_th, data_format='channels_last'))
        ztf = KTF.eval(KTF.conv3d(xtf, kernel_tf, data_format='channels_last'))

        assert zth.shape == ztf.shape
        assert_allclose(zth, ztf, atol=1e-05)
Example #21
    def test_extract(self):
        for input_shape in [(1, 3, 40, 40), (1, 3, 10, 10)]:
            for kernel_shape in [2, 5]:
                xval = np.random.random(input_shape)
                kernel = [kernel_shape, kernel_shape]
                strides = [kernel_shape, kernel_shape]
                xth = KTH.variable(xval)
                xtf = KTF.variable(xval)
                ztf = KTF.eval(
                    KCTF.extract_image_patches(xtf,
                                               kernel,
                                               strides,
                                               dim_ordering='th',
                                               border_mode="valid"))
                zth = KTH.eval(
                    KCTH.extract_image_patches(xth,
                                               kernel,
                                               strides,
                                               dim_ordering='th',
                                               border_mode="valid"))
                assert zth.shape == ztf.shape
                assert_allclose(zth, ztf, atol=1e-02)

        for input_shape in [(1, 40, 40, 3), (1, 10, 10, 3)]:
            for kernel_shape in [2, 5]:
                xval = np.random.random(input_shape)

                kernel = [kernel_shape, kernel_shape]
                strides = [kernel_shape, kernel_shape]
                xth = KTH.variable(xval)
                xtf = KTF.variable(xval)
                ztf = KTF.eval(
                    KCTF.extract_image_patches(xtf,
                                               kernel,
                                               strides,
                                               dim_ordering='tf',
                                               border_mode="same"))
                zth = KTH.eval(
                    KCTH.extract_image_patches(xth,
                                               kernel,
                                               strides,
                                               dim_ordering='tf',
                                               border_mode="same"))
                assert zth.shape == ztf.shape
                assert_allclose(zth, ztf, atol=1e-02)
Example #22
    def test_extract(self):
        for input_shape in [(1, 3, 40, 40), (1, 3, 10, 10)]:
            for kernel_shape in [2, 5]:
                xval = np.random.random(input_shape)
                kernel = [kernel_shape, kernel_shape]
                strides = [kernel_shape, kernel_shape]
                xth = KTH.variable(xval)
                xtf = KTF.variable(xval)
                ztf = KTF.eval(
                    KCTF.extract_image_patches(xtf,
                                               kernel,
                                               strides,
                                               data_format='channels_first',
                                               padding='valid'))
                zth = KTH.eval(
                    KCTH.extract_image_patches(xth,
                                               kernel,
                                               strides,
                                               data_format='channels_first',
                                               padding='valid'))
                assert zth.shape == ztf.shape
                assert_allclose(zth, ztf, atol=1e-02)

        for input_shape in [(1, 40, 40, 3), (1, 10, 10, 3)]:
            for kernel_shape in [2, 5]:
                xval = np.random.random(input_shape)

                kernel = [kernel_shape, kernel_shape]
                strides = [kernel_shape, kernel_shape]
                xth = KTH.variable(xval)
                xtf = KTF.variable(xval)
                ztf = KTF.eval(
                    KCTF.extract_image_patches(xtf,
                                               kernel,
                                               strides,
                                               data_format='channels_last',
                                               padding='same'))
                zth = KTH.eval(
                    KCTH.extract_image_patches(xth,
                                               kernel,
                                               strides,
                                               data_format='channels_last',
                                               padding='same'))
                assert zth.shape == ztf.shape
                assert_allclose(zth, ztf, atol=1e-02)
Example #23
    def test_in_top_k(self):
        batch_size = 20
        num_classes = 10

        # Random prediction test case
        predictions = np.random.random(
            (batch_size, num_classes)).astype('float32')
        targets = np.random.randint(num_classes,
                                    size=batch_size,
                                    dtype='int32')

        predictions_th = KTH.variable(predictions, dtype='float32')
        targets_th = KTH.variable(targets, dtype='int32')
        predictions_tf = KTF.variable(predictions, dtype='float32')
        targets_tf = KTF.variable(targets, dtype='int32')

        for k in range(1, num_classes + 1):
            res_th = KTH.eval(KTH.in_top_k(predictions_th, targets_th, k))
            res_tf = KTF.eval(KTF.in_top_k(predictions_tf, targets_tf, k))

            assert res_th.shape == res_tf.shape
            assert_allclose(res_th, res_tf, atol=1e-05)

        # Identical prediction test case:
        # randomly set half of the predictions to an identical value
        num_identical = num_classes // 2
        for i in range(batch_size):
            idx_identical = np.random.choice(num_classes,
                                             size=num_identical,
                                             replace=False)
            predictions[i, idx_identical] = predictions[i, 0]
        targets = np.zeros(batch_size, dtype='int32')

        predictions_th = KTH.variable(predictions, dtype='float32')
        targets_th = KTH.variable(targets, dtype='int32')
        predictions_tf = KTF.variable(predictions, dtype='float32')
        targets_tf = KTF.variable(targets, dtype='int32')

        for k in range(1, num_classes + 1):
            res_th = KTH.eval(KTH.in_top_k(predictions_th, targets_th, k))
            res_tf = KTF.eval(KTF.in_top_k(predictions_tf, targets_tf, k))

            assert res_th.shape == res_tf.shape
            assert_allclose(res_th, res_tf, atol=1e-05)
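
A NumPy reference for what in_top_k checks, used here only for intuition (sketch; tie handling can differ between backends, which is exactly what the identical-prediction case above probes):

# Hedged sketch: a target is "in the top k" when fewer than k classes score
# strictly higher than the target class.
import numpy as np

def in_top_k_ref(predictions, targets, k):
    target_scores = predictions[np.arange(len(targets)), targets]
    higher = np.sum(predictions > target_scores[:, None], axis=-1)
    return higher < k

preds = np.array([[0.1, 0.5, 0.4],
                  [0.3, 0.3, 0.4]], dtype='float32')
assert in_top_k_ref(preds, np.array([1, 0]), k=1).tolist() == [True, False]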
Example #24
    def test_conv3d(self):
        # TH input shape: (samples, input_depth, conv_dim1, conv_dim2, conv_dim3)
        # TF input shape: (samples, conv_dim1, conv_dim2, conv_dim3, input_depth)
        # TH kernel shape: (depth, input_depth, x, y, z)
        # TF kernel shape: (x, y, z, input_depth, depth)

        # test in dim_ordering = th
        for input_shape in [(2, 3, 4, 5, 4), (2, 3, 5, 4, 6)]:
            for kernel_shape in [(4, 3, 2, 2, 2), (4, 3, 3, 2, 4)]:
                xval = np.random.random(input_shape)

                xth = KTH.variable(xval)
                xtf = KTF.variable(xval)

                kernel_val = np.random.random(kernel_shape) - 0.5

                kernel_th = KTH.variable(convert_kernel(kernel_val))
                kernel_tf = KTF.variable(kernel_val)

                zth = KTH.eval(KTH.conv3d(xth, kernel_th))
                ztf = KTF.eval(KTF.conv3d(xtf, kernel_tf))

                assert zth.shape == ztf.shape
                assert_allclose(zth, ztf, atol=1e-05)

        # test in dim_ordering = tf
        input_shape = (1, 2, 2, 2, 1)
        kernel_shape = (2, 2, 2, 1, 1)

        xval = np.random.random(input_shape)

        xth = KTH.variable(xval)
        xtf = KTF.variable(xval)

        kernel_val = np.random.random(kernel_shape) - 0.5

        kernel_th = KTH.variable(convert_kernel(kernel_val, dim_ordering='tf'))
        kernel_tf = KTF.variable(kernel_val)

        zth = KTH.eval(KTH.conv3d(xth, kernel_th, dim_ordering='tf'))
        ztf = KTF.eval(KTF.conv3d(xtf, kernel_tf, dim_ordering='tf'))

        assert zth.shape == ztf.shape
        assert_allclose(zth, ztf, atol=1e-05)
Example #25
    def test_ctc(self):
        # simplified version of TensorFlow's test

        label_lens = np.expand_dims(np.asarray([5, 4]), 1)
        input_lens = np.expand_dims(np.asarray([5, 5]),
                                    1)  # number of timesteps

        # The Theano and TensorFlow CTC implementations use different methods
        # to ensure numerical stability. The Theano code subtracts out the max
        # before the final log, so the results differ but scale identically
        # and still train properly.
        loss_log_probs_tf = [3.34211, 5.42262]
        loss_log_probs_th = [1.73308, 3.81351]

        # dimensions are batch x time x categories
        labels = np.asarray([[0, 1, 2, 1, 0], [0, 1, 1, 0, -1]])
        inputs = np.asarray(
            [[[0.633766, 0.221185, 0.0917319, 0.0129757, 0.0142857, 0.0260553],
              [0.111121, 0.588392, 0.278779, 0.0055756, 0.00569609, 0.010436],
              [
                  0.0357786, 0.633813, 0.321418, 0.00249248, 0.00272882,
                  0.0037688
              ],
              [
                  0.0663296, 0.643849, 0.280111, 0.00283995, 0.0035545,
                  0.00331533
              ],
              [
                  0.458235, 0.396634, 0.123377, 0.00648837, 0.00903441,
                  0.00623107
              ]],
             [[0.30176, 0.28562, 0.0831517, 0.0862751, 0.0816851, 0.161508],
              [0.24082, 0.397533, 0.0557226, 0.0546814, 0.0557528, 0.19549],
              [0.230246, 0.450868, 0.0389607, 0.038309, 0.0391602, 0.202456],
              [0.280884, 0.429522, 0.0326593, 0.0339046, 0.0326856, 0.190345],
              [0.423286, 0.315517, 0.0338439, 0.0393744, 0.0339315, 0.154046]]
             ],
            dtype=np.float32)

        labels_tf = KTF.variable(labels, dtype="int32")
        inputs_tf = KTF.variable(inputs, dtype="float32")
        input_lens_tf = KTF.variable(input_lens, dtype="int32")
        label_lens_tf = KTF.variable(label_lens, dtype="int32")
        res = KTF.eval(
            KTF.ctc_batch_cost(labels_tf, inputs_tf, input_lens_tf,
                               label_lens_tf))
        assert_allclose(res[:, 0], loss_log_probs_tf, atol=1e-05)

        labels_th = KTH.variable(labels, dtype="int32")
        inputs_th = KTH.variable(inputs, dtype="float32")
        input_lens_th = KTH.variable(input_lens, dtype="int32")
        label_lens_th = KTH.variable(label_lens, dtype="int32")
        res = KTH.eval(
            KTH.ctc_batch_cost(labels_th, inputs_th, input_lens_th,
                               label_lens_th))
        assert_allclose(res[0, :], loss_log_probs_th, atol=1e-05)
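
The comment above about subtracting out the max before the final log refers to the standard numerically stable log-sum-exp rewrite; a minimal, general-purpose sketch of that identity (not the CTC code itself):

# Hedged sketch: log(sum(exp(a))) == max(a) + log(sum(exp(a - max(a)))), and
# the right-hand side stays finite where the naive form overflows.
import numpy as np

a = np.array([1000.0, 1000.1, 999.9])
naive = np.log(np.sum(np.exp(a)))           # exp overflows, result is inf
m = np.max(a)
stable = m + np.log(np.sum(np.exp(a - m)))  # ~1001.1, finite
assert np.isinf(naive) and np.isfinite(stable)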
Example #26
def check_single_tensor_operation(function_name, input_shape, **kwargs):
    val = np.random.random(input_shape) - 0.5
    xth = KTH.variable(val)
    xtf = KTF.variable(val)

    zth = KTH.eval(getattr(KCTH, function_name)(xth, **kwargs))
    ztf = KTF.eval(getattr(KCTF, function_name)(xtf, **kwargs))

    assert zth.shape == ztf.shape
    assert_allclose(zth, ztf, atol=1e-05)
Example #27
    def test_tile(self):
        shape = (3, 4)
        arr = np.arange(np.prod(shape)).reshape(shape)
        arr_th = KTH.variable(arr)
        arr_tf = KTF.variable(arr)

        n = (2, 1)
        th_rep = KTH.eval(KTH.tile(arr_th, n))
        tf_rep = KTF.eval(KTF.tile(arr_tf, n))
        assert_allclose(tf_rep, th_rep, atol=1e-05)
Example #29
    def test_rnn_no_states(self):
        # implement a simple RNN without states
        input_dim = 8
        output_dim = 4
        timesteps = 5

        input_val = np.random.random((32, timesteps, input_dim))
        W_i_val = np.random.random((input_dim, output_dim))

        def rnn_step_fn(input_dim, output_dim, K):
            W_i = K.variable(W_i_val)

            def step_function(x, states):
                assert len(states) == 0
                output = K.dot(x, W_i)
                return output, []
            return step_function

        # test default setup
        th_rnn_step_fn = rnn_step_fn(input_dim, output_dim, KTH)
        th_inputs = KTH.variable(input_val)
        th_initial_states = []
        last_output, outputs, new_states = KTH.rnn(th_rnn_step_fn, th_inputs,
                                                   th_initial_states,
                                                   go_backwards=False,
                                                   mask=None)
        th_last_output = KTH.eval(last_output)
        th_outputs = KTH.eval(outputs)
        assert len(new_states) == 0

        tf_rnn_step_fn = rnn_step_fn(input_dim, output_dim, KTF)
        tf_inputs = KTF.variable(input_val)
        tf_initial_states = []
        last_output, outputs, new_states = KTF.rnn(tf_rnn_step_fn, tf_inputs,
                                                   tf_initial_states,
                                                   go_backwards=False,
                                                   mask=None)
        tf_last_output = KTF.eval(last_output)
        tf_outputs = KTF.eval(outputs)
        assert len(new_states) == 0

        assert_allclose(tf_last_output, th_last_output, atol=1e-04)
        assert_allclose(tf_outputs, th_outputs, atol=1e-04)
Example #30
def check_single_tensor_operation(function_name, input_shape, **kwargs):
    val = np.random.random(input_shape) - 0.5
    xth = KTH.variable(val)
    xtf = KTF.variable(val)

    zth = KTH.eval(getattr(KTH, function_name)(xth, **kwargs))
    ztf = KTF.eval(getattr(KTF, function_name)(xtf, **kwargs))

    assert zth.shape == ztf.shape
    assert_allclose(zth, ztf, atol=1e-05)
Example #31
 def test_arange(self):
     for test_value in (-20, 0, 1, 10):
         t_a = KTF.arange(test_value)
         a = KTF.eval(t_a)
         assert np.array_equal(a, np.arange(test_value))
         t_b = KTH.arange(test_value)
         b = KTH.eval(t_b)
         assert np.array_equal(b, np.arange(test_value))
         assert np.array_equal(a, b)
         assert KTF.dtype(t_a) == KTH.dtype(t_b)
     for start, stop, step in ((0, 5, 1), (-5, 5, 2), (0, 1, 2)):
         a = KTF.eval(KTF.arange(start, stop, step))
         assert np.array_equal(a, np.arange(start, stop, step))
         b = KTH.eval(KTH.arange(start, stop, step))
         assert np.array_equal(b, np.arange(start, stop, step))
         assert np.array_equal(a, b)
     for dtype in ('int32', 'int64', 'float32', 'float64'):
         for backend in (KTF, KTH):
             t = backend.arange(10, dtype=dtype)
             assert backend.dtype(t) == dtype
Example #33
    def test_conv2d(self):
        # TF kernel shape: (rows, cols, input_depth, depth)

        # channels_first input shape: (n, input_depth, rows, cols)
        for input_shape in [(2, 3, 4, 5), (2, 3, 5, 6)]:
            for kernel_shape in [(2, 2, 3, 4), (4, 3, 3, 4)]:
                for padding in ['valid', 'same']:
                    xval = np.random.random(input_shape)

                    xth = KTH.variable(xval)
                    xtf = KTF.variable(xval)

                    kernel_val = np.random.random(kernel_shape) - 0.5

                    kernel_th = KTH.variable(convert_kernel(kernel_val))
                    kernel_tf = KTF.variable(kernel_val)

                    zth = KTH.eval(KTH.conv2d(xth, kernel_th, data_format='channels_first'))
                    ztf = KTF.eval(KTF.conv2d(xtf, kernel_tf, data_format='channels_first'))

                    assert zth.shape == ztf.shape
                    assert_allclose(zth, ztf, atol=1e-05)

        input_shape = (1, 6, 5, 3)
        kernel_shape = (3, 3, 3, 2)

        xval = np.random.random(input_shape)

        xth = KTH.variable(xval)
        xtf = KTF.variable(xval)

        kernel_val = np.random.random(kernel_shape) - 0.5

        kernel_th = KTH.variable(convert_kernel(kernel_val))
        kernel_tf = KTF.variable(kernel_val)

        zth = KTH.eval(KTH.conv2d(xth, kernel_th, data_format='channels_last'))
        ztf = KTF.eval(KTF.conv2d(xtf, kernel_tf, data_format='channels_last'))

        assert zth.shape == ztf.shape
        assert_allclose(zth, ztf, atol=1e-05)
Example #34
    def test_conv2d(self):
        # TH kernel shape: (depth, input_depth, rows, cols)
        # TF kernel shape: (rows, cols, input_depth, depth)

        for input_shape in [(2, 3, 4, 5), (2, 3, 5, 6)]:
            for kernel_shape in [(4, 3, 2, 2), (4, 3, 3, 4)]:
                xval = np.random.random(input_shape)

                xth = KTH.variable(xval)
                xtf = KTF.variable(xval)

                kernel_val = np.random.random(kernel_shape) - 0.5

                kernel_th = KTH.variable(
                    convert_kernel(kernel_val, dim_ordering='th'))
                kernel_tf = KTF.variable(kernel_val)

                zth = KTH.eval(KTH.conv2d(xth, kernel_th, dim_ordering='th'))
                ztf = KTF.eval(KTF.conv2d(xtf, kernel_tf, dim_ordering='th'))

                assert zth.shape == ztf.shape
                assert_allclose(zth, ztf, atol=1e-05)

        input_shape = (1, 6, 5, 3)
        kernel_shape = (3, 3, 3, 2)

        xval = np.random.random(input_shape)

        xth = KTH.variable(xval)
        xtf = KTF.variable(xval)

        kernel_val = np.random.random(kernel_shape) - 0.5

        kernel_th = KTH.variable(convert_kernel(kernel_val, dim_ordering='tf'))
        kernel_tf = KTF.variable(kernel_val)

        zth = KTH.eval(KTH.conv2d(xth, kernel_th, dim_ordering='tf'))
        ztf = KTF.eval(KTF.conv2d(xtf, kernel_tf, dim_ordering='tf'))

        assert zth.shape == ztf.shape
        assert_allclose(zth, ztf, atol=1e-05)
Example #35
    def test_random_normal(self):
        mean = 0.
        std = 1.
        rand = KTF.eval(KTF.random_normal((1000, 1000), mean=mean, stddev=std))
        assert rand.shape == (1000, 1000)
        assert np.abs(np.mean(rand) - mean) < 0.01
        assert np.abs(np.std(rand) - std) < 0.01

        rand = KTH.eval(KTH.random_normal((1000, 1000), mean=mean, stddev=std))
        assert rand.shape == (1000, 1000)
        assert np.abs(np.mean(rand) - mean) < 0.01
        assert np.abs(np.std(rand) - std) < 0.01
Example #36
    def test_random_normal(self):
        mean = 0.
        std = 1.
        rand = KTF.eval(KTF.random_normal((1000, 1000), mean=mean, std=std))
        assert(rand.shape == (1000, 1000))
        assert(np.abs(np.mean(rand) - mean) < 0.01)
        assert(np.abs(np.std(rand) - std) < 0.01)

        rand = KTH.eval(KTH.random_normal((1000, 1000), mean=mean, std=std))
        assert(rand.shape == (1000, 1000))
        assert(np.abs(np.mean(rand) - mean) < 0.01)
        assert(np.abs(np.std(rand) - std) < 0.01)
Example #37
    def test_conv2d(self):
        # TH kernel shape: (depth, input_depth, rows, cols)
        # TF kernel shape: (rows, cols, input_depth, depth)

        for input_shape in [(2, 3, 4, 5), (2, 3, 5, 6)]:
            for kernel_shape in [(4, 3, 2, 2), (4, 3, 3, 4)]:
                xval = np.random.random(input_shape)

                xth = KTH.variable(xval)
                xtf = KTF.variable(xval)

                kernel_val = np.random.random(kernel_shape) - 0.5

                kernel_th = KTH.variable(convert_kernel(kernel_val))
                kernel_tf = KTF.variable(kernel_val)

                zth = KTH.eval(KTH.conv2d(xth, kernel_th))
                ztf = KTF.eval(KTF.conv2d(xtf, kernel_tf))

                assert zth.shape == ztf.shape
                assert_allclose(zth, ztf, atol=1e-05)

        input_shape = (1, 6, 5, 3)
        kernel_shape = (3, 3, 3, 2)

        xval = np.random.random(input_shape)

        xth = KTH.variable(xval)
        xtf = KTF.variable(xval)

        kernel_val = np.random.random(kernel_shape) - 0.5

        kernel_th = KTH.variable(convert_kernel(kernel_val, dim_ordering='tf'))
        kernel_tf = KTF.variable(kernel_val)

        zth = KTH.eval(KTH.conv2d(xth, kernel_th, dim_ordering='tf'))
        ztf = KTF.eval(KTF.conv2d(xtf, kernel_tf, dim_ordering='tf'))

        assert zth.shape == ztf.shape
        assert_allclose(zth, ztf, atol=1e-05)
Example #38
    def test_random_binomial(self):
        p = 0.5
        rand = KTF.eval(KTF.random_binomial((1000, 1000), p))
        assert(rand.shape == (1000, 1000))
        assert(np.abs(np.mean(rand) - p) < 0.01)
        assert(np.max(rand) == 1)
        assert(np.min(rand) == 0)

        rand = KTH.eval(KTH.random_binomial((1000, 1000), p))
        assert(rand.shape == (1000, 1000))
        assert(np.abs(np.mean(rand) - p) < 0.01)
        assert(np.max(rand) == 1)
        assert(np.min(rand) == 0)
Example #39
    def test_tile(self):
        shape = (3, 4)
        arr = np.arange(np.prod(shape)).reshape(shape)
        arr_th = KTH.variable(arr)
        arr_tf = KTF.variable(arr)

        n = (2, 1)
        th_z = KTH.tile(arr_th, n)
        th_rep = KTH.eval(th_z)
        tf_rep = KTF.eval(KTF.tile(arr_tf, n))
        assert_allclose(tf_rep, th_rep, atol=1e-05)
        if hasattr(th_z, '_keras_shape'):
            assert th_z._keras_shape == th_rep.shape
Example #40
    def test_in_top_k(self):
        batch_size = 20
        num_classes = 10

        # Random prediction test case
        predictions = np.random.random((batch_size, num_classes)).astype('float32')
        targets = np.random.randint(num_classes, size=batch_size, dtype='int32')

        predictions_th = KTH.variable(predictions, dtype='float32')
        targets_th = KTH.variable(targets, dtype='int32')
        predictions_tf = KTF.variable(predictions, dtype='float32')
        targets_tf = KTF.variable(targets, dtype='int32')

        for k in range(1, num_classes + 1):
            res_th = KTH.eval(KTH.in_top_k(predictions_th, targets_th, k))
            res_tf = KTF.eval(KTF.in_top_k(predictions_tf, targets_tf, k))

            assert res_th.shape == res_tf.shape
            assert_allclose(res_th, res_tf, atol=1e-05)

        # Identical prediction test case:
        # randomly set half of the predictions to an identical value
        num_identical = num_classes // 2
        for i in range(batch_size):
            idx_identical = np.random.choice(num_classes, size=num_identical, replace=False)
            predictions[i, idx_identical] = predictions[i, 0]
        targets = np.zeros(batch_size, dtype='int32')

        predictions_th = KTH.variable(predictions, dtype='float32')
        targets_th = KTH.variable(targets, dtype='int32')
        predictions_tf = KTF.variable(predictions, dtype='float32')
        targets_tf = KTF.variable(targets, dtype='int32')

        for k in range(1, num_classes + 1):
            res_th = KTH.eval(KTH.in_top_k(predictions_th, targets_th, k))
            res_tf = KTF.eval(KTF.in_top_k(predictions_tf, targets_tf, k))

            assert res_th.shape == res_tf.shape
            assert_allclose(res_th, res_tf, atol=1e-05)
Example #41
    def test_switch(self):
        val = np.random.random()
        xth = KTH.variable(val)
        xth = KTH.switch(xth >= 0.5, xth * 0.1, xth * 0.2)

        xtf = KTF.variable(val)
        xtf = KTF.switch(xtf >= 0.5, xtf * 0.1, xtf * 0.2)

        zth = KTH.eval(xth)
        ztf = KTF.eval(xtf)

        assert zth.shape == ztf.shape
        assert_allclose(zth, ztf, atol=1e-05)
Example #42
    def test_random_binomial(self):
        p = 0.5
        rand = KTF.eval(KTF.random_binomial((1000, 1000), p))
        assert rand.shape == (1000, 1000)
        assert np.abs(np.mean(rand) - p) < 0.01
        assert np.max(rand) == 1
        assert np.min(rand) == 0

        rand = KTH.eval(KTH.random_binomial((1000, 1000), p))
        assert rand.shape == (1000, 1000)
        assert np.abs(np.mean(rand) - p) < 0.01
        assert np.max(rand) == 1
        assert np.min(rand) == 0
Example #44
 def test_batch_dot_shape(self):
     x_batch = KTF.ones(shape=(32, 20))
     y_batch = KTF.ones(shape=(32, 20))
     xy_batch_dot = KTF.batch_dot(x_batch, y_batch, axes=1)
     assert_allclose(KTF.eval(xy_batch_dot),
                     np.ones((32, 1)) * 20,
                     atol=1e-05)
     xy_batch_dot = KTF.batch_dot(x_batch, y_batch, axes=0)
     assert_allclose(KTF.eval(xy_batch_dot),
                     np.ones((20, 1)) * 32,
                     atol=1e-05)
     # making sure swapping axes when ndim == 2 works
     x_batch = KTF.ones(shape=(32, 20))
     y_batch = KTF.ones(shape=(20, 32))
     xy_batch_dot = KTF.batch_dot(x_batch, y_batch, axes=(0, 1))
     assert_allclose(KTF.eval(xy_batch_dot),
                     np.ones((20, 1)) * 32,
                     atol=1e-05)
     xy_batch_dot = KTF.batch_dot(x_batch, y_batch, axes=(1, 0))
     assert_allclose(KTF.eval(xy_batch_dot),
                     np.ones((32, 1)) * 20,
                     atol=1e-05)
Example #45
    def test_random_uniform(self):
        min_val = -1.
        max_val = 1.
        rand = KTF.eval(KTF.random_uniform((1000, 1000), min_val, max_val))
        assert rand.shape == (1000, 1000)
        assert np.abs(np.mean(rand)) < 0.01
        assert np.max(rand) <= max_val
        assert np.min(rand) >= min_val

        rand = KTH.eval(KTH.random_uniform((1000, 1000), min_val, max_val))
        assert rand.shape == (1000, 1000)
        assert np.abs(np.mean(rand)) < 0.01
        assert np.max(rand) <= max_val
        assert np.min(rand) >= min_val
Example #47
    def test_random_uniform(self):
        min = -1.
        max = 1.
        rand = KTF.eval(KTF.random_uniform((1000, 1000), min, max))
        assert (rand.shape == (1000, 1000))
        assert (np.abs(np.mean(rand)) < 0.01)
        assert (np.max(rand) <= max)
        assert (np.min(rand) >= min)

        rand = KTH.eval(KTH.random_uniform((1000, 1000), min, max))
        assert (rand.shape == (1000, 1000))
        assert (np.abs(np.mean(rand)) < 0.01)
        assert (np.max(rand) <= max)
        assert (np.min(rand) >= min)
Example #48
def check_two_tensor_operation(function_name, x_input_shape, y_input_shape, **kwargs):
    xval = np.random.random(x_input_shape) - 0.5
    xth = KTH.variable(xval)
    xtf = KTF.variable(xval)

    yval = np.random.random(y_input_shape) - 0.5
    yth = KTH.variable(yval)
    ytf = KTF.variable(yval)

    zth = KTH.eval(getattr(KTH, function_name)(xth, yth, **kwargs))
    ztf = KTF.eval(getattr(KTF, function_name)(xtf, ytf, **kwargs))

    assert zth.shape == ztf.shape
    assert_allclose(zth, ztf, atol=1e-05)
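
A hypothetical call site, shown only to illustrate the calling convention (the shapes and function names below are illustrative, not taken from this suite):

# compare KTH.dot and KTF.dot on random (4, 2) x (2, 5) inputs
check_two_tensor_operation('dot', (4, 2), (2, 5))
# extra keyword arguments are forwarded to the backend function
check_two_tensor_operation('batch_dot', (4, 2, 3), (4, 5, 3), axes=(2, 2))
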
Example #49
    def test_random_uniform(self):
        min_val = -1.
        max_val = 1.
        rand = KTF.eval(KTF.random_uniform((1000, 1000), min_val, max_val))
        assert rand.shape == (1000, 1000)
        assert np.abs(np.mean(rand)) < 0.01
        assert np.max(rand) <= max_val
        assert np.min(rand) >= min_val

        rand = KTH.eval(KTH.random_uniform((1000, 1000), min_val, max_val))
        assert rand.shape == (1000, 1000)
        assert np.abs(np.mean(rand)) < 0.01
        assert np.max(rand) <= max_val
        assert np.min(rand) >= min_val
Example #50
    def test_extract(self, input_shape, kernel_shape):
        xval = np.random.random(input_shape)
        kernel = [kernel_shape, kernel_shape]
        strides = [kernel_shape, kernel_shape]
        xth = KTH.variable(xval)
        xtf = KTF.variable(xval)
        ztf = KTF.eval(KCTF.extract_image_patches(xtf, kernel, strides,
                                                  data_format='channels_first',
                                                  padding='valid'))
        zth = KTH.eval(KCTH.extract_image_patches(xth, kernel, strides,
                                                  data_format='channels_first',
                                                  padding='valid'))
        assert zth.shape == ztf.shape
        assert_allclose(zth, ztf, atol=1e-02)
Example #51
def check_two_tensor_operation(function_name, x_input_shape, y_input_shape,
                               **kwargs):
    xval = np.random.random(x_input_shape) - 0.5
    xth = KTH.variable(xval)
    xtf = KTF.variable(xval)

    yval = np.random.random(y_input_shape) - 0.5
    yth = KTH.variable(yval)
    ytf = KTF.variable(yval)

    zth = KTH.eval(getattr(KTH, function_name)(xth, yth, **kwargs))
    ztf = KTF.eval(getattr(KTF, function_name)(xtf, ytf, **kwargs))

    assert zth.shape == ztf.shape
    assert_allclose(zth, ztf, atol=1e-05)
Example #52
    def test_moments(self):
        input_shape = (10, 10, 10, 10)
        x_0 = np.zeros(input_shape)
        x_1 = np.ones(input_shape)
        x_random = np.random.random(input_shape)

        th_axes = [0, 2, 3]
        tf_axes = [0, 1, 2]

        for ip in [x_0, x_1, x_random]:
            for axes in [th_axes, tf_axes]:
                for keep_dims in [True, False]:
                    ip_th = KTH.variable(ip)
                    th_mean, th_var = KCTH.moments(ip_th,
                                                   axes,
                                                   keep_dims=keep_dims)

                    ip_tf = KTF.variable(ip)
                    tf_mean, tf_var = KCTF.moments(ip_tf,
                                                   axes,
                                                   keep_dims=keep_dims)

                    th_mean_val = KTH.eval(th_mean)
                    tf_mean_val = KTF.eval(tf_mean)
                    th_var_val = KTH.eval(th_var)
                    tf_var_val = KTF.eval(tf_var)

                    # absolute tolerance needed when working with zeros
                    assert_allclose(th_mean_val,
                                    tf_mean_val,
                                    rtol=1e-4,
                                    atol=1e-10)
                    assert_allclose(th_var_val,
                                    tf_var_val,
                                    rtol=1e-4,
                                    atol=1e-10)
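
For intuition, a NumPy sketch of what the moments call computes, assuming KCTH.moments and KCTF.moments follow the tf.nn.moments convention of returning the mean and the biased variance over the given axes:

import numpy as np

ip = np.random.random((10, 10, 10, 10))
axes = (0, 1, 2)                          # the 'tf_axes' case above
mean = ip.mean(axis=axes, keepdims=True)
var = ip.var(axis=axes, keepdims=True)    # biased variance, as in tf.nn.moments
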
Example #53
    def test_ctc(self):
        # simplified version of TensorFlow's test

        label_lens = np.expand_dims(np.asarray([5, 4]), 1)
        input_lens = np.expand_dims(np.asarray([5, 5]), 1)  # number of timesteps

        # The Theano and TensorFlow CTC implementations use different methods to
        # ensure numerical stability. The Theano code subtracts out the max
        # before the final log, so the two backends return different values that
        # scale identically and still train properly.
        loss_log_probs_tf = [3.34211, 5.42262]
        loss_log_probs_th = [1.73308, 3.81351]

        # dimensions are batch x time x categories
        labels = np.asarray([[0, 1, 2, 1, 0], [0, 1, 1, 0, -1]])
        inputs = np.asarray(
            [
                [
                    [0.633766, 0.221185, 0.0917319, 0.0129757, 0.0142857, 0.0260553],
                    [0.111121, 0.588392, 0.278779, 0.0055756, 0.00569609, 0.010436],
                    [0.0357786, 0.633813, 0.321418, 0.00249248, 0.00272882, 0.0037688],
                    [0.0663296, 0.643849, 0.280111, 0.00283995, 0.0035545, 0.00331533],
                    [0.458235, 0.396634, 0.123377, 0.00648837, 0.00903441, 0.00623107],
                ],
                [
                    [0.30176, 0.28562, 0.0831517, 0.0862751, 0.0816851, 0.161508],
                    [0.24082, 0.397533, 0.0557226, 0.0546814, 0.0557528, 0.19549],
                    [0.230246, 0.450868, 0.0389607, 0.038309, 0.0391602, 0.202456],
                    [0.280884, 0.429522, 0.0326593, 0.0339046, 0.0326856, 0.190345],
                    [0.423286, 0.315517, 0.0338439, 0.0393744, 0.0339315, 0.154046],
                ],
            ],
            dtype=np.float32,
        )

        labels_tf = KTF.variable(labels, dtype="int32")
        inputs_tf = KTF.variable(inputs, dtype="float32")
        input_lens_tf = KTF.variable(input_lens, dtype="int32")
        label_lens_tf = KTF.variable(label_lens, dtype="int32")
        res = KTF.eval(KTF.ctc_batch_cost(labels_tf, inputs_tf, input_lens_tf, label_lens_tf))
        assert_allclose(res[:, 0], loss_log_probs_tf, atol=1e-05)

        labels_th = KTH.variable(labels, dtype="int32")
        inputs_th = KTH.variable(inputs, dtype="float32")
        input_lens_th = KTH.variable(input_lens, dtype="int32")
        label_lens_th = KTH.variable(label_lens, dtype="int32")
        res = KTH.eval(KTH.ctc_batch_cost(labels_th, inputs_th, input_lens_th, label_lens_th))
        assert_allclose(res[0, :], loss_log_probs_th, atol=1e-05)
Example #54
    def test_gradient(self):
        val = np.random.random((4, 2))
        xth = KTH.variable(val)
        xtf = KTF.variable(val)

        expth = xth * KTH.exp(xth)
        exptf = xtf * KTF.exp(xtf)
        lossth = KTH.sum(expth)
        losstf = KTF.sum(exptf)

        gradth = KTH.gradients(lossth, [expth])
        gradtf = KTF.gradients(losstf, [exptf])

        zth = KTH.eval(gradth[0])
        ztf = KTF.eval(gradtf[0])
        assert zth.shape == ztf.shape
        assert_allclose(zth, ztf, atol=1e-05)
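
One point worth noting (an observation, not an added assertion): the loss is simply the sum of expth, so the gradient of the loss with respect to expth is one in every entry, and both backends should return an all-ones (4, 2) array here:

import numpy as np

# d(sum(e)) / d(e[i, j]) = 1 for every element, regardless of the values in e
expected_grad = np.ones((4, 2))
# zth and ztf above should both be close to expected_grad
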
Example #55
    def test_repeat_elements(self):
        reps = 3
        for ndims in [1, 2, 3]:
            shape = np.arange(2, 2 + ndims)
            arr = np.arange(np.prod(shape)).reshape(shape)
            arr_th = KTH.variable(arr)
            arr_tf = KTF.variable(arr)

            for rep_axis in range(ndims):
                np_rep = np.repeat(arr, reps, axis=rep_axis)
                th_rep = KTH.eval(KTH.repeat_elements(arr_th, reps, axis=rep_axis))
                tf_rep = KTF.eval(KTF.repeat_elements(arr_tf, reps, axis=rep_axis))

                assert th_rep.shape == np_rep.shape
                assert tf_rep.shape == np_rep.shape
                assert_allclose(np_rep, th_rep, atol=1e-05)
                assert_allclose(np_rep, tf_rep, atol=1e-05)
Example #56
    def test_gather(self):
        shape = (10, 2, 3)
        ref = np.arange(np.prod(shape)).reshape(shape)
        ref_th = KTH.variable(ref)
        ref_tf = KTF.variable(ref)

        inds = [1, 3, 7, 9]
        inds_th = KTH.variable(inds, dtype='int32')
        inds_tf = KTF.variable(inds, dtype='int32')
        th_z = KTH.gather(ref_th, inds_th)
        th_result = KTH.eval(th_z)
        tf_result = KTF.eval(KTF.gather(ref_tf, inds_tf))

        assert_allclose(tf_result, th_result, atol=1e-05)

        if hasattr(th_z, '_keras_shape'):
            assert th_z._keras_shape == th_result.shape
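
The NumPy ground truth for the gather above is plain indexing along the first axis (a sketch; the test itself only cross-checks the two backends):

import numpy as np

shape = (10, 2, 3)
ref = np.arange(np.prod(shape)).reshape(shape)
inds = [1, 3, 7, 9]
expected = np.take(ref, inds, axis=0)   # shape (4, 2, 3)
# th_result and tf_result above should both equal `expected`
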
Example #57
    def test_shape_operations(self):
        # concatenate
        xval = np.random.random((4, 3))
        xth = KTH.variable(xval)
        xtf = KTF.variable(xval)
        yval = np.random.random((4, 2))
        yth = KTH.variable(yval)
        ytf = KTF.variable(yval)
        zth = KTH.eval(KTH.concatenate([xth, yth], axis=-1))
        ztf = KTF.eval(KTF.concatenate([xtf, ytf], axis=-1))
        assert zth.shape == ztf.shape
        assert_allclose(zth, ztf, atol=1e-05)

        check_single_tensor_operation("reshape", (4, 2), shape=(8, 1))
        check_single_tensor_operation("permute_dimensions", (4, 2, 3), pattern=(2, 0, 1))
        check_single_tensor_operation("repeat", (4, 1), n=3)
        check_single_tensor_operation("flatten", (4, 1))
        check_single_tensor_operation("expand_dims", (4, 3), dim=-1)
        check_single_tensor_operation("expand_dims", (4, 3, 2), dim=1)
        check_single_tensor_operation("squeeze", (4, 3, 1), axis=2)
Example #58
def check_composed_tensor_operations(first_function_name, first_function_args,
                                     second_function_name, second_function_args,
                                     input_shape):
    '''Creates a random tensor t0 with shape input_shape, computes
           t1 = first_function_name(t0, **first_function_args)
           t2 = second_function_name(t1, **second_function_args)
       with both the Theano and TensorFlow backends, and ensures the answers match.
    '''
    val = np.random.random(input_shape) - 0.5
    xth = KTH.variable(val)
    xtf = KTF.variable(val)

    yth = getattr(KTH, first_function_name)(xth, **first_function_args)
    ytf = getattr(KTF, first_function_name)(xtf, **first_function_args)

    zth = KTH.eval(getattr(KTH, second_function_name)(yth, **second_function_args))
    ztf = KTF.eval(getattr(KTF, second_function_name)(ytf, **second_function_args))

    assert zth.shape == ztf.shape
    assert_allclose(zth, ztf, atol=1e-05)
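
A hypothetical invocation showing the calling convention (the shapes and arguments below are illustrative only):

# reshape a random (4, 3) tensor to (4, 3, 1, 1), then squeeze axis 2,
# and check that Theano and TensorFlow agree on the result
check_composed_tensor_operations('reshape', {'shape': (4, 3, 1, 1)},
                                 'squeeze', {'axis': 2},
                                 input_shape=(4, 3))
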
Example #59
    def test_tile(self):
        shape = (3, 4)
        arr = np.arange(np.prod(shape)).reshape(shape)
        arr_th = KTH.variable(arr)
        arr_tf = KTF.variable(arr)

        n = (2, 1)
        th_z = KTH.tile(arr_th, n)
        th_rep = KTH.eval(th_z)
        tf_rep = KTF.eval(KTF.tile(arr_tf, n))
        assert_allclose(tf_rep, th_rep, atol=1e-05)
        if hasattr(th_z, '_keras_shape'):
            assert th_z._keras_shape == th_rep.shape

        # test theano shape inference when
        # input shape has None entries
        if K.backend() == 'theano':
            x = K.placeholder(shape=(None, 4))
            n = 2
            y = KTH.tile(x, n)
            assert y._keras_shape == (None, 8)
            n = (4, 3)
            y = K.tile(x, n)
            assert y._keras_shape == (None, 12)
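
For reference, the NumPy equivalent of the tiling above (a sketch, not part of the test): repeating a (3, 4) array twice along the first axis and once along the second gives shape (6, 4).

import numpy as np

arr = np.arange(12).reshape((3, 4))
tiled = np.tile(arr, (2, 1))
assert tiled.shape == (6, 4)
# th_rep and tf_rep above should both equal `tiled`
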