Example #1
    def test_fit_to_less_width(self):
        """Fit a tensor to a smalles width (i.e. trimming).

        Given a 3D tensor of shape [batch, length, width], apply the
        `ops.fit()` operator to it with a smaller `width` as the
        target one and check that the extra entries along the last
        axis have been removed.
        """
        batch = 2
        length = 5
        width = 4
        fit_width = 3
        delta = width - fit_width

        shape = [None, None, None]
        input_ = tf.placeholder(dtype=tf.float32, shape=shape)
        output = ops.fit(input_, fit_width)

        input_actual = np.random.rand(batch, length, width)  # pylint: disable=I0011,E1101
        delete_idx = [width - (i + 1) for i in range(delta)]
        output_expected = np.delete(input_actual, delete_idx, axis=2)  # pylint: disable=I0011,E1101
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            output_actual = sess.run(output, {input_: input_actual})
        self.assertAllClose(output_expected, output_actual)
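The trimming behavior exercised by this test could also be obtained with a plain slice along the last axis, mirroring the `np.delete` call used to build the expected output. A minimal sketch (the helper name `trim_last_axis` is hypothetical, not the actual `ops.fit()` implementation):

import tensorflow as tf

def trim_last_axis(tensor, fit_width):
    # Keep only the first `fit_width` entries along the last axis of a
    # 3D tensor; the trailing `width - fit_width` entries are dropped.
    return tensor[:, :, :fit_width]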
Example #2
    def test_fit_to_greater_width(self):
        """Fit a tensor to a larger width (i.e. padding).

        Given a 3D tensor of shape [batch, length, width], apply the
        `ops.fit()` operator to it with a larger `width` as the
        target one and check that the tensor has been padded along the
        last axis (with 0 as the padding value).
        """
        batch = 2
        length = 5
        width = 4
        fit_width = 5

        shape = [None, None, None]
        input_ = tf.placeholder(dtype=tf.float32, shape=shape)
        output = ops.fit(input_, fit_width)

        input_actual = np.random.rand(batch, length, width)  # pylint: disable=I0011,E1101
        output_expected = np.pad(  # pylint: disable=I0011,E1101
            input_actual,
            ((0, 0), (0, 0), (0, fit_width - width)),
            mode='constant')
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            output_actual = sess.run(output, {input_: input_actual})
        self.assertAllClose(output_expected, output_actual)
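The padding case, in turn, maps onto `tf.pad`, the graph-side counterpart of the `np.pad` call above. A minimal sketch under the same assumptions (3D input, hypothetical helper name):

import tensorflow as tf

def pad_last_axis(tensor, fit_width):
    # Zero-pad the last axis of a 3D tensor up to `fit_width` entries.
    missing = fit_width - tf.shape(tensor)[-1]
    paddings = [[0, 0], [0, 0], [0, missing]]
    return tf.pad(tensor, paddings, mode='CONSTANT')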
Example #3
    def next_inp(self, time, output):
        """Returns the next input.

        Arguments:
          time: an `int` or a scalar `Tensor` representing the current timestep.
          output: a `2D Tensor` of shape `[batch_size, output_size]` representing
            the current output.

        *NOTE*: at time `t+1` the desired decoder input is the output
        from the previous step `t`. This means that at timestep `t` the
        next input is the desired output for that very timestep, if
        decoder inputs have been provided -- otherwise it is just the
        current output.
        """
        if self._inputs_ta:
            output = tf.cond(time < self._inputs_ta.size(),
                             lambda: self._inputs_ta.read(time),
                             lambda: self.zero_output())  # pylint: disable=W0108
        next_inp = ops.fit(output, self._inp_size)
        return next_inp
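For context, `self._inputs_ta` is presumably a `tf.TensorArray` holding the gold decoder inputs, one entry per timestep. A minimal sketch of how such an array could be set up (the variable names and the time-major layout are assumptions, not taken from the surrounding code):

import tensorflow as tf

# Assumed time-major layout: [length, batch, size].
decoder_inputs = tf.placeholder(tf.float32, shape=[None, None, None])
inputs_ta = tf.TensorArray(dtype=tf.float32,
                           size=tf.shape(decoder_inputs)[0])
inputs_ta = inputs_ta.unstack(decoder_inputs)
# `inputs_ta.read(t)` now yields the gold input for step `t`, which is
# what the `time < self._inputs_ta.size()` branch above reads.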
Example #4
    def test_fit_same_width(self):
        """Fit a tensor to a dimension which is the actual one.

        Given a 3D tensor of shape [batch, length, width], apply the
        `ops.fit()` operator to it with the same `width` as the target one
        and check that the output tensor is the same as the input one.
        """
        batch = 2
        length = 5
        width = 4

        shape = [None, None, None]
        input_ = tf.placeholder(dtype=tf.float32, shape=shape)
        output = ops.fit(input_, width)
        self.assertEqual(output.get_shape().as_list()[-1], width)

        input_actual = np.random.rand(batch, length, width)  # pylint: disable=I0011,E1101
        output_expected = input_actual
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            output_actual = sess.run(output, {input_: input_actual})
        self.assertAllClose(output_expected, output_actual)
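Taken together, the three tests pin down the contract of `ops.fit()`: trim when the target width is smaller, zero-pad when it is larger, and pass the tensor through unchanged when the widths match. A fit-style helper honoring that contract could look like the sketch below (a hypothetical reimplementation for illustration, not the actual `ops.fit()` code):

import tensorflow as tf

def fit_last_axis(tensor, fit_width):
    # Trim the last axis to at most `fit_width` entries (the slice clamps
    # when the axis is already shorter), then zero-pad whatever is still
    # missing so the result always has exactly `fit_width` entries.
    trimmed = tensor[:, :, :fit_width]
    missing = fit_width - tf.shape(trimmed)[-1]
    paddings = [[0, 0], [0, 0], [0, missing]]
    return tf.pad(trimmed, paddings, mode='CONSTANT')

When the widths already match, `missing` is 0 and both the slice and the pad are no-ops, which is exactly what `test_fit_same_width` asserts.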