def test_pack_padded_long_sequence_forward_backward(total_length, padding_value,
                                                    batch_first, shapes, seed, ctx, func_name):
    if not func_name.endswith("Cuda"):
        pytest.skip(
            "PackPaddedSequence tests with very long sequences run only on CUDA.")

    from nbla_test_utils import function_tester
    rng = np.random.RandomState(seed)

    sequences = [rng.randn(*shape).astype(np.float32) for shape in shapes]
    padded_sequence = pad_sequence(sequences, batch_first)
    lengths = np.array([seq.shape[0] for seq in sequences])
    inputs = [padded_sequence, lengths]
    func_args0 = [batch_first]
    func_args1 = [batch_first, padding_value, total_length]
    insert_identity = [True, False]

    # Forward
    function_tester(rng, F.pack_padded_sequence, ref_pack_padded_sequence, inputs,
                    ctx=ctx, func_name=func_name, func_args=func_args0,
                    backward=[False, False],
                    atol_f=1e-3, atol_b=1e-2, insert_identity=insert_identity)

    # Backward
    import nnabla as nn
    padded_sequence0 = nn.Variable.from_numpy_array(
        inputs[0]).apply(need_grad=True)
    lengths = nn.Variable.from_numpy_array(inputs[1])
    with nn.context_scope(ctx), nn.auto_forward():
        # Pack backward
        padded_sequence0.g = rng.randn(*padded_sequence0.shape)
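        # Pre-fill the input gradient with noise; the first backward call
        # below passes accum=False, so this value must be overwritten.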
        packed_sequence0, batch_sizes = F.pack_padded_sequence(
            padded_sequence0, lengths, *func_args0)
        g = rng.randn(*packed_sequence0.shape)
        packed_sequence0.g = g
        packed_sequence0.parent.backward([padded_sequence0, lengths], [packed_sequence0, batch_sizes],
                                         [False, False])
        # Unpack
        packed_sequence1 = nn.Variable.from_numpy_array(g)
        padded_sequence1, lengths = F.pad_packed_sequence(
            packed_sequence1, batch_sizes, *func_args1)
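        # pad_packed_sequence lays the gradient back out in padded form; this
        # is what the backward pass of pack (a scatter) should have written.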
        # Compare w/o accum
        np.testing.assert_allclose(
            padded_sequence0.g.flatten(),
            padded_sequence1.d.flatten()[:np.prod(padded_sequence0.shape)],
            atol=1e-4,
            err_msg="{} test (w/o accum) with long sequence failed.".format(func_name))
        # Compare w/ accum
        packed_sequence0.parent.backward([padded_sequence0, lengths], [packed_sequence0, batch_sizes],
                                         [True, False])
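        # With accum=True the second backward adds the same gradient on top of
        # the first result, so halving recovers the single-pass gradient.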
        np.testing.assert_allclose(
            padded_sequence0.g.flatten() / 2,
            padded_sequence1.d.flatten()[:np.prod(padded_sequence0.shape)],
            atol=1e-4,
            err_msg="{} test (w/ accum) with long sequence failed.".format(func_name))
Example #2
def pack_padded_sequence(padded_sequence,
                         lengths,
                         batch_first=False,
                         enforce_sorted=True):
    r"""Pack a padded variable-length sequences.

    This method packs a padded variable-length sequences.

    :math:`T` is the max length over the lengths of sequences.
    :math:`B` is the batch size equal to the length of the sequences.     
    :math:`*` is the remaining dimensions including none.

    .. note::
      This function **must** be used in the dynamic computation mode.


    Example:

    .. code-block:: python

      import numpy as np
      import nnabla as nn
      import nnabla.functions as F
      import nnabla.utils.rnn as rnn_utils

      nn.set_auto_forward(True)

      l2v = lambda ldata: nn.Variable.from_numpy_array(np.asarray(ldata))
      a = l2v([1, 1, 1, 1])
      b = l2v([2, 2, 2])
      c = l2v([2, 2, 2])
      d = l2v([3, 3])
      e = l2v([3, 3])
      sequences = [a, b, c, d, e]
      lengths = l2v([seq.shape[0] for seq in sequences])

      padded_sequence = rnn_utils.pad_sequence(sequences)
      print(padded_sequence.d)

      packed_sequence = rnn_utils.pack_padded_sequence(padded_sequence, lengths)
      print(packed_sequence.data.d)
      print(packed_sequence.batch_sizes.d)
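
      # Expected output (a sketch, assuming time-major padding and
      # PyTorch-style packing semantics):
      #   padded_sequence.d:
      #     [[1 2 2 3 3]
      #      [1 2 2 3 3]
      #      [1 2 2 0 0]
      #      [1 0 0 0 0]]
      #   packed_sequence.data.d:        [1 2 2 3 3 1 2 2 3 3 1 2 2 1]
      #   packed_sequence.batch_sizes.d: [5 5 3 1]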

    Args: 
      padded_sequence (:obj:`nnabla.Variable`): Padded sequence of (:math:`T \times B \times *`)
                                                or (:math:`B \times T \times *`) shape.
      lengths (:obj:`nnabla.Variable`): Length of each sequence in the batch; it always resides on the CPU.
      batch_first (bool): `padded_sequence` is of (:math:`T`, :math:`B`, :math:`*`) shape if False,
                          otherwise (:math:`B`, :math:`T`, :math:`*`).
      enforce_sorted (bool): If True, the sequences are expected to be already sorted by length in decreasing order; if False, they are sorted internally. Default is True.

    Returns: 
        :obj:`PackedSequence`
    """
    if enforce_sorted:
        sorted_indices = None
        unsorted_indices = None
    else:
        # TODO: run the sort in the CUDA context once the sort bug is fixed;
        # until then, force the default (CPU) context.
        with nn.context_scope(nn.Context()):
            lengths, sorted_indices = F.sort(lengths,
                                             axis=0,
                                             reverse=True,
                                             with_index=True)

        B = sorted_indices.shape[0]
        unsorted_indices = F.scatter_nd(F.arange(0, B),
                                        sorted_indices.reshape((1, B)),
                                        shape=(B, ))
        axis = 0 if batch_first else 1
        padded_sequence = F.gather(padded_sequence, sorted_indices, axis)

    packed_sequence, batch_sizes = F.pack_padded_sequence(
        padded_sequence, lengths, batch_first)
    packed_sequence0 = PackedSequence()
    packed_sequence0.data = packed_sequence
    packed_sequence0.batch_sizes = batch_sizes
    packed_sequence0.sorted_indices = sorted_indices
    packed_sequence0.unsorted_indices = unsorted_indices

    return packed_sequence0
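
# The `F.scatter_nd` call in the enforce_sorted=False branch above builds the
# inverse of the sorting permutation: scattering arange(B) onto the positions
# given by sorted_indices yields indices that undo the sort. A minimal NumPy
# sketch of the same trick (illustrative values, not part of the library API):
def _inverse_permutation_sketch():
    import numpy as np
    perm = np.array([2, 0, 3, 1])       # stands in for sorted_indices
    inv = np.zeros_like(perm)
    inv[perm] = np.arange(len(perm))    # inv[perm[i]] = i, i.e. inv = perm^-1
    # Gathering with perm and then with inv restores the original order.
    assert (perm[inv] == np.arange(len(perm))).all()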