Example #1
0
    def forward(ctx, out_fsa: Fsa, unused_in_fsa_scores: torch.Tensor,
                arc_map: _k2.RaggedInt) -> torch.Tensor:
        '''Return the `scores` tensor of `out_fsa`, stashing on `ctx`
        what the corresponding backward pass presumably needs.

        Args:
          ctx:
            The autograd context object.
          out_fsa:
            The output FSA whose `scores` tensor is returned.
          unused_in_fsa_scores:
            Scores of the input FSA. Not read here other than being
            saved via `save_for_backward` for the backward pass.
          arc_map:
            A ragged tensor of indexes; kept on `ctx` directly
            (it is not a `torch.Tensor`, so it cannot go through
            `save_for_backward`).
        Returns:
          The 1-D `scores` tensor of `out_fsa`.
        '''
        # NOTE: removed a dead `if False:` debug branch whose own TODO
        # said it could go; it recomputed out_fsa.scores with
        # _k2.index_and_sum and asserted equality.
        ctx.save_for_backward(unused_in_fsa_scores)
        ctx.arc_map = arc_map
        return out_fsa.scores
Example #2
0
    def forward(ctx, src: torch.Tensor, indexes: k2.RaggedInt) -> torch.Tensor:
        '''Gather entries of a 1-D tensor with a ragged tensor of
        indexes and sum each sublist, returning one value per sublist.

        Note:
          Autograd is supported: `src` is saved for the backward pass
          and `indexes` is kept on `ctx`.

        Args:
          src:
            A 1-D tensor of dtype `torch.float32`, e.g. a float tensor
            attribute of an FSA.
          indexes:
            A ragged tensor with exactly two axes, e.g. the arc map
            produced by :func:`_k2.remove_epsilon`.
        Returns:
          A 1-D tensor with dtype `torch.float32`.
        '''
        # Check the documented contract up front.
        assert src.ndim == 1
        assert src.dtype == torch.float32
        assert indexes.num_axes() == 2

        # Tensors go through save_for_backward; the ragged index
        # structure is not a Tensor, so it lives on ctx directly.
        ctx.save_for_backward(src)
        ctx.indexes = indexes

        return _k2.index_and_sum(src, indexes)