Example #1
def index_add(index: torch.Tensor, value: torch.Tensor,
              in_out: torch.Tensor) -> None:
    '''It implements in_out[index[i]] += value[i].

    Caution:
      It has similar semantics to `torch.Tensor.index_add_`, except that
      (1) index.dtype == torch.int32; (2) -1 <= index[i] < in_out.shape[0];
      (3) index[i] == -1 is ignored.

    Caution:
      `in_out` is modified **in-place**.

    Caution:
      This function does NOT support autograd.

    Args:
      index:
        A 1-D tensor with dtype torch.int32.  -1 <= index[i] < in_out.shape[0]
      value:
        A 1-D tensor with dtype torch.float32. index.numel() == value.numel()
      in_out:
        A 1-D tensor with dtype torch.float32.

    Returns:
      None.
    '''

    _k2.index_add(index, value, in_out)
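
A minimal usage sketch of the wrapper above (the tensor values are
illustrative, not from the source):

import torch

index = torch.tensor([0, 2, -1, 0], dtype=torch.int32)
value = torch.tensor([1.0, 2.0, 3.0, 4.0], dtype=torch.float32)
in_out = torch.zeros(3, dtype=torch.float32)

index_add(index, value, in_out)
# in_out is now [5.0, 0.0, 2.0]: value[0] and value[3] both accumulate
# into in_out[0], value[1] goes to in_out[2], and the -1 entry is ignored.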
Example #2
def index_add(index: torch.Tensor, value: torch.Tensor,
              in_out: torch.Tensor) -> None:
    '''It implements in_out[index[i]] += value[i].

    Caution:
      It has similar semantics to `torch.Tensor.index_add_`, except that
      (1) index.dtype == torch.int32; (2) -1 <= index[i] < in_out.shape[0];
      (3) index[i] == -1 is ignored.

    Caution:
      `in_out` is modified **in-place**.

    Caution:
      This function does NOT support autograd.

    Args:
      index:
        A 1-D tensor with dtype torch.int32.  -1 <= index[i] < in_out.shape[0]
        CAUTION: It has to be contiguous.
      value:
        A 1-D or 2-D tensor with dtype torch.float32 or torch.float64.
        index.shape[0] == value.shape[0]
      in_out:
        A 1-D or 2-D tensor with the same dtype as `value`. If it is a
        2-D tensor, it satisfies in_out.shape[1] == value.shape[1].

    Returns:
      None.
    '''

    _k2.index_add(index, value, in_out)
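
A sketch of the 2-D form this version adds: each row of `value` is added
to the row of `in_out` selected by `index` (illustrative values):

import torch

index = torch.tensor([1, -1, 1], dtype=torch.int32)
value = torch.ones(3, 4, dtype=torch.float32)
in_out = torch.zeros(2, 4, dtype=torch.float32)

index_add(index, value, in_out)
# in_out[1] accumulated two rows of ones -> [2., 2., 2., 2.];
# in_out[0] is untouched, and the row with index == -1 is skipped.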
Example #3
File: autograd.py  Project: zhu-han/k2
    def backward(ctx, out_fsa_grad: torch.Tensor) \
            -> Tuple[None, None, None, None, torch.Tensor, torch.Tensor,
                     None, None, None]:  # noqa
        a_scores, b_scores = ctx.saved_tensors
        arc_map_a = ctx.arc_map_a
        arc_map_b = ctx.arc_map_b

        grad_a = torch.zeros(a_scores.size(0),
                             dtype=torch.float32,
                             device=a_scores.device,
                             requires_grad=False)

        grad_b = torch.zeros(
            *b_scores.shape,
            dtype=torch.float32,
            device=b_scores.device,
            requires_grad=False).contiguous()  # will use its `view()` later

        _k2.index_add(arc_map_a, out_fsa_grad, grad_a)
        _k2.index_add(arc_map_b, out_fsa_grad, grad_b.view(-1))

        return (
            None,  # a_fsas
            None,  # b_fsas
            None,  # out_fsa
            None,  # output_beam
            grad_a,  # unused_scores_a
            grad_b,  # unused_scores_b
            None,  # a_to_b_map
            None,  # seqframe_idx_name
            None  # frame_idx_name
        )
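
The `view(-1)` trick works because `grad_b` is allocated contiguous:
`arc_map_b` indexes into the flattened `b_scores`, so the gradient is
written through a 1-D view that shares storage with the 2-D buffer. The
same pattern in stock PyTorch (a sketch with made-up shapes, masking out
the -1 entries that `_k2.index_add` ignores natively):

import torch

b_scores = torch.randn(2, 3)
arc_map_b = torch.tensor([5, -1, 0], dtype=torch.int32)
out_fsa_grad = torch.tensor([1.0, 2.0, 3.0])

grad_b = torch.zeros_like(b_scores)  # contiguous by default
flat = grad_b.view(-1)               # shares storage with grad_b
mask = arc_map_b >= 0
flat.index_add_(0, arc_map_b[mask].to(torch.int64), out_fsa_grad[mask])
# grad_b now holds 3.0 at (0, 0) and 1.0 at (1, 2).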
Example #4
    def backward(ctx, out_grad) -> Tuple[torch.Tensor, None]:
        src, index = ctx.saved_tensors

        ans = torch.zeros(src.shape,
                          dtype=torch.float32,
                          device=src.device,
                          requires_grad=False)
        _k2.index_add(index, out_grad, ans)
        return ans, None
Example #5
    def backward(ctx, out_grad: torch.Tensor) -> Tuple[torch.Tensor, None]:
        indexes = ctx.indexes
        src, = ctx.saved_tensors
        expanded = _k2.index_select(out_grad, indexes.row_ids(1))
        ans = torch.zeros(src.shape,
                          dtype=torch.float32,
                          device=src.device,
                          requires_grad=False)
        _k2.index_add(indexes.values(), expanded, ans)
        return ans, None
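
This backward handles a ragged index: assuming k2's ragged layout,
`indexes.row_ids(1)` maps each sub-index to the output row whose gradient
it receives, and `indexes.values()` lists the positions in `src` that were
summed. A stock-PyTorch sketch of the same expand-then-scatter step
(hypothetical data standing in for the ragged tensor):

import torch

row_ids = torch.tensor([0, 0, 1])   # output row feeding each sub-index
values = torch.tensor([2, 0, 2])    # positions in src that were summed
out_grad = torch.tensor([10.0, 20.0])
src_grad = torch.zeros(3)

expanded = out_grad[row_ids]        # [10., 10., 20.]
src_grad.index_add_(0, values, expanded)
# src_grad == [10., 0., 30.]: src[2] fed both output rows, so it
# receives both gradients.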
Example #6
    def backward(ctx, out_fsa_grad: torch.Tensor
                 ) -> Tuple[None, None, torch.Tensor]:  # noqa
        arc_map = ctx.arc_map
        fsas_scores, = ctx.saved_tensors
        ans = torch.zeros(fsas_scores.size(0),
                          dtype=torch.float32,
                          device=fsas_scores.device,
                          requires_grad=False)
        _k2.index_add(arc_map, out_fsa_grad, ans)
        return None, None, ans
Example #7
File: ops.py  Project: entn-at/k2
    def backward(ctx, out_grad) -> Tuple[torch.Tensor, None]:
        src, index = ctx.saved_tensors

        ans = torch.zeros(src.shape,
                          dtype=out_grad.dtype,
                          device=src.device,
                          requires_grad=False)
        _k2.index_add(index, out_grad, ans)
        return (
            ans,  # src
            None,  # index
            None  # default_value
        )
Example #8
    def backward(
        ctx, out_fsa_scores_grad: torch.Tensor
    ) -> Tuple[None, torch.Tensor, None]:  # noqa
        unused_in_fsa_scores, arc_map = ctx.saved_tensors

        ans = torch.zeros(unused_in_fsa_scores.shape,
                          dtype=torch.float32,
                          device=unused_in_fsa_scores.device,
                          requires_grad=False)
        _k2.index_add(arc_map, out_fsa_scores_grad, ans)
        return (
            None,  # out_fsa
            ans,  # unused_in_fsa_scores
            None  # arc_map
        )
Example #9
    def backward(
        ctx, out_fsa_scores_grad: torch.Tensor
    ) -> Tuple[None, torch.Tensor, None]:  # noqa
        unused_in_fsa_scores, = ctx.saved_tensors
        arc_map = ctx.arc_map

        expanded = _k2.index_select(out_fsa_scores_grad, arc_map.row_ids(1))
        ans = torch.zeros(unused_in_fsa_scores.shape,
                          dtype=torch.float32,
                          device=unused_in_fsa_scores.device,
                          requires_grad=False)
        _k2.index_add(arc_map.values(), expanded, ans)

        return (
            None,  # out_fsa
            ans,  # unused_in_fsa_scores
            None  # arc_map
        )
Example #10
File: autograd.py  Project: qijiaxing/k2
    def backward(ctx, out_fsa_grad: torch.Tensor) \
            -> Tuple[None, None, None, None, None, None, None, torch.Tensor, torch.Tensor]: # noqa
        a_scores, b_scores = ctx.saved_tensors
        arc_map_a = ctx.arc_map_a
        arc_map_b = ctx.arc_map_b

        grad_a = torch.zeros(a_scores.size(0),
                             dtype=torch.float32,
                             device=a_scores.device,
                             requires_grad=False)

        grad_b = torch.zeros(
            *b_scores.shape,
            dtype=torch.float32,
            device=b_scores.device,
            requires_grad=False).contiguous()  # will use its `view()` later

        _k2.index_add(arc_map_a, out_fsa_grad, grad_a)
        _k2.index_add(arc_map_b, out_fsa_grad, grad_b.view(-1))

        return None, None, None, None, None, None, None, grad_a, grad_b
Example #11
def index_add(index: torch.Tensor, value: torch.Tensor,
              in_out: torch.Tensor) -> None:
    '''It implements in_out[index[i]] += value[i].

    Caution:
      It has similar semantics to `torch.Tensor.index_add_` except
      that:

        - `index.dtype == torch.int32`
        - `-1 <= index[i] < in_out.shape[0]`
        - `index[i] == -1` is ignored.
        - `index` has to be a 1-D **contiguous** tensor.

    Caution:
      `in_out` is modified **in-place**.

    Caution:
      This function does NOT support autograd.

    Args:
      index:
        A 1-D **contiguous** tensor with dtype `torch.int32`.
        Must satisfy `-1 <= index[i] < in_out.shape[0]`
      value:
        A 1-D or 2-D tensor with dtype `torch.int32`, `torch.float32`,
        or `torch.float64`.
        Must satisfy `index.shape[0] == value.shape[0]`
      in_out:
        A 1-D or 2-D tensor with the same dtype as `value`. It satisfies
        `in_out.shape[1] == value.shape[1]` if it is a 2-D tensor.

    Returns:
      None.
    '''

    _k2.index_add(index, value, in_out)
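
For reference, the documented semantics can be emulated with stock PyTorch
by masking out the -1 entries first (a sketch, not the k2 implementation;
it covers both the 1-D and 2-D cases):

import torch

def index_add_ref(index: torch.Tensor, value: torch.Tensor,
                  in_out: torch.Tensor) -> None:
    '''Pure-PyTorch emulation of the semantics documented above.'''
    mask = index >= 0
    # torch.Tensor.index_add_ has no notion of -1, so drop those rows;
    # int64 indices are accepted on every device.
    in_out.index_add_(0, index[mask].to(torch.int64), value[mask])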