Example #1
def add_epsilon_self_loops(fsa: Fsa) -> Fsa:
    '''Add epsilon self-loops to an Fsa or FsaVec.

    This is required when composing using a composition method that does not
    treat epsilons specially, if the other FSA has epsilons in it.

    Args:
      fsa:
        The input FSA. It can be either a single FSA or an FsaVec.

    Returns:
      An instance of :class:`Fsa` that has an epsilon self-loop on every
      non-final state.
    '''

    need_arc_map = True
    ragged_arc, arc_map = _k2.add_epsilon_self_loops(fsa.arcs,
                                                     need_arc_map=need_arc_map)

    out_fsa = Fsa(ragged_arc)
    for name, value in fsa.named_tensor_attr():
        new_value = index_attr(value, arc_map)
        setattr(out_fsa, name, new_value)

    for name, value in fsa.named_non_tensor_attr():
        setattr(out_fsa, name, value)

    return out_fsa
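
Usage context for the docstring's note about composition, as a hedged sketch: it assumes the module-level k2 API (k2.Fsa.from_str, k2.arc_sort, an exported add_epsilon_self_loops, and k2.intersect with a treat_epsilons_specially flag); which operand must be arc-sorted depends on the k2 version, so treat this as an illustration rather than the canonical recipe.

import k2

# `b` contains an epsilon arc (label 0); `a` does not.
a = k2.arc_sort(k2.Fsa.from_str('''
    0 1 1 0.1
    1 2 -1 0.2
    2
'''))
b = k2.arc_sort(k2.Fsa.from_str('''
    0 1 0 0.3
    1 2 1 0.4
    2 3 -1 0.5
    3
'''))

# A composition method that does not treat epsilons specially needs
# epsilon self-loops on the other operand, so that `b`'s epsilon arcs
# have something to match against.
a_loops = k2.arc_sort(k2.add_epsilon_self_loops(a))
c = k2.intersect(a_loops, b, treat_epsilons_specially=False)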
Example #2
def add_epsilon_self_loops(
        fsa: Fsa,
        ret_arc_map: bool = False
) -> Union[Fsa, Tuple[Fsa, torch.Tensor]]:  # noqa
    '''Add epsilon self-loops to an Fsa or FsaVec.

    This is required when composing using a composition method that does not
    treat epsilons specially, if the other FSA has epsilons in it.

    Args:
      fsa:
        The input FSA. It can be either a single FSA or an FsaVec.
      ret_arc_map:
        If False, return the resulting Fsa.
        If True, return an extra arc map.

    Returns:
      If ret_arc_map is False, return an instance of :class:`Fsa` that has an
      epsilon self-loop on every non-final state.
      If ret_arc_map is True, return a tuple of that Fsa and an arc_map, where
      arc_map[i] is the arc index in the input `fsa` that corresponds to the
      i-th arc in the resulting Fsa; arc_map[i] is -1 if the i-th arc in the
      resulting Fsa has no counterpart in the input `fsa`.
    '''

    need_arc_map = True
    ragged_arc, arc_map = _k2.add_epsilon_self_loops(fsa.arcs,
                                                     need_arc_map=need_arc_map)

    out_fsa = k2.utils.fsa_from_unary_function_tensor(fsa, ragged_arc, arc_map)
    if ret_arc_map:
        return out_fsa, arc_map
    else:
        return out_fsa
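
A hedged usage sketch for the ret_arc_map variant above, assuming the function is exported at the top level as k2.add_epsilon_self_loops; the arc map is used only to tell newly created self-loops apart from arcs copied from the input.

import k2

fsa = k2.Fsa.from_str('''
    0 1 1 0.5
    1 2 -1 1.0
    2
''')

out, arc_map = k2.add_epsilon_self_loops(fsa, ret_arc_map=True)

# Entries equal to -1 mark arcs created by this function (the epsilon
# self-loops); other entries index into the arcs of the input `fsa`.
is_new_self_loop = arc_map == -1
print(out.labels)        # e.g. tensor([ 0,  1,  0, -1], dtype=torch.int32)
print(is_new_self_loop)  # e.g. tensor([ True, False,  True, False])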
Example #3
def add_epsilon_self_loops(fsa: Fsa) -> Fsa:
    '''Add epsilon self-loops to an Fsa or FsaVec.

    This is required when composing using a composition method that does not
    treat epsilons specially, if the other FSA has epsilons in it.

    Args:
      fsa:
        The input FSA. It can be either a single FSA or an FsaVec.

    Returns:
      An instance of :class:`Fsa` that has an epsilon self-loop on every
      non-final state.
    '''

    need_arc_map = True
    ragged_arc, arc_map = _k2.add_epsilon_self_loops(fsa.arcs,
                                                     need_arc_map=need_arc_map)
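    # Shift indices up by one so that -1 (arcs with no counterpart in the
    # input, i.e. the newly added self-loops) selects the zero-padding row
    # prepended to each tensor attribute below.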
    arc_map = arc_map.to(torch.int64) + 1

    # TODO(fangjun): implement _k2.index to process indexes == -1

    out_fsa = Fsa.from_ragged_arc(ragged_arc)
    for name, value in fsa.named_tensor_attr():
        padding = value.new_zeros((1, *value.shape[1:]))
        value = torch.cat((padding, value), dim=0)
        new_value = value.index_select(0, arc_map)
        setattr(out_fsa, name, new_value)

    for name, value in fsa.named_non_tensor_attr():
        setattr(out_fsa, name, value)

    return out_fsa
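
The +1/zero-padding workaround in Example #3 can be checked in isolation with plain PyTorch; the following minimal sketch reproduces the same indexing for a 1-D attribute, independent of k2.

import torch

# Attribute values for 3 input arcs, and an arc map for 5 output arcs,
# where -1 means the output arc has no counterpart in the input.
value = torch.tensor([0.1, 0.2, 0.3])
arc_map = torch.tensor([-1, 0, 1, -1, 2], dtype=torch.int32)

# Shift by one and prepend a zero row, so index -1 picks the padding.
indexes = arc_map.to(torch.int64) + 1
padded = torch.cat((value.new_zeros(1), value), dim=0)
new_value = padded.index_select(0, indexes)
# new_value: tensor([0.0000, 0.1000, 0.2000, 0.0000, 0.3000])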
Example #4
File: fsa_algo.py Project: yyht/k2
def add_epsilon_self_loops(fsa: Fsa) -> Fsa:
    '''Add epsilon self-loops to an Fsa or FsaVec.

    This is required when composing using a composition method that does not
    treat epsilons specially, if the other FSA has epsilons in it.

    Args:
      fsa:
        The input FSA. It can be either a single FSA or an FsaVec.

    Returns:
      An instance of :class:`Fsa` that has an epsilon self-loop on every
      non-final state.
    '''

    need_arc_map = True
    ragged_arc, arc_map = _k2.add_epsilon_self_loops(fsa.arcs,
                                                     need_arc_map=need_arc_map)

    out_fsa = k2.utils.fsa_from_unary_function_tensor(fsa, ragged_arc, arc_map)
    return out_fsa
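
Since the docstring says the input can also be an FsaVec, here is a hedged sketch of the batched case, assuming k2.create_fsa_vec and the exported wrapper; self-loops are added to every non-final state of every FSA in the batch.

import k2

fsa = k2.Fsa.from_str('''
    0 1 1 0.5
    1 2 -1 1.0
    2
''')

# Batch two copies into an FsaVec and add self-loops to the whole batch.
fsa_vec = k2.create_fsa_vec([fsa, fsa])
out = k2.add_epsilon_self_loops(fsa_vec)
print(out.shape)  # e.g. (2, None, None) -- an FsaVec with 2 FSAs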
Example #5
    def test_with_negative_1(self):
        devices = [torch.device('cpu')]
        if torch.cuda.is_available():
            devices.append(torch.device('cuda', 0))

        for device in devices:
            s = '''
                0 1 2 10
                0 1 1 20
                1 2 -1 30
                2
            '''
            src = k2.Fsa.from_str(s).to(device).requires_grad_(True)
            src.float_attr = torch.tensor([0.1, 0.2, 0.3],
                                          dtype=torch.float32,
                                          requires_grad=True,
                                          device=device)
            src.int_attr = torch.tensor([1, 2, 3],
                                        dtype=torch.int32,
                                        device=device)
            src.ragged_attr = k2.RaggedInt('[[1 2 3] [5 6] []]').to(device)
            src.attr1 = 'src'
            src.attr2 = 'fsa'
            ragged_arc, arc_map = _k2.add_epsilon_self_loops(src.arcs,
                                                             need_arc_map=True)
            dest = k2.utils.fsa_from_unary_function_tensor(
                src, ragged_arc, arc_map)
            assert torch.allclose(
                dest.float_attr,
                torch.tensor([0.0, 0.1, 0.2, 0.0, 0.3],
                             dtype=torch.float32,
                             device=device))

            assert torch.all(
                torch.eq(
                    dest.scores,
                    torch.tensor([0, 10, 20, 0, 30],
                                 dtype=torch.float32,
                                 device=device)))

            assert torch.all(
                torch.eq(
                    dest.int_attr,
                    torch.tensor([0, 1, 2, 0, 3],
                                 dtype=torch.int32,
                                 device=device)))

            expected_ragged_attr = k2.RaggedInt('[ [] [1 2 3] [5 6] [] []]')
            self.assertEqual(str(dest.ragged_attr), str(expected_ragged_attr))

            assert dest.attr1 == src.attr1
            assert dest.attr2 == src.attr2

            # now for autograd
            scale = torch.tensor([10, 20, 30, 40, 50], device=device)
            (dest.float_attr * scale).sum().backward()
            (dest.scores * scale).sum().backward()

            expected_grad = torch.tensor([20, 30, 50],
                                         dtype=torch.float32,
                                         device=device)

            assert torch.all(torch.eq(src.float_attr.grad, expected_grad))

            assert torch.all(torch.eq(src.scores.grad, expected_grad))