Example #1
def remove_epsilon_and_add_self_loops(fsa: Fsa,
                                      remove_filler: bool = True) -> Fsa:
    '''Remove epsilons (arcs whose label is zero) from the input Fsa, and then
    add epsilon self-loops to all of its states (usually as preparation for
    intersection with treat_epsilons_specially=False).

    Args:
      fsa:
        The input FSA. It can be either a single FSA or an FsaVec.
      remove_filler:
        If True, remove any filler values of the attributes when converting
        linear attributes to ragged attributes.
    Returns:
      The resulting Fsa.  See :func:`remove_epsilon` for details.
      The only epsilons will be epsilon self-loops on all states.
    '''
    if fsa.properties & fsa_properties.EPSILON_FREE != 0:
        return add_epsilon_self_loops(fsa)

    ragged_arc, arc_map = _k2.remove_epsilon_and_add_self_loops(
        fsa.arcs, fsa.properties)

    out_fsa = k2.utils.fsa_from_unary_function_ragged(
        fsa, ragged_arc, arc_map, remove_filler=remove_filler)

    return out_fsa
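
The docstring above points at the typical follow-up: intersection with special
epsilon handling turned off. Below is a minimal usage sketch of that pattern,
assuming the public wrappers k2.Fsa.from_str, k2.arc_sort,
k2.remove_epsilon_and_add_self_loops and k2.intersect behave as in recent k2
releases; the two toy FSAs are made up for illustration.

import k2

# A toy acceptor containing a real epsilon arc (label 0).
# Format of each line: "src_state dst_state label score".
a = k2.Fsa.from_str('''
    0 1 0 0.1
    1 2 5 0.2
    2 3 -1 0.3
    3
''')

# A toy acceptor that also contains an epsilon arc.
b = k2.Fsa.from_str('''
    0 1 0 0.4
    1 2 5 0.5
    2 3 -1 0.6
    3
''')

# Remove a's real epsilons and add epsilon self-loops to every state, so the
# intersection below can treat label 0 as an ordinary symbol.
a_prepared = k2.remove_epsilon_and_add_self_loops(a)

# Arc-sort the inputs; arc_sort is a no-op if an FSA is already sorted.
a_prepared = k2.arc_sort(a_prepared)
b = k2.arc_sort(b)

# b's epsilon arc is matched by the self-loops added to a_prepared, so no
# special epsilon handling is needed.
c = k2.intersect(a_prepared, b, treat_epsilons_specially=False)
print(c)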
Example #2
def remove_epsilon_and_add_self_loops(fsa: Fsa) -> Fsa:
    '''Remove epsilons (arcs whose label is zero) from the input Fsa, and then
    add epsilon self-loops to all of its states (usually as preparation for
    intersection with treat_epsilons_specially=False).

    Args:
      fsa:
        The input FSA. It can be either a single FSA or an FsaVec.
    Returns:
      The resulting Fsa.  See :func:`remove_epsilon` for details.
      The only epsilons will be epsilon self-loops on all states.
    '''
    if fsa.properties & fsa_properties.EPSILON_FREE != 0:
        return add_epsilon_self_loops(fsa)

    ragged_arc, arc_map = _k2.remove_epsilon_and_add_self_loops(
        fsa.arcs, fsa.properties)

    out_fsa = k2.utils.fsa_from_unary_function_ragged(fsa, ragged_arc, arc_map)

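    # `aux_labels` may have become ragged because several source arcs can be
    # combined into one output arc; drop the epsilon (0) entries in that case,
    # since they carry no output symbol.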
    if hasattr(out_fsa, 'aux_labels') and \
            isinstance(out_fsa.aux_labels, k2.RaggedInt):
        out_fsa.aux_labels = k2.ragged.remove_values_eq(out_fsa.aux_labels, 0)

    return out_fsa
Example #3
    def test_with_empty_list(self):
        for device in self.devices:
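            # k2 text format: one arc per line, "src_state dst_state label score";
            # label 0 is epsilon and label -1 enters the final state, whose id is
            # given on the last line.  The zero scores are replaced below via
            # `src.scores`.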
            s = '''
                0 1 0 0
                0 1 1 0
                1 2 -1 0
                2
            '''
            scores = torch.tensor([1, 2, 3],
                                  dtype=torch.float32,
                                  device=device,
                                  requires_grad=True)
            scores_copy = scores.detach().clone().requires_grad_(True)
            src = k2.Fsa.from_str(s).to(device)
            src.scores = scores
            src.attr1 = "hello"
            src.attr2 = "k2"
            float_attr = torch.tensor([0.1, 0.2, 0.3],
                                      dtype=torch.float32,
                                      requires_grad=True,
                                      device=device)

            src.float_attr = float_attr.detach().clone().requires_grad_(True)
            src.int_attr = torch.tensor([1, 2, 3],
                                        dtype=torch.int32,
                                        device=device)
            src.ragged_attr = k2.RaggedInt(
                '[ [10 20] [30 40 50] [60 70] ]').to(device)

            ragged_arc, arc_map = _k2.remove_epsilon_and_add_self_loops(
                src.arcs, src.properties)
            dest = k2.utils.fsa_from_unary_function_ragged(
                src, ragged_arc, arc_map)
            assert dest.attr1 == src.attr1
            assert dest.attr2 == src.attr2

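            # Each sub-list of the ragged arc_map names the source arcs that were
            # combined into the corresponding output arc; empty sub-lists are the
            # newly added epsilon self-loops.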
            expected_arc_map = k2.RaggedInt('[ [] [1] [0 2] [] [2] ]')
            self.assertEqual(str(arc_map), str(expected_arc_map))

            expected_int_attr = k2.RaggedInt('[ [] [2] [1 3] [] [3] ]')
            self.assertEqual(str(dest.int_attr), str(expected_int_attr))

            expected_ragged_attr = k2.RaggedInt(
                '[ [] [30 40 50] [10 20 60 70] [] [60 70] ]')
            self.assertEqual(str(dest.ragged_attr),
                             str(expected_ragged_attr))

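            # Tensor attributes (and scores) of a combined arc are the sums over
            # its source arcs; the added self-loops get 0.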
            expected_float_attr = torch.empty_like(dest.float_attr)
            expected_float_attr[0] = 0
            expected_float_attr[1] = float_attr[1]
            expected_float_attr[2] = float_attr[0] + float_attr[2]
            expected_float_attr[3] = 0
            expected_float_attr[4] = float_attr[2]

            assert torch.all(torch.eq(dest.float_attr,
                                      expected_float_attr))

            expected_scores = torch.empty_like(dest.scores)
            expected_scores[0] = 0
            expected_scores[1] = scores_copy[1]
            expected_scores[2] = scores_copy[0] + scores_copy[2]
            expected_scores[3] = 0
            expected_scores[4] = scores_copy[2]
            assert torch.all(torch.eq(dest.scores, expected_scores))

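            # Gradients flowing back through dest must match those obtained
            # from the hand-built expected tensors.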
            scale = torch.tensor([10, 20, 30, 40, 50]).to(float_attr)
            (dest.float_attr * scale).sum().backward()
            (expected_float_attr * scale).sum().backward()
            assert torch.all(torch.eq(src.float_attr.grad, float_attr.grad))

            (dest.scores * scale).sum().backward()
            (expected_scores * scale).sum().backward()
            assert torch.all(torch.eq(scores.grad, scores_copy.grad))