def test_max_weight(self):
    """Determinize under max-weight semantics and verify sizes/equivalence."""
    # Per-state forward/backward max scores required by WfsaWithFbWeights.
    fwd_scores = k2.DoubleArray1.create_array_with_size(self.num_states)
    bwd_scores = k2.DoubleArray1.create_array_with_size(self.num_states)
    wfsa = k2.WfsaWithFbWeights(self.fsa, self.weights,
                                k2.FbWeightType.kMaxWeight, fwd_scores,
                                bwd_scores)
    beam = 10.0
    det = k2.DeterminizerMax(wfsa, beam, 100)

    # Two-phase API: query sizes first, then fill pre-allocated outputs.
    out_size = k2.IntArray2Size()
    derivs_size = k2.IntArray2Size()
    det.get_sizes(out_size, derivs_size)
    result = k2.Fsa.create_fsa_with_size(out_size)
    derivs = k2.IntArray2.create_array_with_size(derivs_size)
    out_weights = k2.FloatArray1.create_array_with_size(out_size.size2)
    det.get_output(result, out_weights, derivs)

    self.assertTrue(k2.is_deterministic(result))
    self.assertEqual(result.size1, 7)
    self.assertEqual(result.size2, 9)
    self.assertEqual(derivs.size1, 9)
    self.assertEqual(derivs.size2, 12)
    self.assertTrue(
        k2.is_rand_equivalent_max_weight(self.fsa, self.weights, result,
                                         out_weights, beam))
def test_logsum_weight(self):
    """Determinize under log-sum semantics and verify sizes/equivalence."""
    # Per-state forward/backward log-sum scores for WfsaWithFbWeights.
    forward_logsum_weights = k2.DoubleArray1.create_array_with_size(
        self.num_states)
    backward_logsum_weights = k2.DoubleArray1.create_array_with_size(
        self.num_states)
    wfsa = k2.WfsaWithFbWeights(self.fsa, self.weights,
                                k2.FbWeightType.kLogSumWeight,
                                forward_logsum_weights,
                                backward_logsum_weights)
    beam = 10.0
    determinizer = k2.DeterminizerLogSum(wfsa, beam, 100)
    # Two-phase API: query sizes first, then fill pre-allocated outputs.
    fsa_size = k2.IntArray2Size()
    arc_derivs_size = k2.IntArray2Size()
    determinizer.get_sizes(fsa_size, arc_derivs_size)
    fsa_out = k2.Fsa.create_fsa_with_size(fsa_size)
    arc_derivs = k2.LogSumArcDerivs.create_arc_derivs_with_size(
        arc_derivs_size)
    arc_weights_out = k2.FloatArray1.create_array_with_size(fsa_size.size2)
    determinizer.get_output(fsa_out, arc_weights_out, arc_derivs)
    self.assertTrue(k2.is_deterministic(fsa_out))
    self.assertEqual(fsa_out.size1, 7)
    self.assertEqual(fsa_out.size2, 9)
    self.assertEqual(arc_derivs.size1, 9)
    self.assertEqual(arc_derivs.size2, 15)
    self.assertTrue(
        k2.is_rand_equivalent_logsum_weight(self.fsa, self.weights, fsa_out,
                                            arc_weights_out, beam))
    # Smoke-check the float->int reinterpret cast of the stored arc ids.
    # The original bound the result to an unused local (`arc_ids`), which
    # obscured that only the call itself is being exercised.
    k2.StridedIntArray1.from_float_tensor(arc_derivs.data[:, 0])
def test_logsum_weight(self):
    """Remove epsilons (pruned, log-sum semantics) and verify the result."""
    # Per-state forward/backward log-sum scores for WfsaWithFbWeights.
    forward_logsum_weights = k2.DoubleArray1.create_array_with_size(
        self.num_states)
    backward_logsum_weights = k2.DoubleArray1.create_array_with_size(
        self.num_states)
    wfsa = k2.WfsaWithFbWeights(self.fsa, self.weights,
                                k2.FbWeightType.kLogSumWeight,
                                forward_logsum_weights,
                                backward_logsum_weights)
    beam = 8.0
    remover = k2.EpsilonsRemoverLogSum(wfsa, beam)
    # Two-phase API: query sizes first, then fill pre-allocated outputs.
    fsa_size = k2.IntArray2Size()
    arc_derivs_size = k2.IntArray2Size()
    remover.get_sizes(fsa_size, arc_derivs_size)
    fsa_out = k2.Fsa.create_fsa_with_size(fsa_size)
    arc_derivs = k2.LogSumArcDerivs.create_arc_derivs_with_size(
        arc_derivs_size)
    arc_weights_out = k2.FloatArray1.create_array_with_size(fsa_size.size2)
    remover.get_output(fsa_out, arc_weights_out, arc_derivs)
    self.assertTrue(k2.is_epsilon_free(fsa_out))
    self.assertEqual(fsa_out.size1, 6)
    self.assertEqual(fsa_out.size2, 11)
    self.assertEqual(arc_derivs.size1, 11)
    self.assertEqual(arc_derivs.size2, 20)
    self.assertTrue(
        k2.is_rand_equivalent_after_rmeps_pruned_logsum(
            self.fsa, self.weights, fsa_out, arc_weights_out, beam))
    # Smoke-check the float->int reinterpret cast of the stored arc ids.
    # Bug fix: the original then asserted `arc_ids.get_data(1) == 1`, a
    # value that depends on `std::unordered_map` iteration order in the
    # rmepsilon implementation -- its own comment admitted the assertion
    # "may fail on some platforms".  The flaky assertion is removed; only
    # the cast itself is exercised here.
    k2.StridedIntArray1.from_float_tensor(arc_derivs.data[:, 0])
def test_max_weight(self):
    """Remove epsilons (pruned, max-weight semantics) and verify the result."""
    # Per-state forward/backward max scores for WfsaWithFbWeights.
    fwd = k2.DoubleArray1.create_array_with_size(self.num_states)
    bwd = k2.DoubleArray1.create_array_with_size(self.num_states)
    wfsa = k2.WfsaWithFbWeights(self.fsa, self.weights,
                                k2.FbWeightType.kMaxWeight, fwd, bwd)
    beam = 8.0
    remover = k2.EpsilonsRemoverMax(wfsa, beam)

    # Size query precedes allocation of every output container.
    out_size = k2.IntArray2Size()
    derivs_size = k2.IntArray2Size()
    remover.get_sizes(out_size, derivs_size)
    result = k2.Fsa.create_fsa_with_size(out_size)
    derivs = k2.IntArray2.create_array_with_size(derivs_size)
    out_weights = k2.FloatArray1.create_array_with_size(out_size.size2)
    remover.get_output(result, out_weights, derivs)

    self.assertTrue(k2.is_epsilon_free(result))
    self.assertEqual(result.size1, 6)
    self.assertEqual(result.size2, 11)
    self.assertEqual(derivs.size1, 11)
    self.assertEqual(derivs.size2, 18)
    self.assertTrue(
        k2.is_rand_equivalent_max_weight(self.fsa, self.weights, result,
                                         out_weights, beam))
def test_mapper2_case_1(self):
    """An empty arc map must map to an empty aux-label array."""
    empty_size = k2.IntArray2Size(0, 0)
    arc_map = k2.IntArray2.create_array_with_size(empty_size)
    mapper = k2.AuxLabels2Mapper(self.aux_labels_in, arc_map)
    out_size = k2.IntArray2Size()
    mapper.get_sizes(out_size)
    self.assertEqual(out_size.size1, 0)
    self.assertEqual(out_size.size2, 0)
    out_labels = k2.AuxLabels.create_array_with_size(out_size)
    mapper.get_output(out_labels)
    self.assertTrue(out_labels.empty())
def test_bad_case_1(self):
    """Drawing a random path from an empty FSA must fail."""
    fsa = k2.Fsa.create_fsa_with_size(k2.IntArray2Size(0, 0))
    rand_path = k2.RandPath(fsa, False)
    size = k2.IntArray2Size()
    rand_path.get_sizes(size)
    path = k2.Fsa.create_fsa_with_size(size)
    arc_map = k2.IntArray1.create_array_with_size(size.size2)
    ok = rand_path.get_output(path, arc_map)
    self.assertFalse(ok)
    self.assertTrue(k2.is_empty(path))
    self.assertTrue(arc_map.empty())
def test_empty_fsa(self):
    """Sorting an empty FSA must yield an empty FSA and an empty arc map."""
    array_size = k2.IntArray2Size(0, 0)
    fsa = k2.Fsa.create_fsa_with_size(array_size)
    sorter = k2.ArcSorter(fsa)
    array_size = k2.IntArray2Size()
    sorter.get_sizes(array_size)
    fsa_out = k2.Fsa.create_fsa_with_size(array_size)
    arc_map = k2.IntArray1.create_array_with_size(array_size.size2)
    sorter.get_output(fsa_out, arc_map)
    # Bug fix: the original asserted `k2.is_empty(fsa)` -- the untouched
    # input, trivially true -- so the produced output was never checked.
    self.assertTrue(k2.is_empty(fsa_out))
    self.assertTrue(arc_map.empty())
    # also exercise the overload without an arc map
    sorter.get_output(fsa_out)
    self.assertTrue(k2.is_empty(fsa_out))
def test_case_4(self):
    """Cyclic input: the cycle (not a self-loop) survives trimming,
    so the trimmed FSA cannot be topsorted and `get_output` reports it."""
    s = r'''
    0 3 3
    0 2 2
    1 0 1
    2 6 -1
    3 5 5
    3 2 2
    3 5 5
    4 4 4
    5 3 3
    5 4 4
    6
    '''
    fsa_in = k2.str_to_fsa(s)
    conn = k2.Connection(fsa_in)
    size = k2.IntArray2Size()
    conn.get_sizes(size)
    trimmed = k2.Fsa.create_fsa_with_size(size)
    ok = conn.get_output(trimmed)
    self.assertFalse(ok)
    self.assertFalse(k2.is_top_sorted(trimmed))
def test_case_1(self):
    """Trimming a non-connected, non-topsorted acyclic FSA succeeds and
    produces a topsorted result; the arc map tracks original arc indexes."""
    s = r'''
    0 1 1
    0 2 2
    1 3 3
    1 6 -1
    2 4 2
    2 6 -1
    2 1 1
    5 0 1
    6
    '''
    fsa_in = k2.str_to_fsa(s)
    conn = k2.Connection(fsa_in)
    size = k2.IntArray2Size()
    conn.get_sizes(size)
    trimmed = k2.Fsa.create_fsa_with_size(size)
    arc_map = k2.IntArray1.create_array_with_size(size.size2)
    ok = conn.get_output(trimmed, arc_map)
    self.assertTrue(ok)
    expected_indexes = torch.IntTensor([0, 2, 4, 5, 5])
    expected_arcs = torch.IntTensor([[0, 2, 1], [0, 1, 2], [1, 3, -1],
                                     [1, 2, 1], [2, 3, -1]])
    expected_map = torch.IntTensor([0, 1, 5, 6, 3])
    self.assertTrue(torch.equal(trimmed.indexes, expected_indexes))
    self.assertTrue(torch.equal(trimmed.data, expected_arcs))
    self.assertTrue(torch.equal(arc_map.data, expected_map))
def test_case_2(self):
    """Cyclic input where trimming removes the cycle.  Here nothing is
    both accessible and coaccessible, so the trimmed FSA is empty
    (and thus trivially topsorted); the call still reports success."""
    s = r'''
    0 1 1
    0 2 2
    1 3 3
    1 6 6
    2 4 2
    2 6 3
    2 6 -1
    5 0 1
    5 7 -1
    7
    '''
    fsa_in = k2.str_to_fsa(s)
    conn = k2.Connection(fsa_in)
    size = k2.IntArray2Size()
    conn.get_sizes(size)
    trimmed = k2.Fsa.create_fsa_with_size(size)
    arc_map = k2.IntArray1.create_array_with_size(size.size2)
    ok = conn.get_output(trimmed, arc_map)
    self.assertTrue(ok)
    self.assertTrue(k2.is_empty(trimmed))
    self.assertTrue(arc_map.empty())
def test_arc_sort(self):
    """Arcs leaving each state come out sorted (label first, then dest --
    see the expected arcs below); arc_map records each arc's old index."""
    s = r'''
    0 1 2
    0 4 0
    0 2 0
    1 2 1
    1 3 0
    2 1 0
    4
    '''
    fsa_in = k2.str_to_fsa(s)
    sorter = k2.ArcSorter(fsa_in)
    size = k2.IntArray2Size()
    sorter.get_sizes(size)
    result = k2.Fsa.create_fsa_with_size(size)
    arc_map = k2.IntArray1.create_array_with_size(size.size2)
    sorter.get_output(result, arc_map)
    expected_indexes = torch.IntTensor([0, 3, 5, 6, 6, 6])
    expected_arcs = torch.IntTensor([[0, 2, 0], [0, 4, 0], [0, 1, 2],
                                     [1, 3, 0], [1, 2, 1], [2, 1, 0]])
    expected_map = torch.IntTensor([2, 1, 0, 4, 3, 5])
    self.assertTrue(torch.equal(result.indexes, expected_indexes))
    self.assertTrue(torch.equal(result.data, expected_arcs))
    self.assertTrue(torch.equal(arc_map.data, expected_map))
def test_case_4(self):
    """Topsort a connected FSA; state_map gives old ids in new order."""
    s = r'''
    0 4 40
    0 2 20
    1 6 -1
    2 3 30
    3 6 -1
    3 1 10
    4 5 50
    5 2 8
    6
    '''
    fsa_in = k2.str_to_fsa(s)
    sorter = k2.TopSorter(fsa_in)
    size = k2.IntArray2Size()
    sorter.get_sizes(size)
    result = k2.Fsa.create_fsa_with_size(size)
    state_map = k2.IntArray1.create_array_with_size(size.size1)
    ok = sorter.get_output(result, state_map)
    self.assertTrue(ok)
    expected_indexes = torch.IntTensor([0, 2, 3, 4, 5, 7, 8, 8])
    expected_arcs = torch.IntTensor([[0, 1, 40], [0, 3, 20], [1, 2, 50],
                                     [2, 3, 8], [3, 4, 30], [4, 6, -1],
                                     [4, 5, 10], [5, 6, -1]])
    expected_state_map = torch.IntTensor([0, 4, 5, 2, 3, 1, 6])
    self.assertTrue(torch.equal(result.indexes, expected_indexes))
    self.assertTrue(torch.equal(result.data, expected_arcs))
    self.assertTrue(torch.equal(state_map.data, expected_state_map))
def test_case_1(self):
    """Topsorting an empty FSA succeeds with empty FSA and state map."""
    fsa = k2.Fsa.create_fsa_with_size(k2.IntArray2Size(0, 0))
    sorter = k2.TopSorter(fsa)
    size = k2.IntArray2Size()
    sorter.get_sizes(size)
    result = k2.Fsa.create_fsa_with_size(size)
    state_map = k2.IntArray1.create_array_with_size(size.size1)
    ok = sorter.get_output(result, state_map)
    self.assertTrue(ok)
    self.assertTrue(k2.is_empty(result))
    self.assertTrue(state_map.empty())
    # also exercise the overload without a state map
    sorter.get_output(result)
    self.assertTrue(k2.is_empty(result))
def test_case_1(self):
    """Inverting an empty FST gives empty output labels and an empty FSA,
    regardless of the (non-empty) input aux labels supplied."""
    empty = k2.IntArray2Size(0, 0)
    fsa_in = k2.Fsa.create_fsa_with_size(empty)
    idx = torch.IntTensor([0, 1, 3, 6, 7])
    vals = torch.IntTensor([1, 2, 3, 4, 5, 6, 7])
    labels_in = k2.AuxLabels(idx, vals)
    inverter = k2.FstInverter(fsa_in, labels_in)
    fsa_size = k2.IntArray2Size()
    aux_size = k2.IntArray2Size()
    inverter.get_sizes(fsa_size, aux_size)
    self.assertEqual(aux_size.size1, 0)
    self.assertEqual(aux_size.size2, 0)
    fsa_out = k2.Fsa.create_fsa_with_size(fsa_size)
    labels_out = k2.AuxLabels.create_array_with_size(aux_size)
    inverter.get_output(fsa_out, labels_out)
    self.assertTrue(k2.is_empty(fsa_out))
    self.assertTrue(labels_out.empty())
def test_empty_fsa(self):
    """In-place arc_sort on an empty FSA leaves everything empty."""
    fsa = k2.Fsa.create_fsa_with_size(k2.IntArray2Size(0, 0))
    arc_map = k2.IntArray1.create_array_with_size(fsa.size2)
    k2.arc_sort(fsa, arc_map)
    self.assertTrue(k2.is_empty(fsa))
    self.assertTrue(arc_map.empty())
    # also exercise the overload without an arc map
    k2.arc_sort(fsa)
    self.assertTrue(k2.is_empty(fsa))
def test_case_3(self):
    """Invert an FST whose input FSA is not topsorted and compare the
    resulting FSA and swapped aux labels against known-good tensors."""
    s = r'''
    0 1 1
    0 1 0
    0 3 2
    1 2 3
    1 3 4
    2 1 5
    2 5 -1
    3 1 6
    4 5 -1
    5
    '''
    fsa_in = k2.str_to_fsa(s)
    idx = torch.IntTensor([0, 2, 3, 3, 6, 6, 7, 8, 10, 11])
    vals = torch.IntTensor([1, 2, 3, 5, 6, 7, 8, -1, 9, 10, -1])
    labels_in = k2.AuxLabels(idx, vals)
    inverter = k2.FstInverter(fsa_in, labels_in)
    fsa_size = k2.IntArray2Size()
    aux_size = k2.IntArray2Size()
    inverter.get_sizes(fsa_size, aux_size)
    inv_fsa = k2.Fsa.create_fsa_with_size(fsa_size)
    inv_labels = k2.AuxLabels.create_array_with_size(aux_size)
    inverter.get_output(inv_fsa, inv_labels)
    expected_arc_indexes = torch.IntTensor(
        [0, 3, 4, 5, 7, 8, 9, 11, 12, 13, 13])
    expected_arcs = torch.IntTensor([[0, 1, 1], [0, 3, 3], [0, 7, 0],
                                     [1, 3, 2], [2, 3, 10], [3, 4, 5],
                                     [3, 7, 0], [4, 5, 6], [5, 6, 7],
                                     [6, 3, 8], [6, 9, -1], [7, 2, 9],
                                     [8, 9, -1]])
    self.assertTrue(torch.equal(inv_fsa.indexes, expected_arc_indexes))
    self.assertTrue(torch.equal(inv_fsa.data, expected_arcs))
    expected_label_indexes = torch.IntTensor(
        [0, 0, 0, 1, 2, 3, 3, 4, 4, 5, 6, 7, 7, 8])
    expected_labels = torch.IntTensor([2, 1, 6, 4, 3, 5, -1, -1])
    self.assertTrue(
        torch.equal(inv_labels.indexes, expected_label_indexes))
    self.assertTrue(torch.equal(inv_labels.data, expected_labels))
def test_case_1(self):
    """Intersecting two empty FSAs succeeds and yields empty outputs."""
    empty = k2.IntArray2Size(0, 0)
    fsa_a = k2.Fsa.create_fsa_with_size(empty)
    fsa_b = k2.Fsa.create_fsa_with_size(empty)
    inter = k2.Intersection(fsa_a, fsa_b)
    size = k2.IntArray2Size()
    inter.get_sizes(size)
    result = k2.Fsa.create_fsa_with_size(size)
    map_a = k2.IntArray1.create_array_with_size(size.size2)
    map_b = k2.IntArray1.create_array_with_size(size.size2)
    ok = inter.get_output(result, map_a, map_b)
    self.assertTrue(ok)
    self.assertTrue(k2.is_empty(result))
    self.assertTrue(map_a.empty())
    self.assertTrue(map_b.empty())
    # also exercise the overload without arc maps
    ok = inter.get_output(result)
    self.assertTrue(ok)
    self.assertTrue(k2.is_empty(result))
def test_mapper1_case_2(self):
    """Map aux labels through a 1-D arc map and check the gathered output."""
    arc_map = k2.IntArray1(torch.IntTensor([2, 0, 3]))
    mapper = k2.AuxLabels1Mapper(self.aux_labels_in, arc_map)
    out_size = k2.IntArray2Size()
    mapper.get_sizes(out_size)
    out_labels = k2.AuxLabels.create_array_with_size(out_size)
    mapper.get_output(out_labels)
    self.assertEqual(out_size.size1, 3)
    self.assertEqual(out_size.size2, 5)
    expected_indexes = torch.IntTensor([0, 3, 4, 5])
    expected_data = torch.IntTensor([4, 5, 6, 1, 7])
    self.assertTrue(torch.equal(out_labels.indexes, expected_indexes))
    self.assertTrue(torch.equal(out_labels.data, expected_data))
def test_mapper2_case_2(self):
    """Map aux labels through a 2-D arc map (several source arcs per
    output arc) and check the concatenated label sequences."""
    map_indexes = torch.IntTensor([0, 2, 4, 5, 6])
    map_data = torch.IntTensor([2, 3, 0, 1, 0, 2])
    arc_map = k2.IntArray2(map_indexes, map_data)
    mapper = k2.AuxLabels2Mapper(self.aux_labels_in, arc_map)
    out_size = k2.IntArray2Size()
    mapper.get_sizes(out_size)
    out_labels = k2.AuxLabels.create_array_with_size(out_size)
    mapper.get_output(out_labels)
    self.assertEqual(out_size.size1, 4)
    self.assertEqual(out_size.size2, 11)
    expected_indexes = torch.IntTensor([0, 4, 7, 8, 11])
    expected_data = torch.IntTensor([4, 5, 6, 7, 1, 2, 3, 1, 4, 5, 6])
    self.assertTrue(torch.equal(out_labels.indexes, expected_indexes))
    self.assertTrue(torch.equal(out_labels.data, expected_data))
def test_bad_case_2(self):
    """Drawing a random path from a non-connected FSA must fail."""
    s_a = r'''
    0 1 1
    0 2 2
    1 3 4
    3
    '''
    fsa = k2.str_to_fsa(s_a)
    rand_path = k2.RandPath(fsa, False)
    size = k2.IntArray2Size()
    rand_path.get_sizes(size)
    path = k2.Fsa.create_fsa_with_size(size)
    arc_map = k2.IntArray1.create_array_with_size(size.size2)
    ok = rand_path.get_output(path, arc_map)
    self.assertFalse(ok)
    self.assertTrue(k2.is_empty(path))
    self.assertTrue(arc_map.empty())
def test_case_3(self):
    """Topsort fails on a non-connected FSA (state 1 is not accessible)
    and leaves the outputs empty."""
    s = r'''
    0 2 -1
    1 0 1
    1 2 0
    2
    '''
    fsa_in = k2.str_to_fsa(s)
    sorter = k2.TopSorter(fsa_in)
    size = k2.IntArray2Size()
    sorter.get_sizes(size)
    result = k2.Fsa.create_fsa_with_size(size)
    state_map = k2.IntArray1.create_array_with_size(size.size1)
    ok = sorter.get_output(result, state_map)
    self.assertFalse(ok)
    self.assertTrue(k2.is_empty(result))
    self.assertTrue(state_map.empty())
def test_case_2(self):
    """Intersect two non-trivial FSAs; -1 in arc_map_b marks output arcs
    with no corresponding arc in fsa_b."""
    s_a = r'''
    0 1 1
    1 2 0
    1 3 1
    1 4 2
    2 2 1
    2 3 1
    2 3 2
    3 3 0
    3 4 1
    4
    '''
    fsa_a = k2.str_to_fsa(s_a)
    s_b = r'''
    0 1 1
    1 3 1
    1 2 2
    2 3 1
    3
    '''
    fsa_b = k2.str_to_fsa(s_b)
    inter = k2.Intersection(fsa_a, fsa_b)
    size = k2.IntArray2Size()
    inter.get_sizes(size)
    result = k2.Fsa.create_fsa_with_size(size)
    map_a = k2.IntArray1.create_array_with_size(size.size2)
    map_b = k2.IntArray1.create_array_with_size(size.size2)
    ok = inter.get_output(result, map_a, map_b)
    self.assertTrue(ok)
    expected_indexes = torch.IntTensor([0, 1, 4, 7, 8, 8, 8, 10, 10])
    expected_arcs = torch.IntTensor([[0, 1, 1], [1, 2, 0], [1, 3, 1],
                                     [1, 4, 2], [2, 5, 1], [2, 3, 1],
                                     [2, 6, 2], [3, 3, 0], [6, 6, 0],
                                     [6, 7, 1]])
    expected_map_a = torch.IntTensor([0, 1, 2, 3, 4, 5, 6, 7, 7, 8])
    expected_map_b = torch.IntTensor([0, -1, 1, 2, 1, 1, 2, -1, -1, 3])
    self.assertTrue(torch.equal(result.indexes, expected_indexes))
    self.assertTrue(torch.equal(result.data, expected_arcs))
    self.assertTrue(torch.equal(map_a.data, expected_map_a))
    self.assertTrue(torch.equal(map_b.data, expected_map_b))
def test_good_case_1(self):
    """A connected FSA yields a non-empty random path (no arc map here)."""
    s_a = r'''
    0 1 1
    0 2 2
    1 2 3
    2 3 4
    2 4 5
    3 4 7
    4 5 9
    5
    '''
    fsa = k2.str_to_fsa(s_a)
    rand_path = k2.RandPath(fsa, False)
    size = k2.IntArray2Size()
    rand_path.get_sizes(size)
    path = k2.Fsa.create_fsa_with_size(size)
    ok = rand_path.get_output(path)
    self.assertTrue(ok)
    self.assertFalse(k2.is_empty(path))
def test_eps_arc_1(self):
    """Random path with no_eps_arc=True on an FSA containing epsilon
    (label-0) arcs still succeeds and fills the arc map."""
    s_a = r'''
    0 1 1
    0 2 0
    1 2 3
    2 3 0
    2 4 5
    3 4 7
    4 5 9
    5
    '''
    fsa = k2.str_to_fsa(s_a)
    rand_path = k2.RandPath(fsa, True)
    size = k2.IntArray2Size()
    rand_path.get_sizes(size)
    path = k2.Fsa.create_fsa_with_size(size)
    arc_map = k2.IntArray1.create_array_with_size(size.size2)
    ok = rand_path.get_output(path, arc_map)
    self.assertTrue(ok)
    self.assertFalse(k2.is_empty(path))
    self.assertFalse(arc_map.empty())
def test_good_case_2(self):
    """A linear FSA has exactly one path, so the result is deterministic
    and can be compared against explicit tensors."""
    s_a = r'''
    0 1 1
    1 2 3
    2 3 4
    3
    '''
    fsa = k2.str_to_fsa(s_a)
    rand_path = k2.RandPath(fsa, False)
    size = k2.IntArray2Size()
    rand_path.get_sizes(size)
    path = k2.Fsa.create_fsa_with_size(size)
    arc_map = k2.IntArray1.create_array_with_size(size.size2)
    ok = rand_path.get_output(path, arc_map)
    self.assertTrue(ok)
    self.assertFalse(k2.is_empty(path))
    self.assertFalse(arc_map.empty())
    expected_indexes = torch.IntTensor([0, 1, 2, 3, 3])
    expected_arcs = torch.IntTensor([[0, 1, 1], [1, 2, 3], [2, 3, 4]])
    expected_map = torch.IntTensor([0, 1, 2])
    self.assertTrue(torch.equal(path.indexes, expected_indexes))
    self.assertTrue(torch.equal(path.data, expected_arcs))
    self.assertTrue(torch.equal(arc_map.data, expected_map))
def test_case_3(self):
    """Trimming a non-connected, non-topsorted acyclic FSA produces a
    topsorted result (overload without an arc map)."""
    s = r'''
    0 3 3
    0 5 5
    1 2 2
    2 1 1
    3 5 5
    3 2 2
    3 4 4
    3 6 -1
    4 5 5
    4 6 -1
    5 6 -1
    6
    '''
    fsa_in = k2.str_to_fsa(s)
    conn = k2.Connection(fsa_in)
    size = k2.IntArray2Size()
    conn.get_sizes(size)
    trimmed = k2.Fsa.create_fsa_with_size(size)
    conn.get_output(trimmed)
    self.assertTrue(k2.is_top_sorted(trimmed))
def test_bad_cases2(self):
    """An empty FSA has no self-loops."""
    empty_fsa = k2.Fsa.create_fsa_with_size(k2.IntArray2Size(0, 0))
    self.assertFalse(k2.has_self_loops(empty_fsa))
def test_bad_case1(self):
    """A valid FSA must contain at least two states; one state fails."""
    one_state = k2.Fsa.create_fsa_with_size(k2.IntArray2Size(1, 0))
    self.assertFalse(k2.is_valid(one_state))
def test_good_cases1(self):
    """An empty FSA is vacuously topsorted."""
    empty_fsa = k2.Fsa.create_fsa_with_size(k2.IntArray2Size(0, 0))
    self.assertTrue(k2.is_top_sorted(empty_fsa))