def test_pruned_logsum(self):
    """Pruned determinization in the log-sum semiring.

    Builds forward/backward log-sum weights for self.fsa, determinizes
    with a pruning beam, and checks the output sizes and random-path
    equivalence.  (Removed two locals the original allocated but never
    used: an output-weights array and an `arc_ids` view of arc_derivs.)
    """
    forward_logsum_weights = k2host.DoubleArray1.create_array_with_size(
        self.num_states)
    backward_logsum_weights = k2host.DoubleArray1.create_array_with_size(
        self.num_states)
    wfsa = k2host.WfsaWithFbWeights(self.fsa,
                                    k2host.FbWeightType.kLogSumWeight,
                                    forward_logsum_weights,
                                    backward_logsum_weights)
    beam = 10.0
    determinizer = k2host.DeterminizerPrunedLogSum(
        wfsa, beam, 100, k2host.FbWeightType.kNoWeight)
    # Two-pass API: query sizes first, then allocate and fill.
    fsa_size = k2host.IntArray2Size()
    arc_derivs_size = k2host.IntArray2Size()
    determinizer.get_sizes(fsa_size, arc_derivs_size)
    fsa_out = k2host.Fsa.create_fsa_with_size(fsa_size)
    arc_derivs = k2host.LogSumArcDerivs.create_arc_derivs_with_size(
        arc_derivs_size)
    determinizer.get_output(fsa_out, arc_derivs)
    self.assertTrue(k2host.is_deterministic(fsa_out))
    self.assertEqual(fsa_out.size1, 7)
    self.assertEqual(fsa_out.size2, 9)
    self.assertEqual(arc_derivs.size1, 9)
    self.assertEqual(arc_derivs.size2, 15)
    self.assertTrue(
        k2host.is_rand_equivalent_logsum_weight(self.fsa, fsa_out, beam))
def test_pruned_max(self):
    """Pruned epsilon removal in the max semiring.

    Builds forward/backward max weights for self.fsa, removes epsilons
    with a pruning beam, and checks output sizes and equivalence.
    (Removed an output-weights array the original allocated but never
    used.)
    """
    forward_max_weights = k2host.DoubleArray1.create_array_with_size(
        self.num_states)
    backward_max_weights = k2host.DoubleArray1.create_array_with_size(
        self.num_states)
    wfsa = k2host.WfsaWithFbWeights(self.fsa,
                                    k2host.FbWeightType.kMaxWeight,
                                    forward_max_weights,
                                    backward_max_weights)
    beam = 8.0
    remover = k2host.EpsilonsRemoverPrunedMax(wfsa, beam)
    # Two-pass API: query sizes first, then allocate and fill.
    fsa_size = k2host.IntArray2Size()
    arc_derivs_size = k2host.IntArray2Size()
    remover.get_sizes(fsa_size, arc_derivs_size)
    fsa_out = k2host.Fsa.create_fsa_with_size(fsa_size)
    arc_derivs = k2host.IntArray2.create_array_with_size(arc_derivs_size)
    remover.get_output(fsa_out, arc_derivs)
    self.assertTrue(k2host.is_epsilon_free(fsa_out))
    self.assertEqual(fsa_out.size1, 6)
    self.assertEqual(fsa_out.size2, 11)  # TODO: fix this
    self.assertEqual(arc_derivs.size1, 11)  # TODO: fix this
    self.assertEqual(arc_derivs.size2, 18)  # TODO: fix this
    self.assertTrue(
        k2host.is_rand_equivalent_max_weight(self.fsa, fsa_out, beam))
def test_pruned_max(self):
    """Pruned determinization in the max semiring.

    Builds forward/backward max weights for self.fsa, determinizes with
    a pruning beam, and checks output sizes and random-path
    equivalence.  (Removed an output-weights array the original
    allocated but never used.)
    """
    forward_max_weights = k2host.DoubleArray1.create_array_with_size(
        self.num_states)
    backward_max_weights = k2host.DoubleArray1.create_array_with_size(
        self.num_states)
    wfsa = k2host.WfsaWithFbWeights(self.fsa,
                                    k2host.FbWeightType.kMaxWeight,
                                    forward_max_weights,
                                    backward_max_weights)
    beam = 10.0
    determinizer = k2host.DeterminizerPrunedMax(
        wfsa, beam, 100, k2host.FbWeightType.kNoWeight)
    # Two-pass API: query sizes first, then allocate and fill.
    fsa_size = k2host.IntArray2Size()
    arc_derivs_size = k2host.IntArray2Size()
    determinizer.get_sizes(fsa_size, arc_derivs_size)
    fsa_out = k2host.Fsa.create_fsa_with_size(fsa_size)
    arc_derivs = k2host.IntArray2.create_array_with_size(arc_derivs_size)
    determinizer.get_output(fsa_out, arc_derivs)
    self.assertTrue(k2host.is_deterministic(fsa_out))
    self.assertEqual(fsa_out.size1, 7)
    self.assertEqual(fsa_out.size2, 9)
    self.assertEqual(arc_derivs.size1, 9)
    self.assertEqual(arc_derivs.size2, 12)
    self.assertTrue(
        k2host.is_rand_equivalent_max_weight(self.fsa, fsa_out, beam))
def test_mapper2_case_1(self):
    """An empty arc map must produce empty aux labels."""
    empty_size = k2host.IntArray2Size(0, 0)
    arc_map = k2host.IntArray2.create_array_with_size(empty_size)
    mapper = k2host.AuxLabels2Mapper(self.aux_labels_in, arc_map)
    out_size = k2host.IntArray2Size()
    mapper.get_sizes(out_size)
    # Nothing to map, so both dimensions collapse to zero.
    self.assertEqual(out_size.size1, 0)
    self.assertEqual(out_size.size2, 0)
    labels_out = k2host.AuxLabels.create_array_with_size(out_size)
    mapper.get_output(labels_out)
    self.assertTrue(labels_out.empty())
def test_bad_case_1(self):
    """RandPath on the empty FSA must fail and leave outputs empty."""
    in_size = k2host.IntArray2Size(0, 0)
    src_fsa = k2host.Fsa.create_fsa_with_size(in_size)
    rand_path = k2host.RandPath(src_fsa, False)
    out_size = k2host.IntArray2Size()
    rand_path.get_sizes(out_size)
    path = k2host.Fsa.create_fsa_with_size(out_size)
    arc_map = k2host.IntArray1.create_array_with_size(out_size.size2)
    ok = rand_path.get_output(path, arc_map)
    self.assertFalse(ok)
    self.assertTrue(k2host.is_empty(path))
    self.assertTrue(arc_map.empty())
def test_empty_fsa(self):
    """Arc-sorting the empty FSA leaves everything empty."""
    in_size = k2host.IntArray2Size(0, 0)
    fsa = k2host.Fsa.create_fsa_with_size(in_size)
    sorter = k2host.ArcSorter(fsa)
    out_size = k2host.IntArray2Size()
    sorter.get_sizes(out_size)
    fsa_out = k2host.Fsa.create_fsa_with_size(out_size)
    arc_map = k2host.IntArray1.create_array_with_size(out_size.size2)
    sorter.get_output(fsa_out, arc_map)
    # NOTE(review): this asserts on the *input* fsa; fsa_out was perhaps
    # intended, though both are empty for this case — confirm.
    self.assertTrue(k2host.is_empty(fsa))
    # The overload without an arc_map should behave the same way.
    sorter.get_output(fsa_out)
    self.assertTrue(k2host.is_empty(fsa_out))
def test_case_4(self):
    """Top-sort a connected FSA; verify indexes, arcs and arc_map."""
    # connected fsa
    # Each line is one arc: src dst label weight; last line is the
    # final state.
    s = r'''
0 4 40 0
0 2 20 0
1 6 -1 0
2 3 30 0
3 6 -1 0
3 1 10 0
4 5 50 0
5 2 8 0
6
'''
    fsa = k2host.str_to_fsa(s)
    sorter = k2host.TopSorter(fsa)
    array_size = k2host.IntArray2Size()
    sorter.get_sizes(array_size)
    fsa_out = k2host.Fsa.create_fsa_with_size(array_size)
    arc_map = k2host.IntArray1.create_array_with_size(array_size.size2)
    status = sorter.get_output(fsa_out, arc_map)
    self.assertTrue(status)
    expected_arc_indexes = torch.IntTensor([0, 2, 3, 4, 5, 7, 8, 8])
    expected_arcs = torch.IntTensor([[0, 1, 40, 0], [0, 3, 20, 0],
                                     [1, 2, 50, 0], [2, 3, 8, 0],
                                     [3, 4, 30, 0], [4, 6, -1, 0],
                                     [4, 5, 10, 0], [5, 6, -1, 0]])
    # arc_map[i] is the input-arc index that produced output arc i.
    expected_arc_map = torch.IntTensor([0, 1, 6, 7, 3, 4, 5, 2])
    self.assertTrue(torch.equal(fsa_out.indexes, expected_arc_indexes))
    self.assertTrue(torch.equal(fsa_out.data, expected_arcs))
    self.assertTrue(torch.equal(arc_map.data, expected_arc_map))
def test_good_case_2(self):
    """A linear FSA has exactly one path; the path equals the FSA."""
    s_a = r'''
0 1 1 0
1 2 3 0
2 3 4 0
3 4 -1 0
4
'''
    fsa = k2host.str_to_fsa(s_a)
    rand_path = k2host.RandPath(fsa, False)
    array_size = k2host.IntArray2Size()
    rand_path.get_sizes(array_size)
    path = k2host.Fsa.create_fsa_with_size(array_size)
    arc_map = k2host.IntArray1.create_array_with_size(array_size.size2)
    status = rand_path.get_output(path, arc_map)
    self.assertTrue(status)
    self.assertFalse(k2host.is_empty(path))
    self.assertFalse(arc_map.empty())
    # The only possible path is the input chain itself, in order.
    expected_arc_indexes = torch.IntTensor([0, 1, 2, 3, 4, 4])
    expected_arcs = torch.IntTensor([[0, 1, 1, 0], [1, 2, 3, 0],
                                     [2, 3, 4, 0], [3, 4, -1, 0]])
    expected_arc_map = torch.IntTensor([0, 1, 2, 3])
    self.assertTrue(torch.equal(path.indexes, expected_arc_indexes))
    self.assertTrue(torch.equal(path.data, expected_arcs))
    self.assertTrue(torch.equal(arc_map.data, expected_arc_map))
def test_case_2(self):
    # a cyclic input fsa
    # after trimming, the cycle is removed;
    # so the output fsa should be topsorted.
    # NOTE(review): the assertions below actually expect an *empty*
    # result (no state here is both accessible and coaccessible) —
    # confirm the comment above against the current behavior.
    s = r'''
0 1 1 0
0 2 2 1
1 3 3 -2
1 6 6 -3
2 4 2 4
2 6 3 5
2 6 -1 6
5 0 1 7
5 7 -1 8
7
'''
    fsa = k2host.str_to_fsa(s)
    connection = k2host.Connection(fsa)
    array_size = k2host.IntArray2Size()
    connection.get_sizes(array_size)
    fsa_out = k2host.Fsa.create_fsa_with_size(array_size)
    arc_map = k2host.IntArray1.create_array_with_size(array_size.size2)
    status = connection.get_output(fsa_out, arc_map)
    self.assertTrue(status)
    self.assertTrue(k2host.is_empty(fsa_out))
    self.assertTrue(arc_map.empty())
def test_case_1(self):
    # a non-connected, non-topsorted, acyclic input fsa;
    # the output fsa is topsorted.
    s = r'''
0 1 1 0
0 2 2 0
1 3 3 0
1 6 -1 0
2 4 2 0
2 6 -1 0
2 1 1 0
5 0 1 0
6
'''
    fsa = k2host.str_to_fsa(s)
    connection = k2host.Connection(fsa)
    array_size = k2host.IntArray2Size()
    connection.get_sizes(array_size)
    fsa_out = k2host.Fsa.create_fsa_with_size(array_size)
    arc_map = k2host.IntArray1.create_array_with_size(array_size.size2)
    status = connection.get_output(fsa_out, arc_map)
    self.assertTrue(status)
    # Only the accessible+coaccessible part survives, renumbered.
    expected_arc_indexes = torch.IntTensor([0, 2, 4, 5, 5])
    expected_arcs = torch.IntTensor([[0, 2, 1, 0], [0, 1, 2, 0],
                                     [1, 3, -1, 0], [1, 2, 1, 0],
                                     [2, 3, -1, 0]])
    # arc_map[i] is the input-arc index that produced output arc i.
    expected_arc_map = torch.IntTensor([0, 1, 5, 6, 3])
    self.assertTrue(torch.equal(fsa_out.indexes, expected_arc_indexes))
    self.assertTrue(torch.equal(fsa_out.data, expected_arcs))
    self.assertTrue(torch.equal(arc_map.data, expected_arc_map))
def test_case_4(self):
    # a cyclic input fsa
    # after trimming, the cycle remains (it is not a self-loop);
    # so the output fsa is NOT topsorted.
    s = r'''
0 3 3 1
0 2 2 2
1 0 1 3
2 6 -1 4
3 5 5 5
3 2 2 6
3 5 5 7
4 4 4 8
5 3 3 9
5 4 4 10
6
'''
    fsa = k2host.str_to_fsa(s)
    connection = k2host.Connection(fsa)
    array_size = k2host.IntArray2Size()
    connection.get_sizes(array_size)
    fsa_out = k2host.Fsa.create_fsa_with_size(array_size)
    # get_output reports False when the trimmed result is not
    # top-sortable (the 3 -> 5 -> 3 cycle survives trimming).
    status = connection.get_output(fsa_out)
    self.assertFalse(status)
    self.assertFalse(k2host.is_top_sorted(fsa_out))
def test_arc_sort(self):
    """Sort each state's arcs; verify indexes, arcs and arc_map."""
    s = r'''
0 1 2 1
0 4 0 2
0 2 0 3
1 2 1 4
1 3 0 5
2 1 0 6
4
'''
    fsa = k2host.str_to_fsa(s)
    sorter = k2host.ArcSorter(fsa)
    array_size = k2host.IntArray2Size()
    sorter.get_sizes(array_size)
    fsa_out = k2host.Fsa.create_fsa_with_size(array_size)
    arc_map = k2host.IntArray1.create_array_with_size(array_size.size2)
    sorter.get_output(fsa_out, arc_map)
    expected_arc_indexes = torch.IntTensor([0, 3, 5, 6, 6, 6])
    # float_to_int presumably reinterprets the float weight's bits so it
    # can be stored in an int tensor — confirm against its definition.
    expected_arcs = torch.IntTensor([[0, 2, 0, float_to_int(3)],
                                     [0, 4, 0, float_to_int(2)],
                                     [0, 1, 2, float_to_int(1)],
                                     [1, 3, 0, float_to_int(5)],
                                     [1, 2, 1, float_to_int(4)],
                                     [2, 1, 0, float_to_int(6)]])
    # arc_map[i] is the input-arc index that produced output arc i.
    expected_arc_map = torch.IntTensor([2, 1, 0, 4, 3, 5])
    self.assertTrue(torch.equal(fsa_out.indexes, expected_arc_indexes))
    self.assertTrue(torch.equal(fsa_out.data, expected_arcs))
    self.assertTrue(torch.equal(arc_map.data, expected_arc_map))
def test_case_1(self):
    """Top-sorting the empty FSA succeeds with empty outputs."""
    in_size = k2host.IntArray2Size(0, 0)
    src_fsa = k2host.Fsa.create_fsa_with_size(in_size)
    sorter = k2host.TopSorter(src_fsa)
    out_size = k2host.IntArray2Size()
    sorter.get_sizes(out_size)
    fsa_out = k2host.Fsa.create_fsa_with_size(out_size)
    arc_map = k2host.IntArray1.create_array_with_size(out_size.size2)
    ok = sorter.get_output(fsa_out, arc_map)
    self.assertTrue(ok)
    self.assertTrue(k2host.is_empty(fsa_out))
    self.assertTrue(arc_map.empty())
    # The overload without an arc_map should behave the same way.
    sorter.get_output(fsa_out)
    self.assertTrue(k2host.is_empty(fsa_out))
def test_case_1(self):
    """Inverting an empty FST yields an empty FSA and empty labels."""
    empty_size = k2host.IntArray2Size(0, 0)
    fsa_in = k2host.Fsa.create_fsa_with_size(empty_size)
    # The aux labels are irrelevant for an empty FSA but must still be
    # accepted by the inverter.
    labels_in = k2host.AuxLabels(torch.IntTensor([0, 1, 3, 6, 7]),
                                 torch.IntTensor([1, 2, 3, 4, 5, 6, 7]))
    inverter = k2host.FstInverter(fsa_in, labels_in)
    fsa_size = k2host.IntArray2Size()
    aux_size = k2host.IntArray2Size()
    inverter.get_sizes(fsa_size, aux_size)
    self.assertEqual(aux_size.size1, 0)
    self.assertEqual(aux_size.size2, 0)
    fsa_out = k2host.Fsa.create_fsa_with_size(fsa_size)
    labels_out = k2host.AuxLabels.create_array_with_size(aux_size)
    inverter.get_output(fsa_out, labels_out)
    self.assertTrue(k2host.is_empty(fsa_out))
    self.assertTrue(labels_out.empty())
def test_case_3(self):
    # non-top-sorted input FSA
    """Invert a transducer whose input FSA is not top-sorted."""
    s = r'''
0 1 1 0
0 1 0 0
0 3 2 0
1 2 3 0
1 3 4 0
2 1 5 0
2 5 -1 0
3 1 6 0
4 5 -1 0
5
'''
    fsa_in = k2host.str_to_fsa(s)
    # Ragged aux-label sequences: indexes[i]..indexes[i+1] delimits the
    # labels of input arc i inside `data`.
    indexes = torch.IntTensor([0, 2, 3, 3, 6, 6, 7, 8, 10, 11])
    data = torch.IntTensor([1, 2, 3, 5, 6, 7, 8, -1, 9, 10, -1])
    labels_in = k2host.AuxLabels(indexes, data)
    inverter = k2host.FstInverter(fsa_in, labels_in)
    fsa_size = k2host.IntArray2Size()
    aux_size = k2host.IntArray2Size()
    inverter.get_sizes(fsa_size, aux_size)
    fsa_out = k2host.Fsa.create_fsa_with_size(fsa_size)
    labels_out = k2host.AuxLabels.create_array_with_size(aux_size)
    inverter.get_output(fsa_out, labels_out)
    expected_arc_indexes = torch.IntTensor(
        [0, 3, 4, 5, 7, 8, 9, 11, 12, 13, 13])
    expected_arcs = torch.IntTensor([[0, 1, 1, 0], [0, 3, 3, 0],
                                     [0, 7, 0, 0], [1, 3, 2, 0],
                                     [2, 3, 10, 0], [3, 4, 5, 0],
                                     [3, 7, 0, 0], [4, 5, 6, 0],
                                     [5, 6, 7, 0], [6, 3, 8, 0],
                                     [6, 9, -1, 0], [7, 2, 9, 0],
                                     [8, 9, -1, 0]])
    self.assertTrue(torch.equal(fsa_out.indexes, expected_arc_indexes))
    self.assertTrue(torch.equal(fsa_out.data, expected_arcs))
    expected_label_indexes = torch.IntTensor(
        [0, 0, 0, 1, 2, 3, 3, 4, 4, 5, 6, 7, 7, 8])
    expected_labels = torch.IntTensor([2, 1, 6, 4, 3, 5, -1, -1])
    self.assertTrue(torch.equal(labels_out.indexes,
                                expected_label_indexes))
    self.assertTrue(torch.equal(labels_out.data, expected_labels))
def test_empty_fsa(self):
    """In-place arc_sort on the empty FSA is a no-op."""
    size = k2host.IntArray2Size(0, 0)
    fsa = k2host.Fsa.create_fsa_with_size(size)
    arc_map = k2host.IntArray1.create_array_with_size(fsa.size2)
    k2host.arc_sort(fsa, arc_map)
    self.assertTrue(k2host.is_empty(fsa))
    self.assertTrue(arc_map.empty())
    # Also exercise the call without an arc_map.
    k2host.arc_sort(fsa)
    self.assertTrue(k2host.is_empty(fsa))
def test_case_1(self):
    """Intersecting two empty FSAs yields empty output and maps."""
    empty_size = k2host.IntArray2Size(0, 0)
    fsa_a = k2host.Fsa.create_fsa_with_size(empty_size)
    fsa_b = k2host.Fsa.create_fsa_with_size(empty_size)
    intersection = k2host.Intersection(fsa_a, fsa_b)
    out_size = k2host.IntArray2Size()
    intersection.get_sizes(out_size)
    fsa_out = k2host.Fsa.create_fsa_with_size(out_size)
    arc_map_a = k2host.IntArray1.create_array_with_size(out_size.size2)
    arc_map_b = k2host.IntArray1.create_array_with_size(out_size.size2)
    ok = intersection.get_output(fsa_out, arc_map_a, arc_map_b)
    self.assertTrue(ok)
    self.assertTrue(k2host.is_empty(fsa_out))
    self.assertTrue(arc_map_a.empty())
    self.assertTrue(arc_map_b.empty())
    # The overload without arc maps must succeed as well.
    ok = intersection.get_output(fsa_out)
    self.assertTrue(ok)
    self.assertTrue(k2host.is_empty(fsa_out))
def test_mapper1_case_2(self):
    """Map aux labels through a one-to-one arc map that reorders arcs."""
    arc_map = k2host.IntArray1(torch.IntTensor([2, 0, 3]))
    mapper = k2host.AuxLabels1Mapper(self.aux_labels_in, arc_map)
    aux_size = k2host.IntArray2Size()
    mapper.get_sizes(aux_size)
    labels_out = k2host.AuxLabels.create_array_with_size(aux_size)
    mapper.get_output(labels_out)
    self.assertEqual(aux_size.size1, 3)
    self.assertEqual(aux_size.size2, 5)
    self.assertTrue(torch.equal(labels_out.indexes,
                                torch.IntTensor([0, 3, 4, 5])))
    self.assertTrue(torch.equal(labels_out.data,
                                torch.IntTensor([4, 5, 6, 1, 7])))
def test_pruned_logsum(self):
    """Pruned epsilon removal in the log-sum semiring.

    Builds forward/backward log-sum weights for self.fsa, removes
    epsilons with a pruning beam, and checks output sizes and the first
    stored arc id.  (Removed an output-weights array the original
    allocated but never used.)
    """
    forward_logsum_weights = k2host.DoubleArray1.create_array_with_size(
        self.num_states)
    backward_logsum_weights = k2host.DoubleArray1.create_array_with_size(
        self.num_states)
    wfsa = k2host.WfsaWithFbWeights(self.fsa,
                                    k2host.FbWeightType.kLogSumWeight,
                                    forward_logsum_weights,
                                    backward_logsum_weights)
    beam = 8.0
    remover = k2host.EpsilonsRemoverPrunedLogSum(wfsa, beam)
    # Two-pass API: query sizes first, then allocate and fill.
    fsa_size = k2host.IntArray2Size()
    arc_derivs_size = k2host.IntArray2Size()
    remover.get_sizes(fsa_size, arc_derivs_size)
    fsa_out = k2host.Fsa.create_fsa_with_size(fsa_size)
    arc_derivs = k2host.LogSumArcDerivs.create_arc_derivs_with_size(
        arc_derivs_size)
    remover.get_output(fsa_out, arc_derivs)
    self.assertTrue(k2host.is_epsilon_free(fsa_out))
    self.assertEqual(fsa_out.size1, 6)
    self.assertEqual(fsa_out.size2, 11)  # TODO: fix this
    self.assertEqual(arc_derivs.size1, 11)  # TODO: fix this
    self.assertEqual(arc_derivs.size2, 20)  # TODO: fix this
    # TODO(haowen): uncomment this after re-implementing
    # IsRandEquivalentAfterRmEpsPrunedLogSum
    #self.assertTrue(
    #    k2host.is_rand_equivalent_after_rmeps_pruned_logsum(
    #        self.fsa, fsa_out, beam))
    # cast float to int
    arc_ids = k2host.StridedIntArray1.from_float_tensor(
        arc_derivs.data[:, 0])
    # we may get different value of `arc_ids.get_data(1)`
    # with different STL implementations as we use
    # `std::unordered_map` in implementation of rmepsilon,
    # thus below assertion may fail on some platforms.
    self.assertEqual(arc_ids.get_data(1), 1)
def test_mapper2_case_2(self):
    """Map aux labels through a ragged (one-to-many) arc map."""
    arc_map = k2host.IntArray2(torch.IntTensor([0, 2, 4, 5, 6]),
                               torch.IntTensor([2, 3, 0, 1, 0, 2]))
    mapper = k2host.AuxLabels2Mapper(self.aux_labels_in, arc_map)
    aux_size = k2host.IntArray2Size()
    mapper.get_sizes(aux_size)
    labels_out = k2host.AuxLabels.create_array_with_size(aux_size)
    mapper.get_output(labels_out)
    self.assertEqual(aux_size.size1, 4)
    self.assertEqual(aux_size.size2, 11)
    self.assertTrue(torch.equal(
        labels_out.indexes, torch.IntTensor([0, 4, 7, 8, 11])))
    self.assertTrue(torch.equal(
        labels_out.data,
        torch.IntTensor([4, 5, 6, 7, 1, 2, 3, 1, 4, 5, 6])))
def test_case_2(self):
    """Intersect two FSAs; verify output arcs and both arc maps."""
    s_a = r'''
0 1 1 0
1 2 0 0
1 3 1 0
1 4 2 0
2 2 1 0
2 3 1 0
2 3 2 0
3 3 0 0
3 4 1 0
4
'''
    fsa_a = k2host.str_to_fsa(s_a)
    s_b = r'''
0 1 1 0
1 3 1 0
1 2 2 0
2 3 1 0
3
'''
    fsa_b = k2host.str_to_fsa(s_b)
    intersection = k2host.Intersection(fsa_a, fsa_b)
    array_size = k2host.IntArray2Size()
    intersection.get_sizes(array_size)
    fsa_out = k2host.Fsa.create_fsa_with_size(array_size)
    arc_map_a = k2host.IntArray1.create_array_with_size(array_size.size2)
    arc_map_b = k2host.IntArray1.create_array_with_size(array_size.size2)
    status = intersection.get_output(fsa_out, arc_map_a, arc_map_b)
    self.assertTrue(status)
    expected_arc_indexes = torch.IntTensor([0, 1, 4, 7, 8, 8, 8, 10, 10])
    expected_arcs = torch.IntTensor([[0, 1, 1, 0], [1, 2, 0, 0],
                                     [1, 3, 1, 0], [1, 4, 2, 0],
                                     [2, 5, 1, 0], [2, 3, 1, 0],
                                     [2, 6, 2, 0], [3, 3, 0, 0],
                                     [6, 6, 0, 0], [6, 7, 1, 0]])
    expected_arc_map_a = torch.IntTensor([0, 1, 2, 3, 4, 5, 6, 7, 7, 8])
    # -1 entries correspond to the label-0 output arcs, which have no
    # matching arc in fsa_b.
    expected_arc_map_b = torch.IntTensor([0, -1, 1, 2, 1, 1, 2, -1, -1, 3])
    self.assertTrue(torch.equal(fsa_out.indexes, expected_arc_indexes))
    self.assertTrue(torch.equal(fsa_out.data, expected_arcs))
    self.assertTrue(torch.equal(arc_map_a.data, expected_arc_map_a))
    self.assertTrue(torch.equal(arc_map_b.data, expected_arc_map_b))
def test_case_3(self): # non-connected fsa (not accessible) s = r''' 0 2 -1 0 1 0 1 0 1 2 0 0 2 ''' fsa = k2host.str_to_fsa(s) sorter = k2host.TopSorter(fsa) array_size = k2host.IntArray2Size() sorter.get_sizes(array_size) fsa_out = k2host.Fsa.create_fsa_with_size(array_size) state_map = k2host.IntArray1.create_array_with_size(array_size.size1) status = sorter.get_output(fsa_out, state_map) self.assertFalse(status) self.assertTrue(k2host.is_empty(fsa_out)) self.assertTrue(state_map.empty())
def test_bad_case_2(self):
    # non-connected fsa
    """RandPath fails when no path can reach the final state."""
    s_a = r'''
0 1 1 0
0 2 2 0
1 3 4 0
4
'''
    fsa = k2host.str_to_fsa(s_a)
    rand_path = k2host.RandPath(fsa, False)
    array_size = k2host.IntArray2Size()
    rand_path.get_sizes(array_size)
    path = k2host.Fsa.create_fsa_with_size(array_size)
    arc_map = k2host.IntArray1.create_array_with_size(array_size.size2)
    status = rand_path.get_output(path, arc_map)
    self.assertFalse(status)
    self.assertTrue(k2host.is_empty(path))
    self.assertTrue(arc_map.empty())
def test_case_3(self):
    # non-connected fsa (not accessible)
    # NOTE(review): a sibling test feeds this same input to TopSorter
    # and expects failure with empty output; this variant expects
    # success with only the accessible part kept — confirm which
    # behavior the TopSorter under test implements.
    s = r'''
0 2 -1 0
1 0 1 0
1 2 0 0
2
'''
    fsa = k2host.str_to_fsa(s)
    sorter = k2host.TopSorter(fsa)
    array_size = k2host.IntArray2Size()
    sorter.get_sizes(array_size)
    fsa_out = k2host.Fsa.create_fsa_with_size(array_size)
    arc_map = k2host.IntArray1.create_array_with_size(array_size.size2)
    status = sorter.get_output(fsa_out, arc_map)
    self.assertTrue(status)
    self.assertFalse(k2host.is_empty(fsa_out))
    # Only input arc 0 (the 0 -> 2 arc) survives.
    expected_arc_map = torch.IntTensor([0])
    self.assertTrue(torch.equal(arc_map.data, expected_arc_map))
def test_good_case_1(self):
    """RandPath succeeds on a connected FSA (no arc_map requested)."""
    s_a = r'''
0 1 1 0
0 2 2 0
1 2 3 0
2 3 4 0
2 4 5 0
3 4 7 0
4 5 9 0
5
'''
    fsa = k2host.str_to_fsa(s_a)
    rand_path = k2host.RandPath(fsa, False)
    array_size = k2host.IntArray2Size()
    rand_path.get_sizes(array_size)
    path = k2host.Fsa.create_fsa_with_size(array_size)
    # The chosen path is random, so only success and non-emptiness are
    # checked here.
    status = rand_path.get_output(path)
    self.assertTrue(status)
    self.assertFalse(k2host.is_empty(path))
def test_eps_arc_1(self):
    """RandPath on an FSA that contains epsilon (label-0) arcs."""
    s_a = r'''
0 1 1 0
0 2 0 0
1 2 3 0
2 3 0 0
2 4 5 0
3 4 7 0
4 5 9 0
5
'''
    fsa = k2host.str_to_fsa(s_a)
    # True: presumably asks RandPath to avoid epsilon arcs — confirm
    # against the RandPath constructor's parameter.
    rand_path = k2host.RandPath(fsa, True)
    array_size = k2host.IntArray2Size()
    rand_path.get_sizes(array_size)
    path = k2host.Fsa.create_fsa_with_size(array_size)
    arc_map = k2host.IntArray1.create_array_with_size(array_size.size2)
    status = rand_path.get_output(path, arc_map)
    self.assertTrue(status)
    self.assertFalse(k2host.is_empty(path))
    self.assertFalse(arc_map.empty())
def test_case_3(self):
    # a non-connected, non-topsorted, acyclic input fsa;
    # the output fsa is topsorted.
    s = r'''
0 3 3 1
0 5 5 2
1 2 2 3
2 1 1 4
3 5 5 5
3 2 2 -6
3 4 4 7
3 6 -1 8
4 5 5 9
4 6 -1 10
5 6 -1 11
6
'''
    fsa = k2host.str_to_fsa(s)
    connection = k2host.Connection(fsa)
    array_size = k2host.IntArray2Size()
    connection.get_sizes(array_size)
    fsa_out = k2host.Fsa.create_fsa_with_size(array_size)
    connection.get_output(fsa_out)
    self.assertTrue(k2host.is_top_sorted(fsa_out))
def test_good_cases1(self):
    # An FSA with no states is trivially top-sorted.
    empty_size = k2host.IntArray2Size(0, 0)
    empty_fsa = k2host.Fsa.create_fsa_with_size(empty_size)
    self.assertTrue(k2host.is_top_sorted(empty_fsa))
def test_bad_case1(self):
    # fsa should contain at least two states
    one_state_size = k2host.IntArray2Size(1, 0)
    one_state_fsa = k2host.Fsa.create_fsa_with_size(one_state_size)
    self.assertFalse(k2host.is_valid(one_state_fsa))
def test_bad_cases2(self):
    # The empty FSA has no arcs, hence no self-loops to find.
    empty_size = k2host.IntArray2Size(0, 0)
    empty_fsa = k2host.Fsa.create_fsa_with_size(empty_size)
    self.assertFalse(k2host.has_self_loops(empty_fsa))