def test_index_ragged_shape_two_axes(self):
    """Check RaggedShape.index on a 2-axis shape, for axis 0 and axis 1."""
    for device in self.devices:
        src = k2.RaggedShape('[ [x x] [] [x x x] ]').to(device)

        # axis == 0: an index of -1 yields an empty sublist in the result.
        row_indexes = torch.tensor([-1, 0, -1, 0, 1, 2, 0, 2, 1, -1],
                                   dtype=torch.int32,
                                   device=device)
        out, value_indexes = src.index(axis=0,
                                       indexes=row_indexes,
                                       need_value_indexes=True)
        expected_out = k2.RaggedShape(
            '[ [] [x x] [] [x x] [] [x x x] [x x] [x x x] [] [] ]').to(
                device)
        assert out == expected_out
        expected_value_indexes = torch.tensor(
            [0, 1, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4],
            dtype=torch.int32,
            device=device)
        assert torch.all(torch.eq(value_indexes, expected_value_indexes))

        # axis == 1: here value_indexes must simply echo the input indexes.
        elem_indexes = torch.tensor([0, 0, 0, 1, 2, 2, 2, 3, 3],
                                    dtype=torch.int32,
                                    device=device)
        out, value_indexes = src.index(axis=1,
                                       indexes=elem_indexes,
                                       need_value_indexes=True)
        expected_out = k2.RaggedShape('[ [x x x x] [] [x x x x x] ]').to(
            device)
        assert out == expected_out
        assert torch.all(torch.eq(elem_indexes, value_indexes))
def test_index_ragged_shape_two_axes(self):
    """Check k2.ragged.index on a 2-axis shape, for axis 0 and axis 1."""
    devices = [torch.device('cpu')]
    if torch.cuda.is_available():
        devices.append(torch.device('cuda', 0))
    for device in devices:
        shape = k2.RaggedShape('[ [x x] [] [x x x] ]').to(device)

        # axis == 0: an index of -1 yields an empty sublist in the result.
        row_indexes = torch.tensor([-1, 0, -1, 0, 1, 2, 0, 2, 1, -1],
                                   dtype=torch.int32,
                                   device=device)
        ans, value_indexes = k2.ragged.index(shape,
                                             row_indexes,
                                             axis=0,
                                             need_value_indexes=True)
        expected_ans = k2.RaggedShape(
            '[ [] [x x] [] [x x] [] [x x x] [x x] [x x x] [] [] ]')
        self.assertEqual(str(ans), str(expected_ans))
        expected_value_indexes = torch.tensor(
            [0, 1, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4],
            dtype=torch.int32,
            device=device)
        assert torch.all(torch.eq(value_indexes, expected_value_indexes))

        # axis == 1: here value_indexes must simply echo the input indexes.
        elem_indexes = torch.tensor([0, 0, 0, 1, 2, 2, 2, 3, 3],
                                    dtype=torch.int32,
                                    device=device)
        ans, value_indexes = k2.ragged.index(shape,
                                             elem_indexes,
                                             axis=1,
                                             need_value_indexes=True)
        expected_ans = k2.RaggedShape('[ [x x x x] [] [x x x x x] ]')
        self.assertEqual(str(ans), str(expected_ans))
        assert torch.all(torch.eq(elem_indexes, value_indexes))
def test_top_k(self):
    """Nbest.top_k(k) keeps the k best-scoring paths of each stream.

    The expected data shows that when a stream holds fewer than k
    paths, its top entry is repeated to pad the result to k.
    """
    fsa0 = k2.Fsa.from_str('''0 1 -1 0
1
''')
    fsas = [fsa0.clone() for _ in range(10)]
    fsa_vec = k2.create_fsa_vec(fsas)
    fsa_vec.scores = torch.tensor([3, 0, 1, 5, 4, 2, 8, 1, 9, 6],
                                  dtype=torch.float)
    # index:   0 1   2 3 4   5 6 7 8 9
    # scores: [[3 0] [1 5 4] [2 8 1 9 6]]
    shape = k2.RaggedShape('[ [x x] [x x x] [x x x x x] ]')
    nbest = k2.Nbest(fsa_vec, shape)

    # (k, fsa_vec indexes expected in the result, expected ragged shape)
    cases = [
        (1, [0, 3, 8], '[ [x] [x] [x] ]'),
        (2, [0, 1, 3, 4, 8, 6], '[ [x x] [x x] [x x] ]'),
        (3, [0, 1, 1, 3, 4, 2, 8, 6, 9], '[ [x x x] [x x x] [x x x] ]'),
        (4, [0, 1, 1, 1, 3, 4, 2, 2, 8, 6, 9, 5],
         '[ [x x x x] [x x x x] [x x x x] ]'),
    ]
    for k, picks, shape_str in cases:
        result = nbest.top_k(k)
        expected_fsa = k2.create_fsa_vec([fsa_vec[i] for i in picks])
        assert str(result.fsa) == str(expected_fsa)
        assert result.shape == k2.RaggedShape(shape_str)
def test_get_layer_three_axes(self):
    """k2.ragged.get_layer extracts one 2-axis layer of a 3-axis shape."""
    shape = k2.RaggedShape(
        '[ [[x x] [] [x] [x x x]] [[] [] [x x] [x] [x x]] ]')

    # Layer 0 covers the axis0 -> axis1 relationship only.
    top = k2.ragged.get_layer(shape, 0)
    self.assertEqual(str(top),
                     str(k2.RaggedShape('[ [x x x x] [x x x x x] ]')))

    # Layer 1 covers the axis1 -> axis2 relationship only.
    bottom = k2.ragged.get_layer(shape, 1)
    self.assertEqual(
        str(bottom),
        str(k2.RaggedShape(
            '[ [x x] [] [x] [x x x] [] [] [x x] [x] [x x] ]')))
def test_remove_axis_ragged_shape(self):
    """k2.ragged.remove_axis: check the result for each removable axis."""
    shape = k2.RaggedShape('[ [[x x] [] [x]] [[] [x x] [x x x] [x]] ]')
    expected_by_axis = [
        (0, '[[x x] [] [x] [] [x x] [x x x] [x]]'),
        (1, '[[x x x] [x x x x x x]]'),
        (2, '[[x x x] [x x x x]]'),
    ]
    for axis, expected_str in expected_by_axis:
        ans = k2.ragged.remove_axis(shape, axis)
        self.assertEqual(str(ans), str(k2.RaggedShape(expected_str)))
def test_index_ragged_shape_three_axes(self):
    """Check k2.ragged.index on a 3-axis shape, once per axis."""
    # (axis, input indexes, expected shape, expected value_indexes);
    # for axis == 2 the value_indexes echo the input indexes.
    cases = [
        (0, [-1, 0, 1, 1, -1, 0],
         '[ [] [[x x x] [x x] []] [[x] [x x x]] [[x] [x x x]] [] [[x x x] [x x] []] ]',  # noqa
         [0, 1, 2, 3, 4, 5, 6, 7, 8, 5, 6, 7, 8, 0, 1, 2, 3, 4]),
        (1, [0, 0, 2, 3, 3, 4],
         '[ [[x x x] [x x x] []] [[x] [x] [x x x]] ]',
         [0, 1, 2, 0, 1, 2, 5, 5, 6, 7, 8]),
        (2, [0, 2, 2, 3, 4, 4, 6, 6, 7],
         '[ [[x x x] [x x x] [] ] [[] [x x x]] ]',
         [0, 2, 2, 3, 4, 4, 6, 6, 7]),
    ]
    for device in self.devices:
        shape = k2.RaggedShape('[ [[x x x] [x x] []] [[x] [x x x]] ]').to(
            device)
        for axis, idx, expected_str, expected_val in cases:
            indexes = torch.tensor(idx, dtype=torch.int32, device=device)
            ans, value_indexes = k2.ragged.index(shape,
                                                 indexes,
                                                 axis=axis,
                                                 need_value_indexes=True)
            self.assertEqual(str(ans), str(k2.RaggedShape(expected_str)))
            expected = torch.tensor(expected_val,
                                    dtype=torch.int32,
                                    device=device)
            assert torch.all(torch.eq(value_indexes, expected))
def test_get_layer_three_axes(self):
    """RaggedShape.get_layer extracts one 2-axis layer of a 3-axis shape."""
    for device in self.devices:
        shape = k2.RaggedShape(
            '[ [[x x] [] [x] [x x x]] [[] [] [x x] [x] [x x]] ]').to(device)

        # Layer 0 covers the axis0 -> axis1 relationship only.
        layer0 = shape.get_layer(0)
        assert layer0 == k2.RaggedShape('[ [x x x x] [x x x x x] ]').to(
            device)

        # Layer 1 covers the axis1 -> axis2 relationship only.
        layer1 = shape.get_layer(1)
        assert layer1 == k2.RaggedShape(
            '[ [x x] [] [x] [x x x] [] [] [x x] [x] [x x] ]').to(device)
def test_regular_ragged_shape(self):
    """regular_ragged_shape(dim0, dim1) builds a rectangular shape on CPU."""
    shape = k2.ragged.regular_ragged_shape(1, 2)
    expected = k2.RaggedShape('[[x x]]')
    self.assertEqual(str(shape), str(expected))

    shape = k2.ragged.regular_ragged_shape(2, 3)
    expected = k2.RaggedShape('[[x x x] [x x x]]')
    self.assertEqual(str(shape), str(expected))
    assert shape.row_splits(1).device.type == 'cpu'

    # Also require k2.with_cuda: torch may report an available GPU even
    # when k2 itself was built CPU-only, in which case .to(cuda) would
    # fail (same guard as the companion variant of this test).
    if torch.cuda.is_available() and k2.with_cuda:
        device = torch.device('cuda', 0)
        shape = shape.to(device)
        assert shape.row_splits(1).is_cuda
def test_regular_ragged_shape(self):
    """regular_ragged_shape(dim0, dim1) builds a rectangular shape on CPU."""
    one_by_two = k2.ragged.regular_ragged_shape(1, 2)
    assert one_by_two == k2.RaggedShape('[[x x]]')

    two_by_three = k2.ragged.regular_ragged_shape(2, 3)
    assert two_by_three == k2.RaggedShape('[[x x x] [x x x]]')
    assert two_by_three.row_splits(1).device.type == 'cpu'

    # Move to GPU only when both torch and k2 were built with CUDA.
    if torch.cuda.is_available() and k2.with_cuda:
        moved = two_by_three.to(torch.device('cuda', 0))
        assert moved.row_splits(1).is_cuda
def test_get_layer_two_axes(self):
    """get_layer(0) of a 2-axis shape reproduces the shape itself."""
    for device in self.devices:
        shape = k2.RaggedShape('[ [x x x] [x] [] [x x] ]').to(device)
        layer = k2.ragged.get_layer(shape, 0)
        # A 2-axis shape has exactly one layer, so no information is lost.
        self.assertEqual(layer.num_axes(), 2)
        self.assertEqual(str(layer), str(shape))
def test_fsa_vec_as_dict_ragged(self):
    """Indexing an FsaVec keeps ragged aux_labels aligned with the arcs."""
    aux = k2.RaggedInt(k2.RaggedShape('[ [ x x ] [x] [ x x ] [x]]'),
                       torch.tensor([3, 4, 5, 6, 7, 8], dtype=torch.int32))
    g = k2.Fsa.from_str('0 1 3 0.0\n 1 2 -1 0.0\n 2')
    h = k2.create_fsa_vec([g, g])
    h.aux_labels = aux
    # One aux_labels sublist per arc of the selected Fsa.
    first = h[0]
    assert first.aux_labels.dim0() == first.labels.shape[0]
def test_get_layer_two_axes(self):
    """get_layer(0) of a 2-axis shape reproduces the shape itself."""
    for device in self.devices:
        shape = k2.RaggedShape('[ [x x x] [x] [] [x x] ]').to(device)
        layer = shape.get_layer(0)
        # A 2-axis shape has exactly one layer, so no information is lost.
        assert layer.num_axes == 2
        assert layer == shape
def test_create_ragged_shape2_with_row_splits(self):
    """create_ragged_shape2 can build a shape from row_splits alone."""
    for device in self.devices:
        splits = torch.tensor([0, 1, 3], dtype=torch.int32, device=device)
        shape = k2.ragged.create_ragged_shape2(row_splits=splits)
        assert shape == k2.RaggedShape('[[x] [x x]]').to(device)
def test_remove_axis_ragged_shape(self):
    """RaggedShape.remove_axis: check the result for each removable axis."""
    expected_by_axis = [
        (0, '[[x x] [] [x] [] [x x] [x x x] [x]]'),
        (1, '[[x x x] [x x x x x x]]'),
        (2, '[[x x x] [x x x x]]'),
    ]
    for device in self.devices:
        shape = k2.RaggedShape(
            '[ [[x x] [] [x]] [[] [x x] [x x x] [x]] ]').to(device)
        for axis, expected_str in expected_by_axis:
            ans = shape.remove_axis(axis)
            assert ans == k2.RaggedShape(expected_str).to(device)
def test_nbest_constructor(self):
    """Constructing an Nbest from an FsaVec and a matching shape works."""
    fsa = k2.Fsa.from_str('''0 1 -1 0.1
1
''')
    fsa_vec = k2.create_fsa_vec([fsa, fsa, fsa])
    shape = k2.RaggedShape('[[x x] [x]]')
    # Smoke test: the constructor must not raise.
    k2.Nbest(fsa_vec, shape)
def test_ragged_shape(self):
    """Check the size/axis accessors of a 4-axis RaggedShape.

    Test case reference:
    https://github.com/k2-fsa/k2/blob/f79ce20ce2deeb8f4ed82a0ea028da34cb26e40e/k2/csrc/ragged_shape_test.cu#L60
    """
    src = '''
        [ [ [[ x x] [x]] [[x x]] ]
          [ [[x x x]] [[x] [x x x]] [[x]] ]
          [ [[x x] [] [x]] ]
        ]
    '''
    expected_row_splits = {
        1: [0, 2, 5, 6],
        2: [0, 2, 3, 4, 6, 7, 10],
        3: [0, 2, 3, 5, 8, 9, 12, 13, 15, 15, 16],
    }
    expected_row_ids = {
        1: [0, 0, 1, 1, 1, 2],
        2: [0, 0, 1, 2, 3, 3, 4, 5, 5, 5],
        3: [0, 0, 1, 2, 2, 3, 3, 3, 4, 5, 5, 5, 6, 7, 7, 9],
    }

    devices = [torch.device('cpu')]
    if torch.cuda.is_available():
        devices.append(torch.device('cuda', 0))
    for device in devices:
        shape = k2.RaggedShape(src).to(device)

        assert shape.num_axes() == 4
        assert shape.dim0() == 3

        for axis, total in enumerate([3, 6, 10, 16]):
            assert shape.tot_size(axis) == total
        assert shape.num_elements() == shape.tot_size(3)

        for axis in (1, 2, 3):
            assert shape.max_size(axis) == 3
            assert torch.allclose(
                shape.row_splits(axis),
                torch.tensor(expected_row_splits[axis],
                             dtype=torch.int32).to(device))
            assert torch.allclose(
                shape.row_ids(axis),
                torch.tensor(expected_row_ids[axis],
                             dtype=torch.int32).to(device))