def test_jieba_with_offsets_5():
    """Test add dict with file path"""
    DATA_FILE4 = "../data/dataset/testJiebaDataset/6.txt"

    data = ds.TextFileDataset(DATA_FILE4)
    jieba_op = JiebaTokenizer(HMM_FILE,
                              MP_FILE,
                              mode=JiebaMode.MP,
                              with_offsets=True)
    jieba_op.add_word("江大桥", 20000)
    data = data.map(input_columns=["text"],
                    output_columns=["token", "offsets_start", "offsets_limit"],
                    columns_order=["token", "offsets_start", "offsets_limit"],
                    operations=jieba_op,
                    num_parallel_workers=1)
    expect = ['江州', '市长', '江大桥', '参加', '了', '长江大桥', '的', '通车', '仪式']
    expected_offsets_start = [0, 6, 12, 21, 27, 30, 42, 45, 51]
    expected_offsets_limit = [6, 12, 21, 27, 30, 42, 45, 51, 57]
    for i in data.create_dict_iterator():
        ret = to_str(i["token"])
        for index, item in enumerate(ret):
            assert item == expect[index]
        for index, item in enumerate(i["offsets_start"]):
            assert item == expected_offsets_start[index]
        for index, item in enumerate(i["offsets_limit"]):
            assert item == expected_offsets_limit[index]
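
The examples above and below rely on names (ds, JiebaTokenizer, JiebaMode, to_str, HMM_FILE, MP_FILE) defined elsewhere in the test module. A minimal sketch of the assumed shared setup, with placeholder paths for the jieba HMM model and MP dictionary files (one common arrangement; the exact import locations and paths may differ by MindSpore version):

import numpy as np
import pytest

import mindspore.dataset as ds
from mindspore import log as logger
from mindspore.dataset.text import JiebaTokenizer, JiebaMode, to_str

# Placeholder locations for the jieba HMM model and MP (max-probability)
# dictionary files used to construct every JiebaTokenizer in these examples.
HMM_FILE = "../data/dataset/jiebadict/hmm_model.utf8"
MP_FILE = "../data/dataset/jiebadict/jieba.dict.utf8"
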
def test_jieba_2_2():
    """Test add_word with invalid None input; a ValueError is expected"""
    jieba_op = JiebaTokenizer(HMM_FILE, MP_FILE, mode=JiebaMode.MP)
    with pytest.raises(ValueError):
        jieba_op.add_word(None)

def test_jieba_with_offsets_2_1():
    """Test add_word with freq"""
    DATA_FILE4 = "../data/dataset/testJiebaDataset/4.txt"
    data = ds.TextFileDataset(DATA_FILE4)
    jieba_op = JiebaTokenizer(HMM_FILE,
                              MP_FILE,
                              mode=JiebaMode.MP,
                              with_offsets=True)
    jieba_op.add_word("男默女泪", 10)
    data = data.map(input_columns=["text"],
                    output_columns=["token", "offsets_start", "offsets_limit"],
                    columns_order=["token", "offsets_start", "offsets_limit"],
                    operations=jieba_op,
                    num_parallel_workers=2)
    expect = ['男默女泪', '市', '长江大桥']
    expected_offsets_start = [0, 12, 15]
    expected_offsets_limit = [12, 15, 27]
    for i in data.create_dict_iterator():
        ret = to_str(i["token"])
        for index, item in enumerate(ret):
            assert item == expect[index]
        for index, item in enumerate(i["offsets_start"]):
            assert item == expected_offsets_start[index]
        for index, item in enumerate(i["offsets_limit"]):
            assert item == expected_offsets_limit[index]
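
The expected offset values in the with_offsets tests correspond to UTF-8 byte positions rather than character indices: each CJK character occupies 3 bytes, so '男默女泪' spans bytes [0, 12), '市' spans [12, 15), and '长江大桥' spans [15, 27). A quick sanity check in plain Python:

# Recompute the expected byte offsets for the tokens asserted above.
tokens = ['男默女泪', '市', '长江大桥']
start = 0
for token in tokens:
    limit = start + len(token.encode("utf-8"))  # 3 bytes per CJK character
    print(token, start, limit)                  # (0, 12), (12, 15), (15, 27)
    start = limit
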
Example #4
def test_jieba_2_1():
    """Test add_word with freq"""
    DATA_FILE4 = "../data/dataset/testJiebaDataset/4.txt"
    data = ds.TextFileDataset(DATA_FILE4)
    jieba_op = JiebaTokenizer(HMM_FILE, MP_FILE, mode=JiebaMode.MP)
    jieba_op.add_word("男默女泪", 10)
    data = data.map(operations=jieba_op, input_columns=["text"],
                    num_parallel_workers=2)
    expect = ['男默女泪', '市', '长江大桥']
    for i in data.create_dict_iterator(num_epochs=1, output_numpy=True):
        ret = to_str(i["text"])
        for index, item in enumerate(ret):
            assert item == expect[index]
Example #5
def test_jieba_2_3():
    """Test add_word with freq, the value of freq affects the result of segmentation"""
    DATA_FILE4 = "../data/dataset/testJiebaDataset/6.txt"
    data = ds.TextFileDataset(DATA_FILE4)
    jieba_op = JiebaTokenizer(HMM_FILE, MP_FILE, mode=JiebaMode.MP)
    jieba_op.add_word("江大桥", 20000)
    data = data.map(operations=jieba_op, input_columns=["text"],
                    num_parallel_workers=2)
    expect = ['江州', '市长', '江大桥', '参加', '了', '长江大桥', '的', '通车', '仪式']
    for i in data.create_dict_iterator(num_epochs=1, output_numpy=True):
        ret = to_str(i["text"])
        for index, item in enumerate(ret):
            assert item == expect[index]
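
The effect of the freq value is easiest to see by running the tokenizer eagerly on the same sentence with and without the boosted entry (eager calls are shown in the callable examples further down). This sketch is illustrative only; the baseline output is not asserted by any of these tests:

sentence = "江州市长江大桥参加了长江大桥的通车仪式"

baseline_op = JiebaTokenizer(HMM_FILE, MP_FILE, mode=JiebaMode.MP)
boosted_op = JiebaTokenizer(HMM_FILE, MP_FILE, mode=JiebaMode.MP)
boosted_op.add_word("江大桥", 20000)

print(baseline_op(sentence))  # without the boost, '长江大桥' tends to stay in one token
print(boosted_op(sentence))   # with freq=20000 the split becomes '市长' + '江大桥'
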
def test_jieba_5():
    """Test add dict with file path"""
    DATA_FILE4 = "../data/dataset/testJiebaDataset/6.txt"

    data = ds.TextFileDataset(DATA_FILE4)
    jieba_op = JiebaTokenizer(HMM_FILE, MP_FILE, mode=JiebaMode.MP)
    jieba_op.add_word("江大桥", 20000)
    data = data.map(input_columns=["text"],
                    operations=jieba_op,
                    num_parallel_workers=1)
    expect = ['江州', '市长', '江大桥', '参加', '了', '长江大桥', '的', '通车', '仪式']
    for i in data.create_dict_iterator():
        ret = to_str(i["text"])
        for index, item in enumerate(ret):
            assert item == expect[index]
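
Besides add_word, MindSpore's JiebaTokenizer also provides add_dict for registering many entries at once; it accepts either a {word: freq} mapping or the path to a user dictionary file. A minimal sketch, assuming a hypothetical dictionary file with one "word freq" entry per line:

jieba_op = JiebaTokenizer(HMM_FILE, MP_FILE, mode=JiebaMode.MP)

# Register several custom entries in one call instead of repeated add_word calls.
jieba_op.add_dict({"江大桥": 20000, "男默女泪": 10})

# add_dict also accepts a file path; USER_DICT_FILE is a hypothetical path to a
# plain-text user dictionary with one "word freq" entry per line.
USER_DICT_FILE = "../data/dataset/testJiebaDataset/user_dict.txt"
jieba_op.add_dict(USER_DICT_FILE)
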
Example #7
def test_jieba_callable():
    """
    Test jieba tokenizer op is callable
    """
    logger.info("test_jieba_callable")
    jieba_op1 = JiebaTokenizer(HMM_FILE, MP_FILE, mode=JiebaMode.MP)
    jieba_op2 = JiebaTokenizer(HMM_FILE, MP_FILE, mode=JiebaMode.HMM)

    text1 = "今天天气太好了我们一起去外面玩吧"
    text2 = "男默女泪市长江大桥"
    assert np.array_equal(jieba_op1(text1), ['今天天气', '太好了', '我们', '一起', '去', '外面', '玩吧'])
    assert np.array_equal(jieba_op2(text1), ['今天', '天气', '太', '好', '了', '我们', '一起', '去', '外面', '玩', '吧'])

    jieba_op1.add_word("男默女泪")
    assert np.array_equal(jieba_op1(text2), ['男默女泪', '市', '长江大桥'])
Example #8
def test_jieba_callable():
    """
    Test jieba tokenizer op is callable
    """
    logger.info("test_jieba_callable")
    jieba_op1 = JiebaTokenizer(HMM_FILE, MP_FILE, mode=JiebaMode.MP)
    jieba_op2 = JiebaTokenizer(HMM_FILE, MP_FILE, mode=JiebaMode.HMM)

    # test one tensor
    text1 = "今天天气太好了我们一起去外面玩吧"
    text2 = "男默女泪市长江大桥"
    assert np.array_equal(jieba_op1(text1),
                          ['今天天气', '太好了', '我们', '一起', '去', '外面', '玩吧'])
    assert np.array_equal(
        jieba_op2(text1),
        ['今天', '天气', '太', '好', '了', '我们', '一起', '去', '外面', '玩', '吧'])
    jieba_op1.add_word("男默女泪")
    assert np.array_equal(jieba_op1(text2), ['男默女泪', '市', '长江大桥'])

    # test input multiple tensors
    with pytest.raises(RuntimeError) as info:
        _ = jieba_op1(text1, text2)
    assert "JiebaTokenizer: input only support one column data." in str(
        info.value)