예제 #1
0
def test_alpino_unicode():
    "Test what happens with non-ascii characters in input"
    _check_alpino()

    sentence = u"Bjarnfre\xf0arson leeft"
    # tokenize should convert to utf-8 and add only a final line break
    assert_equal(tokenize(sentence).decode("utf-8"), sentence + "\n")
    result = alpino(sentence, output='saf')
    lemmata = {token['lemma'] for token in result['tokens']}
    assert_equal(lemmata, {u"Bjarnfre\xf0arson", u"leef"})

    # CJK characters should survive the round trip as well
    sentence = u"\u738b\u6bc5 ook"
    result = alpino(sentence, output='saf')
    lemmata = {token['lemma'] for token in result['tokens']}
    assert_equal(lemmata, {u"\u738b\u6bc5", u"ook"})

    # accented latin-1 characters in both input and lemma output
    sentence = u"E\xe9n test nog"
    result = alpino(sentence, output='saf')
    lemmata = {token['lemma'] for token in result['tokens']}
    assert_equal(lemmata, {u"\xe9\xe9n", "test", "nog"})
예제 #2
0
def test_alpino_unicode():
    "Test what happens with non-ascii characters in input"
    _check_alpino()

    first = u"Bjarnfre\xf0arson leeft"
    # tokenize should convert to utf-8, adding only the trailing newline
    assert_equal(tokenize(first).decode("utf-8"), first + "\n")

    # each case: input sentence -> expected set of lemmata from Alpino
    cases = [
        (first, {u"Bjarnfre\xf0arson", u"leef"}),
        (u"\u738b\u6bc5 ook", {u"\u738b\u6bc5", u"ook"}),
        (u"E\xe9n test nog", {u"\xe9\xe9n", "test", "nog"}),
    ]
    for sent, expected_lemmata in cases:
        saf = alpino(sent, output='saf')
        assert_equal({tok['lemma'] for tok in saf['tokens']},
                     expected_lemmata)
예제 #3
0
def test_tokenize():
    """Tokenizing splits off punctuation, drops pipes, one sentence per line."""
    _check_alpino()
    sentence = u"D\xedt is een zin, met komma |nietwaar|? En nog 'n zin"
    # expected: ',' and '?' detached, '|' removed, sentences newline-separated
    want = u"D\xedt is een zin , met komma nietwaar ?\nEn nog 'n zin\n"
    assert_equal(tokenize(sentence), want.encode('utf-8'))
예제 #4
0
def test_tokenize():
    """Check tokenize output bytes against a known expected tokenization."""
    _check_alpino()
    raw = u"D\xedt is een zin, met komma |nietwaar|? En nog 'n zin"
    # tokenize returns utf-8 encoded bytes, so encode the expectation too
    expected_bytes = (
        u"D\xedt is een zin , met komma nietwaar ?\nEn nog 'n zin\n"
    ).encode('utf-8')
    assert_equal(tokenize(raw), expected_bytes)