Example #1
from nose.tools import ok_

import scorer       # project-local module
import tagger_base  # project-local module

def test_mcc_tagger_accuracy():
    # Module-level fixtures, presumably populated by a setup function.
    global tagger_mc, all_tags

    expected = 0.838369

    confusion = tagger_base.eval_tagger(tagger_mc,
                                        'most-common.preds',
                                        all_tags=all_tags)
    actual = scorer.accuracy(confusion)

    # The tagger must strictly beat the expected accuracy.
    ok_(expected < actual,
        msg="NOT_IN_RANGE Expected:%f, Actual:%f" % (expected, actual))
Example #2
from nose.tools import assert_almost_equal

import most_common  # project-local module
import scorer       # project-local module
import tagger_base  # project-local module

def test_classifier():
    global all_tags

    # Known accuracy of the tag-everything-as-a-noun baseline.
    expected = 0.16667992047713717

    noun_weights = most_common.get_noun_weights()
    noun_tagger = tagger_base.make_classifier_tagger(noun_weights)

    confusion = tagger_base.eval_tagger(noun_tagger, 'all_nouns.preds', all_tags=all_tags)
    actual = scorer.accuracy(confusion)

    assert_almost_equal(expected, actual, places=3,
                        msg="UNEQUAL Expected:%s, Actual:%s" % (expected, actual))
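make_classifier_tagger evidently wraps a weight dictionary in a tagger, and get_noun_weights puts all weight on the noun tag, so every token comes out labeled as a noun. Below is a minimal sketch of that idea, assuming weights keyed by (tag, feature) pairs with a bias feature; the key layout, the OFFSET name, and both _sketch functions are assumptions rather than the real API.

from collections import defaultdict

OFFSET = '**OFFSET**'  # bias feature name -- an illustrative assumption

def make_classifier_tagger_sketch(weights):
    """Score each word against every candidate tag and pick the best.

    weights: maps (tag, feature) pairs to floats, where the feature is
    the word itself or the OFFSET bias -- an assumed layout.
    """
    def tagger(words, all_tags):
        return [max(all_tags,
                    key=lambda tag: weights.get((tag, word), 0.0)
                                    + weights.get((tag, OFFSET), 0.0))
                for word in words]
    return tagger

def get_noun_weights_sketch():
    """All weight on the noun bias feature => everything is tagged NOUN."""
    weights = defaultdict(float)
    weights[('NOUN', OFFSET)] = 1.0
    return weights

# Illustrative usage:
# noun_tagger = make_classifier_tagger_sketch(get_noun_weights_sketch())
# noun_tagger(['the', 'dog'], ['NOUN', 'DET'])  # -> ['NOUN', 'NOUN']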
Example #3
from nose.tools import ok_
import scorer  # project-local module

def test_nr_hmm_test_accuracy():
    # NR_TEST_FILE is a module-level path constant (defined elsewhere).
    confusion = scorer.get_confusion(NR_TEST_FILE, 'hmm-te-nr.preds')
    acc = scorer.accuracy(confusion)
    ok_(acc > .903)
Example #4
from nose.tools import ok_
import scorer  # project-local module

def test_nr_hmm_dev_accuracy():
    # NR_DEV_FILE is a module-level path constant (defined elsewhere).
    confusion = scorer.get_confusion(NR_DEV_FILE, 'hmm-dev-nr.preds')
    acc = scorer.accuracy(confusion)
    ok_(acc > .910)
Example #5
from nose.tools import ok_
import scorer  # project-local module

def test_hmm_test_accuracy():
    # TEST_FILE is a module-level path constant (defined elsewhere).
    confusion = scorer.get_confusion(TEST_FILE, 'hmm-te-en.preds')
    acc = scorer.accuracy(confusion)
    ok_(acc > .880)
Example #6
from nose.tools import ok_
import scorer  # project-local module

def test_hmm_dev_accuracy():
    # DEV_FILE is a module-level path constant (defined elsewhere).
    confusion = scorer.get_confusion(DEV_FILE, 'hmm-dev-en.preds')
    acc = scorer.accuracy(confusion)
    ok_(acc > .870)
Example #7
from nose.tools import ok_
import scorer  # project-local module

def test_bilstm_test_accuracy():
    # 'bilstm-te-en.preds' holds test-set predictions, so compare against TEST_FILE.
    confusion = scorer.get_confusion(TEST_FILE, 'bilstm-te-en.preds')
    acc = scorer.accuracy(confusion)
    ok_(acc > .83)
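Every test above reduces a confusion matrix to one accuracy number. Below is a minimal sketch of that reduction, assuming the confusion matrix maps (gold, predicted) tag pairs to counts; the actual structure returned by scorer.get_confusion is not shown in these examples.

def accuracy_sketch(confusion):
    """Fraction of tokens whose predicted tag matches the gold tag.

    confusion: maps (gold_tag, predicted_tag) -> count -- an assumed
    layout for the matrix returned by scorer.get_confusion.
    """
    total = sum(confusion.values())
    correct = sum(n for (gold, pred), n in confusion.items() if gold == pred)
    return correct / total if total else 0.0

# Illustrative usage:
# accuracy_sketch({('N', 'N'): 8, ('N', 'V'): 2})  # -> 0.8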