# Example n. 1
def test_mcc_tagger_accuracy():
    """The most-common-tag baseline tagger should exceed 0.811124 accuracy."""
    global tagger_mc, all_tags

    threshold = 0.811124

    conf_matrix = tagger_base.eval_tagger(tagger_mc, 'most-common.preds', all_tags=all_tags)
    score = scorer.accuracy(conf_matrix)

    ok_(threshold < score, msg="NOT_IN_RANGE Expected:%f, Actual:%f" % (threshold, score))
def test_classifier():
    """An all-nouns classifier tagger should match the reference accuracy (3 places)."""
    global all_tags

    reference = 0.1527613022274944

    # Build a trivial tagger whose weights always favor the noun tag.
    weights = most_common.get_noun_weights()
    noun_baseline = tagger_base.make_classifier_tagger(weights)

    conf_matrix = tagger_base.eval_tagger(noun_baseline, 'all_nouns.preds', all_tags=all_tags)
    observed = scorer.accuracy(conf_matrix)

    assert_almost_equal(reference, observed, places=3, msg="UNEQUAL Expected:%s, Actual:%s" % (reference, observed))
def test_nr_hmm_test_accuracy():
    """HMM tagger must score above .853 on the Norwegian test set."""
    score = scorer.accuracy(scorer.get_confusion(NR_TEST_FILE, 'hmm-te-nr.preds'))
    ok_(score > .853)
def test_nr_hmm_dev_accuracy():
    """HMM tagger must score above .861 on the Norwegian dev set."""
    score = scorer.accuracy(scorer.get_confusion(NR_DEV_FILE, 'hmm-dev-nr.preds'))
    ok_(score > .861)
def test_hmm_test_accuracy():
    """HMM tagger must score above .840 on the English test set."""
    score = scorer.accuracy(scorer.get_confusion(TEST_FILE, 'hmm-te-en.preds'))
    ok_(score > .840)
def test_hmm_dev_accuracy():
    """HMM tagger must score above .840 on the English dev set."""
    score = scorer.accuracy(scorer.get_confusion(DEV_FILE, 'hmm-dev-en.preds'))
    ok_(score > .840)
def test_nr_test_accuracy3():
    """Bakeoff tagger must score above .86 on the Norwegian test set."""
    score = scorer.accuracy(scorer.get_confusion(NR_TEST_FILE, 'bakeoff-te-nr.preds'))
    ok_(score > .86)
def test_en_dev_accuracy1():
    """Bakeoff tagger must score above .85 on the English dev set."""
    score = scorer.accuracy(scorer.get_confusion(DEV_FILE, 'bakeoff-dev-en.preds'))
    ok_(score > .85)
def test_nr_dev_accuracy3():
    """Bakeoff tagger must score above .87 on the Norwegian dev set."""
    score = scorer.accuracy(scorer.get_confusion(NR_DEV_FILE, 'bakeoff-dev-nr.preds'))
    ok_(score > .87)
def test_en_test_accuracy3():
    """Bakeoff tagger must score above .86 on the English test set."""
    score = scorer.accuracy(scorer.get_confusion(TEST_FILE, 'bakeoff-te-en.preds'))
    ok_(score > .86)
def test_bilstm_dev_accuracy():
    """BiLSTM tagger must score above .85 on the English dev set."""
    score = scorer.accuracy(scorer.get_confusion(DEV_FILE, 'bilstm-dev-en.preds'))
    ok_(score > .85)