Example no. 1
from nose.tools import ok_
# most_common, tagger_base, scorer and TRAIN_FILE are project-local and assumed importable here.

def test_get_most_common_tag():
    # The most-common-tag baseline should score above 0.63 accuracy.
    expected = 0.63
    weights = most_common.get_most_common_weights(TRAIN_FILE)
    confusion = tagger_base.evalTagger(
        tagger_base.makeClassifierTagger(weights), 'mcc')
    actual = scorer.accuracy(confusion)

    ok_(expected < actual,
        msg="NOT_IN_RANGE Expected:%f, Actual:%f" % (expected, actual))
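
The test above only checks the end-to-end number: evalTagger produces a confusion matrix and scorer.accuracy reduces it to a single score. The scorer module itself is not shown in these examples, so the following is only a minimal sketch of that reduction, assuming the confusion matrix maps (gold, predicted) tag pairs to counts; that key layout and the name accuracy_sketch are assumptions, not taken from the source.

from collections import Counter

def accuracy_sketch(confusion):
    # Assumption (not from the source): the confusion matrix maps
    # (gold_tag, predicted_tag) pairs to counts.
    correct = sum(n for (gold, pred), n in confusion.items() if gold == pred)
    total = sum(confusion.values())
    return correct / total if total else 0.0

conf = Counter({('N', 'N'): 8, ('V', 'V'): 5, ('N', 'V'): 2})
print(accuracy_sketch(conf))  # 13 / 15, roughly 0.867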
Example no. 2
from nose.tools import assert_almost_equals

def test_classifier_tagger():
    # An all-nouns baseline tagger should land at roughly 0.1368 accuracy.
    expected = 0.136844287788
    noun_weights = most_common.get_noun_weights()
    noun_tagger = tagger_base.makeClassifierTagger(noun_weights)

    confusion = tagger_base.evalTagger(noun_tagger, 'nouns')
    actual = scorer.accuracy(confusion)

    assert_almost_equals(expected, actual, places=3,
                         msg="UNEQUAL Expected:%s, Actual:%s" % (expected, actual))
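
Neither get_noun_weights nor makeClassifierTagger is shown in these examples. As an illustration only, here is a minimal sketch of a classifier tagger driven by a weight table, plus a constant "everything is a noun" baseline; the (tag, word) key layout, the tagset, and the names make_tagger_sketch and NounWeights are assumptions for the sketch, not the project's actual API.

def make_tagger_sketch(weights, tagset=('NOUN', 'VERB', 'ADJ', 'ADV')):
    # Assumption: weights maps (tag, word) pairs to scores, unseen pairs
    # score 0.0, and each word gets its highest-scoring tag.
    def tag(words):
        return [max(tagset, key=lambda t: weights.get((t, w), 0.0)) for w in words]
    return tag

class NounWeights(dict):
    # Toy weight table: 1.0 for ('NOUN', word) with any word, 0.0 otherwise,
    # so the resulting tagger labels every token as a noun.
    def get(self, key, default=0.0):
        return 1.0 if key[0] == 'NOUN' else default

noun_tagger = make_tagger_sketch(NounWeights())
print(noun_tagger(['the', 'cat', 'runs']))  # ['NOUN', 'NOUN', 'NOUN']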