def test_viterbi_trans():
    """Viterbi tagger with transition features recovers the gold tags and score.

    Builds a toy weight vector that gives weight 1 to every emission feature
    of the gold tag (and of the 'X' fallback tag) at each position, then
    checks that viterbiTagger returns the gold sequence with total score 8.0.

    NOTE(review): weights are built via wordFeatures but decoding uses
    wordTransFeatures -- presumably only the emission features overlap;
    confirm against the feature definitions.
    """
    test_weights = defaultdict(float)
    test_tags = ['N', 'V', 'V', 'N']
    for i, _ in enumerate(sent):
        # Weight 1 for the gold-tag features and for the 'X' fallback features.
        for tag in (test_tags[i], 'X'):
            for feat in features.wordFeatures(sent, tag, 'X', i):
                test_weights[feat] = 1
    expected_output = test_tags
    expected_score = 8.0
    actual_output, actual_score = viterbi.viterbiTagger(
        sent, features.wordTransFeatures, test_weights, alltags)
    eq_(expected_output, actual_output,
        msg="UNEQUAL viterbi trans output Expected:%s, Actual:%s" % (expected_output, actual_output))
    eq_(expected_score, actual_score,
        msg="UNEQUAL viterbi trans score Expected:%s, Actual:%s" % (expected_score, actual_score))
def test_basic_classifer():
    """Sanity-check classifierTagger on hand-set weights.

    First: with weight 1 on every gold-tag emission feature (plus the 'X'
    fallback features), the classifier should recover the gold tag sequence
    on `sent`. Second: the same weights, evaluated over the 'test' split,
    should reproduce a known accuracy to 3 decimal places.
    """
    test_weights = defaultdict(float)
    test_tags = ['N', 'V', 'V', 'N']
    for i, _ in enumerate(sent):
        # Weight 1 for the gold-tag features and for the 'X' fallback features.
        for tag in (test_tags[i], 'X'):
            for feat in features.wordFeatures(sent, tag, 'X', i):
                test_weights[feat] = 1
    expected = test_tags
    actual = tagger_base.classifierTagger(sent, features.wordFeatures, test_weights, alltags)
    eq_(expected, actual, msg="UNEQUAL Expected:%s, Actual:%s" % (expected, actual))
    expected_acc = 0.139539705577
    confusion = tagger_base.evalTagger(
        lambda words, alltags: tagger_base.classifierTagger(
            words, features.wordFeatures, test_weights, alltags),
        'test')
    actual_acc = scorer.accuracy(confusion)
    assert_almost_equals(expected_acc, actual_acc, places=3)
def test_viterbi_trans():
    """Check Viterbi decoding with transition features against a toy weight vector.

    NOTE(review): this re-defines test_viterbi_trans (a definition with the
    same name appears earlier in the file); only the last definition is the
    one the test runner collects -- consider removing the duplicate.
    """
    gold = ['N', 'V', 'V', 'N']
    weights = defaultdict(float)
    for pos in range(len(sent)):
        # Activate the gold tag's emission features at this position...
        for feat in features.wordFeatures(sent, gold[pos], 'X', pos):
            weights[feat] = 1
        # ...and the 'X' fallback features as well.
        for feat in features.wordFeatures(sent, 'X', 'X', pos):
            weights[feat] = 1
    expected_output = gold
    expected_score = 8.0
    actual_output, actual_score = viterbi.viterbiTagger(
        sent, features.wordTransFeatures, weights, alltags)
    eq_(expected_output, actual_output,
        msg="UNEQUAL viterbi trans output Expected:%s, Actual:%s" % (expected_output, actual_output))
    eq_(expected_score, actual_score,
        msg="UNEQUAL viterbi trans score Expected:%s, Actual:%s" % (expected_score, actual_score))
def test_basic_classifer():
    """Classifier tagger should recover gold tags on `sent` and hit a known
    accuracy on the 'test' split, given hand-set feature weights.

    NOTE(review): this re-defines test_basic_classifer (same name appears
    earlier in the file); only the last definition is collected by the test
    runner -- consider removing the duplicate.
    """
    gold = ['N', 'V', 'V', 'N']
    weights = defaultdict(float)
    for pos in range(len(sent)):
        # Activate the gold tag's emission features at this position...
        for feat in features.wordFeatures(sent, gold[pos], 'X', pos):
            weights[feat] = 1
        # ...and the 'X' fallback features as well.
        for feat in features.wordFeatures(sent, 'X', 'X', pos):
            weights[feat] = 1
    predicted = tagger_base.classifierTagger(sent, features.wordFeatures, weights, alltags)
    eq_(gold, predicted, msg="UNEQUAL Expected:%s, Actual:%s" % (gold, predicted))
    # Accuracy of this toy weight vector on the held-out 'test' split,
    # checked only to 3 decimal places.
    expected_acc = 0.139539705577
    tag_fn = lambda words, alltags: tagger_base.classifierTagger(
        words, features.wordFeatures, weights, alltags)
    confusion = tagger_base.evalTagger(tag_fn, 'test')
    actual_acc = scorer.accuracy(confusion)
    assert_almost_equals(expected_acc, actual_acc, places=3)