コード例 #1
0
ファイル: tagger_base.py プロジェクト: arbylee1/gt-nlp-class
def eval_tagger(tagger,
                outfilename=None,
                all_tags=None,
                trainfile=TRAIN_FILE,
                testfile=DEV_FILE):
    """Calculate the confusion matrix for a given tagger.

    Parameters:
    tagger -- Function mapping (words, possible_tags) to an optimal
              sequence of tags for the words
    outfilename -- (optional) Filename to write tagger predictions to;
                   when None, a temporary file is used and removed afterwards
    all_tags -- (optional) Collection of candidate tags passed to apply_tagger
    trainfile -- (optional) Filename containing training data
    testfile -- (optional) Filename containing true labels

    Returns:
    confusion_matrix -- dict of occurrences of (true_label, pred_label)
    """
    import os

    is_temporary = outfilename is None

    def get_outfile():
        if is_temporary:
            # delete=False so the file still exists (by name) when the scorer
            # reads it inside the `with` block; we remove it ourselves below.
            return tempfile.NamedTemporaryFile('w', delete=False)
        else:
            return open(outfilename, 'w')

    outfile = get_outfile()
    try:
        with outfile:
            apply_tagger(tagger, outfile.name, all_tags, trainfile, testfile)
            confusion = scorer.get_confusion(
                testfile, outfile.name)  #run the scorer on the prediction file
    finally:
        if is_temporary:
            # BUG FIX: the original never deleted the NamedTemporaryFile
            # (delete=False), leaking one temp file per call.
            os.remove(outfile.name)

    return confusion
コード例 #2
0
def test_model_crf_nr_dev_accuracies():
    """Print dev-set accuracies for the bilstm / bilstm-crf prediction files.

    The assertions are intentionally left commented out, as in the original;
    this test only reports accuracies.  The four copy-pasted stanzas were
    collapsed into one data-driven loop (same files, same order, same output).
    """
    cases = [
        (DEV_FILE, '../bilstm-dev-en.preds'),
        (DEV_FILE, '../bilstm_crf-dev-en.preds'),
        (NR_DEV_FILE, '../bilstm_crf-dev-nr.preds'),
        (NR_DEV_FILE, '../bilstm-dev-nr.preds'),
    ]
    for gold_file, pred_file in cases:
        confusion = scorer.get_confusion(gold_file, pred_file)
        acc = scorer.accuracy(confusion)
        print("Acc: " + str(acc))
        # ok_(acc > .86, "Accuracy Obt: " + str(acc))
コード例 #3
0
ファイル: tagger_base.py プロジェクト: cedebrun/gt-nlp-class
def eval_model(model,outfilename, word_to_ix, all_tags=None,trainfile=TRAIN_FILE,testfile=DEV_FILE):
    """Calculate confusion_matrix for a given model

    Parameters:
    model -- Model mapping (words) to an optimal
             sequence of tags for the words
    outfilename -- Filename to write model predictions to
    word_to_ix -- Mapping from word to integer index, passed through to apply_model
    all_tags -- (optional) Collection of candidate tags passed to apply_model
    trainfile -- (optional) Filename containing training data
    testfile -- (optional) Filename containing true labels

    Returns:
    confusion_matrix -- dict of occurrences of (true_label, pred_label)
    """
    apply_model(model,outfilename,word_to_ix, all_tags,trainfile,testfile)
    return scorer.get_confusion(testfile,outfilename) #run the scorer on the prediction file
コード例 #4
0
def eval_tagger(tagger,outfilename,all_tags=None,trainfile=TRAIN_FILE,testfile=DEV_FILE):
    """Score a tagger's predictions against the gold labels.

    Writes the tagger's predictions for ``testfile`` to ``outfilename`` and
    compares them against the true labels in ``testfile``.

    Parameters:
    tagger -- Function mapping (words, possible_tags) to an optimal
              sequence of tags for the words
    outfilename -- Filename to write tagger predictions to
    testfile -- (optional) Filename containing true labels

    Returns:
    confusion_matrix -- dict of occurrences of (true_label, pred_label)
    """
    apply_tagger(tagger, outfilename, all_tags, trainfile, testfile)
    # Score the freshly written prediction file.
    confusion = scorer.get_confusion(testfile, outfilename)
    return confusion
コード例 #5
0
ファイル: tagger_base.py プロジェクト: cedebrun/gt-nlp-class
def eval_tagger(tagger,outfilename=None,all_tags=None,trainfile=TRAIN_FILE,testfile=DEV_FILE):
    """Calculate the confusion matrix for a given tagger.

    Parameters:
    tagger -- Function mapping (words, possible_tags) to an optimal
              sequence of tags for the words
    outfilename -- (optional) Filename to write tagger predictions to;
                   when None, a temporary file is used and removed afterwards
    all_tags -- (optional) Collection of candidate tags passed to apply_tagger
    trainfile -- (optional) Filename containing training data
    testfile -- (optional) Filename containing true labels

    Returns:
    confusion_matrix -- dict of occurrences of (true_label, pred_label)
    """
    import os

    use_tempfile = outfilename is None

    def get_outfile():
        if use_tempfile:
            # delete=False keeps the file on disk (by name) so the scorer can
            # read it inside the `with` block; cleaned up explicitly below.
            return tempfile.NamedTemporaryFile('w',delete=False)
        else:
            return open(outfilename,'w')

    outfile = get_outfile()
    try:
        with outfile:
            apply_tagger(tagger,outfile.name,all_tags,trainfile,testfile)
            confusion = scorer.get_confusion(testfile,outfile.name) #run the scorer on the prediction file
    finally:
        if use_tempfile:
            # BUG FIX: the original leaked one temp file per call because
            # NamedTemporaryFile(delete=False) was never removed.
            os.remove(outfile.name)

    return confusion
コード例 #6
0
ファイル: test_hmm.py プロジェクト: cedebrun/gt-nlp-class
def test_hmm_test_accuracy():
    """Accuracy of 'hmm-te-en.preds' against TEST_FILE must exceed .840."""
    accuracy = scorer.accuracy(scorer.get_confusion(TEST_FILE,'hmm-te-en.preds'))
    ok_(accuracy > .840)
コード例 #7
0
ファイル: test_hmm.py プロジェクト: zyc130130/gt-nlp-class
def test_hmm_test_accuracy():
    """Accuracy of 'hmm-te-en.preds' against TEST_FILE must exceed .840."""
    accuracy = scorer.accuracy(
        scorer.get_confusion(TEST_FILE, 'hmm-te-en.preds'))
    ok_(accuracy > .840)
コード例 #8
0
ファイル: test_hmm.py プロジェクト: zyc130130/gt-nlp-class
def test_nr_hmm_test_accuracy():
    """Accuracy of 'hmm-te-nr.preds' against NR_TEST_FILE must exceed .853."""
    accuracy = scorer.accuracy(
        scorer.get_confusion(NR_TEST_FILE, 'hmm-te-nr.preds'))
    ok_(accuracy > .853)
コード例 #9
0
def test_bakeoff_acc_d2_6_ja_beat_the_prof():
    """Accuracy of 'avp-words-best-te.ja.preds' on JA_TEST_FILE beats .87882."""
    confusion = scorer.get_confusion(JA_TEST_FILE, 'avp-words-best-te.ja.preds')
    accuracy = scorer.accuracy(confusion)
    assert_greater(accuracy, .87882)
コード例 #10
0
def test_sp_score_d1_7():
    """Accuracy of 'avp-words.ja.preds' on JA_DEV_FILE beats .78 (~.7902)."""
    accuracy = scorer.accuracy(
        scorer.get_confusion(JA_DEV_FILE, 'avp-words.ja.preds'))
    assert_greater(accuracy, .78)
コード例 #11
0
def test_sp_score_d1_7_test():
    """Accuracy of 'avp-words-te.ja.preds' on JA_TEST_FILE beats .741 (~.7514)."""
    accuracy = scorer.accuracy(
        scorer.get_confusion(JA_TEST_FILE, 'avp-words-te.ja.preds'))
    assert_greater(accuracy, .741)
コード例 #12
0
def test_neighbor_acc_d2_5_en():
    """Accuracy of 'avp-words-neighbor.preds' on DEV_FILE beats .848 (~.858)."""
    accuracy = scorer.accuracy(
        scorer.get_confusion(DEV_FILE, 'avp-words-neighbor.preds'))
    assert_greater(accuracy, .848)
コード例 #13
0
def test_neighbor_acc_d2_5_ja():
    """Accuracy of 'avp-words-neighbor.ja.preds' on JA_DEV_FILE beats .792 (~.802)."""
    accuracy = scorer.accuracy(
        scorer.get_confusion(JA_DEV_FILE, 'avp-words-neighbor.ja.preds'))
    assert_greater(accuracy, .792)
コード例 #14
0
def test_bakeoff_acc_d2_6_en_half_credit():
    """Accuracy of 'avp-words-best.preds' on DEV_FILE must exceed .87."""
    confusion = scorer.get_confusion(DEV_FILE, 'avp-words-best.preds')
    accuracy = scorer.accuracy(confusion)
    assert_greater(accuracy, .87)
コード例 #15
0
def test_suff_feats_acc_d2_2_ja_test():
    """Accuracy of 'avp-words-suff-te.ja.preds' on JA_TEST_FILE beats .834 (~.844)."""
    accuracy = scorer.accuracy(
        scorer.get_confusion(JA_TEST_FILE, 'avp-words-suff-te.ja.preds'))
    assert_greater(accuracy, .834)
コード例 #16
0
def test_neighbor_acc_d2_5_en():
    """Accuracy of 'avp-words-neighbor.preds' on DEV_FILE beats .848 (~.858)."""
    accuracy = scorer.accuracy(
        scorer.get_confusion(DEV_FILE, 'avp-words-neighbor.preds'))
    assert_greater(accuracy, .848)
コード例 #17
0
def test_suff_feats_acc_d2_2_ja_dev():
    """Accuracy of 'avp-words-suff.ja.preds' on JA_DEV_FILE beats .872 (~.882)."""
    accuracy = scorer.accuracy(
        scorer.get_confusion(JA_DEV_FILE, 'avp-words-suff.ja.preds'))
    assert_greater(accuracy, .872)
コード例 #18
0
def test_suff_feats_acc_d2_2_en_dev():
    """Accuracy of 'avp-words-suff.preds' on DEV_FILE beats .834 (~.844)."""
    accuracy = scorer.accuracy(
        scorer.get_confusion(DEV_FILE, 'avp-words-suff.preds'))
    assert_greater(accuracy, .834)
コード例 #19
0
def test_sp_score_d1_7_test():
    """Accuracy of 'avp-words-te.ja.preds' on JA_TEST_FILE beats .741 (~.7514)."""
    accuracy = scorer.accuracy(
        scorer.get_confusion(JA_TEST_FILE, 'avp-words-te.ja.preds'))
    assert_greater(accuracy, .741)
コード例 #20
0
ファイル: test_hmm.py プロジェクト: cedebrun/gt-nlp-class
def test_nr_hmm_test_accuracy():
    """Accuracy of 'hmm-te-nr.preds' against NR_TEST_FILE must exceed .853."""
    accuracy = scorer.accuracy(
        scorer.get_confusion(NR_TEST_FILE, 'hmm-te-nr.preds'))
    ok_(accuracy > .853)
コード例 #21
0
def test_bakeoff_acc_d2_6_ja_full_credit():
    """Accuracy of 'avp-words-best.ja.preds' on JA_DEV_FILE must exceed .90."""
    confusion = scorer.get_confusion(JA_DEV_FILE, 'avp-words-best.ja.preds')
    accuracy = scorer.accuracy(confusion)
    assert_greater(accuracy, .90)
コード例 #22
0
def test_sp_score_d1_6_test():
    """Accuracy of 'avp-words-te.preds' on TEST_FILE beats .815 (~.8229)."""
    accuracy = scorer.accuracy(
        scorer.get_confusion(TEST_FILE, 'avp-words-te.preds'))
    assert_greater(accuracy, .815)
コード例 #23
0
def test_bakeoff_acc_d2_6_ja_beat_the_prof():
    """Accuracy of 'avp-words-best-te.ja.preds' on JA_TEST_FILE beats .87882."""
    confusion = scorer.get_confusion(JA_TEST_FILE, 'avp-words-best-te.ja.preds')
    accuracy = scorer.accuracy(confusion)
    assert_greater(accuracy, .87882)
コード例 #24
0
def test_suff_feats_acc_d2_2_ja_dev():
    """Accuracy of 'avp-words-suff.ja.preds' on JA_DEV_FILE beats .872 (~.882)."""
    accuracy = scorer.accuracy(
        scorer.get_confusion(JA_DEV_FILE, 'avp-words-suff.ja.preds'))
    assert_greater(accuracy, .872)
コード例 #25
0
def test_hmm_feat_acc_d3_3_en():
    """Accuracy of 'sp-hmm.preds' on DEV_FILE beats .862 (~.872)."""
    accuracy = scorer.accuracy(scorer.get_confusion(DEV_FILE, 'sp-hmm.preds'))
    assert_greater(accuracy, .862)
コード例 #26
0
def test_bakeoff_acc_d2_6_en_half_credit():
    """Accuracy of 'avp-words-best.preds' on DEV_FILE must exceed .87."""
    confusion = scorer.get_confusion(DEV_FILE, 'avp-words-best.preds')
    accuracy = scorer.accuracy(confusion)
    assert_greater(accuracy, .87)
コード例 #27
0
def test_hmm_feat_acc_d3_3_ja():
    """Accuracy of 'sp-hmm.ja.preds' on JA_DEV_FILE beats .797 (~.807)."""
    accuracy = scorer.accuracy(
        scorer.get_confusion(JA_DEV_FILE, 'sp-hmm.ja.preds'))
    assert_greater(accuracy, .797)
コード例 #28
0
def test_hmm_feat_acc_d3_3_ja():
    """Accuracy of 'sp-hmm.ja.preds' on JA_DEV_FILE beats .797 (~.807)."""
    accuracy = scorer.accuracy(
        scorer.get_confusion(JA_DEV_FILE, 'sp-hmm.ja.preds'))
    assert_greater(accuracy, .797)
コード例 #29
0
def test_bakeoff_acc_d3_4_en_half_credit():
    """Accuracy of 'sp-best.preds' on DEV_FILE must exceed .885."""
    confusion = scorer.get_confusion(DEV_FILE, 'sp-best.preds')
    accuracy = scorer.accuracy(confusion)
    assert_greater(accuracy, .885)
コード例 #30
0
ファイル: test_hmm.py プロジェクト: zyc130130/gt-nlp-class
def test_hmm_dev_accuracy():
    """Accuracy of 'hmm-dev-en.preds' against DEV_FILE must exceed .840."""
    accuracy = scorer.accuracy(
        scorer.get_confusion(DEV_FILE, 'hmm-dev-en.preds'))
    ok_(accuracy > .840)
コード例 #31
0
def test_bakeoff_acc_d3_4_en_beat_the_prof():
    """Accuracy of 'sp-best-te.preds' on TEST_FILE beats .88735.

    Same threshold as with the classification-based tagger.
    """
    confusion = scorer.get_confusion(TEST_FILE, 'sp-best-te.preds')
    accuracy = scorer.accuracy(confusion)
    assert_greater(accuracy, .88735)
コード例 #32
0
ファイル: test_hmm.py プロジェクト: zyc130130/gt-nlp-class
def test_nr_hmm_dev_accuracy():
    """Accuracy of 'hmm-dev-nr.preds' against NR_DEV_FILE must exceed .861."""
    accuracy = scorer.accuracy(
        scorer.get_confusion(NR_DEV_FILE, 'hmm-dev-nr.preds'))
    ok_(accuracy > .861)
コード例 #33
0
def test_bakeoff_acc_d3_4_ja_full_credit():
    """Accuracy of 'sp-best.ja.preds' on JA_DEV_FILE must exceed .91."""
    confusion = scorer.get_confusion(JA_DEV_FILE, 'sp-best.ja.preds')
    accuracy = scorer.accuracy(confusion)
    assert_greater(accuracy, .91)
コード例 #34
0
ファイル: test_hmm.py プロジェクト: cedebrun/gt-nlp-class
def test_hmm_dev_accuracy():
    """Accuracy of 'hmm-dev-en.preds' against DEV_FILE must exceed .840."""
    accuracy = scorer.accuracy(
        scorer.get_confusion(DEV_FILE, 'hmm-dev-en.preds'))
    ok_(accuracy > .840)
コード例 #35
0
def test_bakeoff_acc_d3_4_ja_beat_the_prof():
    """Accuracy of 'sp-best-te.ja.preds' on JA_TEST_FILE beats .879926."""
    confusion = scorer.get_confusion(JA_TEST_FILE, 'sp-best-te.ja.preds')
    accuracy = scorer.accuracy(confusion)
    assert_greater(accuracy, .879926)
コード例 #36
0
ファイル: test_hmm.py プロジェクト: cedebrun/gt-nlp-class
def test_nr_hmm_dev_accuracy():
    """Accuracy of 'hmm-dev-nr.preds' against NR_DEV_FILE must exceed .861."""
    accuracy = scorer.accuracy(
        scorer.get_confusion(NR_DEV_FILE, 'hmm-dev-nr.preds'))
    ok_(accuracy > .861)
コード例 #37
0
def test_bilstm_test_accuracy():
    """Accuracy of '../bilstm-te-en.preds' scored against DEV_FILE beats .83.

    NOTE(review): despite the "test" in the name, this scores against
    DEV_FILE — confirm that is intentional.
    """
    accuracy = scorer.accuracy(
        scorer.get_confusion(DEV_FILE, '../bilstm-te-en.preds'))
    ok_(accuracy > .83,
        "Accuracy expected: 0.83, actual:" + str(accuracy))
コード例 #38
0
def test_sp_score_d1_6():
    """Accuracy of 'avp-words.preds' on DEV_FILE beats .805 (~.8129)."""
    accuracy = scorer.accuracy(
        scorer.get_confusion(DEV_FILE, 'avp-words.preds'))
    assert_greater(accuracy, .805)
コード例 #39
0
def test_model_en_test_accuracy1():
    """Accuracy of 'bakeoff-te-en.preds' on TEST_FILE must exceed .87."""
    accuracy = scorer.accuracy(
        scorer.get_confusion(TEST_FILE, 'bakeoff-te-en.preds'))
    ok_(accuracy > .87)
コード例 #40
0
def test_sp_score_d1_7():
    """Accuracy of 'avp-words.ja.preds' on JA_DEV_FILE beats .78 (~.7902)."""
    accuracy = scorer.accuracy(
        scorer.get_confusion(JA_DEV_FILE, 'avp-words.ja.preds'))
    assert_greater(accuracy, .78)
コード例 #41
0
def test_model_nr_dev_accuracy1():
    """Accuracy of 'bakeoff-dev-nr.preds' on NR_DEV_FILE must exceed .89."""
    accuracy = scorer.accuracy(
        scorer.get_confusion(NR_DEV_FILE, 'bakeoff-dev-nr.preds'))
    ok_(accuracy > .89)
コード例 #42
0
def test_suff_feats_acc_d2_2_en_dev():
    """Accuracy of 'avp-words-suff.preds' on DEV_FILE beats .834 (~.844)."""
    accuracy = scorer.accuracy(
        scorer.get_confusion(DEV_FILE, 'avp-words-suff.preds'))
    assert_greater(accuracy, .834)
コード例 #43
0
def test_model_nr_test_accuracy1():
    """Accuracy of 'bakeoff-te-nr.preds' on NR_TEST_FILE must exceed .88."""
    accuracy = scorer.accuracy(
        scorer.get_confusion(NR_TEST_FILE, 'bakeoff-te-nr.preds'))
    ok_(accuracy > .88)
コード例 #44
0
def test_suff_feats_acc_d2_2_ja_test():
    """Accuracy of 'avp-words-suff-te.ja.preds' on JA_TEST_FILE beats .834 (~.844)."""
    accuracy = scorer.accuracy(
        scorer.get_confusion(JA_TEST_FILE, 'avp-words-suff-te.ja.preds'))
    assert_greater(accuracy, .834)
コード例 #45
0
def test_model_en_dev_accuracy1():
    """Accuracy of 'bakeoff-dev-en.preds' on DEV_FILE must exceed .88."""
    accuracy = scorer.accuracy(
        scorer.get_confusion(DEV_FILE, 'bakeoff-dev-en.preds'))
    ok_(accuracy > .88)
コード例 #46
0
def test_neighbor_acc_d2_5_ja():
    """Accuracy of 'avp-words-neighbor.ja.preds' on JA_DEV_FILE beats .792 (~.802)."""
    accuracy = scorer.accuracy(
        scorer.get_confusion(JA_DEV_FILE, 'avp-words-neighbor.ja.preds'))
    assert_greater(accuracy, .792)
コード例 #47
0
def test_bakeoff_acc_d3_4_en_beat_the_prof():
    """Accuracy of 'sp-best-te.preds' on TEST_FILE beats .88735.

    Same threshold as with the classification-based tagger.
    """
    confusion = scorer.get_confusion(TEST_FILE, 'sp-best-te.preds')
    accuracy = scorer.accuracy(confusion)
    assert_greater(accuracy, .88735)
コード例 #48
0
def test_bakeoff_acc_d2_6_ja_full_credit():
    """Accuracy of 'avp-words-best.ja.preds' on JA_DEV_FILE must exceed .90."""
    confusion = scorer.get_confusion(JA_DEV_FILE, 'avp-words-best.ja.preds')
    accuracy = scorer.accuracy(confusion)
    assert_greater(accuracy, .90)
コード例 #49
0
def test_bakeoff_acc_d3_4_ja_beat_the_prof():
    """Accuracy of 'sp-best-te.ja.preds' on JA_TEST_FILE beats .879926."""
    confusion = scorer.get_confusion(JA_TEST_FILE, 'sp-best-te.ja.preds')
    accuracy = scorer.accuracy(confusion)
    assert_greater(accuracy, .879926)
コード例 #50
0
def test_hmm_feat_acc_d3_3_en():
    """Accuracy of 'sp-hmm.preds' on DEV_FILE beats .862 (~.872)."""
    accuracy = scorer.accuracy(scorer.get_confusion(DEV_FILE, 'sp-hmm.preds'))
    assert_greater(accuracy, .862)
コード例 #51
0
def test_sp_score_d1_6():
    """Accuracy of 'avp-words.preds' on DEV_FILE beats .805 (~.8129)."""
    accuracy = scorer.accuracy(
        scorer.get_confusion(DEV_FILE, 'avp-words.preds'))
    assert_greater(accuracy, .805)
コード例 #52
0
def test_bakeoff_acc_d3_4_en_half_credit():
    """Accuracy of 'sp-best.preds' on DEV_FILE must exceed .885."""
    confusion = scorer.get_confusion(DEV_FILE, 'sp-best.preds')
    accuracy = scorer.accuracy(confusion)
    assert_greater(accuracy, .885)
コード例 #53
0
def test_sp_score_d1_6_test():
    """Accuracy of 'avp-words-te.preds' on TEST_FILE beats .815 (~.8229)."""
    accuracy = scorer.accuracy(
        scorer.get_confusion(TEST_FILE, 'avp-words-te.preds'))
    assert_greater(accuracy, .815)
コード例 #54
0
def test_bakeoff_acc_d3_4_ja_full_credit():
    """Accuracy of 'sp-best.ja.preds' on JA_DEV_FILE must exceed .91."""
    confusion = scorer.get_confusion(JA_DEV_FILE, 'sp-best.ja.preds')
    accuracy = scorer.accuracy(confusion)
    assert_greater(accuracy, .91)
コード例 #55
0
def test_ja_hmm_dev_accuracy():
    """Accuracy of 'hmm-dev-ja.preds' against JA_DEV_FILE must exceed .84."""
    accuracy = scorer.accuracy(
        scorer.get_confusion(JA_DEV_FILE, 'hmm-dev-ja.preds'))
    ok_(accuracy > .84)
コード例 #56
0
ファイル: test_bilstm.py プロジェクト: cedebrun/gt-nlp-class
def test_bilstm_test_accuracy():
    """Accuracy of 'bilstm-te-en.preds' scored against DEV_FILE beats .83.

    NOTE(review): despite the "test" in the name, this scores against
    DEV_FILE — confirm that is intentional.
    """
    accuracy = scorer.accuracy(
        scorer.get_confusion(DEV_FILE, 'bilstm-te-en.preds'))
    ok_(accuracy > .83)
コード例 #57
0
def test_ja_hmm_test_accuracy():
    """Accuracy of 'hmm-test-ja.preds' against JA_TEST_FILE must exceed .81."""
    accuracy = scorer.accuracy(
        scorer.get_confusion(JA_TEST_FILE, 'hmm-test-ja.preds'))
    ok_(accuracy > .81)