Example no. 1
0
def test_d4_2a_perc_estimate():
    """Check perceptron weights after 3 iterations on a 10-example slice."""
    global y_dv, x_tr_pruned, y_tr

    # train on a small subset so expected weights are deterministic
    weights, weight_history = perceptron.estimate_perceptron(
        x_tr_pruned[:10], y_tr[:10], 3)

    # spot-check a handful of final feature weights
    expected_final = [(('2000s', 'its'), -1),
                      (('2000s', 'what'), 1),
                      (('1980s', 'what'), 4),
                      (('1980s', 'its'), -15)]
    for feature, expected in expected_final:
        eq_(weights[feature], expected)

    # the weight recorded after the first iteration differs from the final one
    eq_(weight_history[0][('1980s', 'what')], 2)
Example no. 2
0
def test_perc_d4_2():
    """Check perceptron weights on a data subset and dev-set accuracy."""
    global y_dv, x_tr, y_tr

    # fit the perceptron for 3 iterations on the first ten examples
    weights, history = perceptron.estimate_perceptron(x_tr[:10], y_tr[:10], 3)

    # verify selected final weights
    for feature, expected in [(('worldnews', 'its'), 1),
                              (('science', 'its'), 0),
                              (('science', 'what'), 4),
                              (('worldnews', 'always'), -1)]:
        eq_(weights[feature], expected)
    # and one intermediate weight from the first iteration
    eq_(history[0][('science', 'what')], 2)

    # accuracy of the saved dev predictions must clear the threshold
    y_hat_dv = evaluation.read_predictions('perc-dev.preds')
    # i get 64.6% accuracy
    assert_greater_equal(evaluation.acc(y_hat_dv, y_dv), .62)
def test_perc_d4_2():
    """Verify perceptron training on a subset, then dev-set accuracy."""
    global y_dv, x_tr, y_tr

    # train for 3 iterations on the first 10 instances
    theta, theta_history = perceptron.estimate_perceptron(
        x_tr[:10], y_tr[:10], 3)

    # final weights for a few (label, word) features
    eq_(theta[('worldnews', 'its')], 1)
    eq_(theta[('science', 'its')], 0)
    eq_(theta[('science', 'what')], 4)
    eq_(theta[('worldnews', 'always')], -1)
    # weight snapshot taken after the first pass over the data
    eq_(theta_history[0][('science', 'what')], 2)

    # dev predictions were written by an earlier step; check accuracy
    y_hat_dv = evaluation.read_predictions('perc-dev.preds')
    # i get 64.6% accuracy
    assert_greater_equal(evaluation.acc(y_hat_dv, y_dv), .62)
Example no. 4
0
    'reddit-train.csv',  #filename
    'subreddit',  #label field
    preprocessor=preproc.tokenize_and_downcase)  #your preprocessor

y_dv, x_dv = preproc.read_data(
    'reddit-dev.csv',  #filename
    'subreddit',  #label field
    preprocessor=preproc.tokenize_and_downcase)  #your preprocessor
y_te, x_te = preproc.read_data(
    'reddit-test.csv',  #filename
    'subreddit',  #label field
    preprocessor=preproc.tokenize_and_downcase)  #your preprocessor

corpus_counts = preproc.get_corpus_counts(x_tr)

vocab = [word for word, count in corpus_counts.iteritems() if count > 10]
print len(vocab)
print len(x_tr[0])

x_tr = [{key: val
         for key, val in x_i.iteritems() if key in vocab} for x_i in x_tr]
x_dv = [{key: val
         for key, val in x_i.iteritems() if key in vocab} for x_i in x_dv]
x_te = [{key: val
         for key, val in x_i.iteritems() if key in vocab} for x_i in x_te]

from gtnlplib import perceptron
reload(perceptron)

theta_perc, theta_perc_history = perceptron.estimate_perceptron(x_tr, y_tr, 20)