Example #1
def find_best_smoother(x_tr, y_tr, x_dv, y_dv, smoothers):
    """find the smoothing value that gives the best accuracy on the dev data

    :param x_tr: training instances
    :param y_tr: training labels
    :param x_dv: dev instances
    :param y_dv: dev labels
    :param smoothers: list of smoothing values to try
    :returns: best smoothing value, scores of all smoothing values
    :rtype: float, dict

    """
    labels = set(
        [u'worldnews', u'science', u'askreddit', u'iama', u'todayilearned'])
    best_acc = 0
    best_smoother = None
    scores = {}
    for smoothing in smoothers:
        theta_nb = estimate_nb(x_tr, y_tr, smoothing)
        y_hat = clf_base.predict_all(x_dv, theta_nb, labels)
        accuracy = evaluation.acc(y_hat, y_dv)
        print("accuracy:", accuracy)
        if accuracy > best_acc:
            best_acc = accuracy
            best_smoother = smoothing
        scores[smoothing] = accuracy
    # return the smoothing value itself, as the docstring promises,
    # rather than the best accuracy
    return best_smoother, scores
Example #2
def find_best_smoother(x_tr, y_tr, x_dv, y_dv, smoothers):
    """
    find the smoothing value that gives the best accuracy on the dev data

    :param x_tr: training instances
    :param y_tr: training labels
    :param x_dv: dev instances
    :param y_dv: dev labels
    :param smoothers: list of smoothing values
    :returns: best smoothing value, dev-set accuracy of each smoothing value
    :rtype: float, dict

    """

    labels = list(set(y_tr))

    best_acc = 0
    best_smoother = None
    scores = {}

    for smoother in smoothers:
        theta_i = estimate_nb(x_tr, y_tr, smoother)
        y_hat = clf_base.predict_all(x_dv, theta_i, labels)
        acc = evaluation.acc(y_hat, y_dv)
        scores[smoother] = acc
        if acc > best_acc:
            best_acc = acc
            best_smoother = smoother

    return best_smoother, scores
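A minimal usage sketch for the find_best_smoother variants above, assuming estimate_nb, clf_base, and evaluation are importable as in these examples; the toy data and smoothing grid are invented for illustration:

from collections import Counter

# Hypothetical bag-of-words instances and labels; any feature -> count
# mapping works, since estimate_nb and clf_base.predict_all (assumed from
# the surrounding examples) only consume dict-like feature counts.
x_tr = [Counter('what if this works'.split()),
        Counter('its always news'.split())]
y_tr = ['science', 'worldnews']
x_dv = [Counter('what if'.split())]
y_dv = ['science']

best_smoother, scores = find_best_smoother(
    x_tr, y_tr, x_dv, y_dv, smoothers=[0.001, 0.01, 0.1, 1.0])
print('best smoothing value:', best_smoother)
print('dev accuracy per smoothing value:', scores)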
Example #3
def test_lr_d5_3_test():
    # NOTE! This test is for the TAs to run
    # You cannot pass this test without the true test labels.
    # This is a sanity check to make sure your solution for 5.3 is not too crazy

    global y_te
    y_hat_te = evaluation.read_predictions('lr-best-test.preds')
    assert_greater_equal(evaluation.acc(y_hat_te,y_te),.63)
Example #5
def test_clf_base_d2_3():
    global x_dv, y_dv, y_te, labels

    y_hat = clf_base.predict_all(x_dv,hand_weights.theta_hand,labels)
    assert_greater_equal(evaluation.acc(y_hat,y_dv),.41)

    # just make sure the file is there
    y_hat_te = evaluation.read_predictions('hand-test.preds')
    eq_(len(y_hat_te),len(y_te))
Example #7
def test_perc_d4_3_test():
    # NOTE! This test is for the TAs to run
    # You cannot pass this test without the true test labels.
    # This is a sanity check to make sure your solution for 4.3 is not too crazy

    global y_te
    y_hat_te = evaluation.read_predictions('avp-test.preds')
    # i get 66.8% accuracy
    assert_greater_equal(evaluation.acc(y_hat_te,y_te),.645)
Example #9
def train_model(loss,
                model,
                X_tr_var,
                Y_tr_var,
                num_its=200,
                X_dv_var=None,
                Y_dv_var=None,
                status_frequency=10,
                optim_args={
                    'lr': 0.002,
                    'momentum': 0
                },
                param_file='best.params'):

    # initialize optimizer
    optimizer = optim.SGD(model.parameters(), **optim_args)

    losses = []
    accuracies = []
    acc = None  # last dev accuracy; stays None when no dev data is given

    for epoch in range(num_its):
        # set gradient to zero
        optimizer.zero_grad()
        # run model forward to produce loss
        output = loss.forward(model.forward(X_tr_var), Y_tr_var)
        # backpropagate and train
        output.backward()
        optimizer.step()

        losses.append(output.item())

        # write parameters if this is the best epoch yet
        if X_dv_var is not None:
            # run forward on dev data
            _, Y_hat = model.forward(X_dv_var).max(dim=1)
            # compute dev accuracy
            acc = evaluation.acc(Y_hat.data.numpy(), Y_dv_var.data.numpy())
            # save
            if len(accuracies) == 0 or acc > max(accuracies):
                state = {
                    'state_dict': model.state_dict(),
                    'epoch': len(accuracies) + 1,
                    'accuracy': acc
                }
                torch.save(state, param_file)
            accuracies.append(acc)

        # print status message if desired
        if status_frequency > 0 and epoch % status_frequency == 0:
            print("Epoch " + str(epoch + 1) + ": Dev Accuracy: " + str(acc))

    # load parameters of best model
    checkpoint = torch.load(param_file)
    model.load_state_dict(checkpoint['state_dict'])

    return model, losses, accuracies
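A hedged usage sketch for the train_model above, assuming evaluation.acc returns the fraction of matching entries between two NumPy label arrays (as the tests in these examples suggest); the model, data, and hyperparameters are invented for illustration:

import torch
import torch.nn as nn

# Toy classification setup: 100 train / 20 dev points, 5 features, 3 classes.
torch.manual_seed(0)
X_tr_var = torch.randn(100, 5)
Y_tr_var = torch.randint(0, 3, (100,))
X_dv_var = torch.randn(20, 5)
Y_dv_var = torch.randint(0, 3, (20,))

model = nn.Linear(5, 3)       # minimal linear classifier
loss = nn.CrossEntropyLoss()  # raw scores vs. integer class labels

model, losses, accuracies = train_model(
    loss, model, X_tr_var, Y_tr_var,
    num_its=50, X_dv_var=X_dv_var, Y_dv_var=Y_dv_var,
    param_file='toy.params')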
Example #10
def test_d2_2_predict():
    global x_tr_pruned, x_dv_pruned, y_dv

    y_hat,scores = clf_base.predict(x_tr_pruned[0],hand_weights.theta_hand,labels)
    eq_(scores['pre-1980'],0.1)
    assert_almost_equals(scores['2000s'],1.3,places=5)
    eq_(y_hat,'2000s')
    eq_(scores['1980s'],0.0)

    y_hat = clf_base.predict_all(x_dv_pruned,hand_weights.theta_hand,labels)
    assert_almost_equals(evaluation.acc(y_hat,y_dv),.3422222, places=5)
Example #11
def test_avp_d4_3():
    global y_dv, x_tr, y_tr

    # run on a subset of data
    theta_avp,theta_avp_history = perceptron.estimate_avg_perceptron(x_tr[:10],y_tr[:10],3)
    assert_almost_equals(theta_avp[('science','what')],3.2258,places=2)
    assert_almost_equals(theta_avp[('science','its')],0,places=2)
    assert_almost_equals(theta_avp[('worldnews','its')],0.871,places=2)
    
    y_hat_dv = evaluation.read_predictions('avp-dev.preds')
    # i get 66.4% accuracy
    assert_greater_equal(evaluation.acc(y_hat_dv,y_dv),.64)
Example #13
def find_best_smoother(x_tr, y_tr, x_dv, y_dv, smoothers):
    """find the smoothing value that gives the best accuracy on the dev data
    """
    scores = {}
    labels = set(y_tr)
    for s in smoothers:
        theta_nb = estimate_nb(x_tr, y_tr, s)
        y_hat = clf_base.predict_all(x_dv, theta_nb, labels)
        scores[s] = evaluation.acc(y_hat, y_dv)
    # np.argmax(scores.values()) breaks under Python 3; taking the
    # max-accuracy key directly avoids the numpy round-trip entirely
    best = max(scores, key=scores.get)
    return best, scores
Example #14
def test_lr_d5_2():
    global x_tr, y_tr, y_dv, y_te

    # run on a subset of data
    theta_lr,theta_lr_hist = logreg.estimate_logreg(x_tr[:10],y_tr[:10],3)
    assert_almost_equals(theta_lr[('science','what')],.000402,places=4)
    assert_almost_equals(theta_lr[('iama', 'missile')],-0.00031832285759249263,places=4)
    assert_almost_equals(theta_lr[('iama',constants.OFFSET)],.00045298,places=4)
    assert_almost_equals(theta_lr[('askreddit',constants.OFFSET)],0.,places=4)

    # dev set accuracy
    y_hat_dv = evaluation.read_predictions('lr-dev.preds')
    assert_greater_equal(evaluation.acc(y_hat_dv,y_dv),.595)
Example #16
def test_perc_d4_2():
    global y_dv, x_tr, y_tr

    # run on a subset of data
    theta_perc,theta_perc_history = perceptron.estimate_perceptron(x_tr[:10],y_tr[:10],3)
    eq_(theta_perc[('worldnews','its')],1)
    eq_(theta_perc[('science','its')],0)
    eq_(theta_perc[('science','what')],4)
    eq_(theta_perc[('worldnews','always')],-1)
    eq_(theta_perc_history[0][('science','what')],2)
    
    y_hat_dv = evaluation.read_predictions('perc-dev.preds')
    # i get 64.6% accuracy
    assert_greater_equal(evaluation.acc(y_hat_dv,y_dv),.62)
Example #19
def test_avp_d4_3():
    global y_dv, x_tr, y_tr

    theta_avp,theta_avp_history = perceptron.estimate_avg_perceptron(x_tr[:10],y_tr[:10],3)
    # with t=0 initialization
    #assert_almost_equals(theta_avp[('science','what')],3.2,places=1)
    # with t=1 initialization
    assert_almost_equals(theta_avp[('science','what')],3.2258,places=1)
    assert_almost_equals(theta_avp[('science','its')],0,places=2)

    # with t=0 initialization
    #assert_almost_equals(theta_avp[('worldnews','its')],0.866,places=1)
    # with t=1 initialization
    assert_almost_equals(theta_avp[('worldnews','its')],0.871,places=1)
    
    y_hat_dv = evaluation.read_predictions('avp-dev.preds')
    # i get 66.4% accuracy
    assert_greater_equal(evaluation.acc(y_hat_dv,y_dv),.64)
Example #20
def find_best_smoother(x_tr, y_tr, x_dv, y_dv, smoothers):
    '''
    find the smoothing value that gives the best accuracy on the dev data

    :param x_tr: training instances
    :param y_tr: training labels
    :param x_dv: dev instances
    :param y_dv: dev labels
    :param smoothers: list of smoothing values
    :returns: best smoothing value, dev-set accuracy of each smoothing value
    :rtype: float, dict

    '''
    score = {}
    for smoother in smoothers:
        theta_nb = estimate_nb(x_tr, y_tr, smoother)
        y_hat = clf_base.predict_all(x_dv, theta_nb, set(y_tr))
        score[smoother] = evaluation.acc(y_hat, y_dv)
    return clf_base.argmax(score), score
Example #21
def find_best_smoother(x_tr, y_tr, x_dv, y_dv, smoothers):
    """find the smoothing value that gives the best accuracy on the dev data

    :param x_tr: training instances
    :param y_tr: training labels
    :param x_dv: dev instances
    :param y_dv: dev labels
    :param smoothers: list of smoothing values to try
    :returns: best smoothing value, scores of all smoothing values
    :rtype: float, dict

    """
    smoother_acc = {}
    labels = set(y_dv)
    for smoother in smoothers:
        theta = estimate_nb(x_tr, y_tr, smoother)
        y_hat = clf_base.predict_all(x_dv, theta, labels)
        smoother_acc[smoother] = evaluation.acc(y_hat, y_dv)

    # dict.iteritems() is Python 2 only; items() works in Python 3
    argmax = lambda x: max(x.items(), key=lambda y: y[1])[0]
    return argmax(smoother_acc), smoother_acc
Example #22
def find_best_smoother(x_tr, y_tr, x_dv, y_dv, smoothers):
    '''
    find the smoothing value that gives the best accuracy on the dev data

    :param x_tr: training instances
    :param y_tr: training labels
    :param x_dv: dev instances
    :param y_dv: dev labels
    :param smoothers: list of smoothing values
    :returns: best smoothing value, dev-set accuracy of each smoothing value
    :rtype: float, dict

    '''
    accuracy = {}
    genres = set(y_dv)
    for smoother in smoothers:
        accuracy[smoother] = evaluation.acc(
            clf_base.predict_all(x_dv, estimate_nb(x_tr, y_tr, smoother),
                                 genres), y_dv)

    best_smoother = clf_base.argmax(accuracy)
    return best_smoother, accuracy
Example #23
def find_best_smoother(x_tr_pruned, y_tr, x_dv_pruned, y_dv, smoothers):
    '''
    find the smoothing value that gives the best accuracy on the dev data

    :param x_tr: training instances
    :param y_tr: training labels
    :param x_dv: dev instances
    :param y_dv: dev labels
    :param smoothers: list of smoothing values
    :returns: 1) best smoothing value, 2) a dictionary of smoothing values and dev set accuracy.
    :rtype: 1) float, 2) dictionary

    '''
    smther_dict = {}
    labels = set(y_tr)
    for x in smoothers:
        theta_nb = estimate_nb(x_tr_pruned, y_tr, x)
        y_hat = clf_base.predict_all(x_dv_pruned, theta_nb, labels)
        smther_dict[x] = evaluation.acc(y_hat, y_dv)
    # pick the smoothing value with the highest dev-set accuracy
    best_smoother = max(smther_dict.keys(), key=(lambda k: smther_dict[k]))

    return best_smoother, smther_dict
Example #24
def test_d7_3_bakeoff_dev4():
    global Y_dv_var
    acc = evaluation.acc(np.load('bakeoff-dev.preds.npy'), Y_dv_var.data.numpy())
    assert_greater_equal(acc, 0.55)
Example #25
def test_feats_d7_1():
    global y_dv
    y_hat_dv = evaluation.read_predictions('bakeoff-dev.preds')
    assert_greater_equal(evaluation.acc(y_hat_dv,y_dv),.78)
Example #26
def test_feats_d7_1_test():
    global y_te
    y_hat_te = evaluation.read_predictions('bakeoff-test.preds')
    assert_greater_equal(evaluation.acc(y_hat_te,y_te),.722)
Example #27
def test_lr_d5_3():
    global y_dv
    y_hat_dv = evaluation.read_predictions('lr-best-dev.preds')
    assert_greater_equal(evaluation.acc(y_hat_dv,y_dv),.66)
Example #28
def test_d5_5_accuracy():
    global Y_dv_var
    acc = evaluation.acc(np.load('logreg-es-dev.preds.npy'),Y_dv_var.data.numpy())
    assert_greater_equal(acc,0.5)
Example #31
def test_d3_3b_nb():
    global y_dv
    y_hat_dv = evaluation.read_predictions('nb-dev.preds')
    assert_greater_equal(evaluation.acc(y_hat_dv, y_dv), .46)
Example #33
def test_d4_2b_perc_accuracy():
    global y_dv
    # i get 43% accuracy
    y_hat_dv = evaluation.read_predictions('perc-dev.preds')
    assert_greater_equal(evaluation.acc(y_hat_dv, y_dv), .43)
Example #38
def train_model(loss,
                model,
                X_tr,
                Y_tr,
                word_to_ix,
                tag_to_ix,
                X_dv=None,
                Y_dv=None,
                num_its=50,
                status_frequency=10,
                optim_args={
                    'lr': 0.1,
                    'momentum': 0
                },
                param_file='best.params'):

    #initialize optimizer
    optimizer = optim.SGD(model.parameters(), **optim_args)

    losses = []
    accuracies = []

    for epoch in range(num_its):

        loss_value = 0
        count1 = 0

        for X, Y in zip(X_tr, Y_tr):
            X_tr_var = prepare_sequence(X, word_to_ix)
            Y_tr_var = prepare_sequence(Y, tag_to_ix)

            # set gradient to zero
            optimizer.zero_grad()

            lstm_feats = model.forward(X_tr_var)
            output = loss(lstm_feats, Y_tr_var)

            output.backward()
            optimizer.step()
            loss_value += output.item()  # .data[0] fails on 0-dim tensors in PyTorch >= 0.4
            count1 += 1

        losses.append(loss_value / count1)

        # write parameters if this is the best epoch yet
        acc = 0
        if X_dv is not None and Y_dv is not None:
            acc = 0
            count2 = 0
            for Xdv, Ydv in zip(X_dv, Y_dv):

                X_dv_var = prepare_sequence(Xdv, word_to_ix)
                Y_dv_var = prepare_sequence(Ydv, tag_to_ix)
                # run forward on dev data
                Y_hat = model.predict(X_dv_var)

                # map tag strings to indices so the arrays are comparable
                Yhat_ix = np.array([tag_to_ix[yhat] for yhat in Y_hat])
                Ydv_ix = np.array([tag_to_ix[ydv] for ydv in Ydv])

                # accumulate dev accuracy, weighted by sequence length
                acc += evaluation.acc(Yhat_ix, Ydv_ix) * len(Xdv)
                count2 += len(Xdv)
            acc /= count2
            if len(accuracies) == 0 or acc > max(accuracies):
                state = {
                    'state_dict': model.state_dict(),
                    'epoch': len(accuracies) + 1,
                    'accuracy': acc
                }
                torch.save(state, param_file)
            accuracies.append(acc)
        # print status message if desired
        if status_frequency > 0 and epoch % status_frequency == 0:
            print("Epoch " + str(epoch + 1) + ": Dev Accuracy: " + str(acc))
    return model, losses, accuracies
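A usage sketch for the sequence-tagging trainer above, under stated assumptions: prepare_sequence maps a token list to a tensor of indices, the model's predict method returns tag strings, and evaluation.acc compares NumPy arrays; TaggerModel and the toy data below are hypothetical stand-ins:

import torch.nn as nn

# Invented vocabulary and a single toy tagged sentence.
word_to_ix = {'the': 0, 'dog': 1, 'barks': 2}
tag_to_ix = {'DET': 0, 'NOUN': 1, 'VERB': 2}
X_tr = [['the', 'dog', 'barks']]
Y_tr = [['DET', 'NOUN', 'VERB']]

model = TaggerModel(len(word_to_ix), len(tag_to_ix))  # assumed class
loss = nn.CrossEntropyLoss()

model, losses, accuracies = train_model(
    loss, model, X_tr, Y_tr, word_to_ix, tag_to_ix,
    X_dv=X_tr, Y_dv=Y_tr, num_its=5)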