Example #1
from collections import defaultdict


def trainPerceptron(N_its, inst_generator, labels, outfile, devkey):
    tr_acc = [None]*N_its #holder for training accuracy
    dv_acc = [None]*N_its #holder for dev accuracy
    weights = defaultdict(float) 
    for i in xrange(N_its):
        weights,tr_err,tr_tot = oneItPerceptron(inst_generator,weights,labels) #call your function for a single iteration
        confusion = evalClassifier(weights,outfile, devkey) #evaluate on dev data
        dv_acc[i] = scorer.accuracy(confusion) #compute accuracy
        tr_acc[i] = 1. - tr_err/float(tr_tot) #compute training accuracy from output
        print i,'dev: ',dv_acc[i],'train: ',tr_acc[i]
    return weights, tr_acc, dv_acc
Example #2
from collections import defaultdict


def trainPerceptron(N_its, inst_generator, labels, outfile, devkey):
    tr_acc = [None] * N_its  #holder for training accuracy
    dv_acc = [None] * N_its  #holder for dev accuracy
    weights = defaultdict(float)
    for i in xrange(N_its):
        weights, tr_err, tr_tot = oneItPerceptron(
            inst_generator, weights,
            labels)  #call your function for a single iteration
        confusion = evalClassifier(weights, outfile,
                                   devkey)  #evaluate on dev data
        dv_acc[i] = scorer.accuracy(confusion)  #compute accuracy
        tr_acc[i] = 1. - tr_err / float(
            tr_tot)  #compute training accuracy from output
        print i, 'dev: ', dv_acc[i], 'train: ', tr_acc[i]
    return weights, tr_acc, dv_acc
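
Examples #1 and #2 both delegate the actual perceptron pass to a helper oneItPerceptron(inst_generator, weights, labels) that is not shown. From the call site it must return the updated weights together with the mistake count and the number of instances processed. A minimal sketch, assuming each training instance is a (feature_counts, true_label) pair and that weights are keyed by (label, feature) tuples; the helper argmaxScore below is hypothetical:

def argmaxScore(counts, weights, labels):
    # Hypothetical helper: score each candidate label as the dot product of the
    # instance's feature counts with that label's weights, return the best label.
    scores = {}
    for label in labels:
        scores[label] = sum(weights[(label, feat)] * count
                            for feat, count in counts.items())
    return max(scores, key=scores.get)


def oneItPerceptron(inst_generator, weights, labels):
    # One pass of the plain perceptron over the training data. Returns the
    # updated weights, the number of mistakes, and the number of instances seen.
    # weights is assumed to be a defaultdict(float), as in the training loops above.
    errors, total = 0, 0
    for counts, true_label in inst_generator:
        total += 1
        pred_label = argmaxScore(counts, weights, labels)
        if pred_label != true_label:
            errors += 1
            # Standard perceptron update: boost the true label's features and
            # penalize the predicted label's features.
            for feat, count in counts.items():
                weights[(true_label, feat)] += count
                weights[(pred_label, feat)] -= count
    return weights, errors, total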
Example #3
from collections import defaultdict


def trainAvgPerceptron(N_its, inst_generator, labels, outfile, devkey):
    tr_acc = [None] * N_its  # holder for training accuracy
    dv_acc = [None] * N_its  # holder for dev accuracy
    weights = defaultdict(float)
    avg_weights = defaultdict(float)
    wsum = defaultdict(float)
    cur_T = 1
    for i in xrange(N_its):
        weights, wsum, tr_err, tr_tot = oneItAvgPerceptron(
            inst_generator, weights, wsum, labels, cur_T
        )  # call your function for a single iteration
        cur_T += tr_tot
        for w in wsum:
            avg_weights[w] = weights[w] - wsum[w] / float(cur_T)
        confusion = evalClassifier(avg_weights, outfile, devkey)  # evaluate on dev data
        dv_acc[i] = scorer.accuracy(confusion)  # compute accuracy
        tr_acc[i] = 1.0 - tr_err / float(tr_tot)  # compute training accuracy from output
        print i, "dev: ", dv_acc[i], "train: ", tr_acc[i]

    return avg_weights, tr_acc, dv_acc
Example #4
from collections import defaultdict


def trainAvgPerceptron(N_its, inst_generator, labels, outfile, devkey):
    tr_acc = [None] * N_its  #holder for training accuracy
    dv_acc = [None] * N_its  #holder for dev accuracy
    weights = defaultdict(float)
    avg_weights = defaultdict(float)
    wsum = defaultdict(float)
    cur_T = 1
    for i in xrange(N_its):
        weights, wsum, tr_err, tr_tot = oneItAvgPerceptron(
            inst_generator, weights, wsum, labels,
            cur_T)  #call your function for a single iteration
        cur_T += tr_tot
        for w in wsum:
            avg_weights[w] = weights[w] - wsum[w] / float(cur_T)
        confusion = evalClassifier(avg_weights, outfile,
                                   devkey)  #evaluate on dev data
        dv_acc[i] = scorer.accuracy(confusion)  #compute accuracy
        tr_acc[i] = 1. - tr_err / float(
            tr_tot)  #compute training accuracy from output
        print i, 'dev: ', dv_acc[i], 'train: ', tr_acc[i]

    return avg_weights, tr_acc, dv_acc
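
Examples #3 and #4 additionally rely on oneItAvgPerceptron(inst_generator, weights, wsum, labels, cur_T), which performs the same pass but also accumulates timestep * update into wsum. That is what makes the lazy averaging in the caller work: the current weights are the sum of all updates, so subtracting wsum[w] / cur_T recovers (up to an off-by-one) the average of the weight vectors seen at every timestep without storing them. A sketch under the same representation assumptions as above, reusing the hypothetical argmaxScore:

def oneItAvgPerceptron(inst_generator, weights, wsum, labels, cur_T):
    # One pass of the averaged perceptron. On every mistake, besides the usual
    # update to weights, it adds (timestep * update) to wsum so the caller can
    # recover the averaged weights lazily as weights[w] - wsum[w] / cur_T.
    errors, total = 0, 0
    t = cur_T  # running instance counter, carried across epochs by the caller
    for counts, true_label in inst_generator:
        total += 1
        pred_label = argmaxScore(counts, weights, labels)
        if pred_label != true_label:
            errors += 1
            for feat, count in counts.items():
                weights[(true_label, feat)] += count
                weights[(pred_label, feat)] -= count
                wsum[(true_label, feat)] += t * count
                wsum[(pred_label, feat)] -= t * count
        t += 1
    return weights, wsum, errors, total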