Code example #1
File: do_forest.py Project: gitforhf/rt_rank
ntra = len(traintargets)
ntes = len(testtargets)
totalpreds = [0] * len(targets)

for itr in range(0, ITER):
    # write target
    for i in range(0, ntra):
        p.stdin.write('%2.4f\n' % traintargets[i])

    # read prediction
    for i in range(0, len(totalpreds)):
        l = p.stdout.readline()
        try:
            totalpreds[i] += float(l.split(' ', 1)[0])
        except:
            print l
            sys.exit(1)

    # get and store results
    preds = [float(i) / (itr + 1) for i in totalpreds]
    [TRrmse, TRerr, TRndcg] = evaluate(preds[0:ntra], trainqueries,
                                       traintargets)
    [TErmse, TEerr, TEndcg] = evaluate(preds[ntra:], testqueries, testtargets)
    print >> sys.stderr, "%i,%2.5f,%2.5f,%2.5f,%2.5f,%2.5f,%2.5f" % (
        itr, TRrmse, TRerr, TRndcg, TErmse, TEerr, TEndcg)
    p.stdin.flush()

# print preds to stdout
print '\n'.join([str(i) for i in preds])
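The snippet above assumes a child process p that reads training targets on stdin and streams one prediction per line back on stdout (the external tree learner). How that process is launched is not part of this excerpt; a minimal sketch using Python's subprocess module might look like the following, where the binary name and its arguments are hypothetical placeholders, not the actual rt_rank command line.

import subprocess

# Hypothetical command line -- the real binary name and flags are not
# shown in the excerpt above.
p = subprocess.Popen(
    ['./rt_forest', 'train.dat', 'test.dat'],
    stdin=subprocess.PIPE,
    stdout=subprocess.PIPE,
    universal_newlines=True,   # text-mode pipes, so the '%2.4f\n' strings work
)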
Code example #2
File: do_boost-weighted.py Project: HeBing/fcat
W = [a for a in open(WEIGHTDATA, 'r').read().split('\n') if len(a) > 0]
weights = [float(a) for a in W]

ntra = len(traintargets)
ntst = len(testtargets)
nall = ntra + ntst

preds = [0] * nall
# Read initial cumulative predictions, if available
if INITPRED != '':
    preds = [float(l.split(' ', 1)[0]) for l in open(INITPRED)]

# Run boosting
for itr in range(ITER):
    if (itr % 10) == 0:
        [TRrmse, TRerr, TRndcg] = evaluate(preds[0:ntra], trainqueries,
                                           traintargets)
        [TErmse, TEerr, TEndcg] = evaluate(preds[ntra:nall], testqueries,
                                           testtargets)
        print >> sys.stderr, "%i,%2.5f,%2.5f,%2.5f,%2.5f,%2.5f,%2.5f" % (
            itr, TRrmse, TRerr, TRndcg, TErmse, TEerr, TEndcg)
        sys.stdout.flush()

    # write residual targets, then per-example weights
    for i in range(ntra):
        p.stdin.write('%2.4f\n' % (traintargets[i] - preds[i]))
    for i in range(ntra):
        p.stdin.write('%2.4f\n' % weights[i])
    p.stdin.flush()

    # read prediction and accumulate the damped update
    for i in range(nall):
        l = p.stdout.readline()
        preds[i] += STEPSIZE * float(l.split(' ', 1)[0])

# print predictions to stdout
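For readers following the update rule rather than the plumbing: each round, the loop above sends the weighted residuals traintargets[i] - preds[i] to the external learner and adds STEPSIZE times its output back into preds, i.e. a gradient-boosting-style additive update. The toy below (not from the fcat sources) illustrates the same accumulation with a trivial stand-in base learner, the weighted mean of the residuals, in place of the external tree learner.

def toy_boost(targets, weights, iters=100, stepsize=0.1):
    # Additive model: preds accumulates stepsize * (base learner output).
    preds = [0.0] * len(targets)
    for _ in range(iters):
        # Residuals play the role of the targets written to p.stdin above.
        resid = [t - p for t, p in zip(targets, preds)]
        # Stand-in base learner: weighted mean of the residuals.
        wmean = sum(w * r for w, r in zip(weights, resid)) / sum(weights)
        preds = [p + stepsize * wmean for p in preds]
    return preds

print(toy_boost([1.0, 2.0, 3.0], [1.0, 1.0, 2.0]))   # converges toward the weighted mean 2.25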
Code example #3
File: evaluate.py Project: vangogh0318/algorithm
# readpreds
def readpreds(filename):
    preds = []
    for line in open(filename):
        preds += [float(line.strip())]
    return preds


# if -h in args, print help
if "-h" in argv:
    printhelp()
    exit()

# check args
if len(argv) != 3:
    print("illegal usage, incorrect number of arguments")
    printhelp()
    exit()

# get args
testfile = argv[1]
predfile = argv[2]

# read targets and predictions
labels, queries = readtargets(testfile)
preds = readpreds(predfile)

# compute and print metrics
rmse, err, ndcg = evaluate(preds, queries, labels)
print("rmse: %f, err: %f, ndcg: %f" % (rmse, err, ndcg))
Code example #4
File: evaluate.py Project: KaenChan/rank-elm
# readpreds
def readpreds(filename):
    preds = []
    for line in open(filename):
        preds += [float(line.strip())]
    return preds


# if -h in args, print help
if '-h' in argv:
    printhelp()
    exit()

# check args
if len(argv) != 3:
    print('illegal usage, incorrect number of arguments')
    printhelp()
    exit()

# get args    
testfile = argv[1]
predfile = argv[2]

# read targets and predictions
labels,queries = readtargets(testfile)
preds = readpreds(predfile)

max_grade = max(labels)

# compute and print metrics
rmse,err,ndcg = evaluate(preds,queries,labels,max_grade=max_grade)
print("rmse: %f, err: %f, ndcg: %f" % (rmse,err,ndcg))
Code example #5
# readpreds
def readpreds(filename):
    preds = []
    for line in open(filename):
        preds += [float(line.strip())]
    return preds


# if -h in args, print help
if '-h' in argv:
    printhelp()
    exit()

# check args
if len(argv) != 3:
    print('illegal usage, incorrect number of arguments')
    printhelp()
    exit()

# get args
testfile = argv[1]
predfile = argv[2]

# read targets and predictions
labels, queries = readtargets(testfile)
preds = readpreds(predfile)

# compute and print metrics
rmse, err, ndcg = evaluate(preds, queries, labels)
print("rmse: %f, err: %f, ndcg: %f" % (rmse, err, ndcg))
Code example #6
File: evaluate.py Project: yhexie/robust-elm-irls
# readpreds
def readpreds(filename):
    preds = []
    for line in open(filename):
        preds += [float(line.strip())]
    return preds


# if -h in args, print help
if '-h' in argv:
    printhelp()
    exit()

# check args
if len(argv) != 3:
    print('illegal usage, incorrect number of arguments')
    printhelp()
    exit()

# get args
testfile = argv[1]
predfile = argv[2]

# read targets and predictions
labels, queries = readtargets(testfile)
preds = readpreds(predfile)

max_grade = max(labels)

# compute and print metrics
rmse, err, ndcg = evaluate(preds, queries, labels, max_grade=max_grade)
print("rmse: %f, err: %f, ndcg: %f" % (rmse, err, ndcg))