Example #1
#imports assumed by this and the following train_rls snippets
#(rlscore module paths as used in the rlscore tutorials; adjust if they
#differ in your installed version)
import numpy as np
from rlscore.learner import RLS
from rlscore.measure import auc
from rlscore.utilities.reader import read_svmlight

def train_rls():
    X_train, Y_train, foo = read_svmlight("a1a.t")
    X_test, Y_test, foo = read_svmlight("a1a", X_train.shape[1])
    learner = RLS(X_train, Y_train)
    best_regparam = None
    best_auc = 0.
    #exponential grid of possible regparam values
    log_regparams = range(-15, 16)
    for log_regparam in log_regparams:
        regparam = 2.**log_regparam
        #RLS is re-trained with the new regparam, this
        #is very fast due to computational short-cut
        learner.solve(regparam)
        #Leave-one-out cross-validation predictions, this is fast due to
        #computational short-cut
        P_loo = learner.leave_one_out()
        acc = auc(Y_train, P_loo)
        print("regparam 2**%d, loo-auc %f" %(log_regparam, acc))
        if acc > best_auc:
            best_auc = acc
            best_regparam = regparam
    learner.solve(best_regparam)
    P_test = learner.predict(X_test)
    print("best regparam %f with loo-auc %f" %(best_regparam, best_auc)) 
    print("test set auc %f" %auc(Y_test, P_test))
Example #2
def train_rls():
    X_train, Y_train, foo = read_svmlight("a1a.t")
    X_test, Y_test, foo = read_svmlight("a1a", X_train.shape[1])
    learner = RLS(X_train, Y_train)
    best_regparam = None
    best_auc = 0.0
    # exponential grid of possible regparam values
    log_regparams = range(-15, 16)
    for log_regparam in log_regparams:
        regparam = 2.0 ** log_regparam
        # RLS is re-trained with the new regparam, this
        # is very fast due to computational short-cut
        learner.solve(regparam)
        # Leave-one-out cross-validation predictions, this is fast due to
        # computational short-cut
        P_loo = learner.leave_one_out()
        acc = auc(Y_train, P_loo)
        print("regparam 2**%d, loo-auc %f" % (log_regparam, acc))
        if acc > best_auc:
            best_auc = acc
            best_regparam = regparam
    learner.solve(best_regparam)
    P_test = learner.predict(X_test)
    print("best regparam %f with loo-auc %f" % (best_regparam, best_auc))
    print("test set auc %f" % auc(Y_test, P_test))
Example #3
def train_rls():
    X_train, Y_train, foo = read_svmlight("a1a.t")
    X_test, Y_test, foo = read_svmlight("a1a", X_train.shape[1])
    lpo_aucs = []
    test_aucs = []
    for i in range(1000):
        X_small = X_train[i * 30:i * 30 + 30]
        Y_small = Y_train[i * 30:i * 30 + 30]
        pairs_start = []
        pairs_end = []
        for j in range(len(Y_small)):
            for k in range(len(Y_small)):
                if Y_small[j] == 1. and Y_small[k] == -1.:
                    pairs_start.append(j)
                    pairs_end.append(k)
        learner = RLS(X_small, Y_small)
        pairs_start = np.array(pairs_start)
        pairs_end = np.array(pairs_end)
        P_start, P_end = learner.leave_pair_out(pairs_start, pairs_end)
        #fraction of correctly ordered (positive, negative) pairs, ties count as 0.5
        lpo_a = np.mean((P_start > P_end) + 0.5 * (P_start == P_end))
        P_test = learner.predict(X_test)
        test_a = auc(Y_test, P_test)
        lpo_aucs.append(lpo_a)
        test_aucs.append(test_a)
    print("mean lpo over auc over 1000 repetitions: %f" % np.mean(lpo_aucs))
    print("mean test auc over 1000 repetitions %f" % np.mean(test_aucs))
Example #4
def onkeypressed(self, event):
    print('You pressed', event.key)
    if event.key == '1':
        print('Assigned all selected points to class 1')
        newclazz = 1
        self.mmc.claim_all_points_in_working_set(newclazz)
    if event.key == '0':
        print('Assigned all selected points to class 0')
        newclazz = 0
        self.mmc.claim_all_points_in_working_set(newclazz)
    if event.key == 'a':
        print('Selected all points')
        newws = list(set(range(len(self.collection))) - self.lockedset)
        self.mmc.new_working_set(newws)
        self.lasso.line.set_visible(False)
    if event.key == 'c':
        changecount = self.mmc.cyclic_descent_in_working_set()
        print('Performed ', changecount, 'cyclic descent steps')
    if event.key == 'l':
        print('Locked the class labels of selected points')
        self.lockedset = self.lockedset | self.selectedset
        newws = list(self.selectedset - self.lockedset)
        self.mmc.new_working_set(newws)
    if event.key == 'u':
        print('Unlocked the selected points')
        self.lockedset = self.lockedset - self.selectedset
        newws = list(self.selectedset - self.lockedset)
        self.mmc.new_working_set(newws)
    if event.key == 'p':
        print('Compute predictions and AUC on data')
        preds = self.mmc.predict(Xmat)
        print(auc(self.mmc.Y[:, 0], preds[:, 0]))
    self.redrawall()
Example #5
def train_rls():
    X_train, Y_train, foo = read_svmlight("a1a.t")
    X_test, Y_test, foo = read_svmlight("a1a", X_train.shape[1])
    loo_aucs = []
    test_aucs = []
    for i in range(1000):
        X_small = X_train[i * 30:i * 30 + 30]
        Y_small = Y_train[i * 30:i * 30 + 30]
        learner = RLS(X_small, Y_small)
        P_loo = learner.leave_one_out()
        loo_a = auc(Y_small, P_loo)
        P_test = learner.predict(X_test)
        test_a = auc(Y_test, P_test)
        loo_aucs.append(loo_a)
        test_aucs.append(test_a)
    print("mean loo auc over 1000 repetitions %f" % np.mean(loo_aucs))
    print("mean test auc over 1000 repetitions %f" % np.mean(test_aucs))
Example #6
def train_rls():
    X_train, Y_train, foo = read_svmlight("a1a.t")
    X_test, Y_test, foo = read_svmlight("a1a", X_train.shape[1])
    loo_aucs = []
    test_aucs = []
    for i in range(1000):
        X_small = X_train[i*30: i*30 + 30]
        Y_small = Y_train[i*30: i*30 + 30]
        learner = RLS(X_small, Y_small)
        P_loo = learner.leave_one_out()
        loo_a = auc(Y_small, P_loo)
        P_test = learner.predict(X_test)
        test_a = auc(Y_test, P_test)
        loo_aucs.append(loo_a)
        test_aucs.append(test_a)
    print("mean loo auc over 1000 repetitions %f" %np.mean(loo_aucs))
    print("mean test auc over 1000 repetitions %f" %np.mean(test_aucs))
Example #7
def train_rls():
    X_train, Y_train, X_test, Y_test = load_newsgroups()
    #CGRLS does not support multi-output learning, so we train
    #one classifier for the first column of Y. Multi-class learning
    #would be implemented by training one CGRLS for each column, and
    #taking the argmax of class predictions.
    predictions = []
    rls = CGRLS(X_train, Y_train[:, 0], regparam=100.0)
    P = rls.predict(X_test)
    perf = auc(Y_test[:, 0], P)
    print("auc for task 1 %f" % perf)
Example #8
def train_rls():
    X_train, Y_train, X_test, Y_test = load_newsgroups()
    #CGRankRLS does not support multi-output learning, so we train
    #one model for the first column of Y. Multi-class learning
    #would be implemented by training one CGRankRLS for each column, and
    #taking the argmax of class predictions.
    predictions = []
    rls = CGRankRLS(X_train, Y_train[:, 0], regparam=100.0)
    P = rls.predict(X_test)
    perf = auc(Y_test[:, 0], P)
    print("auc for task 1 %f" % perf)
Example #9
def train_rls():
    X_train, Y_train, foo = read_svmlight("a1a.t")
    #subsample, leave-pair-out on whole data would take
    #a lot of time
    X_train = X_train[:1000]
    Y_train = Y_train[:1000]
    X_test, Y_test, foo = read_svmlight("a1a", X_train.shape[1])
    regparams = [2.**-5, 1., 2.**5]
    learner = LeavePairOutRLS(X_train, Y_train, regparams=regparams)
    print("best regparam %f" % learner.regparam)
    print("lpo auc " + str(learner.cv_performances))
    P_test = learner.predict(X_test)
    print("test auc %f" % auc(Y_test, P_test))
Example #10
def train_rls():
    X_train, Y_train, foo = read_svmlight("a1a.t")
    # subsample, leave-pair-out on whole data would take
    # a lot of time
    X_train = X_train[:1000]
    Y_train = Y_train[:1000]
    X_test, Y_test, foo = read_svmlight("a1a", X_train.shape[1])
    regparams = [2.0 ** -5, 1.0, 2.0 ** 5]
    learner = LeavePairOutRLS(X_train, Y_train, regparams=regparams)
    print("best regparam %f" % learner.regparam)
    print("lpo auc " + str(learner.cv_performances))
    P_test = learner.predict(X_test)
    print("test auc %f" % auc(Y_test, P_test))
Example #11
def callback(self, learner):
    K1 = learner.resource_pool['kmatrix1']
    K2 = learner.resource_pool['kmatrix2']
    rowind = learner.label_row_inds
    colind = learner.label_col_inds
    loss = dual_svm_objective(learner.A, K1, K2, Y, rowind, colind, lamb)
    print("iteration", self.iter)
    print("Dual RLS loss", loss)
    model = KernelPairwisePredictor(learner.A, rowind, colind)
    #model = learner.predictor
    if rowinds_test is None:
        P = model.predict(K1_test, K2_test).ravel()
    else:
        P = model.predict(K1_test, K2_test, rowinds_test, colinds_test)
    perf = auc(Y_test, P)
    print("Test set AUC", perf)
    self.iter += 1
Example #12
def callback(self, learner):
    X1 = learner.resource_pool['xmatrix1']
    X2 = learner.resource_pool['xmatrix2']
    rowind = learner.label_row_inds
    colind = learner.label_col_inds
    w = learner.W.ravel()
    loss = primal_svm_objective(w, X1, X2, Y, rowind, colind, lamb)
    print("iteration", self.iter)
    print("Primal SVM loss", loss)
    model = LinearPairwisePredictor(learner.W)
    #model = learner.predictor
    if rowinds_test is None:
        P = model.predict(X1_test, X2_test).ravel()
    else:
        P = model.predict(X1_test, X2_test, rowinds_test, colinds_test)
    perf = auc(Y_test, P)
    print("Test set AUC", perf)
    self.iter += 1
Example #13
import numpy as np
from rlscore.learner.rls import KfoldRLS
from rlscore.utilities.reader import read_folds
from rlscore.utilities.reader import read_sparse
from rlscore.measure import auc
train_labels = np.loadtxt("./legacy_tests/data/class_train.labels")
test_labels = np.loadtxt("./legacy_tests/data/class_test.labels")
folds = read_folds("./legacy_tests/data/folds.txt")
train_features = read_sparse("./legacy_tests/data/class_train.features")
test_features = read_sparse("./legacy_tests/data/class_test.features")
kwargs = {}
kwargs['measure'] = auc
kwargs['regparams'] = [2 ** i for i in range(-10, 11)]
kwargs["Y"] = train_labels
kwargs["X"] = train_features
kwargs["folds"] = folds
learner = KfoldRLS(**kwargs)
grid = kwargs['regparams']
perfs = learner.cv_performances
for i in range(len(grid)):
    print("parameter %f cv_performance %f" % (grid[i], perfs[i]))
P = learner.predict(test_features)
test_perf = auc(test_labels, P)
print "test set performance: %f" %test_perf
Example #14
import numpy as np
from rlscore.learner.mmc import MMC
from rlscore.utilities.reader import read_sparse
from rlscore.measure import auc
train_labels = np.loadtxt("./legacy_tests/data/class_train.labels")
test_labels = np.loadtxt("./legacy_tests/data/class_test.labels")
train_features = read_sparse("./legacy_tests/data/class_train.features")
test_features = read_sparse("./legacy_tests/data/class_test.features")
kwargs = {}
kwargs["Y"] = train_labels
kwargs["X"] = train_features
kwargs["regparam"] = 1
learner = MMC(**kwargs)
P = learner.predict(test_features)
test_perf = auc(test_labels, P)
print("test set performance: %f" %test_perf)
Example #15
from rlscore.measure import auc

#My class labels, three negative and two positive examples
Y = [-1, -1, -1, 1, 1]

#Predict all ties
P = [1, 1, 1, 1, 1]

print("My auc with all ties %f" % auc(Y, P))

#Use Y for prediction
print("My auc with using Y as P is %f" % auc(Y, Y))

#Perfect predictions: AUC is a ranking measure, so all that matters
#is that positive instances get higher predictions than negatives
P2 = [-5, 2, -1, 4, 3.2]

print("My auc with correctly ranked predictions is %f" % auc(Y, P2))

#Let's make the predictions worse

P2 = [-5, 2, -1, 1, 3.2]

print("Now my auc dropped to %f" % auc(Y, P2))

#AUC is undefined if all instances belong to the same class, let's crash auc

Y2 = [1, 1, 1, 1, 1]
#this will not work
auc(Y2, P2)
Example #16
from rlscore.measure import auc

#My class labels, three negative and two positive examples
Y = [-1, -1, -1, 1, 1]

#Predict all ties
P = [1, 1, 1, 1, 1]

print("My auc with all ties %f" %auc(Y,P))

#Use Y for prediction
print("My auc with using Y as P is %f" %auc(Y,Y))

#Perfect predictions: AUC is a ranking measure, so all that matters
#is that positive instances get higher predictions than negatives
P2 = [-5, 2, -1, 4, 3.2]

print("My auc with correctly ranked predictions is %f" %auc(Y,P2))

#Let's make the predictions worse

P2 = [-5, 2, -1, 1, 3.2]

print("Now my auc dropped to %f" %auc(Y,P2))

#AUC is undefined if all instances belong to the same class, let's crash auc

Y2 = [1, 1, 1, 1, 1]
#this will not work
auc(Y2, P2)
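Since auc raises an error when only one class is present, as the last two lines above demonstrate, callers that cannot guarantee both classes appear in Y can check before scoring. A minimal sketch; safe_auc and its nan fallback are illustrative choices, not part of rlscore.

from rlscore.measure import auc

def safe_auc(Y, P):
    #AUC needs at least one positive and one negative example
    if len(set(Y)) < 2:
        return float("nan")
    return auc(Y, P)

print(safe_auc([1, 1, 1, 1, 1], [-5, 2, -1, 1, 3.2]))    #nan, only one class present
print(safe_auc([-1, -1, -1, 1, 1], [-5, 2, -1, 4, 3.2])) #1.0, correctly ranked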