Code example #1
File: lpo.py Project: aatapa/RLScore
def lpo_core(X,y, regparam):
    start, end = [], []
    for i in range(X.shape[0]-1):
        for j in range(i+1, X.shape[0]):
            start.append(i)
            end.append(j)
    rls = RLS(X,y, regparam=regparam, kernel="GaussianKernel", gamma=0.01)
    pred0, pred1 = rls.leave_pair_out(start, end)
    return pred0, pred1
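A minimal usage sketch for lpo_core on synthetic data; the array shapes and the regparam value below are illustrative assumptions, not part of the original example.

import numpy as np

X = np.random.rand(50, 10)
y = np.random.rand(50)
# One prediction pair per left-out (i, j) combination enumerated inside lpo_core
pred0, pred1 = lpo_core(X, y, regparam=1.0)
print("predictions computed for %d left-out pairs" % len(pred0))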
Code example #2
File: lgo.py Project: aatapa/RLScore
def lgo_core(X,y, groups, regparam):
    logo = LeaveOneGroupOut()
    rls = RLS(X,y, regparam=regparam, kernel="GaussianKernel", gamma=0.01)
    errors = []
    for train, test in logo.split(X, y, groups=groups):
        p = rls.holdout(test)
        e = sqerror(y[test], p)
        errors.append(e)
    return np.mean(errors)
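A minimal usage sketch for lgo_core with synthetic grouped data; the group layout and the regparam are assumptions made only for illustration.

import numpy as np

X = np.random.rand(60, 5)
y = np.random.rand(60)
groups = np.repeat(np.arange(6), 10)  # six groups of ten instances each
print("mean leave-group-out error %f" % lgo_core(X, y, groups, regparam=1.0))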
Code example #3
File: kernel4.py Project: aatapa/RLScore
def train_rls():
    X_train, Y_train, X_test, Y_test = load_housing()
    learner = RLS(X_train, Y_train, kernel="GaussianKernel", regparam=1, gamma=1)
    #Leave-one-out cross-validation predictions, this is fast due to
    #computational short-cut
    P_loo = learner.leave_one_out()
    #Test set predictions
    P_test = learner.predict(X_test)
    print("leave-one-out error %f" %sqerror(Y_train, P_loo))
    print("test error %f" %sqerror(Y_test, P_test))
    #Sanity check, can we do better than predicting mean of training labels?
    print("mean predictor %f" %sqerror(Y_test, np.ones(Y_test.shape)*np.mean(Y_train)))
Code example #4
File: predictor3.py Project: aatapa/RLScore
def train_rls():
    X_train, Y_train, X_test, Y_test = load_housing()
    learner = RLS(X_train, Y_train, kernel="GaussianKernel", regparam=0.0003, gamma=0.00003)
    #This is how we make predictions
    P_test = learner.predict(X_test)
    #We can separate the predictor from learner
    predictor = learner.predictor
    #And do the same predictions
    P_test = predictor.predict(X_test)
    #Let's get the coefficients of the predictor
    A = predictor.A
    print("A-coefficients " +str(A))
    print("number of coefficients %d" %len(A))
Code example #5
File: predictor1.py Project: aatapa/RLScore
def train_rls():
    #Trains RLS with default parameters (regparam=1.0, kernel='LinearKernel')
    X_train, Y_train, X_test, Y_test = load_housing()
    learner = RLS(X_train, Y_train)
    #This is how we make predictions
    P_test = learner.predict(X_test)
    #We can separate the predictor from learner
    predictor = learner.predictor
    #And do the same predictions
    P_test = predictor.predict(X_test)
    #Let's get the coefficients of the predictor
    w = predictor.W
    b = predictor.b
    print("number of coefficients %d" %len(w))
    print("w-coefficients " +str(w))
    print("bias term %f" %b)
Code example #6
def train_rls():
    X_train, Y_train, X_test, Y_test = load_housing()
    #select randomly 100 basis vectors
    indices = range(X_train.shape[0])
    indices = random.sample(indices, 100)
    basis_vectors = X_train[indices]    
    learner = RLS(X_train, Y_train, basis_vectors = basis_vectors, kernel="GaussianKernel", regparam=0.0003, gamma=0.00003)
    #Leave-one-out cross-validation predictions, this is fast due to
    #computational short-cut
    P_loo = learner.leave_one_out()
    #Test set predictions
    P_test = learner.predict(X_test)
    print("leave-one-out error %f" %sqerror(Y_train, P_loo))
    print("test error %f" %sqerror(Y_test, P_test))
    #Sanity check, can we do better than predicting mean of training labels?
    print("mean predictor %f" %sqerror(Y_test, np.ones(Y_test.shape)*np.mean(Y_train)))
Code example #7
File: testRLS.py Project: jbjorne/CAMDA2014
def testRLS(input):
    X, Y = svmlight_format.load_svmlight_file(input)

    hoindices = range(int(0.1 * len(Y)))
    hocompl = list(set(range(len(Y))) - set(hoindices))
    trainX = X[hocompl]
    testX = X[hoindices]
    trainY = Y[hocompl]
    testY = Y[hoindices]
    print len(trainY), len(testY)

    kwargs = {}
    kwargs["train_features"] = trainX
    kwargs["train_labels"] = trainY

    rls = RLS.createLearner(**kwargs)
    rls.train()
    bestperf = -1.0
    for logrp in range(-5, 5):
        rp = 2.0 ** logrp
        rls.solve(rp)
        Ploo = rls.computeLOO()
        perf = cindex(trainY, Ploo)
        print logrp, perf
        if perf > bestperf:
            bestperf = perf
            bestlogrp = logrp
    rp = 2.0 ** bestlogrp
    rls.solve(rp)
    P = rls.getModel().predict(testX)
Code example #8
File: test_cg_rls.py Project: hongguangguo/RLScore
 def testCGRLS(self):
     m, n = 100, 300
     for regparam in [0.00000001, 1, 100000000]:
         Xtrain = np.mat(np.random.rand(m, n))
         Y = np.mat(np.random.rand(m, 1))
         rpool = {}
         rpool["train_features"] = Xtrain
         rpool["train_labels"] = Y
         rpool["regparam"] = regparam
         rpool["bias"] = 1.0
         rls = RLS.createLearner(**rpool)
         rls.solve(regparam)
         model = rls.getModel()
         W = model.W
         b = model.b
         rls = CGRLS.createLearner(**rpool)
         rls.train()
         model = rls.getModel()
         W2 = model.W
         b2 = model.b
         for i in range(W.shape[0]):
             # for j in range(W.shape[1]):
             #    self.assertAlmostEqual(W[i,j],W2[i,j],places=5)
             self.assertAlmostEqual(W[i], W2[i], places=5)
         self.assertAlmostEqual(b, b2, places=5)
Code example #9
File: looRLS.py Project: jbjorne/CAMDA2014
def looRLS(XPath, yPath, metaPath):
    X, Y = readAuto(XPath, yPath)
    meta = {}
    if metaPath != None:
        print "Loading metadata from", metaPath
        meta = result.getMeta(metaPath)
    X_train, X_hidden, Y_train, Y_hidden = hidden.split(X, Y, meta=meta) 
    kwargs = {}
    kwargs['train_features'] = X_train
    kwargs['train_labels'] = Y_train
    kwargs['regparam'] = 1.0
    rls = RLS.createLearner(**kwargs)
    rls.train()
    bestperf = -1. 
    for logrp in range(5, 25):
        rp = 2. ** logrp
        rls.solve(rp)
        Ploo = rls.computeLOO()
        perf = cindex(Y_train, Ploo)
        print "Leave-one-out %f for lambda 2^%d" %(perf, logrp)
        if perf > bestperf:
            bestperf = perf
            bestlogrp = logrp
    rp = 2. ** bestlogrp
    print "Best leave-one-out %f for lambda 2^%d" %(bestperf, bestlogrp)
    rls.solve(rp)
    model = rls.getModel()
    P = model.predict(X_hidden)
    perf = cindex(Y_hidden, P)
    print "final performance: %f" %perf
Code example #10
File: nfoldRLS.py Project: jbjorne/CAMDA2014
def nfoldRLS(X, Y, fcount):
    kwargs = {}
    kwargs['train_features'] = X
    kwargs['train_labels'] = Y
    rls = RLS.createLearner(**kwargs)
    rls.train()
    bestperf = -1. 
    for logrp in range(5, 25):
        rp = 2. ** logrp
        rls.solve(rp)
        perfs = []
        kf = KFold(len(Y), n_folds=fcount, indices=True, shuffle=True, random_state=77)
        for train, test in kf:
            P = rls.computeHO(test)
            perf = cindex(Y[test], P)
            perfs.append(perf)
        perf = np.mean(perfs)
        print "N-fold CV %f for lambda 2^%d" %(perf, logrp)
        if perf > bestperf:
            bestperf = perf
            bestlogrp = logrp
    rp = 2. ** bestlogrp
    print "Best N-fold CV %f for lambda 2^%d" %(bestperf, bestlogrp)
    rls.solve(rp)
    model = rls.getModel()
    return model
Code example #11
File: regression6.py Project: aatapa/RLScore
def train_rls():
    # Trains RLS with a precomputed kernel matrix
    X_train, Y_train, X_test, Y_test = load_housing()
    # Minor technical detail: adding 1.0 simulates the effect of adding a
    # constant valued bias feature, as is done by 'LinearKernel' by default
    K_train = np.dot(X_train, X_train.T) + 1.0
    K_test = np.dot(X_test, X_train.T) + 1.0
    learner = RLS(K_train, Y_train, kernel="PrecomputedKernel")
    # Leave-one-out cross-validation predictions, this is fast due to
    # computational short-cut
    P_loo = learner.leave_one_out()
    # Test set predictions
    P_test = learner.predict(K_test)
    print("leave-one-out error %f" % sqerror(Y_train, P_loo))
    print("test error %f" % sqerror(Y_test, P_test))
    # Sanity check, can we do better than predicting mean of training labels?
    print("mean predictor %f" % sqerror(Y_test, np.ones(Y_test.shape) * np.mean(Y_train)))
Code example #12
File: classification2.py Project: aatapa/RLScore
def train_rls():
    X_train, Y_train, foo = read_svmlight("a1a.t")
    X_test, Y_test, foo = read_svmlight("a1a", X_train.shape[1])
    loo_aucs = []
    test_aucs = []
    for i in range(1000):
        X_small = X_train[i*30: i*30 + 30]
        Y_small = Y_train[i*30: i*30 + 30]
        learner = RLS(X_small, Y_small)
        P_loo = learner.leave_one_out()
        loo_a = auc(Y_small, P_loo)
        P_test = learner.predict(X_test)
        test_a = auc(Y_test, P_test)
        loo_aucs.append(loo_a)
        test_aucs.append(test_a)
    print("mean loo auc over 1000 repetitions %f" %np.mean(loo_aucs))
    print("mean test auc over 1000 repetitions %f" %np.mean(test_aucs))
Code example #13
File: classification0.py Project: aatapa/RLScore
def train_rls():
    X_train, Y_train, foo = read_svmlight("a1a.t")
    X_test, Y_test, foo = read_svmlight("a1a", X_train.shape[1])
    learner = RLS(X_train, Y_train)
    best_regparam = None
    best_accuracy = 0.
    #exponential grid of possible regparam values
    log_regparams = range(-15, 16)
    for log_regparam in log_regparams:
        regparam = 2.**log_regparam
        #RLS is re-trained with the new regparam, this
        #is very fast due to computational short-cut
        learner.solve(regparam)
        #Leave-one-out cross-validation predictions, this is fast due to
        #computational short-cut
        P_loo = learner.leave_one_out()
        acc = accuracy(Y_train, P_loo)
        print("regparam 2**%d, loo-accuracy %f" %(log_regparam, acc))
        if acc > best_accuracy:
            best_accuracy = acc
            best_regparam = regparam
    learner.solve(best_regparam)
    P_test = learner.predict(X_test)
    print("best regparam %f with loo-accuracy %f" %(best_regparam, best_accuracy)) 
    print("test set accuracy %f" %accuracy(Y_test, P_test))
Code example #14
File: regression2.py Project: aatapa/RLScore
def train_rls():
    #Select regparam with leave-one-out cross-validation
    X_train, Y_train, X_test, Y_test = load_housing()
    learner = RLS(X_train, Y_train)
    best_regparam = None
    best_error = float("inf")
    #exponential grid of possible regparam values
    log_regparams = range(-15, 16)
    for log_regparam in log_regparams:
        regparam = 2.**log_regparam
        #RLS is re-trained with the new regparam, this
        #is very fast due to computational short-cut
        learner.solve(regparam)
        #Leave-one-out cross-validation predictions, this is fast due to
        #computational short-cut
        P_loo = learner.leave_one_out()
        e = sqerror(Y_train, P_loo)
        print("regparam 2**%d, loo-error %f" %(log_regparam, e))
        if e < best_error:
            best_error = e
            best_regparam = regparam
    learner.solve(best_regparam)
    P_test = learner.predict(X_test)
    print("best regparam %f with loo-error %f" %(best_regparam, best_error)) 
    print("test error %f" %sqerror(Y_test, P_test))
Code example #15
File: kernel4.py Project: disc5/RLScore
def train_rls():
    X_train, Y_train, X_test, Y_test = load_housing()
    learner = RLS(X_train,
                  Y_train,
                  kernel="GaussianKernel",
                  regparam=1,
                  gamma=1)
    #Leave-one-out cross-validation predictions, this is fast due to
    #computational short-cut
    P_loo = learner.leave_one_out()
    #Test set predictions
    P_test = learner.predict(X_test)
    print("leave-one-out error %f" % sqerror(Y_train, P_loo))
    print("test error %f" % sqerror(Y_test, P_test))
    #Sanity check, can we do better than predicting mean of training labels?
    print("mean predictor %f" %
          sqerror(Y_test,
                  np.ones(Y_test.shape) * np.mean(Y_train)))
Code example #16
File: regression6.py Project: disc5/RLScore
def train_rls():
    #Trains RLS with a precomputed kernel matrix
    X_train, Y_train, X_test, Y_test = load_housing()
    #Minor technical detail: adding 1.0 simulates the effect of adding a
    #constant valued bias feature, as is done by 'LinearKernel' by default
    K_train = np.dot(X_train, X_train.T) + 1.0
    K_test = np.dot(X_test, X_train.T) + 1.0
    learner = RLS(K_train, Y_train, kernel="PrecomputedKernel")
    #Leave-one-out cross-validation predictions, this is fast due to
    #computational short-cut
    P_loo = learner.leave_one_out()
    #Test set predictions
    P_test = learner.predict(K_test)
    print("leave-one-out error %f" % sqerror(Y_train, P_loo))
    print("test error %f" % sqerror(Y_test, P_test))
    #Sanity check, can we do better than predicting mean of training labels?
    print("mean predictor %f" %
          sqerror(Y_test,
                  np.ones(Y_test.shape) * np.mean(Y_train)))
Code example #17
File: test-rls.py Project: Nie-yingchun/dwilib
def search_params_linear(trainX, trainY, group_ids, rprange):
    """Search best parameters for kernel and regularization."""
    kwargs = {
        'train_features': trainX,
        'train_labels': trainY,
        'kernel_obj': LinearKernel(trainX),
    }
    rls = RLS.createLearner(**kwargs)
    rls.train()
    perf, perf_groups, rp = search_rp(rls, trainY, group_ids, rprange)
    return perf, perf_groups, rp
Code example #18
File: test_rls.py Project: aatapa/RLScore
 def test_leave_pair_out(self):
     #compares holdout and leave-pair-out
     start = [0, 2, 3, 5]
     end = [1, 3, 6, 8]
     for X in [self.Xtrain1, self.Xtrain2]:
         for Y in [self.Ytrain1, self.Ytrain2]:
             #LPO with linear kernel
             rls1 = RLS(X, Y, regparam = 7.0, bias=3.0)
             lpo_start, lpo_end = rls1.leave_pair_out(start, end)
             ho_start, ho_end = [], []
             for i in range(len(start)):
                 P = rls1.holdout([start[i], end[i]])
                 ho_start.append(P[0])
                 ho_end.append(P[1])
             ho_start = np.array(ho_start)
             ho_end = np.array(ho_end)
             assert_allclose(ho_start, lpo_start)
             assert_allclose(ho_end, lpo_end)
             #LPO with polynomial kernel
             rls1 = RLS(X, Y, regparam = 11.0, kernel="PolynomialKernel", coef0=1, degree=3)
             lpo_start, lpo_end = rls1.leave_pair_out(start, end)
             ho_start, ho_end = [], []
             for i in range(len(start)):
                 P = rls1.holdout([start[i], end[i]])
                 ho_start.append(P[0])
                 ho_end.append(P[1])
             ho_start = np.array(ho_start)
             ho_end = np.array(ho_end)
             assert_allclose(ho_start, lpo_start)
             assert_allclose(ho_end, lpo_end)
Code example #19
File: test_rls.py Project: aatapa/RLScore
 def test_kernel(self):
     #tests that learning with kernels works
     for X in [self.Xtrain1, self.Xtrain2]:
         for Y in [self.Ytrain1, self.Ytrain2]:
             #Basic case
             dual_rls = RLS(X, Y, kernel= "GaussianKernel", regparam=5.0, gamma=0.01)
             kernel = GaussianKernel(X, gamma = 0.01)
             K = kernel.getKM(X)
             m = K.shape[0]
             A = dual_rls.predictor.A
             A2 = np.linalg.solve(K+5.0*np.eye(m), Y)
             assert_allclose(A, A2)
             #Fast regularization
             dual_rls.solve(1000)
             A = dual_rls.predictor.A
             A2 = np.linalg.solve(K+ 1000 * np.eye(m), Y)
             assert_allclose(A, A2)
             #Precomputed kernel
             dual_rls = RLS(K, Y, kernel="PrecomputedKernel", regparam = 1000)
             assert_allclose(dual_rls.predictor.W, A2)
             #Reduced set approximation
             kernel = PolynomialKernel(X[self.bvectors], gamma=0.5, coef0 = 1.2, degree = 2)              
             Kr = kernel.getKM(X)
             Krr = kernel.getKM(X[self.bvectors])
             dual_rls = RLS(X, Y, kernel="PolynomialKernel", basis_vectors = X[self.bvectors], regparam = 200, gamma=0.5, coef0=1.2, degree = 2)
             A = dual_rls.predictor.A
             A2 = np.linalg.solve(np.dot(Kr.T, Kr)+ 200 * Krr, np.dot(Kr.T, Y))
             assert_allclose(A, A2)
             dual_rls = RLS(Kr, Y, kernel="PrecomputedKernel", basis_vectors = Krr, regparam=200)
             A = dual_rls.predictor.W
             assert_allclose(A, A2)
Code example #20
File: regression4.py Project: aatapa/RLScore
def train_rls():
    #Selects both the Gaussian kernel gamma parameter and the regparam with leave-one-out CV
    X_train, Y_train, X_test, Y_test = load_housing()
    regparams = [2.**i for i in range(-15, 16)]
    gammas = regparams
    best_regparam = None
    best_gamma = None
    best_error = float("inf")
    for gamma in gammas:
        #New RLS is initialized for each kernel parameter
        learner = RLS(X_train, Y_train, kernel="GaussianKernel", gamma=gamma)
        for regparam in regparams:
            #RLS is re-trained with the new regparam, this
            #is very fast due to computational short-cut
            learner.solve(regparam)
            #Leave-one-out cross-validation predictions, this is fast due to
            #computational short-cut
            P_loo = learner.leave_one_out()
            e = sqerror(Y_train, P_loo)
            #print "regparam", regparam, "gamma", gamma, "loo-error", e
            if e < best_error:
                best_error = e
                best_regparam = regparam
                best_gamma = gamma
    learner = RLS(X_train, Y_train, regparam = best_regparam, kernel="GaussianKernel", gamma=best_gamma)
    P_test = learner.predict(X_test)
    print("best parameters gamma %f regparam %f" %(best_gamma, best_regparam))
    print("best leave-one-out error %f" %best_error)
    print("test error %f" %sqerror(Y_test, P_test))
Code example #21
File: test_cg_rls.py Project: vivian457/RLScore
 def testCGRLS(self):
     m, n = 100, 300
     for regparam in [0.00000001, 1, 100000000]:
         Xtrain = np.mat(np.random.rand(m, n))
         Y = np.mat(np.random.rand(m, 1))
         rpool = {}
         rpool['X'] = Xtrain
         rpool['Y'] = Y
         rpool['regparam'] = regparam
         rpool["bias"] = 2.0
         rls = RLS(**rpool)
         rls.solve(regparam)
         model = rls.predictor
         W = model.W
         b = model.b
         rls = CGRLS(**rpool)
         model = rls.predictor
         W2 = model.W
         b2 = model.b
         for i in range(W.shape[0]):
                 self.assertAlmostEqual(W[i], W2[i], places=5)
         self.assertAlmostEqual(b, b2, places=5)
Code example #22
def train_rls():
    X_train, Y_train, X_test, Y_test = load_housing()
    #select randomly 100 basis vectors
    indices = range(X_train.shape[0])
    indices = random.sample(indices, 100)
    basis_vectors = X_train[indices]
    learner = RLS(X_train,
                  Y_train,
                  basis_vectors=basis_vectors,
                  kernel="GaussianKernel",
                  regparam=0.0003,
                  gamma=0.00003)
    #Leave-one-out cross-validation predictions, this is fast due to
    #computational short-cut
    P_loo = learner.leave_one_out()
    #Test set predictions
    P_test = learner.predict(X_test)
    print("leave-one-out error %f" % sqerror(Y_train, P_loo))
    print("test error %f" % sqerror(Y_test, P_test))
    #Sanity check, can we do better than predicting mean of training labels?
    print("mean predictor %f" %
          sqerror(Y_test,
                  np.ones(Y_test.shape) * np.mean(Y_train)))
Code example #23
def plot_rls():
    #Select regparam with k-fold cross-validation,
    #where instances related to a single sentence form
    #together a fold
    X_train = read_sparse("train_2000_x.txt")
    Y_train = np.loadtxt("train_2000_y.txt")
    X_test = read_sparse("test_2000_x.txt", X_train.shape[1])
    Y_test = np.loadtxt("test_2000_y.txt")
    #list of sentence ids
    ids = np.loadtxt("train_2000_qids.txt")
    #mapped to a list of lists, where each list
    #contains indices for one fold
    folds = map_ids(ids)
    learner = RLS(X_train, Y_train)
    best_regparam = None
    best_error = float("inf")
    #exponential grid of possible regparam values
    log_regparams = range(-15, 16)
    kfold_errors = []
    loo_errors = []
    test_errors = []
    for log_regparam in log_regparams:
        regparam = 2.**log_regparam
        #RLS is re-trained with the new regparam, this
        #is very fast due to computational short-cut
        learner.solve(regparam)
        #K-fold cross-validation
        perfs = []
        for fold in folds:
            #computes holdout predictions, where instances
            #in fold are left out of training set
            P = learner.holdout(fold)
            perfs.append(sqerror(Y_train[fold], P))
        e_kfold = np.mean(perfs)
        kfold_errors.append(e_kfold)
        P_loo = learner.leave_one_out()
        e_loo = sqerror(Y_train, P_loo)
        loo_errors.append(e_loo)
        P_test = learner.predict(X_test)
        e_test = sqerror(Y_test, P_test)
        test_errors.append(e_test)
    plt.semilogy(log_regparams, loo_errors, label="leave-one-out")
    plt.semilogy(log_regparams, kfold_errors, label="leave-sentence-out")
    plt.semilogy(log_regparams, test_errors, label="test error")
    plt.xlabel("$log_2(\lambda)$")
    plt.ylabel("mean squared error")
    plt.legend(loc=3)
    plt.show()
Code example #24
def train_rls():
    #Select regparam with k-fold cross-validation,
    #where instances related to a single sentence form
    #together a fold
    X_train = read_sparse("train_2000_x.txt")
    Y_train = np.loadtxt("train_2000_y.txt")
    X_test = read_sparse("test_2000_x.txt", X_train.shape[1])
    Y_test = np.loadtxt("test_2000_y.txt")
    #list of sentence ids
    ids = np.loadtxt("train_2000_qids.txt")
    #mapped to a list of lists, where each list
    #contains indices for one fold
    folds = map_ids(ids)
    learner = RLS(X_train, Y_train)
    best_regparam = None
    best_error = float("inf")
    #exponential grid of possible regparam values
    log_regparams = range(-15, 16)
    for log_regparam in log_regparams:
        regparam = 2.**log_regparam
        #RLS is re-trained with the new regparam, this
        #is very fast due to computational short-cut
        learner.solve(regparam)
        #K-fold cross-validation
        P = np.zeros(Y_train.shape)
        for fold in folds:
            #computes holdout predictions, where instances
            #in fold are left out of training set
            P[fold] = learner.holdout(fold)
        e = sqerror(Y_train, P)
        print("regparam 2**%d, k-fold error %f" % (log_regparam, e))
        if e < best_error:
            best_error = e
            best_regparam = regparam
    learner.solve(best_regparam)
    P_test = learner.predict(X_test)
    print("best regparam %f k-fold error %f" % (best_regparam, best_error))
    print("test error %f" % sqerror(Y_test, P_test))
Code example #25
File: test-rls.py Project: Nie-yingchun/dwilib
def search_params_nonlinear(trainX, trainY, group_ids, rprange, gammarange):
    """Search best parameters for kernel and regularization."""
    bestperf = -1.
    for loggamma in range(*gammarange):
        gamma = 2. ** loggamma
        kwargs = {
            'train_features': trainX,
            'train_labels': trainY,
            #'kernel_obj': LinearKernel(trainX),
            'kernel_obj': GaussianKernel(trainX, gamma=gamma),
            #'kernel_obj': PolynomialKernel(trainX, gamma=gamma, coef0=1, degree=2),
        }
        rls = RLS.createLearner(**kwargs)
        rls.train()
        perf, perf_groups, rp = search_rp(rls, trainY, group_ids, rprange)
        if perf > bestperf:
            bestperf = perf
            bestperf_groups = perf_groups
            bestrp = rp
            bestgamma = gamma
    return bestperf, bestperf_groups, bestrp, bestgamma
Code example #26
def plot_rls():
    #Select regparam with k-fold cross-validation,
    #where instances related to a single sentence form
    #together a fold
    X_train =  read_sparse("train_2000_x.txt")
    Y_train =  np.loadtxt("train_2000_y.txt")
    X_test =  read_sparse("test_2000_x.txt", X_train.shape[1])
    Y_test =  np.loadtxt("test_2000_y.txt")
    #list of sentence ids
    ids =  np.loadtxt("train_2000_qids.txt")
    #mapped to a list of lists, where each list
    #contains indices for one fold
    folds = map_ids(ids)
    learner = RLS(X_train, Y_train)
    best_regparam = None
    best_error = float("inf")
    #exponential grid of possible regparam values
    log_regparams = range(-15, 16)
    kfold_errors = []
    loo_errors = []
    test_errors = []
    for log_regparam in log_regparams:
        regparam = 2.**log_regparam
        #RLS is re-trained with the new regparam, this
        #is very fast due to computational short-cut
        learner.solve(regparam)
        #K-fold cross-validation
        perfs = []
        for fold in folds:
            #computes holdout predictions, where instances
            #in fold are left out of training set
            P = learner.holdout(fold)
            perfs.append(sqerror(Y_train[fold], P))
        e_kfold = np.mean(perfs)
        kfold_errors.append(e_kfold)
        P_loo = learner.leave_one_out()
        e_loo = sqerror(Y_train, P_loo)
        loo_errors.append(e_loo)
        P_test = learner.predict(X_test)
        e_test = sqerror(Y_test, P_test)
        test_errors.append(e_test)
    plt.semilogy(log_regparams, loo_errors, label = "leave-one-out")
    plt.semilogy(log_regparams, kfold_errors, label = "leave-sentence-out")
    plt.semilogy(log_regparams, test_errors, label = "test error")
    plt.xlabel("$log_2(\lambda)$")
    plt.ylabel("mean squared error")
    plt.legend(loc=3)
    plt.show()
Code example #27
File: parse_regression2.py Project: aatapa/RLScore
def train_rls():
    #Select regparam with k-fold cross-validation,
    #where instances related to a single sentence form
    #together a fold
    X_train =  read_sparse("train_2000_x.txt")
    Y_train =  np.loadtxt("train_2000_y.txt")
    X_test =  read_sparse("test_2000_x.txt", X_train.shape[1])
    Y_test =  np.loadtxt("test_2000_y.txt")
    #list of sentence ids
    ids =  np.loadtxt("train_2000_qids.txt")
    #mapped to a list of lists, where each list
    #contains indices for one fold
    folds = map_ids(ids)
    learner = RLS(X_train, Y_train)
    best_regparam = None
    best_error = float("inf")
    #exponential grid of possible regparam values
    log_regparams = range(-15, 16)
    for log_regparam in log_regparams:
        regparam = 2.**log_regparam
        #RLS is re-trained with the new regparam, this
        #is very fast due to computational short-cut
        learner.solve(regparam)
        #K-fold cross-validation
        P = np.zeros(Y_train.shape)
        for fold in folds:
            #computes holdout predictions, where instances
            #in fold are left out of training set
            P[fold] = learner.holdout(fold)
        e = sqerror(Y_train, P)
        print("regparam 2**%d, k-fold error %f" %(log_regparam, e))
        if e < best_error:
            best_error = e
            best_regparam = regparam
    learner.solve(best_regparam)
    P_test = learner.predict(X_test)
    print("best regparam %f k-fold error %f" %(best_regparam, best_error))
    print("test error %f" %sqerror(Y_test, P_test))
Code example #28
File: test_rls.py Project: vivian457/RLScore
    def testRLS(self):

        print()
        print()
        print()
        print()
        print("Testing the cross-validation routines of the RLS module.")
        print()
        print()
        floattype = np.float64

        m, n = 400, 100
        Xtrain = np.random.rand(m, n)
        K = np.dot(Xtrain, Xtrain.T)
        ylen = 2
        Y = np.zeros((m, ylen), dtype=floattype)
        Y = np.random.rand(m, ylen)

        hoindices = [45]
        hoindices2 = [45, 50]
        hoindices3 = [45, 50, 55]
        hocompl = list(set(range(m)) - set(hoindices))

        Kho = K[np.ix_(hocompl, hocompl)]
        Yho = Y[hocompl]

        kwargs = {}
        kwargs['Y'] = Y
        kwargs['X'] = K
        kwargs['kernel'] = 'PrecomputedKernel'
        dualrls = RLS(**kwargs)

        kwargs = {}
        kwargs["X"] = Xtrain
        kwargs["Y"] = Y
        kwargs["bias"] = 0.
        primalrls = RLS(**kwargs)

        kwargs = {}
        kwargs['Y'] = Yho
        kwargs['X'] = Kho
        kwargs['kernel'] = 'PrecomputedKernel'
        dualrls_naive = RLS(**kwargs)

        testkm = K[np.ix_(hocompl, hoindices)]
        trainX = Xtrain[hocompl]
        testX = Xtrain[hoindices]
        kwargs = {}
        kwargs['Y'] = Yho
        kwargs['X'] = trainX
        kwargs["bias"] = 0.
        primalrls_naive = RLS(**kwargs)

        loglambdas = range(-5, 5)
        for j in range(0, len(loglambdas)):
            regparam = 2.**loglambdas[j]
            print()
            print("Regparam 2^%1d" % loglambdas[j])

            dumbho = np.dot(
                testkm.T,
                np.dot(la.inv(Kho + regparam * np.eye(Kho.shape[0])), Yho))
            dumbho = np.squeeze(dumbho)
            print(str(dumbho) + ' Dumb HO (dual)')

            dualrls_naive.solve(regparam)
            predho1 = dualrls_naive.predictor.predict(testkm.T)
            print(str(predho1) + ' Naive HO (dual)')

            dualrls.solve(regparam)
            predho2 = dualrls.holdout(hoindices)
            print(str(predho2) + ' Fast HO (dual)')

            dualrls.solve(regparam)
            predho = dualrls.leave_one_out()[hoindices[0]]
            print(str(predho) + ' Fast LOO (dual)')

            primalrls_naive.solve(regparam)
            predho3 = primalrls_naive.predictor.predict(testX)
            print(str(predho3) + ' Naive HO (primal)')

            primalrls.solve(regparam)
            predho4 = primalrls.holdout(hoindices)
            print(str(predho4) + ' Fast HO (primal)')
            for predho in [predho1, predho2, predho3, predho4]:
                self.assertEqual(dumbho.shape, predho.shape)
                assert_allclose(dumbho, predho)
                #for row in range(predho.shape[0]):
                #    for col in range(predho.shape[1]):
                #        self.assertAlmostEqual(dumbho[row,col],predho[row,col])
            primalrls.solve(regparam)
            predho = primalrls.leave_one_out()[hoindices[0]]
            print(str(predho) + ' Fast LOO (primal)')
        print()
        hoindices = range(100, 300)
        hocompl = list(set(range(m)) - set(hoindices))

        Kho = K[np.ix_(hocompl, hocompl)]
        Yho = Y[hocompl]
        testkm = K[np.ix_(hocompl, hoindices)]

        dumbho = np.dot(
            testkm.T, np.dot(la.inv(Kho + regparam * np.eye(Kho.shape[0])),
                             Yho))

        kwargs = {}
        kwargs['Y'] = Y
        kwargs['X'] = Xtrain
        dualrls.solve(regparam)
        predho2 = dualrls.holdout(hoindices2)
        print(str(predho2) + ' Fast HO')
        hopred = dualrls.leave_pair_out(np.array([hoindices2[0], 4, 6]),
                                        np.array([hoindices2[1], 5, 7]))
        print(str(hopred[0][0]) + '\n' + str(hopred[1][0]) + ' Fast LPO')
Code example #29
 def testRLS(self):
     
     print("\n\n\n\nTesting the cross-validation routines of the RLS module.\n\n")
     
     m, n = 100, 300
     Xtrain = random.rand(m, n)
     Y = mat(random.rand(m, 1))
     basis_vectors = [0,3,7,8]
     
     #hoindices = [45, 50, 55]
     hoindices = [0, 1, 2]
     hocompl = list(set(range(m)) - set(hoindices))
     
     bk = GaussianKernel(**{'X':Xtrain[basis_vectors], 'gamma':0.001})
     
     rpool = {}
     rpool['X'] = Xtrain
     bk2 = GaussianKernel(**{'X':Xtrain, 'gamma':0.001})
     K = np.mat(bk2.getKM(Xtrain))
     
     Yho = Y[hocompl]
     
     
     rpool = {}
     rpool['Y'] = Y
     rpool['X'] = Xtrain
     rpool['basis_vectors'] = Xtrain[basis_vectors]
     
     Xhocompl = Xtrain[hocompl]
     testX = Xtrain[hoindices]
     
     rpool = {}
     rpool['Y'] = Yho
     rpool['X'] = Xhocompl
     rpool["kernel"] = "RsetKernel"
     rpool["base_kernel"] = bk
     rpool["basis_features"] = Xtrain[basis_vectors]
     #rk = RsetKernel(**{'base_kernel':bk, 'basis_features':Xtrain[basis_vectors], 'X':Xhocompl})
     dualrls_naive = RLS(**rpool)
     
     rpool = {}
     rpool['Y'] = Yho
     rpool['X'] = Xhocompl
     
     rsaK = K[:, basis_vectors] * la.inv(K[ix_(basis_vectors, basis_vectors)]) * K[basis_vectors]
     rsaKho = rsaK[ix_(hocompl, hocompl)]
     rsa_testkm = rsaK[ix_(hocompl, hoindices)]
     loglambdas = range(-5, 5)
     for j in range(0, len(loglambdas)):
         regparam = 2. ** loglambdas[j]
         print("\nRegparam 2^%1d" % loglambdas[j])
         
         print((rsa_testkm.T * la.inv(rsaKho + regparam * eye(rsaKho.shape[0])) * Yho).T, 'Dumb HO (dual)')
         dumbho = np.squeeze(np.array(rsa_testkm.T * la.inv(rsaKho + regparam * eye(rsaKho.shape[0])) * Yho))
         
         dualrls_naive.solve(regparam)
         predho1 = np.squeeze(dualrls_naive.predictor.predict(testX))
         print(predho1.T, 'Naive HO (dual)')
         
         #dualrls.solve(regparam)
         #predho2 = np.squeeze(dualrls.computeHO(hoindices))
         #print predho2.T, 'Fast HO (dual)'
         
         for predho in [dumbho, predho1]:#, predho2]:
             self.assertEqual(dumbho.shape, predho.shape)
             for row in range(predho.shape[0]):
                 #for col in range(predho.shape[1]):
                 #    self.assertAlmostEqual(dumbho[row,col],predho[row,col])
                     self.assertAlmostEqual(dumbho[row],predho[row])
Code example #30
File: test_rls.py Project: aatapa/RLScore
 def test_linear(self):
     #Test that learning with linear kernel works correctly both
     #with low and high-dimensional data
     for X in [self.Xtrain1, self.Xtrain2]:
         for Y in [self.Ytrain1, self.Ytrain2]:
             #Basic case
             primal_rls = RLS(X, Y, regparam=1.0, bias=0.)
             W = primal_rls.predictor.W
             d = X.shape[1]
             W2 = np.linalg.solve(np.dot(X.T, X) + np.eye(d), np.dot(X.T, Y))
             assert_allclose(W, W2)
             #Fast regularization algorithm
             primal_rls.solve(10.)
             W = primal_rls.predictor.W
             W2 = np.linalg.solve(np.dot(X.T, X) + 10.*np.eye(d), np.dot(X.T, Y))
             assert_allclose(W, W2)
             #Bias term included
             primal_rls = RLS(X, Y, regparam=1.0, bias=2.)
             O = np.sqrt(2.) * np.ones((X.shape[0],1))
             X_new = np.hstack((X, O))
             W = primal_rls.predictor.W
             W2 = np.linalg.solve(np.dot(X_new.T, X_new) + np.eye(d+1), np.dot(X_new.T, Y))
             b = primal_rls.predictor.b
             b2 = W2[-1]
             W2 = W2[:-1]
             assert_allclose(W, W2)
             assert_allclose(b, np.sqrt(2) * b2)
             #reduced set approximation
             primal_rls = RLS(X, Y, basis_vectors = X[self.bvectors], regparam=5.0, bias=2.)
             W = primal_rls.predictor.W
             b = primal_rls.predictor.b
             K = np.dot(X_new, X_new.T)
             Kr = K[:, self.bvectors]
             Krr = K[np.ix_(self.bvectors, self.bvectors)]
             A = np.linalg.solve(np.dot(Kr.T, Kr)+ 5.0 * Krr, np.dot(Kr.T, Y))
             W2 = np.dot(X_new[self.bvectors].T, A)
             b2 = W2[-1]
             W2 = W2[:-1]
             assert_allclose(W, W2)
             assert_allclose(b, np.sqrt(2) * b2)
             #Using pre-computed linear kernel matrix
             kernel = LinearKernel(X, bias = 2.)
             K = kernel.getKM(X)
             dual_rls = RLS(K, Y, kernel = "PrecomputedKernel", regparam=0.01)
             W = np.dot(X_new.T, dual_rls.predictor.W)
             b = W[-1]
             W = W[:-1]
             W2 = np.linalg.solve(np.dot(X_new.T, X_new) + 0.01 * np.eye(d+1), np.dot(X_new.T, Y))
             b2 = W2[-1]
             W2 = W2[:-1]
             assert_allclose(W, W2)
             assert_allclose(b, b2)
             #Pre-computed linear kernel, reduced set approximation
             kernel = LinearKernel(X[self.bvectors], bias = 2.)
             dual_rls = RLS(kernel.getKM(X), Y, kernel="PrecomputedKernel", basis_vectors = kernel.getKM(X[self.bvectors]), regparam=5.0)
             W = np.dot(X_new[self.bvectors].T, dual_rls.predictor.W)
             b = W[-1]
             W = W[:-1]
             K = np.dot(X_new, X_new.T)
             Kr = K[:, self.bvectors]
             Krr = K[np.ix_(self.bvectors, self.bvectors)]
             A = np.linalg.solve(np.dot(Kr.T, Kr)+ 5.0 * Krr, np.dot(Kr.T, Y))
             W2 = np.dot(X_new[self.bvectors].T, A)
             b2 = W2[-1]
             W2 = W2[:-1]
             assert_allclose(W, W2)
             assert_allclose(b, b2)
Code example #31
File: test_rls.py Project: vivian457/RLScore
 def test_holdout(self):
     for X in [self.Xtrain1, self.Xtrain2]:
         for Y in [self.Ytrain1, self.Ytrain2]:
             m = X.shape[0]
             hoindices = [3, 5, 8, 10, 17, 21]
             hocompl = list(set(range(m)) - set(hoindices))
             #Holdout with linear kernel
             rls1 = RLS(X, Y)
             rls2 = RLS(X[hocompl], Y[hocompl])
             P1 = rls1.holdout(hoindices)
             P2 = rls2.predict(X[hoindices])
             assert_allclose(P1, P2)
             #Holdout with bias
             rls1 = RLS(X, Y, bias=3.0)
             rls2 = RLS(X[hocompl], Y[hocompl], bias=3.0)
             P1 = rls1.holdout(hoindices)
             P2 = rls2.predict(X[hoindices])
             assert_allclose(P1, P2)
             #Fast regularization
             for i in range(-15, 15):
                 rls1.solve(2**i)
                 rls2.solve(2**i)
                 P1 = rls1.holdout(hoindices)
                 P2 = rls2.predict(X[hoindices])
                 assert_allclose(P1, P2)
             #Kernel holdout
             rls1 = RLS(X, Y, kernel="GaussianKernel", gamma=0.01)
             rls2 = RLS(X[hocompl],
                        Y[hocompl],
                        kernel="GaussianKernel",
                        gamma=0.01)
             P1 = rls1.holdout(hoindices)
             P2 = rls2.predict(X[hoindices])
             assert_allclose(P1, P2)
             for i in range(-15, 15):
                 rls1.solve(2**i)
                 rls2.solve(2**i)
                 P1 = rls1.holdout(hoindices)
                 P2 = rls2.predict(X[hoindices])
                 assert_allclose(P1, P2)
             #Incorrect indices
             I = [0, 3, 100]
             self.assertRaises(IndexError, rls1.holdout, I)
             I = [-1, 0, 2]
             self.assertRaises(IndexError, rls1.holdout, I)
             I = [1, 1, 2]
             self.assertRaises(IndexError, rls1.holdout, I)
Code example #32
File: loo.py Project: vivian457/RLScore
def loo_core(X,y,regparam):
    learner = RLS(X,y,regparam, bias=0.)
    p = learner.leave_one_out()
    e = sqerror(y, p)
    return e
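A minimal usage sketch for loo_core on synthetic data; the shapes and the regparam below are illustrative assumptions.

import numpy as np

X = np.random.rand(40, 8)
y = np.random.rand(40)
print("leave-one-out error %f" % loo_core(X, y, regparam=1.0))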
Code example #33
File: test_rls.py Project: aatapa/RLScore
 def testRLS(self):
     
      print()
      print()
      print()
      print()
      print("Testing the cross-validation routines of the RLS module.")
      print()
      print()
     floattype = np.float64
     
     m, n = 400, 100
     Xtrain = np.random.rand(m, n)
     K = np.dot(Xtrain, Xtrain.T)
     ylen = 2
     Y = np.zeros((m, ylen), dtype=floattype)
     Y = np.random.rand(m, ylen)
     
     hoindices = [45]
     hoindices2 = [45, 50]
     hoindices3 = [45, 50, 55]
     hocompl = list(set(range(m)) - set(hoindices))
     
     Kho = K[np.ix_(hocompl, hocompl)]
     Yho = Y[hocompl]
     
     kwargs = {}
     kwargs['Y'] = Y
     kwargs['X'] = K
     kwargs['kernel'] = 'PrecomputedKernel'
     dualrls = RLS(**kwargs)
     
     kwargs = {}
     kwargs["X"] = Xtrain
     kwargs["Y"] = Y
     kwargs["bias"] = 0.
     primalrls = RLS(**kwargs)
     
     kwargs = {}
     kwargs['Y'] = Yho
     kwargs['X'] = Kho
     kwargs['kernel'] = 'PrecomputedKernel'
     dualrls_naive = RLS(**kwargs)
     
     testkm = K[np.ix_(hocompl, hoindices)]
     trainX = Xtrain[hocompl]
     testX = Xtrain[hoindices]
     kwargs = {}
     kwargs['Y'] = Yho
     kwargs['X'] = trainX
     kwargs["bias"] = 0.
     primalrls_naive = RLS(**kwargs)
     
     loglambdas = range(-5, 5)
     for j in range(0, len(loglambdas)):
         regparam = 2. ** loglambdas[j]
          print()
         print("Regparam 2^%1d" % loglambdas[j])
         
         dumbho = np.dot(testkm.T, np.dot(la.inv(Kho + regparam * np.eye(Kho.shape[0])), Yho))
         dumbho = np.squeeze(dumbho)
         print(str(dumbho) + ' Dumb HO (dual)')
         
         dualrls_naive.solve(regparam)
         predho1 = dualrls_naive.predictor.predict(testkm.T)
         print(str(predho1) + ' Naive HO (dual)')
         
         dualrls.solve(regparam)
         predho2 = dualrls.holdout(hoindices)
         print(str(predho2) + ' Fast HO (dual)')
         
         dualrls.solve(regparam)
         predho = dualrls.leave_one_out()[hoindices[0]]
         print(str(predho) + ' Fast LOO (dual)')
         
         primalrls_naive.solve(regparam)
         predho3 = primalrls_naive.predictor.predict(testX)
         print(str(predho3) + ' Naive HO (primal)')
         
         primalrls.solve(regparam)
         predho4 = primalrls.holdout(hoindices)
         print(str(predho4) + ' Fast HO (primal)')
         for predho in [predho1, predho2, predho3, predho4]:
             self.assertEqual(dumbho.shape, predho.shape)
             assert_allclose(dumbho, predho)
             #for row in range(predho.shape[0]):
             #    for col in range(predho.shape[1]):
             #        self.assertAlmostEqual(dumbho[row,col],predho[row,col])
         primalrls.solve(regparam)
         predho = primalrls.leave_one_out()[hoindices[0]]
         print(str(predho) + ' Fast LOO (primal)')
     print()
     hoindices = range(100, 300)
     hocompl = list(set(range(m)) - set(hoindices))
     
     Kho = K[np.ix_(hocompl, hocompl)]
     Yho = Y[hocompl]
     testkm = K[np.ix_(hocompl, hoindices)]
     
     dumbho = np.dot(testkm.T, np.dot(la.inv(Kho + regparam * np.eye(Kho.shape[0])), Yho))
     
     kwargs = {}
     kwargs['Y'] = Y
     kwargs['X'] = Xtrain
     dualrls.solve(regparam)
     predho2 = dualrls.holdout(hoindices2)
     print(str(predho2) + ' Fast HO')
     hopred = dualrls.leave_pair_out(np.array([hoindices2[0], 4, 6]), np.array([hoindices2[1], 5, 7]))
     print(str(hopred[0][0]) + '\n' + str(hopred[1][0]) + ' Fast LPO')
Code example #34
File: test_rls.py Project: aatapa/RLScore
 def test_holdout(self):
     for X in [self.Xtrain1, self.Xtrain2]:
         for Y in [self.Ytrain1, self.Ytrain2]:
             m = X.shape[0]
             hoindices = [3, 5, 8, 10, 17, 21]
             hocompl = list(set(range(m)) - set(hoindices))
             #Holdout with linear kernel
             rls1 = RLS(X, Y)
             rls2 = RLS(X[hocompl], Y[hocompl])
             P1 = rls1.holdout(hoindices)
             P2 = rls2.predict(X[hoindices])
             assert_allclose(P1, P2)
             #Holdout with bias
             rls1 = RLS(X, Y, bias = 3.0)
             rls2 = RLS(X[hocompl], Y[hocompl], bias = 3.0)
             P1 = rls1.holdout(hoindices)
             P2 = rls2.predict(X[hoindices])
             assert_allclose(P1, P2)
             #Fast regularization
             for i in range(-15, 15):
                 rls1.solve(2**i)
                 rls2.solve(2**i)
                 P1 = rls1.holdout(hoindices)
                 P2 = rls2.predict(X[hoindices])
                 assert_allclose(P1, P2)
             #Kernel holdout
             rls1 = RLS(X, Y, kernel = "GaussianKernel", gamma = 0.01)
             rls2 = RLS(X[hocompl], Y[hocompl], kernel = "GaussianKernel", gamma = 0.01)
             P1 = rls1.holdout(hoindices)
             P2 = rls2.predict(X[hoindices])
             assert_allclose(P1, P2)
             for i in range(-15, 15):
                 rls1.solve(2**i)
                 rls2.solve(2**i)
                 P1 = rls1.holdout(hoindices)
                 P2 = rls2.predict(X[hoindices])
                 assert_allclose(P1, P2)
             #Incorrect indices
             I = [0, 3, 100]
             self.assertRaises(IndexError, rls1.holdout, I)
             I = [-1, 0, 2]
             self.assertRaises(IndexError, rls1.holdout, I)
             I = [1,1,2]
             self.assertRaises(IndexError, rls1.holdout, I)
Code example #35
File: test_rls.py Project: vivian457/RLScore
 def test_loo(self):
     for X in [self.Xtrain1, self.Xtrain2]:
         for Y in [self.Ytrain1, self.Ytrain2]:
             m = X.shape[0]
             #LOO with linear kernel
             rls1 = RLS(X, Y, regparam=7.0, bias=3.0)
             P1 = rls1.leave_one_out()
             P2 = []
             for i in range(X.shape[0]):
                 X_train = np.delete(X, i, axis=0)
                 Y_train = np.delete(Y, i, axis=0)
                 X_test = X[i]
                 rls2 = RLS(X_train, Y_train, regparam=7.0, bias=3.0)
                 P2.append(rls2.predict(X_test))
             P2 = np.array(P2)
             assert_allclose(P1, P2)
             #Fast regularization
             rls1.solve(1024)
             P1 = rls1.leave_one_out()
             P2 = []
             for i in range(X.shape[0]):
                 X_train = np.delete(X, i, axis=0)
                 Y_train = np.delete(Y, i, axis=0)
                 X_test = X[i]
                 rls2 = RLS(X_train, Y_train, regparam=1024, bias=3.0)
                 P2.append(rls2.predict(X_test))
             P2 = np.array(P2)
             assert_allclose(P1, P2)
             #kernels
             rls1 = RLS(X, Y, kernel="GaussianKernel", gamma=0.01)
             P1 = rls1.leave_one_out()
             P2 = []
             for i in range(X.shape[0]):
                 X_train = np.delete(X, i, axis=0)
                 Y_train = np.delete(Y, i, axis=0)
                 X_test = X[i]
                 rls2 = RLS(X_train,
                            Y_train,
                            kernel="GaussianKernel",
                            gamma=0.01)
                 P2.append(rls2.predict(X_test))
             P2 = np.array(P2)
             assert_allclose(P1, P2)
Code example #36
class LooRLS(object):
    def __init__(self):
        self.learner = None
        self.y_src = None
        self.measure = None

    def fit(self,
            X_src,
            y_src,
            X_tgt_known,
            y_tgt_known,
            X_tgt_unknown,
            y_tgt_unknown,
            verbose=False):
        # Map labels from set {1,2,3} to one-vs-all encoding

        if np.count_nonzero(y_src) >= len(y_src):
            zerolabels = False
        else:
            zerolabels = True

        y_src = to_one_vs_all(y_src, zerolabels)

        regparams = [2.**i for i in range(-15, 16)]
        if len(np.unique(y_src)) > 2:
            self.measure = ova_accuracy
        else:
            self.measure = accuracy

        self.learner = LeaveOneOutRLS(X_src,
                                      y_src,
                                      regparams=regparams,
                                      measure=self.measure)
        p_tgt = self.learner.predict(X_tgt_known)
        # ova_accuracy computes one-vs-all classification accuracy directly between transformed
        # class label matrix, and a matrix of predictions, where each column corresponds to a class
        self.learner = RLS(X_src, y_src)
        best_regparam = None
        best_accuracy = 0.
        # exponential grid of possible regparam values
        log_regparams = range(-15, 16)
        for log_regparam in log_regparams:
            regparam = 2.**log_regparam
            # RLS is re-trained with the new regparam, this
            # is very fast due to computational short-cut
            self.learner.solve(regparam)
            # Leave-one-out cross-validation predictions, this is fast due to
            # computational short-cut
            P_loo = self.learner.leave_one_out()
            acc = self.measure(y_src, P_loo)
            if verbose == True:
                print("LooRLS regparam 2**%d, loo-accuracy %f" %
                      (log_regparam, acc))
            if acc > best_accuracy:
                best_accuracy = acc
                best_regparam = regparam
        self.learner.solve(best_regparam)
        if verbose == True:
            print("LooRLS best regparam %f with loo-accuracy %f" %
                  (best_regparam, best_accuracy))

    def predict(self, X, y=None):
        ypred = self.learner.predict(X)
        if y is not None:
            if np.count_nonzero(y) >= len(y):
                zerolabels = False
            else:
                zerolabels = True
            y = to_one_vs_all(y, zerolabels)
            return ypred, self.measure(y, ypred)
        return ypred
Code example #37
File: test_rls.py Project: vivian457/RLScore
 def test_linear(self):
     #Test that learning with linear kernel works correctly both
     #with low and high-dimensional data
     for X in [self.Xtrain1, self.Xtrain2]:
         for Y in [self.Ytrain1, self.Ytrain2]:
             #Basic case
             primal_rls = RLS(X, Y, regparam=1.0, bias=0.)
             W = primal_rls.predictor.W
             d = X.shape[1]
             W2 = np.linalg.solve(
                 np.dot(X.T, X) + np.eye(d), np.dot(X.T, Y))
             assert_allclose(W, W2)
             #Fast regularization algorithm
             primal_rls.solve(10.)
             W = primal_rls.predictor.W
             W2 = np.linalg.solve(
                 np.dot(X.T, X) + 10. * np.eye(d), np.dot(X.T, Y))
             assert_allclose(W, W2)
             #Bias term included
             primal_rls = RLS(X, Y, regparam=1.0, bias=2.)
             O = np.sqrt(2.) * np.ones((X.shape[0], 1))
             X_new = np.hstack((X, O))
             W = primal_rls.predictor.W
             W2 = np.linalg.solve(
                 np.dot(X_new.T, X_new) + np.eye(d + 1), np.dot(X_new.T, Y))
             b = primal_rls.predictor.b
             b2 = W2[-1]
             W2 = W2[:-1]
             assert_allclose(W, W2)
             assert_allclose(b, np.sqrt(2) * b2)
             #reduced set approximation
             primal_rls = RLS(X,
                              Y,
                              basis_vectors=X[self.bvectors],
                              regparam=5.0,
                              bias=2.)
             W = primal_rls.predictor.W
             b = primal_rls.predictor.b
             K = np.dot(X_new, X_new.T)
             Kr = K[:, self.bvectors]
             Krr = K[np.ix_(self.bvectors, self.bvectors)]
             A = np.linalg.solve(
                 np.dot(Kr.T, Kr) + 5.0 * Krr, np.dot(Kr.T, Y))
             W2 = np.dot(X_new[self.bvectors].T, A)
             b2 = W2[-1]
             W2 = W2[:-1]
             assert_allclose(W, W2)
             assert_allclose(b, np.sqrt(2) * b2)
             #Using pre-computed linear kernel matrix
             kernel = LinearKernel(X, bias=2.)
             K = kernel.getKM(X)
             dual_rls = RLS(K, Y, kernel="PrecomputedKernel", regparam=0.01)
             W = np.dot(X_new.T, dual_rls.predictor.W)
             b = W[-1]
             W = W[:-1]
             W2 = np.linalg.solve(
                 np.dot(X_new.T, X_new) + 0.01 * np.eye(d + 1),
                 np.dot(X_new.T, Y))
             b2 = W2[-1]
             W2 = W2[:-1]
             assert_allclose(W, W2)
             assert_allclose(b, b2)
             #Pre-computed linear kernel, reduced set approximation
             kernel = LinearKernel(X[self.bvectors], bias=2.)
             dual_rls = RLS(kernel.getKM(X),
                            Y,
                            kernel="PrecomputedKernel",
                            basis_vectors=kernel.getKM(X[self.bvectors]),
                            regparam=5.0)
             W = np.dot(X_new[self.bvectors].T, dual_rls.predictor.W)
             b = W[-1]
             W = W[:-1]
             K = np.dot(X_new, X_new.T)
             Kr = K[:, self.bvectors]
             Krr = K[np.ix_(self.bvectors, self.bvectors)]
             A = np.linalg.solve(
                 np.dot(Kr.T, Kr) + 5.0 * Krr, np.dot(Kr.T, Y))
             W2 = np.dot(X_new[self.bvectors].T, A)
             b2 = W2[-1]
             W2 = W2[:-1]
             assert_allclose(W, W2)
             assert_allclose(b, b2)
Code example #38
 def testRLS(self):
     
     print
     print
     print
     print
     print "Testing the cross-validation routines of the RLS module."
     print
     print
     floattype = float64
     
     m, n = 100, 300
     Xtrain = random.rand(m, n)
     ylen = 1
     Y = mat(zeros((m, ylen), dtype=floattype))
     Y = mat(random.rand(m, 1))
     basis_vectors = [0,3,7,8]
     
     def complement(indices, m):
         compl = range(m)
         for ind in indices:
             compl.remove(ind)
         return compl
     
     #hoindices = [45, 50, 55]
     hoindices = [0, 1, 2]
     hocompl = complement(hoindices, m)
     
     #bk = LinearKernel.Kernel()
     #bk = GaussianKernel.Kernel()
     bk = GaussianKernel.createKernel(**{'train_features':Xtrain[basis_vectors], 'gamma':'0.001'})
     rk = RsetKernel.createKernel(**{'base_kernel':bk, 'basis_features':Xtrain[basis_vectors], 'train_features':Xtrain})
     
     rpool = {}
     rpool['train_features'] = Xtrain
     bk2 = GaussianKernel.createKernel(**{'train_features':Xtrain, 'gamma':'0.001'})
     K = np.mat(bk2.getKM(Xtrain))
     
     Kho = K[ix_(hocompl, hocompl)]
     Yho = Y[hocompl]
     
     #rpool = {}
     #rpool['train_labels'] = Y
     #rpool['kernel_matrix'] = K[basis_vectors]
     #rpool['basis_vectors'] = basis_vectors
     #dualrls = RLS.createLearner(**rpool)
     
     rpool = {}
     rpool['train_labels'] = Y
     rpool['train_features'] = Xtrain
     rpool['basis_vectors'] = Xtrain[basis_vectors]
     primalrls = RLS.createLearner(**rpool)
     
     testkm = K[ix_(hocompl, hoindices)]
     Xhocompl = Xtrain[hocompl]
     testX = Xtrain[hoindices]
     
     rpool = {}
     rpool['train_labels'] = Yho
     rpool['train_features'] = Xhocompl
     rk = RsetKernel.createKernel(**{'base_kernel':bk, 'basis_features':Xtrain[basis_vectors], 'train_features':Xhocompl})
     rpool['kernel_obj'] = rk
     dualrls_naive = RLS.createLearner(**rpool)
     
     rpool = {}
     rpool['train_labels'] = Yho
     rpool['train_features'] = Xhocompl
     primalrls_naive = RLS.createLearner(**rpool)
     
     rsaK = K[:, basis_vectors] * la.inv(K[ix_(basis_vectors, basis_vectors)]) * K[basis_vectors]
     rsaKho = rsaK[ix_(hocompl, hocompl)]
     rsa_testkm = rsaK[ix_(hocompl, hoindices)]
     loglambdas = range(-5, 5)
     for j in range(0, len(loglambdas)):
         regparam = 2. ** loglambdas[j]
         print
         print "Regparam 2^%1d" % loglambdas[j]
         
         print (rsa_testkm.T * la.inv(rsaKho + regparam * eye(rsaKho.shape[0])) * Yho).T, 'Dumb HO (dual)'
         dumbho = np.squeeze(np.array(rsa_testkm.T * la.inv(rsaKho + regparam * eye(rsaKho.shape[0])) * Yho))
         
         dualrls_naive.solve(regparam)
         predho1 = np.squeeze(dualrls_naive.getModel().predict(testX))
         print predho1.T, 'Naive HO (dual)'
         
         #dualrls.solve(regparam)
         #predho2 = np.squeeze(dualrls.computeHO(hoindices))
         #print predho2.T, 'Fast HO (dual)'
         
         for predho in [dumbho, predho1]:#, predho2]:
             self.assertEqual(dumbho.shape, predho.shape)
             for row in range(predho.shape[0]):
                 #for col in range(predho.shape[1]):
                 #    self.assertAlmostEqual(dumbho[row,col],predho[row,col])
                     self.assertAlmostEqual(dumbho[row],predho[row])
Code example #39
File: test_rls.py Project: lenovor/RLScore
 def testRLS(self):
     
     print
     print
     print
     print
     print "Testing the cross-validation routines of the RLS module."
     print
     print
     floattype = float64
     
     m, n = 400, 100
     Xtrain = mat(random.rand(m, n))
     K = Xtrain * Xtrain.T
     ylen = 2
     Y = mat(zeros((m, ylen), dtype=floattype))
     Y = mat(random.rand(m, ylen))
     
     #hoindices = [45, 50, 55]
     hoindices = [45]
     hocompl = list(set(range(m)) - set(hoindices))
     
     Kho = K[ix_(hocompl, hocompl)]
     Yho = Y[hocompl]
     
     kwargs = {}
     kwargs['train_labels'] = Y
     kwargs['kernel_matrix'] = K
     dualrls = RLS.createLearner(**kwargs)
     
     kwargs = {}
     kwargs["train_features"] = Xtrain
     kwargs["train_labels"] = Y
     primalrls = RLS.createLearner(**kwargs)
     
     kwargs = {}
     kwargs['train_labels'] = Yho
     kwargs['kernel_matrix'] = Kho
     dualrls_naive = RLS.createLearner(**kwargs)
     
     testkm = K[ix_(hocompl, hoindices)]
     trainX = Xtrain[hocompl]
     testX = Xtrain[hoindices]
     kwargs = {}
     kwargs['train_labels'] = Yho
     kwargs['train_features'] = trainX
     primalrls_naive = RLS.createLearner(**kwargs)
     
     loglambdas = range(-5, 5)
     for j in range(0, len(loglambdas)):
         regparam = 2. ** loglambdas[j]
         print
         print "Regparam 2^%1d" % loglambdas[j]
         
         dumbho = testkm.T * la.inv(Kho + regparam * eye(Kho.shape[0])) * Yho
         print dumbho, 'Dumb HO (dual)'
         
         dualrls_naive.solve(regparam)
         predho1 = dualrls_naive.getModel().predict(testkm.T)
         print predho1, 'Naive HO (dual)'
         
         dualrls.solve(regparam)
         predho2 = dualrls.computeHO(hoindices)
         print predho2, 'Fast HO (dual)'
         
         dualrls.solve(regparam)
         predho = dualrls.computeLOO()[hoindices[0]]
         print predho, 'Fast LOO (dual)'
         
         primalrls_naive.solve(regparam)
         predho3 = primalrls_naive.getModel().predict(testX)
         print predho3, 'Naive HO (primal)'
         
         primalrls.solve(regparam)
         predho4 = primalrls.computeHO(hoindices)
         print predho4, 'Fast HO (primal)'
         for predho in [predho1, predho2, predho3, predho4]:
             self.assertEqual(dumbho.shape, predho.shape)
             for row in range(predho.shape[0]):
                 for col in range(predho.shape[1]):
                     self.assertAlmostEqual(dumbho[row,col],predho[row,col])
         primalrls.solve(regparam)
         predho = primalrls.computeLOO()[hoindices[0]]
         print predho, 'Fast LOO (primal)'
     print
     hoindices = range(100, 300)
     hocompl = list(set(range(m)) - set(hoindices))
     
     Kho = K[ix_(hocompl, hocompl)]
     Yho = Y[hocompl]
     testkm = K[ix_(hocompl, hoindices)]
     
     dumbho = testkm.T * la.inv(Kho + regparam * eye(Kho.shape[0])) * Yho
     
     kwargs = {}
     kwargs['train_labels'] = Yho
     kwargs['kernel_matrix'] = Kho
     dualrls_naive = RLS.createLearner(**kwargs)
     dualrls_naive.solve(regparam)
     predho1 = dualrls_naive.getModel().predict(testkm.T)
     print sum(abs(predho1-dumbho)), 'Naive HO (dual)'
     
     dualrls.solve(regparam)
     predho2 = dualrls.computeHO(hoindices)
     print sum(abs(predho2-dumbho)), 'Fast HO (dual)'
Code example #40
File: test_rls.py Project: aatapa/RLScore
 def test_loo(self):
     for X in [self.Xtrain1, self.Xtrain2]:
         for Y in [self.Ytrain1, self.Ytrain2]:
             m = X.shape[0]
             #LOO with linear kernel
             rls1 = RLS(X, Y, regparam = 7.0, bias=3.0)
             P1 = rls1.leave_one_out()
             P2 = []
             for i in range(X.shape[0]): 
                 X_train = np.delete(X, i, axis=0)
                 Y_train = np.delete(Y, i, axis=0)
                 X_test = X[i]
                 rls2 = RLS(X_train, Y_train, regparam = 7.0, bias = 3.0)
                 P2.append(rls2.predict(X_test))
             P2 = np.array(P2)
             assert_allclose(P1, P2)
             #Fast regularization
             rls1.solve(1024)
             P1 = rls1.leave_one_out()
             P2 = []
             for i in range(X.shape[0]): 
                 X_train = np.delete(X, i, axis=0)
                 Y_train = np.delete(Y, i, axis=0)
                 X_test = X[i]
                 rls2 = RLS(X_train, Y_train, regparam = 1024, bias = 3.0)
                 P2.append(rls2.predict(X_test))
             P2 = np.array(P2)
             assert_allclose(P1, P2)
             #kernels
             rls1 = RLS(X, Y, kernel = "GaussianKernel", gamma = 0.01)
             P1 = rls1.leave_one_out()
             P2 = []
             for i in range(X.shape[0]): 
                 X_train = np.delete(X, i, axis=0)
                 Y_train = np.delete(Y, i, axis=0)
                 X_test = X[i]
                 rls2 = RLS(X_train, Y_train, kernel = "GaussianKernel", gamma = 0.01)
                 P2.append(rls2.predict(X_test))
             P2 = np.array(P2)
             assert_allclose(P1, P2)