def train_rls():
    #Trains RLS for binary classification, selecting the regularization
    #parameter with fast leave-one-out cross-validation.
    #Assumes RLScore-style imports, e.g.:
    #  from rlscore.learner import RLS
    #  from rlscore.measure import accuracy
    #  from rlscore.utilities.reader import read_svmlight
    X_train, Y_train, foo = read_svmlight("a1a.t")
    X_test, Y_test, foo = read_svmlight("a1a", X_train.shape[1])
    learner = RLS(X_train, Y_train)
    best_regparam = None
    best_accuracy = 0.
    #exponential grid of possible regparam values
    log_regparams = range(-15, 16)
    for log_regparam in log_regparams:
        regparam = 2.**log_regparam
        #RLS is re-trained with the new regparam; this is
        #very fast due to a computational short-cut
        learner.solve(regparam)
        #Leave-one-out cross-validation predictions, also fast
        #due to a computational short-cut
        P_loo = learner.leave_one_out()
        acc = accuracy(Y_train, P_loo)
        print("regparam 2**%d, loo-accuracy %f" % (log_regparam, acc))
        if acc > best_accuracy:
            best_accuracy = acc
            best_regparam = regparam
    learner.solve(best_regparam)
    P_test = learner.predict(X_test)
    print("best regparam %f with loo-accuracy %f" % (best_regparam, best_accuracy))
    print("test set accuracy %f" % accuracy(Y_test, P_test))
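#A minimal sketch of the accuracy semantics assumed above (an
#illustration, not RLScore's exact implementation): the real-valued RLS
#outputs are thresholded at zero against the -1/+1 labels.
import numpy as np

def binary_accuracy(Y, P):
    #fraction of examples whose prediction sign matches the label sign
    return np.mean(np.sign(Y) == np.sign(P))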
def train_rls():
    #Selects both the gamma parameter for the Gaussian kernel and the
    #regparam with leave-one-out cross-validation.
    #load_housing is a helper defined elsewhere in the tutorials; it loads
    #the Boston housing data as train/test arrays.
    X_train, Y_train, X_test, Y_test = load_housing()
    regparams = [2.**i for i in range(-15, 16)]
    gammas = regparams
    best_regparam = None
    best_gamma = None
    best_error = float("inf")
    for gamma in gammas:
        #New RLS is initialized for each kernel parameter
        learner = RLS(X_train, Y_train, kernel="GaussianKernel", gamma=gamma)
        for regparam in regparams:
            #RLS is re-trained with the new regparam; this is
            #very fast due to a computational short-cut
            learner.solve(regparam)
            #Leave-one-out cross-validation predictions, also fast
            #due to a computational short-cut
            P_loo = learner.leave_one_out()
            e = sqerror(Y_train, P_loo)
            #print("regparam", regparam, "gamma", gamma, "loo-error", e)
            if e < best_error:
                best_error = e
                best_regparam = regparam
                best_gamma = gamma
    learner = RLS(X_train, Y_train, regparam=best_regparam,
                  kernel="GaussianKernel", gamma=best_gamma)
    P_test = learner.predict(X_test)
    print("best parameters gamma %f regparam %f" % (best_gamma, best_regparam))
    print("best leave-one-out error %f" % best_error)
    print("test error %f" % sqerror(Y_test, P_test))
def train_rls():
    #Select regparam with leave-one-out cross-validation
    X_train, Y_train, X_test, Y_test = load_housing()
    learner = RLS(X_train, Y_train)
    best_regparam = None
    best_error = float("inf")
    #exponential grid of possible regparam values
    log_regparams = range(-15, 16)
    for log_regparam in log_regparams:
        regparam = 2.**log_regparam
        #RLS is re-trained with the new regparam; this is
        #very fast due to a computational short-cut
        learner.solve(regparam)
        #Leave-one-out cross-validation predictions, also fast
        #due to a computational short-cut
        P_loo = learner.leave_one_out()
        e = sqerror(Y_train, P_loo)
        print("regparam 2**%d, loo-error %f" % (log_regparam, e))
        if e < best_error:
            best_error = e
            best_regparam = regparam
    learner.solve(best_regparam)
    P_test = learner.predict(X_test)
    print("best regparam %f with loo-error %f" % (best_regparam, best_error))
    print("test error %f" % sqerror(Y_test, P_test))
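#RLScore also packages this grid search as a convenience class; a sketch
#using LeaveOneOutRLS (the same class the LooRLS wrapper at the end of
#this section relies on), assuming it selects the grid point that is best
#according to the given measure:
from rlscore.learner import LeaveOneOutRLS
from rlscore.measure import sqerror

def train_rls_loocv():
    X_train, Y_train, X_test, Y_test = load_housing()
    regparams = [2.**i for i in range(-15, 16)]
    learner = LeaveOneOutRLS(X_train, Y_train, regparams=regparams,
                             measure=sqerror)
    P_test = learner.predict(X_test)
    print("test error %f" % sqerror(Y_test, P_test))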
def test_kernel(self):
    #tests that learning with kernels works
    #assumes, e.g.: import numpy as np
    #               from numpy.testing import assert_allclose
    #               from rlscore.kernel import GaussianKernel, PolynomialKernel
    for X in [self.Xtrain1, self.Xtrain2]:
        for Y in [self.Ytrain1, self.Ytrain2]:
            #Basic case
            dual_rls = RLS(X, Y, kernel="GaussianKernel", regparam=5.0, gamma=0.01)
            kernel = GaussianKernel(X, gamma=0.01)
            K = kernel.getKM(X)
            m = K.shape[0]
            A = dual_rls.predictor.A
            A2 = np.linalg.solve(K + 5.0 * np.eye(m), Y)
            assert_allclose(A, A2)
            #Fast regularization
            dual_rls.solve(1000)
            A = dual_rls.predictor.A
            A2 = np.linalg.solve(K + 1000 * np.eye(m), Y)
            assert_allclose(A, A2)
            #Precomputed kernel
            dual_rls = RLS(K, Y, kernel="PrecomputedKernel", regparam=1000)
            assert_allclose(dual_rls.predictor.W, A2)
            #Reduced set approximation
            kernel = PolynomialKernel(X[self.bvectors], gamma=0.5, coef0=1.2, degree=2)
            Kr = kernel.getKM(X)
            Krr = kernel.getKM(X[self.bvectors])
            dual_rls = RLS(X, Y, kernel="PolynomialKernel",
                           basis_vectors=X[self.bvectors], regparam=200,
                           gamma=0.5, coef0=1.2, degree=2)
            A = dual_rls.predictor.A
            A2 = np.linalg.solve(np.dot(Kr.T, Kr) + 200 * Krr, np.dot(Kr.T, Y))
            assert_allclose(A, A2)
            dual_rls = RLS(Kr, Y, kernel="PrecomputedKernel",
                           basis_vectors=Krr, regparam=200)
            A = dual_rls.predictor.W
            assert_allclose(A, A2)
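#For reference, the closed forms the assertions above verify
#(lambda = regparam):
#  dual coefficients:  A = (K + lambda*I)^{-1} Y
#  reduced set approximation, with K_r the kernel matrix between all
#  training points and the basis vectors, and K_rr the kernel matrix
#  between the basis vectors themselves:
#                      A = (K_r^T K_r + lambda*K_rr)^{-1} K_r^T Y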
def plot_rls():
    #Plots leave-one-out, leave-sentence-out (k-fold) and test errors as a
    #function of the regparam; instances related to a single sentence
    #together form a fold. Assumes matplotlib.pyplot imported as plt.
    X_train = read_sparse("train_2000_x.txt")
    Y_train = np.loadtxt("train_2000_y.txt")
    X_test = read_sparse("test_2000_x.txt", X_train.shape[1])
    Y_test = np.loadtxt("test_2000_y.txt")
    #list of sentence ids
    ids = np.loadtxt("train_2000_qids.txt")
    #mapped to a list of lists, where each list
    #contains indices for one fold
    folds = map_ids(ids)
    learner = RLS(X_train, Y_train)
    #exponential grid of possible regparam values
    log_regparams = range(-15, 16)
    kfold_errors = []
    loo_errors = []
    test_errors = []
    for log_regparam in log_regparams:
        regparam = 2.**log_regparam
        #RLS is re-trained with the new regparam; this is
        #very fast due to a computational short-cut
        learner.solve(regparam)
        #K-fold cross-validation
        perfs = []
        for fold in folds:
            #computes holdout predictions, where instances
            #in the fold are left out of the training set
            P = learner.holdout(fold)
            perfs.append(sqerror(Y_train[fold], P))
        e_kfold = np.mean(perfs)
        kfold_errors.append(e_kfold)
        P_loo = learner.leave_one_out()
        e_loo = sqerror(Y_train, P_loo)
        loo_errors.append(e_loo)
        P_test = learner.predict(X_test)
        e_test = sqerror(Y_test, P_test)
        test_errors.append(e_test)
    plt.semilogy(log_regparams, loo_errors, label="leave-one-out")
    plt.semilogy(log_regparams, kfold_errors, label="leave-sentence-out")
    plt.semilogy(log_regparams, test_errors, label="test error")
    plt.xlabel(r"$\log_2(\lambda)$")
    plt.ylabel("mean squared error")
    plt.legend(loc=3)
    plt.show()
def test_loo(self):
    #compares fast leave-one-out against naive retraining per example
    for X in [self.Xtrain1, self.Xtrain2]:
        for Y in [self.Ytrain1, self.Ytrain2]:
            #LOO with linear kernel
            rls1 = RLS(X, Y, regparam=7.0, bias=3.0)
            P1 = rls1.leave_one_out()
            P2 = []
            for i in range(X.shape[0]):
                X_train = np.delete(X, i, axis=0)
                Y_train = np.delete(Y, i, axis=0)
                X_test = X[i]
                rls2 = RLS(X_train, Y_train, regparam=7.0, bias=3.0)
                P2.append(rls2.predict(X_test))
            P2 = np.array(P2)
            assert_allclose(P1, P2)
            #Fast regularization
            rls1.solve(1024)
            P1 = rls1.leave_one_out()
            P2 = []
            for i in range(X.shape[0]):
                X_train = np.delete(X, i, axis=0)
                Y_train = np.delete(Y, i, axis=0)
                X_test = X[i]
                rls2 = RLS(X_train, Y_train, regparam=1024, bias=3.0)
                P2.append(rls2.predict(X_test))
            P2 = np.array(P2)
            assert_allclose(P1, P2)
            #kernels
            rls1 = RLS(X, Y, kernel="GaussianKernel", gamma=0.01)
            P1 = rls1.leave_one_out()
            P2 = []
            for i in range(X.shape[0]):
                X_train = np.delete(X, i, axis=0)
                Y_train = np.delete(Y, i, axis=0)
                X_test = X[i]
                rls2 = RLS(X_train, Y_train, kernel="GaussianKernel", gamma=0.01)
                P2.append(rls2.predict(X_test))
            P2 = np.array(P2)
            assert_allclose(P1, P2)
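#The naive loops above retrain the model once per held-out example; the
#short-cut behind leave_one_out() is the standard ridge-regression
#hat-matrix identity. A minimal plain-NumPy sketch for the no-bias linear
#case (an illustration, not RLScore's implementation):
import numpy as np

def loo_ridge(X, y, regparam):
    #hat matrix H = X (X^T X + lambda*I)^{-1} X^T; full-data fit is H @ y.
    #y is a 1-D target vector.
    H = X @ np.linalg.solve(X.T @ X + regparam * np.eye(X.shape[1]), X.T)
    h = np.diag(H)
    #identity: y_hat_{-i} = (y_hat_i - h_ii * y_i) / (1 - h_ii)
    return (H @ y - h * y) / (1.0 - h)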
def testCGRLS(self):
    #checks that the conjugate-gradient solver CGRLS agrees with the
    #exact RLS solution; n > m exercises the high-dimensional case
    m, n = 100, 300
    for regparam in [0.00000001, 1, 100000000]:
        Xtrain = np.mat(np.random.rand(m, n))
        Y = np.mat(np.random.rand(m, 1))
        rpool = {}
        rpool['X'] = Xtrain
        rpool['Y'] = Y
        rpool['regparam'] = regparam
        rpool["bias"] = 2.0
        rls = RLS(**rpool)
        rls.solve(regparam)
        model = rls.predictor
        W = model.W
        b = model.b
        rls = CGRLS(**rpool)
        model = rls.predictor
        W2 = model.W
        b2 = model.b
        for i in range(W.shape[0]):
            self.assertAlmostEqual(W[i], W2[i], places=5)
        self.assertAlmostEqual(b, b2, places=5)
def train_rls():
    #Select regparam with k-fold cross-validation,
    #where instances related to a single sentence
    #together form a fold
    X_train = read_sparse("train_2000_x.txt")
    Y_train = np.loadtxt("train_2000_y.txt")
    X_test = read_sparse("test_2000_x.txt", X_train.shape[1])
    Y_test = np.loadtxt("test_2000_y.txt")
    #list of sentence ids
    ids = np.loadtxt("train_2000_qids.txt")
    #mapped to a list of lists, where each list
    #contains indices for one fold
    folds = map_ids(ids)
    learner = RLS(X_train, Y_train)
    best_regparam = None
    best_error = float("inf")
    #exponential grid of possible regparam values
    log_regparams = range(-15, 16)
    for log_regparam in log_regparams:
        regparam = 2.**log_regparam
        #RLS is re-trained with the new regparam; this is
        #very fast due to a computational short-cut
        learner.solve(regparam)
        #K-fold cross-validation
        P = np.zeros(Y_train.shape)
        for fold in folds:
            #computes holdout predictions, where instances
            #in the fold are left out of the training set
            P[fold] = learner.holdout(fold)
        e = sqerror(Y_train, P)
        print("regparam 2**%d, k-fold error %f" % (log_regparam, e))
        if e < best_error:
            best_error = e
            best_regparam = regparam
    learner.solve(best_regparam)
    P_test = learner.predict(X_test)
    print("best regparam %f k-fold error %f" % (best_regparam, best_error))
    print("test error %f" % sqerror(Y_test, P_test))
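#map_ids is a small grouping helper; if it is not available, the behavior
#the tutorial relies on is easy to reproduce. A sketch (an assumption
#about the helper, not its exact source):
from collections import defaultdict

def map_ids(ids):
    #one list of sample indices per distinct id, so that all instances
    #of a sentence end up in the same cross-validation fold
    fold_map = defaultdict(list)
    for index, qid in enumerate(ids):
        fold_map[qid].append(index)
    return list(fold_map.values())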
def test_holdout(self):
    #compares fast holdout against naive retraining without the holdout set
    for X in [self.Xtrain1, self.Xtrain2]:
        for Y in [self.Ytrain1, self.Ytrain2]:
            m = X.shape[0]
            hoindices = [3, 5, 8, 10, 17, 21]
            hocompl = list(set(range(m)) - set(hoindices))
            #Holdout with linear kernel
            rls1 = RLS(X, Y)
            rls2 = RLS(X[hocompl], Y[hocompl])
            P1 = rls1.holdout(hoindices)
            P2 = rls2.predict(X[hoindices])
            assert_allclose(P1, P2)
            #Holdout with bias
            rls1 = RLS(X, Y, bias=3.0)
            rls2 = RLS(X[hocompl], Y[hocompl], bias=3.0)
            P1 = rls1.holdout(hoindices)
            P2 = rls2.predict(X[hoindices])
            assert_allclose(P1, P2)
            #Fast regularization
            for i in range(-15, 15):
                rls1.solve(2**i)
                rls2.solve(2**i)
                P1 = rls1.holdout(hoindices)
                P2 = rls2.predict(X[hoindices])
                assert_allclose(P1, P2)
            #Kernel holdout
            rls1 = RLS(X, Y, kernel="GaussianKernel", gamma=0.01)
            rls2 = RLS(X[hocompl], Y[hocompl], kernel="GaussianKernel", gamma=0.01)
            P1 = rls1.holdout(hoindices)
            P2 = rls2.predict(X[hoindices])
            assert_allclose(P1, P2)
            for i in range(-15, 15):
                rls1.solve(2**i)
                rls2.solve(2**i)
                P1 = rls1.holdout(hoindices)
                P2 = rls2.predict(X[hoindices])
                assert_allclose(P1, P2)
            #Incorrect indices
            I = [0, 3, 100]
            self.assertRaises(IndexError, rls1.holdout, I)
            I = [-1, 0, 2]
            self.assertRaises(IndexError, rls1.holdout, I)
            I = [1, 1, 2]
            self.assertRaises(IndexError, rls1.holdout, I)
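#As with leave-one-out, holdout() avoids retraining via a block
#generalization of the hat-matrix identity. A plain-NumPy sketch for the
#no-bias linear case (an illustration, not RLScore's implementation):
import numpy as np

def holdout_ridge(X, y, regparam, I):
    #full-data hat matrix H and full-data predictions F = H @ y
    H = X @ np.linalg.solve(X.T @ X + regparam * np.eye(X.shape[1]), X.T)
    F = H @ y
    #block identity: predictions for the held-out index set I, trained
    #without I, are (Id - H_II)^{-1} (F_I - H_II y_I)
    H_II = H[np.ix_(I, I)]
    return np.linalg.solve(np.eye(len(I)) - H_II, F[I] - H_II @ y[I])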
def testRLS(self):
    print("\n\n\n\nTesting the cross-validation routines of the RLS module.\n\n")
    floattype = np.float64
    m, n = 400, 100
    Xtrain = np.random.rand(m, n)
    K = np.dot(Xtrain, Xtrain.T)
    ylen = 2
    Y = np.random.rand(m, ylen).astype(floattype)
    hoindices = [45]
    hoindices2 = [45, 50]
    hocompl = list(set(range(m)) - set(hoindices))
    Kho = K[np.ix_(hocompl, hocompl)]
    Yho = Y[hocompl]
    dualrls = RLS(X=K, Y=Y, kernel='PrecomputedKernel')
    primalrls = RLS(X=Xtrain, Y=Y, bias=0.)
    dualrls_naive = RLS(X=Kho, Y=Yho, kernel='PrecomputedKernel')
    testkm = K[np.ix_(hocompl, hoindices)]
    trainX = Xtrain[hocompl]
    testX = Xtrain[hoindices]
    primalrls_naive = RLS(X=trainX, Y=Yho, bias=0.)
    loglambdas = range(-5, 5)
    for j in range(0, len(loglambdas)):
        regparam = 2.**loglambdas[j]
        print("\nRegparam 2^%1d" % loglambdas[j])
        dumbho = np.dot(testkm.T,
                        np.dot(la.inv(Kho + regparam * np.eye(Kho.shape[0])), Yho))
        dumbho = np.squeeze(dumbho)
        print(str(dumbho) + ' Dumb HO (dual)')
        dualrls_naive.solve(regparam)
        predho1 = dualrls_naive.predictor.predict(testkm.T)
        print(str(predho1) + ' Naive HO (dual)')
        dualrls.solve(regparam)
        predho2 = dualrls.holdout(hoindices)
        print(str(predho2) + ' Fast HO (dual)')
        predho = dualrls.leave_one_out()[hoindices[0]]
        print(str(predho) + ' Fast LOO (dual)')
        primalrls_naive.solve(regparam)
        predho3 = primalrls_naive.predictor.predict(testX)
        print(str(predho3) + ' Naive HO (primal)')
        primalrls.solve(regparam)
        predho4 = primalrls.holdout(hoindices)
        print(str(predho4) + ' Fast HO (primal)')
        for predho in [predho1, predho2, predho3, predho4]:
            self.assertEqual(dumbho.shape, predho.shape)
            assert_allclose(dumbho, predho)
        predho = primalrls.leave_one_out()[hoindices[0]]
        print(str(predho) + ' Fast LOO (primal)')
    print()
    hoindices = range(100, 300)
    hocompl = list(set(range(m)) - set(hoindices))
    Kho = K[np.ix_(hocompl, hocompl)]
    Yho = Y[hocompl]
    testkm = K[np.ix_(hocompl, hoindices)]
    dumbho = np.dot(testkm.T,
                    np.dot(la.inv(Kho + regparam * np.eye(Kho.shape[0])), Yho))
    dualrls.solve(regparam)
    predho2 = dualrls.holdout(hoindices2)
    print(str(predho2) + ' Fast HO')
    hopred = dualrls.leave_pair_out(np.array([hoindices2[0], 4, 6]),
                                    np.array([hoindices2[1], 5, 7]))
    print(str(hopred[0][0]) + '\n' + str(hopred[1][0]) + ' Fast LPO')
def test_linear(self):
    #Test that learning with the linear kernel works correctly, both
    #with low- and high-dimensional data
    for X in [self.Xtrain1, self.Xtrain2]:
        for Y in [self.Ytrain1, self.Ytrain2]:
            #Basic case
            primal_rls = RLS(X, Y, regparam=1.0, bias=0.)
            W = primal_rls.predictor.W
            d = X.shape[1]
            W2 = np.linalg.solve(np.dot(X.T, X) + np.eye(d), np.dot(X.T, Y))
            assert_allclose(W, W2)
            #Fast regularization algorithm
            primal_rls.solve(10.)
            W = primal_rls.predictor.W
            W2 = np.linalg.solve(np.dot(X.T, X) + 10. * np.eye(d), np.dot(X.T, Y))
            assert_allclose(W, W2)
            #Bias term included: equivalent to appending a constant
            #sqrt(bias) feature column
            primal_rls = RLS(X, Y, regparam=1.0, bias=2.)
            O = np.sqrt(2.) * np.ones((X.shape[0], 1))
            X_new = np.hstack((X, O))
            W = primal_rls.predictor.W
            W2 = np.linalg.solve(np.dot(X_new.T, X_new) + np.eye(d + 1),
                                 np.dot(X_new.T, Y))
            b = primal_rls.predictor.b
            b2 = W2[-1]
            W2 = W2[:-1]
            assert_allclose(W, W2)
            assert_allclose(b, np.sqrt(2) * b2)
            #Reduced set approximation
            primal_rls = RLS(X, Y, basis_vectors=X[self.bvectors],
                             regparam=5.0, bias=2.)
            W = primal_rls.predictor.W
            b = primal_rls.predictor.b
            K = np.dot(X_new, X_new.T)
            Kr = K[:, self.bvectors]
            Krr = K[np.ix_(self.bvectors, self.bvectors)]
            A = np.linalg.solve(np.dot(Kr.T, Kr) + 5.0 * Krr, np.dot(Kr.T, Y))
            W2 = np.dot(X_new[self.bvectors].T, A)
            b2 = W2[-1]
            W2 = W2[:-1]
            assert_allclose(W, W2)
            assert_allclose(b, np.sqrt(2) * b2)
            #Using a pre-computed linear kernel matrix
            kernel = LinearKernel(X, bias=2.)
            K = kernel.getKM(X)
            dual_rls = RLS(K, Y, kernel="PrecomputedKernel", regparam=0.01)
            W = np.dot(X_new.T, dual_rls.predictor.W)
            b = W[-1]
            W = W[:-1]
            W2 = np.linalg.solve(np.dot(X_new.T, X_new) + 0.01 * np.eye(d + 1),
                                 np.dot(X_new.T, Y))
            b2 = W2[-1]
            W2 = W2[:-1]
            assert_allclose(W, W2)
            assert_allclose(b, b2)
            #Pre-computed linear kernel, reduced set approximation
            kernel = LinearKernel(X[self.bvectors], bias=2.)
            dual_rls = RLS(kernel.getKM(X), Y, kernel="PrecomputedKernel",
                           basis_vectors=kernel.getKM(X[self.bvectors]),
                           regparam=5.0)
            W = np.dot(X_new[self.bvectors].T, dual_rls.predictor.W)
            b = W[-1]
            W = W[:-1]
            K = np.dot(X_new, X_new.T)
            Kr = K[:, self.bvectors]
            Krr = K[np.ix_(self.bvectors, self.bvectors)]
            A = np.linalg.solve(np.dot(Kr.T, Kr) + 5.0 * Krr, np.dot(Kr.T, Y))
            W2 = np.dot(X_new[self.bvectors].T, A)
            b2 = W2[-1]
            W2 = W2[:-1]
            assert_allclose(W, W2)
            assert_allclose(b, b2)
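#For reference, the primal closed forms the assertions above verify: with
#bias beta, RLS is equivalent to plain ridge regression on the augmented
#matrix X_new = [X, sqrt(beta)*1]:
#  W_aug = (X_new^T X_new + lambda*I)^{-1} X_new^T Y
#  W = first d components of W_aug
#  b = sqrt(beta) * (last component of W_aug)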
def testRLS(self):
    print("\n\n\n\nTesting the cross-validation routines of the RLS module.\n\n")
    m, n = 100, 300
    Xtrain = np.random.rand(m, n)
    Y = np.mat(np.random.rand(m, 1))
    basis_vectors = [0, 3, 7, 8]
    hoindices = [0, 1, 2]
    hocompl = list(set(range(m)) - set(hoindices))
    bk = GaussianKernel(X=Xtrain[basis_vectors], gamma=0.001)
    bk2 = GaussianKernel(X=Xtrain, gamma=0.001)
    K = np.mat(bk2.getKM(Xtrain))
    Yho = Y[hocompl]
    Xhocompl = Xtrain[hocompl]
    testX = Xtrain[hoindices]
    #naive learner: reduced set kernel trained on the holdout complement
    dualrls_naive = RLS(X=Xhocompl, Y=Yho, kernel="RsetKernel",
                        base_kernel=bk, basis_features=Xtrain[basis_vectors])
    #reduced set approximation of the full kernel matrix
    rsaK = K[:, basis_vectors] \
        * np.linalg.inv(K[np.ix_(basis_vectors, basis_vectors)]) \
        * K[basis_vectors]
    rsaKho = rsaK[np.ix_(hocompl, hocompl)]
    rsa_testkm = rsaK[np.ix_(hocompl, hoindices)]
    loglambdas = range(-5, 5)
    for j in range(0, len(loglambdas)):
        regparam = 2.**loglambdas[j]
        print("\nRegparam 2^%1d" % loglambdas[j])
        dumbho = np.squeeze(np.array(
            rsa_testkm.T
            * np.linalg.inv(rsaKho + regparam * np.eye(rsaKho.shape[0]))
            * Yho))
        print(dumbho, 'Dumb HO (dual)')
        dualrls_naive.solve(regparam)
        predho1 = np.squeeze(dualrls_naive.predictor.predict(testX))
        print(predho1, 'Naive HO (dual)')
        self.assertEqual(dumbho.shape, predho1.shape)
        for row in range(predho1.shape[0]):
            self.assertAlmostEqual(dumbho[row], predho1[row])
class LooRLS(object):

    def __init__(self):
        self.learner = None
        self.y_src = None
        self.measure = None

    def fit(self, X_src, y_src, X_tgt_known, y_tgt_known,
            X_tgt_unknown, y_tgt_unknown, verbose=False):
        #Map labels from set {1,2,3} to one-vs-all encoding; the label set
        #contains zeros iff some entry of y_src is zero
        zerolabels = np.count_nonzero(y_src) < len(y_src)
        y_src = to_one_vs_all(y_src, zerolabels)
        regparams = [2.**i for i in range(-15, 16)]
        #ova_accuracy computes one-vs-all classification accuracy directly
        #between the transformed class label matrix and a matrix of
        #predictions, where each column corresponds to a class
        if len(np.unique(y_src)) > 2:
            self.measure = ova_accuracy
        else:
            self.measure = accuracy
        self.learner = LeaveOneOutRLS(X_src, y_src, regparams=regparams,
                                      measure=self.measure)
        p_tgt = self.learner.predict(X_tgt_known)
        self.learner = RLS(X_src, y_src)
        best_regparam = None
        best_accuracy = 0.
        #exponential grid of possible regparam values
        log_regparams = range(-15, 16)
        for log_regparam in log_regparams:
            regparam = 2.**log_regparam
            #RLS is re-trained with the new regparam; this is
            #very fast due to a computational short-cut
            self.learner.solve(regparam)
            #Leave-one-out cross-validation predictions, also fast
            #due to a computational short-cut
            P_loo = self.learner.leave_one_out()
            acc = self.measure(y_src, P_loo)
            if verbose:
                print("LooRLS regparam 2**%d, loo-accuracy %f"
                      % (log_regparam, acc))
            if acc > best_accuracy:
                best_accuracy = acc
                best_regparam = regparam
        self.learner.solve(best_regparam)
        if verbose:
            print("LooRLS best regparam %f with loo-accuracy %f"
                  % (best_regparam, best_accuracy))

    def predict(self, X, y=None):
        ypred = self.learner.predict(X)
        if y is not None:
            zerolabels = np.count_nonzero(y) < len(y)
            y = to_one_vs_all(y, zerolabels)
            return ypred, self.measure(y, ypred)
        return ypred
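#A hypothetical usage sketch for the wrapper above, on synthetic
#multiclass data; the helper names (to_one_vs_all, ova_accuracy) are
#assumed to come from RLScore's measure/utility modules, as in the class
#itself.
import numpy as np

X_src = np.random.rand(200, 10)
y_src = np.random.randint(1, 4, size=200)  #three classes, labels {1,2,3}
X_tgt = np.random.rand(50, 10)
y_tgt = np.random.randint(1, 4, size=50)

model = LooRLS()
#the target-domain arguments are part of the fit signature; only the
#known target inputs are touched during fitting
model.fit(X_src, y_src, X_tgt, y_tgt, X_tgt, y_tgt, verbose=True)
ypred, acc = model.predict(X_tgt, y_tgt)
print("one-vs-all accuracy %f" % acc)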