Example #1
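This test, apparently from the RLScore project's test suite (cf. the file/project note in Example #6), checks that the conjugate-gradient learner CGRankRLS recovers the closed-form RankRLS solution W = (X*L*X.T + regparam*I)^-1 * X*L*Y, where L is the m x m centering matrix, for three magnitudes of the regularization parameter. Like the other snippets on this page, it assumes the enclosing unittest.TestCase and the test module's imports (numpy as np, LinearKernel, CGRankRLS).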
 def testOrdinalRegression(self):
     m, n = 100, 300
     for regparam in [0.00000001, 1, 100000000]:
         Xtrain = np.mat(np.random.rand(n, m))
         Y = np.mat(np.random.rand(m, 1))
         rpool = {}
         rpool['train_features'] = Xtrain.T
         rpool['train_labels'] = Y
         rpool['regparam'] = regparam
         rpool["bias"] = 1.0
         k = LinearKernel.createKernel(**rpool)
         rpool['kernel_obj'] = k
         rls = CGRankRLS.createLearner(**rpool)
         rls.train()
         model = rls.getModel()   
         W = model.W
         In = np.mat(np.identity(n))
         Im = np.mat(np.identity(m))
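         # Closed-form reference solution: W2 = (X*L*X.T + regparam*I)^-1 * X*L*Y,
         # with L the m x m centering matrix I - (1/m)*ones.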
         L = np.mat(Im-(1./m)*np.ones((m,m), dtype=np.float64))
         G = Xtrain*L*Xtrain.T+regparam*In
         W2 = np.squeeze(np.array(G.I*Xtrain*L*Y))
         for i in range(W.shape[0]):
             self.assertAlmostEqual(W[i], W2[i], places=5)
Example #2
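This helper builds a pairwise XOR task: it generates two independent binary classification data sets, computes a linear kernel matrix for each (a commented-out Gaussian-kernel alternative is kept for reference), and sets the pairwise label Y[i, j] to +1 exactly when the class labels of instance i in the first problem and instance j in the second differ, and to -1 otherwise.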
 def generate_xortask(self,
         trainpos1 = 5,
         trainneg1 = 5,
         trainpos2 = 6,
         trainneg2 = 7,
         testpos1 = 26,
         testneg1 = 27,
         testpos2 = 25,
         testneg2 = 25
         ):
     
     np.random.seed(55)
     X_train1, Y_train1 = self.generate_data(trainpos1, trainneg1, 5, 0, 1)
     X_train2, Y_train2 = self.generate_data(trainpos2, trainneg2, 5, 4, 6)
     
     X_test1, Y_test1 = self.generate_data(testpos1, testneg1, 5, 0, 1)
     X_test2, Y_test2 = self.generate_data(testpos2, testneg2, 5, 4, 6)
     
     #kernel1 = GaussianKernel.createKernel(gamma=0.01, train_features=X_train1)
     kernel1 = LinearKernel.createKernel(train_features=X_train1)
     K_train1 = kernel1.getKM(X_train1)
     K_test1 = kernel1.getKM(X_test1)
     
     #kernel2 = GaussianKernel.createKernel(gamma=0.01, train_features=X_train2)
     kernel2 = LinearKernel.createKernel(train_features=X_train2)
     K_train2 = kernel2.getKM(X_train2)
     K_test2 = kernel2.getKM(X_test2)
     
     #The function to be learned is an XOR function on the class labels
     #of the two classification problems
     Y_train = -1.*np.ones((trainpos1+trainneg1, trainpos2+trainneg2))
     for i in range(trainpos1+trainneg1):
         for j in range(trainpos2+trainneg2):
             if Y_train1[i,0] != Y_train2[j,0]:
                 Y_train[i, j] = 1.
     
     Y_test = -1.*np.ones((testpos1+testneg1, testpos2+testneg2))    
     for i in range(testpos1+testneg1):
         for j in range(testpos2+testneg2):
             if Y_test1[i,0] != Y_test2[j,0]:
                 Y_test[i, j] = 1.
     
     return K_train1, K_train2, Y_train, K_test1, K_test2, Y_test, X_train1, X_train2, X_test1, X_test2
Example #3
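The same closed-form comparison as in Example #1, but here CGRankRLS is trained from explicit pairwise preferences instead of labels: 1000 random index pairs are ordered so that the instance with the larger score in Y comes first. The reference solution replaces the centering matrix with the Laplacian coo.T * coo of the sparse preference incidence matrix coo, and the right-hand side becomes Xtrain * coo.T * 1.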
    def testPairwisePreferences(self):
        m, n = 100, 300
        for regparam in [0.00000001, 1, 100000000]:
            Xtrain = np.mat(np.random.rand(n, m))
            Y = np.mat(np.random.rand(m, 1))

            pairs = []
            for i in range(1000):
                a = random.randint(0, m - 1)
                b = random.randint(0, m - 1)
                if Y[a] > Y[b]:
                    pairs.append((a, b))
                else:
                    pairs.append((b, a))
            pairs = np.array(pairs)
            rpool = {}
            rpool['train_features'] = Xtrain.T
            #rpool['train_labels'] = Y
            rpool['train_preferences'] = pairs
            rpool['regparam'] = regparam
            rpool["bias"] = 1.0
            k = LinearKernel.createKernel(**rpool)
            rpool['kernel_obj'] = k
            rls = CGRankRLS.createLearner(**rpool)
            rls.train()
            model = rls.getModel()
            W = model.W
            In = np.mat(np.identity(n))
            Im = np.mat(np.identity(m))
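            # coo is the (num_pairs x m) incidence matrix with +1 at the
            # preferred index and -1 at the other; coo.T * coo is the
            # Laplacian of the preference graph.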
            vals = np.concatenate([
                np.ones((pairs.shape[0]), dtype=np.float64), -np.ones(
                    (pairs.shape[0]), dtype=np.float64)
            ])
            row = np.concatenate(
                [np.arange(pairs.shape[0]),
                 np.arange(pairs.shape[0])])
            col = np.concatenate([pairs[:, 0], pairs[:, 1]])
            coo = coo_matrix((vals, (row, col)),
                             shape=(pairs.shape[0], Xtrain.T.shape[0]))
            L = (coo.T * coo).todense()
            G = Xtrain * L * Xtrain.T + regparam * In
            W2 = np.squeeze(
                np.array(G.I * Xtrain * coo.T *
                         np.mat(np.ones((pairs.shape[0], 1)))))
            for i in range(W.shape[0]):
                self.assertAlmostEqual(W[i], W2[i], places=4)
Example #4
 def testPairwisePreferences(self):
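     # Variant of the previous test using PPRankRLS; rather than comparing
     # weight vectors, it checks that model.predict on new data matches
     # predictions made with the closed-form solution W2.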
     m, n = 100, 300
     Xtrain = np.mat(np.random.rand(m, n))
     Xtest = np.mat(np.random.rand(5, n))
     for regparam in [0.00000001, 1, 100000000]:
         
         Y = np.mat(np.random.rand(m, 1))
         
         pairs = []
         for i in range(1000):
             a = random.randint(0, m - 1)
             b = random.randint(0, m - 1)
             if Y[a] > Y[b]:
                 pairs.append((a, b))
             else:
                 pairs.append((b, a))
         pairs = np.array(pairs)
         rpool = {}
         rpool['train_features'] = Xtrain
         #rpool['train_labels'] = Y
         rpool['train_preferences'] = pairs
         rpool['regparam'] = regparam
         rpool["bias"] = 1.0
         k = LinearKernel.createKernel(**rpool)
         rpool['kernel_obj'] = k
         rls = PPRankRLS.createLearner(**rpool)
         rls.train()
         model = rls.getModel()   
         W = model.W
         In = np.mat(np.identity(n))
         Im = np.mat(np.identity(m))
         vals = np.concatenate([np.ones((pairs.shape[0]), dtype = np.float64), -np.ones((pairs.shape[0]), dtype = np.float64)])
         row = np.concatenate([np.arange(pairs.shape[0]), np.arange(pairs.shape[0])])
         col = np.concatenate([pairs[:, 0], pairs[:, 1]])
         coo = coo_matrix((vals, (row, col)), shape = (pairs.shape[0], Xtrain.shape[0]))
         L = (coo.T * coo).todense()
         G = Xtrain.T * L * Xtrain + regparam * In
         W2 = np.squeeze(np.array(G.I * Xtrain.T * coo.T * np.mat(np.ones((pairs.shape[0], 1)))))
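         # The trained model's predictions should agree with the closed-form
         # solution applied to the test inputs.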
         P1 = model.predict(Xtest)
         P2 = Xtest * np.mat(W2).T
         for i in range(P1.shape[0]):
             self.assertAlmostEqual(P1[i], P2[i, 0], places=3)
Example #5
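This test exercises the hold-out (cross-validation) routines of the LabelRankRLS module: hold-out predictions for the first five instances are computed with naive retraining on the complement set (primal and dual) and with the fast hold-out routine (primal and dual), and asserted to agree across a grid of regularization parameters 2^-5, ..., 2^4; a direct matrix formula ('Dumb HO') is printed for visual comparison.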
    def testLabelRankRLS(self):
        
        print("Testing the cross-validation routines of the LabelRankRLS module.")
        
        np.random.seed(100)
        floattype = np.float64
        
        m, n = 100, 400 #data, features
        Xtrain = np.mat(np.random.rand(m, n))
        K = Xtrain * Xtrain.T
        ylen = 1
        Y = np.mat(np.zeros((m, ylen), dtype=floattype))
        Y[:, 0] = np.sum(Xtrain, 1)
        
        
        objcount = 20
        labelcount = 5
        
        hoindices = range(labelcount)
        hocompl = list(set(range(m)) - set(hoindices))
        
        size = m
        
        P = np.mat(np.zeros((m, objcount), dtype=np.float64))
        Q = np.mat(np.zeros((objcount, m), dtype=np.float64))
        qidlist = [0 for i in range(100)]
        for h in range(5, 12):
            qidlist[h] = 1
        for h in range(12, 32):
            qidlist[h] = 2
        for h in range(32, 34):
            qidlist[h] = 3
        for h in range(34, 85):
            qidlist[h] = 4
        for h in range(85, 100):
            qidlist[h] = 5
        qidlist_cv = qidlist[5:]
        
        objcount = max(qidlist) + 1
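        # P has one column per query, normalized by the square root of the
        # query size, so L = I - P*P.T below is the block-diagonal
        # query-wise Laplacian.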
        P = np.mat(np.zeros((m, objcount), dtype=np.float64))
        for i in range(m):
            qid = qidlist[i]
            P[i, qid] = 1.
        labelcounts = np.sum(P, axis=0)
        P = np.divide(P, np.sqrt(labelcounts))
        D = np.mat(np.ones((1, m), dtype=np.float64))
        L = np.multiply(np.eye(m), D) - P * P.T
        
        Kcv = K[np.ix_(hocompl, hocompl)]
        Ycv = Y[hocompl]
        Ktest = K[np.ix_(hoindices, hocompl)]
        Lcv = L[np.ix_(hocompl, hocompl)]
        
        Xcv = Xtrain[hocompl]
        #Pcv = P[hocompl]#KLUDGE!!!!!
        Pcv = P[np.ix_(hocompl, range(1, P.shape[1]))]#KLUDGE!!!!!
        Xtest = Xtrain[hoindices]
        Yho = Y[hocompl]
        
        rpool = {}
        rpool["train_features"] = Xtrain
        rpool["train_labels"] = Y
        rpool["train_qids"] = mapQids(qidlist)
        primalrls = LabelRankRLS.createLearner(**rpool)        
        
        rpool = {}
        rpool["kernel_matrix"] = K
        rpool["train_labels"] = Y
        rpool["train_qids"] = mapQids(qidlist)        
        dualrls = LabelRankRLS.createLearner(**rpool)
        
        
        rpool = {}
        rpool['train_features'] = Xcv
        rpool['train_labels'] = Yho
        rpool['kernel_obj'] = LinearKernel.createKernel(**rpool)
        rpool['train_qids'] = mapQids(qidlist_cv)
        primalrls_naive = LabelRankRLS.createLearner(**rpool)

        
        rpool = {}
        rpool['kernel_matrix'] = Kcv
        rpool['train_labels'] = Yho
        rpool['train_features'] = Xcv
        rpool['kernel_obj'] = LinearKernel.createKernel(**rpool)
        rpool['train_qids'] = mapQids(qidlist_cv)
        dualrls_naive = LabelRankRLS.createLearner(**rpool)
        
        
        
        testkm = K[np.ix_(hocompl, hoindices)]
        
        loglambdas = range(-5, 5)
        for j in range(0, len(loglambdas)):
            regparam = 2. ** loglambdas[j]
            print()
            print("Regparam 2^%1d" % loglambdas[j])
            
            
            print(np.squeeze(np.array((testkm.T * la.inv(Lcv * Kcv + regparam * np.eye(Lcv.shape[0])) * Lcv * Yho).T)), 'Dumb HO')
            
            predhos = []
            primalrls_naive.solve(regparam)
            predho = primalrls_naive.getModel().predict(Xtest)
            print(predho.T, 'Naive HO (primal)')
            predhos.append(predho)
            
            dualrls_naive.solve(regparam)
            predho = dualrls_naive.getModel().predict(testkm.T)
            print(predho.T, 'Naive HO (dual)')
            predhos.append(predho)
            
            primalrls.solve(regparam)
            predho = np.squeeze(primalrls.computeHO(hoindices))
            print(predho.T, 'Fast HO (primal)')
            predhos.append(predho)
            
            dualrls.solve(regparam)
            predho = np.squeeze(dualrls.computeHO(hoindices))
            print(predho.T, 'Fast HO (dual)')
            predhos.append(predho)
            
            predho0 = predhos.pop(0)
            for predho in predhos:
                self.assertEqual(predho0.shape, predho.shape)
                for row in range(predho.shape[0]):
                    self.assertAlmostEqual(predho0[row], predho[row], places=5)
Example #6
File: test_rls.py  Project: aatapa/RLScore
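This test verifies that RLS with a linear kernel matches the direct ridge-regression solution in several configurations: the basic primal case, fast re-regularization via solve(), an explicit bias term, a reduced-set (basis-vector) approximation, and precomputed kernel matrices. Throughout, a bias of 2 is modeled by appending a constant sqrt(2) column to X.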
 def test_linear(self):
     #Test that learning with a linear kernel works correctly with both
     #low- and high-dimensional data
     for X in [self.Xtrain1, self.Xtrain2]:
         for Y in [self.Ytrain1, self.Ytrain2]:
             #Basic case
             primal_rls = RLS(X, Y, regparam=1.0, bias=0.)
             W = primal_rls.predictor.W
             d = X.shape[1]
             W2 = np.linalg.solve(np.dot(X.T, X) + np.eye(d), np.dot(X.T, Y))
             assert_allclose(W, W2)
             #Fast regularization algorithm
             primal_rls.solve(10.)
             W = primal_rls.predictor.W
             W2 = np.linalg.solve(np.dot(X.T, X) + 10.*np.eye(d), np.dot(X.T, Y))
             assert_allclose(W, W2)
             #Bias term included
             primal_rls = RLS(X, Y, regparam=1.0, bias=2.)
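             # bias=2. corresponds to appending a constant sqrt(2.) column to X;
             # the learned intercept b should equal sqrt(2) times the last
             # component of the augmented closed-form solution below.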
             O = np.sqrt(2.) * np.ones((X.shape[0],1))
             X_new = np.hstack((X, O))
             W = primal_rls.predictor.W
             W2 = np.linalg.solve(np.dot(X_new.T, X_new) + np.eye(d+1), np.dot(X_new.T, Y))
             b = primal_rls.predictor.b
             b2 = W2[-1]
             W2 = W2[:-1]
             assert_allclose(W, W2)
             assert_allclose(b, np.sqrt(2) * b2)
             #reduced set approximation
             primal_rls = RLS(X, Y, basis_vectors = X[self.bvectors], regparam=5.0, bias=2.)
             W = primal_rls.predictor.W
             b = primal_rls.predictor.b
             K = np.dot(X_new, X_new.T)
             Kr = K[:, self.bvectors]
             Krr = K[np.ix_(self.bvectors, self.bvectors)]
             A = np.linalg.solve(np.dot(Kr.T, Kr)+ 5.0 * Krr, np.dot(Kr.T, Y))
             W2 = np.dot(X_new[self.bvectors].T, A)
             b2 = W2[-1]
             W2 = W2[:-1]
             assert_allclose(W, W2)
             assert_allclose(b, np.sqrt(2) * b2)
             #Using pre-computed linear kernel matrix
             kernel = LinearKernel(X, bias = 2.)
             K = kernel.getKM(X)
             dual_rls = RLS(K, Y, kernel = "PrecomputedKernel", regparam=0.01)
             W = np.dot(X_new.T, dual_rls.predictor.W)
             b = W[-1]
             W = W[:-1]
             W2 = np.linalg.solve(np.dot(X_new.T, X_new) + 0.01 * np.eye(d+1), np.dot(X_new.T, Y))
             b2 = W2[-1]
             W2 = W2[:-1]
             assert_allclose(W, W2)
             assert_allclose(b, b2)
             #Pre-computed linear kernel, reduced set approximation
             kernel = LinearKernel(X[self.bvectors], bias = 2.)
             dual_rls = RLS(kernel.getKM(X), Y, kernel="PrecomputedKernel", basis_vectors = kernel.getKM(X[self.bvectors]), regparam=5.0)
             W = np.dot(X_new[self.bvectors].T, dual_rls.predictor.W)
             b = W[-1]
             W = W[:-1]
             K = np.dot(X_new, X_new.T)
             Kr = K[:, self.bvectors]
             Krr = K[np.ix_(self.bvectors, self.bvectors)]
             A = np.linalg.solve(np.dot(Kr.T, Kr)+ 5.0 * Krr, np.dot(Kr.T, Y))
             W2 = np.dot(X_new[self.bvectors].T, A)
             b2 = W2[-1]
             W2 = W2[:-1]
             assert_allclose(W, W2)
             assert_allclose(b, b2)