Example #1
    def solve_kernel(self, regparam1, regparam2):
        self.regparam1 = regparam1
        self.regparam2 = regparam2
        K1, K2 = self.K1, self.K2
        Y = self.Y.reshape((K1.shape[0], K2.shape[0]), order='F')
        #assert self.Y.shape == (self.K1.shape[0], self.K2.shape[0]), 'Y.shape!=(K1.shape[0],K2.shape[0]). Y.shape=='+str(Y.shape)+', K1.shape=='+str(self.K1.shape)+', K2.shape=='+str(self.K2.shape)
        if not self.trained:
            self.trained = True
            evals1, V = decomposition.decomposeKernelMatrix(K1)
            evals1 = np.mat(evals1).T
            evals1 = np.multiply(evals1, evals1)
            V = np.mat(V)
            self.evals1 = evals1
            self.V = V

            evals2, U = decomposition.decomposeKernelMatrix(K2)
            evals2 = np.mat(evals2).T
            evals2 = np.multiply(evals2, evals2)
            U = np.mat(U)
            self.evals2 = evals2
            self.U = U
            self.VTYU = V.T * self.Y * U

        #newevals = 1. / (self.evals1 * self.evals2.T + regparam)
        self.newevals1 = 1. / (self.evals1 + regparam1)
        self.newevals2 = 1. / (self.evals2 + regparam2)
        newevals = self.newevals1 * self.newevals2.T

        self.A = np.multiply(self.VTYU, newevals)
        self.A = self.V * self.A * self.U.T
        self.model = KernelPairwiseModel(self.A)
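
The example above computes A = (K1 + regparam1*I)^-1 Y (K2 + regparam2*I)^-1, reusing the eigendecompositions of K1 and K2 across regularization parameters so that only the cheap elementwise rescaling depends on regparam1 and regparam2. A plain-NumPy sketch (not RLScore code; random data for illustration) verifying the identity it relies on:

 import numpy as np

 rng = np.random.default_rng(0)
 m, n, r1, r2 = 5, 4, 0.5, 2.0
 M1, M2 = rng.standard_normal((m, m)), rng.standard_normal((n, n))
 K1, K2 = M1 @ M1.T, M2 @ M2.T   # symmetric PSD stand-ins for kernel matrices
 Y = rng.standard_normal((m, n))

 # eigendecompose once; only the elementwise rescaling below depends on r1, r2
 e1, V = np.linalg.eigh(K1)
 e2, U = np.linalg.eigh(K2)
 A_eig = V @ ((V.T @ Y @ U) / np.multiply.outer(e1 + r1, e2 + r2)) @ U.T

 # direct solve for comparison
 A_direct = np.linalg.solve(K1 + r1 * np.eye(m), Y) @ np.linalg.inv(K2 + r2 * np.eye(n))
 assert np.allclose(A_eig, A_direct)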
Example #2
 def decompositionFromPool(self, rpool):
     kernel = rpool['kernel_obj']
     self.X = array_tools.as_2d_array(rpool['X'], True)
     if 'basis_vectors' in rpool:
         basis_vectors = array_tools.as_2d_array(rpool['basis_vectors'], True)
         if not self.X.shape[1] == basis_vectors.shape[1]:
             raise Exception("X and basis_vectors have different number of columns")
     else:
         basis_vectors = None
     if "bias" in rpool:
         self.bias = float(rpool["bias"])
     else:
         self.bias = 1.
     if basis_vectors is not None or self.X.shape[1] > self.X.shape[0]:
         #First possibility: subset of regressors has been invoked
         if basis_vectors is not None:
             K_r = kernel.getKM(self.X).T
             Krr = kernel.getKM(basis_vectors)
             svals, evecs, U, Z = decomposition.decomposeSubsetKM(K_r, Krr)
         #Second possibility: dual mode if more attributes than examples
         else:
             K = kernel.getKM(self.X).T
             svals, evecs = decomposition.decomposeKernelMatrix(K)
             U, Z = None, None
     #Third possibility, primal decomposition
     else:
         #Invoking getPrimalDataMatrix adds the bias feature
         X = getPrimalDataMatrix(self.X, self.bias)
         svals, evecs, U = decomposition.decomposeDataMatrix(X.T)
         U, Z = None, None
     return svals, evecs, U, Z
Example #3
 def decompositionFromPool(self, rpool):
     kernel = rpool['kernel_obj']
     self.X = rpool['train_features']
     if 'basis_vectors' in rpool:
         basis_vectors = rpool['basis_vectors']
     else:
         basis_vectors = None
     if "bias" in rpool:
         self.bias = float(rpool["bias"])
     else:
         self.bias = 0.
     if basis_vectors is not None or self.X.shape[1] > self.X.shape[0]:
         #First possibility: subset of regressors has been invoked
         if basis_vectors is not None:
             K_r = kernel.getKM(self.X).T
             Krr = kernel.getKM(basis_vectors)
             svals, evecs, U, Z = decomposition.decomposeSubsetKM(K_r, Krr)
         #Second possibility: dual mode if more attributes than examples
         else:
             K = kernel.getKM(self.X).T
             svals, evecs = decomposition.decomposeKernelMatrix(K)
             U, Z = None, None
     #Third possibility, primal decomposition
     else:
         #Invoking getPrimalDataMatrix adds the bias feature
         X = getPrimalDataMatrix(self.X, self.bias)
         svals, evecs, U = decomposition.decomposeDataMatrix(X.T)
         U, Z = None, None
     return svals, evecs, U, Z
Example #4
File: adapter.py  Project: peknau/RLScore
 def decompositionFromPool(self, rpool):
     kernel = rpool[data_sources.KERNEL_OBJ]
     self.X = rpool[data_sources.TRAIN_FEATURES]
     if data_sources.BASIS_VECTORS in rpool:
         bvectors = rpool[data_sources.BASIS_VECTORS]
     else:
         bvectors = None
     if "bias" in rpool:
         self.bias = float(rpool["bias"])
     else:
         self.bias = 0.
     if bvectors is not None or self.X.shape[1] > self.X.shape[0]:
         K = kernel.getKM(self.X).T
         #First possibility: subset of regressors has been invoked
         if bvectors is not None:
             svals, evecs, U, Z = decomposition.decomposeSubsetKM(
                 K, bvectors)
         #Second possibility: dual mode if more attributes than examples
         else:
             svals, evecs = decomposition.decomposeKernelMatrix(K)
             U, Z = None, None
     #Third possibility, primal decomposition
     else:
         #Invoking getPrimalDataMatrix adds the bias feature
         X = getPrimalDataMatrix(self.X, self.bias)
         svals, evecs, U = decomposition.decomposeDataMatrix(X.T)
         U, Z = None, None
     return svals, evecs, U, Z
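
getPrimalDataMatrix is referenced but not shown in this listing. A hedged sketch of the behavior the comment describes (appending the bias feature), assuming the bias enters as a constant sqrt(bias) column so that the linear kernel X @ X.T gains an additive bias term; the real RLScore helper may differ in details such as sparse-input handling:

 import numpy as np

 def getPrimalDataMatrix(X, bias):
     # sketch only, not the library's actual implementation
     X = np.asarray(X, dtype=np.float64)
     if bias != 0:
         bias_col = np.full((X.shape[0], 1), np.sqrt(bias))
         X = np.hstack([X, bias_col])   # X @ X.T now equals the original kernel plus bias
     return X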
Example #5
 def decompositionFromPool(self, rpool):
     """Builds decomposition representing the training data from resource pool.
     Default implementation
     builds and decomposes the kernel matrix itself (standard case), or the 
     empirical kernel map of the training data, if reduced set approximation is
     used. Inheriting classes may also re-implement this by decomposing the feature
     map of the data (e.g. linear kernel with low-dimensional data).
     @param rpool: resource pool
     @type rpool: dict
     @return: svals, evecs, U, Z
     @rtype: tuple of numpy matrices
     """
     train_X = rpool['X']
     kernel = rpool['kernel_obj']
     if 'basis_vectors' in rpool:
         basis_vectors = rpool['basis_vectors']
         if not train_X.shape[1] == basis_vectors.shape[1]:
             raise Exception("X and basis_vectors have different number of columns")
         K_r = kernel.getKM(train_X).T
         Krr = kernel.getKM(basis_vectors)
         svals, evecs, U, Z = decomposition.decomposeSubsetKM(K_r, Krr)
     else:
         K = kernel.getKM(train_X).T
         svals, evecs = decomposition.decomposeKernelMatrix(K)
         U, Z = None, None
     return svals, evecs, U, Z
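
All of these examples assume the same contract for decomposition.decomposeKernelMatrix: it returns singular values svals and eigenvectors evecs with K ≈ evecs * diag(svals**2) * evecs.T, which is why callers square svals to recover eigenvalues. A minimal sketch of that assumed contract (not RLScore's implementation):

 import numpy as np

 def decomposeKernelMatrix(K, tol=1e-12):
     # sketch of the assumed contract, not the library's actual code
     evals, evecs = np.linalg.eigh(np.asarray(K))
     keep = evals > tol * max(evals.max(), 1.0)   # drop numerically zero directions
     svals = np.sqrt(evals[keep])                 # callers square these to get eigenvalues
     return svals, evecs[:, keep]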
Example #6
 def decompositionFromPool(self, rpool):
     K_train = rpool['kernel_matrix']
     if 'basis_vectors' in rpool:
         svals, rsvecs, U, Z = decomposition.decomposeSubsetKM(K_train, rpool['basis_vectors'])
     else:
         svals, rsvecs = decomposition.decomposeKernelMatrix(K_train)
         U, Z = None, None
     return svals, rsvecs, U, Z
Example #7
File: adapter.py  Project: peknau/RLScore
 def decompositionFromPool(self, rpool):
     K_train = rpool[data_sources.KMATRIX]
     if data_sources.BASIS_VECTORS in rpool:
         svals, rsvecs, U, Z = decomposition.decomposeSubsetKM(K_train, rpool[data_sources.BASIS_VECTORS])
     else:
         svals, rsvecs = decomposition.decomposeKernelMatrix(K_train)
         U, Z = None, None
     return svals, rsvecs, U, Z
Example #8
 def decompositionFromPool(self, rpool):
     K_train = rpool['kernel_matrix']
     if 'basis_vectors' in rpool:
         if not K_train.shape[1] == rpool["basis_vectors"].shape[1]:
             raise Exception("When using basis vectors, both kernel matrices must contain equal number of columns")
         svals, rsvecs, U, Z = decomposition.decomposeSubsetKM(K_train.T, rpool['basis_vectors'])
     else:
         svals, rsvecs = decomposition.decomposeKernelMatrix(K_train)
         U, Z = None, None
     return svals, rsvecs, U, Z
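
A shape sketch (hypothetical data, linear kernel) of the reduced-set inputs this check guards: 'kernel_matrix' holds the n x m empirical kernel map between training examples and basis vectors, and 'basis_vectors' the m x m kernel matrix among the basis vectors, so both must have m columns:

 import numpy as np

 n, m = 100, 10
 X = np.random.randn(n, 5)
 B = X[:m]                        # basis vectors: a subset of the training examples
 K_train = X @ B.T                # n x m empirical kernel map
 Krr = B @ B.T                    # m x m kernel matrix among the basis vectors
 assert K_train.shape[1] == Krr.shape[1]   # the column check enforced above
 rpool = {'kernel_matrix': K_train, 'basis_vectors': Krr}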
Example #9
 def solve(self, regparam=1.0):
     """Re-trains RankRLS for the given regparam.
            
     Parameters
     ----------
     regparam : float, optional
         regularization parameter, regparam > 0 (default=1.0)
         
     Notes
     -----   
     """
     size = self.svecs.shape[0]
     
     if not hasattr(self, "multipleright"):
         vals = np.concatenate([np.ones((self.pairs.shape[0]), dtype=np.float64), -np.ones((self.pairs.shape[0]), dtype = np.float64)])
         row = np.concatenate([np.arange(self.pairs.shape[0]), np.arange(self.pairs.shape[0])])
         col = np.concatenate([self.pairs[:, 0], self.pairs[:, 1]])
         coo = coo_matrix((vals, (row, col)), shape = (self.pairs.shape[0], size))
         self.L = (coo.T * coo)#.todense()
         
         #Eigenvalues of the kernel matrix
         evals = np.multiply(self.svals, self.svals)
         
         #Temporary variables
         ssvecs = np.multiply(self.svecs, self.svals)
         
         #These are cached for later use in solve and computeHO functions
         ssvecsTLssvecs = ssvecs.T * self.L * ssvecs
         LRsvals, LRevecs = decomposition.decomposeKernelMatrix(ssvecsTLssvecs)
         LRevals = np.multiply(LRsvals, LRsvals)
         LY = coo.T * np.mat(np.ones((self.pairs.shape[0], 1)))
         self.multipleright = LRevecs.T * (ssvecs.T * LY)
         self.multipleleft = ssvecs * LRevecs
         self.LRevals = LRevals
         self.LRevecs = LRevecs
     
     
     self.regparam = regparam
     
     #Compute the eigenvalues determined by the given regularization parameter
     self.neweigvals = 1. / (self.LRevals + regparam)
     self.A = self.svecs * np.multiply(1. / self.svals.T, (self.LRevecs * np.multiply(self.neweigvals.T, self.multipleright)))
     #self.results['model'] = self.getModel()
     self.predictor = self.svdad.createModel(self)
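
The sparse matrix built above is the incidence matrix of the preference pairs: row i has +1 at pairs[i, 0] and -1 at pairs[i, 1], so coo.T * coo is the Laplacian of the preference graph. A standalone sketch with hypothetical pairs:

 import numpy as np
 from scipy.sparse import coo_matrix

 pairs = np.array([[0, 1], [2, 1]])   # prefer example 0 over 1, and 2 over 1
 p, size = pairs.shape[0], 3
 vals = np.concatenate([np.ones(p), -np.ones(p)])
 rows = np.concatenate([np.arange(p), np.arange(p)])
 cols = np.concatenate([pairs[:, 0], pairs[:, 1]])
 coo = coo_matrix((vals, (rows, cols)), shape=(p, size))
 L = (coo.T @ coo).toarray()
 # Laplacian: degrees on the diagonal, -1 per preference edge
 # [[ 1. -1.  0.]
 #  [-1.  2. -1.]
 #  [ 0. -1.  1.]]
 print(L)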
Example #10
 def solve(self, regparam):
     """Trains the prediction function, using the given regularization parameter.
     
     This implementation reuses the decompositions cached on the first call and recomputes the solution for the new regparam.
     
     Parameters
     ----------
     regparam: float (regparam > 0)
         regularization parameter
     """
     
     if not hasattr(self, "multipleright"):
         vals = np.concatenate([np.ones((self.pairs.shape[0]), dtype=np.float64), -np.ones((self.pairs.shape[0]), dtype = np.float64)])
         row = np.concatenate([np.arange(self.pairs.shape[0]), np.arange(self.pairs.shape[0])])
         col = np.concatenate([self.pairs[:, 0], self.pairs[:, 1]])
         coo = coo_matrix((vals, (row, col)), shape = (self.pairs.shape[0], self.size))
         self.L = (coo.T * coo)#.todense()
         
         #Eigenvalues of the kernel matrix
         evals = np.multiply(self.svals, self.svals)
         
         #Temporary variables
         ssvecs = np.multiply(self.svecs, self.svals)
         
         #These are cached for later use in solve and computeHO functions
         ssvecsTLssvecs = ssvecs.T * self.L * ssvecs
         LRsvals, LRevecs = decomposition.decomposeKernelMatrix(ssvecsTLssvecs)
         LRevals = np.multiply(LRsvals, LRsvals)
         LY = coo.T * np.mat(np.ones((self.pairs.shape[0], 1)))
         self.multipleright = LRevecs.T * (ssvecs.T * LY)
         self.multipleleft = ssvecs * LRevecs
         self.LRevals = LRevals
         self.LRevecs = LRevecs
     
     
     self.regparam = regparam
     
     #Compute the eigenvalues determined by the given regularization parameter
     self.neweigvals = 1. / (self.LRevals + regparam)
     self.A = self.svecs * np.multiply(1. / self.svals.T, (self.LRevecs * np.multiply(self.neweigvals.T, self.multipleright)))
     self.results['model'] = self.getModel()
Example #11
File: adapter.py  Project: peknau/RLScore
 def decompositionFromPool(self, rpool):
     """Builds decomposition representing the training data from resource pool.
     Default implementation
     builds and decomposes the kernel matrix itself (standard case), or the 
     empirical kernel map of the training data, if reduced set approximation is
     used. Inheriting classes may also re-implement this by decomposing the feature
     map of the data (e.g. linear kernel with low-dimensional data).
     @param rpool: resource pool
     @type rpool: dict
     @return: svals, evecs, U, Z
     @rtype: tuple of numpy matrices
     """
     train_X = rpool[data_sources.TRAIN_FEATURES]
     kernel = rpool[data_sources.KERNEL_OBJ]
     if data_sources.BASIS_VECTORS in rpool:
         bvectors = rpool[data_sources.BASIS_VECTORS]
         K = kernel.getKM(train_X).T
         svals, evecs, U, Z = decomposition.decomposeSubsetKM(K, bvectors)
     else:
         K = kernel.getKM(train_X).T
         svals, evecs = decomposition.decomposeKernelMatrix(K)
         U, Z = None, None
     return svals, evecs, U, Z
Example #12
 def decompositionFromPool(self, rpool):
     """Builds decomposition representing the training data from resource pool.
     Default implementation
     builds and decomposes the kernel matrix itself (standard case), or the 
     empirical kernel map of the training data, if reduced set approximation is
     used. Inheriting classes may also re-implement this by decomposing the feature
     map of the data (e.g. linear kernel with low-dimensional data).
     @param rpool: resource pool
     @type rpool: dict
     @return: svals, evecs, U, Z
     @rtype: tuple of numpy matrices
     """
     train_X = rpool['train_features']
     kernel = rpool['kernel_obj']
     if 'basis_vectors' in rpool:
         basis_vectors = rpool['basis_vectors']
         K_r = kernel.getKM(train_X).T
         Krr = kernel.getKM(basis_vectors)
         svals, evecs, U, Z = decomposition.decomposeSubsetKM(K_r, Krr)
     else:
         K = kernel.getKM(train_X).T
         svals, evecs = decomposition.decomposeKernelMatrix(K)
         U, Z = None, None
     return svals, evecs, U, Z
Example #13
 def solve(self, regparam=1.0):
     """Trains the learning algorithm, using the given regularization parameter.
            
     Parameters
     ----------
     regparam: float (regparam > 0)
         regularization parameter
     """
     if not hasattr(self, "D"):
         qidlist = self.qidlist
         objcount = max(qidlist) + 1
         
         labelcounts = np.mat(np.zeros((1, objcount)))
         Pvals = np.ones(self.size)
         for i in range(self.size):
             qid = qidlist[i]
             labelcounts[0, qid] = labelcounts[0, qid] + 1
         D = np.mat(np.ones((1, self.size), dtype=np.float64))
         
         #The centering matrix way (HO computations should be modified accordingly too)
         for i in range(self.size):
             qid = qidlist[i]
             Pvals[i] = 1. / np.sqrt(labelcounts[0, qid])
         
         #The old Laplacian matrix way
         #for i in range(self.size):
         #    qid = qidlist[i]
         #    D[0, i] = labelcounts[0, qid]
         
         P = scipy.sparse.coo_matrix((Pvals, (np.arange(0, self.size), qidlist)), shape=(self.size,objcount))
         P_csc = P.tocsc()
         P_csr = P.tocsr()
         
         
         #Eigenvalues of the kernel matrix
         evals = np.multiply(self.svals, self.svals)
         
         #Temporary variables
         ssvecs = np.multiply(self.svecs, self.svals)
         
         #These are cached for later use in solve and computeHO functions
         ssvecsTLssvecs = (np.multiply(ssvecs.T, D) - (ssvecs.T * P_csc) * P_csr.T) * ssvecs
         LRsvals, LRevecs = decomposition.decomposeKernelMatrix(ssvecsTLssvecs)
         LRevals = np.multiply(LRsvals, LRsvals)
         LY = np.multiply(D.T, self.Y) - P_csr * (P_csc.T * self.Y)
         self.multipleright = LRevecs.T * (ssvecs.T * LY)
         self.multipleleft = ssvecs * LRevecs
         self.LRevals = LRevals
         self.LRevecs = LRevecs
         self.D = D
     
     
     self.regparam = regparam
     
     #Compute the eigenvalues determined by the given regularization parameter
     self.neweigvals = 1. / (self.LRevals + regparam)
     self.A = self.svecs * np.multiply(1. / self.svals.T, (self.LRevecs * np.multiply(self.neweigvals.T, self.multipleright)))
     #if self.U == None:
         #Dual RLS
     #    pass
         #self.A = self.svecs * np.multiply(1. / self.svals.T, (self.LRevecs * np.multiply(self.neweigvals.T, self.multipleright)))
     #else:
         #Primal RLS
         #self.A = self.U.T * (self.LRevecs * np.multiply(self.neweigvals.T, self.multipleright))
         #self.A = self.U.T * np.multiply(self.svals.T,  self.svecs.T * self.A)
     self.results['model'] = self.getModel()
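
With D all ones, the cached product above applies L = I - P*P.T, a within-query centering operator: P[i, q] = 1/sqrt(n_q) for example i in query q, so P*P.T averages within each query. A sketch with synthetic query ids:

 import numpy as np
 import scipy.sparse

 qidlist = np.array([0, 0, 1, 1, 1])
 size, objcount = len(qidlist), qidlist.max() + 1
 Pvals = 1.0 / np.sqrt(np.bincount(qidlist)[qidlist])
 P = scipy.sparse.coo_matrix((Pvals, (np.arange(size), qidlist)),
                             shape=(size, objcount))
 L = np.eye(size) - (P @ P.T).toarray()
 Y = np.random.randn(size, 1)
 centered = L @ Y
 # each query's block of residuals now sums to (numerically) zero
 assert abs(centered[:2].sum()) < 1e-9 and abs(centered[2:].sum()) < 1e-9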
Example #14
 def solve(self, regparam1, regparam2):
     """Re-trains TwoStepRLS for the given regparams
            
     Parameters
     ----------
     regparam1: float
         regularization parameter 1, regparam1 > 0
     
     regparam2: float
         regularization parameter 2, regparam2 > 0
         
     Notes
     -----    
             
     Computational complexity of re-training:
     
     m = n_samples1, n = n_samples2, d = n_features1, e = n_features2
     
     O(ed^2 + de^2) Linear version (assumption: d < m, e < n)
     
     O(m^3 + n^3) Kernel version
     """
     self.regparam1 = regparam1
     self.regparam2 = regparam2
     if self.kernelmode:
         K1, K2 = self.K1, self.K2
         Y = self.Y.reshape((K1.shape[0], K2.shape[0]), order='F')
         #assert self.Y.shape == (self.K1.shape[0], self.K2.shape[0]), 'Y.shape!=(K1.shape[0],K2.shape[0]). Y.shape=='+str(Y.shape)+', K1.shape=='+str(self.K1.shape)+', K2.shape=='+str(self.K2.shape)
         if not self.trained:
             self.trained = True
             evals1, V = decomposition.decomposeKernelMatrix(K1)
             evals1 = np.mat(evals1).T
             evals1 = np.multiply(evals1, evals1)
             V = np.mat(V)
             self.evals1 = evals1
             self.V = V
             
             evals2, U = decomposition.decomposeKernelMatrix(K2)
             evals2 = np.mat(evals2).T
             evals2 = np.multiply(evals2, evals2)
             U = np.mat(U)
             self.evals2 = evals2
             self.U = U
             self.VTYU = V.T * self.Y * U
         
         #newevals = 1. / (self.evals1 * self.evals2.T + regparam)
         self.newevals1 = 1. / (self.evals1 + regparam1)
         self.newevals2 = 1. / (self.evals2 + regparam2)
         newevals = self.newevals1 * self.newevals2.T
         
         self.A = np.multiply(self.VTYU, newevals)
         self.A = self.V * self.A * self.U.T
         self.A = np.array(self.A)
         #self.predictor = KernelPairwisePredictor(self.A)
         label_row_inds, label_col_inds = np.unravel_index(np.arange(K1.shape[0] * K2.shape[0]), (K1.shape[0],  K2.shape[0]))
         label_row_inds = np.array(label_row_inds, dtype = np.int32)
         label_col_inds = np.array(label_col_inds, dtype = np.int32)
         self.predictor = KernelPairwisePredictor(self.A.ravel(), label_row_inds, label_col_inds)
         
         #self.dsikm1 = la.inv(K1 + regparam1 * (np.mat(np.eye(K1.shape[0]))))
         #self.dsikm2 = la.inv(K2 + regparam2 * (np.mat(np.eye(K2.shape[0]))))
     else:
         X1, X2 = self.X1, self.X2
         Y = self.Y.reshape((X1.shape[0], X2.shape[0]), order='F')
         if not self.trained:
             self.trained = True
             svals1, V, rsvecs1 = decomposition.decomposeDataMatrix(X1.T)
             self.svals1 = svals1.T
             self.evals1 = np.multiply(self.svals1, self.svals1)
             self.V = V
             self.rsvecs1 = np.mat(rsvecs1)
             
             if X1.shape == X2.shape and (X1 == X2).all():
                 svals2, U, rsvecs2 = svals1, V, rsvecs1
             else:
                 svals2, U, rsvecs2 = decomposition.decomposeDataMatrix(X2.T)
             self.svals2 = svals2.T
             self.evals2 = np.multiply(self.svals2, self.svals2)
             self.U = U
             self.rsvecs2 = np.mat(rsvecs2)
             
             self.VTYU = V.T * Y * U
         
         self.newevals1 = 1. / (self.evals1 + regparam1)
         self.newevals2 = 1. / (self.evals2 + regparam2)
         newevals = np.multiply(self.svals1, self.newevals1) * np.multiply(self.svals2, self.newevals2).T
         
         self.W = np.multiply(self.VTYU, newevals)
         self.W = self.rsvecs1.T * self.W * self.rsvecs2
         #self.predictor = LinearPairwisePredictor(self.W)
         self.predictor = LinearPairwisePredictor(np.array(self.W))
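
The linear branch computes the primal closed form W = (X1.T X1 + r1 I)^-1 X1.T Y X2 (X2.T X2 + r2 I)^-1 through one SVD per data matrix, so re-training for new regparams reuses the cached factors. A plain-NumPy verification sketch (random data, not the class above):

 import numpy as np

 rng = np.random.default_rng(1)
 m, n, d, e, r1, r2 = 8, 7, 3, 4, 0.5, 1.5
 X1, X2 = rng.standard_normal((m, d)), rng.standard_normal((n, e))
 Y = rng.standard_normal((m, n))

 W_direct = (np.linalg.solve(X1.T @ X1 + r1 * np.eye(d), X1.T @ Y)
             @ X2 @ np.linalg.inv(X2.T @ X2 + r2 * np.eye(e)))

 # SVD route mirroring the svals/rsvecs computation in the code
 U1, s1, Vt1 = np.linalg.svd(X1, full_matrices=False)
 U2, s2, Vt2 = np.linalg.svd(X2, full_matrices=False)
 inner = (U1.T @ Y @ U2) * np.multiply.outer(s1 / (s1**2 + r1),
                                             s2 / (s2**2 + r2))
 W_svd = Vt1.T @ inner @ Vt2
 assert np.allclose(W_direct, W_svd)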