Example 1
 def decompositionFromPool(self, rpool):
     """Builds decomposition representing the training data from resource pool.
     Default implementation
     builds and decomposes the kernel matrix itself (standard case), or the 
     empirical kernel map of the training data, if reduced set approximation is
     used. Inheriting classes may also re-implement this by decomposing the feature
     map of the data (e.g. linear kernel with low-dimensional data).
     @param rpool: resource pool
     @type rpool: dict
     @return: svals, evecs, U, Z
     @rtype: tuple of numpy matrices
     """
     train_X = rpool['X']
     kernel = rpool['kernel_obj']
     if 'basis_vectors' in rpool:
         basis_vectors = rpool['basis_vectors']
         if train_X.shape[1] != basis_vectors.shape[1]:
             raise ValueError("X and basis_vectors have different number of columns")
         K_r = kernel.getKM(train_X).T
         Krr = kernel.getKM(basis_vectors)
         svals, evecs, U, Z = decomposeSubsetKM(K_r, Krr)
     else:
         K = kernel.getKM(train_X).T
         svals, evecs = linalg.eig_psd(K)
         U, Z = None, None
     return svals, evecs, U, Z
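
A minimal sketch of what the standard (non-reduced-set) branch computes. Here eig_psd is assumed, based on how the ranking examples later square svals (evals = np.multiply(self.svals, self.svals)), to return the square roots of the positive eigenvalues of a PSD matrix together with the corresponding eigenvectors; the plain-numpy stand-in below is for illustration only:

    import numpy as np

    def eig_psd_sketch(K, tol=1e-12):
        # Symmetric eigendecomposition of the PSD kernel matrix K.
        evals, evecs = np.linalg.eigh(K)
        keep = evals > tol
        # Return square roots, so they play the role of singular values
        # (the ranking examples compute evals = svals * svals).
        return np.sqrt(evals[keep]), evecs[:, keep]

    rng = np.random.default_rng(0)
    X = rng.standard_normal((5, 3))
    K = X @ X.T                      # linear kernel matrix, PSD by construction
    svals, evecs = eig_psd_sketch(K)
    # The kernel matrix is recovered from the decomposition.
    assert np.allclose(evecs @ np.diag(svals ** 2) @ evecs.T, K)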
Example 2
 def decompositionFromPool(self, rpool):
     kernel = rpool['kernel_obj']
     self.X = array_tools.as_2d_array(rpool['X'], True)
     if 'basis_vectors' in rpool:
         basis_vectors = array_tools.as_2d_array(rpool['basis_vectors'], True)
         if self.X.shape[1] != basis_vectors.shape[1]:
             raise ValueError("X and basis_vectors have different number of columns")
     else:
         basis_vectors = None
     if "bias" in rpool:
         self.bias = float(rpool["bias"])
     else:
         self.bias = 1.
     if basis_vectors is not None or self.X.shape[1] > self.X.shape[0]:
         #First possibility: subset of regressors has been invoked
         if basis_vectors is not None:
             K_r = kernel.getKM(self.X).T
             Krr = kernel.getKM(basis_vectors)
             svals, evecs, U, Z = decomposeSubsetKM(K_r, Krr)
         #Second possibility: dual mode if more attributes than examples
         else:
             K = kernel.getKM(self.X).T
             svals, evecs = linalg.eig_psd(K)
             U, Z = None, None
     #Third possibility: primal decomposition
     else:
         #Invoking getPrimalDataMatrix adds the bias feature
         X = getPrimalDataMatrix(self.X, self.bias)
         evecs, svals, U = linalg.svd_economy_sized(X)
         #Right singular vectors are not needed in the primal case
         U, Z = None, None
     return svals, evecs, U, Z
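
The primal branch above relies on a helper that appends the bias feature. Below is a plausible sketch of such a helper, assuming the bias enters the linear kernel as an additive constant (so the appended column is filled with sqrt(bias)); the name primal_data_matrix_sketch is hypothetical:

    import numpy as np

    def primal_data_matrix_sketch(X, bias):
        # Append a constant column so that the linear kernel of the augmented
        # matrix equals X X^T + bias on every entry.
        if bias == 0:
            return X
        bias_col = np.sqrt(bias) * np.ones((X.shape[0], 1))
        return np.hstack([X, bias_col])

    X = np.arange(6.0).reshape(3, 2)
    Xb = primal_data_matrix_sketch(X, bias=1.0)
    assert np.allclose(Xb @ Xb.T, X @ X.T + 1.0)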
Example 3
 def decompositionFromPool(self, rpool):
     """Builds decomposition representing the training data from resource pool.
     Default implementation
     builds and decomposes the kernel matrix itself (standard case), or the 
     empirical kernel map of the training data, if reduced set approximation is
     used. Inheriting classes may also re-implement this by decomposing the feature
     map of the data (e.g. linear kernel with low-dimensional data).
     @param rpool: resource pool
     @type rpool: dict
     @return: svals, evecs, U, Z
     @rtype: tuple of numpy matrices
     """
     train_X = rpool['X']
     kernel = rpool['kernel_obj']
     if 'basis_vectors' in rpool:
         basis_vectors = rpool['basis_vectors']
         if train_X.shape[1] != basis_vectors.shape[1]:
             raise ValueError(
                 "X and basis_vectors have different number of columns")
         K_r = kernel.getKM(train_X).T
         Krr = kernel.getKM(basis_vectors)
         svals, evecs, U, Z = decomposeSubsetKM(K_r, Krr)
     else:
         K = kernel.getKM(train_X).T
         svals, evecs = linalg.eig_psd(K)
         U, Z = None, None
     return svals, evecs, U, Z
Example 4
 def decompositionFromPool(self, rpool):
     kernel = rpool['kernel_obj']
     self.X = array_tools.as_2d_array(rpool['X'], True)
     if 'basis_vectors' in rpool:
         basis_vectors = array_tools.as_2d_array(rpool['basis_vectors'],
                                                 True)
         if self.X.shape[1] != basis_vectors.shape[1]:
             raise ValueError(
                 "X and basis_vectors have different number of columns")
     else:
         basis_vectors = None
     if "bias" in rpool:
         self.bias = float(rpool["bias"])
     else:
         self.bias = 1.
     if basis_vectors is not None or self.X.shape[1] > self.X.shape[0]:
         #First possibility: subset of regressors has been invoked
         if basis_vectors is not None:
             K_r = kernel.getKM(self.X).T
             Krr = kernel.getKM(basis_vectors)
             svals, evecs, U, Z = decomposeSubsetKM(K_r, Krr)
         #Second possibility: dual mode if more attributes than examples
         else:
             K = kernel.getKM(self.X).T
             svals, evecs = linalg.eig_psd(K)
             U, Z = None, None
     #Third possibility: primal decomposition
     else:
         #Invoking getPrimalDataMatrix adds the bias feature
         X = getPrimalDataMatrix(self.X, self.bias)
         evecs, svals, U = linalg.svd_economy_sized(X)
         #Right singular vectors are not needed in the primal case
         U, Z = None, None
     return svals, evecs, U, Z
Example 5
 def decompositionFromPool(self, rpool):
     """Builds decomposition representing the training data from a precomputed
     kernel matrix in the resource pool."""
     K_train = rpool['kernel_matrix']
     if 'basis_vectors' in rpool:
         if K_train.shape[1] != rpool["basis_vectors"].shape[1]:
             raise ValueError("When using basis vectors, both kernel matrices must contain an equal number of columns")
         svals, rsvecs, U, Z = decomposeSubsetKM(K_train.T, rpool['basis_vectors'])
     else:
         svals, rsvecs = linalg.eig_psd(K_train)
         U, Z = None, None
     return svals, rsvecs, U, Z
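
A minimal usage sketch for the precomputed-kernel variant above. Note that in this variant 'basis_vectors' holds the basis-versus-basis kernel block rather than raw data vectors, which is why the error message speaks of two kernel matrices. The dict layout and the adapter instance name in the commented call are assumptions for illustration:

    import numpy as np

    rng = np.random.default_rng(0)
    X = rng.standard_normal((6, 4))
    B = X[:3]                              # three examples kept as basis vectors
    rpool = {'kernel_matrix': X @ B.T,     # n_train x n_basis kernel block
             'basis_vectors': B @ B.T}     # n_basis x n_basis kernel block
    # svals, rsvecs, U, Z = adapter.decompositionFromPool(rpool)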
Example 6
 def decompositionFromPool(self, rpool):
     """Builds decomposition representing the training data from a precomputed
     kernel matrix in the resource pool."""
     K_train = rpool['kernel_matrix']
     if 'basis_vectors' in rpool:
         if K_train.shape[1] != rpool["basis_vectors"].shape[1]:
             raise ValueError(
                 "When using basis vectors, both kernel matrices must contain an equal number of columns"
             )
         svals, rsvecs, U, Z = decomposeSubsetKM(K_train.T,
                                                 rpool['basis_vectors'])
     else:
         svals, rsvecs = linalg.eig_psd(K_train)
         U, Z = None, None
     return svals, rsvecs, U, Z
Example 7
    def solve(self, regparam=1.0):
        """Re-trains RankRLS for the given regparam.
               
        Parameters
        ----------
        regparam : float, optional
            regularization parameter, regparam > 0 (default=1.0)
            
        Notes
        -----   
        """
        size = self.svecs.shape[0]

        if not hasattr(self, "multipleright"):
            vals = np.concatenate([
                np.ones((self.pairs.shape[0]), dtype=np.float64), -np.ones(
                    (self.pairs.shape[0]), dtype=np.float64)
            ])
            row = np.concatenate([
                np.arange(self.pairs.shape[0]),
                np.arange(self.pairs.shape[0])
            ])
            col = np.concatenate([self.pairs[:, 0], self.pairs[:, 1]])
            coo = coo_matrix((vals, (row, col)),
                             shape=(self.pairs.shape[0], size))
            self.L = (coo.T * coo)  #.todense()

            #Eigenvalues of the kernel matrix
            #evals = np.multiply(self.svals, self.svals)

            #Temporary variables
            ssvecs = np.multiply(self.svecs, self.svals)

            #These are cached for later use in solve and computeHO functions
            ssvecsTLssvecs = ssvecs.T * self.L * ssvecs
            LRsvals, LRevecs = linalg.eig_psd(ssvecsTLssvecs)
            LRsvals = np.mat(LRsvals)
            LRevals = np.multiply(LRsvals, LRsvals)
            LY = coo.T * np.mat(np.ones((self.pairs.shape[0], 1)))
            self.multipleright = LRevecs.T * (ssvecs.T * LY)
            self.multipleleft = ssvecs * LRevecs
            self.LRevals = LRevals
            self.LRevecs = LRevecs

        self.regparam = regparam

        #Compute the eigenvalues determined by the given regularization parameter
        self.neweigvals = 1. / (self.LRevals + regparam)
        self.A = self.svecs * np.multiply(1. / self.svals.T, (
            self.LRevecs * np.multiply(self.neweigvals.T, self.multipleright)))
        self.predictor = self.svdad.createModel(self)
Example 8
    def solve(self, regparam=1.0):
        """Re-trains RankRLS for the given regparam.
               
        Parameters
        ----------
        regparam : float, optional
            regularization parameter, regparam > 0 (default=1.0)
            
        Notes
        -----   
        """
        size = self.svecs.shape[0]

        if not hasattr(self, "multipleright"):
            vals = np.concatenate(
                [np.ones((self.pairs.shape[0]), dtype=np.float64), -np.ones((self.pairs.shape[0]), dtype=np.float64)]
            )
            row = np.concatenate([np.arange(self.pairs.shape[0]), np.arange(self.pairs.shape[0])])
            col = np.concatenate([self.pairs[:, 0], self.pairs[:, 1]])
            coo = coo_matrix((vals, (row, col)), shape=(self.pairs.shape[0], size))
            self.L = coo.T * coo  # .todense()

            # Eigenvalues of the kernel matrix
            # evals = np.multiply(self.svals, self.svals)

            # Temporary variables
            ssvecs = np.multiply(self.svecs, self.svals)

            # These are cached for later use in solve and computeHO functions
            ssvecsTLssvecs = ssvecs.T * self.L * ssvecs
            LRsvals, LRevecs = linalg.eig_psd(ssvecsTLssvecs)
            LRsvals = np.mat(LRsvals)
            LRevals = np.multiply(LRsvals, LRsvals)
            LY = coo.T * np.mat(np.ones((self.pairs.shape[0], 1)))
            self.multipleright = LRevecs.T * (ssvecs.T * LY)
            self.multipleleft = ssvecs * LRevecs
            self.LRevals = LRevals
            self.LRevecs = LRevecs

        self.regparam = regparam

        # Compute the eigenvalues determined by the given regularization parameter
        self.neweigvals = 1.0 / (self.LRevals + regparam)
        self.A = self.svecs * np.multiply(
            1.0 / self.svals.T, (self.LRevecs * np.multiply(self.neweigvals.T, self.multipleright))
        )
        self.predictor = self.svdad.createModel(self)
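
The cached matrix self.L above is C.T * C for the incidence matrix C of the preference graph: one row per pair, +1 on the preferred example and -1 on the other. A self-contained sketch of that construction, with toy stand-ins for self.pairs and size:

    import numpy as np
    from scipy.sparse import coo_matrix

    pairs = np.array([[0, 1], [2, 1], [3, 0]])  # each row: (preferred, other)
    size = 4                                    # number of training examples
    n = pairs.shape[0]
    vals = np.concatenate([np.ones(n), -np.ones(n)])
    row = np.concatenate([np.arange(n), np.arange(n)])
    col = np.concatenate([pairs[:, 0], pairs[:, 1]])
    C = coo_matrix((vals, (row, col)), shape=(n, size))
    L = (C.T @ C).toarray()
    # L is Laplacian-like: every row sums to zero.
    assert np.allclose(L.sum(axis=1), 0)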
Example 9
    def solve(self, regparam=1.0):
        """Trains the learning algorithm, using the given regularization parameter.
               
        Parameters
        ----------
        regparam : float (regparam > 0)
            regularization parameter
            
        Notes
        -----
    
        Computational complexity of re-training:
        m = n_samples, d = n_features, l = n_labels, b = n_bvectors
        
        O(lm^2): basic case
        
        O(lmd): Linear Kernel, d < m
        
        O(lmb): Sparse approximation with basis vectors 
        """
        if not hasattr(self, "D"):
            qidlist = self.qids
            objcount = max(qidlist) + 1

            labelcounts = np.mat(np.zeros((1, objcount)))
            Pvals = np.ones(self.size)
            for i in range(self.size):
                qid = qidlist[i]
                labelcounts[0, qid] = labelcounts[0, qid] + 1
            D = np.mat(np.ones((1, self.size), dtype=np.float64))

            #The centering matrix way (HO computations should be modified accordingly too)
            for i in range(self.size):
                qid = qidlist[i]
                Pvals[i] = 1. / np.sqrt(labelcounts[0, qid])

            #The old Laplacian matrix way
            #for i in range(self.size):
            #    qid = qidlist[i]
            #    D[0, i] = labelcounts[0, qid]

            P = scipy.sparse.coo_matrix(
                (Pvals, (np.arange(0, self.size), qidlist)),
                shape=(self.size, objcount))
            P_csc = P.tocsc()
            P_csr = P.tocsr()

            #Eigenvalues of the kernel matrix
            #evals = np.multiply(self.svals, self.svals)

            #Temporary variables
            ssvecs = np.multiply(self.svecs, self.svals)

            #These are cached for later use in solve and holdout functions
            ssvecsTLssvecs = (np.multiply(ssvecs.T, D) -
                              (ssvecs.T * P_csc) * P_csr.T) * ssvecs
            LRsvals, LRevecs = linalg.eig_psd(ssvecsTLssvecs)
            LRsvals = np.mat(LRsvals)
            LRevals = np.multiply(LRsvals, LRsvals)
            LY = np.multiply(D.T, self.Y) - P_csr * (P_csc.T * self.Y)
            self.multipleright = LRevecs.T * (ssvecs.T * LY)
            self.multipleleft = ssvecs * LRevecs
            self.LRevals = LRevals
            self.LRevecs = LRevecs
            self.D = D

        self.regparam = regparam

        #Compute the eigenvalues determined by the given regularization parameter
        self.neweigvals = 1. / (self.LRevals + regparam)
        self.A = self.svecs * np.multiply(1. / self.svals.T, (
            self.LRevecs * np.multiply(self.neweigvals.T, self.multipleright)))
        self.predictor = self.svdad.createModel(self)
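
In the block above D is all ones and P has one column per query, with entries 1/sqrt(n_q) on that query's rows, so the implicit matrix diag(D) - P P^T centers the labels within each query (for a single query of size n it equals I - (1/n) 11^T). A small numerical check of that identity:

    import numpy as np
    import scipy.sparse

    qids = np.array([0, 0, 1, 1, 1])            # two queries, sizes 2 and 3
    size, nq = qids.shape[0], qids.max() + 1
    counts = np.bincount(qids)
    Pvals = 1.0 / np.sqrt(counts[qids])
    P = scipy.sparse.coo_matrix((Pvals, (np.arange(size), qids)),
                                shape=(size, nq)).toarray()
    L = np.eye(size) - P @ P.T
    # Each query block is the centering matrix I - (1/n) 11^T:
    assert np.allclose(L[:2, :2], np.eye(2) - np.ones((2, 2)) / 2)
    assert np.allclose(L[2:, 2:], np.eye(3) - np.ones((3, 3)) / 3)
    assert np.allclose(L[:2, 2:], 0)            # no cross-query coupling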
Example 10
 def solve(self, regparam1, regparam2):
     """Re-trains TwoStepRLS for the given regparams
            
     Parameters
     ----------
     regparam1 : float
         regularization parameter 1, regparam1 > 0

     regparam2 : float
         regularization parameter 2, regparam2 > 0

     Notes
     -----

     Computational complexity of re-training:

     m = n_samples1, n = n_samples2, d = n_features1, e = n_features2
     
     O(ed^2 + de^2) Linear version (assumption: d < m, e < n)
     
     O(m^3 + n^3) Kernel version
     """
     self.regparam1 = regparam1
     self.regparam2 = regparam2
     if self.kernelmode:
         K1, K2 = self.K1, self.K2
         Y = self.Y.reshape((K1.shape[0], K2.shape[0]), order='F')
         if not self.trained:
             self.trained = True
             evals1, V = linalg.eig_psd(K1)
             evals1 = np.mat(evals1).T
             evals1 = np.multiply(evals1, evals1)
             V = np.mat(V)
             self.evals1 = evals1
             self.V = V
             
             evals2, U = linalg.eig_psd(K2)
             evals2 = np.mat(evals2).T
             evals2 = np.multiply(evals2, evals2)
             U = np.mat(U)
             self.evals2 = evals2
             self.U = U
             #Use the reshaped label matrix, as in the linear branch below
             self.VTYU = V.T * Y * U
         
         self.newevals1 = 1. / (self.evals1 + regparam1)
         self.newevals2 = 1. / (self.evals2 + regparam2)
         newevals = self.newevals1 * self.newevals2.T
         
         self.A = np.multiply(self.VTYU, newevals)
         self.A = self.V * self.A * self.U.T
         self.A = np.array(self.A)
         label_row_inds, label_col_inds = np.unravel_index(np.arange(K1.shape[0] * K2.shape[0]), (K1.shape[0], K2.shape[0]))
         label_row_inds = np.array(label_row_inds, dtype=np.int32)
         label_col_inds = np.array(label_col_inds, dtype=np.int32)
         self.predictor = KernelPairwisePredictor(self.A.ravel(), label_row_inds, label_col_inds)
         
     else:
         X1, X2 = self.X1, self.X2
         Y = self.Y.reshape((X1.shape[0], X2.shape[0]), order='F')
         if not self.trained:
             self.trained = True
             V, svals1, rsvecs1 = linalg.svd_economy_sized(X1)
             svals1 = np.mat(svals1)
             self.svals1 = svals1.T
             self.evals1 = np.multiply(self.svals1, self.svals1)
             self.V = V
             self.rsvecs1 = np.mat(rsvecs1)
             
             if X1.shape == X2.shape and (X1 == X2).all():
                 svals2, U, rsvecs2 = svals1, V, rsvecs1
             else:
                 U, svals2, rsvecs2 = linalg.svd_economy_sized(X2)
                 svals2 = np.mat(svals2)
             self.svals2 = svals2.T
             self.evals2 = np.multiply(self.svals2, self.svals2)
             self.U = U
             self.rsvecs2 = np.mat(rsvecs2)
             
             self.VTYU = V.T * Y * U
         
         self.newevals1 = 1. / (self.evals1 + regparam1)
         self.newevals2 = 1. / (self.evals2 + regparam2)
         newevals = np.multiply(self.svals1, self.newevals1) * np.multiply(self.svals2, self.newevals2).T
         
         self.W = np.multiply(self.VTYU, newevals)
         self.W = self.rsvecs1.T * self.W * self.rsvecs2
         #self.predictor = LinearPairwisePredictor(self.W)
         self.predictor = LinearPairwisePredictor(np.array(self.W))
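
The Hadamard-product step in the kernel branch is the closed form A = (K1 + regparam1*I)^{-1} Y (K2 + regparam2*I)^{-1}: with K1 = V E1 V.T and K2 = U E2 U.T, the two matrix inverses reduce to an elementwise scaling of V.T Y U by the outer product of 1/(e1 + regparam1) and 1/(e2 + regparam2). A small numerical check of that identity on random PSD matrices:

    import numpy as np

    rng = np.random.default_rng(0)
    m, n, lam1, lam2 = 4, 3, 0.5, 2.0
    A1 = rng.standard_normal((m, m))
    A2 = rng.standard_normal((n, n))
    K1, K2 = A1 @ A1.T, A2 @ A2.T               # random PSD kernel matrices
    Y = rng.standard_normal((m, n))
    e1, V = np.linalg.eigh(K1)
    e2, U = np.linalg.eigh(K2)
    scale = np.outer(1.0 / (e1 + lam1), 1.0 / (e2 + lam2))
    A = V @ ((V.T @ Y @ U) * scale) @ U.T
    direct = np.linalg.solve(K1 + lam1 * np.eye(m),
                             Y @ np.linalg.inv(K2 + lam2 * np.eye(n)))
    assert np.allclose(A, direct)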
Example 11
 def solve(self, regparam=1.0):
     """Trains the learning algorithm, using the given regularization parameter.
            
     Parameters
     ----------
     regparam : float (regparam > 0)
         regularization parameter
         
     Notes
     -----
 
     Computational complexity of re-training:
     m = n_samples, d = n_features, l = n_labels, b = n_bvectors
     
     O(lm^2): basic case
     
     O(lmd): Linear Kernel, d < m
     
     O(lmb): Sparse approximation with basis vectors 
     """
     if not hasattr(self, "D"):
         qidlist = self.qids
         objcount = max(qidlist) + 1
         
         labelcounts = np.mat(np.zeros((1, objcount)))
         Pvals = np.ones(self.size)
         for i in range(self.size):
             qid = qidlist[i]
             labelcounts[0, qid] = labelcounts[0, qid] + 1
         D = np.mat(np.ones((1, self.size), dtype=np.float64))
         
         #The centering matrix way (HO computations should be modified accordingly too)
         for i in range(self.size):
             qid = qidlist[i]
             Pvals[i] = 1. / np.sqrt(labelcounts[0, qid])
         
         #The old Laplacian matrix way
         #for i in range(self.size):
         #    qid = qidlist[i]
         #    D[0, i] = labelcounts[0, qid]
         
         P = scipy.sparse.coo_matrix((Pvals, (np.arange(0, self.size), qidlist)), shape=(self.size, objcount))
         P_csc = P.tocsc()
         P_csr = P.tocsr()
         
         
         #Eigenvalues of the kernel matrix
         #evals = np.multiply(self.svals, self.svals)
         
         #Temporary variables
         ssvecs = np.multiply(self.svecs, self.svals)
         
         #These are cached for later use in solve and holdout functions
         ssvecsTLssvecs = (np.multiply(ssvecs.T, D) - (ssvecs.T * P_csc) * P_csr.T) * ssvecs
         LRsvals, LRevecs = linalg.eig_psd(ssvecsTLssvecs)
         LRsvals = np.mat(LRsvals)
         LRevals = np.multiply(LRsvals, LRsvals)
         LY = np.multiply(D.T, self.Y) - P_csr * (P_csc.T * self.Y)
         self.multipleright = LRevecs.T * (ssvecs.T * LY)
         self.multipleleft = ssvecs * LRevecs
         self.LRevals = LRevals
         self.LRevecs = LRevecs
         self.D = D
     
     
     self.regparam = regparam
     
     #Compute the eigenvalues determined by the given regularization parameter
     self.neweigvals = 1. / (self.LRevals + regparam)
     self.A = self.svecs * np.multiply(1. / self.svals.T, (self.LRevecs * np.multiply(self.neweigvals.T, self.multipleright)))
     self.predictor = self.svdad.createModel(self)
Example 12
    def solve(self, regparam1, regparam2):
        """Re-trains TwoStepRLS for the given regparams
               
        Parameters
        ----------
        regparam1 : float
            regularization parameter 1, regparam1 > 0

        regparam2 : float
            regularization parameter 2, regparam2 > 0

        Notes
        -----

        Computational complexity of re-training:

        m = n_samples1, n = n_samples2, d = n_features1, e = n_features2
        
        O(ed^2 + de^2) Linear version (assumption: d < m, e < n)
        
        O(m^3 + n^3) Kernel version
        """
        self.regparam1 = regparam1
        self.regparam2 = regparam2
        if self.kernelmode:
            K1, K2 = self.K1, self.K2
            Y = self.Y.reshape((K1.shape[0], K2.shape[0]), order='F')
            if not self.trained:
                self.trained = True
                evals1, V = linalg.eig_psd(K1)
                evals1 = np.mat(evals1).T
                evals1 = np.multiply(evals1, evals1)
                V = np.mat(V)
                self.evals1 = evals1
                self.V = V

                evals2, U = linalg.eig_psd(K2)
                evals2 = np.mat(evals2).T
                evals2 = np.multiply(evals2, evals2)
                U = np.mat(U)
                self.evals2 = evals2
                self.U = U
                # Use the reshaped label matrix, as in the linear branch below
                self.VTYU = V.T * Y * U

            self.newevals1 = 1. / (self.evals1 + regparam1)
            self.newevals2 = 1. / (self.evals2 + regparam2)
            newevals = self.newevals1 * self.newevals2.T

            self.A = np.multiply(self.VTYU, newevals)
            self.A = self.V * self.A * self.U.T
            self.A = np.array(self.A)
            label_row_inds, label_col_inds = np.unravel_index(
                np.arange(K1.shape[0] * K2.shape[0]),
                (K1.shape[0], K2.shape[0]))
            label_row_inds = np.array(label_row_inds, dtype=np.int32)
            label_col_inds = np.array(label_col_inds, dtype=np.int32)
            self.predictor = KernelPairwisePredictor(self.A.ravel(),
                                                     label_row_inds,
                                                     label_col_inds)

        else:
            X1, X2 = self.X1, self.X2
            Y = self.Y.reshape((X1.shape[0], X2.shape[0]), order='F')
            if not self.trained:
                self.trained = True
                V, svals1, rsvecs1 = linalg.svd_economy_sized(X1)
                svals1 = np.mat(svals1)
                self.svals1 = svals1.T
                self.evals1 = np.multiply(self.svals1, self.svals1)
                self.V = V
                self.rsvecs1 = np.mat(rsvecs1)

                if X1.shape == X2.shape and (X1 == X2).all():
                    svals2, U, rsvecs2 = svals1, V, rsvecs1
                else:
                    U, svals2, rsvecs2 = linalg.svd_economy_sized(X2)
                    svals2 = np.mat(svals2)
                self.svals2 = svals2.T
                self.evals2 = np.multiply(self.svals2, self.svals2)
                self.U = U
                self.rsvecs2 = np.mat(rsvecs2)

                self.VTYU = V.T * Y * U

            self.newevals1 = 1. / (self.evals1 + regparam1)
            self.newevals2 = 1. / (self.evals2 + regparam2)
            newevals = np.multiply(self.svals1, self.newevals1) * np.multiply(
                self.svals2, self.newevals2).T

            self.W = np.multiply(self.VTYU, newevals)
            self.W = self.rsvecs1.T * self.W * self.rsvecs2
            #self.predictor = LinearPairwisePredictor(self.W)
            self.predictor = LinearPairwisePredictor(np.array(self.W))