Beispiel #1
0
 def solve_linear(self, regparam):
     """Train the linear pairwise model for the given regularization level.

     The SVDs of the two data matrices are computed on the first call and
     cached on the instance, so later calls only redo the cheap spectral
     re-weighting.
     """
     self.regparam = regparam
     X1, X2 = self.X1, self.X2
     Y = self.Y.reshape((X1.shape[0], X2.shape[0]), order='F')
     if not self.trained:
         self.trained = True
         # Decompose the first data matrix and cache its factors.
         sing1, V, rsv1 = decomposition.decomposeDataMatrix(X1.T)
         self.svals1 = sing1.T
         self.evals1 = multiply(self.svals1, self.svals1)
         self.V = V
         self.rsvecs1 = mat(rsv1)
         # Reuse the first decomposition when both domains share the data.
         if X1.shape == X2.shape and (X1 == X2).all():
             sing2, U, rsv2 = sing1, V, rsv1
         else:
             sing2, U, rsv2 = decomposition.decomposeDataMatrix(X2.T)
         self.svals2 = sing2.T
         self.evals2 = multiply(self.svals2, self.svals2)
         self.U = U
         self.rsvecs2 = mat(rsv2)
         # Project the labels into the joint singular bases.
         self.VTYU = V.T * Y * U
     # Spectral filter over the Kronecker-product singular values.
     sprod = self.svals1 * self.svals2.T
     shrunk = divide(sprod, multiply(sprod, sprod) + regparam)
     core = multiply(self.VTYU, shrunk)
     self.W = self.rsvecs1.T * core * self.rsvecs2
     self.model = LinearPairwiseModel(self.W)
Beispiel #2
0
 def solve_linear(self, regparam1, regparam2):
     """Train the linear model with a separate regularizer per domain.

     Parameters
     ----------
     regparam1 : float
         regularization parameter for domain 1, regparam1 > 0
     regparam2 : float
         regularization parameter for domain 2, regparam2 > 0

     Notes
     -----
     The SVDs of the two data matrices are computed on the first call and
     cached on the instance; later calls only redo the re-weighting.
     """
     self.regparam1 = regparam1
     self.regparam2 = regparam2
     X1, X2 = self.X1, self.X2
     Y = self.Y.reshape((X1.shape[0], X2.shape[0]), order='F')
     if not self.trained:
         self.trained = True
         svals1, V, rsvecs1 = decomposition.decomposeDataMatrix(X1.T)
         self.svals1 = svals1.T
         self.evals1 = np.multiply(self.svals1, self.svals1)
         self.V = V
         self.rsvecs1 = np.mat(rsvecs1)

         # Reuse the first decomposition when both domains share the data.
         if X1.shape == X2.shape and (X1 == X2).all():
             svals2, U, rsvecs2 = svals1, V, rsvecs1
         else:
             svals2, U, rsvecs2 = decomposition.decomposeDataMatrix(X2.T)
         self.svals2 = svals2.T
         self.evals2 = np.multiply(self.svals2, self.svals2)
         self.U = U
         self.rsvecs2 = np.mat(rsvecs2)

         # Labels projected into the joint singular bases.
         self.VTYU = V.T * Y * U

     self.newevals1 = 1. / (self.evals1 + regparam1)
     self.newevals2 = 1. / (self.evals2 + regparam2)
     # BUGFIX: removed a dead assignment that computed
     # self.newevals1 * self.newevals2.T and was immediately overwritten.
     newevals = np.multiply(self.svals1, self.newevals1) * np.multiply(self.svals2, self.newevals2).T

     self.W = np.multiply(self.VTYU, newevals)
     self.W = self.rsvecs1.T * self.W * self.rsvecs2
     self.model = LinearPairwiseModel(self.W)
Beispiel #3
0
 def solve_linear(self, regparam):
     """Fit the linear pairwise model for the given regularization parameter.

     Singular value decompositions of both data matrices are cached on the
     first call; subsequent calls merely re-filter the spectrum.
     """
     self.regparam = regparam
     X1, X2 = self.X1, self.X2
     Y = self.Y.reshape((X1.shape[0], X2.shape[0]), order='F')
     if not self.trained:
         self.trained = True
         s1, V, r1 = decomposition.decomposeDataMatrix(X1.T)
         self.svals1 = s1.T
         self.evals1 = multiply(self.svals1, self.svals1)
         self.V = V
         self.rsvecs1 = mat(r1)
         # Identical inputs on both sides: one SVD is enough.
         same_data = X1.shape == X2.shape and (X1 == X2).all()
         if same_data:
             s2, U, r2 = s1, V, r1
         else:
             s2, U, r2 = decomposition.decomposeDataMatrix(X2.T)
         self.svals2 = s2.T
         self.evals2 = multiply(self.svals2, self.svals2)
         self.U = U
         self.rsvecs2 = mat(r2)
         self.VTYU = V.T * Y * U
     # Shrink each Kronecker singular value s towards s / (s^2 + lambda).
     pairsv = self.svals1 * self.svals2.T
     shrink = divide(pairsv, multiply(pairsv, pairsv) + regparam)
     filtered = multiply(self.VTYU, shrink)
     self.W = self.rsvecs1.T * filtered * self.rsvecs2
     self.model = LinearPairwiseModel(self.W)
Beispiel #4
0
 def solve_linear_conditional_ranking(self, regparam):
     """Train the linear conditional-ranking model.

     The label matrix is centered per query (domain 2) before the
     regularized least-squares step, which turns the squared loss into a
     ranking-style loss.
     """
     self.regparam = regparam
     X1, X2 = self.X1, self.X2
     Y = self.Y.reshape((X1.shape[0], X2.shape[0]), order='F')
     if not self.trained:
         self.trained = True
         s1, V, r1 = decomposition.decomposeDataMatrix(X1.T)
         self.svals1 = s1.T
         self.evals1 = multiply(self.svals1, self.svals1)
         self.V = V
         self.rsvecs1 = mat(r1)
         # Projector that removes the per-query mean (centering matrix).
         qlen = X2.shape[0]
         unit = (1./math.sqrt(qlen))*mat(ones((qlen,1)))
         C = mat(eye(qlen))-unit*unit.T
         # Decompose the centered second-domain data matrix.
         s2, U, r2 = decomposition.decomposeDataMatrix(X2.T * C)
         self.svals2 = s2.T
         self.evals2 = multiply(self.svals2, self.svals2)
         self.U = U
         self.rsvecs2 = mat(r2)
         self.VTYU = V.T * Y * C * U
     # Spectral filter over the Kronecker-product singular values.
     pairsv = self.svals1 * self.svals2.T
     shrink = divide(pairsv, multiply(pairsv, pairsv) + regparam)
     core = multiply(self.VTYU, shrink)
     self.W = self.rsvecs1.T * core * self.rsvecs2
     self.model = LinearPairwiseModel(self.W)
Beispiel #5
0
 def solve_linear_conditional_ranking(self, regparam):
     """Train conditional ranking: rank domain-2 objects per domain-1 object.

     Centers the labels and the second data matrix with a per-query
     centering projector, then solves the regularized problem in the
     cached singular bases.
     """
     self.regparam = regparam
     X1, X2 = self.X1, self.X2
     Y = self.Y.reshape((X1.shape[0], X2.shape[0]), order='F')
     if not self.trained:
         self.trained = True
         sv_a, V, rs_a = decomposition.decomposeDataMatrix(X1.T)
         self.svals1 = sv_a.T
         self.evals1 = multiply(self.svals1, self.svals1)
         self.V = V
         self.rsvecs1 = mat(rs_a)
         # Build the centering projector C = I - (1/q) * ones * ones.T.
         qlen = X2.shape[0]
         onevec = (1./math.sqrt(qlen))*mat(ones((qlen,1)))
         C = mat(eye(qlen))-onevec*onevec.T
         sv_b, U, rs_b = decomposition.decomposeDataMatrix(X2.T * C)
         self.svals2 = sv_b.T
         self.evals2 = multiply(self.svals2, self.svals2)
         self.U = U
         self.rsvecs2 = mat(rs_b)
         # Centered labels projected into the joint singular bases.
         self.VTYU = V.T * Y * C * U
     prod_sv = self.svals1 * self.svals2.T
     weights = divide(prod_sv, multiply(prod_sv, prod_sv) + regparam)
     inner = multiply(self.VTYU, weights)
     self.W = self.rsvecs1.T * inner * self.rsvecs2
     self.model = LinearPairwiseModel(self.W)
Beispiel #6
0
    def solve_linear(self, regparam1, regparam2):
        """Train the linear model with a separate regularizer per domain.

        Parameters
        ----------
        regparam1 : float
            regularization parameter for domain 1, regparam1 > 0
        regparam2 : float
            regularization parameter for domain 2, regparam2 > 0

        Notes
        -----
        The SVDs of the two data matrices are computed on the first call
        and cached; later calls only redo the spectral re-weighting.
        """
        self.regparam1 = regparam1
        self.regparam2 = regparam2
        X1, X2 = self.X1, self.X2
        Y = self.Y.reshape((X1.shape[0], X2.shape[0]), order='F')
        if not self.trained:
            self.trained = True
            svals1, V, rsvecs1 = decomposition.decomposeDataMatrix(X1.T)
            self.svals1 = svals1.T
            self.evals1 = np.multiply(self.svals1, self.svals1)
            self.V = V
            self.rsvecs1 = np.mat(rsvecs1)

            # Reuse the first decomposition when both domains share the data.
            if X1.shape == X2.shape and (X1 == X2).all():
                svals2, U, rsvecs2 = svals1, V, rsvecs1
            else:
                svals2, U, rsvecs2 = decomposition.decomposeDataMatrix(X2.T)
            self.svals2 = svals2.T
            self.evals2 = np.multiply(self.svals2, self.svals2)
            self.U = U
            self.rsvecs2 = np.mat(rsvecs2)

            # Labels projected into the joint singular bases.
            self.VTYU = V.T * Y * U

        self.newevals1 = 1. / (self.evals1 + regparam1)
        self.newevals2 = 1. / (self.evals2 + regparam2)
        # BUGFIX: removed a dead assignment that computed
        # self.newevals1 * self.newevals2.T and was immediately overwritten.
        newevals = np.multiply(self.svals1, self.newevals1) * np.multiply(
            self.svals2, self.newevals2).T

        self.W = np.multiply(self.VTYU, newevals)
        self.W = self.rsvecs1.T * self.W * self.rsvecs2
        self.model = LinearPairwiseModel(self.W)
Beispiel #7
0
 def decompositionFromPool(self, rpool):
     """Build the decomposition required by the learner from a resource pool.

     Chooses between three strategies: subset of regressors (when basis
     vectors are supplied), dual decomposition of the kernel matrix (when
     there are more features than examples), and primal decomposition of
     the data matrix otherwise.

     Returns a tuple (svals, evecs, U, Z); U and Z are None except in the
     subset-of-regressors case.
     """
     kernel = rpool['kernel_obj']
     self.X = rpool['train_features']
     # BUGFIX: dict.has_key() was removed in Python 3; the 'in' operator is
     # equivalent (the pool already supports it, see the "bias" check).
     if 'basis_vectors' in rpool:
         basis_vectors = rpool['basis_vectors']
     else:
         basis_vectors = None
     if "bias" in rpool:
         self.bias = float(rpool["bias"])
     else:
         self.bias = 0.
     # BUGFIX: compare with 'is not None' -- '!= None' is evaluated
     # elementwise by numpy arrays, making the 'if' truth value ambiguous.
     if basis_vectors is not None or self.X.shape[1] > self.X.shape[0]:
         #First possibility: subset of regressors has been invoked
         if basis_vectors is not None:
             K_r = kernel.getKM(self.X).T
             Krr = kernel.getKM(basis_vectors)
             svals, evecs, U, Z = decomposition.decomposeSubsetKM(K_r, Krr)
         #Second possibility: dual mode if more attributes than examples
         else:
             K = kernel.getKM(self.X).T
             svals, evecs = decomposition.decomposeKernelMatrix(K)
             U, Z = None, None
     #Third possibility, primal decomposition
     else:
         #Invoking getPrimalDataMatrix adds the bias feature
         X = getPrimalDataMatrix(self.X,self.bias)
         svals, evecs, U = decomposition.decomposeDataMatrix(X.T)
         U, Z = None, None
     return svals, evecs, U, Z
Beispiel #8
0
 def decompositionFromPool(self, rpool):
     """Build the decomposition required by the learner from a resource pool.

     Chooses between three strategies: subset of regressors (when basis
     vectors are supplied), dual decomposition of the kernel matrix (when
     there are more features than examples), and primal decomposition of
     the data matrix otherwise.

     Returns a tuple (svals, evecs, U, Z); U and Z are None except in the
     subset-of-regressors case.
     """
     kernel = rpool[data_sources.KERNEL_OBJ]
     self.X = rpool[data_sources.TRAIN_FEATURES]
     # BUGFIX: dict.has_key() was removed in Python 3; the 'in' operator is
     # equivalent (the pool already supports it, see the "bias" check).
     if data_sources.BASIS_VECTORS in rpool:
         bvectors = rpool[data_sources.BASIS_VECTORS]
     else:
         bvectors = None
     if "bias" in rpool:
         self.bias = float(rpool["bias"])
     else:
         self.bias = 0.
     # BUGFIX: compare with 'is not None' -- '!= None' is evaluated
     # elementwise by numpy arrays, making the 'if' truth value ambiguous.
     if bvectors is not None or self.X.shape[1] > self.X.shape[0]:
         K = kernel.getKM(self.X).T
         #First possibility: subset of regressors has been invoked
         if bvectors is not None:
             svals, evecs, U, Z = decomposition.decomposeSubsetKM(
                 K, bvectors)
         #Second possibility: dual mode if more attributes than examples
         else:
             svals, evecs = decomposition.decomposeKernelMatrix(K)
             U, Z = None, None
     #Third possibility, primal decomposition
     else:
         #Invoking getPrimalDataMatrix adds the bias feature
         X = getPrimalDataMatrix(self.X, self.bias)
         svals, evecs, U = decomposition.decomposeDataMatrix(X.T)
         U, Z = None, None
     return svals, evecs, U, Z
Beispiel #9
0
 def decompositionFromPool(self, rpool):
     """Build the decomposition required by the learner from a resource pool.

     Chooses between three strategies: subset of regressors (when basis
     vectors are supplied), dual decomposition of the kernel matrix (when
     there are more features than examples), and primal decomposition of
     the data matrix otherwise.

     Returns a tuple (svals, evecs, U, Z); U and Z are None except in the
     subset-of-regressors case.
     """
     kernel = rpool['kernel_obj']
     self.X = rpool['train_features']
     # BUGFIX: dict.has_key() was removed in Python 3; the 'in' operator is
     # equivalent (the pool already supports it, see the "bias" check).
     if 'basis_vectors' in rpool:
         basis_vectors = rpool['basis_vectors']
     else:
         basis_vectors = None
     if "bias" in rpool:
         self.bias = float(rpool["bias"])
     else:
         self.bias = 0.
     # BUGFIX: compare with 'is not None' -- '!= None' is evaluated
     # elementwise by numpy arrays, making the 'if' truth value ambiguous.
     if basis_vectors is not None or self.X.shape[1] > self.X.shape[0]:
         #First possibility: subset of regressors has been invoked
         if basis_vectors is not None:
             K_r = kernel.getKM(self.X).T
             Krr = kernel.getKM(basis_vectors)
             svals, evecs, U, Z = decomposition.decomposeSubsetKM(K_r, Krr)
         #Second possibility: dual mode if more attributes than examples
         else:
             K = kernel.getKM(self.X).T
             svals, evecs = decomposition.decomposeKernelMatrix(K)
             U, Z = None, None
     #Third possibility, primal decomposition
     else:
         #Invoking getPrimalDataMatrix adds the bias feature
         X = getPrimalDataMatrix(self.X, self.bias)
         svals, evecs, U = decomposition.decomposeDataMatrix(X.T)
         U, Z = None, None
     return svals, evecs, U, Z
Beispiel #10
0
 def decompositionFromPool(self, rpool):
     """Build the decomposition required by the learner from a resource pool.

     Chooses between three strategies: subset of regressors (when basis
     vectors are supplied), dual decomposition of the kernel matrix (when
     there are more features than examples), and primal decomposition of
     the data matrix otherwise.

     Returns a tuple (svals, evecs, U, Z); U and Z are None except in the
     subset-of-regressors case.
     """
     kernel = rpool[data_sources.KERNEL_OBJ]
     self.X = rpool[data_sources.TRAIN_FEATURES]
     # BUGFIX: dict.has_key() was removed in Python 3; the 'in' operator is
     # equivalent (the pool already supports it, see the "bias" check).
     if data_sources.BASIS_VECTORS in rpool:
         bvectors = rpool[data_sources.BASIS_VECTORS]
     else:
         bvectors = None
     if "bias" in rpool:
         self.bias = float(rpool["bias"])
     else:
         self.bias = 0.
     # BUGFIX: compare with 'is not None' -- '!= None' is evaluated
     # elementwise by numpy arrays, making the 'if' truth value ambiguous.
     if bvectors is not None or self.X.shape[1] > self.X.shape[0]:
         K = kernel.getKM(self.X).T
         #First possibility: subset of regressors has been invoked
         if bvectors is not None:
             svals, evecs, U, Z = decomposition.decomposeSubsetKM(K, bvectors)
         #Second possibility: dual mode if more attributes than examples
         else:
             svals, evecs = decomposition.decomposeKernelMatrix(K)
             U, Z = None, None
     #Third possibility, primal decomposition
     else:
         #Invoking getPrimalDataMatrix adds the bias feature
         X = getPrimalDataMatrix(self.X,self.bias)
         svals, evecs, U = decomposition.decomposeDataMatrix(X.T)
         U, Z = None, None
     return svals, evecs, U, Z
Beispiel #11
0
 def decompositionFromPool(self, rpool):
     """Build the decomposition required by the learner from a resource pool.

     Chooses between three strategies: subset of regressors (when basis
     vectors are supplied), dual decomposition of the kernel matrix (when
     there are more features than examples), and primal decomposition of
     the data matrix otherwise.

     Returns a tuple (svals, evecs, U, Z); U and Z are None except in the
     subset-of-regressors case.
     """
     kernel = rpool['kernel_obj']
     self.X = array_tools.as_2d_array(rpool['X'], True)
     # BUGFIX: dict.has_key() was removed in Python 3; the 'in' operator is
     # equivalent (the pool already supports it, see the "bias" check).
     if 'basis_vectors' in rpool:
         basis_vectors = array_tools.as_2d_array(rpool['basis_vectors'], True)
         if self.X.shape[1] != basis_vectors.shape[1]:
             raise Exception("X and basis_vectors have different number of columns")
     else:
         basis_vectors = None
     if "bias" in rpool:
         self.bias = float(rpool["bias"])
     else:
         self.bias = 1.
     # BUGFIX: compare with 'is not None' -- basis_vectors is a numpy array
     # here, so '!= None' is elementwise and the 'if' truth value ambiguous.
     if basis_vectors is not None or self.X.shape[1] > self.X.shape[0]:
         #First possibility: subset of regressors has been invoked
         if basis_vectors is not None:
             K_r = kernel.getKM(self.X).T
             Krr = kernel.getKM(basis_vectors)
             svals, evecs, U, Z = decomposition.decomposeSubsetKM(K_r, Krr)
         #Second possibility: dual mode if more attributes than examples
         else:
             K = kernel.getKM(self.X).T
             svals, evecs = decomposition.decomposeKernelMatrix(K)
             U, Z = None, None
     #Third possibility, primal decomposition
     else:
         #Invoking getPrimalDataMatrix adds the bias feature
         X = getPrimalDataMatrix(self.X,self.bias)
         svals, evecs, U = decomposition.decomposeDataMatrix(X.T)
         U, Z = None, None
     return svals, evecs, U, Z
Beispiel #12
0
    def solve_linear_conditional_ranking(self, regparam):
        """Trains conditional ranking KronRLS, that ranks objects from
        domain 2 against objects from domain 1.

        Parameters
        ----------
        regparam : float, optional
            regularization parameter, regparam > 0 (default=1.0)

        Notes
        -----
        Minimizes RankRLS type of loss. Currently only linear kernel
        supported. Including the code here is a hack, this should
        probably be implemented as an independent learner.
        """
        self.regparam = regparam
        X1, X2 = self.X1, self.X2
        Y = self.Y.reshape((X1.shape[0], X2.shape[0]), order='F')

        svals1, V, rsvecs1 = decomposition.decomposeDataMatrix(X1.T)
        self.svals1 = svals1.T
        self.evals1 = np.multiply(self.svals1, self.svals1)
        self.V = V
        self.rsvecs1 = np.mat(rsvecs1)

        # Centering projector that removes the per-query mean from domain 2.
        # BUGFIX: np.math was only a deprecated alias of the stdlib math
        # module and was removed in NumPy 2.0; np.sqrt is equivalent here.
        qlen = X2.shape[0]
        onevec = (1. / np.sqrt(qlen)) * np.mat(np.ones((qlen, 1)))
        C = np.mat(np.eye(qlen)) - onevec * onevec.T

        svals2, U, rsvecs2 = decomposition.decomposeDataMatrix(X2.T * C)
        self.svals2 = svals2.T
        self.evals2 = np.multiply(self.svals2, self.svals2)
        self.U = U
        self.rsvecs2 = np.mat(rsvecs2)

        # Centered labels projected into the joint singular bases.
        self.VTYU = V.T * Y * C * U

        # Spectral filter over the Kronecker-product singular values.
        kronsvals = self.svals1 * self.svals2.T

        newevals = np.divide(kronsvals,
                             np.multiply(kronsvals, kronsvals) + regparam)
        self.W = np.multiply(self.VTYU, newevals)
        self.W = self.rsvecs1.T * self.W * self.rsvecs2
        self.predictor = LinearPairwisePredictor(np.array(self.W))
Beispiel #13
0
 def solve(self, regparam1, regparam2):
     """Re-trains TwoStepRLS for the given regparams

     Parameters
     ----------
     regparam1: float
         regularization parameter 1, regparam1 > 0

     regparam2: float
         regularization parameter 2, regparam2 > 0

     Notes
     -----

     Computational complexity of re-training:

     m = n_samples1, n = n_samples2, d = n_features1, e  = n_features2

     O(ed^2 + de^2) Linear version (assumption: d < m, e < n)

     O(m^3 + n^3) Kernel version

     The expensive decompositions are computed only on the first call
     and cached on the instance; later calls reuse them.
     """
     self.regparam1 = regparam1
     self.regparam2 = regparam2
     if self.kernelmode:
         K1, K2 = self.K1, self.K2
         # NOTE(review): the projection below uses self.Y directly, so
         # self.Y is assumed to already have shape
         # (K1.shape[0], K2.shape[0]); a dead local reshape of self.Y
         # (its result was never used) was removed here -- confirm
         # against the callers.
         if not self.trained:
             self.trained = True
             evals1, V = decomposition.decomposeKernelMatrix(K1)
             evals1 = np.mat(evals1).T
             evals1 = np.multiply(evals1, evals1)
             V = np.mat(V)
             self.evals1 = evals1
             self.V = V

             evals2, U = decomposition.decomposeKernelMatrix(K2)
             evals2 = np.mat(evals2).T
             evals2 = np.multiply(evals2, evals2)
             U = np.mat(U)
             self.evals2 = evals2
             self.U = U
             self.VTYU = V.T * self.Y * U

         # Per-domain spectral filters combined via an outer product.
         self.newevals1 = 1. / (self.evals1 + regparam1)
         self.newevals2 = 1. / (self.evals2 + regparam2)
         newevals = self.newevals1 * self.newevals2.T

         self.A = np.multiply(self.VTYU, newevals)
         self.A = self.V * self.A * self.U.T
         self.A = np.array(self.A)
         # Dense predictor: every (row, col) sample pair gets a coefficient.
         label_row_inds, label_col_inds = np.unravel_index(np.arange(K1.shape[0] * K2.shape[0]), (K1.shape[0],  K2.shape[0]))
         label_row_inds = np.array(label_row_inds, dtype = np.int32)
         label_col_inds = np.array(label_col_inds, dtype = np.int32)
         self.predictor = KernelPairwisePredictor(self.A.ravel(), label_row_inds, label_col_inds)
     else:
         X1, X2 = self.X1, self.X2
         Y = self.Y.reshape((X1.shape[0], X2.shape[0]), order='F')
         if not self.trained:
             self.trained = True
             svals1, V, rsvecs1 = decomposition.decomposeDataMatrix(X1.T)
             self.svals1 = svals1.T
             self.evals1 = np.multiply(self.svals1, self.svals1)
             self.V = V
             self.rsvecs1 = np.mat(rsvecs1)

             # Reuse the first SVD when both domains share the same data.
             if X1.shape == X2.shape and (X1 == X2).all():
                 svals2, U, rsvecs2 = svals1, V, rsvecs1
             else:
                 svals2, U, rsvecs2 = decomposition.decomposeDataMatrix(X2.T)
             self.svals2 = svals2.T
             self.evals2 = np.multiply(self.svals2, self.svals2)
             self.U = U
             self.rsvecs2 = np.mat(rsvecs2)

             self.VTYU = V.T * Y * U

         self.newevals1 = 1. / (self.evals1 + regparam1)
         self.newevals2 = 1. / (self.evals2 + regparam2)
         newevals = np.multiply(self.svals1, self.newevals1) * np.multiply(self.svals2, self.newevals2).T

         self.W = np.multiply(self.VTYU, newevals)
         self.W = self.rsvecs1.T * self.W * self.rsvecs2
         self.predictor = LinearPairwisePredictor(np.array(self.W))
Beispiel #14
0
    def solve(self, regparam):
        """Re-train KronRLS with a new regularization parameter.

        Parameters
        ----------
        regparam : float, optional
            regularization parameter, regparam > 0

        Notes
        -----

        Computational complexity of re-training:

        m = n_samples1, n = n_samples2, d = n_features1, e  = n_features2

        O(ed^2 + de^2) Linear version (assumption: d < m, e < n)

        O(m^3 + n^3) Kernel version

        The decompositions are computed on the first call only and
        cached; subsequent calls merely re-filter the spectrum.
        """
        self.regparam = regparam
        if self.kernelmode:
            K1, K2 = self.K1, self.K2
            if not self.trained:
                self.trained = True
                # Eigendecompose both kernel matrices once and cache.
                eig1, evec1 = la.eigh(K1)
                self.evals1 = np.mat(eig1).T
                self.V = np.mat(evec1)

                eig2, evec2 = la.eigh(K2)
                self.evals2 = np.mat(eig2).T
                self.U = np.mat(evec2)
                # Labels projected into the joint eigenbases.
                self.VTYU = self.V.T * self.Y * self.U

            # Filter 1 / (lambda_i * mu_j + regparam) for every eigenpair.
            inv_evals = 1. / (self.evals1 * self.evals2.T + regparam)

            dual_core = np.multiply(self.VTYU, inv_evals)
            self.A = np.asarray(self.V * dual_core * self.U.T)
            # Dense predictor: one coefficient per (row, col) sample pair.
            rows, cols = np.unravel_index(
                np.arange(K1.shape[0] * K2.shape[0]),
                (K1.shape[0], K2.shape[0]))
            self.predictor = KernelPairwisePredictor(
                self.A.ravel(),
                np.array(rows, dtype=np.int32),
                np.array(cols, dtype=np.int32))
        else:
            X1, X2 = self.X1, self.X2
            Y = self.Y.reshape((X1.shape[0], X2.shape[0]), order='F')
            if not self.trained:
                self.trained = True
                sing1, lvec1, rvec1 = decomposition.decomposeDataMatrix(X1.T)
                self.svals1 = sing1.T
                self.evals1 = np.multiply(self.svals1, self.svals1)
                self.V = lvec1
                self.rsvecs1 = np.mat(rvec1)

                # Reuse the first SVD when both domains share the data.
                if X1.shape == X2.shape and (X1 == X2).all():
                    sing2, lvec2, rvec2 = sing1, lvec1, rvec1
                else:
                    sing2, lvec2, rvec2 = decomposition.decomposeDataMatrix(
                        X2.T)
                self.svals2 = sing2.T
                self.evals2 = np.multiply(self.svals2, self.svals2)
                self.U = lvec2
                self.rsvecs2 = np.mat(rvec2)

                self.VTYU = lvec1.T * Y * lvec2

            # Shrink each Kronecker singular value s to s / (s^2 + lambda).
            sprod = self.svals1 * self.svals2.T

            shrink = np.divide(sprod,
                               np.multiply(sprod, sprod) + regparam)
            core = np.multiply(self.VTYU, shrink)
            self.W = self.rsvecs1.T * core * self.rsvecs2
            self.predictor = LinearPairwisePredictor(np.array(self.W))