Example #1
# Assumed imports for this standalone snippet: the original module used `sp` as
# an alias for the old SciPy namespace (which re-exported NumPy functions);
# `KERNEL` is a helper class defined elsewhere in the same module.
import numpy as sp
from scipy import linalg


def pre_compute_E_Beta(x, y, sig, kernel="RBF"):
    """
    Pre-computes the per-class kernel eigenvalues/eigenvectors used during cross-validation.
    Input:
    x, y: the sample matrix and the label vector
    sig: the value of the kernel parameter
    Output:
    E_: a list of eigenvalues
    Beta_: a list of corresponding eigenvectors
    """
    C = int(y.max())  # number of classes; labels are assumed to run from 1 to C
    eps = sp.finfo(sp.float64).eps
    E_ = []
    Beta_ = []

    for i in range(C):
        # Indices and size of the samples belonging to class i + 1
        t = sp.where(y == (i + 1))[0]
        ni = t.size
        # Class-wise kernel, centered and scaled by the class size
        Ki = KERNEL()
        Ki.compute_kernel(x[t, :], kernel=kernel, sig=sig)
        Ki.center_kernel()
        Ki.scale_kernel(ni)

        # Eigendecomposition, sorted in decreasing order; eigenvalues below
        # machine precision are clipped to eps for numerical stability
        E, Beta = linalg.eigh(Ki.K)
        idx = E.argsort()[::-1]
        E = E[idx]
        E[E < eps] = eps
        Beta = Beta[:, idx]
        E_.append(E)
        Beta_.append(Beta)

    del E, Beta, Ki
    return E_, Beta_
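
A minimal usage sketch for the function above, under assumed data (X is an n x d sample matrix, the labels in Y start at 1); it relies on the KERNEL helper from the same module being importable.

import numpy as sp

X = sp.random.rand(100, 5)
Y = sp.repeat(sp.arange(1, 4), [40, 30, 30])  # three classes of sizes 40/30/30

# Pre-compute once per candidate kernel parameter, then reuse across CV folds
for sig in [0.25, 0.5, 1.0]:
    E_, Beta_ = pre_compute_E_Beta(X, Y, sig)
    print(sig, [E.shape for E in E_])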
Example #2
    def train(self, x, y, mu=None, sig=None):
        # Initialization
        n = y.shape[0]
        C = int(y.max())
        eps = sp.finfo(sp.float64).eps

        if (mu is None) and (self.mu is None):
            self.mu = 10 ** (-7)  # fixed: the original set the local `mu`, leaving self.mu as None
        elif self.mu is None:
            self.mu = mu

        if (sig is None) and (self.sig is None):
            self.sig = 0.5
        elif self.sig is None:
            self.sig = sig

        # Compute the kernel matrix K and the regularized matrix G
        K = KERNEL()
        K.compute_kernel(x, sig=self.sig)
        G = KERNEL()
        G.K = self.mu * sp.eye(n)

        for i in range(C):
            t = sp.where(y == (i + 1))[0]
            self.ni.append(sp.size(t))
            self.prop.append(float(self.ni[i]) / n)

            # Compute the class-wise kernel block K_k between all samples and class i + 1
            Ki = KERNEL()
            Ki.compute_kernel(x, z=x[t, :], sig=self.sig)
            # Apply T = I - 11^T to the kernel columns of class i + 1
            T = sp.eye(self.ni[i]) - sp.ones((self.ni[i], self.ni[i]))
            Ki.K = sp.dot(Ki.K, T)
            del T
            # Accumulate the class contribution to G, weighted by the class size
            G.K += sp.dot(Ki.K, Ki.K.T) / self.ni[i]
        G.scale_kernel(C)

        # Solve the generalized eigenvalue problem
        a, A = linalg.eigh(G.K, b=K.K)
        idx = a.argsort()[::-1]
        a = a[idx]
        A = A[:, idx]

        # Remove eigenvalues that are negative or below machine precision
        t = sp.where(a > eps)[0]
        a = a[t]
        A = A[:, t]

        # Normalize the eigenvectors so that A[:, i].T @ K.K @ A[:, i] = 1
        for i in range(a.size):
            A[:, i] /= sp.sqrt(sp.dot(sp.dot(A[:, i].T, K.K), A[:, i]))

        # Update model
        self.a = a.copy()
        self.A = A.copy()
        self.S = sp.dot(sp.dot(self.A, sp.diag(self.a ** (-1))), self.A.T)

        # Free memory
        del G, K, a, A
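
The normalization loop above rescales each generalized eigenvector so that A[:, i].T @ K.K @ A[:, i] equals 1. A minimal standalone sketch of that step, using hypothetical symmetric matrices Gm and Km in place of G.K and K.K:

import numpy as np
from scipy import linalg

rng = np.random.default_rng(0)
B = rng.standard_normal((5, 5))
Km = B @ B.T + 5 * np.eye(5)       # symmetric positive-definite stand-in for K.K
Gm = np.diag(np.arange(1.0, 6.0))  # symmetric stand-in for G.K

a, A = linalg.eigh(Gm, b=Km)       # solves Gm @ v = a * Km @ v
for i in range(a.size):
    A[:, i] /= np.sqrt(A[:, i] @ Km @ A[:, i])

print(np.allclose(A.T @ Km @ A, np.eye(5)))  # True: eigenvectors are Km-orthonormal

Note that recent SciPy already returns b-orthonormal eigenvectors for this generalized problem type, so the explicit loop mostly restates the invariant; it does guard against solvers that normalize differently.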
Example #3
    def train(self, x, y, sig=None, dc=None, threshold=None, fast=None, E_=None, Beta_=None):
        """
        The function trains the pgpda model using the training samples
        Inputs:
        x: the sample matrix of size n x d; in the precomputed case (self.precomputed == 1), x is a KERNEL object.
        y: the label vector of size n
        sig: the parameter of the kernel function
        dc: the number of dimensions of the signal subspace
        threshold: the value of the cumulative variance that should be reached
        fast: option used to perform a fast CV; only the parameter dc/threshold is learned

        Outputs:
        None - The model is included/updated in the object
        """

        # Initialization
        n = y.shape[0]
        C = int(y.max())
        eps = sp.finfo(sp.float64).eps
        list_model_dc = "NM1 NM3 NM4"

        if (sig is None) and (self.sig is None):
            self.sig = 0.5
        elif self.sig is None:
            self.sig = sig

        if (dc is None) and (self.dc is None):
            self.dc = 2
        elif self.dc is None:
            self.dc = dc

        if (threshold is None) and (self.threshold is None):
            self.threshold = 0.95
        elif self.threshold is None:
            self.threshold = threshold

        # Check that the subspace dimension dc is consistent with the class sizes
        if list_model_dc.find(self.model) > -1:
            for i in range(C):
                ni = sp.size(sp.where(y == (i + 1))[0])
                if self.dc >= ni - 1:
                    self.dc = ni - 2

        # Estimate the parameters of each class
        for i in range(C):
            t = sp.where(y == (i + 1))[0]
            self.ni.append(sp.size(t))
            self.prop.append(float(self.ni[i]) / n)

            if fast is None:
                # Compute Mi
                Ki = KERNEL()
                if self.precomputed is None:
                    Ki.compute_kernel(x[t, :], kernel=self.kernel, sig=self.sig)
                else:
                    Ki.K = x.K[t, :][:, t].copy()
                    Ki.rank = Ki.K.shape[0]

                self.ri.append(Ki.rank - 1)
                Ki.center_kernel()
                Ki.scale_kernel(self.ni[i])
                TraceKi = sp.trace(Ki.K)

                # Eigenvalue decomposition
                E, Beta = linalg.eigh(Ki.K)
                idx = E.argsort()[::-1]
                E = E[idx]
                E[E < eps] = eps
                Beta = Beta[:, idx]
            else:
                E = E_[i]
                Beta = Beta_[i]
                self.ri.append(E.size - 1)
                TraceKi = sp.sum(E)

            # Parameter estimation: signal dimension di, signal eigenvalues a, noise variance b
            if list_model_dc.find(self.model) == -1:
                di = estim_d(E[0 : self.ri[i] - 1], self.threshold)
            else:
                di = self.dc
            self.di.append(di)
            self.a.append(E[0:di])
            self.b.append((TraceKi - sp.sum(self.a[i])) / (self.ri[i] - di))

            if self.b[i] < eps:  # Sanity check for numerical precision
                self.b[i] = eps
                self.ib.append(1.0 / eps)
            else:
                self.ib.append(1 / self.b[i])
            self.Beta.append(Beta[:, 0:di])
            del Beta, E

        # Finish the estimation for the different models
        if self.model == "NM0" or self.model == "NM1":
            for i in range(C):
                # Compute the value of matrix A
                temp = self.Beta[i] * ((1 / self.a[i] - self.ib[i]) / self.a[i]).reshape(self.di[i])
                self.A.append(sp.dot(temp, self.Beta[i].T) / self.ni[i])

        elif self.model == "NM2" or self.model == "NM3":
            for i in range(C):
                # Update the value of a
                self.a[i][:] = sp.mean(self.a[i])
                # Compute the value of matrix A
                temp = self.Beta[i] * ((1 / self.a[i] - self.ib[i]) / self.a[i]).reshape(self.di[i])
                self.A.append(sp.dot(temp, self.Beta[i].T) / self.ni[i])

        elif self.model == "NM4":
            # Compute a common value of a: the proportion-weighted average over classes
            al = sp.zeros(self.dc)
            for i in range(self.dc):
                for j in range(C):
                    al[i] += self.prop[j] * self.a[j][i]
            for i in range(C):
                self.a[i] = al.copy()
                temp = self.Beta[i] * ((1 / self.a[i] - self.ib[i]) / self.a[i]).reshape(self.di[i])
                self.A.append(sp.dot(temp, self.Beta[i].T) / self.ni[i])

        self.A = sp.asarray(self.A)
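
A hypothetical end-to-end sketch of the fast cross-validation path: the eigendecompositions from pre_compute_E_Beta in Example #1 are computed once per kernel parameter and handed to train() through fast/E_/Beta_. The PGPDA class name and its constructor are assumptions for illustration; only the train(..., fast=1, E_=..., Beta_=...) call pattern comes from the code above.

# Assumed: X (n x d), Y (labels 1..C), and a PGPDA-like object exposing train()
for sig in [0.25, 0.5, 1.0]:
    E_, Beta_ = pre_compute_E_Beta(X, Y, sig)      # heavy step, done once per sig
    for threshold in [0.90, 0.95, 0.99]:
        model = PGPDA(model="NM0", sig=sig)        # hypothetical constructor
        model.train(X, Y, threshold=threshold,
                    fast=1, E_=E_, Beta_=Beta_)    # cheap: reuses the cached E_/Beta_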