Example #1
from MKLpy.metrics import margin, radius, ratio, trace, frobenius
from MKLpy.preprocessing import kernel_normalization

def kernel_evaluation(K):
    """Compute several quality metrics for a kernel matrix K (uses the module-level training labels Ytr)."""
    kernel_results_dict = {}
    K = kernel_normalization(K)  # normalize the kernel K (redundant for HPKs computed on normalized data)
    kernel_results_dict['score_margin'] = margin(K, Ytr)  # distance between the positive and negative classes in the kernel space
    kernel_results_dict['score_radius'] = radius(K)       # radius of the Minimum Enclosing Ball containing the data in the kernel space
    kernel_results_dict['score_ratio'] = ratio(K, Ytr)    # radius/margin ratio, defined as (radius**2/margin**2)/n_examples
    kernel_results_dict['score_froben'] = frobenius(K)    # Frobenius norm of the kernel matrix
    kernel_results_dict['score_trace'] = trace(K)         # trace of the kernel matrix
    return kernel_results_dict
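
A minimal usage sketch (assuming Xtr and Ytr are defined as in Example #5; homogeneous_polynomial_kernel is the MKLpy helper used there):

from MKLpy.metrics import pairwise
K = pairwise.homogeneous_polynomial_kernel(Xtr, degree=2)  # any precomputed kernel works
scores = kernel_evaluation(K)  # relies on the module-level Ytr
print(scores['score_margin'], scores['score_ratio'])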
Example #2
    def _arrange_kernel(self):
        print("WARNING: probably not working")
        Y = [1 if y == self.classes_[1] else -1 for y in self.Y]
        n = len(self.Y)
        YY = spdiag(Y)
        actual_weights = np.ones(self.n_kernels) / (1.0 * self.n_kernels)  # current weights, uniform at start
        obj = None
        print(actual_weights)
        self.objs = []
        cstep = self.step
        self.margin = []
        for i in range(self.max_iter):
            Kc = summation(self.KL, actual_weights)
            # optimize over alpha (could be warm-started from the previous step)
            clf = SVC(C=self.C, kernel='precomputed').fit(Kc, Y)
            alpha = np.zeros(n)
            alpha[clf.support_] = clf.dual_coef_.ravel()
            alpha = matrix(alpha)

            # given alpha, optimize over the weights
            J = (-0.5 * alpha.T * YY * matrix(Kc) * YY * alpha)[0] + np.sum(alpha)
            grad = [(-0.5 * alpha.T * YY * matrix(_K) * YY * alpha)[0]
                    for _K in self.KL]

            # among the kernels of maximal weight (all equal at the first iteration),
            # pick the one with the largest gradient; note the mapping back to a
            # global index, which the original code skipped
            idx = np.where(actual_weights == max(actual_weights))
            mu = idx[0][np.argmax(np.array(grad)[idx])]
            # reduced-gradient descent direction
            D = [0 if actual_weights[j] == 0 and grad[j] - grad[mu] > 0
                 else -grad[j] + grad[mu] if actual_weights[j] > 0 and j != mu
                 else np.sum([grad[v] - grad[mu] for v in range(self.n_kernels) if grad[v] > 0]) if j == mu
                 else 0
                 for j in range(self.n_kernels)]
            print('d', D)

            # update the weights along the descent direction
            weights = actual_weights + cstep * np.array(D)  # originally this was a +
            weights = weights.clip(0.0)
            if weights.sum() == 0.0:
                print(i, 'zero', weights)
                cstep /= 2.0
                continue
            weights = weights / weights.sum()

            # re-optimize over alpha with the new weights
            Kc = summation(self.KL, weights)
            clf = SVC(C=self.C, kernel='precomputed').fit(Kc, Y)
            alpha = np.zeros(n)
            alpha[clf.support_] = clf.dual_coef_.ravel()
            alpha = matrix(alpha)
            new_obj = (-0.5 * alpha.T * YY * matrix(Kc) * YY * alpha)[0] + np.sum(alpha)
            if obj is not None and abs(new_obj - obj) / n < self.tol:
                # converged within tolerance
                self.objs.append(new_obj)
                actual_weights = weights
                print('terminated', new_obj, obj)
                break
            elif obj is None or new_obj <= obj:
                # valid step: the objective did not increase
                ma = margin(Kc, Y)
                if len(self.margin) > 0 and self.margin[-1] > ma:
                    continue
                self.margin.append(ma)

                obj = new_obj
                actual_weights = weights
                print(actual_weights, obj)
                self.objs.append(obj)
            else:
                # overshot the minimum: halve the step and retry
                weights = actual_weights
                cstep /= 2.0
                print(i, 'overflow', cstep)
                continue
        self._steps = i + 1

        self.weights = np.array(actual_weights)
        self.ker_matrix = summation(self.KL, self.weights)

        return self.ker_matrix
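
The direction D above follows the reduced-gradient scheme of SimpleMKL-style wrappers; the canonical construction makes D sum to zero, so a step along it keeps the weights on the probability simplex. A standalone sketch of that canonical rule with toy numbers (plain numpy, not MKLpy API; the snippet above uses a variant rule for the mu-th component):

import numpy as np

w = np.array([0.5, 0.3, 0.2, 0.0])     # current kernel weights (sum to 1)
g = np.array([-1.0, -0.4, -0.7, 0.2])  # gradient of the objective w.r.t. each weight

idx = np.where(w == w.max())[0]
mu = idx[np.argmax(g[idx])]            # reference component
D = np.where((w == 0) & (g - g[mu] > 0), 0.0, g[mu] - g)
D[mu] = -(D.sum() - D[mu])             # force a zero-sum direction
print(D, D.sum())                      # a step along D preserves sum(w) == 1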
Example #3
File: RMKL.py Project: sspeng/MKLpy
    def _arrange_kernel(self):
        Y = [1 if y == self.classes_[1] else -1 for y in self.Y]
        n = len(self.Y)
        R = np.array([radius(K) for K in self.KL])
        YY = matrix(np.diag(Y))  # note: built from the +/-1 labels Y, not self.Y as in the original

        actual_weights = np.ones(self.n_kernels) / (1.0 * self.n_kernels)  # current weights, uniform at start
        actual_ratio = None

        self.ratios = []
        cstep = self.step
        for i in range(self.max_iter):
            ru2 = np.dot(actual_weights, R**2)
            C = self.C / ru2
            Kc = matrix(summation(self.KL, actual_weights))
            clf = SVC(C=C, kernel='precomputed').fit(Kc, Y)
            alpha = np.zeros(n)
            alpha[clf.support_] = clf.dual_coef_.ravel()
            alpha = matrix(alpha)

            Q = Kc + spdiag([ru2 / self.C] * n)
            J = (-0.5 * alpha.T * YY * Q * YY * alpha)[0] + np.sum(alpha)
            grad = [(-0.5 * alpha.T * YY * (Kc + spdiag([_r**2 / self.C] * n)) * YY * alpha)[0]
                    for _r in R]

            weights = actual_weights + cstep * np.array(grad)  # originally this was a +
            weights = weights.clip(0.0)
            if weights.sum() == 0.0:
                cstep /= -2.0
                continue
            weights = weights / weights.sum()

            Kc = summation(self.KL, weights)
            new_ratio = radius(Kc)**2 / margin(Kc, Y)**2

            if actual_ratio is not None and abs(new_ratio - actual_ratio) / n < self.tol:
                # converged within tolerance
                self.ratios.append(new_ratio)
                actual_weights = weights
            elif actual_ratio is None or new_ratio <= actual_ratio:
                # valid step: the ratio did not increase
                actual_ratio = new_ratio
                actual_weights = weights
                self.ratios.append(actual_ratio)
            else:
                # overshot the minimum: flip and halve the step, then retry
                weights = actual_weights
                cstep /= -2.0
                continue
        self._steps = i + 1

        self.weights = np.array(actual_weights)
        self.ker_matrix = summation(self.KL, self.weights)
        return self.ker_matrix
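
Example #3 descends the radius-margin bound radius**2/margin**2. As a sanity check, MKLpy's ratio metric should agree with computing radius and margin separately, as the comment in Example #5 also notes (a sketch assuming K and Ytr as in Example #5):

from MKLpy.metrics import radius, margin, ratio
r2m = radius(K)**2 / margin(K, Ytr)**2 / len(Ytr)  # (radius**2/margin**2)/n_examples
print(ratio(K, Ytr), r2m)  # the two values should agree up to solver tolerance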
Example #4
    def _arrange_kernel(self):
        Y = [1 if y == self.classes_[1] else -1 for y in self.Y]
        nn = len(Y)
        nk = self.n_kernels
        YY = spdiag(Y)
        beta = [0.0] * nk
        mu = np.exp(beta)
        mu /= mu.sum()  # softmax parameterization: mu always lies on the simplex

        _sol = None
        Q = np.array([[np.dot(self.KL[r].ravel(), self.KL[s].ravel())
                       for r in range(nk)] for s in range(nk)])
        Q /= np.sum([frobenius(K)**2 for K in self.KL])

        self.sr, self.margin = [], []
        self.obj = []
        _beta, _mu = None, None
        cstep = self.step
        for i in range(self.max_iter):
            Kc = summation(self.KL, mu)
            # solve for gamma; warm-starting via init_sol=_sol was disabled in the original
            _sol, _margin = margin(Kc, YY, lam=self.lam, init_sol=None)
            gamma = _sol['x']

            # gradient w.r.t. beta (chain rule through the softmax)
            grad = np.array([(self.C * np.dot(Q[r], mu)
                              + (gamma.T * YY * matrix(self.KL[r]) * YY * gamma)[0, 0])
                             * mu[r] * (1 - mu[r])
                             for r in range(nk)])
            _beta = beta + cstep * grad
            _mu = np.exp(_beta)
            _mu /= _mu.sum()

            _obj = _margin + (self.C / 2) * np.dot(_mu, np.dot(_mu, Q))

            if (self.obj and _obj < self.obj[-1]) or _margin < 1.e-5:
                # the step worsened the objective: reject it and halve the step
                _mu = mu
                _beta = beta
                cstep /= 2.0
                if cstep < 0.00001:
                    break
            else:
                self.obj.append(_obj)
                self.margin.append(_margin)
                mu = _mu
                beta = _beta

        self._steps = i + 1
        self.weights = np.array(mu)
        self.ker_matrix = summation(self.KL, self.weights)
        return self.ker_matrix
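
Example #4 takes gradient steps on the unconstrained parameters beta and maps them through a softmax, so the weights mu stay positive and sum to one without any projection step. A toy sketch of that update (plain numpy, not MKLpy API):

import numpy as np

beta = np.zeros(3)                       # unconstrained parameters, uniform softmax at start
g = np.array([0.3, -0.1, 0.5])           # gradient w.r.t. beta
beta = beta + 0.1 * g                    # plain unconstrained step
mu = np.exp(beta) / np.exp(beta).sum()   # softmax: positive and sums to 1
print(mu, mu.sum())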
Example #5
# compute homogeneous polynomial kernels with degrees 0, 1, 2, ..., 10
print('computing Homogeneous Polynomial Kernels...', end='')
from MKLpy.metrics import pairwise
KLtr = [pairwise.homogeneous_polynomial_kernel(Xtr, degree=d) for d in range(11)]
KLte = [pairwise.homogeneous_polynomial_kernel(Xte, Xtr, degree=d) for d in range(11)]
print('done')


# evaluate kernels in terms of margin, radius, etc.
print('evaluating metrics...', end='')
from MKLpy.metrics import margin, radius, ratio, trace, frobenius
from MKLpy.preprocessing import kernel_normalization
deg = 5
K = KLtr[deg]                   # the HPK with degree 5
K = kernel_normalization(K)     # normalize the kernel K (redundant for HPKs computed on normalized data)
score_margin = margin(K, Ytr)   # the distance between the positive and negative classes in the kernel space
score_radius = radius(K)        # the radius of the Minimum Enclosing Ball containing the data in the kernel space
score_ratio  = ratio(K, Ytr)    # the radius/margin ratio, defined as (radius**2/margin**2)/n_examples
# the ratio can also be computed as score_radius**2 / score_margin**2 / len(Ytr)
score_trace  = trace(K)         # the trace of the kernel matrix
score_froben = frobenius(K)     # the Frobenius norm of the kernel matrix
print('done')
print('results of the %d-degree HP kernel:' % deg)
print('margin: %.4f, radius: %.4f, radius-margin ratio: %.4f,' % (score_margin, score_radius, score_ratio))
print('trace: %.4f, frobenius norm: %.4f' % (score_trace, score_froben))


#evaluate the empirical complexity of the kernel matrix, i.e. the Spectral Ratio
# Michele Donini, Fabio Aiolli: "Learning deep kernels in the space of dot-product polynomials". Machine Learning (2017)
# Ivano Lauriola, Mirko Polato, Fabio Aiolli: "The Minimum Effort Maximum Output principle applied to Multiple Kernel Learning". ESANN (2018)
print('computing Spectral Ratio...', end='')
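
The snippet is truncated here; assuming the spectral_ratio helper from MKLpy.metrics, the computation would presumably continue along these lines:

from MKLpy.metrics import spectral_ratio
SR = spectral_ratio(K, norm=True)  # normalized empirical complexity, in [0, 1]
print('done')
print('spectral ratio of the %d-degree HP kernel: %.4f' % (deg, SR))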
Example #6
    def test_margin(self):
        self.assertAlmostEqual(metrics.margin(self.K, self.Y), 2**.5, 5)
        self.assertRaises(SquaredKernelError, metrics.margin, self.X, self.Y)
        self.assertRaises(BinaryProblemError, metrics.margin, self.K, [1, 2, 3])
        self.assertRaises(ValueError, metrics.margin, self.K, [0, 0, 1, 1, 0])
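
For reference, the first assertion matches a simple identity: with an identity Gram matrix the examples are orthonormal in feature space, so two singleton classes sit at distance 2**.5. A minimal sketch (assuming the numpy-based MKLpy API used throughout these examples; the test fixture self.K is not shown here):

import numpy as np
from MKLpy import metrics

K = np.identity(2)           # two orthonormal examples
Y = [1, -1]                  # one example per class
print(metrics.margin(K, Y))  # approximately 2**.5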