Example #1
from MKLpy.metrics import margin, radius, ratio, trace, frobenius
from MKLpy.preprocessing import kernel_normalization

def kernel_evaluation(K):
    # Ytr (the training labels) is assumed to be defined in the enclosing scope
    kernel_results_dict = {}
    K = kernel_normalization(K)  # normalize the kernel K (useless in the case of HPK computed on normalized data)
    kernel_results_dict['score_margin'] = margin(K, Ytr)  # the distance between the positive and negative classes in the kernel space
    kernel_results_dict['score_radius'] = radius(K)  # the radius of the Minimum Enclosing Ball containing data in the kernel space
    kernel_results_dict['score_ratio'] = ratio(K, Ytr)  # the radius/margin ratio defined as (radius**2/margin**2)/n_examples
    kernel_results_dict['score_froben'] = frobenius(K)  # the Frobenius norm of a kernel matrix
    kernel_results_dict['score_trace'] = trace(K)  # the trace of the kernel matrix
    return kernel_results_dict
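A minimal usage sketch for the helper above, assuming a training matrix Xtr and labels Ytr already exist in scope (both are hypothetical stand-ins here), with an HPK built as in Example #3:

from MKLpy.metrics import pairwise

K2 = pairwise.homogeneous_polynomial_kernel(Xtr, degree=2)  # hypothetical Xtr
results = kernel_evaluation(K2)  # reads Ytr from the enclosing scope
print(results['score_margin'], results['score_ratio'])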
Example #2
File: MOME.py Project: sspeng/MKLpy
    def _arrange_kernel(self):
        Y = [1 if y == self.classes_[1] else -1 for y in self.Y]  # map labels to {-1, +1}
        nn = len(Y)
        nk = self.n_kernels
        YY = spdiag(Y)  # sparse diagonal matrix of the labels
        beta = [0.0] * nk
        mu = np.exp(beta)
        mu /= mu.sum()  # uniform initial kernel weights via softmax over beta

        actual_ratio = None
        # Q[r,s] = <K_r, K_s>_F, normalized by the total squared Frobenius norm
        Q = np.array([[
            np.dot(self.KL[r].ravel(), self.KL[s].ravel()) for r in range(nk)
        ] for s in range(nk)])
        Q /= np.sum([frobenius(K)**2 for K in self.KL])

        self.sr, self.margin = [], []
        self.obj = []
        _beta, _mu = None, None
        cstep = self.step
        I = np.diag(np.ones(nn))  # identity matrix (unused in this excerpt)
        for i in range(self.max_iter):
            Kc = summation(self.KL, mu)  # weighted sum of the base kernels
            # solve the KOMD problem on the combined kernel to obtain gamma
            clf = KOMD(kernel='precomputed', lam=self.lam).fit(Kc, Y)
            gamma = clf.gamma
            _margin = (gamma.T * YY * matrix(Kc) * YY * gamma)[0]  # current margin
            # ascent direction: dJ/dmu_r scaled by the diagonal softmax Jacobian mu_r*(1-mu_r)
            grad = np.array([(self.C * np.dot(Q[r],mu) + (gamma.T * YY * matrix(self.KL[r]) * YY * gamma)[0]) \
                    * mu[r] * (1- mu[r]) \
                      for r in range(nk)])
            _beta = beta + cstep * grad  # gradient ascent step on beta
            _mu = np.exp(_beta)
            _mu /= _mu.sum()  # map back to the probability simplex via softmax

            _obj = _margin + (self.C / 2) * np.dot(_mu, np.dot(_mu, Q))
            if (self.obj and _obj < self.obj[-1]) or _margin < 1.e-4:
                # the objective worsened (or the margin collapsed): halve the step
                cstep /= 2.0
                if cstep < 0.00001: break
            else:
                self.obj.append(_obj)
                self.margin.append(_margin)
                mu = _mu
                beta = _beta

        self._steps = i + 1
        # print('steps', self._steps, 'lam', self.lam, 'margin', self.margin[-1])
        self.weights = np.array(mu)
        self.ker_matrix = summation(self.KL, self.weights)
        return self.ker_matrix
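Reading the loop above: the weights mu are a softmax over the unconstrained parameters beta, and each iteration takes a gradient-ascent step on a regularized margin objective. A sketch of the math implied by this excerpt (my reading of the code, not necessarily the papers' exact formulation):

\mu_r = \frac{e^{\beta_r}}{\sum_s e^{\beta_s}}, \qquad
J(\mu) = \gamma^\top Y K(\mu) Y \gamma + \frac{C}{2}\, \mu^\top Q \mu, \qquad
K(\mu) = \sum_r \mu_r K_r

The factor mu[r] * (1 - mu[r]) in grad is the diagonal of the softmax Jacobian (the chain rule from mu back to beta); whenever a step decreases the objective, cstep is halved instead of accepting the update.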
Example #3
#compute the lists of training and test Homogeneous Polynomial Kernels (degrees 0..10)
print ('computing Homogeneous Polynomial Kernels...', end='')
from MKLpy.metrics import pairwise
KLtr = [pairwise.homogeneous_polynomial_kernel(Xtr, degree=d) for d in range(11)]
KLte = [pairwise.homogeneous_polynomial_kernel(Xte,Xtr, degree=d) for d in range(11)]
print ('done')


#evaluate kernels in terms of margin, radius etc...
print ('evaluating metrics...', end='')
from MKLpy.metrics import margin, radius, ratio, trace, frobenius
from MKLpy.preprocessing import kernel_normalization
deg = 5
K = KLtr[deg]					#the HPK with degree 5
K = kernel_normalization(K)		#normalize the kernel K (useless in the case of HPK computed on normalized data)
score_margin = margin(K,Ytr)	#the distance between the positive and negative classes in the kernel space
score_radius = radius(K)		#the radius of the Minimum Enclosing Ball containing data in the kernel space
score_ratio  = ratio (K,Ytr)	#the radius/margin ratio defined as (radius**2/margin**2)/n_examples
#the ratio can also be computed as score_radius**2/score_margin**2/len(Ytr)
score_trace  = trace (K)		#the trace of the kernel matrix
score_froben = frobenius(K)		#the Frobenius norm of a kernel matrix
print ('done')
print ('results of the %d-degree HP kernel:' % deg)
print ('margin: %.4f, radius: %.4f, radius-margin ratio: %.4f,' % (score_margin, score_radius, score_ratio))
print ('trace: %.4f, frobenius norm: %.4f' % (score_trace, score_froben))


#evaluate the empirical complexity of the kernel matrix, i.e. the Spectral Ratio
# Michele Donini, Fabio Aiolli: "Learning deep kernels in the space of dot-product polynomials". Machine Learning (2017)
# Ivano Lauriola, Mirko Polato, Fabio Aiolli: "The Minimum Effort Maximum Output principle applied to Multiple Kernel Learning". ESANN (2018)
print ('computing Spectral Ratio...', end='')
from MKLpy.metrics import spectral_ratio
SR = spectral_ratio(K, norm=True)
print ('%.4f' % SR)
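For reference, the spectral ratio defined in the papers cited above is the trace/Frobenius ratio of the kernel matrix, which ranges from 1 (a rank-1 matrix) to sqrt(n) (the identity). A minimal sketch of the computation using the metrics already imported, assuming norm=True rescales the value to [0,1] (my reading of the flag):

import numpy as np
SR_raw  = trace(K) / frobenius(K)                    # spectral ratio, in [1, sqrt(n)]
SR_norm = (SR_raw - 1) / (np.sqrt(K.shape[0]) - 1)   # rescaled to [0,1]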
Example #4
    def test_frobenius(self):
        self.assertEqual(metrics.frobenius(self.K), 111**.5)
        self.assertEqual(metrics.frobenius(self.K1), 3)
        self.assertRaises(SquaredKernelError, metrics.frobenius, self.X)
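The expected values follow directly from the definition of the Frobenius norm, ||K||_F = sqrt(sum_ij K_ij**2). A standalone sanity check on a toy square matrix (assuming frobenius accepts a plain NumPy array, as the fixtures above suggest):

import numpy as np
from MKLpy.metrics import frobenius

K = np.array([[1., 2.], [2., 3.]])
assert np.isclose(frobenius(K), np.sqrt((K ** 2).sum()))  # sqrt(1 + 4 + 4 + 9) = sqrt(18)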