Example #1
def pcg0(H, c, A, b, x0, fA=None, callback=None):
    '''
    Projected CG method to solve the problem: {min 1/2 x'Hx + c'x | Ax = b}.
    The initial point x0 must satisfy Ax0 = b. Unstable version, not
    recommended.
    '''
    # Initialize the residual, its projection onto the null space of A,
    # and the first search direction.
    r = H * x0 + c
    r = project(A, r)
    g = project(A, r)
    p = -copy(g)
    x = copy(x0)

    while True:
        alpha = dotu(r, g) / dotu(p, H * p)
        x = x + alpha * p
        r2 = r + alpha * H * p
        g2 = project(A, r2)
        # Do iterative refinement
        # for i in range(5000):
        #     g2 = project(A, g2)
        beta = dotu(r2, g2) / dotu(r, g)
        p = -g2 + beta * p
        g = copy(g2)
        r = copy(r2)
        if nrm2(r) < 1e-16:
            break
    return x
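pcg0 relies on a project(A, r) helper that the snippet does not include; from the way it is used, it presumably returns the orthogonal projection of r onto the null space of A, which keeps every iterate feasible (A(x + alpha*p) = b). A minimal sketch under that assumption, written with NumPy for clarity:

import numpy as np

def project(A, r):
    # Hypothetical stand-in for the missing helper: the orthogonal
    # projection of r onto {v | Av = 0},
    #     P r = r - A'(AA')^{-1} A r,
    # assuming A has full row rank.
    A = np.atleast_2d(np.asarray(A, dtype=float))
    r = np.asarray(r, dtype=float)
    return r - A.T @ np.linalg.solve(A @ A.T, A @ r)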
Example #2
    def get_kernel(X, Y, type='linear', param=1.0):
        """Calculates a kernel given the data X and Y (dims x exms)"""
        (Xdims, Xn) = X.size
        (Ydims, Yn) = Y.size

        kernel = matrix(1.0)
        if type == 'linear':
            print('Calculating linear kernel with size {0}x{1}.'.format(Xn, Yn))
            kernel = matrix([dotu(X[:, i], Y[:, j]) for j in range(Yn)
                             for i in range(Xn)], (Xn, Yn), 'd')

        if type == 'rbf':
            print('Calculating Gaussian kernel with size {0}x{1}.'.format(Xn, Yn))
            kernel = matrix([dotu(X[:, i] - Y[:, j], X[:, i] - Y[:, j])
                             for j in range(Yn) for i in range(Xn)], (Xn, Yn))
            kernel = exp(-param * kernel)
        return kernel
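A hypothetical usage sketch: in the source this function appears to be a method of a Kernel class, and matrix, normal, and exp are assumed to come from cvxopt, with dotu from cvxopt.blas:

from cvxopt import matrix, normal, exp
from cvxopt.blas import dotu

X = normal(2, 5)    # 2 dims x 5 examples
Y = normal(2, 3)    # 2 dims x 3 examples
K_lin = get_kernel(X, Y)                          # 5x3 linear kernel
K_rbf = get_kernel(X, Y, type='rbf', param=0.5)   # 5x3 Gaussian kernel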
Example #3
    def apply_dual(self, k, norms):
        """Application of a dual trained SVDD.
           k     : kernel matrix, (test examples) x (train support vectors)
           norms : (test examples) x 1
        """
        # number of support vectors
        N = len(self.svs)
        (tN, foo) = k.size

        if (self.isDualTrained != True):
            print('First train, then test.')
            return 0, SVDD.MSG_ERROR

        Pc = self.kernel[self.svs, self.svs]
        resc = matrix([dotu(Pc[i, :], self.alphas[self.svs]) for i in range(N)])
        resc = dotu(resc, self.alphas[self.svs])
        res = resc - 2 * matrix([dotu(k[i, :], self.alphas[self.svs])
                                 for i in range(tN)]) + norms
        return res, SVDD.MSG_OK
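What apply_dual evaluates is evidently the standard SVDD squared distance to the learned center c = sum_j alpha_j phi(sv_j), with norms supplying the self-similarity terms k(x_i, x_i). A minimal NumPy sketch of the same quantity (all names here are hypothetical):

import numpy as np

def svdd_sq_distances(K_ss, k_ts, norms, alpha):
    # ||phi(x_i) - c||^2 = alpha'K_ss alpha - 2 k_i'alpha + k(x_i, x_i),
    # where K_ss is the support-vector kernel block, k_ts the test-vs-
    # support kernel, and norms the self-similarities k(x_i, x_i).
    return alpha @ K_ss @ alpha - 2.0 * (k_ts @ alpha) + norms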
Example #4
    def get_diag_kernel(X, type='linear', param=1.0):
        """Calculates the kernel diagonal given the data X (dims x exms)"""
        (Xdims, Xn) = X.size

        kernel = matrix(1.0)
        if type == 'linear':
            print('Calculating diagonal of a linear kernel with size {0}x{1}.'.format(Xn, Xn))
            kernel = matrix([dotu(X[:, i], X[:, i]) for i in range(Xn)], (Xn, 1), 'd')

        if type == 'rbf':
            print('Gaussian kernel diagonal is always exp(0)=1.')
            kernel = matrix(1.0, (Xn, 1), 'd')

        return kernel
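Hypothetical usage, under the same assumptions (and the same X) as the get_kernel sketch above:

d_lin = get_diag_kernel(X)               # 5x1 squared column norms of X
d_rbf = get_diag_kernel(X, type='rbf')   # 5x1 vector of ones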
Example #5
    def apply_dual(self, kernel):
        """Application of a dual trained SSAD.
           kernel = Kernel.get_kernel(Y, X[:, cssad.svs], kernel_type, kernel_param)
        """
        # number of support vectors
        N = len(self.svs)

        # check the number of test examples
        (tN, talphas) = kernel.size
        if (tN < 1):
            print('Invalid test data')
            return 0, SSAD.MSG_ERROR

        # apply the trained classifier:
        # f(x_i) = sum_j cy_j * alpha_j * k(x_i, sv_j)
        prod = matrix([self.alphas[i, 0] * self.cy[0, i] for i in self.svs], (N, 1))
        res = matrix([dotu(kernel[i, :], prod) for i in range(tN)])
        return res, SSAD.MSG_OK
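For reference, the loop above reduces to one matrix-vector product; the same computation as a minimal NumPy sketch (names here are hypothetical, not from the source):

import numpy as np

def ssad_scores(K_test, alphas, cy):
    # K_test: (test x support) kernel block; alphas, cy: per-support-vector
    # dual weights and labels. Returns one anomaly score per test example.
    return K_test @ (np.asarray(alphas).ravel() * np.asarray(cy).ravel())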
Example #6
    def apply_dual(self, kernel):
        """Application of a dual trained OC-SVM."""

        # number of training examples (not used below)
        N = self.samples

        # check the number of test examples
        (tN, foo) = kernel.size
        if (tN < 1):
            print('Invalid test data')
            return 0, OCSVM.MSG_ERROR

        if (self.isDualTrained != True):
            print('First train, then test.')
            return 0, OCSVM.MSG_ERROR

        # apply the trained classifier: f(x_i) = sum_j alpha_j k(x_i, sv_j)
        res = matrix([dotu(kernel[i, :], self.alphas[self.svs])
                      for i in range(tN)])
        return res, OCSVM.MSG_OK
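For context, a standard one-class SVM flags a point as an outlier when its score falls below the learned offset rho; a minimal sketch with hypothetical names (the snippet itself does not compute rho):

import numpy as np

def ocsvm_predict(scores, rho):
    # +1 = inlier, -1 = outlier, via the usual sign(f(x) - rho) rule
    return np.where(np.asarray(scores).ravel() >= rho, 1, -1)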
Example #7
def kernel(xs, ys):
    return dotu(matrix(xs), matrix(ys))
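For example, assuming matrix comes from cvxopt and dotu from cvxopt.blas (the snippet itself carries no imports):

from cvxopt import matrix
from cvxopt.blas import dotu

kernel([1., 2.], [3., 4.])   # = 1*3 + 2*4 = 11.0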
Example #8
    def kkt_solver(x, y, z):

        if (TEST_KKT):
            x0 = matrix(0., x.size)
            y0 = matrix(0., y.size)
            z0 = matrix(0., z.size)
            x0[:] = x[:]
            y0[:] = y[:]
            z0[:] = z[:]

            # Get default solver solutions.
            xp = matrix(0., x.size)
            yp = matrix(0., y.size)
            zp = matrix(0., z.size)
            xp[:] = x[:]
            yp[:] = y[:]
            zp[:] = z[:]
            default_solver(xp, yp, zp)
            offset = K * (K + 1) // 2
            for i in range(K):
                symmetrize_matrix(zp, K + 1, offset)
                offset += (K + 1) * (K + 1)

        # pab = x[:K*(K+1)/2]  # p_{ab}  1<=a<=b<=K
        # pis = x[K*(K+1)/2:]  # \pi_i   1<=i<=K

        # z_{ab} := d_{ab}^{-1} z_{ab}
        # \mat{z}_i = r_i^{-1} \mat{z}_i r_i^{-t}
        misc.scale(z, W, trans='T', inverse='I')

        l = z[:]

        # l_{ab} := d_{ab}^{-2} z_{ab}
        # \mat{z}_i := r_i^{-t}r_i^{-1} \mat{z}_i r_i^{-t} r_i^{-1}
        misc.scale(l, W, trans='N', inverse='I')

        # The RHS of equations
        #
        # d_{ab}^{-2}n_{ab} + vec(V_{ab})^t . vec( \sum_i R_i* F R_i*)
        # + \sum_i vec(V_{ab})^t . vec( g_i g_i^t) u_i + y
        # = -d_{ab}^{-2} l_{ab} + ( p_{ab} - vec(V_{ab})^t . vec(\sum_i L_i*)
        #
        ###

        # Lsum := \sum_i L_i
        moffset = K * (K + 1) // 2
        Lsum = np.sum(np.array(l[moffset:]).reshape((K, (K + 1) * (K + 1))),
                      axis=0)
        Lsum = matrix(Lsum, (K + 1, K + 1))
        Ls = Lsum[:K, :K]

        x[:K * (K + 1) // 2] -= l[:K * (K + 1) // 2]

        dL = matrix(0., (K * (K + 1) // 2, 1))
        ab = 0
        for a in range(K):
            dL[ab] = Ls[a, a]
            ab += 1
            for b in range(a + 1, K):
                dL[ab] = Ls[a, a] + Ls[b, b] - 2 * Ls[b, a]
                ab += 1

        x[:K * (K + 1) // 2] -= cvxopt.mul(si2ab, dL)

        # The RHS of equations
        # g_i^t F g_i + R_{i,K+1,K+1}^2 u_i = pi - L_{i,K+1,K+1}
        x[K * (K + 1) // 2:] -= l[K * (K + 1) // 2 + (K + 1) * (K + 1) -
                                  1::(K + 1) * (K + 1)]

        # x := B^{-1} Cv
        lapack.sytrs(Bm, ipiv, x)
        # lapack.potrs( Bm, x)

        # y := (oz'.B^{-1}.Cv[:-1] - y)/(oz'.B^{-1}.oz)
        y[0] = (blas.dotu(oz, x) - y[0]) / blas.dotu(oz, iB1)
        # x := B^{-1} Cv - B^{-1}.oz y
        blas.axpy(iB1, x, -y[0])

        # Solve for -n_{ab} - d_{ab}^2 z_{ab} = l_{ab}
        # We need to return scaled d*z.
        # z := d_{ab} d_{ab}^{-2}(n_{ab} + l_{ab})
        #    = d_{ab}^{-1}n_{ab} + d_{ab}^{-1}l_{ab}
        z[:K * (K + 1) // 2] += cvxopt.mul(dis, x[:K * (K + 1) // 2])
        z[:K * (K + 1) // 2] *= -1.

        # Solve for \mat{z}_i = -R_i (\mat{l}_i + diag(F, u_i)) R_i
        #                     = -L_i - R_i diag(F, u_i) R_i
        # We return
        # r_i^t \mat{z}_i r_i = -r_i^{-1} (\mat{l}_i +  diag(F, u_i)) r_i^{-t}
        ui = x[-K:]
        nab = tri2symm(x, K)

        F = Fisher_matrix(si2, nab)
        offset = K * (K + 1) // 2
        for i in range(K):
            start, end = i * (K + 1) * (K + 1), (i + 1) * (K + 1) * (K + 1)
            Fu = matrix(0.0, (K + 1, K + 1))
            Fu[:K, :K] = F
            Fu[K, K] = ui[i]
            Fu = matrix(Fu, ((K + 1) * (K + 1), 1))
            # Fu := -r_i^{-1} diag( F, u_i) r_i^{-t}
            cngrnc(rtis[i], Fu, K + 1, alpha=-1.)
            # Fu := -r_i^{-1} (\mat{l}_i + diag( F, u_i )) r_i^{-t}
            blas.axpy(z[offset + start:offset + end], Fu, alpha=-1.)
            z[offset + start:offset + end] = Fu

        if (TEST_KKT):
            offset = K * (K + 1) // 2
            for i in range(K):
                symmetrize_matrix(z, K + 1, offset)
                offset += (K + 1) * (K + 1)
            dz = np.max(np.abs(z - zp))
            dx = np.max(np.abs(x - xp))
            dy = np.max(np.abs(y - yp))
            tol = 1e-5
            if dx > tol:
                print('dx=')
                print(dx)
                print(x)
                print(xp)
            if dy > tol:
                print('dy=')
                print(dy)
                print(y)
                print(yp)
            if dz > tol:
                print('dz=')
                print(dz)
                print(z)
                print(zp)
            if dx > tol or dy > tol or dz > tol:
                for i, (r, rti) in enumerate(zip(ris, rtis)):
                    print('r[%d]=' % i)
                    print(r)
                    print('rti[%d]=' % i)
                    print(rti)
                    print('rti.T*r=')
                    print(rti.T * r)
                for i, d in enumerate(ds):
                    print('d[%d]=%g' % (i, d))
                print('x0, y0, z0=')
                print(x0)
                print(y0)
                print(z0)
                print(Bm0)
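The solver above calls a cngrnc(r, x, n, alpha) helper that is not shown. Judging from the comments, it applies an in-place congruence transform x := alpha * r' * x * r to a vectorized n x n symmetric matrix; cvxopt's own SDP examples implement that operation essentially as below (the explicit n parameter is an assumption made here to match the call signature used above):

from cvxopt import matrix, blas

def cngrnc(r, x, n, alpha=1.0):
    # In-place congruence transform x := alpha * r' * x * r, where x holds
    # an n*n symmetric matrix in column-major order. Hypothetical
    # reconstruction, adapted from the pattern in cvxopt's SDP examples.
    x[::n + 1] *= 0.5                # halve the diagonal of x
    a = +r                           # a := r (copy)
    tx = matrix(x, (n, n))
    blas.trmm(tx, a, side='L')       # a := tril(x) * r
    # tx := alpha * (r'*a + a'*r) = alpha * r' * x * r; only the lower
    # triangle is written, which is what cvxopt's cone routines read.
    blas.syr2k(r, a, tx, trans='T', alpha=alpha)
    x[:] = tx[:]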
Example #9
def linear_kernel(xs, ys):
    return dotu(matrix(xs), matrix(ys))
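This is the same one-liner as kernel in Example #7 under a more descriptive name; e.g. linear_kernel([1., 0.], [0., 1.]) returns 0.0 for orthogonal inputs.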