from numpy import asfarray, atleast_1d, dot, ones, zeros
from openopt import QP


def PolytopProjection(data, T=1.0, isProduct=False, solver=None):
    """Solve the QP  min 0.5*x'*H*x - <T, x>  subject to x >= 0, with H = data*data'."""
    if solver is None:
        solver = 'cvxopt_qp'

    #data = float128(data)
    if isProduct:
        H = data          # data is already the Gram matrix H
        n = data.shape[0]
        m = len(T)
    else:
        H = dot(data, data.T)
        n, m = data.shape

    #T = abs(dot(H, ones(n)))
    T = atleast_1d(asfarray(T))  # accept a scalar or a per-row vector
    f = -T * ones(n)
    p = QP(H, f, lb=zeros(n), iprint=-1, maxIter=150)

    xtol = 1e-6
    if T.max() < 1e5 * xtol:
        xtol = T.max() / 1e5
    r = p._solve(solver, ftol=1e-16, xtol=xtol, maxIter=10000)

    if isProduct:
        return r.xf
    else:
        s = dot(data.T, r.xf)
        return s.flatten(), r.xf
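A hypothetical usage sketch, not from the original source: it assumes openopt with the cvxopt_qp solver is available, and the data values are made up for illustration.

from numpy import array, ones

pts = array([[1.0, 0.0], [0.0, 1.0], [1.0, 1.0]])   # one row per point (made-up data)
s, weights = PolytopProjection(pts, T=ones(3))
# s is dot(pts.T, weights) flattened; weights are the nonnegative QP variables, one per row.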
Example #3
    def test_cplex(self):
        p = QP(diag([1, 2, 3]),
               [15, 8, 80],
               A=matrix('1 2 3; 8 15 80'),
               b=[150, 800],
               Aeq=[0, 1, -1],
               beq=25.5,
               ub=[15, inf, inf])
        r = p._solve('cplex', iprint=0)
        f_opt, x_opt = r.ff, r.xf
        np.testing.assert_almost_equal(f_opt, -1190.35)
        np.testing.assert_almost_equal(x_opt, [-15.0, -2.3, -27.8])
Example #4
    def tilt(self):
        # construct constraint weight matrix and optimize p
        ndim = self.ndim
        wmatrix = self.ndim * self.m_fd()
        wmatrix_2 = self.ndim * self.k() * self.ydat
        p0 = ones(ndim) / ndim
        lb = zeros(ndim)
        ub = ones(ndim)
        Aeq = ones(ndim)
        beq = 1.0
        #A = vstack((-wmatrix, -wmatrix_2, -wmatrix_2))
        #b = hstack((zeros(len(wmatrix)), ones(len(wmatrix_2)), zeros(len(wmatrix_2))))
        A = vstack((-wmatrix, -wmatrix_2))
        b = hstack((zeros(len(wmatrix)), ones(len(wmatrix_2))))
        p = QP(diag(ones(ndim)), 2 * p0 * ones(ndim), lb=lb, ub=ub, Aeq=Aeq, beq=beq, A=A, b=b,
               name="find optimal p that satisfies m'>0 and m<1")
        r = p._solve('cvxopt_qp')
        solution = r.xf
        self.solution = solution
        self.ydat = ndim * solution * self.ydat
Example #5
    def test_cvxopt(self):
        Q = matrix([[1.0, 0.0, 0.0], [0.0, 2.0, 0.0], [0.0, 0.0, 3.0]])
        p = matrix([15.0, 8.0, 80.0])
        G = matrix([[1.0, 2.0, 3.0], [8.0, 15.0, 80.0], [0, 0, 0]])
        h = matrix([150.0, 800.0, 0], (3, 1))
        A = matrix([0.0, 1.0, -1.0], (1,3))
        b = matrix(25.5)

        sol = cvxopt.solvers.qp(Q, p, G, h, A, b)

        p = QP(diag([1, 2, 3]),
               [15, 8, 80],
               A = np.matrix("1 2 3; 8 15 80"),
               b = [150, 800],
               Aeq = [0, 1, -1],
               beq = 25.5)
        r = p._solve('cvxopt_qp', iprint = 0)
        f_opt, x_opt = r.ff, r.xf

        np.testing.assert_almost_equal(f_opt,  sol['primal objective'], decimal=5)
        np.testing.assert_almost_equal(x_opt, np.squeeze(sol['x']), decimal=5)
Example #6
beq = 0.0
A = [1.0 for i in range(2 * tot_values)]
b = nu * float(C)

#coeff for 3rd constraint
#kernel=H
p = QP(np.asmatrix(kernel),
       np.asmatrix(f),
       lb=np.asmatrix(lower_limit),
       ub=np.asmatrix(upper_limit),
       Aeq=Aeq,
       beq=beq,
       A=A,
       b=b)
r = p._solve('cvxopt_qp', iprint=0)
f_opt, x = r.ff, r.xf

support_vectors = []
support_vectors_Y = []
coeff = []
b = 0.0
# support vectors: points where a_n - a_n' != 0
for i in range(tot_values):
    if (x[i] - x[tot_values + i]) != 0:
        support_vectors.append(X[i])
        support_vectors_Y.append(Y[i])
        coeff.append(x[i] - x[tot_values + i])

# bias term: t_n - eps - sum_j coeff[j] * kernel(support_vectors[j], x_n); a sketch follows below
support_vector = []
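The bias term described in the comment above is not computed in this fragment. A minimal, hypothetical helper mirroring the averaging done in the fit method of Example #8 could look like the following; kernel_func is a placeholder for whatever kernel the surrounding script uses.

def estimate_bias(X, Y, coeff, support_vectors, eps, kernel_func):
    # Hypothetical sketch: average t_n - eps - sum_j coeff[j] * k(sv_j, x_n)
    # over the training points, as in Example #8.
    total = 0.0
    for n in range(len(X)):
        k_sum = sum(c * kernel_func(sv, X[n]) for c, sv in zip(coeff, support_vectors))
        total += Y[n] - eps - k_sum
    return total / len(X)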
Example #7
Aeq = [1.0 for i in range(2 * tot_values)]
for i in range(tot_values, 2 * tot_values):
    Aeq[i] = -1.0
beq = 0.0

#----------------------------------------------------------------------------------------------------

#coeff for 3rd constraint
#kernel=H
eq = QP(np.asmatrix(kernel),
        np.asmatrix(f),
        lb=np.asmatrix(lower_limit),
        ub=np.asmatrix(upper_limit),
        Aeq=Aeq,
        beq=beq)
p = eq._solve('cvxopt_qp', iprint=0)
f_optimized, x = p.ff, p.xf

#---------------------------------------------------------------------------------------
support_vectors = []
support_vectors_Y = []
support_vector = []
support_vector_Y = []

coeff = []
b = 0.0
# support vectors: points where a_n - a_n' != 0
for i in range(tot_values):
    if (x[i] - x[tot_values + i]) != 0:
        support_vectors.append(X[i])
        support_vectors_Y.append(Y[i])
Example #8
    def fit(self, X, Y):
        xdim = X.shape[0]
        ydim = Y.shape[0]

        if xdim != ydim:
            raise Exception('Input dimensions do not match!')

        # Gram matrix for the 2*xdim dual variables (a_1..a_n, a_1'..a_n')
        kernel = [[0.0 for i in range(2 * xdim)] for j in range(2 * xdim)]

        for i in range(xdim):
            for j in range(xdim):
                kernel[i][j] = self._kernel(X[i], X[j])
                kernel[i + xdim][j + xdim] = self._kernel(X[i], X[j])

        # negating the values for a_n'
        for i in range(xdim):
            for j in range(xdim):
                kernel[i + xdim][j] = (-1.0) * self._kernel(X[i], X[j])
                kernel[i][j + xdim] = (-1.0) * self._kernel(X[i], X[j])

        # coefficients of the linear term to minimize
        f = [0.0 for i in range(2 * xdim)]
        for i in range(xdim):
            f[i] = -float(Y[i]) + self.eps
        for i in range(xdim, 2 * xdim):
            f[i] = float(Y[i - xdim]) + self.eps

        # constraints: 0 <= a_n, a_n' <= C and sum(a_n - a_n') = 0
        lower_limit = [0.0 for i in range(2 * xdim)]
        upper_limit = [float(self.C) for i in range(2 * xdim)]
        Aeq = [1.0 for i in range(2 * xdim)]
        for i in range(xdim, 2 * xdim):
            Aeq[i] = -1.0
        beq = 0.0

        # solve the dual QP (the kernel matrix plays the role of H)
        eq = QP(np.asmatrix(kernel), np.asmatrix(f),
                lb=np.asmatrix(lower_limit),
                ub=np.asmatrix(upper_limit),
                Aeq=Aeq, beq=beq)
        p = eq._solve('cvxopt_qp', iprint=0)
        f_opt, x = p.ff, p.xf

        self.support_vectors = []
        self.support_vectors_Y = []
        self.support_vector = []
        self.support_vector_Y = []

        self.coeff = []
        # support vectors: points where a_n - a_n' != 0
        for i in range(xdim):
            if (x[i] - x[xdim + i]) != 0:
                self.support_vectors.append(X[i])
                self.support_vectors_Y.append(Y[i])
                self.coeff.append(x[i] - x[xdim + i])

        low = min(abs(x))
        for i in range(xdim):
            if not (abs(x[i] - x[xdim + i]) < low + 0.005):
                self.support_vector.append(X[i])
                self.support_vector_Y.append(Y[i])

        # the bias is generally taken as the average, as written in the book
        bias = 0.0
        for i in range(len(X)):
            bias = bias + float(Y[i] - self.eps - self._product(self.coeff, self.support_vectors, X[i]))
        self.bias = bias / len(X)
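The fragment ends with fit. For illustration only, a hypothetical predict sketch using the attributes fit stores (self.coeff, self.support_vectors, self.bias, self._kernel) might look like this; it is not part of the original class.

    def predict(self, x_new):
        # Hypothetical sketch: y(x) = sum_j coeff_j * k(sv_j, x) + bias
        y = self.bias
        for c, sv in zip(self.coeff, self.support_vectors):
            y += c * self._kernel(sv, x_new)
        return y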
Example #9
"""
Example:
Concider the problem
0.5 * (x1^2 + 2x2^2 + 3x3^2) + 15x1 + 8x2 + 80x3 -> min        (1)
subjected to
x1 + 2x2 + 3x3 <= 150            (2)
8x1 +  15x2 +  80x3 <= 800    (3)
x2 - x3 = 25.5                           (4)
x1 <= 15                                  (5)
"""

from numpy import diag, matrix, inf
from openopt import QP
p = QP(diag([1, 2, 3]), [15, 8, 80],
       A=matrix('1 2 3; 8 15 80'), b=[150, 800],
       Aeq=[0, 1, -1], beq=25.5, ub=[15, inf, inf])
# or p = QP(H=diag([1, 2, 3]), f=[15, 8, 80], A=...)
r = p._solve('cvxopt_qp', iprint=0)
f_opt, x_opt = r.ff, r.xf
# x_opt = array([-15. , -2.3, -27.8])
# f_opt = -1190.35
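A quick sanity check of the values quoted above (a hand derivation, not part of the OpenOpt example): with both linear inequalities inactive, x1 minimizes 0.5*x1^2 + 15*x1, so x1 = -15; substituting x3 = x2 - 25.5 reduces the remaining terms to 2.5*x2^2 + 11.5*x2 + const, so x2 = -2.3 and x3 = -27.8.

from numpy import array, diag
x_check = array([-15.0, -2.3, -27.8])
f_check = 0.5 * x_check.dot(diag([1.0, 2.0, 3.0]).dot(x_check)) + array([15.0, 8.0, 80.0]).dot(x_check)
# f_check evaluates to -1190.35 (up to floating point), matching test_cplex above.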
Example #10
#-----------------------------------------------------------------------------------------------------
#constraints
lower_limit = [0.0 for i in range(2 * tot_values)]
upper_limit = [float(C) for i in range(2 * tot_values)]
Aeq = [1.0 for i in range(2 * tot_values)]
for i in range(tot_values, 2 * tot_values):
    Aeq[i] = -1.0
beq = 0.0


#----------------------------------------------------------------------------------------------------

#coeff for 3rd constraint
#kernel=H
eq = QP(np.asmatrix(kernel), np.asmatrix(f),
        lb=np.asmatrix(lower_limit),
        ub=np.asmatrix(upper_limit),
        Aeq=Aeq, beq=beq)
p = eq._solve('cvxopt_qp', iprint=0)
f_optimized, x = p.ff, p.xf

#---------------------------------------------------------------------------------------
support_vectors = []
support_vectors_Y = []
support_vector = []
support_vector_Y = []

coeff = []
b = 0.0
# support vectors: points where a_n - a_n' != 0
for i in range(tot_values):
    if (x[i] - x[tot_values + i]) != 0:
        support_vectors.append(X[i])
        support_vectors_Y.append(Y[i])