def Random(n=10, m=4, delta=1.0e-05):
    """Random l1-regularized least-squares test problem.

    `n` is the signal dimension and `m` is the number of measurements.
    """
    from toolslsq import as_llmat

    np.random.seed(1919)
    Q = sp(matrix=as_llmat(np.random.random((m, n)).T))
    Q = PysparseLinearOperator(Q)

    eps = 0.1
    y = eps * np.ones([m, 1])
    d = np.array(Q * y[:, 0])

    p = n
    n = m
    c = np.concatenate((np.zeros(n), np.ones(n) * delta))
    ucon = np.zeros(2 * n)
    lcon = -np.ones(2 * n) * inf
    uvar = np.ones(2 * n) * inf
    lvar = -np.ones(2 * n) * inf
    # lvar[0:n] = np.zeros(n)  # uncomment to force a nonnegative signal

    I = IdentityOperator(n, symmetric=True)

    # Build [ I  -I]
    #       [-I  -I]
    B = BlockLinearOperator([[I, -I], [-I]], symmetric=True)
    Q_ = ZeroOperator(n, p)
    new_Q = BlockLinearOperator([[Q, Q_]])

    p, n = new_Q.shape
    m, n = B.shape
    name = str(p) + '_' + str(n) + '_' + str(m) + '_l1_ls_RANDOM'
    lsqpr = LSQRModel(Q=new_Q, B=B, d=d, c=c, Lcon=lcon, Ucon=ucon,
                      Lvar=lvar, Uvar=uvar, name=name)
    return lsqpr
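
# Hedged illustration (the helper name below is hypothetical, not part of
# the model API): the block operator B = [[I, -I], [-I, -I]] with upper
# bound ucon = 0 encodes the l1 norm, since B*[x; t] <= 0 holds exactly
# when |x_i| <= t_i for every i, so minimizing c'*[x; t] = delta*sum(t)
# recovers delta*||x||_1 at the optimum.  A dense pure-numpy analogue:
def _l1_block_sketch():
    import numpy as np
    n = 4
    I = np.eye(n)
    B = np.vstack([np.hstack([I, -I]), np.hstack([-I, -I])])
    x = np.array([1.0, -2.0, 0.5, 0.0])
    t = np.abs(x) + 0.1                   # any t with t >= |x|
    assert np.all(B.dot(np.concatenate([x, t])) <= 0.0)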
def hess(self, xst, yzuv=None, *args, **kwargs):
    """
    Evaluate the Hessian matrix of the Lagrangian associated to the L1
    merit function problem.

    :parameters:
        :xst:  vector of primal variables [x, s, t].
        :yzuv: vector of dual variables [y, z, u, v]. If `None`, the
               Hessian of the objective will be evaluated.
    """
    # Shortcuts.
    model = self.model
    m = model.m
    nrC = model.nrangeC

    (x, s, t) = self.get_xst(xst)
    obj_weight = kwargs.get('obj_weight', 1.0)
    shift = (obj_weight != 0.0)
    if yzuv is not None:
        (y, z, u, v) = self.get_yzuv(yzuv)
    else:
        y = np.zeros(m + nrC)
    y2 = self.nlp_multipliers(y, shift=shift)

    # The elastics s and t enter the problem linearly, so only the (x, x)
    # block of the Hessian is nonzero.
    H = sp(nrow=self.n, ncol=self.n, symmetric=True, sizeHint=model.nnzh)
    H[:model.n, :model.n] = model.hess(x, y2, **kwargs)
    return H
def solve_iterative(A, B, C, rhs):
    """Solve the block system iteratively with LSMR, preconditioned by the
    diagonals of A and C."""
    m, n = B.shape
    B = sp(matrix=as_llmat(B))
    A = sp(matrix=as_llmat(A))
    B = PysparseLinearOperator(B)
    C = sp(matrix=as_llmat(C))

    M = DiagonalOperator(1 / A.takeDiagonal())
    N = DiagonalOperator(1 / C.takeDiagonal())

    # Build g and f; solve A*x_0 = f and form b = g - B*x_0.
    g = rhs[n:]
    f = rhs[:n]
    x_0 = -M * f
    b = g - B * x_0

    # lsqr = LSQRFramework(B)
    # lsqr = CRAIGFramework(B)
    lsqr = LSMRFramework(B)
    tp1 = time.clock()
    lsqr.solve(b, atol=1e-16, btol=1e-16, M=N, N=M)
    tp2 = time.clock()
    # print "CPU TIME:", tp2 - tp1
    # print lsqr.r2norm, lsqr.r1norm

    # Reconstruct the solution and compute the errors.
    xsol = x_0 + lsqr.x
    w = b - B * lsqr.x
    ysol = N(w)
    sol_final = np.concatenate((xsol, ysol), axis=0)
    return sol_final
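
# Hedged illustration (helper name hypothetical): the preconditioners M and
# N above are built from 1/diag(A) and 1/diag(C); when A is actually
# diagonal, DiagonalOperator(1/A.takeDiagonal()) is an exact inverse of A.
# A pure-numpy analogue:
def _diag_precond_sketch():
    import numpy as np
    A = np.diag([2.0, 4.0, 8.0])
    Minv = 1.0 / np.diag(A)          # analogue of 1 / A.takeDiagonal()
    v = np.ones(3)
    assert np.allclose(Minv * v, np.linalg.solve(A, v))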
def linearoperator():
    np.set_printoptions(precision=3, linewidth=80, threshold=10, edgeitems=3)

    Q, B, d, c, lcon, ucon, lvar, uvar, name = fifth_class_tp(8, 11)
    # Q, B, d, c, lcon, ucon, lvar, uvar, name = sixeth_class_tp(128, 1024)
    lsqpr = lsq_tp_generator(Q, B, d, c, lcon, ucon, lvar, uvar, name,
                             Model=LSQRModel, txt='False', npz='True')
    print lsqpr.name

    J = sp(matrix=as_llmat(Q))
    e1 = np.ones(J.shape[0])
    e2 = np.ones(J.shape[1])
    print 'J.shape = ', J.getShape()

    print 'Testing PysparseLinearOperator:'
    op = PysparseLinearOperator(J)
    print 'op.shape = ', op.shape
    print 'op.T.shape = ', op.T.shape
    print 'op * e2 = ', op * e2
    print 'op.T * e1 = ', op.T * e1
    print 'op.T.T * e2 = ', op.T.T * e2
    print 'op.T.T.T * e1 = ', op.T.T.T * e1
    print 'With call:'
    print 'op(e2) = ', op(e2)
    print 'op.T(e1) = ', op.T(e1)
    print 'op.T.T is op : ', (op.T.T is op)
    print

    print 'Testing LinearOperator:'
    op = LinearOperator(J.shape[1], J.shape[0],
                        lambda v: J * v,
                        matvec_transp=lambda u: u * J)
    print 'op.shape = ', op.shape
    print 'op.T.shape = ', op.T.shape
    print 'op * e2 = ', op * e2
    print 'e1.shape = ', e1.shape
    print 'op.T * e1 = ', op.T * e1
    print 'op.T.T * e2 = ', op.T.T * e2
    print 'op(e2) = ', op(e2)
    print 'op.T(e1) = ', op.T(e1)
    print 'op.T.T is op : ', (op.T.T is op)
    print

    op2 = op.T * op
    print 'op2 * e2 = ', op2 * e2
    print 'op.T * (op * e2) = ', op.T * (op * e2)
    print 'op2 is symmetric: ', check_symmetric(op2)
    op3 = op * op.T
    print 'op3 * e1 = ', op3 * e1
    print 'op * (op.T * e1) = ', op * (op.T * e1)
    print 'op3 is symmetric: ', check_symmetric(op3)
    print

    print 'Testing negative operator:'
    nop = -op
    print op * e2
    print nop * e2

    I = LinearOperator(nargin=4, nargout=4, matvec=lambda v: v,
                       symmetric=True)
def exampleliop(n, m):
    # Q, B, d, c, lcon, ucon, lvar, uvar, name = first_class_tp(3, 2, 2)
    Q, B, d, c, lcon, ucon, lvar, uvar, name = fifth_class_tp(n, m)
    Q = sp(matrix=as_llmat(Q))
    B = sp(matrix=as_llmat(B))
    Q = PysparseLinearOperator(Q)
    B = PysparseLinearOperator(B)
    lsqpr = LSQRModel(Q=Q, B=B, d=d, c=c, Lcon=lcon, Ucon=ucon,
                      Lvar=lvar, Uvar=uvar, name='test')
    return lsqpr
def solve_iterative_opr(A, B, C, rhs):
    A = sp(matrix=as_llmat(A))
    A = PysparseLinearOperator(A)
    B = sp(matrix=as_llmat(B))
    B = PysparseLinearOperator(B)
    C = sp(matrix=as_llmat(C))
    C = PysparseLinearOperator(C)

    m, n = B.shape
    p, q = A.shape
    # An abstract operator exposes no diagonal, so approximate it with the
    # row sums A*e (exact when A is diagonal); same for C below.
    e = np.ones(p)
    M = DiagonalOperator(1 / (A * e))
    mc, nc = C.shape
    ec = np.ones(mc)
    N = DiagonalOperator(1 / (C * ec))

    # Build g and f; solve A*x_0 = f and form b = g - B*x_0.
    g = rhs[n:]
    f = rhs[:n]
    x_0 = -M * f
    b = g - B * x_0

    lsqr = LSQRFramework(B)
    # lsqr = CRAIGFramework(B)
    # lsqr = LSMRFramework(B)
    t0 = time.clock()
    lsqr.solve(b, atol=1e-16, btol=1e-16, M=N, N=M, show=True,
               store_resids=True)
    t1 = time.clock()
    print "CPU TIME:", t1 - t0
    print lsqr.r2norm, lsqr.r1norm

    # Reconstruct the solution and compute the errors.
    xsol = x_0 + lsqr.x
    w = b - B * lsqr.x
    ysol = N(w)
    sol_final = np.concatenate((xsol, ysol), axis=0)
    return sol_final
def jacPos(self, x, **kwargs):
    """
    Convenience function to evaluate the Jacobian matrix of the
    constraints reformulated as

        ci(x) = ai       for i in equalC
        ci(x) - Li >= 0  for i in lowerC
        ci(x) - Li >= 0  for i in rangeC
        Ui - ci(x) >= 0  for i in upperC
        Ui - ci(x) >= 0  for i in rangeC.

    The gradients of the general constraints appear in 'natural' order,
    i.e., in the order in which they appear in the problem. The gradients
    of range constraints appear in two places: first in the 'natural'
    location and again after all other general constraints, with a flipped
    sign to account for the upper bound on those constraints.

    The overall Jacobian of the new constraints thus has the form

        [ J ]
        [-JR]

    This is a `m + nrangeC` by `n` matrix, where `J` is the Jacobian of
    the general constraints in the order above, in which the sign of the
    'less than' constraints is flipped, and `JR` is the Jacobian of the
    'less than' side of range constraints.
    """
    store_zeros = kwargs.get('store_zeros', False)
    store_zeros = 1 if store_zeros else 0
    n = self.n
    m = self.m
    nrangeC = self.nrangeC
    upperC = self.upperC
    rangeC = self.rangeC

    # Initialize sparse Jacobian.
    J = sp(nrow=m + nrangeC, ncol=n,
           sizeHint=self.nnzj + 10 * nrangeC, storeZeros=store_zeros)

    # Insert contribution of general constraints.
    J[:m, :n] = self.jac(x, store_zeros=store_zeros)
    J[upperC, :n] *= -1          # Flip sign of 'upper' gradients.
    J[m:, :n] = -J[rangeC, :n]   # Append 'upper' side of range const.
    return J
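
# Hedged toy illustration (helper name hypothetical): for a single range
# constraint 1 <= c(x) <= 3 with gradient g, jacPos keeps g in its natural
# position for the lower side and appends -g for the flipped upper side,
# which is exactly the [J; -JR] structure described above.
def _jacpos_range_sketch():
    import numpy as np
    g = np.array([1.0, -2.0, 0.5])   # gradient of c at some x
    Jpos = np.vstack([g, -g])        # rows: c(x) - 1 >= 0 and 3 - c(x) >= 0
    return Jpos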
def DCT(n=10, m=4, delta=1.0e-05):
    """Partial-DCT style l1-regularized least-squares test problem.

    `n` is the signal dimension and `m` is the number of measurements.
    """
    from toolslsq import as_llmat

    # m randomly chosen indices (the permutation is currently overridden).
    J = np.random.permutation(range(n))
    J = np.array(range(n))

    # The intent is the m*n partial DCT matrix whose m rows are the rows
    # of the n*n DCT matrix at the indices in J; a random matrix is used
    # here as a stand-in.
    Q = sp(matrix=as_llmat(np.random.rand(n, m)))
    Q = PysparseLinearOperator(Q)

    # Spiky signal generation.
    T = min(m, n) - 1                  # number of spikes
    x0 = np.zeros([n, 1])
    q = np.random.permutation(range(n))
    x0[q[0:T]] = np.sign(np.random.rand(T, 1))

    # Noisy observations.
    sigma = 0.01                       # noise standard deviation
    y = x0  # + sigma*np.reshape(np.arange(1, m+1), (m, 1))

    p = n
    n = m
    y = sprandvec(n, 30)
    d = Q * y[:, 0]
    d = np.array(d)

    c = np.concatenate((np.zeros(n), np.ones(n) * delta))
    ucon = np.zeros(2 * n)
    lcon = -np.ones(2 * n) * inf
    uvar = np.ones(2 * n) * inf
    lvar = -np.ones(2 * n) * inf

    I = IdentityOperator(n, symmetric=True)

    # Build [ I  -I]
    #       [-I  -I]
    B = BlockLinearOperator([[I, -I], [-I]], symmetric=True)
    Q_ = ZeroOperator(n, p)
    new_Q = BlockLinearOperator([[Q, Q_]])

    p, n = new_Q.shape
    m, n = B.shape
    name = str(p) + '_' + str(n) + '_' + str(m) + '_l1_ls'
    lsqpr = LSQRModel(Q=new_Q, B=B, d=d, c=c, Lcon=lcon, Ucon=ucon,
                      Lvar=lvar, Uvar=uvar, name=name)
    return lsqpr
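
# Hedged sketch (helper name hypothetical; uses scipy, which this module
# does not otherwise import) of the true partial DCT matrix that the
# comment in DCT() describes: m rows of the n*n orthonormal DCT matrix.
def _partial_dct_sketch(n=10, m=4):
    import numpy as np
    from scipy.fftpack import dct
    D = dct(np.eye(n), norm='ortho', axis=0)  # D * x computes the DCT of x
    J = np.random.permutation(n)[:m]          # m randomly chosen row indices
    return D[J, :]                            # the m x n partial DCT matrix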
def Random(n=10, m=4, delta=1.0e-05):
    """Random l1-regularized least-squares test problem with a
    nonnegative signal.

    `n` is the signal dimension and `m` is the number of measurements.
    """
    from toolslsq import as_llmat

    np.random.seed(1919)
    A = np.random.random((m, n)).T
    Q = sp(matrix=as_llmat(A))
    Q = PysparseLinearOperator(Q)

    eps = 0.1
    v = eps * np.ones([1, m])
    # y = sprandvec(n, 30)
    y = v.T
    d = Q * y[:, 0]

    # (A spiky signal could be generated here instead, as in DCT().)

    p = n
    n = m
    d = np.array(d)
    c = np.concatenate((np.zeros(n), np.ones(n) * delta))
    ucon = np.zeros(2 * n)
    lcon = -np.ones(2 * n) * inf
    uvar = np.ones(2 * n) * inf
    lvar = -np.ones(2 * n) * inf
    lvar[0:n] = np.zeros(n)            # force a nonnegative signal

    I = IdentityOperator(n, symmetric=True)

    # Build [ I  -I]
    #       [-I  -I]
    B = BlockLinearOperator([[I, -I], [-I]], symmetric=True)
    Q_ = ZeroOperator(n, p)
    new_Q = BlockLinearOperator([[Q, Q_]])

    p, n = new_Q.shape
    m, n = B.shape
    name = str(p) + '_' + str(n) + '_' + str(m) + '_l1_ls'
    lsqpr = LSQRModel(Q=new_Q, B=B, d=d, c=c, Lcon=lcon, Ucon=ucon,
                      Lvar=lvar, Uvar=uvar, name=name)
    return lsqpr
def hess(self, xst, yzuv=None, c=None, J=None, H=None):
    """
    Return the primal-dual Hessian matrix of the interior/exterior merit
    function with specific values of the Lagrange multiplier estimates.
    If you require the exact (primal) Hessian, setting yzuv=None will
    result in using the primal multipliers.

    :parameters:
        :xst:  vector of primal variables.
        :yzuv: vector of multiplier estimates. If set to `None`, the
               primal multipliers will be used, which effectively yields
               the Hessian of the barrier objective.

    :keywords:
        :c: constraint vector of the l1 problem, if available.
        :J: Jacobian of the constraints of the l1 problem, if available.
        :H: Hessian of the Lagrangian of the l1 problem, or an estimate.
    """
    # Shortcuts.
    l1 = self.l1
    model = l1.model
    n = model.n

    if c is None:
        c = l1.cons(xst)
    if J is None:
        J = l1.jac(xst)
    st = xst[n:]
    if yzuv is None:
        yzuv = self.primal_multipliers(xst, c=c)
    yz = yzuv[:l1.m]
    uv = yzuv[l1.m:]

    # Hbar = H(xst) + J(xst)' C(xst)^{-1} YZ J(xst) + bits with u and v.
    Hbar = l1.hess(xst, yzuv) if H is None else H.copy()
    _JCYJ = spmatrix.symdot(J.matrix, yz / c)
    JCYJ = sp(matrix=_JCYJ)
    Hbar += JCYJ
    r1 = range(n, self.n - 1)
    Hbar.addAt(uv / st, r1, r1)
    return Hbar
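
# Hedged pure-numpy analogue (helper name hypothetical) of the primal-dual
# term assembled above: spmatrix.symdot(J, yz/c) forms the symmetric
# product J' * diag(yz/c) * J, i.e. the J(x)' C(x)^{-1} YZ J(x) block of
# Hbar described in the comment.
def _jcyj_sketch():
    import numpy as np
    J = np.array([[1.0, 2.0], [0.0, 1.0], [3.0, 0.0]])
    yz = np.array([1.0, 2.0, 0.5])     # multiplier estimates
    c = np.array([2.0, 1.0, 4.0])      # constraint values (positive)
    return J.T.dot(np.diag(yz / c)).dot(J)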
    return LinearOperator(A.shape[1], A.shape[0],
                          lambda v: np.dot(A, v),
                          matvec_transp=lambda u: np.dot(A.T, u),
                          symmetric=False)


if __name__ == '__main__':
    from pykrylov.tools import check_symmetric
    from pysparse.sparse.pysparseMatrix import PysparseMatrix as sp
    from nlpy.model import AmplModel
    import sys

    np.set_printoptions(precision=3, linewidth=80, threshold=10, edgeitems=3)

    nlp = AmplModel(sys.argv[1])
    J = sp(matrix=nlp.jac(nlp.x0))
    e1 = np.ones(J.shape[0])
    e2 = np.ones(J.shape[1])
    print 'J.shape = ', J.getShape()

    print 'Testing PysparseLinearOperator:'
    op = PysparseLinearOperator(J)
    print 'op.shape = ', op.shape
    print 'op.T.shape = ', op.T.shape
    print 'op * e2 = ', op * e2
    print 'op.T * e1 = ', op.T * e1
    print 'op.T.T * e2 = ', op.T.T * e2
    print 'op.T.T.T * e1 = ', op.T.T.T * e1
    print 'With call:'
    print 'op(e2) = ', op(e2)
    print 'op.T(e1) = ', op.T(e1)
def __call__(self, y, **kwargs):
    "Return the result of applying preconditioner to y"
    return y / self.diag


if __name__ == '__main__':
    hdr_fmt = '%10s %6s %8s %8s %8s'
    hdr = hdr_fmt % ('Name', 'Matvec', 'Resid0', 'Resid', 'Error')
    fmt = '%10s %6d %8.2e %8.2e %8.2e'
    print hdr
    print '-' * len(hdr)

    # AA = spmatrix.ll_mat_from_mtx('mcca.mtx')
    AA = spmatrix.ll_mat_from_mtx('jpwh_991.mtx')
    A = sp(matrix=AA)

    # Create diagonal preconditioner.
    dp = DiagonalPrec(A)

    n = A.shape[0]
    e = np.ones(n)
    rhs = A * e

    for KSolver in [CGS, TFQMR, BiCGSTAB]:
        ks = KSolver(lambda v: A * v,
                     # precon=dp,
                     # verbose=False,
                     reltol=1.0e-8)
        ks.solve(rhs, guess=1 + np.arange(n, dtype=np.float),
                 matvec_max=2 * n)
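
# Hedged illustration (helper name hypothetical; not part of the demo):
# why a diagonal (Jacobi) preconditioner such as dp can help the Krylov
# solvers above -- scaling by the diagonal can sharply reduce the
# condition number of the system.
def _jacobi_conditioning_sketch():
    import numpy as np
    A = np.diag([1.0, 10.0, 100.0]) + 0.1 * np.ones((3, 3))
    D = np.diag(np.diag(A))
    print(np.linalg.cond(A))                      # badly conditioned
    print(np.linalg.cond(np.linalg.solve(D, A)))  # close to 1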
except:
    from nlpy.linalg.pyma27 import PyMa27Context as LBLContext
from pysparse.sparse import spmatrix
from pysparse.sparse.pysparseMatrix import PysparseMatrix as sp
import numpy as np
from nlpy.tools.timing import cputime
import sys

if len(sys.argv) < 3:
    sys.stderr.write("Please supply two positive definite matrices as input")
    sys.stderr.write(" in MatrixMarket format.\n")
    sys.exit(1)

# Create symmetric quasi-definite matrix K.
A = sp(matrix=spmatrix.ll_mat_from_mtx(sys.argv[1]))
C = sp(matrix=spmatrix.ll_mat_from_mtx(sys.argv[2]))

nA = A.shape[0]
nC = C.shape[0]
# K = spmatrix.ll_mat_sym(nA + nC, A.nnz + C.nnz + min(nA, nC))
K = sp(size=nA + nC, sizeHint=A.nnz + C.nnz + min(nA, nC), symmetric=True)
K[:nA, :nA] = A
K[nA:, nA:] = -C
# K[nA:, nA:].scale(-1.0)
idx = np.arange(min(nA, nC), dtype=np.int)
K.put(1, nA + idx, idx)

# Create right-hand side rhs = K*e.
e = np.ones(nA + nC)
# rhs = np.empty(nA + nC)
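
# Hedged dense-numpy sketch (helper name hypothetical) of the symmetric
# quasi-definite structure assembled above: K = [[A, E'], [E, -C]] with
# ones on the main diagonal of the off-diagonal block E.  The pysparse
# matrix is declared symmetric, so K.put(1, nA + idx, idx) fills the lower
# triangle only; the dense analogue needs both triangles.
def _sqd_sketch():
    import numpy as np
    nA, nC = 3, 2
    A = np.diag([2.0, 3.0, 4.0])       # stand-in positive definite block
    C = np.diag([1.0, 5.0])            # stand-in positive definite block
    K = np.zeros((nA + nC, nA + nC))
    K[:nA, :nA] = A
    K[nA:, nA:] = -C
    idx = np.arange(min(nA, nC))
    K[nA + idx, idx] = 1.0             # same as K.put(1, nA + idx, idx)
    K[idx, nA + idx] = 1.0             # symmetric counterpart
    return K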
    Return the square root of a linear operator, if defined. Note that
    this is not the elementwise square root. The result is a linear
    operator that, when composed with itself, yields the original
    operator.
    """
    return op._sqrt()


if __name__ == '__main__':
    from pykrylov.tools import check_symmetric
    from pysparse.sparse.pysparseMatrix import PysparseMatrix as sp
    from nlpy.model import AmplModel
    import sys

    np.set_printoptions(precision=3, linewidth=80, threshold=10, edgeitems=3)

    nlp = AmplModel(sys.argv[1])
    J = sp(matrix=nlp.jac(nlp.x0))
    e1 = np.ones(J.shape[0])
    e2 = np.ones(J.shape[1])
    print 'J.shape = ', J.getShape()

    print 'Testing PysparseLinearOperator:'
    op = PysparseLinearOperator(J)
    print 'op.shape = ', op.shape
    print 'op.T.shape = ', op.T.shape
    print 'op * e2 = ', op * e2
    print 'op.T * e1 = ', op.T * e1
    print 'op.T.T * e2 = ', op.T.T * e2
    print 'op.T.T.T * e1 = ', op.T.T.T * e1
    print 'With call:'
    print 'op(e2) = ', op(e2)
    print 'op.T(e1) = ', op.T(e1)
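
# Hedged numpy illustration (helper name hypothetical) of the operator
# square-root contract documented above: for a diagonal operator, the
# square root is the elementwise-sqrt diagonal operator, and composing it
# with itself recovers the original action.
def _sqrt_contract_sketch():
    import numpy as np
    d = np.array([4.0, 9.0, 16.0])
    sqrt_op = lambda v: np.sqrt(d) * v
    v = np.array([1.0, 2.0, 3.0])
    assert np.allclose(sqrt_op(sqrt_op(v)), d * v)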
except:
    from nlpy.linalg.pyma27 import PyMa27Context as LBLContext
from pysparse.sparse import spmatrix
from pysparse.sparse.pysparseMatrix import PysparseMatrix as sp
import numpy as np
from nlpy.tools.timing import cputime
import sys

if len(sys.argv) < 3:
    sys.stderr.write('Please supply two positive definite matrices as input')
    sys.stderr.write(' in MatrixMarket format.\n')
    sys.exit(1)

# Create symmetric quasi-definite matrix K.
A = sp(matrix=spmatrix.ll_mat_from_mtx(sys.argv[1]))
C = sp(matrix=spmatrix.ll_mat_from_mtx(sys.argv[2]))

nA = A.shape[0]
nC = C.shape[0]
# K = spmatrix.ll_mat_sym(nA + nC, A.nnz + C.nnz + min(nA, nC))
K = sp(size=nA + nC, sizeHint=A.nnz + C.nnz + min(nA, nC), symmetric=True)
K[:nA, :nA] = A
K[nA:, nA:] = -C
# K[nA:, nA:].scale(-1.0)
idx = np.arange(min(nA, nC), dtype=np.int)
K.put(1, nA + idx, idx)

# Create right-hand side rhs = K*e.
e = np.ones(nA + nC)
# rhs = np.empty(nA + nC)
def __call__(self, y, **kwargs):
    "Return the result of applying preconditioner to y"
    return y / self.diag


if __name__ == '__main__':
    hdr_fmt = '%10s %6s %8s %8s %8s'
    hdr = hdr_fmt % ('Name', 'Matvec', 'Resid0', 'Resid', 'Error')
    fmt = '%10s %6d %8.2e %8.2e %8.2e'
    print hdr
    print '-' * len(hdr)

    AA = spmatrix.ll_mat_from_mtx(sys.argv[1])
    A = sp(matrix=AA)
    op = PysparseLinearOperator(A)

    # Create diagonal preconditioner.
    dp = DiagonalPrec(A)

    n = A.shape[0]
    e = np.ones(n)
    rhs = A * e

    for KSolver in [CGS, TFQMR, BiCGSTAB]:
        ks = KSolver(op,
                     # precon=dp,
                     # verbose=False,
                     reltol=1.0e-8)
def _jac(self, x, lp=False):
    """
    Helper method to assemble the Jacobian matrix of the constraints of
    the transformed problems. See the documentation of :meth:`jac` for
    more information.

    The positional argument `lp` should be set to `True` only if the
    problem is known to be a linear program. In this case, the evaluation
    of the constraint matrix is cheaper and the argument `x` is ignored.
    """
    n = self.original_n
    m = self.original_m

    # List() simply allows operations such as 1 + [2,3] -> [3,4].
    lowerC = List(self.lowerC); nlowerC = self.nlowerC
    upperC = List(self.upperC); nupperC = self.nupperC
    rangeC = List(self.rangeC); nrangeC = self.nrangeC
    lowerB = List(self.lowerB); nlowerB = self.nlowerB
    upperB = List(self.upperB); nupperB = self.nupperB
    rangeB = List(self.rangeB); nrangeB = self.nrangeB
    nbnds = nlowerB + nupperB + 2 * nrangeB
    nSlacks = nlowerC + nupperC + 2 * nrangeC

    # Initialize sparse Jacobian.
    nnzJ = 2 * self.nnzj + m + nrangeC + nbnds + nrangeB  # Overestimate.
    J = sp(nrow=self.m, ncol=self.n, sizeHint=nnzJ)

    # Insert contribution of general constraints.
    if lp:
        J[:m, :n] = AmplModel.A(self)
    else:
        J[:m, :n] = AmplModel.jac(self, x[:n])
    J[upperC, :n] *= -1.0                 # Flip sign of 'upper' gradients.
    J[m:m + nrangeC, :n] = J[rangeC, :n]  # Append 'upper' side of range const.
    J[m:m + nrangeC, :n] *= -1.0          # Flip sign of 'upper' range gradients.

    # Create a few index lists.
    rlowerC = List(range(nlowerC)); rlowerB = List(range(nlowerB))
    rupperC = List(range(nupperC)); rupperB = List(range(nupperB))
    rrangeC = List(range(nrangeC)); rrangeB = List(range(nrangeB))

    # Insert contribution of slacks on general constraints.
    J.put(-1.0, lowerC, n + rlowerC)
    J.put(-1.0, upperC, n + nlowerC + rupperC)
    J.put(-1.0, rangeC, n + nlowerC + nupperC + rrangeC)
    J.put(-1.0, m + rrangeC, n + nlowerC + nupperC + nrangeC + rrangeC)

    # Insert contribution of bound constraints on the original problem.
    bot = m + nrangeC
    J.put(1.0, bot + rlowerB, lowerB)
    bot += nlowerB
    J.put(1.0, bot + rrangeB, rangeB)
    bot += nrangeB
    J.put(-1.0, bot + rupperB, upperB)
    bot += nupperB
    J.put(-1.0, bot + rrangeB, rangeB)

    # Insert contribution of slacks on the bound constraints.
    bot = m + nrangeC
    J.put(-1.0, bot + rlowerB, n + nSlacks + rlowerB)
    bot += nlowerB
    J.put(-1.0, bot + rrangeB, n + nSlacks + nlowerB + rrangeB)
    bot += nrangeB
    J.put(-1.0, bot + rupperB, n + nSlacks + nlowerB + nrangeB + rupperB)
    bot += nupperB
    J.put(-1.0, bot + rrangeB, n + nSlacks + nlowerB + nrangeB + nupperB + rrangeB)

    return J
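
# Hedged toy dense sketch (helper name hypothetical) of the slack rows
# built in _jac() above for a single range constraint l <= c(x) <= u with
# gradient g: the natural row is [g, -1, 0] for c(x) - s_lower = 0, and
# the flipped upper side appended at the bottom is [-g, 0, -1].
def _jac_slack_sketch():
    import numpy as np
    g = np.array([1.0, -2.0])
    J = np.array([[ g[0],  g[1], -1.0,  0.0],
                  [-g[0], -g[1],  0.0, -1.0]])
    return J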
def jac(self, xst, J=None, **kwargs):
    """
    Return the constraint Jacobian of the L1 merit function problem.

    :parameters:
        :xst: vector of primal variables [x, s, t].

    :keywords:
        :J: constraint Jacobian of original problem, if available. If
            supplied, `J` must conform to the output of the `jac_pos()`
            method.
    """

    def Range(*args):
        return np.arange(*args, dtype=np.int)

    # Shortcuts.
    model = self.model
    n = model.n
    m = model.m
    eqC = model.equalC; neqC = model.nequalC
    lC = model.lowerC;  nlC = model.nlowerC
    uC = model.upperC;  nuC = model.nupperC
    rC = model.rangeC;  nrC = model.nrangeC
    lB = model.lowerB;  nlB = model.nlowerB
    uB = model.upperB;  nuB = model.nupperB
    rB = model.rangeB;  nrB = model.nrangeB
    nB = self.nBounds
    nB2 = self.nBounds2
    (x, s, t) = self.get_xst(xst)

    # We order constraints and variables as follows:
    #
    #                      x    s    t
    #  l ≤ c(x) ≤ u     [  J    I      ]  } m
    # (l ≤) c(x) ≤ u    [ -JR   I      ]  } nrC
    #  l ≤ x            [  I         I ]  ^
    #  x ≤ u            [ -I         I ]  | nB2
    #  l ≤ x (≤ u)      [  I         I ]  |
    # (l ≤) x ≤ u       [ -I         I ]  v
    #
    #                      n    m    nB
    Jp = sp(nrow=m + nrC + nB2, ncol=n + m + nB, symmetric=False,
            sizeHint=model.nnzj + 10 * nrC + 2 * m + 2 * nB + nB2)

    # Contributions from original problem variables.
    r_lB = Range(nlB)
    r_uB = Range(nuB)
    r_rB = Range(nrB)
    Jp[:m + nrC, :n] = self.jac_pos(x) if J is None else J[:, :]
    Jp.put(1, m + nrC + r_lB, lB)                  # l ≤ x.
    Jp.put(-1, m + nrC + nlB + r_uB, uB)           # x ≤ u.
    Jp.put(1, m + nrC + nlB + nuB + r_rB, rB)      # l ≤ x (≤ u).
    Jp.put(-1, m + nrC + nB + r_rB, rB)            # (l ≤) x ≤ u.

    # Contributions from elastics on original bound constraints.
    Jp.put(1, m + nrC + r_lB, n + m + r_lB)              # xL + tL ≥ l.
    Jp.put(1, m + nrC + nlB + r_uB, n + m + nlB + r_uB)  # -xU + tU ≥ -u.
    # xR + tR ≥ l and -xR + tR ≥ -u.
    Jp.put(1, m + nrC + nlB + nuB + r_rB, n + m + nlB + nuB + r_rB)
    Jp.put(1, m + nrC + nlB + nuB + nrB + r_rB, n + m + nlB + nuB + r_rB)

    # Note that in the Jacobian, the elastics are ordered exactly like the
    # constraints of the original problem.

    # Contributions from elastics for equality constraints.
    a_eqC = np.array(eqC)
    Jp.put(1, eqC, n + a_eqC)                      # cE(x) + sE ≥ 0.

    # Contributions from elastics for lower inequality constraints.
    a_lC = np.array(lC)
    Jp.put(1, lC, n + a_lC)

    # Contributions from elastics for upper inequality constraints.
    a_uC = np.array(uC)
    Jp.put(1, uC, n + a_uC)

    # Contributions from elastics for range constraints.
    r_rC = Range(nrC)
    a_rC = np.array(rC)
    Jp.put(1, rC, n + a_rC)
    Jp.put(1, m + r_rC, n + a_rC)

    return Jp
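
# Hedged toy dense sketch (helper name hypothetical) of the bound rows
# appended in jac() above, for n = 2 variables with one lower bound on
# x[0] and one upper bound on x[1] and no general constraints (m = 0):
# each bound row carries +1 or -1 in the variable column and +1 in the
# column of its elastic variable t.
def _jac_bound_rows_sketch():
    import numpy as np
    n, nB = 2, 2
    rows = np.zeros((nB, n + nB))
    rows[0, 0] = 1.0       # l ≤ x0
    rows[0, n + 0] = 1.0   # ... plus elastic t0
    rows[1, 1] = -1.0      # x1 ≤ u
    rows[1, n + 1] = 1.0   # ... plus elastic t1
    return rows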