def embed_SDP(P, order="AMD", cholmod=False):
    if not isinstance(P, SDP):
        raise ValueError("not an SDP object")
    if order == 'AMD':
        from cvxopt.amd import order
    elif order == 'METIS':
        from cvxopt.metis import order
    else:
        raise ValueError("unknown ordering: %s" % (order))
    p = order(P.V)

    if cholmod:
        from cvxopt import cholmod
        V = +P.V + spmatrix([float(i + 1) for i in range(P.n)],
                            list(range(P.n)), list(range(P.n)))
        F = cholmod.symbolic(V, p=p)
        cholmod.numeric(V, F)
        f = cholmod.getfactor(F)
        fd = [(j, i) for i, j in enumerate(f[:P.n**2:P.n + 1])]
        fd.sort()
        ip = matrix([j for _, j in fd])
        Ve = chompack.tril(chompack.perm(chompack.symmetrize(f), ip))
        Ie = misc.sub2ind((P.n, P.n), Ve.I, Ve.J)
    else:
        # Vc,n = chompack.embed(P.V,p)
        symb = chompack.symbolic(P.V, p)
        # Ve = chompack.sparse(Vc)
        Ve = symb.sparsity_pattern(reordered=False)
        Ie = misc.sub2ind((P.n, P.n), Ve.I, Ve.J)

    Pe = SDP()
    Pe._A = +P.A
    Pe._b = +P.b
    Pe._A[:, 0] += spmatrix(0.0, Ie, [0 for i in range(len(Ie))],
                            (Pe._A.size[0], 1))
    Pe._agg_sparsity()
    Pe._pname = P._pname + "_embed"
    Pe._ischordal = True
    Pe._blockstruct = P._blockstruct
    return Pe
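# Stand-alone sketch of the embedding step used in embed_SDP above: build a
# chordal embedding of a sparsity pattern with chompack and recover the
# embedded pattern in the original ordering.  The pattern V below is made up
# for illustration (a chordless 4-cycle, so the embedding adds fill).
from cvxopt import spmatrix, amd
import chompack

V = spmatrix(1.0, [0, 1, 2, 3, 1, 2, 3, 3], [0, 1, 2, 3, 0, 1, 2, 0], (4, 4))
p = amd.order(V)                              # fill-reducing ordering
symb = chompack.symbolic(V, p)                # symbolic factorization = chordal embedding
Ve = symb.sparsity_pattern(reordered=False)   # embedded pattern, original ordering
print(symb.fill)                              # (fill from factorization, fill from merging)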
def test_merge_nc(self):
    symb = cp.symbolic(self.A_nc, p=None, merge_function=cp.merge_size_fill(0, 0))
    self.assertEqual(symb.n, 23)
    self.assertEqual(symb.nnz, 150)
    self.assertEqual(symb.clique_number, 12)
    self.assertEqual(symb.Nsn, 10)
    self.assertEqual(symb.fill, (73, 0))

    p = amd.order(self.A_nc)
    symb = cp.symbolic(self.A_nc, p=p, merge_function=cp.merge_size_fill(4, 4))
    self.assertEqual(symb.n, 23)
    self.assertTrue(symb.nnz > 150)
    self.assertTrue(symb.Nsn < 10)
    self.assertTrue(symb.fill[0] >= 36)
    self.assertTrue(symb.fill[1] > 0)
def test_symbolic_nc(self):
    symb = cp.symbolic(self.A_nc, p=None)
    self.assertEqual(symb.n, 23)
    self.assertEqual(symb.nnz, 150)
    self.assertEqual(symb.clique_number, 12)
    self.assertEqual(symb.Nsn, 10)
    self.assertEqual(symb.fill, (73, 0))
    #self.assertEqual(symb.p, None)
    #self.assertEqual(symb.ip, None)

    p = amd.order(self.A_nc)
    symb = cp.symbolic(self.A_nc, p=p)
    self.assertEqual(symb.n, 23)
    self.assertEqual(symb.nnz, 113)
    self.assertEqual(symb.clique_number, 9)
    self.assertEqual(symb.Nsn, 15)
    self.assertEqual(symb.fill, (36, 0))
def mk_rand(V, cone='posdef', seed=0):
    """
    Generates random matrix U with sparsity pattern V.
    - U is positive definite if cone is 'posdef'.
    - U is completable if cone is 'completable'.
    """
    if not (cone == 'posdef' or cone == 'completable'):
        raise ValueError("cone must be 'posdef' (default) or 'completable'")

    from cvxopt import amd
    setseed(seed)
    n = V.size[0]

    U = +V
    U.V *= 0.0
    for i in range(n):
        u = normal(n, 1) / sqrt(n)
        base.syrk(u, U, beta=1.0, partial=True)

    # test if U is in cone: if not, add multiple of identity
    t = 0.1
    Ut = +U
    p = amd.order(Ut)
    # Vc, NF = chompack.embed(U,p)
    symb = chompack.symbolic(U, p)
    Vc = chompack.cspmatrix(symb) + U

    while True:
        # Uc = chompack.project(Vc,Ut)
        Uc = chompack.cspmatrix(symb) + Ut
        try:
            if cone == 'posdef':
                # positive definite?
                # Y = chompack.cholesky(Uc)
                Y = Uc.copy()
                chompack.cholesky(Y)
            elif cone == 'completable':
                # completable?
                # Y = chompack.completion(Uc)
                Y = Uc.copy()
                chompack.completion(Y)
            # Success: Ut is in cone
            U = +Ut
            break
        except:
            Ut = U + spmatrix(t, range(n), range(n), (n, n))
            t *= 2.0

    return U
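# Hypothetical usage of mk_rand above (a sketch): V is an arbitrary
# lower-triangular sparsity pattern, and the check at the end simply repeats
# the test mk_rand performs internally.  Assumes the same module-level imports
# as mk_rand (cvxopt base, normal, setseed, sqrt, spmatrix and chompack).
from cvxopt import spmatrix
import chompack

V = spmatrix(1.0, [0, 1, 2, 3, 1, 2, 3, 3], [0, 1, 2, 3, 0, 1, 2, 0], (4, 4))
U = mk_rand(V, cone='posdef', seed=1)   # random positive definite U with pattern V

symb = chompack.symbolic(U)             # chordal embedding of the pattern of U
Y = chompack.cspmatrix(symb) + U
chompack.cholesky(Y)                    # succeeds because U is positive definite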
def multstuff(self, A, AT, m, n):
    """Multiplies and stuffs matrices."""
    # Make a sparse mask of rn. Use A*A'.
    Am = zeros(m, n)
    for k in A.keys():
        Am[k] = 1
    Am = Am * tp(Am)
    p = order(Am)
    Ao = Am[p, p]

    # Design a simple storage scheme for Ao.
    k = 0
    ss = {}
    for (i, j) in zip(Ao.I, Ao.J):
        if i >= j:
            ss[i, j] = k
            k += 1

    # Find reduced Newton system matrix ASAT.
    d = {}
    ASAT = {}

    # Write some code.
    h = param('CM_h', m, 1)
    for i in range(m):
        ip = p[i]
        for j in range(i + 1):
            jp = p[j]
            for k in range(n):
                if (ip, k) in A and (k, jp) in AT:
                    if ss[i, j] not in d:
                        d[ss[i, j]] = exprtoC(A[ip, k] * h[k] * AT[k, jp])
                    else:
                        d[ss[i, j]] += ' + ' + exprtoC(A[ip, k] * h[k] * AT[k, jp])

    s = ''
    for k in sorted(d.keys()):
        s += 'CM_ASAT[%d] = %s;\n' % (k, d[k])

    return (Ao, s, ss, p)
def completion(X):
    """
    Returns the maximum-determinant positive definite completion of X,
    if it exists; otherwise an exception is raised.
    """
    from cvxopt.amd import order
    n = X.size[0]
    Xt = chompack.tril(X)
    p = order(Xt)
    # Xc,N = chompack.embed(Xt,p)
    # L = chompack.completion(Xc)
    symb = chompack.symbolic(Xt, p)
    L = chompack.cspmatrix(symb) + Xt
    chompack.completion(L)
    Xt = matrix(0., (n, n))
    Xt[::n + 1] = 1.
    # chompack.solve(L, Xt, mode=0)
    # chompack.solve(L, Xt, mode=1)
    chompack.trsm(L, Xt)
    chompack.trsm(L, Xt, trans='T')
    return Xt
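# Possible usage of completion above (a sketch): X is a small made-up partial
# matrix whose specified entries lie on a chordal (tridiagonal) pattern and
# whose cliques are positive definite, so the completion exists.  Assumes the
# module-level imports completion itself relies on (chompack, cvxopt matrix).
from cvxopt import spmatrix

X = spmatrix([2.0, 2.0, 2.0, 1.0, 1.0], [0, 1, 2, 1, 2], [0, 1, 2, 0, 1], (3, 3))
W = completion(X)   # dense maximum-determinant positive definite completion
print(W)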
def solve_phase1(self, kktsolver='chol', MM=1e5):
    """
    Solves primal Phase I problem using the feasible start solver.

    Returns primal feasible X.
    """
    from cvxopt import cholmod, amd
    k = 1e-3

    # compute Schur complement matrix
    Id = [i * (self.n + 1) for i in range(self.n)]
    As = self._A[:, 1:]
    As[Id, :] /= sqrt(2.0)

    M = spmatrix([], [], [], (self.m, self.m))
    base.syrk(As, M, trans='T')
    u = +self.b

    # compute least-norm solution
    F = cholmod.symbolic(M)
    cholmod.numeric(M, F)
    cholmod.solve(F, u)
    x = 0.5 * self._A[:, 1:] * u
    X0 = spmatrix(x[self.V[:].I], self.V.I, self.V.J, (self.n, self.n))

    # test feasibility
    p = amd.order(self.V)
    #Xc,Nf = chompack.embed(X0,p)
    #E = chompack.project(Xc,spmatrix(1.0,range(self.n),range(self.n)))
    symb = chompack.symbolic(self.V, p)
    Xc = chompack.cspmatrix(symb) + X0
    try:
        # L = chompack.completion(Xc)
        L = Xc.copy()
        chompack.completion(L)
        # least-norm solution is feasible
        return X0, None
    except:
        pass

    # create Phase I SDP object
    trA = matrix(0.0, (self.m + 1, 1))
    e = matrix(1.0, (self.n, 1))
    Aa = self._A[Id, 1:]
    base.gemv(Aa, e, trA, trans='T')
    trA[-1] = MM
    P1 = SDP()
    P1._A = misc.phase1_sdp(self._A, trA)
    P1._b = matrix([self.b - k * trA[:self.m], MM])
    P1._agg_sparsity()

    # find feasible starting point for Phase I problem
    tMIN = 0.0
    tMAX = 1.0
    while True:
        t = (tMIN + tMAX) / 2.0
        #Xt = chompack.copy(Xc)
        #chompack.axpy(E,Xt,t)
        Xt = Xc.copy() + spmatrix(t, list(range(self.n)), list(range(self.n)))
        try:
            # L = chompack.completion(Xt)
            L = Xt.copy()
            chompack.completion(L)
            tMAX = t
            if tMAX - tMIN < 1e-1:
                break
        except:
            tMAX *= 2.0
            tMIN = t

    tt = t + 1.0
    U = X0 + spmatrix(tt, list(range(self.n)), list(range(self.n)))
    trU = sum(U[:][Id])

    Z0 = spdiag([U, spmatrix([tt + k, MM - trU], [0, 1], [0, 1], (2, 2))])
    sol = P1.solve_feas(primalstart={'x': Z0}, kktsolver=kktsolver)

    s = sol['x'][-2, -2] - k
    if s > 0:
        return None, P1
    else:
        sol.pop('y')
        sol.pop('s')
        X0 = sol.pop('x')[:self.n, :self.n] \
            - spmatrix(s, list(range(self.n)), list(range(self.n)))
        return X0, sol
# Read in X
X = pandas.read_csv('X.csv', header=None).values

# Read in b
b = pandas.read_csv('true_b.csv', header=None).values

# Read in Y
Y = pandas.read_csv('Y.csv', header=None).values

# Read in ffx variance
sigma2 = pandas.read_csv('true_ffxvar.csv', header=None).values

ZtZ = cvxopt.spmatrix.trans(Z2) * Z2

# Use minimum degree ordering
P = amd.order(ZtZ)

# Set the factorisation to use LL' instead of LDL'
cholmod.options['supernodal'] = 2

# Make an expression for the factorisation
F = cholmod.symbolic(ZtZ, p=P)

# Calculate the factorisation
cholmod.numeric(ZtZ, F)

# Get the sparse cholesky factorisation
L = cholmod.getfactor(F)

LLt = L * cvxopt.spmatrix.trans(L)

# Create initial lambda
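# Hypothetical continuation (a sketch): the factorisation F computed above can
# be reused to solve linear systems with Z'Z; the right-hand side v below is
# made up for illustration.
v = cvxopt.matrix(1.0, (ZtZ.size[0], 1))
x_ls = +v
cholmod.solve(F, x_ls)              # overwrites x_ls with (Z'Z)^{-1} v
print(max(abs(ZtZ * x_ls - v)))     # residual check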
def solve(A, b, C, L, dims, proxqp=None, sigma=1.0, rho=1.0, **kwargs):
    """
    Solves the SDP

        min.  < c, x >
        s.t.  A(x) = b
              x >= 0

    and its dual

        max.  -< b, y >
        s.t.  s >= 0
              c + A'(y) = s

    Input arguments.

        A is an N x M sparse matrix where N = sum_i ns[i]**2 and
        M = sum_j ms[j], and ns and ms are the SDP variable sizes and
        constraint block lengths, respectively. The expression A(x) = b can
        be written as A.T*xtilde = b, where xtilde is a stacked vector of
        vectorized versions of xi.

        b is a stacked vector containing constraint vectors of size m_i x 1.

        C is a stacked vector containing vectorized 'd' matrices c_k of size
        n_k**2 x 1, representing symmetric matrices.

        L is an N x P sparse matrix, where L.T*X = 0 represents the
        consistency constraints. If an index k appears in different cliques
        i, j, and in converted form is indexed by it, jt, then L[it,l] = 1,
        L[jt,l] = -1 for some l.

        dims is a dictionary containing conic dimensions:
            dims['l'] : number of linear variables under nonnegativity constraint
            dims['q'] : list of quadratic cone orders (not implemented!)
            dims['s'] : list of semidefinite cone matrix orders

        proxqp is either a function pointer to a prox implementation, or, if
        the problem has block-diagonal correlative sparsity, a pointer to the
        prox implementation of a single clique. The choices are:
            proxqp_general    : solves prox for general sparsity pattern
            proxqp_clique     : solves prox for a single dense clique with
                                only semidefinite variables
            proxqp_clique_SNL : solves prox for sensor network localization
                                problem

        sigma is a nonnegative constant (step size).

        rho is a nonnegative constant between 0 and 2 (overrelaxation
        parameter).

        In addition, the following parameters are optional:

            maxiter : maximum number of iterations (default 100)
            reltol : relative tolerance (default 0.01). If rp < reltol and
                rd < reltol and iteration < maxiter, solver breaks and
                returns current value.
            adaptive : boolean toggle on whether adaptive step size should
                be used (default False)
            mu, tau, tauscale : parameters for adaptive step size (see paper)
            multiprocess : number of parallel processes (default 1). If
                multiprocess = 1, no parallelization is used.
            blockdiagonal : boolean toggle on whether problem has
                block-diagonal correlative sparsity. Note that even if the
                problem does have block-diagonal correlative sparsity, if
                this parameter is set to False, then general mode is used.
                (default False)
            verbose : toggle printout (default True)
            log_cputime : toggle whether cputime should be logged

        The output is returned in a dictionary with the following fields:

            x : primal variable in stacked form (X = [x0, ..., x_{N-1}]),
                where xk is the vectorized form of the nk x nk submatrix
                variable
            y, z : iterates in Spingarn's method
            cputime, walltime : total cputime and walltime, respectively,
                spent in the main loop. If log_cputime is False, then
                cputime is returned as 0.
            primal, rprimal, rdual : evolution of primal optimal value,
                primal residual, and dual residual (resp.)
            sigma : evolution of step size sigma (changes if adaptive step
                size is used)
    """
    solvers.options['show_progress'] = False
    maxiter = kwargs.get('maxiter', 100)
    reltol = kwargs.get('reltol', 0.01)
    adaptive = kwargs.get('adaptive', False)
    mu = kwargs.get('mu', 2.0)
    tau = kwargs.get('tau', 1.5)
    multiprocess = kwargs.get('multiprocess', 1)
    tauscale = kwargs.get('tauscale', 0.9)
    blockdiagonal = kwargs.get('blockdiagonal', False)
    verbose = kwargs.get('verbose', True)
    log_cputime = kwargs.get('log_cputime', True)

    if log_cputime:
        try:
            import psutil
        except ImportError:
            assert False, ("Python package psutil required to log cputime. "
                           "Package can be downloaded at http://code.google.com/p/psutil/")

    # format variables
    nl, ns = dims['l'], dims['s']
    C = C[nl:]
    L = L[nl:, :]
    As, bs = [], []
    cons = []
    offset = 0
    for k in range(len(ns)):
        Atmp = sparse(A[nl + offset:nl + offset + ns[k]**2, :])
        J = list(set(list(Atmp.J)))
        Atmp = Atmp[:, J]
        if len(sparse(Atmp).V) == Atmp[:].size[0]:
            Atmp = matrix(Atmp)
        else:
            Atmp = sparse(Atmp)
        As.append(Atmp)
        bs.append(b[J])
        cons.append(J)
        offset += ns[k]**2

    if blockdiagonal:
        if sum([len(c) for c in cons]) > len(b):
            print("Problem does not have block-diagonal correlative sparsity. "
                  "Switching to general mode.")
            blockdiagonal = False

    # If not block-diagonal correlative sparsity, represent A as a list of lists:
    #   A[i][j] is a matrix (or spmatrix) if ith clique involves jth constraint block.
    # Otherwise, A is a list of matrices, where A[i] involves the ith clique and
    # ith constraint block only.
    if not blockdiagonal:
        while sum([len(c) for c in cons]) > len(b):
            tobreak = False
            for i in range(len(cons)):
                for j in range(i):
                    ci, cj = set(cons[i]), set(cons[j])
                    s1 = ci.intersection(cj)
                    if len(s1) > 0:
                        s2 = ci.difference(cj)
                        s3 = cj.difference(ci)
                        cons.append(list(s1))
                        if len(s2) > 0:
                            s2 = list(s2)
                            if not (s2 in cons):
                                cons.append(s2)
                        if len(s3) > 0:
                            s3 = list(s3)
                            if not (s3 in cons):
                                cons.append(s3)
                        cons.pop(i)
                        cons.pop(j)
                        tobreak = True
                        break
                if tobreak:
                    break

        As, bs = [], []
        for i in range(len(cons)):
            J = cons[i]
            bs.append(b[J])
            Acol = []
            offset = 0
            for k in range(len(ns)):
                Atmp = sparse(A[nl + offset:nl + offset + ns[k]**2, J])
                if len(Atmp.V) == 0:
                    Acol.append(0)
                elif len(Atmp.V) == Atmp[:].size[0]:
                    Acol.append(matrix(Atmp))
                else:
                    Acol.append(Atmp)
                offset += ns[k]**2
            As.append(Acol)

    ms = [len(i) for i in bs]
    bs = matrix(bs)
    meq = L.size[1]

    if (not blockdiagonal) and multiprocess > 1:
        print("Multiprocessing mode can only be used if correlative sparsity "
              "is block diagonal. Switching to sequential mode.")
        multiprocess = 1

    assert rho > 0 and rho < 2, \
        'Overrelaxation parameter (rho) must be (strictly) between 0 and 2'

    # create routine for projecting on { x | L*x = 0 }
    #   { x | L*x = 0 }  ->  P = I - L*(L.T*L)i*L.T
    LTL = spmatrix([], [], [], (meq, meq))
    offset = 0
    for k in ns:
        Lk = L[offset:offset + k**2, :]
        base.syrk(Lk, LTL, trans='T', beta=1.0)
        offset += k**2
    LTLi = cholmod.symbolic(LTL, amd.order(LTL))
    cholmod.numeric(LTL, LTLi)

    # y = y - L*LTLi*L.T*y
    nssq = sum(matrix([nsk**2 for nsk in ns]))

    def proj(y, ip=True):
        if not ip:
            y = +y
        tmp = matrix(0.0, size=(meq, 1))
        ypre = +y
        base.gemv(L, y, tmp, trans='T', m=nssq, n=meq, beta=1)
        cholmod.solve(LTLi, tmp)
        base.gemv(L, tmp, y, beta=1.0, alpha=-1.0, trans='N', m=nssq, n=meq)
        if not ip:
            return y

    time_to_solve = 0

    # initialize variables
    X = C * 0.0
    Y = +X
    Z = +X
    dualS = +X
    dualy = +b
    PXZ = +X

    proxargs = {
        'C': C, 'A': As, 'b': bs, 'Z': Z, 'X': X, 'sigma': sigma,
        'dualS': dualS, 'dualy': dualy, 'ns': ns, 'ms': ms,
        'multiprocess': multiprocess
    }

    if blockdiagonal:
        proxqp = proxqp_blockdiagonal(proxargs, proxqp)
    else:
        proxqp = proxqp_general

    if log_cputime:
        utime = psutil.cpu_times()[0]
    wtime = time.time()

    primal = []
    rpvec, rdvec = [], []
    sigmavec = []

    for it in range(maxiter):
        pv, gap = proxqp(proxargs)

        blas.copy(Z, Y)
        blas.axpy(X, Y, alpha=-2.0)
        proj(Y, ip=True)

        # PXZ = sigma*(X-Z)
        blas.copy(X, PXZ)
        blas.scal(sigma, PXZ)
        blas.axpy(Z, PXZ, alpha=-sigma)

        # z = z + rho*(y-x)
        blas.axpy(X, Y, alpha=1.0)
        blas.axpy(Y, Z, alpha=-rho)

        xzn = blas.nrm2(PXZ)
        xn = blas.nrm2(X)
        xyn = blas.nrm2(Y)
        proj(PXZ, ip=True)

        rdual = blas.nrm2(PXZ)
        rpri = sqrt(abs(xyn**2 - rdual**2)) / sigma

        if log_cputime:
            cputime = psutil.cpu_times()[0] - utime
        else:
            cputime = 0
        walltime = time.time() - wtime

        if rpri / max(xn, 1.0) < reltol and rdual / max(1.0, xzn) < reltol:
            break

        rpvec.append(rpri / max(xn, 1.0))
        rdvec.append(rdual / max(1.0, xzn))
        primal.append(pv)

        if adaptive:
            if (rdual / xzn * mu < rpri / xn):
                sigmanew = sigma * tau
            elif (rpri / xn * mu < rdual / xzn):
                sigmanew = sigma / tau
            else:
                sigmanew = sigma
            if it % 10 == 0 and it > 0 and tau > 1.0:
                tauscale *= 0.9
                tau = 1 + (tau - 1) * tauscale
            sigma = max(min(sigmanew, 10.0), 0.1)
            sigmavec.append(sigma)

        if verbose:
            if log_cputime:
                print("%d: primal = %e, gap = %e, (rp,rd) = (%e,%e), sigma = %f, "
                      "(cputime,walltime) = (%f, %f)" %
                      (it, pv, gap, rpri / max(xn, 1.0), rdual / max(1.0, xzn),
                       sigma, cputime, walltime))
            else:
                print("%d: primal = %e, gap = %e, (rp,rd) = (%e,%e), sigma = %f, "
                      "walltime = %f" %
                      (it, pv, gap, rpri / max(xn, 1.0), rdual / max(1.0, xzn),
                       sigma, walltime))

    sol = {}
    sol['x'] = X
    sol['y'] = Y
    sol['z'] = Z
    sol['cputime'] = cputime
    sol['walltime'] = walltime
    sol['primal'] = primal
    sol['rprimal'] = rpvec
    sol['rdual'] = rdvec
    sol['sigma'] = sigmavec
    return sol
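# Stand-alone sketch of the nullspace projection implemented by proj() above,
# with a small made-up consistency matrix L: x := x - L*(L.T*L)^{-1}*L.T*x
# projects x onto { x | L.T*x = 0 }.
from cvxopt import matrix, spmatrix, amd, cholmod, base

L = spmatrix([1.0, -1.0, 1.0, -1.0], [0, 2, 1, 3], [0, 0, 1, 1], (4, 2))
LTL = spmatrix([], [], [], (2, 2))
base.syrk(L, LTL, trans='T')                 # LTL = L.T*L
F = cholmod.symbolic(LTL, amd.order(LTL))
cholmod.numeric(LTL, F)

x = matrix([1.0, 2.0, 3.0, 4.0])
tmp = matrix(0.0, (2, 1))
base.gemv(L, x, tmp, trans='T')              # tmp = L.T*x
cholmod.solve(F, tmp)                        # tmp = (L.T*L)^{-1}*L.T*x
base.gemv(L, tmp, x, alpha=-1.0, beta=1.0)   # x = x - L*tmp
base.gemv(L, x, tmp, trans='T')              # consistency residual, now ~0
print(tmp)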
def SW_lmerTest(theta3D, L, nlevels, nparams, ZtX, ZtY, XtX, ZtZ, XtY, YtX, YtZ, XtZ, YtY, n, beta):  # TODO inputs

    #================================================================================
    # Initial theta
    #================================================================================
    theta0 = np.array([])
    r = np.amax(nlevels.shape)
    for i in np.arange(r):
        theta0 = np.hstack((theta0, mat2vech2D(np.eye(nparams[i])).reshape(np.int64(nparams[i]*(nparams[i]+1)/2))))

    #================================================================================
    # Sparse Permutation, P
    #================================================================================
    tinds, rinds, cinds = get_mapping2D(nlevels, nparams)
    tmp = np.random.randn(theta0.shape[0])
    Lam = mapping2D(tmp, tinds, rinds, cinds)

    # Obtain Lambda'Z'ZLambda
    LamtZtZLam = spmatrix.trans(Lam) * cvxopt.sparse(matrix(ZtZ[0, :, :])) * Lam

    # Obtaining permutation for PLS
    cholmod.options['supernodal'] = 2
    P = amd.order(LamtZtZLam)

    # Identity
    I = spmatrix(1.0, range(Lam.size[0]), range(Lam.size[0]))

    # These are not spatially varying
    XtX_current = cvxopt.matrix(XtX[0, :, :])
    XtZ_current = cvxopt.matrix(XtZ[0, :, :])
    ZtX_current = cvxopt.matrix(ZtX[0, :, :])
    ZtZ_current = cvxopt.sparse(cvxopt.matrix(ZtZ[0, :, :]))

    df = np.zeros(YtY.shape[0])

    # Get the sigma^2 and D estimates.
    for i in np.arange(theta3D.shape[0]):

        # Get current theta
        theta = theta3D[i, :]

        # Convert product matrices to CVXopt form
        XtY_current = cvxopt.matrix(XtY[i, :, :])
        YtX_current = cvxopt.matrix(YtX[i, :, :])
        YtY_current = cvxopt.matrix(YtY[i, :, :])
        YtZ_current = cvxopt.matrix(YtZ[i, :, :])
        ZtY_current = cvxopt.matrix(ZtY[i, :, :])

        # Convert to gamma form
        gamma = theta2gamma(theta, ZtX_current, ZtY_current, XtX_current,
                            ZtZ_current, XtY_current, YtX_current, YtZ_current,
                            XtZ_current, YtY_current, n, P, I, tinds, rinds, cinds)

        # Estimate hessian
        H = nd.Hessian(llh_gamma)(gamma, beta[i, :, :], np.array(ZtX_current),
                                  np.array(ZtY_current), np.array(XtX_current),
                                  np.array(matrix(ZtZ_current)), np.array(XtY_current),
                                  np.array(YtX_current), np.array(YtZ_current),
                                  np.array(XtZ_current), np.array(YtY_current),
                                  nlevels, nparams, n, P, tinds, rinds, cinds)

        # Estimate Jacobian
        J = nd.Jacobian(S2_gammavec)(gamma, L, np.array(ZtX_current),
                                     np.array(ZtY_current), np.array(XtX_current),
                                     np.array(matrix(ZtZ_current)), np.array(XtY_current),
                                     np.array(YtX_current), np.array(YtZ_current),
                                     np.array(XtZ_current), np.array(YtY_current),
                                     nparams, nlevels)

        # print('J shape')
        # print(J.shape)

        # Calculate S^2
        S2 = S2_gamma(gamma, L, ZtX_current, ZtY_current, XtX_current,
                      ZtZ_current, XtY_current, YtX_current, YtZ_current,
                      XtZ_current, YtY_current, n, P, I, tinds, rinds, cinds)

        # Calculate the degrees of freedom
        df[i] = 2 * (S2**2) / (J @ np.linalg.pinv(H) @ J.transpose())

    return df
# Define sparse matrix
I = list(range(17)) + [2, 2, 3, 3, 4, 14, 4, 14, 8, 14, 15, 8, 15, 7, 8, 14, 8,
                       14, 14, 15, 10, 12, 13, 16, 12, 13, 16, 12, 13, 15, 16,
                       13, 15, 16, 15, 16, 15, 16, 16]
J = list(range(17)) + [0, 1, 1, 2, 2, 2, 3, 3, 4, 4, 4, 5, 5, 6, 6, 6, 7, 7, 8,
                       8, 9, 9, 9, 9, 10, 10, 10, 11, 11, 11, 11, 12, 12, 12,
                       13, 13, 14, 14, 15]
A = spmatrix(1.0, I, J, (17, 17))

# Test if A is chordal
p = cp.maxcardsearch(A)
print("\nMaximum cardinality search")
print(" -- perfect elimination order:", cp.peo(A, p))

# Test if natural ordering 0,1,2,...,16 is a perfect elimination order
p = list(range(17))
print("\nNatural ordering")
print(" -- perfect elimination order:", cp.peo(A, p))

p = amd.order(A)
print("\nAMD ordering")
print(" -- perfect elimination order:", cp.peo(A, p))

# Compute a symbolic factorization
symb = cp.symbolic(A, p)
print("\nSymbolic factorization:")
print("Fill :", sum(symb.fill))
print("Number of cliques :", symb.Nsn)
print(symb)

# Compute a symbolic factorization with clique merging
symb2 = cp.symbolic(A, p, merge_function=cp.merge_size_fill(3, 3))
print("Symbolic factorization with clique merging:")
print("Fill (fact.+merging) :", sum(symb2.fill))
print("Number of cliques :", symb2.Nsn)
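# Possible follow-on (a sketch): reuse the symbolic factorization symb computed
# above to compute a numeric Cholesky factorization of a matrix with the same
# pattern.  The diagonal shift of 20 is only there to make this illustrative
# matrix positive definite.
Ad = A + spmatrix(20.0, list(range(17)), list(range(17)))
X = cp.cspmatrix(symb) + Ad
cp.cholesky(X)          # in-place numeric Cholesky factorization
print(X)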