def test_basic(self):
    """Smoke-test cvxopt elementwise ops, reductions, and overflow errors."""
    import cvxopt

    a = cvxopt.matrix([1.0, 2.0, 3.0])
    assert list(a) == [1.0, 2.0, 3.0]
    b = cvxopt.matrix([3.0, -2.0, -1.0])
    c = cvxopt.spmatrix([1.0, -2.0, 3.0], [0, 2, 4], [1, 2, 4], (6, 5))
    d = cvxopt.spmatrix([1.0, 2.0, 5.0], [0, 1, 2], [0, 0, 0], (3, 1))

    # Elementwise product and quotient.
    e = cvxopt.mul(a, b)
    self.assertEqualLists(e, [3.0, -4.0, -3.0])
    self.assertAlmostEqualLists(list(cvxopt.div(a, b)), [1.0 / 3.0, -1.0, -3.0])
    self.assertAlmostEqual(cvxopt.div([1.0, 2.0, 0.25]), 2.0)

    # Elementwise min/max of two matrices.
    self.assertEqualLists(list(cvxopt.min(a, b)), [1.0, -2.0, -1.0])
    self.assertEqualLists(list(cvxopt.max(a, b)), [3.0, 2.0, 3.0])

    # Scalar reductions over lists, dense, and sparse matrices.
    self.assertEqual(cvxopt.max([1.0, 2.0]), 2.0)
    self.assertEqual(cvxopt.max(a), 3.0)
    self.assertEqual(cvxopt.max(c), 3.0)
    self.assertEqual(cvxopt.max(d), 5.0)
    self.assertEqual(cvxopt.min([1.0, 2.0]), 1.0)
    self.assertEqual(cvxopt.min(a), 1.0)
    self.assertEqual(cvxopt.min(c), -2.0)
    self.assertEqual(cvxopt.min(d), 1.0)

    # A real sparse matrix has an empty imaginary part.
    self.assertEqual(len(c.imag()), 0)

    # Oversized allocations must raise OverflowError rather than wrap.
    with self.assertRaises(OverflowError):
        cvxopt.matrix(1.0, (32780 * 4, 32780))
    with self.assertRaises(OverflowError):
        cvxopt.spmatrix(1.0, (0, 32780 * 4), (0, 32780)) + 1
def _conelp_scale(P, inplace=True, **kwargs):
    """Scale cone LP.

    Column-equilibrates G (and c) by the per-column max absolute value,
    and normalizes h to unit 2-norm when its norm exceeds 1.

    :param P:       problem object carrying ``problem_data = (c, G, h, dims)``
                    and a ``cost_scale`` attribute
    :param inplace: if True, write the scaled data back into ``P`` and update
                    ``P.cost_scale``; the scaled tuple is returned either way

    BUG FIX: the original re-read ``inplace`` with
    ``inplace = kwargs.get('inplace', True)``, which always returned True
    because an explicitly passed ``inplace`` binds to the parameter and never
    appears in ``**kwargs`` — so callers could not disable in-place scaling.
    """
    c, G, h, dims = P.problem_data

    # Per-column scale factor: max |G[:,i]|, or 1.0 for empty columns,
    # further bounded below by |c| so that c's entries stay <= 1.
    cp = G.CCS[0]
    V = abs(G.V)
    u = matrix([max(abs(V[cp[i]:cp[i + 1]])) if cp[i + 1] - cp[i] > 0 else 1.0
                for i in range(len(cp) - 1)])
    u = max(u, abs(c))

    # Apply the column scaling to G and c.
    G = G * spmatrix(div(1.0, u), range(len(u)), range(len(u)))
    c = div(c, u)

    # Normalize the right-hand side; record the scaling in the cost scale.
    nrm2h = blas.nrm2(h)
    if inplace:
        P.cost_scale *= max(1.0, nrm2h)
    if nrm2h > 1.0:
        h /= nrm2h

    if inplace:
        P.problem_data = (c, G, h, dims)
    return c, G, h, dims
def test_basic(self):
    """Check elementwise mul/div and min/max reductions on cvxopt matrices."""
    import cvxopt

    dense_a = cvxopt.matrix([1.0, 2.0, 3.0])
    dense_b = cvxopt.matrix([3.0, -2.0, -1.0])
    sp_rect = cvxopt.spmatrix([1.0, -2.0, 3.0], [0, 2, 4], [1, 2, 4], (6, 5))
    sp_col = cvxopt.spmatrix([1.0, 2.0, 5.0], [0, 1, 2], [0, 0, 0], (3, 1))

    # Elementwise arithmetic.
    self.assertEqualLists(list(cvxopt.mul(dense_a, dense_b)), [3.0, -4.0, -3.0])
    self.assertAlmostEqualLists(list(cvxopt.div(dense_a, dense_b)),
                                [1.0 / 3.0, -1.0, -3.0])
    self.assertAlmostEqual(cvxopt.div([1.0, 2.0, 0.25]), 2.0)

    # Elementwise min/max of two dense matrices.
    self.assertEqualLists(list(cvxopt.min(dense_a, dense_b)), [1.0, -2.0, -1.0])
    self.assertEqualLists(list(cvxopt.max(dense_a, dense_b)), [3.0, 2.0, 3.0])

    # Scalar reductions over lists, dense, and sparse matrices.
    self.assertEqual(cvxopt.max([1.0, 2.0]), 2.0)
    self.assertEqual(cvxopt.max(dense_a), 3.0)
    self.assertEqual(cvxopt.max(sp_rect), 3.0)
    self.assertEqual(cvxopt.max(sp_col), 5.0)
    self.assertEqual(cvxopt.min([1.0, 2.0]), 1.0)
    self.assertEqual(cvxopt.min(dense_a), 1.0)
    self.assertEqual(cvxopt.min(sp_rect), -2.0)
    self.assertEqual(cvxopt.min(sp_col), 1.0)
def test_basic(self):
    """Exercise basic cvxopt arithmetic plus OverflowError on huge sizes."""
    import cvxopt

    vec1 = cvxopt.matrix([1.0, 2.0, 3.0])
    vec2 = cvxopt.matrix([3.0, -2.0, -1.0])
    sparse_m = cvxopt.spmatrix([1.0, -2.0, 3.0], [0, 2, 4], [1, 2, 4], (6, 5))
    sparse_v = cvxopt.spmatrix([1.0, 2.0, 5.0], [0, 1, 2], [0, 0, 0], (3, 1))

    # Elementwise products/quotients of dense vectors.
    self.assertEqualLists(list(cvxopt.mul(vec1, vec2)), [3.0, -4.0, -3.0])
    self.assertAlmostEqualLists(list(cvxopt.div(vec1, vec2)),
                                [1.0 / 3.0, -1.0, -3.0])
    self.assertAlmostEqual(cvxopt.div([1.0, 2.0, 0.25]), 2.0)

    # Elementwise min/max of two dense vectors.
    self.assertEqualLists(list(cvxopt.min(vec1, vec2)), [1.0, -2.0, -1.0])
    self.assertEqualLists(list(cvxopt.max(vec1, vec2)), [3.0, 2.0, 3.0])

    # Scalar reductions over lists, dense, and sparse matrices.
    self.assertEqual(cvxopt.max([1.0, 2.0]), 2.0)
    self.assertEqual(cvxopt.max(vec1), 3.0)
    self.assertEqual(cvxopt.max(sparse_m), 3.0)
    self.assertEqual(cvxopt.max(sparse_v), 5.0)
    self.assertEqual(cvxopt.min([1.0, 2.0]), 1.0)
    self.assertEqual(cvxopt.min(vec1), 1.0)
    self.assertEqual(cvxopt.min(sparse_m), -2.0)
    self.assertEqual(cvxopt.min(sparse_v), 1.0)

    # Allocations beyond the index range must raise OverflowError.
    with self.assertRaises(OverflowError):
        cvxopt.matrix(1.0, (32780 * 4, 32780))
    with self.assertRaises(OverflowError):
        cvxopt.spmatrix(1.0, (0, 32780 * 4), (0, 32780)) + 1
def estimate_range(A, xl, xu):
    """Estimate elementwise bounds (bl, bu) on A*x given xl <= x <= xu.

    Splits A into its positive part A+ and negative part A- so that

        bl = A+ * xl + A- * xu,
        bu = A+ * xu + A- * xl.

    The split is done on the nonzero values only, which preserves the
    sparsity pattern of A (assumes A has a ``.V`` value vector, i.e. a
    cvxopt sparse matrix — TODO confirm callers never pass dense A).
    """
    pos_part = copy(A)
    neg_part = copy(A)
    # Clip only the stored nonzeros; zeros are unaffected either way.
    pos_part.V = cvxopt.max(pos_part.V, 0)
    neg_part.V = cvxopt.min(neg_part.V, 0)

    lower = pos_part * xl + neg_part * xu
    upper = pos_part * xu + neg_part * xl
    return (lower, upper)
def psdcompletion(A, reordered=True, **kwargs):
    """Maximum determinant positive semidefinite matrix completion.

    The routine takes a cspmatrix :math:`A` and returns the maximum
    determinant positive semidefinite matrix completion :math:`X` as a
    dense matrix, i.e.,

    .. math::
         P( X ) = A

    :param A:         :py:class:`cspmatrix`
    :param reordered: boolean; if False, return X permuted back to the
                      original ordering via ``symb.ip``
    """
    assert isinstance(A, cspmatrix) and A.is_factor is False, "A must be a cspmatrix"
    tol = kwargs.get('tol', 1e-15)

    X = matrix(A.spmatrix(reordered=True, symmetric=True))

    symb = A.symb
    n = symb.n
    snptr = symb.snptr
    sncolptr = symb.sncolptr
    snrowidx = symb.snrowidx

    # Visit supernodes in reverse (descending) order.
    for k in range(symb.Nsn - 1, -1, -1):
        nn = snptr[k + 1] - snptr[k]
        beta = snrowidx[sncolptr[k]:sncolptr[k + 1]]
        nj = len(beta)
        if nj - nn == 0:
            continue
        alpha = beta[nn:]
        nu = beta[:nn]
        eta = matrix([matrix(range(beta[kk] + 1, beta[kk + 1])) for kk in range(nj - 1)] +
                     [matrix(range(beta[-1] + 1, n))])

        try:
            # Try Cholesky factorization first.
            Xaa = X[alpha, alpha]
            lapack.potrf(Xaa)
            Xan = X[alpha, nu]
            lapack.trtrs(Xaa, Xan, trans='N')
            XeaT = X[eta, alpha].T
            lapack.trtrs(Xaa, XeaT, trans='N')

            # Compute update
            tmp = XeaT.T * Xan
        except ArithmeticError:
            # BUG FIX: was a bare `except:` that also swallowed
            # KeyboardInterrupt and programming errors.  cvxopt's lapack
            # routines raise ArithmeticError when the factorization fails,
            # in which case we fall back to an EVD: Xaa = Z*diag(w)*Z.T.
            Xaa = X[alpha, alpha]
            w = matrix(0.0, (Xaa.size[0], 1))
            Z = matrix(0.0, Xaa.size)
            lapack.syevr(Xaa, w, jobz='V', range='A', uplo='L', Z=Z)

            # Pseudo-inverse: Xp = pinv(Xaa), dropping eigenvalues below
            # lambda_max * tol.
            lambda_max = max(w)
            Xp = Z * spmatrix([1.0 / wi if wi > lambda_max * tol else 0.0 for wi in w],
                              range(len(w)), range(len(w))) * Z.T

            # Compute update
            tmp = X[eta, alpha] * Xp * X[alpha, nu]

        X[eta, nu] = tmp
        X[nu, eta] = tmp.T

    if reordered:
        return X
    else:
        return X[symb.ip, symb.ip]
beq0 = -0.05 * 2

# 0. SDP relaxation of the QCQP.
# BUG FIX / consistency: this fragment used Python 2 `print` statements while
# the rest of the file uses the Python 3 print() function; converted to calls.
relP0 = {'P0': A2, 'b0': 2 * A.T * b + c, 'c0': d + b.T * b}
relG0 = {'P': [Id], 'b': [None], 'c': [-1.],
         'Peq': [Z], 'beq': [None], 'ceq': [beq0]}
sol0 = qcqprel(relP0, relG0)

# Print some diagnostic output for the relaxation solution.
x0 = sol0['RQCQPx']
X0 = sol0['RQCQPX']
print("#" * 40)
print(" " * 12, "Diagnostics")
print()
print("max of |X0-x0*x0'|")
print(" " * 4, max(abs(X0 - x0 * x0.T)))
print(" || xj || ")
print(" " * 4, "x0:", nrm2(x0))
print("value of objective")
obj0 = (A * x0 + b).T * (A * x0 + b) + c.T * x0 + d
print(" " * 4, "x0: ", obj0[0])
print("Constraint check")
print(" " * 4, "x0[0]*x0[1]: ", x0[0] * x0[1])
def _conelp_to_real(P, inplace=True, **kwargs):
    """Convert complex-valued cone LP to real-valued cone LP.

    Linear and second-order cone rows are replaced by their real parts.
    Each complex s-block of order n is embedded into a real s-block of
    order 2n using the standard realification
    ``[[X.real, -X.imag], [X.imag, X.real]]`` (entries scaled by 0.5 so
    that inner products are preserved).

    :param P:       problem object with ``problem_data = (c, G, h, dims)``
    :param inplace: if True, write the converted data back into ``P`` and
                    set ``P.as_real``/``P.Nx``; otherwise return the tuple
    """
    c, G, h, dims = P.problem_data
    offset_s = dims['l'] + sum(dims['q'])

    # Non-SDP part: keep real parts only.
    Glist, hlist = [G[:offset_s, :].real()], [h[:offset_s].real()]
    Gs = G[offset_s:, :]
    hs = sparse(h[offset_s:, 0])
    ns = dims['s']

    if max(ns) <= 500:
        # Small blocks: precompute a (block, row, col) lookup table.
        ri = []
        for k, si in enumerate(dims['s']):
            for j in range(si):
                for i in range(si):
                    ri.append((k, i, j))

        def blk_entry(idx):
            return ri[idx]
    else:
        # Large blocks: compute (block, row, col) on the fly to save memory.
        def blk_entry(idx):
            blk = 0
            while idx >= ns[blk]**2:
                idx -= ns[blk]**2
                blk += 1
            return blk, idx % ns[blk], idx // ns[blk]

    # Row offsets of the realified (2n x 2n) blocks.
    offsets = [0]
    for ni in ns:
        offsets.append(offsets[-1] + (2 * ni)**2)

    # Realify the SDP part of G.
    I, J, V = [], [], []
    GI, GJ, GV = Gs.I, Gs.J, Gs.V
    for k in range(len(GI)):
        blk, i, j = blk_entry(GI[k])
        ni = 2 * ns[blk]
        if i == j:
            # Diagonal entry: appears in both diagonal sub-blocks.
            I.append([offsets[blk] + ni * j + i,
                      offsets[blk] + ni * (ns[blk] + j) + ns[blk] + i])
            J.append(2 * [GJ[k]])
            V.append(2 * [0.5 * GV[k].real])
        else:
            # Off-diagonal entry: real part on the diagonal sub-blocks,
            # +/- imaginary part on the off-diagonal sub-blocks.
            I.append([offsets[blk] + ni * j + i,
                      offsets[blk] + ni * j + ns[blk] + i,
                      offsets[blk] + ni * (ns[blk] + j) + i,
                      offsets[blk] + ni * (ns[blk] + j) + ns[blk] + i])
            J.append(4 * [GJ[k]])
            V.append([0.5 * GV[k].real, 0.5 * GV[k].imag,
                      -0.5 * GV[k].imag, 0.5 * GV[k].real])
    Gr = spmatrix([v for v in chain(*V)], [i for i in chain(*I)],
                  [j for j in chain(*J)], (offsets[-1], Gs.size[1]))

    # Realify the SDP part of h (same embedding, single column).
    I, J, V = [], [], []
    hV, hI = hs.V, hs.I
    for k in range(len(hV)):
        # BUG FIX: the original called blk_entry(GI[k]) here, indexing the
        # block lookup with G's row indices instead of h's own row indices
        # hI[k] (as the original's commented-out line `ri[hI[k]]` intended),
        # which realified the wrong entries of h.
        blk, i, j = blk_entry(hI[k])
        ni = 2 * ns[blk]
        if i == j:
            I.append([offsets[blk] + ni * j + i,
                      offsets[blk] + ni * (ns[blk] + j) + ns[blk] + i])
            J.append(2 * [0])
            V.append(2 * [0.5 * hV[k].real])
        else:
            I.append([offsets[blk] + ni * j + i,
                      offsets[blk] + ni * j + ns[blk] + i,
                      offsets[blk] + ni * (ns[blk] + j) + i,
                      offsets[blk] + ni * (ns[blk] + j) + ns[blk] + i])
            J.append(4 * [0])
            V.append([0.5 * hV[k].real, 0.5 * hV[k].imag,
                      -0.5 * hV[k].imag, 0.5 * hV[k].real])
    hr = spmatrix([v for v in chain(*V)], [i for i in chain(*I)],
                  [j for j in chain(*J)], (offsets[-1], 1))

    Glist += [Gr]
    hlist += [hr]
    G = sparse(Glist)
    h = sparse(hlist)
    dims = {'l': dims['l'], 'q': dims['q'], 's': [2 * si for si in dims['s']]}

    if inplace:
        P.problem_data = (c, G, h, dims)
        P.as_real = True
        P.Nx = P.Nx * 2
    else:
        return c, G, h, dims
def __init__(self, casefile, **kwargs):
    """Load a MATPOWER-style case file and build the cone LP.

    Optional keyword arguments:

       branch_rmin          (default: -inf )
       shunt_gmin           (default: -inf )
       gen_elim             (default:  0.0 )
       truncate_gen_bounds  (default: None )
       line_constraints     (default: True )
       pad_constraints      (default: True )
       scale                (default: False)
       conversion           (default: False)
       to_real              (default: True )
       verbose              (default:  0   )
    """
    self.to_real = kwargs.get('to_real', True)
    self.conversion = kwargs.get('conversion', False)
    self.scale = kwargs.get('scale', False)
    self.shunt_gmin = kwargs.get('shunt_gmin', -float('inf'))
    self.branch_rmin = kwargs.get('branch_rmin', -float('inf'))
    self.gen_elim = kwargs.get('gen_elim', 0.0)
    self.truncate_gen_bounds = kwargs.get('truncate_gen_bounds', None)
    self.line_constraints = kwargs.get('line_constraints', True)
    self.pad_constraints = kwargs.get('pad_constraints', True)
    self.__verbose = kwargs.get('verbose', 0)
    self.eigtol = kwargs.get('eigtol', 1e5)

    ### load data
    data = _load_case(casefile, verbose=kwargs.get('verbose', 0))
    assert data['version'] == '2'
    if kwargs.get('verbose', 0):
        print("Extracting data from case file.")

    ### add data to object
    self.baseMVA = data['baseMVA']
    self.cost_scale = self.baseMVA
    self.nbus = data['bus'].shape[0]

    # branches in service (status column > 0)
    active_branches = [i for i in range(data['branch'].shape[0])
                       if data['branch'][i, 10] > 0]
    self.nbranch = len(active_branches)

    # generators in service (status column > 0)
    active_generators = [i for i in range(data['gen'].shape[0])
                         if data['gen'][i, 7] > 0]
    self.ngen = len(active_generators)

    # bus data (powers normalized by baseMVA)
    self.busses = []
    for k in range(self.nbus):
        bus = {'id': int(data['bus'][k, 0]),
               'type': int(data['bus'][k, 1]),
               'Pd': data['bus'][k, 2] / self.baseMVA,
               'Qd': data['bus'][k, 3] / self.baseMVA,
               'Gs': data['bus'][k, 4],
               'Bs': data['bus'][k, 5],
               'area': data['bus'][k, 6],
               'Vm': data['bus'][k, 7],
               'Va': data['bus'][k, 8],
               'baseKV': data['bus'][k, 9],
               'maxVm': data['bus'][k, 11],
               'minVm': data['bus'][k, 12]}
        # Enforce the minimum shunt conductance, if any.
        if bus['Gs'] < self.shunt_gmin:
            bus['Gs'] = self.shunt_gmin
        self.busses.append(bus)

    self.bus_id_to_index = {}
    for k, bus in enumerate(self.busses):
        self.bus_id_to_index[bus['id']] = k

    # generator data; ii_p/ii_q count the active/reactive slack variables
    self.generators = []
    ii_p = 0
    ii_q = 0
    for k in active_generators:
        gen = {'bus_id': int(data['gen'][k, 0]),
               'Pg': data['gen'][k, 1] / self.baseMVA,
               'Pmax': data['gen'][k, 8] / self.baseMVA,
               'Pmin': data['gen'][k, 9] / self.baseMVA,
               'Qg': data['gen'][k, 2] / self.baseMVA,
               'Qmax': data['gen'][k, 3] / self.baseMVA,
               'Qmin': data['gen'][k, 4] / self.baseMVA,
               'Vg': data['gen'][k, 5],
               'mBase': data['gen'][k, 6]}
        if gen['Pmax'] > gen['Pmin'] + self.gen_elim:
            gen['pslack'] = ii_p
            ii_p += 1
        else:
            # eliminate slack variable and set Pmin and Pmax to their average
            gen['pslack'] = None
            gen['Pmin'] = (gen['Pmax'] + gen['Pmin']) / 2.0
            gen['Pmax'] = gen['Pmin']
        if gen['Qmax'] > gen['Qmin'] + self.gen_elim:
            gen['qslack'] = ii_q
            ii_q += 1
        else:
            # eliminate slack variable and set Qmin and Qmax to their average
            gen['qslack'] = None
            gen['Qmin'] = (gen['Qmax'] + gen['Qmin']) / 2.0
            gen['Qmax'] = gen['Qmin']
        if self.truncate_gen_bounds:
            # Clamp unreasonably large generator bounds.
            if gen['Pmin'] < -self.truncate_gen_bounds or gen['Pmax'] > self.truncate_gen_bounds:
                if kwargs.get('verbose', 0):
                    print("Warning: generator at bus %i with large active bound(s); decreasing bound(s)" % (gen['bus_id']))
                gen['Pmin'] = max(-self.truncate_gen_bounds, gen['Pmin'])
                gen['Pmax'] = min(self.truncate_gen_bounds, gen['Pmax'])
            if gen['Qmin'] < -self.truncate_gen_bounds or gen['Qmax'] > self.truncate_gen_bounds:
                if kwargs.get('verbose', 0):
                    print("Warning: generator at bus %i with large reactive bound(s); decreasing bound(s)" % (gen['bus_id']))
                gen['Qmin'] = max(-self.truncate_gen_bounds, gen['Qmin'])
                gen['Qmax'] = min(self.truncate_gen_bounds, gen['Qmax'])
        self.generators.append(gen)

    # Map bus id -> list of generator indices at that bus.
    self.bus_id_to_genlist = {}
    for k, gen in enumerate(self.generators):
        if gen['bus_id'] in self.bus_id_to_genlist:
            self.bus_id_to_genlist[gen['bus_id']].append(k)
        else:
            self.bus_id_to_genlist[gen['bus_id']] = [k]

    # branch data
    self.branches = []
    for k in active_branches:
        branch = {'from': int(data['branch'][k, 0]),
                  'to': int(data['branch'][k, 1]),
                  'r': data['branch'][k, 2],
                  'x': data['branch'][k, 3],
                  'b': data['branch'][k, 4],
                  'rateA': data['branch'][k, 5],
                  'rateB': data['branch'][k, 6],
                  'rateC': data['branch'][k, 7],
                  'ratio': data['branch'][k, 8],
                  'angle': data['branch'][k, 9],
                  'angle_min': data['branch'][k, 11],
                  'angle_max': data['branch'][k, 12]}
        if branch['r'] < self.branch_rmin:
            if kwargs.get('verbose', 0):
                print("Warning: branch (%i:%i->%i) with small resistance; enforcing min. resistance" % (k, branch['from'], branch['to']))
            branch['r'] = self.branch_rmin
        if not self.line_constraints:
            # rateA == 0.0 means "unconstrained" downstream.
            branch['rateA'] = 0.0
        if not self.pad_constraints:
            branch['angle_min'] = -360.0
            branch['angle_max'] = 360.0
        elif (branch['angle_min'] > -360.0 and branch['angle_min'] <= -180.0) or \
             (branch['angle_max'] < 360.0 and branch['angle_max'] >= 180.0) or \
             (branch['angle_min'] == 0.0 and branch['angle_max'] == 0.0):
            # Phase-angle difference bounds outside (-180, 180), or the
            # degenerate (0, 0) pair, are not supported; drop them.
            if kwargs.get('verbose', 0):
                print("Warning: branch (%i:%i->%i) with unsupported phase angle diff. constraint; dropping constraint" % (k, branch['from'], branch['to']))
            branch['angle_min'] = -360.0
            branch['angle_max'] = 360.0
        self.branches.append(branch)

    # gen cost (active power); model 1 = piecewise linear, unsupported
    for i, k in enumerate(active_generators):
        gencost = {'model': int(data['gencost'][k, 0]),
                   'startup': data['gencost'][k, 1],
                   'shutdown': data['gencost'][k, 2],
                   'ncoef': int(data['gencost'][k, 3]),
                   'coef': data['gencost'][k, 4:].T}
        if gencost['model'] == 1:
            raise TypeError("Piecewise linear cost functions are not supported.")
        self.generators[i]['Pcost'] = gencost
    # A second set of gencost rows, if present, holds reactive power costs.
    if data['gencost'].shape[0] == 2 * data['gen'].shape[0]:
        offset = data['gen'].shape[0]
        for i, k in enumerate(active_generators):
            gencost = {'model': int(data['gencost'][offset + k, 0]),
                       'startup': data['gencost'][offset + k, 1],
                       'shutdown': data['gencost'][offset + k, 2],
                       'ncoef': int(data['gencost'][offset + k, 3]),
                       'coef': data['gencost'][offset + k, 4:].T}
            self.generators[i]['Qcost'] = gencost

    ### Compute bus admittance matrix and connection matrices
    j = complex(0.0, 1.0)
    r = matrix([branch['r'] for branch in self.branches])
    b = matrix([branch['b'] for branch in self.branches])
    x = matrix([branch['x'] for branch in self.branches])
    Gs = matrix([bus['Gs'] for bus in self.busses])
    Bs = matrix([bus['Bs'] for bus in self.busses])
    # Tap ratio 0.0 in the data means "no transformer" (ratio 1.0).
    tap = matrix([1.0 if branch['ratio'] == 0.0 else branch['ratio']
                  for branch in self.branches])
    angle = matrix([branch['angle'] for branch in self.branches])
    tap = mul(tap, exp(j * pi / 180. * angle))
    self.tap = tap

    # Standard two-port branch admittances (MATPOWER conventions).
    Ys = div(1.0, r + j * x)
    Ytt = Ys + j * b / 2.0
    Yff = div(Ytt, mul(tap, tap.H.T))
    Yft = -div(Ys, tap.H.T)
    Ytf = -div(Ys, tap)
    Ysh = (Gs + j * Bs) / self.baseMVA

    self.Ybr = []
    for k in range(self.nbranch):
        self.Ybr.append([[Yff[k], Yft[k]], [Ytf[k], Ytt[k]]])

    # From/to incidence and admittance matrices, then the bus admittance.
    f = matrix([self.bus_id_to_index[branch['from']] for branch in self.branches])
    t = matrix([self.bus_id_to_index[branch['to']] for branch in self.branches])
    Cf = spmatrix(1.0, range(self.nbranch), f, (self.nbranch, self.nbus))
    Ct = spmatrix(1.0, range(self.nbranch), t, (self.nbranch, self.nbus))
    Yf = spmatrix(matrix([Yff, Yft]), 2 * list(range(self.nbranch)),
                  matrix([f, t]), (self.nbranch, self.nbus))
    Yt = spmatrix(matrix([Ytf, Ytt]), 2 * list(range(self.nbranch)),
                  matrix([f, t]), (self.nbranch, self.nbus))
    Ybus = Cf.T * Yf + Ct.T * Yt + \
        spmatrix(Ysh, range(self.nbus), range(self.nbus), (self.nbus, self.nbus))

    self.Cf = Cf
    self.Ct = Ct
    self.Yf = Yf
    self.Yt = Yt
    self.Ybus = Ybus

    if kwargs.get('verbose', 0):
        print("Building cone LP.")
    self._build_conelp()

    # Optional post-processing passes on the cone LP.
    if self.conversion:
        if kwargs.get('verbose', 0):
            if kwargs.get('coupling', 'full') == 'full':
                print("Applying chordal conversion to cone LP.")
            else:
                print("Applying partial chordal conversion to cone LP.")
        _conelp_convert(self, **kwargs)

    if self.scale:
        if kwargs.get('verbose', 0):
            print("Scaling cone LP.")
        _conelp_scale(self, **kwargs)

    if self.to_real:
        if kwargs.get('verbose', 0):
            print("Converting to real-valued cone LP.")
        _conelp_to_real(self, **kwargs)
    return
def psdcompletion(A, reordered=True, **kwargs):
    """Maximum determinant positive semidefinite matrix completion.

    The routine takes a cspmatrix :math:`A` and returns the maximum
    determinant positive semidefinite matrix completion :math:`X` as a
    dense matrix, i.e.,

    .. math::
         P( X ) = A

    :param A:         :py:class:`cspmatrix`
    :param reordered: boolean; if False, return X permuted back to the
                      original ordering via ``symb.ip``
    """
    assert isinstance(A, cspmatrix) and A.is_factor is False, "A must be a cspmatrix"
    tol = kwargs.get('tol', 1e-15)

    X = matrix(A.spmatrix(reordered=True, symmetric=True))

    symb = A.symb
    n = symb.n
    snptr = symb.snptr
    sncolptr = symb.sncolptr
    snrowidx = symb.snrowidx

    # Visit supernodes in reverse (descending) order.
    for k in range(symb.Nsn - 1, -1, -1):
        nn = snptr[k + 1] - snptr[k]
        beta = snrowidx[sncolptr[k]:sncolptr[k + 1]]
        nj = len(beta)
        if nj - nn == 0:
            continue
        alpha = beta[nn:]
        nu = beta[:nn]
        eta = matrix([matrix(range(beta[kk] + 1, beta[kk + 1]))
                      for kk in range(nj - 1)] +
                     [matrix(range(beta[-1] + 1, n))])

        try:
            # Try Cholesky factorization first.
            Xaa = X[alpha, alpha]
            lapack.potrf(Xaa)
            Xan = X[alpha, nu]
            lapack.trtrs(Xaa, Xan, trans='N')
            XeaT = X[eta, alpha].T
            lapack.trtrs(Xaa, XeaT, trans='N')

            # Compute update
            tmp = XeaT.T * Xan
        except ArithmeticError:
            # BUG FIX: was a bare `except:` that also swallowed
            # KeyboardInterrupt and programming errors.  cvxopt's lapack
            # routines raise ArithmeticError when the factorization fails,
            # in which case we fall back to an EVD: Xaa = Z*diag(w)*Z.T.
            Xaa = X[alpha, alpha]
            w = matrix(0.0, (Xaa.size[0], 1))
            Z = matrix(0.0, Xaa.size)
            lapack.syevr(Xaa, w, jobz='V', range='A', uplo='L', Z=Z)

            # Pseudo-inverse: Xp = pinv(Xaa), dropping eigenvalues below
            # lambda_max * tol.
            lambda_max = max(w)
            Xp = Z * spmatrix(
                [1.0 / wi if wi > lambda_max * tol else 0.0 for wi in w],
                range(len(w)), range(len(w))) * Z.T

            # Compute update
            tmp = X[eta, alpha] * Xp * X[alpha, nu]

        X[eta, nu] = tmp
        X[nu, eta] = tmp.T

    if reordered:
        return X
    else:
        return X[symb.ip, symb.ip]
def edmcompletion(A, reordered=True, **kwargs):
    """Euclidean distance matrix completion.

    The routine takes an EDM-completable cspmatrix :math:`A` and returns
    a dense EDM :math:`X` that satisfies

    .. math::
         P( X ) = A

    :param A:         :py:class:`cspmatrix`
    :param reordered: boolean; if False, return X permuted back to the
                      original ordering via ``symb.ip``
    """
    assert isinstance(A, cspmatrix) and A.is_factor is False, "A must be a cspmatrix"
    tol = kwargs.get('tol', 1e-15)

    X = matrix(A.spmatrix(reordered=True, symmetric=True))

    symb = A.symb
    n = symb.n
    snptr = symb.snptr
    sncolptr = symb.sncolptr
    snrowidx = symb.snrowidx

    # Visit supernodes in reverse (descending) order.
    for k in range(symb.Nsn - 1, -1, -1):
        nn = snptr[k + 1] - snptr[k]
        beta = snrowidx[sncolptr[k]:sncolptr[k + 1]]
        nj = len(beta)
        if nj - nn == 0:
            continue
        alpha = beta[nn:]
        nu = beta[:nn]
        eta = matrix([matrix(range(beta[kk] + 1, beta[kk + 1]))
                      for kk in range(nj - 1)] +
                     [matrix(range(beta[-1] + 1, n))])
        ne = len(eta)

        # Compute Yaa, Yan, Yea, Ynn, Yee (Gram-like matrices obtained
        # from the squared distances via the usual EDM "double centering"
        # with respect to the anchor point alpha[0]).
        Yaa = -0.5 * X[alpha, alpha] - 0.5 * X[alpha[0], alpha[0]]
        blas.syr2(X[alpha, alpha[0]], matrix(1.0, (nj - nn, 1)), Yaa, alpha=0.5)

        Ynn = -0.5 * X[nu, nu] - 0.5 * X[alpha[0], alpha[0]]
        blas.syr2(X[nu, alpha[0]], matrix(1.0, (nn, 1)), Ynn, alpha=0.5)

        Yee = -0.5 * X[eta, eta] - 0.5 * X[alpha[0], alpha[0]]
        blas.syr2(X[eta, alpha[0]], matrix(1.0, (ne, 1)), Yee, alpha=0.5)

        Yan = -0.5 * X[alpha, nu] - 0.5 * X[alpha[0], alpha[0]]
        Yan += 0.5 * matrix(1.0, (nj - nn, 1)) * X[alpha[0], nu]
        Yan += 0.5 * X[alpha, alpha[0]] * matrix(1.0, (1, nn))

        Yea = -0.5 * X[eta, alpha] - 0.5 * X[alpha[0], alpha[0]]
        Yea += 0.5 * matrix(1.0, (ne, 1)) * X[alpha[0], alpha]
        Yea += 0.5 * X[eta, alpha[0]] * matrix(1.0, (1, nj - nn))

        # EVD: Yaa = Z*diag(w)*Z.T
        w = matrix(0.0, (Yaa.size[0], 1))
        Z = matrix(0.0, Yaa.size)
        lapack.syevr(Yaa, w, jobz='V', range='A', uplo='L', Z=Z)

        # Pseudo-inverse: Yp = pinv(Yaa), dropping eigenvalues below
        # lambda_max * tol.
        lambda_max = max(w)
        Yp = Z * spmatrix(
            [1.0 / wi if wi > lambda_max * tol else 0.0 for wi in w],
            range(len(w)), range(len(w))) * Z.T

        # Update the missing distances (diagonals of Ynn/Yee give the
        # squared norms of the corresponding points).
        tmp = -2.0 * Yea * Yp * Yan \
            + matrix(1.0, (ne, 1)) * Ynn[::nn + 1].T \
            + Yee[::ne + 1] * matrix(1.0, (1, nn))
        X[eta, nu] = tmp
        X[nu, eta] = tmp.T

    if reordered:
        return X
    else:
        return X[symb.ip, symb.ip]