def __init__(self, dim, n_labels, mode=1):
    self.mode = mode
    self.dim = dim
    self.n_labels = n_labels
    if mode == 1:
        self.P = spmatrix(1, range(dim * n_labels), range(dim * n_labels))
        glast = matrix(np.ones((1, dim * n_labels)))
        self.G = sparse([-self.P, self.P, glast])
        h1 = np.zeros(dim * n_labels)
        h2 = np.ones(dim * n_labels)
        self.h = matrix(np.concatenate([h1, h2, [dim]]))
    elif mode == 2:
        self.P = spmatrix(1, range(n_labels), range(n_labels))
        glast = matrix(np.ones((1, n_labels)))
        self.G = sparse([-self.P, self.P, glast])
        h1 = np.zeros(n_labels)
        h2 = np.ones(n_labels)
        self.h = matrix(np.concatenate([h1, h2, [1]]))
    elif mode == 3:
        self.P = spmatrix(1, range(n_labels), range(n_labels))
        self.A = matrix(np.ones((1, n_labels)))
        self.G = sparse([-self.P, self.P])
        h1 = np.zeros(n_labels)
        h2 = np.ones(n_labels)
        self.h = matrix(np.concatenate([h1, h2]))
        self.b = matrix(np.ones(1))
def get_cvxopt_inputs(MM, constraints=None, slack=0, sparsemat=True, filter='even'):
    """
    if provided, constraints should be a list of sympy polynomials that should be 0.
    @params
    constraints: a list of sympy expressions representing the constraints in the same
    """
    # Many optionals for what c might be, not yet determined really
    if filter is None:
        c = matrix(np.ones((MM.num_matrix_monos, 1)))
    else:
        c = matrix([monomial_filter(yi, filter='even') for yi in MM.matrix_monos], tc='d')
    Anp, bnp = MM.get_Ab(constraints)
    #_, residual, _, _ = scipy.linalg.lstsq(Anp, bnp)
    b = matrix(bnp)
    indicatorlist = MM.get_LMI_coefficients()
    Glnp, hlnp = MM.get_Ab_slack(constraints, abs_slack=slack, rel_slack=slack)
    hl = matrix(hlnp)
    if sparsemat:
        G = [sparse(indicatorlist).trans()]
        A = sparse(matrix(Anp))
        Gl = sparse(matrix(Glnp))
    else:
        G = [matrix(indicatorlist).trans()]
        A = matrix(Anp)
        Gl = matrix(Glnp)
    num_row_monos = len(MM.row_monos)
    h = [matrix(np.zeros((num_row_monos, num_row_monos)))]
    return {'c': c, 'G': G, 'h': h, 'A': A, 'b': b, 'Gl': Gl, 'hl': hl}
def F(x=None, z=None):
    # Case 1
    if (x is None and z is None):
        x0 = opt.matrix(np.ones((n, 1))) * 1.0
        return (len(fs), x0)
    # Case 2
    elif (x is not None and z is None):
        in_domain = map(lambda y: y(x), inds)
        if (reduce(lambda v, w: v and w, in_domain)):
            f = opt.matrix(0.0, (len(fs), 1))
            for i in range(0, len(fs), 1):
                f[i] = fs[i](x)
            Df = opt.spmatrix(0.0, [], [], (0, n))
            for i in range(0, len(grads), 1):
                Df = opt.sparse([Df, grads[i](x).T])
            return (f, Df)
        else:
            return (None, None)
    # Case 3
    else:
        f = opt.matrix(0.0, (len(fs), 1))
        for i in range(0, len(fs), 1):
            f[i] = fs[i](x)
        Df = opt.spmatrix(0.0, [], [], (0, n))
        for i in range(0, len(grads), 1):
            Df = opt.sparse([Df, grads[i](x).T])
        H = opt.spmatrix(0.0, [], [], (n, n))
        for i in range(0, len(hess), 1):
            H = H + z[i] * hess[i](x)
        return (f, Df, H)
def test_pcg():
    'Test function for projected CG.'
    n = 10
    m = 4
    H = sprandsym(n, n)
    A = sp_rand(m, n, 0.9)
    x0 = matrix(1, (n, 1))
    b = A * x0
    c = matrix(1.0, (n, 1))
    x_pcg = pcg(H, c, A, b, x0)
    Lhs1 = sparse([H, A])
    Lhs2 = sparse([A.T, spmatrix([], [], [], (m, m))])
    Lhs = sparse([[Lhs1], [Lhs2]])
    rhs = -matrix([c, spmatrix([], [], [], (m, 1))])
    rhs2 = copy(rhs)
    linsolve(Lhs, rhs)
    #print rhs[:10]
    sol = solvers.qp(H, c, A=A, b=b)
    print ' cvxopt qp| pCG'
    print matrix([[sol['x']], [x_pcg]])
    print 'Dual variables:'
    print sol['y']
    print 'KKT equation residuals:'
    print H * sol['x'] + c + A.T * sol['y']
def gen_equalities(dm_info, k_additive=0):
    """
    Input: dm_info (criteria functions, Shapley values structure, II values structure,
           Necessity criteria, Sufficiency criteria), k-additivity
    Generate matrix rows for different information classes
    Output: Matrix and a column, Aeq*v=beq
    """
    dim = len(dm_info['criteria_functions'])
    Alist = [equalities(2**dim)]
    blist = [0., 1]
    if 'Sh_values' in dm_info:
        Alist.extend([shapley(2**dim, p[0]) for p in dm_info['Sh_values']])
        blist.extend([p[1] for p in dm_info['Sh_values']])
    Aeq = cvx.sparse(Alist)
    beq = cvx.matrix(blist)
    if 'necessity' in dm_info:
        Aeq = cvx.sparse([Aeq, necessity(2**dim, dm_info['necessity'])])
        beq = cvx.matrix(
            [beq, cvx.matrix(0., (size(Aeq)[0] - size(beq)[0], 1))])
    if 'sufficiency' in dm_info:
        Aeq = cvx.sparse([Aeq, sufficiency(2**dim, dm_info['sufficiency'])])
        beq = cvx.matrix(
            [beq, cvx.matrix(1., (size(Aeq)[0] - size(beq)[0], 1))])
    if k_additive:
        Aeq = cvx.sparse([Aeq, k_additivity(2**dim, k_additive)])
        beq = cvx.matrix(
            [beq, cvx.matrix(0., (size(Aeq)[0] - size(beq)[0], 1))])
    return Aeq, beq
def _get_lineq(self):
    'Extract linear equation coefficients from QP and a partition'
    qp = self.QP
    # Concatenate active constraints with eq constraints
    Aeq = sparse([qp.Aeq, -qp.A[self.cAL, :], qp.A[self.cAU, :]])
    beq = matrix([qp.beq, -qp.bl[self.cAL], qp.bu[self.cAU]])
    beq -= Aeq[:, self.AL] * qp.l[self.AL] + Aeq[:, self.AU] * qp.u[self.AU]
    Aeq = Aeq[:, self.I]
    # [H(I,I);Aeq(:,I)]
    Lhs = sparse([qp.H[self.I, self.I], Aeq])
    row_Aeq, col_Aeq = Aeq.size
    # [Aeq(:,I)';zeros]
    if row_Aeq != 0:
        AeqT0 = sparse(
            [Aeq.T, spmatrix([0], [row_Aeq - 1], [row_Aeq - 1])])
    else:
        AeqT0 = Aeq.T
    # Concatenate by column
    Lhs = sparse([[Lhs], [AeqT0]])
    # Concatenate to yield rhs
    rhs = sparse(
        matrix([
            -qp.c[self.I] - qp.H[self.I, self.AL] * qp.l[self.AL]
            - qp.H[self.I, self.AU] * qp.u[self.AU],
            beq
        ]))
    x0 = matrix(
        [self.x[self.I], self.y, self.czl[self.cAL], self.czu[self.cAU]])
    return (Lhs, rhs, x0)
def _get_lineq_c(self):
    'Extract linear equation coefficients from QP and a partition'
    qp = self.QP
    # Inactive but not free
    self.realI = setdiff(self.I, self.F)
    # Put in the order of [realI,F]
    pI = self.realI + self.F
    # Concatenate active constraints with eq constraints
    Aeq = sparse([qp.Aeq, -qp.A[self.cAL, :], qp.A[self.cAU, :]])
    beq = matrix([qp.beq, -qp.bl[self.cAL], qp.bu[self.cAU]])
    beq -= Aeq[:, self.AL] * qp.l[self.AL] + Aeq[:, self.AU] * qp.u[self.AU]
    Aeq = Aeq[:, pI]
    # [H(I,I);Aeq(:,I)]
    Lhs = sparse([qp.H[pI, pI], Aeq])
    row_Aeq, col_Aeq = Aeq.size
    # [Aeq(:,I)';zeros]
    if row_Aeq != 0:
        AeqT0 = sparse([Aeq.T, spmatrix([0], [row_Aeq - 1], [row_Aeq - 1])])
    else:
        AeqT0 = Aeq.T
    # Concatenate by column
    Lhs = sparse([[Lhs], [AeqT0]])
    # Concatenate to yield rhs
    rhs = sparse(matrix([-qp.c[pI] - qp.H[pI, self.AL] * qp.l[self.AL]
                         - qp.H[pI, self.AU] * qp.u[self.AU], beq]))
    x0 = matrix([self.x[pI], self.y, self.czl[self.cAL], self.czu[self.cAU]])
    return (Lhs, rhs, x0)
def F(x=None, z=None):
    # Case 1
    if x is None and z is None:
        x0 = opt.matrix(1., (n, 1))
        return len(fs), x0
    # Case 2
    elif x is not None and z is None:
        if all(list([y(x) for y in inds])):
            f = opt.matrix(0.0, (len(fs), 1))
            for i in range(0, len(fs), 1):
                f[i] = fs[i](x)
            Df = opt.spmatrix(0.0, [], [], (0, n))
            for i in range(0, len(grads), 1):
                Df = opt.sparse([Df, grads[i](x).T])
            return f, Df
        else:
            return None, None
    # Case 3
    else:
        f = opt.matrix(0.0, (len(fs), 1))
        for i in range(0, len(fs), 1):
            f[i] = fs[i](x)
        Df = opt.spmatrix(0.0, [], [], (0, n))
        for i in range(0, len(grads), 1):
            Df = opt.sparse([Df, grads[i](x).T])
        H = opt.spmatrix(0.0, [], [], (n, n))
        for i in range(0, len(hess), 1):
            H = H + z[i] * hess[i](x)
        return f, Df, H
def solve(self):
    '''
    "Solves" minimization problem using epigraph linear program formulation.
    '''
    A, y, x_dim, y_dim = self.A, self.y, self.x_dim, self.y_dim
    A = cvxopt.sparse(cvxopt.matrix(A))
    I_t = cvxopt.spdiag(cvxopt.matrix(ones(y_dim)))
    I_x = cvxopt.spdiag(cvxopt.matrix(ones(x_dim)))
    Z = cvxopt.spmatrix([], [], [], size=(x_dim, y_dim))
    A = cvxopt.sparse([
        [A, -A, -I_x],
        [-I_t, -I_t, Z]
    ])  # Sparse block matrix in COLUMN major order...for some reason
    y = cvxopt.matrix(y)
    z = cvxopt.matrix(zeros((x_dim, 1)))
    one = cvxopt.matrix(ones((y_dim, 1)))
    y = cvxopt.matrix([y, -y, z])
    c = cvxopt.matrix([z, one])
    result = cvxopt.solvers.lp(c, A, y, solver='glpk')
    self.result = result
    return result['status']
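# A minimal, self-contained sketch of the same epigraph trick, written as a
# standalone function for illustration (the name and data layout here are
# assumptions, not part of the class above): minimize ||A*x - y||_1 over x >= 0
# by introducing t with |A*x - y| <= t and minimizing sum(t).
import numpy as np
import cvxopt
from cvxopt import solvers

def l1_fit_epigraph_sketch(A_np, y_np):
    m, n = A_np.shape                                   # y_dim, x_dim above
    A = cvxopt.sparse(cvxopt.matrix(A_np.astype(float)))
    I_t = cvxopt.spdiag(cvxopt.matrix(np.ones(m)))
    I_x = cvxopt.spdiag(cvxopt.matrix(np.ones(n)))
    Z = cvxopt.spmatrix([], [], [], size=(n, m))
    # block matrix given column by column, as in the method above
    G = cvxopt.sparse([[A, -A, -I_x], [-I_t, -I_t, Z]])
    y = cvxopt.matrix(y_np.astype(float).reshape(-1, 1))
    h = cvxopt.matrix([y, -y, cvxopt.matrix(np.zeros((n, 1)))])
    c = cvxopt.matrix([cvxopt.matrix(np.zeros((n, 1))),
                       cvxopt.matrix(np.ones((m, 1)))])
    sol = solvers.lp(c, G, h)      # default conelp solver; 'glpk' is optional
    return np.array(sol['x'][:n]).ravel()

# e.g. l1_fit_epigraph_sketch(np.random.randn(20, 5), np.random.randn(20))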
def build_derivative_coef_matrix_and_column_vector(self):
    block_heights = Utils.calculate_block_heights(self.no_points_per_axis)
    adjacent_offsets = Utils.calculate_adjacent_sub_block_offsets(
        self.no_vars, self.no_points_per_axis)
    matrices_list = []
    for index, block_height in enumerate(block_heights):
        distance = Utils.calculate_distance_between_non_zero_entries(
            index, self.no_points_per_axis)
        no_sub_blocks = Utils.calculate_number_of_sub_blocks(
            index, self.no_points_per_axis)
        matrices_list.append(
            Utils.build_matrix_for_partial_derivative(
                adjacent_offsets[index], block_height, no_sub_blocks, distance))
    upper_half_matrix = sparse(matrices_list)
    coef_matrix = sparse([upper_half_matrix, -upper_half_matrix])
    (l_b_constraints, u_b_constraints) = self.build_constraints_from_derivative_info()
    flat_l_b_constraints = matrix(l_b_constraints)
    flat_u_b_constraints = matrix(u_b_constraints)
    # l <= x constraint will be rewritten as -x <= -l.
    column_vector = matrix([flat_u_b_constraints, -flat_l_b_constraints])
    return (coef_matrix, column_vector)
def setup_input_matrix(self):
    # constructs B = Bin - Bout
    # loop over all sectors
    self.Bout = sparse(matrix(0.0, (self.ns, self.nu)))
    self.B = sparse(matrix(0.0, (self.ns, self.nu)))
    for i in range(self.ns):
        Bout_one_row = sparse(matrix(0.0, (1, self.nu)))
        Bin_one_row = sparse(matrix(0.0, (1, self.nu)))
        # current sector number
        s = i + 1
        # start index of u_{s_i} in the current row of Bout
        Bout_start_i = (s) * self.ns - self.ns - 1
        # get Neighbors
        N = self.getNeighborSectors(s)
        # active indices for the row in Bout corresponding to current sector
        Bout_active_i = [Bout_start_i + j for j in N]
        # active indices for the row in Bin corresponding to current sector
        Bin_active_i = [j * self.ns - self.ns - 1 + s for j in N]
        # generate row in Bout
        Bout_one_row[Bout_active_i] = 1.0
        Bin_one_row[Bin_active_i] = 1.0
        # update Bout matrix
        self.Bout[i, :] = Bout_one_row
        # update B matrix, row-by-row
        self.B[i, :] = Bin_one_row - Bout_one_row
        #print Bout_active_i
        #print Bin_active_i
    self.B
    return
def _prepare_and_solve_primal(self, X, y):
    # Solution vector [{w},w_0,e_1,...e_i]
    # See:
    # http://cvxopt.org/userguide/matrices.html#sparse-matrices
    # http://cvxopt.org/userguide/coneprog.html#cvxopt.solvers.qp
    objects, features = X.shape
    opt_space_dim = features + 1 + objects
    P = spmatrix(1., range(features), range(features),
                 (opt_space_dim, opt_space_dim))
    q = matrix(
        spmatrix(self.C / objects, range(features + 1, opt_space_dim),
                 [0] * objects, (opt_space_dim, 1)))
    y_column = -y.reshape(-1, 1)
    # G is also a block matrix
    Gx = sparse([[matrix(X * y_column)], [matrix(y_column)],
                 [spmatrix(-1., range(objects), range(objects))]])
    Ge = spmatrix(-1., range(objects), range(features + 1, opt_space_dim),
                  (objects, opt_space_dim))
    G = sparse([Gx, Ge])
    hx = spmatrix(-1., range(objects), [0] * objects)
    he = spmatrix(0, range(objects), [0] * objects)
    h = matrix(sparse([hx, he]))
    return qp(P, q, G, h)
def test_basic_complex(self):
    import cvxopt
    a = cvxopt.matrix([1, -2, 3])
    b = cvxopt.matrix([1.0, -2.0, 3.0])
    c = cvxopt.matrix([1.0 + 2j, 1 - 2j, 0 + 1j])
    d = cvxopt.spmatrix(
        [complex(1.0, 0.0), complex(0.0, 1.0), complex(2.0, -1.0)],
        [0, 1, 3], [0, 2, 3], (4, 4))
    e = cvxopt.spmatrix(
        [complex(1.0, 0.0), complex(0.0, 1.0), complex(2.0, -1.0)],
        [2, 3, 3], [1, 2, 3], (4, 4))
    self.assertAlmostEqualLists(list(cvxopt.div(b, c)),
                                [0.2 - 0.4j, -0.4 - 0.8j, -3j])
    self.assertAlmostEqualLists(list(cvxopt.div(b, 2.0j)), [-0.5j, 1j, -1.5j])
    self.assertAlmostEqualLists(list(cvxopt.div(a, c)),
                                [0.2 - 0.4j, -0.4 - 0.8j, -3j])
    self.assertAlmostEqualLists(list(cvxopt.div(c, a)),
                                [(1 + 2j), (-0.5 + 1j), 0.3333333333333333j])
    self.assertAlmostEqualLists(list(cvxopt.div(c, c)), [1.0, 1.0, 1.0])
    self.assertAlmostEqualLists(list(cvxopt.div(a, 2.0j)), [-0.5j, 1j, -1.5j])
    self.assertAlmostEqualLists(list(cvxopt.div(c, 1.0j)),
                                [2 - 1j, -2 - 1j, 1 + 0j])
    self.assertAlmostEqualLists(list(cvxopt.div(1j, c)),
                                [0.4 + 0.2j, -0.4 + 0.2j, 1 + 0j])
    self.assertTrue(len(d) + len(e) == len(cvxopt.sparse([d, e])))
    self.assertTrue(len(d) + len(e) == len(cvxopt.sparse([[d], [e]])))
def setup_problem(self):
    '''
    Sets up optimization matrices in compact form
        min_X  C.T*X
        s.t.   A*X <= b
    See the implementation details in the implementation notes documents
    '''
    start_t = time.time()
    self.ns = self.Nrows * self.Ncols
    self.nu = self.ns**2
    self.setup_grid_matrix()
    self.setup_input_matrix()
    self.Ad = self.setup_dynamics_constraints()
    Af = self.setup_flow_constraints()
    Ab, bb = self.setup_boundary_constraints()
    self.setup_initial_condition_vector()
    self.get_Xref()
    self.setup_optimization_vector()
    # construct compact form optimization matrices
    self.A = sparse([Af, Ab])
    self.b = sparse([self.X0, bb])
    print "Setup is done in: ", time.time() - start_t, "second(s)"
    return self.A, self.b
def _convert(H, f, A, b, Aeq, beq, lb, ub):
    """ Convert everything to cvxopt-style matrices """
    P = cvxmat(H)
    q = cvxmat(f)
    if Aeq is None:
        A_ = None
    else:
        A_ = cvxmat(Aeq)
    if beq is None:
        b_ = None
    else:
        b_ = cvxmat(beq)
    if lb is None and ub is None:
        if A is None:
            G = None
            h = None
        else:
            G = cvxmat(A)
            h = cvxmat(b)
    else:
        n = len(lb)
        if A is None:
            G = sparse([-speye(n), speye(n)])
            h = cvxmat(np.vstack([-lb, ub]))
        else:
            G = sparse([cvxmat(A), -speye(n), speye(n)])
            h = cvxmat(np.vstack([b, -lb, ub]))
    return P, q, G, h, A_, b_
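# cvxmat and speye are module-level helpers the snippet above relies on but does
# not show; below are plausible minimal definitions (an assumption) plus a tiny
# usage sketch for the box-constrained case with A = b = Aeq = beq = None.
import numpy as np
from cvxopt import matrix as cvxmat, spmatrix, sparse, solvers

def speye(n):
    """Sparse n-by-n identity (assumed helper)."""
    return spmatrix(1.0, range(n), range(n))

H = np.eye(2)
f = np.array([[-1.0], [-1.0]])
lb, ub = np.zeros((2, 1)), np.ones((2, 1))
P, q, G, h, A_, b_ = _convert(H, f, None, None, None, None, lb, ub)
sol = solvers.qp(P, q, G, h)   # minimize 0.5*x'Hx + f'x  s.t.  0 <= x <= 1
print(sol['x'])                # both coordinates end up at the upper bound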
def build_gy(self, dae):
    """Build line Jacobian matrix"""
    if not self.n:
        idx = range(dae.m)
        dae.set_jac(Gy, 1e-6, idx, idx)
        return
    Vn = polar(1.0, dae.y[self.a])
    Vc = mul(dae.y[self.v], Vn)
    Ic = self.Y * Vc
    diagVn = spdiag(Vn)
    diagVc = spdiag(Vc)
    diagIc = spdiag(Ic)
    dS = self.Y * diagVn
    dS = diagVc * conj(dS)
    dS += conj(diagIc) * diagVn
    dR = diagIc
    dR -= self.Y * diagVc
    dR = diagVc.H.T * dR
    self.gy_store = sparse([[dR.imag(), dR.real()], [dS.real(), dS.imag()]])
    # rebuild = False
    return sparse(self.gy_store)
def const_to_matrix(self, value, convert_scalars=False):
    """Convert an arbitrary value into a matrix of type self.target_matrix.

    Args:
        value: The constant to be converted.
        convert_scalars: Should scalars be converted?

    Returns:
        A matrix of type self.target_matrix or a scalar.
    """
    if isinstance(value, (numpy.ndarray, numpy.matrix)):
        return cvxopt.sparse(cvxopt.matrix(value.astype('float64')), tc='d')
    elif isinstance(value, numbers.Number):
        return cvxopt.sparse(cvxopt.matrix(value), tc='d')
    # Convert scipy sparse matrices to coo form first.
    elif sp.issparse(value):
        value = value.tocoo()
        return cvxopt.spmatrix(value.data.tolist(), value.row.tolist(),
                               value.col.tolist(), size=value.shape, tc='d')
    else:  # Lists.
        return cvxopt.sparse(value, tc='d')
def geteq(n, dic):
    'generate the equality constraints.'
    # Equality from the Laplace operator
    AeqL = -Laplace(n, dic)
    h = 1.0 / (n - 1)
    # Equality from the boundary
    AeqB = spmatrix([], [], [], (0, n**2 + 4 * n))
    for col in range(n):
        y = spmatrix([], [], [], (2, n**2 + 4 * n))
        y[0, dic[-1, col]] = 1
        y[0, dic[1, col]] = -1
        y[1, dic[n - 2, col]] = -1
        y[1, dic[n, col]] = 1
        AeqB = sparse([AeqB, y])
    for row in range(n):
        y = spmatrix([], [], [], (2, n**2 + 4 * n))
        y[0, dic[row, -1]] = 1
        y[0, dic[row, 1]] = -1
        y[1, dic[row, n - 2]] = -1
        y[1, dic[row, n]] = 1
        AeqB = sparse([AeqB, y])
    AeqL = sparse([[AeqL], [spmatrix([], [], [], (n**2, n * 4))]])
    AeqB = sparse([[AeqB], [-identity(4 * n, h * 2)]])
    Aeq = sparse([AeqL, AeqB])
    beq = spmatrix([], [], [], (n**2 + 4 * n, 1))
    return (Aeq, beq)
def l1_fit(index, y, beta_d2=1.0, beta_d1=1.0, beta_seasonal=1.0,
           beta_step=5.0, period=12, growth=0.0, step_permissives=None):
    assert isinstance(y, np.ndarray)
    assert isinstance(index, np.ndarray)
    # x must be integer type for seasonality to make sense
    assert index.dtype.kind == 'i'
    n = len(y)
    m = n - 2
    p = period
    ys, y_min, y_max = mu.scale_numpy(y)
    D1 = mu.get_first_derivative_matrix_nes(index)
    D2 = mu.get_second_derivative_matrix_nes(index)
    H = mu.get_step_function_matrix(n)
    T = mu.get_T_matrix(p)
    B = mu.get_B_matrix_nes(index, p)
    Q = B * T
    # define F_matrix from blocks like in paper
    zero = mu.zero_spmatrix
    ident = mu.identity_spmatrix
    gvec = spmatrix(growth, range(m), [0] * m)
    zero_m = spmatrix(0.0, range(m), [0] * m)
    zero_p = spmatrix(0.0, range(p), [0] * p)
    zero_n = spmatrix(0.0, range(n), [0] * n)
    step_reg = mu.get_step_function_reg(n, beta_step, permissives=step_permissives)
    F_matrix = sparse([
        [ident(n), -beta_d1 * D1, -beta_d2 * D2, zero(p, n), zero(n)],
        [Q, zero(m, p - 1), zero(m, p - 1), -beta_seasonal * T, zero(n, p - 1)],
        [H, zero(m, n), zero(m, n), zero(p, n), step_reg]
    ])
    w_vector = sparse([mu.np2spmatrix(ys), gvec, zero_m, zero_p, zero_n])
    solution_vector = np.asarray(l1.l1(matrix(F_matrix), matrix(w_vector))).squeeze()
    # separate
    xbase = solution_vector[0:n]
    s = solution_vector[n:n + p - 1]
    h = solution_vector[n + p - 1:]
    # scale back to original
    if y_max > y_min:
        scaling = y_max - y_min
    else:
        scaling = 1.0
    xbase = xbase * scaling + y_min
    s = s * scaling
    h = h * scaling
    seas = np.asarray(Q * matrix(s)).squeeze()
    steps = np.asarray(H * matrix(h)).squeeze()
    x = xbase + seas + steps
    solution = {'xbase': xbase, 'seas': seas, 'steps': steps,
                'x': x, 'h': h, 's': s}
    return solution
def optimize(obj):
    # constraints
    # The equality constraint ensures the required amount of energy is delivered
    A1 = matrix(0.0, (n, t * n))
    A2 = matrix(0.0, (n, t * n))
    b = matrix(0.0, (2 * n, 1))
    for j in range(0, n):
        b[j] = float(results[j][1])
        for i in range(0, t):
            A1[n * (t * j + i) + j] = float(timeInterval) / 60  # kWh -> kW
            if i < results[j][0] or i > results[j][2]:
                A2[n * (t * j + i) + j] = 1.0
    A = sparse([A1, A2])
    A3 = spdiag([-1] * (t * n))
    A4 = spdiag([1] * (t * n))
    # The inequality constraint ensures powers are positive and below a maximum
    G = sparse([A3, A4])
    h = []
    for i in range(0, 2 * t * n):
        if i < t * n:
            h.append(0.0)
        else:
            h.append(pMax)
    h = matrix(h)
    # objective
    if obj == 1:
        q = []
        for i in range(0, n):
            for j in range(0, len(baseLoad)):
                q.append(baseLoad[j])
        q = matrix(q)
    elif obj == 2:
        q = matrix([0.0] * (t * n))
    if obj == 3:
        q = []
        for i in range(0, n):
            for j in range(0, len(baseLoad)):
                q.append(baseLoad[j] - pv[j])
        q = matrix(q)
    if obj == 1 or obj == 2 or obj == 3:
        I = spdiag([1] * t)
        P = sparse([[I] * n] * n)
        sol = solvers.qp(P, q, G, h, A, b)
        X = sol['x']
        return X
def const_to_matrix(self, value):
    if isinstance(value, numpy.ndarray):
        # ECHU: temporary workaround when travis fails
        try:
            retval = cvxopt.sparse(cvxopt.matrix(value), tc='d')
        except TypeError:
            retval = cvxopt.sparse(cvxopt.matrix(value.T.tolist()), tc='d')
        return retval
    return cvxopt.sparse(value, tc='d')
def cvxopt_matrix_2_cvxopt_sparse(Q=None, A_ub=None, A_eq=None):
    if Q is not None:
        Q = cvxopt.sparse(Q)
    if A_ub is not None:
        A_ub = cvxopt.sparse(A_ub)
    if A_eq is not None:
        A_eq = cvxopt.sparse(A_eq)
    return (Q, A_ub, A_eq)
def setup_boundary_constraints(self):
    # implements boundary constraints (9) in implementation notes
    # identity matrix
    n = (self.nu + self.ns) * self.Tp
    I = spmatrix(1.0, range(n), range(n))
    A_boundary = sparse([I, -1.0 * I])
    b = sparse([matrix(1.0, (n, 1)), matrix(0.0, (n, 1))])
    return A_boundary, b
def __init__(
    self,
    dim,
    const_matrix_ineq=None,
    const_vector_ineq=None,
    const_matrix_eq=None,
    const_vector_eq=None,
    solver_type="cvxopt",
    scipy_solver="revised simplex",
    sparse_solver=False,
):
    self.dim = dim
    self.solver_type = solver_type
    if not (solver_type == "cvxopt" or solver_type == "scipy"):
        raise TypeError("Wrong solver type")
    if solver_type == "cvxopt":
        solvers.options["show_progress"] = False
    else:
        self.scipy_solver = scipy_solver
    if sparse_solver and solver_type == "scipy":
        raise TypeError("scipy solver cannot handle sparse matrices.")
    if const_matrix_ineq is not None and const_vector_ineq is not None:
        num_ineq_constraints, dim_ineq_constraints = const_matrix_ineq.shape
        if not dim_ineq_constraints == self.dim:
            raise ValueError(
                "Dimension of the inequality constraints does not match the dimensionality of the problem."
            )
        self.G = const_matrix_ineq
        self.h = const_vector_ineq
        if solver_type == "cvxopt":
            self.G = matrix(self.G, (num_ineq_constraints, dim_ineq_constraints))
            if sparse_solver:
                self.G = sparse(self.G)
            self.h = matrix(self.h, (num_ineq_constraints, 1))
    else:
        self.G = None
        self.h = None
    if const_matrix_eq is not None and const_vector_eq is not None:
        num_eq_constraints, dim_eq_constraints = const_matrix_eq.shape
        if not (dim_eq_constraints == self.dim):
            raise ValueError(
                "Dimension of the equality constraints does not match the dimensionality of the problem."
            )
        self.A = const_matrix_eq
        self.b = const_vector_eq
        if solver_type == "cvxopt":
            self.A = matrix(self.A, (num_eq_constraints, dim_eq_constraints))
            self.b = matrix(self.b, (num_eq_constraints, 1), "d")
            if sparse_solver:
                self.A = sparse(self.A)
    else:
        self.A = None
        self.b = None
def clique_hist(P, file=None):
    if not P.ischordal:
        raise TypeError("Nonchordal")
    if file == None:
        plt.ion()
    else:
        plt.ioff()
    V = +P.V
    p = chompack.maxcardsearch(V)
    #Vc,n = chompack.embed(V,p)
    symb = chompack.symbolic(V, p)
    #D = chompack.info(Vc); N = len(D)
    N = symb.Nsn
    #Ns = [len(v['S']) for v in D]; Nu = [len(v['U']) for v in D]
    #Nw = [len(v['U']) + len(v['S']) for v in D]
    Ns = [len(v) for v in symb.supernodes()]
    Nu = [len(v) for v in symb.separators()]
    Nw = [len(v) for v in symb.cliques()]
    f = plt.figure()
    f.clf()
    f.text(0.58, 0.40, "Number of cliques: %i" % (len(Nw)))
    f.text(0.61, 0.40 - 1 * 0.07, "$\sum | W_i | = %i$" % (sum(Nw)))
    f.text(0.61, 0.40 - 2 * 0.07, "$\sum | V_i | = %i$" % (sum(Ns)))
    f.text(0.61, 0.40 - 3 * 0.07, "$\sum | U_i | = %i$" % (sum(Nu)))
    f.text(0.61, 0.40 - 4 * 0.07, "$\max_i\,| W_i | = %i$" % (max(Nw)))
    plt.subplot(221)
    Nmax = max(Nw)
    y = matrix(0, (Nmax, 1))
    for n in Nw:
        y[n - 1] += 1
    y = sparse(y)
    plt.stem(y.I + 1, y.V, 'k')
    plt.xlim(0, Nmax + 1)
    plt.title("Cliques")
    Nmax = max(Nu)
    y = matrix(0, (Nmax, 1))
    if Nmax > 0:
        plt.subplot(222)
        for n in Nu:
            y[n - 1] += 1
        y = sparse(y)
        plt.stem(y.I + 1, y.V, 'k')
        plt.xlim(0, Nmax + 1)
        plt.title("Separators")
    plt.subplot(223)
    Nmax = max(Ns)
    y = matrix(0, (Nmax, 1))
    for n in Ns:
        y[n - 1] += 1
    y = sparse(y)
    plt.stem(y.I + 1, y.V, 'k')
    plt.xlim(0, Nmax + 1)
    plt.title("Residuals")
    if file:
        plt.savefig(file)
def run(self, lam=10.0, mu=0.0, eps=0.0, s0_val=0.001):
    G_tmp, h_tmp = get_G_h(self.var_No)
    h_eps = matrix(0.0, (self.G_No, 1))
    G_x = []
    G_i = []
    G_j = []
    for G_name in self.SFG:
        G = self.SFG.node[G_name]
        if G['type'] == 'G':
            g_cnt = G['cnt']
            h_eps[g_cnt] = G['intensity'] + eps
            for I_name in self.SFG[G_name]:
                i_cnt = self.SFG.node[I_name]['cnt']
                G_x.append(1.0)
                G_i.append(g_cnt)
                G_j.append(i_cnt)
            G_x.append(-1.0)
            G_i.append(g_cnt)
            G_j.append(self.GI_No + self.M_No + g_cnt)
    G_tmp2 = spmatrix(G_x, G_i, G_j, size=(self.G_No, self.var_No))
    G = sparse([G_tmp, G_tmp2])
    h = matrix([h_tmp, h_eps])
    A_tmp, b = get_A_b(self.SFG, self.M_No, self.I_No, self.GI_No)
    A_eps = spmatrix([], [], [], (self.I_No, self.G_No))
    A = sparse([[A_tmp], [A_eps]])
    x0 = get_initvals(self.var_No)
    x0['s'] = matrix(s0_val, size=h.size)
    c = matrix([
        matrix(-1.0, size=(self.GI_No, 1)),
        matrix(mu, size=(self.M_No, 1)),
        matrix((1.0 + lam), size=(self.G_No, 1))
    ])
    self.sol = solvers.conelp(c=c, G=G, h=h, A=A, b=b, primalstart=x0)
    Xopt = self.sol['x']
    alphas = []
    # reporting results
    for N_name in self.SFG:
        N = self.SFG.node[N_name]
        if N['type'] == 'M':
            N['estimate'] = Xopt[self.GI_No + N['cnt']]
            alphas.append(N.copy())
        if N['type'] == 'G':
            for I_name in self.SFG[N_name]:
                NI = self.SFG.edge[N_name][I_name]
                NI['estimate'] = Xopt[NI['cnt']]
            g_cnt = self.GI_No + self.M_No + N['cnt']
            N['relaxation'] = Xopt[g_cnt]
    # fit error: evaluation of the cost function at the minimizer
    error = self.get_mean_square_error()
    return alphas, error, self.sol['status']
def getYklm(yk):
    # generates YY_k according to Molzahn dissertation (2.2a) and (2.2b)
    Yk = cv.sparse(0.5 * cv.matrix([
        [(yk + yk.trans()).real(), (yk - yk.trans()).imag()],
        [(yk.trans() - yk).imag(), (yk + yk.trans()).real()]
    ]))
    Yk_bar = cv.sparse(-0.5 * cv.matrix([
        [(yk + yk.trans()).imag(), (yk.trans() - yk).real()],
        [(yk - yk.trans()).real(), (yk + yk.trans()).imag()]
    ]))
    return Yk, Yk_bar
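# Hedged usage sketch (toy 2-bus admittance-like matrix, purely illustrative):
# yk must be a complex cvxopt matrix; the helper stacks its real and imaginary
# parts into the block form referenced from the dissertation above.
import cvxopt as cv
yk = cv.matrix([[0.0 + 5.0j, -0.0 - 5.0j], [-0.0 - 5.0j, 0.0 + 5.0j]])
Yk, Yk_bar = getYklm(yk)
print(Yk)        # 4 x 4 real sparse matrix
print(Yk_bar)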
def project(A, r, G=None, fA=None):
    'Project r to null(A) by solving the normal equation AA.t v = Ar.'
    m, n = A.size
    if G is None:
        G = spmatrix([1] * n, range(n), range(n))
    Lhs1 = sparse([G, A])
    Lhs2 = sparse([A.T, spmatrix([], [], [], (m, m))])
    Lhs = sparse([[Lhs1], [Lhs2]])
    rhs = matrix([r, spmatrix([], [], [], (m, 1))])
    linsolve(Lhs, rhs)
    return rhs[:n]
def solve_generalized_mom_conelp(MM, constraints, W=None, absslack=1e-4,
                                 totalslack=1e-2, maxiter=1):
    """
    solve using iterative GMM using the cone linear program
    W is a specific weight matrix
    we give generous bound for each constraint, and then harsh bound for g'Wg
    @params
    constraints - E[g(x,X)] = f(x) - phi(X) that are supposed to be 0
    Eggt - the function handle takes current f(x) and estimates
           E[g(x,X)g(x,X)'] \in \Re^{n \times n}, the information matrix
    maxiter - times to run the iterative GMM
    """
    N = len(constraints)
    D = len(MM.matrix_monos)
    sr = len(MM.row_monos)
    A, b = MM.get_Ab(constraints, cvxoptmode=False)
    # augmented constraint matrix introduces slack variables g
    A_aug = sparse(matrix(sc.hstack((A, 1 * sc.eye(N + 1)[:, :-1]))))
    P = spdiag([matrix(0 * np.eye(D)), matrix(np.eye(N))])
    b = matrix(b)
    indicatorlist = MM.get_LMI_coefficients()
    G = sparse(indicatorlist).trans()
    V, I, J = G.V, G.I, G.J
    Gaug = sparse(spmatrix(V, I, J, size=(sr * sr, N + D)))
    h = matrix(np.zeros((sr * sr, 1)))
    dims = {}
    dims['l'] = 0
    dims['q'] = []
    dims['s'] = [sr]
    Bf = MM.get_Bflat()
    R = np.random.rand(len(MM), len(MM))
    #W = R.dot(R.T)
    W = np.eye(len(MM))
    w = Bf.dot(W.flatten())[:, np.newaxis]
    q = matrix(np.vstack((w, np.zeros((N, 1)))))
    #ipdb.set_trace()
    for i in xrange(maxiter):
        w = Bf.dot(W.flatten())[:, np.newaxis]
        sol = cvxsolvers.coneqp(P, q, G=Gaug, h=h, dims=dims, A=A_aug, b=b)
    sol['x'] = sol['x'][0:D]
    return sol
def __init__(self, prob=QP(), Free=[], **kwargs):
    'Initialize the PDASc'
    # Initialize the superclass
    super(PDASc, self).__init__(prob, **kwargs)
    # Add additional data
    self.F = Free
    nf = len(self.F)
    m = self.QP.numeq
    Q1 = sparse([self.QP.H[self.F, self.F], self.QP.Aeq[:, self.F]])
    Q2 = sparse([self.QP.Aeq[:, self.F].T, spmatrix([], [], [], (m, m))])
    self.ipiv = matrix(1, (nf + m, 1))
    self.Q = matrix([[Q1], [Q2]])
    lapack.sytrf(self.Q, self.ipiv)
    # LDL factorization of Q, dense, option 1
    # lapack.sytrf(self.Q,self.ipiv)
    # self.DiagQi = spdiag([self.Q[i,i] for i in range(nf+m)])
    # self.LQ = copy(self.Q)
    # for i in range(nf+m):
    #     self.LQ[i,i] = 1
    #     for j in range(i+1,nf+m):
    #         self.LQ[i,j] = 0
    # LDL factorization of Q, dense, option 2 by calling Matlab library
    # mlab = Matlab()
    # mlab.start()
    filename = 'temp/' + ''.join([
        random.choice(string.ascii_letters + string.digits)
        for n in xrange(32)
    ])
    s = dict()
    s['A'] = sparse([[Q1], [Q2]])
    write(filename + '.mat', s)
    d = '/home/zhh210/workspace/pypdas/numeric/control/'
    # Memory failure when size is large
    # output = mlab.run_func('getldp.m',{'arg1':filename})
    # self.LQ = matrix(output['result']['L'])
    # self.Dinv = matrix(output['result']['Dinv'])
    # self.P = matrix(output['result']['P'])
    # output = mlab.run_func('saveldp.m',{'arg1':filename})
    # Execute shell command
    cmd = 'matlab -r ' + '"' + "saveldp('" + filename + "');quit" + '"'
    print cmd
    os.system(cmd)
    data = read(filename + '.mat')
    self.LQ = matrix(data['L'])
    self.Dinv = data['Dinv']
    self.P = data['P']
def setup_dynamics_constraints(self):
    # implements dynamics constraints (6) in implementation notes
    Tu = sparse(matrix(0.0, (self.ns * self.Tp, self.nu * self.Tp)))
    for t1 in range(1, self.Tp + 1):
        for t2 in range(1, t1 + 1):
            Tu[t1 * self.ns - self.ns:t1 * self.ns,
               t2 * self.nu - self.nu:t2 * self.nu] = self.B
    # identity matrix
    I = spmatrix(1.0, range(self.ns * self.Tp), range(self.ns * self.Tp))
    # self.I = np.eye(self.ns*self.Tp)
    # finally, construct dynamics matrix
    A_dyn = sparse([[I], [-1.0 * Tu]])
    return A_dyn
def solve(x0, targetalt, targetvel, amax, dt=0.5, t_max=12 * 60):
    r = np.array([0, targetalt, targetvel, 0, 0, 0, 0, 9.81])
    t = np.arange(0, t_max + dt, dt)
    # Inequality Constraints
    G = spmatrix(np.zeros([1, 6 * len(t)]), range(6 * len(t)),
                 range(6 * len(t)), (6 * len(t), 8 * len(t)))
    h = np.zeros([6 * len(t), 1])
    for i in range(len(t)):
        G[2 * i:2 * i + 2, 8 * i:8 * i + 2] = -np.eye(2)
        G[2 * len(t) + 4 * i:2 * len(t) + 4 * i + 4,
          8 * i + 6:8 * i + 8] = np.array([[1, 0], [-1, 0], [0, 1], [0, -1]])
        if i < len(amax):
            h[2 * len(t) + 4 * i:2 * len(t) + 4 * i + 4, 0] = np.ones(4) * amax[i]
        else:
            h[2 * len(t) + 4 * i:2 * len(t) + 4 * i + 4, 0] = np.zeros(4)
    h = matrix(h)
    # Equality Constraints
    A = np.array([[1, 0, dt, 0, 0, 0], [0, 1, 0, dt, 0, 0],
                  [0, 0, 1, 0, dt, 0], [0, 0, 0, 1, 0, dt],
                  [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]])
    B = np.array([[0, 0], [0, 0], [0, 0], [0, 0], [1, 0], [0, 1]])
    Aeq = spmatrix(np.zeros([1, 6 * len(t)]), range(6 * len(t)),
                   range(6 * len(t)), (6 * len(t), 8 * len(t)))
    Aeq[:6, :6] = sparse(matrix(np.eye(6)))
    c = sparse(matrix(np.concatenate((-A, -B, np.eye(6)), axis=1)))
    for i in range(len(t) - 1):
        Aeq[6 * i + 6:6 * i + 12, 8 * i:8 * i + 14] = c
    beq = np.zeros([6 * len(t), 1])
    beq[:6, 0] = x0
    for i in range(len(t)):
        beq[6 * i + 5, 0] = -9.81
    beq = matrix(beq)
    # Objective
    Q = matrix(2 * np.eye(8))
    Q[0, 0] = 0
    QQ = sparse(Q)
    for i in range(len(t) - 1):
        QQ = spdiag([QQ, Q])
    p = -r.T.dot(Q)
    pp = matrix(np.kron(np.ones([1, len(t)]), p).T)
    sol = solvers.qp(QQ, pp, G, h, Aeq, beq)
    x = np.array(sol['x']).reshape((-1, 8))
    return t, x
def minimum_risk_subject_to_target_return():
    # minimum expected return threshold
    r_min = 0.04
    P = covs
    q = matrix(numpy.zeros((n, 1)), tc='d')
    # inequality constraints Gx <= h
    # captures the constraints (avg_ret'x >= r_min) and (x >= 0)
    G = matrix(-numpy.transpose(numpy.array(avg_ret)))
    h = matrix(-numpy.ones((1, 1)) * r_min)
    # equality constraint Ax = b; captures the constraint sum(x) == 1
    A = matrix(1.0, (1, n))
    b = matrix(1.0)
    groups = rets.groupby(axis=1, level=0, sort=False,
                          group_keys=False).count().ix[-1].values
    num_group = len(groups)
    num_asset = np.sum(groups)
    G_sparse_list = []
    for i in range(num_group):
        for j in range(groups[i]):
            G_sparse_list.append(i)
    Group_sub = spmatrix(1.0, G_sparse_list, range(num_asset))
    asset_sub = matrix(np.eye(n))
    # asset_sub is for asset weight limit, Group_sub is for group weight
    # constraint.
    G = matrix(sparse([G, asset_sub, -asset_sub, Group_sub, -Group_sub]))
    b_asset = tuple((0.01, 1.0) for i in rets.columns)
    b_asset_upper_bound = np.array([x[-1] for x in b_asset])
    b_asset_lower_bound = np.array([x[0] for x in b_asset])
    b_asset_matrix = matrix(
        numpy.concatenate((b_asset_upper_bound, -b_asset_lower_bound), 0))
    b_group = [(.05, .41), (.2, .66), (0.01, .16)]
    b_group_upper_bound = np.array([x[-1] for x in b_group])
    b_group_lower_bound = np.array([x[0] for x in b_group])
    b_group_matrix = matrix(
        numpy.concatenate((b_group_upper_bound, -b_group_lower_bound), 0))
    h = matrix(sparse([h, b_asset_matrix, b_group_matrix]))
    # solve minimum risk for maximum return above target.
    sol = solvers.qp(P, q, G, h, A, b)
    print(minimum_risk_subject_to_target_return.__name__)
    print(sol['x'])
    print(statistics(sol['x']))
def test_minres_scipy():
    H = sprandsym(10)
    v = sp_rand(10, 3, 0.8)
    A = sparse([[H], [v]])
    vrow = sparse([[v.T], [spmatrix([], [], [], (3, 3))]])
    A = sparse([A, vrow])
    b = sp_rand(13, 1, 0.8)
    As = cvxopt_to_numpy_matrix(A)
    bs = cvxopt_to_numpy_matrix(matrix(b))
    result = minres(As, bs)
    x = numpy_to_cvxopt_matrix(result[0])
    print nrm2(A * x - b)
def generateG(num_traj, num_states, size_each_state, size_each_control):
    num_controls = num_states - 1

    # control inequality block
    num_ctrl_ineq = 2 * (size_each_control * num_controls)
    X_block = sparseZero(num_ctrl_ineq, num_traj * (num_states - 2) * size_each_state)
    U_upper_block = sparseIdentity(num_controls * size_each_control)
    U_lower_block = -1 * sparseIdentity(num_controls * size_each_control)
    U_block = sparse([U_upper_block, U_lower_block])
    L_block = sparseZero(num_ctrl_ineq, num_traj * (num_states - 1) * size_each_state)
    E_block = sparseZero(num_ctrl_ineq, (num_controls - 1) * size_each_control)
    D_block = sparseZero(num_ctrl_ineq, num_traj * (num_states - 2) * size_each_state)
    XI_block = sparseZero(num_ctrl_ineq, num_traj * (num_states - 2) * size_each_state)
    UI_block = sparseZero(num_ctrl_ineq, num_controls * size_each_control)
    B_block = sparseZero(num_ctrl_ineq, num_traj * (num_states - 1) * size_each_state)
    ctrl_ineq = sparse([[X_block], [U_block], [L_block], [E_block],
                        [D_block], [XI_block], [UI_block], [B_block]])

    # state inequality block
    num_state_ineq = 2 * (num_traj * (num_states - 2) * size_each_state)
    X_upper_block = sparseIdentity(num_traj * (num_states - 2) * size_each_state)
    X_lower_block = -1 * sparseIdentity(num_traj * (num_states - 2) * size_each_state)
    X_block = sparse([X_upper_block, X_lower_block])
    L_block = sparseZero(num_state_ineq, num_traj * (num_states - 1) * size_each_state)
    U_block = sparseZero(num_state_ineq, num_controls * size_each_control)
    E_block = sparseZero(num_state_ineq, (num_controls - 1) * size_each_control)
    D_block = sparseZero(num_state_ineq, num_traj * (num_states - 2) * size_each_state)
    XI_upper_block = -1 * sparseIdentity(num_traj * (num_states - 2) * size_each_state)
    XI_lower_block = sparseIdentity(num_traj * (num_states - 2) * size_each_state)
    XI_block = sparse([XI_upper_block, XI_lower_block])
    UI_block = sparseZero(num_state_ineq, num_controls * size_each_control)
    B_block = sparseZero(num_state_ineq, num_traj * (num_states - 1) * size_each_state)
    state_ineq = sparse([[X_block], [U_block], [L_block], [E_block],
                         [D_block], [XI_block], [UI_block], [B_block]])

    # initial-control inequality block
    num_ctrl_init_ineq = 2 * (size_each_control * num_controls)
    X_block = sparseZero(num_ctrl_init_ineq, num_traj * (num_states - 2) * size_each_state)
    U_upper_block = sparseIdentity(num_controls * size_each_control)
    U_lower_block = -1 * sparseIdentity(num_controls * size_each_control)
    U_block = sparse([U_upper_block, U_lower_block])
    L_block = sparseZero(num_ctrl_init_ineq, num_traj * (num_states - 1) * size_each_state)
    E_block = sparseZero(num_ctrl_init_ineq, (num_controls - 1) * size_each_control)
    D_block = sparseZero(num_ctrl_init_ineq, num_traj * (num_states - 2) * size_each_state)
    XI_block = sparseZero(num_ctrl_init_ineq, num_traj * (num_states - 2) * size_each_state)
    UI_upper_block = -1 * sparseIdentity(num_controls * size_each_control)
    UI_lower_block = sparseIdentity(num_controls * size_each_control)
    UI_block = sparse([UI_upper_block, UI_lower_block])
    B_block = sparseZero(num_ctrl_init_ineq, num_traj * (num_states - 1) * size_each_state)
    ctrl_init_ineq = sparse([[X_block], [U_block], [L_block], [E_block],
                             [D_block], [XI_block], [UI_block], [B_block]])

    G = sparse([ctrl_ineq, state_ineq, ctrl_init_ineq])
    return G
def lst_l1(A, y, integer=False, xmax=1000):
    """
    Returns the solution of the least L1-norm problem

        minimize    || x ||_1
        subject to  A'x == y

    x can be float or integer. Returns None if no optimal/feasible solution is found.

    This problem can be converted into a linear programming problem by
    setting v = |u|, x = [u' v']',

        minimize    [0]' [u]
                    [1]  [v]

        subject to  [ I  -I]        [  0  ]
                    [-I  -I] [u] <= [  0  ]
                    [ 0  -I] [v]    [  0  ]
                    [ I   0]        [xmax ]

                    [A]' [u]  =  [y]
                    [0]  [v]
    """
    m, n = A.size
    c = matrix(0.0, (2 * n, 1))
    c[n:] = 1.0
    # inequality constraint
    I = spmatrix(1.0, range(n), range(n))
    O = matrix(0.0, (n, n))
    G = sparse(matrix([[I, -I, O, I], [-I, -I, -I, O]]))
    h = matrix(0.0, (4 * n, 1))
    h[3 * n:] = xmax
    # equality constraint
    Al = sparse(matrix([[A], [matrix(0.0, (m, n))]]))
    bl = y
    # solve the linear programming problem
    if integer:
        (status, x) = glpk.ilp(c, G, h, Al, bl)[0:2]
    else:
        (status, x) = glpk.ilp(c, G, h, Al, bl)[0:2]
    return x
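# Hedged usage sketch with invented data (requires cvxopt.glpk, i.e. the GLPK
# extension, to be installed). Note that the equality constraint the body
# actually assembles is A*u == y for A of size (m, n); the returned vector
# stacks u on top of the auxiliary bound variables v.
import numpy as np
from cvxopt import matrix

A = matrix(np.array([[1.0, 0.0, 1.0, 0.0],
                     [0.0, 1.0, 0.0, 1.0]]))   # 2 equations, 4 unknowns
y = matrix([1.0, 2.0])
sol = lst_l1(A, y)
if sol is not None:
    u = sol[:A.size[1]]      # a minimum L1-norm solution of A*u == y
    print(u)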
def constraints(self):
    # construct the constraints for the attack routing problem
    N = self.N
    u = np.tile(range(N), N)
    v = np.repeat(range(N), N)
    w = np.array(range(N * N))
    # build constraint matrix
    A1 = spmatrix(np.repeat(self.nu, N), u, w, (N, N * N))
    A2 = -spmatrix(np.repeat(self.nu + self.phi, N), v, w, (N, N * N))
    I = np.array(range(N))
    J = I + np.array(range(N)) * N
    A3 = spmatrix(self.phi, I, J, (N, N * N))
    tmp = np.dot(np.diag(self.phi), self.delta).transpose()
    A4 = matrix(np.repeat(tmp, N, axis=1))
    A5 = -spmatrix(tmp.flatten(), v, np.tile(J, N), (N, N * N))
    A6 = A1 + A2 + A3 + A4 + A5
    I = np.array([0] * (N - 1))
    J = np.array(range(self.k) + range((self.k + 1), N)) + N * self.k
    A7 = spmatrix(1., I, J, (1, N * N))
    A = sparse([[A6, -A6, A7, -A7, -spdiag([1.] * (N * N))]])
    tmp = np.zeros(2 * N + 2 + N * N)
    tmp[2 * N] = 1.
    tmp[2 * N + 1] = -1.
    b = matrix(tmp)
    return b, A
def np2spmatrix(nparray):
    """
    Convert a numpy ndarray to a sparse cvxopt matrix
    :param nparray: numpy ndarray
    :return: cvxopt sparse matrix
    """
    return sparse(matrix(nparray))
def build_matrix(A, b, b_height, params, vec_sizes, start_idxs, total_width):
    h_cum = height_of(b_height, vec_sizes)
    h_vec = o.matrix(0, (h_cum, 1), 'd')
    G_vals = []
    G_I, G_J = [], []
    idx = 0
    for row, coeff, size in zip(A, b, b_height):
        row_height = size.row_value(vec_sizes)
        h_vec[idx:idx + row_height] = eval_coeff(coeff, params, row_height)
        for k, v in row.iteritems():
            # repeated function
            # we ignore constant coefficients
            if k != '1':
                col_width = vec_sizes[k]
                result_mat = o.sparse(
                    eval_matrix_coeff(v, params, row_height, col_width))
                # set the row
                G_I += (result_mat.I + idx)
                # set the column
                G_J += (result_mat.J + start_idxs[k])
                # set the values
                G_vals += result_mat.V
        idx += row_height
    Gl_mat = o.spmatrix(G_vals, G_I, G_J, (h_cum, total_width))
    hl_vec = h_vec
    return (Gl_mat, hl_vec)
def solve_LP_problem(self):
    (f_coef_matrix, f_column_vector) = self.build_function_coef_matrix_and_column_vector()
    (d_coef_matrix, d_column_vector) = self.build_derivative_coef_matrix_and_column_vector()
    # Solve the LP problem by combining constraints for both function and derivative info.
    objective_function_vector = matrix(list(itertools.repeat(1.0, self.no_vars)))
    coef_matrix = sparse([f_coef_matrix, d_coef_matrix])
    column_vector = matrix([f_column_vector, d_column_vector])
    min_sol = solvers.lp(objective_function_vector, coef_matrix, column_vector)
    is_consistent = min_sol['x'] is not None
    # Print the LP problem for debugging purposes.
    if self.verbose:
        self.display_LP_problem(coef_matrix, column_vector)
    if is_consistent:
        self.min_heights = np.array(min_sol['x']).reshape(self.no_points_per_axis)
        print np.around(self.min_heights, decimals=2)
        # Since consistency has been established, solve the converse LP problem
        # to get the maximal bounding surface.
        max_sol = solvers.lp(-objective_function_vector, coef_matrix, column_vector)
        self.max_heights = np.array(max_sol['x']).reshape(self.no_points_per_axis)
        print np.around(self.max_heights, decimals=2)
        if self.plot_surfaces:
            self.plot_3D_objects_for_2D_case()
    else:
        print 'No witness for consistency found.'
    return is_consistent
def train_dual(self):
    """Trains a one-class svm in dual with kernel."""
    if (self.samples < 1):
        print('Invalid training data.')
        return SVDD.MSG_ERROR
    # number of training examples
    N = self.samples
    C = self.C
    # generate a kernel matrix
    P = self.kernel
    # this is the diagonal of the kernel matrix
    q = matrix([0.5 * P[i, i] for i in range(N)], (N, 1))
    # sum_i alpha_i = A alpha = b = 1.0
    A = matrix(1.0, (1, N))
    b = matrix(1.0, (1, 1))
    # 0 <= alpha_i <= h = C
    G1 = spmatrix(1.0, range(N), range(N))
    G = sparse([G1, -G1])
    h1 = matrix(C, (N, 1))
    h2 = matrix(0.0, (N, 1))
    h = matrix([h1, h2])
    sol = qp(P, -q, G, h, A, b)
    # mark dual as solved
    self.isDualTrained = True
    # store solution
    self.alphas = sol['x']
    self.obj_primal = sol['primal objective']
    self.obj_dual = sol['dual objective']
    # find support vectors
    self.svs = []
    for i in range(N):
        if self.alphas[i] > SVDD.PRECISION:
            self.svs.append(i)
    # find support vectors with alpha < C for threshold calculation
    self.threshold = 10**8
    flag = False
    for i in self.svs:
        if self.alphas[i] < (C - SVDD.PRECISION) and flag == False:
            (self.threshold, MSG) = self.apply_dual(self.kernel[i, self.svs],
                                                    self.norms[i])
            flag = True
            break
    # no threshold set yet?
    if (flag == False):
        (thres, MSG) = self.apply_dual(self.kernel[self.svs, self.svs],
                                       self.norms[self.svs])
        self.threshold = matrix(min(thres))
    print('Threshold is {0}'.format(self.threshold))
    return SVDD.MSG_OK
def test_get_B_matrix_nes_on_gap():
    x = np.array([0, 2, 3, 5])
    period = 3
    B_nes = mu.get_B_matrix_nes(x, period)
    expected_matrix = [[1, 0, 0],
                       [0, 0, 1],
                       [1, 0, 0],
                       [0, 0, 1]]
    expected_result = cvxopt.sparse(cvxopt.matrix(expected_matrix).T)
    assert max(B_nes - expected_result) < 1e-13
def objective_hyper(x, z, ks, p):
    """Objective function of UE program with hyperbolic delay functions
    f(x) = sum_i f_i(v_i) with v = sum_w x_w
    f_i(u) = ks[i,0]*u - ks[i,1]*log(ks[i,2]-u)

    Parameters
    ----------
    x,z: variables for the F(x,z) function for cvxopt.solvers.cp
    ks: matrix of size (n,3)
    p: number of destinations (we use multiple-sources single-sink
       node-arc formulation)
    """
    n = ks.size[0]
    if x is None:
        return 0, matrix(1.0 / p, (p * n, 1))
    l = matrix(0.0, (n, 1))
    for k in range(p):
        l += x[k * n:(k + 1) * n]
    f, Df, H = 0.0, matrix(0.0, (1, n)), matrix(0.0, (n, 1))
    for i in range(n):
        tmp = 1.0 / (ks[i, 2] - l[i])
        f += ks[i, 0] * l[i] - ks[i, 1] * np.log(max(ks[i, 2] - l[i], 1e-13))
        Df[i] = ks[i, 0] + ks[i, 1] * tmp
        H[i] = ks[i, 1] * tmp**2
    Df = matrix([[Df]] * p)
    if z is None:
        return f, Df
    return f, Df, sparse([[spdiag(z[0] * H)] * p] * p)
def train(self, test_users=None):
    self.model = {}
    iset = set(range(self.n_items))
    if test_users is None:
        test_users = range(self.n_users)
    for i, u in enumerate(test_users):
        #if (i+1) % 100 == 0: print "%d/%d" % (i+1, len(test_users))
        Xp_set = self.data.get_items(u)
        Xn_set = iset - Xp_set
        Z = co.spdiag([1.0 if i in Xp_set else -1.0 for i in iset])
        K = 2 * (Z * self.X.T * self.X * Z)
        I = co.spdiag([self.lambda_p if i in Xp_set else self.lambda_n
                       for i in iset])
        P = K + I
        o = utc.zeroes_vec(self.n_items)
        G = -utc.identity(self.n_items)
        A = co.matrix([[1.0 if i in Xp_set else 0.0 for i in iset],
                       [1.0 if j in Xn_set else 0.0 for j in iset]]).T
        b = co.matrix([1.0, 1.0])
        P = co.sparse(P)
        solver.options['show_progress'] = False
        sol = solver.qp(P, o, G, o, A, b)
        self.sol[u] = sol
        self.model[u] = self.X.T * self.X * Z * sol['x']
    # endfor
    return self
def solve(problem, sparse=False, **kwargs):
    """Minimize w*x subject to ||Ax + b|| <= c*x + d."""
    gs = []
    hs = []
    for constraint in problem.constraints:
        a = constraint.a
        b = constraint.b
        c = constraint.c
        d = constraint.d
        g = np.vstack((-c, -a))
        hs.append(cx.matrix(np.hstack((d, b))))
        if sparse:
            gs.append(cx.sparse(cx.matrix(g)))
        else:
            gs.append(cx.matrix(g))
    begin = time.clock()
    cx.solvers.options.update(kwargs)
    cx.solvers.options['MOSEK'] = {mosek.iparam.log: 100,
                                   mosek.iparam.intpnt_max_iterations: 50000}
    solution = cx.solvers.socp(cx.matrix(problem.objective),
                               Gq=gs, hq=hs, solver='mosek')
    duration = time.clock() - begin
    # duration = solution['duration']
    timings['last_solve'] = duration
    print 'SOCP duration: %.3f' % duration
    print 'Total duration (including python wrappers): %.3f' % duration
    print 'Solver exited with status "%s"' % solution['status']
    return solution
def Norm_inf1(X, W):
    # X and W are two matrices of shape f*n and r*n respectively
    f, n = X.size
    r = W.size[0]
    print f, n, r
    P = matrix(W).trans()
    F = matrix(1.0, (f, r))
    onesn = matrix(1.0, (n, 1))
    Idn = spmatrix(1.0, range(n), range(n))
    Idr = spmatrix(1.0, range(r), range(r))
    Zrn = spmatrix(0, [r - 1], [n - 1])
    Zr = spmatrix(0, [r - 1], [0])
    #Zn = spmatrix(0,[n-1],[0])
    A = sparse([
        [P, -P, -Idr],
        [-Idn, -Idn, Zrn]
    ])
    for i in range(f):
        V = X[i, :].trans()
        C = matrix([[V, -V, Zr]])
        e = matrix([[Zr, onesn]])
        solution = solvers.lp(e, A, C)['x']
        F[i, :] = solution[range(r)].trans()
    return F
def split_linear_constraints(A, l, u):
    """ Returns the linear equality and inequality constraints. """
    ieq = []
    igt = []
    ilt = []
    ibx = []
    for i in range(len(l)):
        if abs(u[i] - l[i]) <= EPS:
            ieq.append(i)
        elif (u[i] > 1e10) and (l[i] > -1e10):
            igt.append(i)
        elif (l[i] <= -1e10) and (u[i] < 1e10):
            ilt.append(i)
        elif (abs(u[i] - l[i]) > EPS) and (u[i] < 1e10) and (l[i] > -1e10):
            ibx.append(i)
        else:
            raise ValueError
    Ae = A[ieq, :]
    Ai = sparse([A[ilt, :], -A[igt, :], A[ibx, :], -A[ibx, :]])
    be = u[ieq, :]
    bi = matrix([u[ilt], -l[igt], u[ibx], -l[ibx]])
    return Ae, be, Ai, bi
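# Hedged usage sketch (data invented for illustration): four rows covering each
# case the loop distinguishes, assuming EPS is a small module-level tolerance
# such as 1e-10 and A is a cvxopt (sp)matrix.
import numpy as np
from cvxopt import matrix, sparse

EPS = 1e-10
A = sparse(matrix(np.array([[1.0, 0.0],
                            [0.0, 1.0],
                            [1.0, 1.0],
                            [1.0, -1.0]])))
l = matrix([1.0, -1e12, 0.0, 3.0])   # row 0: equality, row 1: upper bound only
u = matrix([1.0, 2.0, 5.0, 1e12])    # row 2: boxed, row 3: lower bound only
Ae, be, Ai, bi = split_linear_constraints(A, l, u)
print(Ae, be)   # the equality row x0 == 1
print(Ai, bi)   # x1 <= 2, x0 - x1 >= 3, and the boxed row 0 <= x0 + x1 <= 5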
def f(aff_exp):
    V, I, J = [], [], []
    vert_offset = expr_offsets[str(aff_exp)]
    coefficients = aff_exp.coefficients()
    for var, blocks in coefficients.items():
        # Constant is not in var_offsets.
        horiz_offset = var_offsets.get(var)
        for col, block in enumerate(blocks):
            vert_start = vert_offset + col * aff_exp.size[0]
            vert_end = vert_start + aff_exp.size[0]
            if var is s.CONSTANT:
                pass
                #const_vec[vert_start:vert_end, :] = block
            else:
                if isinstance(block, numbers.Number):
                    V.append(block)
                    I.append(vert_start)
                    J.append(horiz_offset)
                else:
                    # Block is a matrix or spmatrix.
                    if isinstance(block, cvxopt.matrix):
                        block = cvxopt.sparse(block)
                    V.extend(block.V)
                    I.extend(block.I + vert_start)
                    J.extend(block.J + horiz_offset)
    return (V, I, J)
def pinvert(spmat):
    """
    :param spmat: a cvxopt sparse matrix
    :return: the pseudo-inverse matrix as sparse
    """
    arr = spmatrix2np(spmat)
    arr_inv = np.linalg.pinv(arr)
    return sparse(matrix(arr_inv))