def solve_fw_tap(A, b, times, caps, I, K, maxiter=100, tol=0.001):
    h = matrix(0., (I*K, 1))
    G = spdiag([-1.]*I*K)
    block = build_block_matrix(I, K)
    reverse_block = reverse_block_matrix(I, K)
    #start iteration
    #flows = matrix(0.0, (I*K,1))
    #start with a normal lp
    times_expanded = reverse_block * times
    print('computing first feasible flow')
    flows = solvers.lp(times_expanded, G, h, A=A, b=b, solver='mosek',
                       options={"mosek": {iparam.log: 0}})['x']
    for k in range(maxiter):
        print('iter', k)
        #compute gradient
        sum_flows = block * flows
        print('flows', max(sum_flows), min(sum_flows), max(flows), min(flows))
        Df = vdf_derivative(times, caps, sum_flows)
        Df = reverse_block * Df
        print('Df', max(Df), min(Df))
        #solve the affine minimization subproblem -> xd
        lpsol = solvers.lp(Df, G, h, A=A, b=b, solver='mosek',
                           options={"mosek": {iparam.log: 0}})
        xd = lpsol['x']
        print(lpsol['status'])
        if lpsol['status'] != 'optimal':
            return {'flows': flows, 'grad': Df, 'G': G, 'h': h}
        #update
        bound = (Df.T * (flows - xd))[0]
        step = (2. / (k + 2.)) * (xd - flows)
        print('diff', np.sqrt(step.T * step)[0], bound)
        if abs(bound) < tol:
            print("finished after " + str(k) + " iterations")
            break
        flows = flows + step
    return flows
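# The loop above relies on a vdf_derivative helper that is not shown. A
# minimal sketch, assuming the standard BPR volume-delay function
# t(x) = t0 * (1 + 0.15 * (x / c)**4): under that reading, the gradient of
# the Beckmann objective sum_e integral_0^{x_e} t_e(s) ds that Frank-Wolfe
# linearizes is just the travel time evaluated at the current flow. The
# alpha/beta defaults are the usual BPR values; all of this is an
# assumption, not the original implementation.
from cvxopt import matrix

def vdf_derivative(times, caps, flows, alpha=0.15, beta=4.0):
    # per-link BPR travel time at the current aggregate flow
    return matrix([times[i] * (1.0 + alpha * (flows[i] / caps[i]) ** beta)
                   for i in range(len(times))])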
def solve_LP_problem(self):
    (f_coef_matrix, f_column_vector) = self.build_function_coef_matrix_and_column_vector()
    (d_coef_matrix, d_column_vector) = self.build_derivative_coef_matrix_and_column_vector()

    # Solve the LP problem by combining constraints for both function and derivative info.
    objective_function_vector = matrix(list(itertools.repeat(1.0, self.no_vars)))
    coef_matrix = sparse([f_coef_matrix, d_coef_matrix])
    column_vector = matrix([f_column_vector, d_column_vector])
    min_sol = solvers.lp(objective_function_vector, coef_matrix, column_vector)
    is_consistent = min_sol['x'] is not None

    # Print the LP problem for debugging purposes.
    if self.verbose:
        self.display_LP_problem(coef_matrix, column_vector)

    if is_consistent:
        self.min_heights = np.array(min_sol['x']).reshape(self.no_points_per_axis)
        print(np.around(self.min_heights, decimals=2))

        # Since consistency has been established, solve the converse LP problem to get the
        # maximal bounding surface.
        max_sol = solvers.lp(-objective_function_vector, coef_matrix, column_vector)
        self.max_heights = np.array(max_sol['x']).reshape(self.no_points_per_axis)
        print(np.around(self.max_heights, decimals=2))

        if self.plot_surfaces:
            self.plot_3D_objects_for_2D_case()
    else:
        print('No witness for consistency found.')

    return is_consistent
def ty_solver(data, l, w_toll, full=False): """Solves the block (t,y) of the toll pricing model Parameters ---------- data: Aeq, beq, ffdelays, coefs l: linkflows w_toll: weight on the toll collected full: if True, return the whole solution """ Aeq, beq, ffdelays, coefs = data delays = compute_delays(l, ffdelays, coefs) n = len(l) p = Aeq.size[1]/n m = Aeq.size[0]/p c = matrix([(1.0+w_toll)*l, -beq]) I = spdiag([1.0]*n) G1 = matrix([-I]*(p+1)) G2 = matrix([Aeq.T, matrix(0.0, (n,p*m))]) G = matrix([[G1],[G2]]) h = [delays]*p h.append(matrix(0.0, (n,1))) h = matrix(h) if full: return solvers.lp(c,G,h)['x'] return solvers.lp(c,G,h)['x'][range(n)]
def test_options(self): from cvxopt import glpk, solvers c,G,h,A,b = self._prob_data glpk.options = {'msg_lev' : 'GLP_MSG_OFF'} sol1 = glpk.lp(c,G,h) self.assertTrue(sol1[0]=='optimal') sol2 = glpk.lp(c,G,h,A,b) self.assertTrue(sol2[0]=='optimal') sol3 = glpk.lp(c,G,h,options={'msg_lev' : 'GLP_MSG_ON'}) self.assertTrue(sol3[0]=='optimal') sol4 = glpk.lp(c,G,h,A,b,options={'msg_lev' : 'GLP_MSG_ERR'}) self.assertTrue(sol4[0]=='optimal') sol5 = solvers.lp(c,G,h,solver='glpk',options={'glpk':{'msg_lev' : 'GLP_MSG_ON'}}) self.assertTrue(sol5['status']=='optimal') sol1 = glpk.ilp(c,G,h,None,None,set(),set([0,1])) self.assertTrue(sol1[0]=='optimal') sol2 = glpk.ilp(c,G,h,A,b,set([0,1]),set()) self.assertTrue(sol2[0]=='optimal') sol3 = glpk.ilp(c,G,h,None,None,set(),set([0,1]),options={'msg_lev' : 'GLP_MSG_ALL'}) self.assertTrue(sol3[0]=='optimal') sol4 = glpk.ilp(c,G,h,A,b,set(),set([0]),options={'msg_lev' : 'GLP_MSG_ALL'}) self.assertTrue(sol4[0]=='optimal') solvers.options['glpk'] = {'msg_lev' : 'GLP_MSG_ON'} sol5 = solvers.lp(c,G,h,solver='glpk') self.assertTrue(sol5['status']=='optimal')
def test_options(self): from cvxopt import matrix, msk, solvers msk.options = {msk.mosek.iparam.log: 0} c = matrix([-4., -5.]) G = matrix([[2., 1., -1., 0.], [1., 2., 0., -1.]]) h = matrix([3., 3., 0., 0.]) msk.lp(c, G, h) msk.lp(c, G, h, options={msk.mosek.iparam.log: 1}) solvers.lp(c, G, h, solver='mosek', options={'mosek':{msk.mosek.iparam.log: 1}})
def cvxopt_solver(G, h, A, b, c, n): # cvxopt doesn't allow redundant constraints in the linear program Ax = b, # so we need to do some preprocessing to find and remove any linearly # dependent rows in the augmented matrix [A | b]. # # First we do Gaussian elimination to put the augmented matrix into row # echelon (reduced row echelon form is not necessary). Since b comes as a # numpy array (that is, a row vector), we need to convert it to a numpy # matrix before transposing it (that is, to a column vector). b = np.mat(b).T A_b = np.hstack((A, b)) A_b, permutation = to_row_echelon(np.hstack((A, b))) # Next, we apply the inverse of the permutation applied to compute the row # echelon form. P = dictionary_to_permutation(permutation) A_b = P.I * A_b # Trim any rows that are all zeros. The call to np.any returns an array of # Booleans that correspond to whether a row in A_b is all zeros. Indexing # A_b by an array of Booleans acts as a selector. We need to use np.asarray # in order for indexing to work, since it expects a row vector instead of a # column vector. A_b = A_b[np.any(np.asarray(A_b) != 0, axis=1)] # Split the augmented matrix back into a matrix and a vector. A, b = A_b[:, :-1], A_b[:, -1] # Apply the linear programming solver; cvxopt requires that these are all # of a special type of cvx-specific matrix. G, h, A, b, c = (cvx_matrix(M) for M in (G, h, A, b, c)) solution = cvx_solvers.lp(c, G, h, A, b) if solution['status'] == 'optimal': return True, solution['x'] # TODO status could be 'unknown' here, but we're currently ignoring that return False, None
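# The to_row_echelon / dictionary_to_permutation helpers are not shown. As a
# hedged, self-contained alternative, the same preprocessing (dropping
# linearly dependent rows of [A | b]) can be sketched with NumPy's rank
# test; drop_dependent_rows is hypothetical, not the original helper.
import numpy as np

def drop_dependent_rows(A, b, tol=1e-10):
    # Greedily keep each row of [A | b] only if it raises the rank.
    A_b = np.hstack([A, np.atleast_2d(b).T])
    keep, kept_rows = [], np.empty((0, A_b.shape[1]))
    for i, row in enumerate(A_b):
        candidate = np.vstack([kept_rows, row])
        if np.linalg.matrix_rank(candidate, tol=tol) > len(keep):
            keep.append(i)
            kept_rows = candidate
    return A[keep], np.asarray(b)[keep]

A = np.array([[1., 0.], [0., 1.], [1., 1.]])  # third row = sum of the first two
b = np.array([1., 2., 3.])
print(drop_dependent_rows(A, b))              # only the first two rows survive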
def CVXOPT_LP_Solver(p, solverName): #os.close(1); os.close(2) if solverName == 'native_CVXOPT_LP_Solver': solverName = None cvxopt_solvers.options['maxiters'] = p.maxIter cvxopt_solvers.options['feastol'] = p.contol cvxopt_solvers.options['abstol'] = p.ftol if p.iprint <= 0: cvxopt_solvers.options['show_progress'] = False cvxopt_solvers.options['LPX_K_MSGLEV'] = 0 cvxopt_solvers.options['MSK_IPAR_LOG'] = 0 xBounds2Matrix(p) #WholeRepr2LinConst(p) # CVXOPT have some problems with x0 so currently I decided to avoid using the one #if p.x0.size>0 and p.x0.flatten()[0] != None and all(isfinite(p.x0)): # sol= cvxopt_solvers.solvers.lp(Matrix(p.f), Matrix(p.A), Matrix(p.b), Matrix(p.Aeq), Matrix(p.beq), solverName) #else: if (len(p.intVars)>0 or len(p.binVars)>0) and solverName == 'glpk': from cvxopt.glpk import ilp c = Matrix(p.f) A, b = Matrix(p.Aeq), Matrix(p.beq) G, h = Matrix(p.A), Matrix(p.b) if A is None: A = matrix(0.0, (0, p.n)) b = matrix(0.0,(0,1)) if G is None: G = matrix(0.0, (0, p.n)) h = matrix(0.0,(0,1)) (status, x) = ilp(c, G, h, A, b, set(p.intVars), B=set(p.binVars)) if status == 'optimal': p.istop = SOLVED_WITH_UNIMPLEMENTED_OR_UNKNOWN_REASON elif status == 'maxiters exceeded': p.istop = IS_MAX_ITER_REACHED elif status == 'time limit exceeded': p.istop = IS_MAX_TIME_REACHED elif status == 'unknown': p.istop = UNDEFINED else: p.istop = FAILED_WITH_UNIMPLEMENTED_OR_UNKNOWN_REASON if x is None: p.xf = nan*ones(p.n) else: p.xf = array(x).flatten()#w/o flatten it yields incorrect result in ff! p.ff = sum(p.dotmult(p.f, p.xf)) p.msg = status else: # if len(p.b) != 0: # s0 = matrix(p.b - dot(p.A, p.x0)) # else: # s0 = matrix() # primalstart = {'x': matrix(p.x0), 's': s0} # sol = cvxopt_solvers.lp(Matrix(p.f), Matrix(p.A), Matrix(p.b), Matrix(p.Aeq), Matrix(p.beq), solverName, primalstart) sol = cvxopt_solvers.lp(Matrix(p.f), Matrix(p.A), Matrix(p.b), Matrix(p.Aeq), Matrix(p.beq), solverName) p.msg = sol['status'] if p.msg == 'optimal' : p.istop = SOLVED_WITH_UNIMPLEMENTED_OR_UNKNOWN_REASON else: p.istop = -100 if sol['x'] is not None: p.xf = asarray(sol['x']).flatten() # ! don't involve p.ff - it can be different because of goal and additional constant from FuncDesigner p.duals = concatenate((asarray(sol['y']).flatten(), asarray(sol['z']).flatten())) else: p.ff = nan p.xf = nan*ones(p.n)
def l1Min(A,b): """ Solve min ||x||_1 s.t. Ax = b using CVXOPT. Inputs: A -- numpy array of shape m x n b -- numpy array of shape m Returns: x -- numpy array of shape n """ m, n = A.shape A1 = np.zeros((m,2*n)) A1[:,n:] = A c = np.zeros(2*n) c[:n] = 1 h = np.zeros(2*n) G = np.zeros((2*n,2*n)) G[:n,:n] = -np.eye(n) G[:n,n:] = np.eye(n) G[n:,:n] = -np.eye(n) G[n:,n:] = -np.eye(n) c = matrix(c) G = matrix(G) h = matrix(h) A = matrix(A1) b = matrix(b) sol = solvers.lp(c, G, h, A, b) return np.array(sol['x'])[n:].flatten()
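# A quick smoke test for l1Min above (hypothetical data; cvxopt's matrix and
# solvers are assumed imported, as in the function): basis pursuit typically
# recovers a sparse vector from a modest number of random measurements.
import numpy as np

np.random.seed(0)
m, n = 20, 40
A = np.random.randn(m, n)
x_true = np.zeros(n)
x_true[[3, 17, 31]] = [1.5, -2.0, 0.7]        # 3-sparse ground truth
x_hat = l1Min(A, A @ x_true)
print(np.allclose(x_hat, x_true, atol=1e-4))  # usually True in this regime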
def generate_program(alpha): assert(alpha < 1) t1 = alpha**2 t2 = (1 - alpha)**2 t3 = 2*alpha*(1 - alpha) tmp_list_A = [ \ [1., 2., 1., 0., 0., 0.], \ [0., 1., 0., 2., 1., 1.], \ [0., 0., 1., 0., 2., 1.], \ [t1, t1 + t3, t2, t3, t2, 0.]] \ # [0., t1, t1, t3, t2 + t3, t2], ] tmp_list_B = [1., 1., 1., 1 - alpha] #, alpha] A = matrix(tmp_list_A).T b = matrix(tmp_list_B) tmp_G_list = [ \ [-1., 0., 0., 0., 0., 0.], \ [0., -1., 0., 0., 0., 0.], \ [0., 0., -1., 0., 0., 0.], \ [0., 0., 0., -1., 0., 0.], \ [0., 0., 0., 0., -1., 0.], \ [0., 0., 0., 0., 0., -1.]] tmp_h_list = [-0.001, 0., 0., 0., 0., -0.001] G = matrix(tmp_G_list).T h = matrix(tmp_h_list) c = matrix([1., 1., 1., 1., 1., 1.]) sol = solvers.lp(c, G, h, A, b) return (G, h, A, b, c, sol)
def Norm_inf1(X, W):
    # X and W are matrices of shape f*n and r*n respectively
    f, n = X.size
    r = W.size[0]
    print(f, n, r)
    P = matrix(W).trans()
    F = matrix(1.0, (f, r))
    onesn = matrix(1.0, (n, 1))
    Idn = spmatrix(1.0, range(n), range(n))
    Idr = spmatrix(1.0, range(r), range(r))
    Zrn = spmatrix(0, [r-1], [n-1])
    Zr = spmatrix(0, [r-1], [0])
    #Zn = spmatrix(0,[n-1],[0])
    A = sparse([[P, -P, -Idr], [-Idn, -Idn, Zrn]])
    for i in range(f):
        V = X[i, :].trans()
        C = matrix([[V, -V, Zr]])
        e = matrix([[Zr, onesn]])
        solution = solvers.lp(e, A, C)['x']
        F[i, :] = solution[range(r)].trans()
    return F
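# A hypothetical round-trip check for Norm_inf1: each row solves a
# nonnegative least-absolute-deviations fit (the slack variables bound
# |W'F_i - X_i| elementwise and the objective sums them), so an exact
# nonnegative factorization should be recovered almost exactly.
import numpy as np
from cvxopt import matrix

np.random.seed(0)
F_true = matrix(np.abs(np.random.randn(4, 3)))  # f x r, nonnegative
W = matrix(np.abs(np.random.randn(3, 5)))       # r x n
X = F_true * W                                  # exact factorization, f x n
F_hat = Norm_inf1(X, W)
print(np.max(np.abs(np.array(F_hat) - np.array(F_true))))  # close to 0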
def prob2(): """Solve the transportation problem by converting all equality constraints into inequality constraints. Returns (in order): The optimizer (sol['x']) The optimal value (sol['primal objective']) """ # minimize this function c = matrix([4.0, 7.0, 6.0, 8.0, 8.0, 9.0]) # constraints G = matrix( [ [1.0, -1.0, 0.0, 0.0, 0.0, 0.0, 1.0, -1.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, 0.0, 0.0], [1.0, -1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, -1.0, 0.0, -1.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 1.0, -1.0, 0.0, 0.0, 1.0, -1.0, 0.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0], [0.0, 0.0, 1.0, -1.0, 0.0, 0.0, 0.0, 0.0, 1.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 1.0, -1.0, 1.0, -1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -1.0, 0.0], [0.0, 0.0, 0.0, 0.0, 1.0, -1.0, 0.0, 0.0, 1.0, -1.0, 0.0, 0.0, 0.0, 0.0, 0.0, -1.0], ] ) # with respect to these values h = matrix([7.0, -7.0, 2.0, -2.0, 4.0, -4.0, 5.0, -5.0, 8.0, -8.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]) # compute the solution to the modified constraints sol = solvers.lp(c, G, h) return sol["x"], sol["primal objective"]
def __solve(self): sol = solvers.lp(self.__generate_c(), self.__generate_A(), self.__generate_b()) vector = array(sol['x']) vector = around(vector, decimals=0) return vector
def l1Min(A, b):
    """Calculate the solution to the optimization problem

        minimize ||x||_1
        subject to Ax = b

    Return only the solution x (not any slack variable), as a flat NumPy array.

    Parameters:
        A ((m,n) ndarray)
        b ((m, ) ndarray)

    Returns:
        x ((n, ) ndarray): The solution to the minimization problem.
    """
    m, n = A.shape
    b = matrix(b.astype(float))
    c = matrix(np.hstack((np.ones(n), np.zeros(n))))
    G = np.hstack((-np.eye(n), np.eye(n)))
    Gi = np.hstack((-np.eye(n), -np.eye(n)))
    G = matrix(np.vstack((G, Gi)))
    h = matrix(np.zeros(2*n))
    A = matrix(np.hstack((np.zeros((m, n)), A)))
    sol = solvers.lp(c, G, h, A, b)
    # Strip the slack block and flatten, as the docstring promises.
    return np.ravel(sol['x'])[n:]
def generate_lp(): tmp_list_A = [ #1, 2, 3, 4, 5, 6, 7, d, e, f, a, a', b, b', c, c', g, g', h, h' [1., 0., 1., 0., 0., 1., 1., 0., 0., 0., 0., 1., 0., 1., 1., 0., 1., 0., 0., 1.], #1 [0., 1., 0., 1., 1., 0., 0., 1., 1., 1., 1., 0., 1., 0., 0., 1., 0., 1., 1., 0.], #2 [0., 0., 1., 0., 0., 1., 1., 1., 1., 0., 0., 0., 0., 1., 1., 0., 1., 0., 0., 0.], #3 [0., 0., 0., 1., 1., 0., 0., 1., 1., 1., 0., 0., 1., 0., 0., 1., 0., 1., 0., 0.], #4 [0., 0., 0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 0., 1., 0., 0., 0., 0., 0.], #5 [0., 0., 0., 0., 1., 0., 0., 0., 1., 1., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0.], #6 [0., 0., 0., 0., 0., 0., 1., 0., 1., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0.], #7 [0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0.], #A [0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 1., 0., 0., 0., 0., 0., 0.], #B [0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 1., 0., 0., 0., 0.], #C [0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 1., 0., 0.], #G [0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 1.]] #H tmp_list_b = [5., 8., 5., 6., 2., 3., 2., 2., 2., 2., 2., 2.] tmp_list_G = [[-1. if i == j else 0. for i in range(20)] for j in range(20)] tmp_list_h = [0. for i in range(20)] G = matrix(tmp_list_G).T h = matrix(tmp_list_h) A = matrix(tmp_list_A).T b = matrix(tmp_list_b) c = matrix([-1., -1., -1., -1., -1., -1., -1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]) sol = solvers.lp(c, G, h, A, b) return (G, h, A, b, c, sol)
def pfOptimizer(longTickers, shortTickers, Coef, StockBeta, BETA_BOUND, WEIGHT_BOUND): longTickers["bWeight"] = 1 # binary weights shortTickers["bWeight"] = -1 pfTickers = ( pd.concat((shortTickers[["ticker"]], longTickers[["ticker"]]), axis=0).sort("ticker").reset_index(drop=True) ) # sens = Coef[['ticker', 'Mkt-RF', 'SMB', 'HML', 'UMD']].merge(pfTickers).rename(columns={'Mkt-RF': 'beta'}) # control = matrix([1, 0.2, 0.2, 0.5]) # scores = matrix(sens[['beta', 'SMB', 'HML', 'UMD']].as_matrix()) * control betas = Coef[["ticker", "Mkt-RF"]].merge(pfTickers).rename(columns={"Mkt-RF": "beta"}).reset_index(drop=True) mBeta = matrix(betas["beta"]) mCqaBeta = matrix(StockBeta.merge(pfTickers)["cqaBeta"]) longIndex = matrix(pfTickers.merge(longTickers, how="left").fillna(0)["bWeight"]) mLongIndex = np.diag(pfTickers.merge(longTickers, how="left").fillna(0)["bWeight"]) mLongIndex = matrix(mLongIndex[np.logical_or.reduce([np.sum(mLongIndex, 1) > 0.5])]).trans() # mLongIndex = matrix(np.diag(tickers.merge(longTickers, how='left').fillna(0)['bWeight'])) shortIndex = -matrix(pfTickers.merge(shortTickers, how="left").fillna(0)["bWeight"]) mShortIndex = -np.diag(pfTickers.merge(shortTickers, how="left").fillna(0)["bWeight"]) mShortIndex = matrix(mShortIndex[np.logical_or.reduce([np.sum(mShortIndex, 1) > 0.5])]).trans() # mShortIndex = matrix(np.diag(pfTickers.merge(shortTickers, how='left').fillna(0)['bWeight'])) # wTickers = functions.iniWeights(pfTickers, shortTickers, longTickers) # initial weights wStart = matrix(functions.iniWeights(pfTickers, longTickers, shortTickers)["weight"]) N = pfTickers.shape[0] id = spmatrix(1.0, range(N), range(N)) wBounds = matrix(np.ones((N, 1)) * WEIGHT_BOUND) longBounds = matrix(np.zeros((shortTickers.shape[0], 1))) # longBounds = matrix(np.ones((shortTickers.shape[0], 1)) * 0.002) shortBounds = matrix(np.zeros((longTickers.shape[0], 1))) # shortBounds = matrix(np.ones((longTickers.shape[0], 1)) * (-0.005)) A = matrix( [ [mCqaBeta], [-mCqaBeta], [longIndex], [-longIndex], [shortIndex], [-shortIndex], [id], [-id], [-mLongIndex], [mShortIndex], ] ).trans() b = matrix([BETA_BOUND, BETA_BOUND, 1, -0.98, -0.98, 1, wBounds, wBounds, longBounds, shortBounds]) # A = matrix([ [longIndex], [-longIndex], # [shortIndex], [-shortIndex], # [id], [-id], # [-mLongIndex], [mShortIndex]]).trans() # b = matrix([ 1, -0.98, -0.98, 1, wBounds, wBounds, longBounds, shortBounds]) # scores = mBeta # sol = solvers.lp(-scores, A, b) sol = solvers.lp(-mBeta, A, b) w_res = sol["x"] wTickers = pfTickers wTickers["weight"] = w_res return wTickers
def problem_1(): """ This function provides the solution for problem 1 in the file: 'lab3 - linaer-programming.pdf' Inputs: None Returns: A dictionary containing the solution and a lot of other info as defined in cvxopt.solvers.lp """ c = matrix([-1.0, -2.0, -3.0, -4.0, -5.0]) G = matrix([[5.0, 4.0, 3.0, 2.0, 1.0,], [1.0, -2.0, 3.0, -4.0, 5.0], [-1.0, 2.0, -3.0, 4.0, -5.0], [-1.0, 0, 0 ,0 ,0], [0, -1.0, 0, 0, 0], [0, 0, -1.0, 0, 0], [0, 0, 0, -1.0, 0], [0, 0, 0, 0, -1.0]]) G = G.T h = matrix([30.0, 20.0, 10.0, 0., 0., 0., 0., 0.]) solution = solvers.lp(c, G, h) return solution
def opt_queue(mu, K, l, t): q = [0] * K N = np.zeros([K, K]) ucb_avg = np.zeros([K, K]) queue = np.empty((0, K), int) for i in range(K): mui = np.amax(mu[i, :]) lam = l[i] q[i] = stat_sample(lam, mui, 1, 100) queue = np.append(queue, np.array([q]), axis=0) c, G, h = schedule(K, l, mu) sol = solvers.lp(c, G, h) solx = sol["x"] hp = [] for i in range(1, len(solx)): hp = hp + [solx[i]] result = BVN(hp, K) prob = [pit[1] for pit in result] values = range(len(prob)) for s in range(1, t): index = weighted_values(values, prob, 1) # (q,N,ucb_avg) = one_schedule(result[index][0],mu,K,q,N,ucb_avg) one_schedule(result[index][0], mu, K, q, N, ucb_avg, l) queue = np.append(queue, np.array([q]), axis=0) return queue
def cheby_center(C, D, b):
    '''Calculates the Chebyshev center for the polytope

        C x + D y <= b

    Input:
    `C, D, b`: Polytope parameters

    Output:
    `x_0, y_0`: The Chebyshev center
    `boolean`: True if a point could be found, False otherwise'''
    d = C.shape[1]
    k = D.shape[1]
    A = np.hstack([C, D])
    dim = np.shape(A)[1]
    c = -np.r_[np.zeros(dim), 1]
    norm2 = np.sqrt(np.sum(A*A, axis=1))
    G = np.c_[A, norm2]
    solvers.options['show_progress'] = False
    solvers.options['LPX_K_MSGLEV'] = 0
    sol = solvers.lp(matrix(c), matrix(G), matrix(b), None, None, lp_solver)
    if sol['status'] == "optimal":
        opt = np.array(sol['x'][0:-1]).flatten()
        return opt[range(0, d)], opt[range(d, d+k)], True
    else:
        return np.zeros(d), np.zeros(k), False
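# For intuition, a self-contained check that does not depend on the module
# globals (lp_solver) used above: the Chebyshev center of the unit box
# -1 <= x_i <= 1 is the origin with radius 1. Variables are (x1, x2, r);
# maximize r subject to a_i.x + ||a_i|| * r <= b_i.
import numpy as np
from cvxopt import matrix, solvers

A = np.vstack([np.eye(2), -np.eye(2)])   # unit box: A x <= 1
b = np.ones(4)
G = matrix(np.c_[A, np.linalg.norm(A, axis=1)])
sol = solvers.lp(matrix([0.0, 0.0, -1.0]), G, matrix(b))
print(np.array(sol['x']).ravel())        # approximately [0, 0, 1]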
def linprog(*args, **kwargs):
    """
    min c^T*x
    s.t. Gx <= h
         Ax = b

    args: c, G, h, A, b
    """
    verbose = kwargs.get('verbose', False)

    # Save settings and set verbosity
    old_settings = _apply_options({'show_progress': verbose, 'LPX_K_MSGLEV': int(verbose)})

    # Optimize
    results = lp(*args, solver='glpk')

    # Restore settings
    _apply_options(old_settings)

    # Check return status
    status = results['status']
    if status != 'optimal':
        from sys import stderr
        print('Warning: termination of lp with status: %s' % status, file=stderr)

    # Convert back to a NumPy array and return the solution
    xstar = np.array(results['x'])
    return xstar, results['primal objective']
def problem_10(X, y): """ This function implements the minimization of ||X*beta - y|| for the one norm. Inputs: X: The matrix X that defines the characterizing equations for the model. y: The vector so that X*beta = y Returns: beta: A NumPy array representing the vector that minimizes the objective function. Notes: This function uses the cvxopt.solvers.lp routine to get the answer. """ X = np.mat(X, dtype = float) y = np.array(y, dtype = float) m,n = X.shape B = np.eye(m) G = np.vstack((np.hstack((X, -B)), np.hstack((-X, -B)))) c = np.hstack((np.zeros(n), np.ones(m))) h = np.hstack((y, -y)) c = matrix(c) h = matrix(h) G = matrix(G) solution = solvers.lp(c, G, h) return np.array(solution['x'])[:n]
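# A hypothetical demonstration of why the L1 objective is used here: one
# gross outlier barely moves the least-absolute-deviations fit, unlike
# least squares.
import numpy as np

np.random.seed(1)
X = np.c_[np.ones(30), np.linspace(0, 1, 30)]
y = X @ np.array([2.0, -1.0])
y[-1] += 25.0                           # one gross outlier
print(np.round(problem_10(X, y), 3))    # still close to [2, -1]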
def prob6():
    """Solve the allocation model problem in 'ForestData.npy'.
    Note that the first three rows of the data correspond to the first
    analysis area, the second group of three rows correspond to the second
    analysis area, and so on.

    Returns (in order):
        The optimizer x (ndarray)
        The optimal value (sol['primal objective']*-1000)
    """
    data = np.load("ForestData.npy")
    c = matrix(data[:, 3] * -1)
    A = la.block_diag(*[[1.0, 1.0, 1.0] for _ in range(7)])
    b = data[::3, 1].copy()
    G = np.vstack((-data[:, 4], -data[:, 5], -data[:, 6], -np.eye(21)))  # flip the inequality signs
    h = np.hstack(([-40000.0, -5.0, -70.0 * 788.0], np.zeros(21)))  # flip the inequality signs
    c = matrix(c)
    A = matrix(A)
    b = matrix(b)
    G = matrix(G)
    h = matrix(h)
    sol = solvers.lp(c, G, h, A, b)
    return np.ravel(sol["x"]), sol["primal objective"] * -1000.0
def quadprog(H, c, A, b):
    # Phase 1: get a feasible starting point from an LP with a zero objective.
    x = np.matrix(solvers.lp(matrix(np.zeros(c.shape)), matrix(-A), matrix(-b))['x'])
    # x = scipy.optimize.linprog(np.ones(c.shape).T[0], -np.array(A), -np.array(b).T[0])['x']
    # x = np.matrix(x).T
    workset = np.array(A * x <= b).T[0]
    max_iter = 200
    for k in range(max_iter):
        Aeq = A[np.where(workset == True)]
        g_k = H * x + c
        p_k, lambda_k = qpsub(H, g_k, Aeq, np.matlib.zeros((Aeq.shape[0], 1)))
        if np.linalg.norm(p_k) <= 1e-9:
            if np.min(lambda_k) > 0:
                break
            else:
                pos = np.argmin(lambda_k)
                for i in range(b.size):
                    if workset[i] and np.sum(workset[:i]) == pos:
                        workset[i] = False
                        break
        else:
            alpha = 1.0
            pos = -1
            for i in range(b.size):
                if not workset[i] and (A[i] * p_k)[0, 0] < 0:
                    now = np.abs((b[i] - A[i] * x) / (A[i] * p_k))[0, 0]
                    if now < alpha:
                        alpha = now
                        pos = i
            x += alpha * p_k
            if pos != -1:
                workset[pos] = True
    return x
def static_stat_sample(lam,mu,K,num): c,G,h = schedule(K,lam,mu) sol = solvers.lp(c,G,h) l = sol['x'] M = soltobvn(l,K) result = BVN(M,K) q = np.array([0]*K) N = np.zeros([K,K]) ucb_avg = np.zeros([K,K]) prob = [pit[1] for pit in result] values = range(len(prob)) for s in range(num): index = weighted_values(values,prob,1) one_schedule(result[index][0],mu,K,q,N,ucb_avg,lam) return q
def unique_equalityset(C,D,b,af,bf,abs_tol=1e-7,verbose=0): '''Return the equality set E such that P_E = {x | af x = bf} intersection P where P is the polytope C x + D y < b The inequalities have to be satisfied with equality everywhere on the face defined by af and bf.''' if D is not None: A = np.hstack([C,D]) a = np.hstack([af, np.zeros(D.shape[1]) ]) else: A = C a = af E = [] for i in range(A.shape[0]): A_i = np.array(A[i,:]) b_i = b[i] sol = solvers.lp(matrix(A_i), matrix(A) , matrix(b), matrix(a).T, matrix(bf), lp_solver) if sol['status'] != "optimal": raise Exception("unique_equalityset: LP returned status " + str(sol['status'])) if np.abs(sol['primal objective'] - b_i) < abs_tol: # Constraint is active everywhere E.append(i) if len(E) == 0: raise Exception("unique_equalityset: empty E") return np.array(E)
def problem_2(): """ This function provides the solution for problem 2 in the file: 'lab3 - linaer-programming.pdf' Inputs: None Returns: A dictionary containing the solution and a lot of other info as defined in cvxopt.solvers.lp """ c = matrix([-6., -8., -5., -9.]) G = matrix([[2., 1., 1., 3.], [1., 3., 1., 2.], [-1., 0., 0., 0.], [0., -1.0, 0., 0.], [0., 0., -1.0, 0.], [0., 0., 0., -1.0]]) G = G.T h = matrix([5., 3., 0., 0., 0., 0.]) A = matrix([1., 1., 1.,0.]) A = A.T b = matrix([1.]) solution = solvers.lp(c, G, h, A, b) return solution
def main():
    ''' Main function '''
    ore = EveOnlineManufacturingJob(DATA_ACCESS_OBJECT)
    reproc_mat_list = ore.get_mineral_matrix_adjusted(sec_status_low_limit=0.9,
                                                      fclt_base_yield=0.54,
                                                      rprcs_skill_lvl=5,
                                                      rprcs_eff_skill_lvl=5,
                                                      mtrl_spcfc_prcs_skill_lvl=5,
                                                      implant_bonus=0)

    # for simplification we filter the ore list
    reproc_mat_list_filtered = {}
    reproc_mat_list_filtered[DATA_ACCESS_OBJECT.get_inv_type(type_name='Veldspar')['type_id']] = reproc_mat_list[DATA_ACCESS_OBJECT.get_inv_type(type_name='Veldspar')['type_id']]
    # reproc_mat_list_filtered[DATA_ACCESS_OBJECT.get_inv_item(type_name='Plagioclase')['type_id']] = reproc_mat_list[DATA_ACCESS_OBJECT.get_inv_item(type_name='Plagioclase')['type_id']]
    # reproc_mat_list_filtered[DATA_ACCESS_OBJECT.get_inv_item(type_name='Scordite')['type_id']] = reproc_mat_list[DATA_ACCESS_OBJECT.get_inv_item(type_name='Scordite')['type_id']]
    reproc_mat_list_filtered[DATA_ACCESS_OBJECT.get_inv_type(type_name='Pyroxeres')['type_id']] = reproc_mat_list[DATA_ACCESS_OBJECT.get_inv_type(type_name='Pyroxeres')['type_id']]
    reproc_mat_list = reproc_mat_list_filtered

    # define mineral amounts we want to get refining the ores
    mineral_amounts_desired = {}
    mineral_amounts_desired[DATA_ACCESS_OBJECT.get_inv_type(type_name='Tritanium')['type_id']] = 200
    mineral_amounts_desired[DATA_ACCESS_OBJECT.get_inv_type(type_name='Nocxium')['type_id']] = 1
    # mineral_amounts_desired[DATA_ACCESS_OBJECT.get_inv_item(type_name='Pyerite')['type_id']] = 160
    # mineral_amounts_desired[DATA_ACCESS_OBJECT.get_inv_item(type_name='Mexallon')['type_id']] = 80

    # define variables for building matrices
    list_of_mineral_matrices = []
    mineral_amounts_desired_matrix = []
    ore_quantity_matrix = []

    # build matrices for linear programming
    for k_ore, v_minerals in reproc_mat_list.items():
        min_quantity_list = [-float(quant) for quant in v_minerals.values()]
        list_of_mineral_matrices.append(min_quantity_list)
        ore_quantity_matrix.append(float(1.0))
        min_id_list = v_minerals.keys()
        for min_id in min_id_list:
            if min_id not in mineral_amounts_desired:
                mineral_amounts_desired_matrix.append(float(0.0))
            else:
                mineral_amounts_desired_matrix.append(-float(mineral_amounts_desired[min_id]))

    A = matrix(list_of_mineral_matrices)
    b = matrix(mineral_amounts_desired_matrix)
    c = matrix(ore_quantity_matrix)

    printing.options['dformat'] = '%.1f'
    printing.options['width'] = -1
    print(A)
    print(b)
    print(c)

    sol = solvers.lp(c, A, b)
    print(sol['x'])
def pfOptimizer7(longTickers, shortTickersHigh, shortTickersLow, Coef, Res, CarhartDaily, StockBeta, BETA_BOUND, WEIGHT_BOUND, back_date, start_date): shortTickers = shortTickersHigh.append(shortTickersLow).sort('ticker').reset_index(drop=True) longTickers['bWeight'] = 1 # binary weights shortTickers['bWeight'] = -1 shortTickersLow['bWeight'] = -1 pfTickers = pd.concat((shortTickers[['ticker']], longTickers[['ticker']]), axis=0).sort('ticker').reset_index(drop=True) sens = Coef[['ticker', 'Mkt-RF', 'SMB', 'HML', 'UMD']].merge(pfTickers) mBeta = matrix(sens['Mkt-RF']) mBeta_smb = matrix(sens['SMB']) mBeta_hml = matrix(sens['HML']) mBeta_umd = matrix(sens['UMD']) mCqaBeta = matrix(StockBeta.merge(pfTickers)['cqaBeta']) longIndex = matrix(pfTickers.merge(longTickers, how='left').fillna(0)['bWeight']) mLongIndex = np.diag(pfTickers.merge(longTickers, how='left').fillna(0)['bWeight']) mLongIndex = matrix(mLongIndex[np.logical_or.reduce([np.sum(mLongIndex, 1) > 0.5])]).trans() shortIndex = -matrix(pfTickers.merge(shortTickers, how='left').fillna(0)['bWeight']) mShortIndex = -np.diag(pfTickers.merge(shortTickers, how='left').fillna(0)['bWeight']) mShortIndex = matrix(mShortIndex[np.logical_or.reduce([np.sum(mShortIndex, 1) > 0.5])]).trans() shortLowIndex = -matrix(pfTickers.merge(shortTickersLow, how='left').fillna(0)['bWeight']) N = pfTickers.shape[0] id = spmatrix(1.0, range(N), range(N)) wBounds = matrix(np.ones((N, 1)) * WEIGHT_BOUND) longBounds = matrix(np.zeros((shortTickers.shape[0], 1))) shortBounds = matrix(np.zeros((longTickers.shape[0], 1))) # total_cov = matrix(functions.get_cov(pfTickers, Coef, Res, CarhartDaily, back_date, start_date)) # q = matrix(np.zeros((pfTickers.shape[0], 1))) q = -mBeta_smb - mBeta_hml G = matrix([[mBeta], [-mBeta], # [mBeta_smb], [-mBeta_smb], # [mBeta_hml], [-mBeta_hml], [mBeta_umd], [-mBeta_umd], [mCqaBeta], [-mCqaBeta], [longIndex], [-longIndex], [shortIndex], [-shortIndex], [-shortLowIndex], [id], [-id], [-mLongIndex], [mShortIndex]]).trans() opt_comp = pd.DataFrame() h = matrix([0.1, 0.2, # 0.1, 0.1, # 0.1, 0.1, 0.1, 0.1, BETA_BOUND, BETA_BOUND, 1, -0.98, -0.98, 1, 0.3, wBounds, wBounds, longBounds, shortBounds]) sol = solvers.lp(q, G, h) w_final = sol['x'] wTickers = pfTickers wTickers['weight'] = w_final return (opt_comp, wTickers)
def pfOptimizer_sector(longTickers, shortTickers, Coef, StockBeta, industry_data, BETA_BOUND, WEIGHT_BOUND, SECTOR_WEIGHT):
    longTickers['bWeight'] = 1  # binary weights
    shortTickers['bWeight'] = -1
    pfTickers = pd.concat((shortTickers[['ticker']], longTickers[['ticker']]), axis=0).sort('ticker').reset_index(drop=True)
    pfTickers = pfTickers.merge(industry_data[['ticker', 'sector']])
    sector_list = pfTickers['sector'].unique()

    betas = Coef[['ticker', 'Mkt-RF']].merge(pfTickers).rename(columns={'Mkt-RF': 'beta'}).reset_index(drop=True)
    mBeta = matrix(betas['beta'])
    mCqaBeta = matrix(StockBeta.merge(pfTickers)['cqaBeta'])

    longIndex = matrix(pfTickers.merge(longTickers, how='left').fillna(0)['bWeight'])
    mLongIndex = np.diag(pfTickers.merge(longTickers, how='left').fillna(0)['bWeight'])
    mLongIndex = matrix(mLongIndex[np.logical_or.reduce([np.sum(mLongIndex, 1) > 0.5])]).trans()
    # mLongIndex = matrix(np.diag(tickers.merge(longTickers, how='left').fillna(0)['bWeight']))

    shortIndex = -matrix(pfTickers.merge(shortTickers, how='left').fillna(0)['bWeight'])
    mShortIndex = -np.diag(pfTickers.merge(shortTickers, how='left').fillna(0)['bWeight'])
    mShortIndex = matrix(mShortIndex[np.logical_or.reduce([np.sum(mShortIndex, 1) > 0.5])]).trans()
    # mShortIndex = matrix(np.diag(pfTickers.merge(shortTickers, how='left').fillna(0)['bWeight']))

    sector_index = pfTickers[['ticker', 'sector']]
    for sector in sector_list:
        sector_index.loc[:, sector] = 0.0
        sector_index.ix[sector_index['sector'] == sector, sector] = 1.0
    mSector_index = matrix(sector_index.iloc[:, 2:].as_matrix())

    # wTickers = functions.iniWeights(pfTickers, shortTickers, longTickers)  # initial weights
    wStart = matrix(functions.iniWeights(pfTickers, longTickers, shortTickers)['weight'])

    N = pfTickers.shape[0]
    id = spmatrix(1.0, range(N), range(N))
    wBounds = matrix(np.ones((N, 1)) * WEIGHT_BOUND)
    longBounds = matrix(np.zeros((shortTickers.shape[0], 1)))
    # longBounds = matrix(np.ones((shortTickers.shape[0], 1)) * 0.002)
    shortBounds = matrix(np.zeros((longTickers.shape[0], 1)))
    # shortBounds = matrix(np.ones((longTickers.shape[0], 1)) * (-0.005))

    A = matrix([[mSector_index], [-mSector_index],
                [mCqaBeta], [-mCqaBeta],
                [longIndex], [-longIndex],
                [shortIndex], [-shortIndex],
                [id], [-id],
                [-mLongIndex], [mShortIndex]]).trans()
    b = matrix([SECTOR_WEIGHT, SECTOR_WEIGHT, SECTOR_WEIGHT, SECTOR_WEIGHT,
                SECTOR_WEIGHT, SECTOR_WEIGHT, SECTOR_WEIGHT, SECTOR_WEIGHT,
                SECTOR_WEIGHT, SECTOR_WEIGHT, SECTOR_WEIGHT, SECTOR_WEIGHT,
                SECTOR_WEIGHT, SECTOR_WEIGHT, SECTOR_WEIGHT, SECTOR_WEIGHT,
                BETA_BOUND, BETA_BOUND,
                1, -0.98, -0.98, 1,
                wBounds, wBounds, longBounds, shortBounds])

    sol = solvers.lp(-mBeta, A, b)
    w_res = sol['x']
    print('cqaBeta = %.4f' % np.float64(w_res.trans() * mCqaBeta)[0, 0])
    print('beta = %.4f' % np.float64(w_res.trans() * mBeta)[0, 0])
    wTickers = pfTickers
    wTickers['weight'] = w_res
    return wTickers
def test_lp(self): from cvxopt import solvers, glpk c,G,h,A,b = self._prob_data sol1 = solvers.lp(c,G,h) self.assertTrue(sol1['status']=='optimal') sol2 = solvers.lp(c,G,h,A,b) self.assertTrue(sol2['status']=='optimal') sol3 = solvers.lp(c,G,h,solver='glpk') self.assertTrue(sol3['status']=='optimal') sol4 = solvers.lp(c,G,h,A,b,solver='glpk') self.assertTrue(sol4['status']=='optimal') sol5 = glpk.lp(c,G,h) self.assertTrue(sol5[0]=='optimal') sol6 = glpk.lp(c,G,h,A,b) self.assertTrue(sol6[0]=='optimal') sol7 = glpk.lp(c,G,h,None,None) self.assertTrue(sol7[0]=='optimal')
def test():
    A = matrix([[-1.0, -1.0, 0.0, 1.0], [1.0, -1.0, -1.0, -2.0]])
    print(A)
    b = matrix([1.0, -2.0, 0.0, 4.0])
    print(b)
    c = matrix([2.0, 1.0])
    print(c)
    sol = solvers.lp(c, A, b)
#===Append Inverse Weight Vector===#
C = numpy.c_[C, -1.0/Weights]
mC = numpy.c_[mC, -1.0/Weights]
#===Form Block Matrix===#
Block = matrix(numpy.r_[C, mC])
#===Form RHS Vector===#
d = matrix(numpy.asmatrix(numpy.r_[Desired, -Desired]).T)
#===Form Cost Vector===#
cost = matrix(numpy.r_[numpy.zeros(M+1), 1])
#===Solve LP===#
sol = solvers.lp(cost, Block, d)
#===Parse Solution===#
a = sol['x'][0:-1]
delta = sol['x'][-1]
print("Delta:", delta)
#===Make Filter===#
design = list(a[-1:0:-1])
design.append(2.0*a[0])
temp = list(a[1::])
for t in temp:
    design.append(t)
print("Filter Length:", len(design), filter_length)
#===Ensure Unity Gain===#
I[2 * ind] = ind J[2 * ind] = listing[0] val[2 * ind] = -1 I[2 * ind + 1] = ind J[2 * ind + 1] = S + listing[1] ctr = listing[2] cvr = listing[3] price = listing[4] mcpc = listing[5] val[2 * ind + 1] = -ctr * mcpc b[ind] = -ctr * (mcpc + 0.03 * cvr * price) ind += 1 I_2 = [total + i for i in range(S)] J_2 = [i for i in range(S)] val_2 = [-1 for i in range(S)] b_2 = [0 for i in range(S)] I_3 = [total + S + i for i in range(M)] J_3 = [S + i for i in range(M)] val_3 = [-1 for i in range(M)] b_3 = [0 for i in range(M)] mat = spmatrix(val + val_2 + val_3, I + I_2 + I_3, J + J_2 + J_3, (total + S + M, S + M)) c = matrix([1 for i in range(S)] + budget_shop) h = matrix(b + b_2 + b_3) sol = solvers.lp(c, mat, h, feastol=1e-4) np.save('/home/ubuntu/dual_sol/solution', sol['x'])
wgl = matrix(
    np.loadtxt(file_group_weights, delimiter=",", skiprows=2, usecols=(1)))

# --------------------------------------------------------------------
# Find minimum cost food composition
# --------------------------------------------------------------------
# solve the linear program
# minimize: C'X
# subject to: GX <= h, X >= 0
G = matrix([N, -N, -I, I])
h = scale * matrix([nh, -nl, -wl, wh])
# G = matrix([N,-N,-I,I,-group_def_matrix, group_def_matrix])
# h = matrix([nh,-nl,-wl,wh,-gwl,gwh])
X = solvers.lp(C, G, h)
# X=solvers.lp(C,G,h,solver='glpk')

# --------------------------------------------------------------------
# Find minimum weight food composition
# --------------------------------------------------------------------
# solve the linear program
# minimize: 1'*Y (i.e., total weight = y_1+y_2+...+y_{nf}, where 1 is a column vector of 1s, and 1' is its transpose)
# subject to: GY <= h, Y >= 0
D = matrix(np.ones((nf, 1)))
G = matrix([N, -N, -I, I, -group_def_matrix, group_def_matrix, C.trans()])
h = scale * matrix([nh, -nl, -wl, wh, -wgl, wgh, cost_hi])
Y = solvers.lp(D, G, h)
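# The fragment above assumes N, C, nl/nh, wl/wh, the group matrices, and
# scale were built earlier in the script. A self-contained toy version of
# the same minimum-cost composition LP, with made-up numbers:
import numpy as np
from cvxopt import matrix, solvers

N = np.array([[2.0, 1.0, 0.5],    # nutrients (rows) per unit of each food (columns)
              [0.5, 1.5, 2.0]])
nl, nh = np.array([4.0, 4.0]), np.array([10.0, 10.0])  # nutrient lower/upper bounds
C = matrix([3.0, 2.0, 4.0])                            # unit costs
G = matrix(np.vstack([N, -N, -np.eye(3)]))             # N x <= nh, -N x <= -nl, x >= 0
h = matrix(np.hstack([nh, -nl, np.zeros(3)]))
X = solvers.lp(C, G, h)
print(np.array(X['x']).ravel())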
def solveLPWithDIC(ranking, k, dataSetName, algoName):
    """
    Solve the linear program with DIC

    @param ranking: list of candidate objects in the ranking
    @param k: length of the ranking
    @param dataSetName: Name of the data set the candidates are from
    @param algoName: Name of the input algorithm

    return doubly stochastic matrix as numpy array
    """
    print('Start building LP with DIC.')
    #calculate the attention vector v using 1/log(1+indexOfRanking)
    u = []
    unproU = 0
    proU = 0
    proCount = 0
    unproCount = 0
    proListX = []
    unproListX = []

    for candidate in ranking[:k]:
        u.append(candidate.learnedScores)

    # initialize v with DCG
    v = np.arange(1, (k + 1), 1)
    v = 1 / np.log2(1 + v + 1)
    v = np.reshape(v, (1, k))

    arrayU = np.asarray(u)

    #normalize input
    arrayU = (arrayU - np.min(arrayU)) / (np.max(arrayU) - np.min(arrayU))

    I = []
    J = []
    I2 = []
    #set up indices for column and row constraints
    for j in range(k**2):
        J.append(j)

    for i in range(k):
        for j in range(k):
            I.append(i)

    for i in range(k):
        for j in range(k):
            I2.append(j)

    for i in range(k):
        if ranking[i].isProtected == True:
            proCount += 1
            proListX.append(i)
            proU += arrayU[i]
        else:
            unproCount += 1
            unproListX.append(i)
            unproU += arrayU[i]

    arrayU = np.reshape(arrayU, (k, 1))

    uv = arrayU.dot(v)
    uv = uv.flatten()

    #negate objective function to convert maximization problem to minimization problem
    uv = np.negative(uv)

    # check if there are protected items
    if proCount == 0:
        print('Cannot create a P for ' + algoName + ' on data set ' + dataSetName + ' because no protected items in data set.')
        return 0, False
    # check if there are unprotected items
    if unproCount == 0:
        print('Cannot create a P for ' + algoName + ' on data set ' + dataSetName + ' because no unprotected items in data set.')
        return 0, False

    proU = proU / proCount
    unproU = unproU / unproCount

    initf = np.zeros((k, 1))
    initf[proListX] = (1 / (proCount * proU)) * arrayU[proListX]
    initf[unproListX] = (-(1 / (unproCount * unproU)) * arrayU[unproListX])

    f1 = initf.dot(v)
    f1 = f1.flatten()
    f1 = np.reshape(f1, (1, k**2))

    f = matrix(f1)

    #set up constraints x <= 1
    A = spmatrix(1.0, range(k**2), range(k**2))
    #set up constraints x >= 0
    A1 = spmatrix(-1.0, range(k**2), range(k**2))
    #set up constraints that sum(rows)=1
    M = spmatrix(1.0, I, J)
    #set up constraints sum(columns)=1
    M1 = spmatrix(1.0, I2, J)

    #values for sums of columns and rows == 1
    h1 = matrix(1.0, (k, 1))
    #values for x <= 1
    b = matrix(1.0, (k**2, 1))
    #values for x >= 0
    d = matrix(0.0, (k**2, 1))

    #construct objective function
    c = matrix(uv)
    #assemble constraint matrix as sparse matrix
    G = sparse([M, M1, A, A1, f])
    #assemble constraint values
    h = matrix([h1, h1, b, d, 0.0])

    print('Start solving LP with DIC.')
    try:
        sol = solvers.lp(c, G, h)
    except Exception:
        print('Cannot create a P for ' + algoName + ' on data set ' + dataSetName + '.')
        return 0, False
    print('Finished solving LP with DIC.')

    return np.array(sol['x']), True
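# A hypothetical smoke test, assuming only a minimal candidate object that
# exposes the two attributes the function reads (learnedScores, isProtected):
from collections import namedtuple
import numpy as np

Candidate = namedtuple('Candidate', ['learnedScores', 'isProtected'])
ranking = [Candidate(s, i % 2 == 0)                   # alternate protected/unprotected
           for i, s in enumerate(np.linspace(1.0, 0.1, 6))]
P, ok = solveLPWithDIC(ranking, 6, 'toy', 'demo')
if ok:
    print(np.round(P.reshape(6, 6).sum(axis=0), 3))   # column sums should be ~1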
def solvep(self):
    """
    Solve the Inverse RL problem

    :return: Scalarization weights as array.
    """
    n_states, n_actions, reward_dimension, gamma, P, R, pi, P_pi = self._prepare_variables()
    v = self._prepare_v(n_states, n_actions, reward_dimension, P)

    # weights for minimization term
    c = np.vstack([
        np.ones((n_states, 1)),
        np.zeros((n_states * (n_actions - 1), 1)),
        np.zeros((reward_dimension, 1))
    ])

    # big block selection matrix
    G = np.vstack([
        np.hstack([
            np.zeros((reward_dimension, n_states)),
            np.zeros((reward_dimension, n_states * (n_actions - 1))),
            np.eye(reward_dimension)
        ]),
        np.hstack([
            np.zeros((reward_dimension, n_states)),
            np.zeros((reward_dimension, n_states * (n_actions - 1))),
            -np.eye(reward_dimension)
        ]),
        np.hstack([
            np.vstack([
                self._vertical_ones(n_actions - 1, n_states, i)
                for i in range(n_states)
            ]),
            np.eye(n_states * (n_actions - 1), n_states * (n_actions - 1)),
            np.zeros((n_states * (n_actions - 1), reward_dimension))
        ]),
        np.hstack([
            np.zeros((n_states * (n_actions - 1), n_states)),
            np.eye(n_states * (n_actions - 1), n_states * (n_actions - 1)),
            np.vstack([
                np.vstack([
                    -v[i, j, :].reshape(1, -1) for j in range(n_actions - 1)
                ]) for i in range(n_states)
            ])
        ]),
        np.hstack([
            np.zeros((n_states * (n_actions - 1), n_states)),
            2 * np.eye(n_states * (n_actions - 1), n_states * (n_actions - 1)),
            np.vstack([
                np.vstack([
                    -v[i, j, :].reshape(1, -1) for j in range(n_actions - 1)
                ]) for i in range(n_states)
            ])
        ])
    ])

    # right hand side of inequalities
    #h = np.vstack([np.ones((2 * reward_dimension, 1)), np.zeros(((n_states * (n_actions - 1)) * 3, 1))])
    h = np.vstack([
        np.ones((reward_dimension, 1)),
        np.zeros((reward_dimension, 1)),
        np.zeros(((n_states * (n_actions - 1)) * 3, 1))
    ])

    solution = solvers.lp(matrix(-c), matrix(G), matrix(h))
    alpha = np.asarray(solution['x'][-reward_dimension:])
    return alpha.ravel()
#!/opt/local/bin/python2.7 from cvxopt import matrix, solvers # Constraints a = matrix([[1., 3., 3., -1., 0, 0], [-1., 2., 2., 0, -1., 0], [1., 4., 0., 0., 0., -1.]]) b = matrix([20., 42., 30., 0., 0., 0.]) # Components of the functional c = matrix([-5., -4., -6.]) # Solve the linear program sol = solvers.lp(c, a, b) print(sol['x'])
def path_decompose(a, b, a_true, b_true, overwrite_norm, P, use_GLPK, sparsity=False):
    '''This function takes in a node's information in the attempt to decompose it into
    the lowest number of paths that accounts for the flow constraints.
    The algorithm uses many trials of a randomized optimization model and takes the
    best result seen.
    **Do not use overwrite_norm, because a_true and b_true currently hold normalization
    instead of copycount.
    a: a vector of the copycounts of the in-edges for the node.
    b: a vector of the copycounts of the out-edges for the node.
    a_true: a vector that should have the copycount values for the in-edges but does not. (Don't use)
    b_true: a vector that should have the copycount values for the out-edges but does not. (Don't use)
    overwrite_norm: decides whether or not to use a_true and b_true.
    P: a matrix of in-edges versus out-edges that has a 0 if there is a known path and a 1 otherwise.
    '''
    #mb_check = 1 #if this parameter is set to 1, if the data can be set using MB, then it
    from cvxopt import matrix, solvers, spmatrix
    import numpy, copy
    from numpy import linalg as LA
    from numpy import array
    from numpy import zeros
    import operator
    solvers.options['msg_lev'] = 'GLP_MSG_OFF'
    m = len(a)  ## a is a vector of the current in-edge copycount values.
    n = len(b)  ## b is a vector of the current out-edge copycount values.

    ## Trivial case
    if m == 0 or n == 0:
        return [[], 0]

    ## Make all in-flow values non-zero.
    for i in range(m):
        a[i] = max(a[i], 1e-10)
    for j in range(n):
        b[j] = max(b[j], 1e-10)

    ## If the flow in does not equal the flow out, make them equal.
    if sum(a) > sum(b):
        const = (sum(a) - sum(b)) / n
        b = [k + const for k in b]
    else:
        const = (sum(b) - sum(a)) / m
        a = [k + const for k in a]

    ## Trivial cases.
    if m == 1:
        answer = array(matrix(b, (m, n)))
        return [answer, 0]
    elif n == 1:
        answer = array(matrix(a, (m, n)))
        return [answer, 0]

    A = matrix(0., (m + n - 1, m * n))  ## A is used to enforce the flow constraint on the decomposition.
    p = matrix(0., (m * n, 1))  ## This vector tells whether there is a known path for the pair of nodes or not.
    for i in range(m):
        for j in range(n):
            A[i, j * m + i] = 1.  #check indexing
            p[j * m + i] = P[i, j]
    for j in range(n - 1):  #Must be range(n) to get the full matrix
        for i in range(m):
            A[m + j, j * m + i] = 1.
    z = a + b
    rhs = matrix(z)
    rhs = rhs[0:n + m - 1, 0]  ## this vector is used to enforce flow constraints
    weight = LA.norm(a, 1)  ## (Not used)
    eps = 0.001
    tol = eps * weight  #test for significance.  Used for various purposes
    sparsity_factor = 0.4  #very aggressive currently - revert to 0.1 later
    removal_factor = 0.4
    scale = max(max(rhs), 1e-100) * 0.01
    #print(rhs)
    trials = int(round(min(2 * m * n * max(m, n), 100)))  ## Number of randomized trials.
    curr_min = m * n + 1  ## The lowest number of non-known paths seen so far.
    curr_ans = []  ## The best solution seen so far.
    curr_err = 0
    curr_mult = 0  ## multiplicity of current solution
    curr_on_unknown = 0  ## amount of flow on non "known paths".
    for ctr in range(trials):
        ## randomize the coefficients for the non-known-path flow values.
        c = matrix(abs(numpy.random.normal(0, 1, (m * n, 1))))
        for i in range(m * n):
            c[i] = c[i] * p[i]
        ## G and h are used to make sure that the flow values are non-negative.
        G = spmatrix(-1., range(m * n), range(m * n))
        h = matrix(0., (m * n, 1))
        if use_GLPK:
            sol = solvers.lp(c=c, G=G, h=h, A=A, b=rhs / scale, solver='glpk')
        else:
            sol = solvers.lp(c=c, G=G, h=h, A=A, b=rhs / scale)
        temp_sol = array(sol['x']) * scale
        another_sol = copy.deepcopy(temp_sol)
        if overwrite_norm:  ## Do not use right now because a_true and b_true are wrong.
            #the true values of copy count are used to decide thresholding behavior
            a = a_true[:]
            b = b_true[:]
        ## This loop basically sets the flow values equal to 0 under certain conditions.
        for i in range(m):
            for j in range(n):
                if another_sol[j * m + i] < sparsity_factor * min(a[i], b[j]):
                    another_sol[j * m + i] = 0
                if temp_sol[j * m + i] < removal_factor * min(a[i], b[j]) or temp_sol[j * m + i] < tol:  # temporarily disabled
                    temp_sol[j * m + i] = 0
                    another_sol[j * m + i] = 0
                if another_sol[j * m + i] < 0:
                    another_sol[j * m + i] = 0
                    temp_sol[j * m + i] = 0
        s = 0
        ## s equals how many non-zero flows we are sending down non "known paths" in the temp solution
        for i in range(m * n):
            if p[i] > 0:  #Only count the paths that are not supported
                s = s + numpy.count_nonzero(another_sol[i])
        ## if the temporary solution is less than the current minimum solution, replace the current
        ## minimum solution with the temporary solution.
        if s < curr_min:
            curr_min = s  ## current minimum value of non "known paths" in solution
            curr_ans = temp_sol[:]  ## answer that attains it.
            curr_mult = 0  ## This is how many times a solution with this many non-zero non-known paths is seen.
            curr_on_unknown = local_dot(array(p), temp_sol)  ## This says how much flow we are sending down non "known paths"
        else:
            if s == curr_min:
                if LA.norm(curr_ans - temp_sol) > tol:  ## Determines whether to classify the solutions as different.
                    curr_mult = curr_mult + 1
                if curr_ans == [] or (abs(sum(sum(temp_sol)) - sum(sum(curr_ans))) < tol and
                                      (local_dot(array(p), temp_sol) < curr_on_unknown)) or sum(sum(temp_sol)) > sum(sum(curr_ans)):
                    ## These are a few conditions that make it the temporary solution:
                    ## 1: curr_sol is empty. 2: total flow difference is below a threshold AND less flow is going
                    ## down unknown paths. 3: total flow is greater than curr_ans total flow.
                    curr_ans = temp_sol[:]
                    curr_on_unknown = local_dot(array(p), temp_sol)

    answer = matrix(0., (m, n))
    if len(curr_ans) < m * n:
        pdb.set_trace()
    for i in range(m):
        for j in range(n):
            answer[i, j] = float(curr_ans[j * m + i])

    # Uniqueness consideration
    answer = array(answer)
    non_unique = 0
    if curr_mult > 1:
        non_unique = 1

    # This makes the flow solution more sparse
    if sparsity != False:
        if m * n > sparsity:
            tmp_dict = {}
            for i in range(m):
                for j in range(n):
                    tmp_dict[(i, j)] = answer[i, j]
            sorted_tmp = sorted(tmp_dict.items(), key=operator.itemgetter(1))[::-1]
            sorted_tmp = sorted_tmp[:sparsity]
            new_ans = zeros((m, n))
            for ind in sorted_tmp:
                new_ans[ind[0][0], ind[0][1]] = ind[1]
            answer = new_ans
    return [answer, non_unique]
h[:, 3] = 1 # acceleration lower and upper h[:, 4] = 20 h[:, 5] = 20 # Cost = 1/2 xPx + qx cost = np.zeros((T, N)) cost[:, 2] = 1.0 # Theta cost P = np.diag(cost.flatten()) #print(P) # or maybe we should only care about the end q = np.zeros((T, N)) q[:, 2] = -np.pi # offset P = sparse(matrix(P)) q = matrix(q.flatten()) A = sparse(matrix(A.reshape((T * (N - 1), T * N)))) b = matrix(b.flatten()) G = sparse(matrix(G.reshape((T * 6, N * T)))) h = matrix(h.flatten()) # If sin(theta) is cost, # cos(theta) would be a c vector for cost # maybe an initial guess for x could help # It all kind of seems too slow, but maybe glpk could help for i in range(10): lp(q, G, h, A, b) #qp(P, q, G, h, A, b)
def outlierScore(neighbors, W, p, edges, C, s): from cvxopt import matrix, solvers """ Returns outlier score""" pairs = list(itertools.combinations(neighbors, 2)) Hm = [] Lm = [] w = [] zeta = [] b = [] c = [] for i in range(p): w.append([]) for rg in range(len(pairs)): zeta.append([]) j = 0 for p in pairs: # Equation 3.9-3.12 n1 = min(p) n2 = max(p) if n2 in edges[ n1]: # Equation 3.9 and 3.10 (alternative lines for each) Hm.append(-1.0) Hm.append(0.0) Lm.append(0.0) Lm.append(0.0) for i in range(len(w)): w[i].append(1.0 * abs(W[n1][i] - W[n2][i])) w[i].append(0.0) for i in range(len(zeta)): if (i == j): zeta[i].append(-1.0) zeta[i].append(-1.0) else: zeta[i].append(0.0) zeta[i].append(0.0) else: # Equation 3.11 and 3.12 (alternative lines for each) Hm.append(0.0) Hm.append(0.0) Lm.append(1.0) Lm.append(0.0) for i in range(len(w)): w[i].append(-1.0 * abs(W[n1][i] - W[n2][i])) w[i].append(0.0) for i in range(len(zeta)): if (i == j): zeta[i].append(-1.0) zeta[i].append(-1.0) else: zeta[i].append(0.0) zeta[i].append(0.0) b.append(0.0) b.append(0.0) j += 1 for i in range(len(w)): # Equation 3.13 Hm.append(0.0) Hm.append(0.0) Lm.append(0.0) Lm.append(0.0) for i in range(len(zeta)): zeta[i].append(0.0) zeta[i].append(0.0) for j in range(len(w)): if (i == j): w[j].append(1.0) w[j].append(-1.0) else: w[j].append(0.0) w[j].append(0.0) b.append(1.0) b.append(0.0) # Equation 3.14 Hm.append(0.0) Hm.append(0.0) Lm.append(0.0) Lm.append(0.0) for i in range(len(zeta)): zeta[i].append(0.0) zeta[i].append(0.0) for i in range(len(w)): w[i].append(1.0) w[i].append(-1.0) b.append(1.0) b.append(-1.0) # Now populating c, the order of coefficients in c is Hm, Lm, w, zeta c.append(1.0) c.append(-1.0) for i in w: c.append(0.0) for i in zeta: c.append(1.0 * C / len(pairs)) tempList = [] tempList.append(Hm) tempList.append(Lm) for i in w: tempList.append(i) for i in zeta: tempList.append(i) A = matrix(tempList) b = matrix(b) c = matrix(c) # Now feeding the input to simplex algo # print A # print b # print c try: sol = solvers.lp(c, A, b, solver='glpk') # print sol['x'] except Exception as e: print("error") print(e) time.sleep(1) return 0, None if sol['x'] == None: return None, None else: w1 = [] for i in range(len(w)): w1.append([i, sol['x'][i + 2]]) w1 = sorted(w1, key=lambda x: x[1]) #print("w1:", w1) return sol['x'][0] - sol['x'][1], [w1[i][0] for i in range(s)]
def ingredientsToRecipe(b, A, y):
    # Naming follows the caller: b is the cost vector c, A the inequality
    # matrix G, and y the right-hand side h of cvxopt's solvers.lp(c, G, h).
    sol = solvers.lp(b, A, y)
    print('recipes', sol['x'])
def lp_irl(trans_probs, policy, gamma=0.5, l1=10, R_max=10):
    """
    inputs:
        trans_probs       NxNxN_ACTIONS transition matrix
        policy            policy vector / map
        R_max             maximum possible value of recovered rewards
        gamma             RL discount factor
        l1                l1 regularization lambda
    returns:
        rewards           Nx1 reward vector
    """
    print(np.shape(trans_probs))
    N_STATES, _, N_ACTIONS = np.shape(trans_probs)
    N_STATES = int(N_STATES)
    N_ACTIONS = int(N_ACTIONS)

    # Formulate a linear IRL problem
    A = np.zeros([2 * N_STATES * (N_ACTIONS + 1), 3 * N_STATES])
    b = np.zeros([2 * N_STATES * (N_ACTIONS + 1)])
    c = np.zeros([3 * N_STATES])

    for i in range(N_STATES):
        a_opt = int(policy[i])
        tmp_inv = np.linalg.inv(
            np.identity(N_STATES) - gamma * trans_probs[:, :, a_opt])
        cnt = 0
        for a in range(N_ACTIONS):
            if a != a_opt:
                A[i * (N_ACTIONS - 1) + cnt, :N_STATES] = - \
                    np.dot(trans_probs[i, :, a_opt] - trans_probs[i, :, a], tmp_inv)
                A[N_STATES * (N_ACTIONS - 1) + i * (N_ACTIONS - 1) + cnt, :N_STATES] = - \
                    np.dot(trans_probs[i, :, a_opt] - trans_probs[i, :, a], tmp_inv)
                A[N_STATES * (N_ACTIONS - 1) + i * (N_ACTIONS - 1) + cnt, N_STATES + i] = 1
                cnt += 1

    for i in range(N_STATES):
        A[2 * N_STATES * (N_ACTIONS - 1) + i, i] = 1
        b[2 * N_STATES * (N_ACTIONS - 1) + i] = R_max
    for i in range(N_STATES):
        A[2 * N_STATES * (N_ACTIONS - 1) + N_STATES + i, i] = -1
        b[2 * N_STATES * (N_ACTIONS - 1) + N_STATES + i] = 0
    for i in range(N_STATES):
        A[2 * N_STATES * (N_ACTIONS - 1) + 2 * N_STATES + i, i] = 1
        A[2 * N_STATES * (N_ACTIONS - 1) + 2 * N_STATES + i, 2 * N_STATES + i] = -1
    for i in range(N_STATES):
        A[2 * N_STATES * (N_ACTIONS - 1) + 3 * N_STATES + i, i] = 1
        A[2 * N_STATES * (N_ACTIONS - 1) + 3 * N_STATES + i, 2 * N_STATES + i] = -1

    c[N_STATES:2 * N_STATES] = -1
    c[2 * N_STATES:] = l1

    sol = solvers.lp(matrix(c), matrix(A), matrix(b))
    rewards = sol['x'][:N_STATES]
    rewards = normalize(rewards) * R_max
    return rewards
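# lp_irl also leans on a normalize helper that is not shown; below is a
# plausible stand-in plus a two-state sanity check (both hypothetical).
import numpy as np

def normalize(v):
    # rescale into [0, 1]; an assumption, not the original helper
    v = np.array(v, dtype=float).ravel()
    return (v - v.min()) / (v.max() - v.min() + 1e-12)

# Two states, two actions: action 0 stays put, action 1 swaps states.
stay, swap = np.eye(2), np.eye(2)[::-1]
trans_probs = np.stack([stay, swap], axis=2)   # shape (N, N, N_ACTIONS)
policy = np.array([0, 1])                      # expert always heads for state 0
print(lp_irl(trans_probs, policy, gamma=0.9))  # state 0 should get the larger reward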
def optimize(sr_grid, expense, W, init=None, eps=1e-2, verbose=False, max_iter=10000): solvers.options['show_progress'] = False solvers.options['glpk'] = dict(msg_lev='GLP_MSG_OFF') n = sr_grid.shape[0] d = np.zeros(n + 1) d[0] = 1. # A1 ... sum(p) = 1 A1 = np.ones((1, n + 1)) A1[0, 0] = 0. A1 = A1 b = np.array([1.]) stimulus_pdfs = [] if init is None: new_pdf = np.ones(n) new_pdf = new_pdf / new_pdf.sum() elif all(init > 0) and np.isclose(np.sum(init), 1.): if len(init) == n: new_pdf = init else: raise ValueError( 'init has incorrect size. {} expected but len(init) is {}'. format(n, len(init))) else: raise ValueError('init is not a probability distribution') stimulus_pdfs.append(new_pdf) sensitivity_matrix = np.zeros((max_iter, n + 1)) sensitivity_matrix[:, 0] = 1. # G0 ... p > 0 G0 = -np.eye(sr_grid.shape[0], sr_grid.shape[0] + 1, 1, dtype=float) cost_matrix = np.concatenate(([[0]], [expense]), axis=1) G = np.concatenate((cost_matrix, G0, sensitivity_matrix)) # G = np.concatenate((G0, sensitivity_matrix)) h = np.zeros(G.shape[0], dtype=float) h[0] = W min_capacity = 0 for i in range(max_iter): prior = sr_grid.T.dot(stimulus_pdfs[i]) new_sensitivity = sensitivity(sr_grid, prior) lim = 1e2 new_sensitivity[new_sensitivity > lim] = lim sensitivity_matrix[i, 1:] = -new_sensitivity # G[G0.shape[0] + i, 1:] = -new_sensitivity G[G0.shape[0] + i + 1, 1:] = -new_sensitivity # row_count = G0.shape[0] + i + 1 row_count = G0.shape[0] + i + 2 if i >= 0: res = solvers.lp(matrix(-d), matrix(G[:row_count]), matrix(h[:row_count]), matrix(A1), matrix(b), solver=None) else: res = solvers.lp(-d, G[:row_count], h[:row_count], A1, b, solver=None) # print(res) stimulus_pdfs.append(np.array(res['x']).T[0, 1:]) prior = sr_grid.T.dot(stimulus_pdfs[-1]) new_sensitivity = sensitivity(sr_grid, prior) lim = 1e2 new_sensitivity[new_sensitivity > lim] = lim min_capacity = (new_sensitivity * stimulus_pdfs[-1]).sum() max_capacity = -res['primal objective'] # max_capacity = stimulus capacity = min_capacity if min_capacity > 0: accuracy = (max_capacity - min_capacity) / capacity else: accuracy = np.inf if verbose is True: print('min: {}, max: {}, accuracy: {}'.format( min_capacity, max_capacity, (max_capacity - min_capacity) / min_capacity)) if accuracy < eps: break else: warnings.warn(f'Maximum number of iterations ({max_iter}) reached', RuntimeWarning) capacity = (min_capacity + max_capacity) / 2 out_pdf = sr_grid.T.dot(stimulus_pdfs[-1]) return { 'pdf': stimulus_pdfs[-1], 'fun': capacity, 'sensitivity': sensitivity(sr_grid, out_pdf) }
def llp_irl(sf, M, k, T, phi, *, N=1000, p=2.0, verbose=False): """ Implements Linear Programming IRL for large state spaces by NG and Russell, 2000 See https://thinkingwires.com/posts/2018-02-13-irl-tutorial-1.html for a good reference. @param sf - A state factory function that takes no arguments and returns an i.i.d. sample from the MDP state space @param M - The number of sub-samples to draw from the state space |S_0| when estimating the expert's reward function @param k - The number of actions |A| @param T - A sampling transition function T(s, ai) -> s' encoding a stationary deterministic policy. The structure of T must be that the 0th action T(:, 0) corresponds to a sample from the expert policy, and T(:, i), i != 0 corresponds to a sample from the ith non-expert action at each state, for some arbitrary but consistent ordering of states @param phi - A vector of d basis functions phi_i(s) mapping from S to real numbers @param N - Number of transition samples to use when computing expectations over the Value basis functions @param p - Penalty function coefficient. Ng and Russell find p=2 is robust Must be >= 1 @param verbose - If true, progress information will be shown @return A vector of d 'alpha' coefficients for the basis functions phi(S) that allows rewards to be computed for a state via the inner product alpha_i · phi @return A result object from the optimiser """ # Measure number of basis functions d = len(phi) # Enforce valid penalty function coefficient assert p >= 1, \ "Penalty function coefficient must be >= 1, was {}".format(p) def compute_value_expectation_tensor(sf, M, k, T, phi, N, verbose): """ Computes the value expectation tensor VE This is an array of shape (d, k-1, M) where VE[:, i, j] is a vector of coefficients that, when multiplied with the alpha vector give the expected difference in value between the expert policy action and the ith action from state s_j @param sf - A state factory function that takes no arguments and returns an i.i.d. sample from the MDP state space @param M - The number of sub-samples to draw from the state space |S_0| when estimating the expert's reward function @param k - The number of actions |A| @param T - A sampling transition function T(s, ai) -> s' encoding a stationary deterministic policy. The structure of T must be that the 0th action T(:, 0) corresponds to a sample from the expert policy, and T(:, i), i != 0 corresponds to a sample from the ith non-expert action at each state, for some arbitrary but consistent ordering of states @param phi - A vector of d basis functions phi_i(s) mapping from S to real numbers @param N - Number of transition samples to use when computing expectations over the value basis functions @param verbose - If true, progress information will be shown @return The value expectation tensor VE. A numpy array of shape (d, k-1, M) """ def expectation(fn, sf, N): """ Helper function to estimate an expectation over some function fn(sf()) @param fn - A function of a single variable that the expectation will be computed over @param sf - A state factory function - takes no variables and returns an i.i.d. 
sample from the state space
    @param N - The number of draws to use when estimating the expectation

    @return An estimate of the expectation E[fn(sf())]
    """
    return sum([fn(sf()) for _ in range(N)]) / N

# Measure number of basis functions
d = len(phi)

# Prepare tensor
VE_tensor = np.zeros(shape=(d, k-1, M))

# Draw M initial states from the state space
for j in range(M):
    if verbose: print("{} / {}".format(j, M))
    s_j = sf()

    # Compute E[phi(s')] where s' is drawn from the expert policy
    expert_basis_expectations = np.array([
        expectation(phi[di], lambda: T(s_j, 0), N) for di in range(d)
    ])

    # Loop over k-1 non-expert actions
    for i in range(1, k):

        # Compute E[phi(s')] where s' is drawn from the ith non-expert action
        ith_non_expert_basis_expectations = np.array([
            expectation(phi[di], lambda: T(s_j, i), N) for di in range(d)
        ])

        # Compute and store the expectation difference for this initial
        # state
        VE_tensor[:, i-1, j] = expert_basis_expectations - \
            ith_non_expert_basis_expectations

return VE_tensor

# Precompute the value expectation tensor VE
# This is an array of shape (d, k-1, M) where VE[:, i, j] is a vector of
# coefficients that, when multiplied with the alpha vector, gives the
# expected difference in value between the expert policy action and the
# ith action from state s_j
if verbose: print("Computing expectations...")
VE_tensor = compute_value_expectation_tensor(sf, M, k, T, phi, N, verbose)

# Formulate the linear programming problem constraints
# NB: The general form for adding a constraint looks like this
# c, A_ub, b_ub = f(c, A_ub, b_ub)
if verbose: print("Composing LP problem...")

def add_costly_single_step_constraints(c, A_ub, b_ub):
    """
    Augments the objective and adds constraints to implement the Linear
    Programming IRL method for large state spaces

    This will add M extra variables and 2M*(k-1) constraints

    NB: Assumes the true optimisation variables are first in the c vector
    """

    # Step 1: Add the extra optimisation variables for each min{} operator
    # (one per sampled state)
    c = np.hstack([np.zeros(shape=(1, d)), np.ones(shape=(1, M))])
    A_ub = np.hstack([A_ub, np.zeros(shape=(A_ub.shape[0], M))])

    # Step 2: Add the constraints

    # Loop for each of the starting sampled states s_j
    for j in range(M):
        if verbose: print("CSS: Adding constraints ({:.2f}%)".format(j/M*100))

        # Loop over the k-1 non-expert actions
        for i in range(1, k):

            # Add two constraints, one for each half of the penalty
            # function p(x)
            constraint_row = np.hstack([VE_tensor[:, i-1, j],
                                        np.zeros(shape=M)])
            constraint_row[d + j] = -1
            A_ub = np.vstack((A_ub, constraint_row))
            b_ub = np.vstack((b_ub, 0))

            constraint_row = np.hstack([p * VE_tensor[:, i-1, j],
                                        np.zeros(shape=M)])
            constraint_row[d + j] = -1
            A_ub = np.vstack((A_ub, constraint_row))
            b_ub = np.vstack((b_ub, 0))

    return c, A_ub, b_ub

def add_alpha_size_constraints(c, A_ub, b_ub):
    """
    Add constraints for a maximum |alpha| value of 1

    This will add 2 * d extra constraints

    NB: Assumes the true optimisation variables are first in the c vector
    """
    for i in range(d):
        constraint_row = [0] * A_ub.shape[1]
        constraint_row[i] = 1
        A_ub = np.vstack((A_ub, constraint_row))
        b_ub = np.vstack((b_ub, 1))

        constraint_row = [0] * A_ub.shape[1]
        constraint_row[i] = -1
        A_ub = np.vstack((A_ub, constraint_row))
        b_ub = np.vstack((b_ub, 1))
    return c, A_ub, b_ub

# Prepare LP constraint matrices
c = np.zeros(shape=[1, d], dtype=float)
A_ub = np.zeros(shape=[0, d], dtype=float)
b_ub = np.zeros(shape=[0, 1])

# Compose LP optimisation problem
c, A_ub, b_ub = add_costly_single_step_constraints(c, A_ub, b_ub)
c, A_ub, b_ub = add_alpha_size_constraints(c, A_ub, b_ub)

if verbose:
    print("Number of optimisation variables: {}".format(c.shape[1]))
    print("Number of constraints: {}".format(A_ub.shape[0]))

# Solve for a solution
if verbose: print("Solving LP problem...")

# NB: cvxopt.solvers.lp expects a 1d c vector
solvers.options['show_progress'] = verbose
res = solvers.lp(matrix(c[0, :]), matrix(A_ub), matrix(b_ub))

# Extract the true optimisation variables
alpha_vector = res['x'][0:d].T

return alpha_vector, res
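# A minimal usage sketch for the LP-IRL routine above. Everything here is
# illustrative: the enclosing function's name (assumed to be `large_irl`)
# and the sampler/transition/basis arguments are placeholders, not from the
# original source.
import numpy as np

sf = lambda: np.random.uniform(0.0, 1.0)              # state sampler
T = lambda s, i: min(max(s + (0.1 if i == 0 else -0.1)
                         + 0.01 * np.random.randn(), 0.0), 1.0)
phi = [lambda s: 1.0, lambda s: s, lambda s: s ** 2]  # basis functions

# alpha, res = large_irl(sf, M=20, k=2, T=T, phi=phi, N=50, p=2.0, verbose=False)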
def linear_program(c, G, h, A=None, b=None, solver=None):
    r"""
    Solves the dual linear programs:

    - Minimize `c'x` subject to `Gx + s = h`, `Ax = b`, and `s \geq 0` where
      `'` denotes transpose.

    - Maximize `-h'z - b'y` subject to `G'z + A'y + c = 0` and `z \geq 0`.

    INPUT:

    - ``c`` -- a vector
    - ``G`` -- a matrix
    - ``h`` -- a vector
    - ``A`` -- a matrix
    - ``b`` -- a vector
    - ``solver`` (optional) -- solver to use. If None, cvxopt's lp-solver
      is used. If it is 'glpk', then glpk's solver is used.

    These can be over any field that can be turned into a floating point
    number.

    OUTPUT:

    A dictionary ``sol`` with keys ``x``, ``s``, ``y``, ``z`` corresponding
    to the variables above:

    - ``sol['x']`` -- the solution to the linear program
    - ``sol['s']`` -- the slack variables for the solution
    - ``sol['z']``, ``sol['y']`` -- solutions to the dual program

    EXAMPLES:

    First, we minimize `-4x_1 - 5x_2` subject to `2x_1 + x_2 \leq 3`,
    `x_1 + 2x_2 \leq 3`, `x_1 \geq 0`, and `x_2 \geq 0`::

        sage: c=vector(RDF,[-4,-5])
        sage: G=matrix(RDF,[[2,1],[1,2],[-1,0],[0,-1]])
        sage: h=vector(RDF,[3,3,0,0])
        sage: sol=linear_program(c,G,h)
        sage: sol['x']
        (0.999..., 1.000...)

    Next, we maximize `x+y-50` subject to `50x + 24y \leq 2400`,
    `30x + 33y \leq 2100`, `x \geq 45`, and `y \geq 5`::

        sage: v=vector([-1.0,-1.0,-1.0])
        sage: m=matrix([[50.0,24.0,0.0],[30.0,33.0,0.0],[-1.0,0.0,0.0],[0.0,-1.0,0.0],[0.0,0.0,1.0],[0.0,0.0,-1.0]])
        sage: h=vector([2400.0,2100.0,-45.0,-5.0,1.0,-1.0])
        sage: sol=linear_program(v,m,h)
        sage: sol['x']
        (45.000000..., 6.2499999...3, 1.00000000...)
        sage: sol=linear_program(v,m,h,solver='glpk')
        sage: sol['x']
        (45.0..., 6.25, 1.0...)
    """
    from cvxopt.base import matrix as m
    from cvxopt import solvers
    solvers.options['show_progress'] = False
    if solver == 'glpk':
        from cvxopt import glpk
        glpk.options['LPX_K_MSGLEV'] = 0
    c_ = m(c.base_extend(RDF).numpy())
    G_ = m(G.base_extend(RDF).numpy())
    h_ = m(h.base_extend(RDF).numpy())
    if A is not None and b is not None:
        A_ = m(A.base_extend(RDF).numpy())
        b_ = m(b.base_extend(RDF).numpy())
        sol = solvers.lp(c_, G_, h_, A_, b_, solver=solver)
    else:
        sol = solvers.lp(c_, G_, h_, solver=solver)
    status = sol['status']
    if status != 'optimal':
        return {'primal objective': None, 'x': None, 's': None,
                'y': None, 'z': None, 'status': status}
    x = vector(RDF, list(sol['x']))
    s = vector(RDF, list(sol['s']))
    y = vector(RDF, list(sol['y']))
    z = vector(RDF, list(sol['z']))
    return {'primal objective': sol['primal objective'], 'x': x,
            's': s, 'y': y, 'z': z, 'status': status}
def cc_lp(W):
    # LP relaxation for correlation clustering over the entries of W
    m = W.shape[0]
    W.shape = m*m, 1

    # reflexivity constraints
    G1 = zeros((m, m*m))
    r = array([i*m+i for i in arange(m)])
    G1[arange(m), r] = 1
    h1 = ones(m)
    G11 = zeros((m, m*m))
    G11[arange(m), r] = -1
    h11 = -ones(m)

    ids = array([[i, j] for i in arange(m) for j in arange(m)])
    rc = ids[where(ids[:, 0] < ids[:, 1])]
    r = array([i[0]*m+i[1] for i in rc])

    # symmetry constraints
    nc = len(rc)
    G2 = zeros((nc, m*m))
    G2[arange(nc), r] = 1
    rt = array([i[1]*m+i[0] for i in rc])
    G2[arange(nc), rt] = -1
    h2 = zeros(nc)
    G21 = zeros((nc, m*m))
    G21[arange(nc), r] = -1
    G21[arange(nc), rt] = 1
    h21 = zeros(nc)

    # range [0,1] constraints
    nc = len(r)
    G4 = zeros((nc, m*m))
    G4[arange(nc), r] = -1
    h4 = zeros(nc)
    G41 = zeros((nc, m*m))
    G41[arange(nc), r] = 1
    h41 = ones(nc)

    # transitivity constraints
    ids = array([[i, j, k] for i in arange(m) for j in arange(m) for k in arange(m)])
    ids = ids[where(ids[:, 0] != ids[:, 1])]
    ids = ids[where(ids[:, 1] != ids[:, 2])]
    # there has to be a better way to do this...
    rc = ids[where(ids[:, 2] != ids[:, 0])]
    nc = len(rc)
    r1 = array([i[0]*m+i[1] for i in rc])
    r2 = array([i[1]*m+i[2] for i in rc])
    r3 = array([i[2]*m+i[0] for i in rc])
    G3 = zeros((nc, m*m))
    G3[arange(nc), r1] = 1
    G3[arange(nc), r2] = 1
    G3[arange(nc), r3] = -1
    h3 = ones(nc)

    G = vstack((G1, G11, G2, G21, G3, G4, G41))
    h = hstack((h1, h11, h2, h21, h3, h4, h41))
    sol = solvers.lp(matrix(-W), matrix(G), matrix(h))
    sol = array(sol['x'])
    sol.shape = m, m
    return sol
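# A small, hypothetical usage sketch for cc_lp: relax correlation clustering
# over a 3x3 affinity matrix (positive entries pull items together, negative
# entries push them apart) and round the fractional solution. cc_lp reshapes
# its argument in place, hence the copy.
import numpy as np

W = np.array([[ 1.0,  0.8, -0.5],
              [ 0.8,  1.0, -0.4],
              [-0.5, -0.4,  1.0]])
X = cc_lp(W.copy())
same_cluster = X > 0.5   # simple threshold rounding of the LP relaxation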
def apply_constraints(operator, n_fermions, use_scipy=True): """Function to use linear programming to apply constraints. Args: operator(FermionOperator): FermionOperator with only 1- and 2-body terms that we wish to vectorize. n_fermions(int): The number of particles in the simulation. use_scipy(bool): Whether to use scipy (True) or cvxopt (False). Returns: modified_operator(FermionOperator): The operator with reduced norm that has been modified with equality constraints. """ # Get constraint matrix. n_orbitals = count_qubits(operator) constraints = constraint_matrix(n_orbitals, n_fermions) n_constraints, n_terms = constraints.get_shape() # Get vectorized operator. vectorized_operator = operator_to_vector(operator) initial_bound = numpy.sum(numpy.absolute(vectorized_operator[1::]))**2 print('Initial bound on measurements is %f.' % initial_bound) # Get linear programming coefficient vector. n_variables = n_constraints + n_terms lp_vector = numpy.zeros(n_variables, float) lp_vector[-n_terms:] = 1. # Get linear programming constraint matrix. lp_constraint_matrix = scipy.sparse.dok_matrix((2 * n_terms, n_variables)) for (i, j), value in constraints.items(): if j: lp_constraint_matrix[j, i] = value lp_constraint_matrix[n_terms + j, i] = -value for i in range(n_terms): lp_constraint_matrix[i, n_constraints + i] = -1. lp_constraint_matrix[n_terms + i, n_constraints + i] = -1. # Get linear programming constraint vector. lp_constraint_vector = numpy.zeros(2 * n_terms, float) lp_constraint_vector[:n_terms] = vectorized_operator lp_constraint_vector[-n_terms:] = -vectorized_operator # Perform linear programming. print('Starting linear programming.') if use_scipy: options = {'maxiter': int(1e6)} bound = n_constraints * [(None, None)] + n_terms * [(0, None)] solution = scipy.optimize.linprog(c=lp_vector, A_ub=lp_constraint_matrix.toarray(), b_ub=lp_constraint_vector, bounds=bound, options=options) # Analyze results. print(solution['message']) assert solution['success'] solution_vector = solution['x'] objective = solution['fun']**2 print('Program terminated after %i iterations.' % solution['nit']) else: # Convert to CVXOpt sparse matrix. from cvxopt import matrix, solvers, spmatrix lp_vector = matrix(lp_vector) lp_constraint_matrix = lp_constraint_matrix.tocoo() lp_constraint_matrix = spmatrix(lp_constraint_matrix.data, lp_constraint_matrix.row.tolist(), lp_constraint_matrix.col.tolist()) lp_constraint_vector = matrix(lp_constraint_vector) # Run linear programming. solution = solvers.lp(c=lp_vector, G=lp_constraint_matrix, h=lp_constraint_vector, solver='glpk') # Analyze results. print(solution['status']) solution_vector = numpy.array(solution['x']).transpose()[0] # Alternative bound. residuals = solution_vector[-n_terms:] alternative_bound = numpy.sum(numpy.absolute(residuals[1::]))**2 print('Bound implied by solution vector is %f.' % alternative_bound) # Make sure residuals are positive. for residual in residuals: assert residual > -1e-6 # Get bound on updated Hamiltonian. weights = solution_vector[:n_constraints] final_vectorized_operator = (vectorized_operator - constraints.transpose() * weights) final_bound = numpy.sum(numpy.absolute(final_vectorized_operator[1::]))**2 print('Actual bound determined is %f.' % final_bound) # Return modified operator. modified_operator = vector_to_operator(final_vectorized_operator, n_orbitals) return (modified_operator + hermitian_conjugated(modified_operator)) / 2.
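# A hypothetical usage sketch for apply_constraints, assuming an
# OpenFermion-style environment where FermionOperator and the helpers used
# above (count_qubits, constraint_matrix, operator_to_vector, ...) are in
# scope; the two-qubit operator below is illustrative only.
from openfermion import FermionOperator

hamiltonian = (FermionOperator('0^ 1', 0.5) + FermionOperator('1^ 0', 0.5)
               + FermionOperator('0^ 0 1^ 1', 0.25))
reduced = apply_constraints(hamiltonian, n_fermions=1, use_scipy=True)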
def solvealge(self): """ Solve the Inverse RL problem :return: Scalarization weights as array. """ n_states, n_actions, reward_dimension, gamma, P, R, pi, P_pi = self._prepare_variables( ) v = self._prepare_v(n_states, n_actions, reward_dimension, P) D = reward_dimension x_size = n_states + (n_actions - 1) * n_states * 2 + D c = -np.hstack([np.ones(n_states), np.zeros(x_size - n_states)]) assert c.shape[0] == x_size A = np.vstack([ np.hstack([ np.zeros((n_states * (n_actions - 1), n_states)), np.eye(n_states * (n_actions - 1)), -np.eye(n_states * (n_actions - 1)), np.vstack([ -v[i, j, :].reshape(1, -1) for i in range(n_states) for j in range(n_actions - 1) ]) ]), # np.hstack([ # np.zeros((1, x_size - D)), # np.ones((1, D)) # ]) ]) assert A.shape[1] == x_size b = np.vstack([ np.zeros((A.shape[0], 1)), # np.zeros((A.shape[0] - 1, 1)), # np.ones((1, 1)) ]) # p-Function n = 2.0 bottom_row = np.vstack([ np.hstack([ np.ones((n_actions - 1, 1)).dot(np.eye(1, n_states, l)), np.hstack([ -np.eye(n_actions - 1) if i == l else np.zeros( (n_actions - 1, n_actions - 1)) for i in range(n_states) ]), np.hstack([ n * np.eye(n_actions - 1) if i == l else np.zeros( (n_actions - 1, n_actions - 1)) for i in range(n_states) ]), np.zeros((n_actions - 1, D)) ]) for l in range(n_states) ]) assert bottom_row.shape[1] == x_size G = np.vstack([ # np.hstack([ # np.zeros((1, n_states)), # np.zeros((1, n_states * (n_actions - 1))), # np.zeros((1, n_states * (n_actions - 1))), # np.ones((1, D))]), np.hstack([ np.zeros((D, n_states)), np.zeros((D, n_states * (n_actions - 1))), np.zeros((D, n_states * (n_actions - 1))), np.eye(D) ]), np.hstack([ np.zeros((D, n_states)), np.zeros((D, n_states * (n_actions - 1))), np.zeros((D, n_states * (n_actions - 1))), -np.eye(D) ]), np.hstack([ np.zeros((n_states * (n_actions - 1), n_states)), -np.eye(n_states * (n_actions - 1)), np.zeros( (n_states * (n_actions - 1), n_states * (n_actions - 1))), np.zeros((n_states * (n_actions - 1), D)) ]), np.hstack([ np.zeros((n_states * (n_actions - 1), n_states)), np.zeros( (n_states * (n_actions - 1), n_states * (n_actions - 1))), -np.eye(n_states * (n_actions - 1)), np.zeros((n_states * (n_actions - 1), D)) ]), bottom_row ]) assert G.shape[1] == x_size # h = np.vstack([np.ones((D * 2, 1)), # np.zeros((n_states * (n_actions - 1) * 2 + bottom_row.shape[0], 1))]) # slack tuning variable m = 1.0 # m = 10.0 # h = np.vstack([np.ones((1, 1)), m * np.ones((D, 1)), np.zeros((D, 1)), # np.zeros((n_states * (n_actions - 1) * 2 + bottom_row.shape[0], 1))]) h = np.vstack([ m * np.ones((D, 1)), np.ones((D, 1)), np.zeros((n_states * (n_actions - 1) * 2 + bottom_row.shape[0], 1)) ]) # c = c.reshape(-1, 1) # b = b.reshape(-1, 1) # print c.shape # print G.shape # print h.shape # print A.shape # print b.shape # normalize each row # asum = np.sum(np.abs(A), axis=1) # print asum.shape # print A.shape # A /= asum[:, np.newaxis] # b /= asum # solvers.options['feastol'] = 1e-1 # solvers.options['abstol'] = 1e-3 # solvers.options['show_progress'] = True # c = matrix(c) # G = matrix(G) # h = matrix(h) # A = matrix(A) # b = matrix(b) solution = solvers.lp(matrix(c), matrix(G), matrix(h), matrix(A), matrix(b)) alpha = np.asarray(solution['x'][-reward_dimension:], dtype=np.double) return alpha.ravel()
finaldata.append(temp)
A = np.array(finaldata) * (-1)
n = len(finaldata)
m = len(finaldata[0])

# Construct the matrices as described above
a1 = np.concatenate((A, (-1) * np.eye(n)), axis=1)
a2 = np.concatenate((np.zeros([n, m]), (-1) * np.eye(n)), axis=1)
A1 = np.concatenate((a1, a2))
c1 = np.array([0.0] * m + [1.0] * n)
b1 = np.array([-1.0] * n + [0.0] * n)

# Feed the problem to the solver
c1 = matrix(c1)
A1 = matrix(A1)
b1 = matrix(b1)
sol1 = solvers.lp(c1, A1, b1)
w2 = list(sol1['x'])

# Plot the data and the fitted separating line
x1 = [0, 1]
y1 = [-(w2[0] + w2[1] * i) / w2[2] for i in x1]
plt.scatter(X, Y)
plt.plot(x1, y1, 'r')
plt.show()
# Program file Pex5_4.py
import numpy as np
from cvxopt import matrix, solvers

c = matrix([-4., -5])
A = matrix([[2., 1], [1, 2], [-1, 0], [0, -1]]).T
b = matrix([3., 3, 0, 0])
sol = solvers.lp(c, A, b)
print("Optimal solution:\n", sol['x'])
print("Optimal value:", sol['primal objective'])
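# Sanity check (a small sketch): this is the classic LP from the CVXOPT
# documentation, whose optimum is x = (1, 1) with objective value -9.
x = np.array(sol['x']).ravel()
assert np.allclose(x, [1.0, 1.0], atol=1e-4)
assert abs(sol['primal objective'] - (-9.0)) < 1e-4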
# Scratch/REPL notes: inspect shapes, try a sparse SVD, then solve the LP
# with dense and sparse constraint matrices.
q[index][:, index].shape
r[index].shape
newA = q[index][:, index].dot(r[index])

import scipy.sparse
import scipy.sparse.linalg

sparseG = scipy.sparse.csc_matrix(G)
u, s, vt = scipy.sparse.linalg.svds(sparseG)

from cvxopt import sparse, spmatrix, matrix, solvers

sparse(matrix(G))
sol = solvers.lp(matrix(c), sparse(matrix(G)), matrix(h), sparse(matrix(A)), matrix(b))
sol1 = solvers.lp(matrix(c), sparse(matrix(G)), matrix(h), sparse(matrix(A)), matrix(b), solver='glpk')
sol1 = solvers.lp(matrix(c), sparse(matrix(G)), matrix(h), sparse(matrix(newA)), matrix(b), solver='glpk')
print(sol['x'])

c = matrix([-4., -5.])
G = matrix([[2., 1., -1., 0.], [1., 2., 0., -1.]])
h = matrix([3., 3., 0., 0.])
sol = solvers.lp(c, G, h)
sol = solvers.lp(c, G, h, None, None, solver='glpk')
print(sol['x'])
def cvar_regression(basis_matrix, values, alpha, verbosity=1):
    from cvxopt import matrix, solvers, spmatrix
    from scipy import sparse

    # do not include constant basis in optimization
    assert alpha < 1 and alpha > 0
    basis_matrix = basis_matrix[:, 1:]
    assert basis_matrix.ndim == 2
    assert values.ndim == 1
    nsamples, nbasis = basis_matrix.shape
    assert values.shape[0] == nsamples

    active_index = int(np.ceil(alpha * nsamples)) - 1  # 0 based index 0,...,nsamples-1
    nactive_samples = nsamples - (active_index + 1)
    assert nactive_samples > 0, 'no samples in alpha quantile'
    beta = np.arange(1, nsamples + 1, dtype=float) / nsamples
    beta[active_index - 1] = alpha

    beta_diff = np.diff(beta[active_index - 1:-1])
    assert beta_diff.shape[0] == nactive_samples
    v_coef = np.log(1 - beta[active_index - 1:-2]) - \
        np.log(1 - beta[active_index:-1])
    v_coef /= nsamples * (1 - alpha)

    nvconstraints = nsamples * nactive_samples
    Iv = np.identity(nvconstraints)
    Iactsamp = np.identity(nactive_samples)

    # nactive_samples = p, nsamples = m, nbasis = n
    # design vars [c_1,...,c_n,u_1,...,u_{m-p},v_1,...,v_{(m-p)m},w]
    c_arr = np.hstack((
        basis_matrix.sum(axis=0) / nsamples,
        1 / (1 - alpha) * beta_diff,
        np.repeat(v_coef, nsamples),  # repeat([1,2],2) = [1,1,2,2]
        1. / (nsamples * (1 - alpha)) * np.ones(1)))

    num_opt_vars = nbasis + nactive_samples + nvconstraints + 1

    # v_ij variables ordering: loop through j fastest, e.g. v_11, v_12, etc.
    # v_ij + h'c + u_i <= y_j
    constraints_1 = np.hstack((-np.tile(basis_matrix, (nactive_samples, 1)),
                               -np.repeat(Iactsamp, nsamples, axis=0), -Iv,
                               np.zeros((nvconstraints, 1))))

    # w + h'c <= y_j
    constraints_2 = np.hstack(
        (-basis_matrix, np.zeros((nsamples, nactive_samples)),
         np.zeros((nsamples, nvconstraints)), -np.ones((nsamples, 1))))

    # v_ij >= 0
    constraints_3 = np.hstack(
        (np.zeros((nvconstraints, nbasis + nactive_samples)), -Iv,
         np.zeros((nvconstraints, 1))))

    G_arr = np.vstack((constraints_1, constraints_2, constraints_3))
    h_arr = np.hstack(
        (-np.tile(values, nactive_samples), -values, np.zeros(nvconstraints)))

    assert G_arr.shape[1] == num_opt_vars
    assert G_arr.shape[0] == h_arr.shape[0]
    assert c_arr.shape[0] == num_opt_vars

    c = matrix(c_arr)
    h = matrix(h_arr)
    I, J, data = sparse.find(G_arr)
    G = spmatrix(data, I, J, size=G_arr.shape)

    solvers.options['show_progress'] = verbosity >= 1
    # solvers.options['abstol'] = 1e-10
    # solvers.options['reltol'] = 1e-10
    # solvers.options['feastol'] = 1e-10

    sol = np.asarray(solvers.lp(c=c, G=G, h=h)['x'])[:nbasis]
    residuals = values - basis_matrix.dot(sol)[:, 0]
    coef = np.append(conditional_value_at_risk(residuals, alpha), sol)
    return coef
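# A hypothetical usage sketch for cvar_regression (assumes numpy as np and
# the conditional_value_at_risk helper used above are available at module
# level): fit a quadratic under a CVaR loss at the 0.9 quantile.
import numpy as np

np.random.seed(0)
x = np.random.uniform(-1, 1, 50)
basis_matrix = np.column_stack([np.ones(50), x, x ** 2])  # constant basis first
values = x ** 2 + 0.1 * np.random.randn(50)
coef = cvar_regression(basis_matrix, values, alpha=0.9, verbosity=0)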
import numpy as np
from cvxopt import matrix, solvers

'''
problem:
    minimize 2 x1 + x2
    subject to
        -x1 +  x2 <= 1
         x1 +  x2 >= 2
               x2 >= 0
         x1 - 2x2 <= 4
'''

# cvxopt only accepts <= constraints, so the >= rows are negated below
c = matrix([2.0, 1.0])
b = matrix([1.0, -2.0, 0.0, 4.0])
A = matrix([[-1.0, -1.0, 0.0, 1.0], [1.0, -1.0, -1.0, -2.0]])
sol = solvers.lp(c, A, b)
print(sol)
print(sol['x'])
print(sol['primal objective'])

'''
run result:
     pcost       dcost       gap    pres   dres   k/t
 0:  2.6471e+00 -7.0588e-01  2e+01  8e-01  2e+00  1e+00
 1:  3.0726e+00  2.8437e+00  1e+00  1e-01  2e-01  3e-01
 2:  2.4891e+00  2.4808e+00  1e-01  1e-02  2e-02  5e-02
 3:  2.4999e+00  2.4998e+00  1e-03  1e-04  2e-04  5e-04
 4:  2.5000e+00  2.5000e+00  1e-05  1e-06  2e-06  5e-06
 5:  2.5000e+00  2.5000e+00  1e-07  1e-08  2e-08  5e-08
Optimal solution found.
{'primal infeasibility': 1.1368786420268754e-08,
def cvar_regression_quadrature(basis_matrix, values, alpha, nquad_intervals, verbosity=1, trapezoid_rule=False, solver_name='cvxopt'): """ solver_name = 'cvxopt' solver_name='glpk' trapezoid works but default option is better. """ assert alpha < 1 and alpha > 0 basis_matrix = basis_matrix[:, 1:] assert basis_matrix.ndim == 2 assert values.ndim == 1 nsamples, nbasis = basis_matrix.shape assert values.shape[0] == nsamples if not trapezoid_rule: # left-hand piecewise constant quadrature rule beta = np.linspace(alpha, 1, nquad_intervals + 2)[:-1] # quadrature points dx = beta[1] - beta[0] weights = dx * np.ones(beta.shape[0]) nuvars = weights.shape[0] nvconstraints = nsamples * nuvars num_opt_vars = nbasis + nuvars + nvconstraints num_constraints = 2 * nsamples * nuvars else: beta = np.linspace(alpha, 1, nquad_intervals + 1) # quadrature points dx = beta[1] - beta[0] weights = dx * np.ones(beta.shape[0]) weights[0] /= 2 weights[-1] /= 2 weights = weights[:-1] # ignore left hand side beta = beta[:-1] nuvars = weights.shape[0] nvconstraints = nsamples * nuvars num_opt_vars = nbasis + nuvars + nvconstraints + 1 num_constraints = 2 * nsamples * nuvars + nsamples v_coef = weights / (1 - beta) * 1. / nsamples * 1 / (1 - alpha) #print (beta #print (weights #print (v_coef Iquad = np.identity(nuvars) Iv = np.identity(nvconstraints) # num_quad_point = mu # nsamples = nu # nbasis = m # design vars [c_1,...,c_m,u_1,...,u_{mu+1},v_1,...,v_{mu+1}nu] # v_ij variables ordering: loop through j fastest, e.g. v_11,v_{12} etc if not trapezoid_rule: c_arr = np.hstack( (basis_matrix.sum(axis=0) / nsamples, 1 / (1 - alpha) * weights, np.repeat(v_coef, nsamples))) # # v_ij+h'c+u_i <=y_j # constraints_1 = np.hstack(( # -np.tile(basis_matrix,(nuvars,1)), # -np.repeat(Iquad,nsamples,axis=0),-Iv)) # # v_ij >=0 # constraints_3 = np.hstack(( # np.zeros((nvconstraints,nbasis+nuvars)),-Iv)) # G_arr = np.vstack((constraints_1,constraints_3)) # assert G_arr.shape[0]==num_constraints # assert G_arr.shape[1]==num_opt_vars # assert c_arr.shape[0]==num_opt_vars # I,J,data = sparse.find(G_arr) # G = spmatrix(data,I,J,size=G_arr.shape) #v_ij+h'c+u_i <=y_j constraints_1_shape = (nvconstraints, num_opt_vars) constraints_1a_I = np.repeat(np.arange(nvconstraints), nbasis) constraints_1a_J = np.tile(np.arange(nbasis), nvconstraints) constraints_1a_data = -np.tile(basis_matrix, (nquad_intervals + 1, 1)) constraints_1b_I = np.arange(nvconstraints) constraints_1b_J = np.repeat(np.arange(nquad_intervals + 1), nsamples) + nbasis constraints_1b_data = -np.repeat(np.ones(nquad_intervals + 1), nsamples) ii = nbasis + nquad_intervals + 1 jj = ii + nvconstraints constraints_1c_I = np.arange(nvconstraints) constraints_1c_J = np.arange(ii, jj) constraints_1c_data = -np.ones((nquad_intervals + 1) * nsamples) constraints_1_data = np.hstack( (constraints_1a_data.flatten(), constraints_1b_data, constraints_1c_data)) constraints_1_I = np.hstack( (constraints_1a_I, constraints_1b_I, constraints_1c_I)) constraints_1_J = np.hstack( (constraints_1a_J, constraints_1b_J, constraints_1c_J)) # v_ij >=0 constraints_3_I = np.arange(constraints_1_shape[0], constraints_1_shape[0] + nvconstraints) constraints_3_J = np.arange( nbasis + nquad_intervals + 1, nbasis + nquad_intervals + 1 + nvconstraints) constraints_3_data = -np.ones(nvconstraints) constraints_shape = (num_constraints, num_opt_vars) constraints_I = np.hstack((constraints_1_I, constraints_3_I)) constraints_J = np.hstack((constraints_1_J, constraints_3_J)) constraints_data = np.hstack((constraints_1_data, 
constraints_3_data))

        G = spmatrix(constraints_data, constraints_I, constraints_J,
                     size=constraints_shape)
        h_arr = np.hstack((-np.tile(values, nuvars), np.zeros(nvconstraints)))
    else:
        c_arr = np.hstack(
            (basis_matrix.sum(axis=0) / nsamples, 1 / (1 - alpha) * weights,
             np.repeat(v_coef, nsamples),
             1 / (nsamples * (1 - alpha)) * np.ones(1)))

        # v_ij + h'c + u_i <= y_j
        constraints_1 = np.hstack((-np.tile(basis_matrix, (nquad_intervals, 1)),
                                   -np.repeat(Iquad, nsamples, axis=0), -Iv,
                                   np.zeros((nvconstraints, 1))))

        # w + h'c <= y_j
        constraints_2 = np.hstack((-basis_matrix, np.zeros((nsamples, nuvars)),
                                   np.zeros((nsamples, nvconstraints)),
                                   -np.ones((nsamples, 1))))

        # v_ij >= 0
        constraints_3 = np.hstack(
            (np.zeros((nvconstraints, nbasis + nquad_intervals)), -Iv,
             np.zeros((nvconstraints, 1))))

        G_arr = np.vstack((constraints_1, constraints_2, constraints_3))
        h_arr = np.hstack(
            (-np.tile(values, nuvars), -values, np.zeros(nvconstraints)))

        assert G_arr.shape[0] == num_constraints
        assert G_arr.shape[1] == num_opt_vars
        assert c_arr.shape[0] == num_opt_vars
        I, J, data = sparse.find(G_arr)
        G = spmatrix(data, I, J, size=G_arr.shape)

    c = matrix(c_arr)
    h = matrix(h_arr)

    solvers.options['show_progress'] = verbosity >= 1
    # solvers.options['abstol'] = 1e-10
    # solvers.options['reltol'] = 1e-10
    # solvers.options['feastol'] = 1e-10

    sol = np.asarray(
        solvers.lp(c=c, G=G, h=h, solver=solver_name)['x'])[:nbasis]
    residuals = values - basis_matrix.dot(sol)[:, 0]
    coef = np.append(conditional_value_at_risk(residuals, alpha), sol)
    return coef
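# A hypothetical follow-on sketch reusing basis_matrix/values from the
# previous example: the quadrature variant additionally takes the number of
# quadrature intervals, and trapezoid_rule=True switches quadrature rules.
coef_left = cvar_regression_quadrature(basis_matrix, values, alpha=0.9,
                                       nquad_intervals=20, verbosity=0)
coef_trap = cvar_regression_quadrature(basis_matrix, values, alpha=0.9,
                                       nquad_intervals=20, verbosity=0,
                                       trapezoid_rule=True)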
from cvxopt import matrix, solvers

c = matrix([-5., -3.])  # negated since we need to maximize the objective function
G = matrix([[1., 2., 1., -1., 0.], [1., 1., 4., 0., -1.]])
h = matrix([10., 16., 32., 0., 0.])
solvers.options['show_progress'] = False
sol = solvers.lp(c, G, h)
print('Solution:')
print(sol['x'])
def prob4():
    """Solve the allocation model problem in 'ForestData.npy'.
    Note that the first three rows of the data correspond to the first
    analysis area, the second group of three rows correspond to the second
    analysis area, and so on.

    Returns (in order):
        The optimizer (sol['x'])
        The optimal value (sol['primal objective']*-1000)
    """
    data = np.load("ForestData.npy")
    c = -data[:, 3]
    t = -data[:, 4]
    g = -data[:, 5]
    w = -data[:, 6]
    acres = data[::3, 1]

    # Each analysis area's three allocations must sum to its acreage;
    # K x <= acres together with -K x <= -acres encodes the equalities.
    K = np.kron(np.eye(7), np.ones(3))
    a = -np.eye(21)
    G = np.vstack((K, -K, t, g, w, a))
    h = np.hstack((acres, -acres, [-40000., -5., -70.0 * 788], np.zeros(21)))

    G = matrix(G)
    h = matrix(h)
    sol = solvers.lp(matrix(c), G, h)
    return sol['x'], -1000 * sol['primal objective']
def compute_reward(n_states, n_actions, trans_prob, policy, discount,
                   max_reward, l1_reg):
    """
    Find the reward function with linear programming IRL

    n_states: number of states
    n_actions: number of actions
    trans_prob: transition probabilities as a numpy array indexed
        (state_t, action_t, state_tp1)
    policy: mapping of states to actions
    discount: discount factor
    max_reward: maximum reward
    l1_reg: l1 regularization

    return: reward vector
    """
    A = set(range(n_actions))
    trans_prob = np.transpose(trans_prob, (1, 0, 2))

    # (P_a1(i) - P_a(i)) * inv(I - gamma * P_a1(i))
    def T(a, s):
        return np.dot(
            trans_prob[policy[s], s] - trans_prob[a, s],
            np.linalg.inv(np.eye(n_states) - discount * trans_prob[policy[s]]))

    c = -np.hstack(
        [np.zeros(n_states), np.ones(n_states), -l1_reg * np.ones(n_states)])

    zero_stack1 = np.zeros((n_states * (n_actions - 1), n_states))
    T_stack = np.vstack([
        -T(a, s) for s in range(n_states)
        for a in A - {policy[s]}  # other than the chosen a
    ])
    I_stack1 = np.vstack([
        np.eye(1, n_states, s) for s in range(n_states)
        for a in A - {policy[s]}
    ])
    I_stack2 = np.eye(n_states)
    zero_stack2 = np.zeros((n_states, n_states))

    D_left = np.vstack([T_stack, T_stack, -I_stack2, I_stack2])
    D_middle = np.vstack([I_stack1, zero_stack1, zero_stack2, zero_stack2])
    D_right = np.vstack([zero_stack1, zero_stack1, -I_stack2, -I_stack2])
    D = np.hstack([D_left, D_middle, D_right])
    b = np.zeros((n_states * (n_actions - 1) * 2 + 2 * n_states, 1))

    # |r_i| <= max_reward, encoded directly as inequality rows
    D_bounds = np.hstack([
        np.vstack([-np.eye(n_states), np.eye(n_states)]),
        np.vstack([np.zeros((n_states, n_states)), np.zeros((n_states, n_states))]),
        np.vstack([np.zeros((n_states, n_states)), np.zeros((n_states, n_states))])
    ])
    b_bounds = np.vstack([max_reward * np.ones((n_states, 1))] * 2)
    D = np.vstack((D, D_bounds))
    b = np.vstack((b, b_bounds))

    A_ub = matrix(D)
    b = matrix(b)
    c = matrix(c)
    results = solvers.lp(c, A_ub, b)
    r = np.asarray(results["x"][:n_states], dtype=np.double)

    return r.reshape(n_states)
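# A minimal sketch exercising compute_reward on a random toy MDP; all names
# and sizes here are illustrative, with transitions normalized row-wise.
import numpy as np

np.random.seed(1)
n_states, n_actions = 4, 2
trans_prob = np.random.rand(n_states, n_actions, n_states)
trans_prob /= trans_prob.sum(axis=2, keepdims=True)
policy = np.zeros(n_states, dtype=int)   # expert always picks action 0
r = compute_reward(n_states, n_actions, trans_prob, policy,
                   discount=0.9, max_reward=1.0, l1_reg=0.1)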
# maximize -h'*z - b'*w # subject to +/- c + G'*z + A'*w >= 0 # z >= 0 # # with variables z (8), w (1). cc = matrix(0.0, (9,1)) cc[:8], cc[8] = h, b GG = spmatrix([], [], [], (n+8, 9)) GG[:n,:8] = -G.T GG[:n,8] = -A.T GG[n::n+9] = -1.0 hh = matrix(0.0, (n+8,n)) hh[:n,:] = matrix([i>=j for i in range(n) for j in range(n)], (n,n), 'd') # upper triangular matrix of ones l = [-blas.dot(cc, solvers.lp(cc, GG, hh[:,k])['x']) for k in range(n)] u = [blas.dot(cc, solvers.lp(cc, GG, -hh[:,k])['x']) for k in range(n)] def f(x,y): return x+2*[y] def stepl(x): return functools.reduce(f, x[1:], [x[0]]) def stepr(x): return functools.reduce(f, x[:-1], []) + [x[-1]] try: import pylab except ImportError: pass else: pylab.figure(1, facecolor='w') pylab.plot(stepl(a), stepr(p), '-') pylab.title('Maximum entropy distribution (fig. 7.2)') pylab.xlabel('a') pylab.ylabel('p = Prob(X = a)')
def zonotope_sampler(A_zono, **params):
    """ MCMC based sampler for projection DPPs.
    The similarity matrix is the orthogonal projection matrix onto
    the row span of the feature vector matrix.
    Samples are of size equal to the rank of the projection matrix,
    which is also the rank of the feature matrix (assumed to be full
    row rank).

    :param A_zono:
        Feature vector matrix, feature vectors are stacked columnwise.
        It is assumed to be full row rank.
    :type A_zono:
        array_like

    :param params: Dictionary containing the parameters

        - ``'lin_obj'`` (list): Linear objective (:math:`c`) of the linear
          program used to identify the tile in which a point lies.
          Default is a random Gaussian vector.
        - ``'x_0'`` (list): Initial point.
        - ``'nb_iter'`` (int): Number of iterations of the MCMC chain.
          Default is 10.
        - ``'T_max'`` (float): Maximum running time of the algorithm
          (in seconds). Default is 10s.
    :type params: dict

    :return: MCMC chain of approximate samples (stacked row_wise i.e.
        nb_iter rows).
    :rtype: array_like

    .. seealso::

        Algorithm 5 in :cite:`GaBaVa17`

        - :func:`extract_basis <extract_basis>`
        - :func:`basis_exchange_sampler <basis_exchange_sampler>`
    """
    r, N = A_zono.shape  # Sizes of r=samples=rank(A_zono), N=ground set

    # Linear objective
    c = matrix(params.get('lin_obj', np.random.randn(N)))
    # Initial point x0 = A*u, u~U[0,1]^n
    x0 = matrix(params.get('x_0', A_zono.dot(np.random.rand(N))))

    nb_iter = params.get('nb_iter', 10)
    T_max = params.get('T_max', None)

    ###################
    # Linear problems #
    ###################
    # Canonical form
    # min   c.T*x        min   c.T*x
    # s.t.  G*x <= h <=> s.t.  G*x + s = h
    #       A*x  = b           A*x = b
    #                          s >= 0
    # CVXOPT
    # =====> solvers.lp(c, G, h, A, b, solver='glpk')

    # To access the tile Z(B_x), solve P_x(A,c):
    # y^* = argmin c.T*y        argmin c.T*y
    #       s.t.   A*y = x  <=> s.t. A*y = x
    #              0 <= y <= 1       [ I_n]*y <= [1^n]
    #                                [-I_n]      [0^n]
    # Then B_x = { i ; y_i^* in ]0,1[ }
    A = spmatrix(0.0, [], [], (r, N))
    A[:, :] = A_zono

    G = spmatrix(0.0, [], [], (2 * N, N))
    G[:N, :] = spmatrix(1.0, range(N), range(N))
    G[N:, :] = spmatrix(-1.0, range(N), range(N))

    # Endpoints of the segment D_x cap Z(A) = [x+alpha_m*d, x-alpha_M*d]:
    # alpha_m/_M = argmin +/-alpha        argmin [+/-1 0^N].T*[alpha,lambda]
    # s.t. x + alpha d = A lambda     <=> s.t. [-d A]*[alpha,lambda] = x
    #      0 <= lambda <= 1               [0^N  I_N]*[alpha,lambda] <= [1^N]
    #                                     [0^N -I_N]                   [0^N]
    c_mM = matrix(0.0, (N + 1, 1))
    c_mM[0] = 1.0

    A_mM = spmatrix(0.0, [], [], (r, N + 1))
    A_mM[:, 1:] = A

    G_mM = spmatrix(0.0, [], [], (2 * N, N + 1))
    G_mM[:, 1:] = G

    # Common h to both kinds of LP, cf. 0 <= y <= 1 and 0 <= lambda <= 1
    h = matrix(0.0, (2 * N, 1))
    h[:N, :] = 1.0

    ##################
    # Initialization #
    ##################
    B_x0 = []
    while len(B_x0) != r:
        # Initial tile B_x0: solve P_x0(A,c)
        y_star = solvers.lp(c, G, h, A, x0, solver='glpk')['x']
        # Get the tile
        B_x0 = extract_basis(np.asarray(y_star))

    # Initialize sequence of samples
    Bases = [B_x0]

    # Compute the det of the tile (Vol(B) = abs(det(B)))
    det_B_x0 = la.det(A_zono[:, B_x0])

    it, t_start = 1, time.time()
    flag = it < nb_iter
    while flag:
        # Take uniform direction d defining D_x0
        d = matrix(np.random.randn(r, 1))

        # Define D_x0 cap Z(A) = [x0 + alpha_m*d, x0 - alpha_M*d]
        # Update the constraint [-d A]*[alpha,lambda] = x
        A_mM[:, 0] = -d
        # Find alpha_m/M
        alpha_m = solvers.lp(c_mM, G_mM, h, A_mM, x0, solver='glpk')['x'][0]
        alpha_M = solvers.lp(-c_mM, G_mM, h, A_mM, x0, solver='glpk')['x'][0]

        # Propose x1 ~ U_{[x0+alpha_m*d, x0-alpha_M*d]}
        x1 = x0 + (alpha_m + (alpha_M - alpha_m) * np.random.rand()) * d

        # Proposed tile B_x1: solve P_x1(A,c)
        y_star = solvers.lp(c, G, h, A, x1, solver='glpk')['x']
        # Get the tile
        B_x1 = extract_basis(np.asarray(y_star))

        # Accept/Reject the move with proba Vol(B1)/Vol(B0)
        if len(B_x1) != r:  # extract_basis returned something ill conditioned
            Bases.append(B_x0)
        else:
            det_B_x1 = la.det(A_zono[:, B_x1])
            if np.random.rand() < abs(det_B_x1 / det_B_x0):
                x0, B_x0, det_B_x0 = x1, B_x1, det_B_x1
                Bases.append(B_x1)
            else:
                Bases.append(B_x0)

        it += 1
        flag = (it < nb_iter) if not T_max else ((time.time() - t_start) < T_max)

    return np.array(Bases)
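# A hypothetical usage sketch for zonotope_sampler (assumes the module-level
# imports used above -- numpy as np, numpy.linalg as la, time, cvxopt's
# matrix/spmatrix/solvers with GLPK -- and extract_basis are in scope).
import numpy as np

np.random.seed(0)
r, N = 3, 8
A_zono = np.random.randn(r, N)   # full row rank with probability 1
chain = zonotope_sampler(A_zono, nb_iter=5)
print(chain.shape)               # (nb_iter, r): one basis per MCMC step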
def emd(D1, D2, gdist, scale=False):
    """ Compute the Earth Mover's Distance (EMD) between two sets of
    elements, here dictionary atoms, using a ground distance. Possible
    choices are "chordal", "fubinistudy", "binetcauchy", "geodesic",
    "frobenius", "abs_euclidean" or "euclidean".
    The scale parameter changes the return value to be between 0 and 1.
    """
    g = _valid_atom_metric(gdist)
    if g is None:
        print("Unknown ground distance, exiting.")
        return NaN
    gdm = _compute_gdm(D1, D2, g)

    # Transportation LP: marginals fixed to uniform weights on D1 and D2
    c = co.matrix(gdm.flatten(order="F"))
    G1 = co.spmatrix([], [], [], (len(D1), len(D1) * len(D2)))
    G2 = co.spmatrix([], [], [], (len(D2), len(D1) * len(D2)))
    G3 = co.spmatrix(-1.0, range(len(D1) * len(D2)), range(len(D1) * len(D2)))
    for i in range(len(D1)):
        for j in range(len(D2)):
            k = j + (i * len(D2))
            G1[i, k] = 1.0
            G2[j, k] = 1.0
    G = co.sparse([G1, G2, G3])
    h1 = co.matrix(1.0 / len(D1), (len(D1), 1))
    h2 = co.matrix(1.0 / len(D2), (len(D2), 1))
    h3 = co.spmatrix([], [], [], (len(D1) * len(D2), 1))
    h = co.matrix([h1, h2, h3])
    A = co.matrix(1.0, (1, len(D1) * len(D2)))
    b = co.matrix([1.0])
    co.solvers.options["show_progress"] = False
    sol = solv.lp(c, G, h, A, b)
    d = sol["primal objective"]
    if not scale:
        return d
    return _scale_metric(gdist, d, D1)
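# A hypothetical usage sketch for emd: compare two small dictionaries of
# unit-norm atoms under the Euclidean ground distance (assumes the private
# helpers referenced above are importable alongside emd).
import numpy as np

rng = np.random.RandomState(0)
D1 = [a / np.linalg.norm(a) for a in rng.randn(4, 16)]
D2 = [a / np.linalg.norm(a) for a in rng.randn(5, 16)]
print(emd(D1, D2, 'euclidean'))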
def findMinQ2(Q1, Q2):
    # Maximize the total value of a single probability distribution over all
    # 50 stacked entries of Q1 and Q2 (an LP over the simplex).
    c = matrix(np.hstack([-Q1.flatten(), -Q2.flatten()]))
    G = matrix(np.identity(50) * -1)   # x >= 0
    h = matrix(np.zeros(50))
    A = matrix(np.ones((1, 50)))       # sum(x) == 1
    b = matrix([1.])
    solvers.options['show_progress'] = False
    sol = solvers.lp(c, G, h, A, b)
    primeDist = list(sol['x'][:25])    # distribution over the Q1 entries
    return primeDist
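# A hypothetical usage sketch for findMinQ2 (with Q1 and Q2 passed
# explicitly as numpy arrays): since the LP maximizes a linear objective
# over the simplex, the solution concentrates on the largest stacked entry.
import numpy as np

Q1 = np.random.rand(5, 5)
Q2 = np.random.rand(5, 5)
dist = findMinQ2(Q1, Q2)   # probabilities over the 25 entries of Q1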