def get_initial_guess(S, enzymeInfo, concLB, concUB):
    nMetabs = len(S.index)
    ini = []
    for i, metab in enumerate(S.index):
        f = np.zeros(nMetabs)
        f[i] = 1
        A = R * T * S.T
        b = -np.array([-R * T * np.log(item[0])
                       for item in enzymeInfo.loc[:, 'Keq']])
        lb = [np.log(concLB)] * nMetabs
        ub = [np.log(concUB)] * nMetabs
        meth = 'pclp'
        p = LP(f=f, A=A, b=b, lb=lb, ub=ub, iprint=-1)
        r = p.solve(meth, plot=0)
        ini.append(r.xf[i])
    ini = np.array(ini)
    return ini
def optimize_minimal_driving_force(S, Vss, enzymeInfo, concLB, concUB):
    '''
    Parameters
    S: df, stoichiometric matrix, metabolites in rows, reactions in columns;
       negative for substrates, positive for products
    Vss: ser, net fluxes in steady state (including in and out fluxes)
    enzymeInfo: df, reactions in rows
    concLB: float, concentration lower bound (mM) for all metabolites
    concUB: float, concentration upper bound (mM) for all metabolites

    Returns
    optConcs: ser, optimal concentrations (mM)
    optDeltaGs: ser, optimal minimal driving forces
    refDeltaGs: ser, reference minimal driving forces (all concentrations at 1 mM)
    '''
    from openopt import LP
    from constants import R, T

    f = np.zeros(S.shape[0] + 1)
    f[0] = -1

    S = S * Vss[S.columns]
    A = np.concatenate((np.ones((S.shape[1], 1)), R * T * S.T), axis=1)
    b = -np.array([-R * T * np.log(item[0])
                   for item in enzymeInfo.loc[:, 'Keq']])
    b = b * Vss[S.columns].values

    lb = [-np.inf] + [np.log(concLB)] * S.shape[0]
    ub = [np.inf] + [np.log(concUB)] * S.shape[0]

    meth = 'cvxopt_lp'
    p = LP(f=f, A=A, b=b, lb=lb, ub=ub, iprint=-1,
           name='Maximize minimal driving force')
    r = p.solve(meth, plot=0)

    optLogConcs = r.xf[1:]
    optConcs = pd.Series(np.exp(optLogConcs), index=S.index)
    optDeltaGs = pd.Series(-b + R * T * np.dot(S.T, optLogConcs), index=S.columns)
    refDeltaGs = pd.Series(-b, index=S.columns)

    return optConcs, optDeltaGs, refDeltaGs
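# --- Hypothetical usage sketch for optimize_minimal_driving_force above ---
# The two-reaction toy pathway A -> B -> C, its fluxes and the Keq values below
# are invented purely for illustration; they are not from the original project.
# Assumes numpy (np), pandas (pd), OpenOpt, and a local 'constants' module
# providing R and T, as the function itself requires.
import numpy as np
import pandas as pd

S_toy = pd.DataFrame({'v1': [-1, 1, 0], 'v2': [0, -1, 1]},
                     index=['A', 'B', 'C'])          # stoichiometry of A -> B -> C
Vss_toy = pd.Series({'v1': 1.0, 'v2': 1.0})          # steady-state net fluxes
enzymeInfo_toy = pd.DataFrame({'Keq': [[10.0], [5.0]]}, index=['v1', 'v2'])

optConcs, optDeltaGs, refDeltaGs = optimize_minimal_driving_force(
    S_toy, Vss_toy, enzymeInfo_toy, concLB=0.001, concUB=10.0)
print(optConcs)
print(optDeltaGs)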
def minimize(self, **kwargs):
    """
    solve the linear problem with the solver given in self.solver

    Returns: obj_value, solution
        obj_value -- value of the objective function at the discovered solution
        solution  -- the solution flux vector (indexed like matrix columns)
    """
    if self.solver == _GLPK:
        # Use pyMathProg interface to GLPK solver
        self.glpkP.solve()
        self.istop = self.glpkP.status()
        # print "istop:", self.istop
        self.status = glpkToSolverStatus(self.istop)
        if self.status == SolverStatus.OPTIMAL:
            self.obj_value = self.glpkP.vobj()
            self.solution = [self.glpkVar[i].primal
                             for i in range(len(self.glpkVar))]
        else:
            self.obj_value = 0.
            self.solution = []
    else:
        # Use OpenOpt interface
        lp = LP(self.obj, A=self.Aineq, Aeq=self.Aeq, b=self.bineq,
                beq=self.beq, lb=self.lb, ub=self.ub, **kwargs)
        lp.debug = 1
        # lp.iprint = -1  # suppress solver output
        r = lp.solve(self.solver if self.solver != _OOGLPK else 'glpk')
        if r.istop <= 0. or r.ff != r.ff:  # check halting condition (r.ff != r.ff catches NaN)
            self.obj_value = 0.
            self.solution = []
            self.istop = r.istop
        else:
            self.obj_value = r.ff
            self.solution = r.xf
            self.istop = r.istop
        # print self.istop
    return self.obj_value, self.solution
def delete_dom_act_Pi(game, i):
    no_a_game = list(game.shape)  # will contain the number of actions available for each player
    no_a_game.pop()  # removes the last element of the list, which was simply n: the number of payoffs in each action profile
    # as the first number is the number of actions of the last player etc., we turn the list around
    no_a_game = no_a_game[::-1]
    no_a_i = no_a_game.pop(i)  # remove the number of actions of player i from no_a_game and put it in no_a_i
    if no_a_i == 1:  # if a single action remains it cannot be dominated
        return game
    var_no = np.prod(no_a_game)  # size of the support of mu
    if var_no == 1:  # special case: the support of mu has a single element
        return del_dom_a_Pi_point_belief(game, i, no_a_i)
    temp_game = game
    f = [0.] * var_no  # dummy objective used below
    lb = [0.] * var_no
    ub = [1.] * var_no
    Aeq = [[1.] * var_no]
    beq = (1.,)
    j = 0
    while j < no_a_i:
        u_action = payoffi_builder(game, j, i)
        A = []
        b = []
        k = 0
        while k < no_a_i:
            u_other_action = payoffi_builder(game, k, i)
            A.append(u_other_action - u_action)  # elementwise difference
            b.append(0.)
            k += 1
        # we use the artificial minimization problem under the constraint that the action
        # gives a weakly higher payoff than any other action; if no feasible solution is
        # obtained, then the action is dominated
        p = LP(f, A=A, b=b, lb=lb, ub=ub, Aeq=Aeq, beq=beq)
        p.iprint = -1
        r = p.minimize('pclp')
        if r.stopcase != 1:  # if no feasible solution was obtained, then the action is dominated...
            # ...and therefore action j is removed; recall that players are "in the wrong order"
            temp_game = np.delete(temp_game, j, n - i - 1)
            count = 0
            z = -1
            while count <= j:
                z += 1
                if undominated[i][z] != -1:
                    count += 1
            undominated[i][z] = -1
        j += 1
    return temp_game
def compressed_sensing2(x1, trans):
    """L1 compressed sensing

    :Parameters:
        x1 : array-like, shape=(n_outputs,)
            input sparse vector
        trans : array-like, shape=(n_outputs, n_inputs)
            transformation matrix
    :Returns:
        decoded vector, shape=(n_inputs,)
    :RType:
        array-like
    """

    # obtain sizes of inputs and outputs
    (n_outputs, n_inputs) = trans.shape

    # define variables
    t = fd.oovar('t', size=n_inputs)
    x = fd.oovar('x', size=n_inputs)

    # objective to minimize: f x^T -> min
    objective = fd.sum(t)

    # init constraints
    constraints = []

    # equality constraint: a_eq x^T = b_eq
    constraints.append(fd.dot(trans, x) == x1)

    # inequality constraint: -t <= x <= t
    constraints.append(-t <= x)
    constraints.append(x <= t)

    # start point
    start_point = {x: np.zeros(n_inputs), t: np.zeros(n_inputs)}

    # solve linear programming
    prob = LP(objective, start_point, constraints=constraints)
    result = prob.minimize('pclp')  # glpk, lpSolve... if available
    # print result
    # print "x =", result.xf          # arguments at minimum
    # print "objective =", result.ff  # value of objective

    return result.xf[x]
def compressed_sensing(x1, trans):
    """L1 compressed sensing

    :Parameters:
        x1 : array-like, shape=(n_outputs,)
            input sparse vector
        trans : array-like, shape=(n_outputs, n_inputs)
            transformation matrix
    :Returns:
        decoded vector, shape=(n_inputs,)
    :RType:
        array-like
    """

    # obtain sizes of inputs and outputs
    (n_outputs, n_inputs) = trans.shape

    # objective to minimize: f x^T -> min
    f = np.zeros((n_inputs * 2), dtype=np.float)
    f[n_inputs:2 * n_inputs] = 1.0

    # constraint: a_eq x^T == b_eq
    a_eq = np.zeros((n_outputs, 2 * n_inputs), dtype=np.float)
    a_eq[:, 0:n_inputs] = trans
    b_eq = x1

    # constraint: -t <= x <= t
    a = np.zeros((2 * n_inputs, 2 * n_inputs), dtype=np.float)
    for i in xrange(n_inputs):
        a[i, i] = -1.0
        a[i, n_inputs + i] = -1.0
        a[n_inputs + i, i] = 1.0
        a[n_inputs + i, n_inputs + i] = -1.0
    b = np.zeros(n_inputs * 2)

    # solve linear programming
    prob = LP(f, Aeq=a_eq, beq=b_eq, A=a, b=b)
    result = prob.minimize('pclp')  # glpk, lpSolve... if available
    # print result
    # print "x =", result.xf          # arguments at minimum
    # print "objective =", result.ff  # value of objective

    return result.xf[0:n_inputs]
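# --- Hypothetical usage sketch for the two compressed-sensing decoders above ---
# A small random instance, invented for illustration: a k-sparse signal is measured
# with a Gaussian matrix and recovered by L1 minimization. Assumes the decoders above
# are importable and that numpy (np) and an OpenOpt LP solver such as 'pclp' are
# available in the same (Python 2 era) environment the original code targets.
import numpy as np

rng = np.random.RandomState(0)
n_inputs, n_outputs, k = 50, 25, 3

signal = np.zeros(n_inputs)
signal[rng.choice(n_inputs, k, replace=False)] = rng.randn(k)   # k-sparse signal
trans = rng.randn(n_outputs, n_inputs)                          # sensing matrix
x1 = np.dot(trans, signal)                                      # compressed measurements

decoded = compressed_sensing(x1, trans)     # or compressed_sensing2(x1, trans)
print("max reconstruction error:", np.max(np.abs(decoded - signal)))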
8x1 + 80x2 + 15x3 <= 150    (4)
100x1 + 10x2 + x3 >= 800    (5)
80x1 + 8x2 + 15x3 = 750     (6)
x1 + 10x2 + 100x3 = 80      (7)
x1 >= 4                     (8)
-8 >= x2 >= -80             (9)
"""
from numpy import *
from openopt import LP

f = array([15, 8, 80])
A = mat('1 2 3; 8 15 80; 8 80 15; -100 -10 -1')  # numpy.ndarray is also allowed
b = [15, 80, 150, -800]  # numpy.ndarray, matrix etc are also allowed
Aeq = mat('80 8 15; 1 10 100')  # numpy.ndarray is also allowed
beq = (750, 80)
lb = [4, -80, -inf]
ub = [inf, -8, inf]

p = LP(f, A=A, Aeq=Aeq, b=b, beq=beq, lb=lb, ub=ub)
# or p = LP(f=f, A=A, Aeq=Aeq, b=b, beq=beq, lb=lb, ub=ub)

# r = p.minimize('glpk')     # CVXOPT must be installed
# r = p.minimize('lpSolve')  # lpsolve must be installed
r = p.minimize('pclp')
# search for max: r = p.maximize('glpk')  # CVXOPT & glpk must be installed
# r = p.minimize('nlp:ralg', ftol=1e-7, xtol=1e-7, goal='min', plot=1)

print('objFunValue: %f' % r.ff)  # should print 204.48841578
print('x_opt: %s' % r.xf)  # should print [ 9.89355041 -8.          1.5010645 ]
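# --- Optional verification sketch (not part of the original example) ---
# Plugging the reported optimum back into f, A, b, Aeq, beq from the script above
# to confirm the printed objective value and feasibility of the constraints.
x_opt = array([9.89355041, -8.0, 1.5010645])
print('objective   :', dot(f, x_opt))                                       # ~204.488
print('A x <= b    :', (array(A.dot(x_opt)).ravel() <= array(b) + 1e-6).all())
print('Aeq x == beq:', allclose(array(Aeq.dot(x_opt)).ravel(), beq, atol=1e-4))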
from FuncDesigner import *

N = 100
a = oovars(N)  # create array of N oovars
b = oovars(N)  # another array of N oovars

some_lin_funcs = [i*a[i] + 4*i + 5*b[i] for i in range(N)]
f = some_lin_funcs[15] + some_lin_funcs[80] - sum(a) + sum(b)

point = {}
for i in range(N):
    point[a[i]] = 1.5 * i**2
    point[b[i]] = 1.5 * i**3
# BTW we could use 1-dimensional arrays here, e.g. point[a[25]] = [2,3,4,5], point[a[27]] = [1,2,5] etc
print f(point)  # prints 40899980.

from openopt import LP

# define prob
p = LP(f, point)

# add some box-bound constraints
aLBs = [a[i] > -10 for i in range(N)]
bLBs = [b[i] > -10 for i in range(N)]
aUBs = [a[i] < 15 for i in range(N)]
bUBs = [b[i] < 15 for i in range(N)]
p.constraints = aLBs + bLBs + aUBs + bUBs

# add some general linear constraints
p.constraints.append(a[4] + b[15] + a[20].size - f.size > -9)  # array size, here a[20].size = f.size = 1
# or p.constraints += [a[4] + b[15] + a[20].size - f.size > -9]
for i in range(N):
    p.constraints.append(2 * some_lin_funcs[i] + a[i] < i / 2.0
                         + some_lin_funcs[N - i - 1] + 1.5 * b[i])
def __solver__(self, p):
    n = p.n
    x0 = copy(p.x0)
    xPrev = x0.copy()
    xf = x0.copy()
    xk = x0.copy()
    p.xk = x0.copy()

    f0 = p.f(x0)
    fk = f0
    ff = f0
    p.fk = fk

    df0 = p.df(x0)

    #####################################################################
    ##    #handling box-bounded problems
    ##    if p.__isNoMoreThanBoxBounded__():
    ##        for k in range(int(p.maxIter)):
    ##
    ##    #end of handling box-bounded problems
    isBB = p.__isNoMoreThanBoxBounded__()
    ##    isBB = 0
    H = diag(ones(p.n))

    if not p.userProvided.c:
        p.c = lambda x: array([])
        p.dc = lambda x: array([]).reshape(0, p.n)
    if not p.userProvided.h:
        p.h = lambda x: array([])
        p.dh = lambda x: array([]).reshape(0, p.n)

    p.use_subproblem = 'QP'
    #p.use_subproblem = 'LLSP'

    for k in range(p.maxIter + 4):
        if isBB:
            f0 = p.f(xk)
            df = p.df(xk)
            direction = -df
            f1 = p.f(xk + direction)
            ind_l = direction <= p.lb - xk
            direction[ind_l] = (p.lb - xk)[ind_l]
            ind_u = direction >= p.ub - xk
            direction[ind_u] = (p.ub - xk)[ind_u]
            ff = p.f(xk + direction)
            ## print 'f0', f0, 'f1', f1, 'ff', ff
        else:
            mr = p.getMaxResidual(xk)
            if mr > p.contol:
                mr_grad = p.getMaxConstrGradient(xk)
            lb = p.lb - xk  #- p.contol/2
            ub = p.ub - xk  #+ p.contol/2
            c, dc, h, dh, df = p.c(xk), p.dc(xk), p.h(xk), p.dh(xk), p.df(xk)
            A, Aeq = vstack((dc, p.A)), vstack((dh, p.Aeq))
            b = concatenate((-c, p.b - p.matmult(p.A, xk)))  #+ p.contol/2
            beq = concatenate((-h, p.beq - p.matmult(p.Aeq, xk)))
            if b.size != 0:
                isFinite = isfinite(b)
                ind = where(isFinite)[0]
                A, b = A[ind], b[ind]
            if beq.size != 0:
                isFinite = isfinite(beq)
                ind = where(isFinite)[0]
                Aeq, beq = Aeq[ind], beq[ind]

            if p.use_subproblem == 'LP':  # linear
                linprob = LP(df, A=A, Aeq=Aeq, b=b, beq=beq, lb=lb, ub=ub)
                linprob.iprint = -1
                r2 = linprob.solve('cvxopt_glpk')  # TODO: replace lpSolve by autoselect
                if r2.istop <= 0:
                    p.istop = -12
                    p.msg = "failed to solve LP subproblem"
                    return
            elif p.use_subproblem == 'QP':  # quadratic
                qp = QP(H=H, f=df, A=A, Aeq=Aeq, b=b, beq=beq, lb=lb, ub=ub)
                qp.iprint = -1
                r2 = qp.solve('cvxopt_qp')  # TODO: replace solver by autoselect
                #r2 = qp.solve('qld')  # TODO: replace solver by autoselect
                if r2.istop <= 0:
                    for i in range(4):
                        if p.debug:
                            p.warn("iter " + str(k) + ": attempt Num " + str(i) +
                                   " to solve QP subproblem has failed")
                        #qp.f += 2*N*sum(qp.A,0)
                        A2 = vstack((A, Aeq, -Aeq))
                        b2 = concatenate((b, beq, -beq)) + pow(10, i) * p.contol
                        qp = QP(H=H, f=df, A=A2, b=b2, iprint=-5)
                        qp.lb = lb - pow(10, i) * p.contol
                        qp.ub = ub + pow(10, i) * p.contol
                        # I guess lb and ub don't matter here
                        try:
                            r2 = qp.solve('cvxopt_qp')  # TODO: replace solver by autoselect
                        except:
                            r2.istop = -11
                        if r2.istop > 0:
                            break
                    if r2.istop <= 0:
                        p.istop = -11
                        p.msg = "failed to solve QP subproblem"
                        return
            elif p.use_subproblem == 'LLSP':
                direction_c = getConstrDirection(p, xk, regularization=1e-7)
            else:
                p.err('incorrect or unknown subproblem')

        if isBB:
            X0 = xk.copy()
            N = 0
            result, newX = chLineSearch(p, X0, direction, N, isBB)
        elif p.use_subproblem != 'LLSP':
            duals = r2.duals
            N = 1.05 * abs(duals).sum()
            direction = r2.xf
            X0 = xk.copy()
            result, newX = chLineSearch(p, X0, direction, N, isBB)
        else:  # case LLSP
            direction_f = -df
            p2 = NSP(LLSsubprobF, [0.8, 0.8], ftol=0, gtol=0, xtol=1e-5, iprint=-1)
            p2.args.f = (xk, direction_f, direction_c, p, 1e20)
            r_subprob = p2.solve('ralg')
            alpha = r_subprob.xf
            newX = xk + alpha[0] * direction_f + alpha[1] * direction_c
            # dw = (direction_f * direction_c).sum()
            # cos_phi = dw/p.norm(direction_f)/p.norm(direction_c)
            # res_0, res_1 = p.getMaxResidual(xk), p.getMaxResidual(xk+1e-1*direction_c)
            # print cos_phi, res_0-res_1
            # res_0 = p.getMaxResidual(xk)
            # optimConstrPoint = getDirectionOptimPoint(p, p.getMaxResidual, xk, direction_c)
            # res_1 = p.getMaxResidual(optimConstrPoint)
            #
            # maxConstrLimit = p.contol
            #xk = getDirectionOptimPoint(p, p.f, optimConstrPoint, -optimConstrPoint+xk+direction_f, maxConstrLimit = maxConstrLimit)
            #print 'res_0', res_0, 'res_1', res_1, 'res_2', p.getMaxResidual(xk)
            #xk = getDirectionOptimPoint(p, p.f, xk, direction_f, maxConstrLimit)
            #newX = xk.copy()
            result = 0
            # x_0 = X0.copy()
            # N = j = 0
            # while p.getMaxResidual(x_0) > Residual0 + 0.1*p.contol:
            #     j += 1
            #     x_0 = xk + 0.75**j * (X0-xk)
            # X0 = x_0
            # result, newX = 0, X0
            # print 'newIterResidual = ', p.getMaxResidual(x_0)

        if result != 0:
            p.istop = result
            p.xf = newX
            return

        xk = newX.copy()
        fk = p.f(xk)

        p.xk, p.fk = copy(xk), copy(fk)
        #p._df = p.df(xk)

        ####################
        p.iterfcn()
        if p.istop:
            p.xf = xk
            p.ff = fk
            #p._df = g FIXME: implement me
            return
def schedule_all(self, timelimit=None):
    if not self.reservation_list:
        return self.schedule_dict

    self.build_data_structures()

    # allocate A & b
    # find the row size of A:
    # first find the number of reservations participating in oneofs
    oneof_reservation_num = 0
    for c in self.oneof_constraints:
        oneof_reservation_num += len(c)
    A_numrows = (len(self.reservation_list) + len(self.aikt) +
                 len(self.oneof_constraints) - oneof_reservation_num)
    A_rows = []
    A_cols = []
    A_data = []
    # try:
    #     A = numpy.zeros((A_numrows, len(self.Yik)), dtype=numpy.int)
    # except ValueError:
    #     print("Number of A rows: {}".format(A_numrows))
    b = numpy.zeros(A_numrows, dtype=numpy.int16)

    # build A & b
    row = 0

    # constraint 5: oneof
    for c in self.oneof_constraints:
        for r in c:
            for entry in r.Yik_entries:
                A_rows.append(row)
                A_cols.append(entry)
                A_data.append(1)
                # A[row,entry] = 1
            r.skip_constraint2 = True
        b[row] = 1
        row += 1

    # constraint 2: each res should have one start:
    # optimization:
    # if the reservation participates in a oneof, then this is
    # redundant with the oneof constraint added above, so don't add it.
    for r in self.reservation_list:
        if hasattr(r, 'skip_constraint2'):
            continue
        for entry in r.Yik_entries:
            A_rows.append(row)
            A_cols.append(entry)
            A_data.append(1)
            # A[row,entry] = 1
        b[row] = 1
        row += 1

    # constraint 3: each slice should only have one sched. reservation:
    for s in self.aikt.keys():
        for entry in self.aikt[s]:
            A_rows.append(row)
            A_cols.append(entry)
            A_data.append(1)
            # A[row,entry] = 1
        b[row] = 1
        row += 1

    A = coo_matrix((A_data, (A_rows, A_cols)), shape=(A_numrows, len(self.Yik)))

    # constraint 6: and
    # figure out size of constraint matrix
    if not self.and_constraints:
        Aeq = []
        beq = []
    else:
        Aeq_numrows = 0
        for c in self.and_constraints:
            Aeq_numrows += len(c) - 1
        # allocate Aeq and beq
        # Aeq = numpy.zeros((Aeq_numrows, len(self.Yik)), dtype=numpy.int)
        Aeq_rows = []
        Aeq_cols = []
        Aeq_data = []
        beq = numpy.zeros(Aeq_numrows, dtype=numpy.int16)
        row = 0
        for c in self.and_constraints:
            constraint_size = len(c)
            left_idx = 0
            right_idx = 1
            while right_idx < constraint_size:
                left_r = c[left_idx]
                right_r = c[right_idx]
                for entry in left_r.Yik_entries:
                    # Aeq[row, entry] = 1
                    Aeq_rows.append(row)
                    Aeq_cols.append(entry)
                    Aeq_data.append(1)
                for entry in right_r.Yik_entries:
                    Aeq_rows.append(row)
                    Aeq_cols.append(entry)
                    Aeq_data.append(-1)
                    # Aeq[row, entry] = -1
                left_idx += 1
                right_idx += 1
                row += 1
        # print(Aeq_numrows)
        Aeq = coo_matrix((Aeq_data, (Aeq_rows, Aeq_cols)),
                         shape=(Aeq_numrows, len(self.Yik)))

    # bounds:
    lb = numpy.zeros(len(self.Yik), dtype=numpy.int16)
    ub = numpy.ones(len(self.Yik), dtype=numpy.int16)

    # objective function:
    f = numpy.zeros(len(self.Yik))
    f = numpy.zeros(len(self.Yik), dtype=numpy.int16)
    row = 0
    for entry in self.Yik:
        f[row] = entry[2]  # priority
        row += 1

    dump_matrix_sizes(f, A, Aeq, b, beq, lb, ub,
                      len(self.compound_reservation_list))
    p = LP(f=f, A=A, Aeq=Aeq, b=b, beq=beq, lb=lb, ub=ub)
    # r = p.minimize('pclp')
    r = p.maximize('glpk', iprint=-1)
    # r = p.minimize('lpsolve')
    return self.unpack_result(r)
""" Sparse LP example for Nvariables = 25000 (hence Nconstraints = 75000) glpk peak memory ~70 Mb, time elapsed = 35.79, CPU time elapsed = 35.0 """ from openopt import LP from numpy import arange from FuncDesigner import * N = 25000 x, y, z = oovars(3) startPoint = {x:0, y:[0]*N, z:[0]*(2*N)} # thus x from R, y from R^N, z from R^2N objective = sum(x) + 2*sum(y) + 3*sum(z) cons = [x<100, x>-100, y<arange(N), y>-10-arange(N), z<arange(2*N), z>-100-arange(2*N), x+y>2-3*arange(N), x+z>4-5*arange(2*N)] p = LP(objective, startPoint, constraints = cons) solver = 'glpk' # CVXOPT & glpk must be installed r = p.minimize(solver) print('objFunValue:%f' % r.ff)
t = time()

# Define some oovars
drugs = oovar(2)
material = oovar(2)
budgets = oovar(3)
# NB: the rest of this excerpt works with oovars x, y, z (x of size 2), which are
# presumably defined above the excerpt; the three oovars defined here are unused.

# Let's define some linear functions
f1 = 4*x + 5*y + 3*z + 5
f2 = f1.sum() + 2*x + 4*y + 15
f3 = 5*f1 + 4*f2 + 20

# Define objective; sum(a) and a.sum() are the same, just as for numpy arrays
obj = x.sum() + y - 50*z + sum(f3) + 2*f2.sum() + 4064.6

# Define some constraints via a Python list, tuple or set of any length,
# possibly built via while/for loops
constraints = [x + 5*y < 15, x[0] < 4, f1 < [25, 35], f1 > -100,
               2*f1 + 4*z < [80, 800], 5*f2 + 4*z < 100,
               -5 < x, x < 1, -20 < y, y < 20, -4000 < z, z < 4]

# Start point - currently only the size of the variables matters;
# glpk, lpSolve and cvxopt_lp use start value = all-zeros
startPoint = {x: [8, 15], y: 25, z: 80}  # however, using numpy.arrays is more recommended than Python lists

# Create prob
p = LP(obj, startPoint, constraints=constraints)

# Solve
r = p.solve('glpk')  # glpk is the name of the solver involved, see OOF doc for more arguments

# Decode solution
print('Solution: x = %s y = %f z = %f' % (str(x(r)), y(r), z(r)))
# Solution: x = [-4.25 -4.25] y = -20.000000 z = 4.000000

print "elapsed %.3f secs" % (time() - t)
# Example of exporting an OpenOpt LP to an MPS file
# you should have lpsolve and its Python binding properly installed
# (you may take a look at the instructions from openopt.org/LP)
# You can solve problems defined in MPS files
# with a variety of solvers at the NEOS server for free
# http://neos.mcs.anl.gov/
# BTW they have a Python API along with a web API and others

from numpy import *
from openopt import LP

f = array([15, 8, 80])
A = mat('1 2 3; 8 15 80; 8 80 15; -100 -10 -1')  # numpy.ndarray is also allowed
b = [15, 80, 150, -800]  # numpy.ndarray, matrix etc are also allowed
Aeq = mat('80 8 15; 1 10 100')  # numpy.ndarray is also allowed
beq = (750, 80)
lb = [4, -80, -inf]
ub = [inf, -8, inf]

p = LP(f, A=A, Aeq=Aeq, b=b, beq=beq, lb=lb, ub=ub, name='lp_1')
# or p = LP(f=f, A=A, Aeq=Aeq, b=b, beq=beq, lb=lb, ub=ub)

# if the file name doesn't end with '.MPS' or '.mps'
# then '.mps' will be appended
success = p.exportToMPS('asdf')
# success is False if an error occurred (read-only file system, no write access, etc),
# otherwise success is True

# objFunValue should be 204.48841578
# x_opt should be [ 9.89355041 -8.          1.5010645 ]
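# --- Hedged follow-up to the export example above (not in the original) ---
# Per the comments above, exportToMPS appends '.mps' when the given name lacks it,
# so 'asdf' should end up as 'asdf.mps' in the current working directory.
import os
if success:
    print('LP written to %s' % os.path.abspath('asdf.mps'))
else:
    print('export failed - check the lpsolve installation and write permissions')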
from FuncDesigner import *
from openopt import LP

# Define some oovars
x, y, z = oovars(3)

# Let's define some linear functions
f1 = 4*x + 5*y + 3*z + 5
f2 = f1.sum() + 2*x + 4*y + 15
f3 = 5*f1 + 4*f2 + 20

# Define objective; sum(a) and a.sum() are the same, just as for numpy arrays
obj = sum(f1) + x.sum() + y - 50*z + 2*f2.sum() + 4064.6

# Start point - currently only the size of the variables matters
startPoint = {x: [8, 15], y: 25, z: 80}  # however, using numpy.arrays is more recommended than Python lists

# Create prob
p = LP(obj, startPoint)

# Define some constraints
p.constraints = [x + 5*y < 15, x[0] < 4, f1 < [25, 35], f1 > -100,
                 2*f1 + 4*z < [80, 800], 5*f2 + 4*z < 100,
                 -5 < x, x < 1, -20 < y, y < 20, -4000 < z, z < 4]

# Solve
# lpSolve is the name of the solver involved, see OOF doc for more arguments;
# t (the variables to be kept fixed) is not defined in this excerpt
r = p.solve('lpSolve', fixedVars=t)

# Decode solution
print('Solution: x = %s y = %f z = %f' % (r(x), r(y), r(z)))
# Solution: x = [-4.25 -4.25] y = -20.000000 z = 4.000000
ub = [1.] * len(Ulist)
Aeq = [[1.] * len(Ulist)]
beq = (1.,)
A = []
b = []
player = 0
while player < len(no_action):
    action = 0
    while action < no_action[player]:
        for k in range(0, no_action[player]):
            A.append(multiply(udiff(player, action, k), aik_indicator(player, action)))
            b.append(0.)
        action = action + 1
    player = player + 1
#print A,b

# we use the artificial minimization problem under the constraint that the action gives
# a weakly higher payoff than any other action; if no feasible solution is obtained,
# then the action is dominated
p = LP(neg_welfare, A=A, b=b, lb=lb, ub=ub, Aeq=Aeq, beq=beq)
p.iprint = -1
try:
    r = p.minimize('pclp')
    pminw = LP(welfare, A=A, b=b, lb=lb, ub=ub, Aeq=Aeq, beq=beq)
    pminw.iprint = -1
    rminw = pminw.minimize('pclp')
except:
    print "Solver returns error. Probably, each player has a unique rationalizable action (check with the rationalizability solver)."
    quit()

### formatting the result back into the same format as the game input
# first: rounding
outr = []
for item in r.xf:
def solve(self):
    p = LP(self.cost_function, Aeq=self.Aeq, beq=self.beq, lb=self.lb)
    p.iprint = -1
    r = p.solve('pclp')
    return [r.xf, r.ff]
print len(s.matrix)
# for m in bMets:
#     s.add_reaction(Reaction('R_'+m.identifier+'_Transp', (m,), (), ()))
# A_eq = numpy.delete(numpy.array(s.matrix, dtype=float), bIndices, 0)
transpColumn = numpy.zeros((len(s.matrix), len(bIndices)))
for i, elem in enumerate(bIndices):
    transpColumn[elem, i] = 1
A_eq = numpy.append(numpy.array(s.matrix, dtype=float), transpColumn, 1)
print A_eq
print numpy.shape(A_eq)
b_eq = numpy.zeros(len(A_eq))
print numpy.shape(A_eq)[1]
print len(b_eq)
print len(numpy.random.randint(-10, -5, numpy.shape(A_eq)[1]))
print len(numpy.random.randint(5, 10, numpy.shape(A_eq)[1]))

lp = LP(f=obj, Aeq=A_eq, beq=b_eq,
        lb=numpy.random.randint(-6, -0, numpy.shape(A_eq)[1]),
        ub=numpy.random.randint(0, 6, numpy.shape(A_eq)[1]))
lp.exportToMPS  # note: the method is only referenced here, not called
# print help(lp.manage)
# lp.solve('cvxopt_lp')
# print help(lp.solve)
r = lp.solve('glpk', goal='max')
# print dir(r)
# print r.ff
# print r.evals
# print r.isFeasible
# print r.msg
# print r.xf
# print r.duals
# print r.rf
# print r.solverInfo
# problem = openopt.LP(f=objective, Aeq=A_eq, beq=b_eq, lb=lb, ub=ub)