def solve(self):
    """Solve the stored linear program with GLPK.

    Maximizes the objective (GLP_MAX) and suppresses all solver
    terminal output (GLP_MSG_OFF).  The problem data is taken from
    the attributes set up elsewhere on this object.
    """
    # Assemble the call keyword-by-keyword so the mapping from
    # instance attributes to solver arguments is explicit.
    problem = dict(
        c=self.c,
        A_ub=self.A_ub,
        b_ub=self.b_ub,
        A_eq=self.A_eq,
        b_eq=self.b_eq,
        bounds=self.bounds,
        sense=glpk.GLPK.GLP_MAX,
        message_level=glpk.GLPK.GLP_MSG_OFF,
    )
    return glpk.glpk(**problem)
# Example LP with both inequality and equality constraints, solved
# first with GLPK (dual simplex, advanced initial basis, presolve)
# and then with SciPy's linprog for comparison.
c = [-1, 8, 4, -6]
A_ub = [
    [-7, -7, 6, 9],
    [1, -1, -3, 0],
    [10, -10, -7, 7],
    [6, -1, 3, 4],
]
b_ub = [-3, 6, -6, 6]
A_eq = [[-10, 1, 1, -8]]
b_eq = [-4]
bnds = None  # default bounds (non-negative variables)

res = glpk(
    c, A_ub, b_ub, A_eq, b_eq, bnds,
    message_level=GLPK.GLP_MSG_OFF,
    maxit=100,
    timeout=10,
    solver='simplex',
    basis_fac='btf+cbg',
    simplex_options={
        'init_basis': 'adv',
        'method': 'dual',
        'presolve': True,
        # 'exact': True,
    },
)
print('GLPK:')
print(res)
print('\n\n')

print('linprog:')
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bnds)
print(res)
def solveILPProblem(self, modFile, areaGroups, variables):
    """Solve the area-group placement ILP described by *modFile*.

    Loads the GMPL model with python-glpk, solves it as a MIP with a
    relaxed gap, then copies the solved xloc/yloc/xdim/ydim variables
    back onto the entries of *areaGroups*.  Returns True on success,
    False if the solution left any area group with a degenerate
    (sub-unit) dimension.

    NOTE(review): relies on python-glpk private internals
    (``_parm``, ``_lp``, ``_tran`` ...) — fragile across versions.
    """
    # now that we've written out the file, solve it.
    # Necessary to force ply rebuild. Sad...
    import glpk
    example = glpk.glpk(modFile)
    example._parm.tm_lim = 100 # value in milliseconds
    example._parm.it_lim = 1000 # simplex iteration limit (a count, not ms)
    # This function is a copy of the solve function
    # provided by python-glpk. However, because we need
    # to tweak some of the icop struct arguments, we must
    # expose this routine.
    def solve_int(self, instantiate = True, bounds = True):
        #glp_term_out(GLP_OFF);
        if not self._ready: self.update()
        #glp_term_out(GLP_ON);
        if self._cols == None or self._rows == None: self._read_variables()
        if bounds: self._apply_bounds()
        if glpk.glp_get_num_int(self._lp) == 0: # problem is continuous
            res = glpk.glp_simplex(self._lp, self._parm) # self._parm !!!
        else: # problem is MIP
            if self._tran: glpk.glp_mpl_build_prob(self._tran, self._lp);
            res = glpk.glp_simplex(self._lp, self._parm); # ??? should use dual simplex ???
            iocp_param = glpk.glp_iocp();
            glpk.glp_init_iocp(iocp_param);
            #iocp_param.tm_lim=600*1000
            # Accept any integer solution within 95% of the LP bound —
            # trades optimality for a fast answer.
            iocp_param.mip_gap=0.95
            glpk.glp_intopt(self._lp, iocp_param);
            if self._tran:
                ret = glpk.glp_mpl_postsolve(self._tran, self._lp, glpk.GLP_MIP);
                if ret != 0:
                    print "Error on postsolving model"
                    raise AttributeError
        if instantiate: self._instantiate_solution()
        # 0 means the simplex phase succeeded; anything else is failure.
        if res != 0:
            return None
        else:
            return glpk.glp_get_obj_val(self._lp);
    print str(example._parm)
    example.update()
    solve_int(example)
    # dump interesting variables
    def dumpVariables(example):
        # Variable names arrive as strings, so eval() is used to reach
        # the matching attribute on the solved model object.
        for variable in variables:
            print variable + ' is: ' + str(eval('example.' + variable).value())
    dumpVariables(example)
    # print out module locations
    areaGroupNames = sorted([name for name in areaGroups])
    for areaGroupIndex in range(len(areaGroupNames)):
        areaGroup = areaGroups[areaGroupNames[areaGroupIndex]]
        # Solved placement coordinates for this area group.
        areaGroup.xLoc = eval('example.xloc_' + areaGroup.name).value()
        areaGroup.yLoc = eval('example.yloc_' + areaGroup.name).value()
        # figure out the chose dimensions.
        # unfortunately, the tools may give 'None' for
        # some dimensions, if they were previously
        # defined...
        xDimension = eval('example.xdim_' + areaGroup.name).value()
        yDimension = eval('example.ydim_' + areaGroup.name).value()
        if(xDimension is None):
            # Fall back to the first previously-defined dimension.
            areaGroup.xDimension = areaGroup.xDimension[0]
        else:
            areaGroup.xDimension = xDimension
        if(yDimension is None):
            areaGroup.yDimension = areaGroup.yDimension[0]
        else:
            areaGroup.yDimension = yDimension
        # If problem was not satisfiable, then all variables
        # are set to zero. We should really give up and
        # clear all area groups, since this technically
        # results in a correct solution.
        if(areaGroup.xDimension < 1 or areaGroup.yDimension < 1):
            print "Failed to find solution to area group placement for: " + areaGroup.name
            return False
    return True
from glpk import glpk

# Small mixed-integer program: variable 1 is declared integral via
# 'intcon', and the MIP presolver is enabled.
c = [8, 1]
A_ub = [
    [-1, -2],
    [-4, -1],
    [2, 1],
]
b_ub = [14, -33, 20]

res = glpk(
    c,
    A_ub,
    b_ub,
    solver='mip',
    mip_options={
        'nomip': False,
        'intcon': [1],
        'presolve': True,
    },
)
print(res)
def setUp(self):
    """Load the CPLEX-format test model and wrap it in a GLPK problem."""
    model_path = 'test_data/model.lp'
    self.lp = util.ImportCplex(model_path)
    self.glp = glpk.glpk(self.lp)
import glpk print "starting..." example = glpk.glpk("example.mod") example.update() example.solve() print "solution:", example.solution() print "solution is also here: x =", example.x, "y =", example.y
def assemble_output_ilp(final_passages, scored_candidates, final_length):
    """Select sentences from *final_passages* with an ILP.

    Builds a GMPL knapsack-style model: binary s[i] picks sentence i,
    binary e[j] marks candidate j as covered, the objective maximizes
    covered candidate scores, and total sentence length is capped at
    *final_length*.  Returns (output_text, evidence_documents).

    NOTE(review): presumably final_passages entries are
    (metadata_dict, text) pairs — the code reads passage[0]['document']
    and passage[1]; confirm against the caller.
    """
    import glpk
    ####
    # Clean passage text and get final length per sentence.
    #
    sentences = list()
    seen = set()  # de-duplicate cleaned sentence text
    for passage in final_passages:
        for chunk in passage[1].split(' .'):
            chunk_sentences = nltk.sent_tokenize(chunk)
            for sentence in chunk_sentences:
                clean_text = clean_passage_text(sentence)
                if clean_text in seen:
                    continue
                # Keep only reasonably long sentences; ensure terminal
                # punctuation.  Stored tuple: (text, length+1, document).
                if len(clean_text) > 20:
                    if not clean_text[-1] in [ '.', '!', '?' ]:
                        clean_text += '.'
                    sentences.append( ( clean_text, len(clean_text) + 1, passage[0]['document'] ) )
                    seen.add(clean_text)
    ####
    # Check which candidates appear in which sentences
    #
    candidate_per_sentence = set() # string '%d-%d' candidate-sentence
    candidate_scores = list()
    for cand_idx in xrange(len(scored_candidates)):
        candidate_text = scored_candidates[cand_idx][0]
        candidate_scores.append(scored_candidates[cand_idx][1])
        for sent_idx in xrange(len(sentences)):
            # Substring match: candidate j "appears in" sentence i.
            if candidate_text in sentences[sent_idx][0]:
                candidate_per_sentence.add( '%d-%d' % (cand_idx, sent_idx) )
    ####
    # Build ILP model
    #
    # The model is written as a GMPL (.mod) file and handed to
    # python-glpk by file name; delete=False so glpk can reopen it.
    f = tempfile.NamedTemporaryFile(delete=False, suffix='.mod')
    f.write('param NS;\n')
    f.write('param NC;\n')
    f.write('param K;\n')
    f.write('param M{1..NS, 1..NC}, binary;\n')
    f.write('param L{1..NS}, integer;\n')
    f.write('param W{1..NC} ;\n')
    f.write('\n')
    f.write('var s{1..NS}, binary;\n')
    f.write('var e{1..NC}, binary;\n')
    f.write('\n')
    # Objective: total score of covered candidates.
    f.write('maximize z: sum { i in 1..NC } e[i]*W[i];\n');
    f.write('\n')
    # Length budget over the chosen sentences.
    f.write('subject to l:\n')
    f.write('  sum { i in 1..NS } L[i]*s[i] <= K;\n')
    f.write('\n')
    # A candidate only counts if some chosen sentence contains it.
    f.write('subject to m {j in 1..NC}:\n')
    f.write('  sum { i in 1..NS } M[i,j]*s[i] >= e[j];\n')
    f.write('\n')
    f.write('data;\n')
    f.write('param NS := %d;\n' % (len(sentences),))
    f.write('param NC := %d;\n' % (len(candidate_scores),))
    f.write('param K := %d;\n' % (final_length,))
    # Per-sentence lengths (GMPL is 1-indexed).
    f.write('param L :=')
    for sent_idx in xrange(len(sentences)):
        if sent_idx > 0:
            f.write(',')
        f.write(' [%d] %d' % (sent_idx + 1, sentences[sent_idx][1]))
    f.write(';\n')
    # Sentence/candidate incidence matrix.
    f.write('param M :=')
    for sent_idx in xrange(len(sentences)):
        f.write('\n');
        for cand_idx in xrange(len(scored_candidates)):
            f.write('[%d,%d] ' % (sent_idx+1, cand_idx+1,))
            if '%d-%d' % (cand_idx, sent_idx) in candidate_per_sentence:
                f.write(' 1 ')
            else:
                f.write(' 0 ')
    f.write(';\n')
    # Candidate weights.
    f.write('param W :=')
    for cand_idx in xrange(len(scored_candidates)):
        f.write(' [%d] %f' % (cand_idx+1, candidate_scores[cand_idx]))
    f.write(';\n')
    f.write('end;\n')
    f.close()
    constraints = glpk.glpk(f.name)
    constraints.update()
    constraints.solve()
    #print constraints.solution()
    #print constraints.s
    # take selected sentences
    output = ""
    evidence = list()
    for sent_idx in xrange(len(sentences)):
        # s is 1-indexed; a value of 1.0 means the sentence was chosen.
        if constraints.s[sent_idx+1].value() == 1.0:
            output = "%s %s" % (output, sentences[sent_idx][0])
            evidence.append(sentences[sent_idx][2])
    output = output.strip()
    # Remove the temporary model file.
    os.unlink(f.name)
    return (output, evidence)
def assemble_output_ilp(final_passages, scored_candidates, final_length):
    """Select sentences from *final_passages* with an ILP.

    Builds a GMPL knapsack-style model: binary s[i] picks sentence i,
    binary e[j] marks candidate j as covered, the objective maximizes
    covered candidate scores, and total sentence length is capped at
    *final_length*.  Returns (output_text, evidence_documents).

    NOTE(review): this appears to be a reformatted duplicate of the
    other assemble_output_ilp in this file — consider keeping one.
    """
    import glpk
    ####
    # Clean passage text and get final length per sentence.
    #
    sentences = list()
    seen = set()  # de-duplicate cleaned sentence text
    for passage in final_passages:
        for chunk in passage[1].split(' .'):
            chunk_sentences = nltk.sent_tokenize(chunk)
            for sentence in chunk_sentences:
                clean_text = clean_passage_text(sentence)
                if clean_text in seen:
                    continue
                # Keep only reasonably long sentences; ensure terminal
                # punctuation.  Stored tuple: (text, length+1, document).
                if len(clean_text) > 20:
                    if not clean_text[-1] in ['.', '!', '?']:
                        clean_text += '.'
                    sentences.append((clean_text, len(clean_text) + 1, passage[0]['document']))
                    seen.add(clean_text)
    ####
    # Check which candidates appear in which sentences
    #
    candidate_per_sentence = set()  # string '%d-%d' candidate-sentence
    candidate_scores = list()
    for cand_idx in xrange(len(scored_candidates)):
        candidate_text = scored_candidates[cand_idx][0]
        candidate_scores.append(scored_candidates[cand_idx][1])
        for sent_idx in xrange(len(sentences)):
            # Substring match: candidate j "appears in" sentence i.
            if candidate_text in sentences[sent_idx][0]:
                candidate_per_sentence.add('%d-%d' % (cand_idx, sent_idx))
    ####
    # Build ILP model
    #
    # The model is written as a GMPL (.mod) file and handed to
    # python-glpk by file name; delete=False so glpk can reopen it.
    f = tempfile.NamedTemporaryFile(delete=False, suffix='.mod')
    f.write('param NS;\n')
    f.write('param NC;\n')
    f.write('param K;\n')
    f.write('param M{1..NS, 1..NC}, binary;\n')
    f.write('param L{1..NS}, integer;\n')
    f.write('param W{1..NC} ;\n')
    f.write('\n')
    f.write('var s{1..NS}, binary;\n')
    f.write('var e{1..NC}, binary;\n')
    f.write('\n')
    # Objective: total score of covered candidates.
    f.write('maximize z: sum { i in 1..NC } e[i]*W[i];\n')
    f.write('\n')
    # Length budget over the chosen sentences.
    f.write('subject to l:\n')
    f.write('  sum { i in 1..NS } L[i]*s[i] <= K;\n')
    f.write('\n')
    # A candidate only counts if some chosen sentence contains it.
    f.write('subject to m {j in 1..NC}:\n')
    f.write('  sum { i in 1..NS } M[i,j]*s[i] >= e[j];\n')
    f.write('\n')
    f.write('data;\n')
    f.write('param NS := %d;\n' % (len(sentences), ))
    f.write('param NC := %d;\n' % (len(candidate_scores), ))
    f.write('param K := %d;\n' % (final_length, ))
    # Per-sentence lengths (GMPL is 1-indexed).
    f.write('param L :=')
    for sent_idx in xrange(len(sentences)):
        if sent_idx > 0:
            f.write(',')
        f.write(' [%d] %d' % (sent_idx + 1, sentences[sent_idx][1]))
    f.write(';\n')
    # Sentence/candidate incidence matrix.
    f.write('param M :=')
    for sent_idx in xrange(len(sentences)):
        f.write('\n')
        for cand_idx in xrange(len(scored_candidates)):
            f.write('[%d,%d] ' % (
                sent_idx + 1,
                cand_idx + 1,
            ))
            if '%d-%d' % (cand_idx, sent_idx) in candidate_per_sentence:
                f.write(' 1 ')
            else:
                f.write(' 0 ')
    f.write(';\n')
    # Candidate weights.
    f.write('param W :=')
    for cand_idx in xrange(len(scored_candidates)):
        f.write(' [%d] %f' % (cand_idx + 1, candidate_scores[cand_idx]))
    f.write(';\n')
    f.write('end;\n')
    f.close()
    constraints = glpk.glpk(f.name)
    constraints.update()
    constraints.solve()
    #print constraints.solution()
    #print constraints.s
    # take selected sentences
    output = ""
    evidence = list()
    for sent_idx in xrange(len(sentences)):
        # s is 1-indexed; a value of 1.0 means the sentence was chosen.
        if constraints.s[sent_idx + 1].value() == 1.0:
            output = "%s %s" % (output, sentences[sent_idx][0])
            evidence.append(sentences[sent_idx][2])
    output = output.strip()
    # Remove the temporary model file.
    os.unlink(f.name)
    return (output, evidence)