def test_solve1(self):
    model = ConcreteModel()
    model.A = RangeSet(1, 4)
    model.x = Var(model.A, bounds=(-1, 1))

    def obj_rule(model):
        return sum_product(model.x)

    model.obj = Objective(rule=obj_rule)

    def c_rule(model):
        expr = 0
        for i in model.A:
            expr += i*model.x[i]
        return expr == 0

    model.c = Constraint(rule=c_rule)

    opt = SolverFactory('glpk')
    results = opt.solve(model, symbolic_solver_labels=True)
    model.solutions.store_to(results)
    results.write(filename=join(currdir, "solve1.out"), format='json')
    self.assertMatchesJsonBaseline(
        join(currdir, "solve1.out"), join(currdir, "solve1.txt"),
        tolerance=1e-4)

    #
    def d_rule(model):
        return model.x[1] >= 0

    model.d = Constraint(rule=d_rule)
    model.d.deactivate()
    results = opt.solve(model)
    model.solutions.store_to(results)
    results.write(filename=join(currdir, "solve1x.out"), format='json')
    self.assertMatchesJsonBaseline(
        join(currdir, "solve1x.out"), join(currdir, "solve1.txt"),
        tolerance=1e-4)

    #
    model.d.activate()
    results = opt.solve(model)
    model.solutions.store_to(results)
    results.write(filename=join(currdir, "solve1a.out"), format='json')
    self.assertMatchesJsonBaseline(
        join(currdir, "solve1a.out"), join(currdir, "solve1a.txt"),
        tolerance=1e-4)

    #
    model.d.deactivate()

    def e_rule(model, i):
        return model.x[i] >= 0

    model.e = Constraint(model.A, rule=e_rule)
    for i in model.A:
        model.e[i].deactivate()
    results = opt.solve(model)
    model.solutions.store_to(results)
    results.write(filename=join(currdir, "solve1y.out"), format='json')
    self.assertMatchesJsonBaseline(
        join(currdir, "solve1y.out"), join(currdir, "solve1.txt"),
        tolerance=1e-4)

    #
    model.e.activate()
    results = opt.solve(model)
    model.solutions.store_to(results)
    results.write(filename=join(currdir, "solve1b.out"), format='json')
    self.assertMatchesJsonBaseline(
        join(currdir, "solve1b.out"), join(currdir, "solve1b.txt"),
        tolerance=1e-4)
def Xtest_sip_example1(self):
    M = examples.sip_example1.create()
    opt = Solver('pao.pyomo.FA')
    opt.solve(M, solver='gurobi')
    self.assertTrue(False)
def _apply_solver(self):
    start_time = time.time()
    # construct the high-point problem (LL feasible, no LL objective)
    # s0 <- solve the high-point
    # if s0 infeasible then return high_point_infeasible
    xfrm = TransformationFactory('pao.bilevel.highpoint')
    xfrm.apply_to(self._instance)
    #
    # Solve with a specified solver
    #
    solver = self.options.solver
    if not self.options.solver:
        solver = 'ipopt'
    for c in self._instance.component_objects(Block, descend_into=False):
        if '_hp' in c.name:
            c.activate()
            opt = pyomo.opt.SolverFactory(solver)
            results = opt.solve(c)
            _check_termination_condition(results)
            c.deactivate()
    # s1 <- solve the optimistic bilevel (linear/linear) problem (call solver3)
    # if s1 infeasible then return optimistic_infeasible
    opt = BilevelSolver3()
    opt.options.solver = solver
    results = opt.solve(self._instance)
    _check_termination_condition(results)
def test_solve1(self): model = ConcreteModel() model.A = RangeSet(1, 4) model.x = Var(model.A, bounds=(-1, 1)) def obj_rule(model): return summation(model.x) model.obj = Objective(rule=obj_rule) def c_rule(model): expr = 0 for i in model.A: expr += i * model.x[i] return expr == 0 model.c = Constraint(rule=c_rule) opt = solver['glpk'] results = opt.solve(model, keepfiles=True, symbolic_solver_labels=True) model.solutions.store_to(results) results.write(filename=currdir + "solve1.out", format='json') self.assertMatchesJsonBaseline(currdir + "solve1.out", currdir + "solve1.txt", tolerance=1e-4) # def d_rule(model): return model.x[1] >= 0 model.d = Constraint(rule=d_rule) model.d.deactivate() results = opt.solve(model, keepfiles=True) model.solutions.store_to(results) results.write(filename=currdir + "solve1x.out", format='json') self.assertMatchesJsonBaseline(currdir + "solve1x.out", currdir + "solve1.txt", tolerance=1e-4) # model.d.activate() results = opt.solve(model, keepfiles=True) model.solutions.store_to(results) results.write(filename=currdir + "solve1a.out", format='json') self.assertMatchesJsonBaseline(currdir + "solve1a.out", currdir + "solve1a.txt", tolerance=1e-4) # model.d.deactivate() def e_rule(model, i): return model.x[i] >= 0 model.e = Constraint(model.A, rule=e_rule) for i in model.A: model.e[i].deactivate() results = opt.solve(model, keepfiles=True) model.solutions.store_to(results) results.write(filename=currdir + "solve1b.out", format='json') self.assertMatchesJsonBaseline(currdir + "solve1b.out", currdir + "solve1b.txt", tolerance=1e-4)
def test_pineda(self):
    M = examples.pineda.create()
    opt = Solver('pao.pyomo.FA')
    opt.solve(M)
    self.assertTrue(math.isclose(M.xR.value, 2))
    self.assertTrue(math.isclose(M.L.xR.value, 100))
def test_besancon27(self):
    M = examples.besancon27.create()
    opt = Solver('pao.pyomo.FA')
    opt.solve(M)
    self.assertTrue(math.isclose(M.x.value, 0))
    self.assertTrue(math.isclose(M.v.value, 1))
def test_neos_ipopt_available(self):
    M = create_nlp1()
    opt = Solver('ipopt', server='neos', email='*****@*****.**',
                 max_cpu_time=1e-12)
    self.assertTrue(opt.available())
    res = opt.solve(M)
    #M.x.pprint()
    res = opt.solve(M, max_cpu_time=100)
    self.assertTrue(math.isclose(opt.solver_options['max_cpu_time'], 1e-12))
def test_PCCG(self):
    M = examples.bard511.create()
    opt = Solver('pao.pyomo.PCCG', mip_solver=Solver('cbc'))
    opt.solve(M)
    self.assertTrue(math.isclose(M.x.value, 4))
    self.assertTrue(math.isclose(M.y.value, 4))
def test_bard511(self):
    M = examples.bard511.create()
    opt = Solver('pao.pyomo.FA')
    opt.solve(M)
    self.assertTrue(math.isclose(M.x.value, 4))
    self.assertTrue(math.isclose(M.y.value, 4))
def test_toyexample2(self):
    M = examples.toyexample2.create()
    opt = Solver('pao.pyomo.PCCG')
    opt.solve(M)
    self.assertEqual(M.xZ.value, 8)
    self.assertEqual(M.L.xZ.value, 6)
def test_REG(self):
    M = examples.bard511.create()
    opt = Solver('pao.pyomo.REG', nlp_solver=Solver('ipopt'))
    opt.solve(M)
    self.assertTrue(math.isclose(M.x.value, 4, abs_tol=1e-4))
    self.assertTrue(math.isclose(M.y.value, 4, abs_tol=1e-4))
def test_pineda(self):
    M = examples.pineda.create()
    opt = Solver('pao.pyomo.PCCG')
    opt.solve(M)
    self.assertTrue(math.isclose(M.xR.value, 2, abs_tol=1e-4))
    self.assertTrue(math.isclose(M.L.xR.value, 100, abs_tol=1e-4))
def test_getachew_ex2(self):
    M = examples.getachew_ex2.create()
    opt = Solver('pao.pyomo.PCCG')
    opt.solve(M)
    self.assertTrue(math.isclose(M.xR.value, 6, abs_tol=1e-4))
    self.assertTrue(math.isclose(M.L.xR.value, 8, abs_tol=1e-4))
def test_besancon27(self):
    M = examples.besancon27.create()
    opt = Solver('pao.pyomo.PCCG')
    opt.solve(M)
    self.assertTrue(math.isclose(M.x.value, 0, abs_tol=1e-4))
    self.assertTrue(math.isclose(M.v.value, 1, abs_tol=1e-4))
def test_getachew_ex2(self):
    M = examples.getachew_ex2.create()
    opt = Solver('pao.pyomo.FA')
    opt.solve(M)
    self.assertTrue(math.isclose(M.xR.value, 6))
    self.assertTrue(math.isclose(M.L.xR.value, 8))
def test_toyexample3(self):
    M = examples.toyexample3.create()
    opt = Solver('pao.pyomo.PCCG')
    opt.solve(M)
    self.assertTrue(math.isclose(M.xR.value, 3, abs_tol=1e-4))
    self.assertTrue(math.isclose(M.L.xR.value, 0.5, abs_tol=1e-4))
    self.assertEqual(M.xZ.value, 8)
    self.assertEqual(M.L.xZ.value, 0)
def test_nlp1(self):
    M = create_nlp1()
    opt = Solver('ipopt', tee=True, max_cpu_time=1e-12, print_level=0)
    self.assertTrue(opt.available())
    #opt.config.display()
    #print(opt.solver.options)
    res = opt.solve(M)
    res = opt.solve(M, max_cpu_time=100)
    # Solving with a solver option doesn't change the value configured
    # when creating the solver
    self.assertTrue(math.isclose(opt.solver.options.max_cpu_time, 1e-12))
def solve(self, alpha, beta, gamma):
    '''solve(self,alpha,beta,gamma):
    Solve the equitable retirement optimization problem.
    PRECONDITION: All sets and params have been initialized.
    '''
    # rebuild model
    self.__buildModel(alpha, beta, gamma)
    opt = pyomo.opt.SolverFactory('glpk')
    opt.solve(self.model)
    # extract
    self.__extractResults()
def solve_with_heuristic(self, num_clusters, method='ward', max_time=1800): summary = {} opt = pyomo.opt.SolverFactory('glpk') opt.options['tmlim'] = max_time self.cluster = self.make_clusters(num_clusters, method) # Creating and solving reduced problem reduced_distances = {(i, j): self.calc_dist(x, y) for i, x in self.clusters.iterrows() for j, y in self.clusters.iterrows()} reduced_model = self.create_model(reduced_distances, num_clusters) results = opt.solve(reduced_model) try: obj_func = pe.value(reduced_model.obj) except ValueError: print("Solution for the reduced problem not found") return None # Creating original problem distances = {(i, j): self.calc_dist(x, y) for i, x in self.data.iterrows() for j, y in self.data.iterrows()} model = self.create_model(distances, len(self.data) - 1) # Fixing clusters relative positions for clust_idx in reduced_distances.keys(): if clust_idx[0] != clust_idx[1] and pe.value( reduced_model.x[clust_idx]) == 0: idx1 = self.data[self.data['cluster'] == clust_idx[0]].index idx2 = self.data[self.data['cluster'] == clust_idx[1]].index fix_idx = [ edge for edge in distances.keys() if (edge[0] in idx1) and (edge[1] in idx2) ] for idx in fix_idx: model.x[idx].fix(0) # Solving original problem results = opt.solve(model) try: obj_func = pe.value(model.obj) except ValueError: print("Solution for Original problem not found") return None self.solution = [] for idx in distances.keys(): if pe.value(model.x[idx]) == 1: self.solution.append(idx) return obj_func
def test_barguel(self):
    M = examples.barguel.create()
    opt = Solver('pao.pyomo.FA')
    try:
        opt.solve(M)
        self.fail("Expected an assertion error")
    except AssertionError:
        pass
    opt.solve(M, linearize_bigm=1e6)
    self.assertTrue(math.isclose(M.u.value, 0))
    self.assertTrue(math.isclose(M.x.value, 0))
    self.assertTrue(math.isclose(M.y.value, 0))
def test_solve7(self):
    #
    # Test that solution values are written with appropriate quotations
    # in results
    #
    model = ConcreteModel()
    model.y = Var(bounds=(-1,1))
    model.A = RangeSet(1,4)
    model.B = Set(initialize=['A B', 'C,D', 'E'])
    model.x = Var(model.A, model.B, bounds=(-1,1))

    def obj_rule(model):
        return summation(model.x)

    model.obj = Objective(rule=obj_rule)

    def c_rule(model):
        expr = model.y
        for i in model.A:
            for j in model.B:
                expr += i*model.x[i,j]
        return expr == 0

    model.c = Constraint(rule=c_rule)

    opt = solver['glpk']
    results = opt.solve(model, symbolic_solver_labels=True)
    #model.display()
    model.solutions.store_to(results)
    results.write(filename=currdir+'solve7.out', format='json')
    self.assertMatchesJsonBaseline(currdir+"solve7.out",
                                   currdir+"solve7.txt",
                                   tolerance=1e-4)
def main(argv):
    parser = argparse.ArgumentParser(
        prog='python -m switch_mod.solve',
        description='Runs the Switch power grid model solver.')
    parser.add_argument(
        '--inputs-dir', type=str, default='inputs',
        help='Directory containing input files (default is "inputs")')
    parser.add_argument(
        '--outputs-dir', type=str, default='outputs',
        help='Directory to write output files (default is "outputs")')
    parser.add_argument(
        '--solver', type=str, default='glpk',
        help='Linear program solver to use (default is "glpk")')
    args = parser.parse_args(argv)

    opt = pyomo.opt.SolverFactory(args.solver)
    module_list = [
        line.rstrip('\n')
        for line in open(os.path.join(args.inputs_dir, 'modules'), 'r')]
    switch_model = switch_mod.utilities.define_AbstractModel(
        'switch_mod', *module_list)
    switch_instance = switch_model.load_inputs(inputs_dir=args.inputs_dir)
    results = opt.solve(switch_instance, keepfiles=False, tee=False)
    switch_instance.load(results)
    results.write()
    switch_instance.pprint()
    switch_model.save_results(results, switch_instance, args.outputs_dir)
def test_solve6(self):
    #
    # Test that solution values have complete block names:
    #   b.obj
    #   b.x
    #
    model = ConcreteModel()
    model.y = Var(bounds=(-1, 1))
    model.b = Block()
    model.b.A = RangeSet(1, 4)
    model.b.x = Var(model.b.A, bounds=(-1, 1))

    def obj_rule(block):
        return summation(block.x)

    model.b.obj = Objective(rule=obj_rule)

    def c_rule(model):
        expr = model.y
        for i in model.b.A:
            expr += i * model.b.x[i]
        return expr == 0

    model.c = Constraint(rule=c_rule)

    opt = SolverFactory('glpk')
    results = opt.solve(model, symbolic_solver_labels=True)
    model.solutions.store_to(results)
    results.write(filename=join(currdir, 'solve6.out'), format='json')
    self.assertMatchesJsonBaseline(join(currdir, "solve6.out"),
                                   join(currdir, "solve6.txt"),
                                   tolerance=1e-4)
def test_solve7(self):
    #
    # Test that solution values are written with appropriate
    # quotations in results
    #
    model = ConcreteModel()
    model.y = Var(bounds=(-1, 1))
    model.A = RangeSet(1, 4)
    model.B = Set(initialize=['A B', 'C,D', 'E'])
    model.x = Var(model.A, model.B, bounds=(-1, 1))

    def obj_rule(model):
        return summation(model.x)

    model.obj = Objective(rule=obj_rule)

    def c_rule(model):
        expr = model.y
        for i in model.A:
            for j in model.B:
                expr += i * model.x[i, j]
        return expr == 0

    model.c = Constraint(rule=c_rule)

    opt = SolverFactory('glpk')
    results = opt.solve(model, symbolic_solver_labels=True)
    #model.display()
    model.solutions.store_to(results)
    results.write(filename=join(currdir, 'solve7.out'), format='json')
    self.assertMatchesJsonBaseline(join(currdir, "solve7.out"),
                                   join(currdir, "solve7.txt"),
                                   tolerance=1e-4)
def test_solve_with_pickle_then_clone(self): # This tests github issue Pyomo-#65 model = ConcreteModel() model.A = RangeSet(1,4) model.b = Block() model.b.x = Var(model.A, bounds=(-1,1)) model.b.obj = Objective(expr=sum_product(model.b.x)) model.c = Constraint(expr=model.b.x[1] >= 0) opt = SolverFactory('glpk') self.assertEqual(len(model.solutions), 0) results = opt.solve(model, symbolic_solver_labels=True) self.assertEqual(len(model.solutions), 1) # self.assertEqual(model.solutions[0].gap, 0.0) #self.assertEqual(model.solutions[0].status, SolutionStatus.feasible) self.assertEqual(model.solutions[0].message, None) # buf = pickle.dumps(model) tmodel = pickle.loads(buf) self.assertEqual(len(tmodel.solutions), 1) self.assertEqual(tmodel.solutions[0].gap, 0.0) #self.assertEqual(tmodel.solutions[0].status, SolutionStatus.feasible) self.assertEqual(tmodel.solutions[0].message, None) self.assertIn(id(tmodel.b.obj), tmodel.solutions[0]._entry['objective']) self.assertIs( tmodel.b.obj, tmodel.solutions[0]._entry['objective'][id(tmodel.b.obj)][0]() ) inst = tmodel.clone() # make sure the clone has all the attributes self.assertTrue(hasattr(inst,'A')) self.assertTrue(hasattr(inst,'b')) self.assertTrue(hasattr(inst.b,'x')) self.assertTrue(hasattr(inst.b,'obj')) self.assertTrue(hasattr(inst,'c')) # and that they were all copied self.assertIsNot(inst.A, tmodel.A) self.assertIsNot(inst.b, tmodel.b) self.assertIsNot(inst.b.x, tmodel.b.x) self.assertIsNot(inst.b.obj, tmodel.b.obj) self.assertIsNot(inst.c, tmodel.c) # Make sure the solution is on the new model self.assertTrue(hasattr(inst,'solutions')) self.assertEqual(len(inst.solutions), 1) self.assertEqual(inst.solutions[0].gap, 0.0) #self.assertEqual(inst.solutions[0].status, SolutionStatus.feasible) self.assertEqual(inst.solutions[0].message, None) # Spot-check some components and make sure all the weakrefs in # the ModelSOlution got updated self.assertIn(id(inst.b.obj), inst.solutions[0]._entry['objective']) _obj = inst.solutions[0]._entry['objective'][id(inst.b.obj)] self.assertIs(_obj[0](), inst.b.obj) for v in [1,2,3,4]: self.assertIn(id(inst.b.x[v]), inst.solutions[0]._entry['variable']) _v = inst.solutions[0]._entry['variable'][id(inst.b.x[v])] self.assertIs(_v[0](), inst.b.x[v])
def test_neos_foobar(self):
    M = create_lp1()
    opt = Solver('foobar', server='neos')
    try:
        res = opt.solve(M)
    except RuntimeError:
        pass
def testMethod(obj):
    if not testing_solvers[solver, writer]:
        obj.skipTest("Solver %s (interface=%s) is not available"
                     % (solver, writer))
    m = pyutilib.misc.import_file(
        os.path.join(thisDir, 'problems', problem), clear_cache=True)
    model = m.define_model(**kwds)
    opt = pyomo.opt.SolverFactory(solver, solver_io=writer)
    results = opt.solve(model)
    # non-recursive
    new_results = ((var.name, var.value)
                   for var in model.component_data_objects(
                       Var, active=True, descend_into=False))
    baseline_results = getattr(obj, problem+'_results')
    for name, value in new_results:
        if abs(baseline_results[name]-value) > 0.00001:
            raise IOError("Difference in baseline solution values and "
                          "current solution values using:\n" +
                          "Solver: "+solver+"\n" +
                          "Writer: "+writer+"\n" +
                          "Variable: "+name+"\n" +
                          "Solution: "+str(value)+"\n" +
                          "Baseline: "+str(baseline_results[name])+"\n")
def main(argv):
    parser = argparse.ArgumentParser(
        prog='python -m switch_mod.solve',
        description='Runs the Switch power grid model solver.')
    parser.add_argument(
        '--inputs-dir', type=str, default='inputs',
        help='Directory containing input files (default is "inputs")')
    parser.add_argument(
        '--outputs-dir', type=str, default='outputs',
        help='Directory to write output files (default is "outputs")')
    parser.add_argument(
        '--solver', type=str, default='glpk',
        help='Linear program solver to use (default is "glpk")')
    parser.add_argument(
        '--verbose', '-v', default=False, action='store_true',
        help='Dump data about internal workings to stdout')
    args = parser.parse_args(argv)

    (switch_model, switch_instance) = load(args.inputs_dir)
    opt = pyomo.opt.SolverFactory(args.solver)
    results = opt.solve(switch_instance, keepfiles=False, tee=False)
    switch_model.save_results(results, switch_instance, args.outputs_dir)
    if args.verbose:
        # Print a dump of the results and model instance to standard output.
        results.write()
        switch_instance.pprint()
def _perform_queue(self, ah, *args, **kwds):
    """
    Perform the queue operation. This method returns the ActionHandle,
    and the ActionHandle status indicates whether the queue was successful.
    """
    opt = kwds.pop('solver', kwds.pop('opt', None))
    if opt is None:
        raise ActionManagerError(
            "No solver passed to %s, use keyword option 'solver'"
            % (type(self).__name__))
    time_start = time.time()
    if isinstance(opt, string_types):
        with pyomo.opt.SolverFactory(opt) as _opt:
            results = _opt.solve(*args, **kwds)
    else:
        results = opt.solve(*args, **kwds)
    results.pyomo_solve_time = time.time()-time_start
    self.results[ah.id] = results
    ah.status = ActionStatus.done
    self.event_handle[ah.id].update(ah)
    return ah
def testMethod(obj):
    if not testing_solvers[solver, writer]:
        obj.skipTest("Solver %s (interface=%s) is not available"
                     % (solver, writer))
    m = import_file(os.path.join(thisDir, 'problems', problem + '.py'),
                    clear_cache=True)
    model = m.define_model(**kwds)
    opt = pyomo.opt.SolverFactory(solver, solver_io=writer)
    results = opt.solve(model)
    # non-recursive
    new_results = ((var.name, var.value)
                   for var in model.component_data_objects(
                       Var, active=True, descend_into=False))
    baseline_results = getattr(obj, problem+'_results')
    for name, value in new_results:
        if abs(baseline_results[name]-value) > 0.00001:
            raise IOError("Difference in baseline solution values and "
                          "current solution values using:\n" +
                          "Solver: "+solver+"\n" +
                          "Writer: "+writer+"\n" +
                          "Variable: "+name+"\n" +
                          "Solution: "+str(value)+"\n" +
                          "Baseline: "+str(baseline_results[name])+"\n")
def linprog(f, A, b, Aeq=None, beq=None):
    # Dimensions of matrices
    m1 = b.shape[0]
    n = f.shape[0]
    # b must be a vector
    if b.ndim != 1:
        raise ValueError('b must be a one dimensional array')
    # A must be a matrix
    if A.ndim != 2:
        raise ValueError('A must be a two dimensional array')
    # Dimension check for inequality constraint
    if A.shape != (m1, n):
        raise ValueError(
            'The shape of A must be equal to (b.shape[0], f.shape[0])')
    # If no equality restriction
    if np.any(Aeq == None) & np.any(beq == None):
        Aeq = np.zeros((1, n))
        beq = np.zeros((1, ))
        m2 = 1
    elif (np.any(Aeq != None) & np.any(beq == None)) | (np.any(Aeq == None) & np.any(beq != None)):
        raise ValueError(
            'Please provide Aeq and beq if there is an equality constraint. '
            'If there is not, please provide none of them.')
    else:
        # Dimension of matrices
        m2 = beq.shape[0]
        # beq must be a vector
        if beq.ndim != 1:
            raise ValueError('beq must be a one dimensional array')
        # Aeq must be a matrix
        if Aeq.ndim != 2:
            raise ValueError('Aeq must be a two dimensional array')
        # Dimension check for equality constraint
        if Aeq.shape != (m2, n):
            raise ValueError(
                'The shape of Aeq must be equal to (beq.shape[0], f.shape[0])')
    # Data file creation
    dat_write_lin(f, A, b, Aeq, beq)
    # Solution
    model = linprog_model()
    # Create the model instance
    instance = model.create_instance('default.dat')
    # Setup the optimizer: linear in this case
    import pyomo.environ
    opt = pyomo.opt.SolverFactory('glpk')
    # Optimize
    results = opt.solve(instance)
    # Write the output
    results.write()
    # Optimal solution
    x = np.array([instance.x[k].value for k in instance.K])
    return x, pyomo.environ.value(instance.OBJ)
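# A minimal usage sketch for linprog() above, not taken from the source: the
# cost vector, constraint matrix, and right-hand side below are made-up data,
# and the constraint sense (<= vs >=) ultimately depends on how the
# linprog_model() Pyomo model (not shown here) is formulated. It assumes glpk
# is installed and the dat_write_lin/linprog_model helpers are importable.
import numpy as np

f = np.array([1.0, 2.0, 3.0])          # cost vector, length n
A = np.array([[1.0, 1.0, 0.0],
              [0.0, 1.0, 1.0]])        # inequality matrix, shape (m1, n)
b = np.array([10.0, 8.0])              # inequality right-hand side, length m1

x_opt, obj_val = linprog(f, A, b)      # no equality constraints supplied
print(x_opt, obj_val)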
def test_solve_with_store2(self):
    model = ConcreteModel()
    model.A = RangeSet(1,4)
    model.b = Block()
    model.b.x = Var(model.A, bounds=(-1,1))
    model.b.obj = Objective(expr=summation(model.b.x))
    model.c = Constraint(expr=model.b.x[1] >= 0)
    opt = solver['glpk']
    results = opt.solve(model)
    #
    results.write(filename=currdir+'solve_with_store3.out', format='json')
    self.assertMatchesYamlBaseline(currdir+"solve_with_store3.out",
                                   currdir+"solve_with_store3.txt")
    #
    model.solutions.store_to(results)
    results.write(filename=currdir+'solve_with_store4.out', format='json')
    self.assertMatchesYamlBaseline(currdir+"solve_with_store4.out",
                                   currdir+"solve_with_store4.txt")
    #
    # Test that we can pickle the results object
    #
    buf = pickle.dumps(results)
    results_ = pickle.loads(buf)
    results.write(filename=currdir+'solve_with_store4.out', format='json')
    self.assertMatchesYamlBaseline(currdir+"solve_with_store4.out",
                                   currdir+"solve_with_store4.txt")
    #
    # Load results with string indices
    #
    tmodel = ConcreteModel()
    tmodel.A = RangeSet(1,3)
    tmodel.b = Block()
    tmodel.b.x = Var(tmodel.A, bounds=(-1,1))
    tmodel.b.obj = Objective(expr=summation(tmodel.b.x))
    tmodel.c = Constraint(expr=tmodel.b.x[1] >= 0)
    self.assertEqual(len(tmodel.solutions), 0)
    tmodel.solutions.load_from(results, ignore_invalid_labels=True)
    self.assertEqual(len(tmodel.solutions), 1)
def test_solve_with_store2(self):
    # Without symbolic solver labels
    model = ConcreteModel()
    model.A = RangeSet(1,4)
    model.b = Block()
    model.b.x = Var(model.A, bounds=(-1,1))
    model.b.obj = Objective(expr=summation(model.b.x))
    model.c = Constraint(expr=model.b.x[1] >= 0)
    opt = solver['glpk']
    results = opt.solve(model, symbolic_solver_labels=False)
    #
    results.write(filename=currdir+'solve_with_store1.out', format='yaml')
    self.assertMatchesYamlBaseline(currdir+"solve_with_store1.out",
                                   currdir+"solve_with_store1.txt")
    model.solutions.store_to(results)
    #
    results.write(filename=currdir+'solve_with_store2.out', format='yaml')
    self.assertMatchesYamlBaseline(currdir+"solve_with_store2.out",
                                   currdir+"solve_with_store2.txt")
    #
    # Load results with string indices
    #
    tmodel = ConcreteModel()
    tmodel.A = RangeSet(1,4)
    tmodel.b = Block()
    tmodel.b.x = Var(tmodel.A, bounds=(-1,1))
    tmodel.b.obj = Objective(expr=summation(tmodel.b.x))
    tmodel.c = Constraint(expr=tmodel.b.x[1] >= 0)
    self.assertEqual(len(tmodel.solutions), 0)
    tmodel.solutions.load_from(results)
    self.assertEqual(len(tmodel.solutions), 1)
def curve_polyfit(x, y, order, reg_mode=None, reg_coef=None, robust=False):
    # Dimensions of matrices
    n = x.shape[0]
    m = order
    # y must be a vector
    if y.ndim != 1:
        raise ValueError('y must be a one dimensional array')
    # x must be a vector
    if x.ndim != 1:
        raise ValueError('x must be a one dimensional array')
    # Dimension check
    if y.shape[0] != n:
        raise ValueError('The shape of y must be equal to the shape of x')
    # Data file creation
    X = np.array([x**j for j in range(m + 1)]).T
    dat_write_fit(X, y)
    # Solution
    model = fit_model(reg_mode, reg_coef, robust, m)
    # Create the model instance
    instance = model.create_instance('default1.dat')
    # Setup the optimizer: nonlinear in this case
    import pyomo.environ
    opt = pyomo.opt.SolverFactory('ipopt')
    # Optimize
    results = opt.solve(instance)
    # Write the output
    results.write()
    # Optimal solution
    b = np.array([instance.b[j].value for j in instance.J])
    return b
def test_blending(self):
    """ The blending example from the PuLP documentation """
    model = ConcreteModel()
    model.x1 = Var(bounds=(0, None), doc="ChickenPercent")
    model.x2 = Var(bounds=(0, None), doc="BeefPercent")

    model.obj = Objective(expr=0.013 * model.x1 + 0.008 * model.x2,
                          doc="Total Cost of Ingredients per can")
    model.c0 = Constraint(expr=model.x1 + model.x2 == 100.0,
                          doc="Percentage Sum")
    model.c1 = Constraint(expr=0.100 * model.x1 + 0.200 * model.x2 >= 8.0,
                          doc="Protein Requirement")
    model.c2 = Constraint(expr=0.080 * model.x1 + 0.100 * model.x2 >= 6.0,
                          doc="Fat Requirement")
    model.c3 = Constraint(expr=0.001 * model.x1 + 0.005 * model.x2 <= 2.0,
                          doc="Fiber Requirement")
    model.c4 = Constraint(expr=0.002 * model.x1 + 0.005 * model.x2 <= 0.4,
                          doc="Salt Requirement")

    opt = solver['glpk']
    results = opt.solve(model)
    model.solutions.store_to(results)
    results.write(filename=currdir + "blend.out", format='json')
    self.assertMatchesJsonBaseline(currdir + "blend.out",
                                   currdir + "blend.txt",
                                   tolerance=1e-2)
def _perform_queue(self, ah, *args, **kwds):
    """
    Perform the queue operation. This method returns the ActionHandle,
    and the ActionHandle status indicates whether the queue was successful.
    """
    opt = kwds.pop('solver', kwds.pop('opt', None))
    if opt is None:
        raise ActionManagerError(
            "No solver passed to %s, use keyword option 'solver'"
            % (type(self).__name__))
    time_start = time.time()
    if isinstance(opt, str):
        with pyomo.opt.SolverFactory(opt) as _opt:
            results = _opt.solve(*args, **kwds)
    else:
        results = opt.solve(*args, **kwds)
    results.pyomo_solve_time = time.time() - time_start
    self.results[ah.id] = results
    ah.status = ActionStatus.done
    self.event_handle[ah.id].update(ah)
    return ah
def test_solve_with_store2(self):
    # Without symbolic solver labels
    model = ConcreteModel()
    model.A = RangeSet(1, 4)
    model.b = Block()
    model.b.x = Var(model.A, bounds=(-1, 1))
    model.b.obj = Objective(expr=summation(model.b.x))
    model.c = Constraint(expr=model.b.x[1] >= 0)
    opt = SolverFactory('glpk')
    results = opt.solve(model, symbolic_solver_labels=False)
    #
    results.write(filename=join(currdir, 'solve_with_store1.out'),
                  format='yaml')
    self.assertMatchesYamlBaseline(join(currdir, "solve_with_store1.out"),
                                   join(currdir, "solve_with_store1.txt"))
    model.solutions.store_to(results)
    #
    results.write(filename=join(currdir, 'solve_with_store2.out'),
                  format='yaml')
    self.assertMatchesYamlBaseline(join(currdir, "solve_with_store2.out"),
                                   join(currdir, "solve_with_store2.txt"))
    #
    # Load results with string indices
    #
    tmodel = ConcreteModel()
    tmodel.A = RangeSet(1, 4)
    tmodel.b = Block()
    tmodel.b.x = Var(tmodel.A, bounds=(-1, 1))
    tmodel.b.obj = Objective(expr=summation(tmodel.b.x))
    tmodel.c = Constraint(expr=tmodel.b.x[1] >= 0)
    self.assertEqual(len(tmodel.solutions), 0)
    tmodel.solutions.load_from(results)
    self.assertEqual(len(tmodel.solutions), 1)
def test_solve_with_store1(self):
    # With symbolic solver labels
    model = ConcreteModel()
    model.A = RangeSet(1,4)
    model.b = Block()
    model.b.x = Var(model.A, bounds=(-1,1))
    model.b.obj = Objective(expr=sum_product(model.b.x))
    model.c = Constraint(expr=model.b.x[1] >= 0)
    opt = SolverFactory('glpk')
    results = opt.solve(model, symbolic_solver_labels=True)
    #
    results.write(filename=join(currdir,'solve_with_store1.out'),
                  format='yaml')
    self.assertMatchesYamlBaseline(
        join(currdir,"solve_with_store1.out"),
        join(currdir,"solve_with_store1.txt"))
    model.solutions.store_to(results)
    #
    results.write(filename=join(currdir,'solve_with_store2.out'),
                  format='yaml')
    self.assertMatchesYamlBaseline(
        join(currdir,"solve_with_store2.out"),
        join(currdir,"solve_with_store2.txt"))
    #
    # Load results with string indices
    #
    tmodel = ConcreteModel()
    tmodel.A = RangeSet(1,4)
    tmodel.b = Block()
    tmodel.b.x = Var(tmodel.A, bounds=(-1,1))
    tmodel.b.obj = Objective(expr=sum_product(tmodel.b.x))
    tmodel.c = Constraint(expr=tmodel.b.x[1] >= 0)
    self.assertEqual(len(tmodel.solutions), 0)
    tmodel.solutions.load_from(results)
    self.assertEqual(len(tmodel.solutions), 1)
def logreg_clas(X, Y):
    # Dimensions of matrices
    m, n = X.shape
    # Y must be a vector
    if Y.ndim != 1:
        raise ValueError('Y must be a one dimensional array')
    # Y must have m components
    if Y.shape[0] != m:
        raise ValueError('Y must have as many components as X has rows')
    # Data file creation
    Xa = np.append(np.ones((len(Y), 1)), X, axis=1)
    dat_write_clas(Xa, Y)
    # Solution
    model = clas_model(n)
    # Create the model instance
    instance = model.create_instance('default2.dat')
    # Setup the optimizer: nonlinear in this case
    import pyomo.environ
    opt = pyomo.opt.SolverFactory('ipopt')
    # Optimize
    results = opt.solve(instance)
    # Write the output
    results.write()
    # Optimal solution
    B = np.array([instance.B[j].value for j in instance.J])
    return B
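# A minimal usage sketch for logreg_clas() above, not taken from the source:
# the feature matrix and labels are made-up data, and it assumes ipopt plus
# the dat_write_clas/clas_model helpers are available. Given the column of
# ones prepended to X, B[0] is presumably the intercept and B[1:] the feature
# weights, though that depends on how clas_model() orders its coefficients.
import numpy as np

X = np.array([[0.5], [1.5], [2.5], [3.5]])   # m x n feature matrix
Y = np.array([0, 0, 1, 1])                   # binary labels, length m

B = logreg_clas(X, Y)
print(B)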
def _apply_solver(self): start_time = time.time() # # Transform the instance # xfrm = TransformationFactory('bilevel.linear_mpec') xfrm.apply_to(self._instance) xfrm = TransformationFactory('mpec.simple_nonlinear') xfrm.apply_to(self._instance, mpec_bound=self.options.get('mpec_bound', 1e-7)) # # Solve with a specified solver # solver = self.options.solver if not self.options.solver: solver = 'glpk' # use the with block here so that deactivation of the # solver plugin always occurs thereby avoiding memory # leaks caused by plugins! with pyomo.opt.SolverFactory(solver) as opt: # self.results = [] # # **NOTE: It would be better to override _presolve on the # base class of this solver as you might be # missing a number of keywords that were passed # into the solve method (e.g., none of the # io_options are getting relayed to the subsolver # here). # self.results.append( opt.solve(self._instance, tee=self._tee, timelimit=self._timelimit)) # # Load the result back into the original model # ##self._instance.load(self.results[0], ignore_invalid_labels=True) # stop_time = time.time() self.wall_time = stop_time - start_time # # Deactivate the block that contains the optimality conditions, # and reactivate SubModel # submodel = self._instance._transformation_data[ 'bilevel.linear_mpec'].submodel_cuid.find_component(self._instance) for (name, data) in submodel.component_map(active=False).items(): if not isinstance(data, Var) and not isinstance(data, Set): data.activate() # TODO: delete this subblock self._instance._transformation_data[ 'bilevel.linear_mpec'].block_cuid.find_component( self._instance).deactivate() # # Return the sub-solver return condition value and log # return pyutilib.misc.Bunch(rc=getattr(opt, '_rc', None), log=getattr(opt, '_log', None))
def test_solve6(self):
    #
    # Test that solution values have complete block names:
    #   b.obj
    #   b.x
    #
    model = ConcreteModel()
    model.y = Var(bounds=(-1,1))
    model.b = Block()
    model.b.A = RangeSet(1,4)
    model.b.x = Var(model.b.A, bounds=(-1,1))

    def obj_rule(block):
        return summation(block.x)

    model.b.obj = Objective(rule=obj_rule)

    def c_rule(model):
        expr = model.y
        for i in model.b.A:
            expr += i*model.b.x[i]
        return expr == 0

    model.c = Constraint(rule=c_rule)

    opt = solver['glpk']
    results = opt.solve(model, symbolic_solver_labels=True)
    model.solutions.store_to(results)
    results.write(filename=currdir+'solve6.out', format='json')
    self.assertMatchesJsonBaseline(currdir+"solve6.out",
                                   currdir+"solve6.txt",
                                   tolerance=1e-4)
def test3a_solve(self):
    if not 'glpk' in solvers:
        self.skipTest("glpk solver is not available")
    self.model = pyomo.opt.AmplModel(currdir+'test3a.mod',
                                     currdir+'test3a.dat')
    opt = pyomo.opt.SolverFactory('glpk')
    results = opt.solve(self.model, keepfiles=False)
    results.write(filename=currdir+'test3a.out', format='json')
    self.assertMatchesJsonBaseline(currdir+'test3a.out',
                                   currdir+'test3.baseline.out',
                                   tolerance=1e-6)
def test3_solve(self):
    if solver['glpk'] is None:
        self.skipTest("glpk solver is not available")
    self.model = pyomo.opt.AmplModel(currdir+'test3.mod')
    opt = solver['glpk']
    results = opt.solve(self.model, keepfiles=False)
    results.write(filename=currdir+'test3.out', format='json')
    self.assertMatchesJsonBaseline(currdir+'test3.out',
                                   currdir+'test3.baseline.out',
                                   tolerance=1e-6)
def test3a_solve(self):
    if not "glpk" in solvers:
        self.skipTest("glpk solver is not available")
    self.model = pyomo.opt.AmplModel(currdir + "test3a.mod",
                                     currdir + "test3a.dat")
    opt = pyomo.opt.SolverFactory("glpk")
    results = opt.solve(self.model, keepfiles=False)
    results.write(filename=currdir + "test3a.out", format="json")
    self.assertMatchesJsonBaseline(currdir + "test3a.out",
                                   currdir + "test3.baseline.out",
                                   tolerance=1e-6)
def _apply_solver(self): start_time = time.time() # # Transform instance # xfrm = TransformationFactory('mpec.simple_nonlinear') xfrm.apply_to(self._instance) # # Solve with a specified solver # solver = self.options.solver if not self.options.solver: #pragma:nocover self.options.solver = solver = 'ipopt' # use the with block here so that deactivation of the # solver plugin always occurs thereby avoiding memory # leaks caused by plugins! with pyomo.opt.SolverFactory(solver) as opt: self.results = [] epsilon_final = self.options.get('epsilon_final', 1e-7) epsilon = self.options.get('epsilon_initial', epsilon_final) while (True): self._instance.mpec_bound.value = epsilon # # **NOTE: It would be better to override _presolve on the # base class of this solver as you might be # missing a number of keywords that were passed # into the solve method (e.g., none of the # io_options are getting relayed to the subsolver # here). # res = opt.solve(self._instance, tee=self._tee, timelimit=self._timelimit) self.results.append(res) epsilon /= 10.0 if epsilon < epsilon_final: break # # Reclassify the Complementarity components # from pyomo.mpec import Complementarity for cuid in self._instance._transformation_data['mpec.simple_nonlinear'].compl_cuids: cobj = cuid.find_component(self._instance) cobj.parent_block().reclassify_component_type(cobj, Complementarity) # # Update timing # stop_time = time.time() self.wall_time = stop_time - start_time # # Return the sub-solver return condition value and log # return pyutilib.misc.Bunch(rc=getattr(opt,'_rc', None), log=getattr(opt,'_log',None))
def _apply_solver(self): start_time = time.time() # # Transform the instance # xfrm = TransformationFactory('bilevel.linear_mpec') xfrm.apply_to(self._instance) xfrm = TransformationFactory('mpec.simple_nonlinear') xfrm.apply_to(self._instance, mpec_bound=self.options.get('mpec_bound',1e-7)) # # Solve with a specified solver # solver = self.options.solver if not self.options.solver: solver = 'glpk' # use the with block here so that deactivation of the # solver plugin always occurs thereby avoiding memory # leaks caused by plugins! with pyomo.opt.SolverFactory(solver) as opt: # self.results = [] # # **NOTE: It would be better to override _presolve on the # base class of this solver as you might be # missing a number of keywords that were passed # into the solve method (e.g., none of the # io_options are getting relayed to the subsolver # here). # self.results.append(opt.solve(self._instance, tee=self._tee, timelimit=self._timelimit)) # # Load the result back into the original model # ##self._instance.load(self.results[0], ignore_invalid_labels=True) # stop_time = time.time() self.wall_time = stop_time - start_time # # Deactivate the block that contains the optimality conditions, # and reactivate SubModel # submodel = self._instance._transformation_data['bilevel.linear_mpec'].submodel_cuid.find_component(self._instance) for (name, data) in submodel.component_map(active=False).items(): if not isinstance(data,Var) and not isinstance(data,Set): data.activate() # TODO: delete this subblock self._instance._transformation_data['bilevel.linear_mpec'].block_cuid.find_component(self._instance).deactivate() # # Return the sub-solver return condition value and log # return pyutilib.misc.Bunch(rc=getattr(opt,'_rc', None), log=getattr(opt,'_log',None))
def _apply_solver(self): start_time = time.time() # # Transform instance # xfrm = TransformationFactory('mpec.simple_disjunction') xfrm.apply_to(self._instance) xfrm = TransformationFactory('gdp.bigm') xfrm.apply_to(self._instance, default_bigM=self.options.get('bigM',10**6)) # # Solve with a specified solver # solver = self.options.solver if not self.options.solver: #pragma:nocover self.options.solver = solver = 'glpk' # use the with block here so that deactivation of the # solver plugin always occurs thereby avoiding memory # leaks caused by plugins! with pyomo.opt.SolverFactory(solver) as opt: # # **NOTE: It would be better to override _presolve on the # base class of this solver as you might be # missing a number of keywords that were passed # into the solve method (e.g., none of the # io_options are getting relayed to the subsolver # here). # self.results = opt.solve(self._instance, tee=self._tee, timelimit=self._timelimit) # # Reclassify the Complementarity components # from pyomo.mpec import Complementarity for cuid in self._instance._transformation_data['mpec.simple_disjunction'].compl_cuids: cobj = cuid.find_component(self._instance) cobj.parent_block().reclassify_component_type(cobj, Complementarity) # # Transform the result back into the original model # ##self._instance.solutions.load_from(self.results, ignore_invalid_labels=True) # # Update timing # stop_time = time.time() self.wall_time = stop_time - start_time # # Return the sub-solver return condition value and log # return pyutilib.misc.Bunch(rc=getattr(opt,'_rc', None), log=getattr(opt,'_log',None))
def OSSolverService():
    import pyomo.opt

    if len(sys.argv) == 1:
        print("OSSolverService -osil <filename> -solver <name>")
        sys.exit(1)

    osilFile = None
    solver = None
    i = 1
    while i < len(sys.argv):
        if sys.argv[i] == "-osil":
            i = i+1
            osilFile = sys.argv[i]
        elif sys.argv[i] == "-solver":
            i = i+1
            solver = sys.argv[i]
        i = i+1
    print("osilFile", osilFile, "solver", solver)

    with pyomo.opt.SolverFactory(solver) as opt:
        opt.solve(osilFile, rformat=pyomo.opt.ResultsFormat.osrl)
def _apply_solver(self): start_time = time.time() # # Cache the instance # xfrm = TransformationFactory('bilevel.linear_mpec') xfrm.apply_to(self._instance) xfrm = TransformationFactory('mpec.simple_disjunction') xfrm.apply_to(self._instance) xfrm = TransformationFactory('gdp.bigm') xfrm.apply_to(self._instance, default_bigM=100000) # # Solve with a specified solver # solver = self.options.solver if not self.options.solver: solver = 'glpk' # use the with block here so that deactivation of the # solver plugin always occurs thereby avoiding memory # leaks caused by plugins! with pyomo.opt.SolverFactory(solver) as opt: # self.results = [] # # **NOTE: It would be better to override _presolve on the # base class of this solver as you might be # missing a number of keywords that were passed # into the solve method (e.g., none of the # io_options are getting relayed to the subsolver # here). # self.results.append(opt.solve(self._instance, tee=self._tee, timelimit=self._timelimit)) # stop_time = time.time() self.wall_time = stop_time - start_time # # Deactivate the block that contains the optimality conditions, # and reactivate SubModel # self._instance._transformation_data['bilevel.linear_mpec'].submodel_cuid.find_component(self._instance).activate() self._instance._transformation_data['bilevel.linear_mpec'].block_cuid.find_component(self._instance).deactivate() # # Return the sub-solver return condition value and log # return pyutilib.misc.Bunch(rc=getattr(opt,'_rc', None), log=getattr(opt,'_log',None))
def test_solve_with_store3(self):
    model = ConcreteModel()
    model.A = RangeSet(1,4)
    model.b = Block()
    model.b.x = Var(model.A, bounds=(-1,1))
    model.b.obj = Objective(expr=sum_product(model.b.x))
    model.c = Constraint(expr=model.b.x[1] >= 0)
    opt = SolverFactory('glpk')
    results = opt.solve(model)
    #
    model.solutions.store_to(results)
    results.write(filename=join(currdir,'solve_with_store5.out'),
                  format='json')
    self.assertMatchesYamlBaseline(
        join(currdir,"solve_with_store5.out"),
        join(currdir,"solve_with_store4.txt"))
    #
    model.solutions.store_to(results, cuid=True)
    buf = pickle.dumps(results)
    results_ = pickle.loads(buf)
    model.solutions.load_from(results_)
    model.solutions.store_to(results_)
    results_.write(filename=join(currdir,'solve_with_store6.out'),
                   format='json')
    self.assertMatchesYamlBaseline(
        join(currdir,"solve_with_store6.out"),
        join(currdir,"solve_with_store4.txt"))
    #
    # Load results with string indices
    #
    tmodel = ConcreteModel()
    tmodel.A = RangeSet(1,4)
    tmodel.b = Block()
    tmodel.b.x = Var(tmodel.A, bounds=(-1,1))
    tmodel.b.obj = Objective(expr=sum_product(tmodel.b.x))
    tmodel.c = Constraint(expr=tmodel.b.x[1] >= 0)
    self.assertEqual(len(tmodel.solutions), 0)
    tmodel.solutions.load_from(results)
    self.assertEqual(len(tmodel.solutions), 1)
    tmodel.solutions.store_to(results)
    results.write(filename=join(currdir,'solve_with_store7.out'),
                  format='json')
    self.assertMatchesYamlBaseline(
        join(currdir,"solve_with_store7.out"),
        join(currdir,"solve_with_store4.txt"))
def test_solve4(self):
    model = ConcreteModel()
    model.A = RangeSet(1,4)
    model.x = Var(model.A, bounds=(-1,1))

    def obj_rule(model):
        return summation(model.x)

    model.obj = Objective(rule=obj_rule)

    def c_rule(model):
        expr = 0
        for i in model.A:
            expr += i*model.x[i]
        return expr == 0

    model.c = Constraint(rule=c_rule)

    opt = solver['glpk']
    results = opt.solve(model, symbolic_solver_labels=True)
    model.solutions.store_to(results)
    results.write(filename=currdir+'solve4.out', format='json')
    self.assertMatchesJsonBaseline(currdir+"solve4.out",
                                   currdir+"solve1.txt",
                                   tolerance=1e-4)
def test_solve_with_store4(self):
    model = ConcreteModel()
    model.A = RangeSet(1,4)
    model.b = Block()
    model.b.x = Var(model.A, bounds=(-1,1))
    model.b.obj = Objective(expr=summation(model.b.x))
    model.c = Constraint(expr=model.b.x[1] >= 0)
    opt = solver['glpk']
    results = opt.solve(model, load_solutions=False)
    self.assertEqual(len(model.solutions), 0)
    self.assertEqual(len(results.solution), 1)
    model.solutions.load_from(results)
    self.assertEqual(len(model.solutions), 1)
    self.assertEqual(len(results.solution), 1)
    #
    model.solutions.store_to(results)
    results.write(filename=currdir+'solve_with_store8.out', format='json')
    self.assertMatchesYamlBaseline(currdir+"solve_with_store8.out",
                                   currdir+"solve_with_store4.txt")
def test_blending(self):
    """ The blending example from the PuLP documentation """
    model = ConcreteModel()
    model.x1 = Var(bounds=(0,None), doc="ChickenPercent")
    model.x2 = Var(bounds=(0,None), doc="BeefPercent")

    model.obj = Objective(expr=0.013*model.x1 + 0.008*model.x2,
                          doc="Total Cost of Ingredients per can")
    model.c0 = Constraint(expr=model.x1+model.x2 == 100.0,
                          doc="Percentage Sum")
    model.c1 = Constraint(expr=0.100*model.x1 + 0.200*model.x2 >= 8.0,
                          doc="Protein Requirement")
    model.c2 = Constraint(expr=0.080*model.x1 + 0.100*model.x2 >= 6.0,
                          doc="Fat Requirement")
    model.c3 = Constraint(expr=0.001*model.x1 + 0.005*model.x2 <= 2.0,
                          doc="Fiber Requirement")
    model.c4 = Constraint(expr=0.002*model.x1 + 0.005*model.x2 <= 0.4,
                          doc="Salt Requirement")

    opt = SolverFactory('glpk')
    results = opt.solve(model)
    model.solutions.store_to(results)
    results.write(filename=currdir+"blend.out", format='json')
    self.assertMatchesJsonBaseline(currdir+"blend.out",
                                   currdir+"blend.txt",
                                   tolerance=1e-2)
def test_solve_with_pickle(self):
    model = ConcreteModel()
    model.A = RangeSet(1,4)
    model.b = Block()
    model.b.x = Var(model.A, bounds=(-1,1))
    model.b.obj = Objective(expr=summation(model.b.x))
    model.c = Constraint(expr=model.b.x[1] >= 0)
    opt = solver['glpk']
    self.assertEqual(len(model.solutions), 0)
    results = opt.solve(model, symbolic_solver_labels=True)
    self.assertEqual(len(model.solutions), 1)
    #
    self.assertEqual(model.solutions[0].gap, 0.0)
    self.assertEqual(model.solutions[0].status, SolutionStatus.feasible)
    self.assertEqual(model.solutions[0].message, None)
    #
    buf = pickle.dumps(model)
    tmodel = pickle.loads(buf)
    self.assertEqual(len(tmodel.solutions), 1)
    self.assertEqual(tmodel.solutions[0].gap, 0.0)
    self.assertEqual(tmodel.solutions[0].status, SolutionStatus.feasible)
    self.assertEqual(tmodel.solutions[0].message, None)
def test_nonnegative_transform_3(self): self.model.S = RangeSet(0,10) self.model.T = Set(initialize=["foo", "bar"]) # Unindexed, singly indexed, and doubly indexed variables with # explicit bounds self.model.x1 = Var(bounds=(-3, 3)) self.model.y1 = Var(self.model.S, bounds=(-3, 3)) self.model.z1 = Var(self.model.S, self.model.T, bounds=(-3, 3)) # Unindexed, singly indexed, and doubly indexed variables with # rule-defined bounds def boundsRule(*args): return (-4, 4) self.model.x2 = Var(bounds=boundsRule) self.model.y2 = Var(self.model.S, bounds=boundsRule) self.model.z2 = Var(self.model.S, self.model.T, bounds=boundsRule) # Unindexed, singly indexed, and doubly indexed variables with # explicit domains self.model.x3 = Var(domain=NegativeReals, bounds=(-10, 10)) self.model.y3 = Var(self.model.S, domain = NegativeIntegers, bounds=(-10, 10)) self.model.z3 = Var(self.model.S, self.model.T, domain = Reals, bounds=(-10, 10)) # Unindexed, singly indexed, and doubly indexed variables with # rule-defined domains def domainRule(*args): if len(args) == 1: arg = 0 else: arg = args[1] if len(args) == 1 or arg == 0: return NonNegativeReals elif arg == 1: return NonNegativeIntegers elif arg == 2: return NonPositiveReals elif arg == 3: return NonPositiveIntegers elif arg == 4: return NegativeReals elif arg == 5: return NegativeIntegers elif arg == 6: return PositiveReals elif arg == 7: return PositiveIntegers elif arg == 8: return Reals elif arg == 9: return Integers elif arg == 10: return Binary else: return Reals self.model.x4 = Var(domain=domainRule, bounds=(-10, 10)) self.model.y4 = Var(self.model.S, domain=domainRule, bounds=(-10, 10)) self.model.z4 = Var(self.model.S, self.model.T, domain=domainRule, bounds=(-10, 10)) def objRule(model): return sum(5*sum_product(model.__getattribute__(c+n)) \ for c in ('x', 'y', 'z') for n in ('1', '2', '3', '4')) self.model.obj = Objective(rule=objRule) transform = TransformationFactory('core.nonnegative_vars') instance=self.model.create_instance() transformed = transform.create_using(instance) opt = SolverFactory("glpk") instance_sol = opt.solve(instance) transformed_sol = opt.solve(transformed) self.assertEqual( instance_sol["Solution"][0]["Objective"]['obj']["value"], transformed_sol["Solution"][0]["Objective"]['obj']["value"] )
def maximize_one_over_mu(settings, n, k, var_lower, var_upper, node_pos, mat, integer_vars): """Compute the maximum of :math: `1/\mu`. Construct a PyOmo model to maximize :math: `1/\mu` See paper by Costa and Nannicini, equation (7) pag 4, and the references therein. Parameters ---------- settings : rbfopt_settings.RbfSettings Global and algorithmic settings. n : int The dimension of the problem, i.e. size of the space. k : int Number of nodes, i.e. interpolation points. var_lower : List[float] Vector of variable lower bounds. var_upper : List[float] Vector of variable upper bounds. node_pos : List[List[float]] List of coordinates of the nodes mat : numpy.matrix The matrix necessary for the computation. This is the inverse of the matrix [Phi P; P^T 0], see paper as cited above. Must be a square numpy.matrix of appropriate dimension. integer_vars : List[int] or None A list containing the indices of the integrality constrained variables. If None or empty list, all variables are assumed to be continuous. Returns ------- float A maximizer. It is difficult to do global optimization so typically this method returns a local maximum. Raises ------ ValueError If the type of radial basis function is not supported. RuntimeError If the solver cannot be found. """ assert(len(var_lower)==n) assert(len(var_upper)==n) assert(len(node_pos)==k) assert(isinstance(mat, np.matrix)) assert(isinstance(settings, RbfSettings)) # Determine the size of the P matrix p = ru.get_size_P_matrix(settings, n) assert(mat.shape==(k + p, k + p)) # Instantiate model if (ru.get_degree_polynomial(settings) == 1): model = rbfopt_degree1_models elif (ru.get_degree_polynomial(settings) == 0): model = rbfopt_degree0_models else: raise ValueError('RBF type ' + settings.rbf + ' not supported') instance = model.create_max_one_over_mu_model(settings, n, k, var_lower, var_upper, node_pos, mat, integer_vars) # Initialize variables for local search initialize_instance_variables(settings, instance) # Instantiate optimizer opt = pyomo.opt.SolverFactory(config.MINLP_SOLVER_EXEC, solver_io='nl') if opt is None: raise RuntimeError('Solver ' + config.MINLP_SOLVER_EXEC + ' not found') set_minlp_solver_options(opt) # Solve and load results try: results = opt.solve(instance, keepfiles = False, tee = settings.print_solver_output) if ((results.solver.status == pyomo.opt.SolverStatus.ok) and (results.solver.termination_condition == TerminationCondition.optimal)): # this is feasible and optimal instance.solutions.load_from(results) point = [instance.x[i].value for i in instance.N] ru.round_integer_vars(point, integer_vars) else: point = None except: point = None return point
def get_noisy_rbf_coefficients(settings, n, k, Phimat, Pmat, node_val, fast_node_index, fast_node_err_bounds, init_rbf_lambda = None, init_rbf_h = None): """Obtain coefficients for the noisy RBF interpolant. Solve a quadratic problem to compute the coefficients of the RBF interpolant that minimizes bumpiness and lets all points with index in fast_node_index deviate by a specified percentage from their value. Parameters ---------- settings : rbfopt_settings.RbfSettings Global and algorithmic settings. n : int The dimension of the problem, i.e. size of the space. k : int Number of nodes, i.e. interpolation points. Phimat : numpy.matrix Matrix Phi, i.e. top left part of the standard RBF matrix. Pmat : numpy.matrix Matrix P, i.e. top right part of the standard RBF matrix. node_val : List[float] List of values of the function at the nodes. fast_node_index : List[int] List of indices of nodes whose function value should be considered variable withing the allowed range. fast_node_err_bounds : List[(float, float)] Allowed deviation from node values for nodes affected by error. This is a list of pairs (lower, upper) of the same length as fast_node_index. init_rbf_lambda : List[float] or None Initial values that should be used for the lambda coefficients of the RBF. Can be None. init_rbf_h : List[float] or None Initial values that should be used for the h coefficients of the RBF. Can be None. Returns --- (List[float], List[float]) Two vectors: lambda coefficients (for the radial basis functions), and h coefficients (for the polynomial). If initialization information was provided and was valid, then some values will always be returned. Otherwise, it will be None. Raises ------ ValueError If the type of radial basis function is not supported. RuntimeError If the solver cannot be found. """ assert(isinstance(settings, RbfSettings)) assert(len(node_val)==k) assert(isinstance(Phimat, np.matrix)) assert(isinstance(Pmat, np.matrix)) assert(len(fast_node_index)==len(fast_node_err_bounds)) assert(init_rbf_lambda is None or len(init_rbf_lambda)==k) assert(init_rbf_h is None or len(init_rbf_h)==Pmat.shape[1]) # Instantiate model if (ru.get_degree_polynomial(settings) == 1): model = rbfopt_degree1_models elif (ru.get_degree_polynomial(settings) == 0): model = rbfopt_degree0_models else: raise ValueError('RBF type ' + settings.rbf + ' not supported') instance = model.create_min_bump_model(settings, n, k, Phimat, Pmat, node_val, fast_node_index, fast_node_err_bounds) # Instantiate optimizer opt = pyomo.opt.SolverFactory(config.NLP_SOLVER_EXEC, solver_io='nl') if opt is None: raise RuntimeError('Solver ' + config.NLP_SOLVER_EXEC + ' not found') set_nlp_solver_options(opt) # Initialize instance variables with the solution provided (if # available). This should avoid any infeasibility. if (init_rbf_lambda is not None and init_rbf_h is not None): for i in range(len(init_rbf_lambda)): instance.rbf_lambda[i] = init_rbf_lambda[i] instance.slack[i] = 0.0 for i in range(len(init_rbf_h)): instance.rbf_h[i] = init_rbf_h[i] # Solve and load results try: results = opt.solve(instance, keepfiles = False, tee = settings.print_solver_output) if ((results.solver.status == pyomo.opt.SolverStatus.ok) and (results.solver.termination_condition == TerminationCondition.optimal)): # this is feasible and optimal instance.solutions.load_from(results) rbf_lambda = [instance.rbf_lambda[i].value for i in instance.K] rbf_h = [instance.rbf_h[i].value for i in instance.P] else: # If we have initialization information, return it. 
It is # a feasible solution. Otherwise, this will be None. rbf_lambda = init_rbf_lambda rbf_h = init_rbf_h except: # If we have initialization information, return it. It is # a feasible solution. Otherwise, this will be None. rbf_lambda = init_rbf_lambda rbf_h = init_rbf_h return (rbf_lambda, rbf_h)
def test_standard_form_transform_2(self): """ Same as #1, but adds constraints """ self.model.S = RangeSet(0,10) self.model.T = Set(initialize=["foo", "bar"]) # Unindexed, singly indexed, and doubly indexed variables with # explicit bounds self.model.x1 = Var(bounds=(-3, 3)) self.model.y1 = Var(self.model.S, bounds=(-3, 3)) self.model.z1 = Var(self.model.S, self.model.T, bounds=(-3, 3)) # Unindexed, singly indexed, and doubly indexed variables with # rule-defined bounds def boundsRule(*args): return (-4, 4) self.model.x2 = Var(bounds=boundsRule) self.model.y2 = Var(self.model.S, bounds=boundsRule) self.model.z2 = Var(self.model.S, self.model.T, bounds=boundsRule) # Unindexed, singly indexed, and doubly indexed variables with # explicit domains self.model.x3 = Var(domain=NegativeReals, bounds=(-10, 10)) self.model.y3 = Var(self.model.S, domain = NegativeIntegers, bounds=(-10, 10)) self.model.z3 = Var(self.model.S, self.model.T, domain = Reals, bounds=(-10, 10)) # Unindexed, singly indexed, and doubly indexed variables with # rule-defined domains def domainRule(*args): if len(args) == 1: arg = 0 else: arg = args[1] if len(args) == 1 or arg == 0: return NonNegativeReals elif arg == 1: return NonNegativeIntegers elif arg == 2: return NonPositiveReals elif arg == 3: return NonPositiveIntegers elif arg == 4: return NegativeReals elif arg == 5: return NegativeIntegers elif arg == 6: return PositiveReals elif arg == 7: return PositiveIntegers elif arg == 8: return Reals elif arg == 9: return Integers elif arg == 10: return Binary else: return Reals self.model.x4 = Var(domain=domainRule, bounds=(-10, 10)) self.model.y4 = Var(self.model.S, domain=domainRule, bounds=(-10, 10)) self.model.z4 = Var(self.model.S, self.model.T, domain=domainRule, bounds=(-10, 10)) # Add some constraints def makeXConRule(var): def xConRule(model, var): return (-1, var, 1) def makeYConRule(var): def yConRule(model, var, s): return (-1, var[s], 1) def makeZConRule(var): def zConRule(model, var, s, t): return (-1, var[s, t], 1) for n in ('1', '2', '3', '4'): self.model.__setattr__( "x" + n + "_constraint", Constraint( rule=makeXConRule( self.model.__getattribute__("x"+n)))) self.model.__setattr__( "y" + n + "_constraint", Constraint( rule=makeYConRule( self.model.__getattribute__("y"+n)))) self.model.__setattr__( "z" + n + "_constraint", Constraint( rule=makeZConRule( self.model.__getattribute__("z"+n)))) def objRule(model): return sum(5*sum_product(model.__getattribute__(c+n)) \ for c in ('x', 'y', 'z') for n in ('1', '2', '3', '4')) self.model.obj = Objective(rule=objRule) transform = StandardForm() instance=self.model.create_instance() transformed = transform(instance) opt = SolverFactory("glpk") instance_sol = opt.solve(instance) transformed_sol = opt.solve(transformed) self.assertEqual( instance_sol["Solution"][0]["Objective"]['obj']["value"], transformed_sol["Solution"][0]["Objective"]['obj']["value"] )
def _apply_solver(self): start_time = time.time() # # Cache the instance # xfrm = TransformationFactory('bilevel.linear_dual') xfrm.apply_to(self._instance) # # Apply an additional transformation to remap bilinear terms # if self.options.transform is None: xfrm = None else: xfrm = TransformationFactory(self.options.transform) xfrm.apply_to(self._instance) # # Solve with a specified solver # solver = self.options.solver if not self.options.solver: solver = 'glpk' # use the with block here so that deactivation of the # solver plugin always occurs thereby avoiding memory # leaks caused by plugins! with pyomo.opt.SolverFactory(solver) as opt: self.results = [] # # **NOTE: It would be better to override _presolve on the # base class of this solver as you might be # missing a number of keywords that were passed # into the solve method (e.g., none of the # io_options are getting relayed to the subsolver # here). # self.results.append(opt.solve(self._instance, tee=self._tee, timelimit=self._timelimit)) # # Transform the result back into the original model # tdata = self._instance._transformation_data['bilevel.linear_dual'] unfixed_cuids = set() # Copy variable values and fix them for vuid in tdata.fixed: for index_, data_ in vuid.find_component_on(self._instance).iteritems(): if not data_.fixed: data_.value = self._instance.find_component(data_).value data_.fixed = True unfixed_cuids.add(ComponentUID(data_)) # Reclassify the SubModel components and resolve for name_ in tdata.submodel: submodel = getattr(self._instance, name_) submodel.activate() dual_submodel = getattr(self._instance, name_+'_dual') dual_submodel.deactivate() pyomo.util.PyomoAPIFactory('pyomo.repn.compute_canonical_repn')({}, model=submodel) self._instance.reclassify_component_type(name_, Block) # use the with block here so that deactivation of the # solver plugin always occurs thereby avoiding memory # leaks caused by plugins! with pyomo.opt.SolverFactory(solver) as opt_inner: # # **NOTE: It would be better to override _presolve on the # base class of this solver as you might be # missing a number of keywords that were passed # into the solve method (e.g., none of the # io_options are getting relayed to the subsolver # here). # self.results.append(opt_inner.solve(self._instance, tee=self._tee, timelimit=self._timelimit, select=None)) self._instance.solutions.select(0, ignore_fixed_vars=True) data_.parent_component().parent_block().reclassify_component_type(name_, SubModel) # Unfix variables for vuid in tdata.fixed: for index_, data_ in vuid.find_component_on(self._instance).iteritems(): if ComponentUID(data_) in unfixed_cuids: data_.fixed = False stop_time = time.time() self.wall_time = stop_time - start_time # Reactivate top level objective for oname, odata in self._instance.component_map(Objective).items(): odata.activate() # # Return the sub-solver return condition value and log # return pyutilib.misc.Bunch(rc=getattr(opt,'_rc', None), log=getattr(opt,'_log',None))
def _apply_solver(self): start_time = time.time() # # Cache the instance # xfrm = TransformationFactory('bilevel.linear_dual') xfrm.apply_to(self._instance) # # Verify whether the objective is linear # nonlinear=False for odata in self._instance.component_objects(Objective, active=True): nonlinear = odata.expr.polynomial_degree() != 1 # Stop after the first objective break # # Apply an additional transformation to remap bilinear terms # if nonlinear: gdp_xfrm = TransformationFactory("gdp.bilinear") gdp_xfrm.apply_to(self._instance) mip_xfrm = TransformationFactory("gdp.bigm") mip_xfrm.apply_to(self._instance, default_bigM=self.options.get('bigM',100000)) # # Solve with a specified solver # solver = self.options.solver if not self.options.solver: solver = 'glpk' # use the with block here so that deactivation of the # solver plugin always occurs thereby avoiding memory # leaks caused by plugins! with pyomo.opt.SolverFactory(solver) as opt: self.results = [] # # **NOTE: It would be better to override _presolve on the # base class of this solver as you might be # missing a number of keywords that were passed # into the solve method (e.g., none of the # io_options are getting relayed to the subsolver # here). # self.results.append(opt.solve(self._instance, tee=self._tee, timelimit=self._timelimit)) #print("POST-SOLVE - BEGIN") #self._instance.write("tmp.lp", io_options={"symbolic_solver_labels":True}) #self._instance.pprint() #self._instance.display() #print("POST-SOLVE - END") # # If the problem was bilinear, then reactivate the original data # if nonlinear: i = 0 for v in self._instance.bilinear_data_.vlist.itervalues(): #print(v) #print(v.cname()) #print(type(v)) #print(v.value) if abs(v.value) <= 1e-7: self._instance.bilinear_data_.vlist_boolean[i] = 0 else: self._instance.bilinear_data_.vlist_boolean[i] = 1 i = i + 1 # self._instance.bilinear_data_.deactivate() # # Transform the result back into the original model # tdata = self._instance._transformation_data['bilevel.linear_dual'] unfixed_cuids = set() # Copy variable values and fix them for vuid in tdata.fixed: for index_, data_ in vuid.find_component_on(self._instance).iteritems(): if not data_.fixed: data_.value = self._instance.find_component(data_).value data_.fixed = True unfixed_cuids.add(ComponentUID(data_)) # Reclassify the SubModel components and resolve for name_ in tdata.submodel: submodel = getattr(self._instance, name_) submodel.activate() for (name, data) in submodel.component_map(active=False).items(): if not isinstance(data,Var) and not isinstance(data,Set): data.activate() dual_submodel = getattr(self._instance, name_+'_dual') dual_submodel.deactivate() pyomo.util.PyomoAPIFactory('pyomo.repn.compute_canonical_repn')({}, model=submodel) self._instance.reclassify_component_type(name_, Block) # use the with block here so that deactivation of the # solver plugin always occurs thereby avoiding memory # leaks caused by plugins! with pyomo.opt.SolverFactory(solver) as opt_inner: # # **NOTE: It would be better to override _presolve on the # base class of this solver as you might be # missing a number of keywords that were passed # into the solve method (e.g., none of the # io_options are getting relayed to the subsolver # here). 
# results = opt_inner.solve(self._instance, tee=self._tee, timelimit=self._timelimit) #select=None) # Unfix variables for vuid in tdata.fixed: for index_, data_ in vuid.find_component_on(self._instance).iteritems(): if ComponentUID(data_) in unfixed_cuids: data_.fixed = False # self._instance.solutions.select(0, ignore_fixed_vars=True) self.results.append(results) # stop_time = time.time() self.wall_time = stop_time - start_time self.results_obj = self._setup_results_obj() # # Reactivate top level objective # and reclassify the submodel # for oname, odata in self._instance.component_map(Objective).items(): odata.activate() # TODO: rework the Block logic to allow for searching SubModel objects for variables, etc. #data_.parent_component().parent_block().reclassify_component_type(name_, SubModel) # # Return the sub-solver return condition value and log # return pyutilib.misc.Bunch(rc=getattr(opt,'_rc', None), log=getattr(opt,'_log',None))