def solve_instance(instance):
    """Solve *instance* with Couenne (streaming solver output) and load
    the results back into it before returning it."""
    couenne = SolverFactory('couenne')
    outcome = couenne.solve(instance, tee=True)
    instance.solutions.load_from(outcome)
    return instance
def initialize(**kwds):
    """Build an Options object describing a solver and probe its availability.

    Keyword arguments are forwarded to Options(); the fields used here are
    ``name`` (solver name), ``io`` (solver interface) and ``capabilities``
    (iterable of required capability names).

    Returns the Options object with ``available`` and ``version`` filled in.
    Raises ValueError if the solver is available but lacks a requested
    capability.
    """
    obj = Options(**kwds)
    #
    # Set obj.available
    #
    opt = None
    try:
        opt = SolverFactory(obj.name, solver_io=obj.io)
    except Exception:
        # Solver construction failed; treat the solver as unavailable.
        # (Narrowed from a bare except so KeyboardInterrupt/SystemExit
        # are not swallowed.)
        pass
    if opt is None or isinstance(opt, UnknownSolver):
        obj.available = False
    elif (obj.name == "gurobi") and (not GUROBISHELL.license_is_valid()):
        obj.available = False
    elif (obj.name == "baron") and (not BARONSHELL.license_is_valid()):
        obj.available = False
    else:
        # Available only if the plugin reports so and, for shell-based
        # solvers, an executable could actually be located.
        obj.available = (opt.available(exception_flag=False)) and \
            ((not hasattr(opt, "executable")) or (opt.executable() is not None))
    #
    # Check capabilities
    #
    if obj.available:
        for _c in obj.capabilities:
            if _c not in opt._capabilities:
                raise ValueError("Solver %s does not support capability %s!"
                                 % (obj.name, _c))
    #
    # Get version. BUG FIX: the original called opt.version() even when no
    # solver object was constructed (opt is None), raising AttributeError.
    #
    obj.version = opt.version() if opt is not None else None
    return obj
def solveModel(self, x, y, z):
    """Solve self.model and copy the optimal variable values into the
    supplied mutable sequences x, y and z.

    Returns (True, objective_value) on an optimal solve; otherwise prints
    the solver status and returns (False, 0).
    """
    model = self.model
    opt = SolverFactory(self.config.solver)
    opt.options.update(self.config.solver_options)
    results = opt.solve(
        model, keepfiles=self.keepfiles, tee=self.stream_solver)
    if ((results.solver.status == SolverStatus.ok)
            and (results.solver.termination_condition == TerminationCondition.optimal)):
        model.solutions.load_from(results)
        for i in range(self.lx):
            x[i] = value(self.TRF.xvars[i])
        for i in range(self.ly):
            # y is indexed from 1 in the model, hence the +1 offset.
            y[i] = value(self.TRF.y[i + 1])
        for i in range(self.lz):
            z[i] = value(self.TRF.zvars[i])
        # Return the value of the (single) active objective.
        for obj in model.component_data_objects(Objective, active=True):
            return True, obj()
    else:
        # BUG FIX: message previously read "Waring".
        print("Warning: solver Status: " + str(results.solver.status))
        print("And Termination Conditions: " + str(results.solver.termination_condition))
        return False, 0
def solve_optimization_period(self, period, return_model_instance=False):
    """Build and solve the dispatch model for one *period*.

    Returns the solved instance itself when return_model_instance is True,
    otherwise the flattened results via all_results_to_list().
    """
    dispatch_model = dispatch_formulation.create_dispatch_model(self, period)
    # report_timing=True was used at one point to profile instance creation
    instance = dispatch_model.create_instance(report_timing=False)
    results = SolverFactory(cfg.solver_name).solve(instance)
    instance.solutions.load_from(results)
    if return_model_instance:
        return instance
    return all_results_to_list(instance)
def solve(self):
    """Solve the wrapped model with CPLEX and refresh per-node statistics."""
    cplex = SolverFactory("cplex")
    model = self._model
    model.dual.clearValue()
    # keepfiles=True, symbolic_solver_labels=True, tee=True can be added
    # to the solve() call below for debugging.
    model.load(cplex.solve(model))
    self._solved = True
    self._update_tree_node_xbars()
def run_problem(purchases, sales, stella_correction, jammies_correction):
    # Solve the purchase/sale pairing LP with GLPK and return a dict with
    # the selected pairings, the raw solver result, status, and (when
    # optimal) the corrected objective value.
    #
    # NOTE: local variable names in this function are load-bearing --
    # collect_dual(**locals()) forwards every local by name, so renaming
    # any local here would silently change that call's arguments.
    opt = SolverFactory('glpk')
    (number_corr, price_corr, model, dual_model) = make_model(purchases, sales, stella_correction, jammies_correction)
    results = opt.solve(model)
    output = []
    solutions = results.get('Solution', [])
    if len(solutions) > 0:
        model.load(results)
        for (p, s) in model.pairings:
            ct = model.selected[p, s].value
            if ct > 0:
                # model indices are 1-based; undo the count scaling applied
                # by make_model.
                output.append((purchases[p-1], sales[s-1], float(ct) / number_corr))
    ret = dict(pairs=output, full_result=results.json_repn())
    if results.solver.status == SolverStatus.ok:
        if results.solver.termination_condition == TerminationCondition.optimal:
            ret['status'] = "optimal"
            # the following procedure for getting the value is right from
            # the coopr source itself...
            key = results.solution.objective.keys()[0]
            ret['value'] = float(results.solution.objective[key].value) / price_corr / number_corr
            collect_dual(**locals())
        else:
            ret['status'] = "not solved"
    else:
        ret['status'] = "solver error"
    return ret
def calculateSharesQ(expressionVars, relationSizes, reducerCapacity):
    """
    Use the MINLP solver to calculate the shares of attribute variables

    input
        expressionVars   A list of lists of expression vars
                         ex. [[[3], [1], [2]]
        relationSizes    A list ex. [1000, 1000, 1000]
        reducerCapacity  reducer capacity used in the budget constraint
                         (NOTE(review): the original docstring described a
                         "numberReducers" integer instead -- confirm which
                         is intended)
    output (shares, com_cost, cost_per_reducer)
        shares    the shares DICT !! unordered ex. {'1':2, '2': 1, '3': 16}
        com_cost  The objective function's value given the shares
                  ex. 2600000
    """
    # print expressionVars
    uniqueVars = getUniqueExpressionVars(expressionVars)
    print uniqueVars
    shares = {}
    # Disabled heuristic shortcut for small inputs (kept for reference):
    # if sum(relationSizes) < reducerCapacity*10:
    #     skew_share = int(pow(np.prod(relationSizes)/100000 , 1.0/len(uniqueVars)))
    #     shares = {str(var): skew_share for var in uniqueVars}
    #     shares = {str(var): 1 for var in uniqueVars}
    #     com_cost = sum(relationSizes)
    #     return (shares, com_cost, com_cost/np.prod(shares.values()))
    # reducerCapacity = 100000
    objectiveExpression = constructObjective(expressionVars, relationSizes)
    print objectiveExpression
    budgetExpression_UB = constructCapacityConstraintUB(
        expressionVars, objectiveExpression, reducerCapacity)
    budgetExpression_LB = constructCapacityConstraintLB(
        expressionVars, objectiveExpression, reducerCapacity)
    # Create a solver factory using Couenne
    opt = SolverFactory('couenne')
    model = ConcreteModel()
    model.x = Var(uniqueVars, domain=PositiveIntegers)
    # SECURITY NOTE: eval() of generated expression strings -- acceptable
    # only while the expressions are built locally from trusted input;
    # never feed user-supplied strings through this path.
    model.OBJ = Objective(expr=eval(objectiveExpression))
    model.Constraint1 = Constraint(expr=eval(budgetExpression_UB))
    # model.Constraint2 = Constraint(expr=eval(budgetExpression_LB))
    # Create a model instance and optimize
    instance = model.create_instance()
    results = opt.solve(instance)
    instance.display()
    # Save calculated shares
    for v in instance.component_objects(Var, active=True):
        varobject = getattr(instance, str(v))
        for index in varobject:
            # Round 2.999->3; str(varobject[index])[2:-1] strips the
            # surrounding "x[...]" text to recover the variable name --
            # TODO confirm this matches the repr format in use.
            shares[str(varobject[index])[2:-1]] = (int(round(varobject[index].value)))
    # Save communication cost (value of the single active objective)
    for o in instance.component_objects(Objective, active=True):
        oobject = getattr(instance, str(o))
        for idx in oobject:
            com_cost = value(oobject[idx])
    return (shares, com_cost, com_cost/np.prod(shares.values()))
def Model_Resolution(model, datapath="Example/data.dat"):
    '''
    This function attaches the objective and all constraints to the model,
    then calls Pyomo/CPLEX to solve an instance of the project.

    :param model: Pyomo model as defined in the Model_creation library
    :param datapath: path to the input data file
    :return: The solution inside an object called instance.
    '''
    from Constraints import Net_Present_Cost, Solar_Energy, State_of_Charge, \
        Maximun_Charge, Minimun_Charge, Max_Power_Battery_Charge, Max_Power_Battery_Discharge, Max_Bat_in, Max_Bat_out, \
        Financial_Cost, Energy_balance, Maximun_Lost_Load, Scenario_Net_Present_Cost, Scenario_Lost_Load_Cost, \
        Initial_Inversion, Operation_Maintenance_Cost, Total_Finalcial_Cost, Battery_Reposition_Cost, Maximun_Diesel_Energy, Diesel_Comsuption, Diesel_Cost_Total

    # OBJECTIVE FUNCTION:
    model.ObjectiveFuntion = Objective(rule=Net_Present_Cost, sense=minimize)

    # CONSTRAINTS
    # Energy constraints
    model.EnergyBalance = Constraint(model.scenario, model.periods, rule=Energy_balance)
    model.MaximunLostLoad = Constraint(model.scenario, rule=Maximun_Lost_Load)  # Maximum permissible lost load
    model.ScenarioLostLoadCost = Constraint(model.scenario, rule=Scenario_Lost_Load_Cost)
    # PV constraints
    model.SolarEnergy = Constraint(model.scenario, model.periods, rule=Solar_Energy)  # Energy output of the solar panels
    # Battery constraints
    model.StateOfCharge = Constraint(model.scenario, model.periods, rule=State_of_Charge)  # State of charge of the battery
    model.MaximunCharge = Constraint(model.scenario, model.periods, rule=Maximun_Charge)  # Maximum state of charge of the battery
    model.MinimunCharge = Constraint(model.scenario, model.periods, rule=Minimun_Charge)  # Minimum state of charge
    model.MaxPowerBatteryCharge = Constraint(rule=Max_Power_Battery_Charge)  # Max power battery charge constraint
    model.MaxPowerBatteryDischarge = Constraint(rule=Max_Power_Battery_Discharge)  # Max power battery discharge constraint
    model.MaxBatIn = Constraint(model.scenario, model.periods, rule=Max_Bat_in)  # Minimum flow of energy for the charge phase
    model.Maxbatout = Constraint(model.scenario, model.periods, rule=Max_Bat_out)  # Minimum flow of energy for the discharge phase
    # Diesel generator constraints
    model.MaximunDieselEnergy = Constraint(model.scenario, model.periods, rule=Maximun_Diesel_Energy)  # Maximum energy output of the diesel generator
    model.DieselComsuption = Constraint(model.scenario, model.periods, rule=Diesel_Comsuption)  # Diesel consumption
    model.DieselCostTotal = Constraint(model.scenario, rule=Diesel_Cost_Total)
    # Financial constraints
    model.FinancialCost = Constraint(rule=Financial_Cost)  # Financial cost
    model.ScenarioNetPresentCost = Constraint(model.scenario, rule=Scenario_Net_Present_Cost)
    model.InitialInversion = Constraint(rule=Initial_Inversion)
    model.OperationMaintenanceCost = Constraint(rule=Operation_Maintenance_Cost)
    model.TotalFinalcialCost = Constraint(rule=Total_Finalcial_Cost)
    model.BatteryRepositionCost = Constraint(rule=Battery_Reposition_Cost)

    instance = model.create_instance(datapath)  # load parameters
    opt = SolverFactory('cplex')  # Solver used during the optimization
    results = opt.solve(instance, tee=True)  # Solving a model instance
    instance.solutions.load_from(results)  # Loading solution into instance
    return instance
def run_model (input_data_file): list=[] insts=[] opt = SolverFactory("glpk") instance=model.create(input_data_file) res = opt.solve(instance) instance.load(res) list.append(res) insts.append(instance) print res return list, insts
def test_instance_constraints(model):
    # Probes constraints for infeasibility: activates each constraint in
    # turn, re-solves, and prints the name of any constraint whose
    # activation does not render the instance infeasible.
    #
    # NOTE(review): the nesting below is reconstructed from whitespace-
    # mangled source -- confirm the solve really belongs inside the loop.
    # Also, the second c.activate() in the else-branch looks like it was
    # meant to be c.deactivate() (activate is a no-op here since the
    # constraint was just activated) -- confirm original intent.
    instance = model.create_instance(report_timing=False)
    for c in instance.component_objects(Constraint):
        c.activate()
        solver = SolverFactory(cfg.solver_name)
        solution = solver.solve(instance)
        if solution.solver.termination_condition == TerminationCondition.infeasible:
            pass
        else:
            print c.name
            c.activate()
def solve(self, solver='glpk', solver_io='lp', debug=False, duals=False,
          **kwargs):
    """ Method that takes care of the communication with the solver
    to solve the optimization model

    Parameters
    ----------
    self : pyomo.ConcreteModel
    solver str: solver to be used e.g. 'glpk','gurobi','cplex'
    solver_io str: str that defines the solver interaction
    (file or interface) 'lp','nl','python'
    debug bool: if True, write 'problem.lp' and report the solve outcome
    duals bool: if True, attach suffixes so duals and reduced costs are
    imported from the solver
    **kwargs: other arguments for the pyomo.opt.SolverFactory.solve()
    method

    Returns
    -------
    self : solved pyomo.ConcreteModel() instance
    """
    from pyomo.opt import SolverFactory

    # Create a 'dual' suffix component on the instance
    # so the solver plugin will know which suffixes to collect
    if duals:  # idiom fix: was "duals is True"
        # dual variables (= shadow prices)
        self.dual = po.Suffix(direction=po.Suffix.IMPORT)
        # reduced costs
        self.rc = po.Suffix(direction=po.Suffix.IMPORT)

    # write lp-file
    if debug:  # idiom fix: was "debug == True"
        self.write('problem.lp',
                   io_options={'symbolic_solver_labels': True})

    # solve instance
    opt = SolverFactory(solver, solver_io=solver_io)
    # store results
    results = opt.solve(self, **kwargs)

    if debug:
        # NOTE(review): results are only loaded into the model when
        # debug=True and the solve was optimal -- looks surprising for a
        # solve() method; confirm this is intended.
        if (results.solver.status == "ok") and \
           (results.solver.termination_condition == "optimal"):
            # Do something when the solution in optimal and feasible
            self.solutions.load_from(results)
        elif (results.solver.termination_condition == "infeasible"):
            print("Model is infeasible",
                  "Solver Status: ", results.solver.status)
        else:
            # Something else is wrong
            print("Solver Status: ", results.solver.status, "\n"
                  "Termination condition: ",
                  results.solver.termination_condition)
def main(): # create the empty list of cuts to start cut_on = [] cut_off = [] done = False while not done: model = create_sudoku_model(cut_on, cut_off, board) # options = Options() # options.solver = 'glpk' # options.quiet = True # options.tee = True # results, opt = util.apply_optimizer(options, model) # instance.load(results) ## SOLVE ## opt = SolverFactory('glpk') # create model instance, solve # instance = model.create_instance() results = opt.solve(model) model.solutions.load_from(results) if str(results.Solution.Status) != 'optimal': break # add cuts new_cut_on = [] new_cut_off = [] for r in model.ROWS: for c in model.COLS: for v in model.VALUES: # check if the binary variable is on or off # note, it may not be exactly 1 if value(model.y[r,c,v]) >= 0.5: new_cut_on.append((r,c,v)) else: new_cut_off.append((r,c,v)) cut_on.append(new_cut_on) cut_off.append(new_cut_off) print "Solution #" + str(len(cut_on)) for i in xrange(1,10): for j in xrange(1,10): for v in xrange(1,10): if value(model.y[i,j,v]) >= 0.5: print v, " ", print
def _populate_bundle_dual_master_model(self, ph): current_iteration = ph._current_iteration # first step is to update the historical information from PH for scenario in ph._scenario_tree._scenarios: primal_objective_value = scenario._objective self._past_objective_values[(current_iteration, scenario._name)] = primal_objective_value # print "PAST OBJECTIVE FUNCTION VALUES=",self._past_objective_values assert current_iteration not in self._past_var_values iter_var_values = self._past_var_values[current_iteration] = {} for scenario in ph._scenario_tree._scenarios: iter_var_values[scenario._name] = copy.deepcopy(scenario._x) # print "PAST VAR VALUES=",self._past_var_values # propagate PH parameters to concrete model and re-preprocess. for scenario in ph._scenario_tree._scenarios: for tree_node in scenario._node_list[:-1]: new_w_k_parameter_name = \ "WDATA_"+str(tree_node._name)+"_"+str(scenario._name)+"_K" w_k_parameter = \ self._master_model.find_component(new_w_k_parameter_name) ph_weights = scenario._w[tree_node._name] for idx in w_k_parameter: w_k_parameter[idx] = ph_weights[idx] # V bounds are per-variable, per-iteration for scenario in ph._scenario_tree._scenarios: scenario_name = scenario._name v_var = getattr(self._master_model, "V_"+str(scenario_name)) expr = self._past_objective_values[(current_iteration, scenario_name)] for tree_node in scenario._node_list[:-1]: new_w_variable_name = "WVAR_"+str(tree_node._name)+"_"+str(scenario_name) w_variable = self._master_model.find_component(new_w_variable_name) expr += sum(iter_var_values[scenario_name][tree_node._name][var_id] * w_variable[var_id] for var_id in w_variable) self._master_model.V_Bound.add(v_var <= expr) # print "V_BOUNDS CONSTRAINT:" # self._master_model.V_Bound.pprint() solver = SolverFactory("cplex") results=solver.solve(self._master_model,tee=False,load_solutions=False) self._master_model.solutions.load_from(results)
def run_pyomo(self, model, data, **kwargs):
    """Instantiate *model* with *data*, solve it with the configured
    solver, load the solution, and return the solved instance."""
    logging.debug("Creating model instance...")
    inst = model.create_instance(data)

    logging.debug("Getting solver...")
    opt = SolverFactory(cfg.solver_name)

    logging.debug("Solving...")
    res = opt.solve(inst, **kwargs)

    logging.debug("Loading solution...")
    inst.solutions.load_from(res)
    return inst
def Model_Resolution_Dispatch(model, datapath="Example/data_Dispatch.dat"):
    '''
    This function attaches the dispatch objective and constraints to the
    model, then calls Pyomo/CPLEX to solve an instance of the project.

    :param model: Pyomo model as defined in the Model_creation library
    :param datapath: path to the input data file (BUG FIX: previously this
        parameter was ignored and a hard-coded "Example/data_dispatch.dat"
        -- note the lowercase 'd', differing from the default -- was used)
    :return: The solution inside an object called instance.
    '''
    from Constraints_Dispatch import Net_Present_Cost, State_of_Charge, Maximun_Charge, \
        Minimun_Charge, Max_Bat_in, Max_Bat_out, \
        Energy_balance, Maximun_Lost_Load, Generator_Cost_1_Integer, \
        Total_Cost_Generator_Integer, \
        Scenario_Lost_Load_Cost, \
        Generator_Bounds_Min_Integer, Generator_Bounds_Max_Integer, Energy_Genarator_Energy_Max_Integer

    # OBJECTIVE FUNCTION:
    model.ObjectiveFuntion = Objective(rule=Net_Present_Cost, sense=minimize)

    # CONSTRAINTS
    # Energy constraints
    model.EnergyBalance = Constraint(model.periods, rule=Energy_balance)  # Energy balance
    model.MaximunLostLoad = Constraint(rule=Maximun_Lost_Load)  # Maximum permissible lost load
    # Battery constraints
    model.StateOfCharge = Constraint(model.periods, rule=State_of_Charge)  # State of charge of the battery
    model.MaximunCharge = Constraint(model.periods, rule=Maximun_Charge)  # Maximum state of charge of the battery
    model.MinimunCharge = Constraint(model.periods, rule=Minimun_Charge)  # Minimum state of charge
    model.MaxBatIn = Constraint(model.periods, rule=Max_Bat_in)  # Minimum flow of energy for the charge phase
    model.Maxbatout = Constraint(model.periods, rule=Max_Bat_out)  # Minimum flow of energy for the discharge phase
    # Diesel generator constraints
    model.GeneratorBoundsMin = Constraint(model.periods, rule=Generator_Bounds_Min_Integer)
    model.GeneratorBoundsMax = Constraint(model.periods, rule=Generator_Bounds_Max_Integer)
    model.GeneratorCost1 = Constraint(model.periods, rule=Generator_Cost_1_Integer)
    model.EnergyGenaratorEnergyMax = Constraint(model.periods, rule=Energy_Genarator_Energy_Max_Integer)
    model.TotalCostGenerator = Constraint(rule=Total_Cost_Generator_Integer)
    # Financial Constraints
    model.ScenarioLostLoadCost = Constraint(rule=Scenario_Lost_Load_Cost)

    instance = model.create_instance(datapath)  # load parameters
    opt = SolverFactory('cplex')  # Solver used during the optimization
    # opt.options['emphasis_memory'] = 'y'
    # opt.options['node_select'] = 3
    results = opt.solve(instance, tee=True, options_string="mipgap=0.03")  # Solving a model instance
    # instance.write(io_options={'emphasis_memory':True})
    # options_string="mipgap=0.03", timelimit=1200
    instance.solutions.load_from(results)  # Loading solution into instance
    return instance
def __init__(self, manager, *args, **kwds):
    # Construct the extensive-form solver wrapper around an initialized
    # scenario tree manager; builds the solver plugin and the (serial or
    # pyro) solver manager.  Raises ValueError when the manager is not
    # initialized, the 'solver' option is None/unknown, or the solver
    # manager cannot be created.
    import pyomo.solvers.plugins.smanager.pyro
    super(ExtensiveFormAlgorithm, self).__init__(*args, **kwds)

    # TODO: after PH moves over to the new code
    #if not isinstance(manager, ScenarioTreeManager):
    #    raise TypeError("ExtensiveFormAlgorithm requires an instance of the "
    #                    "ScenarioTreeManager interface as the "
    #                    "second argument")
    if not manager.initialized:
        raise ValueError("ExtensiveFormAlgorithm requires a scenario tree "
                         "manager that has been fully initialized")

    self._manager = manager
    self.instance = None
    self._solver_manager = None
    self._solver = None

    # The following attributes will be modified by the
    # solve() method. For users that are scripting, these
    # can be accessed after the solve() method returns.
    # They will be reset each time solve() is called.
    ############################################
    self.objective = None
    self.gap = None
    self.termination_condition = None
    self.termination_message = None
    self.solver_status = None
    self.solution_status = None
    self.solver_results = None
    self.time = None
    self.pyomo_time = None
    ############################################

    # apparently the SolverFactory does not have sane
    # behavior when the solver name is None
    if self.get_option("solver") is None:
        raise ValueError("The 'solver' option can not be None")

    self._solver = SolverFactory(self.get_option("solver"),
                                 solver_io=self.get_option("solver_io"))
    if isinstance(self._solver, UnknownSolver):
        raise ValueError("Failed to create solver of type="+
                         self.get_option("solver")+
                         " for use in extensive form solve")

    solver_manager_type = self.get_option("solver_manager")
    if solver_manager_type == "phpyro":
        print("*** WARNING ***: PHPyro is not a supported solver "
              "manager type for the extensive-form solver. "
              "Falling back to serial.")
        solver_manager_type = 'serial'

    self._solver_manager = SolverManagerFactory(
        solver_manager_type,
        host=self.get_option("solver_manager_pyro_host"),
        port=self.get_option("solver_manager_pyro_port"))
    if self._solver_manager is None:
        raise ValueError("Failed to create solver manager of type="
                         +self.get_option("solver")+
                         " for use in extensive form solve")
def run_model(datafile): print "==== Running the model ====" opt = SolverFactory("cplex") list = [] list_ = [] model.current_time_step.add(1) instance = model.create_instance(datafile) ## determine the time steps for comp in instance.component_objects(): if str(comp) == "time_step": parmobject = getattr(instance, str(comp)) for vv in parmobject.value: list_.append(vv) storage = {} insts = [] for vv in list_: model.current_time_step.clear() model.current_time_step.add(vv) print "Running for time step: ", vv instance = model.create_instance(datafile) # update initial storage value from previous storage if len(storage) > 0: set_initial_storage(instance, storage) instance.preprocess() res=opt.solve(instance) instance.solutions.load_from(res) set_post_process_variables(instance) insts.append(instance) storage=get_storage(instance) list.append(res) print "-------------------------" count=1 for res in list: print " ========= Time step: %s =========="%count print res count+=1 count=1 for inst in insts: print " ========= Time step: %s =========="%count display_variables(inst) count+=1 return list, insts
def run(self, input_file):
    # Runs the model time step by time step: builds a fresh instance per
    # step, carries storage forward via set_initial_storage(), records
    # deliveries per demand node, and returns ([results], [instances]).
    # The interleaved preprocess()/load_from() calls are order-sensitive;
    # kept exactly as written.
    opt = SolverFactory("glpk")
    list = []
    list_ = []
    instances = []
    self.model.current_time_step.add(1)
    instance = self.model.create_instance(input_file)
    # Discover the available time steps from the "time_step" component.
    for comp in instance.component_objects():
        if str(comp) == "time_step":
            parmobject = getattr(instance, str(comp))
            for vv in parmobject.value:
                list_.append(vv)
    instance = self.model.create_instance(input_file)
    storage = {}
    demand_nodes = get_demand_nodes_list(instance)
    for vv in list_:
        ##################
        self.cu_timp = vv
        self.model.current_time_step.clear()
        #self.model.preprocess()
        self.model.current_time_step.add(vv)
        #self.model.preprocess()
        instance = self.model.create_instance(input_file)
        if(len(storage) > 0):
            # carry the previous step's storage into this instance
            set_initial_storage(instance, storage)
            self.model.preprocess()
            instance.preprocess()
        else:
            instance.preprocess()
        res = opt.solve(instance)
        instance.solutions.load_from(res)
        instance.preprocess()
        storage = get_storage(instance)
        set_delivery(instance, demand_nodes, vv)
        # NOTE(review): the solution is loaded a second time after
        # set_delivery(); looks redundant -- confirm whether set_delivery
        # invalidates the loaded values.
        instance.solutions.load_from(res)
        instances.append(instance)
        list.append(res)
    count = 1
    for res in instances:
        print " ========= Time step: %s =========="%count
        self.display_variables(res)
        count += 1
    return list, instances
def constructMinotaurSolver():
    """Return a Minotaur QG solver configured for AMPL input, an IPOPT
    NLP engine, and a one-hour branch-and-bound time limit."""
    qg = SolverFactory('qg')
    for option in ('--ampl=1', '--nlp_engine=IPOPT', '--bnb_time_limit=3600.'):
        qg.set_options(option)
    # qg.set_options('--linfpump=1')  # feasibility pump, currently disabled
    return qg
def solve(self, solver="glpk", solver_io="lp", **kwargs):
    r"""Hand the model to a solver and store the results.

    Parameters
    ----------
    solver : string
        solver to be used e.g. "glpk", "gurobi", "cplex"
    solver_io : string
        pyomo solver interface file format: "lp", "python", "nl", etc.
    \**kwargs : keyword arguments
        Recognized keys:

        solve_kwargs : dict
            Other arguments for the pyomo.opt.SolverFactory.solve()
            method, e.g. {"tee": True}
        cmdline_options : dict
            Command line options for the solver, e.g.
            {"mipgap": "0.01"} results in "--mipgap 0.01";
            {"interior": " "} results in "--interior".
            Gurobi takes numeric parameter values such as {"method": 2}.
    """
    solve_args = kwargs.get("solve_kwargs", {})
    cmdline_opts = kwargs.get("cmdline_options", {})

    factory_solver = SolverFactory(solver, solver_io=solver_io)
    # forward command line options to the solver
    for name, val in cmdline_opts.items():
        factory_solver.options[name] = val

    results = factory_solver.solve(self, **solve_args)
    self.solutions.load_from(results)

    # store optimization results in the energysystem's result dictionary
    self.es.results = self.results()
    self.es.results.objective = self.objective()
    self.es.results.solver = results
    return results
def schedule_exams(data, solver_name="gurobi", n_cliques=0, print_results=False): optimizer = SolverFactory(solver_name) if solver_name == "gurobi": optimizer.options["threads"] = 1 optimizer.options["--solver-suffixes"] = ".*" print optimizer.options t = time() instance = build_model(data, n_cliques = n_cliques) print("Solving...") results = optimizer.solve(instance) t = time() - t instance.solutions.load_from(results) is_integral = is_integer_solution(instance) if is_integral: print "All integer solution!" y = {} for (i,l) in instance.NxP: if is_integral: y[i,l] = int(instance.y[i,l].value) else: y[i,l] = instance.y[i,l].value x = {} for (i,k,l) in instance.NxRxP: if y[i,l] == 1: if is_integral: x[i,k] = int(instance.x[i,k,l].value) else: x[i,k] = instance.x[i,k,l].value objVal = instance.OBJ_v(instance) if print_results: print instance.display() print (results) return instance, x, y, objVal, t
def initialize(self):
    """(Re)create the underlying solver plugin.

    Deactivates and discards any previously created solver, builds a new
    one from ``self.name``/``self.io``, applies ``self.options`` to it,
    and updates ``self.available``.

    Returns (solver, io_options); solver is None when construction failed
    or the solver name is unknown.
    """
    if self.solver is not None:
        self.solver.deactivate()
    self.solver = None
    opt = None
    try:
        opt = SolverFactory(self.name, solver_io=self.io)
    except Exception:
        # Narrowed from a bare except: a construction failure just means
        # the solver is unavailable; don't swallow SystemExit et al.
        pass
    if isinstance(opt, UnknownSolver):
        opt = None
    if opt is not None:
        # forward the configured options to the plugin
        for key, value in iteritems(self.options):
            opt.options[key] = value
    self.solver = opt
    # Available only if a plugin exists, reports itself available and,
    # for shell-based solvers, has a locatable executable.
    self.available = (self.solver is not None) and \
                     (self.solver.available(exception_flag=False)) and \
                     ((not hasattr(self.solver, 'executable')) or \
                      (self.solver.executable() is not None))
    return self.solver, self.io_options
def solve(model):
    """Solve *model* with GLPK and return the solution variable data.

    Inputs:  model - Pyomo model object
    Outputs: x - binary |E|x1 solution vector data that is 1 if the
             row is in the solution and 0 otherwise.

    This is an optional code path that allows the script to be run
    outside of the Pyomo command-line (e.g. python transport.py),
    replicating what the Pyomo command-line tool does.
    """
    from pyomo.opt import SolverFactory
    glpk = SolverFactory("glpk")
    # solve, then save results back onto the model
    model.solutions.load_from(glpk.solve(model))
    return model.x._data
def solve(): if instance is None: raise RuntimeError("instance is not initialized; load_inputs() or load_dat_inputs() must be called before solve().") # can be accessed from interactive prompt via import ReferenceModel; ReferenceModel.solve() print "solving model..." opt = SolverFactory("cplex")# , solver_io="nl") # tell cplex to find an irreducible infeasible set (and report it) # opt.options['iisfind'] = 1 # relax the integrality constraints, to allow commitment constraints to match up with # number of units available # opt.options['mipgap'] = 0.001 # # display more information during solve # opt.options['display'] = 1 # opt.options['bardisplay'] = 1 # opt.options['mipdisplay'] = 1 # opt.options['primalopt'] = "" # this is how you specify single-word arguments # opt.options['advance'] = 2 # # opt.options['threads'] = 1 # opt.options['parallelmode'] = -1 # 1=opportunistic, 0 or 1=deterministic start = time.time() results = opt.solve(instance, keepfiles=False, tee=True, symbolic_solver_labels=True, suffixes=['dual', 'rc', 'urc', 'lrc']) print "Total time in solver: {t}s".format(t=time.time()-start) instance.solutions.load_from(results) if results.solver.termination_condition == TerminationCondition.infeasible: print "Model was infeasible; Irreducible Infeasible Set (IIS) returned by solver:" print "\n".join(c.cname() for c in instance.iis) raise RuntimeError("Infeasible model") print "\n\n=======================================================" print "Solved model" print "=======================================================" print "Total cost: ${v:,.0f}".format(v=value(instance.Minimize_System_Cost))
def schedule_pyomo():
    # Builds a small random job-scheduling model (minimize total lateness
    # subject to per-job lateness caps) and solves it with GLPK.
    num = 10
    at = [randint(0, 100) for i in xrange(num)]      # arrival times
    length = [randint(2, 5) for i in xrange(num)]    # job lengths
    wt = [randint(1, 6) for i in xrange(num)]        # max allowed lateness
    model = AbstractModel("schedule")
    model.n = Param(default=num)
    model.i = RangeSet(0, model.n-1)
    # NOTE(review): bounds=(0, model.n-1) caps start times at n-1 even
    # though arrival times range up to 100 -- looks unintentionally tight;
    # confirm.
    model.st = Var(model.i, domain=NonNegativeIntegers, bounds=(0, model.n-1))
    # NOTE(review): these build Pyomo expressions eagerly over an
    # AbstractModel, and max() on a Pyomo expression does not do what
    # symbolic max would -- confirm the model actually behaves as intended.
    et = [model.st[i]+length[i] for i in xrange(num)]
    lt = [max(et[i]-at[i], 0) for i in xrange(num)]
    # def obj_rule(model, length, at, num):
    def obj_rule(model):
        # commented-out alternative formulation retained from the original:
        # a = [[length[model.rank[x]] for x in xrange(i)] for i in xrange(num)]
        # st = [sum(a[i]) for i in xrange(num)]
        # et = [st[i]+length[i] for i in xrange(num)]
        # lt = [max(et[i]-at[i], 0) for i in xrange(num)]
        # return lt
        return sum([max((model.st[i]+length[i]-at[i]), 0) for i in xrange(10)])
    model.obj = Objective(rule=obj_rule, sense=minimize)
    def c1_rule(model, j):
        # a = [[length[model.rank[x]] for x in xrange(i)] for i in xrange(num)]
        # st = [sum(a[i]) for i in xrange(num)]
        return lt[j] - wt[j] <= 0
    model.c1 = Constraint(model.i, rule=c1_rule)
    opt = SolverFactory("glpk")
    # model.create()/instance.load() are the deprecated pre-4.x Pyomo APIs
    # (create_instance()/solutions.load_from() in modern Pyomo).
    instance = model.create()
    result = opt.solve(instance)
    instance.load(result)
    print result.solution[0].status
def run_pyomo_optimization(model, data, solver_name, stdout_detail, **kwargs): """ Pyomo optimization steps: create model instance from model formulation and data, get solver, solve instance, and load solution. :param model: :param data: :param solver_name: :param stdout_detail: :param kwargs: :return: instance """ if stdout_detail: print "Creating model instance..." instance = model.create_instance(data) if stdout_detail: print "Getting solver..." solver = SolverFactory(solver_name) if stdout_detail: print "Solving..." solution = solver.solve(instance, **kwargs) if stdout_detail: print "Loading solution..." instance.solutions.load_from(solution) return instance
def __init__(self, numberRepresentativeDays=24, timelimit=60, binsPerTimeSeries=40, solverName='cplex', verbose=False):
    """Configure the representative-days selector.

    :param numberRepresentativeDays: number of days to select
    :param timelimit: solver time limit (units are solver-defined)
    :param binsPerTimeSeries: number of histogram bins per time series
    :param solverName: name handed to SolverFactory ('cplex', 'cbc', ...)
    :param verbose: enable verbose output
    :raises Exception: if the requested solver cannot be created
    """
    self.binsPerTimeSeries = binsPerTimeSeries
    self.numberRepresentativeDays = numberRepresentativeDays
    self.timeLimit = timelimit
    self.verbose = verbose
    # Prepare the solver
    self.solver = SolverFactory(solverName)
    if self.solver is None:
        # BUG FIX: the message previously interpolated self.solver, which
        # is None on this path; report the requested solver *name* instead.
        raise Exception('Unable to use the solver "%s".' % solverName)
    # Define the time limit parameter in function of the solver
    if solverName == 'cbc':
        self._timelimitParameter = 'sec'
    else:
        self._timelimitParameter = "timelimit"
def __init__(self, _results, threshold=None, k=10, solver="glpk", verbosity=0):
    """Initialize the epitope-selection optimizer from prediction results.

    :param _results: EpitopePredictionResult with predictions per allele
    :param threshold: optional dict of per-allele thresholds (an empty
        dict is used when None)
    :param k: number of peptides to select
    :param solver: solver name handed to SolverFactory
    :param verbosity: verbosity level stored for later use
    :raises ValueError: if _results is not an EpitopePredictionResult
    """
    #check input data
    if not isinstance(_results, EpitopePredictionResult):
        raise ValueError("first input parameter is not of type EpitopePredictionResult")

    #start constructing model
    # NOTE(review): a worker pool is created per instance and is not
    # visibly closed in this method -- confirm it is terminated elsewhere.
    self.__pool = mp.Pool(mp.cpu_count())
    self.__solver = SolverFactory(solver)#, solver_io = "python")
    self.__verbosity = verbosity
    self.__changed = True
    a = _results.columns.tolist()
    self.__alleleProb = self.__init_alleles(a)
    self.__k = k
    self.__result = None
    self.__thresh = {} if threshold is None else threshold
    self.__imm, self.__peps = self.__init_imm(_results)
    self.__n = len(self.__peps)
    self.__arcCost = self.__init_arc_cost()
    self.instance = self.__init_model()
def __init__(self, manager, *args, **kwds):
    # Construct the extensive-form algorithm around an initialized scenario
    # tree manager; builds the solver plugin (applying solver_options and
    # mipgap) and the (serial or pyro) solver manager.
    import pyomo.solvers.plugins.smanager.pyro
    super(ExtensiveFormAlgorithm, self).__init__(*args, **kwds)

    # TODO: after PH moves over to the new code
    #if not isinstance(manager, ScenarioTreeManager):
    #    raise TypeError("ExtensiveFormAlgorithm requires an instance of the "
    #                    "ScenarioTreeManager interface as the "
    #                    "second argument")
    if not manager.initialized:
        raise ValueError("ExtensiveFormAlgorithm requires a scenario tree "
                         "manager that has been fully initialized")

    self._manager = manager
    self.instance = None
    self._solver_manager = None
    self._solver = None

    # The following attributes will be modified by the
    # solve() method. For users that are scripting, these
    # can be accessed after the solve() method returns.
    # They will be reset each time solve() is called.
    ############################################
    self.objective = undefined
    self.gap = undefined
    self.termination_condition = undefined
    self.solver_status = undefined
    self.solution_status = undefined
    self.solver_results = undefined
    self.pyomo_solve_time = undefined
    self.solve_time = undefined
    ############################################

    self._solver = SolverFactory(self.get_option("solver"),
                                 solver_io=self.get_option("solver_io"))
    if isinstance(self._solver, UnknownSolver):
        raise ValueError("Failed to create solver of type=" +
                         self.get_option("solver") +
                         " for use in extensive form solve")
    if len(self.get_option("solver_options")) > 0:
        if self.get_option("verbose"):
            print("Initializing ef solver with options=" +
                  str(list(self.get_option("solver_options"))))
        # NOTE(review): "".join concatenates the options with no separator
        # between them; " ".join looks like what was intended -- confirm.
        self._solver.set_options("".join(self.get_option("solver_options")))
    if self.get_option("mipgap") is not None:
        # mipgap must be a fraction on [0, 1]
        if (self.get_option("mipgap") < 0.0) or \
           (self.get_option("mipgap") > 1.0):
            raise ValueError("Value of the mipgap parameter for the EF "
                             "solve must be on the unit interval; "
                             "value specified=" + str(self.get_option("mipgap")))
        self._solver.options.mipgap = float(self.get_option("mipgap"))

    solver_manager_type = self.get_option("solver_manager")
    if solver_manager_type == "phpyro":
        print("*** WARNING ***: PHPyro is not a supported solver "
              "manager type for the extensive-form solver. "
              "Falling back to serial.")
        solver_manager_type = 'serial'
    self._solver_manager = SolverManagerFactory(
        solver_manager_type,
        host=self.get_option("pyro_host"),
        port=self.get_option("pyro_port"))
    if self._solver_manager is None:
        raise ValueError("Failed to create solver manager of type=" +
                         self.get_option("solver") +
                         " for use in extensive form solve")
def main():
    """
    Make the flowsheet object and solve

    Builds the dynamic moving-bed (MB) fuel-reactor flowsheet, initializes
    it from a steady-state solve, perturbs the solid feed, deactivates
    equations / fixes variables that are tentatively unused, and prepares
    scaling information for the dynamic model.

    Returns the (not-yet-solved) flowsheet object.
    """
    ss_init = ss_sim.main()

    flowsheet = Flowsheet(name='MB_Model')

    # fill in values of IC parameters from steady state solve
    setICs(flowsheet, ss_init)

    # Fix variables
    setInputs(flowsheet)

    # Initialize at steady state
    initialize_ss(flowsheet, ss_init)
    mb = flowsheet.MB_fuel

    #write_differential_equations(flowsheet)

    # Then perturb
    # NOTE(review): ptb, solid_x_ptb, and gas_y_ptb are currently unused
    # because perturbInputs() is commented out below.
    ptb = {}
    solid_x_ptb = {'Fe2O3': 0.25, 'Fe3O4': 0.01, 'Al2O3': 0.74}
    gas_y_ptb = {'CO2': 0.04999, 'H2O': 0.00001, 'CH4': 0.95}
    #perturbInputs(flowsheet,0,Solid_M=691.4,Solid_T=1283.15,Solid_x=solid_x_ptb,
    #        Gas_F=150,Gas_T=350,Gas_y=gas_y_ptb)
    for t in mb.t:
        ptb_inputs(flowsheet, t, Solid_M=691.4)

    # Steady state at the perturbed solid feed, used for scaling below.
    ss_final = ss_sim.main(Solid_M=691.4)

    with open('ss_init.txt', 'w') as f:
        ss_init.display(ostream=f)
    with open('ss_fin.txt', 'w') as f:
        ss_final.display(ostream=f)

    # should put this in a dedicated ~intialize~ function
    # that also intelligently initializes the model after perturbation
    mb.eq_d4.deactivate()
    mb.eq_d5.deactivate()
    mb.eq_d8.deactivate()
    mb.eq_d9.deactivate()
    mb.eq_d10.deactivate()
    mb.eq_g7.deactivate()
    mb.eq_g8.deactivate()
    mb.eq_g10.deactivate()
    mb.eq_g11.deactivate()
    mb.eq_g12.deactivate()
    mb.eq_g13.deactivate()
    mb.eq_g14.deactivate()
    mb.eq_g4.deactivate()
    mb.eq_g5.deactivate()
    mb.eq_g2.deactivate()

    # Fix the variables whose defining equations were deactivated above.
    mb.Tg_GW.fix(0.0)
    mb.Tw_GW.fix(0.0)
    mb.Tg_refractory.fix(0.0)
    mb.Tw_Wamb.fix()
    mb.Tw.fix()
    mb.Nuw.fix()
    mb.Nu_ext.fix()
    mb.hw.fix()
    mb.hext.fix()
    mb.hext2.fix()
    mb.U.fix()
    mb.Uw.fix()
    mb.Pr_ext.fix()
    mb.Ra.fix()
    mb.Re.fix()
    ###

    # other tentatively unused variables:
    mb.mFe_mAl.fix(0.0)
    mb.Solid_Out_M_Comp.fix()

    # choose how to calculate certain algebraic variables:
    mb.eq_c5.deactivate()

    # Create a solver
    tol = 1e-8
    opt = SolverFactory('ipopt')
    opt.options = {'tol': tol,
                   'linear_solver': 'ma57',
                   'bound_push': 1e-8,
                   'max_cpu_time': 600,
                   'print_level': 5,
                   'output_file': 'ipopt_out.txt',
                   'linear_system_scaling': 'mc19',
                   #'linear_scaling_on_demand' : 'yes',
                   'halt_on_ampl_error': 'yes'}

    # initialized at steady state, works regardless:
    flowsheet.strip_bounds()

    print_violated_constraints(flowsheet, tol)

    for t in mb.t:
        alg_update(flowsheet, t)
        update_time_derivatives(flowsheet, t)

    constraint_list = []
    for c in mb.component_objects(Constraint):
        constraint_list.append(c)

    create_suffixes(flowsheet)

    vars_to_scale = []
    data_scaled = []
    con_data_2update = []
    for var in mb.component_objects(Var):
        # need static list of vars to scale
        vars_to_scale.append(var)
    for var in mb.component_data_objects(Var):
        # should I create parallel blocks of scaled/unscaled constraints?
        data_scaled.append(var)
    for con in mb.component_data_objects(Constraint):
        con_data_2update.append(con)

    # should separate variables into input/response variables
    # only scale response variables, or have separate function to
    # scale input variables
    #
    # or, an is_alg_fcn_of() function would be nice, but probably very
    # difficult to implement
    #
    # should be able to partition variables into useful categories...
    # differential, algebraic-important, algebraic-auxiliary, time-derivative,
    # space-derivative, geometric, (parameter,) input, input-algebraic
    #
    # such a partition would be incredibly powerful for a variety of applications
    # e.g. identifying degrees of freedom, index reduction, initialization,
    # model simplification

    diff_vars = [mb.Cg, mb.q, mb.Tg, mb.Ts]
    alg_vars = [
        mb.Gas_Out_P, mb.Solid_Out_M, mb.Solid_Out_Ts, mb.Solid_Out_x,
        mb.CgT, mb.Ctrans, mb.G_flux, mb.X_gas, mb.y, mb.ytot,
        mb.Ftotal, mb.F, mb.Gas_M, mb.qT, mb.qtrans, mb.S_flux,
        mb.X_OC, mb.x, mb.xtot, mb.Solid_M_total, mb.Solid_M,
        mb.Solid_F_total, mb.Solid_F, mb.mFe_mAl, mb.P, mb.Tg_GS,
        mb.Ts_dHr, mb.vg, mb.umf, mb.v_diff, mb.Rep, mb.Pr,
        mb.Pr_ext, mb.Ra, mb.Nu, mb.hf, mb.Gh_flux, mb.Sh_flux,
        mb.DH_rxn_s, mb.cp_sol, mb.MW_vap, mb.rho_vap, mb.mu_vap,
        mb.cp_gas, mb.cp_vap, mb.k_cpcv, mb.k_vap, mb.X, mb.X_term,
        mb.k, mb.r_gen, mb.rg, mb.rs, mb.dG_fluxdz, mb.dS_fluxdz,
        mb.dPdz, mb.dGh_fluxdz, mb.dSh_fluxdz]
    dyn_vars = diff_vars + alg_vars

    constraints_to_scan = []
    for con in mb.component_objects(Constraint):
        constraints_to_scan.append(con)

    for var in dyn_vars:
        print(var.name)
        create_scale_values(var, flowsheet, ss_init, ss_final)
    #create_scale_values(mb.Cg, flowsheet, ss_init, ss_final)

    #with open('pre_constraint_update.txt', 'w') as f:
    #    flowsheet.display(ostream=f)

    for con in constraints_to_scan:
        # probably need to selectively update constraints as well
        print(con.name)
        update_constraint(con, flowsheet)

    # TODO: -check that constraints have been properly updated,
    #       -check for violated constraints (in this single element),
    #        ^ should only be discretization equations...
    #       -try to solve model
    #       -have not scaled constraints yet, but should still solve...
    #update_constraint(mb.eq_c4, flowsheet)
    #update_constraint(mb.eq_b1, flowsheet)
    #update_constraint(mb.eq_a2, flowsheet)
    #update_constraint(mb.eq_p2, flowsheet)

    with open('scale_vars.txt', 'w') as f:
        # per custom, I should replace this with a 'write_scale_vars()' function
        # line = ''
        # for var in m.component_objects(Var):
        #     if isinstance(var, SimpleVar):
        #         line = line + var.name + ' ' + str(var.value)
        #         # ^probably need to re-format this value string
        #         try:
        #             if var.has_dev_exp == True:
        #                 line = line + '\t' + var.dev.name + ' ' + str(value(var.dev))
        #         except AttributeError:
        #             pass
        #     else:
        #         try:
        #             if var.has_dev_exp == True:
        #                 for index in var:
        for var in data_scaled:
            line = var.local_name + ' '
            try:
                if var.parent_component().has_dev_exp == True:
                    line = (line + var.parent_component().dev_exp[
                        var.index()].expr.to_string())
            except AttributeError:
                line = line + ' has no deviation expression'
            line = line + '\n'
            f.write(line)

    write_constraint_expressions(con_data_2update)

    #print(mb.eq_p2[0.00062,0].expr.to_string())
    #print(mb.eq_p2.dev_con[0.00062,0].expr.to_string())

    with open('dyn_scaled_init.txt', 'w') as f:
        flowsheet.display(ostream=f)

    # Flip the continuation parameter on (presumably switches the model
    # to its perturbed/dynamic form — TODO confirm against MB_fuel).
    mb.cont_param.set_value(1)

    return flowsheet
class TRFInterface(object):
    """
    Pyomo interface for Trust Region algorithm.

    Wraps a user model: clones it, replaces external functions (EFs) with
    "holder" variables, builds the basis / surrogate-model constraints, and
    drives the subproblem solves for the trust-region iterations.
    """

    def __init__(self, model, decision_variables,
                 ext_fcn_surrogate_map_rule, config):
        self.original_model = model
        # Temporarily attach the decision variables to the original model so
        # that clone() remaps them onto the cloned model; then detach.
        tmp_name = unique_component_name(self.original_model, 'tmp')
        setattr(self.original_model, tmp_name, decision_variables)
        self.config = config
        self.model = self.original_model.clone()
        self.decision_variables = getattr(self.model, tmp_name)
        delattr(self.original_model, tmp_name)
        self.data = Block()
        self.model.add_component(unique_component_name(self.model, 'trf_data'),
                                 self.data)
        self.basis_expression_rule = ext_fcn_surrogate_map_rule
        self.efSet = None
        self.solver = SolverFactory(self.config.solver)
        # TODO: Provide an API for users to set this only to substitute
        # a subset of identified external functions.
        # Also rename to "efFilterSet" or something similar.

    def replaceEF(self, expr):
        """
        Replace an External Function.

        Arguments:
            expr  : a Pyomo expression. We will search this expression tree

        This function returns an expression after removing any
        ExternalFunction in the set efSet from the expression tree
        `expr` and replacing them with variables.
        New variables are declared on the `TRF` block.

        TODO: Future work - investigate direct substitution of basis or
        surrogate models using Expression objects instead of new variables.
        """
        return EFReplacement(self.data, self.efSet).walk_expression(expr)

    def _remove_ef_from_expr(self, component):
        """
        This method takes a component and looks at its expression.
        If the expression contains an external function (EF), a new
        expression with the EF replaced with a "holder" variable is
        added to the component and the basis expression for the new
        "holder" variable is updated.
        """
        expr = component.expr
        next_ef_id = len(self.data.ef_outputs)
        new_expr = self.replaceEF(expr)
        if new_expr is not expr:
            component.set_value(new_expr)
            # ef_outputs is a VarList: 1-based indexing, hence i+1.
            new_output_vars = list(
                self.data.ef_outputs[i+1] for i in range(
                    next_ef_id, len(self.data.ef_outputs)
                    )
                )
            for v in new_output_vars:
                self.data.basis_expressions[v] = \
                    self.basis_expression_rule(
                        component, self.data.truth_models[v])

    def replaceExternalFunctionsWithVariables(self):
        """
        This method sets up essential data objects on the new trf_data block
        on the model as well as triggers the replacement of external functions
        in expressions trees.

        Data objects created:
            self.data.all_variables : ComponentSet
                A set of all variables on the model, including "holder"
                variables from the EF replacement
            self.data.truth_models : ComponentMap
                A component map for replaced nodes that keeps track of
                the truth model for that replacement.
            self.data.basis_expressions : ComponentMap
                A component map for the Pyomo expressions for basis functions
                as they apply to each variable
            self.data.ef_inputs : Dict
                A dictionary that tracks the input variables for each EF
            self.data.ef_outputs : VarList
                A list of the "holder" variables which replaced the original
                External Function expressions
        """
        self.data.all_variables = ComponentSet()
        self.data.truth_models = ComponentMap()
        self.data.basis_expressions = ComponentMap()
        self.data.ef_inputs = {}
        self.data.ef_outputs = VarList()

        number_of_equality_constraints = 0
        for con in self.model.component_data_objects(Constraint, active=True):
            if con.lb == con.ub and con.lb is not None:
                number_of_equality_constraints += 1
            self._remove_ef_from_expr(con)

        self.degrees_of_freedom = (len(list(self.data.all_variables))
                                   - number_of_equality_constraints)
        if self.degrees_of_freedom != len(self.decision_variables):
            raise ValueError(
                "replaceExternalFunctionsWithVariables: "
                "The degrees of freedom %d do not match the number of decision "
                "variables supplied %d."
                % (self.degrees_of_freedom, len(self.decision_variables)))

        for var in self.decision_variables:
            if var not in self.data.all_variables:
                raise ValueError(
                    "replaceExternalFunctionsWithVariables: "
                    f"The supplied decision variable {var.name} cannot "
                    "be found in the model variables.")

        self.data.objs = list(self.model.component_data_objects(Objective,
                                                                active=True))
        # HACK: This is a hack that we will want to remove once the NL writer
        # has been corrected to not send unused EFs to the solver
        for ef in self.model.component_objects(ExternalFunction):
            ef.parent_block().del_component(ef)

        if len(self.data.objs) != 1:
            raise ValueError(
                "replaceExternalFunctionsWithVariables: "
                "TrustRegion only supports models with a single active Objective.")
        # Normalize to a minimization problem.
        if self.data.objs[0].sense == maximize:
            self.data.objs[0].expr = -1* self.data.objs[0].expr
            self.data.objs[0].sense = minimize
        self._remove_ef_from_expr(self.data.objs[0])

        for i in self.data.ef_outputs:
            self.data.ef_inputs[i] = \
                list(identify_variables(
                    self.data.truth_models[self.data.ef_outputs[i]],
                    include_fixed=False)
                )
        self.data.all_variables.update(self.data.ef_outputs.values())
        self.data.all_variables = list(self.data.all_variables)

    def createConstraints(self):
        """
        Create the basis constraint y = b(w) (equation 3) and the surrogate
        model constraint y = r_k(w) (equation 5)

        Both constraints are immediately deactivated after creation and
        are activated later as necessary.
        """
        b = self.data
        # This implements: y = b(w) from Yoshio/Biegler (2020)
        @b.Constraint(b.ef_outputs.index_set())
        def basis_constraint(b, i):
            ef_output_var = b.ef_outputs[i]
            return ef_output_var == b.basis_expressions[ef_output_var]
        b.basis_constraint.deactivate()

        b.INPUT_OUTPUT = Set(initialize=(
            (i, j) for i in b.ef_outputs.index_set()
            for j in range(len(b.ef_inputs[i]))
            ))
        # Mutable parameters holding the surrogate-model linearization data;
        # filled in by updateSurrogateModel().
        b.basis_model_output = Param(b.ef_outputs.index_set(), mutable=True)
        b.grad_basis_model_output = Param(b.INPUT_OUTPUT, mutable=True)
        b.truth_model_output = Param(b.ef_outputs.index_set(), mutable=True)
        b.grad_truth_model_output = Param(b.INPUT_OUTPUT, mutable=True)
        b.value_of_ef_inputs = Param(b.INPUT_OUTPUT, mutable=True)

        # This implements: y = r_k(w)
        @b.Constraint(b.ef_outputs.index_set())
        def sm_constraint_basis(b, i):
            ef_output_var = b.ef_outputs[i]
            return ef_output_var == b.basis_expressions[ef_output_var] + \
                b.truth_model_output[i] - b.basis_model_output[i] + \
                sum((b.grad_truth_model_output[i, j]
                     - b.grad_basis_model_output[i, j])
                    * (w - b.value_of_ef_inputs[i, j])
                    for j, w in enumerate(b.ef_inputs[i]))
        b.sm_constraint_basis.deactivate()

    def getCurrentDecisionVariableValues(self):
        """
        Return current decision variable values, keyed by variable name.
        """
        decision_values = {}
        for var in self.decision_variables:
            decision_values[var.name] = value(var)
        return decision_values

    def updateDecisionVariableBounds(self, radius):
        """
        Update the TRSP_k decision variable bounds

        This corresponds to:
            || E^{-1} (u - u_k) || <= trust_radius

        We omit E^{-1} because we assume that the users have correctly
        scaled their variables.
        """
        for var in self.decision_variables:
            var.setlb(
                maxIgnoreNone(value(var) - radius,
                              self.initial_decision_bounds[var.name][0]))
            var.setub(
                minIgnoreNone(value(var) + radius,
                              self.initial_decision_bounds[var.name][1]))

    def updateSurrogateModel(self):
        """
        The parameters needed for the surrogate model are the values of:
            b(w_k)      : basis_model_output
            d(w_k)      : truth_model_output
            grad b(w_k) : grad_basis_model_output
            grad d(w_k) : grad_truth_model_output
        """
        b = self.data
        for i, y in b.ef_outputs.items():
            b.basis_model_output[i] = value(b.basis_expressions[y])
            b.truth_model_output[i] = value(b.truth_models[y])
            # Basis functions are Pyomo expressions (in theory)
            gradBasis = differentiate(b.basis_expressions[y],
                                      wrt_list=b.ef_inputs[i])
            # These, however, are external functions
            gradTruth = differentiate(b.truth_models[y],
                                      wrt_list=b.ef_inputs[i])
            for j, w in enumerate(b.ef_inputs[i]):
                b.grad_basis_model_output[i, j] = gradBasis[j]
                b.grad_truth_model_output[i, j] = gradTruth[j]
                b.value_of_ef_inputs[i, j] = value(w)

    def getCurrentModelState(self):
        """
        Return current state of all model variables.
        This is necessary if we need to reject a step and move backwards.
        """
        return list(value(v, exception=False) for v in self.data.all_variables)

    def calculateFeasibility(self):
        """
        Feasibility measure (theta(x)) is:
            || y - d(w) ||_1
        """
        b = self.data
        return sum(abs(value(y) - value(b.truth_models[y]))
                   for i, y in b.ef_outputs.items())

    def calculateStepSizeInfNorm(self, original_values, new_values):
        """
        Taking original and new values, calculate the step-size norm ||s_k||:
            || u - u_k ||_inf

        We assume that the user has correctly scaled their variables.
        """
        original_vals = []
        new_vals = []
        for var, val in original_values.items():
            original_vals.append(val)
            new_vals.append(new_values[var])
        return max([abs(new - old)
                    for new, old in zip(new_vals, original_vals)])

    def initializeProblem(self):
        """
        Initializes appropriate constraints, values, etc.
        for TRF problem

        Returns
        -------
            objective_value : Initial objective
            feasibility     : Initial feasibility measure

        STEPS:
            1. Create and solve PMP (eq. 3) and set equal to "x_0"
            2. Evaluate d(w_0)
            3. Evaluate initial feasibility measure (theta(x_0))
            4. Create initial SM (difference btw. low + high fidelity models)
        """
        self.replaceExternalFunctionsWithVariables()
        self.initial_decision_bounds = {}
        for var in self.decision_variables:
            self.initial_decision_bounds[var.name] = [var.lb, var.ub]
        self.createConstraints()
        self.data.basis_constraint.activate()
        objective_value, _, _ = self.solveModel()
        self.data.basis_constraint.deactivate()
        self.updateSurrogateModel()
        feasibility = self.calculateFeasibility()
        self.data.sm_constraint_basis.activate()
        return objective_value, feasibility

    def solveModel(self):
        """
        Call the specified solver to solve the problem.

        Returns
        -------
            self.data.objs[0] : Current objective value
            step_norm         : Current step size inf norm
            feasibility       : Current feasibility measure

        This also caches the previous values of the vars, just in case
        we need to access them later if a step is rejected
        """
        current_decision_values = self.getCurrentDecisionVariableValues()
        self.data.previous_model_state = self.getCurrentModelState()
        results = self.solver.solve(self.model,
                                    keepfiles=self.config.keepfiles,
                                    tee=self.config.tee)

        if not check_optimal_termination(results):
            raise ArithmeticError(
                'EXIT: Model solve failed with status {} and termination'
                ' condition(s) {}.'.format(
                    str(results.solver.status),
                    str(results.solver.termination_condition))
                )

        self.model.solutions.load_from(results)

        new_decision_values = self.getCurrentDecisionVariableValues()
        step_norm = self.calculateStepSizeInfNorm(current_decision_values,
                                                  new_decision_values)
        feasibility = self.calculateFeasibility()
        return self.data.objs[0](), step_norm, feasibility

    def rejectStep(self):
        """
        If a step is rejected, we reset the model variables values back
        to their cached state - which we set in solveModel
        """
        for var, val in zip(self.data.all_variables,
                            self.data.previous_model_state):
            var.set_value(val, skip_validation=True)
def solve(self, model, **kwds):
    """Solve a GDP model by branch and bound over its disjunctions.

    A best-first search is run on a min-heap of partially-enforced models;
    each node activates one more disjunction and solves a subproblem per
    disjunct. Returns the populated SolverResults object.
    """
    config = self.CONFIG(kwds.pop('options', {}))
    config.set_value(kwds)

    # Validate model to be used with gdpbb
    self.validate_model(model)
    # Set solver as an MINLP
    solver = SolverFactory(config.solver)
    solve_data = GDPbbSolveData()
    solve_data.timing = Container()
    solve_data.original_model = model
    solve_data.results = SolverResults()

    old_logger_level = config.logger.getEffectiveLevel()
    with time_code(solve_data.timing, 'total'), \
            restore_logger_level(config.logger), \
            create_utility_block(model, 'GDPbb_utils', solve_data):
        if config.tee and old_logger_level > logging.INFO:
            # If the logger does not already include INFO, include it.
            config.logger.setLevel(logging.INFO)
        config.logger.info(
            "Starting GDPbb version %s using %s as subsolver"
            % (".".join(map(str, self.version())), config.solver))

        # Setup results
        solve_data.results.solver.name = 'GDPbb - %s' % (str(
            config.solver))
        setup_results_object(solve_data, config)
        # Initialize list containing indicator vars for reupdating model after solving
        indicator_list_name = unique_component_name(
            model, "_indicator_list")
        indicator_vars = []
        for disjunction in model.component_data_objects(ctype=Disjunction,
                                                        active=True):
            for disjunct in disjunction.disjuncts:
                indicator_vars.append(disjunct.indicator_var)
        setattr(model, indicator_list_name, indicator_vars)

        # get objective sense
        # NOTE(review): assumes at least one active Objective exists;
        # obj is None otherwise and obj.sense would raise.
        objectives = model.component_data_objects(Objective, active=True)
        obj = next(objectives, None)
        obj_sign = 1 if obj.sense == minimize else -1
        solve_data.results.problem.sense = obj.sense

        # clone original model for root node of branch and bound
        root = model.clone()

        # set up lists to keep track of which disjunctions have been covered.

        # this list keeps track of the original disjunctions that were
        # active and are soon to be inactive
        root.GDPbb_utils.unenforced_disjunctions = list(
            disjunction for disjunction in
            root.GDPbb_utils.disjunction_list if disjunction.active)

        # this list keeps track of the disjunctions that have been
        # activated by the branch and bound
        root.GDPbb_utils.curr_active_disjunctions = []

        # deactivate all disjunctions in the model
        # self.indicate(root)
        for djn in root.GDPbb_utils.unenforced_disjunctions:
            djn.deactivate()
        # Deactivate all disjuncts in model. To be reactivated when disjunction
        # is reactivated.
        for disj in root.component_data_objects(Disjunct, active=True):
            disj._deactivate_without_fixing_indicator()

        # Satisfiability check would go here

        # solve the root node
        config.logger.info("Solving the root node.")
        obj_value, result, _ = self.subproblem_solve(root, solver, config)

        # initialize minheap for Branch and Bound algorithm
        # Heap structure: (ordering tuple, model)
        # Ordering tuple: (objective value, disjunctions_left, -counter)
        #  - select solutions with lower objective value,
        #    then fewer disjunctions left to explore (depth first),
        #    then more recently encountered (tiebreaker)
        heap = []
        counter = 0
        disjunctions_left = len(root.GDPbb_utils.unenforced_disjunctions)
        heapq.heappush(
            heap, ((obj_sign * obj_value, disjunctions_left, -counter),
                   root, result, root.GDPbb_utils.variable_list))

        # loop to branch through the tree
        while len(heap) > 0:
            # pop best model off of heap
            # (note: `vars` shadows the builtin of the same name here)
            sort_tup, mdl, mdl_results, vars = heapq.heappop(heap)
            old_obj_val, disjunctions_left, _ = sort_tup
            config.logger.info(
                "Exploring node with LB %.10g and %s inactive disjunctions."
                % (old_obj_val, disjunctions_left))

            # if all the originally active disjunctions are active, solve and
            # return solution
            if disjunctions_left == 0:
                config.logger.info("Model solved.")
                # Model is solved. Copy over solution values.
                for orig_var, soln_var in zip(
                        model.GDPbb_utils.variable_list, vars):
                    orig_var.value = soln_var.value

                solve_data.results.problem.lower_bound = mdl_results.problem.lower_bound
                solve_data.results.problem.upper_bound = mdl_results.problem.upper_bound
                solve_data.results.solver.timing = solve_data.timing
                solve_data.results.solver.termination_condition = mdl_results.solver.termination_condition
                return solve_data.results

            next_disjunction = mdl.GDPbb_utils.unenforced_disjunctions.pop(
                0)
            config.logger.info("Activating disjunction %s"
                               % next_disjunction.name)
            next_disjunction.activate()
            mdl.GDPbb_utils.curr_active_disjunctions.append(
                next_disjunction)
            djn_left = len(mdl.GDPbb_utils.unenforced_disjunctions)
            for disj in next_disjunction.disjuncts:
                disj._activate_without_unfixing_indicator()
                if not disj.indicator_var.fixed:
                    disj.indicator_var = 0  # initially set all indicator vars to zero
            # Branch: one child per disjunct, with that disjunct selected.
            for disj in next_disjunction.disjuncts:
                if not disj.indicator_var.fixed:
                    disj.indicator_var = 1
                mnew = mdl.clone()
                if not disj.indicator_var.fixed:
                    disj.indicator_var = 0
                obj_value, result, vars = self.subproblem_solve(
                    mnew, solver, config)
                counter += 1
                ordering_tuple = (obj_sign * obj_value, djn_left, -counter)
                heapq.heappush(heap, (ordering_tuple, mnew, result, vars))
            config.logger.info(
                "Added %s new nodes with %s relaxed disjunctions to the heap. Size now %s."
                % (len(next_disjunction.disjuncts), djn_left, len(heap)))
            if (bb, rr, cc) in brc) == 1  # (fragment: tail of the R4 rule, whose beginning lies outside this chunk)

m.R4 = Constraint(b, k, rule=R4, doc='Los bloques 3x3 no deben repetir números')

## OBJECTIVE FUNCTION
# None is actually needed; a dummy one is created.
m.OBJ = Objective(expr=1)

## SOLVE VERBATIM
from pyomo.opt import SolverFactory
opt = SolverFactory('glpk')
results = opt.solve(m)
results.write()

## LOGGING
# Create the logger and set its level
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)

# Create the formatter
formatter = logging.Formatter('%(message)s')

# Create the file handler and attach the formatter to it
file_handler = logging.FileHandler(__file__[:-3] + '_Logging.log', mode='w')
file_handler.setFormatter(formatter)

# Start adding things to the logger
logger.addHandler(file_handler)
def max_heat(n, m, k, QH, QC, R, M):
    """Build and solve the transportation LP maximizing exchanged heat.

    Parameters
    ----------
    n, m, k : int
        Number of hot streams, cold streams, and temperature intervals.
    QH, QC : nested sequences
        Heat loads of hot stream i / cold stream j per temperature interval.
    R : sequence
        Heat residuals per temperature interval.
    M : iterable of (i, j)
        Forbidden/valid hot-cold match pairs (passed through to
        valid_quadruples_set; semantics defined there).

    Returns
    -------
    (total_heat, q) : total heat exchanged (LP upper bound) and the
        4-dimensional transfer table q[i][s][j][t].
    """
    (A, VH, VC, AT) = valid_quadruples_set(k, QH, QC, R, M)

    if len(A) == 0:
        # No valid arcs: nothing can be exchanged. Return a zero total and
        # an all-zero transfer table. (An unused matrix Q was previously
        # built here as well; it was dead code and has been removed.)
        total_heat = 0
        q = [[[[0 for t in range(k)] for j in range(m)] for ti in range(k)]
             for i in range(n)]
        return (total_heat, q)

    model = AbstractModel()
    model.n = Param(within=NonNegativeIntegers, initialize=n)  # number of hot streams
    model.m = Param(within=NonNegativeIntegers, initialize=m)  # number of cold streams
    model.k = Param(within=NonNegativeIntegers, initialize=k)  # number of temperature intervals
    model.H = RangeSet(0, model.n - 1)  # set of hot streams
    model.C = RangeSet(0, model.m - 1)  # set of cold streams
    model.T = RangeSet(0, model.k - 1)  # set of temperature intervals
    model.M = Set(within=model.H * model.C, initialize=M)
    model.A = Set(within=model.H * model.T * model.C * model.T,
                  initialize=A)  # set of valid quadruples (arcs)
    model.VH = Set(within=model.H * model.T,
                   initialize=VH)  # set of valid hot pairs (vertices)
    model.VC = Set(within=model.C * model.T,
                   initialize=VC)  # set of valid cold pairs (vertices)
    model.AT = Set(within=model.T,
                   initialize=AT)  # set of active heat residuals
    # Parameter: heat load of hot stream i in temperature interval t
    model.QH = Param(model.VH, within=NonNegativeReals,
                     initialize=lambda model, i, s: QH[i][s])
    # Parameter: heat load of cold stream j in temperature interval t
    model.QC = Param(model.VC, within=NonNegativeReals,
                     initialize=lambda model, j, t: QC[j][t])
    # Parameter: heat load of hot stream i
    model.h = Param(model.H, within=NonNegativeReals,
                    initialize=lambda model, i: sum(QH[i]))
    # Parameter: heat load of cold stream j
    model.c = Param(model.C, within=NonNegativeReals,
                    initialize=lambda model, j: sum(QC[j]))
    # Parameter: heat residual of active temperature interval t
    model.R = Param(model.AT, within=NonNegativeReals,
                    initialize=lambda model, t: R[t])
    # Variable: heat transferred from (i,s) to (j,t)
    model.q = Var(model.A, within=NonNegativeReals)

    # Objective: maximization of the total heat exchanged
    def total_heat_rule(model):
        return sum(model.q[i, s, j, t] for (i, s, j, t) in model.A)
    model.total_heat = Objective(rule=total_heat_rule, sense=maximize)

    # Constraint: heat conservation of hot streams
    def hot_supply_rule(model, i, s):
        return sum(model.q[temp_i, temp_s, j, t]
                   for (temp_i, temp_s, j, t) in model.A
                   if temp_i == i and temp_s == s) <= model.QH[i, s]
    model.hot_supply_constraint = Constraint(model.VH, rule=hot_supply_rule)

    # Constraint: heat conservation of cold streams
    def cold_demand_rule(model, j, t):
        return sum(model.q[i, s, temp_j, temp_t]
                   for (i, s, temp_j, temp_t) in model.A
                   if temp_j == j and temp_t == t) <= model.QC[j, t]
    model.cold_demand_constraint = Constraint(model.VC, rule=cold_demand_rule)

    # Constraint: feasibility of the heat residuals (heat crossing
    # interval boundary u must not exceed the residual R[u])
    def residual_feasibility_rule(model, u):
        return sum(model.q[i, s, j, t]
                   for (i, s, j, t) in model.A
                   if s <= u and t > u) <= model.R[u]
    model.residual_feasibility_constraint = Constraint(
        model.AT, rule=residual_feasibility_rule)

    solver = 'cplex'
    opt = SolverFactory(solver)
    opt.options['threads'] = 1
    LP = model.create_instance()
    results = opt.solve(LP)
    total_heat = results.problem.upper_bound
    q = [[[[0 for t in range(k)] for j in range(m)] for ti in range(k)]
         for i in range(n)]
    for (i, s, j, t) in A:
        q[i][s][j][t] = LP.q[i, s, j, t].value
    return (total_heat, q)
model.constraints.add( 10000 * model.needed[worker] >= sum(model.works[worker, day, shift] for day in days for shift in days_shifts[day]) ) # if any model.works[worker, ·, ·] non-zero, model.needed[worker] must be one; else is zero to reduce the obj function # 10000 is to remark, but 5 was enough since max of 40 hours yields max of 5 shifts, the maximum possible sum # Constraint (def of model.no_pref) for worker in workers: model.constraints.add( model.no_pref[worker] >= sum(model.works[worker, 'Sat', shift] for shift in days_shifts['Sat']) - sum(model.works[worker, 'Sun', shift] for shift in days_shifts['Sun']) ) # if not working on sunday but working saturday model.needed must be 1; else will be zero to reduce the obj function opt = SolverFactory('cbc') # choose a solver results = opt.solve(model) # solve the model with the selected solver opt = SolverFactory('cbc') # Select solver solver_manager = SolverManagerFactory('neos') # Solve in neos server results = solver_manager.solve(model, opt=opt) def get_workers_needed(needed): """Extract to a list the needed workers for the optimal solution.""" workers_needed = [] for worker in workers: if needed[worker].value == 1: workers_needed.append(worker) return workers_needed
####################Define objective function########################## def ObjectiveFunction(model): return summation(model.dist, model.x) * model.cf / 1000 + summation( model.k) * model.cPS model.Obj = Objective(rule=ObjectiveFunction, sense=minimize) #############Solve model################## instance = model.create_instance(data) print('Instance is constructed:', instance.is_constructed()) #opt = SolverFactory('cbc',executable=r'C:\Users\Asus\Desktop\POLIMI\Thesis\GISELE\Gisele_MILP\cbc') opt = SolverFactory('gurobi') opt.options['mipgap'] = 0.30 #opt = SolverFactory('cbc',executable=r'C:\Users\Asus\Desktop\POLIMI\Thesis\GISELE\New folder\cbc') print('Starting optimization process') time_i = datetime.now() opt.solve(instance, tee=True) time_f = datetime.now() print('Time required for optimization is', time_f - time_i) links = instance.x power = instance.P voltage = instance.E PS = instance.PPS connections_output = pd.DataFrame(columns=[['id1', 'id2']]) k = 0
def solve(self, solver, io, io_options, solver_options, symbolic_labels, load_solutions):
    """ Optimize the model

    Creates a solver for the requested interface (`solver`/`io`), applies
    `solver_options`, solves `self.model` (warm-starting and using the
    persistent-solver API when available), and returns (opt, results).
    The solver plugin is always deactivated in the `finally` block.
    """
    assert self.model is not None
    opt = SolverFactory(solver, solver_io=io)
    opt.options.update(solver_options)
    # Sanity-check that the I/O mode mapped to the expected problem format.
    if io == 'nl':
        assert opt.problem_format() == ProblemFormat.nl
    elif io == 'lp':
        assert opt.problem_format() == ProblemFormat.cpxlp
    elif io == 'mps':
        assert opt.problem_format() == ProblemFormat.mps
    #elif io == 'python':
    #    print opt.problem_format()
    #    assert opt.problem_format() is None
    try:
        if isinstance(opt, PersistentSolver):
            # Persistent solvers hold the instance; set it before solving.
            opt.set_instance(self.model,
                             symbolic_solver_labels=symbolic_labels)
            if opt.warm_start_capable():
                results = opt.solve(warmstart=True,
                                    load_solutions=load_solutions,
                                    **io_options)
            else:
                results = opt.solve(load_solutions=load_solutions,
                                    **io_options)
        else:
            if opt.warm_start_capable():
                results = opt.solve(self.model,
                                    symbolic_solver_labels=symbolic_labels,
                                    warmstart=True,
                                    load_solutions=load_solutions,
                                    **io_options)
            else:
                results = opt.solve(self.model,
                                    symbolic_solver_labels=symbolic_labels,
                                    load_solutions=load_solutions,
                                    **io_options)
        return opt, results
    finally:
        opt.deactivate()
    # NOTE(review): the two statements below are unreachable — the `try`
    # always returns and there is no `except` clause, so any exception
    # propagates past the `finally`. Dead code kept as-is.
    del opt
    return None, None
# Set up the parameter estimator on the second Pyomo model and discretize
# the DAE system by orthogonal collocation.
optimizer = ParameterEstimator(pyomo_model2)

optimizer.apply_discretization('dae.collocation', nfe=30, ncp=3,
                               scheme='LAGRANGE-RADAU')
# optimizer.model.time.pprint()

# Provide good initial guess
p_guess = {'k1': 2.0, 'k2': 0.5}

#: @dthierry: regular stuff for fe_factory
param_dict = {}
param_dict["P", "k1"] = 2.0
param_dict["P", "k2"] = 0.5

model = optimizer.model
#: @dthierry: gracefully call fe_factory
fe_factory = fe_initialize(model, src,
                           init_con="init_conditions_c",
                           param_name="P",
                           param_values=param_dict)
fe_factory.run()

# Fix the parameters at the guessed values before the simulation solve.
optimizer.model.P['k1'].set_value(p_guess['k1'])
optimizer.model.P['k2'].set_value(p_guess['k2'])
optimizer.model.P.fix()

ip = SolverFactory('ipopt')
ip.solve(optimizer.model, tee=True)
#: all done
def test_propagate_uncertainty1(self):
    '''
    It tests the function propagate_uncertainty with
    min f: p1*x1+ p2*(x2^2) + p1*p2
     s.t  c1: x1 + x2 = p1
          c2: x2 + x3 = p2
          0 <= x1, x2, x3 <= 10
          p1 = 10
          p2 = 5
    Variables = (x1, x2, x3)
    Parameters (fixed variables) = (p1, p2)
    '''
    ### Create optimization model
    m = ConcreteModel()
    # Import duals so constraint multipliers are available after the solve.
    m.dual = Suffix(direction=Suffix.IMPORT)
    m.x1 = Var()
    m.x2 = Var()
    m.x3 = Var()

    # Define parameters
    # (modelled as fixed Vars so k_aug can treat them as parameters)
    m.p1 = Var(initialize=10)
    m.p2 = Var(initialize=5)
    m.p1.fix()
    m.p2.fix()

    # Define constraints
    m.con1 = Constraint(expr=m.x1 + m.x2 - m.p1 == 0)
    m.con2 = Constraint(expr=m.x2 + m.x3 - m.p2 == 0)

    # Define objective
    m.obj = Objective(expr=m.p1 * m.x1 + m.p2 * (m.x2**2) + m.p1 * m.p2,
                      sense=minimize)

    ### Solve optimization model
    # NOTE(review): 'tee' is passed to SolverFactory, not to solve(); the
    # intent was presumably opt.solve(m, tee=True) — confirm.
    opt = SolverFactory('ipopt', tee=True)
    opt.solve(m)

    ### Analytic solution
    '''
    At the optimal solution, none of the bounds are active. As long as the
    active set does not change (i.e., none of the bounds become active), the
    first order optimality conditions reduce to a simple linear system.
    '''
    # dual variables (multipliers)
    v2_ = 0
    v1_ = m.p1()

    # primal variables
    x2_ = (v1_ + v2_) / (2 * m.p2())
    x1_ = m.p1() - x2_
    x3_ = m.p2() - x2_

    ### Analytic sensitivity
    '''
    Using the analytic solution above, we can compute the sensitivities of
    x and v to perturbations in p1 and p2.
    The matrix dx_dp contains the sensitivities of x to perturbations in p
    '''
    # Initialize sensitivity matrix Nx x Np
    # Rows: variables x
    # Columns: parameters p
    dx_dp = np.zeros((3, 2))
    # dx2/dp1 = 1/(2 * p2)
    dx_dp[1, 0] = 1 / (2 * m.p2())
    # dx2/dp2 = -(v1 + v2)/(2 * p2**2)
    dx_dp[1, 1] = -(v1_ + v2_) / (2 * m.p2()**2)
    # dx1/dp1 = 1 - dx2/dp1
    dx_dp[0, 0] = 1 - dx_dp[1, 0]
    # dx1/dp2 = 0 - dx2/dp2
    dx_dp[0, 1] = 0 - dx_dp[1, 1]
    # dx3/dp1 = 0 - dx2/dp1  (comment corrected to match the code)
    dx_dp[2, 0] = 0 - dx_dp[1, 0]
    # dx3/dp2 = 1 - dx2/dp2  (comment corrected to match the code)
    dx_dp[2, 1] = 1 - dx_dp[1, 1]

    '''
    Similarly, we can compute the gradients df_dx, df_dp
    and Jacobians dc_dx, dc_dp
    '''
    # Initialize 1 x 3 array to store (\partial f)/(\partial x)
    # Elements: variables x
    df_dx = np.zeros(3)
    # df/dx1 = p1
    df_dx[0] = m.p1()
    # df/dx2 = 2 * p2 * x2  (comment corrected: derivative of p2*x2^2)
    df_dx[1] = 2 * m.p2() * x2_
    # df/dx3 = 0

    # Initialize 1 x 2 array to store (\partial f)/(\partial p)
    # Elements: parameters p
    df_dp = np.zeros(2)
    # df/dp1 = x1 + p2  (comment typo 'dxp1' fixed)
    df_dp[0] = x1_ + m.p2()
    # df/dp2 = x2**2 + p1
    df_dp[1] = x2_**2 + m.p1()

    # Initialize 2 x 3 array to store (\partial c)/(\partial x)
    # Rows: constraints c
    # Columns: variables x
    dc_dx = np.zeros((2, 3))
    # dc1/dx1 = 1
    dc_dx[0, 0] = 1
    # dc1/dx2 = 1
    dc_dx[0, 1] = 1
    # dc2/dx2 = 1
    dc_dx[1, 1] = 1
    # dc2/dx3 = 1
    dc_dx[1, 2] = 1
    # Remaining entries are 0

    # Initialize 2 x 2 array to store (\partial c)/(\partial p)
    # Rows: constraints c
    # Columns: parameters p  (header corrected: this block is w.r.t. p)
    dc_dp = np.zeros((2, 2))
    # dc1/dp1 = -1
    dc_dp[0, 0] = -1
    # dc2/dp2 = -1
    dc_dp[1, 1] = -1

    ### Uncertainty propagation
    '''
    Now lets test the uncertainty propagation package. We will assume p has
    covariance sigma_p = [[2, 0], [0, 1]]
    '''
    ## Prepare inputs
    # Covariance matrix
    sigma_p = np.array([[2, 0], [0, 1]])
    # Nominal values for uncertain parameters
    theta = {'p1': m.p1(), 'p2': m.p2()}
    # Names of uncertain parameters
    theta_names = ['p1', 'p2']
    # Important to unfix the parameters!
    # Otherwise k_aug will complain about too few degrees of freedom
    m.p1.unfix()
    m.p2.unfix()

    ## Run package
    results = propagate_uncertainty(m, theta, sigma_p, theta_names)

    ## Check results
    # First-order propagation: sigma_f = (df/dp + df/dx dx/dp) sigma_p (.)^T
    tmp_f = (df_dp + df_dx @ dx_dp)
    sigma_f = tmp_f @ sigma_p @ tmp_f.transpose()
    tmp_c = (dc_dp + dc_dx @ dx_dp)
    sigma_c = tmp_c @ sigma_p @ tmp_c.transpose()

    # This currently just checks if the order of the outputs did not change
    # TODO: improve test robustness by using this information to set
    # var_idx and theta_idx. This way the test will still work
    # regardless of the order. In other words, the analytic solution needs to be
    # reordered to match the variable/constraint order from
    # this package. Alternately, the results could be converted into a Pandas dataframe
    assert results.col == ['x1', 'x2', 'p1', 'p2', 'x3']
    assert results.row == ['con1', 'con2', 'obj']
    # Boolean masks selecting variable vs. parameter columns in results.col.
    var_idx = np.array([True, True, False, False, True])
    theta_idx = np.array([False, False, True, True, False])

    # Check the gradient of the objective w.r.t. x matches
    np.testing.assert_array_almost_equal(results.gradient_f[var_idx],
                                         np.array(df_dx))
    # Check the gradient of the objective w.r.t. p (parameters) matches
    np.testing.assert_array_almost_equal(results.gradient_f[theta_idx],
                                         np.array(df_dp))
    # Check the Jacobian of the constraints w.r.t. x matches
    np.testing.assert_array_almost_equal(
        results.gradient_c.toarray()[:, var_idx], np.array(dc_dx))
    # Check the Jacobian of the constraints w.r.t. p (parameters) matches
    np.testing.assert_array_almost_equal(
        results.gradient_c.toarray()[:, theta_idx], np.array(dc_dp))
    # Check the NLP sensitivity results for the variables (x) matches
    np.testing.assert_array_almost_equal(results.dsdp.toarray()[var_idx, :],
                                         np.array(dx_dp))
    # Check the NLP sensitivity results for the parameters (p) matches
    # (parameters move one-for-one with themselves: identity)
    np.testing.assert_array_almost_equal(results.dsdp.toarray()[theta_idx, :],
                                         np.array([[1, 0], [0, 1]]))
    # Check the uncertainty propagation results for the constraints matches
    np.testing.assert_array_almost_equal(results.propagation_c,
                                         np.sum(sigma_c))
    # Check the uncertainty propagation results for the objective matches
    assert results.propagation_f == pytest.approx(sigma_f)
# Filename: PYOMO_example_2.py # Description: An example of solving a binary # linear programming problem with Pyomo # Authors: Papathanasiou, J. & Ploskas, N. from pyomo.environ import * from pyomo.opt import SolverFactory import matplotlib.pyplot as plt import numpy as np # Create an object to perform optimization opt = SolverFactory('cplex') # Create an object of a concrete model model = ConcreteModel() # Define the decision variables model.x1 = Var(within=Binary) model.x2 = Var(within=Binary) model.x3 = Var(within=Binary) model.x4 = Var(within=Binary) model.x5 = Var(within=Binary) model.x6 = Var(within=Binary) # Define the objective function model.obj = Objective(expr=model.x1 + model.x2 + model.x3 + model.x4 + model.x5 + model.x6) # Define the constraints model.con1 = Constraint(expr=model.x1 + model.x3 >= 2) model.con2 = Constraint(expr=model.x1 + model.x2 + model.x5 >= 2)
if vecinoPiso == 0: modelo.R4.add(modelo.x[i, j - 1] >= modelo.x[i, j]) modelo.R4.add(modelo.x[i - 1, j] >= modelo.x[i, j]) modelo.R4.add(modelo.x[i, j - 2] >= modelo.x[i, j]) casillas_expr_R5.append((i, j)) elif fichaNueva == 'd': casillas_expr_R5.append((i, j)) if len(casillas_expr_R5) == 0 and len(entrada) > 0: raise Exception('Ninguna ficha fue valida, revisar entradas') modelo.R5 = Constraint(expr=sum(modelo.x[idx] for idx in casillas_expr_R5) == 1) #Mostrar solucion del modelo SolverFactory('glpk').solve(modelo) #modelo.display() #modelo.pprint() print("-----------------ESTADO FINAL-------------") actualizarMapa(modelo, nFilas, nColumnas) print_campo(modelo, nFilas) print("------------------------------------------------------") print("------------------------------------------------------") delete_component(modelo, 'x') delete_component(modelo, 'ob') delete_component(modelo, 'R1') delete_component(modelo, 'R2') delete_component(modelo, 'R3') delete_component(modelo, 'R4') delete_component(modelo, 'R5') entrada.pop(0)
def initialize(
        # NOTE(review): mutable default arguments (dicts) are shared across
        # calls — safe only while nothing mutates them; consider None
        # sentinels.
        blk,
        state_args={
            "flow_component": {
                "N2": 1.0,
                "CO2": 1.0,
                "NO": 1.0,
                "O2": 1.0,
                "H2O": 1.0,
                "SO2": 1.0
            },
            "pressure": 1e5,
            "temperature": 495.0
        },
        hold_state=False,
        state_vars_fixed=False,
        outlvl=0,
        solver='ipopt',
        optarg={'tol': 1e-8}):
    '''
    Initialisation routine for property package.

    Key values for the state_args dict:
        flow_component : value at which to initialize component flows
                         (default=27.5e3 mol/s)
        pressure : value at which to initialize pressure (default=2.97e7 Pa)
        temperature : value at which to initialize temperature
                      (default=866.5 K)
        outlvl : sets output level of initialisation routine
                 * 0 = no output (default)
                 * 1 = return solver state for each step in routine
                 * 2 = include solver output information (tee=True)
        state_vars_fixed: Flag to denote if state vars have already been
                          fixed.
                          - True - states have already been fixed by the
                                   control volume 1D. Control volume 0D
                                   does not fix the state vars, so will
                                   be False if this state block is used
                                   with 0D blocks.
                          - False - states have not been fixed. The state
                                    block will deal with fixing/unfixing.
        optarg : solver options dictionary object (default=None)
        solver : str indicating which solver to use during
                 initialization (default = 'ipopt')
        hold_state : flag indicating whether the initialization routine
                     should unfix any state variables fixed during
                     initialization (default=False).
                     - True - states variables are not unfixed, and a dict
                              of returned containing flags for which states
                              were fixed during initialization.
                     - False - state variables are unfixed after
                               initialization by calling the release_state
                               method

    Returns:
        If hold_states is True, returns a dict containing flags for which
        states were fixed during initialization.
    '''
    if state_vars_fixed is False:
        flags = fix_state_vars(blk, state_args)
    # Check when the state vars are fixed already result in dof 0
    for k in blk.keys():
        if degrees_of_freedom(blk[k]) != 0:
            raise Exception("State vars fixed but degrees of freedom "
                            "for state block is not zero during "
                            "initialization.")
    # Set solver options: echo solver output only at the highest out level
    if outlvl > 1:
        stee = True
    else:
        stee = False
    opt = SolverFactory(solver)
    opt.options = optarg
    # ---------------------------------------------------------------------
    # Solve 1st stage: seed vapor pressure from the correlation, then solve
    # each element with the auxiliary correlations deactivated.
    for k in blk.keys():
        if hasattr(blk[k], "vapor_pressure_correlation"):
            # Riedel-style correlation evaluated at the current temperature.
            # NOTE(review): blk is the indexed block — value(blk.temperature)
            # looks like it should be value(blk[k].temperature); confirm.
            blk[k].vapor_pressure = \
                exp(blk[k].vapor_pressure_coeff[1].value +
                    blk[k].vapor_pressure_coeff[2].value /
                    value(blk.temperature) +
                    blk[k].vapor_pressure_coeff[3].value *
                    value(blk.temperature) +
                    blk[k].vapor_pressure_coeff[4].value *
                    log(value(blk.temperature)) +
                    blk[k].vapor_pressure_coeff[5].value *
                    value(blk.temperature)**2)
        if hasattr(blk[k], 'enthalpy_correlation'):
            blk[k].enthalpy_correlation.deactivate()
        if hasattr(blk[k], "volumetric_flow_calculation"):
            blk[k].volumetric_flow_calculation.deactivate()
        if hasattr(blk[k], "entropy_correlation"):
            blk[k].entropy_correlation.deactivate()
        if hasattr(blk[k], "density_mol_calculation"):
            blk[k].density_mol_calculation.deactivate()
        results = opt.solve(blk[k], tee=stee)
        if outlvl > 0:
            if results.solver.termination_condition \
                    == TerminationCondition.optimal:
                logger.info('{} Initialisation Step 1 Complete.'.format(
                    blk.name))
            else:
                logger.warning('{} Initialisation Step 1 Failed.'.format(
                    blk.name))
    # ---------------------------------------------------------------------
    # Solve 2nd stage: re-activate the correlations and solve the full
    # element models.
    for k in blk.keys():
        if hasattr(blk[k], 'enthalpy_correlation'):
            blk[k].enthalpy_correlation.activate()
        if hasattr(blk[k], "volumetric_flow_calculation"):
            blk[k].volumetric_flow_calculation.activate()
        if hasattr(blk[k], "entropy_correlation"):
            blk[k].entropy_correlation.activate()
        if hasattr(blk[k], "density_mol_calculation"):
            blk[k].density_mol_calculation.activate()
        results = opt.solve(blk[k], tee=stee)
        if outlvl > 0:
            if results.solver.termination_condition \
                    == TerminationCondition.optimal:
                logger.info('{} Initialisation Step 2 Complete.'.format(
                    blk.name))
            else:
                logger.warning('{} Initialisation Step 2 Failed.'.format(
                    blk.name))
    # ---------------------------------------------------------------------
    # If input block, return flags, else release state
    if outlvl > 0:
        # NOTE(review): redundant duplicated condition (outlvl > 0 twice).
        if outlvl > 0:
            logger.info('{} Initialisation Complete.'.format(blk.name))
    if state_vars_fixed is False:
        if hold_state is True:
            return flags
        else:
            blk.release_state(flags)
################################################################################# import sys import os from unittest.mock import patch sys.path.append(os.path.abspath('..')) # current folder is ~/tests import numpy as np import pandas as pd from scipy import sparse import pytest from pytest import approx from idaes.apps.uncertainty_propagation.uncertainties import quantify_propagate_uncertainty, propagate_uncertainty, clean_variable_name from pyomo.opt import SolverFactory from pyomo.environ import * import pyomo.contrib.parmest.parmest as parmest ipopt_available = SolverFactory('ipopt').available() kaug_available = SolverFactory('k_aug').available() dotsens_available = SolverFactory('dot_sens').available() @pytest.mark.skipif(not ipopt_available, reason="The 'ipopt' command is not available") @pytest.mark.skipif(not kaug_available, reason="The 'k_aug' command is not available") @pytest.mark.skipif(not dotsens_available, reason="The 'dot_sens' command is not available") class TestUncertaintyPropagation: @pytest.mark.unit def test_quantify_propagate_uncertainty1(self): ''' It tests the function quantify_propagate_uncertainty with rooney & biegler's model.
class ExtensiveFormAlgorithm(PySPConfiguredObject):
    """Builds, writes, and solves the extensive form (EF) of a stochastic
    program held by an initialized scenario tree manager.

    Usable as a context manager; close() releases the solver, the solver
    manager, and the EF instance."""

    @classmethod
    def _declare_options(cls, options=None):
        """Declare the EF-specific configuration options on ``options``
        (creating a fresh PySPConfigBlock when none is given)."""
        if options is None:
            options = PySPConfigBlock()

        safe_declare_unique_option(
            options,
            "cvar_weight",
            PySPConfigValue(
                1.0,
                domain=_domain_nonnegative,
                description=("The weight associated with the CVaR term in "
                             "the risk-weighted objective "
                             "formulation. If the weight is 0, then "
                             "*only* a non-weighted CVaR cost will appear "
                             "in the EF objective - the expected cost "
                             "component will be dropped. Default is 1.0."),
                doc=None,
                visibility=0),
            ap_group=_ef_group_label)
        safe_declare_unique_option(
            options,
            "generate_weighted_cvar",
            PySPConfigValue(
                False,
                domain=bool,
                description=("Add a weighted CVaR term to the "
                             "primary objective. Default is False."),
                doc=None,
                visibility=0),
            ap_group=_ef_group_label)
        safe_declare_unique_option(
            options,
            "risk_alpha",
            PySPConfigValue(
                0.95,
                domain=_domain_unit_interval,
                description=("The probability threshold associated with "
                             "CVaR (or any future) risk-oriented "
                             "performance metrics. Default is 0.95."),
                doc=None,
                visibility=0),
            ap_group=_ef_group_label)
        safe_declare_unique_option(
            options,
            "cc_alpha",
            PySPConfigValue(
                0.0,
                domain=_domain_unit_interval,
                description=("The probability threshold associated with a "
                             "chance constraint. The RHS will be one "
                             "minus this value. Default is 0."),
                doc=None,
                visibility=0),
            ap_group=_ef_group_label)
        safe_declare_unique_option(
            options,
            "cc_indicator_var",
            PySPConfigValue(
                None,
                domain=_domain_must_be_str,
                description=("The name of the binary variable to be used "
                             "to construct a chance constraint. Default "
                             "is None, which indicates no chance "
                             "constraint."),
                doc=None,
                visibility=0),
            ap_group=_ef_group_label)
        safe_declare_unique_option(
            options,
            "mipgap",
            PySPConfigValue(
                None,
                domain=_domain_unit_interval,
                description=("Specifies the mipgap for the EF solve."),
                doc=None,
                visibility=0),
            ap_group=_ef_group_label)
        safe_declare_common_option(options, "solver")
        safe_declare_common_option(options, "solver_io")
        safe_declare_common_option(options, "solver_manager")
        safe_declare_common_option(options, "solver_options")
        safe_declare_common_option(options, "disable_warmstart")
        safe_declare_common_option(options, "pyro_host")
        safe_declare_common_option(options, "pyro_port")
        safe_declare_common_option(options, "pyro_shutdown")
        # CLEANUP: the original declared "verbose", "output_times", and
        # "output_solver_results" twice; the duplicate (no-op) declarations
        # have been removed.
        safe_declare_common_option(options, "verbose",
                                   ap_group=_ef_group_label)
        safe_declare_common_option(options, "output_times",
                                   ap_group=_ef_group_label)
        safe_declare_common_option(options, "output_solver_results",
                                   ap_group=_ef_group_label)
        safe_declare_common_option(options, "symbolic_solver_labels",
                                   ap_group=_ef_group_label)
        safe_declare_common_option(options, "output_solver_log",
                                   ap_group=_ef_group_label)
        safe_declare_common_option(options, "keep_solver_files",
                                   ap_group=_ef_group_label)

        return options

    def __enter__(self):
        return self

    def __exit__(self, *args):
        self.close()

    def close(self):
        """Release the EF instance, solver, and solver manager, and reset
        all solve-status attributes to ``undefined``."""
        self.destroy_ef()
        if self._solver is not None:
            self._solver.deactivate()
        if self._solver_manager is not None:
            if isinstance(self._solver_manager,
                          pyomo.solvers.plugins.smanager.
                          pyro.SolverManager_Pyro):
                # NOTE(review): "pyro_shutdown_workers" is not declared in
                # _declare_options above — presumably declared by a common
                # option elsewhere; confirm.
                if self.get_option("pyro_shutdown_workers"):
                    self._solver_manager.shutdown_workers()
            self._solver_manager.deactivate()
        self._solver_manager = None

        self._manager = None
        self.objective = undefined
        self.objective_sense = undefined
        self.gap = undefined
        self.termination_condition = undefined
        self.solver_status = undefined
        self.solution_status = undefined
        self.solver_results = undefined
        self.pyomo_solve_time = undefined
        self.solve_time = undefined

    def __init__(self, manager, *args, **kwds):
        import pyomo.solvers.plugins.smanager.pyro
        super(ExtensiveFormAlgorithm, self).__init__(*args, **kwds)

        # TODO: after PH moves over to the new code
        #if not isinstance(manager, ScenarioTreeManager):
        #    raise TypeError("ExtensiveFormAlgorithm requires an instance of the "
        #                    "ScenarioTreeManager interface as the "
        #                    "second argument")
        if not manager.initialized:
            raise ValueError("ExtensiveFormAlgorithm requires a scenario tree "
                             "manager that has been fully initialized")

        self._manager = manager
        self.instance = None
        self._solver_manager = None
        self._solver = None

        # The following attributes will be modified by the
        # solve() method. For users that are scripting, these
        # can be accessed after the solve() method returns.
        # They will be reset each time solve() is called.
        ############################################
        self.objective = undefined
        self.gap = undefined
        self.termination_condition = undefined
        self.solver_status = undefined
        self.solution_status = undefined
        self.solver_results = undefined
        self.pyomo_solve_time = undefined
        self.solve_time = undefined
        ############################################

        self._solver = SolverFactory(self.get_option("solver"),
                                     solver_io=self.get_option("solver_io"))
        if isinstance(self._solver, UnknownSolver):
            raise ValueError("Failed to create solver of type=" +
                             self.get_option("solver") +
                             " for use in extensive form solve")
        if len(self.get_option("solver_options")) > 0:
            if self.get_option("verbose"):
                print("Initializing ef solver with options=" +
                      str(list(self.get_option("solver_options"))))
            self._solver.set_options("".join(
                self.get_option("solver_options")))
        if self.get_option("mipgap") is not None:
            if (self.get_option("mipgap") < 0.0) or \
               (self.get_option("mipgap") > 1.0):
                raise ValueError("Value of the mipgap parameter for the EF "
                                 "solve must be on the unit interval; "
                                 "value specified=" +
                                 str(self.get_option("mipgap")))
            self._solver.options.mipgap = float(self.get_option("mipgap"))

        solver_manager_type = self.get_option("solver_manager")
        if solver_manager_type == "phpyro":
            print("*** WARNING ***: PHPyro is not a supported solver "
                  "manager type for the extensive-form solver. "
                  "Falling back to serial.")
            solver_manager_type = 'serial'

        self._solver_manager = SolverManagerFactory(
            solver_manager_type,
            host=self.get_option("pyro_host"),
            port=self.get_option("pyro_port"))
        if self._solver_manager is None:
            raise ValueError("Failed to create solver manager of type=" +
                             self.get_option("solver") +
                             " for use in extensive form solve")

    def build_ef(self):
        """(Re)build the EF instance from the scenario tree, optionally
        adding a weighted CVaR term and/or a chance constraint."""
        self.destroy_ef()
        if self.get_option("verbose"):
            print("Creating extensive form instance")
        start_time = time.time()

        # then validate the associated parameters
        generate_weighted_cvar = False
        cvar_weight = None
        risk_alpha = None
        if self.get_option("generate_weighted_cvar"):
            generate_weighted_cvar = True
            cvar_weight = self.get_option("cvar_weight")
            risk_alpha = self.get_option("risk_alpha")

        self.instance = create_ef_instance(
            self._manager.scenario_tree,
            verbose_output=self.get_option("verbose"),
            generate_weighted_cvar=generate_weighted_cvar,
            cvar_weight=cvar_weight,
            risk_alpha=risk_alpha,
            cc_indicator_var_name=self.get_option("cc_indicator_var"),
            cc_alpha=self.get_option("cc_alpha"))

        if self.get_option("verbose") or self.get_option("output_times"):
            print("Time to construct extensive form instance=%.2f seconds"
                  % (time.time() - start_time))

    def destroy_ef(self):
        """Detach the scenario sub-instances from the EF and restore their
        per-scenario objectives."""
        if self.instance is not None:
            for scenario in self._manager.scenario_tree.scenarios:
                self.instance.del_component(scenario.name)
                scenario._instance_objective.activate()
        self.instance = None

    def write(self, filename):
        """Write the EF to ``filename``, appending a suffix based on the
        solver's problem format when none is given.

        Returns (filename, smap_id) from the writer."""
        if self.instance is None:
            raise RuntimeError(
                "The extensive form instance has not been constructed."
                "Call the build_ef() method to construct it.")

        suf = os.path.splitext(filename)[1]
        if suf not in ['.nl', '.lp', '.mps']:
            if self._solver.problem_format() == ProblemFormat.cpxlp:
                filename += '.lp'
            elif self._solver.problem_format() == ProblemFormat.nl:
                filename += '.nl'
            elif self._solver.problem_format() == ProblemFormat.mps:
                filename += '.mps'
            else:
                raise ValueError("Could not determine output file format. "
                                 "No recognized ending suffix was provided "
                                 "and no format was indicated was by the "
                                 "--solver-io option.")

        start_time = time.time()
        if self.get_option("verbose"):
            print("Starting to write extensive form")

        smap_id = write_ef(self.instance,
                           filename,
                           self.get_option("symbolic_solver_labels"))

        print("Extensive form written to file=" + filename)
        if self.get_option("verbose") or self.get_option("output_times"):
            print("Time to write output file=%.2f seconds"
                  % (time.time() - start_time))

        return filename, smap_id

    def solve(self,
              check_status=True,
              exception_on_failure=True,
              io_options=None):
        """Queue the EF on the solver manager, wait for the results, and
        load the solution back into the scenario tree.

        Returns True on a failed status check, False otherwise; raises
        RuntimeError on failure when exception_on_failure is True."""
        # TODO: Does this import need to be delayed because
        # it is in a plugins subdirectory
        from pyomo.solvers.plugins.solvers.persistent_solver import \
            PersistentSolver

        if self.instance is None:
            raise RuntimeError(
                "The extensive form instance has not been constructed."
                "Call the build_ef() method to construct it.")

        start_time = time.time()
        if self.get_option("verbose"):
            print("Queuing extensive form solve")

        # Reset all solve-status attributes before this solve.
        self.objective = undefined
        self.gap = undefined
        self.bound = undefined
        self.pyomo_solve_time = undefined
        self.solve_time = undefined
        self.termination_condition = undefined
        self.solver_status = undefined
        self.solution_status = undefined
        self.solver_results = undefined

        if isinstance(self._solver, PersistentSolver):
            self._solver.set_instance(
                self.instance,
                symbolic_solver_labels=self.get_option(
                    "symbolic_solver_labels"))

        solve_kwds = {}
        solve_kwds['load_solutions'] = False
        if self.get_option("keep_solver_files"):
            solve_kwds['keepfiles'] = True
        if self.get_option("symbolic_solver_labels"):
            solve_kwds['symbolic_solver_labels'] = True
        if self.get_option("output_solver_log"):
            solve_kwds['tee'] = True
        if io_options is not None:
            solve_kwds.update(io_options)

        self.objective_sense = \
            find_active_objective(self.instance).sense

        if (not self.get_option("disable_warmstart")) and \
           (self._solver.warm_start_capable()):
            action_handle = self._solver_manager.queue(self.instance,
                                                       opt=self._solver,
                                                       warmstart=True,
                                                       **solve_kwds)
        else:
            action_handle = self._solver_manager.queue(self.instance,
                                                       opt=self._solver,
                                                       **solve_kwds)

        if self.get_option("verbose"):
            print("Waiting for extensive form solve")
        results = self._solver_manager.wait_for(action_handle)

        if self.get_option("verbose"):
            print("Done with extensive form solve - loading results")

        if self.get_option("output_solver_results"):
            print("Results for ef:")
            results.write(num=1)

        self.solver_results = results
        if hasattr(results.solver, "user_time") and \
           (not isinstance(results.solver.user_time, UndefinedData)) and \
           (results.solver.user_time is not None):
            # the solve time might be a string, or might
            # not be - we eventually would like more
            # consistency on this front from the solver
            # plugins.
            self.solve_time = \
                float(results.solver.user_time)
        elif hasattr(results.solver, "time"):
            self.solve_time = \
                float(results.solver.time)
        else:
            self.solve_time = undefined

        if hasattr(results, "pyomo_solve_time"):
            self.pyomo_solve_time = \
                results.pyomo_solve_time
        else:
            # BUG FIX: the original assigned self.pyomo_solve_times (note
            # the trailing 's'), creating a new attribute and leaving
            # pyomo_solve_time stale in this branch.
            self.pyomo_solve_time = undefined

        self.termination_condition = \
            results.solver.termination_condition
        self.solver_status = \
            results.solver.status

        if len(results.solution) > 0:
            assert len(results.solution) == 1

            results_sm = results._smap
            self.instance.solutions.load_from(results)

            solution0 = results.solution(0)
            if hasattr(solution0, "gap") and \
               (solution0.gap is not None):
                self.gap = solution0.gap
            else:
                self.gap = undefined

            self.solution_status = solution0.status

            if self.get_option("verbose"):
                print("Storing solution in scenario tree")

            for scenario in self._manager.scenario_tree.scenarios:
                scenario.update_solution_from_instance()
            self._manager.scenario_tree.snapshotSolutionFromScenarios()
            self.objective = self._manager.scenario_tree.\
                findRootNode().\
                computeExpectedNodeCost()
            if self.gap is not undefined:
                if self.objective_sense == pyomo.core.base.minimize:
                    self.bound = self.objective - self.gap
                else:
                    self.bound = self.objective + self.gap

        else:

            self.objective = undefined
            self.gap = undefined
            self.bound = undefined
            self.solution_status = undefined

        failure = False

        if check_status:
            if not ((self.solution_status == SolutionStatus.optimal) or
                    (self.solution_status == SolutionStatus.feasible)):
                failure = True
                if self.get_option("verbose") or \
                   exception_on_failure:
                    msg = ("EF solve failed solution status check:\n"
                           "Solver Status: %s\n"
                           "Termination Condition: %s\n"
                           "Solution Status: %s\n"
                           % (self.solver_status,
                              self.termination_condition,
                              self.solution_status))
                    if self.get_option("verbose"):
                        print(msg)
                    if exception_on_failure:
                        raise RuntimeError(msg)
        else:
            if self.get_option("verbose"):
                print("EF solve completed. Skipping status check.")

        if self.get_option("verbose") or self.get_option("output_times"):
            print("Time to solve and load results for the "
                  "extensive form=%.2f seconds"
                  % (time.time() - start_time))

        return failure
def FVA(model,
        settings,
        fileName='',
        fluxes=-1,
        inpFvaSettings=None,
        epsPerc=0.01,
        lowerBound=10**-8,
        upperBound=10**3,
        # BUG FIX: cpu_count() / 2 is a float on Python 3 and
        # multiprocessing.Pool requires an integer process count
        # (TypeError); use floor division.
        nOfProcessors=cpu_count() // 2):
    """Flux Variability Analysis: solve the base growth model, constrain
    biomass to within epsPerc of the optimum, then min/max each flux in
    `fluxes` in parallel over nOfProcessors workers.

    Args:
        model: model identifier passed through to uncModel().
        settings: dict of solve settings; 'logFile' and
            'retrieveInstance' entries are set/overwritten here.
        fileName: if non-empty, pickle the results to this path.
        fluxes: list of flux indices to analyse, or -1 for all.
        inpFvaSettings: optional overrides for the Gurobi FVA settings.
        epsPerc: allowed fractional degradation of the optimal growth rate.
        lowerBound/upperBound: box bounds used to detect (un)boundedness.
        nOfProcessors: worker-pool size.

    Returns:
        List with one [min, max, minState, maxState, index, optV] entry
        per flux (plus an appended model/settings record); empty list on
        error. Returns None when run from a spawned (non-__main__) module,
        which is the Windows multiprocessing guard below.
    """
    # Functions have to be included above parallelization task. Cannot use
    # import in normal sense. Windows only probably.
    def mesg(logFile, mesgToAdd):
        # Append a progress/error message to the FVA log file.
        fl = open(logFile, "a+")
        fl.write(mesgToAdd)
        fl.close()

    def fvaSubProblem(Orinstance, OrOpt, size, optV, lowerBound, upperBound,
                      i):
        # Solve the min- and max-flux subproblems for flux index i.
        # import statements need to be inside due to parallelization, only
        # necessary for Windows probably
        from pyomo.environ import Objective, minimize, maximize, value, Constraint
        from pyomo.opt import TerminationCondition
        import logging

        # Suppress pyomo warnings
        logging.getLogger('pyomo.core').setLevel(logging.ERROR)

        # initialize (min, max, termination condition at min, termination
        # condition at max, index, opt. v w/ biomass)
        fva = [[], [], [], [], [], []]

        # (ABSOLUTELY NECESSARY) though not entirely sure why...
        instance = Orinstance
        Opt = OrOpt

        # minimize flux
        if optV[i] < lowerBound:
            minState = 'Flux is lower than' + str(
                lowerBound) + 'in optimal solution'
            minV = 0
        else:
            try:
                # change objective to minimizing flux
                instance.minFlux = Objective(
                    expr=instance.v[i],
                    sense=minimize)  # default sense is minimize
                instance.minFlux.activate()
                # try to avoid sub. term.
                instance.boxMin = Constraint(expr=instance.v[i] >= lowerBound)
                instance.boxMin.activate()
                # solve min. problem
                Soln = Opt.solve(instance, tee=False)
                if Soln.solver.termination_condition == TerminationCondition.unbounded:
                    minV = lowerBound
                elif Soln.solver.termination_condition == TerminationCondition.optimal:
                    minV = value(instance.minFlux)
                else:
                    minV = -1
                minState = str(Soln.solver.termination_condition)
                instance.minFlux.deactivate()
                instance.boxMin.deactivate()
            except Exception as e:
                # Best-effort: record the failure and continue with the
                # max subproblem.
                instance.minFlux.deactivate()
                instance.boxMin.deactivate()
                minState = 'Cannot load a SolverResults object with bad status: error'
                minV = -1
                # print(e)

        # maximize flux
        if optV[i] >= upperBound:
            maxState = 'Flux is larger than ' + str(
                upperBound) + ' in optimal solution.'
            maxV = upperBound
        else:
            try:
                # change objective to maximizing flux
                instance.maxFlux = Objective(expr=instance.v[i],
                                             sense=maximize)
                instance.maxFlux.activate()
                # set box constraint on flux to roughly predict
                # unboundedness.
                instance.boxMax = Constraint(expr=instance.v[i] <= upperBound)
                instance.boxMax.activate()
                # solve max. problem
                Soln = Opt.solve(instance, tee=False)
                if Soln.solver.termination_condition == TerminationCondition.unbounded:
                    maxV = upperBound
                elif Soln.solver.termination_condition == TerminationCondition.optimal:
                    maxV = value(instance.maxFlux)
                else:
                    maxV = -1
                maxState = str(Soln.solver.termination_condition)
                instance.maxFlux.deactivate()
                instance.boxMax.deactivate()
            except Exception as e:
                instance.maxFlux.deactivate()
                instance.boxMax.deactivate()
                maxState = 'Cannot load a SolverResults object with bad status: error'
                maxV = -1
                # print(e)

        # update FVA list
        fva[0] = minV
        fva[1] = maxV
        fva[2] = minState
        fva[3] = maxState
        fva[4] = i
        fva[5] = optV[i]

        # check for errors to log
        minError = ''
        maxError = ''
        if minV == -1:
            minError = '. Error in minimization problem: ' + minState
        if maxV == -1:
            maxError = 'Error in maximization problem: ' + maxState

        # send update to screen
        mesg(
            settings['logFile'], '\tSolved FVA subproblem ' + str(i) +
            ' of ' + str(size - 1) + minError + '. ' + maxError + '\n')
        # we're done
        return fva

    # following if-sentence necessary in Windows for use of multiprocess
    # package (spawned workers re-import this module and must not re-run
    # the driver).
    if __name__ == '__main__':
        # initialize
        fvaData = []

        # Sets logfile to default filename and checks if logFile already
        # exists
        if 'logFile' not in settings:
            settings['logFile'] = 'logFileFVA'
        if os.path.exists(settings['logFile']):
            os.remove(settings['logFile'])

        try:
            # set fvaSettings to default if none is chosen
            fvaSettings = {
                'solver': 'gurobi',
                'NumericFocus': 3,
                'Aggregate': 0,
                'BarQCPConvTol': 10**-3,
                'BarHomogeneous': 1,
                'Presolve': -1,
                'DualReductions': 1,
                'BarCorrectors': -1,
                'Method': 2
            }
            # Only apply overrides when every key is a known setting.
            if inpFvaSettings is not None:
                if set(inpFvaSettings.keys()).issubset(fvaSettings.keys()):
                    for key, item in inpFvaSettings.items():
                        fvaSettings[key] = item
        except:
            mesg(settings['logFile'],
                 'Error: could not load settings argument properly')
            return fvaData

        mesg(settings['logFile'], 'Initializing FVA process\n\n')

        # measure time
        start_time = time.time()

        # set solver options for FVAsubproblem. Interior point experiences
        # less problems differentiating feasibility/unboundedness.
        # Setting all settings to default except for Aggregate, which is
        # turned off (=0), and BarQCPConvTol set to 10**-3, seems to work
        # fine.
        solver = fvaSettings['solver']
        OrOpt = SolverFactory(solver)
        OrOpt.options['NumericFocus'] = fvaSettings[
            'NumericFocus']  # 0 default. When set to zero, multiple max.
        # problems cannot differentiate infeasible and unbounded problems.
        OrOpt.options['Presolve'] = fvaSettings['Presolve']  # -1 default
        OrOpt.options['Aggregate'] = fvaSettings[
            'Aggregate']  # Presolver options. Gurobi docs recommend turning
        # this off if num. trouble, 1 - default, 0 - off
        OrOpt.options['DualReductions'] = fvaSettings[
            'DualReductions']  # put to zero to verify if unbounded or
        # infeasible
        OrOpt.options['BarQCPConvTol'] = fvaSettings['BarQCPConvTol']
        OrOpt.options['BarHomogeneous'] = fvaSettings[
            'BarHomogeneous']  # -1 default (automatic), 0 off, 1 on: Some
        # min problems results in unloadable SolveResults object if not
        # turned on
        OrOpt.options['BarCorrectors'] = fvaSettings['BarCorrectors']
        OrOpt.options['Method'] = fvaSettings['Method']

        # set this automatically since we will need the original instance
        # anyway
        settings['retrieveInstance'] = True
        mesg(
            settings['logFile'],
            'Starting to solve initial optimization problem to find optimal growth rate\n'
        )

        # fetch opt. growth rate and flux state and pyomo concrete model.
        try:
            sol, stat = uncModel(model, settings)
            grwthRate = sol['grwthRate']
            Oinstance = sol['instance']
            optV = sol['optFlux']
            cS = sol['cS']
        except:
            mesg(settings['logFile'],
                 'Error: could not solve initial optimization problem\n')
            return fvaData

        mesg(
            settings['logFile'], 'Initial optimization problem solved. '
            'Optimal growth rate is ' + str(grwthRate) + ' g/gDWh\n\n')

        # constrain biomass reaction to stay within epsPerc of the optimum
        def bioConstr(instance):
            return sum(cS[j] * instance.v[j]
                       for j in instance.flux) >= (1 - epsPerc) * grwthRate

        # fix growth rate and deactivate biomass as objective
        Oinstance.con = Constraint(rule=bioConstr)
        Oinstance.grwthRate.deactivate()

        # set flux to default (all fluxes) when the -1 sentinel is used
        if type(fluxes) is not list and fluxes == -1:
            fluxes = range(len(optV))

        mesg(
            settings['logFile'],
            'Initializing parallelization FVA subproblem tasks on ' +
            str(nOfProcessors) + ' processors\n')

        try:
            # Parallelize
            size = len(fluxes)
            fvaData = []
            p = Pool(nOfProcessors)
            func = partial(fvaSubProblem, Oinstance, OrOpt, size, optV,
                           lowerBound, upperBound)
            r = p.map_async(func, fluxes, callback=fvaData.append)
            r.wait()
        except:
            mesg(settings['logFile'],
                 'Error: could not parallelize FVA subproblems\n')
            return fvaData

        # Add the model and its settings to fvaData
        try:
            fvaData[0].append(model + ' ' + str(settings))
        except:
            mesg(
                settings['logFile'],
                'Error: could not append settings to FVA data list. Check if output is empty'
            )

        if fileName != '':
            try:
                # saves fva using pickle
                with open(fileName, 'wb') as f:
                    pickle.dump(fvaData, f)
            except:
                mesg(settings['logFile'],
                     'Error: could not save FVA data list to file')
                return fvaData
            # print where FVA is saved and time duration
            mesg(
                settings['logFile'], '\nDone. FVA results saved as ' +
                fileName + ' using the pickle package. Duration of FVA: ' +
                str(round(time.time() - start_time)) + ' s')

        return fvaData
def initialize(self,
               state_args=None,
               hold_state=False,
               state_vars_fixed=False,
               outlvl=0,
               solver='ipopt',
               optarg=None):
    """Initialisation routine for property package.

    Key values for the state_args dict:
        flow_mol_comp : value at which to initialize component flows
                        (default: 1.0 mol/s per component)
        pressure : value at which to initialize pressure (default=1e5 Pa)
        temperature : value at which to initialize temperature
                      (default=495.0 K)

    Args:
        outlvl: sets logging level
        state_vars_fixed: Flag to denote state vars have been fixed.
                          - True - states have been fixed by the control
                                   volume 1D. Control volume 0D does not fix
                                   the state vars, so will be False if this
                                   state block is used with 0D blocks.
                          - False - states have not been fixed. The state
                                    block will deal with fixing/unfixing.
        optarg: solver options dictionary object (default: {'tol': 1e-8})
        solver: str indicating which solver to use during initialization
                (default = 'ipopt')
        hold_state: flag indicating whether the initialization routine
                    should unfix any state variables fixed during
                    initialization (default=False).
                    - True - state variables are not unfixed, and a dict is
                             returned containing flags for which states were
                             fixed during initialization.
                    - False - state variables are unfixed after
                              initialization by calling the release_state
                              method.

    Returns:
        If hold_state is True, returns a dict containing flags for which
        states were fixed during initialization.
    """
    # BUG FIX: the original signature used mutable dict defaults for
    # state_args and optarg.  Mutable defaults are created once and shared
    # across every call, so any mutation leaks between calls.  Resolve the
    # defaults per call instead (backward-compatible for all callers).
    if state_args is None:
        state_args = {
            "flow_mol_comp": {
                "N2": 1.0,
                "CO2": 1.0,
                "NO": 1.0,
                "O2": 1.0,
                "H2O": 1.0,
                "SO2": 1.0
            },
            "pressure": 1e5,
            "temperature": 495.0
        }
    if optarg is None:
        optarg = {'tol': 1e-8}

    init_log = idaeslog.getInitLogger(self.name, outlvl, tag="properties")
    solve_log = idaeslog.getSolveLogger(self.name, outlvl, tag="properties")

    opt = SolverFactory(solver)
    opt.options = optarg

    if state_vars_fixed is False:
        flags = fix_state_vars(self, state_args)

    # Check when the state vars are fixed already result in dof 0
    for b in self.values():
        if degrees_of_freedom(b) != 0:
            # BUG FIX: corrected "initializtion" typo in the error message.
            raise Exception(f"{self.name} initialization error: State vars "
                            "fixed but degrees of freedom not equal to 0")

    # ---------------------------------------------------------------------
    # Solve 1st stage: temporarily deactivate the enthalpy/entropy/flow
    # correlations so the simpler subproblem converges first, then
    # reactivate them and re-solve the full block (stage 2).
    for k, b in self.items():
        deactivate_list = []
        if hasattr(b, 'enthalpy_correlation'):
            deactivate_list.append(b.enthalpy_correlation)
        if hasattr(b, "volumetric_flow_calculation"):
            deactivate_list.append(b.volumetric_flow_calculation)
        if hasattr(b, "entropy_correlation"):
            deactivate_list.append(b.entropy_correlation)
        for c in deactivate_list:
            c.deactivate()

        if number_activated_constraints(b) > 0:
            with idaeslog.solver_log(solve_log, idaeslog.DEBUG) as slc:
                res = opt.solve(b, tee=slc.tee)
        else:
            res = "skipped"
        init_log.info_high("Initialization Step 1 {}.".format(
            idaeslog.condition(res)))

        for c in deactivate_list:
            c.activate()

        if number_activated_constraints(b) > 0:
            with idaeslog.solver_log(solve_log, idaeslog.DEBUG) as slc:
                res = opt.solve(b, tee=slc.tee)
        else:
            res = "skipped"
        init_log.info_high("Initialization Step 2 {}.".format(
            idaeslog.condition(res)))

    init_log.info('Initialisation Complete, {}.'.format(
        idaeslog.condition(res)))

    # ---------------------------------------------------------------------
    # If input block, return flags, else release state
    if state_vars_fixed is False:
        if hold_state is True:
            return flags
        else:
            self.release_state(flags)
import json import pyomo.environ as pyo from pyomo.opt import SolverFactory import numpy as np import os from pathlib import Path import pandas as pd import concurrent.futures from modeling_helper.utilities import * from modeling_helper.printing import * from parameters import * from model_options import * opt = SolverFactory('gurobi') current_path = Path.cwd() # ****************************************************************************** # Model Options # ****************************************************************************** # Model options are defined in the file 'model_options.py'. multiprocessing_testing = True # ****************************************************************************** # Parameters # ****************************************************************************** # Seet for randomness
import pyomo.environ from pyomo.opt import SolverFactory from pyomo.common.collections import Bunch from nonlin import model model.pprint() model.skip_canonical_repn = True # for nonlinear models instance = model.create() SolverName = "asl" so = Bunch() so.solver = "ipopt" opt = SolverFactory(SolverName, options=so) if opt is None: print("Could not construct solver %s:%s" % (SolverName, so.solver)) sys.exit(1) results = opt.solve(instance) results.write() instance.load(results) # put results in model # because we know there is a variable named x x_var = getattr(instance, "x") x_val = x_var() print("x was " + str(x_val))
into gen_inc_heat_rates.tab if you want to play around with the model behavior. In both versions of incremental heat rate tables, I gave natural gas combustion turbines a very minor heat rate penalty to discourage committing more capacity than is needed. I changed the incremental heat rate to 99 percent of the full load heat rate, with 1 percent of the fuel use incurred at 0 electricity output. For this to work, you need to ensure that the switch_mod package directory is in your python search path. See the README for more info. """ from pyomo.environ import * from pyomo.opt import SolverFactory from switch_mod.utilities import define_AbstractModel switch_model = define_AbstractModel("switch_mod", "local_td", "project.unitcommit", "fuel_cost") switch_instance = switch_model.load_inputs(inputs_dir="inputs") opt = SolverFactory("cplex") results = opt.solve(switch_instance, keepfiles=False, tee=False) switch_model.save_results(results, switch_instance, "outputs") # Dump all results # switch_instance.load(results) results.write() switch_instance.pprint()
return sum(m.d[h].Q_h for h in m.p[x].IDh) == m.p[x].Q_from_h else: return Constraint.Skip m.house_to_point = Constraint(m.x, rule=Q_initial) def opp_directions_rule(m, x, x2): if (x2 in m.p[x].i) and (x < x2): return m.p[x].Q_to[x2] * m.p[x2].Q_to[x] == 0 else: return Constraint.Skip m.opposite_directions = Constraint(m.x, m.x, rule=opp_directions_rule) def obj_rule(m): return sum(m.P_grid * m.d[h].WV for h in m.h) - sum(m.p[x].street_cost for x in m.x) m.obj = Objective(rule=obj_rule, sense=maximize) # opt = SolverFactory('baron', executable="/home/rogier/PycharmProjects/solvers/baron-lin64/baron") # opt = SolverFactory('mindtpy') opt = SolverFactory('ipopt') instance = m.create_instance() results = opt.solve(instance, tee=True) # results = opt.solve(instance, mip_solver='glpk', nlp_solver='ipopt', strategy='OA', tee=True) instance.solutions.store_to(results) instance.pprint() instance.display() # results.write(filename='results_12_block_simple.json', format='json')
class ExtensiveFormAlgorithm(PySPConfiguredObject):
    """Driver for building, writing, and solving the extensive form (EF)
    of a stochastic program held by a scenario tree manager.

    Typical use: construct with an initialized scenario tree manager,
    call build_ef(), then solve() (and optionally write()).  Supports use
    as a context manager; close() tears down the EF instance and the
    solver manager.
    """

    @classmethod
    def _declare_options(cls, options=None):
        """Declare the configuration options this algorithm understands.

        Returns the (possibly freshly created) PySPConfigBlock with all
        EF-specific and common solver options registered on it.
        """
        if options is None:
            options = PySPConfigBlock()

        safe_declare_unique_option(
            options,
            "cvar_weight",
            PySPConfigValue(
                1.0,
                domain=_domain_nonnegative,
                description=(
                    "The weight associated with the CVaR term in "
                    "the risk-weighted objective "
                    "formulation. If the weight is 0, then "
                    "*only* a non-weighted CVaR cost will appear "
                    "in the EF objective - the expected cost "
                    "component will be dropped. Default is 1.0."
                ),
                doc=None,
                visibility=0),
            ap_group=_ef_group_label)
        safe_declare_unique_option(
            options,
            "generate_weighted_cvar",
            PySPConfigValue(
                False,
                domain=bool,
                description=(
                    "Add a weighted CVaR term to the "
                    "primary objective. Default is False."
                ),
                doc=None,
                visibility=0),
            ap_group=_ef_group_label)
        safe_declare_unique_option(
            options,
            "risk_alpha",
            PySPConfigValue(
                0.95,
                domain=_domain_unit_interval,
                description=(
                    "The probability threshold associated with "
                    "CVaR (or any future) risk-oriented "
                    "performance metrics. Default is 0.95."
                ),
                doc=None,
                visibility=0),
            ap_group=_ef_group_label)
        safe_declare_unique_option(
            options,
            "cc_alpha",
            PySPConfigValue(
                0.0,
                domain=_domain_unit_interval,
                description=(
                    "The probability threshold associated with a "
                    "chance constraint. The RHS will be one "
                    "minus this value. Default is 0."
                ),
                doc=None,
                visibility=0),
            ap_group=_ef_group_label)
        safe_declare_unique_option(
            options,
            "cc_indicator_var",
            PySPConfigValue(
                None,
                domain=_domain_must_be_str,
                description=(
                    "The name of the binary variable to be used "
                    "to construct a chance constraint. Default "
                    "is None, which indicates no chance "
                    "constraint."
                ),
                doc=None,
                visibility=0),
            ap_group=_ef_group_label)
        safe_declare_common_option(options, "solver")
        safe_declare_common_option(options, "solver_io")
        safe_declare_common_option(options, "solver_manager")
        safe_declare_common_option(options, "solver_options")
        safe_declare_common_option(options, "disable_warmstart")
        safe_declare_common_option(options, "solver_manager_pyro_host")
        safe_declare_common_option(options, "solver_manager_pyro_port")
        safe_declare_common_option(options, "solver_manager_pyro_shutdown")
        safe_declare_common_option(options, "verbose",
                                   ap_group=_ef_group_label)
        safe_declare_common_option(options, "output_times",
                                   ap_group=_ef_group_label)
        safe_declare_common_option(options, "output_solver_results",
                                   ap_group=_ef_group_label)

        return options

    def __enter__(self):
        # Context-manager support: `with ExtensiveFormAlgorithm(...) as ef:`
        return self

    def __exit__(self, *args):
        self.close()

    def close(self):
        """Release the EF instance and shut down the solver manager."""
        self.destroy_ef()
        if self._solver_manager is not None:
            # Pyro-based managers may own remote worker processes that
            # need an explicit shutdown.
            if isinstance(self._solver_manager,
                          pyomo.solvers.plugins.smanager.\
                          pyro.SolverManager_Pyro):
                if self.get_option("pyro_shutdown_workers"):
                    self._solver_manager.shutdown_workers()
            self._solver_manager = None
        self._manager = None

    def __init__(self, manager, *args, **kwds):
        """Create the algorithm around an initialized scenario tree manager.

        Raises:
            ValueError: if the manager is not initialized, if the 'solver'
                option is None, or if the solver / solver manager cannot
                be constructed.
        """
        # Local import: needed by close() for the Pyro isinstance check.
        import pyomo.solvers.plugins.smanager.pyro
        super(ExtensiveFormAlgorithm, self).__init__(*args, **kwds)

        # TODO: after PH moves over to the new code
        #if not isinstance(manager, ScenarioTreeManager):
        #    raise TypeError("ExtensiveFormAlgorithm requires an instance of the "
        #                    "ScenarioTreeManager interface as the "
        #                    "second argument")
        if not manager.initialized:
            raise ValueError("ExtensiveFormAlgorithm requires a scenario tree "
                             "manager that has been fully initialized")

        self._manager = manager
        self.instance = None
        self._solver_manager = None
        self._solver = None

        # The following attributes will be modified by the
        # solve() method. For users that are scripting, these
        # can be accessed after the solve() method returns.
        # They will be reset each time solve() is called.
        ############################################
        self.objective = None
        self.gap = None
        self.termination_condition = None
        self.termination_message = None
        self.solver_status = None
        self.solution_status = None
        self.solver_results = None
        self.time = None
        self.pyomo_time = None
        ############################################

        # apparently the SolverFactory does not have sane
        # behavior when the solver name is None
        if self.get_option("solver") is None:
            raise ValueError("The 'solver' option can not be None")
        self._solver = SolverFactory(self.get_option("solver"),
                                     solver_io=self.get_option("solver_io"))
        if isinstance(self._solver, UnknownSolver):
            raise ValueError("Failed to create solver of type="+
                             self.get_option("solver")+
                             " for use in extensive form solve")
        solver_manager_type = self.get_option("solver_manager")
        if solver_manager_type == "phpyro":
            print("*** WARNING ***: PHPyro is not a supported solver "
                  "manager type for the extensive-form solver. "
                  "Falling back to serial.")
            solver_manager_type = 'serial'
        self._solver_manager = SolverManagerFactory(
            solver_manager_type,
            host=self.get_option("solver_manager_pyro_host"),
            port=self.get_option("solver_manager_pyro_port"))
        if self._solver_manager is None:
            # NOTE(review): this message reports the "solver" option rather
            # than "solver_manager" — looks like a copy/paste slip; confirm
            # before changing the runtime string.
            raise ValueError("Failed to create solver manager of type="
                             +self.get_option("solver")+
                             " for use in extensive form solve")

    def build_ef(self):
        """Construct the extensive form instance from the scenario tree,
        optionally adding weighted-CVaR and chance-constraint components."""
        self.destroy_ef()
        if self.get_option("verbose"):
            print("Creating extensive form instance")
        start_time = time.time()

        # then validate the associated parameters.
        generate_weighted_cvar = False
        cvar_weight = None
        risk_alpha = None
        if self.get_option("generate_weighted_cvar"):
            generate_weighted_cvar = True
            cvar_weight = self.get_option("cvar_weight")
            risk_alpha = self.get_option("risk_alpha")

        self.instance = create_ef_instance(
            self._manager.scenario_tree,
            verbose_output=self.get_option("verbose"),
            generate_weighted_cvar=generate_weighted_cvar,
            cvar_weight=cvar_weight,
            risk_alpha=risk_alpha,
            cc_indicator_var_name=self.get_option("cc_indicator_var"),
            cc_alpha=self.get_option("cc_alpha"))

        if self.get_option("verbose") or self.get_option("output_times"):
            print("Time to construct extensive form instance=%.2f seconds"
                  %(time.time() - start_time))

    def destroy_ef(self):
        """Tear down the EF instance, detaching scenario sub-blocks and
        re-activating each scenario's own objective."""
        if self.instance is not None:
            for scenario in self._manager.scenario_tree.scenarios:
                self.instance.del_component(scenario.name)
                scenario._instance_objective.activate()
            self.instance = None

    def write(self, filename):
        """Write the EF instance to *filename*, inferring the extension
        (.lp/.nl/.mps) from the solver's problem format when the name has
        no recognized suffix.

        Returns:
            (filename, smap_id): the (possibly extended) filename and the
            symbol-map id produced by write_ef.
        """
        if self.instance is None:
            raise RuntimeError(
                "The extensive form instance has not been constructed."
                "Call the build_ef() method to construct it.")
        suf = os.path.splitext(filename)[1]
        if suf not in ['.nl','.lp','.mps']:
            if self._solver.problem_format() == ProblemFormat.cpxlp:
                filename += '.lp'
            elif self._solver.problem_format() == ProblemFormat.nl:
                filename += '.nl'
            elif self._solver.problem_format() == ProblemFormat.mps:
                filename += '.mps'
            else:
                raise ValueError("Could not determine output file format. "
                                 "No recognized ending suffix was provided "
                                 "and no format was indicated was by the "
                                 "--solver-io option.")

        start_time = time.time()
        print("Writing extensive form to file="+filename)
        smap_id = write_ef(self.instance,
                           filename,
                           self.get_option("symbolic_solver_labels"))

        if self.get_option("verbose") or self.get_option("output_times"):
            print("Time to write output file=%.2f seconds"
                  % (time.time() - start_time))

        return filename, smap_id

    def solve(self,
              check_status=True,
              exception_on_failure=True,
              output_solver_log=False,
              symbolic_solver_labels=False,
              keep_solver_files=False,
              io_options=None):
        """Solve the EF instance through the configured solver manager.

        Populates the scripting attributes (objective, gap, bound,
        termination_condition, solver_status, solution_status,
        solver_results, time, pyomo_time) and pushes the solution back
        into the scenario tree.

        Args:
            check_status: verify the solution status is optimal/feasible.
            exception_on_failure: raise RuntimeError when the check fails.
            output_solver_log: tee the solver log to stdout.
            symbolic_solver_labels: use symbolic names in solver files.
            keep_solver_files: retain solver temporary files.
            io_options: extra keyword options forwarded to the solve call.

        Returns:
            bool: True when the status check failed, otherwise False.
        """
        # TODO: Does this import need to be delayed because
        # it is in a plugins subdirectory
        from pyomo.solvers.plugins.solvers.persistent_solver import \
            PersistentSolver

        if self.instance is None:
            raise RuntimeError(
                "The extensive form instance has not been constructed."
                "Call the build_ef() method to construct it.")

        start_time = time.time()
        if self.get_option("verbose"):
            print("Queuing extensive form solve")

        # Reset all scripting attributes for this solve.
        self.objective = None
        self.gap = None
        self.bound = None
        self.termination_condition = None
        self.termination_message = None
        self.solver_status = None
        self.solution_status = None
        self.solver_results = None
        self.time = None
        self.pyomo_time = None

        # Persistent solvers must compile the instance before solving.
        if isinstance(self._solver, PersistentSolver):
            self._solver.compile_instance(
                self.instance,
                symbolic_solver_labels=symbolic_solver_labels)

        solve_kwds = {}
        # Solutions are loaded manually below so the gap/status can be
        # inspected first.
        solve_kwds['load_solutions'] = False
        if keep_solver_files:
            solve_kwds['keepfiles'] = True
        if symbolic_solver_labels:
            solve_kwds['symbolic_solver_labels'] = True
        if output_solver_log:
            solve_kwds['tee'] = True

        solver_options = self.get_option("solver_options")
        if len(solver_options) > 0:
            if type(solver_options) is tuple:
                # Command-line style "name=value" pairs: parse into a dict.
                solve_kwds["options"] = {}
                for name_val in solver_options:
                    assert "=" in name_val
                    name, val = name_val.split("=")
                    solve_kwds["options"][name.strip()] = val.strip()
            else:
                solve_kwds["options"] = solver_options

        if io_options is not None:
            solve_kwds.update(io_options)

        self.objective_sense = \
            find_active_objective(self.instance).sense

        # Queue asynchronously on the solver manager; warmstart when the
        # solver supports it and warmstarting is not disabled.
        if (not self.get_option("disable_warmstart")) and \
           (self._solver.warm_start_capable()):
            action_handle = self._solver_manager.queue(self.instance,
                                                       opt=self._solver,
                                                       warmstart=True,
                                                       **solve_kwds)
        else:
            action_handle = self._solver_manager.queue(self.instance,
                                                       opt=self._solver,
                                                       **solve_kwds)

        if self.get_option("verbose"):
            print("Waiting for extensive form solve")
        results = self._solver_manager.wait_for(action_handle)

        if self.get_option("verbose"):
            print("Done with extensive form solve - loading results")

        if self.get_option("output_solver_results"):
            print("Results for ef:")
            results.write(num=1)

        self.solver_results = results

        if hasattr(results.solver,"user_time") and \
           (not isinstance(results.solver.user_time,
                           UndefinedData)) and \
           (results.solver.user_time is not None):
            # the solve time might be a string, or might
            # not be - we eventually would like more
            # consistency on this front from the solver
            # plugins.
            self.time = \
                float(results.solver.user_time)
        elif hasattr(results.solver,"time"):
            self.time = \
                float(results.solver.time)
        else:
            self.time = None

        if hasattr(results,"pyomo_solve_time"):
            self.pyomo_time = \
                results.pyomo_solve_time
        else:
            self.pyomo_time = None

        self.termination_condition = \
            results.solver.termination_condition
        self.termination_message = None
        if hasattr(results.solver,"termination_message"):
            self.termination_message = results.solver.termination_message
        elif hasattr(results.solver,"message"):
            self.termination_message = results.solver.message
        self.solver_status = \
            results.solver.status

        if len(results.solution) > 0:
            assert len(results.solution) == 1

            # NOTE(review): results_sm appears unused in this method —
            # possibly a leftover; confirm before removing.
            results_sm = results._smap
            self.instance.solutions.load_from(results)

            solution0 = results.solution(0)
            if hasattr(solution0, "gap") and \
               (solution0.gap is not None) and \
               (not isinstance(solution0.gap, UndefinedData)):
                self.gap = float(solution0.gap)
            else:
                self.gap = None

            self.solution_status = solution0.status

            if self.get_option("verbose"):
                print("Storing solution in scenario tree")

            # Push the EF solution back into each scenario and snapshot it
            # on the tree so the expected cost can be computed.
            for scenario in self._manager.scenario_tree.scenarios:
                scenario.update_solution_from_instance()
            self._manager.scenario_tree.snapshotSolutionFromScenarios()
            self.objective = self._manager.scenario_tree.\
                             findRootNode().\
                             computeExpectedNodeCost()
            if self.gap is not None:
                # Derive the best bound from the objective and the gap.
                if self.objective_sense == pyomo.core.base.minimize:
                    self.bound = self.objective - self.gap
                else:
                    self.bound = self.objective + self.gap

        else:

            self.objective = None
            self.gap = None
            self.bound = None
            self.solution_status = None

        failure = False

        if check_status:
            if not ((self.solution_status == SolutionStatus.optimal) or \
                    (self.solution_status == SolutionStatus.feasible)):
                failure = True
                if self.get_option("verbose") or \
                   exception_on_failure:
                    msg = ("EF solve failed solution status check:\n"
                           "Solver Status: %s\n"
                           "Termination Condition: %s\n"
                           "Solution Status: %s\n"
                           % (self.solver_status,
                              self.termination_condition,
                              self.solution_status))
                    if self.get_option("verbose"):
                        print(msg)
                    if exception_on_failure:
                        raise RuntimeError(msg)
        else:
            if self.get_option("verbose"):
                print("EF solve completed. Skipping status check.")

        if self.get_option("verbose") or \
           self.get_option("output_times"):
            print("Time to solve and load results for the "
                  "extensive form=%.2f seconds"
                  % (time.time()-start_time))

        return failure
def sim(days): #os.chdir('./LR/CA0') instance = m1.create_instance('data.dat') instance2 = m2.create_instance('data.dat') instance2.dual = pyo.Suffix(direction=pyo.Suffix.IMPORT) opt = SolverFactory("cplex") H = instance.HorizonHours D = 2 K=range(1,H+1) #Space to store results mwh_1=[] mwh_2=[] mwh_3=[] on=[] switch=[] srsv=[] nrsv=[] solar=[] wind=[] flow=[] Generator=[] Duals=[] df_generators = pd.read_csv('LR/CA1/generators.csv',header=0) #max here can be (1,365) for day in range(1,days): #load time series data for z in instance.zones: instance.GasPrice[z] = instance.SimGasPrice[z,day] for i in K: instance.HorizonDemand[z,i] = instance.SimDemand[z,(day-1)*24+i] instance.HorizonWind[z,i] = instance.SimWind[z,(day-1)*24+i] instance.HorizonSolar[z,i] = instance.SimSolar[z,(day-1)*24+i] instance.HorizonMustRun[z,i] = instance.SimMustRun[z,(day-1)*24+i] for d in range(1,D+1): instance.HorizonPath66_imports[d] = instance.SimPath66_imports[day-1+d] instance.HorizonPath46_SCE_imports[d] = instance.SimPath46_SCE_imports[day-1+d] instance.HorizonPath61_imports[d] = instance.SimPath61_imports[day-1+d] instance.HorizonPath42_imports[d] = instance.SimPath42_imports[day-1+d] instance.HorizonPath24_imports[d] = instance.SimPath24_imports[day-1+d] instance.HorizonPath45_imports[d] = instance.SimPath45_imports[day-1+d] instance.HorizonPGE_valley_hydro[d] = instance.SimPGE_valley_hydro[day-1+d] instance.HorizonSCE_hydro[d] = instance.SimSCE_hydro[day-1+d] for i in K: instance.HorizonReserves[i] = instance.SimReserves[(day-1)*24+i] instance.HorizonPath42_exports[i] = instance.SimPath42_exports[(day-1)*24+i] instance.HorizonPath24_exports[i] = instance.SimPath24_exports[(day-1)*24+i] instance.HorizonPath45_exports[i] = instance.SimPath45_exports[(day-1)*24+i] instance.HorizonPath66_exports[i] = instance.SimPath66_exports[(day-1)*24+i] instance.HorizonPath46_SCE_minflow[i] = instance.SimPath46_SCE_imports_minflow[(day-1)*24+i] instance.HorizonPath66_minflow[i] = 
instance.SimPath66_imports_minflow[(day-1)*24+i] instance.HorizonPath42_minflow[i] = instance.SimPath42_imports_minflow[(day-1)*24+i] instance.HorizonPath61_minflow[i] = instance.SimPath61_imports_minflow[(day-1)*24+i] instance.HorizonPGE_valley_hydro_minflow[i] = instance.SimPGE_valley_hydro_minflow[(day-1)*24+i] instance.HorizonSCE_hydro_minflow[i] = instance.SimSCE_hydro_minflow[(day-1)*24+i] # CAISO_result = opt.solve(instance) instance.solutions.load_from(CAISO_result) for z in instance2.zones: instance2.GasPrice[z] = instance2.SimGasPrice[z,day] for i in K: instance2.HorizonDemand[z,i] = instance2.SimDemand[z,(day-1)*24+i] instance2.HorizonWind[z,i] = instance2.SimWind[z,(day-1)*24+i] instance2.HorizonSolar[z,i] = instance2.SimSolar[z,(day-1)*24+i] instance2.HorizonMustRun[z,i] = instance2.SimMustRun[z,(day-1)*24+i] for d in range(1,D+1): instance2.HorizonPath66_imports[d] = instance2.SimPath66_imports[day-1+d] instance2.HorizonPath46_SCE_imports[d] = instance2.SimPath46_SCE_imports[day-1+d] instance2.HorizonPath61_imports[d] = instance2.SimPath61_imports[day-1+d] instance2.HorizonPath42_imports[d] = instance2.SimPath42_imports[day-1+d] instance2.HorizonPath24_imports[d] = instance2.SimPath24_imports[day-1+d] instance2.HorizonPath45_imports[d] = instance2.SimPath45_imports[day-1+d] instance2.HorizonPGE_valley_hydro[d] = instance2.SimPGE_valley_hydro[day-1+d] instance2.HorizonSCE_hydro[d] = instance2.SimSCE_hydro[day-1+d] for i in K: instance2.HorizonReserves[i] = instance2.SimReserves[(day-1)*24+i] instance2.HorizonPath42_exports[i] = instance2.SimPath42_exports[(day-1)*24+i] instance2.HorizonPath24_exports[i] = instance2.SimPath24_exports[(day-1)*24+i] instance2.HorizonPath45_exports[i] = instance2.SimPath45_exports[(day-1)*24+i] instance2.HorizonPath66_exports[i] = instance2.SimPath66_exports[(day-1)*24+i] instance2.HorizonPath46_SCE_minflow[i] = instance2.SimPath46_SCE_imports_minflow[(day-1)*24+i] instance2.HorizonPath66_minflow[i] = 
instance2.SimPath66_imports_minflow[(day-1)*24+i] instance2.HorizonPath42_minflow[i] = instance2.SimPath42_imports_minflow[(day-1)*24+i] instance2.HorizonPath61_minflow[i] = instance2.SimPath61_imports_minflow[(day-1)*24+i] instance2.HorizonPGE_valley_hydro_minflow[i] = instance2.SimPGE_valley_hydro_minflow[(day-1)*24+i] instance2.HorizonSCE_hydro_minflow[i] = instance2.SimSCE_hydro_minflow[(day-1)*24+i] for j in instance.Generators: for t in K: if instance.on[j,t] == 1: instance2.on[j,t] = 1 instance2.on[j,t].fixed = True else: instance.on[j,t] = 0 instance2.on[j,t] = 0 instance2.on[j,t].fixed = True if instance.switch[j,t] == 1: instance2.switch[j,t] = 1 instance2.switch[j,t].fixed = True else: instance2.switch[j,t] = 0 instance2.switch[j,t] = 0 instance2.switch[j,t].fixed = True results = opt.solve(instance2) instance2.solutions.load_from(results) print ("Duals") for c in instance2.component_objects(Constraint, active=True): # print (" Constraint",c) cobject = getattr(instance2, str(c)) if str(c) in ['Bal1Constraint','Bal2Constraint','Bal3Constraint','Bal4Constraint']: for index in cobject: if int(index>0 and index<25): # print (" Constraint",c) try: Duals.append((str(c),index+((day-1)*24), instance2.dual[cobject[index]])) except KeyError: Duals.append((str(c),index+((day-1)*24),-999)) # print (" ", index, instance2.dual[cobject[index]]) #The following section is for storing and sorting results for v in instance.component_objects(Var, active=True): varobject = getattr(instance, str(v)) a=str(v) if a=='mwh_1': for index in varobject: name = index[0] g = df_generators[df_generators['name']==name] seg1 = g['seg1'].values seg1 = seg1[0] if int(index[1]>0 and index[1]<25): if index[0] in instance.Zone1Generators: gas_price = instance.GasPrice['PGE_valley'].value if index[0] in instance.Gas: marginal_cost = seg1*gas_price mwh_1.append((index[0],index[1]+((day-1)*24),varobject[index].value,'PGE_valley','Gas',marginal_cost)) elif index[0] in instance.Coal: marginal_cost 
= seg1*2 mwh_1.append((index[0],index[1]+((day-1)*24),varobject[index].value,'PGE_valley','Coal',marginal_cost)) elif index[0] in instance.Oil: marginal_cost = seg1*20 mwh_1.append((index[0],index[1]+((day-1)*24),varobject[index].value,'PGE_valley','Oil',marginal_cost)) elif index[0] in instance.PSH: marginal_cost = 10 mwh_1.append((index[0],index[1]+((day-1)*24),varobject[index].value,'PGE_valley','PSH',marginal_cost)) elif index[0] in instance.Slack: marginal_cost = 700 mwh_1.append((index[0],index[1]+((day-1)*24),varobject[index].value,'PGE_valley','Slack',marginal_cost)) elif index[0] in instance.Hydro: marginal_cost = 0 mwh_1.append((index[0],index[1]+((day-1)*24),varobject[index].value,'PGE_valley','Hydro',marginal_cost)) elif index[0] in instance.Zone2Generators: gas_price = instance.GasPrice['PGE_bay'].value if index[0] in instance.Gas: marginal_cost = seg1*gas_price mwh_1.append((index[0],index[1]+((day-1)*24),varobject[index].value,'PGE_bay','Gas',marginal_cost)) elif index[0] in instance.Coal: marginal_cost = seg1*2 mwh_1.append((index[0],index[1]+((day-1)*24),varobject[index].value,'PGE_bay','Coal',marginal_cost)) elif index[0] in instance.Oil: marginal_cost = seg1*20 mwh_1.append((index[0],index[1]+((day-1)*24),varobject[index].value,'PGE_bay','Oil',marginal_cost)) elif index[0] in instance.PSH: marginal_cost = 10 mwh_1.append((index[0],index[1]+((day-1)*24),varobject[index].value,'PGE_bay','PSH',marginal_cost)) elif index[0] in instance.Slack: marginal_cost = 700 mwh_1.append((index[0],index[1]+((day-1)*24),varobject[index].value,'PGE_bay','Slack',marginal_cost)) elif index[0] in instance.Zone3Generators: gas_price = instance.GasPrice['SCE'].value if index[0] in instance.Gas: marginal_cost = seg1*gas_price mwh_1.append((index[0],index[1]+((day-1)*24),varobject[index].value,'SCE','Gas',marginal_cost)) elif index[0] in instance.Coal: marginal_cost = seg1*2 mwh_1.append((index[0],index[1]+((day-1)*24),varobject[index].value,'SCE','Coal',marginal_cost)) 
elif index[0] in instance.Oil: marginal_cost = seg1*20 mwh_1.append((index[0],index[1]+((day-1)*24),varobject[index].value,'SCE','Oil',marginal_cost)) elif index[0] in instance.PSH: marginal_cost = 10 mwh_1.append((index[0],index[1]+((day-1)*24),varobject[index].value,'SCE','PSH',marginal_cost)) elif index[0] in instance.Slack: marginal_cost = 700 mwh_1.append((index[0],index[1]+((day-1)*24),varobject[index].value,'SCE','Slack',marginal_cost)) elif index[0] in instance.Hydro: marginal_cost = 0 mwh_1.append((index[0],index[1]+((day-1)*24),varobject[index].value,'SCE','Hydro',marginal_cost)) elif index[0] in instance.Zone4Generators: gas_price = instance.GasPrice['SDGE'].value if index[0] in instance.Gas: marginal_cost = seg1*gas_price mwh_1.append((index[0],index[1]+((day-1)*24),varobject[index].value,'SDGE','Gas',marginal_cost)) elif index[0] in instance.Coal: marginal_cost = seg1*2 mwh_1.append((index[0],index[1]+((day-1)*24),varobject[index].value,'SDGE','Coal',marginal_cost)) elif index[0] in instance.Oil: marginal_cost = seg1*20 mwh_1.append((index[0],index[1]+((day-1)*24),varobject[index].value,'SDGE','Oil',marginal_cost)) elif index[0] in instance.PSH: marginal_cost = 10 mwh_1.append((index[0],index[1]+((day-1)*24),varobject[index].value,'SDGE','PSH',marginal_cost)) elif index[0] in instance.Slack: marginal_cost = 700 mwh_1.append((index[0],index[1]+((day-1)*24),varobject[index].value,'SDGE','Slack',marginal_cost)) elif index[0] in instance.WECCImportsSDGE: gas_price = instance.GasPrice['SDGE'].value marginal_cost = 14.5+2.76*gas_price mwh_1.append((index[0],index[1]+((day-1)*24),varobject[index].value,'SDGE','imports',marginal_cost)) elif index[0] in instance.WECCImportsSCE: gas_price = instance.GasPrice['SCE'].value marginal_cost = 14.5+2.76*gas_price mwh_1.append((index[0],index[1]+((day-1)*24),varobject[index].value,'SCE','imports',marginal_cost)) elif index[0] in instance.WECCImportsPGEV: marginal_cost = 5 
# --- Post-solve result extraction for a zonal unit-commitment dispatch model (Pyomo). ---
# NOTE(review): this chunk begins mid-branch.  The first statement below is the
# tail of an `if a=='mwh_1':` handler whose header is above this excerpt.
# Presumably the surrounding context is a per-`day` loop over solved model
# instances that iterates the instance's Var components, with `a` the component
# name and `varobject` the indexed Var object -- TODO confirm against the lines
# above this chunk.
# Each extracted record is (generator, absolute_hour, value, zone, fuel, $/MWh),
# where absolute_hour = hour_of_day + (day-1)*24 and only hours 1..24 are kept.
# Marginal-cost rules visible below: Gas = seg*zone_gas_price, Coal = seg*2,
# Oil = seg*20, PSH = 10, Slack = 700, Hydro = 0, WECC imports =
# 14.5 + 2.76*zone_gas_price (or a flat 5 for PGE_valley imports).
# Tail of the mwh_1 (segment-1) extraction, then the full mwh_2 (segment-2)
# handler: the seg2 cost coefficient is looked up in df_generators by generator
# name; Zone1 generators are tagged 'PGE_valley', Zone2 'PGE_bay' (this
# physical line ends inside the Zone2 fuel dispatch).
# NOTE(review): `if int(index[1]>0 and index[1]<25):` -- the int() wrapper is
# redundant; it truthy-tests the same boolean.
mwh_1.append((index[0],index[1]+((day-1)*24),varobject[index].value,'PGE_valley','imports',marginal_cost)) if a=='mwh_2': for index in varobject: name = index[0] g = df_generators[df_generators['name']==name] seg2 = g['seg2'].values seg2 = seg2[0] if int(index[1]>0 and index[1]<25): if index[0] in instance.Zone1Generators: gas_price = instance.GasPrice['PGE_valley'].value if index[0] in instance.Gas: marginal_cost = seg2*gas_price mwh_2.append((index[0],index[1]+((day-1)*24),varobject[index].value,'PGE_valley','Gas',marginal_cost)) elif index[0] in instance.Coal: marginal_cost = seg2*2 mwh_2.append((index[0],index[1]+((day-1)*24),varobject[index].value,'PGE_valley','Coal',marginal_cost)) elif index[0] in instance.Oil: marginal_cost = seg2*20 mwh_2.append((index[0],index[1]+((day-1)*24),varobject[index].value,'PGE_valley','Oil',marginal_cost)) elif index[0] in instance.PSH: marginal_cost = 10 mwh_2.append((index[0],index[1]+((day-1)*24),varobject[index].value,'PGE_valley','PSH',marginal_cost)) elif index[0] in instance.Slack: marginal_cost = 700 mwh_2.append((index[0],index[1]+((day-1)*24),varobject[index].value,'PGE_valley','Slack',marginal_cost)) elif index[0] in instance.Hydro: marginal_cost = 0 mwh_2.append((index[0],index[1]+((day-1)*24),varobject[index].value,'PGE_valley','Hydro',marginal_cost)) elif index[0] in instance.Zone2Generators: gas_price = instance.GasPrice['PGE_bay'].value if index[0] in instance.Gas: marginal_cost = seg2*gas_price mwh_2.append((index[0],index[1]+((day-1)*24),varobject[index].value,'PGE_bay','Gas',marginal_cost)) elif index[0] in instance.Coal: marginal_cost = seg2*2 mwh_2.append((index[0],index[1]+((day-1)*24),varobject[index].value,'PGE_bay','Coal',marginal_cost)) elif index[0] in instance.Oil: marginal_cost = seg2*20 mwh_2.append((index[0],index[1]+((day-1)*24),varobject[index].value,'PGE_bay','Oil',marginal_cost)) elif index[0] in instance.PSH: marginal_cost = 10 
# mwh_2 continued: finishes Zone2 ('PGE_bay' PSH/Slack), then Zone3 ('SCE')
# and Zone4 ('SDGE') fuel/cost classification using the same rules.
mwh_2.append((index[0],index[1]+((day-1)*24),varobject[index].value,'PGE_bay','PSH',marginal_cost)) elif index[0] in instance.Slack: marginal_cost = 700 mwh_2.append((index[0],index[1]+((day-1)*24),varobject[index].value,'PGE_bay','Slack',marginal_cost)) elif index[0] in instance.Zone3Generators: gas_price = instance.GasPrice['SCE'].value if index[0] in instance.Gas: marginal_cost = seg2*gas_price mwh_2.append((index[0],index[1]+((day-1)*24),varobject[index].value,'SCE','Gas',marginal_cost)) elif index[0] in instance.Coal: marginal_cost = seg2*2 mwh_2.append((index[0],index[1]+((day-1)*24),varobject[index].value,'SCE','Coal',marginal_cost)) elif index[0] in instance.Oil: marginal_cost = seg2*20 mwh_2.append((index[0],index[1]+((day-1)*24),varobject[index].value,'SCE','Oil',marginal_cost)) elif index[0] in instance.PSH: marginal_cost = 10 mwh_2.append((index[0],index[1]+((day-1)*24),varobject[index].value,'SCE','PSH',marginal_cost)) elif index[0] in instance.Slack: marginal_cost = 700 mwh_2.append((index[0],index[1]+((day-1)*24),varobject[index].value,'SCE','Slack',marginal_cost)) elif index[0] in instance.Hydro: marginal_cost = 0 mwh_2.append((index[0],index[1]+((day-1)*24),varobject[index].value,'SCE','Hydro',marginal_cost)) elif index[0] in instance.Zone4Generators: gas_price = instance.GasPrice['SDGE'].value if index[0] in instance.Gas: marginal_cost = seg2*gas_price mwh_2.append((index[0],index[1]+((day-1)*24),varobject[index].value,'SDGE','Gas',marginal_cost)) elif index[0] in instance.Coal: marginal_cost = seg2*2 mwh_2.append((index[0],index[1]+((day-1)*24),varobject[index].value,'SDGE','Coal',marginal_cost)) elif index[0] in instance.Oil: marginal_cost = seg2*20 mwh_2.append((index[0],index[1]+((day-1)*24),varobject[index].value,'SDGE','Oil',marginal_cost)) elif index[0] in instance.PSH: marginal_cost = 10 mwh_2.append((index[0],index[1]+((day-1)*24),varobject[index].value,'SDGE','PSH',marginal_cost)) elif index[0] in instance.Slack: marginal_cost = 700 
# mwh_2 wrap-up (SDGE Slack, then WECC import rows for SDGE / SCE /
# PGE_valley); afterwards the mwh_3 (segment-3) handler begins: seg3 is looked
# up from df_generators and Zone1 ('PGE_valley') generators are classified.
mwh_2.append((index[0],index[1]+((day-1)*24),varobject[index].value,'SDGE','Slack',marginal_cost)) elif index[0] in instance.WECCImportsSDGE: gas_price = instance.GasPrice['SDGE'].value marginal_cost = 14.5+2.76*gas_price mwh_2.append((index[0],index[1]+((day-1)*24),varobject[index].value,'SDGE','imports',marginal_cost)) elif index[0] in instance.WECCImportsSCE: gas_price = instance.GasPrice['SCE'].value marginal_cost = 14.5+2.76*gas_price mwh_2.append((index[0],index[1]+((day-1)*24),varobject[index].value,'SCE','imports',marginal_cost)) elif index[0] in instance.WECCImportsPGEV: marginal_cost = 5 mwh_2.append((index[0],index[1]+((day-1)*24),varobject[index].value,'PGE_valley','imports',marginal_cost)) if a=='mwh_3': for index in varobject: name = index[0] g = df_generators[df_generators['name']==name] seg3 = g['seg3'].values seg3 = seg3[0] if int(index[1]>0 and index[1]<25): if index[0] in instance.Zone1Generators: gas_price = instance.GasPrice['PGE_valley'].value if index[0] in instance.Gas: marginal_cost = seg3*gas_price mwh_3.append((index[0],index[1]+((day-1)*24),varobject[index].value,'PGE_valley','Gas',marginal_cost)) elif index[0] in instance.Coal: marginal_cost = seg3*2 mwh_3.append((index[0],index[1]+((day-1)*24),varobject[index].value,'PGE_valley','Coal',marginal_cost)) elif index[0] in instance.Oil: marginal_cost = seg3*20 mwh_3.append((index[0],index[1]+((day-1)*24),varobject[index].value,'PGE_valley','Oil',marginal_cost)) elif index[0] in instance.PSH: marginal_cost = 10 mwh_3.append((index[0],index[1]+((day-1)*24),varobject[index].value,'PGE_valley','PSH',marginal_cost)) elif index[0] in instance.Slack: marginal_cost = 700 mwh_3.append((index[0],index[1]+((day-1)*24),varobject[index].value,'PGE_valley','Slack',marginal_cost)) elif index[0] in instance.Hydro: marginal_cost = 0 mwh_3.append((index[0],index[1]+((day-1)*24),varobject[index].value,'PGE_valley','Hydro',marginal_cost)) elif index[0] in instance.Zone2Generators: gas_price = 
# mwh_3 continued: Zone2 ('PGE_bay'), Zone3 ('SCE'), and the start of Zone4
# ('SDGE') -- identical fuel/cost dispatch to mwh_2, but with seg3.
instance.GasPrice['PGE_bay'].value if index[0] in instance.Gas: marginal_cost = seg3*gas_price mwh_3.append((index[0],index[1]+((day-1)*24),varobject[index].value,'PGE_bay','Gas',marginal_cost)) elif index[0] in instance.Coal: marginal_cost = seg3*2 mwh_3.append((index[0],index[1]+((day-1)*24),varobject[index].value,'PGE_bay','Coal',marginal_cost)) elif index[0] in instance.Oil: marginal_cost = seg3*20 mwh_3.append((index[0],index[1]+((day-1)*24),varobject[index].value,'PGE_bay','Oil',marginal_cost)) elif index[0] in instance.PSH: marginal_cost = 10 mwh_3.append((index[0],index[1]+((day-1)*24),varobject[index].value,'PGE_bay','PSH',marginal_cost)) elif index[0] in instance.Slack: marginal_cost = 700 mwh_3.append((index[0],index[1]+((day-1)*24),varobject[index].value,'PGE_bay','Slack',marginal_cost)) elif index[0] in instance.Zone3Generators: gas_price = instance.GasPrice['SCE'].value if index[0] in instance.Gas: marginal_cost = seg3*gas_price mwh_3.append((index[0],index[1]+((day-1)*24),varobject[index].value,'SCE','Gas',marginal_cost)) elif index[0] in instance.Coal: marginal_cost = seg3*2 mwh_3.append((index[0],index[1]+((day-1)*24),varobject[index].value,'SCE','Coal',marginal_cost)) elif index[0] in instance.Oil: marginal_cost = seg3*20 mwh_3.append((index[0],index[1]+((day-1)*24),varobject[index].value,'SCE','Oil',marginal_cost)) elif index[0] in instance.PSH: marginal_cost = 10 mwh_3.append((index[0],index[1]+((day-1)*24),varobject[index].value,'SCE','PSH',marginal_cost)) elif index[0] in instance.Slack: marginal_cost = 700 mwh_3.append((index[0],index[1]+((day-1)*24),varobject[index].value,'SCE','Slack',marginal_cost)) elif index[0] in instance.Hydro: marginal_cost = 0 mwh_3.append((index[0],index[1]+((day-1)*24),varobject[index].value,'SCE','Hydro',marginal_cost)) elif index[0] in instance.Zone4Generators: gas_price = instance.GasPrice['SDGE'].value if index[0] in instance.Gas: marginal_cost = seg3*gas_price 
# mwh_3 wrap-up (SDGE fuels, Slack, WECC import rows); then zone-tagged
# extraction of the 'on' commitment variable (record: generator, absolute
# hour, value, zone) and the opening of the 'switch' handler.
mwh_3.append((index[0],index[1]+((day-1)*24),varobject[index].value,'SDGE','Gas',marginal_cost)) elif index[0] in instance.Coal: marginal_cost = seg3*2 mwh_3.append((index[0],index[1]+((day-1)*24),varobject[index].value,'SDGE','Coal',marginal_cost)) elif index[0] in instance.Oil: marginal_cost = seg3*20 mwh_3.append((index[0],index[1]+((day-1)*24),varobject[index].value,'SDGE','Oil',marginal_cost)) elif index[0] in instance.PSH: marginal_cost = 10 mwh_3.append((index[0],index[1]+((day-1)*24),varobject[index].value,'SDGE','PSH',marginal_cost)) elif index[0] in instance.Slack: marginal_cost = 700 mwh_3.append((index[0],index[1]+((day-1)*24),varobject[index].value,'SDGE','Slack',marginal_cost)) elif index[0] in instance.WECCImportsSDGE: gas_price = instance.GasPrice['SDGE'].value marginal_cost = 14.5+2.76*gas_price mwh_3.append((index[0],index[1]+((day-1)*24),varobject[index].value,'SDGE','imports',marginal_cost)) elif index[0] in instance.WECCImportsSCE: gas_price = instance.GasPrice['SCE'].value marginal_cost = 14.5+2.76*gas_price mwh_3.append((index[0],index[1]+((day-1)*24),varobject[index].value,'SCE','imports',marginal_cost)) elif index[0] in instance.WECCImportsPGEV: marginal_cost = 5 mwh_3.append((index[0],index[1]+((day-1)*24),varobject[index].value,'PGE_valley','imports',marginal_cost)) if a=='on': for index in varobject: if int(index[1]>0 and index[1]<25): if index[0] in instance.Zone1Generators: on.append((index[0],index[1]+((day-1)*24),varobject[index].value,'PGE_valley')) elif index[0] in instance.Zone2Generators: on.append((index[0],index[1]+((day-1)*24),varobject[index].value,'PGE_bay')) elif index[0] in instance.Zone3Generators: on.append((index[0],index[1]+((day-1)*24),varobject[index].value,'SCE')) elif index[0] in instance.Zone4Generators: on.append((index[0],index[1]+((day-1)*24),varobject[index].value,'SDGE')) if a=='switch': for index in varobject: if int(index[1]>0 and index[1]<25): if index[0] in instance.Zone1Generators: 
# Zone-tagged extraction of 'switch', 'srsv' and 'nrsv' (presumably start-up
# indicator and spinning / non-spinning reserve -- TODO confirm against the
# model definition), then untagged 'solar' and 'wind' records and the start of
# the 'flow' handler, whose index is (source, sink, hour) with the hour filter
# applied to index[2].
switch.append((index[0],index[1]+((day-1)*24),varobject[index].value,'PGE_valley')) elif index[0] in instance.Zone2Generators: switch.append((index[0],index[1]+((day-1)*24),varobject[index].value,'PGE_bay')) elif index[0] in instance.Zone3Generators: switch.append((index[0],index[1]+((day-1)*24),varobject[index].value,'SCE')) elif index[0] in instance.Zone4Generators: switch.append((index[0],index[1]+((day-1)*24),varobject[index].value,'SDGE')) if a=='srsv': for index in varobject: if int(index[1]>0 and index[1]<25): if index[0] in instance.Zone1Generators: srsv.append((index[0],index[1]+((day-1)*24),varobject[index].value,'PGE_valley')) elif index[0] in instance.Zone2Generators: srsv.append((index[0],index[1]+((day-1)*24),varobject[index].value,'PGE_bay')) elif index[0] in instance.Zone3Generators: srsv.append((index[0],index[1]+((day-1)*24),varobject[index].value,'SCE')) elif index[0] in instance.Zone4Generators: srsv.append((index[0],index[1]+((day-1)*24),varobject[index].value,'SDGE')) if a=='nrsv': for index in varobject: if int(index[1]>0 and index[1]<25): if index[0] in instance.Zone1Generators: nrsv.append((index[0],index[1]+((day-1)*24),varobject[index].value,'PGE_valley')) elif index[0] in instance.Zone2Generators: nrsv.append((index[0],index[1]+((day-1)*24),varobject[index].value,'PGE_bay')) elif index[0] in instance.Zone3Generators: nrsv.append((index[0],index[1]+((day-1)*24),varobject[index].value,'SCE')) elif index[0] in instance.Zone4Generators: nrsv.append((index[0],index[1]+((day-1)*24),varobject[index].value,'SDGE')) if a=='solar': for index in varobject: if int(index[1]>0 and index[1]<25): solar.append((index[0],index[1]+((day-1)*24),varobject[index].value)) if a=='wind': for index in varobject: if int(index[1]>0 and index[1]<25): wind.append((index[0],index[1]+((day-1)*24),varobject[index].value)) if a=='flow': for index in varobject: if int(index[2]>0 and index[2]<25): 
# Final 'flow' append; then the end-of-day warm start: for each generator the
# hour-24 values of on / mwh_1 / mwh_2 / mwh_3 / switch / srsv / nrsv are
# copied into the fixed hour-0 slot for the next day's solve, with tiny
# negative solver residuals in (-0.0001, 0] snapped to 0.
# NOTE(review): `instance.on[j,24] == 1` (and the same for switch) compares
# the Pyomo Var object, not `.value` as the other variables do -- confirm this
# truth-tests as intended on the Pyomo version in use.
# After this loop `print(day)` logs progress and the accumulated lists are
# converted to labelled DataFrames.
flow.append((index[0],index[1],index[2]+((day-1)*24),varobject[index].value)) for j in instance.Generators: if instance.on[j,24] == 1: instance.on[j,0] = 1 else: instance.on[j,0] = 0 instance.on[j,0].fixed = True if instance.mwh_1[j,24].value <=0 and instance.mwh_1[j,24].value>= -0.0001: newval_1=0 else: newval_1=instance.mwh_1[j,24].value instance.mwh_1[j,0] = newval_1 instance.mwh_1[j,0].fixed = True if instance.mwh_2[j,24].value <=0 and instance.mwh_2[j,24].value>= -0.0001: newval=0 else: newval=instance.mwh_2[j,24].value if instance.mwh_3[j,24].value <=0 and instance.mwh_3[j,24].value>= -0.0001: newval2=0 else: newval2=instance.mwh_3[j,24].value instance.mwh_2[j,0] = newval instance.mwh_2[j,0].fixed = True instance.mwh_3[j,0] = newval2 instance.mwh_3[j,0].fixed = True if instance.switch[j,24] == 1: instance.switch[j,0] = 1 else: instance.switch[j,0] = 0 instance.switch[j,0].fixed = True if instance.srsv[j,24].value <=0 and instance.srsv[j,24].value>= -0.0001: newval_srsv=0 else: newval_srsv=instance.srsv[j,24].value instance.srsv[j,0] = newval_srsv instance.srsv[j,0].fixed = True if instance.nrsv[j,24].value <=0 and instance.nrsv[j,24].value>= -0.0001: newval_nrsv=0 else: newval_nrsv=instance.nrsv[j,24].value instance.nrsv[j,0] = newval_nrsv instance.nrsv[j,0].fixed = True print(day) mwh_1_pd=pd.DataFrame(mwh_1,columns=('Generator','Time','Value','Zones','Type','$/MWh')) mwh_2_pd=pd.DataFrame(mwh_2,columns=('Generator','Time','Value','Zones','Type','$/MWh')) mwh_3_pd=pd.DataFrame(mwh_3,columns=('Generator','Time','Value','Zones','Type','$/MWh')) on_pd=pd.DataFrame(on,columns=('Generator','Time','Value','Zones')) switch_pd=pd.DataFrame(switch,columns=('Generator','Time','Value','Zones')) srsv_pd=pd.DataFrame(srsv,columns=('Generator','Time','Value','Zones')) nrsv_pd=pd.DataFrame(nrsv,columns=('Generator','Time','Value','Zones')) solar_pd=pd.DataFrame(solar,columns=('Zone','Time','Value')) wind_pd=pd.DataFrame(wind,columns=('Zone','Time','Value')) 
# Remaining DataFrames ('flow' and the dual/shadow-price records accumulated
# in Duals, defined above this excerpt) and CSV export of every result table
# to the working directory; the enclosing function returns None.
flow_pd=pd.DataFrame(flow,columns=('Source','Sink','Time','Value')) shadow_price=pd.DataFrame(Duals,columns=('Constraint','Time','Value')) flow_pd.to_csv('flow.csv') mwh_1_pd.to_csv('mwh_1.csv') mwh_2_pd.to_csv('mwh_2.csv') mwh_3_pd.to_csv('mwh_3.csv') on_pd.to_csv('on.csv') switch_pd.to_csv('switch.csv') srsv_pd.to_csv('srsv.csv') nrsv_pd.to_csv('nrsv.csv') solar_pd.to_csv('solar_out.csv') wind_pd.to_csv('wind_out.csv') shadow_price.to_csv('shadow_price.csv') return None