Example #1
File: base.py Project: Pyomo/pyomo
    def solve(self,
              solver,
              io,
              io_options,
              solver_options,
              symbolic_labels,
              load_solutions):
        """ Optimize the model """
        assert self.model is not None

        opt = SolverFactory(solver, solver_io=io)
        opt.options.update(solver_options)

        if io == 'nl':
            assert opt.problem_format() == ProblemFormat.nl
        elif io == 'lp':
            assert opt.problem_format() == ProblemFormat.cpxlp
        elif io == 'mps':
            assert opt.problem_format() == ProblemFormat.mps
        #elif io == 'python':
        #    print opt.problem_format()
        #    assert opt.problem_format() is None

        try:
            if isinstance(opt, PersistentSolver):
                opt.set_instance(self.model, symbolic_solver_labels=symbolic_labels)
                if opt.warm_start_capable():
                    results = opt.solve(warmstart=True,
                                        load_solutions=load_solutions,
                                        **io_options)
                else:
                    results = opt.solve(load_solutions=load_solutions,
                                        **io_options)
            else:
                if opt.warm_start_capable():
                    results = opt.solve(
                        self.model,
                        symbolic_solver_labels=symbolic_labels,
                        warmstart=True,
                        load_solutions=load_solutions,
                        **io_options)
                else:
                    results = opt.solve(
                        self.model,
                        symbolic_solver_labels=symbolic_labels,
                        load_solutions=load_solutions,
                        **io_options)

            return opt, results
        finally:
            pass
            #opt.deactivate()
        del opt
        return None, None
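The helper above switches between the persistent and the file-based solver interfaces. A minimal standalone sketch of the persistent branch (not from the Pyomo test suite; it assumes Gurobi and its persistent plugin are installed):
# Minimal sketch of the persistent-solver path used above. The model is
# registered once with set_instance() and solve() is then called without
# passing the model again.
from pyomo.environ import ConcreteModel, Var, Objective, minimize
from pyomo.opt import SolverFactory

m = ConcreteModel()
m.x = Var(bounds=(0, 10))
m.obj = Objective(expr=m.x, sense=minimize)

opt = SolverFactory('gurobi_persistent')
opt.set_instance(m, symbolic_solver_labels=True)
results = opt.solve(load_solutions=True)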
Example #2
 def solve(self):
     opt = SolverFactory("cplex")
     model = self._model
     model.dual.clearValue()
     model.load(opt.solve(model))#,keepfiles=True,symbolic_solver_labels=True,tee=True))
     self._solved = True
     self._update_tree_node_xbars()
def solve_instance(instance):
    solver = SolverFactory('couenne')

    results = solver.solve(instance, tee=True)
    instance.solutions.load_from(results)

    return instance
 def solve_optimization_period(self, period, return_model_instance=False):
     model = dispatch_formulation.create_dispatch_model(self, period)
     instance = model.create_instance(report_timing=False) # report_timing=True used to try to make this step faster
     solver = SolverFactory(cfg.solver_name)
     solution = solver.solve(instance)
     instance.solutions.load_from(solution)
     return instance if return_model_instance else all_results_to_list(instance)
Example #5
    def solveModel(self, x, y, z):
        model = self.model
        opt = SolverFactory(self.config.solver)
        opt.options.update(self.config.solver_options)

        results = opt.solve(
            model, keepfiles=self.keepfiles, tee=self.stream_solver)

        if ((results.solver.status == SolverStatus.ok)
                and (results.solver.termination_condition == TerminationCondition.optimal)):
            model.solutions.load_from(results)
            for i in range(0, self.lx):
                x[i] = value(self.TRF.xvars[i])
            for i in range(0, self.ly):
                y[i] = value(self.TRF.y[i+1])
            for i in range(0, self.lz):
                z[i] = value(self.TRF.zvars[i])

            for obj in model.component_data_objects(Objective,active=True):
                return True, obj()

        else:
            print("Waring: solver Status: " + str(results.solver.status))
            print("And Termination Conditions: " + str(results.solver.termination_condition))
            return False, 0
def run_problem(purchases, sales, stella_correction, jammies_correction):
    opt = SolverFactory('glpk')

    (number_corr, price_corr, model, dual_model) = make_model(purchases,sales,stella_correction,jammies_correction)

    results = opt.solve(model)

    output = []
    solutions = results.get('Solution', [])
    if len(solutions) > 0:
        model.load(results)
        for (p,s) in model.pairings:
            ct = model.selected[p,s].value
            if ct > 0:
                output.append((purchases[p-1], sales[s-1], float(ct) / number_corr))


    ret = dict(pairs=output, full_result=results.json_repn())


    if results.solver.status == SolverStatus.ok:
        if results.solver.termination_condition == TerminationCondition.optimal:
            ret['status'] = "optimal"
            # the following procedure for getting the value is right from
            # the coopr source itself...
            key = results.solution.objective.keys()[0]
            ret['value'] = float(results.solution.objective[key].value) / price_corr / number_corr
            collect_dual(**locals())
        else:
            ret['status'] = "not solved"
    else:
        ret['status'] = "solver error"

    return ret
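The snippet above uses the legacy Coopr-era results API (results.get('Solution', []), model.load(results), results.json_repn()). A rough sketch of the equivalent status handling in current Pyomo, keeping the same variable names:
# Sketch of the modern equivalent of the check above (assumes SolverStatus,
# TerminationCondition and the model/opt objects defined in run_problem).
results = opt.solve(model, load_solutions=False)
output = []
if (results.solver.status == SolverStatus.ok and
        results.solver.termination_condition == TerminationCondition.optimal):
    model.solutions.load_from(results)
    for (p, s) in model.pairings:
        ct = model.selected[p, s].value
        if ct > 0:
            output.append((purchases[p-1], sales[s-1], float(ct) / number_corr))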
Example #7
def calculateSharesQ(expressionVars, relationSizes, reducerCapacity):
    """ Use the MINLP solver to calculate the shares of attribute variables

    input   expressionVars  A list of lists of expression vars
                            ex. [[[3], [1], [2]]
            relationSizes A list ex. [1000, 1000, 1000]
            numberReducers an integer ex. 32

    output (shares, com_cost) Two outputs.
            shares First argument is the shares DICT !! unordered
                    ex. {'1':2, '2': 1, '3': 16}
            com_cost The objective function's value give the shares
                    ex. 2600000
    """
    # print expressionVars
    uniqueVars = getUniqueExpressionVars(expressionVars)
    print(uniqueVars)
    shares = {}
    # if sum(relationSizes) < reducerCapacity*10:
    # skew_share = int(pow(np.prod(relationSizes)/100000 , 1.0/len(uniqueVars)))
    # shares = {str(var): skew_share for var in uniqueVars}
    # shares = {str(var): 1 for var in uniqueVars}

    # com_cost = sum(relationSizes)
    # return (shares, com_cost, com_cost/np.prod(shares.values()))
    # reducerCapacity = 100000

    objectiveExpression = constructObjective(expressionVars, relationSizes)
    print(objectiveExpression)
    budgetExpression_UB = constructCapacityConstraintUB(
        expressionVars, objectiveExpression, reducerCapacity)
    budgetExpression_LB = constructCapacityConstraintLB(
        expressionVars, objectiveExpression, reducerCapacity)

    # Create a solver factory using Couenne
    opt = SolverFactory('couenne')
    model = ConcreteModel()
    model.x = Var(uniqueVars, domain=PositiveIntegers)
    model.OBJ = Objective(expr=eval(objectiveExpression))
    model.Constraint1 = Constraint(expr=eval(budgetExpression_UB))
    # model.Constraint2 = Constraint(expr=eval(budgetExpression_LB))
    # Create a model instance and optimize
    instance = model.create_instance()
    results = opt.solve(instance)
    instance.display()
    # Save calculated shares
    for v in instance.component_objects(Var, active=True):
        varobject = getattr(instance, str(v))
        for index in varobject:
            # Round 2.999->3
            shares[str(varobject[index])[2:-1]
                   ] = (int(round(varobject[index].value)))
    # Save communication cost
    for o in instance.component_objects(Objective, active=True):
        oobject = getattr(instance, str(o))
        for idx in oobject:
            com_cost = value(oobject[idx])
    return (shares, com_cost, com_cost/np.prod(shares.values()))
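calculateSharesQ builds its objective and constraint as strings and eval()s them into the model. A small sketch of building the same kind of expression directly with Pyomo operators instead (the relation sizes, variable index set and reducer count below are made-up placeholders):
# Sketch: construct a share-optimization model without eval(). All numbers and
# index names here are placeholders, not values from the original code.
from pyomo.environ import ConcreteModel, Var, Objective, Constraint, PositiveIntegers

model = ConcreteModel()
model.x = Var(['1', '2', '3'], domain=PositiveIntegers)
relation_sizes = [1000, 1000, 1000]
# communication cost of a 3-way join: each relation is replicated across the
# shares of the attributes it does not contain
model.OBJ = Objective(expr=relation_sizes[0] * model.x['2'] * model.x['3']
                         + relation_sizes[1] * model.x['1'] * model.x['3']
                         + relation_sizes[2] * model.x['1'] * model.x['2'])
# product of shares bounded by an assumed number of reducers
model.Reducers = Constraint(expr=model.x['1'] * model.x['2'] * model.x['3'] <= 32)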
Example #8
def Model_Resolution(model,datapath="Example/data.dat"):   
    '''
    This function creates the model and calls Pyomo to solve the instance of the project.

    :param model: Pyomo model as defined in the Model_creation library
    :param datapath: path to the input data file

    :return: The solution inside an object called instance.
    '''
    
    from Constraints import  Net_Present_Cost, Solar_Energy,State_of_Charge,\
    Maximun_Charge, Minimun_Charge, Max_Power_Battery_Charge, Max_Power_Battery_Discharge, Max_Bat_in, Max_Bat_out, \
    Financial_Cost, Energy_balance, Maximun_Lost_Load,Scenario_Net_Present_Cost, Scenario_Lost_Load_Cost, \
    Initial_Inversion, Operation_Maintenance_Cost, Total_Finalcial_Cost, Battery_Reposition_Cost, Maximun_Diesel_Energy, Diesel_Comsuption,Diesel_Cost_Total
    
    
    # OBJECTIVE FUNCTION:
    model.ObjectiveFuntion = Objective(rule=Net_Present_Cost, sense=minimize)  
    
    # CONSTRAINTS
    #Energy constraints
    model.EnergyBalance = Constraint(model.scenario,model.periods, rule=Energy_balance)
    model.MaximunLostLoad = Constraint(model.scenario, rule=Maximun_Lost_Load) # Maximum permissible lost load
    model.ScenarioLostLoadCost = Constraint(model.scenario, rule=Scenario_Lost_Load_Cost)

    # PV constraints
    model.SolarEnergy = Constraint(model.scenario, model.periods, rule=Solar_Energy)  # Energy output of the solar panels
    # Battery constraints
    model.StateOfCharge = Constraint(model.scenario, model.periods, rule=State_of_Charge) # State of Charge of the battery
    model.MaximunCharge = Constraint(model.scenario, model.periods, rule=Maximun_Charge) # Maximum state of charge of the battery
    model.MinimunCharge = Constraint(model.scenario, model.periods, rule=Minimun_Charge) # Minimum state of charge
    model.MaxPowerBatteryCharge = Constraint(rule=Max_Power_Battery_Charge)  # Max power battery charge constraint
    model.MaxPowerBatteryDischarge = Constraint(rule=Max_Power_Battery_Discharge)    # Max power battery discharge constraint
    model.MaxBatIn = Constraint(model.scenario, model.periods, rule=Max_Bat_in) # Minimum flow of energy for the charge phase
    model.Maxbatout = Constraint(model.scenario, model.periods, rule=Max_Bat_out) # Minimum flow of energy for the discharge phase

    # Diesel Generator constraints
    model.MaximunDieselEnergy = Constraint(model.scenario, model.periods, rule=Maximun_Diesel_Energy) # Maximum energy output of the diesel generator
    model.DieselComsuption = Constraint(model.scenario, model.periods, rule=Diesel_Comsuption)    # Diesel consumption
    model.DieselCostTotal = Constraint(model.scenario, rule=Diesel_Cost_Total)
    
    # Financial Constraints
    model.FinancialCost = Constraint(rule=Financial_Cost) # Financial cost
    model.ScenarioNetPresentCost = Constraint(model.scenario, rule=Scenario_Net_Present_Cost)    
    model.InitialInversion = Constraint(rule=Initial_Inversion)
    model.OperationMaintenanceCost = Constraint(rule=Operation_Maintenance_Cost)
    model.TotalFinalcialCost = Constraint(rule=Total_Finalcial_Cost)
    model.BatteryRepositionCost = Constraint(rule=Battery_Reposition_Cost) 

    
    instance = model.create_instance(datapath) # load parameters       
    opt = SolverFactory('cplex') # Solver used during the optimization
    results = opt.solve(instance, tee=True) # Solving a model instance 
    instance.solutions.load_from(results)  # Loading solution into instance
    return instance
 def test_instance_constraints(model):
     instance = model.create_instance(report_timing=False)        
     for c in instance.component_objects(Constraint):
         c.activate()
         solver = SolverFactory(cfg.solver_name)
         solution = solver.solve(instance)
         if solution.solver.termination_condition == TerminationCondition.infeasible:
             pass
         else:
             print(c.name)
             c.activate()
def run_model (input_data_file):
    list=[]
    insts=[]
    opt = SolverFactory("glpk")
    instance=model.create(input_data_file)
    res = opt.solve(instance)
    instance.load(res)
    list.append(res)
    insts.append(instance)
    print(res)
    return list, insts
    def solve(self, solver='glpk', solver_io='lp', debug=False,
              duals=False, **kwargs):
        """ Method that takes care of the communication with the solver
        to solve the optimization model

        Parameters
        ----------

        self : pyomo.ConcreteModel
        solver : str
            Solver to be used, e.g. 'glpk', 'gurobi', 'cplex'
        solver_io : str
            Defines the solver interaction (file or interface):
            'lp', 'nl', 'python'
        **kwargs :
            Other arguments for the pyomo.opt.SolverFactory.solve() method

        Returns
        -------
        self : solved pyomo.ConcreteModel() instance
        """

        from pyomo.opt import SolverFactory
        # Create a 'dual' suffix component on the instance
        # so the solver plugin will know which suffixes to collect
        if duals is True:
            # dual variables (= shadow prices)
            self.dual = po.Suffix(direction=po.Suffix.IMPORT)
            # reduced costs
            self.rc = po.Suffix(direction=po.Suffix.IMPORT)
        # write lp-file
        if debug == True:
            self.write('problem.lp',
                       io_options={'symbolic_solver_labels': True})
            # print instance
            # instance.pprint()

        # solve instance
        opt = SolverFactory(solver, solver_io=solver_io)
        # store results
        results = opt.solve(self, **kwargs)
        if debug == True:
            if (results.solver.status == "ok") and \
               (results.solver.termination_condition == "optimal"):
                # Do something when the solution in optimal and feasible
                self.solutions.load_from(results)

            elif (results.solver.termination_condition == "infeasible"):
                print("Model is infeasible",
                      "Solver Status: ", results.solver.status)
            else:
                # Something else is wrong
                print("Solver Status: ", results.solver.status, "\n"
                      "Termination condition: ",
                      results.solver.termination_condition)
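When duals=True the method above attaches IMPORT suffixes before solving. A tiny self-contained sketch of reading a shadow price that way (the model and constraint names are made up; glpk is assumed to be available):
# Sketch: declare an IMPORT dual suffix, solve an LP, and read a shadow price.
import pyomo.environ as po

m = po.ConcreteModel()
m.x = po.Var(within=po.NonNegativeReals)
m.demand = po.Constraint(expr=m.x >= 3)
m.obj = po.Objective(expr=2 * m.x)
m.dual = po.Suffix(direction=po.Suffix.IMPORT)

po.SolverFactory('glpk').solve(m)
print(m.dual[m.demand])   # dual (shadow price) of the demand constraint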
Example #12
def main():

    # create the empty list of cuts to start
    cut_on = []
    cut_off = []

    done = False
    while not done:
        model = create_sudoku_model(cut_on, cut_off, board)

        # options = Options()
        # options.solver = 'glpk'
        # options.quiet = True
        # options.tee = True

        # results, opt = util.apply_optimizer(options, model)
        # instance.load(results)

        ## SOLVE ##
        opt = SolverFactory('glpk')

        # create model instance, solve
        # instance = model.create_instance()
        results = opt.solve(model)
        model.solutions.load_from(results)

        if str(results.Solution.Status) != 'optimal':
            break

        # add cuts
        new_cut_on = []
        new_cut_off = []
        for r in model.ROWS:
            for c in model.COLS:
                for v in model.VALUES:
                    # check if the binary variable is on or off
                    # note, it may not be exactly 1
                    if value(model.y[r,c,v]) >= 0.5:
                        new_cut_on.append((r,c,v))
                    else:
                        new_cut_off.append((r,c,v))

        cut_on.append(new_cut_on)
        cut_off.append(new_cut_off)

        print "Solution #" + str(len(cut_on))
        for i in xrange(1,10):
            for j in xrange(1,10):
                for v in xrange(1,10):
                    if value(model.y[i,j,v]) >= 0.5:
                        print v, " ",
            print
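create_sudoku_model is not shown, but the cut_on/cut_off lists collected above are normally turned into "no-good" integer cuts inside it. A generic sketch of such a cut rule (my reconstruction, not the original function):
# Generic "no-good" cut: exclude one previously found 0/1 assignment.
# cut_on[k] / cut_off[k] are the lists of (row, col, value) index tuples
# collected in the loop above; model.y is the binary assignment variable.
def no_good_cut_rule(model, k):
    return (sum(1 - model.y[r, c, v] for (r, c, v) in cut_on[k])
            + sum(model.y[r, c, v] for (r, c, v) in cut_off[k])) >= 1
# inside create_sudoku_model one would then add something like:
# model.no_good = Constraint(range(len(cut_on)), rule=no_good_cut_rule)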
    def _populate_bundle_dual_master_model(self, ph):

        current_iteration = ph._current_iteration

        # first step is to update the historical information from PH

        for scenario in ph._scenario_tree._scenarios:
            primal_objective_value = scenario._objective
            self._past_objective_values[(current_iteration, scenario._name)] = primal_objective_value

#        print "PAST OBJECTIVE FUNCTION VALUES=",self._past_objective_values

        assert current_iteration not in self._past_var_values
        iter_var_values = self._past_var_values[current_iteration] = {}
        for scenario in ph._scenario_tree._scenarios:
            iter_var_values[scenario._name] = copy.deepcopy(scenario._x)

#        print "PAST VAR VALUES=",self._past_var_values

        # propagate PH parameters to concrete model and re-preprocess.
        for scenario in ph._scenario_tree._scenarios:
            for tree_node in scenario._node_list[:-1]:
                new_w_k_parameter_name = \
                    "WDATA_"+str(tree_node._name)+"_"+str(scenario._name)+"_K"
                w_k_parameter = \
                    self._master_model.find_component(new_w_k_parameter_name)
                ph_weights = scenario._w[tree_node._name]

                for idx in w_k_parameter:
                    w_k_parameter[idx] = ph_weights[idx]

        # V bounds are per-variable, per-iteration
        for scenario in ph._scenario_tree._scenarios:
            scenario_name = scenario._name
            v_var = getattr(self._master_model, "V_"+str(scenario_name))
            expr = self._past_objective_values[(current_iteration, scenario_name)]
            for tree_node in scenario._node_list[:-1]:
                new_w_variable_name = "WVAR_"+str(tree_node._name)+"_"+str(scenario_name)
                w_variable = self._master_model.find_component(new_w_variable_name)
                expr += sum(iter_var_values[scenario_name][tree_node._name][var_id] * w_variable[var_id] for var_id in w_variable)

            self._master_model.V_Bound.add(v_var <= expr)

#        print "V_BOUNDS CONSTRAINT:"
#        self._master_model.V_Bound.pprint()


        solver = SolverFactory("cplex")
        results=solver.solve(self._master_model,tee=False,load_solutions=False)
        self._master_model.solutions.load_from(results)
Example #14
def Model_Resolution_Dispatch(model,datapath="Example/data_Dispatch.dat"):   
    '''
    This function creates the model and calls Pyomo to solve the instance of the project.

    :param model: Pyomo model as defined in the Model_creation library

    :return: The solution inside an object called instance.
    '''
    from Constraints_Dispatch import  Net_Present_Cost,  State_of_Charge, Maximun_Charge, \
    Minimun_Charge, Max_Bat_in, Max_Bat_out, \
    Energy_balance, Maximun_Lost_Load, Generator_Cost_1_Integer,  \
    Total_Cost_Generator_Integer, \
    Scenario_Lost_Load_Cost, \
     Generator_Bounds_Min_Integer, Generator_Bounds_Max_Integer,Energy_Genarator_Energy_Max_Integer

    # OBJECTIVE FUNCTION:
    model.ObjectiveFuntion = Objective(rule=Net_Present_Cost, sense=minimize)  
    
    # CONSTRAINTS
    #Energy constraints
    model.EnergyBalance = Constraint(model.periods, rule=Energy_balance)  # Energy balance
    model.MaximunLostLoad = Constraint(rule=Maximun_Lost_Load) # Maximum permissible lost load
    
    # Battery constraints
    model.StateOfCharge = Constraint(model.periods, rule=State_of_Charge) # State of Charge of the battery
    model.MaximunCharge = Constraint(model.periods, rule=Maximun_Charge) # Maximum state of charge of the battery
    model.MinimunCharge = Constraint(model.periods, rule=Minimun_Charge) # Minimum state of charge
    model.MaxBatIn = Constraint(model.periods, rule=Max_Bat_in) # Minimum flow of energy for the charge phase
    model.Maxbatout = Constraint(model.periods, rule=Max_Bat_out) # Minimum flow of energy for the discharge phase
   
    #Diesel Generator constraints
    model.GeneratorBoundsMin = Constraint(model.periods, rule=Generator_Bounds_Min_Integer) 
    model.GeneratorBoundsMax = Constraint(model.periods, rule=Generator_Bounds_Max_Integer)
    model.GeneratorCost1 = Constraint(model.periods,  rule=Generator_Cost_1_Integer)
    model.EnergyGenaratorEnergyMax = Constraint(model.periods, rule=Energy_Genarator_Energy_Max_Integer)
    model.TotalCostGenerator = Constraint(rule=Total_Cost_Generator_Integer)
    
    # Financial Constraints
    model.ScenarioLostLoadCost = Constraint(rule=Scenario_Lost_Load_Cost)
    
    instance = model.create_instance("Example/data_dispatch.dat") # load parameters       
    opt = SolverFactory('cplex') # Solver use during the optimization    
#    opt.options['emphasis_memory'] = 'y'
#    opt.options['node_select'] = 3
    results = opt.solve(instance, tee=True,options_string="mipgap=0.03") # Solving a model instance 

    #    instance.write(io_options={'emphasis_memory':True})
    #options_string="mipgap=0.03", timelimit=1200
    instance.solutions.load_from(results) # Loading solution into instance
    return instance
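options_string is one way to pass the CPLEX mipgap above; the same thing is often done through opt.options (a sketch; the exact option name depends on which CPLEX interface Pyomo uses):
# Alternative (sketch): set the relative MIP gap via the options dictionary
# instead of options_string. 'mipgap' is the name accepted by the CPLEX
# command-line interface; other interfaces may spell it differently.
opt = SolverFactory('cplex')
opt.options['mipgap'] = 0.03
results = opt.solve(instance, tee=True)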
 def run_pyomo(self, model, data, **kwargs):
     """
     Pyomo optimization steps: create model instance from model formulation and data,
     get solver, solve instance, and load solution.
     """
     logging.debug("Creating model instance...")
     instance = model.create_instance(data)
     logging.debug("Getting solver...")
     solver = SolverFactory(cfg.solver_name)
     logging.debug("Solving...")
     solution = solver.solve(instance, **kwargs)
     logging.debug("Loading solution...")
     instance.solutions.load_from(solution)
     return instance
Example #16
    def run(self, input_file):        
        opt = SolverFactory("glpk")
        list=[]
        list_=[]
        instances=[]
        self.model.current_time_step.add(1)
        instance=self.model.create_instance(input_file)
        for comp in instance.component_objects():
            if str(comp) == "time_step":
                parmobject = getattr(instance, str(comp))
                for vv in parmobject.value:
                      list_.append(vv)
        instance =self.model.create_instance(input_file)
        storage={}
        demand_nodes=get_demand_nodes_list(instance)


        for vv in list_:
            ##################
            self.cu_timp=vv
            self.model.current_time_step.clear()
            #self.model.preprocess()
            self.model.current_time_step.add(vv)
            #self.model.preprocess()
            instance=self.model.create_instance(input_file)

            if(len(storage)>0):
                set_initial_storage(instance, storage)
                self.model.preprocess()
                instance.preprocess()
            else:
                instance.preprocess()
            res=opt.solve(instance)
            instance.solutions.load_from(res)
            instance.preprocess()
            storage=get_storage(instance)
            set_delivery(instance, demand_nodes, vv)
            instance.solutions.load_from(res)
            instances.append(instance)
            list.append(res)
            count=1
        for res in instances:
            print " ========= Time step:  %s =========="%count
            self.display_variables(res)
            count+=1
        return  list, instances
Example #17
def run_model(datafile):
    print "==== Running the model ===="
    opt = SolverFactory("cplex")
    list = []
    list_ = []
    model.current_time_step.add(1)
    instance = model.create_instance(datafile)
    ## determine the time steps
    for comp in instance.component_objects():
        if str(comp) == "time_step":
            parmobject = getattr(instance, str(comp))
            for vv in parmobject.value:
                list_.append(vv)
    storage = {}
    insts = []

    for vv in list_:
        model.current_time_step.clear()
        model.current_time_step.add(vv)
        print "Running for time step: ", vv

        instance = model.create_instance(datafile)
        # update initial storage value from previous storage
        if len(storage) > 0:
            set_initial_storage(instance, storage)
            instance.preprocess()

        res=opt.solve(instance)  
        instance.solutions.load_from(res)
        set_post_process_variables(instance)
        insts.append(instance)
        storage=get_storage(instance)
        list.append(res)
        print "-------------------------"
    count=1
    for res in list:
        print " ========= Time step:  %s =========="%count
        print res
        count+=1
    count=1

    for inst in insts:
        print " ========= Time step:  %s =========="%count
        display_variables(inst)
        count+=1
    return list, insts
Example #18
    def solve(self, solver="glpk", solver_io="lp", **kwargs):
        r""" Takes care of communication with solver to solve the model.

        Parameters
        ----------
        solver : string
            solver to be used e.g. "glpk","gurobi","cplex"
        solver_io : string
            pyomo solver interface file format: "lp","python","nl", etc.
        \**kwargs : keyword arguments
            Possible keys that can be set are listed below:

        Other Parameters
        ----------------
        solve_kwargs : dict
            Other arguments for the pyomo.opt.SolverFactory.solve() method
            Example : {"tee":True}
        cmdline_options : dict
            Dictionary with command line options for solver e.g.
            {"mipgap":"0.01"} results in "--mipgap 0.01"
            {"interior":" "} results in "--interior"
            Gurobi solver takes numeric parameter values such as
            {"method": 2}

        """
        solve_kwargs = kwargs.get("solve_kwargs", {})
        solver_cmdline_options = kwargs.get("cmdline_options", {})

        opt = SolverFactory(solver, solver_io=solver_io)
        # set command line options
        options = opt.options
        for k in solver_cmdline_options:
            options[k] = solver_cmdline_options[k]

        results = opt.solve(self, **solve_kwargs)

        self.solutions.load_from(results)

        # storage optimization results in result dictionary of energysystem
        self.es.results = self.results()
        self.es.results.objective = self.objective()
        self.es.results.solver = results

        return results
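A short usage sketch of the solve() method above, following its own docstring (the object name om is hypothetical):
# Usage sketch based on the docstring above; 'om' stands for an instance of the
# class defining solve(). cmdline_options is turned into solver command-line
# flags, solve_kwargs is forwarded to SolverFactory.solve().
om.solve(solver="glpk",
         solver_io="lp",
         solve_kwargs={"tee": True},
         cmdline_options={"mipgap": "0.01"})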
def schedule_exams(data, solver_name="gurobi", n_cliques=0, print_results=False):
    
    optimizer = SolverFactory(solver_name)
    
    if solver_name == "gurobi":
        optimizer.options["threads"] = 1
        optimizer.options["--solver-suffixes"] = ".*"
    print(optimizer.options)
    
    t = time()
    instance = build_model(data, n_cliques = n_cliques)        
    print("Solving...")
    results = optimizer.solve(instance)
    t = time() - t   
    
    instance.solutions.load_from(results)
    
    is_integral = is_integer_solution(instance)
    if is_integral:
        print "All integer solution!"
    
    y = {}
    for (i,l) in instance.NxP:
        if is_integral:
            y[i,l] = int(instance.y[i,l].value)
        else:
            y[i,l] = instance.y[i,l].value
        
    x = {}
    for (i,k,l) in instance.NxRxP:
        if y[i,l] == 1:
            if is_integral:
                x[i,k] = int(instance.x[i,k,l].value)
            else:
                x[i,k] = instance.x[i,k,l].value
        
    objVal = instance.OBJ_v(instance)
    
    if print_results:
        print(instance.display())
    print (results)
        
    return instance, x, y, objVal, t
def solve(model):

    # solve
    # calls the GLPK solver and finds solution
    #
    # Inputs:   model   -   Pyomo model object
    # Outputs:  x       -   binary |E|x1 solution vector that is 1 if the
    #                       row is in the solution and 0 otherwise.

    # This is an optional code path that allows the script to be
    # run outside of Pyomo command-line.  For example:  python transport.py
    # This replicates what the Pyomo command-line tools does
    from pyomo.opt import SolverFactory
    opt = SolverFactory("glpk")
    results = opt.solve(model)

    # save results
    model.solutions.load_from(results)
    x = model.x._data

    return x
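model.x._data hands back the internal variable objects; if plain numbers are wanted, pulling .value per index is the usual pattern (a short sketch, not part of the original script):
# Sketch: extract plain 0/1 values instead of returning the internal _data mapping.
x_values = {index: model.x[index].value for index in model.x}
selected_rows = [index for index, val in x_values.items() if val and val >= 0.5]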
Example #21
def schedule_pyomo():
    num = 10

    at = [randint(0, 100) for i in range(num)]
    length = [randint(2, 5) for i in range(num)]
    wt = [randint(1, 6) for i in range(num)]

    model = AbstractModel("schedule")
    model.n = Param(default=num)
    model.i = RangeSet(0, model.n-1)
    model.st = Var(model.i, domain=NonNegativeIntegers, bounds=(0, model.n-1))
    et = [model.st[i]+length[i] for i in range(num)]
    lt = [max(et[i]-at[i], 0) for i in range(num)]

    # def obj_rule(model, length, at, num):
    def obj_rule(model):
    # a = [[length[model.rank[x]] for x in xrange(i)] for i in xrange(num)]
    # st = [sum(a[i]) for i in xrange(num)]
    # et = [st[i]+length[i] for i in xrange(num)]
    # lt = [max(et[i]-at[i], 0) for i in xrange(num)]
    # return lt
        return sum([max((model.st[i]+length[i]-at[i]), 0) for i in range(10)])
    model.obj = Objective(rule=obj_rule, sense=minimize)


    def c1_rule(model, j):
        # a = [[length[model.rank[x]] for x in xrange(i)] for i in xrange(num)]
        # st = [sum(a[i]) for i in xrange(num)]
        return lt[j] - wt[j] <= 0

    model.c1 = Constraint(model.i, rule=c1_rule)

    opt = SolverFactory("glpk")
    instance = model.create()
    result = opt.solve(instance)
    instance.load(result)
    print(result.solution[0].status)
Example #22
def solve():
    if instance is None:
        raise RuntimeError("instance is not initialized; load_inputs() or load_dat_inputs() must be called before solve().")
    # can be accessed from interactive prompt via import ReferenceModel; ReferenceModel.solve()
    print "solving model..."
    opt = SolverFactory("cplex")# , solver_io="nl")
    # tell cplex to find an irreducible infeasible set (and report it)
    # opt.options['iisfind'] = 1

    # relax the integrality constraints, to allow commitment constraints to match up with 
    # number of units available
    # opt.options['mipgap'] = 0.001
    # # display more information during solve
    # opt.options['display'] = 1
    # opt.options['bardisplay'] = 1
    # opt.options['mipdisplay'] = 1
    # opt.options['primalopt'] = ""   # this is how you specify single-word arguments
    # opt.options['advance'] = 2
    # # opt.options['threads'] = 1
    # opt.options['parallelmode'] = -1    # 1=opportunistic, 0 or 1=deterministic

    start = time.time()
    results = opt.solve(instance, keepfiles=False, tee=True, 
        symbolic_solver_labels=True, suffixes=['dual', 'rc', 'urc', 'lrc'])
    print "Total time in solver: {t}s".format(t=time.time()-start)

    instance.solutions.load_from(results)

    if results.solver.termination_condition == TerminationCondition.infeasible:
        print "Model was infeasible; Irreducible Infeasible Set (IIS) returned by solver:"
        print "\n".join(c.cname() for c in instance.iis)
        raise RuntimeError("Infeasible model")

    print "\n\n======================================================="
    print "Solved model"
    print "======================================================="
    print "Total cost: ${v:,.0f}".format(v=value(instance.Minimize_System_Cost))
Example #23
def run_pyomo_optimization(model, data, solver_name, stdout_detail, **kwargs):
    """
    Pyomo optimization steps: create model instance from model formulation and data,
    get solver, solve instance, and load solution.
    :param model:
    :param data:
    :param solver_name:
    :param stdout_detail:
    :param kwargs:
    :return: instance
    """
    if stdout_detail:
        print("Creating model instance...")
    instance = model.create_instance(data)
    if stdout_detail:
        print("Getting solver...")
    solver = SolverFactory(solver_name)
    if stdout_detail:
        print("Solving...")
    solution = solver.solve(instance, **kwargs)
    if stdout_detail:
        print("Loading solution...")
    instance.solutions.load_from(solution)
    return instance
model.c3.pprint()

# constraint 4
model.c4 = pyo.Constraint(expr = x[5] + 2*y >= 30)
# display balance
print('\nconstraint 4:')
model.c4.pprint()


# objective function
model.obj = pyo.Objective(expr = sum([x[i] for i in range(1,6,1)])+y, sense=minimize)

# select solver
opt = SolverFactory('cbc')
# launch solver
opt.solve(model)

# display
print('\nmodel:') 
model.pprint()

# get optimal results and display
print(f'\n{"-"*50}')
for i in range(1,6):
    print('x[%i] = %i' % (i, pyo.value(x[i])))
print('y = %.2f' % pyo.value(y))
print('Obj = ', pyo.value(model.obj))
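The CBC snippet above reads variable values immediately after opt.solve(model). A hedged sketch of checking the returned results object first, using the same pyo namespace:
# Optional guard (sketch): check solver status before reading values.
from pyomo.opt import SolverStatus, TerminationCondition

results = opt.solve(model)
if (results.solver.status == SolverStatus.ok and
        results.solver.termination_condition == TerminationCondition.optimal):
    print('Obj =', pyo.value(model.obj))
else:
    print('Solver finished with:', results.solver.termination_condition)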



Example #25
class TestCBCUsingMock(unittest.TestCase):
    """
    These tests cover various abnormal exit conditions from CBC (notably various time or solution limits).
    In order to be able to reliably compare solutions on different platforms, we will compare against cached solutions.
    Effectively we are just testing how we parse the output files.
    The files were produced by using the following configuration:
    macOS Mojave Version 10.14.3
    CBC Version 2.9.9
    appdirs==1.4.3
    certifi==2018.11.29
    nose==1.3.7
    numpy==1.16.1
    ply==3.11
    Pyomo==5.6.1
    PyUtilib==5.6.5
    six==1.12.0


    n = 19
    np.random.seed(42)
    distance_matrix = np.random.rand(n, n)
    model = ConcreteModel()
    model.N = RangeSet(n)
    model.c = Param(model.N, model.N, initialize=lambda a_model, i, j: distance_matrix[i - 1][j - 1])
    model.x = Var(model.N, model.N, within=Binary)
    model.u = Var(model.N, within=NonNegativeReals)

    # Remove arcs that go to and from same node
    for n in model.N:
        model.x[n, n].fix(0)

    def obj_rule(a_model):
        return sum(a_model.c[i, j] * a_model.x[i, j] for i in a_model.N for j in a_model.N)

    model.obj = Objective(rule=obj_rule, sense=minimize)

    def only_leave_each_node_once_constraints(a_model, i):
        return sum(a_model.x[i, j] for j in a_model.N) == 1

    def only_arrive_at_each_node_once_constraints(a_model, j):
        return sum(a_model.x[i, j] for i in a_model.N) == 1

    def miller_tucker_zemlin_constraints(a_model, i, j):
        if i != j and i >= 2 and j >= 2:
            return a_model.u[i] - a_model.u[j] + a_model.x[i, j] * n <= n - 1
        return Constraint.NoConstraint

    model.con1 = Constraint(model.N, rule=only_leave_each_node_once_constraints)
    model.con2 = Constraint(model.N, rule=only_arrive_at_each_node_once_constraints)
    model.con3 = Constraint(model.N, model.N, rule=miller_tucker_zemlin_constraints)

    opt = SolverFactory('cbc', executable=_get_path_for_solver(), options={'randomCbcSeed': 42, 'randomSeed': 42})
    results = opt.solve(self.model, tee=True, **solver_kwargs)

    """

    def setUp(self):
        self.stderr = sys.stderr
        sys.stderr = None
        self.opt = SolverFactory("_mock_cbc", solver_io="lp")

    def tearDown(self):
        sys.stderr = self.stderr

    def test_optimal_mip(self):
        """
        solver_kwargs={}
        """
        lp_file = 'optimal.out.lp'
        results = self.opt.solve(os.path.join(data_dir, lp_file))

        self.assertEqual(1.20645976, results.problem.lower_bound)
        self.assertEqual(1.20645976, results.problem.upper_bound)
        self.assertEqual(SolverStatus.ok, results.solver.status)
        self.assertEqual(0.34, results.solver.system_time)
        self.assertEqual(0.72, results.solver.wallclock_time)
        self.assertEqual(TerminationCondition.optimal, results.solver.termination_condition)
        self.assertEqual(
            'Model was solved to optimality (subject to tolerances), and an optimal solution is available.',
            results.solver.termination_message)
        self.assertEqual(results.solver.statistics.branch_and_bound.number_of_bounded_subproblems, 2)
        self.assertEqual(results.solver.statistics.branch_and_bound.number_of_created_subproblems, 2)
        self.assertEqual(results.solver.statistics.black_box.number_of_iterations, 625)

    def test_max_time_limit_mip(self):
        """
        solver_kwargs={'timelimit': 0.1}
        """
        lp_file = 'max_time_limit.out.lp'
        results = self.opt.solve(os.path.join(data_dir, lp_file))

        self.assertEqual(1.1084706, results.problem.lower_bound)  # Note that we ignore the lower bound given at the end
        self.assertEqual(1.35481947, results.problem.upper_bound)
        self.assertEqual(SolverStatus.aborted, results.solver.status)
        self.assertEqual(0.1, results.solver.system_time)
        self.assertEqual(0.11, results.solver.wallclock_time)
        self.assertEqual(TerminationCondition.maxTimeLimit, results.solver.termination_condition)
        self.assertEqual(
            'Optimization terminated because the time expended exceeded the value specified in the seconds parameter.',
            results.solver.termination_message)
        self.assertEqual(results.solver.statistics.branch_and_bound.number_of_bounded_subproblems, 0)
        self.assertEqual(results.solver.statistics.branch_and_bound.number_of_created_subproblems, 0)
        self.assertEqual(results.solver.statistics.black_box.number_of_iterations, 82)

    def test_intermediate_non_integer_mip(self):
        """
        solver_kwargs={'timelimit': 0.0001}
        """
        lp_file = 'intermediate_non_integer.out.lp'
        results = self.opt.solve(os.path.join(data_dir, lp_file))

        self.assertEqual(0.92543678, results.problem.lower_bound)
        self.assertEqual(SolverStatus.aborted, results.solver.status)
        self.assertEqual(0.02, results.solver.system_time)
        self.assertEqual(0.02, results.solver.wallclock_time)
        self.assertEqual(TerminationCondition.intermediateNonInteger, results.solver.termination_condition)
        self.assertEqual(
            'Optimization terminated because a limit was hit, however it had not found an integer solution yet.',
            results.solver.termination_message)
        self.assertEqual(results.solver.statistics.branch_and_bound.number_of_bounded_subproblems, 0)
        self.assertEqual(results.solver.statistics.branch_and_bound.number_of_created_subproblems, 0)
        self.assertEqual(results.solver.statistics.black_box.number_of_iterations, 0)

    def test_max_solutions(self):
        """
        solver_kwargs={'options': {'maxSolutions': 1}}
        """
        lp_file = 'max_solutions.out.lp'
        results = self.opt.solve(os.path.join(data_dir, lp_file))

        self.assertEqual(0.92543678, results.problem.lower_bound)
        self.assertEqual(1.35481947, results.problem.upper_bound)
        self.assertEqual(SolverStatus.aborted, results.solver.status)
        self.assertEqual(0.03, results.solver.system_time)
        self.assertEqual(0.03, results.solver.wallclock_time)
        self.assertEqual(TerminationCondition.other, results.solver.termination_condition)
        self.assertEqual(
            'Optimization terminated because the number of solutions found reached the value specified in the '
            'maxSolutions parameter.', results.solver.termination_message)
        self.assertEqual(results.solver.statistics.branch_and_bound.number_of_bounded_subproblems, 0)
        self.assertEqual(results.solver.statistics.branch_and_bound.number_of_created_subproblems, 0)
        self.assertEqual(results.solver.statistics.black_box.number_of_iterations, 0)

    def test_within_gap_tolerance(self):
        """
        solver_kwargs={'options': {'allowableGap': 1000000}}
        """
        lp_file = 'within_gap_tolerance.out.lp'
        results = self.opt.solve(os.path.join(data_dir, lp_file))

        self.assertEqual(0.925437, results.problem.lower_bound)
        self.assertEqual(1.35481947, results.problem.upper_bound)
        self.assertEqual(SolverStatus.ok, results.solver.status)
        self.assertEqual(0.07, results.solver.system_time)
        self.assertEqual(0.07, results.solver.wallclock_time)
        self.assertEqual(TerminationCondition.optimal, results.solver.termination_condition)
        self.assertEqual(
            'Model was solved to optimality (subject to tolerances), and an optimal solution is available.',
            results.solver.termination_message)
        self.assertEqual(results.solver.statistics.branch_and_bound.number_of_bounded_subproblems, 0)
        self.assertEqual(results.solver.statistics.branch_and_bound.number_of_created_subproblems, 0)
        self.assertEqual(results.solver.statistics.black_box.number_of_iterations, 0)

    def test_max_evaluations(self):
        """
        solver_kwargs={'options': {'maxNodes': 1}}
        """
        lp_file = 'max_evaluations.out.lp'
        results = self.opt.solve(os.path.join(data_dir, lp_file))

        self.assertEqual(1.2052223, results.problem.lower_bound)
        self.assertEqual(1.20645976, results.problem.upper_bound)
        self.assertEqual(SolverStatus.aborted, results.solver.status)
        self.assertEqual(0.16, results.solver.system_time)
        self.assertEqual(0.18, results.solver.wallclock_time)
        self.assertEqual(TerminationCondition.maxEvaluations, results.solver.termination_condition)
        self.assertEqual(
            'Optimization terminated because the total number of branch-and-cut nodes explored exceeded the value '
            'specified in the maxNodes parameter', results.solver.termination_message)
        self.assertEqual(results.solver.statistics.branch_and_bound.number_of_bounded_subproblems, 1)
        self.assertEqual(results.solver.statistics.branch_and_bound.number_of_created_subproblems, 1)
        self.assertEqual(results.solver.statistics.black_box.number_of_iterations, 602)

    def test_fix_parsing_bug(self):
        """
        The test wasn't generated using the method in the class docstring
        See https://github.com/Pyomo/pyomo/issues/1001
        """
        lp_file = 'fix_parsing_bug.out.lp'
        results = self.opt.solve(os.path.join(data_dir, lp_file))

        if self.opt.version() < (2, 10, 2):
            self.assertEqual(3.0, results.problem.lower_bound)
            self.assertEqual(3.0, results.problem.upper_bound)
        else:
            self.assertEqual(-3.0, results.problem.lower_bound)
            self.assertEqual(-3.0, results.problem.upper_bound)
        self.assertEqual(SolverStatus.aborted, results.solver.status)
        self.assertEqual(0.08, results.solver.system_time)
        self.assertEqual(0.09, results.solver.wallclock_time)
        self.assertEqual(TerminationCondition.other, results.solver.termination_condition)
        self.assertEqual(
            'Optimization terminated because the number of solutions found reached the value specified in the '
            'maxSolutions parameter.', results.solver.termination_message)
        self.assertEqual(results.solver.statistics.branch_and_bound.number_of_bounded_subproblems, 0)
        self.assertEqual(results.solver.statistics.branch_and_bound.number_of_created_subproblems, 0)
        self.assertEqual(results.solver.statistics.black_box.number_of_iterations, 0)

    def test_process_logfile(self):
        cbc_shell = CBCSHELL()
        cbc_shell._log_file = os.path.join(data_dir, 'test5_timeout_cbc.txt')
        results = cbc_shell.process_logfile()
        self.assertEqual(results.solution.gap, 0.01)
        self.assertEqual(results.solver.statistics.black_box.number_of_iterations, 50364)
        self.assertEqual(results.solver.system_time, 2.01)
        self.assertEqual(results.solver.statistics.branch_and_bound.number_of_created_subproblems, 34776)

    def test_process_logfile_gap_inf(self):
        cbc_shell = CBCSHELL()
        cbc_shell._log_file = os.path.join(data_dir, 'test5_timeout_cbc_gap.txt')
        results = cbc_shell.process_logfile()
        self.assertEqual(results.solution.gap, float('inf'))
        self.assertEqual(results.solver.statistics.black_box.number_of_iterations, 50364)
        self.assertEqual(results.solver.system_time, 2.01)
        self.assertEqual(results.solver.statistics.branch_and_bound.number_of_created_subproblems, 34776)
Example #26
def fix_dual_bound(solve_data, config, last_iter_cuts):
    """Fix the dual bound when no-good cuts or tabu list is activated.

    Args:
        solve_data (MindtPySolveData): data container that holds solve-instance data.
        config (ConfigBlock): the specific configurations for MindtPy.
        last_iter_cuts (bool): whether the cuts in the last iteration have been added.
    """
    if config.single_tree:
        config.logger.info(
            'Fix the bound to the value of one iteration before optimal solution is found.'
        )
        try:
            if solve_data.objective_sense == minimize:
                solve_data.LB = solve_data.stored_bound[solve_data.UB]
            else:
                solve_data.UB = solve_data.stored_bound[solve_data.LB]
        except KeyError:
            config.logger.info('No stored bound found. Bound fix failed.')
    else:
        config.logger.info(
            'Solve the main problem without the last no_good cut to fix the bound.'
            'zero_tolerance is set to 1E-4')
        config.zero_tolerance = 1E-4
        # Solve NLP subproblem
        # The constraint linearization happens in the handlers
        if not last_iter_cuts:
            fixed_nlp, fixed_nlp_result = solve_subproblem(solve_data, config)
            handle_nlp_subproblem_tc(fixed_nlp, fixed_nlp_result, solve_data,
                                     config)

        MindtPy = solve_data.mip.MindtPy_utils
        # deactivate the integer cuts generated after the best solution was found.
        if config.strategy == 'GOA':
            try:
                if solve_data.objective_sense == minimize:
                    valid_no_good_cuts_num = solve_data.num_no_good_cuts_added[
                        solve_data.UB]
                else:
                    valid_no_good_cuts_num = solve_data.num_no_good_cuts_added[
                        solve_data.LB]
                if config.add_no_good_cuts:
                    for i in range(valid_no_good_cuts_num + 1,
                                   len(MindtPy.cuts.no_good_cuts) + 1):
                        MindtPy.cuts.no_good_cuts[i].deactivate()
                if config.use_tabu_list:
                    solve_data.integer_list = solve_data.integer_list[:
                                                                      valid_no_good_cuts_num]
            except KeyError:
                config.logger.info('No-good cut deactivate failed.')
        elif config.strategy == 'OA':
            # Deactivating only the last OA cut may not be correct, since an integer
            # solution may also be cut off by OA cuts due to calculation approximation.
            if config.add_no_good_cuts:
                MindtPy.cuts.no_good_cuts[len(
                    MindtPy.cuts.no_good_cuts)].deactivate()
            if config.use_tabu_list:
                solve_data.integer_list = solve_data.integer_list[:-1]
        if config.add_regularization is not None and MindtPy.find_component(
                'mip_obj') is None:
            MindtPy.objective_list[-1].activate()
        mainopt = SolverFactory(config.mip_solver)
        # determine if persistent solver is called.
        if isinstance(mainopt, PersistentSolver):
            mainopt.set_instance(solve_data.mip, symbolic_solver_labels=True)
        if config.use_tabu_list:
            tabulist = mainopt._solver_model.register_callback(
                tabu_list.IncumbentCallback_cplex)
            tabulist.solve_data = solve_data
            tabulist.opt = mainopt
            tabulist.config = config
            mainopt._solver_model.parameters.preprocessing.reduce.set(1)
            # If the callback is used to reject incumbents, the user must set the
            # parameter c.parameters.preprocessing.reduce either to the value 1 (one)
            # to restrict presolve to primal reductions only or to 0 (zero) to disable all presolve reductions
            mainopt._solver_model.set_warning_stream(None)
            mainopt._solver_model.set_log_stream(None)
            mainopt._solver_model.set_error_stream(None)
        mip_args = dict(config.mip_solver_args)
        set_solver_options(mainopt, solve_data, config, solver_type='mip')
        main_mip_results = mainopt.solve(solve_data.mip,
                                         tee=config.mip_solver_tee,
                                         **mip_args)
        if main_mip_results.solver.termination_condition is tc.infeasible:
            config.logger.info(
                'Bound fix failed. The bound fix problem is infeasible')
        else:
            uptade_suboptimal_dual_bound(solve_data, main_mip_results)
            config.logger.info('Fixed bound values: LB: {}  UB: {}'.format(
                solve_data.LB, solve_data.UB))
        # Check bound convergence
        if solve_data.LB + config.bound_tolerance >= solve_data.UB:
            solve_data.results.solver.termination_condition = tc.optimal
Example #27
class TestCBC(unittest.TestCase):
    """
    These tests are here to test the general functionality of the cbc solver when using the lp solverio, which will
    ensure that we have a consistent output from CBC for some simple problems
    """

    def setUp(self):
        self.stderr = sys.stderr
        sys.stderr = None
        self.model = ConcreteModel()
        # Do we need to pass in seeds to ensure consistent behaviour? options={'randomSeed: 42, 'randomCbcSeed': 42}
        self.opt = SolverFactory("cbc", solver_io="lp")

    def tearDown(self):
        sys.stderr = self.stderr

    @unittest.skipIf(not cbc_available, "The 'cbc' solver is not available")
    def test_infeasible_lp(self):
        self.model.X = Var(within=Reals)
        self.model.C1 = Constraint(expr=self.model.X <= 1)
        self.model.C2 = Constraint(expr=self.model.X >= 2)
        self.model.Obj = Objective(expr=self.model.X, sense=minimize)

        results = self.opt.solve(self.model)

        self.assertEqual(ProblemSense.minimize, results.problem.sense)
        self.assertEqual(TerminationCondition.infeasible, results.solver.termination_condition)
        self.assertEqual('Model was proven to be infeasible.', results.solver.termination_message)
        self.assertEqual(SolverStatus.warning, results.solver.status)

    @unittest.skipIf(not cbc_available, "The 'cbc' solver is not available")
    def test_unbounded_lp(self):
        self.model.Idx = RangeSet(2)
        self.model.X = Var(self.model.Idx, within=Reals)
        self.model.Obj = Objective(expr=self.model.X[1] + self.model.X[2], sense=maximize)

        results = self.opt.solve(self.model)

        self.assertEqual(ProblemSense.maximize, results.problem.sense)
        self.assertEqual(TerminationCondition.unbounded, results.solver.termination_condition)
        self.assertEqual('Model was proven to be unbounded.', results.solver.termination_message)
        self.assertEqual(SolverStatus.warning, results.solver.status)

    @unittest.skipIf(not cbc_available, "The 'cbc' solver is not available")
    def test_optimal_lp(self):
        self.model.X = Var(within=NonNegativeReals)
        self.model.Obj = Objective(expr=self.model.X, sense=minimize)

        results = self.opt.solve(self.model)

        self.assertEqual(0.0, results.problem.lower_bound)
        self.assertEqual(0.0, results.problem.upper_bound)
        self.assertEqual(ProblemSense.minimize, results.problem.sense)
        self.assertEqual(TerminationCondition.optimal, results.solver.termination_condition)
        self.assertEqual(
            'Model was solved to optimality (subject to tolerances), and an optimal solution is available.',
            results.solver.termination_message)
        self.assertEqual(SolverStatus.ok, results.solver.status)

    @unittest.skipIf(not cbc_available, "The 'cbc' solver is not available")
    def test_infeasible_mip(self):
        self.model.X = Var(within=NonNegativeIntegers)
        self.model.C1 = Constraint(expr=self.model.X <= 1)
        self.model.C2 = Constraint(expr=self.model.X >= 2)
        self.model.Obj = Objective(expr=self.model.X, sense=minimize)

        results = self.opt.solve(self.model)

        self.assertEqual(ProblemSense.minimize, results.problem.sense)
        self.assertEqual(TerminationCondition.infeasible, results.solver.termination_condition)
        self.assertEqual('Model was proven to be infeasible.', results.solver.termination_message)
        self.assertEqual(SolverStatus.warning, results.solver.status)

    @unittest.skipIf(not cbc_available, "The 'cbc' solver is not available")
    def test_unbounded_mip(self):
        self.model.X = Var(within=Integers)
        self.model.Obj = Objective(expr=self.model.X, sense=minimize)

        results = self.opt.solve(self.model)

        self.assertEqual(ProblemSense.minimize, results.problem.sense)
        self.assertEqual(TerminationCondition.unbounded, results.solver.termination_condition)
        self.assertEqual('Model was proven to be unbounded.', results.solver.termination_message)
        self.assertEqual(SolverStatus.warning, results.solver.status)

    @unittest.skipIf(not cbc_available, "The 'cbc' solver is not available")
    def test_optimal_mip(self):
        self.model.Idx = RangeSet(2)
        self.model.X = Var(self.model.Idx, within=NonNegativeIntegers)
        self.model.Y = Var(self.model.Idx, within=Binary)
        self.model.C1 = Constraint(expr=self.model.X[1] == self.model.X[2] + 1)
        self.model.Obj = Objective(expr=self.model.Y[1] + self.model.Y[2] - self.model.X[1],
                                   sense=maximize)

        results = self.opt.solve(self.model)

        self.assertEqual(1.0, results.problem.lower_bound)
        self.assertEqual(1.0, results.problem.upper_bound)
        self.assertEqual(results.problem.number_of_binary_variables, 2)
        self.assertEqual(results.problem.number_of_integer_variables, 4)
        self.assertEqual(ProblemSense.maximize, results.problem.sense)
        self.assertEqual(TerminationCondition.optimal, results.solver.termination_condition)
        self.assertEqual(
            'Model was solved to optimality (subject to tolerances), and an optimal solution is available.',
            results.solver.termination_message)
        self.assertEqual(SolverStatus.ok, results.solver.status)
Example #28
    for z in instance.h_imports:
        #load Hydropower time series data
        for i in K:
            instance.HorizonHydroImport[z, i] = instance.SimHydroImport[
                z, (day - 1) * 24 + i]

    for z in instance.Generators:
        #load Deratef time series data
        for i in K:
            instance.HorizonDeratef[z,
                                    i] = instance.SimDeratef[z,
                                                             (day - 1) * 24 +
                                                             i]

    result = opt.solve(instance)  ##,tee=True to check number of variables
    # instance.display()
    if result.solver.status == SolverStatus.aborted:  #max time limit reached
        result.solver.status = SolverStatus.warning  #change status so that results can be loaded
    instance.solutions.load_from(result)

    #  #The following section is for storing and sorting results
    for v in instance.component_objects(Var, active=True):
        varobject = getattr(instance, str(v))
        a = str(v)
        if a == 'hydro':
            for index in varobject:
                if int(index[1] > 0 and index[1] < 25):
                    if index[0] in instance.h_nodes:
                        hydro.append((index[0], index[1] + ((day - 1) * 24),
                                      varobject[index].value))
Example #29
def bin_search(tech, vintage, dat, eps=0.01, all_v=False):
    # This code block performs breakeven analysis by brute force in an
    # iterative way. I did this because sometimes sensitivity() and
    # sensitivity_api() return anomalous values. Note that this code block
    # returns the absolutely correct breakeven costs, however, it is
    # significantly more time-consuming, since it takes 8-9 instances to
    # calculate the breakeven cost of just one technology of one vintage.
    # Arguments are defined below:
    # tech     -> Target technology.
    # vintage  -> Target vintage. It is break-even when capacity in this year >= 0
    # dat      -> A list of .dat files.
    # eps      -> Convergence tolerance
    # all_v    -> A flag used to indicate the costs of which vintages are subject
    # to change. If it is FALSE, then only the investment costs and fixed costs
    # in the target vintage will be altered, otherwise all vintages are affected
    # Note that, only the capacity of the target vintage will be monitored and
    # be used to signal breakeven.
    monitor_year = vintage
    monitor_tech = tech

    t0 = time()
    time_mark = lambda: time() - t0

    model = return_Temoa_model()
    optimizer = SolverFactory('cplex')
    data = return_Temoa_data(model, dat)
    instance = model.create_instance(data)

    time_optimize = [i for i in data['time_future']]
    time_optimize.sort()
    ic0 = dict()
    fc0 = dict()
    if all_v:
        for v in time_optimize:
            if (monitor_tech, v) in data['CostInvest']:
                ic0[monitor_tech, v] = data['CostInvest'][monitor_tech, v]
                for p in time_optimize:
                    if (p, monitor_tech, v) in data['CostFixed']:
                        fc0[p, monitor_tech,
                            v] = data['CostFixed'][p, monitor_tech, v]
    else:
        ic0[monitor_tech, monitor_year] = data['CostInvest'][monitor_tech,
                                                             monitor_year]
        for p in time_optimize:
            if (p, monitor_tech, monitor_year) in data['CostFixed']:
                fc0[p, monitor_tech,
                    monitor_year] = data['CostFixed'][p, monitor_tech,
                                                      monitor_year]

    cap_target = 0
    scale_u = 1.0
    scale_l = 0.0

    history = dict()
    history['scale_u'] = [scale_u]
    history['scale_l'] = [scale_l]

    counter = 0
    scale_this = scale_u  # Starting scale

    print 'Iteration # {} starts at {} s'.format(counter, time_mark())
    instance = model.create_instance(data)
    instance.preprocess()
    results = optimizer.solve(instance,
                              suffixes=['dual', 'urc', 'slack', 'lrc'])
    instance.solutions.load_from(results)
    cap_target = value(instance.V_Capacity[monitor_tech, monitor_year])
    print 'Iteration # {} solved at {} s'.format(counter, time_mark())
    print 'Iteration # {}, scale: {:1.2f}, capacity: {} GW'.format(
        counter, scale_this, cap_target)
    if 1.0 - scale_this <= eps and cap_target > 0:
        return scale_this

    while (scale_u - scale_l) >= eps and counter <= 20:
        if cap_target <= 0:
            scale_u = scale_this
            history['scale_u'].append(scale_u)
        else:
            scale_l = scale_this
            history['scale_l'].append(scale_l)
        counter += 1

        scale_this = (scale_u + scale_l) * 0.5
        for k in ic0:
            data['CostInvest'][k] = scale_this * ic0[k]
        for k in fc0:
            data['CostFixed'][k] = scale_this * fc0[k]

        print 'Iteration # {} starts at {} s'.format(counter, time_mark())
        instance = model.create_instance(data)
        instance.preprocess()
        results = optimizer.solve(instance,
                                  suffixes=['dual', 'urc', 'slack', 'lrc'])
        instance.solutions.load_from(results)
        cap_target = value(instance.V_Capacity[monitor_tech, monitor_year])
        print 'Iteration # {} solved at {} s'.format(counter, time_mark())
        print 'Iteration # {}, scale: {:1.2f}, capacity: {} GW'.format(
            counter, scale_this, cap_target)
    return (scale_u + scale_l) / 2.0
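The comment block at the top of bin_search describes a bisection on a cost
scale factor; the loop below is a standalone sketch of just that search, with
the Temoa model solve abstracted behind a hypothetical callable
capacity_at(scale) that returns the monitored capacity when the target costs
are multiplied by scale.

def bisect_breakeven(capacity_at, eps=0.01, max_iter=20):
    # Breakeven scale is assumed to lie in [0, 1]; start from the unscaled costs.
    scale_l, scale_u = 0.0, 1.0
    scale = scale_u
    cap = capacity_at(scale)
    if 1.0 - scale <= eps and cap > 0:
        return scale  # already built at full cost
    it = 0
    while (scale_u - scale_l) >= eps and it <= max_iter:
        if cap <= 0:
            scale_u = scale  # still too expensive: tighten the upper bound
        else:
            scale_l = scale  # capacity gets built: raise the lower bound
        scale = 0.5 * (scale_u + scale_l)
        cap = capacity_at(scale)
        it += 1
    return 0.5 * (scale_u + scale_l)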
Example #30
0
def Saturacion(m, trabajo):
    return sum(m.y[persona, trabajo] for persona in m.p) == 1


m.R2 = Constraint(m.t, rule=Saturacion, doc='One job, one person')

## OBJECTIVE
# The objective function maximizes how well each person fits his or her job.


def fobj(m):
    return sum(m.CI[persona, trabajo] * m.y[persona, trabajo]
               for persona in m.p for trabajo in m.t)


m.OBJ = Objective(rule=fobj, sense=maximize)

## DATA
# The data still has to be added, of course.
inst = m.create_instance(data='Abstract_Data.dat')
inst.pprint()
## SOLVER BOILERPLATE
from pyomo.opt import SolverFactory
opt = SolverFactory('glpk')
results = opt.solve(inst)
results.write()

for i in inst.y:
    print(i, ' :', inst.y[i].value)
Example #31
0
def mip_single_temperature_interval(n, m, QH, QC, u):

    # Local copy of the instance.
    h = list([QH[i][u] for i in range(n)])
    c = list([QC[j][u] for j in range(m)])

    Hu = []  # Hot streams with positive heat load
    Cu = []  # Cold streams with positive heat load

    for i in range(n):
        if h[i] < epsilon:
            h[i] = 0
        else:
            Hu.append(i)

    for j in range(m):
        if c[j] < epsilon:
            c[j] = 0
        else:
            Cu.append(j)

    #print(h)
    #print(c)

    model = AbstractModel()

    model.n = Param(within=NonNegativeIntegers,
                    initialize=n)  # number of hot streams
    model.m = Param(within=NonNegativeIntegers,
                    initialize=m)  # number of cold streams
    model.b = Param(within=NonNegativeIntegers,
                    initialize=min(n, m))  # number of bins

    model.H = RangeSet(0, model.n - 1)  # set of hot streams
    model.C = RangeSet(0, model.m - 1)  # set of cold streams
    model.B = RangeSet(0, model.b - 1)  # set of bins

    model.Hu = Set(within=model.H, initialize=Hu)
    model.Cu = Set(within=model.C, initialize=Cu)

    # Parameter: heat load of hot stream i in temperature interval t
    model.h = Param(model.Hu,
                    within=NonNegativeReals,
                    initialize=lambda model, i: h[i])

    # Parameter: heat load of cold stream j in temperature interval t
    model.c = Param(model.Cu,
                    within=NonNegativeReals,
                    initialize=lambda model, j: c[j])

    # Variable: indicating whether bin b is used
    model.x = Var(model.B, within=Binary)

    # Variable: indicating whether hot stream i is placed into bin b
    model.y = Var(model.Hu, model.B, within=Binary)

    # Variable: indicating whether cold stream j is placed into bin b
    model.z = Var(model.Cu, model.B, within=Binary)

    # Objective: number of bins
    def number_of_bins_rule(model):
        return sum(model.x[b] for b in model.B)

    model.number_of_bins_obj = Objective(rule=number_of_bins_rule,
                                         sense=maximize)

    # Constraint: a bin counts as used only if some cold stream is assigned to it
    def bin_not_used_rule(model, b):
        return model.x[b] <= sum(model.z[j, b] for j in model.Cu)

    model.bin_not_used_constraint = Constraint(model.B, rule=bin_not_used_rule)

    #Constraint: assignment of each hot stream
    def hot_assignment_rule(model, i):
        return sum(model.y[i, b] for b in model.B) <= 1

    model.hot_assignment_rule = Constraint(model.Hu, rule=hot_assignment_rule)

    #Constraint: assignment of each cold stream
    def cold_assignment_rule(model, j):
        return sum(model.z[j, b] for b in model.B) == 1

    model.cold_assignment_rule = Constraint(model.Cu,
                                            rule=cold_assignment_rule)

    #Constraint: heat conservation
    def heat_conservation_rule(model, b):
        return sum(model.y[i, b] * model.h[i]
                   for i in model.Hu) >= sum(model.z[j, b] * model.c[j]
                                             for j in model.Cu)

    model.heat_conservation_rule = Constraint(model.B,
                                              rule=heat_conservation_rule)

    solver = 'cplex'
    opt = SolverFactory(solver)
    opt.options['threads'] = 1
    MIP = model.create_instance()
    results = opt.solve(MIP)

    #print(results)

    #for b in range(min(n,m)):
    #print(MIP.x[b].value)

    bins = []

    for b in range(min(n, m)):

        hot_streams = []
        cold_streams = []

        for i in Hu:
            if MIP.y[i, b].value > epsilon:
                #print('H'+str(i)+' in B'+str(b))
                hot_streams.append(i)
        for j in Cu:
            if MIP.z[j, b].value > epsilon:
                #print('C'+str(j)+' in B'+str(b))
                cold_streams.append(j)

        if cold_streams != []:
            bins.append((list(hot_streams), list(cold_streams)))

    #print(bins)

    q = [[0 for j in range(m)] for i in range(n)]

    for (H, C) in bins:
        i = 0
        j = 0
        while i < len(H) and j < len(C):  # strict bound on i avoids indexing past the end of H

            hot_heat = h[H[i]]
            cold_heat = c[C[j]]

            temp_q = min(hot_heat, cold_heat)
            q[H[i]][C[j]] += temp_q
            h[H[i]] -= temp_q
            c[C[j]] -= temp_q

            if h[H[i]] < epsilon:
                i += 1
            if c[C[j]] < epsilon:
                j += 1

    #print(q)

    return q
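A minimal usage sketch for the function above, with illustrative heat loads
only: it assumes the Pyomo imports used by the function, a small module-level
tolerance named epsilon, and a working CPLEX installation.

epsilon = 1e-6
QH = [[60.0], [40.0]]   # QH[i][u]: heat load of hot stream i in interval u
QC = [[50.0], [45.0]]   # QC[j][u]: heat load of cold stream j in interval u
q = mip_single_temperature_interval(2, 2, QH, QC, 0)
print(q)                # q[i][j]: heat matched from hot stream i to cold stream j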
Example #32
0
def solveVRP(Nodes, max_agents,max_nodes):
    #this is a variation of the Vehicle Routing Problem intended to minimize waiting time
    n = len(Nodes)
    print "\tAgents available: ", max_agents, "\tNodes: ", n

    #Define the graph - This is a 3D graph [((node i, node j),agent k) ...]
    Points = [i for i in range(n)]
    list_of_agents = [i for i in range(max_agents)]
    graph = [(i,j,k) for i in range(n) for j in range(n) if i<j or j==0 for k in range(max_agents)]


    # Creation of a Concrete Model
    model = ConcreteModel()

    #use the init feature of pyomo to init the costs
    model.use_edge = Var(graph, within=Binary)

    def waiting_time(node, vehicle):
        """Returns the waiting time at the specified node"""
        #remember to consider the actual distance between 2 nodes for this model
        return sum([model.use_edge[(i,j,vehicle)]  for i in range(1,node) for j in range(i+1,node) if (i+1)==j ])

    def objective_rule(model):
        #mtsp_prob += lpSum([cost(edge[0][0],edge[0][1])*waiting_time(edge[0][1],edge[1])  for edge in graph ])
        # minimize total waiting time
        return sum(model.use_edge[node]*waiting_time(node[1],node[2]) for node in graph)
    model.objective = Objective(rule=objective_rule, sense=minimize, doc='Define objective function')

    #We are assuming that node zero is the default starting node
    #Ensure at most m agents depart from node 0
    def startingNode_rule(model, k):
        return sum(model.use_edge[0,j,k] for j in range(n) if j != 0 ) <= 1
      #use <= 1 if you want "at most m agents to depart from node 0" and == if you
      #want exactly m agents to depart from node 0
    model.starting = Constraint(list_of_agents, rule=startingNode_rule, doc='Ensure at most m agents depart from node 0')

    #Ensure at most m agents return to node 0
    def endingNode_rule(model, k):
        return sum(model.use_edge[i,0,k] for i in range(n) if i != 0) <= 1
      #use <= 1 if you want "at most m agents to return to node 0" and == if you
      #want exactly m agents to return to node 0
    model.ending = Constraint(list_of_agents, rule=endingNode_rule, doc='Ensure at most m agents return to node 0')

    #Ensure that only one tour enters each node
    def singleTourEnter_rule(model, j):
        return sum(model.use_edge[(i,j,k)] for i in range(n) if (i+1)==j or j==0 or i==0 for k in range(max_agents)) == 1
    model.singleTourEnter = Constraint(Points[1:], rule=singleTourEnter_rule, doc='Ensure that only one tour enters each node')

    #Ensure that only one tour leaves each node
    def singleTourLeave_rule(model, i):
        return sum(model.use_edge[(i,j,k)] for j in range(n) if (i+1)==j or j==0 or i==0 for k in range(max_agents)) == 1
    model.singleTourLeave = Constraint(Points[1:], rule=singleTourLeave_rule, doc='Ensure that only one tour leaves each node')

    #Ensure that each agent visits at most "max_nodes" nodes
    if max_nodes != None:
        def maxNodes_rule(model, k):
            return sum(model.use_edge[(i,j,k)] for i in range(n) for j in range(n) if (i+1)==j or i==0 or j==0) <= max_nodes
        model.maxNodes = Constraint(list_of_agents, rule=maxNodes_rule, doc='Ensure that each agent visits at most "max_nodes" nodes')

    #Ensure that same vehicle arrives and departs from each node it serves
    def sameVehicle_rule(model, k, node):
        sum_entering = sum(model.use_edge[i, node, k] for i in range(n) if (i+1) == node or i==0 or node==0)
        sum_leaving = sum(model.use_edge[node, j, k] for j in range(n) if (node+1) == j or node==0 or j==0)
        return sum_entering - sum_leaving == 0
    model.sameVehicle = Constraint(list_of_agents, Points, rule=sameVehicle_rule, doc='Ensure that same vehicle arrives and departs from each node it serves')

    #subtour elimination
    #find u[i] - u[j] that satisfies the constraints. Model u as an LP variable
    model.u = Var(Points, within=NonNegativeIntegers)
    ste_nodes = [(i,j,k) for k in range(max_agents) for i in range(n) for j in range(1,n) if (((i+1)==j) or (j==0) or i==0)]
    def subtourElimination_rule(model, i,j,k):
        return model.u[i] - model.u[j] + n*model.use_edge[(i,j,k)] <= n-1
    model.subtourElimination = Constraint(ste_nodes, rule=subtourElimination_rule, doc='subtour Elimination rule')

    #solve the problem using the gurobi solver. Gurobi solves mixed integer quadratic problems
    opt = SolverFactory("gurobi")
    results = opt.solve(model)
    #sends results to stdout
    #results.write()
    print("\nDisplaying Solution\n" + '-'*60)

    result = []
    #Print result - We are only concerned with Integer values of 1
    #also build the result return list
    print "\tResult: \n"
    for node in graph:
        #print value(use_edge[item]),
        if model.use_edge[node].value == 1:
            print '\t', node#, value(use_edge[item])
            result.append(node)
    return result
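A hypothetical call of solveVRP (the node labels are placeholders; only the
number of nodes matters, node 0 is treated as the depot, and Gurobi must be
installed for the solve inside the function). The example above is written
for Python 2, so the sketch sticks to Python-2-compatible printing.

Nodes = ['depot', 'site_a', 'site_b', 'site_c']
routes = solveVRP(Nodes, max_agents=2, max_nodes=3)
for (i, j, k) in routes:
    print('agent %d: node %d -> node %d' % (k, i, j))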
Example #33
0
model.ZRule = Constraint(model.ROUTES, model.SCENARIOS, rule=z_rule)
   
#
# Stage-specific cost computations
#
def first_stage_cost_rule(model):
    return (model.FirstStageCost \
            - sum(sum(model.C[j,r] * model.X[j,r] for r in model.ROUTES) for j in model.TYPES) \
            - sum(sum(model.PROB[r,u] * model.Q[r] * (model.D[r,u] - sum(model.Z[r,w] for w in model.SCENARIOS if w <= u )) for u in model.SCENARIOS) for r in model.ROUTES)) == 0.0
model.ComputeFirstStageCost = Constraint(rule=first_stage_cost_rule)

def second_stage_cost_rule(model):
    return model.SecondStageCost == 0.0
model.ComputeSecondStageCost = Constraint(rule=second_stage_cost_rule)

#
# Objective
#
def total_cost_rule(model):
    return (model.FirstStageCost + model.SecondStageCost)
model.Total_Cost_Objective = Objective(rule=total_cost_rule, sense=minimize)

#
# Solve and print
#
instance = model.create('ReferenceModel.dat')
opt = SolverFactory('glpk')
PartBresults = opt.solve(instance)

print PartBresults.solution.objective
Example #34
0
#!/usr/local/bin/python
# Copyright 2015 The Switch Authors. All rights reserved.
# Licensed under the Apache License, Version 2, which is in the LICENSE file.
"""

Illustrate the use of switch to construct and run a very simple model
with a single load zone, one investment period, and two timepoints.
This expands on copperplate0.py by adding a few more types of generators
as well as local transmission and distribution.

For this to work, you need to ensure that the switch_mod package
directory is in your python search path. See the README for more info.

"""

from pyomo.environ import *
from pyomo.opt import SolverFactory
from switch_mod.utilities import define_AbstractModel

switch_model = define_AbstractModel('switch_mod', 'local_td',
                                    'project.no_commit', 'fuel_markets')
switch_instance = switch_model.load_inputs(inputs_dir="inputs")

opt = SolverFactory("cplex")
results = opt.solve(switch_instance, keepfiles=False, tee=False)
switch_instance.load(results)

results.write()
switch_instance.pprint()
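The docstring above notes that the switch_mod package has to be importable
(i.e. its parent directory on the Python search path); one way to arrange
that (the directory shown is a placeholder) is to extend sys.path before the
switch_mod import at the top of the script.

import sys
sys.path.insert(0, '/path/to/switch')  # hypothetical parent directory of switch_mod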
Example #35
0
class OptiType(object):
    """
    classdocs

    """
    def __init__(self,
                 cov,
                 occ,
                 groups_4digit,
                 beta,
                 t_max_allele=2,
                 solver="glpk",
                 threads=1,
                 verbosity=0):
        """
        Constructor
        """

        self.__beta = float(beta)
        self.__t_max_allele = t_max_allele
        self.__solver = SolverFactory(solver)
        self.__threads = threads
        self.__opts = {"threads": threads} if threads > 1 else {}
        self.__verbosity = verbosity
        self.__changed = True  # model needs to know if it changed from last run or not
        self.__ks = 1
        self.__groups_4digit = groups_4digit

        loci_alleles = defaultdict(list)
        for type_4digit, group_alleles in groups_4digit.iteritems():
            # print type_4digit, group_alleles
            loci_alleles[type_4digit.split('*')[0]].extend(group_alleles)

        loci = loci_alleles

        self.__allele_to_4digit = {
            allele: type_4digit
            for type_4digit, group in groups_4digit.iteritems()
            for allele in group
        }
        '''
            generates the basic ILP model
        '''

        model = ConcreteModel()

        # init Sets
        model.LociNames = Set(initialize=loci.keys())
        model.Loci = Set(model.LociNames, initialize=lambda m, l: loci[l])

        L = list(itertools.chain(*loci.values()))
        reconst = {allele_id: 0.01 for allele_id in L if '_' in allele_id}
        R = set([r for (r, _) in cov.keys()])
        model.L = Set(initialize=L)
        model.R = Set(initialize=R)

        # init Params
        model.cov = Param(model.R,
                          model.L,
                          initialize=lambda model, r, a: cov.get((r, a), 0))
        model.reconst = Param(model.L,
                              initialize=lambda model, a: reconst.get(a, 0))

        model.occ = Param(model.R, initialize=occ)
        model.t_allele = Param(initialize=self.__t_max_allele, mutable=True)

        model.beta = Param(
            initialize=self.__beta,
            validate=lambda val, model: 0.0 <= float(self.__beta) <= 0.999,
            mutable=True)
        model.nof_loci = Param(initialize=len(loci))

        # init variables
        model.x = Var(model.L, domain=Binary)
        model.y = Var(model.R, domain=Binary)

        model.re = Var(model.R, bounds=(0.0, None))
        model.hetero = Var(bounds=(0.0, model.nof_loci))

        # init objective
        model.read_cov = Objective(rule=lambda model: sum(
            model.occ[r] * (model.y[r] - model.beta * (model.re[r]))
            for r in model.R) - sum(model.reconst[a] * model.x[a]
                                    for a in model.L),
                                   sense=maximize)

        # init Constraints
        model.max_allel_selection = Constraint(
            model.LociNames,
            rule=lambda model, l: sum(model.x[a] for a in model.Loci[l]
                                      ) <= model.t_allele)
        model.min_allel_selection = Constraint(
            model.LociNames,
            rule=lambda model, l: sum(model.x[a] for a in model.Loci[l]) >= 1)
        model.is_read_cov = Constraint(
            model.R,
            rule=lambda model, r: sum(model.cov[r, a] * model.x[a]
                                      for a in model.L) >= model.y[r])
        model.heterozygot_count = Constraint(
            rule=lambda model: model.hetero >= sum(model.x[a] for a in model.L
                                                   ) - model.nof_loci)

        # regularization constraints
        model.reg1 = Constraint(
            model.R,
            rule=lambda model, r: model.re[r] <= model.nof_loci * model.y[r])
        model.reg2 = Constraint(
            model.R, rule=lambda model, r: model.re[r] <= model.hetero)
        model.reg3 = Constraint(model.R,
                                rule=lambda model, r: model.re[r] >= model.
                                hetero - model.nof_loci * (1 - model.y[r]))

        # generate constraint list for solution enumeration
        model.c = ConstraintList()
        # Generate instance. This used to call .create(), but that is deprecated
        # now that ConcreteModels are instances in their own right.
        self.__instance = model

    def set_beta(self, beta):
        """
            Sets the parameter beta
        """
        self.__changed = True
        getattr(self.__instance,
                str(self.__instance.beta)).set_value(float(beta))

    def set_t_max_allele(self, t_max_allele):
        """
            Sets the upper bound of alleles selected per loci
        """
        self.__changed = True
        getattr(self.__instance,
                str(self.__instance.t_allele)).set_value(t_max_allele)

    def solve(self, ks):
        """
            solves the problem ks times, excluding previously found solutions in each subsequent run.
        """
        d = defaultdict(list)  # stores the typings + objective; a DataFrame is built from it afterwards

        if self.__changed or self.__ks != ks:
            self.__ks = ks
            for k in xrange(ks):
                expr = 0

                self.__instance.preprocess()
                try:
                    res = self.__solver.solve(self.__instance,
                                              options=self.__opts,
                                              tee=self.__verbosity)
                except:
                    print(
                        "WARNING: Solver does not support multi-threading. Please change the config"
                        " file accordingly. Falling back to single-threading.")
                    res = self.__solver.solve(self.__instance,
                                              options={},
                                              tee=self.__verbosity)
                self.__instance.solutions.load_from(
                    res)  # solution loading changed recently.

                # if self.__verbosity > 0:
                #     res.write(num=1)

                if res.solver.termination_condition != TerminationCondition.optimal:
                    print "Optimal solution hasn't been obtained. This is a terminal problem."  # TODO message, exit
                    break

                selected = []
                indices = []
                encountered_4digit = []
                for j in self.__instance.x:
                    if self.__allele_to_4digit[j][0] in 'HJG':
                        if 0.99 <= self.__instance.x[j].value <= 1.01:
                            selected.append(j)
                        indices.append(j)
                        continue
                    if 0.99 <= self.__instance.x[j].value <= 1.01:
                        selected.append(j)
                        exp_i = 0
                        exp_i += self.__instance.x[j]
                        if self.__allele_to_4digit[j] in encountered_4digit:
                            continue
                        encountered_4digit.append(self.__allele_to_4digit[j])
                        for i_allele in self.__groups_4digit[
                                self.__allele_to_4digit[j]]:
                            if self.__instance.x[i_allele].value <= 0:
                                exp_i += self.__instance.x[i_allele]
                            indices.append(i_allele)
                        expr += (1.0 - exp_i)
                zero_indices = set([j for j in self.__instance.x
                                    ]).difference(set(indices))
                for j in zero_indices:
                    expr += self.__instance.x[j]

                self.__instance.c.add(expr >= 1)

                # if self.__verbosity > 0:
                #     print selected
                #     self.__instance.c.pprint()
                aas = [
                    self.__allele_to_4digit[x].split('*')[0] for x in selected
                ]
                c = dict.fromkeys(aas, 1)
                for i in xrange(len(aas)):
                    if aas.count(aas[i]) < 2:
                        d[aas[i] + "1"].append(selected[i])
                        d[aas[i] + "2"].append(selected[i])
                    else:
                        d[aas[i] + str(c[aas[i]])].append(selected[i])
                        c[aas[i]] += 1

                nof_reads = sum(
                    (self.__instance.occ[j] * self.__instance.y[j].value
                     for j in self.__instance.y))
                # if self.__verbosity > 0:
                #     print "Obj", res.Solution.Objective.__default_objective__.Value
                d['obj'].append(self.__instance.read_cov())
                d['nof_reads'].append(nof_reads)

            self.__instance.c.clear()
            self.__changed = False
            self.__enumeration = pd.DataFrame(d)

            # self.__rank()
            return self.__enumeration
        else:
            return self.__enumeration

    def solve_for_k_alleles(self, k, ks=1):
        """
            EXPERIMENTAL!

            generates a solution without the regularization term and only k selected alleles
        """
        if k < int(self.__instance.nof_loci.value) or k > int(
                self.__instance.nof_loci.value * self.__t_max_allele):
            raise Warning("k " + str(k) + " is out of range [" +
                          str(self.__instance.nof_loci.value) + "," +
                          str(self.__instance.nof_loci * self.__t_max_allele) +
                          "]")

        # copy the instance
        inst = self.__instance.clone()
        # set beta = 0  # because we do homozygosity calling manually
        getattr(inst, str(inst.beta)).set_value(float(0.0))

        inst.del_component("heterozygot_count")
        inst.del_component("reg1")
        inst.del_component("reg2")
        inst.del_component("reg3")
        # generate constraint which allows only k alleles to be selected
        expr1 = 0
        for j in inst.x:
            expr1 += inst.x[j]

        inst.c.add(expr1 == k)
        d = defaultdict(list)

        for _ in xrange(ks):
            inst.preprocess()
            try:
                res = self.__solver.solve(inst,
                                          options=self.__opts,
                                          tee=self.__verbosity)
            except:
                print(
                    "WARNING: Solver does not support multi-threading. Please change the config"
                    " file accordingly. Falling back to single-threading.")
                res = self.__solver.solve(inst,
                                          options={},
                                          tee=self.__verbosity)
            inst.solutions.load_from(res)

            if self.__verbosity > 0:
                res.write(num=1)

            if res.solver.termination_condition != TerminationCondition.optimal:
                print "Optimal solution hasn't been obtained. This is a terminal problem."  # TODO message, exit
                break

            selected = []
            expr = 0

            indices = []
            encountered_4digit = []
            for j in inst.x:
                if 0.99 <= inst.x[j].value <= 1.01:
                    exp_i = 0
                    selected.append(j)
                    exp_i += inst.x[j]
                    if self.__allele_to_4digit[j] in encountered_4digit:
                        continue

                    encountered_4digit.append(self.__allele_to_4digit[j])
                    for i_allele in self.__groups_4digit[
                            self.__allele_to_4digit[j]]:
                        if inst.x[i_allele].value <= 0:
                            exp_i += inst.x[i_allele]
                        indices.append(i_allele)
                    expr += (1 - exp_i)
            zero_indices = set([j for j in inst.x]).difference(set(indices))
            for j in zero_indices:
                expr += inst.x[j]

            inst.c.add(expr >= 1)

            if self.__verbosity > 0:
                print selected
            aas = [self.__allele_to_4digit[x].split('*')[0] for x in selected]
            c = dict.fromkeys(aas, 1)
            for i in xrange(len(aas)):
                if aas.count(aas[i]) < 2:
                    d[aas[i] + "1"].append(selected[i])
                    d[aas[i] + "2"].append(selected[i])
                else:
                    d[aas[i] + str(c[aas[i]])].append(selected[i])
                    c[aas[i]] += 1

            # print "Obj", res.Solution.Objective.__default_objective__.Value
            nof_reads = sum((inst.occ[j] * inst.y[j].value for j in inst.y))
            d['obj'].append(inst.read_cov())
            d['nof_reads'].append(nof_reads)

        return pd.DataFrame(d)

    def solve_fixed_typing(self, fixed_alleles):
        """
            EXPERIMENTAL!

            forces the solution to pick a 4-digit type from the provided alleles
        """
        k = len(set(fixed_alleles))
        if k < int(self.__instance.nof_loci.value) or k > int(
                self.__instance.nof_loci.value * self.__t_max_allele):
            raise Warning("k " + str(k) + " is out of range [" +
                          str(self.__instance.nof_loci.value) + "," +
                          str(self.__instance.nof_loci * self.__t_max_allele) +
                          "]")

        # copy the instance
        inst = self.__instance.clone()
        # set beta = 0 because we do homozygosity calling manually
        getattr(inst, str(inst.beta)).set_value(float(0.0))

        inst.del_component("heterozygot_count")
        inst.del_component("reg1")
        inst.del_component("reg2")
        inst.del_component("reg3")
        # generate constraint which allows only k alleles to be selected
        expr1 = 0
        for j in inst.x:
            expr1 += inst.x[j]
        inst.c.add(expr1 == k)

        # generate for each of the provided alleles the fixation constraint:
        for a in set(fixed_alleles):
            expr_f = 0
            print self.__groups_4digit
            for ids in self.__groups_4digit[a]:
                print ids
                expr_f += inst.x[ids]
            inst.c.add(expr_f == 1)

        d = defaultdict(list)

        inst.preprocess()
        try:
            res = self.__solver.solve(inst,
                                      options=self.__opts,
                                      tee=self.__verbosity)
        except:
            print(
                "WARNING: Solver does not support multi-threading. Please change the config"
                " file accordingly. Falling back to single-threading.")
            res = self.__solver.solve(inst, options={}, tee=self.__verbosity)
        inst.solutions.load_from(res)

        opt_ids = [j for j in inst.x if 0.99 <= inst.x[j].value <= 1.01]

        aas = [self.__allele_to_4digit[x].split('*')[0] for x in opt_ids]
        c = dict.fromkeys(aas, 1)
        for i in xrange(len(aas)):
            if aas.count(aas[i]) < 2:
                d[aas[i] + "1"].append(opt_ids[i])
                d[aas[i] + "2"].append(opt_ids[i])
            else:
                d[aas[i] + str(c[aas[i]])].append(opt_ids[i])
                c[aas[i]] += 1
        nof_reads = sum((inst.occ[j] * inst.y[j].value for j in inst.y))
        d['obj'].append(inst.read_cov())
        d['nof_reads'].append(nof_reads)

        return pd.DataFrame(d)

    def enumerate_allele_wise(self):
        """
            EXPERIMENTAL!

            fixes all but one allele and solves it again to investigate the influence of this
            particular allele on the objective value.
        """
        d = defaultdict(list)

        self.__instance.preprocess()
        try:
            res = self.__solver.solve(self.__instance,
                                      options=self.__opts,
                                      tee=self.__verbosity)
        except:
            print(
                "WARNING: Solver does not support multi-threading. Please change the config"
                " file accordingly. Falling back to single-threading.")
            res = self.__solver.solve(self.__instance,
                                      options={},
                                      tee=self.__verbosity)
        self.__instance.solutions.load_from(res)

        opt_ids = [
            j for j in self.__instance.x
            if 0.99 <= self.__instance.x[j].value <= 1.01
        ]

        aas = [self.__allele_to_4digit[x].split('*')[0] for x in opt_ids]
        c = dict.fromkeys(aas, 1)
        for i in xrange(len(aas)):
            if aas.count(aas[i]) < 2:
                d[aas[i] + "1"].append(opt_ids[i])
                d[aas[i] + "2"].append(opt_ids[i])
            else:
                d[aas[i] + str(c[aas[i]])].append(opt_ids[i])
                c[aas[i]] += 1
        nof_reads = sum((self.__instance.occ[j] * self.__instance.y[j].value
                         for j in self.__instance.y))
        d['obj'].append(self.__instance.read_cov())
        d['nof_reads'].append(nof_reads)
        d['discarded'].append(0)

        for j in opt_ids:
            if self.__verbosity > 0:
                self.__instance.c.pprint()
            self.__instance.c.clear()
            # fix all but j'th variable
            fix = 0
            for i in opt_ids:
                if i != j:
                    fix += (1 - self.__instance.x[i])
            self.__instance.c.add(fix == 0.0)

            # discard j'th allele and all its 4digit equivalent alleles form the next solution
            discard = 0
            for k in self.__groups_4digit[self.__allele_to_4digit[j]]:
                discard += self.__instance.x[k]
            self.__instance.c.add(discard == 0.0)

            # solve with new constraints
            self.__instance.preprocess()
            try:
                res = self.__solver.solve(
                    self.__instance,
                    tee=self.__verbosity)  # set tee=True for verbose solving
                self.__instance.solutions.load_from(res)
            except:
                print Warning("There is no replacement for allele " +
                              self.__allele_to_4digit[j])
                continue

            selected = [
                al for al in self.__instance.x
                if 0.99 <= self.__instance.x[al].value <= 1.01
            ]
            aas = [self.__allele_to_4digit[x].split('*')[0] for x in selected]
            c = dict.fromkeys(aas, 1)
            for q in xrange(len(aas)):
                if aas.count(aas[q]) < 2:
                    d[aas[q] + "1"].append(selected[q])
                    d[aas[q] + "2"].append(selected[q])
                else:
                    d[aas[q] + str(c[aas[q]])].append(selected[q])
                    c[aas[q]] += 1
            nof_reads = sum(
                (self.__instance.occ[h] * self.__instance.y[h].value
                 for h in self.__instance.y))
            d['obj'].append(self.__instance.read_cov())
            d['nof_reads'].append(nof_reads)
            d['discarded'].append(j)
        return pd.DataFrame(d)

    def solve_enforced_zygosity(self, gosity_dict):
        """
            EXPERIMENTAL!

            solves the ILP without regularization but with enforced homo-/heterozygosity for each locus
            @param gosity_dict: a dictionary with all loci as keys and value = number of alleles per locus (default is 2)
        """

        inst = self.__instance.clone()
        # set beta = 0 because we do homozygosity calling manually
        getattr(inst, str(inst.beta)).set_value(float(0.0))

        inst.del_component("heterozygot_count")
        inst.del_component("reg1")
        inst.del_component("reg2")
        inst.del_component("reg3")

        # now delete max_allele_constraint and reconstruct it again
        inst.del_component("max_allel_selection")
        for locus in inst.LociNames:
            cons = 0
            for a in inst.Loci[locus]:
                cons += inst.x[a]
            inst.c.add(cons <= gosity_dict.get(locus, 2))

        d = defaultdict(list)

        inst.preprocess()
        try:
            res = self.__solver.solve(inst,
                                      options=self.__opts,
                                      tee=self.__verbosity)
        except:
            print(
                "WARNING: Solver does not support multi-threading. Please change the config"
                " file accordingly. Falling back to single-threading.")
            res = self.__solver.solve(inst, options={}, tee=self.__verbosity)
        inst.solutions.load_from(res)

        selected = [al for al in inst.x if 0.99 <= inst.x[al].value <= 1.01]
        aas = [self.__allele_to_4digit[x].split('*')[0] for x in selected]
        c = dict.fromkeys(aas, 1)
        for q in xrange(len(aas)):
            if aas.count(aas[q]) < 2:
                d[aas[q] + "1"].append(selected[q])
                d[aas[q] + "2"].append(selected[q])
            else:
                d[aas[q] + str(c[aas[q]])].append(selected[q])
                c[aas[q]] += 1
        nof_reads = sum((inst.occ[h] * inst.y[h].value for h in inst.y))
        d['obj'].append(inst.read_cov())
        d['nof_reads'].append(nof_reads)
        return pd.DataFrame(d)
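The enumeration in solve() above works by adding a "no-good" cut after every
solve so the same binary selection cannot be returned twice. The helper below
is a standalone sketch of that cut for a generic Pyomo model; the names
model.x (a Binary Var) and model.c (a ConstraintList) mirror the class above,
but the helper itself is not part of the original code.

def add_no_good_cut(model):
    # Force at least one binary to flip relative to the incumbent solution.
    ones = [j for j in model.x if model.x[j].value and model.x[j].value > 0.5]
    zeros = [j for j in model.x if j not in ones]
    model.c.add(sum(1 - model.x[j] for j in ones)
                + sum(model.x[j] for j in zeros) >= 1)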
Example #36
0
    def _Q_opt(self,
               ThetaVals=None,
               solver="ef_ipopt",
               return_values=[],
               bootlist=None,
               calc_cov=False):
        """
        Set up all thetas as first stage Vars, return resulting theta
        values as well as the objective function value.

        """
        if (solver == "k_aug"):
            raise RuntimeError("k_aug no longer supported.")

        # (Bootstrap scenarios will use indirection through the bootlist)
        if bootlist is None:
            scen_names = ["Scenario{}".format(i) for i in self._numbers_list]
        else:
            scen_names = ["Scenario{}".format(i)\
                         for i in range(len(self._numbers_list))]

        # tree_model.CallbackModule = None
        outer_cb_data = dict()
        outer_cb_data["callback"] = self._instance_creation_callback
        if ThetaVals is not None:
            outer_cb_data["ThetaVals"] = ThetaVals
        if bootlist is not None:
            outer_cb_data["BootList"] = bootlist
        outer_cb_data["cb_data"] = self.callback_data  # None is OK
        outer_cb_data["theta_names"] = self.theta_names

        options = {"solver": "ipopt"}
        scenario_creator_options = {"cb_data": outer_cb_data}
        if use_mpisppy:
            ef = sputils.create_EF(
                scen_names,
                _experiment_instance_creation_callback,
                EF_name="_Q_opt",
                suppress_warnings=True,
                scenario_creator_kwargs=scenario_creator_options)
        else:
            ef = local_ef.create_EF(
                scen_names,
                _experiment_instance_creation_callback,
                EF_name="_Q_opt",
                suppress_warnings=True,
                scenario_creator_kwargs=scenario_creator_options)
        self.ef_instance = ef

        # Solve the extensive form with ipopt
        if solver == "ef_ipopt":

            if not calc_cov:
                # Do not calculate the reduced hessian

                solver = SolverFactory('ipopt')
                if self.solver_options is not None:
                    for key in self.solver_options:
                        solver.options[key] = self.solver_options[key]

                solve_result = solver.solve(ef, tee=self.tee)

            # The import error will be raised when we attempt to use
            # inv_reduced_hessian_barrier below.
            #
            #elif not asl_available:
            #    raise ImportError("parmest requires ASL to calculate the "
            #                      "covariance matrix with solver 'ipopt'")
            else:
                # parmest makes the fitted parameters stage 1 variables
                ind_vars = []
                for ndname, Var, solval in ef_nonants(ef):
                    ind_vars.append(Var)
                # calculate the reduced hessian
                solve_result, inv_red_hes = \
                    inverse_reduced_hessian.inv_reduced_hessian_barrier(
                        self.ef_instance,
                        independent_variables= ind_vars,
                        solver_options=self.solver_options,
                        tee=self.tee)

            if self.diagnostic_mode:
                print('    Solver termination condition = ',
                      str(solve_result.solver.termination_condition))

            # assume all first stage are thetas...
            thetavals = {}
            for ndname, Var, solval in ef_nonants(ef):
                # process the name
                # the scenarios are blocks, so strip the scenario name
                vname = Var.name[Var.name.find(".") + 1:]
                thetavals[vname] = solval

            objval = pyo.value(ef.EF_Obj)

            if calc_cov:
                # Calculate the covariance matrix

                # Extract number of data points considered
                n = len(self.callback_data)

                # Extract number of fitted parameters
                l = len(thetavals)

                # Assumption: Objective value is sum of squared errors
                sse = objval
                '''Calculate the covariance assuming the experimental observation
                errors are independent and follow a Gaussian distribution with
                constant variance.

                The formula used in parmest was verified against equations (7-5-15)
                and (7-5-16) in "Nonlinear Parameter Estimation", Y. Bard, 1974.

                This formula is also applicable if the objective is scaled by a
                constant; the constant cancels out. (The objective was scaled by
                1/n because it computes an expected value.)
                '''
                cov = 2 * sse / (n - l) * inv_red_hes
                cov = pd.DataFrame(cov,
                                   index=thetavals.keys(),
                                   columns=thetavals.keys())

            if len(return_values) > 0:
                var_values = []
                for exp_i in self.ef_instance.component_objects(
                        Block, descend_into=False):
                    vals = {}
                    for var in return_values:
                        exp_i_var = exp_i.find_component(str(var))
                        if exp_i_var is None:  # we might have a block such as _mpisppy_data
                            continue
                        temp = [pyo.value(_) for _ in exp_i_var.values()]
                        if len(temp) == 1:
                            vals[var] = temp[0]
                        else:
                            vals[var] = temp
                    if len(vals) > 0:
                        var_values.append(vals)
                var_values = pd.DataFrame(var_values)
                if calc_cov:
                    return objval, thetavals, var_values, cov
                else:
                    return objval, thetavals, var_values

            if calc_cov:

                return objval, thetavals, cov
            else:
                return objval, thetavals

        else:
            raise RuntimeError("Unknown solver in Q_Opt=" + solver)
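A small numerical sketch of the covariance step above, with illustrative
numbers and hypothetical parameter names: the objective value is assumed to
be an unscaled sum of squared errors over n observations with l fitted
parameters, and inv_red_hes stands in for the inverse reduced Hessian
returned by inv_reduced_hessian_barrier.

import numpy as np
import pandas as pd

sse = 4.2                             # objective value: sum of squared errors
n, l = 25, 2                          # data points and fitted parameters
inv_red_hes = np.array([[0.5, 0.1],
                        [0.1, 0.3]])  # inverse reduced Hessian at the optimum
cov = 2 * sse / (n - l) * inv_red_hes
cov = pd.DataFrame(cov, index=['theta1', 'theta2'], columns=['theta1', 'theta2'])
print(cov)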
Example #37
0
    model = createModel(number_of_EVs, number_of_Chargers, number_of_timeslot,
                        installed_chargers, installed_cost, arrival, depart,
                        TFC)
    """
    solve the model
    """
    solver = SolverFactory(SOLVER_NAME)

    if SOLVER_NAME == 'cplex':
        solver.options['timelimit'] = TIME_LIMIT
    elif SOLVER_NAME == 'glpk':
        solver.options['tmlim'] = TIME_LIMIT
    elif SOLVER_NAME == 'gurobi':
        solver.options['TimeLimit'] = TIME_LIMIT

    results = solver.solve(model)

    while True:
        if (results.solver.status
                == SolverStatus.ok) and (results.solver.termination_condition
                                         == TerminationCondition.optimal):
            #if the solver finds a feasible solution
            list_data = save_scenario(number_of_EVs, number_of_Chargers,
                                      arrival, depart, distance, demand,
                                      charge_power, installed_chargers,
                                      installed_cost, TFC, model, EV_samples,
                                      scenario, scenario_model)
            #check if there is an error in the data
            if list_data:
                break
            else:
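The if/elif block near the top of this example maps a single TIME_LIMIT onto
each solver's own option name. A dictionary-based sketch of the same mapping,
assuming SOLVER_NAME, TIME_LIMIT and the SolverFactory import as defined in
the example:

TIME_LIMIT_OPTION = {
    'cplex': 'timelimit',
    'glpk': 'tmlim',
    'gurobi': 'TimeLimit',
}
solver = SolverFactory(SOLVER_NAME)
if SOLVER_NAME in TIME_LIMIT_OPTION:
    solver.options[TIME_LIMIT_OPTION[SOLVER_NAME]] = TIME_LIMIT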
Example #38
0
instance = model.create('data_set2.dat')
instance.t.pprint()

# Discretize model using Backward Finite Difference method
#discretize = Finite_Difference_Transformation()
#disc_instance = discretize.apply(instance,nfe=20,scheme='BACKWARD')

# Discretize model using Orthogonal Collocation
discretize = Collocation_Discretization_Transformation()
disc_instance = discretize.apply(instance, nfe=8, ncp=5)

solver = 'ipopt'
opt = SolverFactory(solver)

results = opt.solve(disc_instance, tee=True)
disc_instance.load(results)

x1 = []
x1_meas = []
t = []
t_meas = []

print(sorted(disc_instance.t))

for i in sorted(disc_instance.MEAS_t):
    t_meas.append(i)
    x1_meas.append(value(disc_instance.x1_meas[i]))

for i in sorted(disc_instance.t):
    t.append(i)
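The Collocation_Discretization_Transformation class used above comes from an
older pyomo.dae API. In more recent Pyomo releases the same orthogonal
collocation discretization is usually applied in place through
TransformationFactory; a sketch, not a drop-in edit of this exact script:

from pyomo.environ import TransformationFactory

TransformationFactory('dae.collocation').apply_to(instance, nfe=8, ncp=5)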
Example #39
0
def max_heat(n, m, QH, QC, u, M):

    # Local copy of the instance.
    h = list([QH[i][u] for i in range(n)])
    c = list([QC[j][u] for j in range(m)])

    for i in range(n):
        if h[i] < epsilon:
            h[i] = 0

    for j in range(m):
        if c[j] < epsilon:
            c[j] = 0

    model = AbstractModel()

    model.n = Param(within=NonNegativeIntegers,
                    initialize=n)  # number of hot streams
    model.m = Param(within=NonNegativeIntegers,
                    initialize=m)  # number of cold streams

    model.H = RangeSet(0, model.n - 1)  # set of hot streams
    model.C = RangeSet(0, model.m - 1)  # set of cold streams

    model.M = Set(within=model.H * model.C, initialize=M)

    # Parameter: heat load of hot stream i in temperature interval t
    model.h = Param(model.H,
                    within=NonNegativeReals,
                    initialize=lambda model, i: h[i])

    # Parameter: heat load of cold stream j in temperature interval t
    model.c = Param(model.C,
                    within=NonNegativeReals,
                    initialize=lambda model, j: c[j])

    # Variable: heat transferred from i to j
    model.q = Var(model.H, model.C, within=NonNegativeReals)

    # Objective: maximize the total heat exchanged over the allowed matches
    def total_heat_rule(model):
        return sum(model.q[i, j] for (i, j) in model.M)

    model.total_heat = Objective(rule=total_heat_rule, sense=maximize)

    #Constraint: heat conservation of hot streams
    def hot_supply_rule(model, i):
        return sum(model.q[i, j] for j in model.C) <= model.h[i]

    model.hot_supply_constraint = Constraint(model.H, rule=hot_supply_rule)

    #Constraint: heat conservation of cold streams
    def cold_demand_rule(model, j):
        return sum(model.q[i, j] for i in model.H) <= model.c[j]

    model.cold_demand_constraint = Constraint(model.C, rule=cold_demand_rule)

    solver = 'cplex'
    opt = SolverFactory(solver)
    opt.options['threads'] = 1
    LP = model.create_instance()
    results = opt.solve(LP)

    q = [[LP.q[i, j].value for j in range(m)] for i in range(n)]

    return q
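A minimal usage sketch for max_heat, mirroring the illustrative two-hot /
two-cold data used earlier: it assumes the Pyomo imports and module-level
epsilon used by the function, plus a CPLEX installation, and M restricts
which (hot, cold) matches count toward the maximized heat.

QH = [[60.0], [40.0]]         # QH[i][u]: heat load of hot stream i in interval u
QC = [[50.0], [45.0]]         # QC[j][u]: heat load of cold stream j in interval u
M = [(0, 0), (1, 1)]          # only the H0-C0 and H1-C1 matches enter the objective
q = max_heat(2, 2, QH, QC, 0, M)
print(q)                      # q[i][j]: heat transferred from hot stream i to cold stream j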
Example #40
0
def sim(days):
    
    instance = model.create_instance('../Model_setup/CA_data_file/data.dat')
    
    opt = SolverFactory("cplex")
    H = instance.HorizonHours
    D = 2
    K=range(1,H+1)
    
    
    #Space to store results
    mwh_1=[]
    mwh_2=[]
    mwh_3=[]
    on=[]
    switch=[]
    srsv=[]
    nrsv=[]
    solar=[]
    wind=[]
    flow=[]
    Generator=[]
    
    df_generators = pd.read_csv('../Model_setup/CA_data_file/generators.csv',header=0)
    
    #max here can be (1,365)
    for day in range(1,days):
        
         #load time series data
        for z in instance.zones:
            
            instance.GasPrice[z] = instance.SimGasPrice[z,day]
            
            for i in K:
                instance.HorizonDemand[z,i] = instance.SimDemand[z,(day-1)*24+i]
                instance.HorizonWind[z,i] = instance.SimWind[z,(day-1)*24+i]
                instance.HorizonSolar[z,i] = instance.SimSolar[z,(day-1)*24+i]
                instance.HorizonMustRun[z,i] = instance.SimMustRun[z,(day-1)*24+i]
        
        for d in range(1,D+1):
            instance.HorizonPath66_imports[d] = instance.SimPath66_imports[day-1+d]
            instance.HorizonPath46_SCE_imports[d] = instance.SimPath46_SCE_imports[day-1+d]
            instance.HorizonPath61_imports[d] = instance.SimPath61_imports[day-1+d]
            instance.HorizonPath42_imports[d] = instance.SimPath42_imports[day-1+d]
            instance.HorizonPath24_imports[d] = instance.SimPath24_imports[day-1+d]
            instance.HorizonPath45_imports[d] = instance.SimPath45_imports[day-1+d]
            instance.HorizonPGE_valley_hydro[d] = instance.SimPGE_valley_hydro[day-1+d]
            instance.HorizonSCE_hydro[d] = instance.SimSCE_hydro[day-1+d]
            
        for i in K:
            instance.HorizonReserves[i] = instance.SimReserves[(day-1)*24+i] 
            instance.HorizonPath42_exports[i] = instance.SimPath42_exports[(day-1)*24+i] 
            instance.HorizonPath24_exports[i] = instance.SimPath24_exports[(day-1)*24+i] 
            instance.HorizonPath45_exports[i] = instance.SimPath45_exports[(day-1)*24+i]             
            instance.HorizonPath66_exports[i] = instance.SimPath66_exports[(day-1)*24+i]  
            
            instance.HorizonPath46_SCE_minflow[i] = instance.SimPath46_SCE_imports_minflow[(day-1)*24+i]             
            instance.HorizonPath66_minflow[i] = instance.SimPath66_imports_minflow[(day-1)*24+i] 
            instance.HorizonPath42_minflow[i] = instance.SimPath42_imports_minflow[(day-1)*24+i] 
            instance.HorizonPath61_minflow[i] = instance.SimPath61_imports_minflow[(day-1)*24+i]  
            instance.HorizonPGE_valley_hydro_minflow[i] = instance.SimPGE_valley_hydro_minflow[(day-1)*24+i]
            instance.HorizonSCE_hydro_minflow[i] = instance.SimSCE_hydro_minflow[(day-1)*24+i]
    #            
        CAISO_result = opt.solve(instance)
        instance.solutions.load_from(CAISO_result)   
     
        #The following section is for storing and sorting results
        for v in instance.component_objects(Var, active=True):
            varobject = getattr(instance, str(v))
            a=str(v)
            if a=='mwh_1':
             
             for index in varobject:
                 
               name = index[0]   
               
               g = df_generators[df_generators['name']==name]
               seg1 = g['seg1'].values
               seg1 = seg1[0]
               
               if index[1]>0 and index[1]<25:
                if index[0] in instance.Zone1Generators:
                    
                    gas_price = instance.GasPrice['PGE_valley'].value
                    
                    if index[0] in instance.Gas:
                        marginal_cost = seg1*gas_price
                        mwh_1.append((index[0],index[1]+((day-1)*24),varobject[index].value,'PGE_valley','Gas',marginal_cost))
                    elif index[0] in instance.Coal:
                        marginal_cost = seg1*2
                        mwh_1.append((index[0],index[1]+((day-1)*24),varobject[index].value,'PGE_valley','Coal',marginal_cost))
                    elif index[0] in instance.Oil:
                        marginal_cost = seg1*20
                        mwh_1.append((index[0],index[1]+((day-1)*24),varobject[index].value,'PGE_valley','Oil',marginal_cost))
                    elif index[0] in instance.PSH:
                        marginal_cost = 10
                        mwh_1.append((index[0],index[1]+((day-1)*24),varobject[index].value,'PGE_valley','PSH',marginal_cost))
                    elif index[0] in instance.Slack:
                        marginal_cost = 700
                        mwh_1.append((index[0],index[1]+((day-1)*24),varobject[index].value,'PGE_valley','Slack',marginal_cost))               
                    elif index[0] in instance.Hydro:
                        marginal_cost = 0
                        mwh_1.append((index[0],index[1]+((day-1)*24),varobject[index].value,'PGE_valley','Hydro',marginal_cost))
                        
                        
                elif index[0] in instance.Zone2Generators:
                    
                    gas_price = instance.GasPrice['PGE_bay'].value
                    
                    if index[0] in instance.Gas:
                        marginal_cost = seg1*gas_price
                        mwh_1.append((index[0],index[1]+((day-1)*24),varobject[index].value,'PGE_bay','Gas',marginal_cost))
                    elif index[0] in instance.Coal:
                        marginal_cost = seg1*2
                        mwh_1.append((index[0],index[1]+((day-1)*24),varobject[index].value,'PGE_bay','Coal',marginal_cost))
                    elif index[0] in instance.Oil:
                        marginal_cost = seg1*20
                        mwh_1.append((index[0],index[1]+((day-1)*24),varobject[index].value,'PGE_bay','Oil',marginal_cost))
                    elif index[0] in instance.PSH:
                        marginal_cost = 10
                        mwh_1.append((index[0],index[1]+((day-1)*24),varobject[index].value,'PGE_bay','PSH',marginal_cost))
                    elif index[0] in instance.Slack:
                        marginal_cost = 700
                        mwh_1.append((index[0],index[1]+((day-1)*24),varobject[index].value,'PGE_bay','Slack',marginal_cost))  
        
                elif index[0] in instance.Zone3Generators:
                    
                    gas_price = instance.GasPrice['SCE'].value
                    
                    if index[0] in instance.Gas:
                        marginal_cost = seg1*gas_price
                        mwh_1.append((index[0],index[1]+((day-1)*24),varobject[index].value,'SCE','Gas',marginal_cost))
                    elif index[0] in instance.Coal:
                        marginal_cost = seg1*2
                        mwh_1.append((index[0],index[1]+((day-1)*24),varobject[index].value,'SCE','Coal',marginal_cost))
                    elif index[0] in instance.Oil:
                        marginal_cost = seg1*20
                        mwh_1.append((index[0],index[1]+((day-1)*24),varobject[index].value,'SCE','Oil',marginal_cost))
                    elif index[0] in instance.PSH:
                        marginal_cost = 10
                        mwh_1.append((index[0],index[1]+((day-1)*24),varobject[index].value,'SCE','PSH',marginal_cost))
                    elif index[0] in instance.Slack:
                        marginal_cost = 700
                        mwh_1.append((index[0],index[1]+((day-1)*24),varobject[index].value,'SCE','Slack',marginal_cost))  
                    elif index[0] in instance.Hydro:
                        marginal_cost = 0
                        mwh_1.append((index[0],index[1]+((day-1)*24),varobject[index].value,'SCE','Hydro',marginal_cost))  
            
                elif index[0] in instance.Zone4Generators:
                    
                    gas_price = instance.GasPrice['SDGE'].value
                    
                    if index[0] in instance.Gas:
                        marginal_cost = seg1*gas_price
                        mwh_1.append((index[0],index[1]+((day-1)*24),varobject[index].value,'SDGE','Gas',marginal_cost))
                    elif index[0] in instance.Coal:
                        marginal_cost = seg1*2
                        mwh_1.append((index[0],index[1]+((day-1)*24),varobject[index].value,'SDGE','Coal',marginal_cost))
                    elif index[0] in instance.Oil:
                        marginal_cost = seg1*20
                        mwh_1.append((index[0],index[1]+((day-1)*24),varobject[index].value,'SDGE','Oil',marginal_cost))
                    elif index[0] in instance.PSH:
                        marginal_cost = 10
                        mwh_1.append((index[0],index[1]+((day-1)*24),varobject[index].value,'SDGE','PSH',marginal_cost))
                    elif index[0] in instance.Slack:
                        marginal_cost = 700
                        mwh_1.append((index[0],index[1]+((day-1)*24),varobject[index].value,'SDGE','Slack',marginal_cost))  
        
    
                elif index[0] in instance.WECCImportsSDGE:
                    
                    gas_price = instance.GasPrice['SDGE'].value
                    marginal_cost = 14.5+2.76*gas_price
                    mwh_1.append((index[0],index[1]+((day-1)*24),varobject[index].value,'SDGE','imports',marginal_cost))
    
    
                elif index[0] in instance.WECCImportsSCE:
                    
                    gas_price = instance.GasPrice['SCE'].value
                    marginal_cost = 14.5+2.76*gas_price
                    mwh_1.append((index[0],index[1]+((day-1)*24),varobject[index].value,'SCE','imports',marginal_cost))
    
                
                elif index[0] in instance.WECCImportsPGEV:
                    
                    marginal_cost = 5
                    mwh_1.append((index[0],index[1]+((day-1)*24),varobject[index].value,'PGE_valley','imports',marginal_cost))
                      
    
            if a=='mwh_2':
    
             for index in varobject:
                 
               name = index[0]
               g = df_generators[df_generators['name']==name]
               seg2 = g['seg2'].values
               seg2 = seg2[0]
                 
               if 0 < index[1] < 25:
                if index[0] in instance.Zone1Generators:
                    
                    gas_price = instance.GasPrice['PGE_valley'].value
                    
                    if index[0] in instance.Gas:
                        marginal_cost = seg2*gas_price
                        mwh_2.append((index[0],index[1]+((day-1)*24),varobject[index].value,'PGE_valley','Gas',marginal_cost))
                    elif index[0] in instance.Coal:
                        marginal_cost = seg2*2
                        mwh_2.append((index[0],index[1]+((day-1)*24),varobject[index].value,'PGE_valley','Coal',marginal_cost))
                    elif index[0] in instance.Oil:
                        marginal_cost = seg2*20
                        mwh_2.append((index[0],index[1]+((day-1)*24),varobject[index].value,'PGE_valley','Oil',marginal_cost))
                    elif index[0] in instance.PSH:
                        marginal_cost = 10
                        mwh_2.append((index[0],index[1]+((day-1)*24),varobject[index].value,'PGE_valley','PSH',marginal_cost))
                    elif index[0] in instance.Slack:
                        marginal_cost = 700
                        mwh_2.append((index[0],index[1]+((day-1)*24),varobject[index].value,'PGE_valley','Slack',marginal_cost))               
                    elif index[0] in instance.Hydro:
                        marginal_cost = 0
                        mwh_2.append((index[0],index[1]+((day-1)*24),varobject[index].value,'PGE_valley','Hydro',marginal_cost))
                        
                        
                elif index[0] in instance.Zone2Generators:
                    
                    gas_price = instance.GasPrice['PGE_bay'].value
                    
                    if index[0] in instance.Gas:
                        marginal_cost = seg2*gas_price
                        mwh_2.append((index[0],index[1]+((day-1)*24),varobject[index].value,'PGE_bay','Gas',marginal_cost))
                    elif index[0] in instance.Coal:
                        marginal_cost = seg2*2
                        mwh_2.append((index[0],index[1]+((day-1)*24),varobject[index].value,'PGE_bay','Coal',marginal_cost))
                    elif index[0] in instance.Oil:
                        marginal_cost = seg2*20
                        mwh_2.append((index[0],index[1]+((day-1)*24),varobject[index].value,'PGE_bay','Oil',marginal_cost))
                    elif index[0] in instance.PSH:
                        marginal_cost = 10
                        mwh_2.append((index[0],index[1]+((day-1)*24),varobject[index].value,'PGE_bay','PSH',marginal_cost))
                    elif index[0] in instance.Slack:
                        marginal_cost = 700
                        mwh_2.append((index[0],index[1]+((day-1)*24),varobject[index].value,'PGE_bay','Slack',marginal_cost))  
        
                elif index[0] in instance.Zone3Generators:
                    
                    gas_price = instance.GasPrice['SCE'].value
                    
                    if index[0] in instance.Gas:
                        marginal_cost = seg2*gas_price
                        mwh_2.append((index[0],index[1]+((day-1)*24),varobject[index].value,'SCE','Gas',marginal_cost))
                    elif index[0] in instance.Coal:
                        marginal_cost = seg2*2
                        mwh_2.append((index[0],index[1]+((day-1)*24),varobject[index].value,'SCE','Coal',marginal_cost))
                    elif index[0] in instance.Oil:
                        marginal_cost = seg2*20
                        mwh_2.append((index[0],index[1]+((day-1)*24),varobject[index].value,'SCE','Oil',marginal_cost))
                    elif index[0] in instance.PSH:
                        marginal_cost = 10
                        mwh_2.append((index[0],index[1]+((day-1)*24),varobject[index].value,'SCE','PSH',marginal_cost))
                    elif index[0] in instance.Slack:
                        marginal_cost = 700
                        mwh_2.append((index[0],index[1]+((day-1)*24),varobject[index].value,'SCE','Slack',marginal_cost))  
                    elif index[0] in instance.Hydro:
                        marginal_cost = 0
                        mwh_2.append((index[0],index[1]+((day-1)*24),varobject[index].value,'SCE','Hydro',marginal_cost))  
            
                elif index[0] in instance.Zone4Generators:
                    
                    gas_price = instance.GasPrice['SDGE'].value
                    
                    if index[0] in instance.Gas:
                        marginal_cost = seg2*gas_price
                        mwh_2.append((index[0],index[1]+((day-1)*24),varobject[index].value,'SDGE','Gas',marginal_cost))
                    elif index[0] in instance.Coal:
                        marginal_cost = seg2*2
                        mwh_2.append((index[0],index[1]+((day-1)*24),varobject[index].value,'SDGE','Coal',marginal_cost))
                    elif index[0] in instance.Oil:
                        marginal_cost = seg2*20
                        mwh_2.append((index[0],index[1]+((day-1)*24),varobject[index].value,'SDGE','Oil',marginal_cost))
                    elif index[0] in instance.PSH:
                        marginal_cost = 10
                        mwh_2.append((index[0],index[1]+((day-1)*24),varobject[index].value,'SDGE','PSH',marginal_cost))
                    elif index[0] in instance.Slack:
                        marginal_cost = 700
                        mwh_2.append((index[0],index[1]+((day-1)*24),varobject[index].value,'SDGE','Slack',marginal_cost))  
        
    
                elif index[0] in instance.WECCImportsSDGE:
                    
                    gas_price = instance.GasPrice['SDGE'].value
                    marginal_cost = 14.5+2.76*gas_price
                    mwh_2.append((index[0],index[1]+((day-1)*24),varobject[index].value,'SDGE','imports',marginal_cost))
    
    
                elif index[0] in instance.WECCImportsSCE:
                    
                    gas_price = instance.GasPrice['SCE'].value
                    marginal_cost = 14.5+2.76*gas_price
                    mwh_2.append((index[0],index[1]+((day-1)*24),varobject[index].value,'SCE','imports',marginal_cost))
    
                
                elif index[0] in instance.WECCImportsPGEV:
                    
                    marginal_cost = 5
                    mwh_2.append((index[0],index[1]+((day-1)*24),varobject[index].value,'PGE_valley','imports',marginal_cost))
     
            
            if a=='mwh_3':
               
             for index in varobject:
                 
               name = index[0]
               g = df_generators[df_generators['name']==name]
               seg3 = g['seg3'].values
               seg3 = seg3[0]
                 
               if 0 < index[1] < 25:
                if index[0] in instance.Zone1Generators:
                    
                    gas_price = instance.GasPrice['PGE_valley'].value
                    
                    if index[0] in instance.Gas:
                        marginal_cost = seg3*gas_price
                        mwh_3.append((index[0],index[1]+((day-1)*24),varobject[index].value,'PGE_valley','Gas',marginal_cost))
                    elif index[0] in instance.Coal:
                        marginal_cost = seg3*2
                        mwh_3.append((index[0],index[1]+((day-1)*24),varobject[index].value,'PGE_valley','Coal',marginal_cost))
                    elif index[0] in instance.Oil:
                        marginal_cost = seg3*20
                        mwh_3.append((index[0],index[1]+((day-1)*24),varobject[index].value,'PGE_valley','Oil',marginal_cost))
                    elif index[0] in instance.PSH:
                        marginal_cost = 10
                        mwh_3.append((index[0],index[1]+((day-1)*24),varobject[index].value,'PGE_valley','PSH',marginal_cost))
                    elif index[0] in instance.Slack:
                        marginal_cost = 700
                        mwh_3.append((index[0],index[1]+((day-1)*24),varobject[index].value,'PGE_valley','Slack',marginal_cost))               
                    elif index[0] in instance.Hydro:
                        marginal_cost = 0
                        mwh_3.append((index[0],index[1]+((day-1)*24),varobject[index].value,'PGE_valley','Hydro',marginal_cost))
                        
                        
                elif index[0] in instance.Zone2Generators:
                    
                    gas_price = instance.GasPrice['PGE_bay'].value
                    
                    if index[0] in instance.Gas:
                        marginal_cost = seg3*gas_price
                        mwh_3.append((index[0],index[1]+((day-1)*24),varobject[index].value,'PGE_bay','Gas',marginal_cost))
                    elif index[0] in instance.Coal:
                        marginal_cost = seg3*2
                        mwh_3.append((index[0],index[1]+((day-1)*24),varobject[index].value,'PGE_bay','Coal',marginal_cost))
                    elif index[0] in instance.Oil:
                        marginal_cost = seg3*20
                        mwh_3.append((index[0],index[1]+((day-1)*24),varobject[index].value,'PGE_bay','Oil',marginal_cost))
                    elif index[0] in instance.PSH:
                        marginal_cost = 10
                        mwh_3.append((index[0],index[1]+((day-1)*24),varobject[index].value,'PGE_bay','PSH',marginal_cost))
                    elif index[0] in instance.Slack:
                        marginal_cost = 700
                        mwh_3.append((index[0],index[1]+((day-1)*24),varobject[index].value,'PGE_bay','Slack',marginal_cost))  
        
                elif index[0] in instance.Zone3Generators:
                    
                    gas_price = instance.GasPrice['SCE'].value
                    
                    if index[0] in instance.Gas:
                        marginal_cost = seg3*gas_price
                        mwh_3.append((index[0],index[1]+((day-1)*24),varobject[index].value,'SCE','Gas',marginal_cost))
                    elif index[0] in instance.Coal:
                        marginal_cost = seg3*2
                        mwh_3.append((index[0],index[1]+((day-1)*24),varobject[index].value,'SCE','Coal',marginal_cost))
                    elif index[0] in instance.Oil:
                        marginal_cost = seg3*20
                        mwh_3.append((index[0],index[1]+((day-1)*24),varobject[index].value,'SCE','Oil',marginal_cost))
                    elif index[0] in instance.PSH:
                        marginal_cost = 10
                        mwh_3.append((index[0],index[1]+((day-1)*24),varobject[index].value,'SCE','PSH',marginal_cost))
                    elif index[0] in instance.Slack:
                        marginal_cost = 700
                        mwh_3.append((index[0],index[1]+((day-1)*24),varobject[index].value,'SCE','Slack',marginal_cost))  
                    elif index[0] in instance.Hydro:
                        marginal_cost = 0
                        mwh_3.append((index[0],index[1]+((day-1)*24),varobject[index].value,'SCE','Hydro',marginal_cost))  
            
                elif index[0] in instance.Zone4Generators:
                    
                    gas_price = instance.GasPrice['SDGE'].value
                    
                    if index[0] in instance.Gas:
                        marginal_cost = seg3*gas_price
                        mwh_3.append((index[0],index[1]+((day-1)*24),varobject[index].value,'SDGE','Gas',marginal_cost))
                    elif index[0] in instance.Coal:
                        marginal_cost = seg3*2
                        mwh_3.append((index[0],index[1]+((day-1)*24),varobject[index].value,'SDGE','Coal',marginal_cost))
                    elif index[0] in instance.Oil:
                        marginal_cost = seg3*20
                        mwh_3.append((index[0],index[1]+((day-1)*24),varobject[index].value,'SDGE','Oil',marginal_cost))
                    elif index[0] in instance.PSH:
                        marginal_cost = 10
                        mwh_3.append((index[0],index[1]+((day-1)*24),varobject[index].value,'SDGE','PSH',marginal_cost))
                    elif index[0] in instance.Slack:
                        marginal_cost = 700
                        mwh_3.append((index[0],index[1]+((day-1)*24),varobject[index].value,'SDGE','Slack',marginal_cost))  
        
    
                elif index[0] in instance.WECCImportsSDGE:
                    
                    gas_price = instance.GasPrice['SDGE'].value
                    marginal_cost = 14.5+2.76*gas_price
                    mwh_3.append((index[0],index[1]+((day-1)*24),varobject[index].value,'SDGE','imports',marginal_cost))
    
    
                elif index[0] in instance.WECCImportsSCE:
                    
                    gas_price = instance.GasPrice['SCE'].value
                    marginal_cost = 14.5+2.76*gas_price
                    mwh_3.append((index[0],index[1]+((day-1)*24),varobject[index].value,'SCE','imports',marginal_cost))
    
                
                elif index[0] in instance.WECCImportsPGEV:
                    
                    marginal_cost = 5
                    mwh_3.append((index[0],index[1]+((day-1)*24),varobject[index].value,'PGE_valley','imports',marginal_cost))
    
              
            if a=='on':
                
             for index in varobject:
               if 0 < index[1] < 25:
                if index[0] in instance.Zone1Generators:
                 on.append((index[0],index[1]+((day-1)*24),varobject[index].value,'PGE_valley'))
                elif index[0] in instance.Zone2Generators:
                 on.append((index[0],index[1]+((day-1)*24),varobject[index].value,'PGE_bay'))
                elif index[0] in instance.Zone3Generators:
                 on.append((index[0],index[1]+((day-1)*24),varobject[index].value,'SCE'))
                elif index[0] in instance.Zone4Generators:
                 on.append((index[0],index[1]+((day-1)*24),varobject[index].value,'SDGE'))     
          
             
            if a=='switch':
            
             for index in varobject:
               if 0 < index[1] < 25:
                if index[0] in instance.Zone1Generators:
                 switch.append((index[0],index[1]+((day-1)*24),varobject[index].value,'PGE_valley'))
                elif index[0] in instance.Zone2Generators:
                 switch.append((index[0],index[1]+((day-1)*24),varobject[index].value,'PGE_bay'))
                elif index[0] in instance.Zone3Generators:
                 switch.append((index[0],index[1]+((day-1)*24),varobject[index].value,'SCE'))
                elif index[0] in instance.Zone4Generators:
                 switch.append((index[0],index[1]+((day-1)*24),varobject[index].value,'SDGE'))    
        
             
            if a=='srsv':
            
             for index in varobject:
               if 0 < index[1] < 25:
                if index[0] in instance.Zone1Generators:
                 srsv.append((index[0],index[1]+((day-1)*24),varobject[index].value,'PGE_valley'))
                elif index[0] in instance.Zone2Generators:
                 srsv.append((index[0],index[1]+((day-1)*24),varobject[index].value,'PGE_bay'))
                elif index[0] in instance.Zone3Generators:
                 srsv.append((index[0],index[1]+((day-1)*24),varobject[index].value,'SCE'))
                elif index[0] in instance.Zone4Generators:
                 srsv.append((index[0],index[1]+((day-1)*24),varobject[index].value,'SDGE'))  
        
             
            if a=='nrsv':
           
             for index in varobject:
               if 0 < index[1] < 25:
                if index[0] in instance.Zone1Generators:
                 nrsv.append((index[0],index[1]+((day-1)*24),varobject[index].value,'PGE_valley'))
                elif index[0] in instance.Zone2Generators:
                 nrsv.append((index[0],index[1]+((day-1)*24),varobject[index].value,'PGE_bay'))
                elif index[0] in instance.Zone3Generators:
                 nrsv.append((index[0],index[1]+((day-1)*24),varobject[index].value,'SCE'))
                elif index[0] in instance.Zone4Generators:
                 nrsv.append((index[0],index[1]+((day-1)*24),varobject[index].value,'SDGE'))
             
             
            if a=='solar':
               
             for index in varobject:
               if 0 < index[1] < 25:
                solar.append((index[0],index[1]+((day-1)*24),varobject[index].value))   
             
              
            if a=='wind':
               
             for index in varobject:
               if 0 < index[1] < 25:
                wind.append((index[0],index[1]+((day-1)*24),varobject[index].value))  
                 
            if a=='flow':
               
             for index in varobject:
               if 0 < index[2] < 25:
                flow.append((index[0],index[1],index[2]+((day-1)*24),varobject[index].value))   
             
        
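            # Fix the hour-0 values for the next daily solve to the hour-H results
            # from this solve, zeroing out tiny negative solver round-off values.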
            for j in instance.Generators:
                if instance.on[j,H] == 1:
                    instance.on[j,0] = 1
                else: 
                    instance.on[j,0] = 0
                instance.on[j,0].fixed = True
                           
                if instance.mwh_1[j,H].value <=0 and instance.mwh_1[j,H].value>= -0.0001:
                    newval_1=0
                else:
                    newval_1=instance.mwh_1[j,H].value
                instance.mwh_1[j,0] = newval_1
                instance.mwh_1[j,0].fixed = True
                              
                if instance.mwh_2[j,H].value <=0 and instance.mwh_2[j,H].value>= -0.0001:
                    newval=0
                else:
                    newval=instance.mwh_2[j,H].value
                                         
                if instance.mwh_3[j,H].value <=0 and instance.mwh_3[j,H].value>= -0.0001:
                    newval2=0
                else:
                    newval2=instance.mwh_3[j,H].value
                                          
                                          
                instance.mwh_2[j,0] = newval
                instance.mwh_2[j,0].fixed = True
                instance.mwh_3[j,0] = newval2
                instance.mwh_3[j,0].fixed = True 
                if instance.switch[j,H] == 1:
                    instance.switch[j,0] = 1
                else:
                    instance.switch[j,0] = 0
                instance.switch[j,0].fixed = True
              
                if instance.srsv[j,H].value <=0 and instance.srsv[j,H].value>= -0.0001:
                    newval_srsv=0
                else:
                    newval_srsv=instance.srsv[j,H].value
                instance.srsv[j,0] = newval_srsv 
                instance.srsv[j,0].fixed = True        
        
                if instance.nrsv[j,H].value <=0 and instance.nrsv[j,H].value>= -0.0001:
                    newval_nrsv=0
                else:
                    newval_nrsv=instance.nrsv[j,H].value
                instance.nrsv[j,0] = newval_nrsv 
                instance.nrsv[j,0].fixed = True      
                
        print(day)
                     
    mwh_1_pd=pd.DataFrame(mwh_1,columns=('Generator','Time','Value','Zones','Type','$/MWh'))
    mwh_2_pd=pd.DataFrame(mwh_2,columns=('Generator','Time','Value','Zones','Type','$/MWh'))
    mwh_3_pd=pd.DataFrame(mwh_3,columns=('Generator','Time','Value','Zones','Type','$/MWh'))
    on_pd=pd.DataFrame(on,columns=('Generator','Time','Value','Zones'))
    switch_pd=pd.DataFrame(switch,columns=('Generator','Time','Value','Zones'))
    srsv_pd=pd.DataFrame(srsv,columns=('Generator','Time','Value','Zones'))
    nrsv_pd=pd.DataFrame(nrsv,columns=('Generator','Time','Value','Zones'))
    solar_pd=pd.DataFrame(solar,columns=('Zone','Time','Value'))
    wind_pd=pd.DataFrame(wind,columns=('Zone','Time','Value'))
    flow_pd=pd.DataFrame(flow,columns=('Source','Sink','Time','Value'))
    
    flow_pd.to_csv('CAISO/flow.csv')
    mwh_1_pd.to_csv('CAISO/mwh_1.csv')
    mwh_2_pd.to_csv('CAISO/mwh_2.csv')
    mwh_3_pd.to_csv('CAISO/mwh_3.csv')
    on_pd.to_csv('CAISO/on.csv')
    switch_pd.to_csv('CAISO/switch.csv')
    srsv_pd.to_csv('CAISO/srsv.csv')
    nrsv_pd.to_csv('CAISO/nrsv.csv')
    solar_pd.to_csv('CAISO/solar_out.csv')
    wind_pd.to_csv('CAISO/wind_out.csv')
    
    return None
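
# Illustrative sketch (not part of the original model): the generator branches
# above differ only in the zone label and the fuel's marginal-cost rule, so the
# fuel rules could be collected in a single hypothetical helper like this one
# (the import branches use their own formulas and are not covered here).
def _marginal_cost_sketch(fuel, seg, gas_price):
    """Marginal-cost rules ($/MWh) mirrored from the branches above."""
    return {
        'Gas': seg * gas_price,
        'Coal': seg * 2,
        'Oil': seg * 20,
        'PSH': 10,
        'Slack': 700,
        'Hydro': 0,
    }[fuel]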
Example #41
0
import pyomo.environ as pyo
from pyomo.opt import SolverFactory
"""
max 16a + 30b + 50c
s.t.
4a + 5b + 8c <= 112
2a + 4b + 5c <= 160
a + 2b + 3c <= 48

a, b, c >= 0
"""
opt = SolverFactory('glpk')

model = pyo.ConcreteModel()
model.a = pyo.Var(within=pyo.NonNegativeReals)
model.b = pyo.Var(within=pyo.NonNegativeReals)
model.c = pyo.Var(within=pyo.NonNegativeReals)
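# The LP in the docstring is a maximization; it is written here as minimizing
# the negated objective because pyo.Objective defaults to sense=minimize.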
model.obj = pyo.Objective(expr=-(16 * model.a + 30 * model.b + 50 * model.c))
model.con1 = pyo.Constraint(
    expr=4 * model.a + 5 * model.b + 8 * model.c <= 112)
model.con2 = pyo.Constraint(
    expr=2 * model.a + 4 * model.b + 5 * model.c <= 160)
model.con3 = pyo.Constraint(expr=model.a + 2 * model.b + 3 * model.c <= 48)

results = opt.solve(model)
model.display()
print('***** a = {} *****'.format(pyo.value(model.a)))
print('***** b = {} *****'.format(pyo.value(model.b)))
print('***** c = {} *****'.format(pyo.value(model.c)))
Example #42
0
        for name,suffix in active_import_suffix_generator(model):
            six.print_("%10s" % (suffix.get(model.x[i])),end='')
        six.print_("")
    for i in model.s:
        six.print_(model.con[i].name+"\t",end='')
        for name,suffix in active_import_suffix_generator(model):
            six.print_("%10s" % (suffix.get(model.con[i])),end='')
        six.print_("")
    six.print_(model.obj.name+"\t",end='')
    for name,suffix in active_import_suffix_generator(model):
        six.print_("%10s" % (suffix.get(model.obj)),end='')
    print("")
    print("")

print("")
print("Suffixes Before Solve:")
print_model_suffixes(model)

### Send the model to gurobi_ampl and collect the solution
# The solver plugin will scan the model for all active suffixes
# valid for importing, which it will store into the results object
results = opt.solve(model,
                    keepfiles=keepfiles,
                    tee=stream_solver)
###

print("")
print("Suffixes After Solve:")
print_model_suffixes(model)

Example #43
0
def sensitivity(dat, techs):
    # This function performs break-even analysis for the technologies specified
    # in the argument techs. It uses Pyomo suffixes and prints the break-even
    # cost to the screen. Note that the Pyomo suffixes sometimes return
    # anomalous values, and that's why I created another function,
    # sensitivity_api(), which uses the Python API for CPLEX.
    from temoa_model import temoa_create_model
    model = temoa_create_model()

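    # Declare import suffixes so duals, reduced costs, slacks, and the
    # lrc/urc values can be read back from the solver after the solve.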
    model.dual = Suffix(direction=Suffix.IMPORT)
    model.rc = Suffix(direction=Suffix.IMPORT)
    model.slack = Suffix(direction=Suffix.IMPORT)
    model.lrc = Suffix(direction=Suffix.IMPORT)
    model.urc = Suffix(direction=Suffix.IMPORT)

    data = DataPortal(model=model)
    for d in dat:
        data.load(filename=d)
    instance = model.create_instance(data)
    optimizer = SolverFactory('cplex')
    optimizer.options['lpmethod'] = 1  # Use primal simplex
    results = optimizer.solve(instance,
                              suffixes=['dual', 'urc', 'slack', 'lrc'])
    instance.solutions.load_from(results)

    coef_CAP = dict()
    scal_CAP = dict()
    # Break-even investment cost for this scenario, indexed by technology
    years = list()
    bic_s = dict()
    ic_s = dict()  # Raw investment costs for this scenario, indexed by tech
    cap_s = dict()
    for t in techs:
        vintages = instance.vintage_optimize
        P_0 = min(instance.time_optimize)
        GDR = value(instance.GlobalDiscountRate)
        MLL = instance.ModelLoanLife
        MPL = instance.ModelProcessLife
        LLN = instance.LifetimeLoanProcess
        x = 1 + GDR  # convenience variable, nothing more.

        bic_s[t] = list()
        ic_s[t] = list()
        cap_s[t] = list()
        years = vintages.value
        for v in vintages:
            period_available = set()
            for p in instance.time_future:
                if (p, t, v) in instance.CostFixed.keys():
                    period_available.add(p)
            c_i = (instance.CostInvest[t, v] * instance.LoanAnnualize[t, v] *
                   (LLN[t, v] if not GDR else
                    (x**(P_0 - v + 1) * (1 - x**(-value(LLN[t, v]))) / GDR)))

            c_s = (-1) * (value(instance.CostInvest[t, v]) *
                          value(instance.SalvageRate[t, v]) /
                          (1 if not GDR else
                           (1 + GDR)**(instance.time_future.last() -
                                       instance.time_future.first() - 1)))

            c_f = sum(instance.CostFixed[p, t, v] *
                      (MPL[p, t, v] if not GDR else
                       (x**(P_0 - p + 1) *
                        (1 - x**(-value(MPL[p, t, v]))) / GDR))
                      for p in period_available)

            c = c_i + c_s + c_f
            s = (c - instance.lrc[instance.V_Capacity[t, v]]) / c
            coef_CAP[t, v] = c
            scal_CAP[t, v] = s  # Must reduce TO this percentage
            bic_s[t].append(scal_CAP[t, v] * instance.CostInvest[t, v])
            ic_s[t].append(instance.CostInvest[t, v])
            cap_s[t].append(value(instance.V_Capacity[t, v]))

        # print "Tech\tVintage\tL. RC\tCoef\tU .RC\tScale\tBE IC\tBE FC\tIC\tFC\tCap"
        print "{:>10s}\t{:>7s}\t{:>6s}\t{:>4s}\t{:>6s}\t{:>5s}\t{:>7s}\t{:>7s}\t{:>5s}\t{:>3s}\t{:>5s}".format(
            'Tech', 'Vintage', 'L. RC', 'Coef', 'U. RC', 'Scale', 'BE IC',
            'BE FC', 'IC', 'FC', 'Cap')
        for v in vintages:
            lrc = instance.lrc[instance.V_Capacity[t, v]]
            urc = instance.urc[instance.V_Capacity[t, v]]

            # print "{:>s}\t{:>g}\t{:>.0f}\t{:>.0f}\t{:>.0f}\t{:>.3f}\t{:>.1f}\t{:>.1f}\t{:>.0f}\t{:>.0f}\t{:>.3f}".format(
            print "{:>10s}\t{:>7g}\t{:>6.0f}\t{:>4.0f}\t{:>6.0f}\t{:>5.3f}\t{:>7.1f}\t{:>7.1f}\t{:>5.0f}\t{:>3.0f}\t{:>5.3f}".format(
                t,
                v,
                lrc,
                coef_CAP[t, v],
                urc,
                scal_CAP[t, v],
                scal_CAP[t, v] * instance.CostInvest[t, v],
                scal_CAP[t, v] *
                instance.CostFixed[v, t, v],  # Use the FC of the first period
                instance.CostInvest[t, v],
                instance.CostFixed[v, t, v],
                value(instance.V_Capacity[t, v]))

    print('Dual and slack variables for emission caps:')
    for e in instance.commodity_emissions:
        for p in instance.time_optimize:
            if (p, e) in instance.EmissionLimitConstraint:
                print(p, e, instance.dual[instance.EmissionLimitConstraint[
                    p, e]], '\t', instance.slack[
                        instance.EmissionLimitConstraint[p, e]])
    return years, bic_s, ic_s

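    # NOTE: the block below never runs because of the return statement above.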
    print('Dual and slack variables for Commodity Demand Constraints')
    for c in instance.commodity_demand:
        for p in instance.time_optimize:
            for s in instance.time_season:
                for tod in instance.time_of_day:
                    print(p, s, tod, instance.dual[instance.DemandConstraint[
                        p, s, tod,
                        c]], instance.slack[instance.DemandConstraint[p, s,
                                                                      tod, c]])
Example #44
0

# Objective
def obj_cost(model):


    return sum(sum(data.periodDuration*(BELPEX_data.AGC_price*model.AGCup[i,j]+BELPEX_data.AGC_price*model.AGCdown[i,j]+BELPEX_data.price[day][int(math.ceil(i/4)-1)] * model.powerGen[i,j]) - (data.periodDuration*data.fixedCost[j-1]*model.onOffStatus[i,j] + model.startUp[i,j]*data.startUpCost[j-1] +\
      data.periodDuration*data.powerCost[j-1][0]*data.powerMin[j-1]*model.onOffStatus[i,j] +sum(data.periodDuration*data.powerCost[j-1][l-1]*model.piecesGen[i,j,l] for l in model.Pieces)) for j in model.Generators) for i in model.Periods)


model.obj = Objective(rule=obj_cost, sense=maximize)

if __name__ == '__main__':

    opt = SolverFactory("gurobi")
    results = opt.solve(model, tee=True, keepfiles=True)
    print("")
    print("Day %s" % (day + 1))
    print("Global profit: %s" % model.obj())

powgen = np.array(
    np.asarray(
        [[round(model.powerGen[i, j].value, 8) for j in model.Generators]
         for i in model.Periods]))
powgentot = np.asarray([
    sum(model.powerGen[i, j].value for j in model.Generators)
    for i in model.Periods
])
onOff = np.array(
    np.asarray([[model.onOffStatus[i, j].value for j in model.Generators]
                for i in model.Periods]))
Example #45
0
def sen_range(tech, vintage, scales, dat):
    # Given a range of scaling factors for the coefficient of a specific
    # V_Capacity, return the objective value, reduced cost, capacity, etc.
    # for each scaling factor.
    from openpyxl import Workbook
    target_year = vintage
    target_tech = tech
    algmap = {
        'primal simplex': 1,
        'dual simplex': 2,
        'barrier': 4,
        'default': 0,
    }  # cplex definition

    t0 = time()
    time_mark = lambda: time() - t0

    model = return_Temoa_model()
    data = return_Temoa_data(model, dat)
    optimizer = SolverFactory('cplex')

    ic0 = data['CostInvest'][target_tech, target_year]
    fc0 = data['CostFixed'][target_year, target_tech, target_year]
    all_periods = data['time_future']

    obj = dict()
    cap = dict()
    lrc = dict()
    coef = dict()
    urc = dict()
    bic = dict()
    bfc = dict()
    ic = dict()  # Original IC
    fc = dict()  # Original FC

    for algorithm in ['barrier', 'dual simplex', 'primal simplex']:
        optimizer.options['lpmethod'] = algmap[algorithm]
        print('Algorithm: {}'.format(algorithm))

        obj_alg = list()
        cap_alg = defaultdict(list)
        lrc_alg = defaultdict(list)
        coef_alg = defaultdict(list)
        urc_alg = defaultdict(list)
        bic_alg = defaultdict(list)
        bfc_alg = defaultdict(list)
        ic_alg = defaultdict(list)
        fc_alg = defaultdict(list)
        for s in scales:
            print('[{:>9.2f}] Scale: {:>.3f} starts'.format(time_mark(), s))
            data['CostInvest'][target_tech, target_year] = s * ic0
            for y in data['time_future']:
                if (y, target_tech, target_year) in data['CostFixed']:
                    data['CostFixed'][y, target_tech, target_year] = s * fc0
            instance = model.create_instance(data)
            instance.preprocess()
            results = optimizer.solve(instance,
                                      suffixes=['dual', 'urc', 'slack', 'lrc'])
            instance.solutions.load_from(results)

            obj_alg.append(value(instance.TotalCost))
            for y in instance.time_optimize:
                key = str(y)
                c_vector = return_c_vector(instance, [])
                coefficient = c_vector[('V_Capacity', (target_tech, y))]
                capacity = value(instance.V_Capacity[target_tech, y])
                lower_rc = value(instance.lrc[instance.V_Capacity[target_tech,
                                                                  y]])
                upper_rc = value(instance.urc[instance.V_Capacity[target_tech,
                                                                  y]])
                cost_i = value(instance.CostInvest[target_tech, y])
                cost_f = value(instance.CostFixed[y, target_tech, y])
                s_be = (coefficient -
                        lower_rc) / coefficient  # Break-even scale

                cap_alg[key].append(capacity)
                lrc_alg[key].append(lower_rc)
                coef_alg[key].append(coefficient)
                urc_alg[key].append(upper_rc)
                ic_alg[key].append(cost_i)
                fc_alg[key].append(cost_f)
                bic_alg[key].append(s_be * cost_i)
                bfc_alg[key].append(s_be * cost_f)

        obj[algorithm] = obj_alg
        cap[algorithm] = cap_alg
        lrc[algorithm] = lrc_alg
        coef[algorithm] = coef_alg
        urc[algorithm] = urc_alg
        bic[algorithm] = bic_alg
        bfc[algorithm] = bfc_alg
        ic[algorithm] = ic_alg
        fc[algorithm] = fc_alg

        # Write to Excel spreadsheet
        print('[{:>9.2f}] Saving to Excel spreadsheet'.format(time_mark()))
        row_title = [
            'scale', 'obj', 'cap', 'lrc', 'coef', 'urc', 'bic', 'bfc', 'ic',
            'fc'
        ]
        wb = Workbook()
        # for ws_title in cap_alg:
        for year in all_periods:
            ws_title = str(year)
            if ws_title not in cap_alg:
                continue
            ws = wb.create_sheet(ws_title)

            row = [
                scales, obj_alg, cap_alg[ws_title], lrc_alg[ws_title],
                coef_alg[ws_title], urc_alg[ws_title], bic_alg[ws_title],
                bfc_alg[ws_title], ic_alg[ws_title], fc_alg[ws_title]
            ]

            # Note Python starts from 0, but row number starts from 1
            for j in range(0, len(row_title)):
                c = ws.cell(row=1, column=j + 1)
                c.value = row_title[j]
            for i in range(0, len(scales)):
                for j in range(0, len(row_title)):
                    c = ws.cell(row=i + 2, column=j + 1)
                    c.value = row[j][i]
        fname = '.'.join(
            [target_tech, str(target_year)] +
            [i[:-4] for i in dat]  # Remove the .dat extension
            + [algorithm])  # tech_name.year.dat_file_name.algorithm.xlsx
        wb.save(fname + '.xlsx')
Example #46
0
# Discretize model using Backward Finite Difference method
#discretize = Finite_Difference_Transformation()
#disc_instance = discretize.apply(instance,nfe=20,scheme='BACKWARD')

# Discretize model using Orthogonal Collocation
discretize = Collocation_Discretization_Transformation()
disc_instance = discretize.apply(instance,nfe=7,ncp=6,scheme='LAGRANGE-RADAU')
# Will reimplement this method in future release of pyomo.dae
# disc_instance = discretize.reduce_collocation_points(var=instance.u,
# 	ncp=2, diffset=instance.t)

solver='ipopt'
opt=SolverFactory(solver)

results = opt.solve(disc_instance,tee=True)
disc_instance.load(results)

x1 = []
x2 = []
u = []
t=[]

print(sorted(disc_instance.t))

for i in sorted(disc_instance.t):
    t.append(i)
    x1.append(value(disc_instance.x1[i]))
    x2.append(value(disc_instance.x2[i]))
    u.append(value(disc_instance.u[i]))
Example #47
0
                         for utes in range(G_exp.numUTE)
                         for periodo in range(24 * numperiodo)
                         for cenario in range(numcenario)))
model.restricoes.add(model.CustoexpUHEs == sum(
    model.binaryUHE_exp[uhes] * G_exp.UHE_exp[uhes][2] * G_exp.UHE_exp[uhes][4]
    for uhes in range(G_exp.numUHE)))


def FObjetivo(model):
    return model.CustoGTExist + model.CustoexpOutras + model.CustoexpUTEs + model.CustoexpUHEs


model.objetivo = Objective(rule=FObjetivo, sense=minimize)

solver = SolverFactory('cbc', executable="C:\\CoinAll\\bin\\cbc.exe")
results = solver.solve(model, load_solutions=True)
model.solutions.store_to(results)
results.write(
    filename=
    r'C:\JoaoCho\Python\MDI 24h\Versao main Antigo - Teste PDE\resultados.json',
    format='json')

file1 = open(
    r"C:\JoaoCho\Python\MDI 24h\Versao main Antigo - Teste PDE\Resultados\UHE_existente.csv",
    "w")

txt_UHEexist = "Nome;Cenario;Capacidade Instalada;Garantia Física"

for periodo in range(24 * numperiodo):
    txt_UHEexist = txt_UHEexist + ";G_periodo" + str(periodo + 1)
Example #48
0
    # The scaling factor settings here are just for testing and demonstration.
    # You don't need to supply scaling factors, and if you do provide the
    # scaling_factor suffix you don't need factors for every variable and
    # constraint.  These are used only for the user scaling options
    # "-scale_eqs 3" and "-scale_vars 1"
    model.scaling_factor = Suffix(direction=Suffix.EXPORT, datatype=Suffix.FLOAT)
    model.scaling_factor[model.Fin] = 0.5

    model.scaling_factor[model.eq_Fin] = 100
    #---------------------------------------------------------------------------

    print("Solving initial conditions:")
    res = opt.solve(
        model,
        tee=True,
        options={
            "-snes_monitor":"",
            "-on_error_attach_debugger":"",
            "-scale_vars":0,
            "-scale_eqs":1})

    for i in [1,2,3,4,5]: model.y[i].unfix()
    model.display() # show the initial state

    #Set suffixes to show the structure of the problem
    # dae_suffix holds variable types 0=algebraic 1=differential 2=derivative
    # 3=time. dae_link associates differential variables to their derivatives
    model.dae_suffix = Suffix(direction=Suffix.EXPORT, datatype=Suffix.INT)
    model.dae_link = Suffix(direction=Suffix.EXPORT, datatype=Suffix.INT)

    # Label the vars.  Seems 0 is default if I don't attach a suffix so don't
    # need to explicitly label algebraic vars
Example #49
0
problem_names.append("convex_multi_vararray2")
problem_names.append("convex_vararray")
problem_names.append("concave_vararray")
problem_names.append("convex_var")
problem_names.append("concave_var")
problem_names.append("piecewise_var")
problem_names.append("piecewise_vararray")
problem_names.append("step_var")
problem_names.append("step_vararray")

problem_names = ['convex_var']

for problem_name in problem_names:
    p = __import__(problem_name)

    model = p.define_model(**kwds)
    inst = model.create()

    results = opt.solve(inst, tee=True)

    inst.load(results)

    res = dict()
    for block in inst.block_data_objects(active=True):
        for variable in block.component_map(Var, active=True).values():
            for var in variable.values():
                name = var.name
                if (name[:2] == 'Fx') or (name[:1] == 'x'):
                    res[name] = value(var)
    print(res)
Example #50
0
    e.lsmhe.hi_t.display()
    e.lsmhe.report_zL()
    if stat == 1:
        stat = e.solve_dyn(e.lsmhe,
                           skip_update=True,
                           iter_max=250,
                           stop_if_nopt=True,
                           jacobian_regularization_value=1e-02,
                           linear_scaling_on_demand=True)
        if stat != 0:
            e.olnmpc.write_nl(name="bad_mhe.nl")
            with open("ipopt.opt", "w") as f:
                f.write("linear_solver ma57\n"
                        "ma57_dep_tol 1e-8\nbig_M 1e30\n")
                f.close()
            ipsr.solve(e.olnmpc, tee=True)
    e.update_state_mhe()
    # # Prior-Covariance stuff
    e.check_active_bound_noisy()
    e.load_covariance_prior()
    e.set_state_covariance()
    e.regen_objective_fun()
    # # Update prior-state
    e.set_prior_state_from_prior_mhe()
    #
    e.print_r_mhe()
    #
    e.shift_mhe()
    e.shift_measurement_input_mhe()

    e.initialize_olnmpc(e.PlantSample, "estimated")
Example #51
0
        +
        # O&M costs (variable & fixed)
        sum((mdl.om_cost_fix + mdl.capacity_factor * mdl.om_cost_var) * mdl.CapInstalled[s] for s in mdl.SUBSTATIONS)
        +
        # Transportation costs
        sum(mdl.distances[r] * mdl.BiomassTransported[r] * mdl.transport_cost for r in mdl.ROUTES)
        +
        # Biomass acquisition costs.
        sum(mdl.biomass_cost[b] * sum(mdl.BiomassTransported[b, s] for s in mdl.SUBSTATIONS) for b in mdl.SOURCES)
        -
        # Gross profits during the period
        sum(mdl.fit_tariff[s] * mdl.CapInstalled[s] * mdl.capacity_factor * mdl.total_hours for s in mdl.SUBSTATIONS)
    )


model.net_profits = Objective(rule=net_revenue_rule, sense=minimize, doc="Define objective function")

# Display of the output #

# plt.plot(size, cost)
# plt.show()

opt = SolverFactory("gurobi")
results = opt.solve(model, tee=True)

f = open("results.txt", "w")
for v_data in model.component_data_objects(Var):
    if value(v_data) > 1:
        f.write(v_data.cname(True) + ", value = " + str(value(v_data)) + "\n")
f.close()
Example #52
0
def bindingly_recover(instance, sol_id, solver):

    model = AbstractModel()

    # Parameter: number of machines
    model.m = Param(within=NonNegativeIntegers, initialize=instance.m)

    # Parameter: number of jobs
    model.n = Param(within=NonNegativeIntegers, initialize=instance.n)

    # Parameter: allowable number of migrations
    model.g = Param(within=NonNegativeIntegers, initialize=instance.g)

    # Parameter: set of machines
    model.M = RangeSet(0, model.m - 1)  # set of machines

    # Parameter: subset of failed machines
    model.MF = Set(within=model.M, initialize=instance.failed_machines)

    # Parameter: set of jobs
    model.J = RangeSet(0, model.n - 1)  # set of jobs

    # Parameter: subset of assigned jobs
    model.JA = Set(within=model.J, initialize=instance.bounded_migration_jobs)

    # Parameter: processing times
    model.p = Param(model.J,
                    within=NonNegativeIntegers,
                    initialize=lambda model, j: instance.p[j])

    # Parameter: machines of assigned jobs
    model.mu = Param(model.JA,
                     within=NonNegativeIntegers,
                     initialize=lambda model, j: instance.mu[j])

    # Variable: assignment of job j to machine i
    model.x = Var(model.M, model.J, within=Binary)

    # Variable: completion time of machine i
    model.C = Var(model.M, within=NonNegativeIntegers)

    # Variable: makespan
    model.Cmax = Var(within=NonNegativeIntegers)

    # Objective: makespan minimization
    def makespan_rule(model):
        return model.Cmax

    model.obj_value = Objective(rule=makespan_rule, sense=minimize)

    # Constraint: makespan computation
    def makespan_computation_rule(model, i):
        return model.Cmax >= model.C[i]

    model.makespan_computation_constraint = Constraint(
        model.M, rule=makespan_computation_rule)

    # Constraint: machine completion time computation
    def machine_completion_computation_rule(model, i):
        return model.C[i] == sum(model.p[j] * model.x[i, j] for j in model.J)

    model.machine_completion_computation_constraint = Constraint(
        model.M, rule=machine_completion_computation_rule)

    # Constraint: job assignment
    def job_assignment_rule(model, j):
        return sum(model.x[i, j] for i in model.M) == 1

    model.job_assignment_constraint = Constraint(model.J,
                                                 rule=job_assignment_rule)

    # Constraint: bounded migrations
    def bounded_migrations_rule(model, j):
        return sum(
            sum(model.x[i, j] for i in model.M if model.mu[j] != i)
            for j in model.JA) <= model.g

    model.bounded_migrations_constraint = Constraint(
        model.J, rule=bounded_migrations_rule)

    # Constraint: failed machines
    def failed_machines_rule(model, i):
        return sum(model.x[i, j] for j in model.J) == 0

    model.failed_machines_constraint = Constraint(model.MF,
                                                  rule=failed_machines_rule)

    opt = SolverFactory(solver.commercial_tool)

    opt.options['threads'] = 1
    opt.options['timelimit'] = solver.time_limit
    opt.options['logfile'] = recovery_log_file(instance, solver, sol_id)
    opt.options['mipgap'] = solver.gap_tolerance

    milp_instance = model.create_instance()
    results = opt.solve(milp_instance)
    #instance.load(results)

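    # Recover each job's machine assignment from the binary x values,
    # with a small tolerance for solver round-off.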
    epsilon = 10**(-5)
    y = [
        i for j in range(instance.n) for i in range(instance.m)
        if milp_instance.x[i, j].value >= 1 - epsilon
    ]
    C = [milp_instance.C[i].value for i in range(instance.m)]

    upper_bound = results.problem.upper_bound
    #lower_bound = results.problem.lower_bound
    #relative_gap = (upper_bound - lower_bound) * 1.0 / upper_bound
    elapsed_time = results.solver.time
    #(nodes_explored, nodes_left) = read_nodes(instance, solver)

    schedule = Schedule(y, C)
    schedule = order_lexicographically(instance, schedule)

    solver.set_recovery_results(upper_bound, elapsed_time)

    return schedule
Example #53
0
    [x[m, t] for m in range(1, M + 1) for t in range(1, T + 1)]),
                          sense=pyo.maximize)

#constraints
model.C1 = pyo.ConstraintList()
for t in range(1, T + 1):
    model.C1.add(expr=2 * x[2, t] - 8 * x[3, t] <= 0)

model.C2 = pyo.ConstraintList()
for t in range(3, T + 1):
    model.C2.add(expr=x[2, t] - 2 * x[3, t - 2] + x[4, t] >= 1)

model.C3 = pyo.ConstraintList()
for t in range(1, T + 1):
    model.C3.add(expr=sum([x[m, t] for m in range(1, M + 1)]) <= 50)

model.C4 = pyo.ConstraintList()
for t in range(2, T + 1):
    model.C4.add(expr=x[1, t] + x[2, t - 1] + x[3, t] + x[4, t] <= 10)

model.C5 = pyo.ConstraintList()
for m in range(1, M + 1):
    for t in range(1, T + 1):
        model.C5.add(pyo.inequality(0, x[m, t], 10))

#solve
opt = SolverFactory('glpk', executable='/usr/local/bin/glpsol')
opt.options["mipgap"] = 0
results = opt.solve(model, tee=True, timelimit=10)

print(pyo.value(model.obj))
Example #54
0
#####################################

# Create a model
model = AbstractModel('RW')
#model = ConcreteModel('RW')
flow = AbstractModel()

# import the flow problem from flow_problem. Solve the problem and load the results
from flow_problem_v2 import model as flow_model

# Create an instance for the flow problem
flow_instance = flow_model.create(datafile)
#flow_instance.pprint()

# Solve the flow problem
flow_results = optimizer.solve(flow_instance)
#print(flow_results)
flow_instance.load(flow_results)
#flow_instance.pprint()

############################
#
# Definition of the statespace
#  and its partitions.
# Load the data from all the datafiles
#
############################

model.dimen = Param(within=PositiveIntegers)
model.dimset = RangeSet(
    0, model.dimen - 1,
Example #55
0
def solve_fp_subproblem(solve_data, config):
    """
    Solves the feasibility pump NLP

    This function sets up the 'fp_nlp' by relaxing the integer variables,
    precomputes dual values, deactivates trivial constraints, and then solves the NLP model.

    Parameters
    ----------
    solve_data: MindtPy Data Container
        data container that holds solve-instance data
    config: ConfigBlock
        contains the specific configurations for the algorithm

    Returns
    -------
    fp_nlp: Pyomo model
        Fixed-NLP from the model
    results: Pyomo results object
        result from solving the Fixed-NLP
    """

    fp_nlp = solve_data.working_model.clone()
    MindtPy = fp_nlp.MindtPy_utils
    config.logger.info('FP-NLP %s: Solve feasibility pump NLP subproblem.'
                       % (solve_data.fp_iter,))

    # Set up NLP
    fp_nlp.MindtPy_utils.objective_list[-1].deactivate()
    if solve_data.objective_sense == minimize:
        fp_nlp.improving_objective_cut = Constraint(
            expr=fp_nlp.MindtPy_utils.objective_value <= solve_data.UB)
    else:
        fp_nlp.improving_objective_cut = Constraint(
            expr=fp_nlp.MindtPy_utils.objective_value >= solve_data.LB)

    # Add norm_constraint, which guarantees the monotonicity of the norm objective value sequence of all iterations
    # Ref: Paper 'A storm of feasibility pumps for nonconvex MINLP'
    # The norm type is consistent with the norm objective of the FP-main problem.
    if config.fp_norm_constraint:
        if config.fp_main_norm == 'L1':
            # TODO: check if we can access the block defined in FP-main problem
            generate_norm1_norm_constraint(
                fp_nlp, solve_data.mip, config, discrete_only=True)
        elif config.fp_main_norm == 'L2':
            fp_nlp.norm_constraint = Constraint(expr=sum((nlp_var - mip_var.value)**2 - config.fp_norm_constraint_coef*(nlp_var.value - mip_var.value)**2
                                                         for nlp_var, mip_var in zip(fp_nlp.MindtPy_utils.discrete_variable_list, solve_data.mip.MindtPy_utils.discrete_variable_list)) <= 0)
        elif config.fp_main_norm == 'L_infinity':
            fp_nlp.norm_constraint = ConstraintList()
            rhs = config.fp_norm_constraint_coef * max(nlp_var.value - mip_var.value for nlp_var, mip_var in zip(
                fp_nlp.MindtPy_utils.discrete_variable_list, solve_data.mip.MindtPy_utils.discrete_variable_list))
            for nlp_var, mip_var in zip(fp_nlp.MindtPy_utils.discrete_variable_list, solve_data.mip.MindtPy_utils.discrete_variable_list):
                fp_nlp.norm_constraint.add(nlp_var - mip_var.value <= rhs)

    MindtPy.fp_nlp_obj = generate_norm2sq_objective_function(
        fp_nlp, solve_data.mip, discrete_only=config.fp_discrete_only)

    MindtPy.cuts.deactivate()
    TransformationFactory('core.relax_integer_vars').apply_to(fp_nlp)
    try:
        TransformationFactory('contrib.deactivate_trivial_constraints').apply_to(
            fp_nlp, tmp=True, ignore_infeasible=False, tolerance=config.constraint_tolerance)
    except ValueError:
        config.logger.warning(
            'infeasibility detected in deactivate_trivial_constraints')
        results = SolverResults()
        results.solver.termination_condition = tc.infeasible
        return fp_nlp, results
    # Solve the NLP
    nlpopt = SolverFactory(config.nlp_solver)
    nlp_args = dict(config.nlp_solver_args)
    set_solver_options(nlpopt, solve_data, config, solver_type='nlp')
    with SuppressInfeasibleWarning():
        with time_code(solve_data.timing, 'fp subproblem'):
            results = nlpopt.solve(
                fp_nlp, tee=config.nlp_solver_tee, **nlp_args)
    return fp_nlp, results
Example #56
0
class Test(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        global scip_available
        import pyomo.environ
        from pyomo.solvers.tests.solvers import test_solver_cases
        scip_available = test_solver_cases('scip', 'nl').available

    def setUp(self):
        if not scip_available:
            self.skipTest("The 'scipampl' command is not available")
        TempfileManager.push()

        self.scip = SolverFactory('scip', solver_io='nl')

        m = self.model = ConcreteModel()
        m.v = Var()
        m.o = Objective(expr=m.v)
        m.c = Constraint(expr=m.v >= 1)

    def tearDown(self):
        TempfileManager.pop(remove=deleteFiles or self.currentTestPassed())

    def compare_json(self, file1, file2):
        with open(file1, 'r') as out, \
            open(file2, 'r') as txt:
            self.assertStructuredAlmostEqual(json.load(txt),
                                             json.load(out),
                                             abstol=1e-7,
                                             allow_second_superset=True)

    def test_version_scip(self):
        self.assertTrue(self.scip.version() is not None)
        self.assertTrue(type(self.scip.version()) is tuple)
        self.assertEqual(len(self.scip.version()), 4)

    def test_scip_solve_from_instance(self):
        # Test scip solve from a pyomo instance and load the solution
        results = self.scip.solve(self.model, suffixes=['.*'])
        # We don't want the test to care about which Scip version we are using
        self.model.solutions.store_to(results)
        results.Solution(0).Message = "Scip"
        results.Solver.Message = "Scip"
        results.Solver.Time = 0
        _out = TempfileManager.create_tempfile(".txt")
        results.write(filename=_out, times=False, format='json')
        self.compare_json(
            _out, join(currdir, "test_scip_solve_from_instance.baseline"))

    def test_scip_solve_from_instance_options(self):

        # Creating a dummy scip.set file in the cwd
        # will cover the code that prints a warning
        _cwd = os.getcwd()
        tmpdir = TempfileManager.create_tempdir()
        try:
            os.chdir(tmpdir)
            open(join(tmpdir, 'scip.set'), "w").close()
            # Test scip solve from a pyomo instance and load the solution
            with LoggingIntercept() as LOG:
                results = self.scip.solve(self.model,
                                          suffixes=['.*'],
                                          options={"limits/softtime": 100})
            self.assertRegex(
                LOG.getvalue().replace("\n", " "),
                r"A file named (.*) exists in the current working "
                r"directory, but SCIP options are being "
                r"set using a separate options file. The "
                r"options file \1 will be ignored.")
        finally:
            os.chdir(_cwd)
        # We don't want the test to care about which Scip version we are using
        self.model.solutions.store_to(results)
        results.Solution(0).Message = "Scip"
        results.Solver.Message = "Scip"
        results.Solver.Time = 0
        _out = TempfileManager.create_tempfile(".txt")
        results.write(filename=_out, times=False, format='json')
        self.compare_json(
            _out, join(currdir, "test_scip_solve_from_instance.baseline"))
Example #57
0
model.trans_constraint_y = Constraint(model.vertices, rule=transformation_y)

def distance_from_vertex_to_contact(model, v, c):
    return (model.distances_to_contacts[v,c] ==
            ((model.transformed_xs[v] - model.xs_of_contacts[c])**2 +
            (model.transformed_ys[v] - model.ys_of_contacts[c])**2))
model.distances_to_contacts_constraint = Constraint(model.vertices, model.contacts, rule=distance_from_vertex_to_contact)

def only_one_best_distance(model, c):
    return 1 == sum(model.best_distances_mask[v,c] for v in model.vertices)
model.only_one_best_distance = Constraint(model.contacts, rule=only_one_best_distance)

def multiply_by_mask(model, v, c):
    return (model.filtered_distances_to_contacts[v,c] ==
            model.best_distances_mask[v,c] * model.distances_to_contacts[v,c])
model.multiply_by_mask = Constraint(model.vertices, model.contacts, rule=multiply_by_mask)

def best_distances(model, c):
    return model.best_distances_to_contacts[c] == sum(model.filtered_distances_to_contacts[v,c] for v in model.vertices)
model.best_distances_to_contacts_constraint = Constraint(model.contacts, rule=best_distances)

def selected_distances_are_minimal(model, v, c):
    return model.best_distances_to_contacts[c] <= model.distances_to_contacts[v,c]
model.best_distances_are_minimal_constraint = Constraint(model.vertices, model.contacts, rule=selected_distances_are_minimal)

instance = model.create_instance(data)
results = solver.solve(instance, tee=True)
instance.solutions.load_from(results)

instance.display()
Example #58
0
def solve_linear_GDP(linear_GDP_model, solve_data, config):
    m = linear_GDP_model
    GDPopt = m.GDPopt_utils
    # Transform disjunctions
    TransformationFactory('gdp.bigm').apply_to(m)

    preprocessing_transformations = [
        # Propagate variable bounds
        'contrib.propagate_eq_var_bounds',
        # Detect fixed variables
        'contrib.detect_fixed_vars',
        # Propagate fixed variables
        'contrib.propagate_fixed_vars',
        # Remove zero terms in linear expressions
        'contrib.remove_zero_terms',
        # Remove terms in equal to zero summations
        'contrib.propagate_zero_sum',
        # Transform bound constraints
        'contrib.constraints_to_var_bounds',
        # Detect fixed variables
        'contrib.detect_fixed_vars',
        # Remove terms in equal to zero summations
        'contrib.propagate_zero_sum',
        # Remove trivial constraints
        'contrib.deactivate_trivial_constraints'
    ]
    for xfrm in preprocessing_transformations:
        TransformationFactory(xfrm).apply_to(m)

    # Deactivate extraneous IMPORT/EXPORT suffixes
    getattr(m, 'ipopt_zL_out', _DoNothing()).deactivate()
    getattr(m, 'ipopt_zU_out', _DoNothing()).deactivate()

    # load_solutions is False because otherwise an annoying error message
    # appears for infeasible models.
    mip_solver = SolverFactory(config.mip)
    if not mip_solver.available():
        raise RuntimeError("MIP solver %s is not available." % config.mip)
    results = mip_solver.solve(m, load_solutions=False, **config.mip_options)
    terminate_cond = results.solver.termination_condition
    if terminate_cond is tc.infeasibleOrUnbounded:
        # Linear solvers sometimes report that the problem is infeasible or
        # unbounded during presolve without distinguishing the two cases, so
        # we re-solve with a solver option that forces a definite status.
        tmp_options = deepcopy(config.mip_options)
        # TODO This solver option is specific to Gurobi.
        tmp_options['DualReductions'] = 0
        results = mip_solver.solve(m, load_solutions=False, **tmp_options)
        terminate_cond = results.solver.termination_condition

    if terminate_cond is tc.optimal:
        m.solutions.load_from(results)
        return True, list(v.value for v in GDPopt.working_var_list)
    elif terminate_cond is tc.infeasible:
        config.logger.info(
            'Linear GDP is infeasible. '
            'Problem may have no more feasible discrete configurations.')
        return False
    elif terminate_cond is tc.maxTimeLimit:
        # TODO check that status is actually ok and everything is feasible
        config.logger.info(
            'Unable to optimize linear GDP problem within time limit. '
            'Using current solver feasible solution.')
        results.solver.status = SolverStatus.ok
        m.solutions.load_from(results)
        return True, list(v.value for v in GDPopt.working_var_list)
    elif (terminate_cond is tc.other
          and results.solution.status is SolutionStatus.feasible):
        # load the solution and suppress the warning message by setting
        # solver status to ok.
        config.logger.info('Linear GDP solver reported feasible solution, '
                           'but not guaranteed to be optimal.')
        results.solver.status = SolverStatus.ok
        m.solutions.load_from(results)
        return True, list(v.value for v in GDPopt.working_var_list)
    else:
        raise ValueError('GDPopt unable to handle linear GDP '
                         'termination condition '
                         'of %s. Solver message: %s' %
                         (terminate_cond, results.solver.message))
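The infeasibleOrUnbounded branch above re-solves with Gurobi's DualReductions parameter disabled so that the solver returns a definite status. A stand-alone sketch of just that disambiguation step, assuming the 'gurobi' solver is installed and using an unbounded toy LP, could look like:

# Hedged sketch: disambiguating an infeasibleOrUnbounded status with Gurobi.
from pyomo.environ import ConcreteModel, Var, Constraint, Objective, maximize, SolverFactory
from pyomo.opt import TerminationCondition as tc

m = ConcreteModel()
m.x = Var()
m.y = Var()
m.c = Constraint(expr=m.x - m.y <= 1)
m.obj = Objective(expr=m.x + m.y, sense=maximize)  # unbounded above

opt = SolverFactory('gurobi')
results = opt.solve(m, load_solutions=False)
if results.solver.termination_condition is tc.infeasibleOrUnbounded:
    # DualReductions=0 forces Gurobi to decide between infeasible and unbounded.
    results = opt.solve(m, load_solutions=False, options={'DualReductions': 0})
print(results.solver.termination_condition)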
Example #59
0
into gen_inc_heat_rates.tab if you want to play around with the model
behavior.

In both versions of incremental heat rate tables, I gave natural gas
combustion turbines a very minor heat rate penalty to discourage
committing more capacity than is needed. I changed the incremental heat
rate to 99 percent of the full load heat rate, with 1 percent of the
fuel use incurred at 0 electricity output.

For this to work, you need to ensure that the switch_mod package
directory is in your Python search path. See the README for more info.

"""

from pyomo.environ import *
from pyomo.opt import SolverFactory
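# The docstring above requires switch_mod to be on the Python search path; if
# it is not installed as a package, one option (an assumption, not part of the
# original script) is to prepend the directory that contains it. The
# "../switch" location below is hypothetical.
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join("..", "switch")))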
from switch_mod.utilities import define_AbstractModel

switch_model = define_AbstractModel("switch_mod", "local_td", "project.unitcommit", "fuel_cost")
switch_instance = switch_model.load_inputs(inputs_dir="inputs")

opt = SolverFactory("cplex")

results = opt.solve(switch_instance, keepfiles=False, tee=False)
switch_model.save_results(results, switch_instance, "outputs")

# Dump all results
# switch_instance.load(results)
results.write()
switch_instance.pprint()
Example #60
0
def main(argv):
    slice_size = 60
    opt_func = "cost"

    if len(argv) != 3:
        print("Not enough arguments: using default values (slice_size=60, opt_func=cost)")
    else:
        slice_size = int(argv[0])
        opt_func = argv[1]
        print(opt_func)

    sload_flag = 1
    suchp_flag = 1
    sgas_chp_flag = 1

    if len(argv) >= 3 and argv[2] == 'off':
        sload_flag = 0
        suchp_flag = 0
        sgas_chp_flag = 0

    debug = False

    # Data preprocessing
    load_profiles_df = pd.read_excel(
        "Summer_Load_Profiles.xlsx", header=None) + pd.read_excel(
            "Winter_Load_Profiles.xlsx", header=None)
    pv_profiles_df = pd.read_excel("Summer_PV_Profiles.xlsx", header=None)
    uchp_profiles_df = pd.read_excel("Winter_uCHP_Profiles.xlsx", header=None)
    prices_df = pd.read_csv("pricesGME.csv", usecols=[1])

    # Adding noise
    load_profiles_df = load_profiles_df + np.random.normal(
        0, 0.1, [load_profiles_df.shape[0], load_profiles_df.shape[1]])
    load_profiles_df = load_profiles_df.clip(lower=0)

    pv_profiles_df = pv_profiles_df + np.random.normal(
        0, 0.1, [pv_profiles_df.shape[0], pv_profiles_df.shape[1]])
    pv_profiles_df = pv_profiles_df.clip(
        lower=0)  # make sure there are no negative values

    prices_df = prices_df + np.random.normal(
        0, 0.1, [prices_df.shape[0], prices_df.shape[1]])

    scaled_load_df = change_scale(5, slice_size, load_profiles_df, debug)
    scaled_pv_df = change_scale(5, slice_size, pv_profiles_df, debug)
    scaled_prices_df = change_scale(60, slice_size, prices_df, debug)
    scaled_prices_df.columns = ['prices']

    pod_list_conf = parse_config('config.conf')
    init_pods(pod_list_conf, scaled_load_df, scaled_pv_df)

    # constant definitions
    uchp_min = uchp_profiles_df.values.min()  # kW
    uchp_max = uchp_profiles_df.values.max()  # kW
    cuchp = 0.9
    cchp_gas = 0.039
    gas_chp_min = 0
    gas_chp_max = 2
    eta = 0.9
    gin_min = 0
    gout_min = 0
    gin_max = 4
    gout_max = 4
    sin_max = 4
    sout_max = 4
    charge_init = 2  # initial battery charge
    charge_max = 6

    T = int(scaled_load_df[0].count())  # 96 in this case
    tildeload_df = pd.DataFrame()
    sload_df = pd.DataFrame()
    suchp_df = pd.DataFrame()
    sgas_chp_df = pd.DataFrame()

    fixed_index_list = []
    fixed_time_list = range(T)
    previous_charge = [None] * len(pod_list_conf)
    previous_uchp = [None] * len(pod_list_conf)
    previous_gaschp = [None] * len(pod_list_conf)

    # lists to store the output of each iteration over t
    tildeloadlist = []
    pvlist = []
    tildeuchplist = []
    tildegaschplist = []
    gridINlist = []
    gridOUTlist = []
    stINlist = []
    stOUTlist = []
    tot_time = []

    # pod
    for pod in pod_list_conf:
        fixed_index_list.append(pod[0])

    # pv
    pv_index_list = []
    for pod in pod_list_conf:
        for el in pod[2]:
            if el == 'pv':
                pv_index_list.append(pod[0])

    # uchp
    uchp_index_list = []
    for pod in pod_list_conf:
        for el in pod[2]:
            if el == 'uchp':
                uchp_index_list.append(pod[0])

    # if the offline phase is considered, read the shifted uCHP values; otherwise fill the dataframe with zeros
    if uchp_index_list:
        if suchp_flag == 1:
            suchp_df = pd.read_csv('suchp.csv')
        else:
            for t in fixed_time_list:
                for i in uchp_index_list:
                    suchp_df.loc[t, str(i)] = 0

    # gas_chp
    gas_chp_index_list = []
    for pod in pod_list_conf:
        for el in pod[2]:
            if el == 'gas_chp':
                gas_chp_index_list.append(pod[0])

    if gas_chp_index_list:
        if sgas_chp_flag == 1:
            sgas_chp_df = pd.read_csv('sgas_chp.csv')
        else:
            for t in fixed_time_list:
                for i in gas_chp_index_list:
                    sgas_chp_df.loc[t, str(i)] = 0

    # load
    load_index_list = []
    for pod in pod_list_conf:
        for el in pod[2]:
            if el == 'load':
                load_index_list.append(pod[0])

    if load_index_list:
        if sload_flag == 1:
            sload_df = pd.read_csv('sload.csv')
        else:
            for t in fixed_time_list:
                for i in load_index_list:
                    sload_df.loc[t, str(i)] = 0

    # open the output file for appending

    f = open("output_online.txt", "a")

    obj_value_list = []

    ### MODEL START - FOR EACH TIME STEP t

    for t in fixed_time_list:

        model = ConcreteModel()

        # variables that are always present
        model.Pgin = Var(fixed_index_list, domain=NonNegativeReals)
        model.Pgout = Var(fixed_index_list, domain=NonNegativeReals)

        # variables that depend on the pod configuration
        # charge
        charge_index_list = []
        for pod in pod_list_conf:
            for el in pod[2]:
                if el == 'storage':
                    charge_index_list.append(
                        pod[0]
                    )  # go through the pod list, check which pods have storage, and add their index to charge_index_list

        if charge_index_list:  # if the list is not empty
            model.charge = Var(charge_index_list, domain=NonNegativeReals)
            model.Psin = Var(charge_index_list, domain=NonNegativeReals)
            model.Psout = Var(charge_index_list, domain=NonNegativeReals)

        # uchp
        if uchp_index_list:
            model.Puchp = Var(uchp_index_list)
            model.tildeuchp = Var()

        # gas_chp
        if gas_chp_index_list:
            model.Pgas_chp = Var(gas_chp_index_list)
            model.tildegas_chp = Var()

        load_index_norm = []
        for el in load_index_list:
            load_index_norm.append(el % 100)

        reduced_scaled_load_df = scaled_load_df[
            load_index_norm]  # take from load_df only the columns of the pods we actually need, i.e. those that have a load
        # This way we pick up the columns that correspond to each other for the
        # sum, regardless of their index; it should also cover the cases where
        # we have several pods with several loads, since the columns are always
        # laid out in the same order in the two dataframes
        reduced_scaled_load_df.columns = load_index_list
        #print(reduced_scaled_load_df)

        for col1, col2, i in zip(reduced_scaled_load_df.columns,
                                 sload_df.columns, load_index_list):
            # print(reduced_scaled_load_df[col1]) # column of the first df, regardless of its index
            # print(sload_df[col2]) # column of the second df, regardless of its index
            # print(i) # correct reference index for the final summed df
            array = reduced_scaled_load_df[col1].values + sload_df[col2].values
            tildeload_df[i] = array

        tildeload = tildeload_df[load_index_list].sum(axis=1).clip(
            lower=0).values.tolist()
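        # Objective: "cost" minimizes the net exchange with the market
        # (purchases minus sales at the time-t price) plus the uCHP and
        # gas-CHP generation costs and a price-weighted storage term;
        # "grid" instead penalizes the gross grid exchange (imports plus
        # exports), both weighted by the time-t price.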

        def objective(model):
            result = 0

            for i, pod in enumerate(pod_list_conf):
                if opt_func == "cost":
                    result += model.Pgin[i] * scaled_prices_df[
                        'prices'].values.tolist()[t] - model.Pgout[
                            i] * scaled_prices_df['prices'].values.tolist()[t]
                    if 'uchp' in pod[2]:
                        result += cuchp * model.tildeuchp
                    if 'gas_chp' in pod[2]:
                        result += cchp_gas * model.tildegas_chp
                    if 'storage' in pod[2]:
                        result += model.Psout[i] * scaled_prices_df[
                            'prices'].values.tolist()[t]
                elif opt_func == "grid":
                    result += model.Pgin[i] * scaled_prices_df[
                        'prices'].values.tolist()[t] + model.Pgout[
                            i] * scaled_prices_df['prices'].values.tolist()[t]

            return result

        model.OBJ = Objective(rule=objective)

        #### CONSTRAINTS

        # UCHP:

        if uchp_index_list:
            model.bound_uchp = ConstraintList()
            model.bound_tildeuchp = ConstraintList()
            model.bound_time_uchp = ConstraintList()

        for i in uchp_index_list:
            model.bound_uchp.add(
                inequality(uchp_min, model.tildeuchp, uchp_max))

        if uchp_index_list:
            left_side = model.tildeuchp
            right_side = 0
            for i in uchp_index_list:
                right_side += model.Puchp[i] + suchp_df[str(
                    i)].values.tolist()[t]
            model.bound_tildeuchp.add(left_side == right_side)

        if uchp_index_list:
            multipl_constant = int(60 / slice_size)
            for i in uchp_index_list:
                # Hold tildeuchp at its previous value inside each of the six
                # 4-hour blocks [0, 4mc), [4mc, 8mc), ..., [20mc, 24mc),
                # where mc = multipl_constant.
                for k in range(6):
                    start = 4 * k * multipl_constant + (1 if k == 0 else 0)
                    end = 4 * (k + 1) * multipl_constant
                    if t in range(start, end) and t < end - 1:
                        model.bound_time_uchp.add(
                            previous_uchp[i] == model.tildeuchp)

        #multipl_constant = int(60/slice_size)

        # GAS_CHP:

        if gas_chp_index_list:
            model.bound_gaschp = ConstraintList()
            model.bound_tildegaschp = ConstraintList()
            model.bound_time_gaschp = ConstraintList()

        for i in gas_chp_index_list:
            model.bound_gaschp.add(
                inequality(gas_chp_min, model.tildegas_chp, gas_chp_max))

        if gas_chp_index_list:
            left_side = model.tildegas_chp
            right_side = 0
            for i in gas_chp_index_list:
                right_side += model.Pgas_chp[i] + sgas_chp_df[str(
                    i)].values.tolist()[t]
            model.bound_tildegaschp.add(left_side == right_side)

        if gas_chp_index_list:
            multipl_constant = int(60 / slice_size)
            for i in gas_chp_index_list:
                # same six 4-hour blocks as for the uCHP constraints above
                for k in range(6):
                    start = 4 * k * multipl_constant + (1 if k == 0 else 0)
                    end = 4 * (k + 1) * multipl_constant
                    if t in range(start, end) and t < end - 1:
                        model.bound_time_gaschp.add(
                            previous_gaschp[i] == model.tildegas_chp)

        #multipl_constant = int(60/slice_size)
        #for i in gas_chp_index_list:
        #model.bound_tildegaschp.add( previous_gaschp[i] == model.Pgas_chp[i])

        # STORAGE:

        if charge_index_list:
            model.bound_charge = ConstraintList()

        for i in charge_index_list:
            model.bound_charge.add(model.Psin[i] <= sin_max)
            model.bound_charge.add(model.Psout[i] <= sout_max)

            # constraint for the initial time step
            if t == 0:
                model.bound_charge.add(model.charge[i] == charge_init)
                model.bound_charge.add(model.charge[i] <= charge_max)
                model.bound_charge.add(model.charge[i] >= 0)

                model.bound_charge.add(
                    model.Psin[i] <= charge_max - charge_init)
                model.bound_charge.add(model.Psout[i] <= charge_init)
            else:
                model.bound_charge.add(model.charge[i] == previous_charge[i] -
                                       eta * model.Psin[i] +
                                       eta * model.Psout[i])

                model.bound_charge.add(model.charge[i] <= charge_max)
                model.bound_charge.add(model.charge[i] >= 0)
                model.bound_charge.add(
                    model.Psin[i] <= charge_max - previous_charge[i])
                model.bound_charge.add(model.Psout[i] <= previous_charge[i])

        # GRID:

        model.bound_gin = ConstraintList()
        model.bound_gout = ConstraintList()
        for i, pod in enumerate(pod_list_conf):
            model.bound_gin.add(inequality(gin_min, model.Pgin[i], gin_max))
            model.bound_gout.add(inequality(gout_min, model.Pgout[i],
                                            gout_max))

        # POWER BALANCE:
        # May contain a term for a load component

        model.bound_tildeload = ConstraintList()
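        # At each time step t the aggregate (shifted) load must be covered:
        #   tildeload[t] == sum over pods i of ( Pgin[i] - Pgout[i]
        #       + Puchp[i] + suchp[i, t]            (if pod i has uchp)
        #       + Pgas_chp[i] + sgas_chp[i, t]      (if pod i has gas_chp)
        #       + Psin[i] - Psout[i]                (if pod i has storage)
        #       + pv[i, t] )                        (if pod i has pv)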

        right_side = 0
        left_side = tildeload[t]
        for i, pod in enumerate(pod_list_conf):
            right_side += model.Pgin[i] - model.Pgout[i]
            if 'uchp' in pod[2]:
                right_side += model.Puchp[i] + suchp_df[str(
                    i)].values.tolist()[t]
            if 'gas_chp' in pod[2]:
                right_side += model.Pgas_chp[i] + sgas_chp_df[str(
                    i)].values.tolist()[t]
            if 'storage' in pod[2]:
                right_side += model.Psin[i] - model.Psout[i]
            if 'pv' in pod[2]:
                right_side += pod[3]['pv'].values.tolist()[t]

        model.bound_tildeload.add(left_side == right_side)

        #model.pprint()

        opt = SolverFactory('gurobi')
        results = opt.solve(model, tee=True)

        # alternatively: model.load(results)
        model.solutions.store_to(results)
        #results.write()

        # save the charge value from the previous iteration so it can be used in the next constraint
        for i, pod in enumerate(pod_list_conf):
            if 'storage' in pod[2]:
                previous_charge[i] = model.charge[i].value

        for i, pod in enumerate(pod_list_conf):
            if 'uchp' in pod[2]:
                previous_uchp[i] = model.tildeuchp.value
                #print(previous_uchp[i])
            if 'gas_chp' in pod[2]:
                previous_gaschp[i] = model.tildegas_chp.value
                #print(previous_gaschp[i])
        '''# to save all the variable values of every pod
        # FOR EACH POD i:
        #       var_1[i] ... var_N[i]
        # 0      value   ...  value
        # 1      value   ...  value
        # ...    value   ...  value
        # T      value   ...  value
        #
        # HOW TO BUILD THEM:
        # - We iterate at every t, so at every t we can already build as many structures as there are pods,
        #   and in particular append one row at every t.
        # - To do this we need auxiliary dataframes (one per pod), instantiated at the start of the program
        #   and filled at the end of every iteration over T.
        # - At the end of the main loop over T, we save (in an auxiliary loop) all these dataframes as csv.

        for i, pod in enumerate(pod_list_conf):
            labels = []
            row_as_list = []
            # temp_df = temp_df.append(model.Pgin[i], model.Pgout[i])
            labels.append('Pgin_{}'.format(pod[0]))
            labels.append('Pgout_{}'.format(pod[0]))
            row_as_list.append(model.Pgin[i].value)
            row_as_list.append(model.Pgout[i].value)
            if 'chp' in pod[2]:
                # temp_df = temp_df.append(model.Pchp[i])
                labels.append('Pchp_{}'.format(pod[0]))
                row_as_list.append(model.Pchp[i].value)
            if 'storage' in pod[2]:
                # temp_df = temp_df.append(model.Sout[i], model.Sin[i])
                labels.append('Psin_{}'.format(pod[0]))
                labels.append('Psout_{}'.format(pod[0]))
                row_as_list.append(model.Psin[i].value)
                row_as_list.append(model.Psout[i].value)
            if 'pv' in pod[2]:
                # temp_df = temp_df.append(model.Ppv[i])
                labels.append('Ppv_{}'.format(pod[0]))
                row_as_list.append(pod[3]['pv'].values.tolist()[t])
            if 'load' in pod[2]:
                # temp_df = temp_df.append(model.Sload[i]) 
                labels.append('Pload_{}'.format(pod[0]))
                row_as_list.append(pod[3]['load'].values.tolist()[t])
            
            row_as_series = pd.Series(row_as_list)

            pod[4] = pod[4].append(row_as_series, ignore_index = True)
            
            if t == T-1:
                pod[4].columns = labels
                pod[4].to_csv('pod_{}.csv'.format(pod[0]), index=False)
                print("PRINTING RESULTS FOR POD " + str(i))
                print(pod[4])'''

        # SOLUTION TIME
        solver_time = get_info_from_results(results, 'Time: ')
        tot_time.append(float(solver_time))

        obj_value_list.append(float(model.OBJ()))

        # TERMINATION CHECK
        if (results.solver.termination_condition ==
                TerminationCondition.optimal):
            print("Model solved successfully")
        elif (results.solver.termination_condition ==
              TerminationCondition.infeasible):
            print("Termination condition is INFEASIBLE")
        else:
            print("Error: Solver Status", results.solver.status)
        '''stdout_backup = sys.stdout

        with open('results_online_step_{}.yml'.format(t), 'a') as f:
            sys.stdout = f
            results.write()

        sys.stdout = stdout_backup'''

        # PLOT OUTPUT

        if pv_index_list:
            acc = 0
            for i in pv_index_list:
                acc += pod_list_conf[i][3]['pv'].values.tolist()[t]
            pvlist.append(acc)

        if load_index_list:
            tildeloadlist.append(tildeload[t])

        if uchp_index_list:
            tildeuchplist.append(model.tildeuchp.value)

        if gas_chp_index_list:
            tildegaschplist.append(model.tildegas_chp.value)

        acc = 0
        for i in range(len(pod_list_conf)):
            acc += model.Pgin[i].value
        gridINlist.append(acc)

        acc = 0
        for i in range(len(pod_list_conf)):
            acc += model.Pgout[i].value
        gridOUTlist.append(-acc)

        if charge_index_list:
            acc = 0
            for i in charge_index_list:
                acc += model.Psin[i].value
            stINlist.append(acc)

        if charge_index_list:
            acc = 0
            for i in charge_index_list:
                acc += model.Psout[i].value
            stOUTlist.append(-acc)
    # End of the time loop

    tot_time_sum = sum(tot_time)
    #print("SOLVER TIME: " + str(tot_time_sum))
    #print(obj_value_list)

    f.write("Components: {}\n".format(
        len(pv_index_list) + len(load_index_list) + len(gas_chp_index_list) +
        len(uchp_index_list) + len(charge_index_list)))
    f.write("    # Pv: {}\n".format(len(pv_index_list)))
    f.write("    # Load: {}\n".format(len(load_index_list)))
    f.write("    # Gas Chp: {}\n".format(len(gas_chp_index_list)))
    f.write("    # Uchp: {}\n".format(len(uchp_index_list)))
    f.write("    # Storage: {}\n".format(len(charge_index_list)))
    f.write("SOLVER TIME:" + str(tot_time_sum) + "\n")
    f.write("OBJ function {}:".format(opt_func) + "\n")
    f.write("MIN OBJ: {}, MAX OBJ: {}, MEAN OBJ: {}\n".format(
        min(obj_value_list), max(obj_value_list), np.mean(obj_value_list)))
    f.write("TOTAL OBJ: {}\n".format(sum(obj_value_list)))
    f.write("FULL LIST: \n")
    for el in obj_value_list:
        if el < 0:
            f.write("     {}\n".format(str(el)))
        else:
            f.write("      {}\n".format(str(el)))
    f.write("\n")
    f.close()

    resultimg, result = plt.subplots(figsize=(20, 10))
    images, = result.plot(tildeloadlist, linestyle='-', color='red')
    images, = result.plot(pvlist, linestyle='-', color='green')
    images, = result.plot(tildeuchplist, linestyle='-', color='purple')
    images, = result.plot(tildegaschplist, linestyle='-', color='magenta')
    images, = result.plot([sum(x) for x in zip(gridINlist, gridOUTlist)],
                          linestyle='-',
                          color='#3a55a1',
                          linewidth=2)
    #images, = result.plot(gridOUTlist, linestyle='-', color='blue')
    images, = result.plot([sum(x) for x in zip(stINlist, stOUTlist)],
                          linestyle='-',
                          color='#fa7e25',
                          linewidth=2)
    #images, = result.plot(stOUTlist, linestyle='-', color='orange')
    result.legend(['Load', 'PV', 'UChp', 'GASChp', 'Grid', 'Storage'],
                  fancybox=True,
                  framealpha=0.5)

    tilde = np.interp(np.arange(0.0, 96.0, 0.1), fixed_time_list,
                      tildeloadlist)
    result.fill_between(np.arange(0.0, 96.0, 0.1),
                        tilde,
                        0,
                        facecolor='red',
                        alpha=0.3)

    if pv_index_list:
        pv = np.interp(np.arange(0.0, 96.0, 0.1), fixed_time_list, pvlist)
        result.fill_between(np.arange(0.0, 96.0, 0.1),
                            pv,
                            0,
                            facecolor='green',
                            alpha=0.5)

    if uchp_index_list:
        uchp = np.interp(np.arange(0.0, 96.0, 0.1), fixed_time_list,
                         tildeuchplist)
        result.fill_between(np.arange(0.0, 96.0, 0.1),
                            uchp,
                            0,
                            facecolor='purple',
                            alpha=0.1)

    if gas_chp_index_list:
        gaschp = np.interp(np.arange(0.0, 96.0, 0.1), fixed_time_list,
                           tildegaschplist)
        result.fill_between(np.arange(0.0, 96.0, 0.1),
                            gaschp,
                            0,
                            facecolor='magenta',
                            alpha=0.1)

    #plt.plot(results_list, linestyle='--', marker='o', color='b')
    plt.ylabel('Energy value (kW)')
    plt.xlabel('Time instant')
    plt.locator_params(axis='x', nbins=96)
    plt.grid(True)
    plt.savefig('results_online.png', dpi=200)

    plt.close(resultimg)

    # DONUT CHART WITH PERCENTAGES

    fig, ax = plt.subplots(figsize=(6, 3), subplot_kw=dict(aspect="equal"))

    data = [
        sum(pvlist),
        sum(tildeuchplist),
        sum(tildegaschplist),
        abs(sum([sum(x) for x in zip(gridINlist, gridOUTlist)])),
        abs(sum([sum(x) for x in zip(stINlist, stOUTlist)]))
    ]
    labels = ['PV', 'UChp', 'GASChp', 'Grid', 'Storage']

    def func(pct, allvals):
        absolute = int(pct / 100. * np.sum(allvals))
        return "{:.1f}%\n({:d} kWh)".format(pct, absolute)

    wedges, texts, autotexts = ax.pie(data,
                                      wedgeprops=dict(width=0.5),
                                      startangle=-40,
                                      autopct=lambda pct: func(pct, data),
                                      pctdistance=0.8,
                                      textprops={
                                          'color': "w",
                                          'fontsize': 7
                                      })

    bbox_props = dict(boxstyle="square,pad=0.3", fc="w", ec="k", lw=0.72)
    kw = dict(arrowprops=dict(arrowstyle="-"),
              bbox=bbox_props,
              zorder=0,
              va="center")

    for i, p in enumerate(wedges):
        ang = (p.theta2 - p.theta1) / 2. + p.theta1
        y = np.sin(np.deg2rad(ang))
        x = np.cos(np.deg2rad(ang))
        horizontalalignment = {-1: "right", 1: "left"}[int(np.sign(x))]
        connectionstyle = "angle,angleA=0,angleB={}".format(ang)
        kw["arrowprops"].update({"connectionstyle": connectionstyle})
        ax.annotate(labels[i],
                    xy=(x, y),
                    xytext=(1.35 * np.sign(x), 1.4 * y),
                    horizontalalignment=horizontalalignment,
                    **kw)

    plt.savefig('pie_online.png', dpi=200)

    plt.close(fig)
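Stripped of the data handling and plotting, the loop above is a receding-horizon scheme: build a small ConcreteModel for the current time slice, solve it, and carry the storage state forward through previous_charge. A hedged skeleton of that pattern, with illustrative names and bounds and 'glpk' standing in for whichever solver is installed, is:

# Skeleton of the per-time-step loop (illustrative values; not the full model).
from pyomo.environ import (ConcreteModel, Var, Constraint, Objective,
                           NonNegativeReals, SolverFactory)

T = 4                # number of time slices (96 in the example above)
eta = 0.9            # storage efficiency
charge_max = 6.0
charge_prev = 2.0    # carried-over state (charge_init in the example)

opt = SolverFactory('glpk')  # assumption: any installed LP solver works here
for t in range(T):
    m = ConcreteModel()
    m.psin = Var(domain=NonNegativeReals, bounds=(0, 4))
    m.psout = Var(domain=NonNegativeReals, bounds=(0, 4))
    m.charge = Var(domain=NonNegativeReals, bounds=(0, charge_max))
    # state transition, linking this step to the previous solve
    m.state = Constraint(expr=m.charge ==
                         charge_prev - eta * m.psin + eta * m.psout)
    m.obj = Objective(expr=m.psin + m.psout)  # placeholder objective
    opt.solve(m)
    charge_prev = m.charge.value  # becomes previous_charge for the next step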