Example #1
    def convert_prob(self):
        self.logger.info("Converting optimization problem")

        self.model.con_list = ConstraintList()

        # Set of objective functions
        self.model.Os = Set(ordered=True, initialize=[o + 2 for o in self.iter_obj2])

        # Slack for objectives introduced as constraints
        self.model.Slack = Var(self.model.Os, within=NonNegativeReals)
        self.model.e = Param(
            self.model.Os,
            initialize=[np.nan for _ in self.model.Os],
            within=Any,
            mutable=True,
        )  # RHS of constraints

        # Add p-1 objective functions as constraints
        for o in range(1, self.n_obj):
            self.model.obj_list[1].expr += self.opts.eps * (
                10 ** (-1 * (o - 1)) * self.model.Slack[o + 1] / self.obj_range[o - 1]
            )

            self.model.con_list.add(
                expr=self.model.obj_list[o + 1].expr - self.model.Slack[o + 1]
                == self.model.e[o + 1]
            )
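The method above turns each secondary objective into an epsilon-constraint with a slack variable and folds a scaled slack term back into the primary objective (the pattern used by augmented epsilon-constraint methods). A minimal standalone sketch of the same move, with a made-up two-objective model and illustrative eps/range values:

from pyomo.environ import (ConcreteModel, ConstraintList, NonNegativeReals,
                           Objective, Param, Var, minimize)

m = ConcreteModel()
m.x = Var(bounds=(0, 10))
m.y = Var(bounds=(0, 10))
m.obj = Objective(expr=m.x + m.y, sense=minimize)  # primary objective

# Second objective f2 = x - y becomes the constraint f2 - s == e
m.s = Var(within=NonNegativeReals)                 # slack
m.e = Param(initialize=0.0, mutable=True)          # RHS, updated per grid point
m.cons = ConstraintList()
m.cons.add(expr=(m.x - m.y) - m.s == m.e)

# Augment the primary objective so the slack is pushed to its bound
eps, obj2_range = 1e-3, 20.0                       # illustrative scaling values
m.obj.expr += eps * m.s / obj2_range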
Example #2
    def construct_payoff(self):
        self.logger.info("Constructing payoff")
        self.progress.set_message("constructing payoff")

        def set_payoff(i, j):
            self.obj_activate(j)
            self.solve()
            self.progress.increment()
            self.payoff[i, j] = self.obj_val(j)
            self.obj_deactivate(j)

        self.payoff = np.full((self.n_obj, self.n_obj), np.inf)
        self.model.pcon_list = ConstraintList()

        # Independently optimize each objective function (diagonal elements)
        for i in self.iter_obj:
            for j in self.iter_obj:
                if i == j:
                    set_payoff(i, j)

        # Optimize j having all the i as constraints (off-diagonal elements)
        for i in self.iter_obj:
            self.model.pcon_list.add(expr=self.obj_expr(i) == self.payoff[i, i])

            for j in self.iter_obj:
                if i != j:
                    set_payoff(i, j)
                    self.model.pcon_list.add(expr=self.obj_expr(j) == self.payoff[i, j])

            self.model.pcon_list.clear()
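The diagonal of such a payoff table only needs one deactivatable Objective per criterion. A minimal sketch, assuming an LP solver such as glpk is installed; the off-diagonal entries would additionally pin the earlier objectives at their optima through a ConstraintList, as the method above does:

import numpy as np
from pyomo.environ import (ConcreteModel, Objective, SolverFactory, Var,
                           maximize, value)

m = ConcreteModel()
m.x = Var(bounds=(0, 4))
m.y = Var(bounds=(0, 4))
m.o1 = Objective(expr=m.x + m.y, sense=maximize)
m.o2 = Objective(expr=m.x - m.y, sense=maximize)
objs = [m.o1, m.o2]
for o in objs:
    o.deactivate()

solver = SolverFactory("glpk")
payoff = np.full((2, 2), np.inf)
for i, o in enumerate(objs):   # diagonal: optimize each objective alone
    o.activate()
    solver.solve(m)
    payoff[i, i] = value(o.expr)
    o.deactivate()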
Example #3
    def _discretize_bilinear(self, b, v, v_idx, u, u_idx):
        _z = b.z
        _dv = b.dv[v_idx]
        _u = Var(b.DISCRETIZATION, within=u.domain, bounds=u.bounds)
        logger.info("Discretizing (v=%s)*(u=%s) as u%s_v%s" %
                    (v.name, u.name, u_idx, v_idx))
        b.add_component("u%s_v%s" % (u_idx, v_idx), _u)
        _lb, _ub = u.bounds
        if _lb is None or _ub is None:
            raise RuntimeError("Couldn't relax variable %s: missing "
                               "finite lower/upper bounds." % (u.name))
        _c = ConstraintList()
        b.add_component("c_disaggregate_u%s_v%s" % (u_idx, v_idx), _c)
        for k in b.DISCRETIZATION:
            # _lb * z[v_idx,k] <= _u[k] <= _ub * z[v_idx,k]
            _c.add(expr=_lb * _z[v_idx, k] <= _u[k])
            _c.add(expr=_u[k] <= _ub * _z[v_idx, k])
            # _lb * (1-z[v_idx,k]) <= u - _u[k] <= _ub * (1-z[v_idx,k])
            _c.add(expr=_lb * (1 - _z[v_idx, k]) <= u - _u[k])
            _c.add(expr=u - _u[k] <= _ub * (1 - _z[v_idx, k]))

        _v_lb, _v_ub = v.bounds
        _bnd_rng = (_v_lb * _lb, _v_lb * _ub, _v_ub * _lb, _v_ub * _ub)
        _w = Var(bounds=(min(_bnd_rng), max(_bnd_rng)))
        b.add_component("w%s_v%s" % (u_idx, v_idx), _w)

        K = max(b.DISCRETIZATION)

        _dw = Var(bounds=(min(0, _lb * 2**-K, _ub * 2**-K),
                          max(0, _lb * 2**-K, _ub * 2**-K)))
        b.add_component("dw%s_v%s" % (u_idx, v_idx), _dw)

        _c = Constraint(expr=_w == _v_lb * u + (_v_ub - _v_lb) *
                        (sum(2**-k * _u[k] for k in b.DISCRETIZATION) + _dw))
        b.add_component("c_bilinear_u%s_v%s" % (u_idx, v_idx), _c)

        _c = ConstraintList()
        b.add_component("c_mccormick_u%s_v%s" % (u_idx, v_idx), _c)
        # u_lb * dv <= dw <= u_ub * dv
        _c.add(expr=_lb * _dv <= _dw)
        _c.add(expr=_dw <= _ub * _dv)
        # (u-u_ub)*2^-K + u_ub*dv <= dw <= (u-u_lb)*2^-K + u_lb*dv
        _c.add(expr=(u - _ub) * 2**-K + _ub * _dv <= _dw)
        _c.add(expr=_dw <= (u - _lb) * 2**-K + _lb * _dv)

        return _w
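The c_disaggregate block above is the standard big-M disaggregation: each copy u_k equals u when the binary z[v_idx, k] is on and collapses to zero when it is off. A standalone sketch of that building block with toy bounds and hypothetical names:

from pyomo.environ import Binary, ConcreteModel, ConstraintList, RangeSet, Var

m = ConcreteModel()
m.K = RangeSet(3)                   # discretization levels
m.u = Var(bounds=(-2.0, 5.0))
m.z = Var(m.K, within=Binary)
m.uk = Var(m.K, bounds=m.u.bounds)  # disaggregated copies of u

lb, ub = m.u.bounds
m.c = ConstraintList()
for k in m.K:
    # uk[k] is forced to 0 when z[k] == 0 ...
    m.c.add(expr=lb * m.z[k] <= m.uk[k])
    m.c.add(expr=m.uk[k] <= ub * m.z[k])
    # ... and to u when z[k] == 1
    m.c.add(expr=lb * (1 - m.z[k]) <= m.u - m.uk[k])
    m.c.add(expr=m.u - m.uk[k] <= ub * (1 - m.z[k]))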
Example #4
def replace_uncertain_bounds_with_constraints(model, uncertain_params):
    """
    For variables of which the bounds are dependent on the parameters
    in the list `uncertain_params`, remove the bounds and add
    explicit variable bound inequality constraints.

    :param model: Model in which to make the bounds/constraint replacements
    :type model: :class:`pyomo.core.base.PyomoModel.ConcreteModel`
    :param uncertain_params: List of uncertain model parameters
    :type uncertain_params: list
    """
    uncertain_param_set = ComponentSet(uncertain_params)

    # component for explicit inequality constraints
    uncertain_var_bound_constrs = ConstraintList()
    model.add_component(unique_component_name(model,
                                              'uncertain_var_bound_cons'),
                        uncertain_var_bound_constrs)

    # get all variables in active objective and constraint expression(s)
    vars_in_cons = ComponentSet(get_vars_from_component(model, Constraint))
    vars_in_obj = ComponentSet(get_vars_from_component(model, Objective))

    for v in vars_in_cons | vars_in_obj:
        # get mutable parameters in variable bounds expressions
        ub = v.upper
        mutable_params_ub = ComponentSet(identify_mutable_parameters(ub))
        lb = v.lower
        mutable_params_lb = ComponentSet(identify_mutable_parameters(lb))

        # add explicit inequality constraint(s), remove variable bound(s)
        if mutable_params_ub & uncertain_param_set:
            if type(ub) is NPV_MinExpression:
                upper_bounds = ub.args
            else:
                upper_bounds = (ub,)
            for u_bnd in upper_bounds:
                uncertain_var_bound_constrs.add(v - u_bnd <= 0)
            v.setub(None)
        if mutable_params_lb & uncertain_param_set:
            if type(lb) is NPV_MaxExpression:
                lower_bounds = lb.args
            else:
                lower_bounds = (lb,)
            for l_bnd in lower_bounds:
                uncertain_var_bound_constrs.add(l_bnd - v <= 0)
            v.setlb(None)
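The essential move, isolated on a toy model with made-up names: the parametric bound becomes an explicit inequality referencing the mutable Param, and the variable bound is cleared.

from pyomo.environ import ConcreteModel, ConstraintList, Param, Var

m = ConcreteModel()
m.q = Param(initialize=5.0, mutable=True)  # uncertain parameter
m.v = Var()
m.v.setub(2 * m.q)                         # bound depends on q

m.bound_cons = ConstraintList()
m.bound_cons.add(m.v - 2 * m.q <= 0)       # explicit constraint instead
m.v.setub(None)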
Example #5
    def make_noisy(self, cov_dict, conf_level=2):
        self.d1.name = "Noisy plant (d1)"
        k = 0
        for x in self.states:
            s = getattr(self.d1, x)  #: state
            xicc = getattr(self.d1, x + "_icc")
            xicc.deactivate()
            for j in self.state_vars[x]:
                self.xp_l.append(s[(1, 0) + j])
                self.xp_key[(x, j)] = k
                k += 1

        self.d1.xS_pnoisy = Set(initialize=[
            i for i in range(0, len(self.xp_l))
        ])  #: Create set of noisy_states
        self.d1.w_pnoisy = Var(self.d1.xS_pnoisy,
                               initialize=0.0)  #: Model disturbance
        self.d1.Q_pnoisy = Param(self.d1.xS_pnoisy, initialize=1, mutable=True)
        self.d1.obj_fun_noisy = Objective(
            sense=maximize,
            expr=0.5 * sum(self.d1.Q_pnoisy[k] * self.d1.w_pnoisy[k]**2
                           for k in self.d1.xS_pnoisy))
        self.d1.ics_noisy = ConstraintList()

        k = 0
        for x in self.states:
            s = getattr(self.d1, x)  #: state
            xic = getattr(self.d1, x + "_ic")
            for j in self.state_vars[x]:
                expr = s[(1, 1) + j] == xic[j] + self.d1.w_pnoisy[k]
                self.d1.ics_noisy.add(expr)
                k += 1

        for key in cov_dict:
            vni = key
            v_i = self.xp_key[vni]
            self.d1.Q_pnoisy[v_i].value = cov_dict[vni]
            self.d1.w_pnoisy[v_i].setlb(-conf_level * cov_dict[vni])
            self.d1.w_pnoisy[v_i].setub(conf_level * cov_dict[vni])

        with open("debug.txt", "w") as f:
            self.d1.Q_pnoisy.display(ostream=f)
            self.d1.obj_fun_noisy.pprint(ostream=f)
            self.d1.ics_noisy.pprint(ostream=f)
            self.d1.w_pnoisy.display(ostream=f)
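Stripped of the class plumbing, the construction is a weighted-noise objective plus perturbed initial-condition constraints. A minimal sketch with made-up dimensions and nominal values:

from pyomo.environ import (ConcreteModel, ConstraintList, Objective, Param,
                           RangeSet, Var, maximize)

m = ConcreteModel()
m.S = RangeSet(0, 2)                            # noisy-state indices
m.w = Var(m.S, initialize=0.0)                  # disturbances
m.Q = Param(m.S, initialize=1.0, mutable=True)  # covariance weights
m.x1 = Var(m.S)                                 # states at the first point
x_ic = {0: 1.0, 1: 0.5, 2: 0.0}                 # nominal initial conditions

m.obj = Objective(sense=maximize,
                  expr=0.5 * sum(m.Q[k] * m.w[k] ** 2 for k in m.S))
m.ics = ConstraintList()
for k in m.S:
    m.ics.add(m.x1[k] == x_ic[k] + m.w[k])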
Example #6
def substitute_ssv_in_dr_constraints(model, constraint):
    '''
    Generate the standard_repn for each decision rule (dr) constraint and,
    via replace_expressions, build a new expression for the rule that drops
    the second-stage variable (ssv) component. Substitute that expression for
    the ssv in the given constraint, delete the original constraint, and
    return the substituted constraint so coefficient matching can proceed as
    normal.
    :param model: the working_model
    :param constraint: an equality constraint from the working model,
        identified to be of the form h(x, z, q) == 0
    :return: the substituted constraint in model.dr_substituted_constraints
    '''
    dr_eqns = model.util.decision_rule_eqns
    fsv = ComponentSet(model.util.first_stage_variables)
    if not hasattr(model, "dr_substituted_constraints"):
        model.dr_substituted_constraints = ConstraintList()
    for eqn in dr_eqns:
        repn = generate_standard_repn(eqn.body, compute_values=False)
        new_expression = 0
        map_linear_coeff_to_var = [x for x in zip(repn.linear_coefs, repn.linear_vars) if x[1] in ComponentSet(fsv)]
        map_quad_coeff_to_var = [x for x in zip(repn.quadratic_coefs, repn.quadratic_vars) if x[1] in ComponentSet(fsv)]
        if repn.linear_coefs:
            for coeff, var in map_linear_coeff_to_var:
                new_expression += coeff * var
        if repn.quadratic_coefs:
            for coeff, var in map_quad_coeff_to_var:
                new_expression += coeff * var[0] * var[1] # var here is a 2-tuple

        model.no_ssv_dr_expr = Expression(expr=new_expression)
        substitution_map = {}
        substitution_map[id(repn.linear_vars[-1])] = model.no_ssv_dr_expr.expr

    model.dr_substituted_constraints.add(
            replace_expressions(expr=constraint.lower,
                                     substitution_map=substitution_map) ==
            replace_expressions(expr=constraint.body,
                                     substitution_map=substitution_map))

    # === Delete the original constraint
    model.del_component(constraint.name)
    model.del_component("no_ssv_dr_expr")

    return model.dr_substituted_constraints[max(model.dr_substituted_constraints.keys())]
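The workhorse here is replace_expressions with a substitution map keyed on id() of the variable being replaced. A minimal sketch, assuming it can be imported from pyomo.core.expr.visitor as in recent Pyomo releases:

from pyomo.core.expr.visitor import replace_expressions
from pyomo.environ import ConcreteModel, Var

m = ConcreteModel()
m.x = Var()
m.z = Var()

e = m.x + 2 * m.z
# Swap z for an expression in x; the map is keyed on id() of the leaf node
new_e = replace_expressions(expr=e, substitution_map={id(m.z): 3 * m.x + 1})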
Example #7
    def _relax_bilinear(self, b, u, v):
        u_lb, u_ub = u.bounds
        v_lb, v_ub = v.bounds
        if u_lb is None or u_ub is None:
            raise RuntimeError("Couldn't relax variable %s: missing "
                               "finite lower/upper bounds." % (u.cname(True)))
        if v_lb is None or v_ub is None:
            raise RuntimeError("Couldn't relax variable %s: missing "
                               "finite lower/upper bounds." % (v.cname(True)))
        w = Var(bounds=(min(u_lb*v_lb, u_lb*v_ub, u_ub*v_lb, u_ub*v_ub),
                        max(u_lb*v_lb, u_lb*v_ub, u_ub*v_lb, u_ub*v_ub)))
        b.add_component("w_%s_%s" % (u.cname(), v.cname()), w)

        _c = ConstraintList(noruleinit=True)
        b.add_component("c_mccormick_%s_%s" % (u.cname(), v.cname()), _c)

        _c.add(expr=w >= u * v_lb + u_lb * v - u_lb*v_lb)
        _c.add(expr=w >= u * v_ub + u_ub * v - u_ub*v_ub)
        _c.add(expr=w <= u * v_lb + u_ub * v - u_ub*v_lb)
        _c.add(expr=w <= u * v_ub + u_lb * v - u_lb*v_ub)

        return w
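The four inequalities are the standard McCormick envelope for the bilinear term w = u*v over the variables' bound box. The same relaxation, written against a plain ConcreteModel:

from pyomo.environ import ConcreteModel, ConstraintList, Var

m = ConcreteModel()
m.u = Var(bounds=(0.0, 2.0))
m.v = Var(bounds=(1.0, 3.0))
u_lb, u_ub = m.u.bounds
v_lb, v_ub = m.v.bounds

corners = (u_lb * v_lb, u_lb * v_ub, u_ub * v_lb, u_ub * v_ub)
m.w = Var(bounds=(min(corners), max(corners)))  # relaxes w = u*v

m.mccormick = ConstraintList()
m.mccormick.add(expr=m.w >= m.u * v_lb + u_lb * m.v - u_lb * v_lb)
m.mccormick.add(expr=m.w >= m.u * v_ub + u_ub * m.v - u_ub * v_ub)
m.mccormick.add(expr=m.w <= m.u * v_lb + u_ub * m.v - u_ub * v_lb)
m.mccormick.add(expr=m.w <= m.u * v_ub + u_lb * m.v - u_lb * v_ub)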
Example #8
    def _apply_to(self, instance, **kwds):
        if __debug__ and logger.isEnabledFor(logging.DEBUG):  #pragma:nocover
            logger.debug("Calling ConnectorExpander")

        connectorsFound = False
        for c in instance.component_data_objects(Connector):
            connectorsFound = True
            break
        if not connectorsFound:
            return

        if __debug__ and logger.isEnabledFor(logging.DEBUG):  #pragma:nocover
            logger.debug("   Connectors found!")

        self._name_buffer = {}

        #
        # At this point, there are connectors in the model, so we must
        # look for constraints that involve connectors and expand them.
        #
        # List of the connectors in the order in which we found them
        # (this should be deterministic, provided that the user's model
        # is deterministic)
        connector_list = []
        # list of constraints with connectors: tuple(constraint, connector_set)
        # (this should be deterministic, provided that the user's model
        # is deterministic)
        constraint_list = []
        # ID of the next connector group (set of matched connectors)
        groupID = 0
        # connector_groups starts out as a dict of {id(set): (groupID, set)}
        # If you sort by the groupID, then this will be deterministic.
        connector_groups = dict()
        # map of connector to the set of connectors that must match it
        matched_connectors = ComponentMap()
        # The set of connectors found in the current constraint
        found = ComponentSet()

        connector_types = set([SimpleConnector, _ConnectorData])
        for constraint in instance.component_data_objects(
                Constraint, sort=SortComponents.deterministic):
            ref = None
            for c in EXPR.identify_components(constraint.body,
                                              connector_types):
                found.add(c)
                if c in matched_connectors:
                    if ref is None:
                        # The first connector in this constraint has
                        # already been seen.  We will use that Set as
                        # the reference
                        ref = matched_connectors[c]
                    elif ref is not matched_connectors[c]:
                        # We already have a reference group; merge this
                        # new group into it.
                        #
                        # Optimization: this merge is linear in the size
                        # of the src set.  If the reference set is
                        # smaller, save time by switching to a new
                        # reference set.
                        src = matched_connectors[c]
                        if len(ref) < len(src):
                            ref, src = src, ref
                        ref.update(src)
                        for _ in src:
                            matched_connectors[_] = ref
                        del connector_groups[id(src)]
                    # else: pass
                    #   The new group *is* the reference group;
                    #   there is nothing to do.
                else:
                    # The connector has not been seen before.
                    connector_list.append(c)
                    if ref is None:
                        # This is the first connector in the constraint:
                        # start a new reference set.
                        ref = ComponentSet()
                        connector_groups[id(ref)] = (groupID, ref)
                        groupID += 1
                    # This connector hasn't been seen.  Record it.
                    ref.add(c)
                    matched_connectors[c] = ref
            if ref is not None:
                constraint_list.append((constraint, found))
                found = ComponentSet()

        # Validate all connector sets and expand the empty ones
        known_conn_sets = {}
        for groupID, conn_set in sorted(itervalues(connector_groups)):
            known_conn_sets[id(conn_set)] \
                = self._validate_and_expand_connector_set(conn_set)

        # Expand each constraint
        for constraint, conn_set in constraint_list:
            cList = ConstraintList()
            constraint.parent_block().add_component(
                '%s.expanded' % (constraint.getname(
                    fully_qualified=False, name_buffer=self._name_buffer), ),
                cList)
            connId = next(iter(conn_set))
            ref = known_conn_sets[id(matched_connectors[connId])]
            for k, v in sorted(iteritems(ref)):
                if v[1] >= 0:
                    _iter = v[0]
                else:
                    _iter = (v[0], )
                for idx in _iter:
                    substitution = {}
                    for c in conn_set:
                        if v[1] >= 0:
                            new_v = c.vars[k][idx]
                        elif k in c.aggregators:
                            new_v = c.vars[k].add()
                        else:
                            new_v = c.vars[k]
                        substitution[id(c)] = new_v
                    cList.add((constraint.lower,
                               EXPR.clone_expression(constraint.body,
                                                     substitution),
                               constraint.upper))
            constraint.deactivate()

        # Now, go back and implement VarList aggregators
        for conn in connector_list:
            block = conn.parent_block()
            for var, aggregator in iteritems(conn.aggregators):
                c = Constraint(expr=aggregator(block, conn.vars[var]))
                block.add_component(
                    '%s.%s.aggregate' %
                    (conn.getname(fully_qualified=True,
                                  name_buffer=self._name_buffer), var), c)
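Connectors have since been superseded by Ports in pyomo.network, and the expansion performed by this transformation is available there as network.expand_arcs. A sketch of the modern user-level equivalent (not the internal code above):

from pyomo.environ import ConcreteModel, TransformationFactory, Var
from pyomo.network import Arc, Port

m = ConcreteModel()
m.f1 = Var()
m.f2 = Var()
m.p1 = Port(initialize={"flow": m.f1})
m.p2 = Port(initialize={"flow": m.f2})
m.a = Arc(source=m.p1, destination=m.p2)

# Expands the Arc into equality constraints (here: f1 == f2)
TransformationFactory("network.expand_arcs").apply_to(m)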
Example #9
def ROSolver_iterative_solve(model_data, config):
    '''
    GRCS algorithm implementation
    :param model_data: ROSolveData object with deterministic model information
    :param config: ConfigBlock for the instance being solved
    '''

    # === The "violation" (i.e., the uncertain parameter realization added to the master problem) is nominal in iteration 0
    #     User can supply a nominal_uncertain_param_vals if they want to set nominal to a certain point,
    #     Otherwise, the default init value for the params is used as nominal_uncertain_param_vals
    violation = list(p for p in config.nominal_uncertain_param_vals)

    # === Do coefficient matching
    constraints = [
        c for c in model_data.working_model.component_data_objects(Constraint)
        if c.equality and c not in ComponentSet(
            model_data.working_model.util.decision_rule_eqns)
    ]
    model_data.working_model.util.h_x_q_constraints = ComponentSet()
    for c in constraints:
        coeff_matching_success, robust_infeasible = coefficient_matching(
            model=model_data.working_model,
            constraint=c,
            uncertain_params=model_data.working_model.util.uncertain_params,
            config=config)
        if not coeff_matching_success and not robust_infeasible:
            raise ValueError(
                "Equality constraint \"%s\" cannot be guaranteed to be robustly feasible, "
                "given the current partitioning between first-stage, second-stage and state variables. "
                "You might consider editing this constraint to reference some second-stage "
                "and/or state variable(s)." % c.name)
        elif not coeff_matching_success and robust_infeasible:
            config.progress_logger.info(
                "PyROS has determined that the model is robust infeasible. "
                "One reason for this is that equality constraint \"%s\" cannot be satisfied "
                "against all realizations of uncertainty, "
                "given the current partitioning between first-stage, second-stage and state variables. "
                "You might consider editing this constraint to reference some (additional) second-stage "
                "and/or state variable(s)." % c.name)
            return None, None
        else:
            pass

    # h(x,q) == 0 becomes h'(x) == 0
    for c in model_data.working_model.util.h_x_q_constraints:
        c.deactivate()

    # === Build the master problem and master problem data container object
    master_data = master_problem_methods.initial_construct_master(model_data)

    # === If using p_robustness, add ConstraintList for additional constraints
    if config.p_robustness:
        master_data.master_model.p_robust_constraints = ConstraintList()

    # === Add scenario_0
    master_data.master_model.scenarios[0, 0].transfer_attributes_from(
        master_data.original.clone())
    if len(master_data.master_model.scenarios[
            0, 0].util.uncertain_params) != len(violation):
        raise ValueError

    # === Set the nominal uncertain parameters to the violation values
    for i, v in enumerate(violation):
        master_data.master_model.scenarios[
            0, 0].util.uncertain_params[i].value = v

    # === Add objective function (assuming minimization of costs) with nominal second-stage costs
    if config.objective_focus is ObjectiveType.nominal:
        master_data.master_model.obj = Objective(
            expr=master_data.master_model.scenarios[0,
                                                    0].first_stage_objective +
            master_data.master_model.scenarios[0, 0].second_stage_objective)
    elif config.objective_focus is ObjectiveType.worst_case:
        # === Worst-case cost objective
        master_data.master_model.zeta = Var(initialize=value(
            master_data.master_model.scenarios[0, 0].first_stage_objective +
            master_data.master_model.scenarios[0, 0].second_stage_objective))
        master_data.master_model.obj = Objective(
            expr=master_data.master_model.zeta)
        master_data.master_model.scenarios[0, 0].epigraph_constr = Constraint(
            expr=master_data.master_model.scenarios[0,
                                                    0].first_stage_objective +
            master_data.master_model.scenarios[0, 0].second_stage_objective <=
            master_data.master_model.zeta)
        master_data.master_model.scenarios[
            0,
            0].util.first_stage_variables.append(master_data.master_model.zeta)

    # === Add deterministic constraints to ComponentSet on original so that these become part of separation model
    master_data.original.util.deterministic_constraints = \
        ComponentSet(c for c in master_data.original.component_data_objects(Constraint, descend_into=True))

    # === Make separation problem model once before entering the solve loop
    separation_model = separation_problem_methods.make_separation_problem(
        model_data=master_data, config=config)

    # === Create separation problem data container object and add information to catalog during solve
    separation_data = SeparationProblemData()
    separation_data.separation_model = separation_model
    separation_data.points_separated = [
    ]  # contains last point separated in the separation problem
    separation_data.points_added_to_master = [
        config.nominal_uncertain_param_vals
    ]  # explicitly robust against in master
    separation_data.constraint_violations = [
    ]  # list of constraint violations for each iteration
    separation_data.total_global_separation_solves = 0  # number of times global solve is used
    separation_data.timing = master_data.timing  # timing object

    # === Keep track of subsolver termination statuses from each iteration
    separation_data.separation_problem_subsolver_statuses = []

    # === Nominal information
    nominal_data = Block()
    nominal_data.nom_fsv_vals = []
    nominal_data.nom_ssv_vals = []
    nominal_data.nom_first_stage_cost = 0
    nominal_data.nom_second_stage_cost = 0
    nominal_data.nom_obj = 0

    # === Time information
    timing_data = Block()
    timing_data.total_master_solve_time = 0
    timing_data.total_separation_local_time = 0
    timing_data.total_separation_global_time = 0
    timing_data.total_dr_polish_time = 0

    dr_var_lists_original = []
    dr_var_lists_polished = []

    k = 0
    while config.max_iter == -1 or k < config.max_iter:
        master_data.iteration = k

        # === Add p-robust constraint if iteration > 0
        if k > 0 and config.p_robustness:
            master_problem_methods.add_p_robust_constraint(
                model_data=master_data, config=config)

        # === Solve Master Problem
        config.progress_logger.info("PyROS working on iteration %s..." % k)
        master_soln = master_problem_methods.solve_master(
            model_data=master_data, config=config)
        #config.progress_logger.info("Done solving Master Problem!")
        master_soln.master_problem_subsolver_statuses = []

        # === Keep track of total time and subsolver termination conditions
        timing_data.total_master_solve_time += get_time_from_solver(
            master_soln.results)
        timing_data.total_master_solve_time += get_time_from_solver(
            master_soln.feasibility_problem_results)

        master_soln.master_problem_subsolver_statuses.append(
            master_soln.results.solver.termination_condition)

        # === Check for robust infeasibility or error or time-out in master problem solve
        if master_soln.master_subsolver_results[
                1] is pyrosTerminationCondition.robust_infeasible:
            term_cond = pyrosTerminationCondition.robust_infeasible
            output_logger(config=config, robust_infeasible=True)
        elif master_soln.pyros_termination_condition is pyrosTerminationCondition.subsolver_error:
            term_cond = pyrosTerminationCondition.subsolver_error
        else:
            term_cond = None
        if term_cond == pyrosTerminationCondition.subsolver_error or \
                term_cond == pyrosTerminationCondition.robust_infeasible:
            update_grcs_solve_data(pyros_soln=model_data,
                                   k=k,
                                   term_cond=term_cond,
                                   nominal_data=nominal_data,
                                   timing_data=timing_data,
                                   separation_data=separation_data,
                                   master_soln=master_soln)
            return model_data, []
        # === Check if time limit reached
        elapsed = get_main_elapsed_time(model_data.timing)
        if config.time_limit:
            if elapsed >= config.time_limit:
                output_logger(config=config, time_out=True, elapsed=elapsed)
                update_grcs_solve_data(
                    pyros_soln=model_data,
                    k=k,
                    term_cond=pyrosTerminationCondition.time_out,
                    nominal_data=nominal_data,
                    timing_data=timing_data,
                    separation_data=separation_data,
                    master_soln=master_soln)
                return model_data, []

        # === Save nominal information
        if k == 0:
            for val in master_soln.fsv_vals:
                nominal_data.nom_fsv_vals.append(val)

            for val in master_soln.ssv_vals:
                nominal_data.nom_ssv_vals.append(val)

            nominal_data.nom_first_stage_cost = master_soln.first_stage_objective
            nominal_data.nom_second_stage_cost = master_soln.second_stage_objective
            nominal_data.nom_obj = value(master_data.master_model.obj)

        # === Decision rule polishing (skip on the first iteration, when there
        #     are no second-stage variables, or when decision_rule_order == 0)
        if (config.decision_rule_order != 0
                and len(config.second_stage_variables) > 0 and k != 0):
            # === Save initial values of DR vars to file
            for varslist in master_data.master_model.scenarios[
                    0, 0].util.decision_rule_vars:
                vals = []
                for dvar in varslist.values():
                    vals.append(dvar.value)
                dr_var_lists_original.append(vals)

            polishing_results = master_problem_methods.minimize_dr_vars(
                model_data=master_data, config=config)
            timing_data.total_dr_polish_time += get_time_from_solver(
                polishing_results)

            #=== Save after polish
            for varslist in master_data.master_model.scenarios[
                    0, 0].util.decision_rule_vars:
                vals = []
                for dvar in varslist.values():
                    vals.append(dvar.value)
                dr_var_lists_polished.append(vals)

        # === Set up for the separation problem
        separation_data.opt_fsv_vals = [
            v.value for v in master_soln.master_model.scenarios[
                0, 0].util.first_stage_variables
        ]
        separation_data.opt_ssv_vals = master_soln.ssv_vals

        # === Provide master model scenarios to separation problem for initialization options
        separation_data.master_scenarios = master_data.master_model.scenarios

        if config.objective_focus is ObjectiveType.worst_case:
            separation_model.util.zeta = value(master_soln.master_model.obj)

        # === Solve Separation Problem
        separation_data.iteration = k
        separation_data.master_nominal_scenario = master_data.master_model.scenarios[
            0, 0]

        separation_data.master_model = master_data.master_model

        separation_solns, violating_realizations, constr_violations, is_global, \
            local_sep_time, global_sep_time = \
                separation_problem_methods.solve_separation_problem(model_data=separation_data, config=config)

        for sep_soln_list in separation_solns:
            for s in sep_soln_list:
                separation_data.separation_problem_subsolver_statuses.append(
                    s.termination_condition)

        if is_global:
            separation_data.total_global_separation_solves += 1

        timing_data.total_separation_local_time += local_sep_time
        timing_data.total_separation_global_time += global_sep_time

        separation_data.constraint_violations.append(constr_violations)

        if not any(s.found_violation for solve_data_list in separation_solns
                   for s in solve_data_list):
            separation_data.points_separated = []
        else:
            separation_data.points_separated = violating_realizations

        # === Check if time limit reached
        elapsed = get_main_elapsed_time(model_data.timing)
        if config.time_limit:
            if elapsed >= config.time_limit:
                output_logger(config=config, time_out=True, elapsed=elapsed)
                termination_condition = pyrosTerminationCondition.time_out
                update_grcs_solve_data(pyros_soln=model_data,
                                       k=k,
                                       term_cond=termination_condition,
                                       nominal_data=nominal_data,
                                       timing_data=timing_data,
                                       separation_data=separation_data,
                                       master_soln=master_soln)
                return model_data, separation_solns

        # === Check if we exit due to solver returning unsatisfactory statuses (not in permitted_termination_conditions)
        local_solve_term_conditions = {
            TerminationCondition.optimal, TerminationCondition.locallyOptimal,
            TerminationCondition.globallyOptimal
        }
        global_solve_term_conditions = {
            TerminationCondition.optimal, TerminationCondition.globallyOptimal
        }
        if (is_global and any((s.termination_condition not in global_solve_term_conditions)
                                  for sep_soln_list in separation_solns for s in sep_soln_list)) or \
            (not is_global and any((s.termination_condition not in local_solve_term_conditions)
                                  for sep_soln_list in separation_solns for s in sep_soln_list)):
            termination_condition = pyrosTerminationCondition.subsolver_error
            update_grcs_solve_data(pyros_soln=model_data,
                                   k=k,
                                   term_cond=termination_condition,
                                   nominal_data=nominal_data,
                                   timing_data=timing_data,
                                   separation_data=separation_data,
                                   master_soln=master_soln)
            return model_data, separation_solns

        # === Check if we terminate due to robust optimality or feasibility
        if not any(s.found_violation for sep_soln_list in separation_solns
                   for s in sep_soln_list) and is_global:
            if config.solve_master_globally and config.objective_focus is ObjectiveType.worst_case:
                output_logger(config=config, robust_optimal=True)
                termination_condition = pyrosTerminationCondition.robust_optimal
            else:
                output_logger(config=config, robust_feasible=True)
                termination_condition = pyrosTerminationCondition.robust_feasible
            update_grcs_solve_data(pyros_soln=model_data,
                                   k=k,
                                   term_cond=termination_condition,
                                   nominal_data=nominal_data,
                                   timing_data=timing_data,
                                   separation_data=separation_data,
                                   master_soln=master_soln)
            return model_data, separation_solns

        # === Add block to master at violation
        master_problem_methods.add_scenario_to_master(master_data,
                                                      violating_realizations)
        separation_data.points_added_to_master.append(violating_realizations)

        k += 1

    output_logger(config=config, max_iter=True)
    update_grcs_solve_data(pyros_soln=model_data,
                           k=k,
                           term_cond=pyrosTerminationCondition.max_iter,
                           nominal_data=nominal_data,
                           timing_data=timing_data,
                           separation_data=separation_data,
                           master_soln=master_soln)

    # === In this case we still return the final solution objects for the last iteration
    return model_data, separation_solns
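From user code this loop is driven through the pyros solver plugin rather than called directly. A sketch of a typical invocation on a toy two-stage model, assuming the ipopt and baron subsolvers are installed (keyword names follow the PyROS documentation):

from pyomo.environ import (ConcreteModel, Constraint, Objective, Param,
                           SolverFactory, Var)
from pyomo.contrib.pyros import BoxSet, ObjectiveType

m = ConcreteModel()
m.q = Param(initialize=1.0, mutable=True)  # uncertain parameter
m.x = Var(bounds=(0, 10))                  # first-stage variable
m.z = Var(bounds=(0, 10))                  # second-stage variable
m.c = Constraint(expr=m.z >= m.q * m.x - 1)
m.obj = Objective(expr=m.x + m.z)

results = SolverFactory("pyros").solve(
    model=m,
    first_stage_variables=[m.x],
    second_stage_variables=[m.z],
    uncertain_params=[m.q],
    uncertainty_set=BoxSet(bounds=[(0.8, 1.2)]),
    local_solver=SolverFactory("ipopt"),
    global_solver=SolverFactory("baron"),
    objective_focus=ObjectiveType.worst_case,
)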
Example #10
    def _apply_to(self, instance, **kwds):
        if __debug__ and logger.isEnabledFor(logging.DEBUG):  #pragma:nocover
            logger.debug("Calling ConnectorExpander")

        connectorsFound = False
        for c in instance.component_data_objects(Connector):
            connectorsFound = True
            break
        if not connectorsFound:
            return

        if __debug__ and logger.isEnabledFor(logging.DEBUG):  #pragma:nocover
            logger.debug("   Connectors found!")

        #
        # At this point, there are connectors in the model, so we must
        # look for constraints that involve connectors and expand them.
        #
        connector_types = set([SimpleConnector, _ConnectorData])
        constraint_list = []
        connector_list = []
        matched_connectors = {}
        found = dict()
        for constraint in instance.component_data_objects(Constraint):
            for c in expr.identify_variables(
                    constraint.body, include_potentially_variable=True):
                if c.__class__ in connector_types:
                    found[id(c)] = c
            if not found:
                continue

            # Note that it is important to copy the set of found
            # connectors, since the matching routine below will
            # manipulate sets in place.
            found_this_constraint = dict(found)
            constraint_list.append((constraint, found_this_constraint))

            # Find all the connectors that are used in the constraint,
            # so we know which connectors to validate against each
            # other.  Note that the validation must be transitive (that
            # is, if con1 has a & b and con2 has b & c, then a, b, and c
            # must all validate against each other).
            for cId, c in iteritems(found_this_constraint):
                if cId in matched_connectors:
                    oldSet = matched_connectors[cId]
                    found.update(oldSet)
                    for _cId in oldSet:
                        matched_connectors[_cId] = found
                else:
                    connector_list.append(c)
                matched_connectors[cId] = found

            # Reset found back to empty (this is more efficient as the
            # bulk of the constraints in the model will not have
            # connectors - so if we did this at the top of the loop, we
            # would spend a lot of time clearing empty sets
            found = {}

        # Validate all connector sets and expand the empty ones
        known_conn_sets = {}
        for connector in connector_list:
            conn_set = matched_connectors[id(connector)]
            if id(conn_set) in known_conn_sets:
                continue
            known_conn_sets[id(conn_set)] \
                = self._validate_and_expand_connector_set(conn_set)

        # Expand each constraint
        for constraint, conn_set in constraint_list:
            cList = ConstraintList()
            constraint.parent_block().add_component(
                '%s.expanded' % (constraint.local_name, ), cList)
            connId = next(iterkeys(conn_set))
            ref = known_conn_sets[id(matched_connectors[connId])]
            for k, v in sorted(iteritems(ref)):
                if v[1] >= 0:
                    _iter = v[0]
                else:
                    _iter = (v[0], )
                for idx in _iter:
                    substitution = {}
                    for c in itervalues(conn_set):
                        if v[1] >= 0:
                            new_v = c.vars[k][idx]
                        elif k in c.aggregators:
                            new_v = c.vars[k].add()
                        else:
                            new_v = c.vars[k]
                        substitution[id(c)] = new_v
                    cList.add((constraint.lower,
                               expr.clone_expression(constraint.body,
                                                     substitution),
                               constraint.upper))
            constraint.deactivate()

        # Now, go back and implement VarList aggregators
        for conn in connector_list:
            block = conn.parent_block()
            for var, aggregator in iteritems(conn.aggregators):
                c = Constraint(expr=aggregator(block, conn.vars[var]))
                block.add_component('%s.%s.aggregate' % (conn.local_name, var),
                                    c)
Example #11
    def __init__(self, **kwargs):
        NmpcGen.__init__(self, **kwargs)
        self.int_file_mhe_suf = int(time.time())-1

        # Need a list of relevant measurements y

        self.y = kwargs.pop('y', [])
        self.y_vars = kwargs.pop('y_vars', {})

        # Need a list of relevant noisy-states z

        self.x_noisy = kwargs.pop('x_noisy', [])
        self.x_vars = kwargs.pop('x_vars', {})
        self.deact_ics = kwargs.pop('del_ics', True)
        self.diag_Q_R = kwargs.pop('diag_QR', True)  #: By default use diagonal Q and R weighting matrices
        self.u = kwargs.pop('u', [])
        self.IgnoreProcessNoise = kwargs.pop('IgnoreProcessNoise', False)


        print("-" * 120)
        print("I[[create_lsmhe]] lsmhe (full) model created.")
        print("-" * 120)
        nstates = sum(len(self.x_vars[x]) for x in self.x_noisy)

        self.journalizer("I", self._c_it, "MHE with \t", str(nstates) + " states")
        self.journalizer("I", self._c_it, "MHE with \t", str(nstates*self.nfe_t*self.ncp_t) + " noise vars")
        self.lsmhe = self.d_mod(self.nfe_t, self.ncp_t, _t=self._t)
        self.lsmhe.name = "LSMHE (Least-Squares MHE)"
        self.lsmhe.create_bounds()
        #: create x_pi constraint

        #: Create list of noisy-states vars
        self.xkN_l = []
        self.xkN_nexcl = []
        self.xkN_key = {}
        k = 0
        for x in self.x_noisy:
            n_s = getattr(self.lsmhe, x)  #: Noisy-state
            for jth in self.x_vars[x]:  #: the jth variable
                self.xkN_l.append(n_s[(1, 0) + jth])
                self.xkN_nexcl.append(1)  #: non-exclusion list for active bounds
                self.xkN_key[(x, jth)] = k
                k += 1

        self.lsmhe.xkNk_mhe = Set(initialize=[i for i in range(0, len(self.xkN_l))])  #: Create set of noisy_states
        self.lsmhe.x_0_mhe = Param(self.lsmhe.xkNk_mhe, initialize=0.0, mutable=True)  #: Prior-state
        self.lsmhe.wk_mhe = Param(self.lsmhe.fe_t, self.lsmhe.cp_ta, self.lsmhe.xkNk_mhe, initialize=0.0) \
            if self.IgnoreProcessNoise else Expression(self.lsmhe.fe_t, self.lsmhe.cp_ta, self.lsmhe.xkNk_mhe)  #: Model disturbance
        self.lsmhe.PikN_mhe = Param(self.lsmhe.xkNk_mhe, self.lsmhe.xkNk_mhe,
                                initialize=lambda m, i, ii: 1. if i == ii else 0.0, mutable=True)  #: Prior-Covariance
        self.lsmhe.Q_mhe = Param(range(1, self.nfe_t), self.lsmhe.xkNk_mhe, initialize=1, mutable=True) if self.diag_Q_R\
            else Param(range(1, self.nfe_t), self.lsmhe.xkNk_mhe, self.lsmhe.xkNk_mhe,
                             initialize=lambda m, t, i, ii: 1. if i == ii else 0.0, mutable=True)  #: Disturbance-weight
        j = 0
        for i in self.x_noisy:
            de_exp = getattr(self.lsmhe, "de_" + i)
            for k in self.x_vars[i]:
                for tfe in range(1, self.nfe_t+1):
                    for tcp in range(1, self.ncp_t + 1):
                        self.lsmhe.wk_mhe[tfe, tcp, j].set_value(de_exp[(tfe, tcp) + k]._body)
                        de_exp[(tfe, tcp) + k].deactivate()
                j += 1



        #: Create list of measurements vars
        self.yk_l = {}
        self.yk_key = {}
        k = 0
        self.yk_l[1] = []
        for y in self.y:
            m_v = getattr(self.lsmhe, y)  #: Measured "state"
            for jth in self.y_vars[y]:  #: the jth variable
                self.yk_l[1].append(m_v[(1, self.ncp_t) + jth])
                self.yk_key[(y, jth)] = k  #: The key needs to be created only once, that is why the loop was split
                k += 1

        for t in range(2, self.nfe_t + 1):
            self.yk_l[t] = []
            for y in self.y:
                m_v = getattr(self.lsmhe, y)  #: Measured "state"
                for jth in self.y_vars[y]:  #: the jth variable
                    self.yk_l[t].append(m_v[(t, self.ncp_t) + jth])

        self.lsmhe.ykk_mhe = Set(initialize=[i for i in range(0, len(self.yk_l[1]))])  #: Create set of measured_vars
        self.lsmhe.nuk_mhe = Var(self.lsmhe.fe_t, self.lsmhe.ykk_mhe, initialize=0.0)   #: Measurement noise
        self.lsmhe.yk0_mhe = Param(self.lsmhe.fe_t, self.lsmhe.ykk_mhe, initialize=1.0, mutable=True)
        self.lsmhe.hyk_c_mhe = Constraint(
            self.lsmhe.fe_t, self.lsmhe.ykk_mhe,
            rule=lambda mod, t, i: mod.yk0_mhe[t, i] - self.yk_l[t][i] - mod.nuk_mhe[t, i] == 0.0)
        self.lsmhe.hyk_c_mhe.deactivate()
        self.lsmhe.R_mhe = Param(self.lsmhe.fe_t, self.lsmhe.ykk_mhe, initialize=1.0, mutable=True) if self.diag_Q_R else \
            Param(self.lsmhe.fe_t, self.lsmhe.ykk_mhe, self.lsmhe.ykk_mhe,
                             initialize=lambda mod, t, i, ii: 1.0 if i == ii else 0.0, mutable=True)
        f = open("file_cv.txt", "w")
        f.close()

        #: Constraints for the input noise
        for u in self.u:
            # cv = getattr(self.lsmhe, u)  #: Get the param
            # c_val = [value(cv[i]) for i in cv.keys()]  #: Current value
            # self.lsmhe.del_component(cv)  #: Delete the param
            # self.lsmhe.add_component(u + "_mhe", Var(self.lsmhe.fe_t, initialize=lambda m, i: c_val[i-1]))
            self.lsmhe.add_component("w_" + u + "_mhe", Var(self.lsmhe.fe_t, initialize=0.0))  #: Noise for input
            self.lsmhe.add_component("w_" + u + "c_mhe", Constraint(self.lsmhe.fe_t))
            self.lsmhe.equalize_u(direction="r_to_u")
            # cc = getattr(self.lsmhe, u + "_c")  #: Get the constraint for input
            con_w = getattr(self.lsmhe, "w_" + u + "c_mhe")  #: Get the constraint-noisy
            var_w = getattr(self.lsmhe, "w_" + u + "_mhe")  #: Get the constraint-noisy
            ce = getattr(self.lsmhe, u + "_e")  #: Get the expression
            cp = getattr(self.lsmhe, u)  #: Get the param

            con_w.rule = lambda m, i: cp[i] == ce[i] + var_w[i]
            con_w.reconstruct()
            con_w.deactivate()

            # con_w.rule = lambda m, i: cp[i] == cv[i] + var_w[i]
            # con_w.reconstruct()
            # with open("file_cv.txt", "a") as f:
            #     cc.pprint(ostream=f)
            #     con_w.pprint(ostream=f)
                # f.close()

        self.lsmhe.U_mhe = Param(range(1, self.nfe_t + 1), self.u, initialize=1, mutable=True)

        #: Deactivate icc constraints
        if self.deact_ics:
            pass
            # for i in self.states:
            #     self.lsmhe.del_component(i + "_icc")
        #: Maybe only for a subset of the states
        else:
            for i in self.states:
                if i in self.x_noisy:
                    ic_con = getattr(self.lsmhe, i + "_icc")
                    for k in self.x_vars[i]:
                        ic_con[k].deactivate()

        #: Put the noise in the continuation equations (finite-element)
        j = 0
        self.lsmhe.noisy_cont = ConstraintList()
        for i in self.x_noisy:
            # cp_con = getattr(self.lsmhe, "cp_" + i)
            cp_exp = getattr(self.lsmhe, "noisy_" + i)
            # self.lsmhe.del_component(cp_con)
            for k in self.x_vars[i]:  #: This should keep the same order
                for t in range(1, self.nfe_t):
                    self.lsmhe.noisy_cont.add(cp_exp[t, k] == 0.0)
                    # self.lsmhe.noisy_cont.add(cp_exp[t, k] == 0.0)
                j += 1
            # cp_con.reconstruct()
        j = 0
        self.lsmhe.noisy_cont.deactivate()

        #: Expressions for the objective function (least-squares)
        self.lsmhe.Q_e_mhe = 0.0 if self.IgnoreProcessNoise else Expression(
            expr=0.5 * sum(
                sum(
                    sum(self.lsmhe.Q_mhe[1, k] * self.lsmhe.wk_mhe[i, j, k]**2 for k in self.lsmhe.xkNk_mhe) for j in range(1, self.ncp_t +1))
                for i in range(1, self.nfe_t+1))) if self.diag_Q_R else Expression(
            expr=sum(sum(self.lsmhe.wk_mhe[i, 1, j] *
                         sum(self.lsmhe.Q_mhe[i, j, k] * self.lsmhe.wk_mhe[i, 1, k] for k in self.lsmhe.xkNk_mhe)
                         for j in self.lsmhe.xkNk_mhe) for i in range(1, self.nfe_t)))

        self.lsmhe.R_e_mhe = Expression(
            expr=0.5 * sum(
                sum(
                    self.lsmhe.R_mhe[i, k] * self.lsmhe.nuk_mhe[i, k]**2 for k in self.lsmhe.ykk_mhe)
                for i in self.lsmhe.fe_t)) if self.diag_Q_R else Expression(
            expr=sum(sum(self.lsmhe.nuk_mhe[i, j] *
                         sum(self.lsmhe.R_mhe[i, j, k] * self.lsmhe.nuk_mhe[i, k] for k in self.lsmhe.ykk_mhe)
                         for j in self.lsmhe.ykk_mhe) for i in self.lsmhe.fe_t))
        expr_u_obf = 0
        for i in self.lsmhe.fe_t:
            for u in self.u:
                var_w = getattr(self.lsmhe, "w_" + u + "_mhe")  #: Get the constraint-noisy
                expr_u_obf += self.lsmhe.U_mhe[i, u] * var_w[i] ** 2

        self.lsmhe.U_e_mhe = Expression(expr=0.5 * expr_u_obf)  # how about this
        # with open("file_cv.txt", "a") as f:
        #     self.lsmhe.U_e_mhe.pprint(ostream=f)
        #     f.close()

        self.lsmhe.Arrival_e_mhe = Expression(
            expr=0.5 * sum((self.xkN_l[j] - self.lsmhe.x_0_mhe[j]) *
                     sum(self.lsmhe.PikN_mhe[j, k] * (self.xkN_l[k] - self.lsmhe.x_0_mhe[k]) for k in self.lsmhe.xkNk_mhe)
                     for j in self.lsmhe.xkNk_mhe))

        self.lsmhe.Arrival_dummy_e_mhe = Expression(
            expr=100000.0 * sum((self.xkN_l[j] - self.lsmhe.x_0_mhe[j]) ** 2 for j in self.lsmhe.xkNk_mhe))

        self.lsmhe.obfun_dum_mhe_deb = Objective(sense=minimize,
                                             expr=self.lsmhe.Q_e_mhe)
        self.lsmhe.obfun_dum_mhe = Objective(sense=minimize,
                                             expr=self.lsmhe.R_e_mhe + self.lsmhe.Q_e_mhe + self.lsmhe.U_e_mhe) # no arrival
        self.lsmhe.obfun_dum_mhe.deactivate()

        self.lsmhe.obfun_mhe_first = Objective(sense=minimize,
                                         expr=self.lsmhe.Arrival_dummy_e_mhe + self.lsmhe.Q_e_mhe)
        self.lsmhe.obfun_mhe_first.deactivate()


        self.lsmhe.obfun_mhe = Objective(sense=minimize,
                                         expr=self.lsmhe.Arrival_dummy_e_mhe + self.lsmhe.R_e_mhe + self.lsmhe.Q_e_mhe + self.lsmhe.U_e_mhe)
        self.lsmhe.obfun_mhe.deactivate()

        # with open("file_cv.txt", "a") as f:
        #     self.lsmhe.obfun_mhe.pprint(ostream=f)
        #     f.close()

        self._PI = {}  #: Container of the KKT matrix
        self.xreal_W = {}
        self.curr_m_noise = {}   #: Current measurement noise
        self.curr_y_offset = {}  #: Current offset of measurement
        for y in self.y:
            for j in self.y_vars[y]:
                self.curr_m_noise[(y, j)] = 0.0
                self.curr_y_offset[(y, j)] = 0.0

        self.s_estimate = {}
        self.s_real = {}
        for x in self.x_noisy:
            self.s_estimate[x] = []
            self.s_real[x] = []

        self.y_estimate = {}
        self.y_real = {}
        self.y_noise_jrnl = {}
        self.yk0_jrnl = {}
        for y in self.y:
            self.y_estimate[y] = []
            self.y_real[y] = []
            self.y_noise_jrnl[y] = []
            self.yk0_jrnl[y] = []
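The Arrival_e_mhe term above is the usual quadratic arrival cost (x - x0)' * Pi * (x - x0). Isolated on a toy model with made-up dimensions:

from pyomo.environ import (ConcreteModel, Expression, Objective, Param,
                           RangeSet, Var, minimize)

m = ConcreteModel()
m.S = RangeSet(0, 1)
m.x = Var(m.S, initialize=0.0)                   # estimated prior state
m.x0 = Param(m.S, initialize=0.0, mutable=True)  # prior mean
m.Pi = Param(m.S, m.S, mutable=True,             # prior weight (inverse cov.)
             initialize=lambda mod, i, j: 1.0 if i == j else 0.0)

m.arrival = Expression(
    expr=0.5 * sum((m.x[j] - m.x0[j]) *
                   sum(m.Pi[j, k] * (m.x[k] - m.x0[k]) for k in m.S)
                   for j in m.S))
m.obj = Objective(sense=minimize, expr=m.arrival)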
Example #12
    def _add_optimality_conditions(self, instance, submodel):
        """
        Add optimality conditions for the submodel

        This assumes that the original model has the form:

            min c1*x + d1*y
                A3*x <= b3
                A1*x + B1*y <= b1
                min c2*x + d2*y + x'*Q*y
                    A2*x + B2*y + x'*E2*y <= b2
                    y >= 0

        NOTE THE VARIABLE BOUNDS!
        """
        #
        # Populate the block with the linear constraints.
        # Note that we don't simply clone the current block.
        # We need to collect a single set of equations that
        # can be easily expressed.
        #
        d2 = {}
        B2 = {}
        vtmp = {}
        utmp = {}
        sids_set = set()
        sids_list = []
        #
        block = Block(concrete=True)
        block.u = VarList()
        block.v = VarList()
        block.c1 = ConstraintList()
        block.c2 = ComplementarityList()
        block.c3 = ComplementarityList()
        #
        # Collect submodel objective terms
        #
        # TODO: detect fixed variables
        #
        for odata in submodel.component_data_objects(Objective, active=True):
            if odata.sense == maximize:
                d_sense = -1
            else:
                d_sense = 1
            #
            # Iterate through the variables in the representation
            #
            o_terms = generate_standard_repn(odata.expr, compute_values=False)
            #
            # Linear terms
            #
            for i, var in enumerate(o_terms.linear_vars):
                if var.parent_component().local_name in self._fixed_upper_vars:
                    #
                    # Skip fixed upper variables
                    #
                    continue
                #
                # Store the coefficient for the variable.  The coefficient is
                # negated if the objective is maximized.
                #
                id_ = id(var)
                d2[id_] = d_sense * o_terms.linear_coefs[i]
                if not id_ in sids_set:
                    sids_set.add(id_)
                    sids_list.append(id_)
            #
            # Quadratic terms
            #
            for i, var in enumerate(o_terms.quadratic_vars):
                if var[0].parent_component(
                ).local_name in self._fixed_upper_vars:
                    if var[1].parent_component(
                    ).local_name in self._fixed_upper_vars:
                        #
                        # Skip fixed upper variables
                        #
                        continue
                    #
                    # Add the linear term
                    #
                    id_ = id(var[1])
                    d2[id_] = d2.get(
                        id_, 0) + d_sense * o_terms.quadratic_coefs[i] * var[0]
                    if not id_ in sids_set:
                        sids_set.add(id_)
                        sids_list.append(id_)
                elif var[1].parent_component(
                ).local_name in self._fixed_upper_vars:
                    #
                    # Add the linear term
                    #
                    id_ = id(var[0])
                    d2[id_] = d2.get(
                        id_, 0) + d_sense * o_terms.quadratic_coefs[i] * var[1]
                    if not id_ in sids_set:
                        sids_set.add(id_)
                        sids_list.append(id_)
                else:
                    raise RuntimeError(
                        "Cannot apply this transformation to a problem with quadratic terms where both variables are in the lower level."
                    )
            #
            # Stop after the first objective
            #
            break
        #
        # Iterate through all lower level variables, adding dual variables
        # and complementarity slackness conditions for y bound constraints
        #
        for vcomponent in instance.component_objects(Var, active=True):
            if vcomponent.local_name in self._fixed_upper_vars:
                #
                # Skip fixed upper variables
                #
                continue
            for ndx in vcomponent:
                #
                # For each index, get the bounds for the variable
                #
                lb, ub = vcomponent[ndx].bounds
                if not lb is None:
                    #
                    # Add the complementarity slackness condition for a lower bound
                    #
                    v = block.v.add()
                    block.c3.add(complements(vcomponent[ndx] >= lb, v >= 0))
                else:
                    v = None
                if not ub is None:
                    #
                    # Add the complementarity slackness condition for an upper bound
                    #
                    w = block.v.add()
                    vtmp[id(vcomponent[ndx])] = w
                    block.c3.add(complements(vcomponent[ndx] <= ub, w >= 0))
                else:
                    w = None
                if not (v is None and w is None):
                    #
                    # Record the variables for which complementarity slackness conditions
                    # were created.
                    #
                    id_ = id(vcomponent[ndx])
                    vtmp[id_] = (v, w)
                    if not id_ in sids_set:
                        sids_set.add(id_)
                        sids_list.append(id_)
        #
        # Iterate through all constraints, adding dual variables and
        # complementary slackness conditions (for inequality constraints)
        #
        for cdata in submodel.component_data_objects(Constraint, active=True):
            if cdata.equality:
                # Don't add a complementarity slackness condition for an
                # equality constraint; its dual variable is free
                u = block.u.add()
                utmp[id(cdata)] = (None, u)
            else:
                if cdata.lower is not None:
                    #
                    # Add the complementarity slackness condition for a greater-than inequality
                    #
                    u = block.u.add()
                    block.c2.add(
                        complements(-cdata.body <= -cdata.lower, u >= 0))
                else:
                    u = None
                if cdata.upper is not None:
                    #
                    # Add the complementarity slackness condition for a less-than inequality
                    #
                    w = block.u.add()
                    block.c2.add(complements(cdata.body <= cdata.upper,
                                             w >= 0))
                else:
                    w = None
                if not (u is None and w is None):
                    utmp[id(cdata)] = (u, w)
            #
            # Store the coefficients for the constraint variables that are not fixed
            #
            c_terms = generate_standard_repn(cdata.body, compute_values=False)
            #
            # Linear terms
            #
            for i, var in enumerate(c_terms.linear_vars):
                if var.parent_component().local_name in self._fixed_upper_vars:
                    continue
                id_ = id(var)
                B2.setdefault(id_, {}).setdefault(id(cdata),
                                                  c_terms.linear_coefs[i])
                if id_ not in sids_set:
                    sids_set.add(id_)
                    sids_list.append(id_)
            #
            # Quadratic terms
            #
            for i, var in enumerate(c_terms.quadratic_vars):
                if var[0].parent_component().local_name in self._fixed_upper_vars:
                    if var[1].parent_component().local_name in self._fixed_upper_vars:
                        continue
                    id_ = id(var[1])
                    B2.setdefault(id_, {})[id(cdata)] = \
                        c_terms.quadratic_coefs[i] * var[0]
                    if id_ not in sids_set:
                        sids_set.add(id_)
                        sids_list.append(id_)
                elif var[1].parent_component().local_name in self._fixed_upper_vars:
                    id_ = id(var[0])
                    B2.setdefault(id_, {})[id(cdata)] = \
                        c_terms.quadratic_coefs[i] * var[1]
                    if id_ not in sids_set:
                        sids_set.add(id_)
                        sids_list.append(id_)
                else:
                    raise RuntimeError(
                        "Cannot apply this transformation to a problem with quadratic terms where both variables are in the lower level."
                    )
        #
        # Generate stationarity equations
        #
        for vid in sids_list:
            exp = d2.get(vid, 0)
            #
            # Add the duals for the variable bounds, if any
            #
            lb_dual, ub_dual = vtmp.get(vid, (None, None))
            if lb_dual is not None:
                exp -= lb_dual  # dual for variable lower bound
            if ub_dual is not None:
                exp += ub_dual  # dual for variable upper bound
            #
            B2_ = B2.get(vid, {})
            utmp_keys = list(utmp.keys())
            if self._deterministic:
                utmp_keys.sort(key=lambda x: utmp[x][0].local_name if utmp[x][
                    1] is None else utmp[x][1].local_name)
            for uid in utmp_keys:
                if uid in B2_:
                    lb_dual, ub_dual = utmp[uid]
                    if lb_dual is not None:
                        exp -= B2_[uid] * lb_dual
                    if ub_dual is not None:
                        exp += B2_[uid] * ub_dual
            if type(exp) in six.integer_types or type(exp) is float:
                # TODO: Annotate the model as unbounded
                raise IOError("Unbounded variable without side constraints")
            else:
                block.c1.add(exp == 0)
        #
        # Return block
        #
        return block
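
A minimal sketch (toy model, not the transformation above) of how pyomo.mpec expresses the complementarity slackness pairs collected in block.c2 and block.c3 above: each bound or inequality is paired with a nonnegative dual variable, and at most one side of each pair may be slack.

from pyomo.environ import ConcreteModel, Var, NonNegativeReals
from pyomo.mpec import ComplementarityList, complements

m = ConcreteModel()
m.y = Var(bounds=(0, 10))
m.v = Var(within=NonNegativeReals)  # dual for the lower bound y >= 0
m.w = Var(within=NonNegativeReals)  # dual for the upper bound y <= 10

m.cc = ComplementarityList()
# y >= 0  _|_  v >= 0: either y sits at its lower bound or v is zero
m.cc.add(complements(m.y >= 0, m.v >= 0))
# y <= 10 _|_  w >= 0: either y sits at its upper bound or w is zero
m.cc.add(complements(m.y <= 10, m.w >= 0))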
Example #13
    def solve(self, model, **kwds):
        """Solve the model.

        Warning: this solver is still in beta. Keyword arguments subject to
        change. Undocumented keyword arguments definitely subject to change.

        This function performs all of the GDPopt solver setup and problem
        validation. It then calls upon helper functions to construct the
        initial master approximation and iteration loop.

        Args:
            model (Block): a Pyomo model or block to be solved

        """
        config = self.CONFIG(kwds.pop('options', {}))
        config.set_value(kwds)
        solve_data = GDPoptSolveData()
        solve_data.results = SolverResults()
        solve_data.timing = Container()

        old_logger_level = config.logger.getEffectiveLevel()
        with time_code(solve_data.timing, 'total'), \
                restore_logger_level(config.logger), \
                create_utility_block(model, 'GDPopt_utils', solve_data):
            if config.tee and old_logger_level > logging.INFO:
                # If the logger does not already include INFO, include it.
                config.logger.setLevel(logging.INFO)
            config.logger.info(
                "Starting GDPopt version %s using %s algorithm"
                % (".".join(map(str, self.version())), config.strategy)
            )
            config.logger.info(
                """
If you use this software, you may cite the following:
- Implementation:
    Chen, Q; Johnson, ES; Siirola, JD; Grossmann, IE.
    Pyomo.GDP: Disjunctive Models in Python. 
    Proc. of the 13th Intl. Symposium on Process Systems Eng.
    San Diego, 2018.
- LOA algorithm:
    Türkay, M; Grossmann, IE.
    Logic-based MINLP algorithms for the optimal synthesis of process networks.
    Comp. and Chem. Eng. 1996, 20(8), 959–978.
    DOI: 10.1016/0098-1354(95)00219-7.
- GLOA algorithm:
    Lee, S; Grossmann, IE.
    A Global Optimization Algorithm for Nonconvex Generalized Disjunctive Programming and Applications to Process Systems.
    Comp. and Chem. Eng. 2001, 25, 1675-1697.
    DOI: 10.1016/S0098-1354(01)00732-3.
                """.strip()
            )
            solve_data.results.solver.name = 'GDPopt %s - %s' % (
                str(self.version()), config.strategy)

            solve_data.original_model = model
            solve_data.working_model = model.clone()
            GDPopt = solve_data.working_model.GDPopt_utils
            setup_results_object(solve_data, config)

            solve_data.current_strategy = config.strategy

            # Verify that objective has correct form
            process_objective(solve_data, config)

            # Save model initial values. These are used later to initialize NLP
            # subproblems.
            solve_data.initial_var_values = list(
                v.value for v in GDPopt.variable_list)
            solve_data.best_solution_found = None

            # Validate the model to ensure that GDPopt is able to solve it.
            if not model_is_valid(solve_data, config):
                return

            # Integer cuts exclude particular discrete decisions
            GDPopt.integer_cuts = ConstraintList(doc='integer cuts')

            # Feasible integer cuts exclude discrete realizations that have
            # been explored via an NLP subproblem. Depending on model
            # characteristics, the user may wish to revisit NLP subproblems
            # (with a different initialization, for example). Therefore, these
            # cuts are not enabled by default, unless the initial model has no
            # discrete decisions.

            # Note: these cuts will only exclude integer realizations that are
            # not already in the primary GDPopt_integer_cuts ConstraintList.
            GDPopt.no_backtracking = ConstraintList(
                doc='explored integer cuts')

            # Set up iteration counters
            solve_data.master_iteration = 0
            solve_data.mip_iteration = 0
            solve_data.nlp_iteration = 0

            # set up bounds
            solve_data.LB = float('-inf')
            solve_data.UB = float('inf')
            solve_data.iteration_log = {}

            # Flag indicating whether the solution improved in the past
            # iteration or not
            solve_data.feasible_solution_improved = False

            # Initialize the master problem
            with time_code(solve_data.timing, 'initialization'):
                GDPopt_initialize_master(solve_data, config)

            # Algorithm main loop
            with time_code(solve_data.timing, 'main loop'):
                GDPopt_iteration_loop(solve_data, config)

            if solve_data.best_solution_found is not None:
                # Update values in working model
                copy_var_list_values(
                    from_list=solve_data.best_solution_found.GDPopt_utils.variable_list,
                    to_list=GDPopt.variable_list,
                    config=config)
                # Update values in original model
                copy_var_list_values(
                    GDPopt.variable_list,
                    solve_data.original_model.GDPopt_utils.variable_list,
                    config)

            solve_data.results.problem.lower_bound = solve_data.LB
            solve_data.results.problem.upper_bound = solve_data.UB

        solve_data.results.solver.timing = solve_data.timing
        solve_data.results.solver.user_time = solve_data.timing.total
        solve_data.results.solver.wallclock_time = solve_data.timing.total

        solve_data.results.solver.iterations = solve_data.master_iteration

        return solve_data.results
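
A hedged usage sketch for the solve() entry point above. The two-disjunct model is hypothetical, and the call assumes the glpk and ipopt subsolvers are installed; 'strategy' selects between the LOA/GLOA algorithms cited in the solver banner.

from pyomo.environ import ConcreteModel, Objective, SolverFactory, Var
from pyomo.gdp import Disjunction

m = ConcreteModel()
m.x = Var(bounds=(0, 10))
m.choice = Disjunction(expr=[[m.x <= 2], [m.x >= 8]])
m.obj = Objective(expr=m.x)

results = SolverFactory('gdpopt').solve(
    m, strategy='LOA', mip_solver='glpk', nlp_solver='ipopt', tee=False)
print(results.solver.termination_condition)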
Example #14
def minimize_dr_vars(model_data, config):
    """
    Decision rule polishing: For a given optimal design (x) determined in separation,
    and the optimal value for control vars (z), choose min magnitude decision_rule_var
    values.
    """
    #config.progress_logger.info("Executing decision rule variable polishing solve.")
    model = model_data.master_model
    polishing_model = model.clone()

    first_stage_variables = polishing_model.scenarios[
        0, 0].util.first_stage_variables
    decision_rule_vars = polishing_model.scenarios[
        0, 0].util.decision_rule_vars

    polishing_model.obj.deactivate()
    index_set = decision_rule_vars[0].index_set()
    polishing_model.tau_vars = []
    # ==========
    for idx in range(len(decision_rule_vars)):
        polishing_model.scenarios[0, 0].add_component(
            "polishing_var_" + str(idx),
            Var(index_set, initialize=1e6, domain=NonNegativeReals))
        polishing_model.tau_vars.append(
            getattr(polishing_model.scenarios[0, 0],
                    "polishing_var_" + str(idx)))
    # ==========
    this_iter = polishing_model.scenarios[
        max(polishing_model.scenarios.keys())[0], 0]
    nom_block = polishing_model.scenarios[0, 0]
    if config.objective_focus == ObjectiveType.nominal:
        obj_val = value(this_iter.second_stage_objective +
                        this_iter.first_stage_objective)
        polishing_model.scenarios[0,0].polishing_constraint = \
            Constraint(expr=obj_val >= nom_block.second_stage_objective + nom_block.first_stage_objective)
    elif config.objective_focus == ObjectiveType.worst_case:
        # Search among equivalent optimal solutions given the optimal zeta
        polishing_model.zeta.fix()

    # === Make absolute value constraints on polishing_vars
    polishing_model.scenarios[
        0, 0].util.absolute_var_constraints = cons = ConstraintList()
    uncertain_params = nom_block.util.uncertain_params
    if config.decision_rule_order == 1:
        for i, tau in enumerate(polishing_model.tau_vars):
            for j in range(len(this_iter.util.decision_rule_vars[i])):
                if j == 0:
                    cons.add(
                        -tau[j] <= this_iter.util.decision_rule_vars[i][j])
                    cons.add(this_iter.util.decision_rule_vars[i][j] <= tau[j])
                else:
                    cons.add(
                        -tau[j] <= this_iter.util.decision_rule_vars[i][j] *
                        uncertain_params[j - 1])
                    cons.add(this_iter.util.decision_rule_vars[i][j] *
                             uncertain_params[j - 1] <= tau[j])
    elif config.decision_rule_order == 2:
        param_indices = list(range(len(uncertain_params)))
        index_pairs = list(it.combinations(param_indices, 2))
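        # Layout of each decision rule coefficient vector Z (quadratic DR):
        # Z[0] is the constant term; Z[1..n] multiply q_i; the next C(n, 2)
        # entries multiply the cross products q_i * q_j; the final n entries
        # multiply the squares q_i ** 2 (n = number of uncertain parameters).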
        for i, tau in enumerate(polishing_model.tau_vars):
            Z = this_iter.util.decision_rule_vars[i]
            indices = list(range(len(Z)))
            for r in indices:
                if r == 0:
                    cons.add(-tau[r] <= Z[r])
                    cons.add(Z[r] <= tau[r])
                elif 0 < r <= len(uncertain_params):
                    cons.add(-tau[r] <= Z[r] * uncertain_params[r - 1])
                    cons.add(Z[r] * uncertain_params[r - 1] <= tau[r])
                elif len(uncertain_params) < r <= len(indices) - len(uncertain_params) - 1:
                    cons.add(-tau[r] <= Z[r] * uncertain_params[index_pairs[
                        r - len(uncertain_params) - 1][0]] * uncertain_params[
                            index_pairs[r - len(uncertain_params) - 1][1]])
                    cons.add(Z[r] * uncertain_params[index_pairs[
                        r - len(uncertain_params) - 1][0]] *
                             uncertain_params[index_pairs[
                                 r - len(uncertain_params) - 1][1]] <= tau[r])
                elif r > len(indices) - len(uncertain_params) - 1:
                    cons.add(-tau[r] <= Z[r] *
                             uncertain_params[r - len(index_pairs) -
                                              len(uncertain_params) - 1]**2)
                    cons.add(Z[r] * uncertain_params[r - len(index_pairs) -
                                                     len(uncertain_params) -
                                                     1]**2 <= tau[r])
    else:
        raise NotImplementedError(
            "Decision rule variable polishing has not been generalized to decision_rule_order "
            + str(config.decision_rule_order) + ".")

    polishing_model.scenarios[0,0].polishing_obj = \
        Objective(expr=sum(sum(tau[j] for j in tau.index_set()) for tau in polishing_model.tau_vars))

    # === Fix design
    for d in first_stage_variables:
        d.fix()

    # === Unfix DR vars
    num_dr_vars = len(model.scenarios[
        0, 0].util.decision_rule_vars[0])  # there is at least one dr var
    num_uncertain_params = len(config.uncertain_params)

    if model.const_efficiency_applied:
        for d in decision_rule_vars:
            d[0].unfix()
            for i in range(1, num_dr_vars):
                d[i].fix(0)
    elif model.linear_efficiency_applied:
        for d in decision_rule_vars:
            d.unfix()
            for i in range(num_uncertain_params + 1, num_dr_vars):
                d[i].fix(0)
    else:
        for d in decision_rule_vars:
            d.unfix()

    # === Unfix all control var values
    for block in polishing_model.scenarios.values():
        for c in block.util.second_stage_variables:
            c.unfix()
        if model.const_efficiency_applied:
            for d in block.util.decision_rule_vars:
                d[0].unfix()
                for i in range(1, num_dr_vars):
                    d[i].fix(0)
        elif model.linear_efficiency_applied:
            for d in block.util.decision_rule_vars:
                d.unfix()
                for i in range(num_uncertain_params + 1, num_dr_vars):
                    d[i].fix(0)
        else:
            for d in block.util.decision_rule_vars:
                d.unfix()

    # === Solve the polishing model
    polish_soln = MasterResult()
    solver = config.global_solver

    if not solver.available():
        raise RuntimeError("NLP solver %s is not available." %
                           config.global_solver)
    try:
        results = solver.solve(polishing_model, tee=config.tee)
        polish_soln.termination_condition = results.solver.termination_condition
    except ValueError as err:
        polish_soln.pyros_termination_condition = pyrosTerminationCondition.subsolver_error
        polish_soln.termination_condition = tc.error
        raise

    polish_soln.fsv_values = list(
        v.value
        for v in polishing_model.scenarios[0, 0].util.first_stage_variables)
    polish_soln.ssv_values = list(
        v.value
        for v in polishing_model.scenarios[0, 0].util.second_stage_variables)
    polish_soln.first_stage_objective = value(nom_block.first_stage_objective)
    polish_soln.second_stage_objective = value(
        nom_block.second_stage_objective)

    # === Process solution by termination condition
    acceptable = [tc.optimal, tc.locallyOptimal, tc.feasible]
    if polish_soln.termination_condition not in acceptable:
        return results

    for i, d in enumerate(
            model_data.master_model.scenarios[0, 0].util.decision_rule_vars):
        for index in d:
            d[index].set_value(polishing_model.scenarios[
                0, 0].util.decision_rule_vars[i][index].value,
                               skip_validation=True)

    return results
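
A minimal sketch (toy model, hypothetical names) of the polishing trick used above: each |z_i| is bounded by a nonnegative tau_i through the constraint pair -tau_i <= z_i <= tau_i, so minimizing sum(tau) minimizes the coefficient magnitudes.

from pyomo.environ import (ConcreteModel, ConstraintList, NonNegativeReals,
                           Objective, RangeSet, Var, minimize)

m = ConcreteModel()
m.I = RangeSet(3)
m.z = Var(m.I)  # stand-ins for the decision rule variables
m.tau = Var(m.I, within=NonNegativeReals)

m.abs_cons = ConstraintList()
for i in m.I:
    m.abs_cons.add(-m.tau[i] <= m.z[i])
    m.abs_cons.add(m.z[i] <= m.tau[i])

m.polish_obj = Objective(expr=sum(m.tau[i] for i in m.I), sense=minimize)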
Example #15
    def solve(self, model, **kwds):
        """Solve the model.

        Warning: this solver is still in beta. Keyword arguments subject to
        change. Undocumented keyword arguments definitely subject to change.

        This function performs all of the GDPopt solver setup and problem
        validation. It then calls upon helper functions to construct the
        initial master approximation and iteration loop.

        Args:
            model (Block): a Pyomo model or block to be solved

        """
        config = self.CONFIG(kwds.pop('options', {}))
        config.set_value(kwds)
        solve_data = GDPoptSolveData()
        created_GDPopt_block = False

        old_logger_level = config.logger.getEffectiveLevel()
        try:
            if config.tee and old_logger_level > logging.INFO:
                # If the logger does not already include INFO, include it.
                config.logger.setLevel(logging.INFO)
            config.logger.info("---Starting GDPopt---")

            # Create a model block on which to store GDPopt-specific utility
            # modeling objects.
            if hasattr(model, 'GDPopt_utils'):
                raise RuntimeError(
                    "GDPopt needs to create a Block named GDPopt_utils "
                    "on the model object, but an attribute with that name "
                    "already exists.")
            else:
                created_GDPopt_block = True
                model.GDPopt_utils = Block(
                    doc="Container for GDPopt solver utility modeling objects")

            solve_data.original_model = model

            solve_data.working_model = clone_orig_model_with_lists(model)
            GDPopt = solve_data.working_model.GDPopt_utils
            record_original_model_statistics(solve_data, config)

            solve_data.current_strategy = config.strategy

            # Reformulate integer variables to binary
            reformulate_integer_variables(solve_data.working_model, config)

            # Save ordered lists of main modeling components, so that data can
            # be easily transferred between future model clones.
            build_ordered_component_lists(solve_data.working_model)
            record_working_model_statistics(solve_data, config)
            solve_data.results.solver.name = 'GDPopt ' + str(self.version())

            # Save model initial values. These are used later to initialize NLP
            # subproblems.
            solve_data.initial_var_values = list(
                v.value for v in GDPopt.working_var_list)

            # Store the initial model state as the best solution found. If we
            # find no better solution, then we will restore from this copy.
            solve_data.best_solution_found = solve_data.initial_var_values

            # Validate the model to ensure that GDPopt is able to solve it.
            if not model_is_valid(solve_data, config):
                return

            # Maps in order to keep track of certain generated constraints
            GDPopt.oa_cut_map = ComponentMap()

            # Integer cuts exclude particular discrete decisions
            GDPopt.integer_cuts = ConstraintList(doc='integer cuts')

            # Feasible integer cuts exclude discrete realizations that have
            # been explored via an NLP subproblem. Depending on model
            # characteristics, the user may wish to revisit NLP subproblems
            # (with a different initialization, for example). Therefore, these
            # cuts are not enabled by default, unless the initial model has no
            # discrete decisions.

            # Note: these cuts will only exclude integer realizations that are
            # not already in the primary GDPopt_integer_cuts ConstraintList.
            GDPopt.no_backtracking = ConstraintList(
                doc='explored integer cuts')

            # Set up iteration counters
            solve_data.master_iteration = 0
            solve_data.mip_iteration = 0
            solve_data.nlp_iteration = 0

            # set up bounds
            solve_data.LB = float('-inf')
            solve_data.UB = float('inf')
            solve_data.iteration_log = {}

            # Flag indicating whether the solution improved in the past
            # iteration or not
            solve_data.feasible_solution_improved = False

            # Initialize the master problem
            GDPopt_initialize_master(solve_data, config)

            # Algorithm main loop
            GDPopt_iteration_loop(solve_data, config)

            # Update values in working model
            copy_var_list_values(
                from_list=solve_data.best_solution_found,
                to_list=GDPopt.working_var_list,
                config=config)
            GDPopt.objective_value.set_value(
                value(solve_data.working_objective_expr, exception=False))

            # Update values in original model
            copy_var_list_values(
                GDPopt.orig_var_list,
                solve_data.original_model.GDPopt_utils.orig_var_list,
                config)

            solve_data.results.problem.lower_bound = solve_data.LB
            solve_data.results.problem.upper_bound = solve_data.UB

        finally:
            config.logger.setLevel(old_logger_level)
            if created_GDPopt_block:
                model.del_component('GDPopt_utils')
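
A small sketch (hypothetical model) of the cleanup pattern this variant relies on: attach a temporary utility Block, and guarantee its removal in a finally clause even when the solve raises.

from pyomo.environ import Block, ConcreteModel, Var

model = ConcreteModel()
model.x = Var()

created_block = False
try:
    if hasattr(model, 'GDPopt_utils'):
        raise RuntimeError("GDPopt_utils already exists on the model.")
    model.GDPopt_utils = Block(doc="temporary solver workspace")
    created_block = True
    # ... solver setup and iteration loop would run here ...
finally:
    if created_block:
        model.del_component('GDPopt_utils')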
Example #16
def coefficient_matching(model, constraint, uncertain_params, config):
    '''
    :param model: master problem model
    :param constraint: the constraint from the master problem model
    :param uncertain_params: the list of uncertain parameters
    :param config: the PyROS solver config, used to identify effectively certain parameters
    :return: a (successful_matching, robust_infeasible) pair: successful_matching is True if
             the coefficient matching was successful; robust_infeasible is True if the problem
             is proven robust infeasible due to constraints of the form 1 == 0
    '''
    # === Returned flags
    successful_matching = True
    robust_infeasible = False

    # === Efficiency for q_LB = q_UB
    actual_uncertain_params = []

    for i in range(len(uncertain_params)):
        if not is_certain_parameter(uncertain_param_index=i, config=config):
            actual_uncertain_params.append(uncertain_params[i])

    # === Add coefficient matching constraint list
    if not hasattr(model, "coefficient_matching_constraints"):
        model.coefficient_matching_constraints = ConstraintList()
    if not hasattr(model, "swapped_constraints"):
        model.swapped_constraints = ConstraintList()

    variables_in_constraint = ComponentSet(identify_variables(constraint.expr))
    params_in_constraint = ComponentSet(identify_mutable_parameters(constraint.expr))
    first_stage_variables = model.util.first_stage_variables
    second_stage_variables = model.util.second_stage_variables

    # === Determine if we need to do DR expression/ssv substitution to
    #     make h(x,z,q) == 0 into h(x,d,q) == 0 (which is just h(x,q) == 0)
    if all(v in ComponentSet(first_stage_variables) for v in variables_in_constraint) and \
            any(q in ComponentSet(actual_uncertain_params) for q in params_in_constraint):
        # h(x, q) == 0
        pass
    elif all(v in ComponentSet(first_stage_variables + second_stage_variables) for v in variables_in_constraint) and \
            any(q in ComponentSet(actual_uncertain_params) for q in params_in_constraint):
        constraint = substitute_ssv_in_dr_constraints(model=model, constraint=constraint)
        variables_in_constraint = ComponentSet(identify_variables(constraint.expr))
        params_in_constraint = ComponentSet(identify_mutable_parameters(constraint.expr))
    else:
        pass

    if all(v in ComponentSet(first_stage_variables) for v in variables_in_constraint) and \
            any(q in ComponentSet(actual_uncertain_params) for q in params_in_constraint):

        # Swap param objects for variable objects in this constraint
        model.param_set = []
        for i in range(len(list(variables_in_constraint))):
            # Initialize Params to non-zero value due to standard_repn bug
            model.add_component("p_%s" % i, Param(initialize=1, mutable=True))
            model.param_set.append(getattr(model, "p_%s" % i))

        model.variable_set = []
        for i in range(len(list(actual_uncertain_params))):
            model.add_component("x_%s" % i, Var(initialize=1))
            model.variable_set.append(getattr(model, "x_%s" % i))

        original_var_to_param_map = list(zip(list(variables_in_constraint), model.param_set))
        original_param_to_var_map = list(zip(list(actual_uncertain_params), model.variable_set))

        # Map ids of the original variables to their stand-in Params
        var_to_param_substitution_map_forward = {}
        for var, param in original_var_to_param_map:
            var_to_param_substitution_map_forward[id(var)] = param

        # Map ids of the uncertain Params to their stand-in Vars
        param_to_var_substitution_map_forward = {}
        for param, var in original_param_to_var_map:
            param_to_var_substitution_map_forward[id(param)] = var

        # Reverse map: stand-in Param ids back to the original variables
        var_to_param_substitution_map_reverse = {}
        for var, param in original_var_to_param_map:
            var_to_param_substitution_map_reverse[id(param)] = var

        # Reverse map: stand-in Var ids back to the original uncertain Params
        param_to_var_substitution_map_reverse = {}
        for param, var in original_param_to_var_map:
            param_to_var_substitution_map_reverse[id(var)] = param

        model.swapped_constraints.add(
            replace_expressions(
                expr=replace_expressions(expr=constraint.lower,
                                         substitution_map=param_to_var_substitution_map_forward),
                substitution_map=var_to_param_substitution_map_forward) ==
            replace_expressions(
                expr=replace_expressions(expr=constraint.body,
                                         substitution_map=param_to_var_substitution_map_forward),
                substitution_map=var_to_param_substitution_map_forward))

        swapped = model.swapped_constraints[max(model.swapped_constraints.keys())]

        val = generate_standard_repn(swapped.body, compute_values=False)

        if val.constant is not None:
            if type(val.constant) not in native_types:
                temp_expr = replace_expressions(val.constant, substitution_map=var_to_param_substitution_map_reverse)
                if temp_expr.is_potentially_variable():
                    model.coefficient_matching_constraints.add(expr=temp_expr == 0)
                elif math.isclose(value(temp_expr), 0, rel_tol=COEFF_MATCH_REL_TOL, abs_tol=COEFF_MATCH_ABS_TOL):
                    pass
                else:
                    successful_matching = False
                    robust_infeasible = True
            elif math.isclose(value(val.constant), 0, rel_tol=COEFF_MATCH_REL_TOL, abs_tol=COEFF_MATCH_ABS_TOL):
                pass
            else:
                successful_matching = False
                robust_infeasible = True
        if val.linear_coefs is not None:
            for coeff in val.linear_coefs:
                if type(coeff) not in native_types:
                    temp_expr = replace_expressions(coeff, substitution_map=var_to_param_substitution_map_reverse)
                    if temp_expr.is_potentially_variable():
                        model.coefficient_matching_constraints.add(expr=temp_expr == 0)
                    elif math.isclose(value(temp_expr), 0, rel_tol=COEFF_MATCH_REL_TOL, abs_tol=COEFF_MATCH_ABS_TOL):
                        pass
                    else:
                        successful_matching = False
                        robust_infeasible = True
                elif math.isclose(value(coeff), 0, rel_tol=COEFF_MATCH_REL_TOL, abs_tol=COEFF_MATCH_ABS_TOL):
                    pass
                else:
                    successful_matching = False
                    robust_infeasible = True
        if val.quadratic_coefs:
            for coeff in val.quadratic_coefs:
                if type(coeff) not in native_types:
                    temp_expr = replace_expressions(coeff, substitution_map=var_to_param_substitution_map_reverse)
                    if temp_expr.is_potentially_variable():
                        model.coefficient_matching_constraints.add(expr=temp_expr == 0)
                    elif math.isclose(value(temp_expr), 0, rel_tol=COEFF_MATCH_REL_TOL, abs_tol=COEFF_MATCH_ABS_TOL):
                        pass
                    else:
                        successful_matching = False
                        robust_infeasible = True
                elif math.isclose(value(coeff), 0, rel_tol=COEFF_MATCH_REL_TOL, abs_tol=COEFF_MATCH_ABS_TOL):
                    pass
                else:
                    successful_matching = False
                    robust_infeasible = True
        if val.nonlinear_expr is not None:
            successful_matching = False
            robust_infeasible = False

        if successful_matching:
            model.util.h_x_q_constraints.add(constraint)

    for i in range(len(list(variables_in_constraint))):
        model.del_component("p_%s" % i)

    # Delete the same number of x_ components as were created above
    for i in range(len(list(actual_uncertain_params))):
        model.del_component("x_%s" % i)

    model.del_component("swapped_constraints")
    model.del_component("swapped_constraints_index")

    return successful_matching, robust_infeasible
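
A minimal sketch (toy expression, hypothetical names) of the coefficient-matching idea behind the function above: for h(x, q) == 0 to hold for every realization of q, the q-free constant and every coefficient of q must vanish. Mirroring the swap performed above, the first-stage variable is temporarily represented by a mutable Param and q by a Var, so that generate_standard_repn groups terms by powers of q.

from pyomo.environ import ConcreteModel, Param, Var
from pyomo.repn import generate_standard_repn

m = ConcreteModel()
m.x = Param(initialize=3, mutable=True)  # stand-in for first-stage variable x
m.q = Var(initialize=1)                  # uncertain parameter recast as a Var

# h(x, q) = (x - 2)*q + (x - 2): matching yields x - 2 == 0 twice over
h = (m.x - 2) * m.q + (m.x - 2)
repn = generate_standard_repn(h, compute_values=False)

print(repn.constant)      # x - 2: the q-free part, matched to zero
print(repn.linear_coefs)  # (x - 2,): the coefficient of q, matched to zero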
Example #17
    def create_model(self):
        """
        Create and return the mathematical model.
        """

        if options.DEBUG:
            logging.info("Creating model for day %d" % self.day_id)

        # Obtain the orders book
        book = self.orders
        complexOrders = self.complexOrders

        # Create the optimization model
        model = ConcreteModel()
        model.periods = Set(initialize=book.periods)
        maxPeriod = max(book.periods)
        model.bids = Set(initialize=range(len(book.bids)))
        model.L = Set(initialize=book.locations)
        model.sBids = Set(initialize=[
            i for i in range(len(book.bids)) if book.bids[i].type == 'SB'
        ])
        model.bBids = Set(initialize=[
            i for i in range(len(book.bids)) if book.bids[i].type == 'BB'
        ])
        model.cBids = RangeSet(len(complexOrders))  # Complex orders
        model.C = RangeSet(len(self.connections))
        model.directions = RangeSet(2)  # 1 = up, 2 = down (TODO: clean up)

        # Variables
        model.xs = Var(model.sBids, domain=Reals,
                       bounds=(0.0, 1.0))  # Single period bids acceptance
        model.xb = Var(model.bBids, domain=Binary)  # Block bids acceptance
        model.xc = Var(model.cBids, domain=Binary)  # Complex orders acceptance
        model.pi = Var(model.L * model.periods,
                       domain=Reals,
                       bounds=self.priceCap)  # Market prices
        model.s = Var(model.bids, domain=NonNegativeReals)  # Bids
        model.sc = Var(model.cBids, domain=NonNegativeReals)  # complex orders
        model.complexVolume = Var(model.cBids, model.periods,
                                  domain=Reals)  # Bids
        model.pi_lg_up = Var(model.cBids * model.periods,
                             domain=NonNegativeReals)  # Market prices
        model.pi_lg_down = Var(model.cBids * model.periods,
                               domain=NonNegativeReals)  # Market prices
        model.pi_lg = Var(model.cBids * model.periods,
                          domain=Reals)  # Market prices

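        # Bounds rule for interconnection flows: Pyomo calls this once per
        # (connection, direction, period) index; direction 1 uses the upward
        # capacity, direction 2 the downward capacity.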
        def flowBounds(m, c, d, t):
            capacity = self.connections[c - 1].capacity_up[t] if d == 1 else \
                self.connections[c - 1].capacity_down[t]
            return (0, capacity)

        model.f = Var(model.C * model.directions * model.periods,
                      domain=NonNegativeReals,
                      bounds=flowBounds)
        model.u = Var(model.C * model.directions * model.periods,
                      domain=NonNegativeReals)

        # Objective
        def primalObj(m):
            # Single period bids cost
            expr = summation(
                {i: book.bids[i].price * book.bids[i].volume
                 for i in m.sBids}, m.xs)
            # Block bids cost
            expr += summation(
                {
                    i: book.bids[i].price * sum(book.bids[i].volumes.values())
                    for i in m.bBids
                }, m.xb)
            return -expr

        if options.PRIMAL and not options.DUAL:
            model.obj = Objective(rule=primalObj, sense=maximize)

        def primalDualObj(m):
            return primalObj(m) + sum(1e-5 * m.xc[i] for i in model.cBids)

        if options.PRIMAL and options.DUAL:
            model.obj = Objective(rule=primalDualObj, sense=maximize)

        # Complex order constraint
        if options.PRIMAL and options.DUAL:
            model.deactivate_suborders = ConstraintList()
            for o in model.cBids:
                sub_ids = complexOrders[o - 1].ids
                curves = complexOrders[o - 1].curves
                for bid_id in sub_ids:
                    bid = book.bids[bid_id]
                    if bid.period <= complexOrders[o - 1].SSperiods and bid.price == \
                            curves[bid.period].bids[0].price:
                        # This suborder, the first step of the curve in the
                        # scheduled stop periods, is not automatically
                        # deactivated when the MIC constraint is not satisfied
                        pass
                    else:
                        model.deactivate_suborders.add(
                            model.xs[bid_id] <= model.xc[o])

        # Ramping constraints for complex orders
        def complex_volume_def_rule(m, o, p):
            sub_ids = complexOrders[o - 1].ids
            return m.complexVolume[o, p] == sum(m.xs[i] * book.bids[i].volume
                                                for i in sub_ids
                                                if book.bids[i].period == p)

        if options.PRIMAL:
            model.complex_volume_def = Constraint(model.cBids,
                                                  model.periods,
                                                  rule=complex_volume_def_rule)

        def complex_lg_down_rule(m, o, p):
            if p + 1 > maxPeriod or complexOrders[o - 1].ramp_down is None:
                return Constraint.Skip
            else:
                return (m.complexVolume[o, p] - m.complexVolume[o, p + 1]
                        <= complexOrders[o - 1].ramp_down * m.xc[o])

        if options.PRIMAL and options.APPLY_LOAD_GRADIENT:
            model.complex_lg_down = Constraint(model.cBids,
                                               model.periods,
                                               rule=complex_lg_down_rule)

        def complex_lg_up_rule(m, o, p):
            if p + 1 > maxPeriod or complexOrders[o - 1].ramp_up is None:
                return Constraint.Skip
            else:
                return (m.complexVolume[o, p + 1] - m.complexVolume[o, p]
                        <= complexOrders[o - 1].ramp_up)

        if options.PRIMAL and options.APPLY_LOAD_GRADIENT:
            model.complex_lg_up = Constraint(
                model.cBids, model.periods,
                rule=complex_lg_up_rule)  # Balance constraint

        # Energy balance constraints
        balanceExpr = {l: {t: 0.0 for t in model.periods} for l in model.L}
        for i in model.sBids:  # Simple bids
            bid = book.bids[i]
            balanceExpr[bid.location][bid.period] += bid.volume * model.xs[i]
        for i in model.bBids:  # Block bids
            bid = book.bids[i]
            for t, v in bid.volumes.items():
                balanceExpr[bid.location][t] += v * model.xb[i]

        def balanceCstr(m, l, t):
            export = 0.0
            for c in model.C:
                if self.connections[c - 1].from_id == l:
                    export += m.f[c, 1, t] - m.f[c, 2, t]
                elif self.connections[c - 1].to_id == l:
                    export += m.f[c, 2, t] - m.f[c, 1, t]
            return balanceExpr[l][t] == export

        if options.PRIMAL:
            model.balance = Constraint(model.L * model.periods,
                                       rule=balanceCstr)

        # Surplus of single period bids
        def sBidSurplus(m, i):  # For the "usual" step orders
            bid = book.bids[i]
            if i in self.plain_single_orders:
                return m.s[i] >= (m.pi[bid.location, bid.period] -
                                  bid.price) * bid.volume
            else:
                return Constraint.Skip

        if options.DUAL:
            model.sBidSurplus = Constraint(model.sBids, rule=sBidSurplus)

        # Surplus definition for complex suborders accounting for impact of load gradient condition
        if options.DUAL:
            model.complex_sBidSurplus = ConstraintList()
            for o in model.cBids:
                sub_ids = complexOrders[o - 1].ids
                l = complexOrders[o - 1].location
                for i in sub_ids:
                    bid = book.bids[i]
                    model.complex_sBidSurplus.add(
                        model.s[i] >=
                        (model.pi[l, bid.period] + model.pi_lg[o, bid.period] -
                         bid.price) * bid.volume)

        def LG_price_def_rule(m, o, p):
            l = complexOrders[o - 1].location

            exp = 0
            if options.APPLY_LOAD_GRADIENT:
                D = complexOrders[o - 1].ramp_down
                U = complexOrders[o - 1].ramp_up
                if D is not None:
                    exp += (m.pi_lg_down[o, p - 1] if p > 1 else
                            0) - (m.pi_lg_down[o, p] if p < maxPeriod else 0)
                if U is not None:
                    exp -= (m.pi_lg_up[o, p - 1] if p > 1 else
                            0) - (m.pi_lg_up[o, p] if p < maxPeriod else 0)

            return m.pi_lg[o, p] == exp

        if options.DUAL:
            model.LG_price_def = Constraint(model.cBids,
                                            model.periods,
                                            rule=LG_price_def_rule)

        # Surplus of block bids
        def bBidSurplus(m, i):
            bid = book.bids[i]
            bidVolume = -sum(bid.volumes.values())
            bigM = (self.priceCap[1] -
                    self.priceCap[0]) * bidVolume  # FIXME tighten BIGM
            return m.s[i] + sum([
                m.pi[bid.location, t] * -v for t, v in bid.volumes.items()
            ]) >= bid.cost * bidVolume + bigM * (1 - m.xb[i])

        if options.DUAL:
            model.bBidSurplus = Constraint(model.bBids, rule=bBidSurplus)

        # Surplus of complex orders
        def cBidSurplus(m, o):
            complexOrder = complexOrders[o - 1]
            sub_ids = complexOrder.ids
            if book.bids[sub_ids[0]].volume > 0:  # supply
                bigM = sum((self.priceCap[1] - book.bids[i].price) *
                           book.bids[i].volume for i in sub_ids)
            else:
                bigM = sum((book.bids[i].price - self.priceCap[0]) *
                           book.bids[i].volume for i in sub_ids)
            return m.sc[o] + bigM * (1 - m.xc[o]) >= sum(m.s[i]
                                                         for i in sub_ids)

        if options.DUAL:
            model.cBidSurplus = Constraint(model.cBids, rule=cBidSurplus)

        # Surplus of complex orders
        def cBidSurplus_2(m, o):
            complexOrder = complexOrders[o - 1]
            expr = 0
            for i in complexOrder.ids:
                bid = book.bids[i]
                if (bid.period <= complexOrder.SSperiods) and (
                        bid.price
                        == complexOrder.curves[bid.period].bids[0].price):
                    expr += m.s[i]
            return m.sc[o] >= expr

        if options.DUAL:
            model.cBidSurplus_2 = Constraint(
                model.cBids, rule=cBidSurplus_2)  # MIC constraint

        def cMIC(m, o):
            complexOrder = complexOrders[o - 1]

            if complexOrder.FT == 0 and complexOrder.VT == 0:
                return Constraint.Skip

            expr = 0
            bigM = complexOrder.FT
            for i in complexOrder.ids:
                bid = book.bids[i]
                if (bid.period <= complexOrder.SSperiods) and (
                        bid.price
                        == complexOrder.curves[bid.period].bids[0].price):
                    bigM += (bid.volume * (self.priceCap[1] - bid.price)
                             )  # FIXME assumes order is supply
                expr += bid.volume * m.xs[i] * (bid.price - complexOrder.VT)

            return m.sc[o] + expr + bigM * (1 - m.xc[o]) >= complexOrder.FT

        if options.DUAL and options.PRIMAL:
            model.cMIC = Constraint(model.cBids, rule=cMIC)

        # Dual connections capacity
        def dualCapacity(m, c, t):
            exportPrices = 0.0
            for l in m.L:
                if l == self.connections[c - 1].from_id:
                    exportPrices += m.pi[l, t]
                elif l == self.connections[c - 1].to_id:
                    exportPrices -= m.pi[l, t]
            return m.u[c, 1, t] - m.u[c, 2, t] + exportPrices == 0.0

        if options.DUAL:
            model.dualCapacity = Constraint(model.C * model.periods,
                                            rule=dualCapacity)

        # Dual optimality
        def dualObj(m):
            dualObj = summation(m.s) + summation(m.sc)

            for o in m.cBids:
                sub_ids = complexOrders[o - 1].ids
                for bid_id in sub_ids:
                    # Remove the contribution of complex suborders, which were
                    # already accounted for in the previous summation over
                    # single bids
                    dualObj -= m.s[bid_id]

                if options.APPLY_LOAD_GRADIENT:
                    ramp_down = complexOrders[o - 1].ramp_down
                    ramp_up = complexOrders[o - 1].ramp_up
                    for p in m.periods:
                        if p == maxPeriod:
                            continue
                        if ramp_down is not None:
                            dualObj += ramp_down * m.pi_lg_down[
                                o, p]  # Add contribution of load gradient
                        if ramp_up is not None:
                            dualObj += ramp_up * m.pi_lg_up[
                                o, p]  # Add contribution of load gradient

            for c in model.C:
                for t in m.periods:
                    dualObj += self.connections[c - 1].capacity_up[t] * m.u[c, 1, t]
                    dualObj += self.connections[c - 1].capacity_down[t] * m.u[c, 2, t]

            return dualObj

        if not options.PRIMAL:
            model.obj = Objective(rule=dualObj, sense=minimize)

        def primalEqualsDual(m):
            return primalObj(m) >= dualObj(m)

        if options.DUAL and options.PRIMAL:
            model.primalEqualsDual = Constraint(rule=primalEqualsDual)

        self.model = model
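
A minimal sketch (toy numbers) of the big-M pattern used by bBidSurplus, cBidSurplus and cMIC above: a surplus inequality s >= rhs should only bind when the acceptance binary x equals 1, so it is relaxed by M*(1 - x), with M large enough to make the constraint vacuous at x = 0.

from pyomo.environ import Binary, ConcreteModel, Constraint, NonNegativeReals, Var

m = ConcreteModel()
m.s = Var(within=NonNegativeReals)  # surplus variable
m.x = Var(within=Binary)            # bid acceptance decision

rhs = 42.0
M = 1000.0  # must dominate rhs over the feasible region (cf. the FIXMEs above)

# Binding when x == 1 (s >= rhs); vacuous when x == 0 (s >= rhs - M)
m.surplus = Constraint(expr=m.s + M * (1 - m.x) >= rhs)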