Example no. 1
def add_decision_rule_variables(model_data, config):
    '''
    Function to add decision rule (DR) variables to the working model. DR variables become first-stage design
    variables which do not get copied at each iteration. Currently supports static_approx (no DR), affine DR,
    and quadratic DR.
    :param model_data: the data container for the working model
    :param config: the config block
    :return:
    '''
    second_stage_variables = model_data.working_model.util.second_stage_variables
    first_stage_variables = model_data.working_model.util.first_stage_variables
    uncertain_params = model_data.working_model.util.uncertain_params
    decision_rule_vars = []
    degree = config.decision_rule_order
    bounds = (None, None)
    if degree == 0:
        for i in range(len(second_stage_variables)):
            model_data.working_model.add_component(
                    "decision_rule_var_" + str(i),
                    Var(initialize=value(second_stage_variables[i], exception=False),
                        bounds=bounds, domain=Reals)
            )
            first_stage_variables.extend(getattr(model_data.working_model, "decision_rule_var_" + str(i)).values())
            decision_rule_vars.append(getattr(model_data.working_model, "decision_rule_var_" + str(i)))
    elif degree == 1:
        for i in range(len(second_stage_variables)):
            index_set = list(range(len(uncertain_params) + 1))
            model_data.working_model.add_component("decision_rule_var_" + str(i),
                    Var(index_set,
                        initialize=0,
                        bounds=bounds,
                        domain=Reals))
            # === For affine drs, the [0]th constant term is initialized to the control variable values, all other terms are initialized to 0
            getattr(model_data.working_model, "decision_rule_var_" + str(i))[0].set_value(value(second_stage_variables[i], exception=False), skip_validation=True)
            first_stage_variables.extend(list(getattr(model_data.working_model, "decision_rule_var_" + str(i)).values()))
            decision_rule_vars.append(getattr(model_data.working_model, "decision_rule_var_" + str(i)))
    elif degree in (2, 3, 4):
        for i in range(len(second_stage_variables)):
            num_vars = int(sp.special.comb(N=len(uncertain_params) + degree, k=degree))
            dict_init = {}
            for r in range(num_vars):
                if r == 0:
                    dict_init.update({r: value(second_stage_variables[i], exception=False)})
                else:
                    dict_init.update({r: 0})
            model_data.working_model.add_component("decision_rule_var_" + str(i),
                                                   Var(list(range(num_vars)), initialize=dict_init, bounds=bounds,
                                                       domain=Reals))
            first_stage_variables.extend(
                list(getattr(model_data.working_model, "decision_rule_var_" + str(i)).values()))
            decision_rule_vars.append(getattr(model_data.working_model, "decision_rule_var_" + str(i)))
    else:
        raise ValueError(
            "Decision rule order " + str(config.decision_rule_order) +
            " is not yet supported. PyROS supports polynomials of degree 0 (static approximation), 1, 2.")
    model_data.working_model.util.decision_rule_vars = decision_rule_vars
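A quick illustrative check (standalone, not part of PyROS) of how many DR coefficients the branches above create per second-stage variable: 1 for the static rule, len(uncertain_params) + 1 for the affine rule, and comb(len(uncertain_params) + degree, degree) in general.

# Illustrative only: count of DR coefficients per second-stage variable,
# mirroring the sp.special.comb call in the quadratic branch above.
from scipy.special import comb

for n_q in (2, 4):                      # number of uncertain parameters
    for degree in (0, 1, 2):            # decision rule order
        n_coeffs = int(comb(N=n_q + degree, k=degree))
        print(f"n_q={n_q}, degree={degree}: {n_coeffs} DR coefficients")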
Example no. 2
def load_final_solution(model_data, master_soln, config):
    '''
    Load the final solution into the original model object
    :param model_data: model data container object
    :param master_soln: results data container object returned to user
    :param config: the config block
    :return:
    '''
    if config.objective_focus == ObjectiveType.nominal:
        model = model_data.original_model
        soln = master_soln.nominal_block
    elif config.objective_focus == ObjectiveType.worst_case:
        model = model_data.original_model
        indices = range(len(master_soln.master_model.scenarios))
        k = max(indices, key=lambda i: value(master_soln.master_model.scenarios[i, 0].first_stage_objective +
                                             master_soln.master_model.scenarios[i, 0].second_stage_objective))
        soln = master_soln.master_model.scenarios[k, 0]

    src_vars = getattr(model, 'tmp_var_list')
    local_vars = getattr(soln, 'tmp_var_list')
    varMap = list(zip(src_vars, local_vars))

    for src, local in varMap:
        src.set_value(local.value, skip_validation=True)

    return
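A minimal sketch of the value-copy pattern used above; the models m_src and m_dst are hypothetical stand-ins for the chosen master block and the original model.

from pyomo.environ import ConcreteModel, Var

m_src = ConcreteModel()
m_src.x = Var(initialize=1.5)
m_src.y = Var(initialize=-2.0)

m_dst = ConcreteModel()
m_dst.x = Var()
m_dst.y = Var()

# Pair variables positionally and push values across; skip_validation=True
# mirrors the call above so domain/bounds checks do not block the load.
for src, dst in zip([m_src.x, m_src.y], [m_dst.x, m_dst.y]):
    dst.set_value(src.value, skip_validation=True)

print(m_dst.x.value, m_dst.y.value)  # 1.5 -2.0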
Example no. 3
    def point_in_set(self, point):
        """
        Determines whether the supplied ``point`` is contained in the uncertainty set. Returns True or False.

        Args:
            point: The point being checked for membership in the set.
                   The coordinates of the point should be supplied in the same order as the elements of ``uncertain_params``
                   that are to be supplied to the PyROS solve statement.
                   This point must match the dimension of the uncertain parameters of the set.
        """

        # === Ensure point is of correct dimensionality as the uncertain parameters
        if len(point) != self.dim:
            raise AttributeError("Point must have same dimensions as uncertain parameters.")

        m = ConcreteModel()
        the_params = []
        for i in range(self.dim):
            m.add_component("x_%s" % i, Var(initialize=point[i]))
            the_params.append(getattr(m, "x_%s" % i))

        # === Generate constraint for set
        set_constraint = self.set_as_constraint(uncertain_params=the_params)

        # === value(con.expr) evaluates the relational expression: True if the constraint is satisfied, False otherwise.
        is_in_set = all(value(con.expr) for con in set_constraint.values())

        return is_in_set
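A hedged usage sketch, assuming the BoxSet class exported by pyomo.contrib.pyros: point_in_set builds the set's constraints around the candidate point and evaluates them.

from pyomo.contrib.pyros import BoxSet

box = BoxSet(bounds=[(0.0, 2.0), (1.0, 3.0)])
print(box.point_in_set((1.0, 2.0)))  # True: inside both intervals
print(box.point_in_set((5.0, 2.0)))  # False: first coordinate out of bounds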
Example no. 4
def solve_master_feasibility_problem(model_data, config):
    """
    Solve a slack-variable-based feasibility model for the master problem.
    """
    model = model_data.master_model.clone()
    for o in model.component_data_objects(Objective):
        o.deactivate()
    TransformationFactory("core.add_slack_variables").apply_to(model)
    solver = config.global_solver

    if not solver.available():
        raise RuntimeError("NLP solver %s is not available." % config.solver)
    try:
        results = solver.solve(model, tee=config.tee)
    except ValueError as err:
        if 'Cannot load a SolverResults object with bad status: error' in str(
                err):
            results.solver.termination_condition = tc.error
            results.solver.message = str(err)
        else:
            raise

    if check_optimal_termination(results) and value(
            model._core_add_slack_variables._slack_objective) <= 0:
        # If this led to a feasible solution, continue with this model
        # Load solution into master
        for v in model.component_data_objects(Var):
            master_v = model_data.master_model.find_component(v)
            if master_v is not None:
                master_v.set_value(v.value, skip_validation=True)
    return results
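A small sketch of the slack-model idea using the same transformation and component names referenced above; solving is omitted. In PyROS the master is deemed feasible when the optimal slack objective is at most zero.

from pyomo.environ import ConcreteModel, Var, Constraint, TransformationFactory

m = ConcreteModel()
m.x = Var(initialize=0.0)
m.c1 = Constraint(expr=m.x >= 1)
m.c2 = Constraint(expr=m.x <= -1)  # conflicts with c1, so nonzero slack is required

# Same transformation as above: adds nonnegative slacks to every constraint
# and a minimization objective over their (scaled) sum.
TransformationFactory("core.add_slack_variables").apply_to(m)
print(m._core_add_slack_variables._slack_objective.expr)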
Example no. 5
    def _get_table(self):
        from pyomo.core.expr import value

        tmp = []
        if self.options.columns is not None:
            tmp.append(self.options.columns)
        if self.options.set is not None:
            # Create column names
            if self.options.columns is None:
                cols = []
                for i in range(self.options.set.dimen):
                    cols.append(self.options.set.local_name + str(i))
                tmp.append(cols)
            # Get rows
            if self.options.sort is not None:
                for data in sorted(self.options.set):
                    if self.options.set.dimen > 1:
                        tmp.append(list(data))
                    else:
                        tmp.append([data])
            else:
                for data in self.options.set:
                    if self.options.set.dimen > 1:
                        tmp.append(list(data))
                    else:
                        tmp.append([data])
        elif self.options.param is not None:
            if type(self.options.param) in (list, tuple):
                _param = self.options.param
            else:
                _param = [self.options.param]
            tmp = []
            # Collect data
            for index in _param[0]:
                if index is None:
                    row = []
                elif type(index) in (list, tuple):
                    row = list(index)
                else:
                    row = [index]
                for param in _param:
                    row.append(value(param[index]))
                tmp.append(row)
            # Create column names
            if self.options.columns is None:
                cols = []
                for i in range(len(tmp[0]) - len(_param)):
                    cols.append('I' + str(i))
                for param in _param:
                    cols.append(param)
                tmp = [cols] + tmp
        return tmp
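An illustrative reconstruction (hypothetical model m) of the Param branch above: each table row carries the index components followed by the parameter value.

from pyomo.environ import ConcreteModel, Set, Param, value

m = ConcreteModel()
m.A = Set(initialize=[1, 2, 3])
m.p = Param(m.A, initialize={1: 10.0, 2: 20.0, 3: 30.0})

table = []
for index in m.p:
    # index components first (tuples are unpacked), then the value column
    row = list(index) if isinstance(index, tuple) else [index]
    row.append(value(m.p[index]))
    table.append(row)

print(table)  # [[1, 10.0], [2, 20.0], [3, 30.0]]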
Example no. 6
def coefficient_matching(model, constraint, uncertain_params, config):
    '''
    :param model: master problem model
    :param constraint: the constraint from the master problem model
    :param uncertain_params: the list of uncertain parameters
    :param config: the config block
    :return: (successful_matching, robust_infeasible); successful_matching is True if the coefficient matching
             succeeded, and robust_infeasible is True if the problem is proven robust infeasible due to a
             matching condition of the form 1 == 0
    '''
    # === Returned flags
    successful_matching = True
    robust_infeasible = False

    # === Efficiency for q_LB = q_UB
    actual_uncertain_params = []

    for i in range(len(uncertain_params)):
        if not is_certain_parameter(uncertain_param_index=i, config=config):
            actual_uncertain_params.append(uncertain_params[i])

    # === Add coefficient matching constraint list
    if not hasattr(model, "coefficient_matching_constraints"):
        model.coefficient_matching_constraints = ConstraintList()
    if not hasattr(model, "swapped_constraints"):
        model.swapped_constraints = ConstraintList()

    variables_in_constraint = ComponentSet(identify_variables(constraint.expr))
    params_in_constraint = ComponentSet(identify_mutable_parameters(constraint.expr))
    first_stage_variables = model.util.first_stage_variables
    second_stage_variables = model.util.second_stage_variables

    # === Determine if we need to do DR expression/ssv substitution to
    #     make h(x,z,q) == 0 into h(x,d,q) == 0 (which is just h(x,q) == 0)
    if all(v in ComponentSet(first_stage_variables) for v in variables_in_constraint) and \
            any(q in ComponentSet(actual_uncertain_params) for q in params_in_constraint):
        # h(x, q) == 0
        pass
    elif all(v in ComponentSet(first_stage_variables + second_stage_variables) for v in variables_in_constraint) and \
            any(q in ComponentSet(actual_uncertain_params) for q in params_in_constraint):
        constraint = substitute_ssv_in_dr_constraints(model=model, constraint=constraint)
        variables_in_constraint = ComponentSet(identify_variables(constraint.expr))
        params_in_constraint = ComponentSet(identify_mutable_parameters(constraint.expr))
    else:
        pass

    if all(v in ComponentSet(first_stage_variables) for v in variables_in_constraint) and \
            any(q in ComponentSet(actual_uncertain_params) for q in params_in_constraint):

        # Swap param objects for variable objects in this constraint
        model.param_set = []
        for i in range(len(list(variables_in_constraint))):
            # Initialize Params to non-zero value due to standard_repn bug
            model.add_component("p_%s" % i, Param(initialize=1, mutable=True))
            model.param_set.append(getattr(model, "p_%s" % i))

        model.variable_set = []
        for i in range(len(list(actual_uncertain_params))):
            model.add_component("x_%s" % i, Var(initialize=1))
            model.variable_set.append(getattr(model, "x_%s" % i))

        original_var_to_param_map = list(zip(list(variables_in_constraint), model.param_set))
        original_param_to_var_map = list(zip(list(actual_uncertain_params), model.variable_set))

        # Forward map: swap first-stage variables for placeholder Params
        var_to_param_substitution_map_forward = {}
        for var, param in original_var_to_param_map:
            var_to_param_substitution_map_forward[id(var)] = param

        # Forward map: swap uncertain Params for placeholder Vars
        param_to_var_substitution_map_forward = {}
        for param, var in original_param_to_var_map:
            param_to_var_substitution_map_forward[id(param)] = var

        # Reverse maps: restore the original objects after the standard repn is generated
        var_to_param_substitution_map_reverse = {}
        for var, param in original_var_to_param_map:
            var_to_param_substitution_map_reverse[id(param)] = var

        param_to_var_substitution_map_reverse = {}
        for param, var in original_param_to_var_map:
            param_to_var_substitution_map_reverse[id(var)] = param

        model.swapped_constraints.add(
            replace_expressions(
                expr=replace_expressions(expr=constraint.lower,
                                         substitution_map=param_to_var_substitution_map_forward),
                substitution_map=var_to_param_substitution_map_forward) ==
            replace_expressions(
                expr=replace_expressions(expr=constraint.body,
                                         substitution_map=param_to_var_substitution_map_forward),
                substitution_map=var_to_param_substitution_map_forward))

        swapped = model.swapped_constraints[max(model.swapped_constraints.keys())]

        val = generate_standard_repn(swapped.body, compute_values=False)

        if val.constant is not None:
            if type(val.constant) not in native_types:
                temp_expr = replace_expressions(val.constant, substitution_map=var_to_param_substitution_map_reverse)
                if temp_expr.is_potentially_variable():
                    model.coefficient_matching_constraints.add(expr=temp_expr == 0)
                elif math.isclose(value(temp_expr), 0, rel_tol=COEFF_MATCH_REL_TOL, abs_tol=COEFF_MATCH_ABS_TOL):
                    pass
                else:
                    successful_matching = False
                    robust_infeasible = True
            elif math.isclose(value(val.constant), 0, rel_tol=COEFF_MATCH_REL_TOL, abs_tol=COEFF_MATCH_ABS_TOL):
                pass
            else:
                successful_matching = False
                robust_infeasible = True
        if val.linear_coefs is not None:
            for coeff in val.linear_coefs:
                if type(coeff) not in native_types:
                    temp_expr = replace_expressions(coeff, substitution_map=var_to_param_substitution_map_reverse)
                    if temp_expr.is_potentially_variable():
                        model.coefficient_matching_constraints.add(expr=temp_expr == 0)
                    elif math.isclose(value(temp_expr), 0, rel_tol=COEFF_MATCH_REL_TOL, abs_tol=COEFF_MATCH_ABS_TOL):
                        pass
                    else:
                        successful_matching = False
                        robust_infeasible = True
                elif math.isclose(value(coeff), 0, rel_tol=COEFF_MATCH_REL_TOL, abs_tol=COEFF_MATCH_ABS_TOL):
                    pass
                else:
                    successful_matching = False
                    robust_infeasible = True
        if val.quadratic_coefs:
            for coeff in val.quadratic_coefs:
                if type(coeff) not in native_types:
                    temp_expr = replace_expressions(coeff, substitution_map=var_to_param_substitution_map_reverse)
                    if temp_expr.is_potentially_variable():
                        model.coefficient_matching_constraints.add(expr=temp_expr == 0)
                    elif math.isclose(value(temp_expr), 0, rel_tol=COEFF_MATCH_REL_TOL, abs_tol=COEFF_MATCH_ABS_TOL):
                        pass
                    else:
                        successful_matching = False
                        robust_infeasible = True
                elif math.isclose(value(coeff), 0, rel_tol=COEFF_MATCH_REL_TOL, abs_tol=COEFF_MATCH_ABS_TOL):
                    pass
                else:
                    successful_matching = False
                    robust_infeasible = True
        if val.nonlinear_expr is not None:
            # General nonlinear terms cannot be matched coefficient-wise;
            # matching fails, but this does not prove robust infeasibility
            successful_matching = False
            robust_infeasible = False

        if successful_matching:
            model.util.h_x_q_constraints.add(constraint)

    for i in range(len(list(variables_in_constraint))):
        model.del_component("p_%s" % i)

    for i in range(len(list(actual_uncertain_params))):
        model.del_component("x_%s" % i)

    model.del_component("swapped_constraints")
    model.del_component("swapped_constraints_index")

    return successful_matching, robust_infeasible
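A standalone sketch of the matching idea (not the PyROS internals above): for x1*q + (x2 - 1) == 0 to hold for every realization of q, treat q as a Var and the first-stage quantities as mutable Params; each coefficient of the resulting polynomial in q must then vanish.

from pyomo.environ import ConcreteModel, Var, Param
from pyomo.repn import generate_standard_repn

m = ConcreteModel()
m.q = Var(initialize=1)                   # stands in for the uncertain parameter
m.x1 = Param(initialize=1, mutable=True)  # stand-ins for first-stage quantities
m.x2 = Param(initialize=1, mutable=True)

expr = m.x1 * m.q + (m.x2 - 1)
repn = generate_standard_repn(expr, compute_values=False)

# Matching conditions read off the repn: the q-free term and the coefficient
# of q must both be zero, i.e. x2 - 1 == 0 and x1 == 0.
print(repn.constant)
print(repn.linear_coefs)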
Example no. 7
    def solve(self, model, first_stage_variables, second_stage_variables,
              uncertain_params, uncertainty_set, local_solver, global_solver,
              **kwds):
        """Solve the model.

        Parameters
        ----------
        model: ConcreteModel
            A ``ConcreteModel`` object representing the deterministic
            model, cast as a minimization problem.
        first_stage_variables: List[Var]
            The list of ``Var`` objects referenced in ``model``
            representing the design variables.
        second_stage_variables: List[Var]
            The list of ``Var`` objects referenced in ``model``
            representing the control variables.
        uncertain_params: List[Param]
            The list of ``Param`` objects referenced in ``model``
            representing the uncertain parameters.  MUST be ``mutable``.
            Assumes entries are provided in consistent order with the
            entries of 'nominal_uncertain_param_vals' input.
        uncertainty_set: UncertaintySet
            ``UncertaintySet`` object representing the uncertainty space
            that the final solutions will be robust against.
        local_solver: Solver
            ``Solver`` object to utilize as the primary local NLP solver.
        global_solver: Solver
            ``Solver`` object to utilize as the primary global NLP solver.

        """

        # === Add the explicit arguments to the config
        config = self.CONFIG(kwds.pop('options', {}))
        config.first_stage_variables = first_stage_variables
        config.second_stage_variables = second_stage_variables
        config.uncertain_params = uncertain_params
        config.uncertainty_set = uncertainty_set
        config.local_solver = local_solver
        config.global_solver = global_solver

        dev_options = kwds.pop('dev_options', {})
        config.set_value(kwds)
        config.set_value(dev_options)

        # === Validate kwarg inputs
        validate_kwarg_inputs(model, config)

        # === Validate ability of grcs RO solver to handle this model
        if not model_is_valid(model):
            raise AttributeError(
                "This model structure is not currently handled by the ROSolver."
            )

        # === Define nominal point if not specified
        if len(config.nominal_uncertain_param_vals) == 0:
            config.nominal_uncertain_param_vals = list(
                p.value for p in config.uncertain_params)
        elif len(config.nominal_uncertain_param_vals) != len(
                config.uncertain_params):
            raise AttributeError(
                "The nominal_uncertain_param_vals list must be the same length"
                "as the uncertain_params list")

        # === Create data containers
        model_data = ROSolveResults()
        model_data.timing = Bunch()

        # === Set up logger for logging results
        with time_code(model_data.timing, 'total', is_main_timer=True):
            config.progress_logger.setLevel(logging.INFO)

            # === PREAMBLE
            output_logger(config=config,
                          preamble=True,
                          version=str(self.version()))

            # === DISCLAIMER
            output_logger(config=config, disclaimer=True)

            # === A block to hold list-type data to make cloning easy
            util = Block(concrete=True)
            util.first_stage_variables = config.first_stage_variables
            util.second_stage_variables = config.second_stage_variables
            util.uncertain_params = config.uncertain_params

            model_data.util_block = unique_component_name(model, 'util')
            model.add_component(model_data.util_block, util)
            # Note:  model.component(model_data.util_block) is util

            # === Validate uncertainty set happens here, requires util block for Cardinality and FactorModel sets
            validate_uncertainty_set(config=config)

            # === Deactivate objective on model
            for o in model.component_data_objects(Objective):
                o.deactivate()

            # === Leads to a logger warning here for inactive obj when cloning
            model_data.original_model = model
            # === For keeping track of variables after cloning
            cname = unique_component_name(model_data.original_model,
                                          'tmp_var_list')
            src_vars = list(
                model_data.original_model.component_data_objects(Var))
            setattr(model_data.original_model, cname, src_vars)
            model_data.working_model = model_data.original_model.clone()

            # === Add objective expressions
            identify_objective_functions(model_data.working_model, config)

            # === Put model in standard form
            transform_to_standard_form(model_data.working_model)

            # === Replace variable bounds depending on uncertain params with
            #     explicit inequality constraints
            replace_uncertain_bounds_with_constraints(
                model_data.working_model,
                model_data.working_model.util.uncertain_params)

            # === Add decision rule information
            add_decision_rule_variables(model_data, config)
            add_decision_rule_constraints(model_data, config)

            # === Move bounds on control variables to explicit ineq constraints
            wm_util = model_data.working_model

            # === Assuming all other Var objects in the model are state variables
            fsv = ComponentSet(
                model_data.working_model.util.first_stage_variables)
            ssv = ComponentSet(
                model_data.working_model.util.second_stage_variables)
            sv = ComponentSet()
            model_data.working_model.util.state_vars = []
            for v in model_data.working_model.component_data_objects(Var):
                if v not in fsv and v not in ssv and v not in sv:
                    model_data.working_model.util.state_vars.append(v)
                    sv.add(v)

            # Bounds on second-stage variables and state variables are separation objectives;
            # they are brought in this way as explicit constraints
            for c in model_data.working_model.util.second_stage_variables:
                turn_bounds_to_constraints(c, wm_util, config)

            for c in model_data.working_model.util.state_vars:
                turn_bounds_to_constraints(c, wm_util, config)

            # === Make control_variable_bounds array
            wm_util.ssv_bounds = []
            for c in model_data.working_model.component_data_objects(
                    Constraint, descend_into=True):
                if "bound_con" in c.name:
                    wm_util.ssv_bounds.append(c)

            # === Solve and load solution into model
            pyros_soln, final_iter_separation_solns = ROSolver_iterative_solve(
                model_data, config)

            return_soln = ROSolveResults()
            if pyros_soln is not None and final_iter_separation_solns is not None:
                if config.load_solution and \
                        (pyros_soln.pyros_termination_condition is pyrosTerminationCondition.robust_optimal or
                         pyros_soln.pyros_termination_condition is pyrosTerminationCondition.robust_feasible):
                    load_final_solution(model_data, pyros_soln.master_soln,
                                        config)

                # === Return time info
                model_data.total_cpu_time = get_main_elapsed_time(
                    model_data.timing)
                iterations = pyros_soln.total_iters + 1

                # === Return config to user
                return_soln.config = config
                # Report the negative of the objective value if it was originally maximize, since we use the minimize form in the algorithm
                if next(model.component_data_objects(
                        Objective)).sense == maximize:
                    negation = -1
                else:
                    negation = 1
                if config.objective_focus == ObjectiveType.nominal:
                    return_soln.final_objective_value = negation * value(
                        pyros_soln.master_soln.master_model.obj)
                elif config.objective_focus == ObjectiveType.worst_case:
                    return_soln.final_objective_value = negation * value(
                        pyros_soln.master_soln.master_model.zeta)
                return_soln.pyros_termination_condition = pyros_soln.pyros_termination_condition

                return_soln.time = model_data.total_cpu_time
                return_soln.iterations = iterations

                # === Remove util block
                model.del_component(model_data.util_block)

                del pyros_soln.util_block
                del pyros_soln.working_model
            else:
                return_soln.pyros_termination_condition = pyrosTerminationCondition.robust_infeasible
                return_soln.final_objective_value = None
                return_soln.time = get_main_elapsed_time(model_data.timing)
                return_soln.iterations = 0
        return return_soln
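A hedged end-to-end usage sketch of this interface; the toy model, the variable partition, and the subsolver choices (ipopt, baron) are assumptions and require those solvers to be installed.

import pyomo.environ as pyo
from pyomo.contrib.pyros import BoxSet

m = pyo.ConcreteModel()
m.x = pyo.Var(bounds=(0, 10))                  # first-stage (design) variable
m.z = pyo.Var(bounds=(0, 10))                  # second-stage (control) variable
m.q = pyo.Param(initialize=1.0, mutable=True)  # uncertain parameter (must be mutable)
m.con = pyo.Constraint(expr=m.z >= m.q * m.x)
m.obj = pyo.Objective(expr=m.x + m.z, sense=pyo.minimize)

pyros = pyo.SolverFactory("pyros")
results = pyros.solve(
    m,
    first_stage_variables=[m.x],
    second_stage_variables=[m.z],
    uncertain_params=[m.q],
    uncertainty_set=BoxSet(bounds=[(0.5, 1.5)]),
    local_solver=pyo.SolverFactory("ipopt"),
    global_solver=pyo.SolverFactory("baron"),
)
print(results.pyros_termination_condition)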
Example no. 8
def minimize_dr_vars(model_data, config):
    """
    Decision rule polishing: For a given optimal design (x) determined in separation,
    and the optimal value for control vars (z), choose min magnitude decision_rule_var
    values.
    """
    #config.progress_logger.info("Executing decision rule variable polishing solve.")
    model = model_data.master_model
    polishing_model = model.clone()

    first_stage_variables = polishing_model.scenarios[
        0, 0].util.first_stage_variables
    decision_rule_vars = polishing_model.scenarios[0,
                                                   0].util.decision_rule_vars

    polishing_model.obj.deactivate()
    index_set = decision_rule_vars[0].index_set()
    polishing_model.tau_vars = []
    # ==========
    for idx in range(len(decision_rule_vars)):
        polishing_model.scenarios[0, 0].add_component(
            "polishing_var_" + str(idx),
            Var(index_set, initialize=1e6, domain=NonNegativeReals))
        polishing_model.tau_vars.append(
            getattr(polishing_model.scenarios[0, 0],
                    "polishing_var_" + str(idx)))
    # ==========
    this_iter = polishing_model.scenarios[
        max(polishing_model.scenarios.keys())[0], 0]
    nom_block = polishing_model.scenarios[0, 0]
    if config.objective_focus == ObjectiveType.nominal:
        obj_val = value(this_iter.second_stage_objective +
                        this_iter.first_stage_objective)
        polishing_model.scenarios[0,0].polishing_constraint = \
            Constraint(expr=obj_val >= nom_block.second_stage_objective + nom_block.first_stage_objective)
    elif config.objective_focus == ObjectiveType.worst_case:
        polishing_model.zeta.fix(
        )  # Searching equivalent optimal solutions given optimal zeta

    # === Make absolute value constraints on polishing_vars
    polishing_model.scenarios[
        0, 0].util.absolute_var_constraints = cons = ConstraintList()
    uncertain_params = nom_block.util.uncertain_params
    if config.decision_rule_order == 1:
        for i, tau in enumerate(polishing_model.tau_vars):
            for j in range(len(this_iter.util.decision_rule_vars[i])):
                if j == 0:
                    cons.add(
                        -tau[j] <= this_iter.util.decision_rule_vars[i][j])
                    cons.add(this_iter.util.decision_rule_vars[i][j] <= tau[j])
                else:
                    cons.add(
                        -tau[j] <= this_iter.util.decision_rule_vars[i][j] *
                        uncertain_params[j - 1])
                    cons.add(this_iter.util.decision_rule_vars[i][j] *
                             uncertain_params[j - 1] <= tau[j])
    elif config.decision_rule_order == 2:
        param_index_list = list(range(len(uncertain_params)))
        index_pairs = list(it.combinations(param_index_list, 2))
        for i, tau in enumerate(polishing_model.tau_vars):
            Z = this_iter.util.decision_rule_vars[i]
            indices = list(range(len(Z)))
            for r in indices:
                if r == 0:
                    cons.add(-tau[r] <= Z[r])
                    cons.add(Z[r] <= tau[r])
                elif r <= len(uncertain_params) and r > 0:
                    cons.add(-tau[r] <= Z[r] * uncertain_params[r - 1])
                    cons.add(Z[r] * uncertain_params[r - 1] <= tau[r])
                elif r <= len(indices) - len(uncertain_params) - 1 and r > len(
                        uncertain_params):
                    cons.add(-tau[r] <= Z[r] * uncertain_params[index_pairs[
                        r - len(uncertain_params) - 1][0]] * uncertain_params[
                            index_pairs[r - len(uncertain_params) - 1][1]])
                    cons.add(Z[r] * uncertain_params[index_pairs[
                        r - len(uncertain_params) - 1][0]] *
                             uncertain_params[index_pairs[
                                 r - len(uncertain_params) - 1][1]] <= tau[r])
                elif r > len(indices) - len(uncertain_params) - 1:
                    cons.add(-tau[r] <= Z[r] *
                             uncertain_params[r - len(index_pairs) -
                                              len(uncertain_params) - 1]**2)
                    cons.add(Z[r] * uncertain_params[r - len(index_pairs) -
                                                     len(uncertain_params) -
                                                     1]**2 <= tau[r])
    else:
        raise NotImplementedError(
            "Decision rule variable polishing has not been generalized to decision_rule_order "
            + str(config.decision_rule_order) + ".")

    polishing_model.scenarios[0,0].polishing_obj = \
        Objective(expr=sum(sum(tau[j] for j in tau.index_set()) for tau in polishing_model.tau_vars))

    # === Fix design
    for d in first_stage_variables:
        d.fix()

    # === Unfix DR vars
    num_dr_vars = len(model.scenarios[
        0, 0].util.decision_rule_vars[0])  # there is at least one dr var
    num_uncertain_params = len(config.uncertain_params)

    if model.const_efficiency_applied:
        for d in decision_rule_vars:
            for i in range(1, num_dr_vars):
                d[i].fix(0)
                d[0].unfix()
    elif model.linear_efficiency_applied:
        for d in decision_rule_vars:
            d.unfix()
            for i in range(num_uncertain_params + 1, num_dr_vars):
                d[i].fix(0)
    else:
        for d in decision_rule_vars:
            d.unfix()

    # === Unfix all control var values
    for block in polishing_model.scenarios.values():
        for c in block.util.second_stage_variables:
            c.unfix()
        if model.const_efficiency_applied:
            for d in block.util.decision_rule_vars:
                for i in range(1, num_dr_vars):
                    d[i].fix(0)
                    d[0].unfix()
        elif model.linear_efficiency_applied:
            for d in block.util.decision_rule_vars:
                d.unfix()
                for i in range(num_uncertain_params + 1, num_dr_vars):
                    d[i].fix(0)
        else:
            for d in block.util.decision_rule_vars:
                d.unfix()

    # === Solve the polishing model
    polish_soln = MasterResult()
    solver = config.global_solver

    if not solver.available():
        raise RuntimeError("NLP solver %s is not available." % config.solver)
    try:
        results = solver.solve(polishing_model, tee=config.tee)
        polish_soln.termination_condition = results.solver.termination_condition
    except ValueError as err:
        polish_soln.pyros_termination_condition = pyrosTerminationCondition.subsolver_error
        polish_soln.termination_condition = tc.error
        raise

    polish_soln.fsv_values = list(
        v.value
        for v in polishing_model.scenarios[0, 0].util.first_stage_variables)
    polish_soln.ssv_values = list(
        v.value
        for v in polishing_model.scenarios[0, 0].util.second_stage_variables)
    polish_soln.first_stage_objective = value(nom_block.first_stage_objective)
    polish_soln.second_stage_objective = value(
        nom_block.second_stage_objective)

    # === Process solution by termination condition
    acceptable = [tc.optimal, tc.locallyOptimal, tc.feasible]
    if polish_soln.termination_condition not in acceptable:
        return results

    for i, d in enumerate(
            model_data.master_model.scenarios[0, 0].util.decision_rule_vars):
        for index in d:
            d[index].set_value(polishing_model.scenarios[
                0, 0].util.decision_rule_vars[i][index].value,
                               skip_validation=True)

    return results
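A generic sketch of the absolute-value reformulation behind the polishing constraints above: to minimize the magnitudes of quantities w_i, introduce nonnegative tau_i with -tau_i <= w_i <= tau_i and minimize their sum (illustrative only, not tied to the PyROS scenario blocks).

from pyomo.environ import (ConcreteModel, Var, NonNegativeReals,
                           ConstraintList, Objective, minimize)

m = ConcreteModel()
m.w = Var(range(3), initialize={0: -2.0, 1: 0.5, 2: 1.0})
m.tau = Var(range(3), domain=NonNegativeReals, initialize=1e6)

# Epigraph constraints: tau_i bounds |w_i| from above
m.abs_cons = ConstraintList()
for i in range(3):
    m.abs_cons.add(-m.tau[i] <= m.w[i])
    m.abs_cons.add(m.w[i] <= m.tau[i])

# At optimality (with the w_i pinned by the rest of the model), tau_i = |w_i|
m.polish_obj = Objective(expr=sum(m.tau[i] for i in range(3)), sense=minimize)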
Example no. 9
def solver_call_master(model_data, config, solver, solve_data):
    '''
    Function interfacing with the optimization solver(s) for the master problem
    :param model_data: master problem data container
    :param config: the config block
    :param solver: the primary subordinate solver (tried first, before any backup solvers)
    :param solve_data: results data container for the master solution
    :return:
    '''
    nlp_model = model_data.master_model
    master_soln = solve_data
    solver_term_cond_dict = {}

    if config.solve_master_globally:
        backup_solvers = deepcopy(config.backup_global_solvers)
    else:
        backup_solvers = deepcopy(config.backup_local_solvers)
    backup_solvers.insert(0, solver)

    if not solver.available():
        raise RuntimeError("NLP solver %s is not available." % config.solver)

    higher_order_decision_rule_efficiency(config, model_data)

    while len(backup_solvers) > 0:
        solver = backup_solvers.pop(0)
        try:
            results = solver.solve(nlp_model, tee=config.tee)
        except ValueError as err:
            if 'Cannot load a SolverResults object with bad status: error' in str(
                    err):
                results.solver.termination_condition = tc.error
                results.solver.message = str(err)
                master_soln.results = results
                master_soln.pyros_termination_condition = pyrosTerminationCondition.subsolver_error
                return master_soln, ()
            else:
                raise
        solver_term_cond_dict[str(solver)] = str(
            results.solver.termination_condition)
        master_soln.termination_condition = results.solver.termination_condition
        master_soln.pyros_termination_condition = None  # determined later in the algorithm
        master_soln.fsv_vals = list(
            v.value
            for v in nlp_model.scenarios[0, 0].util.first_stage_variables)

        if config.objective_focus is ObjectiveType.nominal:
            master_soln.ssv_vals = list(
                v.value
                for v in nlp_model.scenarios[0, 0].util.second_stage_variables)
            master_soln.second_stage_objective = value(
                nlp_model.scenarios[0, 0].second_stage_objective)
        else:
            idx = max(nlp_model.scenarios.keys())[0]
            master_soln.ssv_vals = list(
                v.value
                for v in nlp_model.scenarios[idx,
                                             0].util.second_stage_variables)
            master_soln.second_stage_objective = value(
                nlp_model.scenarios[idx, 0].second_stage_objective)
        master_soln.first_stage_objective = value(
            nlp_model.scenarios[0, 0].first_stage_objective)

        master_soln.nominal_block = nlp_model.scenarios[0, 0]
        master_soln.results = results
        master_soln.master_model = nlp_model

        master_soln.master_subsolver_results = process_termination_condition_master_problem(
            config=config, results=results)

        if master_soln.master_subsolver_results[0] is False:
            return master_soln

    # === At this point, all sub-solvers have been tried and none returned an acceptable status or return code
    save_dir = config.subproblem_file_directory
    if save_dir and config.keepfiles:
        name = os.path.join(
            save_dir,
            config.uncertainty_set.type + "_" + model_data.original.name +
            "_master_" + str(model_data.iteration) + ".bar")
        nlp_model.write(name, io_options={'symbolic_solver_labels': True})
        output_logger(config=config,
                      master_error=True,
                      status_dict=solver_term_cond_dict,
                      filename=name,
                      iteration=model_data.iteration)
    master_soln.pyros_termination_condition = pyrosTerminationCondition.subsolver_error
    return master_soln
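A generic sketch (hypothetical helper, not PyROS code) of the fallback pattern used above: try each subordinate solver in order and stop at the first acceptable termination condition.

from pyomo.environ import SolverFactory
from pyomo.opt import TerminationCondition as tc

def solve_with_backups(model, solver_names, tee=False):
    """Try solvers in order; return (results, per-solver termination record)."""
    acceptable = {tc.optimal, tc.locallyOptimal, tc.globallyOptimal, tc.feasible}
    term_conds = {}
    for name in solver_names:
        solver = SolverFactory(name)
        if not solver.available():
            continue
        results = solver.solve(model, tee=tee)
        term_conds[name] = results.solver.termination_condition
        if results.solver.termination_condition in acceptable:
            return results, term_conds
    return None, term_conds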
Example no. 10
def construct_master_feasibility_problem(model_data, config):
    """
    Construct a slack-variable based master feasibility model.
    Initialize all model variables appropriately, and scale slack variables
    as well.

    Parameters
    ----------
    model_data : MasterProblemData
        Master problem data.
    config : ConfigDict
        PyROS solver config.

    Returns
    -------
    model : ConcreteModel
        Slack variable model.
    """

    model = model_data.master_model.clone()
    for obj in model.component_data_objects(Objective):
        obj.deactivate()
    iteration = model_data.iteration

    # first stage vars are already initialized appropriately.
    # initialize second-stage DOF variables using DR equation expressions
    if model.scenarios[iteration, 0].util.second_stage_variables:
        for blk in model.scenarios[iteration, :]:
            for eq in blk.util.decision_rule_eqns:
                vars_in_dr_eq = ComponentSet(identify_variables(eq.body))
                ssv_set = ComponentSet(blk.util.second_stage_variables)

                # get second-stage var in DR eqn. should only be one var
                ssv_in_dr_eq = [
                    var for var in vars_in_dr_eq if var in ssv_set
                ][0]

                # update var value for initialization: zero out the second-stage var,
                # then evaluate the DR expression (fine since DR eqns are
                # f(d) - z == 0, not z - f(d) == 0)
                ssv_in_dr_eq.set_value(0)
                ssv_in_dr_eq.set_value(value(eq.body))

    # initialize state vars to previous master solution values
    if iteration != 0:
        stvar_map = get_state_vars(model, [iteration, iteration - 1])
        for current, prev in zip(stvar_map[iteration],
                                 stvar_map[iteration - 1]):
            current.set_value(value(prev))

    # constraints to which slacks should be added
    # (all the constraints for the current iteration, except the DR eqns)
    targets = []
    for blk in model.scenarios[iteration, :]:
        if blk.util.second_stage_variables:
            dr_eqs = blk.util.decision_rule_eqns
        else:
            dr_eqs = list()

        targets.extend([
            con for con in blk.component_data_objects(
                Constraint, active=True, descend_into=True)
            if con not in dr_eqs
        ])

    # retain original constraint exprs (for slack initialization and scaling)
    pre_slack_con_exprs = ComponentMap([(con, con.body) for con in targets])

    # add slack variables and objective
    # inequalities g(v) <= b become g(v) - s^- <= b
    # equalities h(v) == b become h(v) - s^- + s^+ == b
    TransformationFactory("core.add_slack_variables").apply_to(model,
                                                               targets=targets)
    slack_vars = ComponentSet(
        model._core_add_slack_variables.component_data_objects(
            Var, descend_into=True))

    # initialize and scale slack variables
    for con in pre_slack_con_exprs:
        # obtain slack vars in updated constraints
        # and their coefficients (+/-1) in the constraint expression
        repn = generate_standard_repn(con.body)
        slack_var_coef_map = ComponentMap()
        for idx in range(len(repn.linear_vars)):
            var = repn.linear_vars[idx]
            if var in slack_vars:
                slack_var_coef_map[var] = repn.linear_coefs[idx]

        slack_substitution_map = dict()

        for slack_var in slack_var_coef_map:
            # coefficient determines whether the slack is a +ve or -ve slack
            if slack_var_coef_map[slack_var] == -1:
                con_slack = max(0, value(pre_slack_con_exprs[con]))
            else:
                con_slack = max(0, -value(pre_slack_con_exprs[con]))

            # initialize slack var, evaluate scaling coefficient
            scaling_coeff = 1
            slack_var.set_value(con_slack)

            # update expression replacement map
            slack_substitution_map[id(slack_var)] = (scaling_coeff * slack_var)

        # finally, scale slack(s)
        con.set_value((
            replace_expressions(con.lower, slack_substitution_map),
            replace_expressions(con.body, slack_substitution_map),
            replace_expressions(con.upper, slack_substitution_map),
        ))

    return model
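A simple sketch of the slack-initialization idea above: start each slack at the current violation of its constraint so the slacked model begins feasible.

from pyomo.environ import ConcreteModel, Var, Constraint, value

m = ConcreteModel()
m.x = Var(initialize=3.0)
m.c = Constraint(expr=m.x <= 1.0)   # violated by 2.0 at the current point

# For g(v) <= b rewritten as g(v) - s <= b, a feasible start is s = max(0, g(v) - b)
violation = max(0, value(m.c.body) - value(m.c.upper))
print(violation)  # 2.0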