Example #1
    def _generate_model(self):
        self.model = ConcreteModel()
        model = self.model
        model._name = self.description

        model.s = RangeSet(1, 12)
        model.x = Var(model.s)
        model.x[1].setlb(-1)
        model.x[1].setub(1)
        model.x[2].setlb(-1)
        model.x[2].setub(1)
        model.obj = Objective(expr=sum(model.x[i] * ((-1)**(i + 1))
                                       for i in model.x.index_set()))
        model.c = ConstraintList()
        # skip the first two indices so the constraint index matches the variable index
        model.c.add(Constraint.Skip)
        model.c.add(Constraint.Skip)
        model.c.add(model.x[3] >= -1.)
        model.c.add(model.x[4] <= 1.)
        model.c.add(model.x[5] == -1.)
        model.c.add(model.x[6] == -1.)
        model.c.add(model.x[7] == 1.)
        model.c.add(model.x[8] == 1.)
        model.c.add((-1., model.x[9], -1.))
        model.c.add((-1., model.x[10], -1.))
        model.c.add((1., model.x[11], 1.))
        model.c.add((1., model.x[12], 1.))

        model.c_inactive = ConstraintList()
        # skip the first two indices so the constraint index matches the variable index
        model.c_inactive.add(Constraint.Skip)
        model.c_inactive.add(Constraint.Skip)
        model.c_inactive.add(model.x[3] >= -2.)
        model.c_inactive.add(model.x[4] <= 2.)
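
# A minimal standalone sketch (not part of the original example) of the
# Constraint.Skip trick used above: each add(Constraint.Skip) consumes the
# next index without storing a constraint, so subsequent constraints land at
# indices matching their variables.
from pyomo.environ import ConcreteModel, Var, Constraint, ConstraintList

m = ConcreteModel()
m.x = Var([1, 2, 3])
m.c = ConstraintList()
m.c.add(Constraint.Skip)  # index 1 consumed, nothing stored
m.c.add(Constraint.Skip)  # index 2 consumed, nothing stored
m.c.add(m.x[3] >= 0)      # stored as m.c[3], aligned with m.x[3]
assert 3 in m.c and 1 not in m.c and len(m.c) == 1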
Example #2
    def _generate_model(self):
        self.model = ConcreteModel()
        model = self.model
        model._name = self.description

        model.s = Set(initialize=[1,2])
        model.x = Var()
        model.y = Var()
        model.z = Var(bounds=(0,None))

        model.obj = Objective(model.s,
                              rule=inactive_index_LP_obj_rule)
        model.OBJ = Objective(expr=model.x+model.y)
        model.obj[1].deactivate()
        model.OBJ.deactivate()
        model.c1 = ConstraintList()
        model.c1.add(model.x<=1)   # index=1
        model.c1.add(model.x>=-1)  # index=2
        model.c1.add(model.y<=1)   # index=3
        model.c1.add(model.y>=-1)  # index=4
        model.c1[1].deactivate()
        model.c1[4].deactivate()
        model.c2 = Constraint(model.s,
                              rule=inactive_index_LP_c2_rule)

        model.b = Block()
        model.b.c = Constraint(expr=model.z >= 2)
        model.B = Block(model.s)
        model.B[1].c = Constraint(expr=model.z >= 3)
        model.B[2].c = Constraint(expr=model.z >= 1)

        model.b.deactivate()
        model.B.deactivate()
        model.B[2].activate()
Example #3
def generate_norm_inf_objective_function(model, setpoint_model, discrete_only=False):
    """This function generates objective (PF-OA main problem) for minimum Norm Infinity distance to setpoint_model.

    Norm-Infinity distance of (x,y) = \max_i |x_i - y_i|.

    Args:
        model (Pyomo model): the model that needs new objective function.
        setpoint_model (Pyomo model): the model that provides the base point for us to calculate the distance.
        discrete_only (bool, optional): whether to optimize only over the distance between the discrete variables. Defaults to False.

    Returns:
        Objective: the norm infinity objective function
    """
    # skip objective_value variable and slack_var variables
    var_filter = (lambda v: v.is_integer()) if discrete_only \
        else (lambda v: v.name != 'MindtPy_utils.objective_value' and
              'MindtPy_utils.feas_opt.slack_var' not in v.name)
    model_vars = list(filter(var_filter, model.MindtPy_utils.variable_list))
    setpoint_vars = list(
        filter(var_filter, setpoint_model.MindtPy_utils.variable_list))
    assert len(model_vars) == len(
        setpoint_vars), 'Trying to generate Norm Infinity objective function for models with different number of variables'
    model.MindtPy_utils.del_component('L_infinity_obj')
    obj_blk = model.MindtPy_utils.L_infinity_obj = Block()
    obj_blk.L_infinity_obj_var = Var(domain=Reals, bounds=(0, None))
    obj_blk.abs_reform = ConstraintList()
    for v_model, v_setpoint in zip(model_vars,
                                   setpoint_vars):
        obj_blk.abs_reform.add(
            expr=v_model - v_setpoint.value >= -obj_blk.L_infinity_obj_var)
        obj_blk.abs_reform.add(
            expr=v_model - v_setpoint.value <= obj_blk.L_infinity_obj_var)

    return Objective(expr=obj_blk.L_infinity_obj_var)
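
# A minimal standalone sketch (names `a` and `t` are illustrative, not from
# the original code) of the same epigraph reformulation: minimize
# max_i |x_i - a_i| by bounding every deviation with a single variable t.
from pyomo.environ import (ConcreteModel, Var, Objective, ConstraintList,
                           NonNegativeReals)

m = ConcreteModel()
m.x = Var([1, 2, 3])
a = {1: 1.0, 2: -2.0, 3: 0.5}       # setpoint values
m.t = Var(domain=NonNegativeReals)  # epigraph variable for the max
m.abs_reform = ConstraintList()
for i in m.x:
    m.abs_reform.add(m.x[i] - a[i] >= -m.t)
    m.abs_reform.add(m.x[i] - a[i] <= m.t)
m.obj = Objective(expr=m.t)  # minimizing t minimizes max_i |x_i - a_i|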
Example #4
    def _generate_model(self):
        self.model = ConcreteModel()
        model = self.model
        model._name = self.description

        model.f = Var()
        model.x = Var(bounds=(1,3))
        model.fi = Param([1,2,3],mutable=True)
        model.fi[1] = 1.0
        model.fi[2] = 2.0
        model.fi[3] = 0.0
        model.xi = Param([1,2,3],mutable=True)
        model.xi[1] = 1.0
        model.xi[2] = 2.0
        model.xi[3] = 3.0
        model.p = Var(within=NonNegativeReals)
        model.n = Var(within=NonNegativeReals)
        model.lmbda = Var([1,2,3])
        model.obj = Objective(expr=model.p+model.n)
        model.c1 = ConstraintList()
        model.c1.add((0.0, model.lmbda[1], 1.0))
        model.c1.add((0.0, model.lmbda[2], 1.0))
        model.c1.add(0.0 <= model.lmbda[3])
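        # SOS2 on lmbda: at most two adjacent lambdas may be nonzero, which
        # together with the convexity row c3 makes c4/c5 a piecewise-linear
        # interpolation of the (xi, fi) breakpoints.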
        model.c2 = SOSConstraint(var=model.lmbda, index=[1,2,3], sos=2)
        model.c3 = Constraint(expr=sum_product(model.lmbda) == 1)
        model.c4 = Constraint(expr=model.f==sum_product(model.fi,model.lmbda))
        model.c5 = Constraint(expr=model.x==sum_product(model.xi,model.lmbda))
        model.x = 2.75
        model.x.fixed = True

        # Make an empty SOSConstraint
        model.c6 = SOSConstraint(var=model.lmbda, index=[1,2,3], sos=2)
        model.c6.set_items([],[])
        assert len(list(model.c6.get_items())) == 0
Example #5
def add_affine_cuts(nlp_result, solve_data, config):
    with time_code(solve_data.timing, "affine cut generation"):
        m = solve_data.linear_GDP
        if config.calc_disjunctive_bounds:
            with time_code(solve_data.timing, "disjunctive variable bounding"):
                TransformationFactory(
                    'contrib.compute_disj_var_bounds').apply_to(
                        m,
                        solver=config.mip_solver
                        if config.obbt_disjunctive_bounds else None)
        config.logger.info("Adding affine cuts.")
        GDPopt = m.GDPopt_utils
        counter = 0
        for var, val in zip(GDPopt.variable_list, nlp_result.var_values):
            if val is not None and not var.fixed:
                var.value = val

        for constr in constraints_in_True_disjuncts(m, config):
            # Note: this includes constraints that are deactivated in the current model (linear_GDP)

            disjunctive_var_bounds = disjunctive_bounds(constr.parent_block())

            if constr.body.polynomial_degree() in (1, 0):
                continue

            vars_in_constr = list(identify_variables(constr.body))
            if any(var.value is None for var in vars_in_constr):
                continue  # a variable has no values

            # mcpp stuff
            mc_eqn = mc(constr.body, disjunctive_var_bounds)
            # mc_eqn = mc(constr.body)
            ccSlope = mc_eqn.subcc()
            cvSlope = mc_eqn.subcv()
            ccStart = mc_eqn.concave()
            cvStart = mc_eqn.convex()
            ub_int = min(
                constr.upper,
                mc_eqn.upper()) if constr.has_ub() else mc_eqn.upper()
            lb_int = max(
                constr.lower,
                mc_eqn.lower()) if constr.has_lb() else mc_eqn.lower()

            parent_block = constr.parent_block()
            # Create a block on which to put outer approximation cuts.
            aff_utils = parent_block.component('GDPopt_aff')
            if aff_utils is None:
                aff_utils = parent_block.GDPopt_aff = Block(
                    doc="Block holding affine constraints")
                aff_utils.GDPopt_aff_cons = ConstraintList()
            aff_cuts = aff_utils.GDPopt_aff_cons
            concave_cut = sum(ccSlope[var] * (var - var.value)
                              for var in vars_in_constr) + ccStart >= lb_int
            convex_cut = sum(cvSlope[var] * (var - var.value)
                             for var in vars_in_constr) + cvStart <= ub_int
            aff_cuts.add(expr=concave_cut)
            aff_cuts.add(expr=convex_cut)
            counter += 2

        config.logger.info("Added %s affine cuts" % counter)
Example #6
def setup_solver_environment(model, config):
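    # Presumably used as a context manager (the decorator site is elided from
    # this excerpt): the caller enters via ``with ... as solve_data`` and the
    # code after ``yield`` runs when that block exits.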
    solve_data = GDPoptSolveData()  # data object for storing solver state
    solve_data.config = config
    solve_data.results = SolverResults()
    solve_data.timing = Container()
    min_logging_level = logging.INFO if config.tee else None
    with time_code(solve_data.timing, 'total', is_main_timer=True), \
            lower_logger_level_to(config.logger, min_logging_level), \
            create_utility_block(model, 'GDPopt_utils', solve_data):

        # Create a working copy of the original model
        solve_data.original_model = model
        solve_data.working_model = model.clone()
        setup_results_object(solve_data, config)
        solve_data.active_strategy = config.strategy
        util_block = solve_data.working_model.GDPopt_utils

        # Save model initial values.
        # These can be used later to initialize NLP subproblems.
        solve_data.initial_var_values = list(v.value
                                             for v in util_block.variable_list)
        solve_data.best_solution_found = None

        # Integer cuts exclude particular discrete decisions
        util_block.integer_cuts = ConstraintList(doc='integer cuts')

        # Set up iteration counters
        solve_data.master_iteration = 0
        solve_data.mip_iteration = 0
        solve_data.nlp_iteration = 0

        # set up bounds
        solve_data.LB = float('-inf')
        solve_data.UB = float('inf')
        solve_data.iteration_log = {}

        # Flag indicating whether the solution improved in the past
        # iteration or not
        solve_data.feasible_solution_improved = False

        yield solve_data  # yield setup solver environment

        if (solve_data.best_solution_found is not None
                and solve_data.best_solution_found
                is not solve_data.original_model):
            # Update values on the original model
            copy_var_list_values(
                from_list=solve_data.best_solution_found.GDPopt_utils.
                variable_list,
                to_list=solve_data.original_model.GDPopt_utils.variable_list,
                config=config)

    # Finalize results object
    solve_data.results.problem.lower_bound = solve_data.LB
    solve_data.results.problem.upper_bound = solve_data.UB
    solve_data.results.solver.iterations = solve_data.master_iteration
    solve_data.results.solver.timing = solve_data.timing
    solve_data.results.solver.user_time = solve_data.timing.total
    solve_data.results.solver.wallclock_time = solve_data.timing.total
Example #7
    def _create_transformation_block(self, context):
        new_xfrm_block_name = unique_component_name(context, 'logic_to_linear')
        new_xfrm_block = Block(doc="Transformation objects for logic_to_linear")
        setattr(context, new_xfrm_block_name, new_xfrm_block)

        new_xfrm_block.transformed_constraints = ConstraintList()
        new_xfrm_block.augmented_vars = BooleanVarList()
        new_xfrm_block.augmented_vars_asbinary = VarList(domain=Binary)

        return new_xfrm_block
Example #8
    def test_bad_dof(self):
        m = ConcreteModel()
        m.x = Var()
        m.y = Var()
        m.c = ConstraintList()
        m.c.add(m.x + m.y == 1)
        m.c.add(m.x - m.y == 0)
        m.c.add(2 * m.x - 3 * m.y == 1)
        res = self.ipopt.solve(m)
        self.assertEqual(str(res.solver.status), "warning")
        self.assertEqual(str(res.solver.termination_condition), "other")
        self.assertTrue("Too few degrees of freedom" in res.solver.message)
Example #9
def MindtPy_initialize_master(solve_data, config):
    """Initialize the decomposition algorithm.
    This includes generating the initial cuts required to build the master
    problem.
    """
    # If single tree is activated, add bounds for unbounded variables in nonlinear constraints to avoid an unbounded master problem.
    if config.single_tree:
        var_bound_add(solve_data, config)

    m = solve_data.mip = solve_data.working_model.clone()
    MindtPy = m.MindtPy_utils
    m.dual.deactivate()

    if config.strategy == 'OA':
        calc_jacobians(solve_data, config)  # preload jacobians
        MindtPy.MindtPy_linear_cuts.oa_cuts = ConstraintList(
            doc='Outer approximation cuts')
    # elif config.strategy == 'ECP':
    #     calc_jacobians(solve_data, config)  # preload jacobians
    #     MindtPy.MindtPy_linear_cuts.ecp_cuts = ConstraintList(
    #         doc='Extended Cutting Planes')
    # elif config.strategy == 'PSC':
    #     detect_nonlinear_vars(solve_data, config)
    #     MindtPy.MindtPy_linear_cuts.psc_cuts = ConstraintList(
    #         doc='Partial surrogate cuts')
    # elif config.strategy == 'GBD':
    #     MindtPy.MindtPy_linear_cuts.gbd_cuts = ConstraintList(
    #         doc='Generalized Benders cuts')

    # Set default initialization_strategy
    if config.init_strategy is None:
        if config.strategy == 'OA':
            config.init_strategy = 'rNLP'
        else:
            config.init_strategy = 'max_binary'
    # Do the initialization
    if config.init_strategy == 'rNLP':
        init_rNLP(solve_data, config)
    elif config.init_strategy == 'max_binary':
        init_max_binaries(solve_data, config)
        # if config.strategy == 'ECP':
        #     add_ecp_cut(solve_data, config)
        # else:

        fixed_nlp, fixed_nlp_result = solve_NLP_subproblem(solve_data, config)
        if fixed_nlp_result.solver.termination_condition in {tc.optimal, tc.locallyOptimal}:
            handle_NLP_subproblem_optimal(fixed_nlp, solve_data, config)
        elif fixed_nlp_result.solver.termination_condition is tc.infeasible:
            handle_NLP_subproblem_infeasible(fixed_nlp, solve_data, config)
        else:
            handle_NLP_subproblem_other_termination(fixed_nlp, fixed_nlp_result.solver.termination_condition,
                                                    solve_data, config)
Example #10
def create_linear_dual_from_matrix_repn(c, b, P, d):
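    # No docstring in the original; from the shapes, P is n-by-m, the dual
    # variable blk.var lives in R^n, and the block encodes the dual system
    # d^T y <= b, P^T y = c, y >= 0.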
    blk = Block()
    n, m = P.shape
    # Add dual variables
    blk.var = Var(range(n), within=NonNegativeReals)
    # Dual objective
    blk.obj = Constraint(expr=quicksum(d[j] * blk.var[j] for j in range(n)) <= b)
    # Dual constraints
    blk.cons = ConstraintList()
    for i in range(m):
        blk.cons.add(quicksum(P[j, i] * blk.var[j] for j in range(n)) == c[i])

    return blk
Example #11
    def set_value(self, expr):
        for e in expr:
            # The user gave us a proper Disjunct block
            # [ESJ 06/21/2019] This is really an issue with the reclassifier,
            # but in the case where you are iteratively adding to an
            # IndexedDisjunct indexed by Any which has already been transformed,
            # the new Disjuncts are Blocks already. This catches them for who
            # they are anyway.
            if isinstance(e, _DisjunctData):
                #if hasattr(e, 'type') and e.ctype == Disjunct:
                self.disjuncts.append(e)
                continue
            # The user was lazy and gave us a single constraint
            # expression or an iterable of expressions
            expressions = []
            if hasattr(e, '__iter__'):
                e_iter = e
            else:
                e_iter = [e]
            for _tmpe in e_iter:
                try:
                    isexpr = _tmpe.is_expression_type()
                except AttributeError:
                    isexpr = False
                if not isexpr or not _tmpe.is_relational():
                    msg = "\n\tin %s" % (type(e), ) if e_iter is e else ""
                    raise ValueError(
                        "Unexpected term for Disjunction %s.\n"
                        "\tExpected a Disjunct object, relational expression, "
                        "or iterable of\n"
                        "\trelational expressions but got %s%s" %
                        (self.name, type(_tmpe), msg))
                else:
                    expressions.append(_tmpe)

            comp = self.parent_component()
            if comp._autodisjuncts is None:
                b = self.parent_block()
                comp._autodisjuncts = Disjunct(Any)
                b.add_component(
                    unique_component_name(b, comp.local_name + "_disjuncts"),
                    comp._autodisjuncts)
                # TODO: I am not at all sure why we need to
                # explicitly construct this block - that should
                # happen automatically.
                comp._autodisjuncts.construct()
            disjunct = comp._autodisjuncts[len(comp._autodisjuncts)]
            disjunct.constraint = c = ConstraintList()
            for e in expressions:
                c.add(e)
            self.disjuncts.append(disjunct)
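
# A minimal usage sketch (hypothetical model, assuming pyomo.gdp) of what the
# method above accepts: iterables of relational expressions are auto-wrapped
# in Disjunct blocks, each holding a ConstraintList.
from pyomo.environ import ConcreteModel, Var
from pyomo.gdp import Disjunction

m = ConcreteModel()
m.x = Var(bounds=(0, 10))
# Each inner list becomes one auto-generated Disjunct.
m.d = Disjunction(expr=[[m.x <= 1], [m.x >= 3]])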
Example #12
    def _generate_model(self):
        self.model = ConcreteModel()
        model = self.model
        model._name = self.description

        model.neg1 = Param(initialize=-1.0, mutable=True)
        model.pos1 = Param(initialize=1.0, mutable=True)

        model.s = RangeSet(1, 12)
        model.x = Var(model.s)
        model.x[1].setlb(model.neg1)
        model.x[1].setub(model.pos1)
        model.x[2].setlb(model.neg1)
        model.x[2].setub(model.pos1)
        model.obj = Objective(expr=sum(model.x[i] * ((-1)**(i))
                                       for i in model.x.index_set()),
                              sense=maximize)
        model.c = ConstraintList()
        # skip the first two indices so the constraint index matches the variable index
        model.c.add(Constraint.Skip)
        model.c.add(Constraint.Skip)
        model.c.add(model.x[3] >= -1.)
        model.c.add(model.x[4] <= 1.)
        model.c.add(model.x[5] == -1.)
        model.c.add(model.x[6] == -1.)
        model.c.add(model.x[7] == 1.)
        model.c.add(model.x[8] == 1.)
        model.c.add((model.neg1, model.x[9], model.neg1))
        model.c.add((-1., model.x[10], -1.))
        model.c.add((1., model.x[11], 1.))
        model.c.add((1., model.x[12], 1.))

        model.c_inactive = ConstraintList()
        # skip the first two indices so the constraint index matches the variable index
        model.c_inactive.add(Constraint.Skip)
        model.c_inactive.add(Constraint.Skip)
        model.c_inactive.add(model.x[3] >= -2.)
        model.c_inactive.add(model.x[4] <= 2.)
Example #13
def generate_norm1_norm_constraint(model,
                                   setpoint_model,
                                   config,
                                   discrete_only=True):
    r"""This function generates constraint (PF-OA main problem) for minimum Norm1 distance to setpoint_model.

    Norm constraint is used to guarantees the monotonicity of the norm objective value sequence of all iterations
    Norm1 distance of (x,y) = \sum_i |x_i - y_i|.
    Ref: Paper 'A storm of feasibility pumps for nonconvex MINLP' Eq. (16).

    Parameters
    ----------
    model : Pyomo model
        The model that needs the norm constraint.
    setpoint_model : Pyomo model
        The model that provides the base point for us to calculate the distance.
    config : ConfigBlock
        The specific configurations for MindtPy.
    discrete_only : bool, optional
        Whether to optimize only over the distance between the discrete variables; True by default.
    """
    var_filter = (lambda v: v.is_integer()) if discrete_only \
        else (lambda v: True)
    model_vars = list(filter(var_filter, model.MindtPy_utils.variable_list))
    setpoint_vars = list(
        filter(var_filter, setpoint_model.MindtPy_utils.variable_list))
    assert len(model_vars) == len(
        setpoint_vars
    ), 'Trying to generate Norm1 norm constraint for models with different number of variables'
    norm_constraint_blk = model.MindtPy_utils.L1_norm_constraint = Block()
    norm_constraint_blk.L1_slack_idx = RangeSet(len(model_vars))
    norm_constraint_blk.L1_slack_var = Var(norm_constraint_blk.L1_slack_idx,
                                           domain=Reals,
                                           bounds=(0, None))
    norm_constraint_blk.abs_reform = ConstraintList()
    for idx, v_model, v_setpoint in zip(norm_constraint_blk.L1_slack_idx,
                                        model_vars, setpoint_vars):
        norm_constraint_blk.abs_reform.add(
            expr=v_model -
            v_setpoint.value >= -norm_constraint_blk.L1_slack_var[idx])
        norm_constraint_blk.abs_reform.add(
            expr=v_model -
            v_setpoint.value <= norm_constraint_blk.L1_slack_var[idx])
    rhs = config.fp_norm_constraint_coef * \
        sum(abs(v_model.value-v_setpoint.value)
            for v_model, v_setpoint in zip(model_vars, setpoint_vars))
    norm_constraint_blk.sum_slack = Constraint(
        expr=sum(norm_constraint_blk.L1_slack_var[idx]
                 for idx in norm_constraint_blk.L1_slack_idx) <= rhs)
Example #14
def add_affine_cuts(nlp_result, solve_data, config):
    m = solve_data.linear_GDP
    config.logger.info("Adding affine cuts.")
    GDPopt = m.GDPopt_utils
    for var, val in zip(GDPopt.working_var_list, nlp_result.var_values):
        if val is not None and not var.fixed:
            var.value = val

    for constr in constraints_in_True_disjuncts(m, config):
        # for constr in GDPopt.working_nonlinear_constraints:

        if constr not in GDPopt.working_nonlinear_constraints:
            continue

        # if constr.body.polynomial_degree() in (1, 0):
        #     continue

        # TODO check that constraint is on active Disjunct

        vars_in_constr = list(EXPR.identify_variables(constr.body))
        if any(var.value is None for var in vars_in_constr):
            continue  # a variable has no values

        # mcpp stuff
        mc_eqn = mc(constr.body)
        ccSlope = mc_eqn.subcc()
        cvSlope = mc_eqn.subcv()
        ccStart = mc_eqn.concave()
        cvStart = mc_eqn.convex()
        ub_int = min(constr.upper,
                     mc_eqn.upper()) if constr.has_ub() else mc_eqn.upper()
        lb_int = max(constr.lower,
                     mc_eqn.lower()) if constr.has_lb() else mc_eqn.lower()

        parent_block = constr.parent_block()
        # Create a block on which to put outer approximation cuts.
        aff_utils = parent_block.component('GDPopt_aff')
        if aff_utils is None:
            aff_utils = parent_block.GDPopt_aff = Block(
                doc="Block holding affine constraints")
            aff_utils.GDPopt_aff_cons = ConstraintList()
        aff_cuts = aff_utils.GDPopt_aff_cons
        concave_cut = sum(ccSlope[var] * (var - var.value)
                          for var in vars_in_constr) + ccStart >= lb_int
        convex_cut = sum(cvSlope[var] * (var - var.value)
                         for var in vars_in_constr) + cvStart <= ub_int
        aff_cuts.add(expr=concave_cut)
        aff_cuts.add(expr=convex_cut)
Example #15
def generate_norm1_objective_function(model,
                                      setpoint_model,
                                      discrete_only=False):
    r"""This function generates objective (PF-OA main problem) for minimum Norm1 distance to setpoint_model.

    Norm1 distance of (x,y) = \sum_i |x_i - y_i|.

    Parameters
    ----------
    model : Pyomo model
        The model that needs a new objective function.
    setpoint_model : Pyomo model
        The model that provides the base point for us to calculate the distance.
    discrete_only : bool, optional
        Whether to optimize only over the distance between the discrete variables; False by default.

    Returns
    -------
    Objective
        The norm1 objective function.
    """
    # skip objective_value variable and slack_var variables
    var_filter = (lambda v: v.is_integer()) if discrete_only \
        else (lambda v: 'MindtPy_utils.objective_value' not in v.name and
              'MindtPy_utils.feas_opt.slack_var' not in v.name)
    model_vars = list(filter(var_filter, model.MindtPy_utils.variable_list))
    setpoint_vars = list(
        filter(var_filter, setpoint_model.MindtPy_utils.variable_list))
    assert len(model_vars) == len(
        setpoint_vars
    ), 'Trying to generate Norm1 objective function for models with different number of variables'
    model.MindtPy_utils.del_component('L1_obj')
    obj_blk = model.MindtPy_utils.L1_obj = Block()
    obj_blk.L1_obj_idx = RangeSet(len(model_vars))
    obj_blk.L1_obj_var = Var(obj_blk.L1_obj_idx,
                             domain=Reals,
                             bounds=(0, None))
    obj_blk.abs_reform = ConstraintList()
    for idx, v_model, v_setpoint in zip(obj_blk.L1_obj_idx, model_vars,
                                        setpoint_vars):
        obj_blk.abs_reform.add(
            expr=v_model - v_setpoint.value >= -obj_blk.L1_obj_var[idx])
        obj_blk.abs_reform.add(
            expr=v_model - v_setpoint.value <= obj_blk.L1_obj_var[idx])

    return Objective(expr=sum(obj_blk.L1_obj_var[idx]
                              for idx in obj_blk.L1_obj_idx))
Example #16
    def set_value(self, expr):
        for e in expr:
            # The user gave us a proper Disjunct block
            if hasattr(e, 'type') and e.type() == Disjunct:
                self.disjuncts.append(e)
                continue
            # The user was lazy and gave us a single constraint
            # expression or an iterable of expressions
            expressions = []
            if hasattr(e, '__iter__'):
                e_iter = e
            else:
                e_iter = [e]
            for _tmpe in e_iter:
                try:
                    isexpr = _tmpe.is_expression()
                except AttributeError:
                    isexpr = False
                if not isexpr or not _tmpe.is_relational():
                    msg = "\n\tin %s" % (type(e), ) if e_iter is e else ""
                    raise ValueError(
                        "Unexpected term for Disjunction %s.\n"
                        "\tExpected a Disjunct object, relational expression, "
                        "or iterable of\n"
                        "\trelational expressions but got %s%s" %
                        (self.name, type(_tmpe), msg))
                else:
                    expressions.append(_tmpe)

            comp = self.parent_component()
            if comp._autodisjuncts is None:
                b = self.parent_block()
                comp._autodisjuncts = Disjunct(Any)
                b.add_component(
                    unique_component_name(b, comp.local_name + "_disjuncts"),
                    comp._autodisjuncts)
                # TODO: I am not at all sure why we need to
                # explicitly construct this block - that should
                # happen automatically.
                comp._autodisjuncts.construct()
            disjunct = comp._autodisjuncts[len(comp._autodisjuncts)]
            disjunct.constraint = c = ConstraintList()
            for e in expressions:
                c.add(e)
            self.disjuncts.append(disjunct)
Example #17
def pysp_instance_creation_callback(scenario_name, node_names):
    global cnt
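    # 'cnt' is assumed to be a module-level counter initialized elsewhere
    # (e.g., cnt = 0); it parameterizes the scenario below and is bumped
    # once per call.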

    model = ConcreteModel()
    model.x = Var(bounds=(0,10))
    model.y = Expression(expr=model.x + 1)
    model.z = Var(bounds=(-10, 10))
    model.q = Expression(expr=model.z**2)
    model.StageCost = Expression([1,2])
    model.StageCost.add(1, model.x)
    model.StageCost.add(2, -model.z)
    model.o = Objective(expr=sum_product(model.StageCost))
    model.c = ConstraintList()
    model.c.add(model.x >= cnt)
    model.c.add(model.z <= cnt**2)

    cnt += 1

    return model
Example #18
def MindtPy_initialize_master(solve_data, config):
    """Initialize the decomposition algorithm.
    This includes generating the initial cuts required to build the master
    problem.
    """
    m = solve_data.mip = solve_data.working_model.clone()
    MindtPy = m.MindtPy_utils

    m.dual.activate()

    if config.strategy == 'OA':
        calc_jacobians(solve_data, config)  # preload jacobians
        MindtPy.MindtPy_linear_cuts.oa_cuts = ConstraintList(
            doc='Outer approximation cuts')
    # elif config.strategy == 'ECP':
    #     calc_jacobians(solve_data, config)  # preload jacobians
    #     MindtPy.MindtPy_linear_cuts.ecp_cuts = ConstraintList(
    #         doc='Extended Cutting Planes')
    # elif config.strategy == 'PSC':
    #     detect_nonlinear_vars(solve_data, config)
    #     MindtPy.MindtPy_linear_cuts.psc_cuts = ConstraintList(
    #         doc='Partial surrogate cuts')
    # elif config.strategy == 'GBD':
    #     MindtPy.MindtPy_linear_cuts.gbd_cuts = ConstraintList(
    #         doc='Generalized Benders cuts')

    # Set default initialization_strategy
    if config.init_strategy is None:
        if config.strategy == 'OA':
            config.init_strategy = 'rNLP'
        else:
            config.init_strategy = 'max_binary'
    # Do the initialization
    if config.init_strategy == 'rNLP':
        init_rNLP(solve_data, config)
    elif config.init_strategy == 'max_binary':
        init_max_binaries(solve_data, config)
        # if config.strategy == 'ECP':
        #     add_ecp_cut(solve_data, config)
        # else:
        solve_NLP_subproblem(solve_data, config)
Example #19
    def _generate_model(self):
        self.model = ConcreteModel()
        model = self.model
        model._name = self.description

        model.x = Var(within=NonNegativeReals)
        model.y = Var(within=NonNegativeReals)
        model.z = Var(within=NonNegativeReals)
        model.fixed_var = Var()
        model.fixed_var.fix(0.2)
        model.q1 = Var(bounds=(None, 0.2))
        model.q2 = Var(bounds=(-2, None))
        model.obj = Objective(expr=model.x + model.q1 - model.q2,
                              sense=maximize)
        model.c0 = Constraint(expr=model.x + model.y + model.z == 1)
        model.qc0 = Constraint(
            expr=model.x**2 + model.y**2 + model.fixed_var <= model.z**2)
        model.qc1 = Constraint(expr=model.x**2 <= model.y * model.z)
        model.c = ConstraintList()
        model.c.add((0, -model.q1**2 + model.fixed_var, None))
        model.c.add((None, model.q2**2 + model.fixed_var, 5))
Example #20
    def _generate_model(self):
        self.model = ConcreteModel()
        model = self.model
        model._name = self.description

        model.x = Var(domain=RangeSet(float('-inf'), None, 0))
        model.y = Var(bounds=(None, float('inf')))
        model.obj = Objective(expr=model.x - model.y)
        model.c = ConstraintList()
        model.c.add(model.x >= -2)
        model.c.add(model.y <= 3)
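        # ConstraintList.add also accepts tuples: (lb, body, ub) builds a
        # ranged constraint and a 2-tuple (lhs, rhs) builds an equality;
        # the asserts below exercise both forms.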
        cdata = model.c.add((0, 1, 3))
        assert cdata.lower == 0
        assert cdata.upper == 3
        assert cdata.body() == 1
        assert not cdata.equality
        cdata = model.c.add((0, 2, 3))
        assert cdata.lower == 0
        assert cdata.upper == 3
        assert cdata.body() == 2
        assert not cdata.equality
        cdata = model.c.add((0, 1, None))
        assert cdata.lower == 0
        assert cdata.upper is None
        assert cdata.body() == 1
        assert not cdata.equality
        cdata = model.c.add((None, 0, 1))
        assert cdata.lower is None
        assert cdata.upper == 1
        assert cdata.body() == 0
        assert not cdata.equality
        cdata = model.c.add((1, 1))
        assert cdata.lower == 1
        assert cdata.upper == 1
        assert cdata.body() == 1
        assert cdata.equality
        model.d = Constraint(rule=lambda m: (float('-inf'), m.x, float('inf')))
        assert not model.d.equality
Example #21
    def solve(self, model, **kwds):
        """Solve the model.
        Warning: this solver is still in beta. Keyword arguments subject to
        change. Undocumented keyword arguments definitely subject to change.
        Warning: at this point in time, if you try to use PSC or GBD with
        anything other than IPOPT as the NLP solver, bad things will happen.
        This is because the suffixes are not in place to extract dual values
        from the variable bounds for any other solver.
        TODO: fix needed with the GBD implementation.
        Args:
            model (Block): a Pyomo model or block to be solved
        """
        config = self.CONFIG(kwds.pop('options', {}))
        config.set_value(kwds)

        # configuration confirmation
        if config.single_tree:
            config.iteration_limit = 1
            config.add_slack = False
            config.add_nogood_cuts = False
            config.mip_solver = 'cplex_persistent'
            config.logger.info(
                "Single tree implementation is activated. The defalt MIP solver is 'cplex_persistent'"
            )
        # if the slacks are fixed to zero, just don't add them
        if config.max_slack == 0.0:
            config.add_slack = False

        if config.strategy == "GOA":
            config.add_nogood_cuts = True
            config.add_slack = True
            config.use_mcpp = True
            config.integer_to_binary = True
            config.use_dual = False
            config.use_fbbt = True

        if config.nlp_solver == "baron":
            config.use_dual = False
        # if ecp tolerance is not provided use bound tolerance
        if config.ecp_tolerance is None:
            config.ecp_tolerance = config.bound_tolerance

        # if the objective function is a constant, the dual bound constraint is not added.
        obj = next(model.component_data_objects(ctype=Objective, active=True))
        if obj.expr.polynomial_degree() == 0:
            config.use_dual_bound = False

        solve_data = MindtPySolveData()
        solve_data.results = SolverResults()
        solve_data.timing = Container()
        solve_data.curr_int_sol = []
        solve_data.prev_int_sol = []

        if config.use_fbbt:
            fbbt(model)
            config.logger.info(
                "Use the fbbt to tighten the bounds of variables")

        solve_data.original_model = model
        solve_data.working_model = model.clone()
        if config.integer_to_binary:
            TransformationFactory('contrib.integer_to_binary'). \
                apply_to(solve_data.working_model)

        new_logging_level = logging.INFO if config.tee else None
        with time_code(solve_data.timing, 'total', is_main_timer=True), \
                lower_logger_level_to(config.logger, new_logging_level), \
                create_utility_block(solve_data.working_model, 'MindtPy_utils', solve_data):
            config.logger.info("---Starting MindtPy---")

            MindtPy = solve_data.working_model.MindtPy_utils
            setup_results_object(solve_data, config)
            process_objective(solve_data, config, use_mcpp=config.use_mcpp)

            # Save model initial values.
            solve_data.initial_var_values = list(
                v.value for v in MindtPy.variable_list)

            # Store the initial model state as the best solution found. If we
            # find no better solution, then we will restore from this copy.
            solve_data.best_solution_found = None
            solve_data.best_solution_found_time = None

            # Record solver name
            solve_data.results.solver.name = 'MindtPy' + str(config.strategy)

            # Validate the model to ensure that MindtPy is able to solve it.
            if not model_is_valid(solve_data, config):
                return

            # Create a model block in which to store the generated feasibility
            # slack constraints. Do not leave the constraints on by default.
            feas = MindtPy.MindtPy_feas = Block()
            feas.deactivate()
            feas.feas_constraints = ConstraintList(
                doc='Feasibility Problem Constraints')

            # Create a model block in which to store the generated linear
            # constraints. Do not leave the constraints on by default.
            lin = MindtPy.MindtPy_linear_cuts = Block()
            lin.deactivate()

            # Integer cuts exclude particular discrete decisions
            lin.integer_cuts = ConstraintList(doc='integer cuts')
            # Feasible integer cuts exclude discrete realizations that have
            # been explored via an NLP subproblem. Depending on model
            # characteristics, the user may wish to revisit NLP subproblems
            # (with a different initialization, for example). Therefore, these
            # cuts are not enabled by default.
            #
            # Note: these cuts will only exclude integer realizations that are
            # not already in the primary integer_cuts ConstraintList.
            lin.feasible_integer_cuts = ConstraintList(
                doc='explored integer cuts')
            lin.feasible_integer_cuts.deactivate()

            # Set up iteration counters
            solve_data.nlp_iter = 0
            solve_data.mip_iter = 0
            solve_data.mip_subiter = 0

            # set up bounds
            solve_data.LB = float('-inf')
            solve_data.UB = float('inf')
            solve_data.LB_progress = [solve_data.LB]
            solve_data.UB_progress = [solve_data.UB]
            if config.single_tree and config.add_nogood_cuts:
                solve_data.stored_bound = {}
            if config.strategy == 'GOA' and config.add_nogood_cuts:
                solve_data.num_no_good_cuts_added = {}

            # Set of NLP iterations for which cuts were generated
            lin.nlp_iters = Set(dimen=1)

            # Set of MIP iterations for which cuts were generated in ECP
            lin.mip_iters = Set(dimen=1)

            if config.feasibility_norm == 'L1' or config.feasibility_norm == 'L2':
                feas.nl_constraint_set = Set(
                    initialize=[
                        i
                        for i, constr in enumerate(MindtPy.constraint_list, 1)
                        if constr.body.polynomial_degree() not in (1, 0)
                    ],
                    doc="Integer index set over the nonlinear constraints."
                    "The set corresponds to the index of nonlinear constraint in constraint_set"
                )
                # Create slack variables for feasibility problem
                feas.slack_var = Var(feas.nl_constraint_set,
                                     domain=NonNegativeReals,
                                     initialize=1)
            else:
                feas.slack_var = Var(domain=NonNegativeReals, initialize=1)

            # Create slack variables for OA cuts
            if config.add_slack:
                lin.slack_vars = VarList(bounds=(0, config.max_slack),
                                         initialize=0,
                                         domain=NonNegativeReals)

            # Flag indicating whether the solution improved in the past
            # iteration or not
            solve_data.solution_improved = False

            if config.nlp_solver == 'ipopt':
                if not hasattr(solve_data.working_model, 'ipopt_zL_out'):
                    solve_data.working_model.ipopt_zL_out = Suffix(
                        direction=Suffix.IMPORT)
                if not hasattr(solve_data.working_model, 'ipopt_zU_out'):
                    solve_data.working_model.ipopt_zU_out = Suffix(
                        direction=Suffix.IMPORT)

            # Initialize the master problem
            with time_code(solve_data.timing, 'initialization'):
                MindtPy_initialize_master(solve_data, config)

            # Algorithm main loop
            with time_code(solve_data.timing, 'main loop'):
                MindtPy_iteration_loop(solve_data, config)

            if solve_data.best_solution_found is not None:
                # Update values in original model
                copy_var_list_values(from_list=solve_data.best_solution_found.
                                     MindtPy_utils.variable_list,
                                     to_list=MindtPy.variable_list,
                                     config=config)
                # MindtPy.objective_value.set_value(
                #     value(solve_data.working_objective_expr, exception=False))
                copy_var_list_values(
                    MindtPy.variable_list,
                    solve_data.original_model.component_data_objects(Var),
                    config)

            solve_data.results.problem.lower_bound = solve_data.LB
            solve_data.results.problem.upper_bound = solve_data.UB

        solve_data.results.solver.timing = solve_data.timing
        solve_data.results.solver.user_time = solve_data.timing.total
        solve_data.results.solver.wallclock_time = solve_data.timing.total

        solve_data.results.solver.iterations = solve_data.mip_iter
        solve_data.results.solver.best_solution_found_time = solve_data.best_solution_found_time

        if config.single_tree:
            solve_data.results.solver.num_nodes = solve_data.nlp_iter - \
                (1 if config.init_strategy == 'rNLP' else 0)

        return solve_data.results
Example #22
def add_outer_approximation_cuts(nlp_result, solve_data, config):
    """Add outer approximation cuts to the linear GDP model."""
    with time_code(solve_data.timing, 'OA cut generation'):
        m = solve_data.linear_GDP
        GDPopt = m.GDPopt_utils
        sign_adjust = -1 if solve_data.objective_sense == minimize else 1

        # copy values over
        for var, val in zip(GDPopt.variable_list, nlp_result.var_values):
            if val is not None and not var.fixed:
                var.value = val

        # TODO some kind of special handling if the dual is phenomenally small?
        config.logger.debug('Adding OA cuts.')

        counter = 0
        if not hasattr(GDPopt, 'jacobians'):
            GDPopt.jacobians = ComponentMap()
        for constr, dual_value in zip(GDPopt.constraint_list,
                                      nlp_result.dual_values):
            if dual_value is None or constr.body.polynomial_degree() in (1, 0):
                continue

            # Determine if the user pre-specified that OA cuts should not be
            # generated for the given constraint.
            parent_block = constr.parent_block()
            ignore_set = getattr(parent_block, 'GDPopt_ignore_OA', None)
            config.logger.debug('Ignore_set %s' % ignore_set)
            if (ignore_set and (constr in ignore_set
                                or constr.parent_component() in ignore_set)):
                config.logger.debug(
                    'OA cut addition for %s skipped because it is in '
                    'the ignore set.' % constr.name)
                continue

            config.logger.debug("Adding OA cut for %s with dual value %s" %
                                (constr.name, dual_value))

            # Cache jacobians
            jacobians = GDPopt.jacobians.get(constr, None)
            if jacobians is None:
                constr_vars = list(identify_variables(constr.body))
                jac_list = differentiate(constr.body, wrt_list=constr_vars)
                jacobians = ComponentMap(zip(constr_vars, jac_list))
                GDPopt.jacobians[constr] = jacobians

            # Create a block on which to put outer approximation cuts.
            oa_utils = parent_block.component('GDPopt_OA')
            if oa_utils is None:
                oa_utils = parent_block.GDPopt_OA = Block(
                    doc="Block holding outer approximation cuts "
                    "and associated data.")
                oa_utils.GDPopt_OA_cuts = ConstraintList()
                oa_utils.GDPopt_OA_slacks = VarList(bounds=(0,
                                                            config.max_slack),
                                                    domain=NonNegativeReals,
                                                    initialize=0)

            oa_cuts = oa_utils.GDPopt_OA_cuts
            slack_var = oa_utils.GDPopt_OA_slacks.add()
            rhs = value(constr.lower) if constr.has_lb() else value(
                constr.upper)
            oa_cuts.add(expr=copysign(1, sign_adjust * dual_value) *
                        (value(constr.body) - rhs + sum(
                            value(jacobians[var]) * (var - value(var))
                            for var in jacobians)) - slack_var <= 0)
            counter += 1

        config.logger.info('Added %s OA cuts' % counter)
Example #23
    def reduce_collocation_points(self,
                                  instance,
                                  var=None,
                                  ncp=None,
                                  contset=None):
        """
        This method will add additional constraints to a model to reduce the
        number of free collocation points (degrees of freedom) for a particular
        variable.

        Parameters
        ----------
        instance : Pyomo model
            The discretized Pyomo model to add constraints to

        var : ``pyomo.environ.Var``
            The Pyomo variable for which the degrees of freedom will be reduced

        ncp : int
            The new number of free collocation points for `var`. Must be
            less than the number of collocation points used in discretizing
            the model.

        contset : ``pyomo.dae.ContinuousSet``
            The :py:class:`ContinuousSet<pyomo.dae.ContinuousSet>` that was
            discretized and for which the `var` will have a reduced number
            of degrees of freedom

        """
        if contset is None:
            raise TypeError("A continuous set must be specified using the "
                            "keyword 'contset'")
        if contset.ctype is not ContinuousSet:
            raise TypeError("The component specified using the 'contset' "
                            "keyword must be a ContinuousSet")
        ds = contset

        if len(self._ncp) == 0:
            raise RuntimeError("This method should only be called after using "
                               "the apply() method to discretize the model")
        elif None in self._ncp:
            tot_ncp = self._ncp[None]
        elif ds.name in self._ncp:
            tot_ncp = self._ncp[ds.name]
        else:
            raise ValueError("ContinuousSet '%s' has not been discretized, "
                             "please call the apply_to() method with this "
                             "ContinuousSet to discretize it before calling "
                             "this method" % ds.name)

        if var is None:
            raise TypeError("A variable must be specified")
        if var.ctype is not Var:
            raise TypeError("The component specified using the 'var' keyword "
                            "must be a variable")

        if ncp is None:
            raise TypeError(
                "The number of collocation points must be specified")
        if ncp <= 0:
            raise ValueError(
                "The number of collocation points must be at least 1")
        if ncp > tot_ncp:
            raise ValueError("The number of collocation points used to "
                             "interpolate an individual variable must be less "
                             "than the number used to discretize the original "
                             "model")
        if ncp == tot_ncp:
            # Nothing to be done
            return instance

        # Check to see if the continuousset is an indexing set of the variable
        if var.dim() == 0:
            raise IndexError("ContinuousSet '%s' is not an indexing set of"
                             " the variable '%s'" % (ds.name, var.name))
        varidx = var.index_set()
        if not hasattr(varidx, 'set_tuple'):
            if ds is not varidx:
                raise IndexError("ContinuousSet '%s' is not an indexing set of"
                                 " the variable '%s'" % (ds.name, var.name))
        elif ds not in varidx.set_tuple:
            raise IndexError("ContinuousSet '%s' is not an indexing set of the"
                             " variable '%s'" % (ds.name, var.name))

        if var.name in self._reduced_cp:
            temp = self._reduced_cp[var.name]
            if ds.name in temp:
                raise RuntimeError("Variable '%s' has already been constrained"
                                   " to a reduced number of collocation points"
                                   " over ContinuousSet '%s'.")
            else:
                temp[ds.name] = ncp
        else:
            self._reduced_cp[var.name] = {ds.name: ncp}

        # TODO: Use unique_component_name for this
        list_name = var.local_name + "_interpolation_constraints"

        instance.add_component(list_name, ConstraintList())
        conlist = instance.find_component(list_name)

        t = list(ds)
        fe = ds._fe
        info = get_index_information(var, ds)
        tmpidx = info['non_ds']
        idx = info['index function']

        # Iterate over non_ds indices
        for n in tmpidx:
            # Iterate over finite elements
            for i in range(0, len(fe) - 1):
                # Iterate over collocation points
                for k in range(1, tot_ncp - ncp + 1):
                    if ncp == 1:
                        # Constant over each finite element
                        conlist.add(
                            var[idx(n, i, k)] == var[idx(n, i, tot_ncp)])
                    else:
                        tmp = ds.ord(fe[i]) - 1
                        tmp2 = ds.ord(fe[i + 1]) - 1
                        ti = t[tmp + k]
                        tfit = t[tmp2 - ncp + 1:tmp2 + 1]
                        coeff = self._interpolation_coeffs(ti, tfit)
                        conlist.add(var[idx(n, i, k)] == sum(
                            var[idx(n, i, j)] * next(coeff)
                            for j in range(tot_ncp - ncp + 1, tot_ncp + 1)))

        return instance
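
# A hedged usage sketch (not from the original source): discretize with
# collocation, then restrict a control input to one free point per finite
# element, making it piecewise constant over each element.
from pyomo.environ import ConcreteModel, Var, TransformationFactory
from pyomo.dae import ContinuousSet

m = ConcreteModel()
m.t = ContinuousSet(bounds=(0, 1))
m.u = Var(m.t)  # control input to hold constant on each finite element

discretizer = TransformationFactory('dae.collocation')
discretizer.apply_to(m, nfe=10, ncp=3, scheme='LAGRANGE-RADAU')
m = discretizer.reduce_collocation_points(m, var=m.u, ncp=1, contset=m.t)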
Example #24
    # Secondary objective is no_pref (avoid scheduling workers for only one day of the weekend)
    return sum(m.no_pref[worker]
               for worker in workers) + sum(c * m.needed[worker]
                                            for worker in workers)


# Now we can add the objective function to the model and set it to be minimized.
# The objective now is to find a schedule minimizing the number of workers needed and, once that is done, also reduce
# the number of workers who have to work on Sundays but not on Saturdays.
# The constant c used to multiply needed workers makes sure that this remains the primary objective.
model.obj = Objective(rule=obj_rule, sense=minimize)

# Now we can add the constraints that describe our food store.

# We create a set of constraints on the model.
model.constraints = ConstraintList()

# 1. Constraints to make sure that all shifts are assigned and an appropriate number of workers is working.
for day in days:
    for shift in days_shifts[day]:
        if day != 'Sun' and shift in ['morning', 'evening']:
            # Weekday and Saturday morning and evening shifts need 2 workers.
            # Constraints are relational expressions: here we require that,
            # for a given day and shift, the binary 'works' variables sum
            # over workers to exactly 2.
            model.constraints.add(
                sum(model.works[worker, day, shift]
                    for worker in workers) == 2)
        else:
            # For Sundays or night shifts we need only 1 worker.
            model.constraints.add(
                sum(model.works[worker, day, shift]
                    for worker in workers) == 1)
Example #25
    def test_handle_termination_condition(self):
        """Test the outer approximation decomposition algorithm."""
        model = SimpleMINLP()
        config = _get_MindtPy_config()
        solve_data = set_up_solve_data(model, config)
        with time_code(solve_data.timing, 'total', is_main_timer=True), \
                create_utility_block(solve_data.working_model, 'MindtPy_utils', solve_data):

            MindtPy = solve_data.working_model.MindtPy_utils
            setup_results_object(solve_data, config)
            process_objective(
                solve_data,
                config,
                move_linear_objective=(config.init_strategy == 'FP' or
                                       config.add_regularization is not None),
                use_mcpp=config.use_mcpp,
                update_var_con_list=config.add_regularization is None)
            feas = MindtPy.feas_opt = Block()
            feas.deactivate()
            feas.feas_constraints = ConstraintList(
                doc='Feasibility Problem Constraints')

            lin = MindtPy.cuts = Block()
            lin.deactivate()

            if config.feasibility_norm == 'L1' or config.feasibility_norm == 'L2':
                feas.nl_constraint_set = RangeSet(
                    len(MindtPy.nonlinear_constraint_list),
                    doc='Integer index set over the nonlinear constraints.')
                # Create slack variables for feasibility problem
                feas.slack_var = Var(feas.nl_constraint_set,
                                     domain=NonNegativeReals,
                                     initialize=1)
            else:
                feas.slack_var = Var(domain=NonNegativeReals, initialize=1)

            # no-good cuts exclude particular discrete decisions
            lin.no_good_cuts = ConstraintList(doc='no-good cuts')

            fixed_nlp = solve_data.working_model.clone()
            TransformationFactory('core.fix_integer_vars').apply_to(fixed_nlp)

            MindtPy_initialize_main(solve_data, config)

            # test handle_subproblem_other_termination
            termination_condition = tc.maxIterations
            config.add_no_good_cuts = True
            handle_subproblem_other_termination(fixed_nlp,
                                                termination_condition,
                                                solve_data, config)
            self.assertEqual(
                len(solve_data.mip.MindtPy_utils.cuts.no_good_cuts), 1)

            # test handle_main_other_conditions
            main_mip, main_mip_results = solve_main(solve_data, config)
            main_mip_results.solver.termination_condition = tc.infeasible
            handle_main_other_conditions(solve_data.mip, main_mip_results,
                                         solve_data, config)
            self.assertIs(solve_data.results.solver.termination_condition,
                          tc.feasible)

            main_mip_results.solver.termination_condition = tc.unbounded
            handle_main_other_conditions(solve_data.mip, main_mip_results,
                                         solve_data, config)
            self.assertIn(main_mip.MindtPy_utils.objective_bound,
                          main_mip.component_data_objects(ctype=Constraint))

            main_mip.MindtPy_utils.del_component('objective_bound')
            main_mip_results.solver.termination_condition = tc.infeasibleOrUnbounded
            handle_main_other_conditions(solve_data.mip, main_mip_results,
                                         solve_data, config)
            self.assertIn(main_mip.MindtPy_utils.objective_bound,
                          main_mip.component_data_objects(ctype=Constraint))

            main_mip_results.solver.termination_condition = tc.maxTimeLimit
            handle_main_other_conditions(solve_data.mip, main_mip_results,
                                         solve_data, config)
            self.assertIs(solve_data.results.solver.termination_condition,
                          tc.maxTimeLimit)

            main_mip_results.solver.termination_condition = tc.other
            main_mip_results.solution.status = SolutionStatus.feasible
            handle_main_other_conditions(solve_data.mip, main_mip_results,
                                         solve_data, config)
            for v1, v2 in zip(
                    main_mip.MindtPy_utils.variable_list,
                    solve_data.working_model.MindtPy_utils.variable_list):
                self.assertEqual(v1.value, v2.value)

            # test handle_feasibility_subproblem_tc
            feas_subproblem = solve_data.working_model.clone()
            add_feas_slacks(feas_subproblem, config)
            MindtPy = feas_subproblem.MindtPy_utils
            MindtPy.feas_opt.activate()
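            # Build the feasibility objective for the configured norm;
            # slack_var[...] uses Pyomo's ellipsis slicing to iterate
            # every slack VarData of the indexed slack variable.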
            if config.feasibility_norm == 'L1':
                MindtPy.feas_obj = Objective(expr=sum(
                    s for s in MindtPy.feas_opt.slack_var[...]),
                                             sense=minimize)
            elif config.feasibility_norm == 'L2':
                MindtPy.feas_obj = Objective(expr=sum(
                    s * s for s in MindtPy.feas_opt.slack_var[...]),
                                             sense=minimize)
            else:
                MindtPy.feas_obj = Objective(expr=MindtPy.feas_opt.slack_var,
                                             sense=minimize)

            handle_feasibility_subproblem_tc(tc.optimal, MindtPy, solve_data,
                                             config)
            handle_feasibility_subproblem_tc(tc.infeasible, MindtPy,
                                             solve_data, config)
            self.assertIs(solve_data.should_terminate, True)
            self.assertIs(solve_data.results.solver.status, SolverStatus.error)

            solve_data.should_terminate = False
            solve_data.results.solver.status = None
            handle_feasibility_subproblem_tc(tc.maxIterations, MindtPy,
                                             solve_data, config)
            self.assertIs(solve_data.should_terminate, True)
            self.assertIs(solve_data.results.solver.status, SolverStatus.error)

            solve_data.should_terminate = False
            solve_data.results.solver.status = None
            handle_feasibility_subproblem_tc(tc.solverFailure, MindtPy,
                                             solve_data, config)
            self.assertIs(solve_data.should_terminate, True)
            self.assertIs(solve_data.results.solver.status, SolverStatus.error)

            # test NLP subproblem infeasible
            solve_data.working_model.Y[1].value = 0
            solve_data.working_model.Y[2].value = 0
            solve_data.working_model.Y[3].value = 0
            fixed_nlp, fixed_nlp_results = solve_subproblem(solve_data, config)
            solve_data.working_model.Y[1].value = None
            solve_data.working_model.Y[2].value = None
            solve_data.working_model.Y[3].value = None

            # test handle_nlp_subproblem_tc
            fixed_nlp_results.solver.termination_condition = tc.maxTimeLimit
            handle_nlp_subproblem_tc(fixed_nlp, fixed_nlp_results, solve_data,
                                     config)
            self.assertIs(solve_data.should_terminate, True)
            self.assertIs(solve_data.results.solver.termination_condition,
                          tc.maxTimeLimit)

            fixed_nlp_results.solver.termination_condition = tc.maxEvaluations
            handle_nlp_subproblem_tc(fixed_nlp, fixed_nlp_results, solve_data,
                                     config)
            self.assertIs(solve_data.should_terminate, True)
            self.assertIs(solve_data.results.solver.termination_condition,
                          tc.maxEvaluations)

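            # handle_nlp_subproblem_tc is expected to fold an NLP iteration
            # limit (tc.maxIterations) into tc.maxEvaluations, hence the
            # assertion below checks tc.maxEvaluations.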
            fixed_nlp_results.solver.termination_condition = tc.maxIterations
            handle_nlp_subproblem_tc(fixed_nlp, fixed_nlp_results, solve_data,
                                     config)
            self.assertIs(solve_data.should_terminate, True)
            self.assertIs(solve_data.results.solver.termination_condition,
                          tc.maxEvaluations)

            # test handle_fp_main_tc
            config.init_strategy = 'FP'
            solve_data.fp_iter = 1
            init_rNLP(solve_data, config)
            feas_main, feas_main_results = solve_main(solve_data,
                                                      config,
                                                      fp=True)
            feas_main_results.solver.termination_condition = tc.optimal
            fp_should_terminate = handle_fp_main_tc(feas_main_results,
                                                    solve_data, config)
            self.assertIs(fp_should_terminate, False)

            feas_main_results.solver.termination_condition = tc.maxTimeLimit
            fp_should_terminate = handle_fp_main_tc(feas_main_results,
                                                    solve_data, config)
            self.assertIs(fp_should_terminate, True)
            self.assertIs(solve_data.results.solver.termination_condition,
                          tc.maxTimeLimit)

            feas_main_results.solver.termination_condition = tc.infeasible
            fp_should_terminate = handle_fp_main_tc(feas_main_results,
                                                    solve_data, config)
            self.assertIs(fp_should_terminate, True)

            feas_main_results.solver.termination_condition = tc.unbounded
            fp_should_terminate = handle_fp_main_tc(feas_main_results,
                                                    solve_data, config)
            self.assertIs(fp_should_terminate, True)

            feas_main_results.solver.termination_condition = tc.other
            feas_main_results.solution.status = SolutionStatus.feasible
            fp_should_terminate = handle_fp_main_tc(feas_main_results,
                                                    solve_data, config)
            self.assertIs(fp_should_terminate, False)

            feas_main_results.solver.termination_condition = tc.solverFailure
            fp_should_terminate = handle_fp_main_tc(feas_main_results,
                                                    solve_data, config)
            self.assertIs(fp_should_terminate, True)

            # test generate_norm_constraint
            fp_nlp = solve_data.working_model.clone()
            config.fp_main_norm = 'L1'
            generate_norm_constraint(fp_nlp, solve_data, config)
            self.assertIsNotNone(
                fp_nlp.MindtPy_utils.find_component('L1_norm_constraint'))

            config.fp_main_norm = 'L2'
            generate_norm_constraint(fp_nlp, solve_data, config)
            self.assertIsNotNone(fp_nlp.find_component('norm_constraint'))

            fp_nlp.del_component('norm_constraint')
            config.fp_main_norm = 'L_infinity'
            generate_norm_constraint(fp_nlp, solve_data, config)
            self.assertIsNotNone(fp_nlp.find_component('norm_constraint'))

            # test set_solver_options
            config.mip_solver = 'gams'
            config.threads = 1
            opt = SolverFactory(config.mip_solver)
            set_solver_options(opt,
                               solve_data,
                               config,
                               'mip',
                               regularization=False)

            config.mip_solver = 'gurobi'
            config.mip_regularization_solver = 'gurobi'
            config.regularization_mip_threads = 1
            opt = SolverFactory(config.mip_solver)
            set_solver_options(opt,
                               solve_data,
                               config,
                               'mip',
                               regularization=True)

            config.nlp_solver = 'gams'
            config.nlp_solver_args['solver'] = 'ipopt'
            set_solver_options(opt,
                               solve_data,
                               config,
                               'nlp',
                               regularization=False)

            config.nlp_solver_args['solver'] = 'ipopth'
            set_solver_options(opt,
                               solve_data,
                               config,
                               'nlp',
                               regularization=False)

            config.nlp_solver_args['solver'] = 'conopt'
            set_solver_options(opt,
                               solve_data,
                               config,
                               'nlp',
                               regularization=False)

            config.nlp_solver_args['solver'] = 'msnlp'
            set_solver_options(opt,
                               solve_data,
                               config,
                               'nlp',
                               regularization=False)

            config.nlp_solver_args['solver'] = 'baron'
            set_solver_options(opt,
                               solve_data,
                               config,
                               'nlp',
                               regularization=False)

            # test algorithm_should_terminate
            solve_data.should_terminate = True
            solve_data.UB = float('inf')
            self.assertIs(
                algorithm_should_terminate(solve_data,
                                           config,
                                           check_cycling=False), True)
            self.assertIs(solve_data.results.solver.termination_condition,
                          tc.noSolution)

            solve_data.UB = 100
            self.assertIs(
                algorithm_should_terminate(solve_data,
                                           config,
                                           check_cycling=False), True)
            self.assertIs(solve_data.results.solver.termination_condition,
                          tc.feasible)

            solve_data.objective_sense = maximize
            solve_data.LB = float('-inf')
            self.assertIs(
                algorithm_should_terminate(solve_data,
                                           config,
                                           check_cycling=False), True)
            self.assertIs(solve_data.results.solver.termination_condition,
                          tc.noSolution)

            solve_data.LB = 100
            self.assertIs(
                algorithm_should_terminate(solve_data,
                                           config,
                                           check_cycling=False), True)
            self.assertIs(solve_data.results.solver.termination_condition,
                          tc.feasible)
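
For reference, the three feasibility norms exercised above reduce to the following objective pattern. A minimal, self-contained sketch on a toy model; slack and slack_inf are illustrative stand-ins for MindtPy's feas_opt.slack_var, not actual MindtPy components:

from pyomo.environ import (ConcreteModel, NonNegativeReals, Objective,
                           RangeSet, Var, minimize)

m = ConcreteModel()
m.s = RangeSet(3)
# one slack per nonlinear constraint (L1/L2); one shared slack (L-infinity)
m.slack = Var(m.s, domain=NonNegativeReals, initialize=1)
m.slack_inf = Var(domain=NonNegativeReals, initialize=1)

m.obj_l1 = Objective(expr=sum(m.slack[i] for i in m.s), sense=minimize)
m.obj_l2 = Objective(expr=sum(m.slack[i] ** 2 for i in m.s), sense=minimize)
m.obj_linf = Objective(expr=m.slack_inf, sense=minimize)

# Pyomo permits only one active objective at solve time.
m.obj_l2.deactivate()
m.obj_linf.deactivate()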
Example #26
    def _generate_model(self):
        self.model = ConcreteModel()
        model = self.model
        model._name = self.description

        model.s = Set(initialize=[1, 2])

        model.x_unused = Var(within=Integers)
        model.x_unused.stale = False

        model.x_unused_initialy_stale = Var(within=Integers)
        model.x_unused_initialy_stale.stale = True

        model.X_unused = Var(model.s, within=Integers)
        model.X_unused_initialy_stale = Var(model.s, within=Integers)
        for i in model.s:
            model.X_unused[i].stale = False
            model.X_unused_initialy_stale[i].stale = True

        model.x = Var(within=RangeSet(None, None))
        model.x.stale = False

        model.x_initialy_stale = Var(within=Integers)
        model.x_initialy_stale.stale = True

        model.X = Var(model.s, within=Integers)
        model.X_initialy_stale = Var(model.s, within=Integers)
        for i in model.s:
            model.X[i].stale = False
            model.X_initialy_stale[i].stale = True

        model.obj = Objective(expr=model.x +
                                   model.x_initialy_stale +
                                   sum_product(model.X) +
                                   sum_product(model.X_initialy_stale))

        model.c = ConstraintList()
        model.c.add(model.x >= 1)
        model.c.add(model.x_initialy_stale >= 1)
        model.c.add(model.X[1] >= 0)
        model.c.add(model.X[2] >= 1)
        model.c.add(model.X_initialy_stale[1] >= 0)
        model.c.add(model.X_initialy_stale[2] >= 1)

        # Test that stale flags get set
        # on inactive blocks (where "inactive blocks" mean blocks
        # that do NOT follow a path of all active parent blocks
        # up to the top-level model)
        flat_model = model.clone()
        model.b = Block()
        model.B = Block(model.s)
        model.b.b = flat_model.clone()
        model.B[1].b = flat_model.clone()
        model.B[2].b = flat_model.clone()

        model.b.deactivate()
        model.B.deactivate()
        model.b.b.activate()
        model.B[1].b.activate()
        model.B[2].b.deactivate()
        assert model.b.active is False
        assert model.B[1].active is False
        assert model.B[2].active is False
        assert model.b.b.active is True
        assert model.B[1].b.active is True
        assert model.B[2].b.active is False
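
A note on the block activation pattern above: Pyomo's component walkers do not descend into deactivated blocks, so a sub-block can report active is True and still be invisible to an active=True traversal from the top. A minimal sketch (names are illustrative):

from pyomo.environ import Block, ConcreteModel, Var

m = ConcreteModel()
m.outer = Block()
m.outer.inner = Block()
m.outer.inner.x = Var()

m.outer.deactivate()      # parent off ...
m.outer.inner.activate()  # ... child explicitly on

assert m.outer.inner.active
# an active=True walk from the top still skips inner, because traversal
# stops at the deactivated parent block
assert not list(m.component_data_objects(Block, active=True))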
Example #27
def apply_basic_step(disjunctions_or_constraints):
    #
    # Basic steps only apply to XOR'd disjunctions
    #
    disjunctions = list(obj for obj in disjunctions_or_constraints
                        if obj.ctype == Disjunction)
    constraints = list(obj for obj in disjunctions_or_constraints
                       if obj.ctype == Constraint)
    for d in disjunctions:
        if not d.xor:
            raise ValueError(
                "Basic steps can only be applied to XOR'd disjunctions\n\t"
                "(raised by disjunction %s)" % (d.name, ))
        if not d.active:
            logger.warning("Warning: applying basic step to a previously "
                           "deactivated disjunction (%s)" % (d.name, ))

    ans = Block(concrete=True)
    ans.DISJUNCTIONS = Set(initialize=range(len(disjunctions)))
    ans.INDEX = Set(dimen=len(disjunctions),
                    initialize=_squish_singletons(
                        itertools.product(*tuple(
                            range(len(d.disjuncts)) for d in disjunctions))))

    #
    # Form the individual disjuncts for the new basic step
    #
    ans.disjuncts = Disjunct(ans.INDEX)
    for idx in ans.INDEX:
        #
        # Each source disjunct will be copied (cloned) into its own
        # subblock
        #
        ans.disjuncts[idx].src = Block(ans.DISJUNCTIONS)
        for i in ans.DISJUNCTIONS:
            tmp = _clone_all_but_indicator_vars(
                disjunctions[i].disjuncts[
                    idx[i] if isinstance(idx, tuple) else idx])
            for k, v in list(tmp.component_map().items()):
                if v.parent_block() is not tmp:
                    # Skip indicator_var and binary_indicator_var
                    continue
                tmp.del_component(k)
                ans.disjuncts[idx].src[i].add_component(k, v)
        # Copy in the constraints corresponding to the improper disjunctions
        ans.disjuncts[idx].improper_constraints = ConstraintList()
        for constr in constraints:
            if constr.is_indexed():
                for indx in constr:
                    ans.disjuncts[idx].improper_constraints.add(
                        (constr[indx].lower, constr[indx].body,
                         constr[indx].upper))
                    constr[indx].deactivate()
            else:
                # this branch lets us take an improper basic step with a
                # ConstraintData
                ans.disjuncts[idx].improper_constraints.add(
                    (constr.lower, constr.body, constr.upper))
                constr.deactivate()

    #
    # Link the new disjunct indicator_var's to the original
    # indicator_var's.  Since only one of the new disjuncts can be active
    # at a time, each original indicator variable equals the sum of the
    # new binary indicators whose index selects that source disjunct.
    #
    NAME_BUFFER = {}
    ans.indicator_links = ConstraintList()
    for i in ans.DISJUNCTIONS:
        for j in range(len(disjunctions[i].disjuncts)):
            orig_var = disjunctions[i].disjuncts[j].indicator_var
            orig_binary_var = orig_var.get_associated_binary()
            ans.indicator_links.add(orig_binary_var == sum(
                ans.disjuncts[idx].binary_indicator_var for idx in ans.INDEX
                if (idx[i] if isinstance(idx, tuple) else idx) == j))
            # and throw on a Reference to original on the block
            for v in (orig_var, orig_binary_var):
                name_base = v.getname(fully_qualified=True,
                                      name_buffer=NAME_BUFFER)
                ans.add_component(unique_component_name(ans, name_base),
                                  Reference(v))

    # Form the new disjunction
    ans.disjunction = Disjunction(expr=[ans.disjuncts[i] for i in ans.INDEX])

    #
    # Deactivate the old disjunctions / disjuncts
    #
    for i in ans.DISJUNCTIONS:
        disjunctions[i].deactivate()
        for d in disjunctions[i].disjuncts:
            d._deactivate_without_fixing_indicator()

    return ans
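
A minimal usage sketch for the function above, assuming it is importable as pyomo.gdp.basic_step.apply_basic_step (the model and bounds are illustrative):

from pyomo.environ import ConcreteModel, Var
from pyomo.gdp import Disjunction
from pyomo.gdp.basic_step import apply_basic_step

m = ConcreteModel()
m.x = Var(bounds=(0, 10))
m.disj1 = Disjunction(expr=[[m.x <= 2], [m.x >= 8]])
m.disj2 = Disjunction(expr=[[m.x <= 1], [m.x >= 9]])

# intersect the two XOR'd disjunctions into one disjunction over the
# 2 x 2 = 4 disjunct combinations, and attach the returned block
m.basic_step = apply_basic_step([m.disj1, m.disj2])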
Example #28
def add_outer_approximation_cuts(nlp_result, solve_data, config):
    """Add outer approximation cuts to the linear GDP model."""
    with time_code(solve_data.timing, 'OA cut generation'):
        m = solve_data.linear_GDP
        GDPopt = m.GDPopt_utils
        sign_adjust = -1 if solve_data.objective_sense == minimize else 1

        # copy values over
        for var, val in zip(GDPopt.variable_list, nlp_result.var_values):
            if val is not None and not var.fixed:
                var.value = val

        # TODO some kind of special handling if the dual is phenomenally small?
        config.logger.debug('Adding OA cuts.')

        counter = 0
        if not hasattr(GDPopt, 'jacobians'):
            GDPopt.jacobians = ComponentMap()
        for constr, dual_value in zip(GDPopt.constraint_list,
                                      nlp_result.dual_values):
            if dual_value is None or constr.body.polynomial_degree() in (1, 0):
                continue

            # Determine if the user pre-specified that OA cuts should not be
            # generated for the given constraint.
            parent_block = constr.parent_block()
            ignore_set = getattr(parent_block, 'GDPopt_ignore_OA', None)
            config.logger.debug('Ignore_set %s' % ignore_set)
            if (ignore_set and (constr in ignore_set
                                or constr.parent_component() in ignore_set)):
                config.logger.debug(
                    'OA cut addition for %s skipped because it is in '
                    'the ignore set.' % constr.name)
                continue

            config.logger.debug("Adding OA cut for %s with dual value %s" %
                                (constr.name, dual_value))

            # Cache jacobian
            jacobian = GDPopt.jacobians.get(constr, None)
            if jacobian is None:
                constr_vars = list(
                    identify_variables(constr.body, include_fixed=False))
                if len(constr_vars) >= MAX_SYMBOLIC_DERIV_SIZE:
                    mode = differentiate.Modes.reverse_numeric
                else:
                    mode = differentiate.Modes.sympy

                try:
                    jac_list = differentiate(constr.body,
                                             wrt_list=constr_vars,
                                             mode=mode)
                    jac_map = ComponentMap(zip(constr_vars, jac_list))
                except Exception:
                    # symbolic differentiation failed; fall back to numeric
                    # differentiation (values are recomputed below)
                    if mode is differentiate.Modes.reverse_numeric:
                        raise
                    mode = differentiate.Modes.reverse_numeric
                    jac_map = ComponentMap()
                jacobian = JacInfo(mode=mode, vars=constr_vars, jac=jac_map)
                GDPopt.jacobians[constr] = jacobian
            # Recompute numeric derivatives
            if not jacobian.jac:
                jac_list = differentiate(constr.body,
                                         wrt_list=jacobian.vars,
                                         mode=jacobian.mode)
                jacobian.jac.update(zip(jacobian.vars, jac_list))

            # Create a block on which to put outer approximation cuts.
            oa_utils = parent_block.component('GDPopt_OA')
            if oa_utils is None:
                oa_utils = parent_block.GDPopt_OA = Block(
                    doc="Block holding outer approximation cuts "
                    "and associated data.")
                oa_utils.GDPopt_OA_cuts = ConstraintList()
                oa_utils.GDPopt_OA_slacks = VarList(bounds=(0,
                                                            config.max_slack),
                                                    domain=NonNegativeReals,
                                                    initialize=0)

            oa_cuts = oa_utils.GDPopt_OA_cuts
            slack_var = oa_utils.GDPopt_OA_slacks.add()
            rhs = value(constr.lower) if constr.has_lb() else value(
                constr.upper)
            try:
                new_oa_cut = (copysign(1, sign_adjust * dual_value) *
                              (value(constr.body) - rhs + sum(
                                  value(jac) * (var - value(var))
                                  for var, jac in iteritems(jacobian.jac))) -
                              slack_var <= 0)
                if new_oa_cut.polynomial_degree() not in (1, 0):
                    # the cut failed to linearize; log the gradient for debugging
                    for var, jac in iteritems(jacobian.jac):
                        config.logger.debug('%s: %s' % (var.name, value(jac)))
                oa_cuts.add(expr=new_oa_cut)
                counter += 1
            except ZeroDivisionError:
                config.logger.warning(
                    "Zero division occured attempting to generate OA cut for constraint %s.\n"
                    "Skipping OA cut generation for this constraint." %
                    (constr.name, ))
                # Simply continue on to the next constraint.
            # Clear out the numeric Jacobian values
            if jacobian.mode is differentiate.Modes.reverse_numeric:
                jacobian.jac.clear()

        config.logger.info('Added %s OA cuts' % counter)
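
The cut added above is the first-order (outer) approximation g(x*) + grad g(x*)^T (x - x*) <= rhs, relaxed by a slack variable. A minimal sketch of that linearization using the same differentiate helper; the constraint g(x) = x**2 <= 4 and the point x* = 3 are illustrative:

from pyomo.environ import ConcreteModel, Constraint, Var, value
from pyomo.core.expr.calculus.derivatives import differentiate

m = ConcreteModel()
m.x = Var(initialize=3.0)
g = m.x ** 2  # nonlinear body of g(x) <= 4

# numeric gradient of g at the current point x* = 3
(dg_dx,) = differentiate(g, wrt_list=[m.x],
                         mode=differentiate.Modes.reverse_numeric)

# OA cut: g(x*) + g'(x*) * (x - x*) <= 4, i.e. 9 + 6*(x - 3) <= 4
m.oa_cut = Constraint(expr=value(g) + dg_dx * (m.x - value(m.x)) <= 4)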
Example #29
    def transformForTrustRegion(self, model, eflist):
        # transform the model into a suitable form for the TRF method
        #
        # Arguments:
        # model : pyomo model containing ExternalFunctions
        # eflist : a list of the external functions that will be
        #   handled with TRF method rather than calls to compiled code

        efSet = set([id(x) for x in eflist])

        TRF = Block()

        # Get all variables
        seenVar = set()
        allVariables = []
        for var in model.component_data_objects(Var):
            if id(var) not in seenVar:
                seenVar.add(id(var))
                allVariables.append(var)

        # This assumes that an external function call is present (required!)
        model.add_component(unique_component_name(model, 'tR'), TRF)
        TRF.y = VarList()
        TRF.x = VarList()
        TRF.conset = ConstraintList()
        TRF.external_fcns = []
        TRF.exfn_xvars = []

        # TODO: Copy constraints onto block so that transformation can be reversed.

        for con in model.component_data_objects(Constraint, active=True):
            con.set_value((con.lower, self.substituteEF(con.body, TRF,
                                                        efSet), con.upper))
        for obj in model.component_data_objects(Objective, active=True):
            obj.set_value(self.substituteEF(obj.expr, TRF, efSet))
            ## Assume only one active objective function here
            self.objective = obj

        if self.objective.sense == maximize:
            self.objective.expr = -1 * self.objective.expr
            self.objective.sense = minimize

        # xvars and zvars are lists of x and z variables as in the paper
        TRF.xvars = []
        TRF.zvars = []
        seenVar = set()
        for varss in TRF.exfn_xvars:
            for var in varss:
                if id(var) not in seenVar:
                    seenVar.add(id(var))
                    TRF.xvars.append(var)

        for var in allVariables:
            if id(var) not in seenVar:
                seenVar.add(id(var))
                TRF.zvars.append(var)

        # TODO: build dict for exfn_xvars
        # assume it is not a bottleneck of the code
        # (see the sketch after this function)
        self.exfn_xvars_ind = []
        for varss in TRF.exfn_xvars:
            listtmp = []
            for var in varss:
                for i in range(len(TRF.xvars)):
                    if (id(var) == id(TRF.xvars[i])):
                        listtmp.append(i)
                        break

            self.exfn_xvars_ind.append(listtmp)

        return TRF
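
The TODO above amounts to precomputing an id-to-index map so the nested linear scan becomes a dictionary lookup. A self-contained sketch of the idea (the _V stand-in class and all names are illustrative, not the module's own):

class _V:  # stand-in for a Pyomo VarData; only object identity matters
    pass

xvars = [_V(), _V(), _V()]
exfn_xvars = [[xvars[2], xvars[0]], [xvars[1]]]

# O(1) lookups instead of a nested linear search over xvars
xvar_index = {id(var): i for i, var in enumerate(xvars)}
exfn_xvars_ind = [[xvar_index[id(var)] for var in varss]
                  for varss in exfn_xvars]
assert exfn_xvars_ind == [[2, 0], [1]]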
    def _apply_to_impl(self, instance, config):
        vars_to_eliminate = config.vars_to_eliminate
        self.constraint_filter = config.constraint_filtering_callback
        self.do_integer_arithmetic = config.do_integer_arithmetic
        self.integer_tolerance = config.integer_tolerance
        self.zero_tolerance = config.zero_tolerance
        if vars_to_eliminate is None:
            raise RuntimeError(
                "The Fourier-Motzkin Elimination transformation "
                "requires the argument vars_to_eliminate, a "
                "list of Vars to be projected out of the model.")

        # make transformation block
        transBlockName = unique_component_name(
            instance, '_pyomo_contrib_fme_transformation')
        transBlock = Block()
        instance.add_component(transBlockName, transBlock)
        projected_constraints = transBlock.projected_constraints = \
                                ConstraintList()

        # collect all of the constraints
        # NOTE that we are ignoring deactivated constraints
        constraints = []
        ctypes_not_to_transform = set(
            (Block, Param, Objective, Set, SetOf, Expression, Suffix, Var))
        for obj in instance.component_data_objects(
                descend_into=Block, sort=SortComponents.deterministic,
                active=True):
            if obj.ctype in ctypes_not_to_transform:
                continue
            elif obj.ctype is Constraint:
                cons_list = self._process_constraint(obj)
                constraints.extend(cons_list)
                # the "truth" will live on our transformation block
                obj.deactivate()
            else:
                raise RuntimeError(
                    "Found active component %s of type %s. The "
                    "Fourier-Motzkin Elimination transformation can only "
                    "handle purely algebraic models. That is, only "
                    "Sets, Params, Vars, Constraints, Expressions, Blocks, "
                    "and Objectives may be active on the model." %
                    (obj.name, obj.ctype))

        for obj in vars_to_eliminate:
            if obj.lb is not None:
                constraints.append({
                    'body': generate_standard_repn(obj),
                    'lower': value(obj.lb),
                    'map': ComponentMap([(obj, 1)])
                })
            if obj.ub is not None:
                constraints.append({
                    'body': generate_standard_repn(-obj),
                    'lower': -value(obj.ub),
                    'map': ComponentMap([(obj, -1)])
                })

        new_constraints = self._fourier_motzkin_elimination(
            constraints, vars_to_eliminate)

        # put the new constraints on the transformation block
        for cons in new_constraints:
            if self.constraint_filter is not None:
                try:
                    keep = self.constraint_filter(cons)
                except Exception:
                    logger.error("Problem calling constraint filter callback "
                                 "on constraint with right-hand side %s and "
                                 "body:\n%s" %
                                 (cons['lower'], cons['body'].to_expression()))
                    raise
                if not keep:
                    continue
            lhs = cons['body'].to_expression(sort=True)
            lower = cons['lower']
            assert type(lower) is int or type(lower) is float
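            # if the comparison folded to a plain bool, every variable was
            # eliminated, so the constraint is either trivially satisfied
            # or proves the model infeasible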
            if type(lhs >= lower) is bool:
                if lhs >= lower:
                    continue
                else:
                    # This would actually make a lot of sense in this case...
                    #projected_constraints.add(Constraint.Infeasible)
                    raise RuntimeError("Fourier-Motzkin found the model is "
                                       "infeasible!")
            else:
                projected_constraints.add(lhs >= lower)
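
For context, this transformation is usually invoked through the TransformationFactory. A minimal sketch, assuming the transformation is registered as 'contrib.fourier_motzkin_elimination' (the toy constraints are illustrative):

from pyomo.environ import (ConcreteModel, Constraint, TransformationFactory,
                           Var)

m = ConcreteModel()
m.x = Var()
m.y = Var()
m.c1 = Constraint(expr=m.x + m.y >= 2)
m.c2 = Constraint(expr=m.x - m.y >= 0)

# project y out; the surviving inequality (here 2*x >= 2) lands on
# m._pyomo_contrib_fme_transformation.projected_constraints
TransformationFactory('contrib.fourier_motzkin_elimination').apply_to(
    m, vars_to_eliminate=[m.y])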