Example #1
    def _generate_model(self):
        self.model = ConcreteModel()
        model = self.model
        model._name = self.description

        model.x = Var(within=Binary)
        model.y = Var(within=Binary)
        model.z = Var(within=Binary)

        model.obj = Objective(expr=model.x, sense=maximize)
        model.c0 = Constraint(expr=model.x + model.y + model.z == 1)
        model.qc0 = Constraint(expr=model.x**2 + model.y**2 <= model.z**2)
        model.qc1 = Constraint(expr=model.x**2 <= model.y * model.z)
Example #2
def makeBetweenStepsPaperExample():
    """Original example model, implicit disjunction"""
    m = ConcreteModel()
    m.I = RangeSet(1, 4)
    m.x = Var(m.I, bounds=(-2, 6))

    m.disjunction = Disjunction(expr=[
        [sum(m.x[i]**2 for i in m.I) <= 1],
        [sum((3 - m.x[i])**2 for i in m.I) <= 1]])

    m.obj = Objective(expr=m.x[2] - m.x[1], sense=maximize)

    return m
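A usage sketch for this model (not part of the original snippet; it assumes Pyomo's GDP transformations and an installed MINLP solver, with 'bonmin' purely an illustrative choice):

from pyomo.environ import SolverFactory, TransformationFactory, value

m = makeBetweenStepsPaperExample()
# Reformulate the disjunction ('gdp.bigm' is an alternative; older Pyomo
# releases named the hull transformation 'gdp.chull').
TransformationFactory('gdp.hull').apply_to(m)
SolverFactory('bonmin').solve(m)  # quadratic disjuncts make this an MINLP
print(value(m.obj))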
Example #3
def back_off_constraint_with_calculated_cut_violation(cut, transBlock_rHull,
                                                      bigm_to_hull_map, opt,
                                                      stream_solver, TOL):
    """Calculates the maximum violation of cut subject to the relaxed hull
    constraints. Increases this violation by TOL (to account for optimality 
    tolerance in solving the problem), and, if it finds that cut can be violated
    up to this tolerance, makes it more conservative such that it no longer can.

    Parameters
    ----------
    cut: The cut to be made more conservative, a Constraint
    transBlock_rHull: the relaxed hull model's transformation Block
    bigm_to_hull_map: Dictionary mapping ids of bigM variables to the 
                      corresponding variables on the relaxed hull instance
    opt: SolverFactory object for solving the maximum violation problem
    stream_solver: Whether or not to set tee=True while solving the maximum
                   violation problem.
    TOL: An absolute tolerance to be added to the calculated cut violation,
         to account for optimality tolerance in the maximum violation problem
         solve.
    """
    instance_rHull = transBlock_rHull.model()
    logger.info("Post-processing cut: %s" % cut.expr)
    # Take a constraint. We will solve a problem maximizing its violation
    # subject to rHull. We will add some user-specified tolerance to that
    # violation, and then add that much padding to it if it can be violated.
    transBlock_rHull.separation_objective.deactivate()

    transBlock_rHull.infeasibility_objective = Objective(
        expr=clone_without_expression_components(cut.body,
                                                 substitute=bigm_to_hull_map))

    results = opt.solve(instance_rHull, tee=stream_solver)
    if verify_successful_solve(results) is not NORMAL:
        logger.warning("Problem to determine how much to "
                       "back off the new cut "
                       "did not solve normally. Leaving the constraint as is, "
                       "which could lead to numerical trouble%s" % (results,))
        # restore the objective
        transBlock_rHull.del_component(transBlock_rHull.infeasibility_objective)
        transBlock_rHull.separation_objective.activate()
        return

    # we're minimizing, val is <= 0
    val = value(transBlock_rHull.infeasibility_objective) - TOL
    if val <= 0:
        logger.info("\tBacking off cut by %s" % val)
        cut._body += abs(val)
    # else there is nothing to do: restore the objective
    transBlock_rHull.del_component(transBlock_rHull.infeasibility_objective)
    transBlock_rHull.separation_objective.activate()
Example #4
def solve_LOA_master(solve_data, config):
    """Solve the augmented lagrangean outer approximation master problem."""
    m = solve_data.linear_GDP.clone()
    GDPopt = m.GDPopt_utils

    # Set up augmented Lagrangean penalty objective
    GDPopt.objective.deactivate()
    sign_adjust = 1 if GDPopt.objective.sense == minimize else -1
    GDPopt.OA_penalty_expr = Expression(
        expr=sign_adjust * config.OA_penalty_factor *
        sum(v for v in m.component_data_objects(ctype=Var,
                                                descend_into=(Block, Disjunct))
            if v.parent_component().local_name == 'GDPopt_OA_slacks'))
    GDPopt.oa_obj = Objective(expr=GDPopt.objective.expr +
                              GDPopt.OA_penalty_expr,
                              sense=GDPopt.objective.sense)
    solve_data.mip_iteration += 1

    mip_results = solve_linear_GDP(m, solve_data, config)
    if mip_results:
        if GDPopt.objective.sense == minimize:
            solve_data.LB = max(value(GDPopt.oa_obj.expr), solve_data.LB)
        else:
            solve_data.UB = min(value(GDPopt.oa_obj.expr), solve_data.UB)
        solve_data.iteration_log[(solve_data.master_iteration,
                                  solve_data.mip_iteration,
                                  solve_data.nlp_iteration)] = (
                                      value(GDPopt.oa_obj.expr),
                                      value(GDPopt.objective.expr),
                                      mip_results[1]  # mip_var_values
                                  )
        config.logger.info(
            'ITER %s.%s.%s-MIP: OBJ: %s  LB: %s  UB: %s' %
            (solve_data.master_iteration, solve_data.mip_iteration,
             solve_data.nlp_iteration, value(
                 GDPopt.oa_obj.expr), solve_data.LB, solve_data.UB))
    else:
        # Master problem was infeasible.
        if solve_data.master_iteration == 1:
            config.logger.warning(
                'GDPopt initialization may have generated poor '
                'quality cuts.')
        # set optimistic bound to infinity
        if GDPopt.objective.sense == minimize:
            solve_data.LB = float('inf')
        else:
            solve_data.UB = float('-inf')
    # Call the MILP post-solve callback
    config.master_postsolve(m, solve_data)

    return mip_results
Example #5
def solve_NLP_feas(solve_data, config):
    """Solves feasibility NLP and copies result to working model

    Returns: Result values and dual values
    """
    fixed_nlp = solve_data.working_model.clone()
    add_feas_slacks(fixed_nlp)
    MindtPy = fixed_nlp.MindtPy_utils
    next(fixed_nlp.component_data_objects(Objective, active=True)).deactivate()
    for constr in fixed_nlp.component_data_objects(
            ctype=Constraint, active=True, descend_into=True):
        if constr.body.polynomial_degree() not in [0, 1]:
            constr.deactivate()

    MindtPy.MindtPy_feas.activate()
    MindtPy.MindtPy_feas_obj = Objective(
        expr=sum(s for s in MindtPy.MindtPy_feas.slack_var[...]),
        sense=minimize)
    TransformationFactory('core.fix_integer_vars').apply_to(fixed_nlp)

    with SuppressInfeasibleWarning():
        feas_soln = SolverFactory(config.nlp_solver).solve(
            fixed_nlp, **config.nlp_solver_args)
    subprob_terminate_cond = feas_soln.solver.termination_condition
    if subprob_terminate_cond is tc.optimal:
        copy_var_list_values(
            MindtPy.variable_list,
            solve_data.working_model.MindtPy_utils.variable_list,
            config)
    elif subprob_terminate_cond is tc.infeasible:
        raise ValueError('Feasibility NLP infeasible. '
                         'This should never happen.')
    else:
        raise ValueError(
            'MindtPy unable to handle feasibility NLP termination condition '
            'of {}'.format(subprob_terminate_cond))

    var_values = [v.value for v in MindtPy.variable_list]
    duals = [0 for _ in MindtPy.constraint_list]

    for i, c in enumerate(MindtPy.constraint_list):
        rhs = c.upper if c.has_ub() else c.lower
        c_geq = -1 if c.has_ub() else 1
        duals[i] = c_geq * max(
            0, c_geq * (rhs - value(c.body)))

    if value(MindtPy.MindtPy_feas_obj.expr) == 0:
        raise ValueError(
            'Problem is not feasible, check NLP solver')

    return fixed_nlp, feas_soln
Example #6
def obbt_disjunct(orig_model, idx, solver):
    model = orig_model.clone()

    # Fix the disjunct to be active
    disjunct = model._disjuncts_to_process[idx]
    disjunct.indicator_var.fix(1)

    for obj in model.component_data_objects(Objective, active=True):
        obj.deactivate()

    # Deactivate nonlinear constraints
    for constr in model.component_data_objects(Constraint,
                                               active=True,
                                               descend_into=(Block, Disjunct)):
        if constr.body.polynomial_degree() not in linear_degrees:
            constr.deactivate()

    # Only look at the variables participating in active constraints within the scope
    relevant_var_set = ComponentSet()
    for constr in disjunct.component_data_objects(Constraint, active=True):
        relevant_var_set.update(
            identify_variables(constr.body, include_fixed=False))

    TransformationFactory('gdp.bigm').apply_to(model)

    model._var_bounding_obj = Objective(expr=1, sense=minimize)

    for var in relevant_var_set:
        model._var_bounding_obj.set_value(expr=var)
        var_lb = solve_bounding_problem(model, solver)
        if var_lb is None:
            return None  # bounding problem infeasible
        model._var_bounding_obj.set_value(expr=-var)
        var_ub = solve_bounding_problem(model, solver)
        if var_ub is None:
            return None  # bounding problem infeasible
        else:
            var_ub = -var_ub  # sign correction

        var.setlb(var_lb)
        var.setub(var_ub)

    # Maps original variable --> (new computed LB, new computed UB)
    var_bnds = ComponentMap(
        ((orig_var, (clone_var.lb if clone_var.has_lb() else -inf,
                     clone_var.ub if clone_var.has_ub() else inf))
         for orig_var, clone_var in zip(orig_model._disj_bnds_linear_vars,
                                        model._disj_bnds_linear_vars)
         if clone_var in relevant_var_set))
    return var_bnds
Example #7
def twoSegments_SawayaGrossmann():
    m = ConcreteModel()
    m.x = Var(bounds=(0, 3))
    m.disj1 = Disjunct()
    m.disj1.c = Constraint(expr=inequality(0, m.x, 1))
    m.disj2 = Disjunct()
    m.disj2.c = Constraint(expr=inequality(2, m.x, 3))
    m.disjunction = Disjunction(expr=[m.disj1, m.disj2])

    # this is my objective because I want to make sure that when I am testing
    # cutting planes, my first solution to rBigM is not on the convex hull.
    m.obj = Objective(expr=m.x - m.disj2.indicator_var)

    return m
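The comment above refers to testing cutting planes; a hedged sketch of that workflow (assuming Pyomo's 'gdp.cuttingplane' transformation, which solves internal separation problems with a configurable subsolver, and GLPK as an illustrative MIP solver):

from pyomo.environ import SolverFactory, TransformationFactory

m = twoSegments_SawayaGrossmann()
TransformationFactory('gdp.cuttingplane').apply_to(m)  # add cuts from the hull relaxation
TransformationFactory('gdp.bigm').apply_to(m)          # then reformulate the disjunction itself
SolverFactory('glpk').solve(m)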
Example #8
def twoDisj_twoCircles_easy():
    m = ConcreteModel()
    m.x = Var(bounds=(0,8))
    m.y = Var(bounds=(0,10))

    m.upper_circle = Disjunct()
    m.upper_circle.cons = Constraint(expr=(m.x - 1)**2 + (m.y - 6)**2 <= 2)
    m.lower_circle = Disjunct()
    m.lower_circle.cons = Constraint(expr=(m.x - 4)**2 + (m.y - 2)**2 <= 2)

    m.disjunction = Disjunction(expr=[m.upper_circle, m.lower_circle])
    
    m.obj = Objective(expr=m.x + m.y, sense=maximize)
    return m
Example #9
def knapsack(weights: List[Union[int, float]], values: List[Union[int, float]], max_weight: Union[int, float]):
    I = list(range(len(weights)))

    model = ConcreteModel()
    model.x = Var(I, within=Binary)

    model.objective = Objective(expr=sum(model.x[i] * values[i] for i in I), sense=maximize)
    model.constraint = Constraint(expr=sum(model.x[i] * weights[i] for i in I) <= max_weight)

    solver = solver_factory.get_solver(Solver.BONMIN)
    solver.solve(model)

    for v in model.component_data_objects(Var):
        print(str(v), v.value)
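A call sketch with made-up data (the weights, values, and capacity below are illustrative; BONMIN and the repo's solver_factory wrapper must be available):

knapsack(weights=[23, 26, 20, 18], values=[505, 352, 458, 220], max_weight=67)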
Example #10
def solve_NLP_feas(solve_data, config):
    m = solve_data.working_model.clone()
    add_feas_slacks(m)
    MindtPy = m.MindtPy_utils
    next(m.component_data_objects(Objective, active=True)).deactivate()
    for constr in m.component_data_objects(ctype=Constraint,
                                           active=True,
                                           descend_into=True):
        constr.deactivate()
    MindtPy.MindtPy_feas.activate()
    MindtPy.MindtPy_feas_obj = Objective(
        expr=sum(s for s in MindtPy.MindtPy_feas.slack_var[...]),
        sense=minimize)
    for v in MindtPy.variable_list:
        if v.is_binary():
            v.fix(int(round(v.value)))
    # m.pprint()  # print NLP feasibility problem for debugging
    with SuppressInfeasibleWarning():
        feas_soln = SolverFactory(config.nlp_solver).solve(
            m, **config.nlp_solver_args)
    subprob_terminate_cond = feas_soln.solver.termination_condition
    if subprob_terminate_cond is tc.optimal:
        copy_var_list_values(
            MindtPy.variable_list,
            solve_data.working_model.MindtPy_utils.variable_list, config)
    elif subprob_terminate_cond is tc.infeasible:
        raise ValueError('Feasibility NLP infeasible. '
                         'This should never happen.')
    else:
        raise ValueError(
            'MindtPy unable to handle feasibility NLP termination condition '
            'of {}'.format(subprob_terminate_cond))

    var_values = [v.value for v in MindtPy.variable_list]
    duals = [0 for _ in MindtPy.constraint_list]

    for i, constr in enumerate(MindtPy.constraint_list):
        # TODO rhs only works if constr.upper and constr.lower do not both have values.
        # Sometimes you might have 1 <= expr <= 1. This would give an incorrect rhs of 2.
        rhs = ((0 if constr.upper is None else constr.upper) +
               (0 if constr.lower is None else constr.lower))
        sign_adjust = 1 if value(constr.upper) is None else -1
        duals[i] = sign_adjust * max(0, sign_adjust *
                                     (rhs - value(constr.body)))

    if value(MindtPy.MindtPy_feas_obj.expr) == 0:
        raise ValueError('Problem is not feasible, check NLP solver')

    return var_values, duals
Example #11
def process_objective(solve_data, config):
    m = solve_data.working_model
    GDPopt = m.GDPopt_utils
    # Handle missing or multiple objectives
    objs = list(
        m.component_data_objects(ctype=Objective,
                                 active=True,
                                 descend_into=True))
    num_objs = len(objs)
    solve_data.results.problem.number_of_objectives = num_objs
    if num_objs == 0:
        config.logger.warning(
            'Model has no active objectives. Adding dummy objective.')
        GDPopt.dummy_objective = Objective(expr=1)
        main_obj = GDPopt.dummy_objective
    elif num_objs > 1:
        raise ValueError('Model has multiple active objectives.')
    else:
        main_obj = objs[0]
    solve_data.working_objective_expr = main_obj.expr

    # Move the objective to the constraints
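    # (Epigraph reformulation: to minimize f(x), introduce a variable t and
    # the constraint t >= f(x), then minimize t; for maximization, use
    # t <= f(x) and maximize t.)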

    # TODO only move the objective if nonlinear?
    GDPopt.objective_value = Var(domain=Reals, initialize=0)
    solve_data.objective_sense = main_obj.sense
    if main_obj.sense == minimize:
        GDPopt.objective_expr = Constraint(
            expr=GDPopt.objective_value >= main_obj.expr)
        solve_data.results.problem.sense = ProblemSense.minimize
    else:
        GDPopt.objective_expr = Constraint(
            expr=GDPopt.objective_value <= main_obj.expr)
        solve_data.results.problem.sense = ProblemSense.maximize
    main_obj.deactivate()
    GDPopt.objective = Objective(expr=GDPopt.objective_value,
                                 sense=main_obj.sense)
Example #12
    def select_tear_mip_model(self, G):
        """
        Generate a model for selecting tears from the given graph

        Returns
        -------
            model
            bin_list
                A list of the binary variables representing each edge,
                indexed by the edge index of the graph
        """
        model = ConcreteModel()

        bin_list = []
        for i in range(G.number_of_edges()):
            # add a binary "torn" variable for every edge
            vname = "edge%s" % i
            var = Var(domain=Binary)
            bin_list.append(var)
            model.add_component(vname, var)

        # var containing the maximum number of times any cycle is torn
        mct = model.max_cycle_tears = Var()

        _, cycleEdges = self.all_cycles(G)

        for i in range(len(cycleEdges)):
            ecyc = cycleEdges[i]

            # expression containing sum of tears for each cycle
            ename = "cycle_sum%s" % i
            expr = Expression(expr=sum(bin_list[i] for i in ecyc))
            model.add_component(ename, expr)

            # every cycle must have at least 1 tear
            cname_min = "cycle_min%s" % i
            con_min = Constraint(expr=expr >= 1)
            model.add_component(cname_min, con_min)

            # mct >= cycle_sum for all cycles, thus it becomes the max
            cname_mct = mct.name + "_geq%s" % i
            con_mct = Constraint(expr=mct >= expr)
            model.add_component(cname_mct, con_mct)

        # weigh the primary objective much greater than the secondary
        obj_expr = 1000 * mct + sum(var for var in bin_list)
        model.obj = Objective(expr=obj_expr, sense=minimize)

        return model, bin_list
Example #13
    def get_model(self):
        m = ConcreteModel()
        m.x = Var(bounds=(-100, 100))

        m.obj = Objective(expr=m.x)

        m.disjunct1 = Disjunct()
        m.disjunct1.comp = Complementarity(
            expr=complements(m.x >= 0, 4 * m.x - 3 >= 0))
        m.disjunct2 = Disjunct()
        m.disjunct2.cons = Constraint(expr=m.x >= 2)

        m.disjunction = Disjunction(expr=[m.disjunct1, m.disjunct2])

        return m
Example #14
    def _generate_model(self):
        self.model = ConcreteModel()
        model = self.model
        model._name = self.description

        model.x = Var(within=Binary)
        model.y = Var(within=Binary)
        model.z = Var(within=Binary)

        model.o = Objective(expr=-model.x - model.y - model.z)

        model.c1 = Constraint(expr=model.x + model.y <= 1)
        model.c2 = Constraint(expr=model.x + model.z <= 1)
        model.c3 = Constraint(expr=model.y + model.z <= 1)
        model.c4 = Constraint(expr=model.x + model.y + model.z >= 1.5)
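        # Note (added): c1-c3 allow at most one of x, y, z to be 1, so
        # x + y + z <= 1 and c4 (>= 1.5) makes the model integer-infeasible,
        # although the LP relaxation is feasible at x = y = z = 0.5.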
Example #15
def makeNestedDisjunctions_NestedDisjuncts():
    m = ConcreteModel()
    m.x = Var(bounds=(0, 2))
    m.obj = Objective(expr=m.x)
    m.d1 = Disjunct()
    m.d1.c = Constraint(expr=m.x >= 1)
    m.d2 = Disjunct()
    m.d2.c = Constraint(expr=m.x >= 1.1)
    m.d1.d3 = Disjunct()
    m.d1.d3.c = Constraint(expr=m.x >= 1.2)
    m.d1.d4 = Disjunct()
    m.d1.d4.c = Constraint(expr=m.x >= 1.3)
    m.disj = Disjunction(expr=[m.d1, m.d2])
    m.d1.disj2 = Disjunction(expr=[m.d1.d3, m.d1.d4])
    return m
Example #16
    def _generate_model(self):
        self.model = ConcreteModel()
        model = self.model
        model._name = self.description

        model.x = Var()
        model.y = Var()

        model.obj = Objective(expr=model.y)
        model.p = Piecewise(model.y, model.x,
                            pw_pts=[-1,0,1],
                            f_rule=[1,0.5,1],
                            pw_repn='SOS2',
                            pw_constr_type='LB',
                            unbounded_domain_var=True)
Example #17
    def test_add_column_exceptions(self):
        m = ConcreteModel()
        m.x = Var()
        m.c = Constraint(expr=(0, m.x, 1))
        m.ci = Constraint([1,2], rule=lambda m,i:(0,m.x,i+1))
        m.cd = Constraint(expr=(0, -m.x, 1))
        m.cd.deactivate()
        m.obj = Objective(expr=-m.x)

        opt = SolverFactory('cplex_persistent')

        # set_instance not called
        self.assertRaises(RuntimeError, opt.add_column, m, m.x, 0, [m.c], [1])

        opt.set_instance(m)

        m2 = ConcreteModel()
        m2.y = Var()
        m2.c = Constraint(expr=(0,m.x,1))

        # different model than attached to opt
        self.assertRaises(RuntimeError, opt.add_column, m2, m2.y, 0, [], [])
        # pyomo var attached to different model
        self.assertRaises(RuntimeError, opt.add_column, m, m2.y, 0, [], [])

        z = Var()
        # pyomo var floating
        self.assertRaises(RuntimeError, opt.add_column, m, z, -2, [m.c, z], [1])

        m.y = Var()
        # len(coefficients) == len(constraints)
        self.assertRaises(RuntimeError, opt.add_column, m, m.y, -2, [m.c], [1,2])
        self.assertRaises(RuntimeError, opt.add_column, m, m.y, -2, [m.c, z], [1])

        # add indexed constraint
        self.assertRaises(AttributeError, opt.add_column, m, m.y, -2, [m.ci], [1])
        # add something not a _ConstraintData
        self.assertRaises(AttributeError, opt.add_column, m, m.y, -2, [m.x], [1])

        # constraint not on solver model
        self.assertRaises(KeyError, opt.add_column, m, m.y, -2, [m2.c], [1])

        # inactive constraint
        self.assertRaises(KeyError, opt.add_column, m, m.y, -2, [m.cd], [1])

        opt.add_var(m.y)
        # var already in solver model
        self.assertRaises(RuntimeError, opt.add_column, m, m.y, -2, [m.c], [1])
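For contrast with the failure cases above, a sketch of a call that should succeed once set_instance has been made (coefficient values illustrative):

m.w = Var()  # a fresh variable not yet known to the solver
opt.add_column(m, m.w, -2, [m.c], [1])  # objective coefficient -2, coefficient 1 in m.c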
Example #18
def init_max_binaries(solve_data, config):
    """Initialize by turning on as many binary variables as possible.

    The user would usually want to call _solve_NLP_subproblem after an
    invocation of this function.

    """
    m = solve_data.working_model.clone()
    m.dual.deactivate()
    MindtPy = m.MindtPy_utils
    solve_data.mip_subiter += 1
    config.logger.info("MILP %s: maximize value of binaries" %
                       (solve_data.mip_iter))
    for c in MindtPy.constraint_list:
        if c.body.polynomial_degree() not in (1, 0):
            c.deactivate()
    objective = next(m.component_data_objects(Objective, active=True))
    objective.deactivate()
    binary_vars = (v for v in m.component_data_objects(ctype=Var)
                   if v.is_binary() and not v.fixed)
    MindtPy.MindtPy_max_binary_obj = Objective(
        expr=sum(v for v in binary_vars), sense=maximize)

    getattr(m, 'ipopt_zL_out', _DoNothing()).deactivate()
    getattr(m, 'ipopt_zU_out', _DoNothing()).deactivate()

    opt = SolverFactory(config.mip_solver)
    if isinstance(opt, PersistentSolver):
        opt.set_instance(m)
    results = opt.solve(m, options=config.mip_solver_args)

    solve_terminate_cond = results.solver.termination_condition
    if solve_terminate_cond is tc.optimal:
        copy_var_list_values(
            MindtPy.variable_list,
            solve_data.working_model.MindtPy_utils.variable_list, config)

    elif solve_terminate_cond is tc.infeasible:
        raise ValueError('MILP master problem is infeasible. '
                         'Problem may have no more feasible '
                         'binary configurations.')
    else:
        raise ValueError(
            'MindtPy unable to handle MILP master termination condition '
            'of %s. Solver message: %s' %
            (solve_terminate_cond, results.solver.message))
Example #19
def define_model(**kwds):

    model = ConcreteModel()

    model.x1 = Var(bounds=(0, 6))  # domain variable
    model.x2 = Var(bounds=(0, 6))  # domain variable
    model.x3 = Var(bounds=(0, 6))  # domain variable
    model.x4 = Var(bounds=(0, 6))  # domain variable

    model.Fx1 = Var()  # range variable
    model.Fx2 = Var()  # range variable
    model.Fx3 = Var()  # range variable
    model.Fx4 = Var()  # range variable
    model.p = Param(initialize=1.0)

    model.obj = Objective(expr=model.Fx1 + model.Fx2 + model.Fx3 + model.Fx4,
                          sense=kwds.pop('sense', maximize))

    model.piecewise1 = Piecewise(model.Fx1,
                                 model.x1,
                                 pw_pts=DOMAIN_PTS,
                                 f_rule=F,
                                 **kwds)
    model.piecewise2 = Piecewise(model.Fx2,
                                 model.x2,
                                 pw_pts=DOMAIN_PTS,
                                 f_rule=F,
                                 **kwds)

    model.piecewise3 = Piecewise(model.Fx3,
                                 model.x3,
                                 pw_pts=DOMAIN_PTS,
                                 f_rule=F,
                                 **kwds)

    model.piecewise4 = Piecewise(model.Fx4,
                                 model.x4,
                                 pw_pts=DOMAIN_PTS,
                                 f_rule=F,
                                 **kwds)

    # Fix the answer for testing purposes
    model.set_answer_constraint1 = Constraint(expr=model.x1 == 0.0)
    model.set_answer_constraint2 = Constraint(expr=model.x2 == 3.0)
    model.set_answer_constraint3 = Constraint(expr=model.x3 == 5.5)
    model.set_answer_constraint4 = Constraint(expr=model.x4 == 6.0)

    return model
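The snippet above assumes module-level DOMAIN_PTS and F. A minimal sketch of compatible definitions (illustrative only, not the originals):

DOMAIN_PTS = [0.0, 1.5, 3.0, 4.5, 6.0]  # breakpoints spanning the (0, 6) bounds

def F(model, x):
    # value of the function being piecewise-linearized, evaluated at breakpoint x
    return x * (x - 3.0)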
Example #20
    def build_mip(self):
        """Build the model <b>object</b>."""
        self.model = AbstractModel()

        self.model.J = Set()
        self.model.K = Set(self.model.J)

        def jk_init(m):
            return [(j, k) for j in m.J for k in m.K[j]]

        self.model.JK = Set(initialize=jk_init, dimen=None)

        self.model.y_pred = Param()
        self.model.epsilon = Param()
        self.model.max_cost = Var()
        self.model.w = Param(self.model.J)
        self.model.a = Param(self.model.JK)
        self.model.c = Param(self.model.JK)
        self.model.u = Var(self.model.JK, within=Binary)

        # Make sure only one action is on at a time.
        def c1Rule(m, j):
            return (sum([m.u[j, k] for k in m.K[j]])) == 1

        # 2.b: Action sets must flip the prediction of a linear classifier.
        def c2Rule(m):
            return (sum((m.u[j, k] * m.a[j, k] * m.w[j])
                        for j, k in m.JK) >= -m.y_pred)

        # instantiate max cost
        def maxcost_rule(m, j, k):
            return m.max_cost >= (m.u[j, k] * m.c[j, k])

        # Set up objective for total sum.
        def obj_rule_percentile(m):
            return sum(m.u[j, k] * m.c[j, k] for j, k in m.JK)

        # Set up objective for max cost.
        def obj_rule_max(m):
            return (sum(m.epsilon * m.u[j, k] * m.c[j, k]
                        for j, k in m.JK) + (1 - m.epsilon) * m.max_cost)

        ##
        self.model.g = Objective(rule=obj_rule_max, sense=minimize)
        self.model.c1 = Constraint(self.model.J, rule=c1Rule)
        self.model.c2 = Constraint(rule=c2Rule)
        self.model.c3 = Constraint(self.model.JK, rule=maxcost_rule)
        self.built = True
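Since build_mip creates an AbstractModel, data must be supplied before solving; a hedged sketch (the data file name and solver choice are hypothetical):

instance = self.model.create_instance('action_set.dat')  # hypothetical data file
SolverFactory('cbc').solve(instance)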
Example #21
def makeBetweenStepsPaperExample_DeclareVarOnDisjunct():
    """Exactly the same model as above, but declaring the Disjuncts explicitly 
    and declaring the variables on one of them.
    """
    m = ConcreteModel()
    m.I = RangeSet(1, 4)
    m.disj1 = Disjunct()
    m.disj1.x = Var(m.I, bounds=(-2, 6))
    m.disj1.c = Constraint(expr=sum(m.disj1.x[i]**2 for i in m.I) <= 1)
    m.disj2 = Disjunct()
    m.disj2.c = Constraint(expr=sum((3 - m.disj1.x[i])**2 for i in m.I) <= 1)
    m.disjunction = Disjunction(expr=[m.disj1, m.disj2])

    m.obj = Objective(expr=m.disj1.x[2] - m.disj1.x[1], sense=maximize)

    return m
Example #22
    def test_divide_by_mutable(self):
        #
        # Test from https://github.com/Pyomo/pyomo/issues/153
        #
        m = ConcreteModel()
        m.x = Var(bounds=(1,5))
        m.p = Param(initialize=100, mutable=True)
        m.con = Constraint(expr=exp(5*(1/m.x - 1/m.p))<=10)
        m.obj = Objective(expr=m.x**2)

        test = gar(m.con.body)
        self.assertEqual(test.constant, 0)
        self.assertEqual(test.linear_vars, tuple())
        self.assertEqual(test.linear_coefs, tuple())
        self.assertEqual(set(id(v) for v in test.nonlinear_vars), set([id(m.x)]))
        self.assertIs(test.nonlinear_expr, m.con.body)
Example #23
def generate_norm1_objective_function(model,
                                      setpoint_model,
                                      discrete_only=False):
    r"""This function generates objective (PF-OA main problem) for minimum Norm1 distance to setpoint_model.

    Norm1 distance of (x,y) = \sum_i |x_i - y_i|.

    Parameters
    ----------
    model : Pyomo model
        The model that needs new objective function.
    setpoint_model : Pyomo model
        The model that provides the base point for us to calculate the distance.
    discrete_only : bool, optional
        Whether to only optimize on distance between the discrete variables, by default False.

    Returns
    -------
    Objective
        The norm1 objective function.
    """
    # skip objective_value variable and slack_var variables
    var_filter = (lambda v: v.is_integer()) if discrete_only \
        else (lambda v: 'MindtPy_utils.objective_value' not in v.name and
              'MindtPy_utils.feas_opt.slack_var' not in v.name)
    model_vars = list(filter(var_filter, model.MindtPy_utils.variable_list))
    setpoint_vars = list(
        filter(var_filter, setpoint_model.MindtPy_utils.variable_list))
    assert len(model_vars) == len(setpoint_vars), \
        'Trying to generate a Norm-1 objective function for models with different numbers of variables'
    model.MindtPy_utils.del_component('L1_obj')
    obj_blk = model.MindtPy_utils.L1_obj = Block()
    obj_blk.L1_obj_idx = RangeSet(len(model_vars))
    obj_blk.L1_obj_var = Var(obj_blk.L1_obj_idx,
                             domain=Reals,
                             bounds=(0, None))
    obj_blk.abs_reform = ConstraintList()
    for idx, v_model, v_setpoint in zip(obj_blk.L1_obj_idx, model_vars,
                                        setpoint_vars):
        obj_blk.abs_reform.add(
            expr=v_model - v_setpoint.value >= -obj_blk.L1_obj_var[idx])
        obj_blk.abs_reform.add(
            expr=v_model - v_setpoint.value <= obj_blk.L1_obj_var[idx])

    return Objective(expr=sum(obj_blk.L1_obj_var[idx]
                              for idx in obj_blk.L1_obj_idx))
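A usage sketch (model and setpoint_model are stand-ins for MindtPy's working models); the returned Objective still has to be attached to a block to become active:

model.MindtPy_utils.L1_obj_fun = generate_norm1_objective_function(
    model, setpoint_model, discrete_only=True)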
Example #24
def grossmann_oneDisj():
    m = ConcreteModel()
    m.x = Var(bounds=(0,20))
    m.y = Var(bounds=(0, 20))
    m.disjunct1 = Disjunct()
    m.disjunct1.constraintx = Constraint(expr=inequality(0, m.x, 2))
    m.disjunct1.constrainty = Constraint(expr=inequality(7, m.y, 10))

    m.disjunct2 = Disjunct()
    m.disjunct2.constraintx = Constraint(expr=inequality(8, m.x, 10))
    m.disjunct2.constrainty = Constraint(expr=inequality(0, m.y, 3))

    m.disjunction = Disjunction(expr=[m.disjunct1, m.disjunct2])

    m.objective = Objective(expr=m.x + 2*m.y, sense=maximize)

    return m
Example #25
    def add_cut(self, first=False):
        self._iter += 1
        model = self._model

        self._wprod[self._iter] = self._compute_weight_weight_inner_product()
        if first is True:
            self._alphas[self._iter] = -(
                self._compute_objective_term() +
                (self._ph._rho / 2.0) * self._wprod[self._iter])
        else:
            self._alphas[self._iter] = (-self._compute_objective_term() +
                                        self._compute_xbar_weight_inner_product())

        if self._solved is True:
            if self._compute_convergence() is True:
                return True

        model.del_component('cuts')
        model.cuts = Set(initialize=sorted(self._alphas.keys()))
        model.del_component('beta')
        model.beta = Var(model.cuts, within=NonNegativeReals)
        model.del_component('beta_sum_one')
        model.beta_sum_one = Constraint(expr=sum_product(model.beta) == 1)
        model.del_component('obj')
        model.obj = Objective(expr=sum(self._alphas[i] * model.beta[i]
                                       for i in model.cuts))

        self._wbars[self._iter] = {}
        for stage in self._ph._scenario_tree._stages[:-1]:  # all blended stages
            for tree_node in stage._tree_nodes:
                self._wbars[self._iter][tree_node._name] = copy.deepcopy(
                    tree_node._wbars)
                block = getattr(model, tree_node._name)

                def _c_rule(block, i):
                    lhs = sum(model.beta[k] * self._wbars[k][tree_node._name][
                        block.id_to_var[i][0]][block.id_to_var[i][1]]
                              for k in model.beta.index_set())
                    if not isinstance(lhs, ExpressionBase):
                        return Constraint.Skip
                    return lhs == 0

                block.del_component('con')
                block.con = Constraint(block.var_index, rule=_c_rule)
        return False
Example #26
def init_max_binaries(solve_data, config):
    """Initialize by maximizing binary variables and disjuncts.

    This function activates as many binary variables and disjuncts as
    feasible.

    """
    solve_data.mip_iteration += 1
    linear_GDP = solve_data.linear_GDP.clone()
    config.logger.info("Generating initial linear GDP approximation by "
                       "solving a subproblem that maximizes "
                       "the sum of all binary and logical variables.")
    # Set up binary maximization objective
    linear_GDP.GDPopt_utils.objective.deactivate()
    binary_vars = (v for v in linear_GDP.component_data_objects(
        ctype=Var, descend_into=(Block, Disjunct))
                   if v.is_binary() and not v.fixed)
    linear_GDP.GDPopt_utils.max_binary_obj = Objective(expr=sum(binary_vars),
                                                       sense=maximize)

    # Solve
    mip_results = solve_linear_GDP(linear_GDP, solve_data, config)
    if mip_results:
        _, mip_var_values = mip_results
        # use the mip_var_values to create the NLP subproblem
        nlp_model = solve_data.working_model.clone()
        # copy in the discrete variable values
        copy_and_fix_mip_values_to_nlp(nlp_model.GDPopt_utils.working_var_list,
                                       mip_var_values, config)
        TransformationFactory('gdp.fix_disjuncts').apply_to(nlp_model)
        solve_data.nlp_iteration += 1
        nlp_result = solve_NLP(nlp_model, solve_data, config)
        nlp_feasible, nlp_var_values, nlp_duals = nlp_result
        if nlp_feasible:
            update_nlp_progress_indicators(nlp_model, solve_data, config)
            add_outer_approximation_cuts(nlp_var_values, nlp_duals, solve_data,
                                         config)
        add_integer_cut(mip_var_values,
                        solve_data,
                        config,
                        feasible=nlp_feasible)
    else:
        config.logger.info(
            "Linear relaxation for initialization was infeasible. "
            "Problem is infeasible.")
        return False
Example #27
def makeNestedDisjunctions_NestedDisjuncts():
    """Same as makeNestedDisjunctions_FlatDisjuncts except that the disjuncts
    of the nested disjunction are declared on the parent disjunct."""
    m = ConcreteModel()
    m.x = Var(bounds=(0, 2))
    m.obj = Objective(expr=m.x)
    m.d1 = Disjunct()
    m.d1.c = Constraint(expr=m.x >= 1)
    m.d2 = Disjunct()
    m.d2.c = Constraint(expr=m.x >= 1.1)
    m.d1.d3 = Disjunct()
    m.d1.d3.c = Constraint(expr=m.x >= 1.2)
    m.d1.d4 = Disjunct()
    m.d1.d4.c = Constraint(expr=m.x >= 1.3)
    m.disj = Disjunction(expr=[m.d1, m.d2])
    m.d1.disj2 = Disjunction(expr=[m.d1.d3, m.d1.d4])
    return m
Example #28
def makeNestedDisjunctions_FlatDisjuncts():
    """Two-term SimpleDisjunction where one of the disjuncts contains a nested
    SimpleDisjunction, the disjuncts of which are declared on the model"""
    m = ConcreteModel()
    m.x = Var(bounds=(0, 2))
    m.obj = Objective(expr=m.x)
    m.d1 = Disjunct()
    m.d1.c = Constraint(expr=m.x >= 1)
    m.d2 = Disjunct()
    m.d2.c = Constraint(expr=m.x >= 1.1)
    m.d3 = Disjunct()
    m.d3.c = Constraint(expr=m.x >= 1.2)
    m.d4 = Disjunct()
    m.d4.c = Constraint(expr=m.x >= 1.3)
    m.disj = Disjunction(expr=[m.d1, m.d2])
    m.d1.disj = Disjunction(expr=[m.d3, m.d4])
    return m
Example #29
    def do_setup(self):
        global tmpdir
        tmpdir = os.getcwd()
        os.chdir(currdir)
        TempfileManager.sequential_files(0)

        self.asl = pyomo.opt.SolverFactory('asl:ipopt', keepfiles=True)
        self.ipopt = pyomo.opt.SolverFactory('ipopt', keepfiles=True)

        # The sisser CUTEr instance
        # Formulated in Pyomo by Carl D. Laird, Daniel P. Word, Brandon C. Barrera and Saumyajyoti Chaudhuri
        # Taken from:

        #   Source:
        #   F.S. Sisser,
        #   "Elimination of bounds in optimization problems by transforming
        #   variables",
        #   Mathematical Programming 20:110-121, 1981.

        #   See also Buckley#216 (p. 91)

        #   SIF input: Ph. Toint, Dec 1989.

        #   classification OUR2-AN-2-0

        sisser_instance = ConcreteModel()

        sisser_instance.N = RangeSet(1, 2)
        sisser_instance.xinit = Param(sisser_instance.N,
                                      initialize={1: 1.0, 2: 0.1})

        def fa(model, i):
            return value(model.xinit[i])

        sisser_instance.x = Var(sisser_instance.N, initialize=fa)

        def f(model):
            return 3 * model.x[1]**4 - 2 * (model.x[1] *
                                            model.x[2])**2 + 3 * model.x[2]**4

        sisser_instance.f = Objective(rule=f, sense=minimize)

        self.sisser_instance = sisser_instance
Example #30
def to_break_constraint_tolerances():
    m = ConcreteModel()
    m.x = Var(bounds=(0, 130))
    m.y = Var(bounds=(0, 130))
    m.disjunct1 = Disjunct()
    m.disjunct1.constraintx = Constraint(expr=inequality(0, m.x, 2))
    m.disjunct1.constrainty = Constraint(expr=inequality(117, m.y, 127))

    m.disjunct2 = Disjunct()
    m.disjunct2.constraintx = Constraint(expr=inequality(118, m.x, 120))
    m.disjunct2.constrainty = Constraint(expr=inequality(0, m.y, 3))

    m.disjunction = Disjunction(expr=[m.disjunct1, m.disjunct2])

    m.objective = Objective(expr=m.x + 2*m.y, sense=maximize)

    return m