def setup_main(solve_data, config, fp, regularization_problem):
    """Set up main problem/main regularization problem for OA, ECP,
    Feasibility Pump and ROA methods.

    Exactly one of three objectives is installed on the MIP, depending on the
    flags: a feasibility-pump distance objective (``fp``), a regularization
    projection objective (``regularization_problem``), or the ordinary
    OA/ECP main objective (neither flag).

    Args:
        solve_data (MindtPySolveData): data container that holds solve-instance data.
        config (ConfigBlock): the specific configurations for MindtPy.
        fp (bool): whether it is in the loop of feasibility pump.
        regularization_problem (bool): whether it is solving a regularization problem.
    """
    MindtPy = solve_data.mip.MindtPy_utils

    # Only constraints of polynomial degree 0 or 1 stay active in the MIP
    # main problem; nonlinear constraints are deactivated and represented
    # by the cuts stored on MindtPy.cuts instead.
    for c in MindtPy.constraint_list:
        if c.body.polynomial_degree() not in {1, 0}:
            c.deactivate()
    MindtPy.cuts.activate()

    # +1 for minimization, -1 for maximization; orients the slack-penalty
    # term added to the main objective below.
    sign_adjust = 1 if solve_data.objective_sense == minimize else -1

    # Remove the objective left over from the previous iteration before a
    # fresh one is assigned (the same component name is reused each pass).
    MindtPy.del_component('mip_obj')
    if regularization_problem and config.single_tree:
        MindtPy.del_component('loa_proj_mip_obj')
        MindtPy.cuts.del_component('obj_reg_estimate')
    if config.add_regularization is not None and config.add_no_good_cuts:
        # No-good cuts are enforced only while solving the regularization
        # problem; they are switched off again for the ordinary main problem.
        if regularization_problem:
            MindtPy.cuts.no_good_cuts.activate()
        else:
            MindtPy.cuts.no_good_cuts.deactivate()

    if fp:
        # Feasibility-pump main problem: minimize the distance, in the
        # configured norm, between the MIP solution and the working model.
        MindtPy.del_component('fp_mip_obj')
        if config.fp_main_norm == 'L1':
            MindtPy.fp_mip_obj = generate_norm1_objective_function(
                solve_data.mip,
                solve_data.working_model,
                discrete_only=config.fp_discrete_only)
        elif config.fp_main_norm == 'L2':
            MindtPy.fp_mip_obj = generate_norm2sq_objective_function(
                solve_data.mip,
                solve_data.working_model,
                discrete_only=config.fp_discrete_only)
        elif config.fp_main_norm == 'L_infinity':
            MindtPy.fp_mip_obj = generate_norm_inf_objective_function(
                solve_data.mip,
                solve_data.working_model,
                discrete_only=config.fp_discrete_only)
    elif regularization_problem:
        # Regularization (projection) problem: the objective measures the
        # distance to the best solution found so far, either by a norm or by
        # a Lagrangean-based model, per config.add_regularization.
        if MindtPy.objective_list[0].expr.polynomial_degree() in {1, 0}:
            # Reactivate the epigraph constraint so objective_value is tied
            # to the original (linear) objective expression.
            MindtPy.objective_constr.activate()
        if config.add_regularization == 'level_L1':
            MindtPy.loa_proj_mip_obj = generate_norm1_objective_function(
                solve_data.mip,
                solve_data.best_solution_found,
                discrete_only=False)
        elif config.add_regularization == 'level_L2':
            MindtPy.loa_proj_mip_obj = generate_norm2sq_objective_function(
                solve_data.mip,
                solve_data.best_solution_found,
                discrete_only=False)
        elif config.add_regularization == 'level_L_infinity':
            MindtPy.loa_proj_mip_obj = generate_norm_inf_objective_function(
                solve_data.mip,
                solve_data.best_solution_found,
                discrete_only=False)
        elif config.add_regularization in {
                'grad_lag', 'hess_lag', 'hess_only_lag', 'sqp_lag'}:
            MindtPy.loa_proj_mip_obj = generate_lag_objective_function(
                solve_data.mip,
                solve_data.best_solution_found,
                config,
                solve_data,
                discrete_only=False)
        # Level constraint: keep the objective estimate within the interval
        # spanned by the current bounds, weighted by level_coef.
        if solve_data.objective_sense == minimize:
            MindtPy.cuts.obj_reg_estimate = Constraint(
                expr=MindtPy.objective_value <= (1 - config.level_coef) *
                solve_data.UB + config.level_coef * solve_data.LB)
        else:
            MindtPy.cuts.obj_reg_estimate = Constraint(
                expr=MindtPy.objective_value >= (1 - config.level_coef) *
                solve_data.LB + config.level_coef * solve_data.UB)
    else:
        # Ordinary OA/ECP main problem.
        if config.add_slack:
            MindtPy.del_component('aug_penalty_expr')
            # Penalize the OA-cut slack variables so the relaxation is
            # discouraged from violating the linearized constraints.
            MindtPy.aug_penalty_expr = Expression(
                expr=sign_adjust * config.OA_penalty_factor *
                sum(v for v in MindtPy.cuts.slack_vars[...]))
        main_objective = MindtPy.objective_list[-1]
        MindtPy.mip_obj = Objective(
            expr=main_objective.expr +
            (MindtPy.aug_penalty_expr if config.add_slack else 0),
            sense=solve_data.objective_sense)

        if config.use_dual_bound:
            # Delete previously added dual bound constraint.
            MindtPy.cuts.del_component('dual_bound')
            if solve_data.objective_sense == minimize:
                MindtPy.cuts.dual_bound = Constraint(
                    expr=main_objective.expr +
                    (MindtPy.aug_penalty_expr if config.add_slack else 0) >=
                    solve_data.LB,
                    doc='Objective function expression should improve on the best found dual bound')
            else:
                MindtPy.cuts.dual_bound = Constraint(
                    expr=main_objective.expr +
                    (MindtPy.aug_penalty_expr if config.add_slack else 0) <=
                    solve_data.UB,
                    doc='Objective function expression should improve on the best found dual bound')
def solve_fp_subproblem(solve_data, config):
    """Solve the feasibility pump NLP subproblem.

    Clones the working model into 'fp_nlp', relaxes its integer variables,
    deactivates trivial constraints, and solves the resulting NLP with the
    configured solver.

    Parameters
    ----------
    solve_data : MindtPySolveData
        Data container that holds solve-instance data.
    config : ConfigBlock
        The specific configurations for MindtPy.

    Returns
    -------
    fp_nlp : Pyomo model
        Fixed-NLP from the model.
    results : SolverResults
        Results from solving the fixed-NLP subproblem.
    """
    fp_nlp = solve_data.working_model.clone()
    util_block = fp_nlp.MindtPy_utils

    # Disable the original objective; the FP subproblem optimizes a
    # distance objective instead.
    fp_nlp.MindtPy_utils.objective_list[-1].deactivate()

    # Require the objective value to stay within the incumbent bound.
    objective_sum = sum(fp_nlp.MindtPy_utils.objective_value[:])
    if solve_data.objective_sense == minimize:
        fp_nlp.improving_objective_cut = Constraint(
            expr=objective_sum <= solve_data.UB)
    else:
        fp_nlp.improving_objective_cut = Constraint(
            expr=objective_sum >= solve_data.LB)

    # Optional norm constraint guaranteeing monotonicity of the norm
    # objective value sequence across iterations.
    # Ref: 'A storm of feasibility pumps for nonconvex MINLP'
    # https://doi.org/10.1007/s10107-012-0608-x
    # The norm type matches the norm objective of the FP-main problem.
    if config.fp_norm_constraint:
        generate_norm_constraint(fp_nlp, solve_data, config)

    util_block.fp_nlp_obj = generate_norm2sq_objective_function(
        fp_nlp, solve_data.mip, discrete_only=config.fp_discrete_only)

    util_block.cuts.deactivate()
    TransformationFactory('core.relax_integer_vars').apply_to(fp_nlp)
    try:
        TransformationFactory(
            'contrib.deactivate_trivial_constraints').apply_to(
                fp_nlp,
                tmp=True,
                ignore_infeasible=False,
                tolerance=config.constraint_tolerance)
    except ValueError:
        # Trivial-constraint screening proved infeasibility; report it
        # without invoking the NLP solver.
        config.logger.warning(
            'infeasibility detected in deactivate_trivial_constraints')
        infeasible_results = SolverResults()
        infeasible_results.solver.termination_condition = tc.infeasible
        return fp_nlp, infeasible_results

    # Solve the NLP.
    nlp_solver = SolverFactory(config.nlp_solver)
    solver_args = dict(config.nlp_solver_args)
    set_solver_options(nlp_solver, solve_data, config, solver_type='nlp')
    with SuppressInfeasibleWarning(), \
            time_code(solve_data.timing, 'fp subproblem'):
        results = nlp_solver.solve(
            fp_nlp, tee=config.nlp_solver_tee, **solver_args)
    return fp_nlp, results
def solve_fp_subproblem(solve_data, config):
    """Solves the feasibility pump NLP subproblem.

    This function sets up the 'fp_nlp' by relaxing integer variables, adds
    an improving objective cut and (optionally) a norm constraint,
    deactivates trivial constraints, and then solves the NLP model.

    Parameters
    ----------
    solve_data : MindtPy Data Container
        data container that holds solve-instance data
    config : ConfigBlock
        contains the specific configurations for the algorithm

    Returns
    -------
    fp_nlp : Pyomo model
        Fixed-NLP from the model
    results : Pyomo results object
        result from solving the Fixed-NLP
    """
    fp_nlp = solve_data.working_model.clone()
    MindtPy = fp_nlp.MindtPy_utils
    config.logger.info('FP-NLP %s: Solve feasibility pump NLP subproblem.'
                       % (solve_data.fp_iter,))

    # Set up NLP: drop the original objective and require the objective
    # value to be no worse than the current incumbent bound.
    fp_nlp.MindtPy_utils.objective_list[-1].deactivate()
    if solve_data.objective_sense == minimize:
        fp_nlp.improving_objective_cut = Constraint(
            expr=fp_nlp.MindtPy_utils.objective_value <= solve_data.UB)
    else:
        fp_nlp.improving_objective_cut = Constraint(
            expr=fp_nlp.MindtPy_utils.objective_value >= solve_data.LB)

    # Add norm_constraint, which guarantees the monotonicity of the norm
    # objective value sequence of all iterations.
    # Ref: Paper 'A storm of feasibility pumps for nonconvex MINLP'
    # The norm type is consistent with the norm objective of the FP-main problem.
    if config.fp_norm_constraint:
        if config.fp_main_norm == 'L1':
            # TODO: check if we can access the block defined in FP-main problem
            generate_norm1_norm_constraint(
                fp_nlp, solve_data.mip, config, discrete_only=True)
        elif config.fp_main_norm == 'L2':
            # Require the squared L2 distance to the MIP point to shrink by
            # at least the factor fp_norm_constraint_coef relative to the
            # previous (numeric) distance.
            fp_nlp.norm_constraint = Constraint(
                expr=sum((nlp_var - mip_var.value)**2 -
                         config.fp_norm_constraint_coef *
                         (nlp_var.value - mip_var.value)**2
                         for nlp_var, mip_var in zip(
                             fp_nlp.MindtPy_utils.discrete_variable_list,
                             solve_data.mip.MindtPy_utils.discrete_variable_list)) <= 0)
        elif config.fp_main_norm == 'L_infinity':
            fp_nlp.norm_constraint = ConstraintList()
            rhs = config.fp_norm_constraint_coef * max(
                nlp_var.value - mip_var.value
                for nlp_var, mip_var in zip(
                    fp_nlp.MindtPy_utils.discrete_variable_list,
                    solve_data.mip.MindtPy_utils.discrete_variable_list))
            # NOTE(review): only the one-sided difference
            # nlp_var - mip_var.value is bounded here (no absolute value /
            # symmetric bound) — confirm this is intended for the L_infinity
            # contraction.
            for nlp_var, mip_var in zip(
                    fp_nlp.MindtPy_utils.discrete_variable_list,
                    solve_data.mip.MindtPy_utils.discrete_variable_list):
                fp_nlp.norm_constraint.add(nlp_var - mip_var.value <= rhs)

    # The NLP objective is the squared L2 distance to the current MIP point.
    MindtPy.fp_nlp_obj = generate_norm2sq_objective_function(
        fp_nlp, solve_data.mip, discrete_only=config.fp_discrete_only)

    MindtPy.cuts.deactivate()
    TransformationFactory('core.relax_integer_vars').apply_to(fp_nlp)
    try:
        TransformationFactory('contrib.deactivate_trivial_constraints').apply_to(
            fp_nlp,
            tmp=True,
            ignore_infeasible=False,
            tolerance=config.constraint_tolerance)
    except ValueError:
        # Trivial-constraint screening proved infeasibility; return an
        # infeasible result without invoking the NLP solver.
        config.logger.warning(
            'infeasibility detected in deactivate_trivial_constraints')
        results = SolverResults()
        results.solver.termination_condition = tc.infeasible
        return fp_nlp, results

    # Solve the NLP
    nlpopt = SolverFactory(config.nlp_solver)
    nlp_args = dict(config.nlp_solver_args)
    set_solver_options(nlpopt, solve_data, config, solver_type='nlp')
    with SuppressInfeasibleWarning():
        with time_code(solve_data.timing, 'fp subproblem'):
            results = nlpopt.solve(
                fp_nlp, tee=config.nlp_solver_tee, **nlp_args)
    return fp_nlp, results
def setup_main(solve_data, config, fp, regularization_problem):
    """Set up main problem/main regularization problem for OA, ECP,
    Feasibility Pump and ROA methods.

    Exactly one of three objectives is installed on the MIP, depending on
    the flags: a feasibility-pump distance objective (``fp``), a
    regularization projection objective (``regularization_problem``), or
    the ordinary OA/ECP main objective (neither flag).

    Parameters
    ----------
    solve_data : MindtPySolveData
        Data container that holds solve-instance data.
    config : ConfigBlock
        The specific configurations for MindtPy.
    fp : bool
        Whether it is in the loop of feasibility pump.
    regularization_problem : bool
        Whether it is solving a regularization problem.
    """
    MindtPy = solve_data.mip.MindtPy_utils

    # Constraints whose polynomial degree is not in the set allowed for the
    # MIP main problem are deactivated; their effect enters via the cuts.
    for c in MindtPy.constraint_list:
        if c.body.polynomial_degree(
        ) not in solve_data.mip_constraint_polynomial_degree:
            c.deactivate()
    MindtPy.cuts.activate()

    # +1 for minimization, -1 for maximization; orients the slack-penalty
    # term added to the main objective below.
    sign_adjust = 1 if solve_data.objective_sense == minimize else -1

    # Remove the objective left over from the previous iteration before a
    # fresh one is assigned (the same component name is reused each pass).
    MindtPy.del_component('mip_obj')
    if regularization_problem and config.single_tree:
        MindtPy.del_component('loa_proj_mip_obj')
        MindtPy.cuts.del_component('obj_reg_estimate')
    if config.add_regularization is not None and config.add_no_good_cuts:
        # No-good cuts are enforced only while solving the regularization
        # problem; they are switched off again for the ordinary main problem.
        if regularization_problem:
            MindtPy.cuts.no_good_cuts.activate()
        else:
            MindtPy.cuts.no_good_cuts.deactivate()

    if fp:
        # Feasibility-pump main problem: minimize the distance, in the
        # configured norm, between the MIP solution and the working model.
        MindtPy.del_component('fp_mip_obj')
        if config.fp_main_norm == 'L1':
            MindtPy.fp_mip_obj = generate_norm1_objective_function(
                solve_data.mip,
                solve_data.working_model,
                discrete_only=config.fp_discrete_only)
        elif config.fp_main_norm == 'L2':
            MindtPy.fp_mip_obj = generate_norm2sq_objective_function(
                solve_data.mip,
                solve_data.working_model,
                discrete_only=config.fp_discrete_only)
        elif config.fp_main_norm == 'L_infinity':
            MindtPy.fp_mip_obj = generate_norm_inf_objective_function(
                solve_data.mip,
                solve_data.working_model,
                discrete_only=config.fp_discrete_only)
    elif regularization_problem:
        # The epigraph constraint is very "flat" for branching rules.
        # In ROA, if the objective function is linear (or quadratic when
        # quadratic_strategy = 1 or 2), the original objective function is
        # used in the MIP problem. In the MIP projection problem, we need to
        # reactivate the epigraph constraint (objective_constr).
        if MindtPy.objective_list[0].expr.polynomial_degree(
        ) in solve_data.mip_objective_polynomial_degree:
            MindtPy.objective_constr.activate()
        if config.add_regularization == 'level_L1':
            MindtPy.loa_proj_mip_obj = generate_norm1_objective_function(
                solve_data.mip,
                solve_data.best_solution_found,
                discrete_only=False)
        elif config.add_regularization == 'level_L2':
            MindtPy.loa_proj_mip_obj = generate_norm2sq_objective_function(
                solve_data.mip,
                solve_data.best_solution_found,
                discrete_only=False)
        elif config.add_regularization == 'level_L_infinity':
            MindtPy.loa_proj_mip_obj = generate_norm_inf_objective_function(
                solve_data.mip,
                solve_data.best_solution_found,
                discrete_only=False)
        elif config.add_regularization in {
                'grad_lag', 'hess_lag', 'hess_only_lag', 'sqp_lag'}:
            MindtPy.loa_proj_mip_obj = generate_lag_objective_function(
                solve_data.mip,
                solve_data.best_solution_found,
                config,
                solve_data,
                discrete_only=False)
        # Level constraint: keep the objective estimate within the interval
        # spanned by the current primal and dual bounds, weighted by
        # level_coef.
        if solve_data.objective_sense == minimize:
            MindtPy.cuts.obj_reg_estimate = Constraint(
                expr=sum(MindtPy.objective_value[:]) <=
                (1 - config.level_coef) * solve_data.primal_bound +
                config.level_coef * solve_data.dual_bound)
        else:
            MindtPy.cuts.obj_reg_estimate = Constraint(
                expr=sum(MindtPy.objective_value[:]) >=
                (1 - config.level_coef) * solve_data.primal_bound +
                config.level_coef * solve_data.dual_bound)
    else:
        # Ordinary OA/ECP main problem.
        if config.add_slack:
            MindtPy.del_component('aug_penalty_expr')
            # Penalize the OA-cut slack variables so the relaxation is
            # discouraged from violating the linearized constraints.
            MindtPy.aug_penalty_expr = Expression(
                expr=sign_adjust * config.OA_penalty_factor *
                sum(v for v in MindtPy.cuts.slack_vars[...]))
        main_objective = MindtPy.objective_list[-1]
        MindtPy.mip_obj = Objective(
            expr=main_objective.expr +
            (MindtPy.aug_penalty_expr if config.add_slack else 0),
            sense=solve_data.objective_sense)

        if config.use_dual_bound:
            # Delete previously added dual bound constraint.
            MindtPy.cuts.del_component('dual_bound')
            if solve_data.objective_sense == minimize:
                MindtPy.cuts.dual_bound = Constraint(
                    expr=main_objective.expr +
                    (MindtPy.aug_penalty_expr if config.add_slack else 0) >=
                    solve_data.dual_bound,
                    doc='Objective function expression should improve on the best found dual bound')
            else:
                MindtPy.cuts.dual_bound = Constraint(
                    expr=main_objective.expr +
                    (MindtPy.aug_penalty_expr if config.add_slack else 0) <=
                    solve_data.dual_bound,
                    doc='Objective function expression should improve on the best found dual bound')