def handle_NLP_subproblem_optimal(fixed_nlp, solve_data, config):
    """Handle an optimally-solved fixed-binary NLP subproblem.

    Copies the result to the working model, updates the incumbent bound
    (UB for minimize, LB for maximize), adds the strategy-specific cut
    (OA / PSC / GBD) plus an optional integer cut, and stores the
    solution if it improved the bound.
    """
    # Propagate the subproblem solution back onto the working model.
    copy_var_list_values(
        fixed_nlp.MindtPy_utils.variable_list,
        solve_data.working_model.MindtPy_utils.variable_list,
        config)
    # Backfill any duals the NLP solver did not supply with the
    # precomputed surrogate values in tmp_duals.
    for c in fixed_nlp.tmp_duals:
        if fixed_nlp.dual.get(c, None) is None:
            fixed_nlp.dual[c] = fixed_nlp.tmp_duals[c]
    dual_values = list(fixed_nlp.dual[c]
                       for c in fixed_nlp.MindtPy_utils.constraint_list)

    main_objective = next(
        fixed_nlp.component_data_objects(Objective, active=True))
    # A feasible fixed-binary solution tightens the primal bound:
    # the UB when minimizing, the LB when maximizing.
    if main_objective.sense == minimize:
        solve_data.UB = min(value(main_objective.expr), solve_data.UB)
        solve_data.solution_improved = \
            solve_data.UB < solve_data.UB_progress[-1]
        solve_data.UB_progress.append(solve_data.UB)
    else:
        solve_data.LB = max(value(main_objective.expr), solve_data.LB)
        solve_data.solution_improved = \
            solve_data.LB > solve_data.LB_progress[-1]
        solve_data.LB_progress.append(solve_data.LB)

    config.logger.info(
        'NLP {}: OBJ: {} LB: {} UB: {}'
        .format(solve_data.nlp_iter,
                value(main_objective.expr),
                solve_data.LB, solve_data.UB))

    if solve_data.solution_improved:
        # Clone so later modifications of fixed_nlp cannot corrupt the
        # stored incumbent.
        solve_data.best_solution_found = fixed_nlp.clone()

    # Add the linear cut
    if config.strategy == 'OA':
        copy_var_list_values(fixed_nlp.MindtPy_utils.variable_list,
                             solve_data.mip.MindtPy_utils.variable_list,
                             config)
        add_oa_cuts(solve_data.mip, dual_values, solve_data, config)
    elif config.strategy == 'PSC':
        add_psc_cut(solve_data, config)
    elif config.strategy == 'GBD':
        add_gbd_cut(solve_data, config)

    # This adds an integer cut to the feasible_integer_cuts
    # ConstraintList, which is not activated by default. However, it
    # may be activated as needed in certain situations or for certain
    # values of option flags.
    var_values = list(v.value for v in fixed_nlp.MindtPy_utils.variable_list)
    if config.add_integer_cuts:
        add_int_cut(var_values, solve_data, config, feasible=True)

    config.call_after_subproblem_feasible(fixed_nlp, solve_data)
def handle_NLP_subproblem_other_termination(fix_nlp, termination_condition,
                                            solve_data, config):
    """Case that fix-NLP is neither optimal nor infeasible (i.e. max_iterations)"""
    # Guard clause: anything other than an iteration-limit stop is a
    # condition this algorithm does not know how to recover from.
    if termination_condition is not tc.maxIterations:
        raise ValueError('MindtPy unable to handle NLP subproblem termination '
                         'condition of {}'.format(termination_condition))
    # TODO try something else? Reinitialize with different initial value?
    config.logger.info(
        'NLP subproblem failed to converge within iteration limit.')
    if config.add_integer_cuts:
        # Exclude the current discrete option from future iterations.
        current_point = [v.value for v in fix_nlp.MindtPy_utils.variable_list]
        add_int_cut(current_point, solve_data, config)
def handle_NLP_subproblem_infeasible(fix_nlp, solve_data, config): """Solve feasibility problem, add cut according to strategy. The solution of the feasibility problem is copied to the working model. """ # TODO try something else? Reinitialize with different initial # value? config.logger.info('NLP subproblem was locally infeasible.') for c in fix_nlp.component_data_objects(ctype=Constraint): rhs = ((0 if c.upper is None else c.upper) + (0 if c.lower is None else c.lower)) sign_adjust = 1 if value(c.upper) is None else -1 fix_nlp.dual[c] = (sign_adjust * max(0, sign_adjust * (rhs - value(c.body)))) dual_values = list(fix_nlp.dual[c] for c in fix_nlp.MindtPy_utils.constraint_list) if config.strategy == 'PSC' or config.strategy == 'GBD': for var in fix_nlp.component_data_objects(ctype=Var, descend_into=True): fix_nlp.ipopt_zL_out[var] = 0 fix_nlp.ipopt_zU_out[var] = 0 if var.ub is not None and abs(var.ub - value(var)) < config.bound_tolerance: fix_nlp.ipopt_zL_out[var] = 1 elif var.lb is not None and abs(value(var) - var.lb) < config.bound_tolerance: fix_nlp.ipopt_zU_out[var] = -1 elif config.strategy == 'OA': config.logger.info('Solving feasibility problem') if config.initial_feas: # add_feas_slacks(fix_nlp, solve_data) # config.initial_feas = False feas_NLP, feas_NLP_results = solve_NLP_feas(solve_data, config) copy_var_list_values(feas_NLP.MindtPy_utils.variable_list, solve_data.mip.MindtPy_utils.variable_list, config) add_oa_cuts(solve_data.mip, dual_values, solve_data, config) # Add an integer cut to exclude this discrete option var_values = list(v.value for v in fix_nlp.MindtPy_utils.variable_list) if config.add_integer_cuts: add_int_cut(var_values, solve_data, config) # excludes current discrete option
def solve_NLP_subproblem(solve_data, config):
    """Solve the NLP subproblem obtained by fixing the binary variables.

    Clones the working model, fixes each binary at its rounded current
    value, restores the original continuous start point, deactivates the
    linearization cuts, records signed constraint violations as fallback
    duals, solves the NLP, and then dispatches on the solver termination
    condition (optimal / infeasible / maxIterations), updating bounds
    and adding the strategy-specific cuts.

    NOTE(review): a second, duplicate definition of this name appears
    later in this module and shadows this one; only the last definition
    is actually callable.
    """
    m = solve_data.working_model.clone()
    MindtPy = m.MindtPy_utils
    main_objective = next(m.component_data_objects(Objective, active=True))
    solve_data.nlp_iter += 1
    config.logger.info('NLP %s: Solve subproblem for fixed binaries.'
                       % (solve_data.nlp_iter,))
    # Set up NLP: fix binaries at their rounded relaxation values.
    for v in MindtPy.variable_list:
        if v.is_binary():
            v.fix(int(round(value(v))))

    # restore original variable values (continuous start point)
    for nlp_var, orig_val in zip(MindtPy.variable_list,
                                 solve_data.initial_var_values):
        if not nlp_var.fixed and not nlp_var.is_binary():
            nlp_var.value = orig_val

    MindtPy.MindtPy_linear_cuts.deactivate()
    # Surrogate duals: signed constraint violation (rhs - body), clipped
    # at zero; used if the solver returns no dual for a constraint.
    m.tmp_duals = ComponentMap()
    for c in m.component_data_objects(ctype=Constraint, active=True,
                                      descend_into=True):
        rhs = ((0 if c.upper is None else c.upper) +
               (0 if c.lower is None else c.lower))
        sign_adjust = 1 if value(c.upper) is None else -1
        m.tmp_duals[c] = sign_adjust * max(
            0, sign_adjust * (rhs - value(c.body)))
        # TODO check sign_adjust
    t = TransformationFactory('contrib.deactivate_trivial_constraints')
    t.apply_to(m, tmp=True, ignore_infeasible=True)
    # Solve the NLP
    # m.pprint()  # print nlp problem for debugging
    with SuppressInfeasibleWarning():
        results = SolverFactory(config.nlp_solver).solve(
            m, **config.nlp_solver_args)
    var_values = list(v.value for v in MindtPy.variable_list)
    subprob_terminate_cond = results.solver.termination_condition
    if subprob_terminate_cond is tc.optimal:
        copy_var_list_values(
            m.MindtPy_utils.variable_list,
            solve_data.working_model.MindtPy_utils.variable_list,
            config)
        # Backfill any duals the solver did not supply.
        for c in m.tmp_duals:
            if m.dual.get(c, None) is None:
                m.dual[c] = m.tmp_duals[c]
        duals = list(m.dual[c] for c in MindtPy.constraint_list)
        # Feasible fixed-binary solution tightens the primal bound.
        if main_objective.sense == minimize:
            solve_data.UB = min(value(main_objective.expr), solve_data.UB)
            solve_data.solution_improved = \
                solve_data.UB < solve_data.UB_progress[-1]
            solve_data.UB_progress.append(solve_data.UB)
        else:
            solve_data.LB = max(value(main_objective.expr), solve_data.LB)
            solve_data.solution_improved = \
                solve_data.LB > solve_data.LB_progress[-1]
            solve_data.LB_progress.append(solve_data.LB)
        config.logger.info('NLP {}: OBJ: {} LB: {} UB: {}'.format(
            solve_data.nlp_iter, value(main_objective.expr),
            solve_data.LB, solve_data.UB))
        if solve_data.solution_improved:
            solve_data.best_solution_found = m.clone()
        # Add the linear cut
        if config.strategy == 'OA':
            add_oa_cut(var_values, duals, solve_data, config)
        elif config.strategy == 'PSC':
            add_psc_cut(solve_data, config)
        elif config.strategy == 'GBD':
            add_gbd_cut(solve_data, config)
        # This adds an integer cut to the feasible_integer_cuts
        # ConstraintList, which is not activated by default. However, it
        # may be activated as needed in certain situations or for certain
        # values of option flags.
        add_int_cut(var_values, solve_data, config, feasible=True)
        config.call_after_subproblem_feasible(m, solve_data)
    elif subprob_terminate_cond is tc.infeasible:
        # TODO try something else? Reinitialize with different initial
        # value?
        config.logger.info('NLP subproblem was locally infeasible.')
        # Use the signed constraint violations as surrogate duals.
        for c in m.component_data_objects(ctype=Constraint, active=True,
                                          descend_into=True):
            rhs = ((0 if c.upper is None else c.upper) +
                   (0 if c.lower is None else c.lower))
            sign_adjust = 1 if value(c.upper) is None else -1
            m.dual[c] = sign_adjust * max(
                0, sign_adjust * (rhs - value(c.body)))
        for var in m.component_data_objects(ctype=Var, descend_into=True):
            if config.strategy == 'PSC' or config.strategy == 'GBD':
                # Synthesize ipopt bound-multiplier suffixes for
                # variables at (or within tolerance of) a bound.
                m.ipopt_zL_out[var] = 0
                m.ipopt_zU_out[var] = 0
                if var.ub is not None and \
                        abs(var.ub - value(var)) < config.bound_tolerance:
                    m.ipopt_zL_out[var] = 1
                elif var.lb is not None and \
                        abs(value(var) - var.lb) < config.bound_tolerance:
                    m.ipopt_zU_out[var] = -1
        # m.pprint()  # print infeasible nlp problem for debugging
        if config.strategy == 'OA':
            config.logger.info('Solving feasibility problem')
            if config.initial_feas:
                # add_feas_slacks(m, solve_data)
                # config.initial_feas = False
                var_values, duals = solve_NLP_feas(solve_data, config)
                add_oa_cut(var_values, duals, solve_data, config)
        # Add an integer cut to exclude this discrete option
        add_int_cut(var_values, solve_data, config)
    elif subprob_terminate_cond is tc.maxIterations:
        # TODO try something else? Reinitialize with different initial
        # value?
        config.logger.info(
            'NLP subproblem failed to converge within iteration limit.')
        # Add an integer cut to exclude this discrete option.
        # BUG FIX: the original call omitted the leading var_values
        # argument (add_int_cut(solve_data, config)), which would raise
        # a TypeError here; every other call site passes the variable
        # values first.
        add_int_cut(var_values, solve_data, config)
    else:
        raise ValueError('MindtPy unable to handle NLP subproblem termination '
                         'condition of {}'.format(subprob_terminate_cond))

    # Call the NLP post-solve callback
    config.call_after_subproblem_solve(m, solve_data)
def solve_NLP_subproblem(solve_data, config):
    """Solve the fixed-binary NLP subproblem and process its outcome.

    Works on a clone of the working model: binaries are fixed to their
    rounded values, continuous variables are reset to the original start
    point, linear cuts are deactivated, and signed constraint violations
    are stored as fallback duals before the NLP solve. The solver's
    termination condition then drives bound updates and cut generation
    (optimal), feasibility-problem handling (infeasible), or an integer
    cut (maxIterations).

    NOTE(review): this is a duplicate of an earlier definition of the
    same name in this module; being defined last, this is the version
    that callers actually get.
    """
    m = solve_data.working_model.clone()
    MindtPy = m.MindtPy_utils
    main_objective = next(m.component_data_objects(Objective, active=True))
    solve_data.nlp_iter += 1
    config.logger.info('NLP %s: Solve subproblem for fixed binaries.'
                       % (solve_data.nlp_iter,))
    # Set up NLP: fix binaries at their rounded relaxation values.
    for v in MindtPy.variable_list:
        if v.is_binary():
            v.fix(int(round(value(v))))

    # restore original variable values (continuous start point)
    for nlp_var, orig_val in zip(
            MindtPy.variable_list, solve_data.initial_var_values):
        if not nlp_var.fixed and not nlp_var.is_binary():
            nlp_var.value = orig_val

    MindtPy.MindtPy_linear_cuts.deactivate()
    # Surrogate duals: signed, clipped constraint violation, used when
    # the solver does not report a dual for a constraint.
    m.tmp_duals = ComponentMap()
    for c in m.component_data_objects(ctype=Constraint, active=True,
                                      descend_into=True):
        rhs = ((0 if c.upper is None else c.upper) +
               (0 if c.lower is None else c.lower))
        sign_adjust = 1 if value(c.upper) is None else -1
        m.tmp_duals[c] = sign_adjust * max(
            0, sign_adjust * (rhs - value(c.body)))
        # TODO check sign_adjust
    t = TransformationFactory('contrib.deactivate_trivial_constraints')
    t.apply_to(m, tmp=True, ignore_infeasible=True)
    # Solve the NLP
    # m.pprint()  # print nlp problem for debugging
    with SuppressInfeasibleWarning():
        results = SolverFactory(config.nlp_solver).solve(
            m, **config.nlp_solver_args)
    var_values = list(v.value for v in MindtPy.variable_list)
    subprob_terminate_cond = results.solver.termination_condition
    if subprob_terminate_cond is tc.optimal:
        copy_var_list_values(
            m.MindtPy_utils.variable_list,
            solve_data.working_model.MindtPy_utils.variable_list,
            config)
        # Backfill any duals the solver did not supply.
        for c in m.tmp_duals:
            if m.dual.get(c, None) is None:
                m.dual[c] = m.tmp_duals[c]
        duals = list(m.dual[c] for c in MindtPy.constraint_list)
        # Feasible fixed-binary solution tightens the primal bound.
        if main_objective.sense == minimize:
            solve_data.UB = min(value(main_objective.expr), solve_data.UB)
            solve_data.solution_improved = \
                solve_data.UB < solve_data.UB_progress[-1]
            solve_data.UB_progress.append(solve_data.UB)
        else:
            solve_data.LB = max(value(main_objective.expr), solve_data.LB)
            solve_data.solution_improved = \
                solve_data.LB > solve_data.LB_progress[-1]
            solve_data.LB_progress.append(solve_data.LB)
        config.logger.info(
            'NLP {}: OBJ: {} LB: {} UB: {}'
            .format(solve_data.nlp_iter,
                    value(main_objective.expr),
                    solve_data.LB, solve_data.UB))
        if solve_data.solution_improved:
            solve_data.best_solution_found = m.clone()
        # Add the linear cut
        if config.strategy == 'OA':
            add_oa_cut(var_values, duals, solve_data, config)
        elif config.strategy == 'PSC':
            add_psc_cut(solve_data, config)
        elif config.strategy == 'GBD':
            add_gbd_cut(solve_data, config)
        # This adds an integer cut to the feasible_integer_cuts
        # ConstraintList, which is not activated by default. However, it
        # may be activated as needed in certain situations or for certain
        # values of option flags.
        add_int_cut(var_values, solve_data, config, feasible=True)
        config.call_after_subproblem_feasible(m, solve_data)
    elif subprob_terminate_cond is tc.infeasible:
        # TODO try something else? Reinitialize with different initial
        # value?
        config.logger.info('NLP subproblem was locally infeasible.')
        # Use the signed constraint violations as surrogate duals.
        for c in m.component_data_objects(ctype=Constraint, active=True,
                                          descend_into=True):
            rhs = ((0 if c.upper is None else c.upper) +
                   (0 if c.lower is None else c.lower))
            sign_adjust = 1 if value(c.upper) is None else -1
            m.dual[c] = sign_adjust * max(
                0, sign_adjust * (rhs - value(c.body)))
        for var in m.component_data_objects(ctype=Var, descend_into=True):
            if config.strategy == 'PSC' or config.strategy == 'GBD':
                # Synthesize ipopt bound-multiplier suffixes for
                # variables at (or within tolerance of) a bound.
                m.ipopt_zL_out[var] = 0
                m.ipopt_zU_out[var] = 0
                if var.ub is not None and \
                        abs(var.ub - value(var)) < config.bound_tolerance:
                    m.ipopt_zL_out[var] = 1
                elif var.lb is not None and \
                        abs(value(var) - var.lb) < config.bound_tolerance:
                    m.ipopt_zU_out[var] = -1
        # m.pprint()  # print infeasible nlp problem for debugging
        if config.strategy == 'OA':
            config.logger.info('Solving feasibility problem')
            if config.initial_feas:
                # add_feas_slacks(m, solve_data)
                # config.initial_feas = False
                var_values, duals = solve_NLP_feas(solve_data, config)
                add_oa_cut(var_values, duals, solve_data, config)
        # Add an integer cut to exclude this discrete option
        add_int_cut(var_values, solve_data, config)
    elif subprob_terminate_cond is tc.maxIterations:
        # TODO try something else? Reinitialize with different initial
        # value?
        config.logger.info(
            'NLP subproblem failed to converge within iteration limit.')
        # Add an integer cut to exclude this discrete option.
        # BUG FIX: the original call omitted the leading var_values
        # argument (add_int_cut(solve_data, config)), which would raise
        # a TypeError; pass the variable values as in every other
        # add_int_cut call site.
        add_int_cut(var_values, solve_data, config)
    else:
        raise ValueError(
            'MindtPy unable to handle NLP subproblem termination '
            'condition of {}'.format(subprob_terminate_cond))

    # Call the NLP post-solve callback
    config.call_after_subproblem_solve(m, solve_data)