def init_fixed_disjuncts(solve_data, config):
    """Initialize by solving the problem with the current disjunct values."""
    # TODO error checking to make sure that the user gave proper disjuncts
    # Fix the user-specified disjunct values in a clone of the linear GDP
    # and solve the resulting MIP.
    solve_data.mip_iteration += 1
    fixed_linear_gdp = solve_data.linear_GDP.clone()
    config.logger.info(
        "Generating initial linear GDP approximation by "
        "solving subproblem with original user-specified disjunct values.")
    TransformationFactory('gdp.fix_disjuncts').apply_to(fixed_linear_gdp)
    mip_result = solve_linear_GDP(fixed_linear_gdp, solve_data, config)
    if not mip_result:
        config.logger.error(
            'Linear GDP infeasible for initial user-specified '
            'disjunct values. '
            'Skipping initialization.')
        return
    _, mip_var_values = mip_result
    # Build the NLP subproblem from the MIP solution.
    subproblem = solve_data.working_model.clone()
    # Transfer the discrete variable values into the subproblem.
    copy_and_fix_mip_values_to_nlp(
        subproblem.GDPopt_utils.working_var_list, mip_var_values, config)
    TransformationFactory('gdp.fix_disjuncts').apply_to(subproblem)
    solve_data.nlp_iteration += 1
    nlp_feasible, nlp_var_values, nlp_duals = solve_NLP(
        subproblem, solve_data, config)
    if nlp_feasible:
        update_nlp_progress_indicators(subproblem, solve_data, config)
        add_outer_approximation_cuts(
            nlp_var_values, nlp_duals, solve_data, config)
    # Exclude this discrete configuration from future master problems.
    add_integer_cut(
        mip_var_values, solve_data, config, feasible=nlp_feasible)
def init_fixed_disjuncts(solve_data, config):
    """Initialize by solving the problem with the current disjunct values."""
    # TODO error checking to make sure that the user gave proper disjuncts
    # Fix the disjuncts in a clone of the linear GDP and solve it as a MIP.
    solve_data.mip_iteration += 1
    config.logger.info(
        "Generating initial linear GDP approximation by "
        "solving subproblem with original user-specified disjunct values.")
    fixed_gdp = solve_data.linear_GDP.clone()
    TransformationFactory('gdp.fix_disjuncts').apply_to(fixed_gdp)
    mip_result = solve_linear_GDP(fixed_gdp, solve_data, config)
    if not mip_result.feasible:
        config.logger.error(
            'Linear GDP infeasible for initial user-specified '
            'disjunct values. '
            'Skipping initialization.')
        return
    # MIP solved: evaluate the corresponding NLP subproblem and add cuts.
    nlp_result = solve_disjunctive_subproblem(mip_result, solve_data, config)
    if nlp_result.feasible:
        add_subproblem_cuts(nlp_result, solve_data, config)
    add_integer_cut(
        mip_result.var_values, solve_data.linear_GDP, solve_data, config,
        feasible=nlp_result.feasible)
def init_custom_disjuncts(solve_data, config):
    """Initialize by using user-specified custom disjuncts."""
    # TODO error checking to make sure that the user gave proper disjuncts
    # custom_init_disjuncts is a list of sets, each giving the disjuncts
    # active at one initialization iteration.
    for active_disjunct_set in config.custom_init_disjuncts:
        # Fix this iteration's active disjuncts in a clone of the linear GDP
        # and solve the resulting MIP.
        solve_data.mip_iteration += 1
        custom_gdp = solve_data.linear_GDP.clone()
        config.logger.info(
            "Generating initial linear GDP approximation by "
            "solving subproblems with user-specified active disjuncts.")
        disjunct_pairs = zip(
            solve_data.original_model.GDPopt_utils.disjunct_list,
            custom_gdp.GDPopt_utils.disjunct_list)
        for orig_disj, clone_disj in disjunct_pairs:
            if orig_disj in active_disjunct_set:
                clone_disj.indicator_var.fix(1)
        mip_result = solve_linear_GDP(custom_gdp, solve_data, config)
        if not mip_result.feasible:
            config.logger.error(
                'Linear GDP infeasible for user-specified '
                'custom initialization disjunct set %s. '
                'Skipping that set and continuing on.'
                % list(disj.name for disj in active_disjunct_set))
            continue
        nlp_result = solve_disjunctive_subproblem(
            mip_result, solve_data, config)
        if nlp_result.feasible:
            add_subproblem_cuts(nlp_result, solve_data, config)
        add_integer_cut(
            mip_result.var_values, solve_data.linear_GDP, solve_data, config,
            feasible=nlp_result.feasible)
def init_custom_disjuncts(solve_data, config):
    """Initialize by using user-specified custom disjuncts."""
    # TODO error checking to make sure that the user gave proper disjuncts
    for active_disjunct_set in config.custom_init_disjuncts:
        # custom_init_disjuncts contains a list of sets, giving the disjuncts
        # active at each initialization iteration
        solve_data.mip_iteration += 1
        gdp_clone = solve_data.linear_GDP.clone()
        config.logger.info(
            "Generating initial linear GDP approximation by "
            "solving subproblems with user-specified active disjuncts.")
        # Fix the indicator variable of each user-activated disjunct in the
        # clone; disjuncts not in the set are left unfixed.
        original_disjuncts = (
            solve_data.original_model.GDPopt_utils.disjunct_list)
        cloned_disjuncts = gdp_clone.GDPopt_utils.disjunct_list
        for source_disj, target_disj in zip(
                original_disjuncts, cloned_disjuncts):
            if source_disj in active_disjunct_set:
                target_disj.indicator_var.fix(True)
        mip_result = solve_linear_GDP(gdp_clone, solve_data, config)
        if mip_result.feasible:
            nlp_result = solve_disjunctive_subproblem(
                mip_result, solve_data, config)
            if nlp_result.feasible:
                add_subproblem_cuts(nlp_result, solve_data, config)
            add_integer_cut(
                mip_result.var_values, solve_data.linear_GDP,
                solve_data, config, feasible=nlp_result.feasible)
        else:
            config.logger.error(
                'Linear GDP infeasible for user-specified '
                'custom initialization disjunct set %s. '
                'Skipping that set and continuing on.'
                % list(disj.name for disj in active_disjunct_set))
def init_max_binaries(solve_data, config):
    """Initialize by maximizing binary variables and disjuncts.

    This function activates as many binary variables and disjuncts as
    feasible.

    """
    solve_data.mip_iteration += 1
    relaxation = solve_data.linear_GDP.clone()
    config.logger.info(
        "Generating initial linear GDP approximation by "
        "solving a subproblem that maximizes "
        "the sum of all binary and logical variables.")
    # Deactivate the model's objective and replace it with one that
    # maximizes the sum of all unfixed binary variables.
    active_obj = next(
        relaxation.component_data_objects(Objective, active=True))
    active_obj.deactivate()
    unfixed_binaries = [
        v for v in relaxation.component_data_objects(
            ctype=Var, descend_into=(Block, Disjunct))
        if v.is_binary() and not v.fixed]
    relaxation.GDPopt_utils.max_binary_obj = Objective(
        expr=sum(unfixed_binaries), sense=maximize)
    # Solve the binary-maximization MIP.
    mip_results = solve_linear_GDP(relaxation, solve_data, config)
    if not mip_results.feasible:
        config.logger.info(
            "Linear relaxation for initialization was infeasible. "
            "Problem is infeasible.")
        return False
    nlp_result = solve_disjunctive_subproblem(mip_results, solve_data, config)
    if nlp_result.feasible:
        add_subproblem_cuts(nlp_result, solve_data, config)
    add_integer_cut(
        mip_results.var_values, solve_data.linear_GDP, solve_data, config,
        feasible=nlp_result.feasible)
def test_solve_linear_GDP_unbounded(self):
    """solve_linear_GDP on an unbounded model should warn and re-solve."""
    model = ConcreteModel()
    model.GDPopt_utils = Block()
    model.x = Var(bounds=(-1, 10))
    model.y = Var(bounds=(2, 3))
    model.z = Var()  # no bounds: minimizing z makes the model unbounded
    model.d = Disjunction(expr=[
        [model.x + model.y >= 5],
        [model.x - model.y <= 3],
    ])
    model.o = Objective(expr=model.z)
    model.GDPopt_utils.variable_list = [model.x, model.y, model.z]
    model.GDPopt_utils.disjunct_list = [
        model.d._autodisjuncts[0],
        model.d._autodisjuncts[1],
    ]
    solver_config = GDPoptSolver.CONFIG(dict(mip_solver=mip_solver))
    log_capture = StringIO()
    with LoggingIntercept(
            log_capture, 'pyomo.contrib.gdpopt', logging.WARNING):
        solve_linear_GDP(model, GDPoptSolveData(), solver_config)
    self.assertIn(
        "Linear GDP was unbounded. Resolving with arbitrary bound values",
        log_capture.getvalue().strip())
def test_solve_linear_GDP_unbounded(self):
    """An unbounded linear GDP triggers a warning and an automatic re-solve."""
    m = ConcreteModel()
    m.GDPopt_utils = Block()
    m.x = Var(bounds=(-1, 10))
    m.y = Var(bounds=(2, 3))
    # z is unbounded below, so the minimization objective is unbounded.
    m.z = Var()
    m.d = Disjunction(expr=[[m.x + m.y >= 5], [m.x - m.y <= 3]])
    m.o = Objective(expr=m.z)
    m.GDPopt_utils.variable_list = [m.x, m.y, m.z]
    m.GDPopt_utils.disjunct_list = [
        m.d._autodisjuncts[0], m.d._autodisjuncts[1]]
    cfg = GDPoptSolver.CONFIG(dict(mip_solver=mip_solver))
    captured = StringIO()
    with LoggingIntercept(captured, 'pyomo.contrib.gdpopt', logging.WARNING):
        solve_linear_GDP(m, GDPoptSolveData(), cfg)
    expected = (
        "Linear GDP was unbounded. Resolving with arbitrary bound values")
    self.assertIn(expected, captured.getvalue().strip())
def solve_LOA_master(solve_data, config):
    """Solve the augmented lagrangean outer approximation master problem.

    Works on a clone of the stored linear GDP, so the relaxation kept on
    solve_data is not modified.  On success, updates the optimistic bound
    (LB for minimization, UB for maximization) and records this iteration
    in solve_data.iteration_log.  Returns whatever solve_linear_GDP
    returned (falsy when the master problem is infeasible).
    """
    m = solve_data.linear_GDP.clone()
    GDPopt = m.GDPopt_utils
    # Set up augmented Lagrangean penalty objective
    GDPopt.objective.deactivate()
    # Penalty is applied in the direction that worsens the objective.
    sign_adjust = 1 if GDPopt.objective.sense == minimize else -1
    # Sum every OA slack variable (those living on components named
    # 'GDPopt_OA_slacks'), scaled by the configured penalty factor.
    GDPopt.OA_penalty_expr = Expression(
        expr=sign_adjust * config.OA_penalty_factor *
        sum(v for v in m.component_data_objects(
            ctype=Var, descend_into=(Block, Disjunct))
            if v.parent_component().local_name == 'GDPopt_OA_slacks'))
    GDPopt.oa_obj = Objective(
        expr=GDPopt.objective.expr + GDPopt.OA_penalty_expr,
        sense=GDPopt.objective.sense)
    solve_data.mip_iteration += 1
    mip_results = solve_linear_GDP(m, solve_data, config)
    if mip_results:
        # The master problem is a relaxation, so its objective value is an
        # optimistic bound on the true optimum.
        if GDPopt.objective.sense == minimize:
            solve_data.LB = max(value(GDPopt.oa_obj.expr), solve_data.LB)
        else:
            solve_data.UB = min(value(GDPopt.oa_obj.expr), solve_data.UB)
        # Record (penalized objective, raw objective, variable values) keyed
        # by the (master, mip, nlp) iteration triple.
        solve_data.iteration_log[
            (solve_data.master_iteration,
             solve_data.mip_iteration,
             solve_data.nlp_iteration)] = (
            value(GDPopt.oa_obj.expr),
            value(GDPopt.objective.expr),
            mip_results[1]  # mip_var_values
        )
        config.logger.info(
            'ITER %s.%s.%s-MIP: OBJ: %s LB: %s UB: %s'
            % (solve_data.master_iteration,
               solve_data.mip_iteration,
               solve_data.nlp_iteration,
               value(GDPopt.oa_obj.expr),
               solve_data.LB, solve_data.UB))
    else:
        # Master problem was infeasible.
        if solve_data.master_iteration == 1:
            config.logger.warning(
                'GDPopt initialization may have generated poor '
                'quality cuts.')
        # set optimistic bound to infinity
        if GDPopt.objective.sense == minimize:
            solve_data.LB = float('inf')
        else:
            solve_data.UB = float('-inf')
    # Call the MILP post-solve callback
    config.master_postsolve(m, solve_data)
    return mip_results
def init_max_binaries(solve_data, config):
    """Initialize by maximizing binary variables and disjuncts.

    This function activates as many binary variables and disjuncts as
    feasible.

    """
    solve_data.mip_iteration += 1
    relaxed_gdp = solve_data.linear_GDP.clone()
    config.logger.info(
        "Generating initial linear GDP approximation by "
        "solving a subproblem that maximizes "
        "the sum of all binary and logical variables.")
    # Swap the model objective for one that maximizes the free binaries.
    relaxed_gdp.GDPopt_utils.objective.deactivate()
    free_binaries = [
        v for v in relaxed_gdp.component_data_objects(
            ctype=Var, descend_into=(Block, Disjunct))
        if v.is_binary() and not v.fixed]
    relaxed_gdp.GDPopt_utils.max_binary_obj = Objective(
        expr=sum(free_binaries), sense=maximize)
    # Solve
    mip_results = solve_linear_GDP(relaxed_gdp, solve_data, config)
    if not mip_results:
        config.logger.info(
            "Linear relaxation for initialization was infeasible. "
            "Problem is infeasible.")
        return False
    _, mip_var_values = mip_results
    # Construct the NLP subproblem from the MIP solution values.
    nlp_model = solve_data.working_model.clone()
    copy_and_fix_mip_values_to_nlp(
        nlp_model.GDPopt_utils.working_var_list, mip_var_values, config)
    TransformationFactory('gdp.fix_disjuncts').apply_to(nlp_model)
    solve_data.nlp_iteration += 1
    nlp_feasible, nlp_var_values, nlp_duals = solve_NLP(
        nlp_model, solve_data, config)
    if nlp_feasible:
        update_nlp_progress_indicators(nlp_model, solve_data, config)
        add_outer_approximation_cuts(
            nlp_var_values, nlp_duals, solve_data, config)
    add_integer_cut(
        mip_var_values, solve_data, config, feasible=nlp_feasible)
def solve_set_cover_MIP(linear_GDP_model, disj_needs_cover, solve_data, config):
    """Solve the set covering MIP to determine next configuration.

    disj_needs_cover is a sequence of booleans parallel to
    GDPopt.working_disjuncts_list, flagging disjuncts not yet covered.
    Returns the solve_linear_GDP result tuple extended with the list of
    disjunct indicator values, or False when the MIP is infeasible.
    """
    m = linear_GDP_model
    GDPopt = linear_GDP_model.GDPopt_utils
    # number of disjuncts that still need to be covered
    num_needs_cover = sum(1 for disj_bool in disj_needs_cover if disj_bool)
    # number of disjuncts that have been covered
    num_covered = len(disj_needs_cover) - num_needs_cover
    # weights for the set covering problem: each uncovered disjunct gets
    # weight num_covered + 1 so it outweighs all covered disjuncts combined
    weights = list((num_covered + 1 if disj_bool else 1)
                   for disj_bool in disj_needs_cover)
    # Set up set covering objective
    GDPopt.objective.deactivate()
    GDPopt.set_cover_obj = Objective(
        expr=sum(weight * disj.indicator_var
                 for (weight, disj) in zip(
                     weights, GDPopt.working_disjuncts_list)),
        sense=maximize)
    # Deactivate potentially non-rigorous generated cuts
    for constr in m.component_objects(ctype=Constraint, active=True,
                                      descend_into=(Block, Disjunct)):
        if (constr.local_name == 'GDPopt_OA_cuts'):
            constr.deactivate()
    mip_results = solve_linear_GDP(m, solve_data, config)
    if mip_results:
        config.logger.info('Solved set covering MIP')
        # Append the chosen disjunct configuration to the result tuple.
        return mip_results + (list(
            disj.indicator_var.value
            for disj in GDPopt.working_disjuncts_list), )
    else:
        config.logger.info('Set covering problem is infeasible. '
                           'Problem may have no more feasible '
                           'binary configurations.')
        # NOTE(review): `GDPopt.mip_iter` — elsewhere in this file the MIP
        # iteration counter lives on solve_data (mip_iteration); confirm this
        # attribute actually exists on the utils block.
        if GDPopt.mip_iter <= 1:
            config.logger.warning('Set covering problem was infeasible. '
                                  'Check your linear and logical constraints '
                                  'for contradictions.')
        # Infeasible set cover: collapse the optimistic bound.
        if GDPopt.objective.sense == minimize:
            solve_data.LB = float('inf')
        else:
            solve_data.UB = float('-inf')
        return False
def init_custom_disjuncts(solve_data, config):
    """Initialize by using user-specified custom disjuncts."""
    # TODO error checking to make sure that the user gave proper disjuncts
    # Each entry of custom_init_disjuncts is a set of disjuncts to activate
    # for one initialization iteration.
    for active_disjunct_set in config.custom_init_disjuncts:
        solve_data.mip_iteration += 1
        gdp_iterate = solve_data.linear_GDP.clone()
        config.logger.info(
            "Generating initial linear GDP approximation by "
            "solving subproblems with user-specified active disjuncts.")
        # Fix indicator variables for the disjuncts the user activated.
        for orig_disj, clone_disj in zip(
                solve_data.original_model.GDPopt_utils.orig_disjuncts_list,
                gdp_iterate.GDPopt_utils.orig_disjuncts_list):
            if orig_disj in active_disjunct_set:
                clone_disj.indicator_var.fix(1)
        mip_result = solve_linear_GDP(gdp_iterate, solve_data, config)
        if not mip_result:
            config.logger.error(
                'Linear GDP infeasible for user-specified '
                'custom initialization disjunct set %s. '
                'Skipping that set and continuing on.'
                % list(disj.name for disj in active_disjunct_set))
            continue
        _, mip_var_values = mip_result
        # Build and solve the NLP subproblem for this configuration.
        nlp_model = solve_data.working_model.clone()
        copy_and_fix_mip_values_to_nlp(
            nlp_model.GDPopt_utils.working_var_list, mip_var_values, config)
        TransformationFactory('gdp.fix_disjuncts').apply_to(nlp_model)
        solve_data.nlp_iteration += 1
        nlp_feasible, nlp_var_values, nlp_duals = solve_NLP(
            nlp_model, solve_data, config)
        if nlp_feasible:
            update_nlp_progress_indicators(nlp_model, solve_data, config)
            add_outer_approximation_cuts(
                nlp_var_values, nlp_duals, solve_data, config)
        add_integer_cut(
            mip_var_values, solve_data, config, feasible=nlp_feasible)
def solve_GLOA_master(solve_data, config):
    """Solve the rigorous outer approximation master problem.

    Solves a clone of the stored linear GDP as-is (no penalty terms, unlike
    solve_LOA_master).  On success, updates the optimistic bound and the
    iteration log.  Returns whatever solve_linear_GDP returned (falsy when
    the master problem is infeasible).
    """
    m = solve_data.linear_GDP.clone()
    GDPopt = m.GDPopt_utils
    solve_data.mip_iteration += 1
    mip_results = solve_linear_GDP(m, solve_data, config)
    if mip_results:
        # The master problem is a relaxation, so its objective value is an
        # optimistic bound on the true optimum.
        if GDPopt.objective.sense == minimize:
            solve_data.LB = max(value(GDPopt.objective.expr), solve_data.LB)
        else:
            solve_data.UB = min(value(GDPopt.objective.expr), solve_data.UB)
        # With no penalty term, the master objective and the raw objective
        # coincide, so the same value is logged twice in the tuple.
        solve_data.iteration_log[
            (solve_data.master_iteration,
             solve_data.mip_iteration,
             solve_data.nlp_iteration)] = (
            value(GDPopt.objective.expr),
            value(GDPopt.objective.expr),
            mip_results[1]  # mip_var_values
        )
        config.logger.info(
            'ITER %s.%s.%s-MIP: OBJ: %s LB: %s UB: %s'
            % (solve_data.master_iteration,
               solve_data.mip_iteration,
               solve_data.nlp_iteration,
               value(GDPopt.objective.expr),
               solve_data.LB, solve_data.UB))
    else:
        # Master problem was infeasible.
        if solve_data.master_iteration == 1:
            config.logger.warning(
                'GDPopt initialization may have generated poor '
                'quality cuts.')
        # set optimistic bound to infinity
        if GDPopt.objective.sense == minimize:
            solve_data.LB = float('inf')
        else:
            solve_data.UB = float('-inf')
    # Call the MILP post-solve callback
    config.master_postsolve(m, solve_data)
    return mip_results
def solve_set_cover_mip(model, disj_needs_cover, solve_data, config):
    """Solve the set covering MIP to determine next configuration."""
    util_blk = model.GDPopt_utils
    # Count disjuncts that still need to be covered; disjuncts needing
    # coverage get a weight large enough (covered_count + 1) to outweigh
    # all already-covered disjuncts combined.
    uncovered_count = sum(1 for needs in disj_needs_cover if needs)
    covered_count = len(disj_needs_cover) - uncovered_count
    weights = [
        covered_count + 1 if needs else 1 for needs in disj_needs_cover]
    # Rebuild the set covering objective from scratch on each call.
    if hasattr(util_blk, "set_cover_obj"):
        del util_blk.set_cover_obj
    util_blk.set_cover_obj = Objective(
        expr=sum(w * disj.indicator_var
                 for w, disj in zip(weights, util_blk.disjunct_list)),
        sense=maximize)
    # Solve a clone so the stored model is left untouched.
    mip_results = solve_linear_GDP(model.clone(), solve_data, config)
    if mip_results.feasible:
        config.logger.info('Solved set covering MIP')
        return mip_results
    config.logger.info(
        'Set covering problem is infeasible. '
        'Problem may have no more feasible '
        'disjunctive realizations.')
    if solve_data.mip_iteration <= 1:
        config.logger.warning(
            'Set covering problem was infeasible. '
            'Check your linear and logical constraints '
            'for contradictions.')
    # Infeasible set cover: collapse the optimistic bound.
    if solve_data.objective_sense == minimize:
        solve_data.LB = float('inf')
    else:
        solve_data.UB = float('-inf')
    return mip_results
def init_fixed_disjuncts(solve_data, config):
    """Initialize by solving the problem with the current disjunct values."""
    # TODO error checking to make sure that the user gave proper disjuncts
    solve_data.mip_iteration += 1
    config.logger.info(
        "Generating initial linear GDP approximation by "
        "solving subproblem with original user-specified disjunct values.")
    # Clone the linear GDP and lock in the current disjunct selections.
    working_clone = solve_data.linear_GDP.clone()
    fix_disjuncts = TransformationFactory('gdp.fix_disjuncts')
    fix_disjuncts.apply_to(working_clone)
    mip_result = solve_linear_GDP(working_clone, solve_data, config)
    if mip_result.feasible:
        nlp_result = solve_disjunctive_subproblem(
            mip_result, solve_data, config)
        if nlp_result.feasible:
            add_subproblem_cuts(nlp_result, solve_data, config)
        add_integer_cut(
            mip_result.var_values, solve_data.linear_GDP, solve_data,
            config, feasible=nlp_result.feasible)
    else:
        config.logger.error(
            'Linear GDP infeasible for initial user-specified '
            'disjunct values. '
            'Skipping initialization.')