def setup_solver_environment(model, config):
    solve_data = GDPoptSolveData()  # data object for storing solver state
    solve_data.config = config
    solve_data.results = SolverResults()
    solve_data.timing = Container()
    min_logging_level = logging.INFO if config.tee else None
    with time_code(solve_data.timing, 'total', is_main_timer=True), \
            lower_logger_level_to(config.logger, min_logging_level), \
            create_utility_block(model, 'GDPopt_utils', solve_data):

        # Create a working copy of the original model
        solve_data.original_model = model
        solve_data.working_model = model.clone()
        setup_results_object(solve_data, config)
        solve_data.active_strategy = config.strategy
        util_block = solve_data.working_model.GDPopt_utils

        # Save model initial values.
        # These can be used later to initialize NLP subproblems.
        solve_data.initial_var_values = list(
            v.value for v in util_block.variable_list)
        solve_data.best_solution_found = None

        # Integer cuts exclude particular discrete decisions
        util_block.integer_cuts = ConstraintList(doc='integer cuts')

        # Set up iteration counters
        solve_data.master_iteration = 0
        solve_data.mip_iteration = 0
        solve_data.nlp_iteration = 0

        # Set up bounds
        solve_data.LB = float('-inf')
        solve_data.UB = float('inf')
        solve_data.iteration_log = {}

        # Flag indicating whether the solution improved in the past
        # iteration or not
        solve_data.feasible_solution_improved = False

        yield solve_data  # yield the set-up solver environment

        if (solve_data.best_solution_found is not None
                and solve_data.best_solution_found is not solve_data.original_model):
            # Update values on the original model
            copy_var_list_values(
                from_list=solve_data.best_solution_found.GDPopt_utils.variable_list,
                to_list=solve_data.original_model.GDPopt_utils.variable_list,
                config=config)

        # Finalize results object
        solve_data.results.problem.lower_bound = solve_data.LB
        solve_data.results.problem.upper_bound = solve_data.UB
        solve_data.results.solver.iterations = solve_data.master_iteration
        solve_data.results.solver.timing = solve_data.timing
        solve_data.results.solver.user_time = solve_data.timing.total
        solve_data.results.solver.wallclock_time = solve_data.timing.total
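# --- Usage sketch (not from the source above) --------------------------------
# Because setup_solver_environment() contains a `yield`, it is presumably meant
# to be wrapped with contextlib.contextmanager and consumed by the solver's
# entry point. The driver name `run_main_loop` is a hypothetical placeholder;
# only setup_solver_environment() itself comes from the code above.
from contextlib import contextmanager

setup_environment = contextmanager(setup_solver_environment)

def solve_with_environment(model, config):
    # Entering the managed environment handles timing, logger levels, and the
    # temporary GDPopt_utils block; exiting it copies the best solution back
    # to the original model and finalizes the results object.
    with setup_environment(model, config) as solve_data:
        run_main_loop(solve_data, config)  # hypothetical: master/NLP iteration loop
    return solve_data.results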
def solve(self, model, **kwds):
    """Solve the model.

    Warning: this solver is still in beta. Keyword arguments subject to
    change. Undocumented keyword arguments definitely subject to change.

    This function performs all of the GDPopt solver setup and problem
    validation. It then calls upon helper functions to construct the
    initial master approximation and iteration loop.

    Args:
        model (Block): a Pyomo model or block to be solved

    """
    config = self.CONFIG(kwds.pop('options', {}))
    config.set_value(kwds)
    solve_data = GDPoptSolveData()
    solve_data.results = SolverResults()
    solve_data.timing = Container()

    old_logger_level = config.logger.getEffectiveLevel()
    with time_code(solve_data.timing, 'total'), \
            restore_logger_level(config.logger), \
            create_utility_block(model, 'GDPopt_utils', solve_data):
        if config.tee and old_logger_level > logging.INFO:
            # If the logger does not already include INFO, include it.
            config.logger.setLevel(logging.INFO)
        config.logger.info(
            "Starting GDPopt version %s using %s algorithm"
            % (".".join(map(str, self.version())), config.strategy)
        )
        config.logger.info(
            """
            If you use this software, you may cite the following:
            - Implementation:
            Chen, Q; Johnson, ES; Siirola, JD; Grossmann, IE.
            Pyomo.GDP: Disjunctive Models in Python.
            Proc. of the 13th Intl. Symposium on Process Systems Eng.
            San Diego, 2018.
            - LOA algorithm:
            Türkay, M; Grossmann, IE.
            Logic-based MINLP algorithms for the optimal synthesis of process
            networks. Comp. and Chem. Eng. 1996, 20(8), 959–978.
            DOI: 10.1016/0098-1354(95)00219-7.
            - GLOA algorithm:
            Lee, S; Grossmann, IE.
            A Global Optimization Algorithm for Nonconvex Generalized
            Disjunctive Programming and Applications to Process Systems.
            Comp. and Chem. Eng. 2001, 25, 1675-1697.
            DOI: 10.1016/S0098-1354(01)00732-3
            """.strip()
        )

        solve_data.results.solver.name = 'GDPopt %s - %s' % (
            str(self.version()), config.strategy)

        solve_data.original_model = model
        solve_data.working_model = model.clone()
        GDPopt = solve_data.working_model.GDPopt_utils
        setup_results_object(solve_data, config)

        solve_data.current_strategy = config.strategy

        # Verify that objective has correct form
        process_objective(solve_data, config)

        # Save model initial values. These are used later to initialize NLP
        # subproblems.
        solve_data.initial_var_values = list(
            v.value for v in GDPopt.variable_list)
        solve_data.best_solution_found = None

        # Validate the model to ensure that GDPopt is able to solve it.
        if not model_is_valid(solve_data, config):
            return

        # Integer cuts exclude particular discrete decisions
        GDPopt.integer_cuts = ConstraintList(doc='integer cuts')

        # Feasible integer cuts exclude discrete realizations that have
        # been explored via an NLP subproblem. Depending on model
        # characteristics, the user may wish to revisit NLP subproblems
        # (with a different initialization, for example). Therefore, these
        # cuts are not enabled by default, unless the initial model has no
        # discrete decisions.
        # Note: these cuts will only exclude integer realizations that are
        # not already in the primary GDPopt_integer_cuts ConstraintList.
        GDPopt.no_backtracking = ConstraintList(
            doc='explored integer cuts')

        # Set up iteration counters
        solve_data.master_iteration = 0
        solve_data.mip_iteration = 0
        solve_data.nlp_iteration = 0

        # Set up bounds
        solve_data.LB = float('-inf')
        solve_data.UB = float('inf')
        solve_data.iteration_log = {}

        # Flag indicating whether the solution improved in the past
        # iteration or not
        solve_data.feasible_solution_improved = False

        # Initialize the master problem
        with time_code(solve_data.timing, 'initialization'):
            GDPopt_initialize_master(solve_data, config)

        # Algorithm main loop
        with time_code(solve_data.timing, 'main loop'):
            GDPopt_iteration_loop(solve_data, config)

        if solve_data.best_solution_found is not None:
            # Update values in working model
            copy_var_list_values(
                from_list=solve_data.best_solution_found.GDPopt_utils.variable_list,
                to_list=GDPopt.variable_list,
                config=config)
            # Update values in original model
            copy_var_list_values(
                GDPopt.variable_list,
                solve_data.original_model.GDPopt_utils.variable_list,
                config)

        solve_data.results.problem.lower_bound = solve_data.LB
        solve_data.results.problem.upper_bound = solve_data.UB

    solve_data.results.solver.timing = solve_data.timing
    solve_data.results.solver.user_time = solve_data.timing.total
    solve_data.results.solver.wallclock_time = solve_data.timing.total
    solve_data.results.solver.iterations = solve_data.master_iteration

    return solve_data.results
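# --- Usage sketch (not from the source above) --------------------------------
# The solve() method above is normally reached through Pyomo's SolverFactory
# rather than called directly. A minimal invocation might look like the
# sketch below; the subsolver names ('glpk', 'ipopt') are assumptions and
# should be replaced with whatever MIP/NLP solvers are actually installed.
from pyomo.environ import SolverFactory

def solve_gdp_model(model):
    results = SolverFactory('gdpopt').solve(
        model,
        strategy='LOA',      # logic-based outer approximation
        mip_solver='glpk',   # assumed MIP subsolver
        nlp_solver='ipopt',  # assumed NLP subsolver
        tee=True,            # stream solver log output
    )
    return results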
def solve(self, model, **kwds):
    """Solve the model.

    Warning: this solver is still in beta. Keyword arguments subject to
    change. Undocumented keyword arguments definitely subject to change.

    This function performs all of the GDPopt solver setup and problem
    validation. It then calls upon helper functions to construct the
    initial master approximation and iteration loop.

    Args:
        model (Block): a Pyomo model or block to be solved

    """
    config = self.CONFIG(kwds.pop('options', {}))
    config.set_value(kwds)
    solve_data = GDPoptSolveData()

    created_GDPopt_block = False

    old_logger_level = config.logger.getEffectiveLevel()
    try:
        if config.tee and old_logger_level > logging.INFO:
            # If the logger does not already include INFO, include it.
            config.logger.setLevel(logging.INFO)
        config.logger.info("---Starting GDPopt---")

        # Create a model block on which to store GDPopt-specific utility
        # modeling objects.
        if hasattr(model, 'GDPopt_utils'):
            raise RuntimeError(
                "GDPopt needs to create a Block named GDPopt_utils "
                "on the model object, but an attribute with that name "
                "already exists.")
        else:
            created_GDPopt_block = True
            model.GDPopt_utils = Block(
                doc="Container for GDPopt solver utility modeling objects")

        solve_data.original_model = model

        solve_data.working_model = clone_orig_model_with_lists(model)
        GDPopt = solve_data.working_model.GDPopt_utils
        record_original_model_statistics(solve_data, config)

        solve_data.current_strategy = config.strategy

        # Reformulate integer variables to binary
        reformulate_integer_variables(solve_data.working_model, config)

        # Save ordered lists of main modeling components, so that data can
        # be easily transferred between future model clones.
        build_ordered_component_lists(solve_data.working_model)
        record_working_model_statistics(solve_data, config)
        solve_data.results.solver.name = 'GDPopt ' + str(self.version())

        # Save model initial values. These are used later to initialize NLP
        # subproblems.
        solve_data.initial_var_values = list(
            v.value for v in GDPopt.working_var_list)

        # Store the initial model state as the best solution found. If we
        # find no better solution, then we will restore from this copy.
        solve_data.best_solution_found = solve_data.initial_var_values

        # Validate the model to ensure that GDPopt is able to solve it.
        if not model_is_valid(solve_data, config):
            return

        # Maps in order to keep track of certain generated constraints
        GDPopt.oa_cut_map = ComponentMap()

        # Integer cuts exclude particular discrete decisions
        GDPopt.integer_cuts = ConstraintList(doc='integer cuts')

        # Feasible integer cuts exclude discrete realizations that have
        # been explored via an NLP subproblem. Depending on model
        # characteristics, the user may wish to revisit NLP subproblems
        # (with a different initialization, for example). Therefore, these
        # cuts are not enabled by default, unless the initial model has no
        # discrete decisions.
        # Note: these cuts will only exclude integer realizations that are
        # not already in the primary GDPopt_integer_cuts ConstraintList.
        GDPopt.no_backtracking = ConstraintList(
            doc='explored integer cuts')

        # Set up iteration counters
        solve_data.master_iteration = 0
        solve_data.mip_iteration = 0
        solve_data.nlp_iteration = 0

        # Set up bounds
        solve_data.LB = float('-inf')
        solve_data.UB = float('inf')
        solve_data.iteration_log = {}

        # Flag indicating whether the solution improved in the past
        # iteration or not
        solve_data.feasible_solution_improved = False

        # Initialize the master problem
        GDPopt_initialize_master(solve_data, config)

        # Algorithm main loop
        GDPopt_iteration_loop(solve_data, config)

        # Update values in working model
        copy_var_list_values(
            from_list=solve_data.best_solution_found,
            to_list=GDPopt.working_var_list,
            config=config)
        GDPopt.objective_value.set_value(
            value(solve_data.working_objective_expr, exception=False))

        # Update values in original model
        copy_var_list_values(
            GDPopt.orig_var_list,
            solve_data.original_model.GDPopt_utils.orig_var_list,
            config)

        solve_data.results.problem.lower_bound = solve_data.LB
        solve_data.results.problem.upper_bound = solve_data.UB

    finally:
        config.logger.setLevel(old_logger_level)
        if created_GDPopt_block:
            model.del_component('GDPopt_utils')
def solve(self, model, **kwds):
    """Solve the model.

    Warning: this solver is still in beta. Keyword arguments subject to
    change. Undocumented keyword arguments definitely subject to change.

    This function performs all of the GDPopt solver setup and problem
    validation. It then calls upon helper functions to construct the
    initial master approximation and iteration loop.

    Args:
        model (Block): a Pyomo model or block to be solved

    """
    config = self.CONFIG(kwds.pop('options', {}))
    config.set_value(kwds)
    solve_data = GDPoptSolveData()
    solve_data.results = SolverResults()
    solve_data.timing = Container()

    old_logger_level = config.logger.getEffectiveLevel()
    with time_code(solve_data.timing, 'total', is_main_timer=True), \
            restore_logger_level(config.logger), \
            create_utility_block(model, 'GDPopt_utils', solve_data):
        if config.tee and old_logger_level > logging.INFO:
            # If the logger does not already include INFO, include it.
            config.logger.setLevel(logging.INFO)
        config.logger.info(
            "Starting GDPopt version %s using %s algorithm"
            % (".".join(map(str, self.version())), config.strategy)
        )
        config.logger.info(
            """
            If you use this software, you may cite the following:
            - Implementation:
            Chen, Q; Johnson, ES; Siirola, JD; Grossmann, IE.
            Pyomo.GDP: Disjunctive Models in Python.
            Proc. of the 13th Intl. Symposium on Process Systems Eng.
            San Diego, 2018.
            - LOA algorithm:
            Türkay, M; Grossmann, IE.
            Logic-based MINLP algorithms for the optimal synthesis of process
            networks. Comp. and Chem. Eng. 1996, 20(8), 959–978.
            DOI: 10.1016/0098-1354(95)00219-7.
            - GLOA algorithm:
            Lee, S; Grossmann, IE.
            A Global Optimization Algorithm for Nonconvex Generalized
            Disjunctive Programming and Applications to Process Systems.
            Comp. and Chem. Eng. 2001, 25, 1675-1697.
            DOI: 10.1016/S0098-1354(01)00732-3
            """.strip()
        )

        solve_data.results.solver.name = 'GDPopt %s - %s' % (
            str(self.version()), config.strategy)

        solve_data.original_model = model
        solve_data.working_model = model.clone()
        GDPopt = solve_data.working_model.GDPopt_utils
        setup_results_object(solve_data, config)

        solve_data.current_strategy = config.strategy

        # Verify that objective has correct form
        process_objective(solve_data, config)

        # Save model initial values. These are used later to initialize NLP
        # subproblems.
        solve_data.initial_var_values = list(
            v.value for v in GDPopt.variable_list)
        solve_data.best_solution_found = None

        # Validate the model to ensure that GDPopt is able to solve it.
        if not model_is_valid(solve_data, config):
            return

        # Integer cuts exclude particular discrete decisions
        GDPopt.integer_cuts = ConstraintList(doc='integer cuts')

        # Feasible integer cuts exclude discrete realizations that have
        # been explored via an NLP subproblem. Depending on model
        # characteristics, the user may wish to revisit NLP subproblems
        # (with a different initialization, for example). Therefore, these
        # cuts are not enabled by default, unless the initial model has no
        # discrete decisions.
        # Note: these cuts will only exclude integer realizations that are
        # not already in the primary GDPopt_integer_cuts ConstraintList.
        GDPopt.no_backtracking = ConstraintList(
            doc='explored integer cuts')

        # Set up iteration counters
        solve_data.master_iteration = 0
        solve_data.mip_iteration = 0
        solve_data.nlp_iteration = 0

        # Set up bounds
        solve_data.LB = float('-inf')
        solve_data.UB = float('inf')
        solve_data.iteration_log = {}

        # Flag indicating whether the solution improved in the past
        # iteration or not
        solve_data.feasible_solution_improved = False

        # Initialize the master problem
        with time_code(solve_data.timing, 'initialization'):
            GDPopt_initialize_master(solve_data, config)

        # Algorithm main loop
        with time_code(solve_data.timing, 'main loop'):
            GDPopt_iteration_loop(solve_data, config)

        if solve_data.best_solution_found is not None:
            # Update values in working model
            copy_var_list_values(
                from_list=solve_data.best_solution_found.GDPopt_utils.variable_list,
                to_list=GDPopt.variable_list,
                config=config)
            # Update values in original model
            copy_var_list_values(
                GDPopt.variable_list,
                solve_data.original_model.GDPopt_utils.variable_list,
                config)

        solve_data.results.problem.lower_bound = solve_data.LB
        solve_data.results.problem.upper_bound = solve_data.UB

    solve_data.results.solver.timing = solve_data.timing
    solve_data.results.solver.user_time = solve_data.timing.total
    solve_data.results.solver.wallclock_time = solve_data.timing.total
    solve_data.results.solver.iterations = solve_data.master_iteration

    return solve_data.results
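# --- Illustration (not from the source above) ---------------------------------
# Sketch of the kind of integer ("no-good") cut that the ConstraintLists set up
# above are meant to hold: given the current 0/1 realization of the binary
# variables, the cut forces at least one binary to change value in later master
# iterations. The helper name `add_integer_cut_example` and the `binary_vars`
# argument are hypothetical; GDPopt's actual cut generation lives elsewhere.
from pyomo.environ import value

def add_integer_cut_example(GDPopt, binary_vars):
    # Assumes binary_vars is nonempty and all variables currently hold values.
    ones = [v for v in binary_vars if value(v) > 0.5]
    zeros = [v for v in binary_vars if value(v) <= 0.5]
    # Exclude the current assignment: at least one binary must flip.
    GDPopt.integer_cuts.add(
        sum(1 - v for v in ones) + sum(v for v in zeros) >= 1)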