def __init__(self, *args, **kw):
    Container.__init__(self, *args, **kw)
    self.set_name('PyomoConfig')
    #
    # Create the nested options specified by the PyomoConfig._option
    # dictionary, which has been populated with the global_option decorator.
    #
    for item in PyomoConfig._option:
        d = self
        for attr in item[:-1]:
            if attr not in d:
                d[attr] = Container()
            d = d[attr]
        d[item[-1]] = PyomoConfig._option[item]
def parse_simple_impact(filename):
    # Open the impact file, transparently handling gzip compression.
    # (The original code read and discarded the gzip stream, then re-opened
    # the compressed file as plain text, which could not parse correctly.)
    if filename.endswith('.gz'):
        INPUT = gzip.open(filename, 'rt')
    else:
        INPUT = open(filename, 'r')
    # Skip the two header lines
    INPUT.readline()
    INPUT.readline()
    #
    data = {}
    locations = set()
    newscenario = True
    for line in INPUT:
        # Evaluate each whitespace-separated token as a Python literal
        tokens = list(map(eval, re.split('[ \t]+', line.strip())))
        if newscenario:
            data[tokens[0]] = {}
            newscenario = False
        if tokens[1] == -1:
            newscenario = True
        locations.add(tokens[1])
        data[tokens[0]][tokens[1]] = tokens[3]
    #
    INPUT.close()
    locmap = {}
    for loc in locations:
        locmap[loc] = loc
    return Container(impact=data, location_map=locmap)
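# Usage sketch for parse_simple_impact on a synthetic file. The row layout
# (scenario, location, time, impact) and the two-line header are inferred
# from the parser above; the sentinel location -1 marks a scenario's end.
import os
import tempfile

_sample = "header line 1\nheader line 2\n1 10 0 5.0\n1 -1 0 5.0\n"
_tmp = tempfile.NamedTemporaryFile('w', suffix='.impact', delete=False)
_tmp.write(_sample)
_tmp.close()
parsed = parse_simple_impact(_tmp.name)
print(parsed.impact)        # {1: {10: 5.0, -1: 5.0}}
print(parsed.location_map)  # identity map over the observed locations
os.remove(_tmp.name)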
def setup_solver_environment(model, config):
    solve_data = GDPoptSolveData()  # data object for storing solver state
    solve_data.config = config
    solve_data.results = SolverResults()
    solve_data.timing = Container()
    min_logging_level = logging.INFO if config.tee else None
    with time_code(solve_data.timing, 'total', is_main_timer=True), \
            lower_logger_level_to(config.logger, min_logging_level), \
            create_utility_block(model, 'GDPopt_utils', solve_data):

        # Create a working copy of the original model
        solve_data.original_model = model
        solve_data.working_model = model.clone()
        setup_results_object(solve_data, config)
        solve_data.active_strategy = config.strategy
        util_block = solve_data.working_model.GDPopt_utils

        # Save model initial values.
        # These can be used later to initialize NLP subproblems.
        solve_data.initial_var_values = list(
            v.value for v in util_block.variable_list)
        solve_data.best_solution_found = None

        # Integer cuts exclude particular discrete decisions
        util_block.integer_cuts = ConstraintList(doc='integer cuts')

        # Set up iteration counters
        solve_data.master_iteration = 0
        solve_data.mip_iteration = 0
        solve_data.nlp_iteration = 0

        # Set up bounds
        solve_data.LB = float('-inf')
        solve_data.UB = float('inf')
        solve_data.iteration_log = {}

        # Flag indicating whether the solution improved in the past
        # iteration or not
        solve_data.feasible_solution_improved = False

        yield solve_data  # yield the set-up solver environment

        if (solve_data.best_solution_found is not None
                and solve_data.best_solution_found
                is not solve_data.original_model):
            # Update values on the original model
            copy_var_list_values(
                from_list=solve_data.best_solution_found
                .GDPopt_utils.variable_list,
                to_list=solve_data.original_model.GDPopt_utils.variable_list,
                config=config)

    # Finalize the results object
    solve_data.results.problem.lower_bound = solve_data.LB
    solve_data.results.problem.upper_bound = solve_data.UB
    solve_data.results.solver.iterations = solve_data.master_iteration
    solve_data.results.solver.timing = solve_data.timing
    solve_data.results.solver.user_time = solve_data.timing.total
    solve_data.results.solver.wallclock_time = solve_data.timing.total
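# setup_solver_environment above is a generator (it yields solve_data), so it
# is presumably wrapped as a context manager at its definition site. A minimal
# sketch of that wiring, with the decorator applied explicitly:
from contextlib import contextmanager

solver_environment = contextmanager(setup_solver_environment)

# with solver_environment(model, config) as solve_data:
#     ...  # run the selected GDPopt algorithm, updating solve_data bounds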
def temoa_create_model_container(model):
    """Creates a container (wraps a dictionary) model_data to store model,
    instance, and results data for processing after a solve.
    """
    from pyutilib.misc import Container
    model_data = Container()
    model_data.model = model
    return model_data
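# Minimal usage sketch for temoa_create_model_container; the tiny model
# below is a hypothetical stand-in for the real Temoa model object.
from pyomo.environ import ConcreteModel, Var

_example_model = ConcreteModel(name='temoa_sketch')
_example_model.x = Var()
_model_data = temoa_create_model_container(_example_model)
assert _model_data.model is _example_model
# Later processing can attach more fields, e.g. _model_data.instance and
# _model_data.result after a solve.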
def _collect_run_rows(run_number: int) -> List[Container]:
    this_run_dir = get_run_dir(run_number)
    _load_run_config(this_run_dir)
    rows = []
    # Process successfully completed jobs
    for job in this_run_config.jobs_run - this_run_config.jobs_failed:
        model_name, solver_name = job
        test_model = models[model_name]
        test_solver = solvers[solver_name]
        job_data = Container()
        job_data.model = model_name
        job_data.solver = solver_name
        test_result = _get_job_result(this_run_dir, model_name, solver_name)
        if not test_result:
            continue  # TODO This should be unnecessary. We should detect a failure earlier in analysis.
        job_data.time = test_result.model_build_start_time
        job_data.LB = test_result.LB
        job_data.UB = test_result.UB
        job_data.elapsed = test_result.solver_run_time
        job_data.iterations = test_result.get('iterations', None)
        job_data.tc = test_result.termination_condition
        job_data.sense = test_model.objective_sense
        if job_data.tc != 'infeasible':
            job_data.soln_gap, job_data.opt_gap = _calculate_gaps(
                test_model, test_solver, test_result.LB, test_result.UB)
        else:
            job_data.soln_gap, job_data.opt_gap = None, None
        # Times to solution/optimality
        if (job_data.soln_gap is not None
                and job_data.soln_gap
                <= options.optcr + options['optcr tolerance']):
            job_data.time_to_soln = test_result.solver_run_time
            job_data.time_to_ok_soln = test_result.solver_run_time
        elif (job_data.soln_gap is not None
                and job_data.soln_gap <= options["ok solution tolerance"]):
            job_data.time_to_soln = float('inf')
            job_data.time_to_ok_soln = test_result.solver_run_time
        else:
            job_data.time_to_soln = float('inf')
            job_data.time_to_ok_soln = float('inf')
        if (job_data.opt_gap is not None
                and job_data.opt_gap
                <= options.optcr + options['optcr tolerance']):
            job_data.time_to_opt = test_result.solver_run_time
        else:
            job_data.time_to_opt = float('inf')
        rows.append(job_data)
    return rows
def __init__(self, name='unknown', **kwargs):
    """Constructor"""
    #
    # NOTE: The 'ctype' keyword argument is not defined here. Thus,
    # a model is treated as a 'Block' class type. This simplifies
    # the definition of the block_data_objects() method, since we treat
    # Model and Block objects as the same. Similarly, this avoids
    # the requirement to import PyomoModel.py in the block.py file.
    #
    SimpleBlock.__init__(self, **kwargs)
    self._name = name
    self.statistics = Container()
    self.config = PyomoConfig()
    self.solutions = ModelSolutions(self)
    self.config.preprocessor = 'pyomo.model.simple_preprocessor'
def convert(options=Options(), parser=None, model_format=None):
    global _format
    if model_format is not None:
        _format = model_format
    #
    # Import plugins
    #
    import pyomo.environ
    if options.model.save_file is None:
        if _format == ProblemFormat.cpxlp:
            options.model.save_file = 'unknown.lp'
        else:
            options.model.save_file = 'unknown.' + str(_format)
    options.model.save_format = _format
    data = Options(options=options)
    model_data = None
    try:
        pyomo.scripting.util.setup_environment(data)
        pyomo.scripting.util.apply_preprocessing(data, parser=parser)
        if data.error:
            return Container()
        model_data = pyomo.scripting.util.create_model(data)
        model_data.options = options
    except:
        # TBD: I should be able to call this function in the case of
        #      an exception to perform cleanup. However, as it stands,
        #      calling finalize with its default keyword value for
        #      model(=None) results in a different error related to
        #      task port values. Not sure how to interpret that.
        pyomo.scripting.util.finalize(
            data, model=ConcreteModel(), instance=None, results=None)
        raise
    else:
        pyomo.scripting.util.finalize(data, model=model_data.model)
        return model_data
def execute(self):
    if len(self.impact_list) == 0:
        raise IOError(
            "tso2Impact_Task expected a non-empty list for input "
            "'impact_list'")
    #
    args = ""
    for impact in self.impact_list:
        args += " --%s" % str(impact)
    if self.detectionLimit is not None:
        args += " --detectionLimit=%s" % str(self.detectionLimit)
    if self.responseTime is not None:
        args += " --responseTime=%s" % str(self.responseTime)
    if self.output_prefix is None:
        impact_tempfiles = True
        self.output_prefix = pyutilib.services.TempfileManager.create_tempfile(
            prefix='tso2Impact_')
    else:
        impact_tempfiles = False
    args += " %s" % str(self.output_prefix)
    args += " %s" % str(self.tso_filename)
    if self.tai_filename is not None:
        args += " %s" % str(self.tai_filename)
    #
    if self.logfile is None:
        self.logfile = pyutilib.services.TempfileManager.create_tempfile(
            suffix=".log")
    # TODO: manage debugging in tasks
    self.resource('tso2Impact').run(args, logfile=self.logfile, debug=True)
    #
    self.impact_data = {}
    for impact in self.impact_list:
        data = Container(
            type=str(impact),
            impact="%s_%s.impact" % (self.output_prefix, impact),
            id="%s_%s.impact.id" % (self.output_prefix, impact),
            nodemap="%s.nodemap" % self.output_prefix,
            scenariomap="%s.scenariomap" % self.output_prefix)
        pyutilib.services.TempfileManager.add_tempfile(data.impact)
        pyutilib.services.TempfileManager.add_tempfile(data.id)
        pyutilib.services.TempfileManager.add_tempfile(data.nodemap)
        pyutilib.services.TempfileManager.add_tempfile(data.scenariomap)
        self.impact_data[impact] = data
def test_solve_linear_GDP_unbounded(self):
    m = ConcreteModel()
    m.GDPopt_utils = Block()
    m.x = Var(bounds=(-1, 10))
    m.y = Var(bounds=(2, 3))
    m.z = Var()
    m.d = Disjunction(expr=[[m.x + m.y >= 5], [m.x - m.y <= 3]])
    m.o = Objective(expr=m.z)
    m.GDPopt_utils.variable_list = [m.x, m.y, m.z]
    m.GDPopt_utils.disjunct_list = [
        m.d._autodisjuncts[0], m.d._autodisjuncts[1]
    ]
    output = StringIO()
    with LoggingIntercept(output, 'pyomo.contrib.gdpopt', logging.WARNING):
        solver_data = GDPoptSolveData()
        solver_data.timing = Container()
        with time_code(solver_data.timing, 'main', is_main_timer=True):
            solve_linear_GDP(
                m, solver_data,
                GDPoptSolver.CONFIG(dict(mip_solver=mip_solver)))
        self.assertIn(
            "Linear GDP was unbounded. Resolving with arbitrary bound values",
            output.getvalue().strip())
def solve(self, model, **kwds):
    """Solve the model.

    Warning: this solver is still in beta. Keyword arguments subject to
    change. Undocumented keyword arguments definitely subject to change.

    This function performs all of the GDPopt solver setup and problem
    validation. It then calls upon helper functions to construct the
    initial master approximation and iteration loop.

    Args:
        model (Block): a Pyomo model or block to be solved

    """
    config = self.CONFIG(kwds.pop('options', {}))
    config.set_value(kwds)
    solve_data = GDPoptSolveData()
    solve_data.results = SolverResults()
    solve_data.timing = Container()

    old_logger_level = config.logger.getEffectiveLevel()
    with time_code(solve_data.timing, 'total'), \
            restore_logger_level(config.logger), \
            create_utility_block(model, 'GDPopt_utils', solve_data):
        if config.tee and old_logger_level > logging.INFO:
            # If the logger does not already include INFO, include it.
            config.logger.setLevel(logging.INFO)
        config.logger.info(
            "Starting GDPopt version %s using %s algorithm"
            % (".".join(map(str, self.version())), config.strategy)
        )
        config.logger.info(
            """
            If you use this software, you may cite the following:
            - Implementation:
            Chen, Q; Johnson, ES; Siirola, JD; Grossmann, IE.
            Pyomo.GDP: Disjunctive Models in Python.
            Proc. of the 13th Intl. Symposium on Process Systems Eng.
            San Diego, 2018.
            - LOA algorithm:
            Türkay, M; Grossmann, IE.
            Logic-based MINLP algorithms for the optimal synthesis of process
            networks. Comp. and Chem. Eng. 1996, 20(8), 959-978.
            DOI: 10.1016/0098-1354(95)00219-7.
            - GLOA algorithm:
            Lee, S; Grossmann, IE.
            A Global Optimization Algorithm for Nonconvex Generalized
            Disjunctive Programming and Applications to Process Systems.
            Comp. and Chem. Eng. 2001, 25, 1675-1697.
            DOI: 10.1016/S0098-1354(01)00732-3
            """.strip()
        )

        solve_data.results.solver.name = 'GDPopt %s - %s' % (
            str(self.version()), config.strategy)

        solve_data.original_model = model
        solve_data.working_model = model.clone()
        GDPopt = solve_data.working_model.GDPopt_utils
        setup_results_object(solve_data, config)

        solve_data.current_strategy = config.strategy

        # Verify that the objective has the correct form
        process_objective(solve_data, config)

        # Save model initial values. These are used later to initialize NLP
        # subproblems.
        solve_data.initial_var_values = list(
            v.value for v in GDPopt.variable_list)
        solve_data.best_solution_found = None

        # Validate the model to ensure that GDPopt is able to solve it.
        if not model_is_valid(solve_data, config):
            return

        # Integer cuts exclude particular discrete decisions
        GDPopt.integer_cuts = ConstraintList(doc='integer cuts')

        # Feasible integer cuts exclude discrete realizations that have
        # been explored via an NLP subproblem. Depending on model
        # characteristics, the user may wish to revisit NLP subproblems
        # (with a different initialization, for example). Therefore, these
        # cuts are not enabled by default, unless the initial model has no
        # discrete decisions.
        # Note: these cuts will only exclude integer realizations that are
        # not already in the primary GDPopt_integer_cuts ConstraintList.
        GDPopt.no_backtracking = ConstraintList(
            doc='explored integer cuts')

        # Set up iteration counters
        solve_data.master_iteration = 0
        solve_data.mip_iteration = 0
        solve_data.nlp_iteration = 0

        # Set up bounds
        solve_data.LB = float('-inf')
        solve_data.UB = float('inf')
        solve_data.iteration_log = {}

        # Flag indicating whether the solution improved in the past
        # iteration or not
        solve_data.feasible_solution_improved = False

        # Initialize the master problem
        with time_code(solve_data.timing, 'initialization'):
            GDPopt_initialize_master(solve_data, config)

        # Algorithm main loop
        with time_code(solve_data.timing, 'main loop'):
            GDPopt_iteration_loop(solve_data, config)

        if solve_data.best_solution_found is not None:
            # Update values in working model
            copy_var_list_values(
                from_list=solve_data.best_solution_found
                .GDPopt_utils.variable_list,
                to_list=GDPopt.variable_list,
                config=config)
            # Update values in original model
            copy_var_list_values(
                GDPopt.variable_list,
                solve_data.original_model.GDPopt_utils.variable_list,
                config)

        solve_data.results.problem.lower_bound = solve_data.LB
        solve_data.results.problem.upper_bound = solve_data.UB

    solve_data.results.solver.timing = solve_data.timing
    solve_data.results.solver.user_time = solve_data.timing.total
    solve_data.results.solver.wallclock_time = solve_data.timing.total
    solve_data.results.solver.iterations = solve_data.master_iteration

    return solve_data.results
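# Typical invocation sketch for the GDPopt solve() method shown above;
# the subsolver names are illustrative and must be installed locally.
from pyomo.environ import SolverFactory

# results = SolverFactory('gdpopt').solve(
#     model, strategy='LOA', mip_solver='glpk', nlp_solver='ipopt', tee=True)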
def build_model_size_report(model):
    """Build a model size report object."""
    report = ModelSizeReport()
    activated_disjunctions = ComponentSet()
    activated_disjuncts = ComponentSet()
    fixed_true_disjuncts = ComponentSet()
    activated_constraints = ComponentSet()
    activated_vars = ComponentSet()
    new_containers = (model, )

    while new_containers:
        new_activated_disjunctions = ComponentSet()
        new_activated_disjuncts = ComponentSet()
        new_fixed_true_disjuncts = ComponentSet()
        new_activated_constraints = ComponentSet()

        for container in new_containers:
            (next_activated_disjunctions,
             next_fixed_true_disjuncts,
             next_activated_disjuncts,
             next_activated_constraints
             ) = _process_activated_container(container)
            new_activated_disjunctions.update(next_activated_disjunctions)
            new_activated_disjuncts.update(next_activated_disjuncts)
            new_fixed_true_disjuncts.update(next_fixed_true_disjuncts)
            new_activated_constraints.update(next_activated_constraints)

        new_containers = (
            (new_activated_disjuncts - activated_disjuncts) |
            (new_fixed_true_disjuncts - fixed_true_disjuncts))

        activated_disjunctions.update(new_activated_disjunctions)
        activated_disjuncts.update(new_activated_disjuncts)
        fixed_true_disjuncts.update(new_fixed_true_disjuncts)
        activated_constraints.update(new_activated_constraints)
        activated_vars.update(
            var for constr in new_activated_constraints
            for var in EXPR.identify_variables(
                constr.body, include_fixed=False))
        activated_vars.update(
            disj.indicator_var for disj in activated_disjuncts)

    report.activated = Container()
    report.activated.variables = len(activated_vars)
    report.activated.binary_variables = sum(
        1 for v in activated_vars if v.is_binary())
    report.activated.integer_variables = sum(
        1 for v in activated_vars if v.is_integer())
    report.activated.continuous_variables = sum(
        1 for v in activated_vars if v.is_continuous())
    report.activated.disjunctions = len(activated_disjunctions)
    report.activated.disjuncts = len(activated_disjuncts)
    report.activated.constraints = len(activated_constraints)
    report.activated.nonlinear_constraints = sum(
        1 for c in activated_constraints
        if c.body.polynomial_degree() not in (1, 0))

    report.overall = Container()
    block_like = (Block, Disjunct)
    all_vars = ComponentSet(
        model.component_data_objects(Var, descend_into=block_like))
    report.overall.variables = len(all_vars)
    report.overall.binary_variables = sum(
        1 for v in all_vars if v.is_binary())
    report.overall.integer_variables = sum(
        1 for v in all_vars if v.is_integer())
    report.overall.continuous_variables = sum(
        1 for v in all_vars if v.is_continuous())
    report.overall.disjunctions = sum(
        1 for d in model.component_data_objects(
            Disjunction, descend_into=block_like))
    report.overall.disjuncts = sum(
        1 for d in model.component_data_objects(
            Disjunct, descend_into=block_like))
    report.overall.constraints = sum(
        1 for c in model.component_data_objects(
            Constraint, descend_into=block_like))
    report.overall.nonlinear_constraints = sum(
        1 for c in model.component_data_objects(
            Constraint, descend_into=block_like)
        if c.body.polynomial_degree() not in (1, 0))

    report.warning = Container()
    report.warning.unassociated_disjuncts = sum(
        1 for d in model.component_data_objects(
            Disjunct, descend_into=block_like)
        if not d.indicator_var.fixed and d not in activated_disjuncts)

    return report
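# Illustrative call of build_model_size_report on a small GDP model; the
# tiny model below is a sketch, not taken from any model library.
from pyomo.environ import ConcreteModel, Constraint, Objective, Var
from pyomo.gdp import Disjunction

m = ConcreteModel()
m.x = Var(bounds=(0, 10))
m.y = Var(bounds=(0, 10))
m.c = Constraint(expr=m.x + m.y <= 8)
m.d = Disjunction(expr=[[m.x >= 5], [m.y >= 5]])
m.o = Objective(expr=m.x + m.y)

report = build_model_size_report(m)
print(report.activated.disjunctions)  # expected: 1
print(report.overall.constraints)     # constraints in m and its disjuncts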
"""Global registry""" from datetime import datetime from pathlib import Path import yaml from pyutilib.misc import Container # Registries for the models and solvers models = Container() solvers = Container() options = Container() # Make output and runs directories, if they do not exist runsdir = Path(__file__).parent.joinpath("output/runs/") runsdir.mkdir(exist_ok=True, parents=True) outputdir = Path(__file__).parent.joinpath("output/") # File paths runner_filepath = Path(__file__).parent.joinpath( "pysperf_job_runner.py").resolve() runner_config_filename = "pysperf_job_runner.config" job_result_filename = "pysperf_result.log" job_start_filename = ".job_started.log" job_stop_filename = ".job_stopped.log" job_model_built_filename = ".job_model_built.log" job_solve_done_filename = ".job_solve_done.log" _internal_config_file = Path(__file__).parent.joinpath( '.internal.config.pfcache') _model_cache_path = Path(__file__).parent.joinpath('model.info.pfcache') run_config_filename = "run.config.pfdata"
def solve(self, model, **kwds):
    """Solve the model.

    Warning: this solver is still in beta. Keyword arguments subject to
    change. Undocumented keyword arguments definitely subject to change.

    This function performs all of the GDPopt solver setup and problem
    validation. It then calls upon helper functions to construct the
    initial master approximation and iteration loop.

    Args:
        model (Block): a Pyomo model or block to be solved

    """
    config = self.CONFIG(kwds.pop('options', {}))
    config.set_value(kwds)
    solve_data = GDPoptSolveData()
    solve_data.results = SolverResults()
    solve_data.timing = Container()

    old_logger_level = config.logger.getEffectiveLevel()
    with time_code(solve_data.timing, 'total'), \
            restore_logger_level(config.logger), \
            create_utility_block(model, 'GDPopt_utils'):
        if config.tee and old_logger_level > logging.INFO:
            # If the logger does not already include INFO, include it.
            config.logger.setLevel(logging.INFO)
        config.logger.info("---Starting GDPopt---")

        solve_data.original_model = model
        solve_data.working_model = clone_orig_model_with_lists(model)
        GDPopt = solve_data.working_model.GDPopt_utils
        record_original_model_statistics(solve_data, config)

        solve_data.current_strategy = config.strategy

        # Reformulate integer variables to binary
        reformulate_integer_variables(solve_data.working_model, config)

        process_objective(solve_data, config)

        # Save ordered lists of main modeling components, so that data can
        # be easily transferred between future model clones.
        build_ordered_component_lists(solve_data.working_model)
        record_working_model_statistics(solve_data, config)
        solve_data.results.solver.name = 'GDPopt ' + str(self.version())

        # Save model initial values. These are used later to initialize NLP
        # subproblems.
        solve_data.initial_var_values = list(
            v.value for v in GDPopt.working_var_list)

        # Store the initial model state as the best solution found. If we
        # find no better solution, then we will restore from this copy.
        solve_data.best_solution_found = solve_data.initial_var_values

        # Validate the model to ensure that GDPopt is able to solve it.
        if not model_is_valid(solve_data, config):
            return

        # Maps in order to keep track of certain generated constraints
        GDPopt.oa_cut_map = ComponentMap()

        # Integer cuts exclude particular discrete decisions
        GDPopt.integer_cuts = ConstraintList(doc='integer cuts')

        # Feasible integer cuts exclude discrete realizations that have
        # been explored via an NLP subproblem. Depending on model
        # characteristics, the user may wish to revisit NLP subproblems
        # (with a different initialization, for example). Therefore, these
        # cuts are not enabled by default, unless the initial model has no
        # discrete decisions.
        # Note: these cuts will only exclude integer realizations that are
        # not already in the primary GDPopt_integer_cuts ConstraintList.
        GDPopt.no_backtracking = ConstraintList(
            doc='explored integer cuts')

        # Set up iteration counters
        solve_data.master_iteration = 0
        solve_data.mip_iteration = 0
        solve_data.nlp_iteration = 0

        # Set up bounds
        solve_data.LB = float('-inf')
        solve_data.UB = float('inf')
        solve_data.iteration_log = {}

        # Flag indicating whether the solution improved in the past
        # iteration or not
        solve_data.feasible_solution_improved = False

        # Initialize the master problem
        with time_code(solve_data.timing, 'initialization'):
            GDPopt_initialize_master(solve_data, config)

        # Algorithm main loop
        with time_code(solve_data.timing, 'main loop'):
            GDPopt_iteration_loop(solve_data, config)

        # Update values in working model
        copy_var_list_values(
            from_list=solve_data.best_solution_found,
            to_list=GDPopt.working_var_list,
            config=config)
        GDPopt.objective_value.set_value(
            value(solve_data.working_objective_expr, exception=False))

        # Update values in original model
        copy_var_list_values(
            GDPopt.orig_var_list,
            solve_data.original_model.GDPopt_utils.orig_var_list,
            config)

        solve_data.results.problem.lower_bound = solve_data.LB
        solve_data.results.problem.upper_bound = solve_data.UB

    solve_data.results.solver.timing = solve_data.timing
    return solve_data.results
def solve(self, model, **kwds):
    config = self.CONFIG(kwds.pop('options', {}))
    config.set_value(kwds)
    return SolverFactory('gdpopt').solve(
        model, strategy='LBB',
        minlp_solver=config.solver,
        minlp_solver_args=config.solver_args,
        tee=config.tee,
        check_sat=config.check_sat,
        logger=config.logger,
        time_limit=config.time_limit)

    # NOTE: the code below this return is unreachable; it is the legacy
    # standalone GDPbb implementation, retained for reference.

    # Validate model to be used with gdpbb
    self.validate_model(model)
    # Set solver as an MINLP
    solve_data = GDPbbSolveData()
    solve_data.timing = Container()
    solve_data.original_model = model
    solve_data.results = SolverResults()

    old_logger_level = config.logger.getEffectiveLevel()
    with time_code(solve_data.timing, 'total', is_main_timer=True), \
            restore_logger_level(config.logger), \
            create_utility_block(model, 'GDPbb_utils', solve_data):
        if config.tee and old_logger_level > logging.INFO:
            # If the logger does not already include INFO, include it.
            config.logger.setLevel(logging.INFO)
        config.logger.info(
            "Starting GDPbb version %s using %s as subsolver"
            % (".".join(map(str, self.version())), config.solver))

        # Setup results
        solve_data.results.solver.name = 'GDPbb - %s' % (str(config.solver))
        setup_results_object(solve_data, config)

        # Clone the original model for the root node of branch and bound
        root = solve_data.working_model = solve_data.original_model.clone()

        # Get the objective sense
        process_objective(solve_data, config)
        objectives = solve_data.original_model.component_data_objects(
            Objective, active=True)
        obj = next(objectives, None)
        # obj_sign is required by the ordering logic below; it was dropped
        # from the flattened text and is restored here.
        obj_sign = 1 if obj.sense == minimize else -1
        solve_data.results.problem.sense = obj.sense

        # Set up lists to keep track of which disjunctions have been covered.

        # This list keeps track of the relaxed disjunctions
        root.GDPbb_utils.unenforced_disjunctions = list(
            disjunction for disjunction in root.GDPbb_utils.disjunction_list
            if disjunction.active)

        root.GDPbb_utils.deactivated_constraints = ComponentSet([
            constr
            for disjunction in root.GDPbb_utils.unenforced_disjunctions
            for disjunct in disjunction.disjuncts
            for constr in disjunct.component_data_objects(
                ctype=Constraint, active=True)
            if constr.body.polynomial_degree() not in (1, 0)
        ])
        # Deactivate nonlinear constraints in unenforced disjunctions
        for constr in root.GDPbb_utils.deactivated_constraints:
            constr.deactivate()

        # Add the BigM suffix if it does not already exist. Used later
        # during nonlinear constraint activation.
        if not hasattr(root, 'BigM'):
            root.BigM = Suffix()

        # Pre-screen that none of the disjunctions are already predetermined
        # due to the disjuncts being fixed to True/False values.
        # TODO this should also be done within the loop, but we aren't
        # handling it right now. Should affect efficiency, but not
        # correctness.
        root.GDPbb_utils.disjuncts_fixed_True = ComponentSet()
        # Only find top-level (non-nested) disjunctions
        for disjunction in root.component_data_objects(
                Disjunction, active=True):
            fixed_true_disjuncts = [
                disjunct for disjunct in disjunction.disjuncts
                if disjunct.indicator_var.fixed
                and disjunct.indicator_var.value == 1
            ]
            fixed_false_disjuncts = [
                disjunct for disjunct in disjunction.disjuncts
                if disjunct.indicator_var.fixed
                and disjunct.indicator_var.value == 0
            ]
            for disjunct in fixed_false_disjuncts:
                disjunct.deactivate()
            if len(fixed_false_disjuncts) == len(disjunction.disjuncts) - 1:
                # All but one disjunct in the disjunction is fixed to False.
                # The remaining one must be True.
                if not fixed_true_disjuncts:
                    fixed_true_disjuncts = [
                        disjunct for disjunct in disjunction.disjuncts
                        if disjunct not in fixed_false_disjuncts
                    ]
            # Reactivate the fixed-true disjuncts
            for disjunct in fixed_true_disjuncts:
                newly_activated = ComponentSet()
                for constr in disjunct.component_data_objects(Constraint):
                    if constr in root.GDPbb_utils.deactivated_constraints:
                        newly_activated.add(constr)
                        constr.activate()
                        # Set the big M value for the constraint
                        root.BigM[constr] = 1
                        # Note: we use a default big M value of 1 because all
                        # non-selected disjuncts should be deactivated.
                        # Therefore, none of the big M transformed nonlinear
                        # constraints will need to be relaxed. The default M
                        # value should therefore be irrelevant.
                root.GDPbb_utils.deactivated_constraints -= newly_activated
                root.GDPbb_utils.disjuncts_fixed_True.add(disjunct)

            if fixed_true_disjuncts:
                assert disjunction.xor, \
                    "GDPbb only handles disjunctions in which one term " \
                    "can be selected. %s violates this assumption." \
                    % (disjunction.name, )
                root.GDPbb_utils.unenforced_disjunctions.remove(disjunction)

        # Check satisfiability
        if config.check_sat and satisfiable(root, config.logger) is False:
            # Problem is not satisfiable. Problem is infeasible.
            obj_value = obj_sign * float('inf')
        else:
            # Solve the root node
            config.logger.info("Solving the root node.")
            obj_value, result, var_values = self.subproblem_solve(
                root, config)

        if obj_sign * obj_value == float('inf'):
            config.logger.info(
                "Model was found to be infeasible at the root node. "
                "Elapsed %.2f seconds."
                % get_main_elapsed_time(solve_data.timing))
            if solve_data.results.problem.sense == minimize:
                solve_data.results.problem.lower_bound = float('inf')
                solve_data.results.problem.upper_bound = None
            else:
                solve_data.results.problem.lower_bound = None
                solve_data.results.problem.upper_bound = float('-inf')
            solve_data.results.solver.timing = solve_data.timing
            solve_data.results.solver.iterations = 0
            solve_data.results.solver.termination_condition = tc.infeasible
            return solve_data.results

        # Initialize minheap for the branch and bound algorithm.
        # Heap structure: (ordering tuple, model)
        # Ordering tuple: (objective value, disjunctions_left,
        #                  -total_nodes_counter)
        #   - select solutions with lower objective value,
        #     then fewer disjunctions left to explore (depth first),
        #     then more recently encountered (tiebreaker)
        heap = []
        total_nodes_counter = 0
        disjunctions_left = len(root.GDPbb_utils.unenforced_disjunctions)
        heapq.heappush(heap, (
            (obj_sign * obj_value, disjunctions_left, -total_nodes_counter),
            root, result, var_values))

        # Loop to branch through the tree
        while len(heap) > 0:
            # Pop the best node off of the heap
            sort_tuple, incumbent_model, incumbent_results, \
                incumbent_var_values = heapq.heappop(heap)
            incumbent_obj_value, disjunctions_left, _ = sort_tuple
            config.logger.info(
                "Exploring node with LB %.10g and %s inactive disjunctions."
                % (incumbent_obj_value, disjunctions_left))

            # If all the originally active disjunctions are active, solve
            # and return the solution
            if disjunctions_left == 0:
                config.logger.info("Model solved.")
                # Model is solved. Copy over solution values.
                original_model = solve_data.original_model
                for orig_var, val in zip(
                        original_model.GDPbb_utils.variable_list,
                        incumbent_var_values):
                    orig_var.value = val

                solve_data.results.problem.lower_bound = \
                    incumbent_results.problem.lower_bound
                solve_data.results.problem.upper_bound = \
                    incumbent_results.problem.upper_bound
                solve_data.results.solver.timing = solve_data.timing
                solve_data.results.solver.iterations = total_nodes_counter
                solve_data.results.solver.termination_condition = \
                    incumbent_results.solver.termination_condition
                return solve_data.results

            # Pick the next disjunction to branch on
            next_disjunction = \
                incumbent_model.GDPbb_utils.unenforced_disjunctions[0]
            config.logger.info(
                "Branching on disjunction %s" % next_disjunction.name)
            assert next_disjunction.xor, \
                "GDPbb only handles disjunctions in which one term can be " \
                "selected. %s violates this assumption." \
                % (next_disjunction.name, )

            new_nodes_counter = 0
            for i, disjunct in enumerate(next_disjunction.disjuncts):
                # Create one branch for each of the disjuncts on the
                # disjunction
                if any(disj.indicator_var.fixed
                       and disj.indicator_var.value == 1
                       for disj in next_disjunction.disjuncts
                       if disj is not disjunct):
                    # If any other disjunct is fixed to 1 and an xor
                    # relationship applies, then this disjunct cannot be
                    # activated.
                    continue

                # Check time limit
                if get_main_elapsed_time(
                        solve_data.timing) >= config.time_limit:
                    if solve_data.results.problem.sense == minimize:
                        solve_data.results.problem.lower_bound = \
                            incumbent_obj_value
                        solve_data.results.problem.upper_bound = float('inf')
                    else:
                        solve_data.results.problem.lower_bound = \
                            float('-inf')
                        solve_data.results.problem.upper_bound = \
                            incumbent_obj_value
                    config.logger.info(
                        'GDPopt unable to converge bounds '
                        'before time limit of {} seconds. '
                        'Elapsed: {} seconds'.format(
                            config.time_limit,
                            get_main_elapsed_time(solve_data.timing)))
                    config.logger.info(
                        'Final bound values: LB: {}  UB: {}'.format(
                            solve_data.results.problem.lower_bound,
                            solve_data.results.problem.upper_bound))
                    solve_data.results.solver.timing = solve_data.timing
                    solve_data.results.solver.iterations = \
                        total_nodes_counter
                    solve_data.results.solver.termination_condition = \
                        tc.maxTimeLimit
                    return solve_data.results

                # Branch on the disjunct
                child = incumbent_model.clone()
                # TODO I am leaving the old branching system in place, but
                # there should be something better, ideally that deals with
                # nested disjunctions as well.
                disjunction_to_branch = \
                    child.GDPbb_utils.unenforced_disjunctions.pop(0)
                child_disjunct = disjunction_to_branch.disjuncts[i]
                child_disjunct.indicator_var.fix(1)
                # Deactivate (and fix to 0) other disjuncts on the
                # disjunction
                for disj in disjunction_to_branch.disjuncts:
                    if disj is not child_disjunct:
                        disj.deactivate()
                # Activate nonlinear constraints on the newly fixed child
                # disjunct
                newly_activated = ComponentSet()
                for constr in child_disjunct.component_data_objects(
                        Constraint):
                    if constr in child.GDPbb_utils.deactivated_constraints:
                        newly_activated.add(constr)
                        constr.activate()
                        # Set the big M value for the constraint
                        child.BigM[constr] = 1
                        # Note: we use a default big M value of 1 because all
                        # non-selected disjuncts should be deactivated.
                        # Therefore, none of the big M transformed nonlinear
                        # constraints will need to be relaxed. The default M
                        # value should therefore be irrelevant.
                child.GDPbb_utils.deactivated_constraints -= newly_activated
                child.GDPbb_utils.disjuncts_fixed_True.add(child_disjunct)

                if disjunct in \
                        incumbent_model.GDPbb_utils.disjuncts_fixed_True:
                    # If the disjunct was already branched to True from a
                    # parent disjunct branching, just pass through the
                    # incumbent value without resolving. The solution should
                    # be the same as the parent. (The incumbent results are
                    # pushed here, consistent with that comment.)
                    total_nodes_counter += 1
                    ordering_tuple = (obj_sign * incumbent_obj_value,
                                      disjunctions_left - 1,
                                      -total_nodes_counter)
                    heapq.heappush(heap, (ordering_tuple, child,
                                          incumbent_results,
                                          incumbent_var_values))
                    new_nodes_counter += 1
                    continue

                if config.check_sat and satisfiable(
                        child, config.logger) is False:
                    # Problem is not satisfiable. Skip this disjunct.
                    continue

                obj_value, result, var_values = self.subproblem_solve(
                    child, config)
                total_nodes_counter += 1
                ordering_tuple = (obj_sign * obj_value,
                                  disjunctions_left - 1,
                                  -total_nodes_counter)
                heapq.heappush(heap, (ordering_tuple, child, result,
                                      var_values))
                new_nodes_counter += 1

            config.logger.info(
                "Added %s new nodes with %s relaxed disjunctions to the "
                "heap. Size now %s."
                % (new_nodes_counter, disjunctions_left - 1, len(heap)))
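# A small standalone demonstration of the node-ordering tuples used by the
# branch-and-bound heaps above: lower objective first, then fewer remaining
# disjunctions (depth first), then the more recently created node.
import heapq

demo_heap = []
heapq.heappush(demo_heap, ((5.0, 2, -1), 'node A'))
heapq.heappush(demo_heap, ((5.0, 1, -2), 'node B'))
heapq.heappush(demo_heap, ((4.0, 3, -3), 'node C'))
assert heapq.heappop(demo_heap)[1] == 'node C'  # best (lowest) bound wins
assert heapq.heappop(demo_heap)[1] == 'node B'  # tie on bound -> deeper node
assert heapq.heappop(demo_heap)[1] == 'node A'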
from pathlib import Path

import yaml
from pyutilib.misc import Container

from .model_types import ModelType
from pysperf.model_library import models
from pysperf.solver_library import solvers
from .config import (
    cache_internal_options_to_file,
    options,
    run_config_filename,
    runner_filepath,
    runsdir,
)

this_run_config = Container()


def _write_run_config(this_run_dir: Path):
    config_to_store = Container(**this_run_config)
    if 'jobs_failed' in config_to_store:
        config_to_store.jobs_failed = [
            (model, solver) for model, solver in config_to_store.jobs_failed]
    if 'jobs_run' in config_to_store:
        config_to_store.jobs_run = [
            (model, solver) for model, solver in config_to_store.jobs_run]
    with this_run_dir.joinpath(run_config_filename).open('w') as runinfofile:
        yaml.safe_dump(dict(**config_to_store), runinfofile)
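# The analysis code calls _load_run_config, which is defined elsewhere in
# the real project. A minimal sketch of the assumed inverse of
# _write_run_config, restoring the YAML lists back to sets of
# (model, solver) tuples for the set arithmetic used in analysis:
def _load_run_config_sketch(this_run_dir: Path) -> None:
    with this_run_dir.joinpath(run_config_filename).open('r') as runinfofile:
        loaded = yaml.safe_load(runinfofile)
    this_run_config.clear()
    this_run_config.update(loaded)
    if 'jobs_failed' in this_run_config:
        this_run_config.jobs_failed = set(
            tuple(job) for job in this_run_config.jobs_failed)
    if 'jobs_run' in this_run_config:
        this_run_config.jobs_run = set(
            tuple(job) for job in this_run_config.jobs_run)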
def solve(self, model, **kwds):
    """Solve the model.

    Warning: this solver is still in beta. Keyword arguments subject to
    change. Undocumented keyword arguments definitely subject to change.

    Warning: at this point in time, if you try to use PSC or GBD with
    anything other than IPOPT as the NLP solver, bad things will happen.
    This is because the suffixes are not in place to extract dual values
    from the variable bounds for any other solver.

    TODO: fix needed with the GBD implementation.

    Args:
        model (Block): a Pyomo model or block to be solved

    """
    config = self.CONFIG(kwds.pop('options', {}))
    config.set_value(kwds)

    # Configuration confirmation
    if config.single_tree:
        config.iteration_limit = 1
        config.add_slack = False
        config.add_nogood_cuts = False
        config.mip_solver = 'cplex_persistent'
        config.logger.info(
            "Single-tree implementation is activated. "
            "The default MIP solver is 'cplex_persistent'.")
    # If the slacks are fixed to zero, just don't add them
    if config.max_slack == 0.0:
        config.add_slack = False

    if config.strategy == "GOA":
        config.add_nogood_cuts = True
        config.add_slack = True
        config.use_mcpp = True
        config.integer_to_binary = True
        config.use_dual = False
        config.use_fbbt = True

    if config.nlp_solver == "baron":
        config.use_dual = False
    # If the ECP tolerance is not provided, use the bound tolerance
    if config.ecp_tolerance is None:
        config.ecp_tolerance = config.bound_tolerance
    # If the objective function is a constant, the dual bound constraint
    # is not added.
    obj = next(model.component_data_objects(ctype=Objective, active=True))
    if obj.expr.polynomial_degree() == 0:
        config.use_dual_bound = False

    solve_data = MindtPySolveData()
    solve_data.results = SolverResults()
    solve_data.timing = Container()
    solve_data.curr_int_sol = []
    solve_data.prev_int_sol = []

    if config.use_fbbt:
        fbbt(model)
        config.logger.info("Using fbbt to tighten the variable bounds.")

    solve_data.original_model = model
    solve_data.working_model = model.clone()
    if config.integer_to_binary:
        TransformationFactory('contrib.integer_to_binary'). \
            apply_to(solve_data.working_model)

    new_logging_level = logging.INFO if config.tee else None
    with time_code(solve_data.timing, 'total', is_main_timer=True), \
            lower_logger_level_to(config.logger, new_logging_level), \
            create_utility_block(solve_data.working_model, 'MindtPy_utils',
                                 solve_data):
        config.logger.info("---Starting MindtPy---")

        MindtPy = solve_data.working_model.MindtPy_utils
        setup_results_object(solve_data, config)
        process_objective(solve_data, config, use_mcpp=config.use_mcpp)

        # Save model initial values.
        solve_data.initial_var_values = list(
            v.value for v in MindtPy.variable_list)

        # Store the initial model state as the best solution found. If we
        # find no better solution, then we will restore from this copy.
        solve_data.best_solution_found = None
        solve_data.best_solution_found_time = None

        # Record solver name
        solve_data.results.solver.name = 'MindtPy' + str(config.strategy)

        # Validate the model to ensure that MindtPy is able to solve it.
        if not model_is_valid(solve_data, config):
            return

        # Create a model block in which to store the generated feasibility
        # slack constraints. Do not leave the constraints on by default.
        feas = MindtPy.MindtPy_feas = Block()
        feas.deactivate()
        feas.feas_constraints = ConstraintList(
            doc='Feasibility Problem Constraints')

        # Create a model block in which to store the generated linear
        # constraints. Do not leave the constraints on by default.
        lin = MindtPy.MindtPy_linear_cuts = Block()
        lin.deactivate()

        # Integer cuts exclude particular discrete decisions
        lin.integer_cuts = ConstraintList(doc='integer cuts')
        # Feasible integer cuts exclude discrete realizations that have
        # been explored via an NLP subproblem. Depending on model
        # characteristics, the user may wish to revisit NLP subproblems
        # (with a different initialization, for example). Therefore, these
        # cuts are not enabled by default.
        #
        # Note: these cuts will only exclude integer realizations that are
        # not already in the primary integer_cuts ConstraintList.
        lin.feasible_integer_cuts = ConstraintList(
            doc='explored integer cuts')
        lin.feasible_integer_cuts.deactivate()

        # Set up iteration counters
        solve_data.nlp_iter = 0
        solve_data.mip_iter = 0
        solve_data.mip_subiter = 0

        # Set up bounds
        solve_data.LB = float('-inf')
        solve_data.UB = float('inf')
        solve_data.LB_progress = [solve_data.LB]
        solve_data.UB_progress = [solve_data.UB]
        if config.single_tree and config.add_nogood_cuts:
            solve_data.stored_bound = {}
        if config.strategy == 'GOA' and config.add_nogood_cuts:
            solve_data.num_no_good_cuts_added = {}

        # Set of NLP iterations for which cuts were generated
        lin.nlp_iters = Set(dimen=1)

        # Set of MIP iterations for which cuts were generated in ECP
        lin.mip_iters = Set(dimen=1)

        if config.feasibility_norm == 'L1' or config.feasibility_norm == 'L2':
            feas.nl_constraint_set = Set(
                initialize=[
                    i for i, constr in enumerate(MindtPy.constraint_list, 1)
                    if constr.body.polynomial_degree() not in (1, 0)
                ],
                doc="Integer index set over the nonlinear constraints. "
                    "The set corresponds to the index of each nonlinear "
                    "constraint in constraint_set")
            # Create slack variables for the feasibility problem
            feas.slack_var = Var(feas.nl_constraint_set,
                                 domain=NonNegativeReals, initialize=1)
        else:
            feas.slack_var = Var(domain=NonNegativeReals, initialize=1)

        # Create slack variables for OA cuts
        if config.add_slack:
            lin.slack_vars = VarList(
                bounds=(0, config.max_slack), initialize=0,
                domain=NonNegativeReals)

        # Flag indicating whether the solution improved in the past
        # iteration or not
        solve_data.solution_improved = False

        if config.nlp_solver == 'ipopt':
            if not hasattr(solve_data.working_model, 'ipopt_zL_out'):
                solve_data.working_model.ipopt_zL_out = Suffix(
                    direction=Suffix.IMPORT)
            if not hasattr(solve_data.working_model, 'ipopt_zU_out'):
                solve_data.working_model.ipopt_zU_out = Suffix(
                    direction=Suffix.IMPORT)

        # Initialize the master problem
        with time_code(solve_data.timing, 'initialization'):
            MindtPy_initialize_master(solve_data, config)

        # Algorithm main loop
        with time_code(solve_data.timing, 'main loop'):
            MindtPy_iteration_loop(solve_data, config)

        if solve_data.best_solution_found is not None:
            # Update values in original model
            copy_var_list_values(
                from_list=solve_data.best_solution_found
                .MindtPy_utils.variable_list,
                to_list=MindtPy.variable_list,
                config=config)
            # MindtPy.objective_value.set_value(
            #     value(solve_data.working_objective_expr, exception=False))
            copy_var_list_values(
                MindtPy.variable_list,
                solve_data.original_model.component_data_objects(Var),
                config)

        solve_data.results.problem.lower_bound = solve_data.LB
        solve_data.results.problem.upper_bound = solve_data.UB

    solve_data.results.solver.timing = solve_data.timing
    solve_data.results.solver.user_time = solve_data.timing.total
    solve_data.results.solver.wallclock_time = solve_data.timing.total
    solve_data.results.solver.iterations = solve_data.mip_iter
    solve_data.results.solver.best_solution_found_time = \
        solve_data.best_solution_found_time

    if config.single_tree:
        solve_data.results.solver.num_nodes = solve_data.nlp_iter - \
            (1 if config.init_strategy == 'rNLP' else 0)

    return solve_data.results
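# Typical invocation sketch for the MindtPy solve() method shown above;
# the subsolver names are illustrative and must be available locally.
from pyomo.environ import SolverFactory

# results = SolverFactory('mindtpy').solve(
#     model, strategy='OA', mip_solver='glpk', nlp_solver='ipopt', tee=True)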
def solve(self, model, **kwds):
    """Solve the model.

    Warning: this solver is still in beta. Keyword arguments subject to
    change. Undocumented keyword arguments definitely subject to change.

    Warning: at this point in time, if you try to use PSC or GBD with
    anything other than IPOPT as the NLP solver, bad things will happen.
    This is because the suffixes are not in place to extract dual values
    from the variable bounds for any other solver.

    TODO: fix needed with the GBD implementation.

    Args:
        model (Block): a Pyomo model or block to be solved

    """
    config = self.CONFIG(kwds.pop('options', {}))
    config.set_value(kwds)

    solve_data = MindtPySolveData()
    solve_data.results = SolverResults()
    solve_data.timing = Container()

    solve_data.original_model = model
    solve_data.working_model = model.clone()
    if config.integer_to_binary:
        TransformationFactory('contrib.integer_to_binary'). \
            apply_to(solve_data.working_model)

    new_logging_level = logging.INFO if config.tee else None
    with time_code(solve_data.timing, 'total', is_main_timer=True), \
            lower_logger_level_to(config.logger, new_logging_level), \
            create_utility_block(solve_data.working_model, 'MindtPy_utils',
                                 solve_data):
        config.logger.info("---Starting MindtPy---")

        MindtPy = solve_data.working_model.MindtPy_utils
        setup_results_object(solve_data, config)
        process_objective(solve_data, config)

        # Save model initial values.
        solve_data.initial_var_values = list(
            v.value for v in MindtPy.variable_list)

        # Store the initial model state as the best solution found. If we
        # find no better solution, then we will restore from this copy.
        solve_data.best_solution_found = None

        # Record solver name
        solve_data.results.solver.name = 'MindtPy' + str(config.strategy)

        # Validate the model to ensure that MindtPy is able to solve it.
        if not model_is_valid(solve_data, config):
            return

        # Create a model block in which to store the generated feasibility
        # slack constraints. Do not leave the constraints on by default.
        feas = MindtPy.MindtPy_feas = Block()
        feas.deactivate()
        feas.feas_constraints = ConstraintList(
            doc='Feasibility Problem Constraints')

        # Create a model block in which to store the generated linear
        # constraints. Do not leave the constraints on by default.
        lin = MindtPy.MindtPy_linear_cuts = Block()
        lin.deactivate()

        # Integer cuts exclude particular discrete decisions
        lin.integer_cuts = ConstraintList(doc='integer cuts')
        # Feasible integer cuts exclude discrete realizations that have
        # been explored via an NLP subproblem. Depending on model
        # characteristics, the user may wish to revisit NLP subproblems
        # (with a different initialization, for example). Therefore, these
        # cuts are not enabled by default.
        #
        # Note: these cuts will only exclude integer realizations that are
        # not already in the primary integer_cuts ConstraintList.
        lin.feasible_integer_cuts = ConstraintList(
            doc='explored integer cuts')
        lin.feasible_integer_cuts.deactivate()

        # Set up iteration counters
        solve_data.nlp_iter = 0
        solve_data.mip_iter = 0
        solve_data.mip_subiter = 0

        # Set up bounds
        solve_data.LB = float('-inf')
        solve_data.UB = float('inf')
        solve_data.LB_progress = [solve_data.LB]
        solve_data.UB_progress = [solve_data.UB]

        # Set of NLP iterations for which cuts were generated
        lin.nlp_iters = Set(dimen=1)

        # Set of MIP iterations for which cuts were generated in ECP
        lin.mip_iters = Set(dimen=1)

        nonlinear_constraints = [
            c for c in MindtPy.constraint_list
            if c.body.polynomial_degree() not in (1, 0)
        ]
        lin.nl_constraint_set = RangeSet(
            len(nonlinear_constraints),
            doc="Integer index set over the nonlinear constraints")
        feas.constraint_set = RangeSet(
            len(MindtPy.constraint_list),
            doc="integer index set over the constraints")

        # # Mapping Constraint -> integer index
        # MindtPy.feas_map = {}
        # # Mapping integer index -> Constraint
        # MindtPy.feas_inverse_map = {}
        # # Generate the two maps. These maps may be helpful for later
        # # interpreting indices on the slack variables or generated cuts.
        # for c, n in zip(MindtPy.constraint_list, feas.constraint_set):
        #     MindtPy.feas_map[c] = n
        #     MindtPy.feas_inverse_map[n] = c

        # Create slack variables for OA cuts
        lin.slack_vars = VarList(
            bounds=(0, config.max_slack), initialize=0,
            domain=NonNegativeReals)
        # Create slack variables for the feasibility problem
        feas.slack_var = Var(feas.constraint_set,
                             domain=NonNegativeReals, initialize=1)

        # Flag indicating whether the solution improved in the past
        # iteration or not
        solve_data.solution_improved = False

        if not hasattr(solve_data.working_model, 'ipopt_zL_out'):
            solve_data.working_model.ipopt_zL_out = Suffix(
                direction=Suffix.IMPORT)
        if not hasattr(solve_data.working_model, 'ipopt_zU_out'):
            solve_data.working_model.ipopt_zU_out = Suffix(
                direction=Suffix.IMPORT)

        # Initialize the master problem
        with time_code(solve_data.timing, 'initialization'):
            MindtPy_initialize_master(solve_data, config)

        # Algorithm main loop
        with time_code(solve_data.timing, 'main loop'):
            MindtPy_iteration_loop(solve_data, config)

        if solve_data.best_solution_found is not None:
            # Update values in original model
            copy_var_list_values(
                from_list=solve_data.best_solution_found
                .MindtPy_utils.variable_list,
                to_list=MindtPy.variable_list,
                config=config)
            # MindtPy.objective_value.set_value(
            #     value(solve_data.working_objective_expr, exception=False))
            copy_var_list_values(
                MindtPy.variable_list,
                solve_data.original_model.component_data_objects(Var),
                config)

        solve_data.results.problem.lower_bound = solve_data.LB
        solve_data.results.problem.upper_bound = solve_data.UB

    solve_data.results.solver.timing = solve_data.timing
    solve_data.results.solver.user_time = solve_data.timing.total
    solve_data.results.solver.wallclock_time = solve_data.timing.total
    solve_data.results.solver.iterations = solve_data.mip_iter

    return solve_data.results
def run_command(command=None, parser=None, args=None, name='unknown',
                data=None, options=None):
    """
    Execute a function that processes command-line arguments and then calls
    a command-line driver.

    This function provides a generic facility for executing a command
    function. It is segregated from the driver to enable profiling of the
    command-line execution.

    Required:
        command:    The name of the function that will be executed to
                    process the command-line options using a parser object.
        parser:     The parser object that is used by the command-line
                    function.

    Optional:
        options:    If this is not None, then ignore the args option and
                    use this to specify command options.
        args:       Command-line arguments that are parsed. If this value
                    is `None`, then the arguments in `sys.argv` are used to
                    parse the command-line.
        name:       The name of the command-line (for error messages).
        data:       A container of labeled data.

    Returned:
        retval:     Return values from the command-line execution.
        errorcode:  0 if Pyomo ran successfully
    """
    #
    # Parse command-line options
    #
    retval = None
    errorcode = 0
    if options is None:
        try:
            if type(args) is argparse.Namespace:
                _options = args
            else:
                _options = parser.parse_args(args=args)
            # Replace the parser options object with a
            # pyutilib.misc.Options object
            options = pyutilib.misc.Options()
            for key in dir(_options):
                if key[0] != '_':
                    val = getattr(_options, key)
                    if not isinstance(val, types.MethodType):
                        options[key] = val
        except SystemExit:
            # The parser throws a system exit if "-h" is specified - catch
            # it to exit gracefully.
            return Container(retval=retval, errorcode=errorcode)
    #
    # Configure loggers
    #
    configure_loggers(options=options)
    #
    # Setup I/O redirect to a file
    #
    logfile = options.runtime.logfile
    if logfile is not None:
        pyutilib.misc.setup_redirect(logfile)
    #
    # Call the main Pyomo runner with profiling
    #
    TempfileManager.push()
    pcount = options.runtime.profile_count
    if pcount > 0:
        if not pstats_available:
            if logfile is not None:
                pyutilib.misc.reset_redirect()
            msg = "Cannot use the 'profile' option. The Python 'pstats' " \
                  'package cannot be imported!'
            raise ValueError(msg)
        tfile = TempfileManager.create_tempfile(suffix=".profile")
        tmp = profile.runctx(
            command.__name__ + '(options=options,parser=parser)',
            command.__globals__, locals(), tfile)
        p = pstats.Stats(tfile).strip_dirs()
        p.sort_stats('time', 'cumulative')
        p = p.print_stats(pcount)
        p.print_callers(pcount)
        p.print_callees(pcount)
        p = p.sort_stats('cumulative', 'calls')
        p.print_stats(pcount)
        p.print_callers(pcount)
        p.print_callees(pcount)
        p = p.sort_stats('calls')
        p.print_stats(pcount)
        p.print_callers(pcount)
        p.print_callees(pcount)
        retval = tmp
    else:
        #
        # Call the main Pyomo runner without profiling
        #
        TempfileManager.push()
        try:
            retval = command(options=options, parser=parser)
        except SystemExit:
            err = sys.exc_info()[1]
            #
            # If debugging is enabled or the 'catch' option is specified,
            # then exit. Otherwise, print an "Exiting..." message.
            #
            if __debug__ and (options.runtime.logging == 'debug'
                              or options.runtime.catch_errors):
                sys.exit(0)
            print('Exiting %s: %s' % (name, str(err)))
            errorcode = err.code
        except Exception:
            err = sys.exc_info()[1]
            #
            # If debugging is enabled or the 'catch' option is specified,
            # then pass the exception up the chain (to pyomo_excepthook)
            #
            if __debug__ and (options.runtime.logging == 'debug'
                              or options.runtime.catch_errors):
                if logfile is not None:
                    pyutilib.misc.reset_redirect()
                TempfileManager.pop(remove=not options.runtime.keep_files)
                raise

            if options.model is not None and \
                    options.model.save_file is not None:
                model = "model " + options.model.save_file
            else:
                model = "model"

            global filter_excepthook
            if filter_excepthook:
                action = "loading"
            else:
                action = "running"

            msg = "Unexpected exception while %s %s:\n" % (action, model)
            #
            # This handles the case where the error is propagated by a
            # KeyError. KeyError likes to pass raw strings that don't handle
            # newlines (they translate "\n" to "\\n"), as well as tacking on
            # single quotes at either end of the error message. This undoes
            # all that.
            #
            errStr = str(err)
            if type(err) == KeyError and errStr != "None":
                errStr = str(err).replace(r"\n", "\n")[1:-1]

            logging.getLogger('pyomo.core').error(msg + errStr)
            errorcode = 1

    if logfile is not None:
        pyutilib.misc.reset_redirect()

    if options.runtime.disable_gc:
        gc.enable()
    TempfileManager.pop(remove=not options.runtime.keep_files)
    return Container(retval=retval, errorcode=errorcode)
def solve(self, model, **kwds):
    config = self.CONFIG(kwds.pop('options', {}))
    config.set_value(kwds)

    # Validate model to be used with gdpbb
    self.validate_model(model)
    # Set solver as an MINLP
    solver = SolverFactory(config.solver)
    solve_data = GDPbbSolveData()
    solve_data.timing = Container()
    solve_data.original_model = model
    solve_data.results = SolverResults()

    old_logger_level = config.logger.getEffectiveLevel()
    with time_code(solve_data.timing, 'total'), \
            restore_logger_level(config.logger), \
            create_utility_block(model, 'GDPbb_utils', solve_data):
        if config.tee and old_logger_level > logging.INFO:
            # If the logger does not already include INFO, include it.
            config.logger.setLevel(logging.INFO)
        config.logger.info(
            "Starting GDPbb version %s using %s as subsolver"
            % (".".join(map(str, self.version())), config.solver))

        # Setup results
        solve_data.results.solver.name = 'GDPbb - %s' % (str(config.solver))
        setup_results_object(solve_data, config)

        # Initialize a list containing the indicator vars for updating
        # the model after solving
        indicator_list_name = unique_component_name(model, "_indicator_list")
        indicator_vars = []
        for disjunction in model.component_data_objects(
                ctype=Disjunction, active=True):
            for disjunct in disjunction.disjuncts:
                indicator_vars.append(disjunct.indicator_var)
        setattr(model, indicator_list_name, indicator_vars)

        # Get the objective sense
        objectives = model.component_data_objects(Objective, active=True)
        obj = next(objectives, None)
        obj_sign = 1 if obj.sense == minimize else -1
        solve_data.results.problem.sense = obj.sense

        # Clone the original model for the root node of branch and bound
        root = model.clone()

        # Set up lists to keep track of which disjunctions have been covered.

        # This list keeps track of the original disjunctions that were
        # active and are soon to be inactive
        root.GDPbb_utils.unenforced_disjunctions = list(
            disjunction for disjunction in root.GDPbb_utils.disjunction_list
            if disjunction.active)

        # This list keeps track of the disjunctions that have been activated
        # by the branch and bound
        root.GDPbb_utils.curr_active_disjunctions = []

        # Deactivate all disjunctions in the model
        # self.indicate(root)
        for djn in root.GDPbb_utils.unenforced_disjunctions:
            djn.deactivate()
        # Deactivate all disjuncts in the model. To be reactivated when
        # their disjunction is reactivated.
        for disj in root.component_data_objects(Disjunct, active=True):
            disj._deactivate_without_fixing_indicator()

        # Satisfiability check would go here

        # Solve the root node
        config.logger.info("Solving the root node.")
        obj_value, result, _ = self.subproblem_solve(root, solver, config)

        # Initialize minheap for the branch and bound algorithm.
        # Heap structure: (ordering tuple, model)
        # Ordering tuple: (objective value, disjunctions_left, -counter)
        #   - select solutions with lower objective value,
        #     then fewer disjunctions left to explore (depth first),
        #     then more recently encountered (tiebreaker)
        heap = []
        counter = 0
        disjunctions_left = len(root.GDPbb_utils.unenforced_disjunctions)
        heapq.heappush(
            heap,
            ((obj_sign * obj_value, disjunctions_left, -counter),
             root, result, root.GDPbb_utils.variable_list))

        # Loop to branch through the tree
        while len(heap) > 0:
            # Pop the best node off of the heap
            sort_tup, mdl, mdl_results, vars = heapq.heappop(heap)
            old_obj_val, disjunctions_left, _ = sort_tup
            config.logger.info(
                "Exploring node with LB %.10g and %s inactive disjunctions."
                % (old_obj_val, disjunctions_left))

            # If all the originally active disjunctions are active, solve
            # and return the solution
            if disjunctions_left == 0:
                config.logger.info("Model solved.")
                # Model is solved. Copy over solution values.
                for orig_var, soln_var in zip(
                        model.GDPbb_utils.variable_list, vars):
                    orig_var.value = soln_var.value

                solve_data.results.problem.lower_bound = \
                    mdl_results.problem.lower_bound
                solve_data.results.problem.upper_bound = \
                    mdl_results.problem.upper_bound
                solve_data.results.solver.timing = solve_data.timing
                solve_data.results.solver.termination_condition = \
                    mdl_results.solver.termination_condition
                return solve_data.results

            next_disjunction = mdl.GDPbb_utils.unenforced_disjunctions.pop(0)
            config.logger.info(
                "Activating disjunction %s" % next_disjunction.name)
            next_disjunction.activate()
            mdl.GDPbb_utils.curr_active_disjunctions.append(next_disjunction)
            djn_left = len(mdl.GDPbb_utils.unenforced_disjunctions)
            for disj in next_disjunction.disjuncts:
                disj._activate_without_unfixing_indicator()
                if not disj.indicator_var.fixed:
                    # Initially set all indicator vars to zero
                    disj.indicator_var = 0
            added_disj_counter = 0
            for disj in next_disjunction.disjuncts:
                if not disj.indicator_var.fixed:
                    disj.indicator_var = 1
                mnew = mdl.clone()
                if not disj.indicator_var.fixed:
                    disj.indicator_var = 0

                # Check feasibility
                if config.check_sat and satisfiable(
                        mnew, config.logger) is False:
                    # Problem is not satisfiable. Skip this disjunct.
                    continue

                obj_value, result, vars = self.subproblem_solve(
                    mnew, solver, config)
                counter += 1
                ordering_tuple = (obj_sign * obj_value, djn_left, -counter)
                heapq.heappush(heap, (ordering_tuple, mnew, result, vars))
                added_disj_counter = added_disj_counter + 1

            config.logger.info(
                "Added %s new nodes with %s relaxed disjunctions to the "
                "heap. Size now %s."
                % (added_disj_counter, djn_left, len(heap)))
def run_pyomo(options=Options(), parser=None):
    data = Options(options=options)

    if options.model.filename == '':
        parser.print_help()
        return Container()

    try:
        pyomo.scripting.util.setup_environment(data)
        pyomo.scripting.util.apply_preprocessing(data, parser=parser)
    except:
        # TBD: I should be able to call this function in the case of
        #      an exception to perform cleanup. However, as it stands,
        #      calling finalize with its default keyword value for
        #      model(=None) results in a different error related to
        #      task port values. Not sure how to interpret that.
        pyomo.scripting.util.finalize(
            data, model=ConcreteModel(), instance=None, results=None)
        raise
    else:
        if data.error:
            # TBD: same cleanup caveat as above.
            pyomo.scripting.util.finalize(
                data, model=ConcreteModel(), instance=None, results=None)
            return Container()  # pragma:nocover

    try:
        model_data = pyomo.scripting.util.create_model(data)
    except:
        # TBD: same cleanup caveat as above.
        pyomo.scripting.util.finalize(
            data, model=ConcreteModel(), instance=None, results=None)
        raise
    else:
        if (((not options.runtime.logging == 'debug') and
             options.model.save_file) or
                options.runtime.only_instance):
            pyomo.scripting.util.finalize(
                data, model=model_data.model,
                instance=model_data.instance, results=None)
            return Container(instance=model_data.instance)

    try:
        opt_data = pyomo.scripting.util.apply_optimizer(
            data, instance=model_data.instance)
        pyomo.scripting.util.process_results(
            data, instance=model_data.instance,
            results=opt_data.results, opt=opt_data.opt)
        pyomo.scripting.util.apply_postprocessing(
            data, instance=model_data.instance, results=opt_data.results)
    except:
        # TBD: same cleanup caveat as above.
        pyomo.scripting.util.finalize(
            data, model=ConcreteModel(), instance=None, results=None)
        raise
    else:
        pyomo.scripting.util.finalize(
            data, model=model_data.model,
            instance=model_data.instance, results=opt_data.results)
        return Container(options=options,
                         instance=model_data.instance,
                         results=opt_data.results,
                         local=opt_data.local)
def export_to_excel(run_number: Optional[int] = None):
    this_run_dir = get_run_dir(run_number)
    _load_run_config(this_run_dir)
    excel_columns = [
        "time", "model", "solver", "LB", "UB", "elapsed", "iterations",
        "tc", "sense", "soln_gap", "time_to_ok_soln", "time_to_soln",
        "opt_gap", "time_to_opt", "err_msg"
    ]
    rows = []
    # Process successfully completed jobs
    for job in this_run_config.jobs_run - this_run_config.jobs_failed:
        model_name, solver_name = job
        test_model = models[model_name]
        test_solver = solvers[solver_name]
        job_data = Container()
        job_data.model = model_name
        job_data.solver = solver_name
        test_result = _get_job_result(this_run_dir, model_name, solver_name)
        if not test_result:
            continue  # TODO This should be unnecessary. We should detect a failure earlier in analysis.
        job_data.time = test_result.model_build_start_time
        job_data.LB = test_result.LB
        job_data.UB = test_result.UB
        job_data.elapsed = test_result.solver_run_time
        job_data.iterations = test_result.get('iterations', None)
        job_data.tc = test_result.termination_condition
        job_data.sense = test_model.objective_sense
        if job_data.tc != 'infeasible':
            job_data.soln_gap, job_data.opt_gap = _calculate_gaps(
                test_model, test_solver, test_result.LB, test_result.UB)
        else:
            job_data.soln_gap, job_data.opt_gap = None, None
        # Times to solution/optimality
        if (job_data.soln_gap is not None
                and job_data.soln_gap
                <= options.optcr + options['optcr tolerance']):
            job_data.time_to_soln = test_result.solver_run_time
            job_data.time_to_ok_soln = test_result.solver_run_time
        elif (job_data.soln_gap is not None
                and job_data.soln_gap <= options["ok solution tolerance"]):
            job_data.time_to_soln = float('inf')
            job_data.time_to_ok_soln = test_result.solver_run_time
        else:
            job_data.time_to_soln = float('inf')
            job_data.time_to_ok_soln = float('inf')
        if (job_data.opt_gap is not None
                and job_data.opt_gap
                <= options.optcr + options['optcr tolerance']):
            job_data.time_to_opt = test_result.solver_run_time
        else:
            job_data.time_to_opt = float('inf')
        rows.append(job_data)

    # Use pandas to export to Excel
    df = pandas.DataFrame.from_records(
        rows, columns=excel_columns
    ).replace(  # replace infinity with empty cells
        [float('inf'), float('-inf')], [None, None])
    with pandas.ExcelWriter(str(outputdir.joinpath("results.xlsx"))) as writer:
        df.to_excel(writer, sheet_name="data")
    _autoformat_excel()