def test_default_function(self):
    """Exercise ``ConfigValue`` defaults supplied as callables.

    Verifies that:
      * a zero-argument callable default is evaluated to produce the value,
      * ``reset()`` restores the (re-evaluated) callable default,
      * a callable default that requires arguments raises ``TypeError``,
      * an initial value that cannot be cast to the domain raises
        ``ValueError``.
    """
    c = ConfigValue(default=lambda: 10, domain=int)
    self.assertEqual(c.value(), 10)
    c.set_value(5)
    self.assertEqual(c.value(), 5)
    c.reset()
    self.assertEqual(c.value(), 10)
    # A callable default must accept zero arguments; anything else is an
    # error.  assertRaises replaces the older try/except/else/self.fail
    # idiom with the same pass/fail semantics.
    with self.assertRaises(TypeError, msg="Expected type error"):
        ConfigValue(default=lambda x: 10 * x, domain=int)
    # Values that cannot be coerced into the declared domain are rejected.
    with self.assertRaises(
            ValueError,
            msg="Expected casting a to int to raise a value error"):
        ConfigValue('a', domain=int)
def default_pyomo_config():
    """Construct and return the default Pyomo configuration block.

    The returned ConfigBlock currently exposes a single advanced option,
    ``paranoia_level``, which raises the amount of error checking and
    warning output Pyomo performs.
    """
    config = ConfigBlock("Pyomo configuration file")
    # Keyword arguments spell out the ConfigValue fields explicitly
    # (default / domain / description / doc).
    config.declare(
        'paranoia_level',
        ConfigValue(
            default=0,
            domain=int,
            description='Pyomo paranoia and error checking level',
            doc="""Higher levels of paranoia enable additional error checking and warning messages that may assist users in identifying likely modeling problems. Default=0""",
            visibility=ADVANCED_OPTION,
        ))
    return config
def default_config_block(solver, init=False):
    """Build the default Pyomo command-line configuration block.

    Starts from the 'default' ProblemConfigFactory block and adds the
    solver, postprocess, postsolve, and runtime option groups, wiring the
    first solver's options up as argparse arguments.

    Returns a ``(config, blocks)`` pair where ``blocks`` maps section
    names to their ConfigBlock objects.
    """
    # NOTE(review): the ``solver`` parameter is immediately shadowed by the
    # local ConfigBlock below and is never read — presumably kept for
    # signature compatibility with callers; confirm before removing.
    config, blocks = ProblemConfigFactory('default').config_block(init)
    #
    # Solver
    #
    solver = ConfigBlock()
    solver.declare('solver name',
                   ConfigValue('glpk', str, 'Solver name', None))
    solver.declare(
        'solver executable',
        ConfigValue(
            default=None,
            domain=str,
            description="The solver executable used by the solver interface.",
            doc=("The solver executable used by the solver interface. "
                 "This option is only valid for those solver interfaces that "
                 "interact with a local executable through the shell. If unset, "
                 "the solver interface will attempt to find an executable within "
                 "the search path of the shell's environment that matches a name "
                 "commonly associated with the solver interface.")))
    solver.declare(
        'io format',
        ConfigValue(
            None, str,
            'The type of IO used to execute the solver. Different solvers support different types of IO, but the following are common options: lp - generate LP files, nl - generate NL files, python - direct Python interface, os - generate OSiL XML files.',
            None))
    solver.declare(
        'manager',
        ConfigValue('serial', str,
                    'The technique that is used to manage solver executions.',
                    None))
    solver.declare(
        'pyro host',
        ConfigValue(
            None, str,
            "The hostname to bind on when searching for a Pyro nameserver.",
            None))
    solver.declare(
        'pyro port',
        ConfigValue(
            None, int,
            "The port to bind on when searching for a Pyro nameserver.",
            None))
    # Implicit block: arbitrary solver options may be set without prior
    # declaration; each is coerced through the implicit_domain ConfigValue.
    solver.declare(
        'options',
        ConfigBlock(implicit=True,
                    implicit_domain=ConfigValue(None, str, 'Solver option',
                                                None),
                    description="Options passed into the solver"))
    solver.declare(
        'options string',
        ConfigValue(None, str, 'String describing solver options', None))
    solver.declare(
        'suffixes',
        ConfigList([], ConfigValue(None, str, 'Suffix', None),
                   'Solution suffixes that will be extracted by the solver (e.g., rc, dual, or slack). The use of this option is not required when a suffix has been declared on the model using Pyomo\'s Suffix component.',
                   None))
    blocks['solver'] = solver
    #
    solver_list = config.declare(
        'solvers',
        ConfigList(
            [],
            solver,  #ConfigValue(None, str, 'Solver', None),
            'List of solvers. The first solver in this list is the master solver.',
            None))
    #
    # Make sure that there is one solver in the list.
    #
    # This will be the solver into which we dump command line options.
    # Note that we CANNOT declare the argparse options on the base block
    # definition above, as we use that definition as the DOMAIN TYPE for
    # the list of solvers.  As that information is NOT copied to
    # derivative blocks, the initial solver entry we are creating would
    # be missing all argparse information. Plus, if we were to have more
    # than one solver defined, we wouldn't want command line options
    # going to both.
    solver_list.append()
    solver_list[0].get('solver name').\
        declare_as_argument('--solver', dest='solver')
    solver_list[0].get('solver executable').\
        declare_as_argument('--solver-executable',
                            dest="solver_executable", metavar="FILE")
    solver_list[0].get('io format').\
        declare_as_argument('--solver-io', dest='io_format', metavar="FORMAT")
    solver_list[0].get('manager').\
        declare_as_argument('--solver-manager', dest="smanager_type",
                            metavar="TYPE")
    solver_list[0].get('pyro host').\
        declare_as_argument('--pyro-host', dest="pyro_host")
    solver_list[0].get('pyro port').\
        declare_as_argument('--pyro-port', dest="pyro_port")
    solver_list[0].get('options string').\
        declare_as_argument('--solver-options', dest='options_string',
                            metavar="STRING")
    solver_list[0].get('suffixes').\
        declare_as_argument('--solver-suffix', dest="solver_suffixes")
    #
    # Postprocess
    #
    config.declare(
        'postprocess',
        ConfigList([], ConfigValue(None, str, 'Module', None),
                   'Specify a Python module that gets executed after optimization.',
                   None)).declare_as_argument(dest='postprocess')
    #
    # Postsolve
    #
    postsolve = config.declare('postsolve', ConfigBlock())
    postsolve.declare(
        'print logfile',
        ConfigValue(False, bool,
                    'Print the solver logfile after performing optimization.',
                    None)).declare_as_argument('-l', '--log', dest="log")
    postsolve.declare(
        'save results',
        ConfigValue(None, str,
                    'Specify the filename to which the results are saved.',
                    None)).declare_as_argument('--save-results',
                                               dest="save_results",
                                               metavar="FILE")
    postsolve.declare(
        'show results',
        ConfigValue(False, bool,
                    'Print the results object after optimization.',
                    None)).declare_as_argument(dest="show_results")
    # 'results format' is exposed twice on the command line: as a generic
    # --results-format flag, and as a --json shorthand constant.
    postsolve.declare(
        'results format',
        ConfigValue(None, str,
                    'Specify the results format: json or yaml.',
                    None)).declare_as_argument(
                        '--results-format', dest="results_format",
                        metavar="FORMAT").declare_as_argument(
                            '--json', dest="results_format",
                            action="store_const", const="json",
                            help="Store results in JSON format")
    postsolve.declare(
        'summary',
        ConfigValue(
            False, bool,
            'Summarize the final solution after performing optimization.',
            None)).declare_as_argument(dest="summary")
    blocks['postsolve'] = postsolve
    #
    # Runtime
    #
    # The 'runtime' section already exists in the factory-produced config;
    # only additional options are declared here.
    runtime = blocks['runtime']
    runtime.declare(
        'only instance',
        ConfigValue(False, bool, "Generate a model instance, and then exit",
                    None)).declare_as_argument('--instance-only',
                                               dest='only_instance')
    runtime.declare(
        'stream output',
        ConfigValue(
            False, bool,
            "Stream the solver output to provide information about the solver's progress.",
            None)).declare_as_argument('--stream-output', '--stream-solver',
                                       dest="tee")
    #
    return config, blocks
class TrustRegionSolver(OptSolver):
    """
    A trust region filter method for black box / glass box optimization
    Solves nonlinear optimization problems containing external function calls
    through automatic construction of reduced models (ROM), also known as
    surrogate models.
    Currently implements linear and quadratic reduced models.
    See Eason, Biegler (2016) AIChE Journal for more details

    Arguments:
    """
    #    + param.CONFIG.generate_yaml_template()

    # Class-level configuration block; instances copy it (with overrides)
    # in __init__ / solve().
    CONFIG = ConfigBlock('Trust Region')

    CONFIG.declare(
        'solver',
        ConfigValue(default='ipopt',
                    description='solver to use, defaults to ipopt',
                    doc=''))
    CONFIG.declare(
        'solver_options',
        ConfigBlock(implicit=True,
                    description='options to pass to the subproblem solver',
                    doc=''))
    # Initialize trust radius
    CONFIG.declare(
        'trust radius',
        ConfigValue(default=1.0,
                    domain=PositiveFloat,
                    description='',
                    doc=''))
    # Initialize sample region
    CONFIG.declare(
        'sample region',
        ConfigValue(default=True, domain=bool, description='', doc=''))
    # Initialize sample radius
    # TODO do we need to keep the if statement?
    # (Evaluated once at class-definition time against the CONFIG defaults,
    # so it only reflects the default 'sample region' value.)
    if CONFIG.sample_region:
        default_sample_radius = 0.1
    else:
        default_sample_radius = CONFIG.trust_radius / 2.0
    CONFIG.declare(
        'sample radius',
        ConfigValue(default=default_sample_radius,
                    domain=PositiveFloat,
                    description='',
                    doc=''))
    # Initialize radius max
    CONFIG.declare(
        'radius max',
        ConfigValue(default=1000.0 * CONFIG.trust_radius,
                    domain=PositiveFloat,
                    description='',
                    doc=''))
    # Termination tolerances
    CONFIG.declare(
        'ep i',
        ConfigValue(default=1e-5,
                    domain=PositiveFloat,
                    description='',
                    doc=''))
    CONFIG.declare(
        'ep delta',
        ConfigValue(default=1e-5,
                    domain=PositiveFloat,
                    description='',
                    doc=''))
    CONFIG.declare(
        'ep chi',
        ConfigValue(default=1e-3,
                    domain=PositiveFloat,
                    description='',
                    doc=''))
    CONFIG.declare(
        'delta min',
        ConfigValue(default=1e-6,
                    domain=PositiveFloat,
                    description='delta min <= ep delta',
                    doc=''))
    CONFIG.declare(
        'max it',
        ConfigValue(default=20,
                    domain=PositiveInt,
                    description='',
                    doc=''))
    # Compatibility Check Parameters
    CONFIG.declare(
        'kappa delta',
        ConfigValue(default=0.8,
                    domain=PositiveFloat,
                    description='',
                    doc=''))
    CONFIG.declare(
        'kappa mu',
        ConfigValue(default=1.0,
                    domain=PositiveFloat,
                    description='',
                    doc=''))
    CONFIG.declare(
        'mu',
        ConfigValue(default=0.5,
                    domain=PositiveFloat,
                    description='',
                    doc=''))
    CONFIG.declare(
        'ep compatibility',
        ConfigValue(default=CONFIG.ep_i,
                    domain=PositiveFloat,
                    description='Suggested value: ep compatibility == ep i',
                    doc=''))
    CONFIG.declare(
        'compatibility penalty',
        ConfigValue(default=0.0,
                    domain=NonNegativeFloat,
                    description='',
                    doc=''))
    # Criticality Check Parameters
    CONFIG.declare(
        'criticality check',
        ConfigValue(default=0.1,
                    domain=PositiveFloat,
                    description='',
                    doc=''))
    # Trust region update parameters
    CONFIG.declare(
        'gamma c',
        ConfigValue(default=0.5,
                    domain=PositiveFloat,
                    description='',
                    doc=''))
    CONFIG.declare(
        'gamma e',
        ConfigValue(default=2.5,
                    domain=PositiveFloat,
                    description='',
                    doc=''))
    # Switching Condition
    CONFIG.declare(
        'gamma s',
        ConfigValue(default=2.0,
                    domain=PositiveFloat,
                    description='',
                    doc=''))
    CONFIG.declare(
        'kappa theta',
        ConfigValue(default=0.1,
                    domain=PositiveFloat,
                    description='',
                    doc=''))
    CONFIG.declare(
        'theta min',
        ConfigValue(default=1e-4,
                    domain=PositiveFloat,
                    description='',
                    doc=''))
    # Filter
    CONFIG.declare(
        'gamma f',
        ConfigValue(
            default=0.01,
            domain=PositiveFloat,
            description='gamma_f and gamma_theta in (0,1) are fixed parameters',
            doc=''))
    CONFIG.declare(
        'gamma theta',
        ConfigValue(
            default=0.01,
            domain=PositiveFloat,
            description='gamma_f and gamma_theta in (0,1) are fixed parameters',
            doc=''))
    CONFIG.declare(
        'theta max',
        ConfigValue(default=50,
                    domain=PositiveInt,
                    description='',
                    doc=''))
    # Ratio test parameters (for theta steps)
    CONFIG.declare(
        'eta1',
        ConfigValue(default=0.05,
                    domain=PositiveFloat,
                    description='',
                    doc=''))
    CONFIG.declare(
        'eta2',
        ConfigValue(default=0.2,
                    domain=PositiveFloat,
                    description='',
                    doc=''))
    # Output level (replace with real printlevels!!!)
    CONFIG.declare(
        'print variables',
        ConfigValue(default=False, domain=bool, description='', doc=''))
    # Sample Radius reset parameter
    CONFIG.declare(
        'sample radius adjust',
        ConfigValue(default=0.5,
                    domain=PositiveFloat,
                    description='',
                    doc=''))
    # Default romtype
    CONFIG.declare(
        'reduced model type',
        ConfigValue(default=1,
                    domain=In([0, 1]),
                    description='0 = Linear, 1 = Quadratic',
                    doc=''))

    def __init__(self, **kwds):
        """Apply keyword overrides to CONFIG and initialize the base solver.

        All keywords except 'type' are interpreted as CONFIG entries; the
        chosen subproblem solver name is forwarded to OptSolver.
        """
        # set persistent config options
        tmp_kwds = {'type': kwds.pop('type', 'trustregion')}
        self.config = self.CONFIG(kwds, preserve_implicit=True)

        #
        # Call base class constructor
        #
        tmp_kwds['solver'] = self.config.solver
        OptSolver.__init__(self, **tmp_kwds)

    def available(self, exception_flag=True):
        """Check if solver is available.

        TODO: For now, it is always available. However, sub-solvers may not
        always be available, and so this should reflect that possibility.
        """
        return True

    def version(self):
        """Return a 3-tuple describing the solver version."""
        return __version__

    def solve(self, model, eflist, **kwds):
        """Run the trust-region algorithm on ``model``.

        The model is cloned, TRF is run on the clone, and the (possibly
        updated) variable values are copied back onto the original model.
        NOTE(review): unlike a typical OptSolver.solve, this returns None
        rather than a results object.
        """
        # set customized config parameters (per-call overrides of the
        # persistent instance config)
        self._local_config = self.config(kwds, preserve_implicit=True)

        # first store all data we will need to change in original model as a tuple
        # [0]=Var component, [1]=external function list, [2]=config block
        model._tmp_trf_data = (list(model.component_data_objects(Var)),
                               eflist, self._local_config)
        # now clone the model
        inst = model.clone()

        # call TRF on cloned model
        TRF(inst, inst._tmp_trf_data[1], inst._tmp_trf_data[2])

        # copy potentially changed variable values back to original model and return
        for inst_var, orig_var in zip(inst._tmp_trf_data[0],
                                      model._tmp_trf_data[0]):
            orig_var.set_value(value(inst_var))
def minlp_config_block(init=False):
    """Build the configuration block for a canonical MINLP model
    construction and optimization sequence.

    Args:
        init: if True, seed the 'transform' list with one (empty) entry.

    Returns:
        ``(config, blocks)`` where ``blocks`` maps section names
        ('data', 'model', 'transform', 'runtime') to their ConfigBlocks.
    """
    config = ConfigBlock(
        "Configuration for a canonical model construction and optimization sequence"
    )
    blocks = {}

    #
    # Data
    #
    data = config.declare('data', ConfigBlock())
    data.declare(
        'files',
        ConfigList([], ConfigValue(None, str, 'Filename', None),
                   'Model data files', None))
    data.declare(
        'namespaces',
        ConfigList(
            [], ConfigValue(None, str, 'Namespace', None),
            'A namespace that is used to select data in Pyomo data files.',
            None)).declare_as_argument('--namespace', dest='namespaces',
                                       action='append')
    blocks['data'] = data

    #
    # Model
    #
    model = config.declare('model', ConfigBlock())
    model.declare(
        'filename',
        ConfigValue(None, str,
                    'The Python module that specifies the model', None))
    model.declare(
        'object name',
        ConfigValue(
            None, str,
            'The name of the model object that is created in the specified Pyomo module',
            None)).declare_as_argument('--model-name', dest='model_name')
    model.declare('type', ConfigValue(None, str, 'The problem type', None))
    # Implicit block: arbitrary model-construction options are accepted.
    model.declare(
        'options',
        ConfigBlock(implicit=True,
                    description='Options used to construct the model'))
    model.declare(
        'linearize expressions',
        ConfigValue(
            False, bool,
            'An option intended for use on linear or mixed-integer models in which expression trees in a model (constraints or objectives) are compacted into a more memory-efficient and concise form.',
            None))
    model.declare(
        'save file',
        ConfigValue(
            None, str,
            "The filename to which the model is saved. The suffix of this filename specifies the file format.",
            None))
    model.declare(
        'save format',
        ConfigValue(
            None, str,
            "The format that the model is saved. When specified, this overrides the format implied by the 'save file' option.",
            None))
    model.declare(
        'symbolic solver labels',
        ConfigValue(
            False, bool,
            'When interfacing with the solver, use symbol names derived from the model. For example, \"my_special_variable[1_2_3]\" instead of \"v1\". Useful for debugging. When using the ASL interface (--solver-io=nl), generates corresponding .row (constraints) and .col (variables) files. The ordering in these files provides a mapping from ASL index to symbolic model names.',
            None)).declare_as_argument(dest='symbolic_solver_labels')
    model.declare(
        'file determinism',
        ConfigValue(
            1, int,
            'When interfacing with a solver using file based I/O, set the effort level for ensuring the file creation process is determistic. The default (1) sorts the index of components when transforming the model. Anything less than 1 disables index sorting. Anything greater than 1 additionaly sorts by component name to override declartion order.',
            None)).declare_as_argument(dest='file_determinism')
    blocks['model'] = model

    #
    # Transform
    #
    # This block serves as the domain type for entries of the 'transform'
    # ConfigList declared just below.
    transform = ConfigBlock()
    transform.declare(
        'name',
        ConfigValue(None, str, 'Name of the model transformation', None))
    transform.declare(
        'options',
        ConfigBlock(implicit=True, description='Transformation options'))
    blocks['transform'] = transform
    #
    transform_list = config.declare(
        'transform',
        ConfigList([], ConfigValue(None, str, 'Transformation', None),
                   'List of model transformations',
                   None)).declare_as_argument(dest='transformations')
    if init:
        transform_list.append()

    #
    # Preprocess
    #
    config.declare(
        'preprocess',
        ConfigList([], ConfigValue(None, str, 'Module', None),
                   'Specify a Python module that gets immediately executed (before the optimization model is setup).',
                   None)).declare_as_argument(dest='preprocess')

    #
    # Runtime
    #
    runtime = config.declare('runtime', ConfigBlock())
    runtime.declare(
        'logging',
        ConfigValue(None, str,
                    'Logging level: quiet, warning, info, verbose, debug',
                    None)).declare_as_argument(dest="logging",
                                               metavar="LEVEL")
    runtime.declare(
        'logfile',
        ConfigValue(None, str,
                    'Redirect output to the specified file.',
                    None)).declare_as_argument(dest="output", metavar="FILE")
    runtime.declare(
        'catch errors',
        ConfigValue(
            False, bool,
            'Trigger failures for exceptions to print the program stack.',
            None)).declare_as_argument('-c', '--catch-errors', dest="catch")
    runtime.declare(
        'disable gc',
        ConfigValue(False, bool, 'Disable the garbage collecter.',
                    None)).declare_as_argument('--disable-gc',
                                               dest='disable_gc')
    runtime.declare(
        'interactive',
        ConfigValue(
            False, bool,
            'After executing Pyomo, launch an interactive Python shell. If IPython is installed, this shell is an IPython shell.',
            None))
    runtime.declare('keep files',
                    ConfigValue(False, bool, 'Keep temporary files',
                                None)).declare_as_argument('-k',
                                                           '--keepfiles',
                                                           dest='keepfiles')
    runtime.declare(
        'paths',
        ConfigList([], ConfigValue(None, str, 'Path', None),
                   'Give a path that is used to find the Pyomo python files.',
                   None)).declare_as_argument('--path', dest='path')
    runtime.declare(
        'profile count',
        ConfigValue(
            0, int,
            'Enable profiling of Python code. The value of this option is the number of functions that are summarized.',
            None)).declare_as_argument(dest='profile_count', metavar='COUNT')
    runtime.declare(
        'profile memory',
        ConfigValue(
            0, int,
            "Report memory usage statistics for the generated instance and any associated processing steps. A value of 0 indicates disabled. A value of 1 forces the print of the total memory after major stages of the pyomo script. A value of 2 forces summary memory statistics after major stages of the pyomo script. A value of 3 forces detailed memory statistics during instance creation and various steps of preprocessing. Values equal to 4 and higher currently provide no additional information. Higher values automatically enable all functionality associated with lower values, e.g., 3 turns on detailed and summary statistics.",
            None))
    runtime.declare(
        'report timing',
        ConfigValue(
            False, bool,
            'Report various timing statistics during model construction.',
            None)).declare_as_argument(dest='report_timing')
    runtime.declare(
        'tempdir',
        ConfigValue(
            None, str,
            'Specify the directory where temporary files are generated.',
            None)).declare_as_argument(dest='tempdir')
    blocks['runtime'] = runtime
    #
    return config, blocks
class DynamicBase(object):
    """
    Base class for dynamic simulations objects.

    Derived classes (e.g. NMPC or MHE simulators) are expected to provide a
    ``namespace_name`` attribute naming their own namespace block; this base
    class manages the shared '_DYNAMIC_NAMESPACE' block.
    """
    CONFIG = ConfigDict()
    CONFIG.declare(
        'outlvl',
        ConfigValue(
            default=idaeslog.INFO,
            doc='Severity threshold for IDAES logger',
        ))
    # Would a time_resolution_option be useful for MHE?
    CONFIG.declare(
        'continuous_set_tolerance',
        ConfigValue(
            default=1e-8,
            domain=float,
            doc=('Tolerance used for determining whether a float is a '
                 'member of a ContinuousSet'),
        ))
    CONFIG.declare(
        'solver',
        ConfigValue(
            default='ipopt',
            domain=dyn_config.validate_solver,
            doc='Pyomo solver object to be used to solve generated NLPs',
        ))
    CONFIG.declare(
        'tolerance',
        ConfigValue(
            default=1e-8,
            domain=float,
            doc='Tolerance for checking constraint violation',
        ))

    @classmethod
    def get_namespace_name(cls):
        """Return the attribute name of the shared base-class namespace."""
        return '_DYNAMIC_NAMESPACE'

    @classmethod
    def add_namespace_to(cls, model, time):
        """Add the dynamic-simulation namespace Block to ``model``.

        Attaches a ``get_time`` accessor to both the base and derived
        namespaces, records the number of collocation points, and marks the
        model as not-yet-categorized.  A no-op if the namespace already
        exists (expected when the same model is reused, e.g. for both NMPC
        and MHE).
        """
        name = DynamicBase.get_namespace_name()
        derived_name = cls.namespace_name
        if hasattr(model, name):
            # Return if namespace has already been added. Don't throw an error
            # as this is expected if the user, say wants to use the same model
            # for NMPC and MHE.
            return
        if time.model() != model.model():
            raise ValueError(
                'time must belong to same top-level model as model')
        model.add_component(name, Block())
        namespace = getattr(model, name)
        derived_namespace = getattr(model, derived_name)

        def get_time():
            return time

        namespace.get_time = get_time
        derived_namespace.get_time = namespace.get_time

        # Validate discretization scheme and get ncp:
        namespace.ncp = dyn_config.get_ncp(time)

        namespace.variables_categorized = False

    @classmethod
    def remove_namespace_from(cls, model):
        """Delete the base-class namespace Block from ``model``.

        Raises RuntimeError if the namespace does not exist.
        """
        # TODO: add remove_namespace_from to derived classes
        name = DynamicBase.get_namespace_name()
        if not hasattr(model, name):
            raise RuntimeError(
                'Trying to delete block %s that does not exist on model'
                % name)
        # NOTE(review): del_component is called with a second ``Block()``
        # argument here — verify against the Pyomo API; del_component
        # normally takes only the name or component to delete.
        model.del_component(name, Block())

    @classmethod
    def get_logger_name(cls):
        """Return the name used for this class's logger."""
        return 'dynamic'

    def _populate_namespace(self, model):
        """
        Given a model with categorized variables, a category_dict, and a
        var_locator, each referenced through the DynamicBase namespace, adds
        references to each of these objects through model's namespace
        corresponding to this particular instance of DynamicBase (i.e.
        NMPCSim or MHESim).
        """
        # Nothing to copy when this instance IS the base class namespace.
        if self.get_namespace_name() == DynamicBase.get_namespace_name():
            return
        derived_namespace = getattr(model, self.get_namespace_name())
        base_namespace = getattr(model, DynamicBase.get_namespace_name())

        derived_namespace.dae_vars = base_namespace.dae_vars
        derived_namespace.diff_vars = base_namespace.diff_vars
        derived_namespace.n_diff_vars = base_namespace.n_diff_vars
        derived_namespace.deriv_vars = base_namespace.deriv_vars
        derived_namespace.n_deriv_vars = base_namespace.n_deriv_vars
        derived_namespace.input_vars = base_namespace.input_vars
        derived_namespace.n_input_vars = base_namespace.n_input_vars
        derived_namespace.alg_vars = base_namespace.alg_vars
        derived_namespace.n_alg_vars = base_namespace.n_alg_vars
        derived_namespace.fixed_vars = base_namespace.fixed_vars
        derived_namespace.n_fixed_vars = base_namespace.n_fixed_vars
        derived_namespace.scalar_vars = base_namespace.scalar_vars
        derived_namespace.n_scalar_vars = base_namespace.n_scalar_vars
        derived_namespace.ic_vars = base_namespace.ic_vars
        derived_namespace.n_ic_vars = base_namespace.n_ic_vars
        derived_namespace.variables_categorized = \
            base_namespace.variables_categorized
        derived_namespace.get_time = base_namespace.get_time
        derived_namespace.ncp = base_namespace.ncp

        derived_namespace.category_dict = base_namespace.category_dict
        derived_namespace.var_locator = base_namespace.var_locator

    def __init__(self, plant, plant_time, controller, controller_time,
                 inputs_at_t0, **kwargs):
        """Set up namespaces, categorize variables, and build locators for
        both the plant and controller models.

        Args:
            plant : plant model
            plant_time : time ContinuousSet of the plant model
            controller : controller model
            controller_time : time ContinuousSet of the controller model
            inputs_at_t0 : list of VarData that are inputs at the initial
                time point of the plant model
            **kwargs : CONFIG overrides
        """
        self.config = self.CONFIG(kwargs)
        dyn_config.validate_list_of_vardata(inputs_at_t0)
        # How, if at all, should I configure logger here?

        # add namespace
        self.add_namespace_to(plant, plant_time)
        self.add_namespace_to(controller, controller_time)
        self.plant = plant
        self.plant_time = plant_time
        self.controller = controller
        self.controller_time = controller_time
        self.sample_time = None

        # categorize, create category dicts, create locator
        # ^should this be done separately for NMPC/MHE?
        self.categorize_variables(self.plant, inputs_at_t0)
        namespace = getattr(self.plant, DynamicBase.get_namespace_name())
        namespace.category_dict = {
            VariableCategory.DIFFERENTIAL: namespace.diff_vars,
            VariableCategory.DERIVATIVE: namespace.deriv_vars,
            VariableCategory.ALGEBRAIC: namespace.alg_vars,
            VariableCategory.INPUT: namespace.input_vars,
            VariableCategory.FIXED: namespace.fixed_vars,
            VariableCategory.SCALAR: namespace.scalar_vars,
        }
        self.build_variable_locator(self.plant,
                                    namespace.category_dict,
                                    ic_vars=namespace.ic_vars)
        # measurement_vars=namespace.measurement_vars)

        # find input and measurement vars in controller model
        init_controller_inputs = [
            dyn_utils.find_comp_in_block(self.controller, self.plant, comp)
            for comp in inputs_at_t0
        ]
        self.categorize_variables(self.controller, init_controller_inputs)
        namespace = getattr(self.controller,
                            DynamicBase.get_namespace_name())
        namespace.category_dict = {
            VariableCategory.DIFFERENTIAL: namespace.diff_vars,
            VariableCategory.DERIVATIVE: namespace.deriv_vars,
            VariableCategory.ALGEBRAIC: namespace.alg_vars,
            VariableCategory.INPUT: namespace.input_vars,
            VariableCategory.FIXED: namespace.fixed_vars,
            VariableCategory.SCALAR: namespace.scalar_vars,
        }
        self.build_variable_locator(self.controller,
                                    namespace.category_dict,
                                    ic_vars=namespace.ic_vars)

        # Populate derived-class namespaces with attributes just constructed
        # on the base class namespace
        self._populate_namespace(self.plant)
        self._populate_namespace(self.controller)

    def set_sample_time(self, sample_time):
        """
        Validates sample time and adds as attribute to model.
        This method exists because providing sample time should not be
        required in constructor, and could change during simulation.
        """
        self.validate_sample_time(sample_time)
        self.sample_time = sample_time

    def validate_sample_time(self, sample_time):
        """Hook for derived classes; must be overridden."""
        raise NotImplementedError(
            'Derived class must implement method for validating sample time')

    @staticmethod
    def categorize_variables(model, initial_inputs):
        """Creates lists of time-only-slices of the different types of
        variables in a model, given knowledge of which are inputs. These
        lists are added as attributes to the model's namespace.

        Possible variable categories are:

            - INPUT --- Those specified by the user to be inputs
            - DERIVATIVE --- Those declared as Pyomo DerivativeVars, whose
                             "state variable" is not fixed, except possibly
                             as an initial condition
            - DIFFERENTIAL --- Those referenced as the "state variable" by
                               an unfixed (except possibly as an initial
                               condition) DerivativeVar
            - FIXED --- Those that are fixed at non-initial time points.
                        These are typically disturbances, design variables,
                        or uncertain parameters.
            - ALGEBRAIC --- Unfixed, time-indexed variables that are neither
                            inputs nor referenced by an unfixed derivative.
            - SCALAR --- Variables unindexed by time. These could be variables
                         that refer to a specific point in time (initial or
                         final conditions), averages over time, or truly
                         time-independent variables like diameter.

        Args:
            model : Model whose variables will be flattened and categorized
            initial_inputs : List of VarData objects that are input variables
                             at the initial time point
        """
        namespace = getattr(model, DynamicBase.get_namespace_name())
        time = namespace.get_time()
        t0 = time.first()
        t1 = time.get_finite_elements()[1]
        deriv_vars = []
        diff_vars = []
        input_vars = []
        alg_vars = []
        fixed_vars = []
        ic_vars = []

        # Create list of time-only-slices of time indexed variables
        # (And list of VarData objects for scalar variables)
        scalar_vars, dae_vars = flatten_dae_components(model, time, Var)

        dae_map = ComponentMap([(v[t0], v) for v in dae_vars])
        t0_vardata = list(dae_map.keys())
        namespace.dae_vars = list(dae_map.values())
        namespace.scalar_vars = \
            NMPCVarGroup(
                list(ComponentMap([(v, v) for v in scalar_vars]).values()),
                index_set=None,
                is_scalar=True)
        namespace.n_scalar_vars = \
            namespace.scalar_vars.n_vars
        input_set = ComponentSet(initial_inputs)
        updated_input_set = ComponentSet(initial_inputs)

        # Iterate over initial vardata, popping from dae map when an input,
        # derivative, or differential var is found.
        for var0 in t0_vardata:
            if var0 in updated_input_set:
                input_set.remove(var0)
                time_slice = dae_map.pop(var0)
                input_vars.append(time_slice)

            parent = var0.parent_component()
            if not isinstance(parent, DerivativeVar):
                continue
            if time not in ComponentSet(parent.get_continuousset_list()):
                continue
            index0 = var0.index()
            var1 = dae_map[var0][t1]
            index1 = var1.index()
            state = parent.get_state_var()

            if state[index1].fixed:
                # Assume state var is fixed everywhere, so derivative
                # 'isn't really' a derivative.
                # Should be safe to remove state from dae_map here
                state_slice = dae_map.pop(state[index0])
                fixed_vars.append(state_slice)
                continue
            if state[index0] in input_set:
                # If differential variable is an input, then this
                # DerivativeVar is 'not really a derivative'
                continue

            deriv_slice = dae_map.pop(var0)

            if var1.fixed:
                # Assume derivative has been fixed everywhere.
                # Add to list of fixed variables, and don't remove its state
                # variable.
                fixed_vars.append(deriv_slice)
            elif var0.fixed:
                # In this case the derivative has been used as an initial
                # condition. Still want to include it in the list of
                # derivatives.
                ic_vars.append(deriv_slice)
                state_slice = dae_map.pop(state[index0])
                if state[index0].fixed:
                    ic_vars.append(state_slice)
                deriv_vars.append(deriv_slice)
                diff_vars.append(state_slice)
            else:
                # Neither is fixed. This should be the most common case.
                state_slice = dae_map.pop(state[index0])
                if state[index0].fixed:
                    ic_vars.append(state_slice)
                deriv_vars.append(deriv_slice)
                diff_vars.append(state_slice)

        # NOTE(review): ``updated_input_set`` is never modified above (items
        # are removed from ``input_set``), so this check can only fire when
        # ``initial_inputs`` is empty — verify whether the intent was to
        # check the leftover entries of ``input_set`` instead.
        if not updated_input_set:
            raise RuntimeError('Not all inputs could be found')
        assert len(deriv_vars) == len(diff_vars)

        for var0, time_slice in dae_map.items():
            var1 = time_slice[t1]
            # If the variable is still in the list of time-indexed vars,
            # it must either be fixed (not a var) or be an algebraic var
            if var1.fixed:
                fixed_vars.append(time_slice)
            else:
                if var0.fixed:
                    ic_vars.append(time_slice)
                alg_vars.append(time_slice)

        namespace.deriv_vars = NMPCVarGroup(deriv_vars, time)
        namespace.diff_vars = NMPCVarGroup(diff_vars, time)
        namespace.n_diff_vars = len(diff_vars)
        namespace.n_deriv_vars = len(deriv_vars)
        assert (namespace.n_diff_vars == namespace.n_deriv_vars)

        # ic_vars will not be stored as a NMPCVarGroup - don't want to store
        # all the info twice
        namespace.ic_vars = ic_vars
        namespace.n_ic_vars = len(ic_vars)
        #assert model.n_dv == len(ic_vars)
        # Would like this to be true, but accurately detecting differential
        # variables that are not implicitly fixed (by fixing some input)
        # is difficult
        # Also, a categorization can have no input vars and still be
        # valid for MHE

        namespace.input_vars = NMPCVarGroup(input_vars, time)
        namespace.n_input_vars = len(input_vars)

        namespace.alg_vars = NMPCVarGroup(alg_vars, time)
        namespace.n_alg_vars = len(alg_vars)

        namespace.fixed_vars = NMPCVarGroup(fixed_vars, time)
        namespace.n_fixed_vars = len(fixed_vars)

        namespace.variables_categorized = True

    @staticmethod
    def build_variable_locator(model, category_dict, ic_vars=None,
                               measurement_vars=None):
        """Constructs a ComponentMap mapping each VarData object
        to a NMPCVarLocator object. This dictionary is added as an attribute
        to the model's namespace.

        Args:
            model : Flowsheet model containing the variables provided
            category_dict : Dictionary mapping VariableCategory enum items
                            to NMPCVarGroup instances
            ic_vars : List of variables (time-only slices) that are fixed
                      only at the initial time point
            measurement_vars : List of variables (time-only slices) that
                               are measurements
        """
        # None sentinels instead of mutable [] defaults: the previous
        # defaults were shared across calls (classic mutable-default bug).
        if ic_vars is None:
            ic_vars = []
        if measurement_vars is None:
            measurement_vars = []
        namespace = getattr(model, DynamicBase.get_namespace_name())
        time = namespace.get_time()
        ic_list = ic_vars

        locator = ComponentMap()
        for categ, vargroup in category_dict.items():
            varlist = vargroup.varlist
            if categ == VariableCategory.SCALAR:
                for i, var in enumerate(varlist):
                    locator[var] = NMPCVarLocator(categ, vargroup, i)
            else:
                for i, var in enumerate(varlist):
                    for t in time:
                        locator[var[t]] = NMPCVarLocator(categ, vargroup, i)

        # Since these variables already have NMPCVarLocator objects,
        # just set the desired attribute.
        for i, _slice in enumerate(ic_list):
            for t in time:
                locator[_slice[t]].is_ic = True
        for i, _slice in enumerate(measurement_vars):
            for t in time:
                locator[_slice[t]].is_measurement = True

        namespace.var_locator = locator
class BigM_Transformation(Transformation):
    """Relax disjunctive model using big-M terms.

    Relaxes a disjunctive model into an algebraic model by adding Big-M
    terms to all disjunctive constraints.

    This transformation accepts the following keyword arguments:
        bigM: A user-specified value (or dict) of M values to use (see below)
        targets: the targets to transform [default: the instance]

    M values are determined as follows:
       1) if the constraint CUID appears in the bigM argument dict
       2) if the constraint parent_component CUID appears in the bigM
          argument dict
       3) if 'None' is in the bigM argument dict
       4) if the constraint or the constraint parent_component appear in
          a BigM Suffix attached to any parent_block() beginning with the
          constraint's parent_block and moving up to the root model
       5) if None appears in a BigM Suffix attached to any parent_block()
          between the constraint and the root model
       6) if the constraint is linear, estimate M using the variable bounds

    M values may be a single value or a 2-tuple specifying the M for the
    lower bound and the upper bound of the constraint body.

    Specifying "bigM=N" is automatically mapped to "bigM={None: N}".

    After transformation, every transformed disjunct will have a
    "_gdp_transformation_info" dict containing 2 entries:

        'relaxed': True,
        'bigm': {
            'relaxationBlock': <block>,
            'relaxedConstraints': ComponentMap(constraint: relaxed_constraint)
        }

    In addition, any block or disjunct containing a relaxed disjunction
    will have a "_gdp_transformation_info" dict with the following entry:

        'disjunction_or_constraint': <constraint>

    Finally, the transformation will create a new Block with a unique
    name beginning "_pyomo_gdp_bigm_relaxation". That Block will contain
    an indexed Block named "relaxedDisjuncts", which will hold the
    relaxed disjuncts. This block is indexed by an integer indicating the
    order in which the disjuncts were relaxed. Each block will have a
    "_gdp_transformation_info" dict with the following entries:

        'src': <source disjunct>
        'srcConstraints': ComponentMap(relaxed_constraint: constraint)
    """

    alias('gdp.bigm', doc=textwrap.fill(textwrap.dedent(__doc__.strip())))

    CONFIG = ConfigBlock("gdp.bigm")
    CONFIG.declare('bigM', ConfigValue(
        default=None,
        domain=_to_dict,
        description="Big-M value used for constraint relaxation",
        doc="""

        A user-specified value (or dict) of M values that override
        M-values found through model Suffixes or that would
        otherwise be calculated using variable domains."""
    ))

    def __init__(self):
        """Initialize transformation object."""
        super(BigM_Transformation, self).__init__()
        # Dispatch table mapping component types to their transformation
        # handler. False means "nothing to do"; a missing entry is an error.
        self.handlers = {
            Constraint:  self._xform_constraint,
            Var:         False,
            Connector:   False,
            Suffix:      False,
            Param:       False,
            Set:         False,
            Disjunction: self._warn_for_active_disjunction,
            Disjunct:    self._warn_for_active_disjunct,
            Block:       self._transform_block_on_disjunct,
        }

    def _get_bigm_suffix_list(self, block):
        """Collect BigM Suffixes walking from `block` up to the root model.

        The returned list is ordered from most-specific (closest to the
        constraint) to least-specific, which is the lookup priority used
        by _get_M_from_suffixes.
        """
        # Note that you can only specify suffixes on BlockData objects or
        # SimpleBlocks. Though it is possible at this point to stick them
        # on whatever components you want, we won't pick them up.
        suffix_list = []
        while block is not None:
            bigm = block.component('BigM')
            if type(bigm) is Suffix:
                suffix_list.append(bigm)
            block = block.parent_block()
        return suffix_list

    def _apply_to(self, instance, targets=None, **kwds):
        """Apply the Big-M relaxation to `instance` (or to `targets`)."""
        config = self.CONFIG().set_value(kwds.pop('options', {}))

        # For now, we're not accepting options. We will let args override
        # suffixes and estimate as a last resort. More specific args/suffixes
        # override ones anywhere in the tree. Suffixes lower down in the tree
        # override ones higher up.
        if 'default_bigM' in kwds:
            # logger.warn is a deprecated alias of logger.warning
            logger.warning("DEPRECATED: the 'default_bigM=' argument has been "
                           "replaced by 'bigM='")
            config.bigM = kwds.pop('default_bigM')

        config.set_value(kwds)
        bigM = config.bigM

        # make a transformation block to put transformed disjuncts on
        transBlockName = unique_component_name(
            instance, '_pyomo_gdp_bigm_relaxation')
        transBlock = Block()
        instance.add_component(transBlockName, transBlock)
        transBlock.relaxedDisjuncts = Block(Any)
        transBlock.lbub = Set(initialize=['lb', 'ub'])
        # this is a dictionary for keeping track of IndexedDisjuncts
        # and IndexedDisjunctions so that, at the end of the
        # transformation, we can check that the ones with no active
        # DisjstuffDatas are deactivated.
        transBlock.disjContainers = ComponentSet()

        if targets is None:
            targets = (instance, )
            _HACK_transform_whole_instance = True
        else:
            _HACK_transform_whole_instance = False
        for _t in targets:
            t = _t.find_component(instance)
            if t is None:
                raise GDP_Error(
                    "Target %s is not a component on the instance!" % _t)
            if not t.active:
                continue

            if t.type() is Disjunction:
                if t.parent_component() is t:
                    self._transformDisjunction(t, transBlock, bigM)
                else:
                    self._transformDisjunctionData(
                        t, transBlock, bigM, t.index())
            elif t.type() in (Block, Disjunct):
                if t.parent_component() is t:
                    self._transformBlock(t, transBlock, bigM)
                else:
                    self._transformBlockData(t, transBlock, bigM)
            else:
                raise GDP_Error(
                    "Target %s was not a Block, Disjunct, or Disjunction. "
                    "It was of type %s and can't be transformed."
                    % (t.name, type(t)))

        # Go through our dictionary of indexed things and deactivate
        # the containers that don't have any active guys inside of
        # them. So the invalid component logic will tell us if we
        # missed something getting transformed.
        for obj in transBlock.disjContainers:
            if not obj.active:
                continue
            for i in obj:
                if obj[i].active:
                    break
            else:
                # HACK due to active flag implementation.
                #
                # Ideally we would not have to do any of this (an
                # ActiveIndexedComponent would get its active status by
                # querying the active status of all the contained Data
                # objects). As a fallback, we would like to call:
                #
                # obj._deactivate_without_fixing_indicator()
                #
                # However, the straightforward implementation of that
                # method would have unintended side effects (fixing the
                # contained _DisjunctData's indicator_vars!) due to our
                # class hierarchy. Instead, we will directly call the
                # relevant base class (safe-ish since we are verifying
                # that all the contained _DisjunctionData are
                # deactivated directly above).
                ActiveComponent.deactivate(obj)

        # HACK for backwards compatibility with the older GDP transformations
        #
        # Until the writers are updated to find variables on things
        # other than active blocks, we need to reclassify the Disjuncts
        # as Blocks after transformation so that the writer will pick up
        # all the variables that it needs (in this case, indicator_vars).
        if _HACK_transform_whole_instance:
            HACK_GDP_Disjunct_Reclassifier().apply_to(instance)

    def _transformBlock(self, obj, transBlock, bigM):
        """Transform every BlockData in an (indexed) Block."""
        for i in sorted(iterkeys(obj)):
            self._transformBlockData(obj[i], transBlock, bigM)

    def _transformBlockData(self, obj, transBlock, bigM):
        """Transform every active Disjunction found on a BlockData."""
        # Transform every (active) disjunction in the block. PostfixDFS
        # ensures inner disjunctions are relaxed before outer ones.
        for disjunction in obj.component_objects(
                Disjunction,
                active=True,
                sort=SortComponents.deterministic,
                descend_into=(Block, Disjunct),
                descent_order=TraversalStrategy.PostfixDFS):
            self._transformDisjunction(disjunction, transBlock, bigM)

    def _getXorConstraint(self, disjunction):
        """Return (creating if needed) the XOR/OR constraint for a
        Disjunction, stored on the disjunction's parent block."""
        # Put the disjunction constraint on its parent block and
        # determine whether it is an OR or XOR constraint.

        # We never do this for just a DisjunctionData because we need
        # to know about the index set of its parent component. So if
        # we called this on a DisjunctionData, we did something wrong.
        assert isinstance(disjunction, Disjunction)
        parent = disjunction.parent_block()
        if hasattr(parent, "_gdp_transformation_info"):
            infodict = parent._gdp_transformation_info
            if type(infodict) is not dict:
                raise GDP_Error(
                    "Component %s contains an attribute named "
                    "_gdp_transformation_info. The transformation requires "
                    "that it can create this attribute!" % parent.name)
            try:
                # On the off-chance that another GDP transformation went
                # first, the infodict may exist, but the specific map we
                # want will not be present
                orConstraintMap = infodict['disjunction_or_constraint']
            except KeyError:
                orConstraintMap = infodict['disjunction_or_constraint'] \
                    = ComponentMap()
        else:
            infodict = parent._gdp_transformation_info = {}
            orConstraintMap = infodict['disjunction_or_constraint'] \
                = ComponentMap()

        # If the Constraint already exists, return it
        if disjunction in orConstraintMap:
            return orConstraintMap[disjunction]

        # add the XOR (or OR) constraints to parent block (with unique name)
        # It's indexed if this is an IndexedDisjunction, not otherwise
        orC = Constraint(disjunction.index_set()) if \
            disjunction.is_indexed() else Constraint()
        # The name used to indicate if there were OR or XOR disjunctions,
        # however now that Disjunctions are allowed to mix the state we
        # can no longer make that distinction in the name.
        #    nm = '_xor' if xor else '_or'
        nm = '_xor'
        orCname = unique_component_name(
            parent, '_gdp_bigm_relaxation_' + disjunction.name + nm)
        parent.add_component(orCname, orC)
        orConstraintMap[disjunction] = orC
        return orC

    def _transformDisjunction(self, obj, transBlock, bigM):
        """Relax each DisjunctionData in an (indexed) Disjunction."""
        # relax each of the disjunctionDatas
        for i in sorted(iterkeys(obj)):
            self._transformDisjunctionData(obj[i], transBlock, bigM, i)

        # deactivate so we know we relaxed
        obj.deactivate()

    def _transformDisjunctionData(self, obj, transBlock, bigM, index):
        """Relax one DisjunctionData: relax its disjuncts and add the
        XOR (or OR) constraint over the indicator variables."""
        parent_component = obj.parent_component()
        transBlock.disjContainers.add(parent_component)
        orConstraint = self._getXorConstraint(parent_component)

        xor = obj.xor
        or_expr = 0
        for disjunct in obj.disjuncts:
            or_expr += disjunct.indicator_var
            # make suffix list. (We don't need it until we are
            # transforming constraints, but it gets created at the
            # disjunct level, so more efficient to make it here and
            # pass it down.
            suffix_list = self._get_bigm_suffix_list(disjunct)
            # relax the disjunct
            self._bigM_relax_disjunct(disjunct, transBlock, bigM, suffix_list)
        # add or (or xor) constraint
        if xor:
            orConstraint.add(index, (or_expr, 1))
        else:
            orConstraint.add(index, (1, or_expr, None))
        obj.deactivate()

    def _bigM_relax_disjunct(self, obj, transBlock, bigM, suffix_list):
        """Relax a single DisjunctData onto the transformation block."""
        if hasattr(obj, "_gdp_transformation_info"):
            infodict = obj._gdp_transformation_info
            # If the user has something with our name that is not a dict, we
            # scream. If they have a dict with this name then we are just
            # going to use it...
            if type(infodict) is not dict:
                raise GDP_Error(
                    "Disjunct %s contains an attribute named "
                    "_gdp_transformation_info. The transformation requires "
                    "that it can create this attribute!"
                    % obj.name)
        else:
            infodict = obj._gdp_transformation_info = {}

        # deactivated -> either we've already transformed or user deactivated
        if not obj.active:
            if obj.indicator_var.is_fixed():
                if value(obj.indicator_var) == 0:
                    # The user cleanly deactivated the disjunct: there
                    # is nothing for us to do here.
                    return
                else:
                    # BUGFIX: the conversion character was missing ("%."),
                    # which itself raised ValueError instead of GDP_Error.
                    raise GDP_Error(
                        "The disjunct %s is deactivated, but the "
                        "indicator_var is fixed to %s. This makes no sense."
                        % (obj.name, value(obj.indicator_var)))
            if not infodict.get('relaxed', False):
                raise GDP_Error(
                    "The disjunct %s is deactivated, but the "
                    "indicator_var is not fixed and the disjunct does not "
                    "appear to have been relaxed. This makes no sense."
                    % (obj.name, ))

        if 'bigm' in infodict:
            # we've transformed it (with BigM), so don't do it again.
            return

        # add reference to original disjunct to info dict on transformation
        # block
        relaxedDisjuncts = transBlock.relaxedDisjuncts
        relaxationBlock = relaxedDisjuncts[len(relaxedDisjuncts)]
        relaxationBlock._gdp_transformation_info = {
            'src': obj,
            'srcConstraints': ComponentMap(),
        }

        # add reference to transformation block on original disjunct
        assert 'bigm' not in infodict
        infodict['bigm'] = {
            'relaxationBlock': relaxationBlock,
            'relaxedConstraints': ComponentMap()
        }

        # if this is a disjunctData from an indexed disjunct, we are
        # going to want to check at the end that the container is
        # deactivated if everything in it is. So we save it in our
        # dictionary of things to check if it isn't there already.
        disjParent = obj.parent_component()
        if disjParent.is_indexed() and \
           disjParent not in transBlock.disjContainers:
            transBlock.disjContainers.add(disjParent)

        # Transform each component within this disjunct
        self._transform_block_components(obj, obj, infodict, bigM,
                                         suffix_list)

        # deactivate disjunct so we know we've relaxed it
        obj._deactivate_without_fixing_indicator()
        infodict['relaxed'] = True

    def _transform_block_components(self, block, disjunct, infodict,
                                    bigM, suffix_list):
        """Dispatch each active component on `block` to its handler."""
        # Look through the component map of block and transform
        # everything we have a handler for. Yell if we don't know how
        # to handle it.
        for name, obj in list(iteritems(block.component_map())):
            if hasattr(obj, 'active') and not obj.active:
                continue
            handler = self.handlers.get(obj.type(), None)
            if not handler:
                if handler is None:
                    raise GDP_Error(
                        "No BigM transformation handler registered "
                        "for modeling components of type %s" % obj.type())
                continue
            # obj is what we are transforming, we pass disjunct
            # through so that we will have access to the indicator
            # variables down the line.
            handler(obj, disjunct, infodict, bigM, suffix_list)

    def _warn_for_active_disjunction(self, disjunction, disjunct, infodict,
                                     bigMargs, suffix_list):
        """Raise if an untransformed Disjunction is found on a disjunct."""
        # this should only have gotten called if the disjunction is active
        assert disjunction.active
        problemdisj = disjunction
        if disjunction.is_indexed():
            for i in disjunction:
                if disjunction[i].active:
                    # a _DisjunctionData is active, we will yell about
                    # it specifically.
                    problemdisj = disjunction[i]
                    break
            # None of the _DisjunctionDatas were actually active. We
            # are OK and we can deactivate the container.
            else:
                disjunction.deactivate()
                return
        parentblock = problemdisj.parent_block()
        # the disjunction should only have been active if it wasn't transformed
        assert (not hasattr(parentblock, "_gdp_transformation_info")) or \
            problemdisj.name not in parentblock._gdp_transformation_info
        raise GDP_Error("Found untransformed disjunction %s in disjunct %s! "
                        "The disjunction must be transformed before the "
                        "disjunct. If you are using targets, put the "
                        "disjunction before the disjunct in the list."
                        % (problemdisj.name, disjunct.name))

    def _warn_for_active_disjunct(self, innerdisjunct, outerdisjunct,
                                  infodict, bigMargs, suffix_list):
        """Raise if an untransformed (nested) Disjunct is found."""
        assert innerdisjunct.active
        problemdisj = innerdisjunct
        if innerdisjunct.is_indexed():
            for i in innerdisjunct:
                if innerdisjunct[i].active:
                    # This shouldn't be true, we will complain about it.
                    problemdisj = innerdisjunct[i]
                    break
            # None of the _DisjunctDatas were actually active, so we
            # are fine and we can deactivate the container.
            else:
                # HACK: See above about _deactivate_without_fixing_indicator
                ActiveComponent.deactivate(innerdisjunct)
                return
        raise GDP_Error("Found active disjunct {0} in disjunct {1}! "
                        "Either {0} "
                        "is not in a disjunction or the disjunction it is in "
                        "has not been transformed. "
                        "{0} needs to be deactivated "
                        "or its disjunction transformed before {1} can be "
                        "transformed.".format(problemdisj.name,
                                              outerdisjunct.name))

    def _transform_block_on_disjunct(self, block, disjunct, infodict,
                                     bigMargs, suffix_list):
        """Transform the components of a Block living on a disjunct."""
        # We look through everything on the component map of the block
        # and transform it just as we would if it was on the disjunct
        # directly. (We are passing the disjunct through so that when
        # we find constraints, _xform_constraint will have access to
        # the correct indicator variable.
        self._transform_block_components(block, disjunct, infodict,
                                         bigMargs, suffix_list)

    def _xform_constraint(self, obj, disjunct, infodict,
                          bigMargs, suffix_list):
        """Relax a (possibly indexed) Constraint with big-M terms."""
        # add constraint to the transformation block, we'll transform it there.

        relaxationBlock = infodict['bigm']['relaxationBlock']
        transBlock = relaxationBlock.parent_block()
        # Though rare, it is possible to get naming conflicts here
        # since constraints from all blocks are getting moved onto the
        # same block. So we get a unique name
        name = unique_component_name(relaxationBlock, obj.name)

        if obj.is_indexed():
            try:
                newConstraint = Constraint(obj.index_set(), transBlock.lbub)
            except TypeError:
                # The original constraint may have been indexed by a
                # non-concrete set (like an Any). We will give up on
                # strict index verification and just blindly proceed.
                newConstraint = Constraint(Any)
        else:
            newConstraint = Constraint(transBlock.lbub)
        relaxationBlock.add_component(name, newConstraint)
        # add mapping of original constraint to transformed constraint
        # in transformation info dictionary
        infodict['bigm']['relaxedConstraints'][obj] = newConstraint
        # add mapping of transformed constraint back to original constraint (we
        # know that the info dict is already created because this only got
        # called if we were transforming a disjunct...)
        relaxationBlock._gdp_transformation_info['srcConstraints'][
            newConstraint] = obj

        for i in sorted(iterkeys(obj)):
            c = obj[i]
            if not c.active:
                continue
            c.deactivate()

            # first, we see if an M value was specified in the arguments.
            # (This returns None if not)
            M = self._get_M_from_args(c, bigMargs)

            if __debug__ and logger.isEnabledFor(logging.DEBUG):
                logger.debug("GDP(BigM): The value for M for constraint %s "
                             "from the BigM argument is %s." %
                             (obj.name, str(M)))

            # if we didn't get something from args, try suffixes:
            if M is None:
                M = self._get_M_from_suffixes(c, suffix_list)

            if __debug__ and logger.isEnabledFor(logging.DEBUG):
                logger.debug("GDP(BigM): The value for M for constraint %s "
                             "after checking suffixes is %s." %
                             (obj.name, str(M)))

            # Normalize M to a (lower, upper) pair.
            if not isinstance(M, (tuple, list)):
                if M is None:
                    M = (None, None)
                else:
                    try:
                        M = (-M, M)
                    except Exception:
                        # log and re-raise: M was not negatable
                        logger.error("Error converting scalar M-value %s "
                                     "to (-M,M). Is %s not a numeric type?"
                                     % (M, type(M)))
                        raise
            if len(M) != 2:
                raise GDP_Error("Big-M %s for constraint %s is not of "
                                "length two. "
                                "Expected either a single value or "
                                "tuple or list of length two for M."
                                % (str(M), name))

            # Estimate whichever bound of M is still missing.
            if c.lower is not None and M[0] is None:
                M = (self._estimate_M(c.body, name)[0] - c.lower, M[1])
            if c.upper is not None and M[1] is None:
                M = (M[0], self._estimate_M(c.body, name)[1] - c.upper)

            if __debug__ and logger.isEnabledFor(logging.DEBUG):
                logger.debug("GDP(BigM): The value for M for constraint %s "
                             "after estimating (if needed) is %s." %
                             (obj.name, str(M)))

            # Handle indices for both SimpleConstraint and IndexedConstraint
            if i.__class__ is tuple:
                i_lb = i + ('lb', )
                i_ub = i + ('ub', )
            elif obj.is_indexed():
                i_lb = (i, 'lb', )
                i_ub = (i, 'ub', )
            else:
                i_lb = 'lb'
                i_ub = 'ub'

            if c.lower is not None:
                if M[0] is None:
                    raise GDP_Error("Cannot relax disjunctive constraint %s "
                                    "because M is not defined." % name)
                M_expr = M[0] * (1 - disjunct.indicator_var)
                newConstraint.add(i_lb, c.lower <= c.body - M_expr)
            if c.upper is not None:
                if M[1] is None:
                    raise GDP_Error("Cannot relax disjunctive constraint %s "
                                    "because M is not defined." % name)
                M_expr = M[1] * (1 - disjunct.indicator_var)
                newConstraint.add(i_ub, c.body - M_expr <= c.upper)

    def _get_M_from_args(self, constraint, bigMargs):
        """Look up M for `constraint` in the bigM argument dict.

        Priority: constraint CUID, then parent_component CUID, then the
        None (wildcard) key. Returns None if nothing matches.
        """
        # check args: we only have to look for constraint, constraintdata,
        # and None
        if bigMargs is None:
            return None

        cuid = ComponentUID(constraint)
        parentcuid = ComponentUID(constraint.parent_component())
        if cuid in bigMargs:
            return bigMargs[cuid]
        elif parentcuid in bigMargs:
            return bigMargs[parentcuid]
        elif None in bigMargs:
            return bigMargs[None]
        return None

    def _get_M_from_suffixes(self, constraint, suffix_list):
        """Look up M for `constraint` in the BigM Suffixes, most-specific
        suffix first; fall back to the None (wildcard) key."""
        M = None
        # first we check if the constraint or its parent is a key in any of
        # the suffix lists
        for bigm in suffix_list:
            if constraint in bigm:
                M = bigm[constraint]
                break

            # if c is indexed, check for the parent component
            if constraint.parent_component() in bigm:
                M = bigm[constraint.parent_component()]
                break

        # if we didn't get an M that way, traverse upwards through the blocks
        # and see if None has a value on any of them.
        if M is None:
            for bigm in suffix_list:
                if None in bigm:
                    M = bigm[None]
                    break
        return M

    def _estimate_M(self, expr, name):
        """Estimate (lower, upper) M values for `expr` from variable bounds.

        Only linear expressions are supported; raises GDP_Error for
        nonlinear expressions or unbounded variables.
        """
        # Calculate a best guess at M
        repn = generate_canonical_repn(expr)
        M = [0, 0]

        if isinstance(repn, LinearCanonicalRepn):
            if repn.constant is not None:
                for i in (0, 1):
                    if M[i] is not None:
                        M[i] += repn.constant

            for i, coef in enumerate(repn.linear or []):
                var = repn.variables[i]
                bounds = (value(var.lb), value(var.ub))
                for k in (0, 1):
                    # reverse the bounds if the coefficient is negative
                    if coef > 0:
                        j = k
                    else:
                        j = 1 - k

                    if bounds[k] is not None:
                        M[j] += value(bounds[k]) * coef
                    else:
                        raise GDP_Error(
                            "Cannot estimate M for "
                            "expressions with unbounded variables."
                            "\n\t(found unbounded var %s while processing "
                            "constraint %s)" % (var.name, name))
        else:
            raise GDP_Error("Cannot estimate M for nonlinear "
                            "expressions.\n\t(found while processing "
                            "constraint %s)" % name)

        return tuple(M)