def test_smarts_fast_reset_memory_cleanup(agent_id, seed, social_agent_scenarios):
    """Many short, fast-reset episodes must not grow memory past the limit."""
    agent_type = AgentType.Buddha

    # Warm-up run: initialize globals and verify SMARTS works at all.
    _memory_buildup(agent_id, seed, social_agent_scenarios, 1, None, agent_type)

    summary_tracker = tracker.SummaryTracker()
    gc.collect()
    baseline = muppy.get_size(muppy.get_objects())

    # Hammer the simulation with many short (2-step) episodes.
    for _ in range(100):
        _memory_buildup(
            agent_id,
            seed,
            social_agent_scenarios,
            1,
            None,
            agent_type,
            max_episode_steps=2,
        )

    gc.collect()
    final = muppy.get_size(muppy.get_objects())
    gc.collect()
    summary_tracker.print_diff()

    # Check for a major leak
    delta = final - baseline
    assert delta < SMARTS_MEMORY_GROWTH_LIMIT, f"End size delta {delta}"
def test_env_memory_cleanup(agent_id, seed, primative_scenarios):
    """A second env build-up must not grow memory past the episode limit."""
    # Warm-up run: initialize globals.
    _, action, agent_type = (100, None, AgentType.Buddha)
    _env_memory_buildup(agent_id, seed, primative_scenarios, action, agent_type)
    gc.collect()

    # Memory size check: measure before and after one more build-up.
    before = muppy.get_size(muppy.get_objects())
    gc.collect()
    _env_memory_buildup(agent_id, seed, primative_scenarios, action, agent_type)
    after = muppy.get_size(muppy.get_objects())
    gc.collect()

    def success_condition():
        return after - before < EPISODE_MEMORY_GROWTH_LIMIT

    if not success_condition():
        # Failure path: rerun under a tracker and print an object diff
        # so the leaking types are visible in the test log.
        summary_tracker = tracker.SummaryTracker()
        summary_tracker.print_diff()
        _env_memory_buildup(agent_id, seed, primative_scenarios, action, agent_type)
        delta_summary = summary_tracker.diff()
        summary.print_(delta_summary)
        delta_summary = None
        gc.collect()

    assert success_condition(), f"Size diff {after - before}"
def _initialize_component(self, modeldata, namespaces, component_name, profile_memory):
    """Construct the component named ``component_name`` from ``modeldata``,
    searching the given namespaces in order for its initialization data."""
    declaration = self.component(component_name)

    # Apply a declared default value, if any (Sets take no default).
    if component_name in modeldata._default:
        if declaration.type() is not Set:
            declaration.set_default(modeldata._default[component_name])

    # The first namespace that supplies data for this component wins.
    data = None
    for namespace in namespaces:
        if component_name in modeldata._data.get(namespace, {}):
            raw = modeldata._data[namespace][component_name]
            if declaration.type() is Set:
                # Set data must be normalized into tuples first.
                data = self._tuplize(raw, declaration)
            else:
                data = raw
        if data is not None:
            break

    if __debug__ and logger.isEnabledFor(logging.DEBUG):
        if self.parent_block() is None:
            _blockName = "Model"
        else:
            _blockName = "Block '%s'" % self.name
        logger.debug("Constructing %s '%s' on %s from data=%s",
                     declaration.__class__.__name__, declaration.name,
                     _blockName, str(data))

    try:
        declaration.construct(data)
    except:
        # Log full context before re-raising the original exception.
        err = sys.exc_info()[1]
        logger.error(
            "Constructing component '%s' from data=%s failed:\n%s: %s",
            str(declaration.name), str(data).strip(),
            type(err).__name__, err)
        raise

    if __debug__ and logger.isEnabledFor(logging.DEBUG):
        _out = StringIO()
        declaration.pprint(ostream=_out)
        logger.debug("Constructed component '%s':\n%s" % (declaration.name,
                                                          _out.getvalue()))

    # Optional memory profiling (requires pympler).
    if (pympler_available is True) and (profile_memory >= 2):
        mem_used = muppy.get_size(muppy.get_objects())
        print(" Total memory = %d bytes following construction of component=%s"
              % (mem_used, component_name))

    if (pympler_available is True) and (profile_memory >= 3):
        gc.collect()
        mem_used = muppy.get_size(muppy.get_objects())
        print(" Total memory = %d bytes following construction of component=%s (after garbage collection)"
              % (mem_used, component_name))
def test_smarts_basic_memory_cleanup(agent_id, seed, primative_scenarios, agent_params):
    """One extra SMARTS run must not leak past SMARTS_MEMORY_GROWTH_LIMIT."""
    # Warm-up run: initialize globals and verify SMARTS works at all.
    _memory_buildup(
        agent_id, seed, primative_scenarios, 100, agent_params[1], agent_params[2]
    )

    gc.collect()
    baseline = muppy.get_size(muppy.get_objects())

    _memory_buildup(agent_id, seed, primative_scenarios, *agent_params)

    final = muppy.get_size(muppy.get_objects())

    # Check for a major leak
    delta = final - baseline
    assert delta < SMARTS_MEMORY_GROWTH_LIMIT, f"End size delta {delta}"
def _initialize_component(self, modeldata, namespaces, component_name, profile_memory):
    """Construct the component named ``component_name`` using data from
    ``modeldata``, searching ``namespaces`` in order."""
    declaration = self.component(component_name)

    # Apply a declared default value, if any (Sets take no default).
    if component_name in modeldata._default:
        if declaration.type() is not Set:
            declaration.set_default(modeldata._default[component_name])

    # The first namespace that supplies data for this component wins.
    data = None
    for namespace in namespaces:
        if component_name in modeldata._data.get(namespace, {}):
            if declaration.type() is Set:
                # Set data must be normalized into tuples before construction.
                data = self._tuplize(modeldata._data[namespace][component_name],
                                     declaration)
            else:
                data = modeldata._data[namespace][component_name]
        if not data is None:
            break

    if __debug__ and logger.isEnabledFor(logging.DEBUG):
        _blockName = "Model" if self.parent_block() is None \
            else "Block '%s'" % self.name
        logger.debug(
            "Constructing %s '%s' on %s from data=%s",
            declaration.__class__.__name__,
            declaration.name,
            _blockName,
            str(data)
        )
    try:
        declaration.construct(data)
    except:
        # Log full context before re-raising the original exception.
        err = sys.exc_info()[1]
        logger.error(
            "Constructing component '%s' from data=%s failed:\n %s: %s",
            str(declaration.name),
            str(data).strip(),
            type(err).__name__,
            err
        )
        raise

    if __debug__ and logger.isEnabledFor(logging.DEBUG):
        _out = StringIO()
        declaration.pprint(ostream=_out)
        logger.debug("Constructed component '%s':\n %s" % (
            declaration.name, _out.getvalue()))

    # Optional memory profiling (requires pympler).
    if (pympler_available is True) and (profile_memory >= 2):
        mem_used = muppy.get_size(muppy.get_objects())
        print(" Total memory = %d bytes following construction of component=%s"
              % (mem_used, component_name))

    if (pympler_available is True) and (profile_memory >= 3):
        gc.collect()
        mem_used = muppy.get_size(muppy.get_objects())
        print(" Total memory = %d bytes following construction of component=%s (after garbage collection)"
              % (mem_used, component_name))
def apply_postprocessing(data, instance=None, results=None):
    """
    Apply post-processing steps.

    Required:
        instance:   Problem instance.
        results:    Optimization results object.
    """
    # Announce this phase unless logging is quiet.
    if not data.options.runtime.logging == 'quiet':
        sys.stdout.write('[%8.2f] Applying Pyomo postprocessing actions\n'
                         % (time.time() - start_time))
        sys.stdout.flush()

    # options are of type ConfigValue, not raw strings / atomics.
    for config_value in data.options.postprocess:
        module = pyutilib.misc.import_file(config_value, clear_cache=True)
        if "pyomo_postprocess" in dir(module):
            module.pyomo_postprocess(data.options, instance, results)

    # Invoke every registered postprocess extension plugin.
    for ep in ExtensionPoint(IPyomoScriptPostprocess):
        ep.apply(options=data.options, instance=instance, results=results)

    # Track the peak memory high-water mark when profiling is enabled.
    if (pympler_available is True) and (data.options.runtime.profile_memory >= 1):
        mem_used = muppy.get_size(muppy.get_objects())
        if mem_used > data.local.max_memory:
            data.local.max_memory = mem_used
        print(" Total memory = %d bytes upon termination" % mem_used)
def test_smarts_social_agent_scenario_memory_cleanup(agent_id, seed, social_agent_scenarios, agent_type):
    """A long run after a short warm-up must stay within the growth limit."""
    # Warm-up run: initialize globals and verify SMARTS works at all.
    _memory_buildup(agent_id, seed, social_agent_scenarios, 1, *agent_type)
    gc.collect()

    baseline = muppy.get_size(muppy.get_objects())

    _memory_buildup(agent_id, seed, social_agent_scenarios, 100, *agent_type)
    gc.collect()

    final = muppy.get_size(muppy.get_objects())

    delta = final - baseline
    assert delta < SMARTS_MEMORY_GROWTH_LIMIT, f"End size delta {delta}"
def apply_postprocessing(data, instance=None, results=None): """ Apply post-processing steps. Required: instance: Problem instance. results: Optimization results object. """ # if not data.options.runtime.logging == 'quiet': sys.stdout.write('[%8.2f] Applying Pyomo postprocessing actions\n' % (time.time() - start_time)) sys.stdout.flush() # options are of type ConfigValue, not raw strings / atomics. for config_value in data.options.postprocess: postprocess = pyutilib.misc.import_file(config_value, clear_cache=True) if "pyomo_postprocess" in dir(postprocess): postprocess.pyomo_postprocess(data.options, instance, results) for ep in ExtensionPoint(IPyomoScriptPostprocess): ep.apply(options=data.options, instance=instance, results=results) if (pympler_available is True) and (data.options.runtime.profile_memory >= 1): mem_used = muppy.get_size(muppy.get_objects()) if mem_used > data.local.max_memory: data.local.max_memory = mem_used print(" Total memory = %d bytes upon termination" % mem_used)
def test_smarts_episode_memory_cleanup(agent_id, seed, primative_scenarios, agent_params):
    """Memory growth between sampled episodes must stay under the limit."""
    MAX_EPISODE_STEPS = 100
    EPISODE_COUNT = 100
    STEPS_PER_YIELD = 10

    _, action, agent_type = agent_params
    env_and_agent_spec = env_and_spec(
        action, agent_type, MAX_EPISODE_STEPS, primative_scenarios, seed, agent_id
    )

    size = 0
    last_size = 0
    gc.collect()
    summary_tracker = tracker.SummaryTracker()
    try:
        episodes = _every_nth_episode(
            agent_id,
            EPISODE_COUNT,
            env_and_agent_spec,
            steps_per_yield=STEPS_PER_YIELD,
        )
        for current_episode in episodes:
            gc.collect()
            live_objects = muppy.get_objects()
            size = muppy.get_size(live_objects)
            summary_tracker.print_diff(summary.summarize(live_objects))
            print(flush=True)
            live_objects = None
            # Skip the first sample: caches are still filling up then.
            if current_episode > STEPS_PER_YIELD:
                assert size - last_size < EPISODE_MEMORY_GROWTH_LIMIT, \
                    f"End size delta {size - last_size}"
            last_size = size
    finally:
        # Always release the environment, even if the assertion fires.
        env_and_agent_spec[0].close()
def test_smarts_repeated_runs_memory_cleanup(agent_id, seed, primative_scenarios, agent_type):
    """100 repeated short runs should leak no more than a single run does."""
    # Warm-up run: initialize globals and verify SMARTS works at all.
    _memory_buildup(agent_id, seed, primative_scenarios, 1, *agent_type)
    gc.collect()
    baseline = muppy.get_size(muppy.get_objects())

    for _ in range(100):
        _memory_buildup(agent_id, seed, primative_scenarios, 1, *agent_type)

    gc.collect()
    final = muppy.get_size(muppy.get_objects())

    # This "should" be roughly the same as `test_smarts_basic_memory_cleanup`
    delta = final - baseline
    assert delta < SMARTS_MEMORY_GROWTH_LIMIT, f"End size delta {delta}"
def test_get_size(self):
    """Test that the return value is the sum of the size of all objects."""
    # Renamed from `list`: never shadow the builtin of the same name.
    objects = [1, 'a', 'b', 4, 5]
    # Expected total: per-element sizes accumulated via sum() rather than a
    # manual loop; the pointless (o1, ..., o5) unpacking is gone as well.
    expected = sum(getsizeof(o) for o in objects)
    self.assertEqual(muppy.get_size(objects), expected)
def test_get_size(self):
    """Test that the return value is the sum of the size of all objects."""
    # Renamed from `list`: never shadow the builtin of the same name.
    objects = [1, "a", "b", 4, 5]
    # Expected total: per-element sizes accumulated via sum() rather than a
    # manual loop; the pointless (o1, ..., o5) unpacking is gone as well.
    expected = sum(_getsizeof(o) for o in objects)
    self.assertEqual(muppy.get_size(objects), expected)
def get_report(self):
    """Return ``(total_size, report)`` for all live objects.

    Rows are sorted descending by the column chosen in the form data and
    optionally truncated to the requested limit.
    """
    live_objects = muppy.get_objects()
    total_size = get_size(live_objects)
    rows = summary.summarize(live_objects)

    sort_index = self.cleaned_data['sort_by']
    limit = self.cleaned_data['limit']

    rows.sort(key=lambda row: row[sort_index], reverse=True)
    if limit:
        rows = rows[:limit]
    return total_size, rows
def profile(self, frame, event, arg):  # arg req to match signature
    """Profiling method used to profile matching codepoints and events."""
    # Only handle events this profiler was configured for (None = all).
    if (self.events is None) or (event in self.events):
        frame_info = inspect.getframeinfo(frame)
        # Codepoint key: (filename, function name, line number).
        codepoint = (frame_info[0], frame_info[2], frame_info[1])
        if self.codepoint_included(codepoint):
            size = muppy.get_size(muppy.get_objects())
            record = self.memories.get(codepoint)
            if record is None:
                # New codepoint: [hit count, min size, max size, <unused>].
                self.memories[codepoint] = [1, size, size, 0]
            else:
                # Seen before: bump the count, widen the min/max range.
                record[0] += 1
                record[1] = min(record[1], size)
                record[2] = max(record[2], size)
def profile(self, frame, event, arg):  # PYCHOK arg req to match signature
    """Profiling method used to profile matching codepoints and events."""
    # Only handle events this profiler was configured for (None = all events).
    if (self.events is None) or (event in self.events):
        frame_info = inspect.getframeinfo(frame)
        # Codepoint key: (filename, function name, line number).
        cp = (frame_info[0], frame_info[2], frame_info[1])
        if self.codepoint_included(cp):
            objects = muppy.get_objects()
            size = muppy.get_size(objects)
            if cp not in self.memories:
                # New codepoint: [hit count, min size, max size, <unused>].
                self.memories[cp] = [0, 0, 0, 0]
                self.memories[cp][0] = 1
                self.memories[cp][1] = size
                self.memories[cp][2] = size
            else:
                # Seen before: bump the hit count and widen the min/max range.
                self.memories[cp][0] += 1
                if self.memories[cp][1] > size:
                    self.memories[cp][1] = size
                if self.memories[cp][2] < size:
                    self.memories[cp][2] = size
def create_instance(self, filename=None, data=None, name=None,
                    namespace=None, namespaces=None,
                    preprocess=False, profile_memory=0,
                    report_timing=False, clone=None):
    """
    Create a concrete instance of an abstract model, possibly using data
    read in from a file.

    Optional:
        filename: The name of a Pyomo Data File that will be used to load
            data into the model.
        data: A dictionary containing initialization data for the model to
            be used if there is no filename
        name: The name given to the model.
        namespace: A namespace used to select data.
        namespaces: A list of namespaces used to select data.
        preprocess: If True, then preprocess the constructed model.
        profile_memory: A number that indicates the profiling level.
        report_timing: Report timing statistics during construction.
        clone: Force a clone of the model if this is True.
    """
    # Already-constructed models cannot be instantiated again; warn and
    # return self unchanged.
    if self._constructed:
        logger.warning(
            "DEPRECATION WARNING: Cannot call Model.create_instance() on a concrete model."
        )
        return self

    if name is None:
        name = self.name
    # A filename, when supplied, takes the place of the data argument.
    if not filename is None:
        data = filename
    if data is None:
        data = {}
    #
    # Generate a warning if this is a concrete model but the filename is specified.
    # A concrete model is already constructed, so passing in a data file is a waste
    # of time.
    #
    # NOTE(review): `basestring` is a Python-2 name — presumably aliased at
    # module level for Python 3 compatibility; confirm.
    if self.is_constructed() and isinstance(filename, basestring):
        msg = "The filename=%s will not be loaded - supplied as an argument to the create_instance() method of a ConcreteModel instance with name=%s." % (
            filename, name)
        logger.warning(msg)
    #
    # If construction is deferred, then clone the model and
    #
    if not self._constructed:
        instance = self.clone()
        # The None namespace is always searched last as a fallback.
        if namespaces is None or len(namespaces) == 0:
            instance.load(data,
                          namespaces=[None],
                          profile_memory=profile_memory,
                          report_timing=report_timing)
        else:
            instance.load(data,
                          namespaces=list(namespaces) + [None],
                          profile_memory=profile_memory,
                          report_timing=report_timing)
    else:
        if clone:
            instance = self.clone()
        else:
            instance = self
    #
    # Preprocess the new model
    #
    if preprocess is True:
        print(
            " Model preprocessing during construction has been deprecated."
        )
    # NOTE(review): dead branch — `if False and ...` never runs; kept only as
    # a record of the deprecated preprocessing behavior.
    if False and preprocess is True:
        if report_timing is True:
            start_time = time.time()
        instance.preprocess()
        if report_timing is True:
            total_time = time.time() - start_time
            print(" %6.2f seconds required for preprocessing" % total_time)
        if (pympler_available is True) and (profile_memory >= 2):
            mem_used = muppy.get_size(muppy.get_objects())
            print(
                " Total memory = %d bytes following instance preprocessing"
                % mem_used)
            print("")
        if (pympler_available is True) and (profile_memory >= 2):
            print("")
            print(
                " Summary of objects following instance preprocessing"
            )
            post_preprocessing_summary = summary.summarize(
                muppy.get_objects())
            summary.print_(post_preprocessing_summary, limit=100)

    if not name is None:
        instance.name = name
    #
    # Indicate that the model is concrete/constructed
    #
    instance._constructed = True
    return instance
def process_results(data, instance=None, results=None, opt=None):
    """
    Process optimization results.

    Required:
        instance:   Problem instance.
        results:    Optimization results object.
        opt:        Optimizer object.
    """
    #
    # Announce this phase unless logging is quiet.  NOTE(review): start_time
    # is presumably a module-level timestamp from the start of the run.
    #
    if not data.options.runtime.logging == 'quiet':
        sys.stdout.write('[%8.2f] Processing results\n' %
                         (time.time() - start_time))
        sys.stdout.flush()
    #
    # Optionally echo the solver's log file to stdout.
    #
    if data.options.postsolve.print_logfile:
        print("")
        print("==========================================================")
        print("Solver Logfile: " + str(opt._log_file))
        print("==========================================================")
        print("")
        with open(opt._log_file, "r") as INPUT:
            for line in INPUT:
                sys.stdout.write(line)
        print("==========================================================")
        print("Solver Logfile - END")
        print("==========================================================")
    #
    try:
        # transform the results object into human-readable names.
        instance.solutions.store_to(results)
    except Exception:
        print("Problem updating solver results")
        raise
    #
    # When results are not displayed, write them to a file instead.
    #
    if not data.options.postsolve.show_results:
        if data.options.postsolve.save_results:
            results_file = data.options.postsolve.save_results
        elif data.options.postsolve.results_format == 'yaml':
            results_file = 'results.yml'
        else:
            results_file = 'results.json'
        results.write(filename=results_file,
                      format=data.options.postsolve.results_format)
        if not data.options.runtime.logging == 'quiet':
            print(" Number of solutions: " + str(len(results.solution)))
            if len(results.solution) > 0:
                print(" Solution Information")
                print(" Gap: " + str(results.solution[0].gap))
                print(" Status: " + str(results.solution[0].status))
                if len(results.solution[0].objective) == 1:
                    key = list(results.solution[0].objective.keys())[0]
                    print(" Function Value: " +
                          str(results.solution[0].objective[key]['Value']))
            print(" Solver results file: " + results_file)
    #
    #ep = ExtensionPoint(IPyomoScriptPrintResults)
    if data.options.postsolve.show_results:
        print("")
        results.write(num=1,
                      format=data.options.postsolve.results_format)
        print("")
    #
    # Optionally display a human-readable summary of the first solution.
    #
    if data.options.postsolve.summary:
        print("")
        print("==========================================================")
        print("Solution Summary")
        print("==========================================================")
        if len(results.solution(0).variable) > 0:
            print("")
            display(instance)
            print("")
        else:
            print("No solutions reported by solver.")
    #
    # Run the print/save extension plugins.
    #
    for ep in ExtensionPoint(IPyomoScriptPrintResults):
        ep.apply(options=data.options, instance=instance, results=results)
    #
    for ep in ExtensionPoint(IPyomoScriptSaveResults):
        ep.apply(options=data.options, instance=instance, results=results)
    #
    # Track the peak memory high-water mark when profiling is enabled.
    #
    if (pympler_available is True) and (data.options.runtime.profile_memory >= 1):
        global memory_data
        mem_used = muppy.get_size(muppy.get_objects())
        if mem_used > data.local.max_memory:
            data.local.max_memory = mem_used
        print(" Total memory = %d bytes following results processing" %
              mem_used)
def apply_optimizer(data, instance=None):
    """
    Perform optimization with a concrete instance

    Required:
        instance:   Problem instance.

    Returned:
        results:    Optimization results.
        opt:        Optimizer object.
    """
    # Announce this phase unless logging is quiet.
    if not data.options.runtime.logging == 'quiet':
        sys.stdout.write('[%8.2f] Applying solver\n' %
                         (time.time() - start_time))
        sys.stdout.flush()
    #
    # Create Solver and Perform Optimization
    #
    solver = data.options.solvers[0].solver_name
    if solver is None:
        raise ValueError("Problem constructing solver: no solver specified")

    if len(data.options.solvers[0].suffixes) > 0:
        for suffix_name in data.options.solvers[0].suffixes:
            if suffix_name[0] in ['"', "'"]:
                # BUG FIX: previously `suffix[1:-1]`, which referenced a name
                # not yet bound (NameError on the first iteration, a stale
                # Suffix object afterwards).  The intent is to strip quoting
                # from the suffix name itself.
                suffix_name = suffix_name[1:-1]
            # Don't redeclare the suffix if it already exists
            suffix = getattr(instance, suffix_name, None)
            if suffix is None:
                setattr(instance, suffix_name, Suffix(direction=Suffix.IMPORT))
            else:
                raise ValueError("Problem declaring solver suffix %s. A component "
                                 "with that name already exists on model %s."
                                 % (suffix_name, instance.name))

    # A zero time limit means "no limit".
    if getattr(data.options.solvers[0].options, 'timelimit', 0) == 0:
        data.options.solvers[0].options.timelimit = None
    #
    # Default results
    #
    results = None
    #
    # Figure out the type of solver manager
    #
    solver_mngr_name = None
    if data.options.solvers[0].manager is None:
        solver_mngr_name = 'serial'
    elif not data.options.solvers[0].manager in SolverManagerFactory.services():
        raise ValueError("Unknown solver manager %s"
                         % data.options.solvers[0].manager)
    else:
        solver_mngr_name = data.options.solvers[0].manager
    #
    # Create the solver manager
    #
    solver_mngr_kwds = {}
    if data.options.solvers[0].pyro_host is not None:
        solver_mngr_kwds['host'] = data.options.solvers[0].pyro_host
    if data.options.solvers[0].pyro_port is not None:
        solver_mngr_kwds['port'] = data.options.solvers[0].pyro_port
    with SolverManagerFactory(solver_mngr_name, **solver_mngr_kwds) as solver_mngr:
        if solver_mngr is None:
            msg = "Problem constructing solver manager '%s'"
            raise ValueError(msg % str(data.options.solvers[0].manager))
        #
        # Setup keywords for the solve
        #
        keywords = {}
        if (data.options.runtime.keep_files or
                data.options.postsolve.print_logfile):
            keywords['keepfiles'] = True
        if data.options.model.symbolic_solver_labels:
            keywords['symbolic_solver_labels'] = True
        if data.options.model.file_determinism != 1:
            keywords['file_determinism'] = data.options.model.file_determinism
        keywords['tee'] = data.options.runtime.stream_output
        keywords['timelimit'] = getattr(data.options.solvers[0].options,
                                        'timelimit', 0)
        #
        # Call the solver
        #
        if solver_mngr_name == 'serial':
            #
            # If we're running locally, then we create the optimizer and pass it into the
            # solver manager.
            #
            with SolverFactory(solver,
                               solver_io=data.options.solvers[0].io_format) as opt:
                if opt is None:
                    raise ValueError("Problem constructing solver `%s`"
                                     % str(solver))

                from pyomo.core.base.plugin import registered_callback
                for name in registered_callback:
                    opt.set_callback(name, registered_callback[name])

                if len(data.options.solvers[0].options) > 0:
                    opt.set_options(data.options.solvers[0].options)
                    #opt.set_options(" ".join("%s=%s" % (key, value)
                    #    for key, value in data.options.solvers[0].options.iteritems()
                    #    if not key == 'timelimit'))
                if not data.options.solvers[0].options_string is None:
                    opt.set_options(data.options.solvers[0].options_string)
                #
                # Use the solver manager to call the optimizer
                #
                results = solver_mngr.solve(instance, opt=opt, **keywords)
        else:
            #
            # Get the solver option arguments
            #
            if len(data.options.solvers[0].options) > 0 and \
                    not data.options.solvers[0].options_string is None:
                # If both 'options' and 'options_string' were specified, then create a
                # single options string that is passed to the solver.
                ostring = " ".join("%s=%s" % (key, value)
                                   for key, value in
                                   data.options.solvers[0].options.iteritems()
                                   if not value is None)
                keywords['options'] = \
                    ostring + ' ' + data.options.solvers[0].options_string
            elif len(data.options.solvers[0].options) > 0:
                keywords['options'] = data.options.solvers[0].options
            else:
                keywords['options'] = data.options.solvers[0].options_string
            #
            # If we're running remotely, then we pass the optimizer name to the solver
            # manager.
            #
            results = solver_mngr.solve(instance, opt=solver, **keywords)

    # Track the peak memory high-water mark when profiling is enabled.
    if (pympler_available is True) and \
            (data.options.runtime.profile_memory >= 1):
        global memory_data
        mem_used = muppy.get_size(muppy.get_objects())
        if mem_used > data.local.max_memory:
            data.local.max_memory = mem_used
        print(" Total memory = %d bytes following optimization" % mem_used)

    return pyutilib.misc.Options(results=results, opt=solver, local=data.local)
def create_model(data):
    """
    Create instance of Pyomo model.

    Return:
        model:      Model object.
        instance:   Problem instance.
        symbol_map: Symbol map created when writing model to a file.
        filename:   Filename that a model instance was written to.
    """
    #
    # Announce this phase unless logging is quiet.
    #
    if not data.options.runtime.logging == 'quiet':
        sys.stdout.write('[%8.2f] Creating model\n' %
                         (time.time() - start_time))
        sys.stdout.flush()
    #
    # Seed the memory high-water mark when memory profiling is enabled.
    #
    if (pympler_available is True) and (data.options.runtime.profile_memory >= 1):
        global memory_data
        mem_used = muppy.get_size(muppy.get_objects())
        data.local.max_memory = mem_used
        print(" Total memory = %d bytes prior to model construction" %
              mem_used)
    #
    # Find the Model objects
    #
    _models = {}
    for _name, _obj in iteritems(data.local.usermodel.__dict__):
        if isinstance(_obj, Model):
            _models[_name] = _obj
    model_name = data.options.model.object_name
    if len(_models) == 1:
        # Exactly one model in the user file: it must match any explicit name.
        _name = list(_models.keys())[0]
        if model_name is None:
            model_name = _name
        elif model_name != _name:
            msg = "Model '%s' is not defined in file '%s'!"
            raise SystemExit(msg % (model_name, data.options.model.filename))
    elif len(_models) > 1:
        # Multiple models: an explicit name is required and must resolve.
        if model_name is None:
            msg = "Multiple models defined in file '%s'!"
            raise SystemExit(msg % data.options.model.filename)
        elif not model_name in _models:
            msg = "Unknown model '%s' in file '%s'!"
            raise SystemExit(msg % (model_name, data.options.model.filename))

    ep = ExtensionPoint(IPyomoScriptCreateModel)

    if model_name is None:
        # No model object found: fall back to a pyomo_create_model plugin.
        if len(ep) == 0:
            msg = "A model is not defined and the 'pyomo_create_model' is not "\
                  "provided in module %s"
            raise SystemExit(msg % data.options.model.filename)
        elif len(ep) > 1:
            msg = 'Multiple model construction plugins have been registered in module %s!'
            raise SystemExit(msg % data.options.model.filename)
        else:
            model_options = data.options.model.options.value()
            model = ep.service().apply(
                options=pyutilib.misc.Container(*data.options),
                model_options=pyutilib.misc.Container(*model_options))
    else:
        if model_name not in _models:
            msg = "Model '%s' is not defined in file '%s'!"
            raise SystemExit(msg % (model_name, data.options.model.filename))
        model = _models[model_name]
        if model is None:
            msg = "'%s' object is 'None' in module %s"
            raise SystemExit(msg % (model_name, data.options.model.filename))
        elif len(ep) > 0:
            msg = "Model construction function 'create_model' defined in " \
                  "file '%s', but model is already constructed!"
            raise SystemExit(msg % data.options.model.filename)
    #
    # Print model
    #
    for ep in ExtensionPoint(IPyomoScriptPrintModel):
        ep.apply(options=data.options, model=model)
    #
    # Create Problem Instance
    #
    ep = ExtensionPoint(IPyomoScriptCreateDataPortal)
    if len(ep) > 1:
        msg = 'Multiple model data construction plugins have been registered!'
        raise SystemExit(msg)

    if len(ep) == 1:
        modeldata = ep.service().apply(options=data.options, model=model)
    else:
        modeldata = DataPortal()

    if model._constructed:
        #
        # TODO: use a better test for ConcreteModel
        #
        instance = model
    elif len(data.options.data.files) > 1:
        #
        # Load a list of *.dat files
        #
        for file in data.options.data.files:
            suffix = (file).split(".")[-1]
            if suffix != "dat":
                msg = 'When specifiying multiple data files, they must all ' \
                      'be *.dat files.  File specified: %s'
                raise SystemExit(msg % str(file))
            modeldata.load(filename=file, model=model)
        instance = model.create_instance(
            modeldata,
            namespaces=data.options.data.namespaces,
            profile_memory=data.options.runtime.profile_memory,
            report_timing=data.options.runtime.report_timing)
    elif len(data.options.data.files) == 1:
        #
        # Load a *.dat file or process a *.py data file
        #
        suffix = (data.options.data.files[0]).split(".")[-1].lower()
        if suffix == "dat":
            instance = model.create_instance(
                data.options.data.files[0],
                namespaces=data.options.data.namespaces,
                profile_memory=data.options.runtime.profile_memory,
                report_timing=data.options.runtime.report_timing)
        elif suffix == "py":
            # Python data file: it either supplies a `modeldata` object or
            # relies on a pyomo_create_dataportal plugin.
            userdata = pyutilib.misc.import_file(data.options.data.files[0],
                                                 clear_cache=True)
            if "modeldata" in dir(userdata):
                if len(ep) == 1:
                    msg = "Cannot apply 'pyomo_create_modeldata' and use the" \
                          " 'modeldata' object that is provided in the model"
                    raise SystemExit(msg)
                if userdata.modeldata is None:
                    msg = "'modeldata' object is 'None' in module %s"
                    raise SystemExit(msg % str(data.options.data.files[0]))
                modeldata = userdata.modeldata
            else:
                if len(ep) == 0:
                    msg = "Neither 'modeldata' nor 'pyomo_create_dataportal' " \
                          'is defined in module %s'
                    raise SystemExit(msg % str(data.options.data.files[0]))
            modeldata.read(model)
            instance = model.create_instance(
                modeldata,
                namespaces=data.options.data.namespaces,
                profile_memory=data.options.runtime.profile_memory,
                report_timing=data.options.runtime.report_timing)
        elif suffix == "yml" or suffix == 'yaml':
            try:
                import yaml
            except:
                msg = "Cannot apply load data from a YAML file: PyYaml is not installed"
                raise SystemExit(msg)
            # NOTE(review): yaml.load without an explicit Loader is unsafe on
            # untrusted files and deprecated in PyYAML >= 5.1 — consider
            # yaml.safe_load here.
            modeldata = yaml.load(open(data.options.data.files[0]))
            instance = model.create_instance(
                modeldata,
                namespaces=data.options.data.namespaces,
                profile_memory=data.options.runtime.profile_memory,
                report_timing=data.options.runtime.report_timing)
        else:
            raise ValueError("Unknown data file type: " +
                             data.options.data.files[0])
    else:
        # No data files at all: construct from the (possibly empty) DataPortal.
        instance = model.create_instance(
            modeldata,
            namespaces=data.options.data.namespaces,
            profile_memory=data.options.runtime.profile_memory,
            report_timing=data.options.runtime.report_timing)
    #
    # Apply instance-modification plugins, timing each one if requested.
    #
    modify_start_time = time.time()
    for ep in ExtensionPoint(IPyomoScriptModifyInstance):
        if data.options.runtime.report_timing is True:
            tick = time.time()
        ep.apply(options=data.options, model=model, instance=instance)
        if data.options.runtime.report_timing is True:
            print(" %6.2f seconds to apply %s" % (time.time() - tick, type(ep)))
            tick = time.time()
    #
    # Apply any requested model transformations in order.
    #
    for transformation in data.options.transform:
        with TransformationFactory(transformation) as xfrm:
            instance = xfrm.create_using(instance)
            if instance is None:
                raise SystemExit("Unexpected error while applying "
                                 "transformation '%s'" % transformation)
    #
    if data.options.runtime.report_timing is True:
        total_time = time.time() - modify_start_time
        print(" %6.2f seconds required for problem transformations" %
              total_time)

    if logger.isEnabledFor(logging.DEBUG):
        print("MODEL INSTANCE")
        instance.pprint()
        print("")

    for ep in ExtensionPoint(IPyomoScriptPrintInstance):
        ep.apply(options=data.options, instance=instance)

    #
    # Optionally write the instance to a problem file.
    #
    fname = None
    smap_id = None
    if not data.options.model.save_file is None:
        if data.options.runtime.report_timing is True:
            write_start_time = time.time()
        if data.options.model.save_file == True:
            # save_file == True: derive the output name from the data file.
            if data.local.model_format in (ProblemFormat.cpxlp,
                                           ProblemFormat.lpxlp):
                fname = (data.options.data.files[0])[:-3] + 'lp'
            else:
                fname = (data.options.data.files[0])[:-3] + str(
                    data.local.model_format)
            format = data.local.model_format
        else:
            fname = data.options.model.save_file
            format = data.options.model.save_format
        io_options = {}
        if data.options.model.symbolic_solver_labels:
            io_options['symbolic_solver_labels'] = True
        if data.options.model.file_determinism != 1:
            io_options['file_determinism'] = \
                data.options.model.file_determinism
        (fname, smap_id) = instance.write(filename=fname,
                                          format=format,
                                          io_options=io_options)
        if not data.options.runtime.logging == 'quiet':
            if not os.path.exists(fname):
                print("ERROR: file " + fname + " has not been created!")
            else:
                print("Model written to file '" + str(fname) + "'")
        if data.options.runtime.report_timing is True:
            total_time = time.time() - write_start_time
            print(" %6.2f seconds required to write file" % total_time)
        if (pympler_available is True) and (data.options.runtime.profile_memory >= 2):
            print("")
            print(" Summary of objects following file output")
            post_file_output_summary = summary.summarize(muppy.get_objects())
            summary.print_(post_file_output_summary, limit=100)
            print("")

    for ep in ExtensionPoint(IPyomoScriptSaveInstance):
        ep.apply(options=data.options, instance=instance)

    # Track the peak memory high-water mark when profiling is enabled.
    if (pympler_available is True) and (data.options.runtime.profile_memory >= 1):
        mem_used = muppy.get_size(muppy.get_objects())
        if mem_used > data.local.max_memory:
            data.local.max_memory = mem_used
        print(" Total memory = %d bytes following Pyomo instance creation" %
              mem_used)

    return pyutilib.misc.Options(model=model,
                                 instance=instance,
                                 smap_id=smap_id,
                                 filename=fname,
                                 local=data.local)
def process_results(data, instance=None, results=None, opt=None):
    """
    Process optimization results: echo the solver log, store the solution
    back onto the instance, write/print the results object, and optionally
    display a solution summary and fire post-processing plugins.

    Required:
        instance:   Problem instance.
        results:    Optimization results object.
        opt:        Optimizer object.
    """
    #
    # Progress message (suppressed in 'quiet' mode); `start_time` is a
    # module-level timestamp set when the script started.
    #
    if not data.options.runtime.logging == 'quiet':
        sys.stdout.write('[%8.2f] Processing results\n' % (time.time()-start_time))
        sys.stdout.flush()
    #
    # Echo the solver log file verbatim, if requested.
    #
    if data.options.postsolve.print_logfile:
        print("")
        print("==========================================================")
        print("Solver Logfile:",opt._log_file)
        print("==========================================================")
        print("")
        with open(opt._log_file, "r") as INPUT:
            for line in INPUT:
                # NOTE(review): `print(line,)` looks like a Python-2 leftover;
                # under Python 3 each log line is printed with a doubled
                # newline — confirm whether that is intended.
                print(line,)
    #
    try:
        # transform the results object into human-readable names.
        instance.solutions.store_to(results)
    except Exception:
        print("Problem updating solver results")
        raise
    #
    # When results are not being shown inline, persist them to a file
    # (explicit path, else results.yml/results.json by format).
    #
    if not data.options.postsolve.show_results:
        if data.options.postsolve.save_results:
            results_file = data.options.postsolve.save_results
        elif data.options.postsolve.results_format == 'yaml':
            results_file = 'results.yml'
        else:
            results_file = 'results.json'
        results.write(filename=results_file,
                      format=data.options.postsolve.results_format)
        if not data.options.runtime.logging == 'quiet':
            print(" Number of solutions: "+str(len(results.solution)))
            if len(results.solution) > 0:
                print(" Solution Information")
                print(" Gap: "+str(results.solution[0].gap))
                print(" Status: "+str(results.solution[0].status))
                if len(results.solution[0].objective) == 1:
                    key = list(results.solution[0].objective.keys())[0]
                    print(" Function Value: "+str(results.solution[0].objective[key]['Value']))
            print(" Solver results file: "+results_file)
    #
    #ep = ExtensionPoint(IPyomoScriptPrintResults)
    #
    # Print the first solution to stdout when requested.
    #
    if data.options.postsolve.show_results:
        print("")
        results.write(num=1,
                      format=data.options.postsolve.results_format)
        print("")
    #
    # Optional solution summary: display the instance only if the solver
    # actually reported a solution with variables.
    #
    if data.options.postsolve.summary:
        print("")
        print("==========================================================")
        print("Solution Summary")
        print("==========================================================")
        if len(results.solution(0).variable) > 0:
            print("")
            display(instance)
            print("")
        else:
            print("No solutions reported by solver.")
    #
    # Fire user plugins that print/save results.
    #
    for ep in ExtensionPoint(IPyomoScriptPrintResults):
        ep.apply( options=data.options, instance=instance, results=results )
    #
    for ep in ExtensionPoint(IPyomoScriptSaveResults):
        ep.apply( options=data.options, instance=instance, results=results )
    #
    # Track peak memory use via pympler, when profiling is enabled.
    #
    if (pympler_available is True) and (data.options.runtime.profile_memory >= 1):
        global memory_data
        mem_used = muppy.get_size(muppy.get_objects())
        if mem_used > data.local.max_memory:
            data.local.max_memory = mem_used
        print(" Total memory = %d bytes following results processing" % mem_used)
def get_ram_usage(self):
    """Return the total size of all tracked Python objects, in KiB.

    Uses pympler's muppy to enumerate live objects (including stack
    frames) and sum their sizes, then converts bytes to kibibytes
    with integer division.
    """
    live_objects = muppy.get_objects(include_frames=True)
    total_bytes = muppy.get_size(live_objects)
    return total_bytes // 1024
def create_model(data):
    """
    Create an instance of a Pyomo model from the user's model file and
    (optionally) data files, apply transformations, and optionally write
    the instance to a problem file.

    Return (as a pyutilib.misc.Options object):
        model:      Model object.
        instance:   Problem instance.
        smap_id:    Symbol map id created when writing model to a file.
        filename:   Filename that a model instance was written to.
        local:      The (possibly updated) data.local container.
    """
    #
    # Progress message; `start_time` is a module-level timestamp.
    #
    if not data.options.runtime.logging == 'quiet':
        sys.stdout.write('[%8.2f] Creating model\n' % (time.time()-start_time))
        sys.stdout.flush()
    #
    # Baseline memory measurement before construction (pympler).
    #
    if (pympler_available is True) and (data.options.runtime.profile_memory >= 1):
        global memory_data
        mem_used = muppy.get_size(muppy.get_objects())
        data.local.max_memory = mem_used
        print(" Total memory = %d bytes prior to model construction" % mem_used)
    #
    # Create Model: either a `model` object defined in the user's module,
    # or the single registered IPyomoScriptCreateModel plugin.
    #
    ep = ExtensionPoint(IPyomoScriptCreateModel)
    model_name = 'model'
    if data.options.model.object_name is not None:
        model_name = data.options.model.object_name

    if model_name in dir(data.local.usermodel):
        # Having both a model object and a construction plugin is ambiguous.
        if len(ep) > 0:
            msg = "Model construction function 'create_model' defined in " \
                  "file '%s', but model is already constructed!"
            raise SystemExit(msg % data.options.model.filename)
        model = getattr(data.local.usermodel, data.options.model.object_name)
        if model is None:
            msg = "'%s' object is 'None' in module %s"
            raise SystemExit(msg % (model_name, data.options.model.filename))
    else:
        if len(ep) == 0:
            msg = "Neither '%s' nor 'pyomo_create_model' are available in " \
                  'module %s'
            raise SystemExit(msg % ( model_name, data.options.model.filename ))
        elif len(ep) > 1:
            msg = 'Multiple model construction plugins have been registered!'
            raise SystemExit(msg)
        else:
            model_options = data.options.model.options.value()
            # NOTE(review): if model_options is None, the Container(*...)
            # call below will fail; the guard is commented out upstream —
            # confirm callers always supply options.
            #if model_options is None:
                #model_options = []
            model = ep.service().apply( options = pyutilib.misc.Container(*data.options),
                                        model_options=pyutilib.misc.Container(*model_options) )
    #
    for ep in ExtensionPoint(IPyomoScriptPrintModel):
        ep.apply( options=data.options, model=model )
    #
    # Create Problem Instance: build a DataPortal (possibly via plugin).
    #
    ep = ExtensionPoint(IPyomoScriptCreateDataPortal)
    if len(ep) > 1:
        msg = 'Multiple model data construction plugins have been registered!'
        raise SystemExit(msg)

    if len(ep) == 1:
        modeldata = ep.service().apply( options=data.options, model=model )
    else:
        modeldata = DataPortal()

    if model._constructed:
        #
        # TODO: use a better test for ConcreteModel
        #
        instance = model
    elif len(data.options.data.files) > 1:
        #
        # Load a list of *.dat files
        #
        for file in data.options.data.files:
            suffix = (file).split(".")[-1]
            if suffix != "dat":
                msg = 'When specifiying multiple data files, they must all ' \
                      'be *.dat files. File specified: %s'
                raise SystemExit(msg % str( file ))
            modeldata.load(filename=file, model=model)
        instance = model.create_instance(modeldata,
                                         namespaces=data.options.data.namespaces,
                                         profile_memory=data.options.runtime.profile_memory,
                                         report_timing=data.options.runtime.report_timing)
    elif len(data.options.data.files) == 1:
        #
        # Load a *.dat file or process a *.py data file
        #
        suffix = (data.options.data.files[0]).split(".")[-1].lower()
        if suffix == "dat":
            instance = model.create_instance(data.options.data.files[0],
                                             namespaces=data.options.data.namespaces,
                                             profile_memory=data.options.runtime.profile_memory,
                                             report_timing=data.options.runtime.report_timing)
        elif suffix == "py":
            # A *.py data file may either define a `modeldata` object or
            # rely on the pyomo_create_dataportal plugin — not both.
            userdata = pyutilib.misc.import_file(data.options.data.files[0],
                                                 clear_cache=True)
            if "modeldata" in dir(userdata):
                if len(ep) == 1:
                    msg = "Cannot apply 'pyomo_create_modeldata' and use the" \
                          " 'modeldata' object that is provided in the model"
                    raise SystemExit(msg)
                if userdata.modeldata is None:
                    msg = "'modeldata' object is 'None' in module %s"
                    raise SystemExit(msg % str( data.options.data.files[0] ))
                modeldata=userdata.modeldata
            else:
                if len(ep) == 0:
                    msg = "Neither 'modeldata' nor 'pyomo_create_dataportal' " \
                          'is defined in module %s'
                    raise SystemExit(msg % str( data.options.data.files[0] ))
            modeldata.read(model)
            instance = model.create_instance(modeldata,
                                             namespaces=data.options.data.namespaces,
                                             profile_memory=data.options.runtime.profile_memory,
                                             report_timing=data.options.runtime.report_timing)
        elif suffix == "yml" or suffix == 'yaml':
            try:
                import yaml
            except:
                msg = "Cannot apply load data from a YAML file: PyYaml is not installed"
                raise SystemExit(msg)
            modeldata = yaml.load(open(data.options.data.files[0]))
            instance = model.create_instance(modeldata,
                                             namespaces=data.options.data.namespaces,
                                             profile_memory=data.options.runtime.profile_memory,
                                             report_timing=data.options.runtime.report_timing)
        else:
            raise ValueError("Unknown data file type: "+data.options.data.files[0])
    else:
        # No data files at all: construct from the (possibly empty) DataPortal.
        instance = model.create_instance(modeldata,
                                         namespaces=data.options.data.namespaces,
                                         profile_memory=data.options.runtime.profile_memory,
                                         report_timing=data.options.runtime.report_timing)
    #
    # Fire instance-modification plugins, timing each one if requested.
    #
    modify_start_time = time.time()
    for ep in ExtensionPoint(IPyomoScriptModifyInstance):
        if data.options.runtime.report_timing is True:
            tick = time.time()
        ep.apply( options=data.options, model=model, instance=instance )
        if data.options.runtime.report_timing is True:
            print(" %6.2f seconds to apply %s" % (time.time() - tick, type(ep)))
            tick = time.time()
    #
    # Apply each requested model transformation in sequence.
    #
    for transformation in data.options.transform:
        with TransformationFactory(transformation) as xfrm:
            instance = xfrm.create_using(instance)
            if instance is None:
                raise SystemExit("Unexpected error while applying "
                                 "transformation '%s'" % transformation)
    #
    if data.options.runtime.report_timing is True:
        total_time = time.time() - modify_start_time
        print(" %6.2f seconds required for problem transformations" % total_time)

    if logger.isEnabledFor(logging.DEBUG):
        print("MODEL INSTANCE")
        instance.pprint()
        print("")

    for ep in ExtensionPoint(IPyomoScriptPrintInstance):
        ep.apply( options=data.options, instance=instance )

    #
    # Optionally write the instance to a problem file; the filename is
    # derived from the first data file when save_file is True.
    #
    fname=None
    smap_id=None
    if not data.options.model.save_file is None:

        if data.options.runtime.report_timing is True:
            write_start_time = time.time()

        if data.options.model.save_file == True:
            if data.local.model_format in (ProblemFormat.cpxlp, ProblemFormat.lpxlp):
                fname = (data.options.data.files[0])[:-3]+'lp'
            else:
                fname = (data.options.data.files[0])[:-3]+str(data.local.model_format)
            format=data.local.model_format
        else:
            fname = data.options.model.save_file
            format= data.options.model.save_format

        io_options = {}
        if data.options.model.symbolic_solver_labels:
            io_options['symbolic_solver_labels'] = True
        if data.options.model.file_determinism != 1:
            io_options['file_determinism'] = data.options.model.file_determinism
        (fname, smap_id) = instance.write(filename=fname,
                                          format=format,
                                          io_options=io_options)

        if not data.options.runtime.logging == 'quiet':
            if not os.path.exists(fname):
                print("ERROR: file "+fname+" has not been created!")
            else:
                print("Model written to file '"+str(fname)+"'")

        if data.options.runtime.report_timing is True:
            total_time = time.time() - write_start_time
            print(" %6.2f seconds required to write file" % total_time)

        if (pympler_available is True) and (data.options.runtime.profile_memory >= 2):
            print("")
            print(" Summary of objects following file output")
            post_file_output_summary = summary.summarize(muppy.get_objects())
            summary.print_(post_file_output_summary, limit=100)
            print("")

    for ep in ExtensionPoint(IPyomoScriptSaveInstance):
        ep.apply( options=data.options, instance=instance )

    # Update the running high-water mark for memory use.
    if (pympler_available is True) and (data.options.runtime.profile_memory >= 1):
        mem_used = muppy.get_size(muppy.get_objects())
        if mem_used > data.local.max_memory:
            data.local.max_memory = mem_used
        print(" Total memory = %d bytes following Pyomo instance creation" % mem_used)

    return pyutilib.misc.Options(
        model=model, instance=instance,
        smap_id=smap_id, filename=fname, local=data.local )
def _load_model_data(self, modeldata, namespaces, **kwds): """ Load declarations from a DataPortal object. """ # # As we are primarily generating objects here (and acyclic ones # at that), there is no need to run the GC until the entire # model is created. Simple reference-counting should be # sufficient to keep memory use under control. # with PauseGC() as pgc: # # Unlike the standard method in the pympler summary # module, the tracker doesn't print 0-byte entries to pad # out the limit. # profile_memory = kwds.get('profile_memory', 0) # # It is often useful to report timing results for various # activities during model construction. # report_timing = kwds.get('report_timing', False) if (pympler_available is True) and (profile_memory >= 2): mem_used = muppy.get_size(muppy.get_objects()) print("") print(" Total memory = %d bytes prior to model " "construction" % mem_used) if (pympler_available is True) and (profile_memory >= 3): gc.collect() mem_used = muppy.get_size(muppy.get_objects()) print(" Total memory = %d bytes prior to model " "construction (after garbage collection)" % mem_used) # # Do some error checking # for namespace in namespaces: if not namespace is None and not namespace in modeldata._data: msg = "Cannot access undefined namespace: '%s'" raise IOError(msg % namespace) # # Initialize each component in order. 
# if report_timing is True: import pyomo.core.base.expr as EXPR construction_start_time = time.time() for component_name, component in iteritems(self.component_map()): if component.type() is Model: continue if report_timing is True: start_time = time.time() clone_counters = EXPR.generate_expression.clone_counter self._initialize_component(modeldata, namespaces, component_name, profile_memory) if report_timing is True: total_time = time.time() - start_time if isinstance(component, IndexedComponent): clen = len(component) else: assert isinstance(component, Component) clen = 1 print(" %%6.%df seconds required to construct component=%s; %d indicies total" \ % (total_time>=0.005 and 2 or 0, component_name, clen) \ % total_time) tmp_clone_counters = EXPR.generate_expression.clone_counter if clone_counters != tmp_clone_counters: clone_counters = tmp_clone_counters print( " Cloning detected! (clone counters: %d)" % clone_counters) # Note: As is, connectors are expanded when using command-line pyomo but not calling model.create(...) in a Python script. # John says this has to do with extension points which are called from commandline but not when writing scripts. # Uncommenting the next two lines switches this (command-line fails because it tries to expand connectors twice) #connector_expander = ConnectorExpander() #connector_expander.apply(instance=self) if report_timing is True: total_construction_time = time.time() - construction_start_time print(" %6.2f seconds required to construct instance=%s" % (total_construction_time, self.name)) if (pympler_available is True) and (profile_memory >= 2): print("") print( " Summary of objects following instance construction") post_construction_summary = summary.summarize( muppy.get_objects()) summary.print_(post_construction_summary, limit=100) print("")
def create_instance(self, filename=None, data=None, name=None,
                    namespace=None, namespaces=None,
                    profile_memory=0, report_timing=False,
                    **kwds):
    """
    Create a concrete instance of an abstract model, possibly using
    data read in from a file.

    Optional:
        filename:           The name of a Pyomo Data File that will be used
                            to load data into the model.
        data:               A dictionary containing initialization data for
                            the model to be used if there is no filename
        name:               The name given to the model.
        namespace:          A namespace used to select data.
        namespaces:         A list of namespaces used to select data.
        profile_memory:     A number that indicates the profiling level.
        report_timing:      Report timing statistics during construction.
    """
    #
    # Generate a warning if this is a concrete model but the
    # filename is specified. A concrete model is already
    # constructed, so passing in a data file is a waste of time.
    #
    if self.is_constructed() and isinstance(filename, string_types):
        msg = "The filename=%s will not be loaded - supplied as an " \
              "argument to the create_instance() method of a "\
              "concrete instance with name=%s." % (filename, name)
        logger.warning(msg)

    # Deprecated keyword arguments are popped (so they do not reach the
    # "unrecognized kwds" check below) and warned about.
    if 'clone' in kwds:
        kwds.pop('clone')
        logger.warning(
            """DEPRECATION WARNING: Model.create_instance() no longer accepts the 'clone' argument: the base abstract model is always cloned.""")
    if 'preprocess' in kwds:
        kwds.pop('preprocess')
        logger.warning(
            """DEPRECATION WARNING: Model.create_instance() no longer accepts the 'preprocess' argument: preprocessing is always deferred to when the model is sent to the solver""")
    if kwds:
        msg = \
            """Model.create_instance() passed the following unrecognized keyword arguments (which have been ignored):"""
        for k in kwds:
            msg = msg + "\n '%s'" % (k, )
        logger.error(msg)

    # Calling create_instance on an already-constructed model is
    # deprecated; just return a clone.
    if self.is_constructed():
        logger.warning(
            """DEPRECATION WARNING: Cannot call Model.create_instance() on a constructed model; returning a clone of the current model instance.""")
        return self.clone()

    if name is None:
        name = self.name
    if filename is not None:
        if data is not None:
            logger.warning(
                "Model.create_instance() passed both 'filename' "
                "and 'data' keyword arguments. Ignoring the "
                "'data' argument")
        data = filename
    if data is None:
        data = {}
    #
    # Clone the model and load the data
    #
    instance = self.clone()

    if name is not None:
        instance._name = name

    # If someone passed a rule for creating the instance, fire the
    # rule before constructing the components.
    if instance._rule is not None:
        instance._rule(instance)

    # Build the namespace search order: caller-supplied namespaces first,
    # then the single `namespace`, always ending with None (the default).
    if namespaces:
        _namespaces = list(namespaces)
    else:
        _namespaces = []
    if namespace is not None:
        _namespaces.append(namespace)
    if None not in _namespaces:
        _namespaces.append(None)

    instance.load(data,
                  namespaces=_namespaces,
                  profile_memory=profile_memory,
                  report_timing=report_timing)

    #
    # Preprocess the new model
    #
    # NOTE(review): this whole branch is dead (`False and ...` always
    # short-circuits) and `preprocess` is not defined in this version of
    # the method — it is never evaluated only because of the short-circuit.
    if False and preprocess is True:

        if report_timing is True:
            start_time = time.time()

        instance.preprocess()

        if report_timing is True:
            total_time = time.time() - start_time
            print(" %6.2f seconds required for preprocessing" % total_time)

        if (pympler_available is True) and (profile_memory >= 2):
            mem_used = muppy.get_size(muppy.get_objects())
            print(" Total memory = %d bytes following instance preprocessing" % mem_used)
            print("")

    if (pympler_available is True) and (profile_memory >= 2):
        print("")
        print(" Summary of objects following instance preprocessing")
        post_preprocessing_summary = summary.summarize(muppy.get_objects())
        summary.print_(post_preprocessing_summary, limit=100)

    #
    # Indicate that the model is concrete/constructed
    #
    instance._constructed = True
    return instance
def apply_optimizer(data, instance=None):
    """
    Perform optimization with a concrete instance.

    Required:
        instance:   Problem instance.

    Returned (as a pyutilib.misc.Options object):
        results:    Optimization results.
        opt:        Optimizer (solver name) object.
        local:      The (possibly updated) data.local container.
    """
    #
    # Progress message; `start_time` is a module-level timestamp.
    #
    if not data.options.runtime.logging == 'quiet':
        sys.stdout.write('[%8.2f] Applying solver\n' % (time.time()-start_time))
        sys.stdout.flush()
    #
    #
    # Create Solver and Perform Optimization
    #
    solver = data.options.solvers[0].solver_name
    if solver is None:
        raise ValueError("Problem constructing solver: no solver specified")

    # Declare any requested import suffixes on the instance, stripping
    # surrounding quotes from the suffix name if present.
    if len(data.options.solvers[0].suffixes) > 0:
        for suffix_name in data.options.solvers[0].suffixes:
            if suffix_name[0] in ['"',"'"]:
                # BUG FIX: was `suffix_name = suffix[1:-1]`, which raised
                # NameError (`suffix` undefined here) for quoted names.
                suffix_name = suffix_name[1:-1]
            # Don't redeclare the suffix if it already exists
            suffix = getattr(instance, suffix_name, None)
            if suffix is None:
                setattr(instance, suffix_name, Suffix(direction=Suffix.IMPORT))
            else:
                raise ValueError("Problem declaring solver suffix %s. A component "\
                                 "with that name already exists on model %s."
                                 % (suffix_name, instance.name))

    # A timelimit of 0 means "no limit".
    if getattr(data.options.solvers[0].options, 'timelimit', 0) == 0:
        data.options.solvers[0].options.timelimit = None
    #
    # Default results
    #
    results = None
    #
    # Figure out the type of solver manager
    #
    solver_mngr_name = None
    if data.options.solvers[0].manager is None:
        solver_mngr_name = 'serial'
    elif not data.options.solvers[0].manager in SolverManagerFactory.services():
        raise ValueError("Unknown solver manager %s"
                         % data.options.solvers[0].manager)
    else:
        solver_mngr_name = data.options.solvers[0].manager
    #
    # Create the solver manager
    #
    solver_mngr_kwds = {}
    if data.options.solvers[0].pyro_host is not None:
        solver_mngr_kwds['host'] = data.options.solvers[0].pyro_host
    if data.options.solvers[0].pyro_port is not None:
        solver_mngr_kwds['port'] = data.options.solvers[0].pyro_port
    with SolverManagerFactory(solver_mngr_name, **solver_mngr_kwds) as solver_mngr:
        if solver_mngr is None:
            msg = "Problem constructing solver manager '%s'"
            raise ValueError(msg % str(data.options.solvers[0].manager))
        #
        # Setup keywords for the solve
        #
        keywords = {}
        if (data.options.runtime.keep_files or \
            data.options.postsolve.print_logfile):
            keywords['keepfiles'] = True
        if data.options.model.symbolic_solver_labels:
            keywords['symbolic_solver_labels'] = True
        if data.options.model.file_determinism != 1:
            keywords['file_determinism'] = data.options.model.file_determinism
        keywords['tee'] = data.options.runtime.stream_output
        keywords['timelimit'] = getattr(data.options.solvers[0].options, 'timelimit', 0)
        #
        # Call the solver
        #
        if solver_mngr_name == 'serial':
            #
            # If we're running locally, then we create the optimizer and pass it into the
            # solver manager.
            #
            with SolverFactory(solver, solver_io=data.options.solvers[0].io_format) as opt:
                if opt is None:
                    raise ValueError("Problem constructing solver `%s`" % str(solver))

                from pyomo.core.base.plugin import registered_callback
                for name in registered_callback:
                    opt.set_callback(name, registered_callback[name])

                if len(data.options.solvers[0].options) > 0:
                    opt.set_options(data.options.solvers[0].options)
                if not data.options.solvers[0].options_string is None:
                    opt.set_options(data.options.solvers[0].options_string)
                #
                # Use the solver manager to call the optimizer
                #
                results = solver_mngr.solve(instance, opt=opt, **keywords)
        else:
            #
            # Get the solver option arguments
            #
            if len(data.options.solvers[0].options) > 0 and not data.options.solvers[0].options_string is None:
                # If both 'options' and 'options_string' were specified, then create a
                # single options string that is passed to the solver.
                ostring = " ".join("%s=%s" % (key, value)
                                   for key, value in data.options.solvers[0].options.iteritems()
                                   if not value is None)
                keywords['options'] = ostring + ' ' + data.options.solvers[0].options_string
            elif len(data.options.solvers[0].options) > 0:
                keywords['options'] = data.options.solvers[0].options
            else:
                keywords['options'] = data.options.solvers[0].options_string
            #
            # If we're running remotely, then we pass the optimizer name to the solver
            # manager.
            #
            results = solver_mngr.solve(instance, opt=solver, **keywords)

    # Track peak memory use via pympler, when profiling is enabled.
    if (pympler_available is True) and \
       (data.options.runtime.profile_memory >= 1):
        global memory_data
        mem_used = muppy.get_size(muppy.get_objects())
        if mem_used > data.local.max_memory:
            data.local.max_memory = mem_used
        print(" Total memory = %d bytes following optimization" % mem_used)

    return pyutilib.misc.Options(results=results, opt=solver, local=data.local)
def _load_model_data(self, modeldata, namespaces, **kwds):
    """
    Load declarations from a DataPortal object.

    Constructs every component on this block (skipping nested Models)
    from `modeldata`, honoring the given namespace search order.
    Recognized kwds: profile_memory (int, pympler reporting level).
    """
    #
    # As we are primarily generating objects here (and acyclic ones
    # at that), there is no need to run the GC until the entire
    # model is created. Simple reference-counting should be
    # sufficient to keep memory use under control.
    #
    with PauseGC() as pgc:
        #
        # Unlike the standard method in the pympler summary
        # module, the tracker doesn't print 0-byte entries to pad
        # out the limit.
        #
        profile_memory = kwds.get('profile_memory', 0)

        if (pympler_available is True) and (profile_memory >= 2):
            mem_used = muppy.get_size(muppy.get_objects())
            print("")
            print(" Total memory = %d bytes prior to model "
                  "construction" % mem_used)

        if (pympler_available is True) and (profile_memory >= 3):
            gc.collect()
            mem_used = muppy.get_size(muppy.get_objects())
            print(" Total memory = %d bytes prior to model "
                  "construction (after garbage collection)" % mem_used)

        #
        # Do some error checking
        #
        for namespace in namespaces:
            if not namespace is None and not namespace in modeldata._data:
                msg = "Cannot access undefined namespace: '%s'"
                raise IOError(msg % namespace)

        #
        # Initialize each component in order.
        #
        for component_name, component in iteritems(self.component_map()):
            if component.type() is Model:
                continue
            self._initialize_component(modeldata, namespaces,
                                       component_name, profile_memory)

        # BUG FIX: removed an `if False:` timing/clone-counter reporting
        # block that was unreachable dead code and referenced undefined
        # names (start_time, expr_common, clone_counter, clone_counters).

        # Note: As is, connectors are expanded when using command-line pyomo but not calling model.create(...) in a Python script.
        # John says this has to do with extension points which are called from commandline but not when writing scripts.
        # Uncommenting the next two lines switches this (command-line fails because it tries to expand connectors twice)
        #connector_expander = ConnectorExpander()
        #connector_expander.apply(instance=self)

        if (pympler_available is True) and (profile_memory >= 2):
            print("")
            print(" Summary of objects following instance construction")
            post_construction_summary = summary.summarize(muppy.get_objects())
            summary.print_(post_construction_summary, limit=100)
            print("")
def create_instance(self, filename=None, data=None, name=None,
                    namespace=None, namespaces=None,
                    profile_memory=0, report_timing=False,
                    **kwds):
    """
    Create a concrete instance of an abstract model, possibly using
    data read in from a file.

    Parameters
    ----------
    filename: `str`, optional
        The name of a Pyomo Data File that will be used to load data into
        the model.
    data: `dict`, optional
        A dictionary containing initialization data for the model to be
        used if there is no filename
    name: `str`, optional
        The name given to the model.
    namespace: `str`, optional
        A namespace used to select data.
    namespaces: `list`, optional
        A list of namespaces used to select data.
    profile_memory: `int`, optional
        A number that indicates the profiling level.
    report_timing: `bool`, optional
        Report timing statistics during construction.
    """
    #
    # Generate a warning if this is a concrete model but the
    # filename is specified. A concrete model is already
    # constructed, so passing in a data file is a waste of time.
    #
    if self.is_constructed() and isinstance(filename, string_types):
        msg = "The filename=%s will not be loaded - supplied as an " \
              "argument to the create_instance() method of a "\
              "concrete instance with name=%s." % (filename, name)
        logger.warning(msg)

    # Deprecated keyword arguments are popped and warned about.
    if 'clone' in kwds:
        kwds.pop('clone')
        deprecation_warning(
            "Model.create_instance() no longer accepts the 'clone' "
            "argument: the base abstract model is always cloned.")
    if 'preprocess' in kwds:
        kwds.pop('preprocess')
        # BUG FIX: message previously read "the preprocess'" (missing
        # opening quote around the argument name).
        deprecation_warning(
            "Model.create_instance() no longer accepts the 'preprocess' "
            "argument: preprocessing is always deferred to when the "
            "model is sent to the solver")
    if kwds:
        msg = \
            """Model.create_instance() passed the following unrecognized keyword arguments (which have been ignored):"""
        for k in kwds:
            msg = msg + "\n '%s'" % (k,)
        logger.error(msg)

    # Calling create_instance on an already-constructed model is
    # deprecated; just return a clone.
    if self.is_constructed():
        deprecation_warning(
            "Cannot call Model.create_instance() on a constructed "
            "model; returning a clone of the current model instance.")
        return self.clone()

    if report_timing:
        pyomo.common.timing.report_timing()

    if name is None:
        name = self.name
    if filename is not None:
        if data is not None:
            logger.warning("Model.create_instance() passed both 'filename' "
                           "and 'data' keyword arguments. Ignoring the "
                           "'data' argument")
        data = filename
    if data is None:
        data = {}

    #
    # Clone the model and load the data
    #
    instance = self.clone()

    if name is not None:
        instance._name = name

    # If someone passed a rule for creating the instance, fire the
    # rule before constructing the components.
    if instance._rule is not None:
        instance._rule(instance)

    # Build the namespace search order: caller-supplied namespaces first,
    # then the single `namespace`, always ending with None (the default).
    if namespaces:
        _namespaces = list(namespaces)
    else:
        _namespaces = []
    if namespace is not None:
        _namespaces.append(namespace)
    if None not in _namespaces:
        _namespaces.append(None)

    instance.load(data,
                  namespaces=_namespaces,
                  profile_memory=profile_memory)

    # BUG FIX: removed an unreachable `if False and preprocess is True:`
    # preprocessing block — it was dead code and referenced the undefined
    # name `preprocess` (no longer a parameter of this method).

    if (pympler_available is True) and (profile_memory >= 2):
        print("")
        print(" Summary of objects following instance preprocessing")
        post_preprocessing_summary = summary.summarize(muppy.get_objects())
        summary.print_(post_preprocessing_summary, limit=100)

    #
    # Indicate that the model is concrete/constructed
    #
    instance._constructed = True
    #
    # Change this class from "Abstract" to "Concrete". It is
    # absolutely crazy that this is allowed in Python, but since the
    # AbstractModel and ConcreteModel are basically identical, we
    # can "reassign" the new concrete instance to be an instance of
    # ConcreteModel
    #
    instance.__class__ = ConcreteModel
    return instance
def create_instance(self, filename=None, data=None, name=None,
                    namespace=None, namespaces=None,
                    preprocess=False, profile_memory=0,
                    report_timing=False, clone=None):
    """
    Create a concrete instance of an abstract model, possibly using
    data read in from a file.

    Optional:
        filename:           The name of a Pyomo Data File that will be used to
                            load data into the model.
        data:               A dictionary containing initialization data for the
                            model to be used if there is no filename
        name:               The name given to the model.
        namespace:          A namespace used to select data.
        namespaces:         A list of namespaces used to select data.
        preprocess:         If True, then preprocess the constructed model.
        profile_memory:     A number that indicates the profiling level.
        report_timing:      Report timing statistics during construction.
        clone:              Force a clone of the model if this is True.
    """
    # NOTE(review): because of this early return, every `else` branch below
    # that handles an already-constructed model appears unreachable —
    # confirm before relying on that code.
    if self._constructed:
        logger.warning("DEPRECATION WARNING: Cannot call Model.create_instance() on a concrete model.")
        return self

    if name is None:
        name = self.name
    if not filename is None:
        data = filename
    if data is None:
        data = {}
    #
    # Generate a warning if this is a concrete model but the filename is specified.
    # A concrete model is already constructed, so passing in a data file is a waste
    # of time.
    #
    # NOTE(review): `basestring` is Python-2 only — this branch would raise
    # NameError on Python 3 if it were ever reached.
    if self.is_constructed() and isinstance(filename, basestring):
        msg = "The filename=%s will not be loaded - supplied as an argument to the create_instance() method of a ConcreteModel instance with name=%s." % (filename, name)
        logger.warning(msg)
    #
    # If construction is deferred, then clone the model and
    #
    if not self._constructed:
        instance = self.clone()

        # Namespace search order always ends with None (the default).
        if namespaces is None or len(namespaces) == 0:
            instance.load(data,
                          namespaces=[None],
                          profile_memory=profile_memory,
                          report_timing=report_timing)
        else:
            instance.load(data,
                          namespaces=list(namespaces)+[None],
                          profile_memory=profile_memory,
                          report_timing=report_timing)
    else:
        if clone:
            instance = self.clone()
        else:
            instance = self
    #
    # Preprocess the new model
    #
    if preprocess is True:
        print(" Model preprocessing during construction has been deprecated.")

    # Dead branch: `False and ...` always short-circuits, so preprocessing
    # is never performed here.
    if False and preprocess is True:

        if report_timing is True:
            start_time = time.time()

        instance.preprocess()

        if report_timing is True:
            total_time = time.time() - start_time
            print(" %6.2f seconds required for preprocessing" % total_time)

        if (pympler_available is True) and (profile_memory >= 2):
            mem_used = muppy.get_size(muppy.get_objects())
            print(" Total memory = %d bytes following instance preprocessing" % mem_used)
            print("")

    if (pympler_available is True) and (profile_memory >= 2):
        print("")
        print(" Summary of objects following instance preprocessing")
        post_preprocessing_summary = summary.summarize(muppy.get_objects())
        summary.print_(post_preprocessing_summary, limit=100)

    if not name is None:
        instance.name=name
    #
    # Indicate that the model is concrete/constructed
    #
    instance._constructed = True
    return instance