def launch_command(command, options, cmd_args=None, cmd_kwds=None,
                   error_label="", disable_gc=False, profile_count=0,
                   log_level=logging.INFO, traceback=False):
    """Run ``command(options, *cmd_args, **cmd_kwds)`` in a controlled
    environment: temporarily set the logger level, optionally pause the
    garbage collector, optionally profile the call, and label common
    exception types on stderr.

    Parameters
    ----------
    command : callable
        Entry point to invoke; its return value becomes this function's
        return code.
    options : object
        First positional argument passed to *command*.  If it is a
        PySPConfigBlock, ``check_usage(error=False)`` is run before
        returning.
    cmd_args : tuple, optional
        Extra positional arguments for *command* (defaults to ``()``).
    cmd_kwds : dict, optional
        Extra keyword arguments for *command* (defaults to ``{}``).
    error_label : str
        Prefix prepended to the error-category line written to stderr.
    disable_gc : bool
        When True, the garbage collector is paused around the call.
    profile_count : int
        When positive, run under cProfile/profile and print this many
        entries from several pstats sort orders.
    log_level : int
        Logger level in effect while *command* runs.
    traceback : bool
        When True, let exceptions propagate unmodified so the caller
        sees the full traceback.  NOTE: this parameter shadows the
        stdlib ``traceback`` module inside this function; the module
        must not be referenced by this name here.

    Returns
    -------
    The command's return value, or 1 when an exception was labeled on
    stderr and suppressed.
    """
    # This is not the *effective* level, but the level on the current
    # logger.  We restore it before this function exits.
    prev_log_level = logger.level
    logger.setLevel(log_level)
    if cmd_args is None:
        cmd_args = ()
    if cmd_kwds is None:
        cmd_kwds = {}
    # Ordered like the original except-clause chain: the first matching
    # class determines the label written to stderr.
    labeled_exceptions = (
        (ValueError, "VALUE ERROR:\n"),
        (KeyError, "KEY ERROR:\n"),
        (TypeError, "TYPE ERROR:\n"),
        (NameError, "NAME ERROR:\n"),
        (IOError, "IO ERROR:\n"),
        (ConverterError, "CONVERTER ERROR:\n"),
        (ApplicationError, "APPLICATION ERROR:\n"),
        (RuntimeError, "RUN-TIME ERROR:\n"),
    )
    #
    # Control the garbage collector - more critical than I would like
    # at the moment.
    #
    with PauseGC(disable_gc):
        #
        # Run command - precise invocation depends on whether we want
        # profiling output, traceback, etc.
        #
        rc = 0
        if profile_count > 0:
            # Defer import of profiling packages until we know that
            # they are needed
            try:
                try:
                    import cProfile as profile
                except ImportError:
                    import profile
                import pstats
            except ImportError:
                configure_loggers(shutdown=True)
                raise ValueError(
                    "Cannot use the 'profile' option: the Python "
                    "'profile' or 'pstats' package cannot be imported!")
            #
            # Call the main routine with profiling, then print several
            # sorted views of the collected statistics.
            #
            try:
                tfile = TempfileManager.create_tempfile(suffix=".profile")
                tmp = profile.runctx(
                    'command(options, *cmd_args, **cmd_kwds)',
                    globals(), locals(), tfile)
                p = pstats.Stats(tfile).strip_dirs()
                p.sort_stats('time', 'cumulative')
                p = p.print_stats(profile_count)
                p.print_callers(profile_count)
                p.print_callees(profile_count)
                p = p.sort_stats('cumulative', 'calls')
                p.print_stats(profile_count)
                p.print_callers(profile_count)
                p.print_callees(profile_count)
                p = p.sort_stats('calls')
                p.print_stats(profile_count)
                p.print_callers(profile_count)
                p.print_callees(profile_count)
                TempfileManager.clear_tempfiles()
                rc = tmp
            finally:
                logger.setLevel(prev_log_level)
        elif traceback:
            #
            # Call the main routine without profiling, letting any
            # exception propagate with its original traceback.
            #
            try:
                rc = command(options, *cmd_args, **cmd_kwds)
            finally:
                logger.setLevel(prev_log_level)
        else:
            #
            # Call the main routine without profiling; label recognized
            # exception types on stderr, then suppress and return rc=1.
            #
            try:
                try:
                    rc = command(options, *cmd_args, **cmd_kwds)
                except BaseException as exc:
                    for etype, label in labeled_exceptions:
                        if isinstance(exc, etype):
                            sys.stderr.write(error_label + label)
                            break
                    else:
                        sys.stderr.write(
                            error_label +
                            "Encountered unhandled exception:\n")
                    # BUGFIX: the original attempted a fallback to
                    # traceback.print_exc(), but the boolean 'traceback'
                    # parameter shadows the module (AttributeError if
                    # reached) and its guard `len(sys.exc_info()) > 1`
                    # is always True (exc_info() is a 3-tuple), so the
                    # fallback was unreachable dead code and is removed.
                    sys.stderr.write(str(exc) + "\n")
                    raise
            except BaseException:
                sys.stderr.write("\n")
                sys.stderr.write(
                    "To obtain further information regarding the "
                    "source of the exception, use the "
                    "--traceback option\n")
                rc = 1
    #
    # TODO: Once we incorporate options registration into
    #       all of the PySP commands we will assume the
    #       options object is always a PySPConfigBlock
    #
    if isinstance(options, PySPConfigBlock):
        options.check_usage(error=False)
    logger.setLevel(prev_log_level)
    return rc
def _load_model_data(self, modeldata, namespaces, **kwds):
    """
    Load declarations from a DataPortal object.

    Parameters
    ----------
    modeldata : DataPortal
        Source of component data; its ``_data`` dict is keyed by
        namespace.
    namespaces : iterable
        Namespaces to load from; each non-None entry must be a key of
        ``modeldata._data`` or IOError is raised.
    **kwds :
        Only ``profile_memory`` (int, default 0) is consulted: at >= 2
        pympler memory summaries are printed, at >= 3 a gc.collect()
        is forced first.
    """
    #
    # As we are primarily generating objects here (and acyclic ones
    # at that), there is no need to run the GC until the entire
    # model is created.  Simple reference-counting should be
    # sufficient to keep memory use under control.
    #
    with PauseGC() as pgc:
        #
        # Unlike the standard method in the pympler summary
        # module, the tracker doesn't print 0-byte entries to pad
        # out the limit.
        #
        profile_memory = kwds.get('profile_memory', 0)
        if (pympler_available is True) and (profile_memory >= 2):
            mem_used = muppy.get_size(muppy.get_objects())
            print("")
            print(" Total memory = %d bytes prior to model "
                  "construction" % mem_used)
        if (pympler_available is True) and (profile_memory >= 3):
            gc.collect()
            mem_used = muppy.get_size(muppy.get_objects())
            print(" Total memory = %d bytes prior to model "
                  "construction (after garbage collection)" % mem_used)
        #
        # Do some error checking
        #
        for namespace in namespaces:
            if namespace is not None and namespace not in modeldata._data:
                msg = "Cannot access undefined namespace: '%s'"
                raise IOError(msg % namespace)
        #
        # Initialize each component in order.
        #
        for component_name, component in iteritems(self.component_map()):
            if component.type() is Model:
                continue
            self._initialize_component(modeldata, namespaces,
                                       component_name, profile_memory)
            # BUGFIX: an `if False:` timing/clone-counting block that
            # used to live here referenced undefined names
            # (`start_time`, `clone_counter`, and the typo
            # `clone_counters`) and would NameError if ever enabled;
            # it was unreachable dead code and has been removed.

        # Note: As is, connectors are expanded when using command-line
        # pyomo but not calling model.create(...) in a Python script.
        # John says this has to do with extension points which are
        # called from commandline but not when writing scripts.
        # Uncommenting the next two lines switches this (command-line
        # fails because it tries to expand connectors twice)
        #connector_expander = ConnectorExpander()
        #connector_expander.apply(instance=self)

        if (pympler_available is True) and (profile_memory >= 2):
            print("")
            print(" Summary of objects following instance construction")
            post_construction_summary = \
                summary.summarize(muppy.get_objects())
            summary.print_(post_construction_summary, limit=100)
            print("")
def __call__(self, model, output_filename, solver_capability, io_options):
    """Write *model* as an MPS file and return the output filename and
    the symbol map built during the write.

    ``io_options`` entries are consumed (popped) here; any key left
    over afterwards is reported as unrecognized via ValueError.
    """
    # Work on a copy so the caller's dictionary is left untouched;
    # they may be reusing it outside of this call.
    io_options = dict(io_options)
    pop = io_options.pop

    # Constraints whose body is fixed (contains no variables) may be
    # skipped entirely.
    skip_trivial_constraints = pop("skip_trivial_constraints", False)
    # Full Pyomo component names instead of shortened symbols
    # (slower, but useful for debugging).
    symbolic_solver_labels = pop("symbolic_solver_labels", False)
    output_fixed_variable_bounds = pop("output_fixed_variable_bounds",
                                       False)
    # When False, unused variables are omitted from the MPS file;
    # otherwise every variable appears in the bounds sections.
    include_all_variable_bounds = pop("include_all_variable_bounds",
                                      False)
    labeler = pop("labeler", None)
    # Determinism effort for the emitted file:
    #   0 : none
    #   1 : sort keys of indexed components (default)
    #   2 : sort keys AND sort names (over declaration order)
    file_determinism = pop("file_determinism", 1)
    # Optional user-defined orderings for constraint rows and
    # variable columns.
    row_order = pop("row_order", None)
    column_order = pop("column_order", None)
    # Force the ONE_VAR_CONSTANT variable into the objective even
    # when the objective's constant part is zero.
    force_objective_constant = pop("force_objective_constant", False)
    # Some solvers (e.g. GLPK, CBC) either error on or ignore the
    # OBJSENSE section, so allow it to be omitted.
    skip_objective_sense = pop("skip_objective_sense", False)

    if io_options:
        raise ValueError(
            "ProblemWriter_mps passed unrecognized io_options:\n\t" +
            "\n\t".join("%s = %s" % (k, v)
                        for k, v in iteritems(io_options)))

    if symbolic_solver_labels and (labeler is not None):
        raise ValueError("ProblemWriter_mps: Using both the "
                         "'symbolic_solver_labels' and 'labeler' "
                         "I/O options is forbidden")

    if symbolic_solver_labels:
        labeler = TextLabeler()
    elif labeler is None:
        labeler = NumericLabeler('x')

    # Start from a clean collection of referenced variable ids.
    self._referenced_variable_ids.clear()

    if output_filename is None:
        output_filename = model.name + ".mps"

    # Sorting creates a non-trivial number of short-lived temporaries,
    # all acyclic, so pause the GC: its overhead is non-trivial and
    # reference counting reclaims everything immediately anyway.
    with PauseGC():
        with open(output_filename, "w") as output_file:
            symbol_map = self._print_model_MPS(
                model, output_file, solver_capability, labeler,
                output_fixed_variable_bounds=output_fixed_variable_bounds,
                file_determinism=file_determinism,
                row_order=row_order,
                column_order=column_order,
                skip_trivial_constraints=skip_trivial_constraints,
                force_objective_constant=force_objective_constant,
                include_all_variable_bounds=include_all_variable_bounds,
                skip_objective_sense=skip_objective_sense)

    self._referenced_variable_ids.clear()

    return output_filename, symbol_map
def preprocess(self, preprocessor=None):
    """Apply the preprocess plugins defined by the user"""
    with PauseGC():
        # Default to the preprocessor named in this model's config.
        chosen = (self.config.preprocessor
                  if preprocessor is None else preprocessor)
        pyomo.common.PyomoAPIFactory(chosen)(self.config, model=self)
def __call__(self, model, output_filename, solver_capability, io_options):
    """
    Write a model in the GAMS modeling language format.

    Keyword Arguments
    -----------------
    output_filename: str
        Name of file to write GAMS model to. Optionally pass a file-
        like stream and the model will be written to that instead.
    io_options: dict
        - warmstart=True
            Warmstart by initializing model's variables to their values.
        - symbolic_solver_labels=False
            Use full Pyomo component names rather than shortened
            symbols (slower, but useful for debugging).
        - labeler=None
            Custom labeler. Incompatible with symbolic_solver_labels.
        - solver=None
            If None, GAMS will use default solver for model type.
        - mtype=None
            Model type. If None, will chose from lp, nlp, mip, and
            minlp.
        - add_options=None
            List of additional lines to write directly into model file
            before the solve statement. For model attributes,
            <model name> is GAMS_MODEL.
        - skip_trivial_constraints=False
            Skip writing constraints whose body section is fixed.
        - file_determinism=1
            | How much effort do we want to put into ensuring the
            | GAMS file is written deterministically for a Pyomo model:
            |     0 : None
            |     1 : sort keys of indexed components (default)
            |     2 : sort keys AND sort names (over declaration order)
        - put_results=None
            Filename for optionally writing solution values and
            marginals to (put_results).dat, and solver statuses to
            (put_results + 'stat').dat.
    """
    # Make sure not to modify the user's dictionary,
    # they may be reusing it outside of this call
    io_options = dict(io_options)

    # Use full Pyomo component names rather than
    # shortened symbols (slower, but useful for debugging).
    symbolic_solver_labels = io_options.pop("symbolic_solver_labels",
                                            False)

    # Custom labeler option. Incompatible with symbolic_solver_labels.
    labeler = io_options.pop("labeler", None)

    # If None, GAMS will use default solver for model type.
    solver = io_options.pop("solver", None)

    # If None, will chose from lp, nlp, mip, and minlp.
    mtype = io_options.pop("mtype", None)

    # Lines to add before solve statement.
    add_options = io_options.pop("add_options", None)

    # Skip writing constraints whose body section is
    # fixed (i.e., no variables)
    skip_trivial_constraints = \
        io_options.pop("skip_trivial_constraints", False)

    # How much effort do we want to put into ensuring the
    # GAMS file is written deterministically for a Pyomo model:
    #    0 : None
    #    1 : sort keys of indexed components (default)
    #    2 : sort keys AND sort names (over declaration order)
    file_determinism = io_options.pop("file_determinism", 1)
    sorter_map = {0: SortComponents.unsorted,
                  1: SortComponents.deterministic,
                  2: SortComponents.sortBoth}
    sort = sorter_map[file_determinism]

    # Warmstart by initializing model's variables to their values.
    warmstart = io_options.pop("warmstart", True)

    # Filename for optionally writing solution values and marginals
    # Set to True by GAMSSolver
    put_results = io_options.pop("put_results", None)

    if len(io_options):
        raise ValueError(
            "GAMS writer passed unrecognized io_options:\n\t" +
            "\n\t".join("%s = %s" % (k, v)
                        for k, v in iteritems(io_options)))

    if solver is not None and solver.upper() not in valid_solvers:
        raise ValueError(
            "GAMS writer passed unrecognized solver: %s" % solver)

    if mtype is not None:
        valid_mtypes = set([
            'lp', 'qcp', 'nlp', 'dnlp', 'rmip', 'mip', 'rmiqcp',
            'rminlp', 'miqcp', 'minlp', 'rmpec', 'mpec', 'mcp',
            'cns', 'emp'])
        if mtype.lower() not in valid_mtypes:
            raise ValueError("GAMS writer passed unrecognized "
                             "model type: %s" % mtype)
        if (solver is not None and
                mtype.upper() not in valid_solvers[solver.upper()]):
            raise ValueError("GAMS writer passed solver (%s) "
                             "unsuitable for given model type (%s)"
                             % (solver, mtype))

    if output_filename is None:
        output_filename = model.name + ".gms"

    if symbolic_solver_labels and (labeler is not None):
        raise ValueError("GAMS writer: Using both the "
                         "'symbolic_solver_labels' and 'labeler' "
                         "I/O options is forbidden")

    if symbolic_solver_labels:
        # BUGFIX: this assignment was truncated ("var_labeler =" with
        # no right-hand side -- a SyntaxError).  Both labelers share a
        # single ShortNameLabeler instance, mirroring the chained
        # assignment used in the custom-labeler branch below.
        var_labeler = con_labeler = ShortNameLabeler(63, '_')
    elif labeler is None:
        var_labeler = NumericLabeler('x')
        con_labeler = NumericLabeler('c')
    else:
        var_labeler = con_labeler = labeler

    var_list = []

    def var_recorder(obj):
        # Label the object and remember variable labels for later
        # declaration in the model file.
        ans = var_labeler(obj)
        try:
            if obj.is_variable_type():
                var_list.append(ans)
        except:
            # NOTE(review): deliberate best-effort -- objects without
            # is_variable_type() are silently skipped.  Left broad to
            # preserve existing behavior; narrowing to AttributeError
            # looks safe but should be confirmed against callers.
            pass
        return ans

    def var_label(obj):
        #if obj.is_fixed():
        #    return str(value(obj))
        return symbolMap.getSymbol(obj, var_recorder)

    symbolMap = SymbolMap(var_label)

    # when sorting, there are a non-trivial number of
    # temporary objects created. these all yield
    # non-circular references, so disable GC - the
    # overhead is non-trivial, and because references
    # are non-circular, everything will be collected
    # immediately anyway.
    with PauseGC() as pgc:
        try:
            if isinstance(output_filename, string_types):
                output_file = open(output_filename, "w")
            else:
                # Support passing of stream such as a StringIO
                # on which to write the model file
                output_file = output_filename
            self._write_model(
                model=model,
                output_file=output_file,
                solver_capability=solver_capability,
                var_list=var_list,
                var_label=var_label,
                symbolMap=symbolMap,
                con_labeler=con_labeler,
                sort=sort,
                skip_trivial_constraints=skip_trivial_constraints,
                warmstart=warmstart,
                solver=solver,
                mtype=mtype,
                add_options=add_options,
                put_results=put_results)
        finally:
            if isinstance(output_filename, string_types):
                output_file.close()

    return output_filename, symbolMap