def _generate_configuration():
    # defer the import until use (this eventually imports pkg_resources,
    # which is slow to import)
    from setuptools.extension import Extension

    # Try and find MC++.  Defer to the MCPP_ROOT if it is set;
    # otherwise, look in common locations for a mcpp directory.
    pathlist = [
        os.path.join(PYOMO_CONFIG_DIR, 'src'),
        this_file_dir(),
    ]
    if 'MCPP_ROOT' in os.environ:
        mcpp = os.environ['MCPP_ROOT']
    else:
        mcpp = find_dir('mcpp', cwd=True, pathlist=pathlist)
    if mcpp:
        print("Found MC++ at %s" % (mcpp,))
    else:
        raise RuntimeError(
            "Cannot identify the location of the MCPP source distribution")

    #
    # Configuration for this extension
    #
    project_dir = this_file_dir()
    sources = [
        os.path.join(project_dir, 'mcppInterface.cpp'),
    ]
    include_dirs = [
        os.path.join(mcpp, 'src', 'mc'),
        os.path.join(mcpp, 'src', '3rdparty', 'fadbad++'),
    ]
    mcpp_ext = Extension(
        "mcppInterface",
        sources=sources,
        language="c++",
        extra_compile_args=[],
        include_dirs=include_dirs,
        library_dirs=[],
        libraries=[],
    )
    package_config = {
        'name': 'mcpp',
        'packages': [],
        'ext_modules': [mcpp_ext],
    }
    return package_config
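# Hedged usage sketch (illustrative; '/path/to/mcpp' is a placeholder, not a
# real location): the returned dictionary is shaped so it can be passed
# straight through to setuptools.setup() to build the mcppInterface extension.
def _example_build_mcpp():  # hypothetical helper, not in the original source
    import os
    from setuptools import setup
    os.environ.setdefault('MCPP_ROOT', '/path/to/mcpp')  # placeholder path
    setup(**_generate_configuration())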
def download_install_module():
    """
    Downloads install_idaes_workshop_materials.py from pyomo.org
    """
    download_dir = futils.this_file_dir()
    download_dir = os.path.join(download_dir, '../../examples/workshops')
    download_dest = os.path.join(
        download_dir, 'install_idaes_workshop_materials.py')

    print(
        "\n\n"
        "#######################################################################################\n"
        "# Downloading: {}\n"
        "# to: {}\n"
        "#######################################################################################\n"
        "".format(_install_idaes_workshop_materials_url, download_dest))

    if not os.path.isdir(download_dir):
        raise NameError(
            'Unable to locate download directory: {}'.format(download_dir))
    try:
        downloader = dload.FileDownloader()
        downloader.set_destination_filename(download_dest)
        downloader.get_binary_file(_install_idaes_workshop_materials_url)
    except:
        print("\n\n***\nFailed to download: {}\n***".format(
            _install_idaes_workshop_materials_url))
        raise
    print('... download complete')
    return download_dest
def read_data(fname):
    dfile = os.path.join(this_file_dir(), fname)
    data = {}
    data["T"] = []      # T in K, col 0
    data["P"] = []      # P in kPa, col 1
    data["rho"] = []    # density kg/m3, col 2
    data["U"] = []      # internal energy kJ/kg, col 4
    data["H"] = []      # enthalpy kJ/kg, col 5
    data["S"] = []      # entropy kJ/kg/K, col 6
    data["cv"] = []
    data["cp"] = []
    data["w"] = []
    data["phase"] = []  # liquid, vapor, or supercritical, col 13

    with open(dfile, 'r') as csvfile:
        dat = csv.reader(csvfile, delimiter='\t', quotechar='"')
        for i in range(7):
            next(dat)  # skip header
        for row in dat:
            data["T"].append(float(row[0]))
            data["P"].append(float(row[1]) * 1000)
            data["rho"].append(float(row[2]))
            data["U"].append(float(row[4]) - 506.7791289)  # different reference state
            data["H"].append(float(row[5]) - 506.7791289)  # different reference state
            data["S"].append(float(row[6]) - 2.739003)     # different reference state
            data["cv"].append(float(row[7]))
            data["cp"].append(float(row[8]))
            data["w"].append(float(row[9]))
            data["phase"].append(row[13])
    return data
def read_data(fname, mw):
    dfile = os.path.join(this_file_dir(), fname)
    data = {
        "T": [],      # T in K, col 0
        "P": [],      # P in MPa, col 1 (converted to Pa below)
        "rho": [],    # density kg/m3, col 2
        "U": [],      # internal energy kJ/kg, col 4 (converted to J/mol below)
        "H": [],      # enthalpy kJ/kg, col 5 (converted to J/mol below)
        "S": [],      # entropy kJ/kg/K, col 6 (converted to J/mol/K below)
        "cv": [],
        "cp": [],
        "w": [],
        "phase": [],  # liquid, vapor, or supercritical, col 13
        "visc": [],
        "tc": [],
    }
    with open(dfile, 'r') as csvfile:
        dat = csv.reader(csvfile, delimiter='\t', quotechar='"')
        for i in range(7):
            next(dat)  # skip header
        for row in dat:
            data["T"].append(float(row[0]))
            data["P"].append(float(row[1]) * 1e6)
            data["rho"].append(float(row[2]))
            data["U"].append(float(row[4]) * mw * 1000)
            data["H"].append(float(row[5]) * mw * 1000)
            data["S"].append(float(row[6]) * mw * 1000)
            data["phase"].append(row[13])
    return data
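# Hedged usage sketch (illustrative only; the file name and molecular weight
# below are placeholders, not values from the original code):
def _example_read_data():  # hypothetical helper, not in the original source
    mw = 0.04401  # kg/mol (CO2), used purely for illustration
    data = read_data('prop_data.txt', mw)  # placeholder file name
    # after the conversions above, data["P"] is in Pa and data["H"] in J/mol
    return data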
def get_appsi_extension(in_setup=False, appsi_root=None):
    from pybind11.setup_helpers import Pybind11Extension

    if appsi_root is None:
        from pyomo.common.fileutils import this_file_dir
        appsi_root = this_file_dir()

    sources = [
        os.path.join(appsi_root, 'cmodel', 'src', file_)
        for file_ in (
            'interval.cpp',
            'expression.cpp',
            'common.cpp',
            'nl_writer.cpp',
            'lp_writer.cpp',
            'model_base.cpp',
            'fbbt_model.cpp',
            'cmodel_bindings.cpp',
        )
    ]

    if in_setup:
        package_name = 'pyomo.contrib.appsi.cmodel.appsi_cmodel'
    else:
        package_name = 'appsi_cmodel'

    return Pybind11Extension(
        package_name, sources, extra_compile_args=['-std=c++11'])
def get_appsi_extension(in_setup=False, appsi_root=None):
    from pybind11.setup_helpers import Pybind11Extension

    if appsi_root is None:
        from pyomo.common.fileutils import this_file_dir
        appsi_root = this_file_dir()

    sources = [
        os.path.join(appsi_root, 'cmodel', 'src', file_)
        for file_ in (
            'interval.cpp',
            'expression.cpp',
            'common.cpp',
            'nl_writer.cpp',
            'lp_writer.cpp',
            'model_base.cpp',
            'fbbt_model.cpp',
            'cmodel_bindings.cpp',
        )
    ]

    if in_setup:
        package_name = 'pyomo.contrib.appsi.cmodel.appsi_cmodel'
    else:
        package_name = 'appsi_cmodel'

    if sys.platform.startswith('win'):
        # Assume that builds on Windows will use MSVC
        # MSVC doesn't have a flag for c++11, use c++14
        extra_args = ['/std:c++14']
    else:
        # Assume all other platforms are GCC-like
        extra_args = ['-std=c++11']

    return Pybind11Extension(
        package_name, sources, extra_compile_args=extra_args)
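# Hedged usage sketch (illustrative; assumes pybind11 and setuptools are
# installed and that this is called from a setup.py):
def _example_setup_appsi():  # hypothetical helper, not in the original source
    from setuptools import setup
    setup(name='appsi_cmodel_demo',  # placeholder project metadata
          ext_modules=[get_appsi_extension(in_setup=True)])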
def test_exceptions3(self):
    current_dir = this_file_dir()
    os.makedirs(os.path.join(current_dir, 'minlplib', 'osil'))
    with self.assertRaises(ValueError):
        coramin.third_party.get_minlplib(
            download_dir=os.path.join(current_dir, 'minlplib', 'osil'))
    files = os.listdir(os.path.join(current_dir, 'minlplib', 'osil'))
    self.assertEqual(len(files), 0)
    os.rmdir(os.path.join(current_dir, 'minlplib', 'osil'))
    os.rmdir(os.path.join(current_dir, 'minlplib'))
def test_get_minlplib(self):
    current_dir = this_file_dir()
    coramin.third_party.get_minlplib(
        download_dir=os.path.join(current_dir, 'minlplib', 'osil'))
    files = os.listdir(os.path.join(current_dir, 'minlplib', 'osil'))
    self.assertEqual(len(files), 1751)
    for i in files:
        self.assertTrue(i.endswith('.osil'))
    for i in os.listdir(os.path.join(current_dir, 'minlplib', 'osil')):
        os.remove(os.path.join(current_dir, 'minlplib', 'osil', i))
    os.rmdir(os.path.join(current_dir, 'minlplib', 'osil'))
    os.rmdir(os.path.join(current_dir, 'minlplib'))
def test_json_load(m):
    fname = os.path.join(this_file_dir(), 'NGFC_flowsheet_init.json')
    ms.from_json(m, fname=fname)

    assert pyo.value(m.fs.cathode.ion_outlet.flow_mol[0]) == pytest.approx(
        1670.093, 1e-5)
    assert pyo.value(m.fs.reformer_recuperator.area) == pytest.approx(
        4512.56, 1e-5)
    assert pyo.value(m.fs.anode.heat_duty[0]) == pytest.approx(
        -672918626, 1e-5)
    assert pyo.value(m.fs.CO2_emissions) == pytest.approx(291.169, 1e-5)
    assert pyo.value(m.fs.net_power) == pytest.approx(659.879, 1e-5)
def read_data(self, fname, col):
    dfile = os.path.join(this_file_dir(), fname)
    cond = []  # Tuple (T [K], P [Pa], data); pressure in file is MPa
    with open(dfile, 'r') as csvfile:
        dat = csv.reader(csvfile, delimiter='\t', quotechar='"')
        next(dat)  # skip header
        for row in dat:
            try:
                x = float(row[col])
            except ValueError:
                # non-numeric entries (e.g. phase labels) are kept as strings
                x = row[col]
            cond.append((float(row[0]), float(row[1]) * 1e6, x))
    return cond
def test_exceptions1(self):
    current_dir = this_file_dir()
    filename = os.path.join(current_dir, 'instancedata.csv')
    with open(filename, 'w') as f:
        f.write('blah')
    with self.assertRaises(ValueError):
        coramin.third_party.get_minlplib_instancedata(
            target_filename=filename)
    with open(filename, 'r') as f:
        self.assertEqual(f.read(), 'blah')
    os.remove(filename)
def pfd_result(outfile, m, df):
    tags = {}
    for i in df.index:
        tags[i + "_F"] = df.loc[i, "Molar Flow (mol/s)"]
        tags[i + "_T"] = df.loc[i, "T (K)"]
        tags[i + "_P"] = df.loc[i, "P (Pa)"]
        tags[i + "_X"] = df.loc[i, "Vapor Fraction"]

    tags['FG_2_RH_Fm'] = value(m.fs.RH.side_2.properties_in[0].flow_mass)
    tags['FG_2_RH_T'] = value(m.fs.RH.side_2.properties_in[0].temperature)
    tags['FG_2_RH_P'] = value(m.fs.RH.side_2.properties_in[0].pressure)
    tags['FG_RH_2_Mix_Fm'] = value(m.fs.RH.side_2.properties_out[0].flow_mass)
    tags['FG_RH_2_Mix_T'] = value(
        m.fs.RH.side_2.properties_out[0].temperature)
    tags['FG_RH_2_Mix_P'] = value(m.fs.RH.side_2.properties_out[0].pressure)
    tags['FG_2_FSH_Fm'] = value(m.fs.FSH.side_2.properties_in[0].flow_mass)
    tags['FG_2_FSH_T'] = value(m.fs.FSH.side_2.properties_in[0].temperature)
    tags['FG_2_FSH_P'] = value(m.fs.FSH.side_2.properties_in[0].pressure)
    tags['FG_2_PrSH_Fm'] = value(m.fs.PrSH.side_2.properties_in[0].flow_mass)
    tags['FG_2_PrSH_T'] = value(
        m.fs.PrSH.side_2.properties_in[0].temperature)
    tags['FG_2_PrSH_P'] = value(m.fs.PrSH.side_2.properties_in[0].pressure)
    tags['FG_PrSH_2_Mix_Fm'] = value(
        m.fs.PrSH.side_2.properties_out[0].flow_mass)
    tags['FG_PrSH_2_Mix_T'] = value(
        m.fs.PrSH.side_2.properties_out[0].temperature)
    tags['FG_PrSH_2_Mix_P'] = value(
        m.fs.PrSH.side_2.properties_out[0].pressure)
    tags['FG_2_ECON_Fm'] = value(m.fs.ECON.side_2.properties_in[0].flow_mass)
    tags['FG_2_ECON_T'] = value(m.fs.ECON.side_2.properties_in[0].temperature)
    tags['FG_2_ECON_P'] = value(m.fs.ECON.side_2.properties_in[0].pressure)
    tags['FG_2_AIRPH_Fm'] = value(
        m.fs.ECON.side_2.properties_out[0].flow_mass)
    tags['FG_2_AIRPH_T'] = value(
        m.fs.ECON.side_2.properties_out[0].temperature)
    tags['FG_2_AIRPH_P'] = value(m.fs.ECON.side_2.properties_out[0].pressure)
    tags['FG_2_STACK_Fm'] = value(
        m.fs.ECON.side_2.properties_out[0].flow_mass)
    tags['FG_2_STACK_T'] = value(
        m.fs.ECON.side_2.properties_out[0].temperature)
    tags['FG_2_STACK_P'] = value(m.fs.ECON.side_2.properties_out[0].pressure)

    original_svg_file = os.path.join(this_file_dir(), "Boiler_scpc_PFD.svg")
    with open(original_svg_file, "r") as f:
        s = svg_tag(tags, f, outfile=outfile)
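# Hedged usage sketch (illustrative; assumes a solved boiler flowsheet m and
# a stream-table DataFrame df with the columns referenced above):
#
#     pfd_result("boiler_results.svg", m, df)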
def test_exceptions2(self):
    current_dir = this_file_dir()
    filename = os.path.join(current_dir, 'minlplib', 'instancedata.csv')
    coramin.third_party.get_minlplib_instancedata(target_filename=filename)

    with self.assertRaises(ValueError):
        cases = coramin.third_party.filter_minlplib_instances(
            instancedata_filename=filename,
            acceptable_probtype='foo')

    with self.assertRaises(ValueError):
        cases = coramin.third_party.filter_minlplib_instances(
            instancedata_filename=filename,
            acceptable_probtype=['QCQP', 'foo'])

    os.remove(filename)
    os.rmdir(os.path.dirname(filename))
def read_sat_data(fname, mw):
    dfile = os.path.join(this_file_dir(), fname)
    data = {}
    data["T"] = []     # T in K, col 0
    data["P"] = []     # P in MPa, col 1 (converted to Pa below)
    data["rhol"] = []  # liquid density kg/m3, col 2
    data["rhov"] = []  # vapor density kg/m3, col 14

    with open(dfile, 'r') as csvfile:
        dat = csv.reader(csvfile, delimiter='\t', quotechar='"')
        for i in range(7):
            next(dat)  # skip header
        for row in dat:
            data["T"].append(float(row[0]))
            data["P"].append(float(row[1]) * 1e6)
            data["rhol"].append(float(row[2]))
            data["rhov"].append(float(row[14]))
    return data
def read_data(fname, params):
    dfile = os.path.join(this_file_dir(), fname)
    # the data format is data[component][temperature][property]
    data = {
        "N2": {},
        "O2": {},
        "H2O": {},
        "CO2": {},
        "NO": {},
        "SO2": {},
    }
    with open(dfile, 'r') as csvfile:
        dat = csv.reader(csvfile, delimiter='\t')
        for i in range(7):
            next(dat)  # skip header
        for row in dat:
            data[row[4]][int(row[0])] = {}
            d = data[row[4]][int(row[0])]
            d["Cp"] = float(row[1])
            d["S"] = float(row[2])
            H = pyo.value(params.cp_mol_ig_comp_coeff_H[(row[4])] * 1000)
            d["H"] = float(row[3]) + H  # H = enthalpy of formation
            d["comp"] = {row[4]: 1.0}

    # Add a mixture to test
    data["mix1"] = {}
    for T in data["N2"]:
        data["mix1"][T] = {}
        d = data["mix1"][T]
        comp = {
            "N2": 0.2,
            "O2": 0.2,
            "H2O": 0.2,
            "CO2": 0.2,
            "NO": 0.1,
            "SO2": 0.1,
        }
        d["Cp"] = sum(data[i][T]["Cp"] * comp[i] for i in comp)
        d["H"] = sum(data[i][T]["H"] * comp[i] for i in comp)
        d["S"] = sum(
            (data[i][T]["S"] + 8.314 * log(comp[i])) * comp[i] for i in comp)
        d["comp"] = comp
    return data
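# Note on the mixture entropy above (added for clarity, not in the original
# source): for an ideal-gas mixture the partial molar entropy of species i
# gains a mixing term, so
#     S_mix = sum_i x_i * (S_i + R * ln(x_i)),  with R = 8.314 J/mol/K,
# which is exactly the sum computed for d["S"].  Cp and H mix linearly
# because ideal-gas heat capacity and enthalpy have no mixing contribution.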
def write_pfd_results(filename, tags, tag_format):
    """
    Write simulation results in a template PFD in svg format and save as
    filename.

    Args:
        filename: (str) file name for output
        tags: (dict) tag keys and expression values
        tag_format: (dict) tag keys and format string values

    Returns:
        None
    """
    with open(os.path.join(this_file_dir(), "gas_turbine.svg"), "r") as f:
        iutil.svg_tag(
            svg=f, tags=tags, outfile=filename, tag_format=tag_format)
def write_pfd_results(filename, tags, tag_format, infilename=None):
    """
    Write simulation results in a template PFD in svg format and save as
    filename.

    Args:
        filename: (str) file name for output
        tags: (dict) tag keys and expression values
        tag_format: (dict) tag keys and format string values
        infilename: input file name, if you want to use an alternative diagram

    Returns:
        None
    """
    if infilename is None:
        infilename = os.path.join(this_file_dir(), "gas_turbine.svg")
    with open(infilename, "r") as f:
        iutil.svg_tag(svg=f, tags=tags, outfile=filename,
                      tag_format=tag_format)
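# Hedged usage sketch (illustrative; the tag names, values, and file names
# are placeholders, not ones defined by the original module):
def _example_write_pfd():  # hypothetical helper, not in the original source
    tags = {'GT_power': 100.0}           # placeholder tag and value
    tag_format = {'GT_power': '{:.1f}'}  # placeholder format string
    write_pfd_results('results.svg', tags, tag_format)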
def run(self):
    basedir = os.path.abspath(os.path.curdir)
    if self.inplace:
        tmpdir = os.path.join(this_file_dir(), 'cmodel')
    else:
        tmpdir = os.path.abspath(tempfile.mkdtemp())
    print("Building in '%s'" % tmpdir)
    os.chdir(tmpdir)
    try:
        super(appsi_build_ext, self).run()
        if not self.inplace:
            library = glob.glob("build/*/appsi_cmodel.*")[0]
            target = os.path.join(
                PYOMO_CONFIG_DIR, 'lib',
                'python%s.%s' % sys.version_info[:2],
                'site-packages', '.')
            if not os.path.exists(target):
                os.makedirs(target)
            shutil.copy(library, target)
    finally:
        os.chdir(basedir)
        if not self.inplace:
            shutil.rmtree(tmpdir, onerror=handleReadonly)
def test_noscalers():
    keras_folder_name = os.path.join(this_file_dir(), 'data', 'keras_models')
    keras_model = load_keras_json_hd5(
        keras_folder_name, 'PT_data_2_10_10_2_sigmoid')

    input_labels = ['Temperature_K', 'Pressure_Pa']
    output_labels = ['EnthMol', 'VapFrac']
    input_bounds = {'Temperature_K': (-3.0, 3.0), 'Pressure_Pa': (-3.0, 3.0)}

    keras_surrogate = KerasSurrogate(
        keras_model=keras_model,
        input_labels=input_labels,
        output_labels=output_labels,
        input_bounds=input_bounds)

    # check solve with pyomo
    x_test = pd.DataFrame({'Temperature_K': [0.5], 'Pressure_Pa': [0.5]})
    y_test = keras_surrogate.evaluate_surrogate(x_test)

    m = ConcreteModel()
    m.obj = Objective(expr=1)
    m.surrogate = SurrogateBlock()
    m.surrogate.build_model(
        surrogate_object=keras_surrogate,
        formulation=KerasSurrogate.Formulation.FULL_SPACE)
    m.surrogate.inputs['Temperature_K'].fix(0.5)
    m.surrogate.inputs['Pressure_Pa'].fix(0.5)
    solver = SolverFactory('ipopt')
    status = solver.solve(m, tee=True)
    assert_optimal_termination(status)

    y_test_pyomo = pd.DataFrame({
        'EnthMol': [value(m.surrogate.outputs['EnthMol'])],
        'VapFrac': [value(m.surrogate.outputs['VapFrac'])],
    })
    pd.testing.assert_frame_equal(
        y_test, y_test_pyomo, check_dtype=False, rtol=rtol, atol=atol)
Three python dictionaries that are loaded:
* BB_costing_exponents
* BB_costing_params
* sCO2_costing_params
"""
__author__ = "Costing Team (A. Noring and M. Zamarripa)"
__version__ = "1.0.0"

import os
import json

from pyomo.common.fileutils import this_file_dir

directory = this_file_dir()

'''
The costing exponents dictionary contains information from the QGESS on
capital cost scaling methodology (DOE/NETL-2019/1784).  Specifically, it
includes scaling exponents, valid ranges for the scaled parameter, and units
for those ranges.  It is important to note that the units only apply to the
ranges and are not necessarily the units that the reference parameter value
will be given in.

This dictionary is nested with the following structure:
tech type --> account --> property name --> property value
'''
with open(os.path.join(directory, "BB_costing_exponents.json"), 'r') as file:
    BB_costing_exponents = json.load(file)

'''
The costing params dictionary contains information from the BBR4 COE
spreadsheet.  It includes the total plant cost (TPC), reference parameter
value, and units for that value.
    RangeSet, Reals, Set, value, Var, NonNegativeReals, \
    exp, sqrt, log, tanh, ConcreteModel
from pyomo.environ import ExternalFunction as EF
from pyomo.common.fileutils import this_file_dir
from pyomo.opt import SolverFactory, TerminationCondition
from pyomo.core.kernel.component_set import ComponentSet
from pyomo.common.config import ConfigValue, In

# Import IDAES
from idaes.core import declare_process_block_class, ProcessBlock, \
    StateBlock, StateBlockData, PhysicalParameterBlock
from idaes.core.util.math import smooth_max

# Logger
_log = logging.getLogger(__name__)

_so = os.path.join(this_file_dir(), "iapws95_lib/iapws95_external.so")


def iapws95_available():
    """Make sure the compiled IAPWS-95 functions are available.  Yes, in
    Windows the .so extension is still used.
    """
    return os.path.isfile(_so)


class StateVars(enum.Enum):
    """
    State variable set options
    """
    PH = 1   # Pressure-Enthalpy
    TPX = 2  # Temperature-Pressure-Quality
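# Hedged usage sketch (illustrative; the external function name "p" below is
# a placeholder, not one confirmed by this excerpt):
def _example_external_function():  # hypothetical helper, not in the original source
    if not iapws95_available():
        raise RuntimeError("iapws95_external.so not found; build it first")
    m = ConcreteModel()
    m.func_p = EF(library=_so, function="p")  # placeholder function name
    return m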
if not (numpy_available and scipy_available):
    raise unittest.SkipTest(
        "Pynumero needs scipy and numpy to run CyIpopt tests")

from pyomo.contrib.pynumero.asl import AmplInterface
if not AmplInterface.available():
    raise unittest.SkipTest(
        "Pynumero needs the ASL extension to run CyIpopt tests")

import pyomo.contrib.pynumero.algorithms.solvers.cyipopt_solver as cyipopt_solver
if not cyipopt_solver.ipopt_available:
    raise unittest.SkipTest(
        "PyNumero needs CyIpopt installed to run CyIpopt tests")
import cyipopt as cyipopt_core

example_dir = os.path.join(this_file_dir(), '..', 'examples')


class TestPyomoCyIpoptSolver(unittest.TestCase):

    def test_status_maps(self):
        self.assertEqual(len(cyipopt_core.STATUS_MESSAGES),
                         len(cyipopt_solver._cyipopt_status_enum))
        self.assertEqual(len(cyipopt_core.STATUS_MESSAGES),
                         len(cyipopt_solver._ipopt_term_cond))
        for msg in cyipopt_core.STATUS_MESSAGES.values():
            self.assertIn(msg, cyipopt_solver._cyipopt_status_enum)
        for status in cyipopt_solver._cyipopt_status_enum.values():
            self.assertIn(status, cyipopt_solver._ipopt_term_cond)


class TestExamples(unittest.TestCase):
def build_SOFC_ROM(m):
    m.SOFC = b = Block()

    # load kriging coefficients
    fname = 'kriging_coefficients.dat'
    path = os.path.join(this_file_dir(), fname)
    with open(path) as file:
        text = file.readlines()
    kriging = [float(line) for line in text]

    n_inputs = 9
    n_outputs = 48
    n_samples = 13424

    # create indices for vars and params
    input_index = list(range(n_inputs))
    input_plus_index = list(range(n_inputs + 1))
    output_index = list(range(n_outputs))
    samples_index = list(range(n_samples))

    # read through data file and create params
    start_count = 0
    end_count = n_inputs
    values = kriging[start_count:end_count]
    b.mean_input = Param(input_index,
                         initialize=build_dict(input_index, values),
                         mutable=False)

    start_count += n_inputs
    end_count += n_inputs
    values = kriging[start_count:end_count]
    b.sigma_input = Param(input_index,
                          initialize=build_dict(input_index, values),
                          mutable=False)

    start_count += n_inputs
    end_count += n_outputs
    values = kriging[start_count:end_count]
    b.mean_output = Param(output_index,
                          initialize=build_dict(output_index, values),
                          mutable=False)

    start_count += n_outputs
    end_count += n_outputs
    values = kriging[start_count:end_count]
    b.sigma_output = Param(output_index,
                           initialize=build_dict(output_index, values),
                           mutable=False)

    start_count += n_outputs
    end_count += n_inputs * n_samples
    values = kriging[start_count:end_count]
    b.ds_input = Param(samples_index, input_index,
                       initialize=build_matrix(samples_index, input_index,
                                               values),
                       mutable=False)

    start_count += n_inputs * n_samples
    end_count += n_inputs
    values = kriging[start_count:end_count]
    b.theta = Param(input_index,
                    initialize=build_dict(input_index, values),
                    mutable=False)

    start_count += n_inputs
    end_count += (n_inputs + 1) * n_outputs
    values = kriging[start_count:end_count]
    b.beta = Param(input_plus_index, output_index,
                   initialize=build_matrix(input_plus_index, output_index,
                                           values),
                   mutable=False)

    start_count += (n_inputs + 1) * n_outputs
    end_count += n_samples * n_outputs
    values = kriging[start_count:end_count]
    b.gamma = Param(samples_index, output_index,
                    initialize=build_matrix(samples_index, output_index,
                                            values),
                    mutable=False)

    # create input vars for the user to interface with
    b.current_density = Var(initialize=4000, units=units.A/units.m**2,
                            bounds=(2000, 6000))
    # units for T should be degC but pyomo doesn't support conversion from K
    b.fuel_temperature = Var(initialize=500, units=None, bounds=(15, 600))
    b.internal_reforming = Var(initialize=0.4, units=None, bounds=(0, 1))
    b.air_temperature = Var(initialize=700, units=None, bounds=(550, 800))
    b.air_recirculation = Var(initialize=0.5, units=None, bounds=(0, 0.8))
    b.OTC = Var(initialize=2.1, units=None, bounds=(1.5, 3))
    b.fuel_util = Var(initialize=0.85, units=None, bounds=(0.4, 0.95))
    b.air_util = Var(initialize=0.5, units=None, bounds=(0.125, 0.833))
    b.pressure = Var(initialize=1, units=units.atm, bounds=(1, 2.5))

    # create vars for intermediate calculations
    ROM_initialize_values = [4000, 500, 0.4, 700, 0.5, 2.1, 0.85, 0.5, 1]
    b.ROM_input = Var(input_index,
                      initialize=build_dict(input_index,
                                            ROM_initialize_values))
    b.norm_input = Var(input_index, initialize=0)
    b.F = Var(input_plus_index, initialize=0)
    b.F[0].fix(1)
    b.R = Var(samples_index, initialize=0)
    b.norm_output = Var(output_index, initialize=0)
    b.ROM_output = Var(output_index)

    # create kriging regression constraints
    # this dict maps the index values to the input vars
    input_map = {0: b.current_density,
                 1: b.fuel_temperature,
                 2: b.internal_reforming,
                 3: b.air_temperature,
                 4: b.air_recirculation,
                 5: b.OTC,
                 6: b.fuel_util,
                 7: b.air_util,
                 8: b.pressure}

    def input_rule(b, i):
        if units.get_units(input_map[i]) is None:
            return b.ROM_input[i] == input_map[i]
        else:
            unit_conversion = units.get_units(input_map[i])
            return b.ROM_input[i] == input_map[i] / unit_conversion
    b.input_mapping_eqs = Constraint(input_index, rule=input_rule)

    def norm_input_rule(b, i):
        return (b.norm_input[i] ==
                (b.ROM_input[i] - b.mean_input[i]) / b.sigma_input[i])
    b.norm_input_eqs = Constraint(input_index, rule=norm_input_rule)

    def F_rule(b, i):
        return b.F[i+1] == b.norm_input[i]
    b.F_eqs = Constraint(input_index, rule=F_rule)

    def R_rule(b, i):
        return (b.R[i] == exp(-1*sum(b.theta[j] *
                                     (b.ds_input[i, j] - b.norm_input[j])**2
                                     for j in input_index)))
    b.R_eqs = Constraint(samples_index, rule=R_rule)

    def norm_output_rule(b, i):
        return (b.norm_output[i] ==
                sum(b.F[j]*b.beta[j, i] for j in input_plus_index) +
                sum(b.R[k]*b.gamma[k, i] for k in samples_index))
    b.norm_output_eqs = Constraint(output_index, rule=norm_output_rule)

    def ROM_output_rule(b, i):
        return (b.ROM_output[i] ==
                b.mean_output[i] + b.norm_output[i]*b.sigma_output[i])
    b.ROM_output_eqs = Constraint(output_index, rule=ROM_output_rule)

    # create output variables and constraints
    b.anode_outlet_temperature = Var(initialize=600, units=None)
    b.cathode_outlet_temperature = Var(initialize=600, units=None)
    b.stack_voltage = Var(initialize=1, units=units.V)
    b.max_cell_temperature = Var(initialize=750, units=None)
    b.deltaT_cell = Var(initialize=100, units=None)

    def anode_outlet_rule(b):
        return b.anode_outlet_temperature == b.ROM_output[11]
    b.anode_outlet_eq = Constraint(rule=anode_outlet_rule)

    def cathode_outlet_rule(b):
        return b.cathode_outlet_temperature == b.ROM_output[13]
    b.cathode_outlet_eq = Constraint(rule=cathode_outlet_rule)

    def stack_voltage_rule(b):
        return b.stack_voltage == b.ROM_output[1]*units.V
    b.stack_voltage_eq = Constraint(rule=stack_voltage_rule)

    def max_cell_temp_rule(b):
        return b.max_cell_temperature == b.ROM_output[8]
    b.max_cell_temp_eq = Constraint(rule=max_cell_temp_rule)

    def deltaT_cell_rule(b):
        return b.deltaT_cell == b.ROM_output[10]
    b.deltaT_cell_eq = Constraint(rule=deltaT_cell_rule)
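# Note on the kriging regression above (added for clarity, not in the
# original source): for each output i the block computes a normalized
# prediction
#     norm_output[i] = sum_j F[j]*beta[j, i] + sum_k R[k]*gamma[k, i]
# where R[k] = exp(-sum_j theta[j]*(ds_input[k, j] - norm_input[j])**2) is
# the correlation between the query point and training sample k; the result
# is then de-normalized with mean_output and sigma_output.
#
# Hedged usage sketch (illustrative; assumes build_dict/build_matrix and the
# kriging data file are available alongside this module):
def _example_build_rom():  # hypothetical helper, not in the original source
    m = ConcreteModel()
    build_SOFC_ROM(m)
    m.SOFC.current_density.fix(4500)  # A/m**2, placeholder operating point
    return m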
def create_command_line(self, executable, problem_files):
    #
    # Define log file
    # The log file in CPLEX contains the solution trace, but the
    # solver status can be found in the solution file.
    #
    if self._log_file is None:
        self._log_file = TempfileManager.\
                         create_tempfile(suffix='.gurobi.log')

    #
    # Define solution file
    # As indicated above, contains (in XML) both the solution and
    # solver status.
    #
    if self._soln_file is None:
        self._soln_file = TempfileManager.\
                          create_tempfile(suffix='.gurobi.txt')

    #
    # Write the GUROBI execution script
    #
    problem_filename = self._problem_files[0]
    solution_filename = self._soln_file
    warmstart_filename = self._warm_start_file_name

    # translate the options into a normal python dictionary, from a
    # pyutilib SectionWrapper - the gurobi_run function doesn't know
    # about pyomo, so the translation is necessary.
    options_dict = {}
    for key in self.options:
        options_dict[key] = self.options[key]

    # NOTE: the gurobi shell is independent of the Pyomo python
    #       virtualized environment, so any imports - specifically
    #       those required to get GUROBI_RUN - must be handled
    #       explicitly.
    # NOTE: The gurobi plugin (GUROBI.py) and GUROBI_RUN.py live in
    #       the same directory.
    script = "import sys\n"
    script += "from gurobipy import *\n"
    script += "sys.path.append(%r)\n" % (this_file_dir(),)
    script += "from GUROBI_RUN import *\n"
    script += "gurobi_run("
    mipgap = float(self.options.mipgap) if \
        self.options.mipgap is not None else None
    for x in (problem_filename, warmstart_filename, solution_filename,
              None, options_dict, self._suffixes):
        script += "%r," % x
    script += ")\n"
    script += "quit()\n"

    # dump the script and warm-start file names for the
    # user if we're keeping files around.
    if self._keepfiles:
        script_fname = TempfileManager.create_tempfile(
            suffix='.gurobi.script')
        script_file = open(script_fname, 'w')
        script_file.write(script)
        script_file.close()
        print("Solver script file: '%s'" % script_fname)
        if self._warm_start_solve and \
           (self._warm_start_file_name is not None):
            print("Solver warm-start file: " + self._warm_start_file_name)

    #
    # Define command line
    #
    cmd = [executable]
    if self._timer:
        cmd.insert(0, self._timer)
    return Bunch(cmd=cmd, script=script,
                 log_file=self._log_file, env=None)
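# For reference (added note, derived from the string-building above): the
# generated script piped to the Gurobi shell looks roughly like
#
#     import sys
#     from gurobipy import *
#     sys.path.append('/path/to/plugin/dir')
#     from GUROBI_RUN import *
#     gurobi_run('model.lp', None, 'soln.txt', None, {}, [],)
#     quit()
#
# where each argument is the repr() of the corresponding Python object and
# the paths shown here are placeholders.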
    ExecutableData,
    import_file,
)
from pyomo.common.download import FileDownloader

try:
    samefile = os.path.samefile
except AttributeError:
    # os.path.samefile is not available in Python 2.7 under Windows.
    # Mock up a dummy function for that platform.
    def samefile(a, b):
        return True

_this_file = this_file()
_this_file_dir = this_file_dir()


class TestFileUtils(unittest.TestCase):
    def setUp(self):
        self.tmpdir = None
        self.basedir = os.path.abspath(os.path.curdir)
        self.config = envvar.PYOMO_CONFIG_DIR
        self.ld_library_path = os.environ.get('LD_LIBRARY_PATH', None)
        self.path = os.environ.get('PATH', None)

    def tearDown(self):
        envvar.PYOMO_CONFIG_DIR = self.config
        os.chdir(self.basedir)
        if self.tmpdir:
            shutil.rmtree(self.tmpdir)
        instance.obj = Objective(rule=obj_rule)
        self.recordData('postprocessing', timer.toc('postprocessing'))

        for fmt in ('nl', 'bar', 'gams'):
            if not getattr(self, fmt, 0):
                continue
            writer = WriterFactory(fmt)
            fname = 'tmp.test.' + fmt
            self.assertFalse(os.path.exists(fname))
            try:
                timer.tic(None)
                writer(instance, fname, lambda x: True, {})
                _time = timer.toc(fmt)
                self.assertTrue(os.path.exists(fname))
                self.recordData(fmt, _time)
            finally:
                try:
                    os.remove(fname)
                except:
                    pass


if __name__ == '__main__':
    import sys
    from pyomo.common.fileutils import this_file_dir
    sys.path.insert(0, os.path.dirname(this_file_dir()))
    __package__ = os.path.basename(this_file_dir())
    unittest.main()
def __init__(self, name):
    # don't invoke the original build_ext for this special extension
    super(CMakeExtension, self).__init__(name, sources=[])
    self.project_dir = os.path.join(this_file_dir(), name)
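# Hedged usage sketch (illustrative; 'my_cmake_project' is a placeholder
# name and assumes a companion build_ext subclass that actually runs CMake
# in self.project_dir):
def _example_cmake_setup():  # hypothetical helper, not in the original source
    from setuptools import setup
    setup(name='demo', ext_modules=[CMakeExtension('my_cmake_project')])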
from os.path import abspath, dirname, join
from filecmp import cmp
import subprocess

import pyomo.common.unittest as unittest

from pyomo.common.dependencies import yaml_available
from pyomo.common.fileutils import this_file_dir
from pyomo.common.tee import capture_output
from pyomo.common.tempfiles import TempfileManager
import pyomo.core
import pyomo.scripting.pyomo_main as main
from pyomo.opt import check_available_solvers

from io import StringIO

currdir = this_file_dir()
_diff_tol = 1e-6

deleteFiles = True

solvers = None


class BaseTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        global solvers
        import pyomo.environ
        solvers = check_available_solvers('glpk')

    def pyomo(self, cmd, **kwds):
def build_model():
    model = AbstractModel()

    model.BigM = Suffix(direction=Suffix.LOCAL)
    model.BigM[None] = 1000

    DATFILE = "stickies1.dat"

    #######################
    # Sets
    #######################

    # J
    model.Components = Set()
    # fiber
    model.GoodComponents = Set()
    # stickies
    model.BadComponents = Set()
    # N: total nodes in the system
    model.Nodes = Set()
    # S: possible screens
    model.Screens = Set()

    def screen_node_filter(model, s, n):
        return s != n
    model.ScreenNodePairs = Set(initialize=model.Screens * model.Nodes,
                                dimen=2, filter=screen_node_filter)

    def screen_filter(model, s, sprime):
        return s != sprime
    model.ScreenPairs = Set(initialize=model.Screens * model.Screens,
                            dimen=2, filter=screen_filter)

    ######################
    # Parameters
    ######################

    # exponent coefficient for cost in screen s (alpha(s))
    model.ExpScreenCostCoeff = Param(model.Screens)
    # beta(s, j)
    model.AcceptanceFactor = Param(model.Screens, model.Components)
    # C_s^1
    model.ScreenCostCoeff1 = Param(model.Screens)
    # C_s^2
    model.ScreenCostCoeff2 = Param(model.Screens, default=0)
    # max percentage inlet stickies accepted in total flow (C_{st}^{up}, q(kb))
    model.AcceptedLeftover = Param(model.BadComponents)
    # F_j^0, m_src(k)
    model.InitialComponentFlow = Param(model.Components)
    # m_src_lo(k)
    model.InitialComponentFlowLB = Param(model.Components, default=0)
    # constants for objective function (W^1, W^2, W^3)
    model.FiberWeight = Param()
    model.StickiesWeight = Param()
    model.CostWeight = Param()

    ## Bounds on variables

    # F_s^{in, lo} and F_s^{in, up} (f_in_up(s), f_in_lo(s))
    def flow_ub_rule(model, s):
        return sum(model.InitialComponentFlow[k] for k in model.Components)
    model.ScreenFlowLB = Param(model.Screens)
    model.ScreenFlowUB = Param(model.Screens, initialize=flow_ub_rule)

    # m_in_lo(ss, k): lower bound of individual flow into nodes.
    model.InletComponentFlowLB = Param(model.Components, model.Nodes,
                                       default=0)

    def component_flow_ub_rule(model, k, n):
        return model.InitialComponentFlow[k]
    # m_in_up(ss, k)
    model.InletComponentFlowUB = Param(model.Components, model.Nodes,
                                       initialize=component_flow_ub_rule)

    # r_lo(s)
    model.RejectRateLB = Param(model.Screens)
    # r_up(s)
    model.RejectRateUB = Param(model.Screens)

    # m_rej_lo(s, k)
    model.RejectedComponentFlowLB = Param(model.Components, model.Screens,
                                          default=0)

    def rejected_component_flow_bound(model, k, s):
        return model.InitialComponentFlow[k] * (
            model.RejectRateUB[s]**model.AcceptanceFactor[s, k])
    # m_rej_up(s, k)
    model.RejectedComponentFlowUB = Param(
        model.Components, model.Screens,
        initialize=rejected_component_flow_bound)

    # m_acc_lo(s, k): lower bound of accepted individual flow
    model.AcceptedComponentFlowLB = Param(model.Components, model.Screens,
                                          default=0)

    def accepted_component_flow_bound(model, k, s):
        return model.InitialComponentFlow[k] * (
            1 - model.RejectRateLB[s]**model.AcceptanceFactor[s, k])
    # m_acc_up(s, k)
    model.AcceptedComponentFlowUB = Param(
        model.Components, model.Screens,
        initialize=accepted_component_flow_bound)

    ######################
    # Variables
    ######################

    # c_s, C(s), cost of selecting screen
    model.screenCost = Var(model.Screens, within=NonNegativeReals)
    # , bounds=get_screen_cost_bounds)

    # total inlet flow into screen s (f_s, F_IN(s))
    # NOTE: the upper bound is enforced globally.  The lower bound is
    # enforced in the first disjunction (to match GAMS)
    def get_inlet_flow_bounds(model, s):
        return (0, model.ScreenFlowUB[s])
    model.inletScreenFlow = Var(model.Screens, within=NonNegativeReals,
                                bounds=get_inlet_flow_bounds)

    # inlet flow of component j into node n, (f_{n,j}^I, M_IN)
    def get_inlet_component_flow_bounds(model, j, n):
        return (model.InletComponentFlowLB[j, n],
                model.InletComponentFlowUB[j, n])
    model.inletComponentFlow = Var(model.Components, model.Nodes,
                                   within=NonNegativeReals,
                                   bounds=get_inlet_component_flow_bounds)

    # accepted flow of component j from screen s (f_{s, j}^A)
    def get_accepted_component_flow_bounds(model, j, s):
        return (model.AcceptedComponentFlowLB[j, s],
                model.AcceptedComponentFlowUB[j, s])
    model.acceptedComponentFlow = Var(
        model.Components, model.Screens, within=NonNegativeReals,
        bounds=get_accepted_component_flow_bounds)

    # rejected flow of component j from screen s (f_{s,j}^R)
    def rej_component_flow_bounds(model, k, s):
        return (model.RejectedComponentFlowLB[k, s],
                model.RejectedComponentFlowUB[k, s])
    model.rejectedComponentFlow = Var(
        model.Components, model.Screens, within=NonNegativeReals,
        bounds=rej_component_flow_bounds)

    # accepted flow of component j from screen s to node n (m_{s,n,j}^A)
    def get_accepted_node_flow_bounds(model, j, s, n):
        return (0, model.AcceptedComponentFlowUB[j, s])
    model.acceptedNodeFlow = Var(model.Components, model.Screens,
                                 model.Nodes, within=NonNegativeReals,
                                 bounds=get_accepted_node_flow_bounds)

    # rejected flow of component j from screen s to node n (m_{s,n,j}^R)
    def get_rejected_node_flow_bounds(model, j, s, n):
        return (0, model.RejectedComponentFlowUB[j, s])
    model.rejectedNodeFlow = Var(model.Components, model.Screens,
                                 model.Nodes, within=NonNegativeReals,
                                 bounds=get_rejected_node_flow_bounds)

    # flow of component j from source to node n (m_{s,j}^0)
    def get_src_flow_bounds(model, j, n):
        return (0, model.InitialComponentFlow[j])
    model.flowFromSource = Var(model.Components, model.Nodes,
                               within=NonNegativeReals)

    # reject rate of screen s (r_s)
    def get_rej_rate_bounds(model, s):
        return (model.RejectRateLB[s], model.RejectRateUB[s])
    model.rejectRate = Var(model.Screens, within=NonNegativeReals,
                           bounds=get_rej_rate_bounds)

    ######################
    # Objective
    ######################

    def calc_cost_rule(model):
        lostFiberCost = model.FiberWeight * sum(
            model.inletComponentFlow[j, 'SNK'] for j in model.GoodComponents)
        stickiesCost = model.StickiesWeight * sum(
            model.inletComponentFlow[j, 'PRD'] for j in model.BadComponents)
        screenCost = model.CostWeight * sum(
            model.screenCost[s] for s in model.Screens)
        return lostFiberCost + stickiesCost + screenCost
    model.min_cost = Objective(rule=calc_cost_rule)

    ######################
    # Constraints
    ######################

    def stickies_bound_rule(model, j):
        return sum(model.inletComponentFlow[j, 'PRD']
                   for j in model.BadComponents) <= \
            model.AcceptedLeftover[j] * model.InitialComponentFlow[j]
    model.stickies_bound = Constraint(model.BadComponents,
                                      rule=stickies_bound_rule)

    def inlet_flow_rule(model, s, j):
        return model.inletComponentFlow[j, s] == \
            model.acceptedComponentFlow[j, s] + \
            model.rejectedComponentFlow[j, s]
    model.inlet_flow = Constraint(model.Screens, model.Components,
                                  rule=inlet_flow_rule)

    def total_inlet_flow_rule(model, s):
        return model.inletScreenFlow[s] == sum(
            model.inletComponentFlow[j, s] for j in model.Components)
    model.total_inlet_flow = Constraint(model.Screens,
                                        rule=total_inlet_flow_rule)

    def inlet_flow_balance_rule(model, n, j):
        return model.inletComponentFlow[j, n] == \
            model.flowFromSource[j, n] + \
            sum(model.acceptedNodeFlow[j, s, n] +
                model.rejectedNodeFlow[j, s, n]
                for s in model.Screens if s != n)
    model.inlet_flow_balance = Constraint(model.Nodes, model.Components,
                                          rule=inlet_flow_balance_rule)

    def source_flow_rule(model, j):
        return model.InitialComponentFlow[j] == sum(
            model.flowFromSource[j, n] for n in model.Nodes)
    model.source_flow = Constraint(model.Components, rule=source_flow_rule)

    #################
    ## Disjunctions
    #################

    def screen_disjunct_rule(disjunct, selectScreen, s):
        model = disjunct.model()

        def rejected_flow_rule(disjunct, j):
            return model.rejectedComponentFlow[j, s] == \
                model.inletComponentFlow[j, s] * \
                (model.rejectRate[s]**model.AcceptanceFactor[s, j])

        if selectScreen:
            disjunct.inlet_flow_bounds = Constraint(
                expr=model.ScreenFlowLB[s] <= model.inletScreenFlow[s])
            # <= model.ScreenFlowUB[s])
            disjunct.rejected_flow = Constraint(model.Components,
                                                rule=rejected_flow_rule)
            disjunct.screen_cost = Constraint(
                expr=model.screenCost[s] ==
                model.ScreenCostCoeff1[s] *
                (model.inletScreenFlow[s]**model.ExpScreenCostCoeff[s]) +
                model.ScreenCostCoeff2[s] * (1 - model.rejectRate[s]))
        else:
            disjunct.no_flow = Constraint(expr=model.inletScreenFlow[s] == 0)
            disjunct.no_cost = Constraint(expr=model.screenCost[s] == 0)
    model.screen_selection_disjunct = Disjunct([0, 1], model.Screens,
                                               rule=screen_disjunct_rule)

    def screen_disjunction_rule(model, s):
        return [model.screen_selection_disjunct[selectScreen, s]
                for selectScreen in [0, 1]]
    model.screen_disjunction = Disjunction(model.Screens,
                                           rule=screen_disjunction_rule)

    def accepted_flow_disjunct_rule(disjunct, s, n, acceptFlow):
        model = disjunct.model()

        def flow_balance_rule(disjunct, j):
            return model.acceptedNodeFlow[j, s, n] == \
                model.acceptedComponentFlow[j, s]

        def no_flow_rule(disjunct, j):
            return model.acceptedNodeFlow[j, s, n] == 0

        if acceptFlow:
            disjunct.flow_balance = Constraint(model.Components,
                                               rule=flow_balance_rule)
        else:
            disjunct.no_flow = Constraint(model.Components,
                                          rule=no_flow_rule)
    model.flow_acceptance_disjunct = Disjunct(
        model.ScreenNodePairs, [0, 1], rule=accepted_flow_disjunct_rule)

    def flow_acceptance_disjunction_rule(model, s, n):
        return [model.flow_acceptance_disjunct[s, n, acceptFlow]
                for acceptFlow in [0, 1]]
    model.flow_acceptance_disjunction = Disjunction(
        model.ScreenNodePairs, rule=flow_acceptance_disjunction_rule)

    def rejected_flow_disjunct_rule(disjunct, s, n, rejectFlow):
        model = disjunct.model()

        def flow_balance_rule(disjunct, j):
            return model.rejectedNodeFlow[j, s, n] == \
                model.rejectedComponentFlow[j, s]

        def no_reject_rule(disjunct, j):
            return model.rejectedNodeFlow[j, s, n] == 0

        if rejectFlow:
            disjunct.flow_balance = Constraint(model.Components,
                                               rule=flow_balance_rule)
        else:
            disjunct.no_reject = Constraint(model.Components,
                                            rule=no_reject_rule)
    model.flow_rejection_disjunct = Disjunct(
        model.ScreenNodePairs, [0, 1], rule=rejected_flow_disjunct_rule)

    def rejected_flow_disjunction_rule(model, s, n):
        return [model.flow_rejection_disjunct[s, n, rejectFlow]
                for rejectFlow in [0, 1]]
    model.flow_rejection_disjunction = Disjunction(
        model.ScreenNodePairs, rule=rejected_flow_disjunction_rule)

    def flow_from_source_disjunct_rule(disjunct, n):
        model = disjunct.model()

        def sourceFlow_balance_rule1(disjunct, j):
            # this doesn't match the formulation, but it matches GAMS:
            return model.flowFromSource[j, n] >= \
                model.InitialComponentFlowLB[j]
            # this would be the formulation version:
            # return model.flowFromSource[j, n] == \
            #     model.InitialComponentFlow[j]

        def sourceFlow_balance_rule2(disjunct, j):
            return model.flowFromSource[j, n] <= \
                model.InitialComponentFlow[j]

        def no_sourceFlow_rule(disjunct, j, nprime):
            return model.flowFromSource[j, nprime] == 0

        disjunct.flow_balance1 = Constraint(model.Components,
                                            rule=sourceFlow_balance_rule1)
        disjunct.flow_balance2 = Constraint(model.Components,
                                            rule=sourceFlow_balance_rule2)
        disjunct.no_flow = Constraint(model.Components, model.Nodes - [n],
                                      rule=no_sourceFlow_rule)
    model.flow_from_source_disjunct = Disjunct(
        model.Nodes, rule=flow_from_source_disjunct_rule)

    def flow_from_source_disjunction_rule(model):
        return [model.flow_from_source_disjunct[n] for n in model.Nodes]
    model.flow_from_source_disjunction = Disjunction(
        rule=flow_from_source_disjunction_rule)

    ######################
    # Boolean Constraints
    ######################

    # These are the GAMS versions of the logical constraints, which is not
    # what appears in the formulation:
    def log1_rule(model, s):
        return model.screen_selection_disjunct[1, s].indicator_var == \
            sum(model.flow_acceptance_disjunct[s, n, 1].indicator_var
                for n in model.Nodes if s != n)
    model.log1 = Constraint(model.Screens, rule=log1_rule)

    def log2_rule(model, s):
        return model.screen_selection_disjunct[1, s].indicator_var == \
            sum(model.flow_rejection_disjunct[s, n, 1].indicator_var
                for n in model.Nodes if s != n)
    model.log2 = Constraint(model.Screens, rule=log2_rule)

    def log3_rule(model, s):
        return model.screen_selection_disjunct[1, s].indicator_var >= \
            sum(model.flow_acceptance_disjunct[s, sprime, 1].indicator_var
                for sprime in model.Screens if s != sprime)
    model.log3 = Constraint(model.Screens, rule=log3_rule)

    def log4_rule(model, s):
        return model.screen_selection_disjunct[1, s].indicator_var >= \
            sum(model.flow_rejection_disjunct[s, sprime, 1].indicator_var
                for sprime in model.Screens if s != sprime)
    model.log4 = Constraint(model.Screens, rule=log4_rule)

    def log6_rule(model, s, sprime):
        return model.flow_acceptance_disjunct[s, sprime, 1].indicator_var + \
            model.flow_acceptance_disjunct[sprime, s, 1].indicator_var <= 1
    model.log6 = Constraint(model.ScreenPairs, rule=log6_rule)

    def log7_rule(model, s, sprime):
        return model.flow_rejection_disjunct[s, sprime, 1].indicator_var + \
            model.flow_rejection_disjunct[sprime, s, 1].indicator_var <= 1
    model.log7 = Constraint(model.ScreenPairs, rule=log7_rule)

    def log8_rule(model, s, n):
        return model.flow_acceptance_disjunct[s, n, 1].indicator_var + \
            model.flow_rejection_disjunct[s, n, 1].indicator_var <= 1
    model.log8 = Constraint(model.ScreenNodePairs, rule=log8_rule)

    def log9_rule(model, s, sprime):
        return model.flow_acceptance_disjunct[s, sprime, 1].indicator_var + \
            model.flow_rejection_disjunct[sprime, s, 1].indicator_var <= 1
    model.log9 = Constraint(model.ScreenPairs, rule=log9_rule)

    # These are the above logical constraints implemented correctly (I think).
    # However, this doesn't match what is actually coded in GAMS and makes
    # the model infeasible with the data from GAMS.

    # YA_{s,n} v YR_{s,n} implies Y_s
    # def flow_existence_rule1(model, s, n):
    #     return model.screen_selection_disjunct[1, s].indicator_var >= \
    #         model.flow_acceptance_disjunct[s, n, 1].indicator_var
    # model.flow_existence1 = Constraint(model.ScreenNodePairs,
    #                                    rule=flow_existence_rule1)

    # def flow_existence_rule2(model, s, n):
    #     return model.screen_selection_disjunct[1, s].indicator_var >= \
    #         model.flow_rejection_disjunct[s, n, 1].indicator_var
    # model.flow_existence2 = Constraint(model.ScreenNodePairs,
    #                                    rule=flow_existence_rule2)

    # YA_{s,s'} v YR_{s',s} implies Y_s
    # def screen_flow_existence_rule1(model, s, sprime):
    #     return model.screen_selection_disjunct[1, s].indicator_var >= \
    #         model.flow_acceptance_disjunct[s, sprime, 1].indicator_var
    # model.screen_flow_existence1 = Constraint(
    #     model.ScreenPairs, rule=screen_flow_existence_rule1)

    # def screen_flow_existence_rule2(model, s, sprime):
    #     return model.screen_selection_disjunct[1, s].indicator_var >= \
    #         model.flow_rejection_disjunct[sprime, s, 1].indicator_var
    # model.screen_flow_existence2 = Constraint(
    #     model.ScreenPairs, rule=screen_flow_existence_rule2)

    # YA_{s', s} XOR YA_{s, s'}
    # def accept_rule1(model, s, sprime):
    #     return 1 <= \
    #         model.flow_acceptance_disjunct[s, sprime, 1].indicator_var + \
    #         model.flow_acceptance_disjunct[sprime, s, 1].indicator_var
    # model.accept1 = Constraint(model.ScreenPairs, rule=accept_rule1)

    # def accept_rule2(model, s, sprime):
    #     return 1 >= \
    #         model.flow_acceptance_disjunct[s, sprime, 1].indicator_var - \
    #         model.flow_acceptance_disjunct[sprime, s, 1].indicator_var
    # model.accept2 = Constraint(model.ScreenPairs, rule=accept_rule2)

    # def accept_rule3(model, s, sprime):
    #     return 1 >= \
    #         model.flow_acceptance_disjunct[sprime, s, 1].indicator_var - \
    #         model.flow_acceptance_disjunct[s, sprime, 1].indicator_var
    # model.accept3 = Constraint(model.ScreenPairs, rule=accept_rule3)

    # def accept_rule4(model, s, sprime):
    #     return 1 <= 2 - \
    #         model.flow_acceptance_disjunct[sprime, s, 1].indicator_var - \
    #         model.flow_acceptance_disjunct[s, sprime, 1].indicator_var
    # model.accept4 = Constraint(model.ScreenPairs, rule=accept_rule4)

    # YR_{s', s} XOR YR_{s, s'}
    # def reject_rule1(model, s, sprime):
    #     return 1 <= \
    #         model.flow_rejection_disjunct[s, sprime, 1].indicator_var + \
    #         model.flow_rejection_disjunct[sprime, s, 1].indicator_var
    # model.reject1 = Constraint(model.ScreenPairs, rule=reject_rule1)

    # def reject_rule2(model, s, sprime):
    #     return 1 >= \
    #         model.flow_rejection_disjunct[s, sprime, 1].indicator_var - \
    #         model.flow_rejection_disjunct[sprime, s, 1].indicator_var
    # model.reject2 = Constraint(model.ScreenPairs, rule=reject_rule2)

    # def reject_rule3(model, s, sprime):
    #     return 1 >= \
    #         model.flow_rejection_disjunct[sprime, s, 1].indicator_var - \
    #         model.flow_rejection_disjunct[s, sprime, 1].indicator_var
    # model.reject3 = Constraint(model.ScreenPairs, rule=reject_rule3)

    # def reject_rule4(model, s, sprime):
    #     return 1 <= 2 - \
    #         model.flow_rejection_disjunct[sprime, s, 1].indicator_var - \
    #         model.flow_rejection_disjunct[s, sprime, 1].indicator_var
    # model.reject4 = Constraint(model.ScreenPairs, rule=reject_rule4)

    # YA_{s,n} XOR YR_{s,n}
    # def accept_or_reject_rule1(model, s, n):
    #     return 1 <= \
    #         model.flow_acceptance_disjunct[s, n, 1].indicator_var + \
    #         model.flow_rejection_disjunct[s, n, 1].indicator_var
    # model.accept_or_reject1 = Constraint(model.ScreenNodePairs,
    #                                      rule=accept_or_reject_rule1)

    # def accept_or_reject_rule2(model, s, n):
    #     return 1 >= \
    #         model.flow_acceptance_disjunct[s, n, 1].indicator_var - \
    #         model.flow_rejection_disjunct[s, n, 1].indicator_var
    # model.accept_or_reject2 = Constraint(model.ScreenNodePairs,
    #                                      rule=accept_or_reject_rule2)

    # def accept_or_reject_rule3(model, s, n):
    #     return 1 >= \
    #         model.flow_rejection_disjunct[s, n, 1].indicator_var - \
    #         model.flow_acceptance_disjunct[s, n, 1].indicator_var
    # model.accept_or_reject3 = Constraint(model.ScreenNodePairs,
    #                                      rule=accept_or_reject_rule3)

    # def accept_or_reject_rule4(model, s, n):
    #     return 1 <= 2 - \
    #         model.flow_acceptance_disjunct[s, n, 1].indicator_var - \
    #         model.flow_rejection_disjunct[s, n, 1].indicator_var
    # model.accept_or_reject4 = Constraint(model.ScreenNodePairs,
    #                                      rule=accept_or_reject_rule4)

    instance = model.create_instance(os.path.join(this_file_dir(), DATFILE))

    # fix the variables they fix in GAMS
    for s in instance.Screens:
        instance.flow_acceptance_disjunct[s, 'SNK', 1].indicator_var.fix(0)
        instance.flow_rejection_disjunct[s, 'PRD', 1].indicator_var.fix(0)

    ##########################################################################
    ## for validation: Fix all the indicator variables to see if we get the
    ## same objective value (250.956)
    ##########################################################################
    # instance.screen_selection_disjunct[1,'S1'].indicator_var.fix(1)
    # instance.screen_selection_disjunct[1,'S2'].indicator_var.fix(0)
    # instance.screen_selection_disjunct[1,'S3'].indicator_var.fix(0)
    # instance.screen_selection_disjunct[1,'S4'].indicator_var.fix(0)
    # instance.screen_selection_disjunct[1,'S5'].indicator_var.fix(0)
    # instance.screen_selection_disjunct[1,'S6'].indicator_var.fix(0)
    # instance.flow_acceptance_disjunct['S1','S2',1].indicator_var.fix(0)
    # instance.flow_acceptance_disjunct['S1','S3',1].indicator_var.fix(0)
    # instance.flow_acceptance_disjunct['S1','S4',1].indicator_var.fix(0)
    # instance.flow_acceptance_disjunct['S1','S5',1].indicator_var.fix(0)
    # instance.flow_acceptance_disjunct['S1','S6',1].indicator_var.fix(0)
    # instance.flow_acceptance_disjunct['S1','PRD',1].indicator_var.fix(1)
    # # 'SNK' is already fixed correctly in the loop above that is "in" the model
    # instance.flow_acceptance_disjunct['S2','S1',1].indicator_var.fix(0)
    # instance.flow_acceptance_disjunct['S2','S3',1].indicator_var.fix(0)
    # instance.flow_acceptance_disjunct['S2','S4',1].indicator_var.fix(0)
    # instance.flow_acceptance_disjunct['S2','S5',1].indicator_var.fix(0)
    # instance.flow_acceptance_disjunct['S2','S6',1].indicator_var.fix(0)
    # instance.flow_acceptance_disjunct['S2','PRD',1].indicator_var.fix(0)
    # instance.flow_acceptance_disjunct['S3','S1',1].indicator_var.fix(0)
    # instance.flow_acceptance_disjunct['S3','S2',1].indicator_var.fix(0)
    # instance.flow_acceptance_disjunct['S3','S4',1].indicator_var.fix(0)
    # instance.flow_acceptance_disjunct['S3','S5',1].indicator_var.fix(0)
    # instance.flow_acceptance_disjunct['S3','S6',1].indicator_var.fix(0)
    # instance.flow_acceptance_disjunct['S3','PRD',1].indicator_var.fix(0)
    # instance.flow_acceptance_disjunct['S4','S1',1].indicator_var.fix(0)
    # instance.flow_acceptance_disjunct['S4','S2',1].indicator_var.fix(0)
    # instance.flow_acceptance_disjunct['S4','S3',1].indicator_var.fix(0)
    # instance.flow_acceptance_disjunct['S4','S5',1].indicator_var.fix(0)
    # instance.flow_acceptance_disjunct['S4','S6',1].indicator_var.fix(0)
    # instance.flow_acceptance_disjunct['S4','PRD',1].indicator_var.fix(0)
    # instance.flow_acceptance_disjunct['S5','S1',1].indicator_var.fix(0)
    # instance.flow_acceptance_disjunct['S5','S2',1].indicator_var.fix(0)
    # instance.flow_acceptance_disjunct['S5','S3',1].indicator_var.fix(0)
    # instance.flow_acceptance_disjunct['S5','S4',1].indicator_var.fix(0)
    # instance.flow_acceptance_disjunct['S5','S6',1].indicator_var.fix(0)
    # instance.flow_acceptance_disjunct['S5','PRD',1].indicator_var.fix(0)
    # instance.flow_acceptance_disjunct['S6','S1',1].indicator_var.fix(0)
    # instance.flow_acceptance_disjunct['S6','S2',1].indicator_var.fix(0)
    # instance.flow_acceptance_disjunct['S6','S3',1].indicator_var.fix(0)
    # instance.flow_acceptance_disjunct['S6','S4',1].indicator_var.fix(0)
    # instance.flow_acceptance_disjunct['S6','S5',1].indicator_var.fix(0)
    # instance.flow_acceptance_disjunct['S6','PRD',1].indicator_var.fix(0)
    # instance.flow_rejection_disjunct['S1','S2',1].indicator_var.fix(0)
    # instance.flow_rejection_disjunct['S1','S3',1].indicator_var.fix(0)
    # instance.flow_rejection_disjunct['S1','S4',1].indicator_var.fix(0)
    # instance.flow_rejection_disjunct['S1','S5',1].indicator_var.fix(0)
    # instance.flow_rejection_disjunct['S1','S6',1].indicator_var.fix(0)
    # instance.flow_rejection_disjunct['S1','SNK',1].indicator_var.fix(1)
    # # 'SNK' is already fixed correctly in the loop above that is "in" the model
    # instance.flow_rejection_disjunct['S2','S1',1].indicator_var.fix(0)
    # instance.flow_rejection_disjunct['S2','S3',1].indicator_var.fix(0)
    # instance.flow_rejection_disjunct['S2','S4',1].indicator_var.fix(0)
    # instance.flow_rejection_disjunct['S2','S5',1].indicator_var.fix(0)
    # instance.flow_rejection_disjunct['S2','S6',1].indicator_var.fix(0)
    # instance.flow_rejection_disjunct['S2','SNK',1].indicator_var.fix(0)
    # instance.flow_rejection_disjunct['S3','S1',1].indicator_var.fix(0)
    # instance.flow_rejection_disjunct['S3','S2',1].indicator_var.fix(0)
    # instance.flow_rejection_disjunct['S3','S4',1].indicator_var.fix(0)
    # instance.flow_rejection_disjunct['S3','S5',1].indicator_var.fix(0)
    # instance.flow_rejection_disjunct['S3','S6',1].indicator_var.fix(0)
    # instance.flow_rejection_disjunct['S3','SNK',1].indicator_var.fix(0)
    # instance.flow_rejection_disjunct['S4','S1',1].indicator_var.fix(0)
    # instance.flow_rejection_disjunct['S4','S2',1].indicator_var.fix(0)
    # instance.flow_rejection_disjunct['S4','S3',1].indicator_var.fix(0)
    # instance.flow_rejection_disjunct['S4','S5',1].indicator_var.fix(0)
    # instance.flow_rejection_disjunct['S4','S6',1].indicator_var.fix(0)
    # instance.flow_rejection_disjunct['S4','SNK',1].indicator_var.fix(0)
    # instance.flow_rejection_disjunct['S5','S1',1].indicator_var.fix(0)
    # instance.flow_rejection_disjunct['S5','S2',1].indicator_var.fix(0)
    # instance.flow_rejection_disjunct['S5','S3',1].indicator_var.fix(0)
    # instance.flow_rejection_disjunct['S5','S4',1].indicator_var.fix(0)
    # instance.flow_rejection_disjunct['S5','S6',1].indicator_var.fix(0)
    # instance.flow_rejection_disjunct['S5','SNK',1].indicator_var.fix(0)
    # instance.flow_rejection_disjunct['S6','S1',1].indicator_var.fix(0)
    # instance.flow_rejection_disjunct['S6','S2',1].indicator_var.fix(0)
    # instance.flow_rejection_disjunct['S6','S3',1].indicator_var.fix(0)
    # instance.flow_rejection_disjunct['S6','S4',1].indicator_var.fix(0)
    # instance.flow_rejection_disjunct['S6','S5',1].indicator_var.fix(0)
    # instance.flow_rejection_disjunct['S6','SNK',1].indicator_var.fix(0)
    # instance.flow_from_source_disjunct['S1'].indicator_var.fix(1)
    # instance.flow_from_source_disjunct['S2'].indicator_var.fix(0)
    # instance.flow_from_source_disjunct['S3'].indicator_var.fix(0)
    # instance.flow_from_source_disjunct['S4'].indicator_var.fix(0)
    # instance.flow_from_source_disjunct['S5'].indicator_var.fix(0)
    # instance.flow_from_source_disjunct['S6'].indicator_var.fix(0)
    # instance.flow_from_source_disjunct['PRD'].indicator_var.fix(0)
    # instance.flow_from_source_disjunct['SNK'].indicator_var.fix(0)

    return instance
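# Hedged usage sketch (illustrative; assumes stickies1.dat is present and a
# GDP transformation plus a suitable MINLP solver are available):
def _example_solve_stickies():  # hypothetical helper, not in the original source
    from pyomo.environ import TransformationFactory, SolverFactory
    instance = build_model()
    TransformationFactory('gdp.bigm').apply_to(instance)
    SolverFactory('couenne').solve(instance)  # placeholder solver choice
    return instance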
def test_filter_minlplib_instances(self):
    current_dir = this_file_dir()
    fname = os.path.join(current_dir, 'minlplib', 'instancedata.csv')
    coramin.third_party.get_minlplib_instancedata(target_filename=fname)

    total_cases = 1752
    cases = coramin.third_party.filter_minlplib_instances(
        instancedata_filename=fname,
        acceptable_formats='osil',
        acceptable_probtype='QCQP',
        min_njacobiannz=1000,
        max_njacobiannz=10000)
    self.assertEqual(len(cases), 6)  # regression

    cases = coramin.third_party.filter_minlplib_instances(
        instancedata_filename=fname,
        acceptable_formats=['osil', 'gms'])
    self.assertEqual(len(cases), total_cases)  # unit

    cases = coramin.third_party.filter_minlplib_instances(
        instancedata_filename=fname)
    self.assertEqual(len(cases), total_cases)  # unit

    cases = coramin.third_party.filter_minlplib_instances(
        instancedata_filename=fname,
        acceptable_probtype=['QCQP', 'MIQCQP', 'MBQCQP'])
    self.assertEqual(len(cases), 56)  # regression

    cases = coramin.third_party.filter_minlplib_instances(
        instancedata_filename=fname,
        acceptable_objtype='linear',
        acceptable_objcurvature='linear',
        acceptable_conscurvature='convex',
        acceptable_convex=True)
    self.assertEqual(len(cases), 336)  # regression

    cases = coramin.third_party.filter_minlplib_instances(
        instancedata_filename=fname, acceptable_convex=[True])
    self.assertEqual(len(cases), 456)  # regression

    cases = coramin.third_party.filter_minlplib_instances(
        instancedata_filename=fname, min_nvars=2, max_nvars=200000)
    self.assertEqual(len(cases), total_cases - 16 - 1)  # unit

    cases = coramin.third_party.filter_minlplib_instances(
        instancedata_filename=fname, max_nbinvars=31000)
    self.assertEqual(len(cases), total_cases - 1)  # unit

    cases = coramin.third_party.filter_minlplib_instances(
        instancedata_filename=fname, max_nintvars=1999)
    self.assertEqual(len(cases), total_cases - 1)  # unit

    cases = coramin.third_party.filter_minlplib_instances(
        instancedata_filename=fname, max_ncons=164000)
    self.assertEqual(len(cases), total_cases - 1)  # unit

    cases = coramin.third_party.filter_minlplib_instances(
        instancedata_filename=fname, max_nsemi=13)
    self.assertEqual(len(cases), total_cases - 1)  # unit

    cases = coramin.third_party.filter_minlplib_instances(
        instancedata_filename=fname, max_nsos1=0, max_nsos2=0)
    self.assertEqual(len(cases), total_cases - 6)  # unit

    cases = coramin.third_party.filter_minlplib_instances(
        instancedata_filename=fname, max_nnlvars=199998)
    self.assertEqual(len(cases), total_cases - 1)  # unit

    cases = coramin.third_party.filter_minlplib_instances(
        instancedata_filename=fname, max_nnlbinvars=23867)
    self.assertEqual(len(cases), total_cases - 1)  # unit

    cases = coramin.third_party.filter_minlplib_instances(
        instancedata_filename=fname, max_nnlintvars=1999)
    self.assertEqual(len(cases), total_cases - 1)  # unit

    cases = coramin.third_party.filter_minlplib_instances(
        instancedata_filename=fname, max_nobjnz=99997)
    self.assertEqual(len(cases), total_cases - 1)  # unit

    cases = coramin.third_party.filter_minlplib_instances(
        instancedata_filename=fname, max_nobjnlnz=99997)
    self.assertEqual(len(cases), total_cases - 1)  # unit

    cases = coramin.third_party.filter_minlplib_instances(
        instancedata_filename=fname, max_nlincons=164319)
    self.assertEqual(len(cases), total_cases - 1)  # unit

    cases = coramin.third_party.filter_minlplib_instances(
        instancedata_filename=fname, max_nquadcons=139999)
    self.assertEqual(len(cases), total_cases - 1)  # unit

    cases = coramin.third_party.filter_minlplib_instances(
        instancedata_filename=fname, max_npolynomcons=13975)
    self.assertEqual(len(cases), total_cases - 1)  # unit

    cases = coramin.third_party.filter_minlplib_instances(
        instancedata_filename=fname, max_nsignomcons=801)
    self.assertEqual(len(cases), total_cases - 1)  # unit

    cases = coramin.third_party.filter_minlplib_instances(
        instancedata_filename=fname, max_ngennlcons=13975)
    self.assertEqual(len(cases), total_cases - 2)  # unit

    cases = coramin.third_party.filter_minlplib_instances(
        instancedata_filename=fname, max_njacobiannlnz=1623023)
    self.assertEqual(len(cases), total_cases - 1)  # unit

    cases = coramin.third_party.filter_minlplib_instances(
        instancedata_filename=fname, max_nlaghessiannz=1825419)
    self.assertEqual(len(cases), total_cases - 1)  # unit

    cases = coramin.third_party.filter_minlplib_instances(
        instancedata_filename=fname, max_nlaghessiandiagnz=100000)
    self.assertEqual(len(cases), total_cases - 1)  # unit

    cases = coramin.third_party.filter_minlplib_instances(
        instancedata_filename=fname, min_nnlsemi=1)
    self.assertEqual(len(cases), 0)  # unit

    cases = coramin.third_party.filter_minlplib_instances(
        instancedata_filename=fname,
        acceptable_objcurvature=['linear', 'convex'])
    self.assertEqual(len(cases), 1367)  # regression

    os.remove(fname)
    os.rmdir(os.path.join(current_dir, 'minlplib'))
def build_model():
    model = AbstractModel()

    # TODO: it looks like they set a BigM for each j, which I need to look up
    # how to do...
    model.BigM = Suffix(direction=Suffix.LOCAL)
    model.BigM[None] = 1000

    ## Constants from GAMS
    StorageTankSizeFactor = 2 * 5  # btw, I know 2*5 is 10... I don't know why it's written this way in GAMS
    StorageTankSizeFactorByProd = 3
    MinFlow = -log(10000)
    VolumeLB = log(300)
    VolumeUB = log(3500)
    StorageTankSizeLB = log(100)
    StorageTankSizeUB = log(15000)
    UnitsInPhaseUB = log(6)
    UnitsOutOfPhaseUB = log(6)
    # TODO: YOU ARE HERE. YOU HAVEN'T ACTUALLY MADE THESE THE BOUNDS YET, NOR
    # HAVE YOU FIGURED OUT WHOSE BOUNDS THEY ARE. AND THERE ARE MORE IN GAMS.

    ##########
    # Sets
    ##########
    model.PRODUCTS = Set()
    model.STAGES = Set(ordered=True)
    model.PARALLELUNITS = Set(ordered=True)

    # TODO: this seems like an over-complicated way to accomplish this task...
    def filter_out_last(model, j):
        return j != model.STAGES.last()
    model.STAGESExceptLast = Set(initialize=model.STAGES, filter=filter_out_last)

    # TODO: these aren't in the formulation??
    # model.STORAGE_TANKS = Set()

    ###############
    # Parameters
    ###############
    model.HorizonTime = Param()
    model.Alpha1 = Param()
    model.Alpha2 = Param()
    model.Beta1 = Param()
    model.Beta2 = Param()

    model.ProductionAmount = Param(model.PRODUCTS)
    model.ProductSizeFactor = Param(model.PRODUCTS, model.STAGES)
    model.ProcessingTime = Param(model.PRODUCTS, model.STAGES)

    # These are hard-coded in the GAMS file, hence the defaults
    model.StorageTankSizeFactor = Param(model.STAGES, default=StorageTankSizeFactor)
    model.StorageTankSizeFactorByProd = Param(
        model.PRODUCTS, model.STAGES, default=StorageTankSizeFactorByProd)

    # TODO: bonmin wasn't happy, and I think it might have something to do with
    # this? Or maybe issues with convexity or a lack thereof... I don't know yet.
    # I made PARALLELUNITS ordered so I could do this... Is that bad? And it does
    # index from 1, right?
    def get_log_coeffs(model, k):
        return log(model.PARALLELUNITS.ord(k))
    model.LogCoeffs = Param(model.PARALLELUNITS, initialize=get_log_coeffs)

    # bounds
    model.volumeLB = Param(model.STAGES, default=VolumeLB)
    model.volumeUB = Param(model.STAGES, default=VolumeUB)
    model.storageTankSizeLB = Param(model.STAGES, default=StorageTankSizeLB)
    model.storageTankSizeUB = Param(model.STAGES, default=StorageTankSizeUB)
    model.unitsInPhaseUB = Param(model.STAGES, default=UnitsInPhaseUB)
    model.unitsOutOfPhaseUB = Param(model.STAGES, default=UnitsOutOfPhaseUB)

    ################
    # Variables
    ################
    # TODO: right now these match the formulation. There are more in GAMS...
    # unit size of stage j
    # model.volume = Var(model.STAGES)
    # # TODO: GAMS has a batch size indexed just by products that isn't in the
    # # formulation... I'm going to try to avoid it for the moment...
    # # batch size of product i at stage j
    # model.batchSize = Var(model.PRODUCTS, model.STAGES)
    # # TODO: this is different in GAMS... They index by stages too?
    # # cycle time of product i divided by batch size of product i
    # model.cycleTime = Var(model.PRODUCTS)
    # # number of units in parallel out-of-phase (or in phase) at stage j
    # model.unitsOutOfPhase = Var(model.STAGES)
    # model.unitsInPhase = Var(model.STAGES)
    # # TODO: what are we going to do as a boundary condition here? For that
    # # last stage?
    # # size of intermediate storage tank between stage j and j+1
    # model.storageTankSize = Var(model.STAGES)

    # variables for convexified problem
    # TODO: I am beginning to think these are my only variables actually.
    # GAMS never un-logs them, I don't think. And I think the GAMS ones
    # must be the log ones.
    def get_volume_bounds(model, j):
        return (model.volumeLB[j], model.volumeUB[j])
    model.volume_log = Var(model.STAGES, bounds=get_volume_bounds)
    model.batchSize_log = Var(model.PRODUCTS, model.STAGES)
    model.cycleTime_log = Var(model.PRODUCTS)

    def get_unitsOutOfPhase_bounds(model, j):
        return (0, model.unitsOutOfPhaseUB[j])
    model.unitsOutOfPhase_log = Var(model.STAGES, bounds=get_unitsOutOfPhase_bounds)

    def get_unitsInPhase_bounds(model, j):
        return (0, model.unitsInPhaseUB[j])
    model.unitsInPhase_log = Var(model.STAGES, bounds=get_unitsInPhase_bounds)

    def get_storageTankSize_bounds(model, j):
        return (model.storageTankSizeLB[j], model.storageTankSizeUB[j])
    # TODO: these bounds make it infeasible...
    model.storageTankSize_log = Var(model.STAGES, bounds=get_storageTankSize_bounds)

    # binary variables for deciding number of parallel units in and out of phase
    model.outOfPhase = Var(model.STAGES, model.PARALLELUNITS, within=Binary)
    model.inPhase = Var(model.STAGES, model.PARALLELUNITS, within=Binary)

    ###############
    # Objective
    ###############
    def get_cost_rule(model):
        return model.Alpha1 * sum(
            exp(model.unitsInPhase_log[j] + model.unitsOutOfPhase_log[j] +
                model.Beta1 * model.volume_log[j])
            for j in model.STAGES) + \
            model.Alpha2 * sum(
                exp(model.Beta2 * model.storageTankSize_log[j])
                for j in model.STAGESExceptLast)
    model.min_cost = Objective(rule=get_cost_rule)

    ##############
    # Constraints
    ##############
    def processing_capacity_rule(model, j, i):
        return model.volume_log[j] >= (
            log(model.ProductSizeFactor[i, j]) + model.batchSize_log[i, j] -
            model.unitsInPhase_log[j])
    model.processing_capacity = Constraint(model.STAGES, model.PRODUCTS,
                                           rule=processing_capacity_rule)

    def processing_time_rule(model, j, i):
        return model.cycleTime_log[i] >= (
            log(model.ProcessingTime[i, j]) - model.batchSize_log[i, j] -
            model.unitsOutOfPhase_log[j])
    model.processing_time = Constraint(model.STAGES, model.PRODUCTS,
                                       rule=processing_time_rule)

    def finish_in_time_rule(model):
        return model.HorizonTime >= sum(
            model.ProductionAmount[i] * exp(model.cycleTime_log[i])
            for i in model.PRODUCTS)
    model.finish_in_time = Constraint(rule=finish_in_time_rule)

    ###############
    # Disjunctions
    ###############
    def storage_tank_selection_disjunct_rule(disjunct, selectStorageTank, j):
        model = disjunct.model()

        def volume_stage_j_rule(disjunct, i):
            return model.storageTankSize_log[j] >= (
                log(model.StorageTankSizeFactor[j]) + model.batchSize_log[i, j])

        def volume_stage_jPlus1_rule(disjunct, i):
            return model.storageTankSize_log[j] >= (
                log(model.StorageTankSizeFactor[j]) +
                model.batchSize_log[i, j + 1])

        def batch_size_rule(disjunct, i):
            return inequality(
                -log(model.StorageTankSizeFactorByProd[i, j]),
                model.batchSize_log[i, j] - model.batchSize_log[i, j + 1],
                log(model.StorageTankSizeFactorByProd[i, j]))

        def no_batch_rule(disjunct, i):
            return model.batchSize_log[i, j] - model.batchSize_log[i, j + 1] == 0

        if selectStorageTank:
            disjunct.volume_stage_j = Constraint(model.PRODUCTS,
                                                 rule=volume_stage_j_rule)
            disjunct.volume_stage_jPlus1 = Constraint(
                model.PRODUCTS, rule=volume_stage_jPlus1_rule)
            disjunct.batch_size = Constraint(model.PRODUCTS, rule=batch_size_rule)
        else:
            # The formulation says 0, but GAMS has this constant.
            # 04/04: Francisco says volume should be free:
            # disjunct.no_volume = Constraint(expr=model.storageTankSize_log[j] == MinFlow)
            disjunct.no_batch = Constraint(model.PRODUCTS, rule=no_batch_rule)
    model.storage_tank_selection_disjunct = Disjunct(
        [0, 1], model.STAGESExceptLast, rule=storage_tank_selection_disjunct_rule)

    def select_storage_tanks_rule(model, j):
        return [model.storage_tank_selection_disjunct[selectTank, j]
                for selectTank in [0, 1]]
    model.select_storage_tanks = Disjunction(model.STAGESExceptLast,
                                             rule=select_storage_tanks_rule)

    # Though this is a disjunction in the GAMS model, it is more efficiently
    # formulated this way:
    # TODO: what on earth is k?
    def units_out_of_phase_rule(model, j):
        return model.unitsOutOfPhase_log[j] == sum(
            model.LogCoeffs[k] * model.outOfPhase[j, k]
            for k in model.PARALLELUNITS)
    model.units_out_of_phase = Constraint(model.STAGES,
                                          rule=units_out_of_phase_rule)

    def units_in_phase_rule(model, j):
        return model.unitsInPhase_log[j] == sum(
            model.LogCoeffs[k] * model.inPhase[j, k]
            for k in model.PARALLELUNITS)
    model.units_in_phase = Constraint(model.STAGES, rule=units_in_phase_rule)

    # And since I didn't do the disjunction as a disjunction, we need the XORs:
    def units_out_of_phase_xor_rule(model, j):
        return sum(model.outOfPhase[j, k] for k in model.PARALLELUNITS) == 1
    model.units_out_of_phase_xor = Constraint(model.STAGES,
                                              rule=units_out_of_phase_xor_rule)

    def units_in_phase_xor_rule(model, j):
        return sum(model.inPhase[j, k] for k in model.PARALLELUNITS) == 1
    model.units_in_phase_xor = Constraint(model.STAGES,
                                          rule=units_in_phase_xor_rule)

    return model.create_instance(join(this_file_dir(), 'batch_processing.dat'))
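# Usage sketch (not part of the original file): the model above is a GDP, so
# the disjunctions must be transformed before a solver sees it. This assumes
# batch_processing.dat sits next to this script and that an MINLP solver such
# as BONMIN (the one mentioned in the comments above) is installed. Pyomo's
# 'gdp.bigm' transformation picks up the BigM suffix defined on the model.
def solve_with_bigm():
    from pyomo.environ import TransformationFactory, SolverFactory
    instance = build_model()
    # relax the storage-tank disjunctions into big-M constraints
    TransformationFactory('gdp.bigm').apply_to(instance)
    results = SolverFactory('bonmin').solve(instance, tee=True)
    print(results.solver.termination_condition)
    return instance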
import os
import shutil
import tempfile

from six import StringIO

import pyutilib.th as unittest
from pyutilib.subprocess import run

import pyomo.common.config as config
from pyomo.common.log import LoggingIntercept
from pyomo.common.fileutils import (
    this_file, this_file_dir, find_file, find_library, find_executable,
    PathManager, _system, _path, _exeExt, _libExt, _ExecutableData,
)

_this_file = this_file()
_this_file_dir = this_file_dir()


class TestFileUtils(unittest.TestCase):
    def setUp(self):
        self.tmpdir = None
        self.basedir = os.path.abspath(os.path.curdir)
        self.config = config.PYOMO_CONFIG_DIR
        self.ld_library_path = os.environ.get('LD_LIBRARY_PATH', None)
        self.path = os.environ.get('PATH', None)

    def tearDown(self):
        config.PYOMO_CONFIG_DIR = self.config
        os.chdir(self.basedir)
        if self.tmpdir:
            shutil.rmtree(self.tmpdir)
        if self.ld_library_path is None: