Example 1
def _process_load(cmd, _model, _data, _default, options=None):
    #print("LOAD %s" % cmd)
    _cmd_len = len(cmd)
    _options = {}
    if _cmd_len < 2:
        raise IOError("The 'load' command must specify a filename")
    _options['filename'] = cmd[1]
    i = 2
    while cmd[i] != ':':
        _options[cmd[i]] = cmd[i + 2]
        i += 3
    i += 1
    _Index = (None, [])
    if type(cmd[i]) is tuple:
        _Index = (None, cmd[i])
        i += 1
    elif i + 1 < _cmd_len and cmd[i + 1] == '=':
        _Index = (cmd[i], cmd[i + 2])
        i += 3
    _smap = OrderedDict()
    while i < _cmd_len:
        if i + 2 < _cmd_len and cmd[i + 1] == '=':
            _smap[cmd[i + 2]] = cmd[i]
            i += 3
        else:
            _smap[cmd[i]] = cmd[i]
            i += 1

    options = Options(**_options)
    for key in options:
        if not key in [
                'range', 'filename', 'format', 'using', 'driver', 'query',
                'table', 'user', 'password'
        ]:
            raise ValueError("Unknown load option '%s'" % key)

    global Filename
    Filename = options.filename

    global Lineno
    Lineno = 0
    #
    # TODO: process mapping info
    #
    if options.using is None:
        tmp = options.filename.split(".")[-1]
        data = DataManagerFactory(tmp)
        if data is None:
            raise pyutilib.common.ApplicationError(
                "Data manager '%s' is not available." % tmp)
    else:
        try:
            data = DataManagerFactory(options.using)
        except:
            data = None
        if data is None:
            raise pyutilib.common.ApplicationError(
                "Data manager '%s' is not available." % options.using)
    set_name = None
    param_name = None
    #
    # Create symbol map
    #
    symb_map = _smap
    if len(symb_map) == 0:
        raise IOError(
            "Must specify at least one set or parameter name that will be loaded"
        )
    #
    # Process index data
    #
    _index = None
    index_name = _Index[0]
    _select = None
    #
    # Set the 'set name' based on the format
    #
    _set = None
    if options.format == 'set' or options.format == 'set_array':
        if len(_smap) != 1:
            raise IOError(
                "A single set name must be specified when using format '%s'" %
                options.format)
        set_name = list(_smap.keys())[0]
        _set = set_name
    #
    # Set the 'param name' based on the format
    #
    _param = None
    if options.format == 'transposed_array' or options.format == 'array' or options.format == 'param':
        if len(_smap) != 1:
            raise IOError(
                "A single parameter name must be specified when using format '%s'"
                % options.format)
    if options.format in ('transposed_array', 'array', 'param', None):
        if _Index[0] is None:
            _index = None
        else:
            _index = _Index[0]
        _param = []
        _select = list(_Index[1])
        for key in _smap:
            _param.append(_smap[key])
            _select.append(key)
    if options.format in ('transposed_array', 'array'):
        _select = None

    #print "YYY", _param, options
    if not _param is None and len(
            _param) == 1 and not _model is None and isinstance(
                getattr(_model, _param[0]), Set):
        _select = None
        _set = _param[0]
        _param = None
        _index = None

    #print "SELECT", _param, _select
    #
    data.initialize(model=options.model,
                    filename=options.filename,
                    index=_index,
                    index_name=index_name,
                    param_name=symb_map,
                    set=_set,
                    param=_param,
                    format=options.format,
                    range=options.range,
                    query=options.query,
                    using=options.using,
                    table=options.table,
                    select=_select)
    #
    data.open()
    try:
        data.read()
    except Exception:
        data.close()
        raise
    data.close()
    data.process(_model, _data, _default)
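For orientation, here is a hypothetical tokenized 'load' command of the kind this parser walks; the actual token stream is produced by Pyomo's DAT-file parser, so the layout below is an assumption based on the loop logic above.

# hypothetical tokens for a DAT command such as "load data.tab : A ;"
# (no options before ':', a single symbol 'A' to read from the file)
cmd = ['load', 'data.tab', ':', 'A']
# _process_load(cmd, model, data, None) would then read component 'A' from data.tab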
Example 2
# Mimic the pyomo script
from pyomo.core import *
from pyomo import scripting
import pyomo.scripting.util  # makes scripting.util.* below resolvable
from pyutilib.misc import Options

# set high-level options that mimic the pyomo command line
options = Options()
options.model_file = 'DiseaseEstimation.py'
options.data_files = ['DiseaseEstimation.dat']
options.solver = 'ipopt'
options.solver_io = 'nl'
#options.keepfiles = True
#options.tee = True

# mimic the set of function calls done by pyomo command line
scripting.util.setup_environment(options)

# the following imports the model found in options.model_file,
# sets this to options.usermodel, and executes preprocessors
scripting.util.apply_preprocessing(options, parser=None)

# create the wrapper for the model, the data, the instance, and the options
model_data = scripting.util.create_model(options)
instance = model_data.instance

# solve
results, opt = scripting.util.apply_optimizer(options, instance)

# the following simply outputs the final time elapsed
scripting.util.finalize(options)

# load results into instance and print
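The final step named in the comment above is not shown; a minimal sketch, assuming the legacy instance API used elsewhere in these examples:

# load the solver results back into the instance and print it
instance.load(results)
instance.display()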
Example 3
# the opening 'try' is missing from this fragment; reconstructed here (IPShellEmbed import assumed)
try:
    from IPython.Shell import IPShellEmbed
    IPython_available = True
except:
    IPython_available=False
else:
    ipshell = IPShellEmbed([''],
                banner = '\n# Dropping into Python interpreter',
                exit_msg = '\n# Leaving Interpreter, back to Pyomo\n')

from pyutilib.misc import Options
try:
    from pympler import muppy
    from pympler import summary
    from pympler.asizeof import *
    pympler_available = True
except:
    pympler_available = False
memory_data = Options()

import pyutilib.misc
from pyomo.common.plugin import ExtensionPoint, Plugin, implements
from pyutilib.misc import Container
from pyutilib.services import TempfileManager

from pyomo.opt import ProblemFormat
from pyomo.opt.base import SolverFactory
from pyomo.opt.parallel import SolverManagerFactory
from pyomo.core import *
from pyomo.core.base import TextLabeler
import pyomo.core.base


filter_excepthook=False
Example 4
def _process_table(cmd, _model, _data, _default, options=None):
    #print("TABLE %s" % cmd)
    #
    _options = {}
    _set = OrderedDict()
    _param = OrderedDict()
    _labels = []

    _cmd = cmd[1]
    _cmd_len = len(_cmd)
    name = None
    i = 0
    while i < _cmd_len:
        try:
            #print("CMD i=%s cmd=%s" % (i, _cmd[i:]))
            #
            # This should not be error prone, so we treat errors
            # with a general exception
            #

            #
            # Processing labels
            #
            if _cmd[i] == ':':
                i += 1
                while i < _cmd_len:
                    _labels.append(_cmd[i])
                    i += 1
                continue
            #
            # Processing options
            #
            name = _cmd[i]
            if i + 1 == _cmd_len:
                _param[name] = []
                _labels = ['Z']
                i += 1
                continue
            if _cmd[i + 1] == '=':
                if type(_cmd[i + 2]) is list:
                    _set[name] = _cmd[i + 2]
                else:
                    _options[name] = _cmd[i + 2]
                i += 3
                continue
            # This should be a parameter declaration
            if not type(_cmd[i + 1]) is tuple:
                raise IOError
            if i + 2 < _cmd_len and _cmd[i + 2] == '=':
                _param[name] = (_cmd[i + 1], _cmd[i + 3][0])
                i += 4
            else:
                _param[name] = _cmd[i + 1]
                i += 2
        except:
            raise IOError("Error parsing table options: %s" % name)

    #print("_options %s" % _options)
    #print("_set %s" % _set)
    #print("_param %s" % _param)
    #print("_labels %s" % _labels)


    #
    options = Options(**_options)
    for key in options:
        if not key in ['columns']:
            raise ValueError("Unknown table option '%s'" % key)
    #
    ncolumns = options.columns
    if ncolumns is None:
        ncolumns = len(_labels)
        if ncolumns == 0:
            if not (len(_set) == 1 and len(_set[list(_set.keys())[0]]) == 0):
                raise IOError(
                    "Must specify either the 'columns' option or column headers"
                )
            else:
                ncolumns = 1
    else:
        ncolumns = int(ncolumns)
    #
    data = cmd[2]
    Ldata = len(cmd[2])
    #
    cmap = {}
    if len(_labels) == 0:
        for i in range(ncolumns):
            cmap[i + 1] = i
        for label in _param:
            ndx = cmap[_param[label][1]]
            if ndx < 0 or ndx >= ncolumns:
                raise IOError("Bad column value %s for data %s" %
                              (str(ndx), label))
            cmap[label] = ndx
            _param[label] = _param[label][0]
    else:
        i = 0
        for label in _labels:
            cmap[label] = i
            i += 1
    #print("CMAP %s" % cmap)
    #
    #print("_param %s" % _param)
    #print("_set %s" % _set)
    for sname in _set:
        # Creating set sname
        cols = _set[sname]
        tmp = []
        for col in cols:
            if not col in cmap:
                raise IOError(
                    "Unexpected table column '%s' for index set '%s'" %
                    (col, sname))
            tmp.append(cmap[col])
        if not sname in cmap:
            cmap[sname] = tmp
        cols = flatten(tmp)
        #
        _cmd = ['set', sname, ':=']
        i = 0
        while i < Ldata:
            row = []
            #print("COLS %s  NCOLS %d" % (cols, ncolumns))
            for col in cols:
                #print("Y %s %s" % (i, col))
                row.append(data[i + col])
            if len(row) > 1:
                _cmd.append(tuple(row))
            else:
                _cmd.append(row[0])
            i += ncolumns
        #print("_data %s" % _data)
        _process_set(_cmd, _model, _data)
    #
    #print("CMAP %s" % cmap)
    _i = 0
    if ncolumns == 0:
        raise IOError
    for vname in _param:
        _i += 1
        # create value vname
        cols = _param[vname]
        tmp = []
        for col in cols:
            #print("COL %s" % col)
            if not col in cmap:
                raise IOError(
                    "Unexpected table column '%s' for table value '%s'" %
                    (col, vname))
            tmp.append(cmap[col])
        #print("X %s %s" % (len(cols), tmp))
        cols = flatten(tmp)
        #print("X %s" % len(cols))
        #print("VNAME %s %s" % (vname, cmap[vname]))
        if vname in cmap:
            cols.append(cmap[vname])
        else:
            cols.append(ncolumns - 1 - (len(_param) - _i))
        #print("X %s" % len(cols))
        #
        _cmd = ['param', vname, ':=']
        i = 0
        while i < Ldata:
            #print("HERE %s %s %s" % (i, cols, ncolumns))
            for col in cols:
                _cmd.append(data[i + col])
            i += ncolumns
        #print("HERE %s" % _cmd)
        #print("_data %s" % _data)
        _process_param(_cmd, _model, _data, None, ncolumns=len(cols))
Example 5
    def __init__(self, **kwds):
        OptSolver.__init__(self, **kwds)

        self._pyomo_model = None
        """The pyomo model being solved."""

        self._solver_model = None
        """The python instance of the solver model (e.g., the gurobipy Model instance)."""

        self._symbol_map = SymbolMap()
        """A symbol map used to map between pyomo components and their names used with the solver."""

        self._labeler = None
        """The labeler for creating names for the solver model components."""

        self._pyomo_var_to_solver_var_map = ComponentMap()
        self._solver_var_to_pyomo_var_map = dict()
        """A dictionary mapping pyomo Var's to the solver variables."""

        self._pyomo_con_to_solver_con_map = dict()
        self._solver_con_to_pyomo_con_map = dict()
        """A dictionary mapping pyomo constraints to solver constraints."""

        self._vars_referenced_by_con = ComponentMap()
        """A dictionary mapping constraints to a ComponentSet containt the pyomo variables referenced by that
        constraint. This is primarily needed for the persistent solvers. When a constraint is deleted, we need
        to decrement the number of times those variables are referenced (see self._referenced_variables)."""

        self._vars_referenced_by_obj = ComponentSet()
        """A set containing the pyomo variables referenced by that the objective.
        This is primarily needed for the persistent solvers. When a the objective is deleted, we need
        to decrement the number of times those variables are referenced (see self._referenced_variables)."""

        self._objective = None
        """The pyomo Objective object currently being used with the solver."""

        self.results = None
        """A results object return from the solve method."""

        self._skip_trivial_constraints = False
        """A bool. If True, then any constraints with a constant body will not be added to the solver model.
        Be careful with this. If a trivial constraint is skipped then that constraint cannot be removed from
        a persistent solver (an error will be raised if a user tries to remove a non-existent constraint)."""

        self._output_fixed_variable_bounds = False
        """A bool. If False then an error will be raised if a fixed variable is used in one of the solver constraints.
        This is useful for catching bugs. Ordinarily a fixed variable should appear as a constant value in the
        solver constraints. If True, then the error will not be raised."""

        self._python_api_exists = False
        """A bool indicating whether or not the python api is available for the specified solver."""

        self._version = None
        """The version of the solver."""

        self._version_major = None
        """The major version of the solver. For example, if using Gurobi 7.0.2, then _version_major is 7."""

        self._symbolic_solver_labels = False
        """A bool. If true then the solver components will be given names corresponding to the pyomo component names."""

        self._capabilities = Options()

        self._referenced_variables = ComponentMap()
        """dict: {var: count} where count is the number of constraints/objective referencing the var"""

        self._keepfiles = False
        """A bool. If True, then the solver log will be saved."""

        self._save_results = True
        """A bool. This is used for backwards compatability. If True, the solution will be loaded into the Solution
Example 6
File: sc.py Project: CanLi1/pyomo-1
def pyomo_create_model(options=None, model_options=None):
    if model_options is None:
        model_options = Options()
    if model_options.type is None:
        model_options.type = 'fixed_set_size'
    #
    # m - number of elements
    #
    m = 100 if model_options.m is None else model_options.m
    #
    # n - number of sets
    #
    n = 200 if model_options.n is None else model_options.n
    seed = 9090 if model_options.seed is None else model_options.seed
    random.seed(seed)
    #
    if model_options.type == 'fixed_set_size':
        #
        # p   - fixed number elements per set
        # rho - fixed fraction of elements per set
        #
        p = model_options.p
        if p is None:
            if model_options.rho is None:
                p = int(math.ceil(m * 0.7))
            else:
                p = int(math.ceil(m * model_options.rho))
        #
        def S_rule(model):
            ans = set()
            for j in range(1, n + 1):
                tmp = list(range(1, m + 1))
                random.shuffle(tmp)
                for i in range(0, p):
                    ans.add((tmp[i], j))
            return ans
    elif model_options.type == 'fixed_element_coverage':
        #
        # p   - fixed number of sets that cover each element
        # rho - fixed fraction of sets that cover each element
        #
        p = model_options.p
        if p is None:
            if model_options.rho is None:
                p = int(math.ceil(n * 0.4))
            else:
                p = int(math.ceil(n * model_options.rho))
        #
        def S_rule(model):
            ans = set()
            for i in range(1, m + 1):
                tmp = list(range(1, n + 1))
                random.shuffle(tmp)
                for j in range(0, p):
                    ans.add((i, tmp[j]))
            return ans
    elif model_options.type == 'fixed_probability':
        #
        # rho - probability of selecting element for a set
        #
        rho = 0.3 if model_options.rho is None else model_options.rho

        #
        def S_rule(model):
            ans = set()
            for j in range(1, n + 1):
                for i in range(1, m + 1):
                    if random.uniform(0, 1) < rho:
                        ans.add((i, j))
            return ans
    elif model_options.type == 'fixed_fill':
        #
        # rho - |S|/(I*J)
        #
        rho = 0.3 if model_options.rho is None else model_options.rho

        #
        def S_rule(model):
            ans = set()
            for j in range(1, n + 1):
                for i in range(1, m + 1):
                    if random.uniform(0, 1) < rho:
                        ans.add((i, j))
            return ans

    #
    # CREATE MODEL
    #
    model = ConcreteModel()
    #
    # (i,j) in S if element i in set j
    #
    model.S = Set(dimen=2, initialize=S_rule)

    #
    # Dynamically create the I and J index sets, since
    # some rows or columns of S may not be populated.
    #
    def I_rule(model):
        return set((i for (i, j) in model.S))

    model.I = Set(initialize=I_rule)

    def J_rule(model):
        return set((j for (i, j) in model.S))

    model.J = Set(initialize=J_rule)
    #
    # Weights
    #
    model.w = Param(model.J, within=NonNegativeReals, initialize=1.0)
    #
    # Set selection binary variables
    #
    model.x = Var(model.J, within=Binary)

    #
    # Objective
    #
    def cost_rule(model):
        return sum_product(model.w, model.x)

    model.cost = Objective(rule=cost_rule)

    #
    # Constraint
    #
    def cover_rule(model, i):
        expr = 0
        for j in model.x:
            if (i, j) in model.S:
                expr += model.x[j]
        #
        # WEH - this check is not needed, since I is constructed dynamically
        #
        #if expr is 0:
        #return Constraint.Skip
        return expr >= 1

    model.cover = Constraint(model.I, rule=cover_rule)

    #
    print_model_stats(model_options, model)
    return model
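A hypothetical way to call this builder, assuming the surrounding sc.py module (which defines print_model_stats and the random/math imports) is importable:

# build a smaller fixed-probability instance of the set-covering model
from pyutilib.misc import Options
opts = Options()
opts.type = 'fixed_probability'
opts.m = 50      # number of elements
opts.n = 80      # number of candidate sets
opts.rho = 0.25  # probability an element is placed in a set
model = pyomo_create_model(model_options=opts)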
Example 7
    def __init__(self, **kwds):
        """ Constructor """
        #
        # The 'type' is the class type of the solver instance
        #
        if "type" in kwds:
            self.type = kwds["type"]
        else:  #pragma:nocover
            raise ValueError(
                "Expected option 'type' for OptSolver constructor")

        #
        # The 'name' is either the class type of the solver instance, or an
        # assigned name.
        #
        if "name" in kwds:
            self.name = kwds["name"]
        else:
            self.name = self.type

        if "doc" in kwds:
            self._doc = kwds["doc"]
        else:
            if self.type is None:  # pragma:nocover
                self._doc = ""
            elif self.name == self.type:
                self._doc = "%s OptSolver" % self.name
            else:
                self._doc = "%s OptSolver (type %s)" % (self.name, self.type)
        #
        # Options are persistent, meaning users must modify the
        # options dict directly rather than pass them into _presolve
        # through the solve command. Everything else is reset inside
        # presolve
        #
        self.options = Options()
        if 'options' in kwds and not kwds['options'] is None:
            for key in kwds['options']:
                setattr(self.options, key, kwds['options'][key])

        # the symbol map is an attribute of the solver plugin only
        # because it is generated in presolve and used to tag results
        # so they are interpretable - basically, it persists across
        # multiple methods.
        self._smap_id = None

        # These are ephemeral options that can be set by the user during
        # the call to solve, but will be reset to defaults if not given
        self._load_solutions = True
        self._select_index = 0
        self._report_timing = False
        self._suffixes = []
        self._log_file = None
        self._soln_file = None

        # overridden by a solver plugin when it returns sparse results
        self._default_variable_value = None
        # overridden by a solver plugin when it is always available
        self._assert_available = False
        # overridden by a solver plugin to indicate its input file format
        self._problem_format = None
        self._valid_problem_formats = []
        # overridden by a solver plugin to indicate its results file format
        self._results_format = None
        self._valid_result_formats = {}

        self._results_reader = None
        self._problem = None
        self._problem_files = None

        #
        # Used to document meta solvers
        #
        self._metasolver = False

        self._version = None
        #
        # Data for solver callbacks
        #
        self._allow_callbacks = False
        self._callback = {}

        # We define no capabilities for the generic solver; base
        # classes must override this
        self._capabilities = Options()
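A hypothetical construction call, assuming a concrete subclass that forwards its keyword arguments to this constructor (MySolver is illustrative, not a real class):

# 'type' is required; entries in 'options' are copied onto self.options
solver = MySolver(type='asl', name='ipopt', options={'max_iter': 500})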
Example 8
def create_test_suite(suite, config, _globals, options):
    #
    # Skip suite creation if the option categories do not intersect with the list of test suite categories
    #
    if len(options.categories) > 0:
        flag = False
        for cat in options.categories:
            if cat in config['suites'][suite].get('categories', []):
                flag = True
                break
        if not flag:
            return
    #
    # Create test driver
    #
    if suite in _globals:
        raise IOError(
            "Cannot create suite '%s' since there is another symbol with that name in the global namespace!"
            % suite)

    def setUpClassFn(cls):
        options = cls._options[None]
        cls._test_driver.setUpClass(cls, options)

    _globals[suite] = type(
        str(suite),
        (unittest.TestCase,), {'setUpClass': classmethod(setUpClassFn)})
    _globals[suite]._options[None] = options
    setattr(_globals[suite], '_test_driver', _globals['test_driver'])
    setattr(_globals[suite], 'suite_categories', config['suites'][suite].get(
        'categories', []))
    #
    # Create test functions
    #
    tests = []
    if 'tests' in config['suites'][suite]:
        for item in config['suites'][suite]['tests']:
            tests.append((item['solver'], item['problem'], item))
    else:
        for solver in config['suites'][suite]['solvers']:
            for problem in config['suites'][suite]['problems']:
                tests.append((solver, problem, {}))
    #
    for solver, problem, item in tests:
        ##sname = solver
        if options.testname_format is None:
            test_name = solver + "_" + problem
        else:
            test_name = options.testname_format % (solver, problem)
        #
        def fn(testcase, name, suite):
            options = testcase._options[suite, name]
            fn.test_driver.setUp(testcase, options)
            ans = fn.test_driver.run_test(testcase, name, options)
            fn.test_driver.tearDown(testcase, options)
            return ans

        fn.test_driver = _globals['test_driver']
        #
        _options = Options()
        #
        problem_options = config['suites'][suite]['problems'][problem]
        if not problem_options is None and 'problem' in problem_options:
            _problem = problem_options['problem']
        else:
            _problem = problem
        for attr, value in config['problems'].get(_problem, {}).items():
            _options[attr] = _str(value)
        if not problem_options is None:
            for attr, value in problem_options.items():
                _options[attr] = _str(value)
        #
        solver_options = config['suites'][suite]['solvers'][solver]
        if not solver_options is None and 'solver' in solver_options:
            _solver = solver_options['solver']
        else:
            _solver = solver
        _name = _solver
        for attr, value in config['solvers'].get(_solver, {}).items():
            _options[attr] = _str(value)
            if attr == 'name':
                _name = value
        if not solver_options is None:
            for attr, value in solver_options.items():
                _options[attr] = _str(value)
        #
        for key in item:
            if key not in ['problem', 'solver']:
                _options[key] = _str(item[key])
        #
        _options.solver = _str(_name)
        _options.problem = _str(_problem)
        _options.suite = _str(suite)
        _options.currdir = _str(options.currdir)
        #
        _globals[suite].add_fn_test(
            name=test_name, fn=fn, suite=suite, options=_options)
Example 9
def run_test_scenarios(options):
    logging.disable(logging.WARNING)

    solvers = set(options.solver)
    stat = {}

    for key, test_case in test_scenarios():
        model, solver, io = key
        if len(solvers) > 0 and not solver in solvers:
            continue
        if test_case.status == 'skip':
            continue

        # Create the model test class
        model_class = test_case.model()
        # Create the model instance
        model_class.generate_model()
        model_class.warmstart_model()
        # Solve
        symbolic_labels = False
        load_solutions = False
        opt, results = model_class.solve(
            solver,
            io,
            test_case.testcase.io_options,
            {},
            symbolic_labels,
            load_solutions)

        termination_condition = results['Solver'][0]['termination condition']
        # Validate solution status
        try:
            model_class.post_solve_test_validation(None, results)
        except:
            if test_case.status == 'expected failure':
                stat[key] = (True, "Expected failure")
            else:
                stat[key] = (False, "Unexpected termination condition: %s" % str(termination_condition))
            continue
        if termination_condition == TerminationCondition.unbounded or \
           termination_condition == TerminationCondition.infeasible:
            # Unbounded or Infeasible
            stat[key] = (True, "")
        else:
            # Validate the solution returned by the solver
            if isinstance(model_class.model, IBlockStorage):
                model_class.model.load_solution(results.solution)
            else:
                model_class.model.solutions.load_from(
                    results,
                    default_variable_value=opt.default_variable_value())
            rc = model_class.validate_current_solution(suffixes=model_class.test_suffixes)

            if test_case.status == 'expected failure':
                if rc[0] is True:
                    stat[key] = (False, "Unexpected success")
                else:
                    stat[key] = (True, "Expected failure")
            else:
                if rc[0] is True:
                    stat[key] = (True, "")
                else:
                    stat[key] = (False, "Unexpected failure")

    if options.verbose:
        print("---------------")
        print(" Test Failures")
        print("---------------")
    nfail = 0
    #
    # Summarize the runtime statistics, by solver
    #
    summary = {}
    total = Options(NumEPass=0, NumEFail=0, NumUPass=0, NumUFail=0)
    for key in stat:
        model, solver, io = key
        if not solver in summary:
            summary[solver] = Options(NumEPass=0, NumEFail=0, NumUPass=0, NumUFail=0)
        _pass, _str = stat[key]
        if _pass:
            if _str == "Expected failure":
                summary[solver].NumEFail += 1
            else:
                summary[solver].NumEPass += 1
        else:
            nfail += 1
            if _str == "Unexpected failure":
                summary[solver].NumUFail += 1
                if options.verbose:
                    print("- Unexpected Test Failure: "+", ".join((model, solver, io)))
            else:
                summary[solver].NumUPass += 1
                if options.verbose:
                    print("- Unexpected Test Success: "+", ".join((model, solver, io)))
    if options.verbose:
        if nfail == 0:
            print("- NONE")
        print("")

    stream = sys.stdout
    maxSolverNameLen = max([max(len(name) for name in summary), len("Solver")])
    fmtStr = "{{0:<{0}}}| {{1:>8}} | {{2:>8}} | {{3:>10}} | {{4:>10}} | {{5:>13}}\n".format(maxSolverNameLen + 2)
    #
    stream.write("\n")
    stream.write("Solver Test Summary\n")
    stream.write("=" * (maxSolverNameLen + 66) + "\n")
    stream.write(fmtStr.format("Solver", "# Pass", "# Fail", "# OK Fail", "# Bad Pass", "% OK"))
    stream.write("=" * (maxSolverNameLen + 66) + "\n")
    #
    for _solver in sorted(summary):
        ans = summary[_solver]
        total.NumEPass += ans.NumEPass
        total.NumEFail += ans.NumEFail
        total.NumUPass += ans.NumUPass
        total.NumUFail += ans.NumUFail
        stream.write(fmtStr.format(_solver, str(ans.NumEPass), str(ans.NumUFail), str(ans.NumEFail), str(ans.NumUPass), str(int(100.0*(ans.NumEPass+ans.NumEFail)/(ans.NumEPass+ans.NumEFail+ans.NumUFail+ans.NumUPass)))))
    #
    stream.write("=" * (maxSolverNameLen + 66) + "\n")
    stream.write(fmtStr.format("TOTALS", str(total.NumEPass), str(total.NumUFail), str(total.NumEFail), str(total.NumUPass), str(int(100.0*(total.NumEPass+total.NumEFail)/(total.NumEPass+total.NumEFail+total.NumUFail+total.NumUPass)))))
    stream.write("=" * (maxSolverNameLen + 66) + "\n")

    logging.disable(logging.NOTSET)
Example 10
def run_pyomo(options=Options(), parser=None):
    data = Options(options=options)

    if options.model.filename == '':
        parser.print_help()
        return Container()

    try:
        pyomo.scripting.util.setup_environment(data)

        pyomo.scripting.util.apply_preprocessing(data,
                                                 parser=parser)
    except:
        # TBD: I should be able to call this function in the case of
        #      an exception to perform cleanup. However, as it stands
        #      calling finalize with its default keyword value for
        #      model(=None) results in a different error related to
        #      task port values.  Not sure how to interpret that.
        pyomo.scripting.util.finalize(data,
                                      model=ConcreteModel(),
                                      instance=None,
                                      results=None)
        raise
    else:
        if data.error:
            # TBD: I should be able to call this function in the case of
            #      an exception to perform cleanup. However, as it stands
            #      calling finalize with its default keyword value for
            #      model(=None) results in a different error related to
            #      task port values.  Not sure how to interpret that.
            pyomo.scripting.util.finalize(data,
                                          model=ConcreteModel(),
                                          instance=None,
                                          results=None)
            return Container()                                   #pragma:nocover

    try:
        model_data = pyomo.scripting.util.create_model(data)
    except:
        # TBD: I should be able to call this function in the case of
        #      an exception to perform cleanup. However, as it stands
        #      calling finalize with its default keyword value for
        #      model(=None) results in a different error related to
        #      task port values.  Not sure how to interpret that.
        pyomo.scripting.util.finalize(data,
                                      model=ConcreteModel(),
                                      instance=None,
                                      results=None)
        raise
    else:
        if (((not options.runtime.logging == 'debug') and \
             options.model.save_file) or \
            options.runtime.only_instance):
            pyomo.scripting.util.finalize(data,
                                          model=model_data.model,
                                          instance=model_data.instance,
                                          results=None)
            return Container(instance=model_data.instance)

    try:
        opt_data = pyomo.scripting.util.apply_optimizer(data,
                                                        instance=model_data.instance)

        pyomo.scripting.util.process_results(data,
                                             instance=model_data.instance,
                                             results=opt_data.results,
                                             opt=opt_data.opt)

        pyomo.scripting.util.apply_postprocessing(data,
                                                  instance=model_data.instance,
                                                  results=opt_data.results)
    except:
        # TBD: I should be able to call this function in the case of
        #      an exception to perform cleanup. However, as it stands
        #      calling finalize with its default keyword value for
        #      model(=None) results in a different error related to
        #      task port values.  Not sure how to interpret that.
        pyomo.scripting.util.finalize(data,
                                      model=ConcreteModel(),
                                      instance=None,
                                      results=None)
        raise
    else:
        pyomo.scripting.util.finalize(data,
                                      model=model_data.model,
                                      instance=model_data.instance,
                                      results=opt_data.results)

        return Container(options=options,
                         instance=model_data.instance,
                         results=opt_data.results,
                         local=opt_data.local)
Example 11
def convert_dakota(options=Options(), parser=None):
    #
    # Import plugins
    #
    import pyomo.environ

    # derive the fragment name from the model (.py) file; save_file may still be None at this point
    model_file = os.path.basename(options.model.filename)
    model_file_no_ext = os.path.splitext(model_file)[0]

    #
    # Set options for writing the .nl and related files
    #

    # By default replace .py with .nl
    if options.model.save_file is None:
        options.model.save_file = model_file_no_ext + '.nl'
    options.model.save_format = ProblemFormat.nl
    # Dakota requires .row/.col files
    options.model.symbolic_solver_labels = True

    #
    # Call the core converter
    #
    model_data = convert(options, parser)

    #
    # Generate Dakota input file fragments for the Vars, Objectives, Constraints
    #

    # TODO: the converted model doesn't expose the right symbol_map
    #       for only the vars active in the .nl

    model = model_data.instance

    # Easy way
    #print "VARIABLE:"
    #lines = open(options.save_model.replace('.nl','.col'),'r').readlines()
    #for varName in lines:
    #    varName = varName.strip()
    #    var = model_data.symbol_map.getObject(varName)
    #    print "'%s': %s" % (varName, var)
    #    #print var.pprint()

    # Hard way
    variables = 0
    var_descriptors = []
    var_lb = []
    var_ub = []
    var_initial = []
    tmpDict = model_data.symbol_map.getByObjectDictionary()
    for var in model.component_data_objects(Var, active=True):
        if id(var) in tmpDict:
            variables += 1
            var_descriptors.append(var.name)

            # apply user bound, domain bound, or infinite
            _lb, _ub = var.bounds
            if _lb is not None:
                var_lb.append(str(_lb))
            else:
                var_lb.append("-inf")

            if _ub is not None:
                var_ub.append(str(_ub))
            else:
                var_ub.append("inf")

            try:
                val = value(var)
            except:
                val = None
            var_initial.append(str(val))

    objectives = 0
    obj_descriptors = []
    for obj in model.component_data_objects(Objective, active=True):
        objectives += 1
        obj_descriptors.append(obj.name)

    constraints = 0
    cons_descriptors = []
    cons_lb = []
    cons_ub = []
    for con in model.component_data_objects(Constraint, active=True):
        constraints += 1
        cons_descriptors.append(con.name)
        if con.lower is not None:
            cons_lb.append(str(con.lower))
        else:
            cons_lb.append("-inf")
        if con.upper is not None:
            cons_ub.append(str(con.upper))
        else:
            cons_ub.append("inf")

    # Write the Dakota input file fragments

    dakfrag = open(model_file_no_ext + ".dak", 'w')

    dakfrag.write("#--- Dakota variables block ---#\n")
    dakfrag.write("variables\n")
    dakfrag.write("  continuous_design " + str(variables) + '\n')
    dakfrag.write("    descriptors\n")
    for vd in var_descriptors:
        dakfrag.write("      '%s'\n" % vd)
    dakfrag.write("    lower_bounds " + " ".join(var_lb) + '\n')
    dakfrag.write("    upper_bounds " + " ".join(var_ub) + '\n')
    dakfrag.write("    initial_point " + " ".join(var_initial) + '\n')

    dakfrag.write("#--- Dakota interface block ---#\n")
    dakfrag.write("interface\n")
    dakfrag.write("  algebraic_mappings = '" + options.model.save_file + "'\n")

    dakfrag.write("#--- Dakota responses block ---#\n")
    dakfrag.write("responses\n")
    dakfrag.write("  objective_functions " + str(objectives) + '\n')

    if (constraints > 0):
        dakfrag.write("  nonlinear_inequality_constraints " +
                      str(constraints) + '\n')
        dakfrag.write("    lower_bounds " + " ".join(cons_lb) + '\n')
        dakfrag.write("    upper_bounds " + " ".join(cons_ub) + '\n')

    dakfrag.write("    descriptors\n")
    for od in obj_descriptors:
        dakfrag.write("      '%s'\n" % od)
    if (constraints > 0):
        for cd in cons_descriptors:
            dakfrag.write("      '%s'\n" % cd)

    # TODO: detect whether gradient information available in model
    dakfrag.write("  analytic_gradients\n")
    dakfrag.write("  no_hessians\n")

    dakfrag.close()

    sys.stdout.write("Dakota input fragment written to file '%s'\n" %
                     (model_file_no_ext + ".dak", ))
    return model_data
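For a hypothetical two-variable, one-objective, unconstrained model, the .dak fragment assembled above would look roughly like this (variable/objective names, bounds, and the .nl filename are illustrative only):

#--- Dakota variables block ---#
variables
  continuous_design 2
    descriptors
      'x[1]'
      'x[2]'
    lower_bounds 0.0 0.0
    upper_bounds inf inf
    initial_point 1.0 1.0
#--- Dakota interface block ---#
interface
  algebraic_mappings = 'model.nl'
#--- Dakota responses block ---#
responses
  objective_functions 1
    descriptors
      'obj'
  analytic_gradients
  no_hessians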
Example 12
    def __init__(self):
        self._info = {}
        self.options = Options()
Example 13
def run(argv, _globals=None):
    #
    # Set sys.argv to the value specified by the user
    #
    sys.argv = argv
    #
    # Create the option parser
    #
    parser = optparse.OptionParser()
    parser.remove_option('-h')
    #
    parser.add_option(
        '-h',
        '--help',
        action='store_true',
        dest='help',
        default=False,
        help='Print command options')
    #
    parser.add_option(
        '-d',
        '--debug',
        action='store_true',
        dest='debug',
        default=False,
        help='Set debugging flag')
    #
    parser.add_option(
        '-v',
        '--verbose',
        action='store_true',
        dest='verbose',
        default=False,
        help='Verbose output')
    #
    parser.add_option(
        '-q',
        '--quiet',
        action='store_true',
        dest='quiet',
        default=False,
        help='Minimal output')
    #
    parser.add_option(
        '-f',
        '--failfast',
        action='store_true',
        dest='failfast',
        default=False,
        help='Stop on first failure')
    #
    parser.add_option(
        '-c',
        '--catch',
        action='store_true',
        dest='catch',
        default=False,
        help='Catch control-C and display results')
    #
    parser.add_option(
        '-b',
        '--buffer',
        action='store_true',
        dest='buffer',
        default=False,
        help='Buffer stdout and stderr during test runs')
    #
    parser.add_option(
        '--cat',
        '--category',
        action='append',
        dest='categories',
        default=[],
        help='Define a list of categories that filter the execution of test suites')
    #
    parser.add_option(
        '--help-suites',
        action='store_true',
        dest='help_suites',
        default=False,
        help='Print the test suites that can be executed')
    #
    parser.add_option(
        '--help-tests',
        action='store',
        dest='help_tests',
        default=None,
        help='Print the tests in the specified test suite')
    #
    parser.add_option(
        '--help-categories',
        action='store_true',
        dest='help_categories',
        default=False,
        help='Print the test suite categories that can be specified')
    #
    # Parse the argument list and print help info if needed
    #
    _options, args = parser.parse_args(sys.argv)
    if _options.help:
        parser.print_help()

        print("""
Examples:
  %s                               - run all test suites
  %s MyTestCase.testSomething      - run MyTestCase.testSomething
  %s MyTestCase                    - run all 'test*' test methods
                                               in MyTestCase
""" % (args[0], args[0], args[0]))
        return
    #
    # If no value for _globals is specified, then we use the current context.
    #
    if _globals is None:
        _globals = globals()
    #
    # Set up an Options object and create test suites from the specified
    # configuration files.
    #
    options = Options()
    options.debug = _options.debug
    options.verbose = _options.verbose
    options.quiet = _options.quiet
    options.categories = _options.categories
    _argv = []
    for arg in args[1:]:
        if os.path.exists(arg):
            create_test_suites(filename=arg, _globals=_globals, options=options)
        else:
            _argv.append(arg)
    #
    # Collect information about the test suites:  suite names and categories
    #
    suites = []
    categories = set()
    for key in _globals.keys():
        if type(_globals[key]) is type and issubclass(_globals[key],
                                                      unittest.TestCase):
            suites.append(key)
            for c in _globals[key].suite_categories:
                categories.add(c)
    #
    # Process the --help-tests option
    #
    if _options.help_tests and not _globals is None:
        suite = _globals.get(_options.help_tests, None)
        if not type(suite) is type:
            print("Test suite '%s' not found!" % str(_options.help_tests))
            return cleanup(_globals, suites)
        tests = []
        for item in dir(suite):
            if item.startswith('test'):
                tests.append(item)
        print("")
        if len(tests) > 0:
            print("Tests defined in test suite '%s':" % _options.help_tests)
            for tmp in sorted(tests):
                print("    " + tmp)
        else:
            print("No tests defined in test suite '%s':" % _options.help_tests)
        print("")
        return cleanup(_globals, suites)
    #
    # Process the --help-suites and --help-categories options
    #
    if (_options.help_suites or
            _options.help_categories) and not _globals is None:
        if _options.help_suites:
            print("")
            if len(suites) > 0:
                print("Test suites defined in '%s':" %
                      os.path.basename(argv[0]))
                for suite in sorted(suites):
                    print("    " + suite)
            else:
                print("No test suites defined in '%s'!" %
                      os.path.basename(argv[0]))
            print("")
        if _options.help_categories:
            tmp = list(categories)
            print("")
            if len(tmp) > 0:
                print("Test suite categories defined in '%s':" %
                      os.path.basename(argv[0]))
                for c in sorted(tmp):
                    print("    " + c)
            else:
                print("No test suite categories defined in '%s':" %
                      os.path.basename(argv[0]))
            print("")
        return cleanup(_globals, suites)
    #
    # Reset the value of sys.argv per the expectations of the unittest module
    #
    tmp = [args[0]]
    if _options.quiet:
        tmp.append('-q')
    if _options.verbose or _options.debug:
        tmp.append('-v')
    if _options.failfast:
        tmp.append('-f')
    if _options.catch:
        tmp.append('-c')
    if _options.buffer:
        tmp.append('-b')
    tmp += _argv
    sys.argv = tmp
    #
    # Execute the unittest main function to run tests
    #
    unittest.main(module=_globals['__name__'])
    cleanup(_globals, suites)
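A hypothetical driver for this runner, assuming a configuration file in a format that create_test_suites understands:

# run only the suites tagged with the 'smoke' category defined in suites.yml (assumed file)
if __name__ == '__main__':
    run(['runner.py', 'suites.yml', '--cat', 'smoke'])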
Example 14
    def __init__(self, model_data, kda_results, output_directory, mipgap=.001):
        ### Process model_data for deterministic solve
        from MTSSP import PRDP_Data_Processing
        self.problem_data = PRDP_Data_Processing.MTSSP_PRDP_Data_Processing(
            model_data._data)

        self.results = kda_results.output['results']
        self.sp_realizations = kda_results.output['sub_problem_realizations']

        opt = SolverFactory("cplex")

        options = Options()
        opt.options.mip_tolerances_mipgap = mipgap

        ### Generate Non-Anticipativity Constraints
        self.PRDP_NAC_Generation()
        self.Scen_Spec_Parms()

        ### Generate Model
        from MSSP.defunction import de
        wrm_model_start_time = time.clock()
        model = de(
            self.problem_data.product, self.problem_data.stage_gate,
            self.problem_data.time_step, self.problem_data.resource_type,
            self.problem_data.SS, self.problem_data.resource_max,
            self.problem_data.gammaL, self.problem_data.gammaD,
            self.problem_data.duration, self.problem_data.trial_cost,
            self.problem_data.resource_required, self.problem_data.revenue_max,
            self.pb, self.problem_data.success,
            self.problem_data.Last_Time_Step, self.problem_data.last_trial,
            self.problem_data.running_revenue, self.problem_data.open_revenue,
            self.problem_data.discounting_factor, self.phi, self.phii,
            self.phij, self.outcome)

        ### Create Instance
        wrm_instnce_strt_time = time.clock()
        instance = model.create()

        del model
        gc.collect()

        for s in self.problem_data.SS:
            ### Calculate X
            xbox = self.Calculate_X(self.problem_data.List_of_Scenarios[s])

            ### Fix X Values in Instance
            for i in self.problem_data.product:
                for j in self.problem_data.stage_gate:
                    for t in self.problem_data.stage_gate:
                        idx = self.problem_data.product.index(i)
                        jdx = self.problem_data.stage_gate.index(j)
                        tdx = self.problem_data.stage_gate.index(t)
                        if xbox[idx][jdx][tdx]:
                            instance.Decision_X[i, j, t, s].value = 1

        ### Solve Model
        wrm_strt_time = time.clock()
        output = opt.solve(instance, warmstart=True)
        wrm_fnsh_time = time.clock()

        WS_Solve_Time = wrm_fnsh_time - wrm_strt_time
        WS_InstanceGen_Time = wrm_strt_time - wrm_instnce_strt_time
        WS_ModelCreate_Time = wrm_instnce_strt_time - wrm_model_start_time
        Warmstart_Total_Time = wrm_fnsh_time - wrm_model_start_time

        ### Clear Solution Variables
        del instance
        del output

        model_start_time = time.clock()

        ### Recreate Model and Solve for Deterministic Time
        model = de(
            self.problem_data.product, self.problem_data.stage_gate,
            self.problem_data.time_step, self.problem_data.resource_type,
            self.problem_data.SS, self.problem_data.resource_max,
            self.problem_data.gammaL, self.problem_data.gammaD,
            self.problem_data.duration, self.problem_data.trial_cost,
            self.problem_data.resource_required, self.problem_data.revenue_max,
            self.pb, self.problem_data.success,
            self.problem_data.Last_Time_Step, self.problem_data.last_trial,
            self.problem_data.running_revenue, self.problem_data.open_revenue,
            self.problem_data.discounting_factor, self.phi, self.phii,
            self.phij, self.outcome)

        instnce_strt_time = time.clock()

        ### Create Instance
        instance = model.create()

        ### time and solve DE_Version
        strt_time = time.clock()
        de_results = opt.solve(instance)
        fnsh_time = time.clock()

        NWS_Solve_Time = fnsh_time - strt_time
        NWS_InstanceGen_Time = strt_time - instnce_strt_time
        NWS_ModelCreate_Time = instnce_strt_time - model_start_time
        Total_Time = fnsh_time - model_start_time

        ### Load Results
        instance.load(de_results)

        ### Transform Results
        transformed_results = instance.update_results(de_results)

        ### Write File
        self.Output_Write(transformed_results, Warmstart_Total_Time,
                          WS_Solve_Time, WS_InstanceGen_Time,
                          WS_ModelCreate_Time, Total_Time, NWS_Solve_Time,
                          NWS_InstanceGen_Time, NWS_ModelCreate_Time,
                          output_directory)
Example 15
    def solve(self, *args, **kwds):
        """ Solve the problem """

        self.available(exception_flag=True)
        #
        # If the inputs are models, then validate that they have been
        # constructed! Collect suffix names to try and import from solution.
        #
        from pyomo.core.base.block import _BlockData
        import pyomo.core.base.suffix
        from pyomo.core.kernel.block import IBlock
        import pyomo.core.kernel.suffix
        _model = None
        for arg in args:
            if isinstance(arg, (_BlockData, IBlock)):
                if isinstance(arg, _BlockData):
                    if not arg.is_constructed():
                        raise RuntimeError(
                            "Attempting to solve model=%s with unconstructed "
                            "component(s)" % (arg.name, ))

                _model = arg
                # import suffixes must be on the top-level model
                if isinstance(arg, _BlockData):
                    model_suffixes = list(name for (name,comp) \
                                          in pyomo.core.base.suffix.\
                                          active_import_suffix_generator(arg))
                else:
                    assert isinstance(arg, IBlock)
                    model_suffixes = list(comp.storage_key for comp
                                          in pyomo.core.kernel.suffix.\
                                          import_suffix_generator(arg,
                                                                  active=True,
                                                                  descend_into=False))

                if len(model_suffixes) > 0:
                    kwds_suffixes = kwds.setdefault('suffixes', [])
                    for name in model_suffixes:
                        if name not in kwds_suffixes:
                            kwds_suffixes.append(name)

        #
        # Handle ephemeral solver options here. These
        # will override whatever is currently in the options
        # dictionary, but we will reset these options to
        # their original value at the end of this method.
        #

        orig_options = self.options

        self.options = Options()
        self.options.update(orig_options)
        self.options.update(kwds.pop('options', {}))
        self.options.update(
            self._options_string_to_dict(kwds.pop('options_string', '')))
        try:

            # we're good to go.
            initial_time = time.time()

            self._presolve(*args, **kwds)

            presolve_completion_time = time.time()
            if self._report_timing:
                print("      %6.2f seconds required for presolve" %
                      (presolve_completion_time - initial_time))

            if not _model is None:
                self._initialize_callbacks(_model)

            _status = self._apply_solver()
            if hasattr(self, '_transformation_data'):
                del self._transformation_data
            if not hasattr(_status, 'rc'):
                logger.warning(
                    "Solver (%s) did not return a solver status code.\n"
                    "This is indicative of an internal solver plugin error.\n"
                    "Please report this to the Pyomo developers." % self.name)
            elif _status.rc:
                logger.error("Solver (%s) returned non-zero return code (%s)" %
                             (
                                 self.name,
                                 _status.rc,
                             ))
                if self._tee:
                    logger.error(
                        "See the solver log above for diagnostic information.")
                elif hasattr(_status, 'log') and _status.log:
                    logger.error("Solver log:\n" + str(_status.log))
                raise ApplicationError("Solver (%s) did not exit normally" %
                                       self.name)
            solve_completion_time = time.time()
            if self._report_timing:
                print("      %6.2f seconds required for solver" %
                      (solve_completion_time - presolve_completion_time))

            result = self._postsolve()
            result._smap_id = self._smap_id
            result._smap = None
            if _model:
                if isinstance(_model, IBlock):
                    if len(result.solution) == 1:
                        result.solution(0).symbol_map = \
                            getattr(_model, "._symbol_maps")[result._smap_id]
                        result.solution(0).default_variable_value = \
                            self._default_variable_value
                        if self._load_solutions:
                            _model.load_solution(result.solution(0))
                    else:
                        assert len(result.solution) == 0
                    # see the hack in the write method
                    # we don't want this to stick around on the model
                    # after the solve
                    assert len(getattr(_model, "._symbol_maps")) == 1
                    delattr(_model, "._symbol_maps")
                    del result._smap_id
                    if self._load_solutions and \
                       (len(result.solution) == 0):
                        logger.error("No solution is available")
                else:
                    if self._load_solutions:
                        _model.solutions.load_from(result,
                                                   select=self._select_index,
                                                   default_variable_value=self.
                                                   _default_variable_value)
                        result._smap_id = None
                        result.solution.clear()
                    else:
                        result._smap = _model.solutions.symbol_map[
                            self._smap_id]
                        _model.solutions.delete_symbol_map(self._smap_id)
            postsolve_completion_time = time.time()

            if self._report_timing:
                print("      %6.2f seconds required for postsolve" %
                      (postsolve_completion_time - solve_completion_time))

        finally:
            #
            # Reset the options dict
            #
            self.options = orig_options

        return result
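A hypothetical call showing the ephemeral options handled above; 'options' and 'suffixes' are the keyword names this method reads, while 'mipgap' is an assumed solver-specific option:

# per-call options are merged into self.options, then restored after the solve returns
results = opt.solve(model,
                    options={'mipgap': 0.01},
                    suffixes=['dual', 'rc'])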
Example 16
#model.obj = Objective(expr = math.sqrt(((model.p - model.x)**2) + ((model.q - model.y)**2)))
model.obj = Objective(expr=(((model.p - model.x)**2) +
                            ((model.q - model.y)**2))**0.5)

# Constraints
model.KeineAhnung = Constraint(expr=((model.x / model.length)**2) +
                               ((model.y / model.width)**2) - 1 >= 0)

model.pprint()

model.skip_canonical_repn = True  # for nonlinear models

instance = model.create()

SolverName = "asl"
so = Options()
so.solver = "ipopt"
opt = SolverFactory(SolverName, options=so)

if opt is None:
    print("Could not construct solver %s : %s" % (SolverName, so.solver))
    sys.exit(1)

results = opt.solve(instance)
results.write()
instance.load(results)  # put results in model

# because we know there is a variable named x
x_var = getattr(instance, "x")
x_val = x_var()
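A short follow-up in the same style, assuming the usual "from pyomo.core import *" namespace so that value() is available:

# report the solution point and the objective value
y_var = getattr(instance, "y")
print("x = %s, y = %s" % (x_val, y_var()))
print("objective = %s" % value(instance.obj))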
Example 17
def EOSS_PRDP_Solve(s, problem_data, fixed_parameters,output_directory):
	opt = SolverFactory("cplex")

	options = Options()
	opt.options.mip_tolerances_mipgap = .000001
	opt.options.mip_tolerances_absmipgap = .000001
	
	SSsuccess = {}
	
	### Redefine Success
	for i in problem_data.product:
		SSsuccess[i] = problem_data.success[(i,s)]	
		
	### Redefine Outcome
	SSoutcome = problem_data.List_of_Scenarios[s].outcome
		
	### Redefine Probability
	SSProbability = problem_data.List_of_Scenarios[s].probability
		
	model = SingleScenario(problem_data.product,problem_data.stage_gate,problem_data.time_step,problem_data.resource_type,problem_data.resource_max,problem_data.gammaL,problem_data.gammaD,problem_data.duration,problem_data.trial_cost,problem_data.resource_required, problem_data.revenue_max,SSProbability, SSsuccess,problem_data.Last_Time_Step, problem_data.last_trial, problem_data.running_revenue, problem_data.open_revenue, problem_data.discounting_factor, SSoutcome)
		
	instance = model.create()
	
	###################################################################
	### Determine which fixed parameters are applicable to this problem
	###################################################################
	new_fixed_parameters = EOSS_Fixed_Parameters(fixed_parameters,SSoutcome)
	
	for items in new_fixed_parameters:	
		var_i = items[0]
		var_j = items[1]
		var_t = items[2] + 1
		decision = items[3]
		instance.Decision_X[var_i,var_j,var_t] = decision
		instance.Decision_X[var_i,var_j,var_t].fixed = True
		
		
	del model
	gc.collect()
	
	instance.preprocess()	
	results= opt.solve(instance)
	instance.load(results)	
	"""
	save_file = str(s) 
	
	### Open save file
	if not os.path.exists(output_directory):
		os.makedirs(output_directory)
		
	f = open(os.path.join(output_directory, save_file),	"w")
	
	transformed_results = instance.update_results(results)
	tr = str(transformed_results)
	f.write(tr + '\n')
	f.close()
	"""	
	
	if results.solver.status == SolverStatus.ok and results.solver.termination_condition == TerminationCondition.optimal:
		return [s,results.solution.objective['__default_objective__']['Value']]
	else:
		save_file = "Scenario ", s
		if not os.path.exists(output_directory):
			os.makedirs(output_directory)
		
		f = open(os.path.join(output_directory, save_file),	"w")
	
		transformed_results = instance.update_results(results)
		tr = str(transformed_results)
		f.write(tr + '\n')
		f.close()
		exit()