Example #1
    def test_k_rxn(self):
        m = self._make_model()
        rxn_block = m.fs.reaction_block

        n_scen = 4
        mols = pyunits.mol/pyunits.s
        kgs = pyunits.kg/pyunits.s
        K = pyunits.K
        bar = pyunits.bar
        state_values = {
            "gas_state.flow_mol": [1.0*mols]*n_scen,
            "solid_state.flow_mass": [1.0*kgs]*n_scen,
            "gas_state.temperature": [1000.0*K, 1100.0*K, 1200.0*K, 1300.0*K],
            "solid_state.temperature": [1000.0*K, 1100.0*K, 1200.0*K, 1300.0*K],
            "gas_state.pressure": [1.0*bar]*n_scen,
            "solid_state.particle_porosity": [0.27]*n_scen,
            "gas_state.mole_frac_comp[O2]": [0.25]*n_scen,
            "gas_state.mole_frac_comp[N2]": [0.25]*n_scen,
            "gas_state.mole_frac_comp[H2O]": [0.25]*n_scen,
            "gas_state.mole_frac_comp[CO2]": [0.25]*n_scen,
            "solid_state.mass_frac_comp[Fe2O3]": [1.0/3.0]*n_scen,
            "solid_state.mass_frac_comp[Fe3O4]": [1.0/3.0]*n_scen,
            "solid_state.mass_frac_comp[Al2O3]": [1.0/3.0]*n_scen,
            }
        state_values = ComponentMap((m.fs.find_component(name), values)
                for name, values in state_values.items())

        # Units of k_rxn are "non-physical" SI units, chosen to be
        # consistent with the reaction rate rule.
        target_values = {
                "reaction_block.k_rxn[R1]": [
                    5.7556e-5,
                    6.7076e-5,
                    7.6203e-5,
                    8.4888e-5,
                    ],
                }
        target_values = ComponentMap((m.fs.find_component(name), values)
                for name, values in target_values.items())

        assert degrees_of_freedom(m.fs) == 0

        param_sweeper = ParamSweeper(n_scen, state_values,
                output_values=target_values)
        with param_sweeper:
            for inputs, outputs in param_sweeper:
                solve_strongly_connected_components(m.fs)

                # Make sure property equalities have been converged
                assert number_large_residuals(m.fs, tol=1e-8) == 0

                # Sanity check that inputs are properly set
                for var, val in inputs.items():
                    val = value(pyunits.convert(val, var.get_units()))
                    assert var.value == pytest.approx(value(val), rel=1e-3)

                # Make sure properties have been calculated as expected
                for var, val in outputs.items():
                    val = value(pyunits.convert(val, var.get_units()))
                    assert var.value == pytest.approx(value(val), rel=1e-3)
Example #2
 def test_items(self):
     cmap = ComponentMap(self._components)
     for x in cmap.items():
         self.assertEqual(type(x), tuple)
         self.assertEqual(len(x), 2)
     self.assertEqual(
         sorted(cmap.items(), key=lambda _x: (id(_x[0]), _x[1])),
         sorted(self._components, key=lambda _x: (id(_x[0]), _x[1])))
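
The sort key in test_items above compares by id() because ComponentMap stores
entries keyed on component identity rather than on equality of the components
themselves. A minimal sketch of that behavior (model and variable names are
illustrative; the ComponentMap import path may differ slightly between Pyomo
versions):

import pyomo.environ as pyo
from pyomo.common.collections import ComponentMap

m = pyo.ConcreteModel()
m.x = pyo.Var(initialize=1.0)
m.y = pyo.Var(initialize=1.0)

# Keys are matched by identity: x and y remain distinct keys even though
# their current values are equal.
cmap = ComponentMap([(m.x, "first"), (m.y, "second")])
assert cmap[m.x] == "first"
assert cmap[m.y] == "second"

# items() yields (component, value) tuples, which is what test_items checks.
for comp, val in cmap.items():
    print(comp.name, val)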
Example #3
    def test_mw(self):
        m = self._make_model()
        state = m.fs.state

        # Define a somewhat arbitrary set of values we'd like to use to
        # test molecular weight.
        n_scenario = 7
        state_values = {
            "flow_mol": [1.0 * pyunits.mol / pyunits.s] * n_scenario,
            "temperature": [300.0 * pyunits.K] * n_scenario,
            "pressure": [1.0 * pyunits.bar] * n_scenario,
            "mole_frac_comp[O2]": [1.0, 0.5, 0.25, 0.0, 0.0, 0.0, 0.0],
            "mole_frac_comp[N2]": [0.0, 0.5, 0.25, 1.0, 0.0, 0.0, 0.0],
            "mole_frac_comp[H2O]": [0.0, 0.0, 0.25, 0.0, 1.0, 0.0, 0.5],
            "mole_frac_comp[CO2]": [0.0, 0.0, 0.25, 0.0, 0.0, 1.0, 0.5],
        }
        state_values = ComponentMap((state.find_component(name), values)
                                    for name, values in state_values.items())
        target_values = {
            "mw": [
                32.0 * pyunits.g / pyunits.mol,
                30.0 * pyunits.g / pyunits.mol,
                30.5 * pyunits.g / pyunits.mol,
                28.0 * pyunits.g / pyunits.mol,
                18.0 * pyunits.g / pyunits.mol,
                44.0 * pyunits.g / pyunits.mol,
                31.0 * pyunits.g / pyunits.mol,
            ]
        }
        target_values = ComponentMap((state.find_component(name), values)
                                     for name, values in target_values.items())

        # Construct mw and all prerequisites
        state.mw

        param_sweeper = ParamSweeper(
            n_scenario,
            state_values,
            output_values=target_values,
        )
        with param_sweeper:
            for inputs, target in param_sweeper:
                solve_strongly_connected_components(state)

                # Check that the state block has been solved correctly
                assert number_large_residuals(state, tol=1e-8) == 0

                # Sanity check that inputs have been set properly
                for var, val in inputs.items():
                    val = value(pyunits.convert(val, var.get_units()))
                    assert var.value == pytest.approx(val, abs=1e-8)

                # Check that the state block computes the property values
                # we expect
                for var, val in target.items():
                    val = value(pyunits.convert(val, var.get_units()))
                    assert var.value == pytest.approx(val, abs=1e-8)
Example #4
    def _estimate_M(self, expr, name):
        # If there are fixed variables here, unfix them for this calculation,
        # and we'll restore them at the end.
        fixed_vars = ComponentMap()
        if not self.assume_fixed_vars_permanent:
            for v in EXPR.identify_variables(expr, include_fixed=True):
                if v.fixed:
                    fixed_vars[v] = value(v)
                    v.fixed = False

        expr_lb, expr_ub = compute_bounds_on_expr(expr)
        if expr_lb is None or expr_ub is None:
            raise GDP_Error("Cannot estimate M for unbounded "
                            "expressions.\n\t(found while processing "
                            "constraint '%s'). Please specify a value of M "
                            "or ensure all variables that appear in the "
                            "constraint are bounded." % name)
        else:
            M = (expr_lb, expr_ub)

        # clean up if we unfixed things (fixed_vars is empty if we were assuming
        # fixed vars are fixed for life)
        for v, val in fixed_vars.items():
            v.fix(val)

        return tuple(M)
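
The fixed_vars ComponentMap in _estimate_M implements an unfix-then-restore
pattern: remember the value of each fixed variable, unfix it for the bound
calculation, and re-fix it afterwards. A small standalone sketch of the same
idea (hypothetical model; import paths may vary across Pyomo versions):

import pyomo.environ as pyo
from pyomo.common.collections import ComponentMap
from pyomo.core.expr.visitor import identify_variables

m = pyo.ConcreteModel()
m.x = pyo.Var(bounds=(0, 2))
m.y = pyo.Var(bounds=(0, 3))
m.y.fix(1.5)
expr = m.x + 2 * m.y

# Record the values of fixed variables, then unfix them for the calculation.
fixed_vars = ComponentMap()
for v in identify_variables(expr, include_fixed=True):
    if v.fixed:
        fixed_vars[v] = pyo.value(v)
        v.fixed = False

# ... a bound computation that should treat every variable as free goes here ...

# Restore the fixed status and values recorded above.
for v, val in fixed_vars.items():
    v.fix(val)

assert m.y.fixed and pyo.value(m.y) == 1.5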
Example #5
    def _transform_constraintData(self, logical_constraint, new_varlists,
                                  transBlocks):
        # first find all the relevant BooleanVars and associate a binary (if
        # they don't have one already)
        for bool_vardata in identify_variables(logical_constraint.expr):
            if bool_vardata.ctype is BooleanVar:
                self._transform_boolean_varData(bool_vardata, new_varlists)

        # now create a transformation block on the constraint's parent block (if
        # we don't have one already)
        parent_block = logical_constraint.parent_block()
        xfrm_block = transBlocks.get(parent_block)
        if xfrm_block is None:
            xfrm_block = self._create_transformation_block(parent_block)
            transBlocks[parent_block] = xfrm_block
        new_constrlist = xfrm_block.transformed_constraints
        new_boolvarlist = xfrm_block.augmented_vars
        new_varlist = xfrm_block.augmented_vars_asbinary

        old_boolvarlist_length = len(new_boolvarlist)

        indicator_map = ComponentMap()
        cnf_statements = to_cnf(logical_constraint.body, new_boolvarlist,
                                indicator_map)
        logical_constraint.deactivate()

        # Associate new Boolean vars to new binary variables
        num_new = len(new_boolvarlist) - old_boolvarlist_length
        list_o_vars = list(new_boolvarlist.values())
        if num_new:
            for bool_vardata in list_o_vars[-num_new:]:
                new_binary_vardata = new_varlist.add()
                bool_vardata.associate_binary_var(new_binary_vardata)

        # Add constraints associated with each CNF statement
        for cnf_statement in cnf_statements:
            for linear_constraint in _cnf_to_linear_constraint_list(
                    cnf_statement):
                new_constrlist.add(expr=linear_constraint)

        # Add bigM associated with special atoms
        # Note: this ad-hoc reformulation may be revisited for tightness in the
        # future.
        old_varlist_length = len(new_varlist)
        for indicator_var, special_atom in indicator_map.items():
            for linear_constraint in _cnf_to_linear_constraint_list(
                    special_atom,
                    indicator_var,
                    new_varlist):
                new_constrlist.add(expr=linear_constraint)

        # Previous step may have added auxiliary binaries. Associate augmented
        # Booleans to them.
        num_new = len(new_varlist) - old_varlist_length
        list_o_vars = list(new_varlist.values())
        if num_new:
            for binary_vardata in list_o_vars[-num_new:]:
                new_bool_vardata = new_boolvarlist.add()
                new_bool_vardata.associate_binary_var(binary_vardata)
Example #6
    def test_dens_mass_particle(self):
        m = self._make_model()
        state = m.fs.state

        n_scen = 3
        state_values = {
            "flow_mass": [1.0 * pyunits.kg / pyunits.s] * n_scen,
            "temperature": [1200.0 * pyunits.K] * n_scen,
            "particle_porosity": [0.22, 0.27, 0.32],
            "mass_frac_comp[Fe2O3]": [1.0 / 3.0] * n_scen,
            "mass_frac_comp[Fe3O4]": [1.0 / 3.0] * n_scen,
            "mass_frac_comp[Al2O3]": [1.0 / 3.0] * n_scen,
        }
        state_values = ComponentMap((state.find_component(name), values)
                                    for name, values in state_values.items())
        kgm3 = pyunits.kg / pyunits.m**3
        target_values = {
            "dens_mass_particle": [
                3648.888 * kgm3,
                3414.985 * kgm3,
                3181.081 * kgm3,
            ],
        }
        target_values = ComponentMap((state.find_component(name), values)
                                     for name, values in target_values.items())

        param_sweeper = ParamSweeper(
            n_scen,
            state_values,
            output_values=target_values,
        )
        with param_sweeper:
            for inputs, outputs in param_sweeper:
                solve_strongly_connected_components(state)

                # Check that the state block equations have been converged
                assert number_large_residuals(state, tol=1e-8) == 0

                # Sanity check that inputs have been set properly
                for var, val in inputs.items():
                    val = value(pyunits.convert(val, var.get_units()))
                    assert var.value == pytest.approx(value(val), abs=1e-3)

                for var, val in outputs.items():
                    val = value(pyunits.convert(val, var.get_units()))
                    assert var.value == pytest.approx(value(val), abs=1e-3)
Example #7
def _map_variable_stages(model):

    variable_stage_annotation = locate_annotations(model,
                                                   VariableStageAnnotation,
                                                   max_allowed=1)
    if len(variable_stage_annotation) == 0:
        raise ValueError("Reference model is missing variable stage "
                         "annotation: %s" % (VariableStageAnnotation.__name__))
    else:
        assert len(variable_stage_annotation) == 1
        variable_stage_annotation = variable_stage_annotation[0][1]

    variable_stage_assignments = ComponentMap(
        variable_stage_annotation.expand_entries())
    if len(variable_stage_assignments) == 0:
        raise ValueError("At least one variable stage assignment "
                         "is required.")

    min_stagenumber = min(variable_stage_assignments.values(),
                          key=lambda x: x[0])[0]
    max_stagenumber = max(variable_stage_assignments.values(),
                          key=lambda x: x[0])[0]
    if max_stagenumber > 2:
        for var, (stagenum, derived) in \
              variable_stage_assignments.items():
            if stagenum > 2:
                raise ValueError(
                    "Embedded stochastic programs must be two-stage "
                    "(for now), but variable with name '%s' has been "
                    "annotated with stage number: %s" % (var.name, stagenum))

    stage_to_variables_map = {}
    stage_to_variables_map[1] = []
    stage_to_variables_map[2] = []
    for var in model.component_data_objects(
            Var,
            active=True,
            descend_into=True,
            sort=SortComponents.alphabetizeComponentAndIndex):
        stagenumber, derived = \
            variable_stage_assignments.get(var, (2, False))
        if (stagenumber != 1) and (stagenumber != 2):
            raise ValueError("Invalid stage annotation for variable with "
                             "name '%s'. Stage assignment must be 1 or 2. "
                             "Current value: %s" % (var.name, stagenumber))
        if (stagenumber == 1):
            stage_to_variables_map[1].append((var, derived))
        else:
            assert stagenumber == 2
            stage_to_variables_map[2].append((var, derived))

    variable_to_stage_map = ComponentMap()
    for stagenum, stagevars in stage_to_variables_map.items():
        for var, derived in stagevars:
            variable_to_stage_map[var] = (stagenum, derived)

    return (stage_to_variables_map, variable_to_stage_map,
            variable_stage_assignments)
Example #8
    def test_dens_mass_skeletal(self):
        m = self._make_model()
        state = m.fs.state

        n_scen = 4
        state_values = {
            "flow_mass": [1.0 * pyunits.kg / pyunits.s] * n_scen,
            "temperature": [1200.0 * pyunits.K] * n_scen,
            "particle_porosity": [0.27] * n_scen,
            "mass_frac_comp[Fe2O3]": [1.0, 0.0, 0.0, 1.0 / 3.0],
            "mass_frac_comp[Fe3O4]": [0.0, 1.0, 0.0, 1.0 / 3.0],
            "mass_frac_comp[Al2O3]": [0.0, 0.0, 1.0, 1.0 / 3.0],
        }
        state_values = ComponentMap((state.find_component(name), values)
                                    for name, values in state_values.items())
        kgm3 = pyunits.kg / pyunits.m**3
        target_values = {
            "dens_mass_skeletal": [
                5250.000 * kgm3,
                5000.000 * kgm3,
                3987.000 * kgm3,
                4678.061 * kgm3,
            ],
        }
        target_values = ComponentMap((state.find_component(name), values)
                                     for name, values in target_values.items())

        param_sweeper = ParamSweeper(
            n_scen,
            state_values,
            output_values=target_values,
        )
        with param_sweeper:
            for inputs, outputs in param_sweeper:
                solve_strongly_connected_components(state)

                assert number_large_residuals(state, tol=1e-8) == 0

                for var, val in inputs.items():
                    val = value(pyunits.convert(val, var.get_units()))
                    assert var.value == pytest.approx(value(val), abs=1e-3)

                for var, val in outputs.items():
                    val = value(pyunits.convert(val, var.get_units()))
                    assert var.value == pytest.approx(value(val), abs=1e-3)
Example #9
    def _estimate_M(self, expr, name):
        # If there are fixed variables here, unfix them for this calculation,
        # and we'll restore them at the end.
        fixed_vars = ComponentMap()
        if not self.assume_fixed_vars_permanent:
            for v in EXPR.identify_variables(expr, include_fixed=True):
                if v.fixed:
                    fixed_vars[v] = value(v)
                    v.fixed = False

        # Calculate a best guess at M
        repn = generate_standard_repn(expr, quadratic=False)
        M = [0, 0]

        if not repn.is_nonlinear():
            if repn.constant is not None:
                for i in (0, 1):
                    if M[i] is not None:
                        M[i] += repn.constant

            for i, coef in enumerate(repn.linear_coefs or []):
                var = repn.linear_vars[i]
                bounds = (value(var.lb), value(var.ub))
                for i in (0, 1):
                    # reverse the bounds if the coefficient is negative
                    if coef > 0:
                        j = i
                    else:
                        j = 1 - i

                    if bounds[i] is not None:
                        M[j] += value(bounds[i]) * coef
                    else:
                        raise GDP_Error(
                            "Cannot estimate M for "
                            "expressions with unbounded variables."
                            "\n\t(found unbounded var '%s' while processing "
                            "constraint '%s')" % (var.name, name))
        else:
            # expression is nonlinear. Try using `contrib.fbbt` to estimate.
            expr_lb, expr_ub = compute_bounds_on_expr(expr)
            if expr_lb is None or expr_ub is None:
                raise GDP_Error("Cannot estimate M for unbounded nonlinear "
                                "expressions.\n\t(found while processing "
                                "constraint '%s')" % name)
            else:
                M = (expr_lb, expr_ub)

        # clean up if we unfixed things (fixed_vars is empty if we were assuming
        # fixed vars are fixed for life)
        for v, val in fixed_vars.items():
            v.fix(val)

        return tuple(M)
Example #10
def preprocess_subproblem(m, config):
    """Applies preprocessing transformations to the model."""
    if not config.tighten_nlp_var_bounds:
        original_bounds = ComponentMap()
        # TODO: Switch this to the general utility function, but I hid it in
        # #2221
        for cons in m.component_data_objects(Constraint,
                                             active=True,
                                             descend_into=Block):
            for v in EXPR.identify_variables(cons.expr):
                if v not in original_bounds.keys():
                    original_bounds[v] = (v.lb, v.ub)
        # We could miss if there is a variable that only appears in the
        # objective, but its bounds are not going to get changed anyway if
        # that's the case.

    # First do FBBT
    fbbt(m,
         integer_tol=config.integer_tolerance,
         feasibility_tol=config.constraint_tolerance,
         max_iter=config.max_fbbt_iterations)
    xfrm = TransformationFactory
    # Now that we've tightened bounds, see if any variables are fixed because
    # their lb is equal to the ub (within tolerance)
    xfrm('contrib.detect_fixed_vars').apply_to(
        m, tolerance=config.variable_tolerance)

    # Restore the original bounds because the NLP solver might like that better
    # and because, if deactivate_trivial_constraints ever gets fancier, this
    # could change what is and is not trivial.
    if not config.tighten_nlp_var_bounds:
        for v, (lb, ub) in original_bounds.items():
            v.setlb(lb)
            v.setub(ub)

    # Now, if something got fixed to 0, we might have 0*var terms to remove
    xfrm('contrib.remove_zero_terms').apply_to(m)
    # Last, check if any constraints are now trivial and deactivate them
    xfrm('contrib.deactivate_trivial_constraints').apply_to(
        m, tolerance=config.constraint_tolerance)
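
The original_bounds ComponentMap in preprocess_subproblem is the usual
save-and-restore-bounds idiom: capture every variable's (lb, ub) before bound
tightening and put the bounds back afterwards. A minimal sketch, with a
stand-in assignment in place of the actual fbbt call:

import pyomo.environ as pyo
from pyomo.common.collections import ComponentMap

m = pyo.ConcreteModel()
m.x = pyo.Var(bounds=(0, 10))
m.y = pyo.Var(bounds=(-5, 5))

# Save the original bounds of every variable on the model.
original_bounds = ComponentMap()
for v in m.component_data_objects(pyo.Var, descend_into=True):
    original_bounds[v] = (v.lb, v.ub)

# Stand-in for a bound-tightening step such as fbbt.
m.x.setlb(2)
m.y.setub(1)

# Restore the original bounds.
for v, (lb, ub) in original_bounds.items():
    v.setlb(lb)
    v.setub(ub)

assert m.x.bounds == (0, 10)
assert m.y.bounds == (-5, 5)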
Example #11
    def test_cp(self):
        m = self._make_model()
        state = m.fs.state

        n_scen = 8
        state_values = {
            "flow_mol": [1.0 * pyunits.mol / pyunits.s] * n_scen,
            "temperature": [
                1200.0 * pyunits.K,
                1200.0 * pyunits.K,
                1200.0 * pyunits.K,
                1200.0 * pyunits.K,
                300.0 * pyunits.K,
                600.0 * pyunits.K,
                900.0 * pyunits.K,
                1200.0 * pyunits.K,
            ],
            "pressure": [1.0 * pyunits.bar] * n_scen,
            "mole_frac_comp[O2]": [1.0, 0.0, 0.0, 0.0, 0.25, 0.25, 0.25, 0.25],
            "mole_frac_comp[N2]": [0.0, 1.0, 0.0, 0.0, 0.25, 0.25, 0.25, 0.25],
            "mole_frac_comp[H2O]":
            [0.0, 0.0, 1.0, 0.0, 0.25, 0.25, 0.25, 0.25],
            "mole_frac_comp[CO2]":
            [0.0, 0.0, 0.0, 1.0, 0.25, 0.25, 0.25, 0.25],
        }
        state_values = ComponentMap((state.find_component(name), values)
                                    for name, values in state_values.items())

        u = pyunits.kJ / pyunits.mol / pyunits.K
        kJkgK = pyunits.kJ / pyunits.kg / pyunits.K
        target_values = {
            "cp_mol_comp[O2]": [
                0.03566421043844448 * u,
                0.03566421043844444 * u,
                0.03566421043844444 * u,
                0.03566421043844444 * u,
                0.02408660519211111 * u,
                0.031970683705777776 * u,
                0.03435676292601234 * u,
                0.03566421043844444 * u,
            ],
            "cp_mol_comp[N2]": [
                0.03372177593533332 * u,
                0.03372177593533333 * u,
                0.03372177593533333 * u,
                0.03372177593533333 * u,
                0.03059729435133333 * u,
                0.030104019077333333 * u,
                0.03208929344525926 * u,
                0.03372177593533333 * u,
            ],
            "cp_mol_comp[H2O]": [
                0.0437510227322222 * u,
                0.04375102273222223 * u,
                0.04375102273222223 * u,
                0.04375102273222223 * u,
                0.03359738794555556 * u,
                0.036317861208888885 * u,
                0.039997715202839505 * u,
                0.04375102273222223 * u,
            ],
            "cp_mol_comp[CO2]": [
                0.05634605443600005 * u,
                0.056346054436000007 * u,
                0.056346054436000007 * u,
                0.056346054436000007 * u,
                0.037217621149000006 * u,
                0.047317934392 * u,
                0.053001289534111116 * u,
                0.056346054436000007 * u,
            ],
            "cp_mol": [
                0.03566421043844448 * u,
                0.03372177593533333 * u,
                0.04375102273222223 * u,
                0.056346054436000007 * u,
                0.0313747271595 * u,
                0.036427624596 * u,
                0.03986126527705556 * u,
                0.25 * (
                    # Reuse the component values at 1200 K computed above.
                    0.03566421043844444 * u + 0.03372177593533333 * u +
                    0.04375102273222223 * u + 0.056346054436000007 * u),
            ],
            "cp_mass": [
                1.1145065762013922 * kJkgK,
                1.2043491405476134 * kJkgK,
                2.4306123740123473 * kJkgK,
                1.280592146272725 * kJkgK,
                1.0286795790000056 * kJkgK,
                1.194348347409837 * kJkgK,
                1.3069267303952685 * kJkgK,
                1.3892054388688537 * kJkgK,
            ],
        }
        target_values = ComponentMap((state.find_component(name), values)
                                     for name, values in target_values.items())

        # Construct cp_mass and all prerequisites.
        # This constructs cp_mol and cp_mol_comp as well.
        state.cp_mass

        param_sweeper = ParamSweeper(n_scen,
                                     state_values,
                                     output_values=target_values)
        with param_sweeper:
            for inputs, target in param_sweeper:
                solve_strongly_connected_components(state)

                # Make sure property equations have been converged
                assert number_large_residuals(state, tol=1e-8) == 0

                # Sanity check that inputs are properly set
                for var, val in inputs.items():
                    val = value(pyunits.convert(val, var.get_units()))
                    assert var.value == pytest.approx(value(val), abs=1e-3)

                # Make sure properties have been calculated as expected
                for var, val in target.items():
                    val = value(pyunits.convert(val, var.get_units()))
                    assert var.value == pytest.approx(value(val), abs=1e-3)
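
The mixture values in the cp table above are consistent with
mole-fraction-weighted averages of the component values; for the equimolar
scenario at 300 K, for example, a quick numeric check (component data copied
from the targets above):

x = 0.25
cp_comp = [0.02408660519211111, 0.03059729435133333,
           0.03359738794555556, 0.037217621149000006]  # O2, N2, H2O, CO2, kJ/mol/K
mw_comp = [32.0, 28.0, 18.0, 44.0]                      # g/mol

cp_mol = x * sum(cp_comp)            # ~0.0313747 kJ/mol/K
mw = x * sum(mw_comp) / 1000.0       # 0.0305 kg/mol
print(cp_mol, cp_mol / mw)           # cp_mass ~1.0287 kJ/kg/K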
Example #12
    def test_diffusion_comp(self):
        m = self._make_model()
        state = m.fs.state

        n_scen = 11
        bar = pyunits.bar
        K = pyunits.K
        state_values = {
            "flow_mol": [1.0 * pyunits.mol / pyunits.s] * n_scen,
            "temperature": [
                1200.0 * K,
                1200.0 * K,
                1200.0 * K,
                1200.0 * K,
                300.0 * K,
                600.0 * K,
                900.0 * K,
                1200.0 * K,
                1200.0 * K,
                1200.0 * K,
                1200.0 * K,
            ],
            "pressure": [
                1.0 * bar,
                1.0 * bar,
                1.0 * bar,
                1.0 * bar,
                1.0 * bar,
                1.0 * bar,
                1.0 * bar,
                1.0 * bar,
                0.5 * bar,
                1.5 * bar,
                2.0 * bar,
            ],
            "mole_frac_comp[O2]": [
                # Note that diffusivity is not defined for a pure
                # component in itself (zero gradient, zero net diffusion)
                0.90,
                0.025,
                0.025,
                0.025,
                0.25,
                0.25,
                0.25,
                0.25,
                0.25,
                0.25,
                0.25
            ],
            "mole_frac_comp[N2]": [
                0.025, 0.90, 0.025, 0.025, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25,
                0.25
            ],
            "mole_frac_comp[H2O]": [
                0.025, 0.025, 0.90, 0.025, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25,
                0.25
            ],
            "mole_frac_comp[CO2]": [
                0.025, 0.025, 0.025, 0.90, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25,
                0.25
            ],
        }
        state_values = ComponentMap((state.find_component(name), values)
                                    for name, values in state_values.items())

        cm = pyunits.cm
        s = pyunits.s
        target_values = {
            # These values look reasonable
            # TODO: Verify with external source.
            "diffusion_comp[O2]": [
                3.11792621830951 * cm**2 / s,
                2.456227751888218 * cm**2 / s,
                3.034740091620132 * cm**2 / s,
                1.9479388404541838 * cm**2 / s,
                0.206691259894311 * cm**2 / s,
                0.6952237580376006 * cm**2 / s,
                1.4134625566198689 * cm**2 / s,
                2.3384446637321363 * cm**2 / s,
                4.676889327464272 * cm**2 / s,
                1.5589631091547582 * cm**2 / s,
                1.1692223318660684 * cm**2 / s,
            ],
            "diffusion_comp[N2]": [
                2.457518754629481 * cm**2 / s,
                3.1383309495574956 * cm**2 / s,
                3.0334118458311523 * cm**2 / s,
                1.9790010245966736 * cm**2 / s,
                0.2080439152537244 * cm**2 / s,
                0.6997735302088169 * cm**2 / s,
                1.422712718932098 * cm**2 / s,
                2.353748212168125 * cm**2 / s,
                4.70749642433625 * cm**2 / s,
                1.5691654747787498 * cm**2 / s,
                1.176874106084062 * cm**2 / s,
            ],
            "diffusion_comp[H2O]": [
                3.0845168350215713 * cm**2 / s,
                3.0811129521516416 * cm**2 / s,
                3.719143107603082 * cm**2 / s,
                2.5051274838003996 * cm**2 / s,
                0.24654668546150185 * cm**2 / s,
                0.8292808959890481 * cm**2 / s,
                1.6860147281349098 * cm**2 / s,
                2.7893573307023156 * cm**2 / s,
                5.578714661404631 * cm**2 / s,
                1.8595715538015434 * cm**2 / s,
                1.3946786653511578 * cm**2 / s,
            ],
            "diffusion_comp[CO2]": [
                1.929322600571961 * cm**2 / s,
                1.9589681584041365 * cm**2 / s,
                2.4422863072776697 * cm**2 / s,
                2.7098612589802444 * cm**2 / s,
                0.17964011927809195 * cm**2 / s,
                0.6042349293467888 * cm**2 / s,
                1.2284727588198259 * cm**2 / s,
                2.0323959442351844 * cm**2 / s,
                4.064791888470369 * cm**2 / s,
                1.3549306294901227 * cm**2 / s,
                1.0161979721175922 * cm**2 / s,
            ],
        }
        target_values = ComponentMap((state.find_component(name), values)
                                     for name, values in target_values.items())

        # Construct diffusion_comp and all prerequisites
        state.diffusion_comp

        assert_units_consistent(state.diffusion_comp_constraint)

        param_sweeper = ParamSweeper(n_scen,
                                     state_values,
                                     output_values=target_values)
        with param_sweeper:
            for inputs, target in param_sweeper:
                solve_strongly_connected_components(state)

                # Make sure property equations have been converged
                assert number_large_residuals(state, tol=1e-8) == 0

                # Sanity check that inputs are properly set
                for var, val in inputs.items():
                    val = value(pyunits.convert(val, var.get_units()))
                    assert var.value == pytest.approx(value(val), abs=1e-3)

                # Make sure properties have been calculated as expected
                for var, val in target.items():
                    val = value(pyunits.convert(val, var.get_units()))
                    assert var.value == pytest.approx(value(val), abs=1e-3)
Example #13
    def test_therm_cond(self):
        m = self._make_model()
        state = m.fs.state

        n_scen = 8
        state_values = {
            "flow_mol": [1.0 * pyunits.mol / pyunits.s] * n_scen,
            "temperature": [
                1200.0 * pyunits.K,
                1200.0 * pyunits.K,
                1200.0 * pyunits.K,
                1200.0 * pyunits.K,
                300.0 * pyunits.K,
                600.0 * pyunits.K,
                900.0 * pyunits.K,
                1200.0 * pyunits.K,
            ],
            "pressure": [1.0 * pyunits.bar] * n_scen,
            "mole_frac_comp[O2]": [1.0, 0.0, 0.0, 0.0, 0.25, 0.25, 0.25, 0.25],
            "mole_frac_comp[N2]": [0.0, 1.0, 0.0, 0.0, 0.25, 0.25, 0.25, 0.25],
            "mole_frac_comp[H2O]":
            [0.0, 0.0, 1.0, 0.0, 0.25, 0.25, 0.25, 0.25],
            "mole_frac_comp[CO2]":
            [0.0, 0.0, 0.0, 1.0, 0.25, 0.25, 0.25, 0.25],
        }
        state_values = ComponentMap((state.find_component(name), values)
                                    for name, values in state_values.items())

        u = pyunits.kJ / pyunits.m / pyunits.K / pyunits.s
        target_values = {
            "therm_cond": [
                8.490673044994837e-05 * u,
                7.803113104821915e-05 * u,
                0.0001245121534187936 * u,
                7.844692969560201e-05 * u,
                2.1981943936613706e-05 * u,
                4.567583423706824e-05 * u,
                6.946515568649932e-05 * u,
                9.30078254960681e-05 * u,
            ],
        }
        target_values = ComponentMap((state.find_component(name), values)
                                     for name, values in target_values.items())

        # Construct therm_cond and all prerequisites
        state.therm_cond

        param_sweeper = ParamSweeper(n_scen,
                                     state_values,
                                     output_values=target_values)
        with param_sweeper:
            for inputs, target in param_sweeper:
                solve_strongly_connected_components(state)

                # Make sure property equations have been converged
                assert number_large_residuals(state, tol=1e-8) == 0

                # Sanity check that inputs are properly set
                for var, val in inputs.items():
                    val = value(pyunits.convert(val, var.get_units()))
                    assert var.value == pytest.approx(value(val), abs=1e-3)

                # Make sure properties have been calculated as expected
                for var, val in target.items():
                    val = value(pyunits.convert(val, var.get_units()))
                    assert var.value == pytest.approx(value(val), abs=1e-3)
Example #14
    def test_dens_mol_comp(self):
        m = self._make_model()
        state = m.fs.state

        n_scen = 5
        state_values = {
            "flow_mol": [1.0 * pyunits.mol / pyunits.s] * n_scen,
            "temperature": [1200.0 * pyunits.K] * n_scen,
            "pressure": [1.0 * pyunits.bar] * n_scen,
            "mole_frac_comp[O2]": [1.0, 0.0, 0.0, 0.0, 0.25],
            "mole_frac_comp[N2]": [0.0, 1.0, 0.0, 0.0, 0.25],
            "mole_frac_comp[H2O]": [0.0, 0.0, 1.0, 0.0, 0.25],
            "mole_frac_comp[CO2]": [0.0, 0.0, 0.0, 1.0, 0.25],
        }
        state_values = ComponentMap((state.find_component(name), values)
                                    for name, values in state_values.items())

        u = pyunits.mol / pyunits.m**3
        target_values = {
            "dens_mol": [10.023 * u] * n_scen,
            "dens_mol_comp[O2]": [
                10.023 * u,
                0.0 * u,
                0.0 * u,
                0.0 * u,
                0.25 * 10.023 * u,
            ],
            "dens_mol_comp[N2]": [
                0.0 * u,
                10.023 * u,
                0.0 * u,
                0.0 * u,
                0.25 * 10.023 * u,
            ],
            "dens_mol_comp[H2O]": [
                0.0 * u,
                0.0 * u,
                10.023 * u,
                0.0 * u,
                0.25 * 10.023 * u,
            ],
            "dens_mol_comp[CO2]": [
                0.0 * u,
                0.0 * u,
                0.0 * u,
                10.023 * u,
                0.25 * 10.023 * u,
            ],
        }
        target_values = ComponentMap((state.find_component(name), values)
                                     for name, values in target_values.items())

        # Construct dens_mol_comp and all prerequisites
        state.dens_mol_comp

        param_sweeper = ParamSweeper(n_scen,
                                     state_values,
                                     output_values=target_values)
        with param_sweeper:
            for inputs, target in param_sweeper:
                solve_strongly_connected_components(state)

                # Make sure property equations have been converged
                assert number_large_residuals(state, tol=1e-8) == 0

                # Sanity check that inputs have been set properly
                for var, val in inputs.items():
                    val = value(pyunits.convert(val, var.get_units()))
                    # ^ Problem converting units when value is zero
                    assert var.value == pytest.approx(value(val), abs=1e-3)

                # Make sure property values are what we expect
                for var, val in target.items():
                    #val = value(pyunits.convert(val, var.get_units()))
                    # ^ Problem converting units when value is zero
                    assert var.value == pytest.approx(value(val), abs=1e-3)
Example #15
    def test_visc_d(self):
        m = self._make_model()
        state = m.fs.state

        n_scen = 8
        state_values = {
            "flow_mol": [1.0 * pyunits.mol / pyunits.s] * n_scen,
            "temperature": [
                1200.0 * pyunits.K,
                1200.0 * pyunits.K,
                1200.0 * pyunits.K,
                1200.0 * pyunits.K,
                300.0 * pyunits.K,
                600.0 * pyunits.K,
                900.0 * pyunits.K,
                1200.0 * pyunits.K,
            ],
            "pressure": [1.0 * pyunits.bar] * n_scen,
            "mole_frac_comp[O2]": [1.0, 0.0, 0.0, 0.0, 0.25, 0.25, 0.25, 0.25],
            "mole_frac_comp[N2]": [0.0, 1.0, 0.0, 0.0, 0.25, 0.25, 0.25, 0.25],
            "mole_frac_comp[H2O]":
            [0.0, 0.0, 1.0, 0.0, 0.25, 0.25, 0.25, 0.25],
            "mole_frac_comp[CO2]":
            [0.0, 0.0, 0.0, 1.0, 0.25, 0.25, 0.25, 0.25],
        }
        state_values = ComponentMap((state.find_component(name), values)
                                    for name, values in state_values.items())

        target_values = {
            "visc_d": [
                # These values were copied after a solve.
                # TODO: Cross-reference with another source.
                5.534440949133228e-05 * pyunits.Pa * pyunits.s,
                4.67667824296429e-05 * pyunits.Pa * pyunits.s,
                4.6232771210527155e-05 * pyunits.Pa * pyunits.s,
                4.512867970060493e-05 * pyunits.Pa * pyunits.s,
                1.6181534595116313e-05 * pyunits.Pa * pyunits.s,
                2.866222939903063e-05 * pyunits.Pa * pyunits.s,
                3.909320395131273e-05 * pyunits.Pa * pyunits.s,
                4.838841106600266e-05 * pyunits.Pa * pyunits.s,
            ]
        }
        target_values = ComponentMap((state.find_component(name), values)
                                     for name, values in target_values.items())

        # Construct visc_d and all prerequisites
        state.visc_d

        param_sweeper = ParamSweeper(n_scen,
                                     state_values,
                                     output_values=target_values)
        with param_sweeper:
            for inputs, target in param_sweeper:
                solve_strongly_connected_components(state)

                # Make sure property equations have been converged
                assert number_large_residuals(state, tol=1e-8) == 0

                # Sanity check that inputs are properly set
                for var, val in inputs.items():
                    val = value(pyunits.convert(val, var.get_units()))
                    assert var.value == pytest.approx(value(val), abs=1e-3)

                # Make sure properties have been calculated as expected
                for var, val in target.items():
                    val = value(pyunits.convert(val, var.get_units()))
                    assert var.value == pytest.approx(value(val), abs=1e-3)
Example #16
    def test_reaction_rate(self):
        m = self._make_model()
        rxn_block = m.fs.reaction_block

        n_scen = 9
        mols = pyunits.mol/pyunits.s
        kgs = pyunits.kg/pyunits.s
        K = pyunits.K
        bar = pyunits.bar
        state_values = {
            "gas_state.flow_mol": [1.0*mols]*n_scen,
            "solid_state.flow_mass": [1.0*kgs]*n_scen,
            "gas_state.temperature": [1273.0*K]*n_scen,
            "solid_state.temperature": [
                1273.0*K, 1273.0*K, 1273.0*K,
                1273.0*K, 1273.0*K, 1273.0*K,
                1100.0*K, 1200.0*K, 1300.0*K,
                ],
            "gas_state.pressure": [1.0*bar]*n_scen,
            "solid_state.particle_porosity": [0.27]*n_scen,
            "gas_state.mole_frac_comp[O2]":  [
                1.0, 0.7, 0.0, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25,
                ],
            "gas_state.mole_frac_comp[N2]":  [
                0.0, 0.1, 1/3, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25,
                ],
            "gas_state.mole_frac_comp[H2O]": [
                0.0, 0.1, 1/3, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25,
                ],
            "gas_state.mole_frac_comp[CO2]": [
                0.0, 0.1, 1/3, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25,
                ],
            "solid_state.mass_frac_comp[Fe2O3]": [
                1/3, 1/3, 1/3, 2/3, 0.0, 1/3, 1/3, 1/3, 1/3,
                ],
            "solid_state.mass_frac_comp[Fe3O4]": [
                1/3, 1/3, 1/3, 0.0, 2/3, 1/3, 1/3, 1/3, 1/3,
                ],
            "solid_state.mass_frac_comp[Al2O3]": [1/3]*n_scen,
            }
        state_values = ComponentMap((m.fs.find_component(name), values)
                for name, values in state_values.items())

        molm3s = pyunits.mol/pyunits.m**3/pyunits.s
        target_values = {
                "reaction_block.reaction_rate[R1]": [
                    351.367*molm3s,
                    245.957*molm3s,
                    0.0*molm3s,
                    0.0*molm3s,
                    271.731*molm3s,
                    87.842*molm3s,
                    71.344*molm3s,
                    81.051*molm3s,
                    90.288*molm3s,
                    ],
                }
        target_values = ComponentMap((m.fs.find_component(name), values)
                for name, values in target_values.items())

        assert degrees_of_freedom(m.fs) == 0

        param_sweeper = ParamSweeper(n_scen, state_values,
                output_values=target_values)
        with param_sweeper:
            for inputs, outputs in param_sweeper:
                solve_strongly_connected_components(m.fs)

                # Make sure property equalities have been converged
                assert number_large_residuals(m.fs, tol=1e-8) == 0

                # Sanity check that inputs are properly set
                for var, val in inputs.items():
                    val = value(pyunits.convert(val, var.get_units()))
                    assert var.value == pytest.approx(value(val), abs=1e-3)

                # Make sure properties have been calculated as expected
                for var, val in outputs.items():
                    if value(val) != 0:
                        # To get around Pyomo issue #1627
                        val = value(pyunits.convert(val, var.get_units()))
                    assert var.value == pytest.approx(value(val), abs=1e-3)
Example #17
    def test_dens_mol(self):
        m = self._make_model()
        state = m.fs.state

        n_scen = 8
        state_values = {
            "flow_mol": [1.0 * pyunits.mol / pyunits.s] * n_scen,
            "temperature": [
                200.0 * pyunits.K,
                300.0 * pyunits.K,
                600.0 * pyunits.K,
                900.0 * pyunits.K,
                1200.0 * pyunits.K,
                1200.0 * pyunits.K,
                1200.0 * pyunits.K,
                1200.0 * pyunits.K,
            ],
            "pressure": [
                1.0 * pyunits.bar,
                1.0 * pyunits.bar,
                1.0 * pyunits.bar,
                1.0 * pyunits.bar,
                1.0 * pyunits.bar,
                0.5 * pyunits.bar,
                1.5 * pyunits.bar,
                2.0 * pyunits.bar,
            ],
            "mole_frac_comp[O2]": [0.25] * n_scen,
            "mole_frac_comp[N2]": [0.25] * n_scen,
            "mole_frac_comp[H2O]": [0.25] * n_scen,
            "mole_frac_comp[CO2]": [0.25] * n_scen,
        }
        state_values = ComponentMap((state.find_component(name), values)
                                    for name, values in state_values.items())

        u = pyunits.mol / pyunits.m**3
        target_values = {
            "dens_mol": [
                # Calculated by P*1e5/(T*8.314462618)
                60.136 * u,
                40.091 * u,
                20.045 * u,
                13.364 * u,
                10.023 * u,
                5.011 * u,
                15.034 * u,
                20.045 * u,
            ]
        }
        target_values = ComponentMap((state.find_component(name), values)
                                     for name, values in target_values.items())

        # Construct dens_mol and all prerequisites
        state.dens_mol
        assert_units_consistent(state.ideal_gas)

        param_sweeper = ParamSweeper(n_scen,
                                     state_values,
                                     output_values=target_values)
        with param_sweeper:
            for inputs, target in param_sweeper:
                solve_strongly_connected_components(state)

                # Check that state block equations have been converged
                assert number_large_residuals(state, tol=1e-8) == 0

                # Sanity check that inputs are what we expect
                for var, val in inputs.items():
                    val = value(pyunits.convert(val, var.get_units()))
                    assert var.value == pytest.approx(val, abs=1e-3)

                # Check that state block performs the calculations we expect
                for var, val in target.items():
                    val = value(pyunits.convert(val, var.get_units()))
                    assert var.value == pytest.approx(val, abs=1e-3)
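
The dens_mol targets above follow directly from the ideal-gas relation noted
in the code comment (P*1e5/(T*8.314462618), with P in bar and T in K); for
example, a quick spot-check of the first and last scenarios:

R = 8.314462618  # J/mol/K
print(1.0e5 / (R * 200.0))   # ~60.136 mol/m^3 (1.0 bar, 200 K)
print(2.0e5 / (R * 1200.0))  # ~20.045 mol/m^3 (2.0 bar, 1200 K)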
Example #18
def solve_separation_problem(model_data, config):

    # Timing variables
    global_solve_time = 0
    local_solve_time = 0

    # List of objective functions
    objectives_map = model_data.separation_model.util.map_obj_to_constr
    constraint_map_to_master = model_data.separation_model.util.map_new_constraint_list_to_original_con

    # Add additional or remaining separation objectives to the dict
    # (those not assigned an explicit priority, plus those added by PyROS for
    # second-stage variable bounds)
    config_sep_priority_dict = config.separation_priority_order
    actual_sep_priority_dict = ComponentMap()
    for perf_con in model_data.separation_model.util.performance_constraints:
        actual_sep_priority_dict[perf_con] = config_sep_priority_dict.get(
            perf_con.name, 0)

    # "Bin" the objectives based on priorities
    sorted_unique_priorities = sorted(list(
        set(actual_sep_priority_dict.values())),
                                      reverse=True)
    set_of_deterministic_constraints = model_data.separation_model.util.deterministic_constraints
    if hasattr(model_data.separation_model, "epigraph_constr"):
        set_of_deterministic_constraints.add(
            model_data.separation_model.epigraph_constr)
    for is_global in (False, True):
        solver = config.global_solver if \
            (is_global or config.bypass_local_separation) else config.local_solver
        solve_data_list = []

        for val in sorted_unique_priorities:
            # Priorities are sorted in descending order
            # The list of performance constraints with this priority
            perf_constraints = [
                constr_name
                for constr_name, priority in actual_sep_priority_dict.items()
                if priority == val
            ]
            for perf_con in perf_constraints:
                #config.progress_logger.info("Separating constraint " + str(perf_con))
                try:
                    separation_obj = objectives_map[perf_con]
                except:
                    raise ValueError(
                        "Error in mapping separation objective to its master constraint form."
                    )
                separation_obj.activate()

                if perf_con in set_of_deterministic_constraints:
                    nom_constraint = perf_con
                else:
                    nom_constraint = constraint_map_to_master[perf_con]

                try:
                    model_data.master_nominal_scenario_value = value(
                        model_data.master_nominal_scenario.find_component(
                            nom_constraint))
                except:
                    raise ValueError(
                        "Unable to access nominal scenario value for the constraint "
                        + str(nom_constraint))

                if config.uncertainty_set.geometry == Geometry.DISCRETE_SCENARIOS:
                    solve_data_list.append(
                        discrete_solve(model_data=model_data,
                                       config=config,
                                       solver=solver,
                                       is_global=is_global))
                    if all(s.termination_condition in globally_acceptable for
                           sep_soln_list in solve_data_list for s in sep_soln_list) or \
                            (is_global == False and all(s.termination_condition in locally_acceptable for
                                                        sep_soln_list in solve_data_list for s in sep_soln_list)):
                        exit_separation_loop = False
                    else:
                        exit_separation_loop = True
                else:
                    solve_data = SeparationResult()
                    exit_separation_loop = solver_call_separation(
                        model_data=model_data,
                        config=config,
                        solver=solver,
                        solve_data=solve_data,
                        is_global=is_global)
                    solve_data_list.append([solve_data])

                # === Keep track of total solve times
                if is_global or config.bypass_local_separation:
                    if config.uncertainty_set.geometry == Geometry.DISCRETE_SCENARIOS:
                        for sublist in solve_data_list:
                            for s in sublist:
                                global_solve_time += get_time_from_solver(
                                    s.results)
                    else:
                        global_solve_time += get_time_from_solver(
                            solve_data.results)
                else:
                    if config.uncertainty_set.geometry == Geometry.DISCRETE_SCENARIOS:
                        for sublist in solve_data_list:
                            for s in sublist:
                                local_solve_time += get_time_from_solver(
                                    s.results)
                    else:
                        local_solve_time += get_time_from_solver(
                            solve_data.results)

                # === Terminate for timing
                if exit_separation_loop:
                    return solve_data_list, [], [], is_global, local_solve_time, global_solve_time
                separation_obj.deactivate()

        # Do we return?
        # If there are multiple violations in this bucket, pick the worst case
        idx_i, idx_j = get_index_of_max_violation(
            model_data=model_data,
            config=config,
            solve_data_list=solve_data_list)

        violating_realizations = [
            v
            for v in solve_data_list[idx_i][idx_j].violating_param_realization
        ]
        violations = solve_data_list[idx_i][idx_j].list_of_scaled_violations

        if any(s.found_violation for solve_list in solve_data_list
               for s in solve_list):
            #config.progress_logger.info(
            #	"Violation found in constraint %s with realization %s" % (
            #	list(objectives_map.keys())[idx_i], violating_realizations))
            return solve_data_list, violating_realizations, violations, is_global, local_solve_time, global_solve_time

    return solve_data_list, [], [], is_global, local_solve_time, global_solve_time
Example #19
class BasePWRelaxationData(BaseRelaxationData):
    def __init__(self, component):
        BaseRelaxationData.__init__(self, component)

        self._partitions = ComponentMap()  # ComponentMap: var: list of float
        self._saved_partitions = list()  # list of ComponentMap

    def rebuild(self, build_nonlinear_constraint=False):
        """
        Remove any auto-created vars/constraints from the relaxation block and recreate it
        """
        self.clean_partitions()
        super(BasePWRelaxationData, self).rebuild(
            build_nonlinear_constraint=build_nonlinear_constraint)

    def _set_input(self,
                   relaxation_side=RelaxationSide.BOTH,
                   persistent_solvers=None,
                   use_linear_relaxation=True,
                   large_eval_tol=math.inf):
        self._partitions = ComponentMap()
        self._saved_partitions = list()
        BaseRelaxationData._set_input(
            self,
            relaxation_side=relaxation_side,
            persistent_solvers=persistent_solvers,
            use_linear_relaxation=use_linear_relaxation,
            large_eval_tol=large_eval_tol)

    def add_partition_point(self):
        """
        Add a point to the current partitioning. This does not rebuild the relaxation. You must call rebuild()
        to rebuild the relaxation.
        """
        raise NotImplementedError(
            'This method should be implemented in the derived class.')

    def _add_partition_point(self, var, value=None):
        if value is None:
            value = pe.value(var)
        # if the point is outside the variable's bounds, then it will simply get removed when clean_partitions
        # gets called.
        self._partitions[var].append(value)

    def push_partitions(self):
        """
        Save the current partitioning for later use through pop_partitions().
        """
        self._saved_partitions.append(
            pe.ComponentMap((k, list(v)) for k, v in self._partitions.items()))

    def clear_partitions(self):
        """
        Delete any existing partitioning scheme.
        """
        tmp = ComponentMap()
        for var, pts in self._partitions.items():
            tmp[var] = [pe.value(var.lb), pe.value(var.ub)]
        self._partitions = tmp

    def pop_partitions(self):
        """
        Use the most recently saved partitioning.
        """
        self._partitions = self._saved_partitions.pop(-1)

    def clean_partitions(self):
        # discard any points in the partitioning that are not within the variable bounds
        for var, pts in self._partitions.items():
            pts.sort()

        for var, pts in self._partitions.items():
            lb, ub = tuple(_get_bnds_list(var))

            new_pts = list()
            new_pts.append(lb)
            for val in pts[1:-1]:
                if lb < val < ub:
                    new_pts.append(val)
            new_pts.append(ub)
            self._partitions[var] = new_pts

    def get_active_partitions(self):
        ans = ComponentMap()
        for var, pts in self._partitions.items():
            val = pyo.value(var)
            lower = var.lb
            upper = var.ub
            for p in pts:
                if val >= p and p > lower:
                    lower = p
                if val <= p and p < upper:
                    upper = p
            ans[var] = lower, upper
        return ans
Example #20
def categorize_dae_variables(dae_vars, time, inputs, measurements=None):
    t0 = time.first()
    t1 = time.get_finite_elements()[1]
    deriv_vars = []
    diff_vars = []
    input_vars = []
    alg_vars = []
    fixed_vars = []

    # TODO: give user ability to specify measurements and disturbances
    measured_vars = []

    if measurements is not None:
        infer_measurements = False
        user_measurements = ComponentSet(measurements)
        updated_user_measurements = ComponentSet(measurements)
        user_measured_vars = []
    else:
        infer_measurements = True
        updated_user_measurements = ComponentSet()

    dae_map = ComponentMap([(v[t0], v) for v in dae_vars])
    t0_vardata = list(dae_map.keys())

    if inputs is None:
        inputs = []
    input_set = ComponentSet(inputs)
    updated_input_set = ComponentSet(inputs)

    for var0 in t0_vardata:
        if var0 in input_set:
            updated_input_set.remove(var0)
            time_slice = dae_map.pop(var0)
            input_vars.append(time_slice)

        if var0 in updated_user_measurements:
            updated_user_measurements.remove(var0)
            # Don't pop measured vars. They will be popped elsewhere.
            time_slice = dae_map[var0]
            user_measured_vars.append(time_slice)

        parent = var0.parent_component()
        if not isinstance(parent, DerivativeVar):
            continue
        if time not in ComponentSet(parent.get_continuousset_list()):
            continue
        index0 = var0.index()
        var1 = dae_map[var0][t1]
        index1 = var1.index()
        state = parent.get_state_var()

        if state[index1].fixed:
            # Assume state var is fixed everywhere, so derivative
            # 'isn't really' a derivative.
            # Should be safe to remove state from dae_map here
            state_slice = dae_map.pop(state[index0])
            fixed_vars.append(state_slice)
            continue
        if state[index0] in input_set:
            # If differential variable is an input, then this DerivativeVar
            # is 'not really a derivative'
            continue

        deriv_slice = dae_map.pop(var0)

        if var1.fixed:
            # Assume derivative has been fixed everywhere.
            # Add to list of fixed variables, and don't remove its state variable.
            fixed_vars.append(deriv_slice)
        elif var0.fixed:
            # In this case the derivative has been used as an initial condition.
            # Still want to include it in the list of derivatives.
            measured_vars.append(deriv_slice)
            state_slice = dae_map.pop(state[index0])
            if state[index0].fixed:
                measured_vars.append(state_slice)
            deriv_vars.append(deriv_slice)
            diff_vars.append(state_slice)
        else:
            # Neither is fixed. This should be the most common case.
            state_slice = dae_map.pop(state[index0])
            if state[index0].fixed:
                measured_vars.append(state_slice)
            deriv_vars.append(deriv_slice)
            diff_vars.append(state_slice)

    if updated_input_set:
        raise RuntimeError('Not all inputs could be found')
    assert len(deriv_vars) == len(diff_vars)

    for var0, time_slice in dae_map.items():
        var1 = time_slice[t1]
        # If the variable is still in the list of time-indexed vars,
        # it must either be fixed (not a var) or be an algebraic var
        if var1.fixed:
            fixed_vars.append(time_slice)
        else:
            if var0.fixed:
                measured_vars.append(time_slice)
            alg_vars.append(time_slice)

    category_list_map = {
        VariableCategory.DERIVATIVE: deriv_vars,
        VariableCategory.DIFFERENTIAL: diff_vars,
        VariableCategory.ALGEBRAIC: alg_vars,
        VariableCategory.INPUT: input_vars,
        VariableCategory.FIXED: fixed_vars,
        VariableCategory.MEASUREMENT: measured_vars,
    }
    if measurements is not None:
        # If the user provided their own measurements,
        # override the inferred measurements. Assume the user
        # will modify the state of their variables appropriately.
        category_list_map[VariableCategory.MEASUREMENT] = user_measured_vars
    category_dict = {
        category: [
            Reference(ref.referent, ctype=ctype)
            for ref in category_list_map[category]
        ]
        for category, ctype in CATEGORY_TYPE_MAP.items()
    }
    return category_dict
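
A minimal, hypothetical usage sketch follows. It assumes the module-level names this function relies on (VariableCategory, CATEGORY_TYPE_MAP, Reference, ComponentMap, ComponentSet, DerivativeVar) are in scope, as in the module the function was taken from; the small DAE model is illustrative only.

import pyomo.environ as pyo
import pyomo.dae as dae
from pyomo.dae.flatten import flatten_dae_components

m = pyo.ConcreteModel()
m.time = dae.ContinuousSet(initialize=[0.0, 1.0, 2.0])
m.x = pyo.Var(m.time, initialize=1.0)        # differential variable
m.u = pyo.Var(m.time, initialize=0.0)        # input
m.dxdt = dae.DerivativeVar(m.x, wrt=m.time)
m.x[m.time.first()].fix(1.0)                 # initial condition, so x is "measured"

# Flatten the model into time-indexed references, then categorize them.
scalar_vars, dae_vars = flatten_dae_components(m, m.time, pyo.Var)
categories = categorize_dae_variables(dae_vars, m.time, inputs=[m.u[m.time.first()]])
for category, refs in categories.items():
    print(category, len(refs))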
Exemple #21
    def test_cp(self):
        m = self._make_model()
        state = m.fs.state

        n_scen = 4
        K = pyunits.K
        state_values = {
            "flow_mass": [1.0 * pyunits.kg / pyunits.s] * n_scen,
            "temperature": [1000.0 * K, 1100 * K, 1200 * K, 1300 * K],
            "particle_porosity": [0.27] * n_scen,
            "mass_frac_comp[Fe2O3]": [1.0 / 3.0] * n_scen,
            "mass_frac_comp[Fe3O4]": [1.0 / 3.0] * n_scen,
            "mass_frac_comp[Al2O3]": [1.0 / 3.0] * n_scen,
        }
        state_values = ComponentMap((state.find_component(name), values)
                                    for name, values in state_values.items())
        kJmolK = pyunits.kJ / pyunits.mol / pyunits.K
        kJkgK = pyunits.kJ / pyunits.kg / pyunits.K
        target_values = {
            "cp_mol_comp[Fe2O3]": [
                0.1401 * kJmolK,
                0.1408 * kJmolK,
                0.1415 * kJmolK,
                0.1423 * kJmolK,
            ],
            "cp_mol_comp[Fe3O4]": [
                0.2008 * kJmolK,
                0.2008 * kJmolK,
                0.2008 * kJmolK,
                0.2008 * kJmolK,
            ],
            "cp_mol_comp[Al2O3]": [
                0.1249 * kJmolK,
                0.1268 * kJmolK,
                0.1285 * kJmolK,
                0.1299 * kJmolK,
            ],
            "cp_mass": [
                0.9899 * kJkgK,
                0.9975 * kJkgK,
                1.0045 * kJkgK,
                1.0108 * kJkgK,
            ],
        }
        target_values = ComponentMap((state.find_component(name), values)
                                     for name, values in target_values.items())

        param_sweeper = ParamSweeper(
            n_scen,
            state_values,
            output_values=target_values,
        )
        with param_sweeper:
            for inputs, outputs in param_sweeper:
                solve_strongly_connected_components(state)

                # Check that the property equations have converged (no large residuals)
                assert number_large_residuals(state, tol=1e-8) == 0

                # Sanity check that inputs have been set properly
                for var, val in inputs.items():
                    val = value(pyunits.convert(val, var.get_units()))
                    assert var.value == pytest.approx(value(val), abs=1e-3)

                for var, val in outputs.items():
                    val = value(pyunits.convert(val, var.get_units()))
                    assert var.value == pytest.approx(value(val), abs=1e-3)
Exemple #22
class GurobiDirect(DirectSolver):

    _verified_license = None

    def __init__(self, **kwds):
        if 'type' not in kwds:
            kwds['type'] = 'gurobi_direct'
        super(GurobiDirect, self).__init__(**kwds)
        self._pyomo_var_to_solver_var_map = ComponentMap()
        self._solver_var_to_pyomo_var_map = ComponentMap()
        self._pyomo_con_to_solver_con_map = dict()
        self._solver_con_to_pyomo_con_map = ComponentMap()
        self._needs_updated = True  # flag that indicates if solver_model.update() needs to be called before getting variable and constraint attributes
        self._callback = None
        self._callback_func = None

        self._name = None
        try:
            import gurobipy
            self._gurobipy = gurobipy
            self._python_api_exists = True
            self._version = self._gurobipy.gurobi.version()
            self._name = "Gurobi %s.%s%s" % self._version
            while len(self._version) < 4:
                self._version += (0, )
            self._version = self._version[:4]
            self._version_major = self._version[0]
        except ImportError:
            self._python_api_exists = False
        except Exception as e:
            # other forms of exceptions can be thrown by the gurobi python
            # import. for example, a gurobipy.GurobiError exception is thrown
            # if all tokens for Gurobi are already in use. assuming, of
            # course, the license is a token license. unfortunately, you can't
            # import without a license, which means we can't test for the
            # exception above!
            logger.error("Import of gurobipy failed - gurobi message=%s\n" %
                         (e, ))
            self._python_api_exists = False

        self._range_constraints = set()

        self._max_obj_degree = 2
        self._max_constraint_degree = 2

        # Note: Undefined capabilities default to None
        self._capabilities.linear = True
        self._capabilities.quadratic_objective = True
        self._capabilities.quadratic_constraint = True
        self._capabilities.integer = True
        self._capabilities.sos1 = True
        self._capabilities.sos2 = True

        # fix for compatibility with pre-5.0 Gurobi
        if self._python_api_exists and \
           (self._version_major < 5):
            self._max_constraint_degree = 1
            self._capabilities.quadratic_constraint = False

    def available(self, exception_flag=True):
        if not super().available(exception_flag):
            return False
        if self._verified_license is None:
            try:
                # verify that we can get a Gurobi license
                m = self._gurobipy.Model()
                m.dispose()
                GurobiDirect._verified_license = True
            except Exception as e:
                logger.error("Could not create Model - gurobi message=%s\n" %
                             (e, ))
                GurobiDirect._verified_license = False
        return self._verified_license

    def _apply_solver(self):
        if not self._save_results:
            for block in self._pyomo_model.block_data_objects(
                    descend_into=True, active=True):
                for var in block.component_data_objects(
                        ctype=pyomo.core.base.var.Var,
                        descend_into=False,
                        active=True,
                        sort=False):
                    var.stale = True
        if self._tee:
            self._solver_model.setParam('OutputFlag', 1)
        else:
            self._solver_model.setParam('OutputFlag', 0)

        self._solver_model.setParam('LogFile', self._log_file)

        if self._keepfiles:
            print("Solver log file: " + self._log_file)

        # Options accepted by gurobi (case insensitive):
        # ['Cutoff', 'IterationLimit', 'NodeLimit', 'SolutionLimit', 'TimeLimit',
        #  'FeasibilityTol', 'IntFeasTol', 'MarkowitzTol', 'MIPGap', 'MIPGapAbs',
        #  'OptimalityTol', 'PSDTol', 'Method', 'PerturbValue', 'ObjScale', 'ScaleFlag',
        #  'SimplexPricing', 'Quad', 'NormAdjust', 'BarIterLimit', 'BarConvTol',
        #  'BarCorrectors', 'BarOrder', 'Crossover', 'CrossoverBasis', 'BranchDir',
        #  'Heuristics', 'MinRelNodes', 'MIPFocus', 'NodefileStart', 'NodefileDir',
        #  'NodeMethod', 'PumpPasses', 'RINS', 'SolutionNumber', 'SubMIPNodes', 'Symmetry',
        #  'VarBranch', 'Cuts', 'CutPasses', 'CliqueCuts', 'CoverCuts', 'CutAggPasses',
        #  'FlowCoverCuts', 'FlowPathCuts', 'GomoryPasses', 'GUBCoverCuts', 'ImpliedCuts',
        #  'MIPSepCuts', 'MIRCuts', 'NetworkCuts', 'SubMIPCuts', 'ZeroHalfCuts', 'ModKCuts',
        #  'Aggregate', 'AggFill', 'PreDual', 'DisplayInterval', 'IISMethod', 'InfUnbdInfo',
        #  'LogFile', 'PreCrush', 'PreDepRow', 'PreMIQPMethod', 'PrePasses', 'Presolve',
        #  'ResultFile', 'ImproveStartTime', 'ImproveStartGap', 'Threads', 'Dummy', 'OutputFlag']
        for key, option in self.options.items():
            # When options come from the pyomo command, all
            # values are string types, so we try to cast
            # them to a numeric value in the event that
            # setting the parameter fails.
            try:
                self._solver_model.setParam(key, option)
            except TypeError:
                # we place the exception handling for
                # checking the cast of option to a float in
                # another function so that we can simply
                # call raise here instead of except
                # TypeError as e / raise e, because the
                # latter does not preserve the Gurobi stack
                # trace
                if not _is_numeric(option):
                    raise
                self._solver_model.setParam(key, float(option))

        if self._version_major >= 5:
            for suffix in self._suffixes:
                if re.match(suffix, "dual"):
                    self._solver_model.setParam(
                        self._gurobipy.GRB.Param.QCPDual, 1)

        self._solver_model.optimize(self._callback)
        self._needs_updated = False

        self._solver_model.setParam('LogFile', 'default')

        # FIXME: can we get a return code indicating if Gurobi had a significant failure?
        return Bunch(rc=None, log=None)

    def _get_expr_from_pyomo_repn(self, repn, max_degree=2):
        referenced_vars = ComponentSet()

        degree = repn.polynomial_degree()
        if (degree is None) or (degree > max_degree):
            raise DegreeError(
                'GurobiDirect does not support expressions of degree {0}.'.
                format(degree))

        if len(repn.linear_vars) > 0:
            referenced_vars.update(repn.linear_vars)
            new_expr = self._gurobipy.LinExpr(repn.linear_coefs, [
                self._pyomo_var_to_solver_var_map[i] for i in repn.linear_vars
            ])
        else:
            new_expr = 0.0

        for i, v in enumerate(repn.quadratic_vars):
            x, y = v
            new_expr += repn.quadratic_coefs[
                i] * self._pyomo_var_to_solver_var_map[
                    x] * self._pyomo_var_to_solver_var_map[y]
            referenced_vars.add(x)
            referenced_vars.add(y)

        new_expr += repn.constant

        return new_expr, referenced_vars

    def _get_expr_from_pyomo_expr(self, expr, max_degree=2):
        if max_degree == 2:
            repn = generate_standard_repn(expr, quadratic=True)
        else:
            repn = generate_standard_repn(expr, quadratic=False)

        try:
            gurobi_expr, referenced_vars = self._get_expr_from_pyomo_repn(
                repn, max_degree)
        except DegreeError as e:
            msg = e.args[0]
            msg += '\nexpr: {0}'.format(expr)
            raise DegreeError(msg)

        return gurobi_expr, referenced_vars

    def _gurobi_lb_ub_from_var(self, var):
        if var.is_fixed():
            val = var.value
            return val, val
        if var.has_lb():
            lb = value(var.lb)
        else:
            lb = -self._gurobipy.GRB.INFINITY
        if var.has_ub():
            ub = value(var.ub)
        else:
            ub = self._gurobipy.GRB.INFINITY
        return lb, ub

    def _add_var(self, var):
        varname = self._symbol_map.getSymbol(var, self._labeler)
        vtype = self._gurobi_vtype_from_var(var)
        lb, ub = self._gurobi_lb_ub_from_var(var)

        gurobipy_var = self._solver_model.addVar(lb=lb,
                                                 ub=ub,
                                                 vtype=vtype,
                                                 name=varname)

        self._pyomo_var_to_solver_var_map[var] = gurobipy_var
        self._solver_var_to_pyomo_var_map[gurobipy_var] = var
        self._referenced_variables[var] = 0

        self._needs_updated = True

    def _set_instance(self, model, kwds={}):
        self._range_constraints = set()
        DirectOrPersistentSolver._set_instance(self, model, kwds)
        self._pyomo_con_to_solver_con_map = dict()
        self._solver_con_to_pyomo_con_map = ComponentMap()
        self._pyomo_var_to_solver_var_map = ComponentMap()
        self._solver_var_to_pyomo_var_map = ComponentMap()
        try:
            if model.name is not None:
                self._solver_model = self._gurobipy.Model(model.name)
            else:
                self._solver_model = self._gurobipy.Model()
        except Exception:
            e = sys.exc_info()[1]
            msg = ("Unable to create Gurobi model. "
                   "Have you installed the Python "
                   "bindings for Gurobi?\n\n\t" +
                   "Error message: {0}".format(e))
            raise Exception(msg)

        self._add_block(model)

        for var, n_ref in self._referenced_variables.items():
            if n_ref != 0:
                if var.fixed:
                    if not self._output_fixed_variable_bounds:
                        raise ValueError(
                            "Encountered a fixed variable (%s) inside "
                            "an active objective or constraint "
                            "expression on model %s, which is usually "
                            "indicative of a preprocessing error. Use "
                            "the IO-option 'output_fixed_variable_bounds=True' "
                            "to suppress this error and fix the variable "
                            "by overwriting its bounds in the Gurobi instance."
                            % (
                                var.name,
                                self._pyomo_model.name,
                            ))

    def _add_block(self, block):
        DirectOrPersistentSolver._add_block(self, block)

    def _add_constraint(self, con):
        if not con.active:
            return None

        if is_fixed(con.body):
            if self._skip_trivial_constraints:
                return None

        conname = self._symbol_map.getSymbol(con, self._labeler)

        if con._linear_canonical_form:
            gurobi_expr, referenced_vars = self._get_expr_from_pyomo_repn(
                con.canonical_form(), self._max_constraint_degree)
        #elif isinstance(con, LinearCanonicalRepn):
        #    gurobi_expr, referenced_vars = self._get_expr_from_pyomo_repn(
        #        con,
        #        self._max_constraint_degree)
        else:
            gurobi_expr, referenced_vars = self._get_expr_from_pyomo_expr(
                con.body, self._max_constraint_degree)

        if con.has_lb():
            if not is_fixed(con.lower):
                raise ValueError("Lower bound of constraint {0} "
                                 "is not constant.".format(con))
        if con.has_ub():
            if not is_fixed(con.upper):
                raise ValueError("Upper bound of constraint {0} "
                                 "is not constant.".format(con))

        if con.equality:
            gurobipy_con = self._solver_model.addConstr(
                lhs=gurobi_expr,
                sense=self._gurobipy.GRB.EQUAL,
                rhs=value(con.lower),
                name=conname)
        elif con.has_lb() and con.has_ub():
            gurobipy_con = self._solver_model.addRange(gurobi_expr,
                                                       value(con.lower),
                                                       value(con.upper),
                                                       name=conname)
            self._range_constraints.add(con)
        elif con.has_lb():
            gurobipy_con = self._solver_model.addConstr(
                lhs=gurobi_expr,
                sense=self._gurobipy.GRB.GREATER_EQUAL,
                rhs=value(con.lower),
                name=conname)
        elif con.has_ub():
            gurobipy_con = self._solver_model.addConstr(
                lhs=gurobi_expr,
                sense=self._gurobipy.GRB.LESS_EQUAL,
                rhs=value(con.upper),
                name=conname)
        else:
            raise ValueError("Constraint does not have a lower "
                             "or an upper bound: {0} \n".format(con))

        for var in referenced_vars:
            self._referenced_variables[var] += 1
        self._vars_referenced_by_con[con] = referenced_vars
        self._pyomo_con_to_solver_con_map[con] = gurobipy_con
        self._solver_con_to_pyomo_con_map[gurobipy_con] = con

        self._needs_updated = True

    def _add_sos_constraint(self, con):
        if not con.active:
            return None

        conname = self._symbol_map.getSymbol(con, self._labeler)
        level = con.level
        if level == 1:
            sos_type = self._gurobipy.GRB.SOS_TYPE1
        elif level == 2:
            sos_type = self._gurobipy.GRB.SOS_TYPE2
        else:
            raise ValueError("Solver does not support SOS "
                             "level {0} constraints".format(level))

        gurobi_vars = []
        weights = []

        self._vars_referenced_by_con[con] = ComponentSet()

        if hasattr(con, 'get_items'):
            # aml sos constraint
            sos_items = list(con.get_items())
        else:
            # kernel sos constraint
            sos_items = list(con.items())

        for v, w in sos_items:
            self._vars_referenced_by_con[con].add(v)
            gurobi_vars.append(self._pyomo_var_to_solver_var_map[v])
            self._referenced_variables[v] += 1
            weights.append(w)

        gurobipy_con = self._solver_model.addSOS(sos_type, gurobi_vars,
                                                 weights)
        self._pyomo_con_to_solver_con_map[con] = gurobipy_con
        self._solver_con_to_pyomo_con_map[gurobipy_con] = con

        self._needs_updated = True

    def _gurobi_vtype_from_var(self, var):
        """
        This function takes a pyomo variable and returns the appropriate gurobi variable type
        :param var: pyomo.core.base.var.Var
        :return: gurobipy.GRB.CONTINUOUS or gurobipy.GRB.BINARY or gurobipy.GRB.INTEGER
        """
        if var.is_binary():
            vtype = self._gurobipy.GRB.BINARY
        elif var.is_integer():
            vtype = self._gurobipy.GRB.INTEGER
        elif var.is_continuous():
            vtype = self._gurobipy.GRB.CONTINUOUS
        else:
            raise ValueError(
                'Variable domain type is not recognized for {0}'.format(
                    var.domain))
        return vtype

    def _set_objective(self, obj):
        if self._objective is not None:
            for var in self._vars_referenced_by_obj:
                self._referenced_variables[var] -= 1
            self._vars_referenced_by_obj = ComponentSet()
            self._objective = None

        if obj.active is False:
            raise ValueError('Cannot add inactive objective to solver.')

        if obj.sense == minimize:
            sense = self._gurobipy.GRB.MINIMIZE
        elif obj.sense == maximize:
            sense = self._gurobipy.GRB.MAXIMIZE
        else:
            raise ValueError('Objective sense is not recognized: {0}'.format(
                obj.sense))

        gurobi_expr, referenced_vars = self._get_expr_from_pyomo_expr(
            obj.expr, self._max_obj_degree)

        for var in referenced_vars:
            self._referenced_variables[var] += 1

        self._solver_model.setObjective(gurobi_expr, sense=sense)
        self._objective = obj
        self._vars_referenced_by_obj = referenced_vars

        self._needs_updated = True

    def _postsolve(self):
        # the only suffixes that we extract from GUROBI are
        # constraint duals, constraint slacks, and variable
        # reduced-costs. scan through the solver suffix list
        # and throw an exception if the user has specified
        # any others.
        extract_duals = False
        extract_slacks = False
        extract_reduced_costs = False
        for suffix in self._suffixes:
            flag = False
            if re.match(suffix, "dual"):
                extract_duals = True
                flag = True
            if re.match(suffix, "slack"):
                extract_slacks = True
                flag = True
            if re.match(suffix, "rc"):
                extract_reduced_costs = True
                flag = True
            if not flag:
                raise RuntimeError(
                    "***The gurobi_direct solver plugin cannot extract solution suffix="
                    + suffix)

        gprob = self._solver_model
        grb = self._gurobipy.GRB
        status = gprob.Status

        if gprob.getAttr(self._gurobipy.GRB.Attr.IsMIP):
            if extract_reduced_costs:
                logger.warning("Cannot get reduced costs for MIP.")
            if extract_duals:
                logger.warning("Cannot get duals for MIP.")
            extract_reduced_costs = False
            extract_duals = False

        self.results = SolverResults()
        soln = Solution()

        self.results.solver.name = self._name
        self.results.solver.wallclock_time = gprob.Runtime

        if status == grb.LOADED:  # problem is loaded, but no solution
            self.results.solver.status = SolverStatus.aborted
            self.results.solver.termination_message = "Model is loaded, but no solution information is available."
            self.results.solver.termination_condition = TerminationCondition.error
            soln.status = SolutionStatus.unknown
        elif status == grb.OPTIMAL:  # optimal
            self.results.solver.status = SolverStatus.ok
            self.results.solver.termination_message = "Model was solved to optimality (subject to tolerances), " \
                                                      "and an optimal solution is available."
            self.results.solver.termination_condition = TerminationCondition.optimal
            soln.status = SolutionStatus.optimal
        elif status == grb.INFEASIBLE:
            self.results.solver.status = SolverStatus.warning
            self.results.solver.termination_message = "Model was proven to be infeasible"
            self.results.solver.termination_condition = TerminationCondition.infeasible
            soln.status = SolutionStatus.infeasible
        elif status == grb.INF_OR_UNBD:
            self.results.solver.status = SolverStatus.warning
            self.results.solver.termination_message = "Problem proven to be infeasible or unbounded."
            self.results.solver.termination_condition = TerminationCondition.infeasibleOrUnbounded
            soln.status = SolutionStatus.unsure
        elif status == grb.UNBOUNDED:
            self.results.solver.status = SolverStatus.warning
            self.results.solver.termination_message = "Model was proven to be unbounded."
            self.results.solver.termination_condition = TerminationCondition.unbounded
            soln.status = SolutionStatus.unbounded
        elif status == grb.CUTOFF:
            self.results.solver.status = SolverStatus.aborted
            self.results.solver.termination_message = "Optimal objective for model was proven to be worse than the " \
                                                      "value specified in the Cutoff parameter. No solution " \
                                                      "information is available."
            self.results.solver.termination_condition = TerminationCondition.minFunctionValue
            soln.status = SolutionStatus.unknown
        elif status == grb.ITERATION_LIMIT:
            self.results.solver.status = SolverStatus.aborted
            self.results.solver.termination_message = "Optimization terminated because the total number of simplex " \
                                                      "iterations performed exceeded the value specified in the " \
                                                      "IterationLimit parameter."
            self.results.solver.termination_condition = TerminationCondition.maxIterations
            soln.status = SolutionStatus.stoppedByLimit
        elif status == grb.NODE_LIMIT:
            self.results.solver.status = SolverStatus.aborted
            self.results.solver.termination_message = "Optimization terminated because the total number of " \
                                                      "branch-and-cut nodes explored exceeded the value specified " \
                                                      "in the NodeLimit parameter"
            self.results.solver.termination_condition = TerminationCondition.maxEvaluations
            soln.status = SolutionStatus.stoppedByLimit
        elif status == grb.TIME_LIMIT:
            self.results.solver.status = SolverStatus.aborted
            self.results.solver.termination_message = "Optimization terminated because the time expended exceeded " \
                                                      "the value specified in the TimeLimit parameter."
            self.results.solver.termination_condition = TerminationCondition.maxTimeLimit
            soln.status = SolutionStatus.stoppedByLimit
        elif status == grb.SOLUTION_LIMIT:
            self.results.solver.status = SolverStatus.aborted
            self.results.solver.termination_message = "Optimization terminated because the number of solutions found " \
                                                      "reached the value specified in the SolutionLimit parameter."
            self.results.solver.termination_condition = TerminationCondition.unknown
            soln.status = SolutionStatus.stoppedByLimit
        elif status == grb.INTERRUPTED:
            self.results.solver.status = SolverStatus.aborted
            self.results.solver.termination_message = "Optimization was terminated by the user."
            self.results.solver.termination_condition = TerminationCondition.error
            soln.status = SolutionStatus.error
        elif status == grb.NUMERIC:
            self.results.solver.status = SolverStatus.error
            self.results.solver.termination_message = "Optimization was terminated due to unrecoverable numerical " \
                                                      "difficulties."
            self.results.solver.termination_condition = TerminationCondition.error
            soln.status = SolutionStatus.error
        elif status == grb.SUBOPTIMAL:
            self.results.solver.status = SolverStatus.warning
            self.results.solver.termination_message = "Unable to satisfy optimality tolerances; a sub-optimal " \
                                                      "solution is available."
            self.results.solver.termination_condition = TerminationCondition.other
            soln.status = SolutionStatus.feasible
        # note that USER_OBJ_LIMIT was added in Gurobi 7.0, so it may not be present
        elif (status is not None) and \
             (status == getattr(grb,'USER_OBJ_LIMIT',None)):
            self.results.solver.status = SolverStatus.aborted
            self.results.solver.termination_message = "User specified an objective limit " \
                                                      "(a bound on either the best objective " \
                                                      "or the best bound), and that limit has " \
                                                      "been reached. Solution is available."
            self.results.solver.termination_condition = TerminationCondition.other
            soln.status = SolutionStatus.stoppedByLimit
        else:
            self.results.solver.status = SolverStatus.error
            self.results.solver.termination_message = \
                ("Unhandled Gurobi solve status "
                 "("+str(status)+")")
            self.results.solver.termination_condition = TerminationCondition.error
            soln.status = SolutionStatus.error

        self.results.problem.name = gprob.ModelName

        if gprob.ModelSense == 1:
            self.results.problem.sense = minimize
        elif gprob.ModelSense == -1:
            self.results.problem.sense = maximize
        else:
            raise RuntimeError(
                'Unrecognized gurobi objective sense: {0}'.format(
                    gprob.ModelSense))

        self.results.problem.upper_bound = None
        self.results.problem.lower_bound = None
        if (gprob.NumBinVars + gprob.NumIntVars) == 0:
            try:
                self.results.problem.upper_bound = gprob.ObjVal
                self.results.problem.lower_bound = gprob.ObjVal
            except (self._gurobipy.GurobiError, AttributeError):
                pass
        elif gprob.ModelSense == 1:  # minimizing
            try:
                self.results.problem.upper_bound = gprob.ObjVal
            except (self._gurobipy.GurobiError, AttributeError):
                pass
            try:
                self.results.problem.lower_bound = gprob.ObjBound
            except (self._gurobipy.GurobiError, AttributeError):
                pass
        elif gprob.ModelSense == -1:  # maximizing
            try:
                self.results.problem.upper_bound = gprob.ObjBound
            except (self._gurobipy.GurobiError, AttributeError):
                pass
            try:
                self.results.problem.lower_bound = gprob.ObjVal
            except (self._gurobipy.GurobiError, AttributeError):
                pass
        else:
            raise RuntimeError(
                'Unrecognized gurobi objective sense: {0}'.format(
                    gprob.ModelSense))

        try:
            soln.gap = self.results.problem.upper_bound - self.results.problem.lower_bound
        except TypeError:
            soln.gap = None

        self.results.problem.number_of_constraints = gprob.NumConstrs + gprob.NumQConstrs + gprob.NumSOS
        self.results.problem.number_of_nonzeros = gprob.NumNZs
        self.results.problem.number_of_variables = gprob.NumVars
        self.results.problem.number_of_binary_variables = gprob.NumBinVars
        self.results.problem.number_of_integer_variables = gprob.NumIntVars
        self.results.problem.number_of_continuous_variables = gprob.NumVars - gprob.NumIntVars - gprob.NumBinVars
        self.results.problem.number_of_objectives = 1
        self.results.problem.number_of_solutions = gprob.SolCount

        # if a solve was stopped by a limit, we still need to check to
        # see if there is a solution available - this may not always
        # be the case, both in LP and MIP contexts.
        if self._save_results:
            """
            The code in this if statement is only needed for backwards compatibility. It is more efficient to set
            _save_results to False and use load_vars, load_duals, etc.
            """
            if gprob.SolCount > 0:
                soln_variables = soln.variable
                soln_constraints = soln.constraint

                gurobi_vars = self._solver_model.getVars()
                gurobi_vars = list(
                    set(gurobi_vars).intersection(
                        set(self._pyomo_var_to_solver_var_map.values())))
                var_vals = self._solver_model.getAttr("X", gurobi_vars)
                names = self._solver_model.getAttr("VarName", gurobi_vars)
                for gurobi_var, val, name in zip(gurobi_vars, var_vals, names):
                    pyomo_var = self._solver_var_to_pyomo_var_map[gurobi_var]
                    if self._referenced_variables[pyomo_var] > 0:
                        pyomo_var.stale = False
                        soln_variables[name] = {"Value": val}

                if extract_reduced_costs:
                    vals = self._solver_model.getAttr("Rc", gurobi_vars)
                    for gurobi_var, val, name in zip(gurobi_vars, vals, names):
                        pyomo_var = self._solver_var_to_pyomo_var_map[
                            gurobi_var]
                        if self._referenced_variables[pyomo_var] > 0:
                            soln_variables[name]["Rc"] = val

                if extract_duals or extract_slacks:
                    gurobi_cons = self._solver_model.getConstrs()
                    con_names = self._solver_model.getAttr(
                        "ConstrName", gurobi_cons)
                    for name in con_names:
                        soln_constraints[name] = {}
                    if self._version_major >= 5:
                        gurobi_q_cons = self._solver_model.getQConstrs()
                        q_con_names = self._solver_model.getAttr(
                            "QCName", gurobi_q_cons)
                        for name in q_con_names:
                            soln_constraints[name] = {}

                if extract_duals:
                    vals = self._solver_model.getAttr("Pi", gurobi_cons)
                    for val, name in zip(vals, con_names):
                        soln_constraints[name]["Dual"] = val
                    if self._version_major >= 5:
                        q_vals = self._solver_model.getAttr(
                            "QCPi", gurobi_q_cons)
                        for val, name in zip(q_vals, q_con_names):
                            soln_constraints[name]["Dual"] = val

                if extract_slacks:
                    gurobi_range_con_vars = set(
                        self._solver_model.getVars()) - set(
                            self._pyomo_var_to_solver_var_map.values())
                    vals = self._solver_model.getAttr("Slack", gurobi_cons)
                    for gurobi_con, val, name in zip(gurobi_cons, vals,
                                                     con_names):
                        pyomo_con = self._solver_con_to_pyomo_con_map[
                            gurobi_con]
                        if pyomo_con in self._range_constraints:
                            lin_expr = self._solver_model.getRow(gurobi_con)
                            for i in reversed(range(lin_expr.size())):
                                v = lin_expr.getVar(i)
                                if v in gurobi_range_con_vars:
                                    Us_ = v.X
                                    Ls_ = v.UB - v.X
                                    if Us_ > Ls_:
                                        soln_constraints[name]["Slack"] = Us_
                                    else:
                                        soln_constraints[name]["Slack"] = -Ls_
                                    break
                        else:
                            soln_constraints[name]["Slack"] = val
                    if self._version_major >= 5:
                        q_vals = self._solver_model.getAttr(
                            "QCSlack", gurobi_q_cons)
                        for val, name in zip(q_vals, q_con_names):
                            soln_constraints[name]["Slack"] = val
        elif self._load_solutions:
            if gprob.SolCount > 0:

                self._load_vars()

                if extract_reduced_costs:
                    self._load_rc()

                if extract_duals:
                    self._load_duals()

                if extract_slacks:
                    self._load_slacks()

        self.results.solution.insert(soln)

        # finally, clean up any temporary files registered with the temp file
        # manager that were created/populated *directly* by this plugin.
        TempfileManager.pop(remove=not self._keepfiles)

        return DirectOrPersistentSolver._postsolve(self)

    def warm_start_capable(self):
        return True

    def _warm_start(self):
        for pyomo_var, gurobipy_var in self._pyomo_var_to_solver_var_map.items(
        ):
            if pyomo_var.value is not None:
                gurobipy_var.setAttr(self._gurobipy.GRB.Attr.Start,
                                     value(pyomo_var))
        self._needs_updated = True

    def _load_vars(self, vars_to_load=None):
        var_map = self._pyomo_var_to_solver_var_map
        ref_vars = self._referenced_variables
        if vars_to_load is None:
            vars_to_load = var_map.keys()

        gurobi_vars_to_load = [
            var_map[pyomo_var] for pyomo_var in vars_to_load
        ]
        vals = self._solver_model.getAttr("X", gurobi_vars_to_load)

        for var, val in zip(vars_to_load, vals):
            if ref_vars[var] > 0:
                var.stale = False
                var.value = val

    def _load_rc(self, vars_to_load=None):
        if not hasattr(self._pyomo_model, 'rc'):
            self._pyomo_model.rc = Suffix(direction=Suffix.IMPORT)
        var_map = self._pyomo_var_to_solver_var_map
        ref_vars = self._referenced_variables
        rc = self._pyomo_model.rc
        if vars_to_load is None:
            vars_to_load = var_map.keys()

        gurobi_vars_to_load = [
            var_map[pyomo_var] for pyomo_var in vars_to_load
        ]
        vals = self._solver_model.getAttr("Rc", gurobi_vars_to_load)

        for var, val in zip(vars_to_load, vals):
            if ref_vars[var] > 0:
                rc[var] = val

    def _load_duals(self, cons_to_load=None):
        if not hasattr(self._pyomo_model, 'dual'):
            self._pyomo_model.dual = Suffix(direction=Suffix.IMPORT)
        con_map = self._pyomo_con_to_solver_con_map
        reverse_con_map = self._solver_con_to_pyomo_con_map
        dual = self._pyomo_model.dual

        if cons_to_load is None:
            linear_cons_to_load = self._solver_model.getConstrs()
            if self._version_major >= 5:
                quadratic_cons_to_load = self._solver_model.getQConstrs()
        else:
            gurobi_cons_to_load = set(
                [con_map[pyomo_con] for pyomo_con in cons_to_load])
            linear_cons_to_load = gurobi_cons_to_load.intersection(
                set(self._solver_model.getConstrs()))
            if self._version_major >= 5:
                quadratic_cons_to_load = gurobi_cons_to_load.intersection(
                    set(self._solver_model.getQConstrs()))
        linear_vals = self._solver_model.getAttr("Pi", linear_cons_to_load)
        if self._version_major >= 5:
            quadratic_vals = self._solver_model.getAttr(
                "QCPi", quadratic_cons_to_load)

        for gurobi_con, val in zip(linear_cons_to_load, linear_vals):
            pyomo_con = reverse_con_map[gurobi_con]
            dual[pyomo_con] = val
        if self._version_major >= 5:
            for gurobi_con, val in zip(quadratic_cons_to_load, quadratic_vals):
                pyomo_con = reverse_con_map[gurobi_con]
                dual[pyomo_con] = val

    def _load_slacks(self, cons_to_load=None):
        if not hasattr(self._pyomo_model, 'slack'):
            self._pyomo_model.slack = Suffix(direction=Suffix.IMPORT)
        con_map = self._pyomo_con_to_solver_con_map
        reverse_con_map = self._solver_con_to_pyomo_con_map
        slack = self._pyomo_model.slack

        gurobi_range_con_vars = set(self._solver_model.getVars()) - set(
            self._pyomo_var_to_solver_var_map.values())

        if cons_to_load is None:
            linear_cons_to_load = self._solver_model.getConstrs()
            if self._version_major >= 5:
                quadratic_cons_to_load = self._solver_model.getQConstrs()
        else:
            gurobi_cons_to_load = set(
                [con_map[pyomo_con] for pyomo_con in cons_to_load])
            linear_cons_to_load = gurobi_cons_to_load.intersection(
                set(self._solver_model.getConstrs()))
            if self._version_major >= 5:
                quadratic_cons_to_load = gurobi_cons_to_load.intersection(
                    set(self._solver_model.getQConstrs()))
        linear_vals = self._solver_model.getAttr("Slack", linear_cons_to_load)
        if self._version_major >= 5:
            quadratic_vals = self._solver_model.getAttr(
                "QCSlack", quadratic_cons_to_load)

        for gurobi_con, val in zip(linear_cons_to_load, linear_vals):
            pyomo_con = reverse_con_map[gurobi_con]
            if pyomo_con in self._range_constraints:
                lin_expr = self._solver_model.getRow(gurobi_con)
                for i in reversed(range(lin_expr.size())):
                    v = lin_expr.getVar(i)
                    if v in gurobi_range_con_vars:
                        Us_ = v.X
                        Ls_ = v.UB - v.X
                        if Us_ > Ls_:
                            slack[pyomo_con] = Us_
                        else:
                            slack[pyomo_con] = -Ls_
                        break
            else:
                slack[pyomo_con] = val
        if self._version_major >= 5:
            for gurobi_con, val in zip(quadratic_cons_to_load, quadratic_vals):
                pyomo_con = reverse_con_map[gurobi_con]
                slack[pyomo_con] = val

    def load_duals(self, cons_to_load=None):
        """
        Load the duals into the 'dual' suffix. The 'dual' suffix must live on the parent model.

        Parameters
        ----------
        cons_to_load: list of Constraint
        """
        self._load_duals(cons_to_load)

    def load_rc(self, vars_to_load):
        """
        Load the reduced costs into the 'rc' suffix. The 'rc' suffix must live on the parent model.

        Parameters
        ----------
        vars_to_load: list of Var
        """
        self._load_rc(vars_to_load)

    def load_slacks(self, cons_to_load=None):
        """
        Load the values of the slack variables into the 'slack' suffix. The 'slack' suffix must live on the parent
        model.

        Parameters
        ----------
        cons_to_load: list of Constraint
        """
        self._load_slacks(cons_to_load)

    def _update(self):
        self._solver_model.update()
        self._needs_updated = False
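
For context, this plugin is normally driven through SolverFactory rather than instantiated directly. The sketch below is illustrative and assumes a working Gurobi license and the gurobipy bindings; it shows how the IMPORT suffixes consumed by _postsolve, load_duals, and load_rc are declared on the model.

import pyomo.environ as pyo

m = pyo.ConcreteModel()
m.x = pyo.Var(bounds=(0, 10))
m.y = pyo.Var(bounds=(0, 10))
m.con = pyo.Constraint(expr=m.x + 2 * m.y >= 4)
m.obj = pyo.Objective(expr=3 * m.x + m.y, sense=pyo.minimize)

# Active IMPORT suffixes request duals and reduced costs from the solver.
m.dual = pyo.Suffix(direction=pyo.Suffix.IMPORT)
m.rc = pyo.Suffix(direction=pyo.Suffix.IMPORT)

opt = pyo.SolverFactory('gurobi_direct')
results = opt.solve(m, tee=True)
print(results.solver.termination_condition, pyo.value(m.obj))
print(m.dual[m.con], m.rc[m.x])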
Exemple #23
def flatten_components_along_sets(m, sets, ctype, indices=None):
    """
    This function iterates over components (recursively) contained
    in a block and partitions their data objects into components
    indexed only by the specified sets.

    Args:
        m : Block whose components (and their sub-components) will be
            partitioned
        sets : Possible indexing sets for the returned components
        ctype : Type of component to identify and partition
        indices : indices of sets to use when descending into subblocks

    Returns:
        tuple: The first entry is a list of tuples of Pyomo Sets. The
               second is a list of lists of components, each indexed by
               the corresponding sets in the first entry.
        
    """
    if indices is None:
        index_map = ComponentMap()
    elif type(indices) is ComponentMap:
        index_map = indices
    else:
        index_map = ComponentMap(zip(sets, indices))
    for s, idx in index_map.items():
        if idx not in s:
            raise ValueError(
                "%s is a bad index for set %s. \nPlease provide an index "
                "that is in the set." % (idx, s.name)
            )
    index_stack = []

    set_of_sets = ComponentSet(sets)
    # Using these two `OrderedDict`s is a workaround because I can't
    # reliably use tuples of components as keys in a `ComponentMap`.
    sets_dict = OrderedDict()
    comps_dict = OrderedDict()
    for index_sets, slice_ in generate_sliced_components(m, index_stack,
            m, set_of_sets, ctype, index_map):
        # Note that index_sets should always be a tuple, never a scalar.

        # TODO: Potentially re-order sets at this point.
        # In this way (time, space) would have the same key as (space, time).
        # Then we'd have to somehow "swap indexing sets" when we create
        # the reference below.
        key = tuple(id(c) for c in index_sets)
        if key not in sets_dict:
            if len(key) == 0:
                sets_dict[key] = (UnindexedComponent_set,)
            else:
                sets_dict[key] = index_sets
        if key not in comps_dict:
            comps_dict[key] = []
        if len(key) == 0:
            comps_dict[key].append(slice_)
        else:
            # If the user wants to change these flags, they can access the
            # slice via the `referent` attribute of each reference component.
            slice_.attribute_errors_generate_exceptions = False
            slice_.key_errors_generate_exceptions = False
            comps_dict[key].append(Reference(slice_))

    # list-of-tuples of Sets:
    sets_list = list(sets for sets in sets_dict.values())
    # list-of-lists of components:
    comps_list = list(comps for comps in comps_dict.values())
    # E.g. we return: (
    #          [(time, space), (time,)],
    #          [[some_component, ...], [other, ...]],
    #      )                            ^ These components are indexed by time
    #            ^ These components are indexed by time and space
    return sets_list, comps_list
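
To make the return structure concrete, here is a short illustrative sketch (the model is hypothetical) that flattens the variables of a block along a single time set and prints how they are grouped.

import pyomo.environ as pyo
import pyomo.dae as dae

m = pyo.ConcreteModel()
m.time = dae.ContinuousSet(initialize=[0.0, 1.0, 2.0])
m.space = pyo.Set(initialize=[1, 2])
m.scalar = pyo.Var()
m.v_t = pyo.Var(m.time)
m.v_ts = pyo.Var(m.time, m.space)

sets_list, comps_list = flatten_components_along_sets(m, (m.time,), pyo.Var)
for index_sets, comps in zip(sets_list, comps_list):
    # E.g. v_t and each v_ts[:, j] slice end up in the group indexed by time,
    # while the scalar variable lands in the UnindexedComponent_set group.
    print([str(s) for s in index_sets], [str(c) for c in comps])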
Exemple #24
class XpressDirect(DirectSolver):

    _name = None
    _version = None
    XpressException = RuntimeError

    def __init__(self, **kwds):
        if 'type' not in kwds:
            kwds['type'] = 'xpress_direct'
        super(XpressDirect, self).__init__(**kwds)
        self._pyomo_var_to_solver_var_map = ComponentMap()
        self._solver_var_to_pyomo_var_map = ComponentMap()
        self._pyomo_con_to_solver_con_map = dict()
        self._solver_con_to_pyomo_con_map = ComponentMap()

        self._range_constraints = set()

        self._python_api_exists = xpress_available

        # TODO: this isn't a limit of XPRESS, which implements an SLP
        #       method for NLPs. But it is a limit of *this* interface
        self._max_obj_degree = 2
        self._max_constraint_degree = 2

        # There does not seem to be an easy way to get the
        # wallclock time out of xpress, so we will measure it
        # ourselves
        self._opt_time = None

        # Note: Undefined capabilities default to None
        self._capabilities.linear = True
        self._capabilities.quadratic_objective = True
        self._capabilities.quadratic_constraint = True
        self._capabilities.integer = True
        self._capabilities.sos1 = True
        self._capabilities.sos2 = True

        # remove the instance-level definition of the xpress version:
        # because the version comes from an imported module, only one
        # version of xpress is supported (and stored as a class attribute)
        del self._version

    def available(self, exception_flag=True):
        """True if the solver is available."""

        if exception_flag and not xpress_available:
            xpress.log_import_warning(logger=__name__)
            raise ApplicationError(
                "No Python bindings available for %s solver plugin" %
                (type(self), ))
        return bool(xpress_available)

    def _apply_solver(self):
        if not self._save_results:
            for block in self._pyomo_model.block_data_objects(
                    descend_into=True, active=True):
                for var in block.component_data_objects(
                        ctype=pyomo.core.base.var.Var,
                        descend_into=False,
                        active=True,
                        sort=False):
                    var.stale = True

        self._solver_model.setlogfile(self._log_file)
        if self._keepfiles:
            print("Solver log file: " + self._log_file)

        # Setting a log file in xpress disables all output
        # in xpress versions less than 36.
        # This callback prints all messages to stdout
        # when using those xpress versions.
        if self._tee and XpressDirect._version[0] < 36:
            self._solver_model.addcbmessage(_print_message, None, 0)

        # set xpress options
        # if the user specifies a 'mipgap', set it, and
        # set xpress's related options to 0.
        if self.options.mipgap is not None:
            self._solver_model.setControl('miprelstop',
                                          float(self.options.mipgap))
            self._solver_model.setControl('miprelcutoff', 0.0)
            self._solver_model.setControl('mipaddcutoff', 0.0)
        # xpress is picky about the type which is passed
        # into a control. So we will infer and cast
        # get the xpress valid controls
        xp_controls = xpress.controls
        for key, option in self.options.items():
            if key == 'mipgap':  # handled above
                continue
            try:
                self._solver_model.setControl(key, option)
            except XpressDirect.XpressException:
                # take another try, converting to its type
                # we'll wrap this in a function to raise the
                # xpress error
                contr_type = type(getattr(xp_controls, key))
                if not _is_convertable(contr_type, option):
                    raise
                self._solver_model.setControl(key, contr_type(option))

        start_time = time.time()
        if self._tee:
            self._solver_model.solve()
        else:
            # In xpress versions greater than or equal to 36,
            # it seems difficult to completely suppress console
            # output without disabling logging altogether.
            # As a workaround, we capture all screen output
            # when tee is False.
            with capture_output() as OUT:
                self._solver_model.solve()
        self._opt_time = time.time() - start_time

        self._solver_model.setlogfile('')
        if self._tee and XpressDirect._version[0] < 36:
            self._solver_model.removecbmessage(_print_message, None)

        # FIXME: can we get a return code indicating if XPRESS had a significant failure?
        return Bunch(rc=None, log=None)

    def _get_expr_from_pyomo_repn(self, repn, max_degree=2):
        referenced_vars = ComponentSet()

        degree = repn.polynomial_degree()
        if (degree is None) or (degree > max_degree):
            raise DegreeError(
                'XpressDirect does not support expressions of degree {0}.'.
                format(degree))

        # NOTE: xpress's python interface only allows for expressions
        #       with native numeric types. Others, like numpy.float64,
        #       will cause an exception when constructing expressions
        if len(repn.linear_vars) > 0:
            referenced_vars.update(repn.linear_vars)
            new_expr = xpress.Sum(
                float(coef) * self._pyomo_var_to_solver_var_map[var]
                for coef, var in zip(repn.linear_coefs, repn.linear_vars))
        else:
            new_expr = 0.0

        for coef, (x, y) in zip(repn.quadratic_coefs, repn.quadratic_vars):
            new_expr += float(coef) * self._pyomo_var_to_solver_var_map[
                x] * self._pyomo_var_to_solver_var_map[y]
            referenced_vars.add(x)
            referenced_vars.add(y)

        new_expr += repn.constant

        return new_expr, referenced_vars

    def _get_expr_from_pyomo_expr(self, expr, max_degree=2):
        if max_degree == 2:
            repn = generate_standard_repn(expr, quadratic=True)
        else:
            repn = generate_standard_repn(expr, quadratic=False)

        try:
            xpress_expr, referenced_vars = self._get_expr_from_pyomo_repn(
                repn, max_degree)
        except DegreeError as e:
            msg = e.args[0]
            msg += '\nexpr: {0}'.format(expr)
            raise DegreeError(msg)

        return xpress_expr, referenced_vars

    def _xpress_lb_ub_from_var(self, var):
        if var.is_fixed():
            val = var.value
            return val, val
        if var.has_lb():
            lb = value(var.lb)
        else:
            lb = -xpress.infinity
        if var.has_ub():
            ub = value(var.ub)
        else:
            ub = xpress.infinity
        return lb, ub

    def _add_var(self, var):
        varname = self._symbol_map.getSymbol(var, self._labeler)
        vartype = self._xpress_vartype_from_var(var)
        lb, ub = self._xpress_lb_ub_from_var(var)

        xpress_var = xpress.var(name=varname, lb=lb, ub=ub, vartype=vartype)
        self._solver_model.addVariable(xpress_var)

        ## bounds on binary variables don't seem to be set correctly
        ## by the method above
        if vartype == xpress.binary:
            if lb == ub:
                self._solver_model.chgbounds([xpress_var], ['B'], [lb])
            else:
                self._solver_model.chgbounds([xpress_var, xpress_var],
                                             ['L', 'U'], [lb, ub])

        self._pyomo_var_to_solver_var_map[var] = xpress_var
        self._solver_var_to_pyomo_var_map[xpress_var] = var
        self._referenced_variables[var] = 0

    def _set_instance(self, model, kwds={}):
        self._range_constraints = set()
        DirectOrPersistentSolver._set_instance(self, model, kwds)
        self._pyomo_con_to_solver_con_map = dict()
        self._solver_con_to_pyomo_con_map = ComponentMap()
        self._pyomo_var_to_solver_var_map = ComponentMap()
        self._solver_var_to_pyomo_var_map = ComponentMap()
        try:
            if model.name is not None:
                self._solver_model = xpress.problem(name=model.name)
            else:
                self._solver_model = xpress.problem()
        except Exception:
            e = sys.exc_info()[1]
            msg = ("Unable to create Xpress model. "
                   "Have you installed the Python "
                   "bindings for Xpress?\n\n\t" +
                   "Error message: {0}".format(e))
            raise Exception(msg)
        self._add_block(model)

    def _add_block(self, block):
        DirectOrPersistentSolver._add_block(self, block)

    def _add_constraint(self, con):
        if not con.active:
            return None

        if is_fixed(con.body):
            if self._skip_trivial_constraints:
                return None

        conname = self._symbol_map.getSymbol(con, self._labeler)

        if con._linear_canonical_form:
            xpress_expr, referenced_vars = self._get_expr_from_pyomo_repn(
                con.canonical_form(), self._max_constraint_degree)
        else:
            xpress_expr, referenced_vars = self._get_expr_from_pyomo_expr(
                con.body, self._max_constraint_degree)

        if con.has_lb():
            if not is_fixed(con.lower):
                raise ValueError("Lower bound of constraint {0} "
                                 "is not constant.".format(con))
        if con.has_ub():
            if not is_fixed(con.upper):
                raise ValueError("Upper bound of constraint {0} "
                                 "is not constant.".format(con))

        if con.equality:
            xpress_con = xpress.constraint(body=xpress_expr,
                                           sense=xpress.eq,
                                           rhs=value(con.lower),
                                           name=conname)
        elif con.has_lb() and con.has_ub():
            xpress_con = xpress.constraint(body=xpress_expr,
                                           sense=xpress.range,
                                           lb=value(con.lower),
                                           ub=value(con.upper),
                                           name=conname)
            self._range_constraints.add(xpress_con)
        elif con.has_lb():
            xpress_con = xpress.constraint(body=xpress_expr,
                                           sense=xpress.geq,
                                           rhs=value(con.lower),
                                           name=conname)
        elif con.has_ub():
            xpress_con = xpress.constraint(body=xpress_expr,
                                           sense=xpress.leq,
                                           rhs=value(con.upper),
                                           name=conname)
        else:
            raise ValueError("Constraint does not have a lower "
                             "or an upper bound: {0} \n".format(con))

        self._solver_model.addConstraint(xpress_con)

        for var in referenced_vars:
            self._referenced_variables[var] += 1
        self._vars_referenced_by_con[con] = referenced_vars
        self._pyomo_con_to_solver_con_map[con] = xpress_con
        self._solver_con_to_pyomo_con_map[xpress_con] = con
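    # Summary of the constraint mapping performed above (descriptive note):
    #   equality          -> xpress.eq    with rhs = value(con.lower)
    #   lb <= body <= ub  -> xpress.range with lb/ub, also recorded in
    #                        self._range_constraints for slack handling
    #   lb <= body        -> xpress.geq   with rhs = value(con.lower)
    #   body <= ub        -> xpress.leq   with rhs = value(con.upper)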

    def _add_sos_constraint(self, con):
        if not con.active:
            return None

        conname = self._symbol_map.getSymbol(con, self._labeler)
        level = con.level
        if level not in [1, 2]:
            raise ValueError("Solver does not support SOS "
                             "level {0} constraints".format(level))

        xpress_vars = []
        weights = []

        self._vars_referenced_by_con[con] = ComponentSet()

        if hasattr(con, 'get_items'):
            # aml sos constraint
            sos_items = list(con.get_items())
        else:
            # kernel sos constraint
            sos_items = list(con.items())

        for v, w in sos_items:
            self._vars_referenced_by_con[con].add(v)
            xpress_vars.append(self._pyomo_var_to_solver_var_map[v])
            self._referenced_variables[v] += 1
            weights.append(w)

        xpress_con = xpress.sos(xpress_vars, weights, level, conname)
        self._solver_model.addSOS(xpress_con)
        self._pyomo_con_to_solver_con_map[con] = xpress_con
        self._solver_con_to_pyomo_con_map[xpress_con] = con

    def _xpress_vartype_from_var(self, var):
        """
        This function takes a pyomo variable and returns the appropriate xpress variable type
        :param var: pyomo.core.base.var.Var
        :return: xpress.continuous or xpress.binary or xpress.integer
        """
        if var.is_binary():
            vartype = xpress.binary
        elif var.is_integer():
            vartype = xpress.integer
        elif var.is_continuous():
            vartype = xpress.continuous
        else:
            raise ValueError(
                'Variable domain type is not recognized for {0}'.format(
                    var.domain))
        return vartype
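    # For example (sketch of the mapping above): a Binary-domain Var maps to
    # xpress.binary, an Integers-domain Var to xpress.integer, and a Reals or
    # NonNegativeReals Var to xpress.continuous.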

    def _set_objective(self, obj):
        if self._objective is not None:
            for var in self._vars_referenced_by_obj:
                self._referenced_variables[var] -= 1
            self._vars_referenced_by_obj = ComponentSet()
            self._objective = None

        if obj.active is False:
            raise ValueError('Cannot add inactive objective to solver.')

        if obj.sense == minimize:
            sense = xpress.minimize
        elif obj.sense == maximize:
            sense = xpress.maximize
        else:
            raise ValueError('Objective sense is not recognized: {0}'.format(
                obj.sense))

        xpress_expr, referenced_vars = self._get_expr_from_pyomo_expr(
            obj.expr, self._max_obj_degree)

        for var in referenced_vars:
            self._referenced_variables[var] += 1

        self._solver_model.setObjective(xpress_expr, sense=sense)
        self._objective = obj
        self._vars_referenced_by_obj = referenced_vars

    def _postsolve(self):
        # the only suffixes that we extract from XPRESS are
        # constraint duals, constraint slacks, and variable
        # reduced-costs. scan through the solver suffix list
        # and throw an exception if the user has specified
        # any others.
        extract_duals = False
        extract_slacks = False
        extract_reduced_costs = False
        for suffix in self._suffixes:
            flag = False
            if re.match(suffix, "dual"):
                extract_duals = True
                flag = True
            if re.match(suffix, "slack"):
                extract_slacks = True
                flag = True
            if re.match(suffix, "rc"):
                extract_reduced_costs = True
                flag = True
            if not flag:
                raise RuntimeError(
                    "***The xpress_direct solver plugin cannot extract solution suffix="
                    + suffix)

        xprob = self._solver_model
        xp = xpress
        xprob_attrs = xprob.attributes

        ## Which Xpress status attribute applies depends on whether this is a MIP:
        ## (number of integer vars > 0) or (number of special ordered sets > 0)
        is_mip = (xprob_attrs.mipents > 0) or (xprob_attrs.sets > 0)

        if is_mip:
            if extract_reduced_costs:
                logger.warning("Cannot get reduced costs for MIP.")
            if extract_duals:
                logger.warning("Cannot get duals for MIP.")
            extract_reduced_costs = False
            extract_duals = False

        self.results = SolverResults()
        soln = Solution()

        self.results.solver.name = XpressDirect._name
        self.results.solver.wallclock_time = self._opt_time

        if is_mip:
            status = xprob_attrs.mipstatus
            mip_sols = xprob_attrs.mipsols
            if status == xp.mip_not_loaded:
                self.results.solver.status = SolverStatus.aborted
                self.results.solver.termination_message = "Model is not loaded; no solution information is available."
                self.results.solver.termination_condition = TerminationCondition.error
                soln.status = SolutionStatus.unknown
            # No MIP solution is available: respectively, the LP relaxation did not
            # solve, the LP solved but no MIP solution was found, or the search
            # started but is incomplete.
            elif status == xp.mip_lp_not_optimal \
                    or status == xp.mip_lp_optimal \
                    or status == xp.mip_no_sol_found:
                self.results.solver.status = SolverStatus.aborted
                self.results.solver.termination_message = "Model is loaded, but no solution information is available."
                self.results.solver.termination_condition = TerminationCondition.error
                soln.status = SolutionStatus.unknown
            elif status == xp.mip_solution:  # some solution available
                self.results.solver.status = SolverStatus.warning
                self.results.solver.termination_message = "Unable to satisfy optimality tolerances; a sub-optimal " \
                                                          "solution is available."
                self.results.solver.termination_condition = TerminationCondition.other
                soln.status = SolutionStatus.feasible
            elif status == xp.mip_infeas:  # MIP proven infeasible
                self.results.solver.status = SolverStatus.warning
                self.results.solver.termination_message = "Model was proven to be infeasible"
                self.results.solver.termination_condition = TerminationCondition.infeasible
                soln.status = SolutionStatus.infeasible
            elif status == xp.mip_optimal:  # optimal
                self.results.solver.status = SolverStatus.ok
                self.results.solver.termination_message = "Model was solved to optimality (subject to tolerances), " \
                                                          "and an optimal solution is available."
                self.results.solver.termination_condition = TerminationCondition.optimal
                soln.status = SolutionStatus.optimal
            elif status == xp.mip_unbounded and mip_sols > 0:
                self.results.solver.status = SolverStatus.warning
                self.results.solver.termination_message = "LP relaxation was proven to be unbounded, " \
                                                          "but a solution is available."
                self.results.solver.termination_condition = TerminationCondition.unbounded
                soln.status = SolutionStatus.unbounded
            elif status == xp.mip_unbounded and mip_sols <= 0:
                self.results.solver.status = SolverStatus.warning
                self.results.solver.termination_message = "LP relaxation was proven to be unbounded."
                self.results.solver.termination_condition = TerminationCondition.unbounded
                soln.status = SolutionStatus.unbounded
            else:
                self.results.solver.status = SolverStatus.error
                self.results.solver.termination_message = \
                    ("Unhandled Xpress solve status "
                     "("+str(status)+")")
                self.results.solver.termination_condition = TerminationCondition.error
                soln.status = SolutionStatus.error
        else:  ## an LP, we'll check the lpstatus
            status = xprob_attrs.lpstatus
            if status == xp.lp_unstarted:
                self.results.solver.status = SolverStatus.aborted
                self.results.solver.termination_message = "Model is not loaded; no solution information is available."
                self.results.solver.termination_condition = TerminationCondition.error
                soln.status = SolutionStatus.unknown
            elif status == xp.lp_optimal:
                self.results.solver.status = SolverStatus.ok
                self.results.solver.termination_message = "Model was solved to optimality (subject to tolerances), " \
                                                          "and an optimal solution is available."
                self.results.solver.termination_condition = TerminationCondition.optimal
                soln.status = SolutionStatus.optimal
            elif status == xp.lp_infeas:
                self.results.solver.status = SolverStatus.warning
                self.results.solver.termination_message = "Model was proven to be infeasible"
                self.results.solver.termination_condition = TerminationCondition.infeasible
                soln.status = SolutionStatus.infeasible
            elif status == xp.lp_cutoff:
                self.results.solver.status = SolverStatus.ok
                self.results.solver.termination_message = "Optimal objective for model was proven to be worse than the " \
                                                          "cutoff value specified; a solution is available."
                self.results.solver.termination_condition = TerminationCondition.minFunctionValue
                soln.status = SolutionStatus.optimal
            elif status == xp.lp_unfinished:
                self.results.solver.status = SolverStatus.aborted
                self.results.solver.termination_message = "Optimization was terminated by the user."
                self.results.solver.termination_condition = TerminationCondition.error
                soln.status = SolutionStatus.error
            elif status == xp.lp_unbounded:
                self.results.solver.status = SolverStatus.warning
                self.results.solver.termination_message = "Model was proven to be unbounded."
                self.results.solver.termination_condition = TerminationCondition.unbounded
                soln.status = SolutionStatus.unbounded
            elif status == xp.lp_cutoff_in_dual:
                self.results.solver.status = SolverStatus.ok
                self.results.solver.termination_message = "Xpress reported the LP was cutoff in the dual."
                self.results.solver.termination_condition = TerminationCondition.minFunctionValue
                soln.status = SolutionStatus.optimal
            elif status == xp.lp_unsolved:
                self.results.solver.status = SolverStatus.error
                self.results.solver.termination_message = "Optimization was terminated due to unrecoverable numerical " \
                                                          "difficulties."
                self.results.solver.termination_condition = TerminationCondition.error
                soln.status = SolutionStatus.error
            elif status == xp.lp_nonconvex:
                self.results.solver.status = SolverStatus.error
                self.results.solver.termination_message = "Optimization was terminated because nonconvex quadratic data " \
                                                          "were found."
                self.results.solver.termination_condition = TerminationCondition.error
                soln.status = SolutionStatus.error
            else:
                self.results.solver.status = SolverStatus.error
                self.results.solver.termination_message = \
                    ("Unhandled Xpress solve status "
                     "("+str(status)+")")
                self.results.solver.termination_condition = TerminationCondition.error
                soln.status = SolutionStatus.error

        self.results.problem.name = xprob_attrs.matrixname

        if xprob_attrs.objsense == 1.0:
            self.results.problem.sense = minimize
        elif xprob_attrs.objsense == -1.0:
            self.results.problem.sense = maximize
        else:
            raise RuntimeError(
                'Unrecognized Xpress objective sense: {0}'.format(
                    xprob_attrs.objsense))

        self.results.problem.upper_bound = None
        self.results.problem.lower_bound = None
        if not is_mip:  #LP or continuous problem
            try:
                self.results.problem.upper_bound = xprob_attrs.lpobjval
                self.results.problem.lower_bound = xprob_attrs.lpobjval
            except (XpressDirect.XpressException, AttributeError):
                pass
        elif xprob_attrs.objsense == 1.0:  # minimizing MIP
            try:
                self.results.problem.upper_bound = xprob_attrs.mipbestobjval
            except (XpressDirect.XpressException, AttributeError):
                pass
            try:
                self.results.problem.lower_bound = xprob_attrs.bestbound
            except (XpressDirect.XpressException, AttributeError):
                pass
        elif xprob_attrs.objsense == -1.0:  # maximizing MIP
            try:
                self.results.problem.upper_bound = xprob_attrs.bestbound
            except (XpressDirect.XpressException, AttributeError):
                pass
            try:
                self.results.problem.lower_bound = xprob_attrs.mipbestobjval
            except (XpressDirect.XpressException, AttributeError):
                pass
        else:
            raise RuntimeError(
                'Unrecognized xpress objective sense: {0}'.format(
                    xprob_attrs.objsense))

        try:
            soln.gap = self.results.problem.upper_bound - self.results.problem.lower_bound
        except TypeError:
            soln.gap = None

        self.results.problem.number_of_constraints = xprob_attrs.rows + xprob_attrs.sets + xprob_attrs.qconstraints
        self.results.problem.number_of_nonzeros = xprob_attrs.elems
        self.results.problem.number_of_variables = xprob_attrs.cols
        self.results.problem.number_of_integer_variables = xprob_attrs.mipents
        self.results.problem.number_of_continuous_variables = xprob_attrs.cols - xprob_attrs.mipents
        self.results.problem.number_of_objectives = 1
        self.results.problem.number_of_solutions = xprob_attrs.mipsols if is_mip else 1

        # if a solve was stopped by a limit, we still need to check to
        # see if there is a solution available - this may not always
        # be the case, both in LP and MIP contexts.
        if self._save_results:
            """
            The code in this if statement is only needed for backwards compatibility. It is more efficient to set
            _save_results to False and use load_vars, load_duals, etc.
            """
            if xprob_attrs.lpstatus in \
                    [xp.lp_optimal, xp.lp_cutoff, xp.lp_cutoff_in_dual] or \
                    xprob_attrs.mipsols > 0:
                soln_variables = soln.variable
                soln_constraints = soln.constraint

                xpress_vars = list(self._solver_var_to_pyomo_var_map.keys())
                var_vals = xprob.getSolution(xpress_vars)
                for xpress_var, val in zip(xpress_vars, var_vals):
                    pyomo_var = self._solver_var_to_pyomo_var_map[xpress_var]
                    if self._referenced_variables[pyomo_var] > 0:
                        pyomo_var.stale = False
                        soln_variables[xpress_var.name] = {"Value": val}

                if extract_reduced_costs:
                    vals = xprob.getRCost(xpress_vars)
                    for xpress_var, val in zip(xpress_vars, vals):
                        pyomo_var = self._solver_var_to_pyomo_var_map[
                            xpress_var]
                        if self._referenced_variables[pyomo_var] > 0:
                            soln_variables[xpress_var.name]["Rc"] = val

                if extract_duals or extract_slacks:
                    xpress_cons = list(
                        self._solver_con_to_pyomo_con_map.keys())
                    for con in xpress_cons:
                        soln_constraints[con.name] = {}

                if extract_duals:
                    vals = xprob.getDual(xpress_cons)
                    for val, con in zip(vals, xpress_cons):
                        soln_constraints[con.name]["Dual"] = val

                if extract_slacks:
                    vals = xprob.getSlack(xpress_cons)
                    for con, val in zip(xpress_cons, vals):
                        if con in self._range_constraints:
                            ## for xpress, the slack on a range constraint
                            ## is based on the upper bound
                            lb = con.lb
                            ub = con.ub
                            ub_s = val
                            expr_val = ub - ub_s
                            lb_s = lb - expr_val
                            if abs(ub_s) > abs(lb_s):
                                soln_constraints[con.name]["Slack"] = ub_s
                            else:
                                soln_constraints[con.name]["Slack"] = lb_s
                        else:
                            soln_constraints[con.name]["Slack"] = val

        elif self._load_solutions:
            if xprob_attrs.lpstatus == xp.lp_optimal and \
                    ((not is_mip) or (xprob_attrs.mipsols > 0)):

                self._load_vars()

                if extract_reduced_costs:
                    self._load_rc()

                if extract_duals:
                    self._load_duals()

                if extract_slacks:
                    self._load_slacks()

        self.results.solution.insert(soln)

        # finally, clean any temporary files registered with the temp file
        # manager that were created/populated directly by this plugin.
        TempfileManager.pop(remove=not self._keepfiles)
        return DirectOrPersistentSolver._postsolve(self)

    def warm_start_capable(self):
        return True

    def _warm_start(self):
        mipsolval = list()
        mipsolcol = list()
        for pyomo_var, xpress_var in self._pyomo_var_to_solver_var_map.items():
            if pyomo_var.value is not None:
                mipsolval.append(value(pyomo_var))
                mipsolcol.append(xpress_var)
        self._solver_model.addmipsol(mipsolval, mipsolcol)
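    # Descriptive note: the loop above passes every Pyomo variable that
    # currently has a value as a (possibly partial) MIP start via
    # problem.addmipsol; variables whose value is None are skipped and left
    # for the solver to determine.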

    def _load_vars(self, vars_to_load=None):
        var_map = self._pyomo_var_to_solver_var_map
        ref_vars = self._referenced_variables
        if vars_to_load is None:
            vars_to_load = var_map.keys()

        xpress_vars_to_load = [
            var_map[pyomo_var] for pyomo_var in vars_to_load
        ]
        vals = self._solver_model.getSolution(xpress_vars_to_load)

        for var, val in zip(vars_to_load, vals):
            if ref_vars[var] > 0:
                var.stale = False
                var.value = val

    def _load_rc(self, vars_to_load=None):
        if not hasattr(self._pyomo_model, 'rc'):
            self._pyomo_model.rc = Suffix(direction=Suffix.IMPORT)
        var_map = self._pyomo_var_to_solver_var_map
        ref_vars = self._referenced_variables
        rc = self._pyomo_model.rc
        if vars_to_load is None:
            vars_to_load = var_map.keys()

        xpress_vars_to_load = [
            var_map[pyomo_var] for pyomo_var in vars_to_load
        ]
        vals = self._solver_model.getRCost(xpress_vars_to_load)

        for var, val in zip(vars_to_load, vals):
            if ref_vars[var] > 0:
                rc[var] = val

    def _load_duals(self, cons_to_load=None):
        if not hasattr(self._pyomo_model, 'dual'):
            self._pyomo_model.dual = Suffix(direction=Suffix.IMPORT)
        con_map = self._pyomo_con_to_solver_con_map
        dual = self._pyomo_model.dual

        if cons_to_load is None:
            cons_to_load = con_map.keys()

        xpress_cons_to_load = [
            con_map[pyomo_con] for pyomo_con in cons_to_load
        ]
        vals = self._solver_model.getDual(xpress_cons_to_load)

        for pyomo_con, val in zip(cons_to_load, vals):
            dual[pyomo_con] = val

    def _load_slacks(self, cons_to_load=None):
        if not hasattr(self._pyomo_model, 'slack'):
            self._pyomo_model.slack = Suffix(direction=Suffix.IMPORT)
        con_map = self._pyomo_con_to_solver_con_map
        slack = self._pyomo_model.slack

        if cons_to_load is None:
            cons_to_load = con_map.keys()

        xpress_cons_to_load = [
            con_map[pyomo_con] for pyomo_con in cons_to_load
        ]
        vals = self._solver_model.getSlack(xpress_cons_to_load)

        for pyomo_con, xpress_con, val in zip(cons_to_load,
                                              xpress_cons_to_load, vals):
            if xpress_con in self._range_constraints:
                ## for xpress, the slack on a range constraint
                ## is based on the upper bound
                lb = xpress_con.lb
                ub = xpress_con.ub
                ub_s = val
                expr_val = ub - ub_s
                lb_s = lb - expr_val
                if abs(ub_s) > abs(lb_s):
                    slack[pyomo_con] = ub_s
                else:
                    slack[pyomo_con] = lb_s
            else:
                slack[pyomo_con] = val

    def load_duals(self, cons_to_load=None):
        """
        Load the duals into the 'dual' suffix. The 'dual' suffix must live on the parent model.

        Parameters
        ----------
        cons_to_load: list of Constraint
        """
        self._load_duals(cons_to_load)

    def load_rc(self, vars_to_load=None):
        """
        Load the reduced costs into the 'rc' suffix. The 'rc' suffix must live on the parent model.

        Parameters
        ----------
        vars_to_load: list of Var
        """
        self._load_rc(vars_to_load)

    def load_slacks(self, cons_to_load=None):
        """
        Load the values of the slack variables into the 'slack' suffix. The 'slack' suffix must live on the parent
        model.

        Parameters
        ----------
        cons_to_load: list of Constraint
        """
        self._load_slacks(cons_to_load)
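# A minimal usage sketch for the plugin above (not part of the original
# example; build_model is a hypothetical helper returning a Pyomo model, and
# a working xpress Python installation is assumed):
#
#     from pyomo.environ import SolverFactory
#     model = build_model()
#     opt = SolverFactory('xpress_direct')
#     results = opt.solve(model, tee=True)
#     model.display()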
Exemple #25
0
    def test_enth(self):
        m = self._make_model()
        state = m.fs.state

        n_scen = 7
        state_values = {
            "flow_mol": [1.0 * pyunits.mol / pyunits.s] * n_scen,
            "temperature": [
                1200.0 * pyunits.K,
                1200.0 * pyunits.K,
                1200.0 * pyunits.K,
                1200.0 * pyunits.K,
                # We do not test enthalpy at 300 K. The Shomate equation
                # we use is not valid below 500 K.
                #300.0*pyunits.K,
                600.0 * pyunits.K,
                900.0 * pyunits.K,
                1200.0 * pyunits.K,
            ],
            "pressure": [1.0 * pyunits.bar] * n_scen,
            "mole_frac_comp[O2]": [1.0, 0.0, 0.0, 0.0, 0.25, 0.25, 0.25],
            "mole_frac_comp[N2]": [0.0, 1.0, 0.0, 0.0, 0.25, 0.25, 0.25],
            "mole_frac_comp[H2O]": [0.0, 0.0, 1.0, 0.0, 0.25, 0.25, 0.25],
            "mole_frac_comp[CO2]": [0.0, 0.0, 0.0, 1.0, 0.25, 0.25, 0.25],
        }
        state_values = ComponentMap((state.find_component(name), values)
                                    for name, values in state_values.items())

        u = pyunits.kJ / pyunits.mol
        target_values = {
            "enth_mol_comp[O2]": [
                29.760175857866656 * u,
                29.760175857866656 * u,
                29.760175857866656 * u,
                29.760175857866656 * u,
                #0.5175085434916653*u,
                9.248259058533336 * u,
                19.241674269713887 * u,
                29.760175857866656 * u,
            ],
            "enth_mol_comp[N2]": [
                28.1081423656 * u,
                28.1081423656 * u,
                28.1081423656 * u,
                28.1081423656 * u,
                #-0.021818752399997976*u,
                8.8939164816 * u,
                18.223311732266666 * u,
                28.1081423656 * u,
            ],
            "enth_mol_comp[H2O]": [
                34.505905041333335 * u,
                34.505905041333335 * u,
                34.505905041333335 * u,
                34.505905041333335 * u,
                #0.06267505633334736*u,
                10.500564354666665 * u,
                21.939189237444452 * u,
                34.505905041333335 * u,
            ],
            "enth_mol_comp[CO2]": [
                44.474410900800024 * u,
                44.474410900800024 * u,
                44.474410900800024 * u,
                44.474410900800024 * u,
                #0.06585135367498651*u,
                12.906441898799983 * u,
                28.031785067675003 * u,
                44.474410900800024 * u,
            ],
            "enth_mol": [
                29.760175857866656 * u,
                28.1081423656 * u,
                34.505905041333335 * u,
                44.474410900800024 * u,
                #0.15605405027500296*u,
                10.387295448399996 * u,
                21.858990076775 * u,
                0.25 * (29.760175857866656 * u + 28.1081423656 * u +
                        34.505905041333335 * u + 44.474410900800024 * u),
            ],
        }
        target_values = ComponentMap((state.find_component(name), values)
                                     for name, values in target_values.items())

        # Construct enth_mol and all prerequisites, including enth_mol_comp
        state.enth_mol

        param_sweeper = ParamSweeper(n_scen,
                                     state_values,
                                     output_values=target_values)
        with param_sweeper:
            for inputs, target in param_sweeper:
                solve_strongly_connected_components(state)

                # Make sure property equations have been converged
                assert number_large_residuals(state, tol=1e-8) == 0

                # Sanity check that inputs are properly set
                for var, val in inputs.items():
                    val = value(pyunits.convert(val, var.get_units()))
                    assert var.value == pytest.approx(value(val), abs=1e-3)

                # Make sure properties have been calculated as expected
                for var, val in target.items():
                    val = value(pyunits.convert(val, var.get_units()))
                    assert var.value == pytest.approx(value(val), abs=1e-3)
Exemple #26
0
class MosekDirect(DirectSolver):

    def __init__(self, **kwds):
        kwds['type'] = 'mosek'
        DirectSolver.__init__(self, **kwds)
        self._pyomo_var_to_solver_var_map = ComponentMap()
        self._solver_var_to_pyomo_var_map = ComponentMap()
        self._pyomo_con_to_solver_con_map = dict()
        self._solver_con_to_pyomo_con_map = ComponentMap()
        self._pyomo_cone_to_solver_cone_map = dict()
        self._solver_cone_to_pyomo_cone_map = ComponentMap()
        self._init()

    def _init(self):
        self._name = None
        try:
            import mosek
            self._mosek = mosek
            self._mosek_env = self._mosek.Env()
            self._python_api_exists = True
            self._version = self._mosek_env.getversion()
            if self._version[0] > 8:
                while len(self._version) < 3:
                    self._version += (0,)
                self._name = "Mosek %s.%s.%s" % self._version[:3]
            else:
                while len(self._version) < 4:
                    self._version += (0,)
                self._name = "Mosek %s.%s.%s.%s" % self._version[:4]

            self._version_major = self._version[0]
        except ImportError:
            self._python_api_exists = False
        except Exception as e:
            print("Import of mosek failed - mosek message=" + str(e) + "\n")
            self._python_api_exists = False

        self._range_constraints = set()

        self._max_obj_degree = 2
        self._max_constraint_degree = 2
        self._termcode = None

        # Note: Undefined capabilities default to None
        self._capabilities.linear = True
        self._capabilities.quadratic_objective = True
        self._capabilities.quadratic_constraint = True
        self._capabilities.integer = True
        self._capabilities.sos1 = False
        self._capabilities.sos2 = False

    @staticmethod
    def license_is_valid():
        """
        Runs a check for a valid Mosek license. Returns False
        if Mosek fails to run on a trivial test case.
        """
        try:
            import mosek
        except ImportError:
            return False
        try:
            mosek.Env().Task(0,0).optimize()
        except mosek.Error:
            return False
        return True
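    # Usage sketch (an assumption about typical use, not original code):
    #
    #     if MosekDirect.license_is_valid():
    #         opt = MosekDirect()
    #     else:
    #         raise RuntimeError("No usable Mosek license found")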

    def _apply_solver(self):
        if not self._save_results:
            for block in self._pyomo_model.block_data_objects(descend_into=True,
                                                              active=True):
                for var in block.component_data_objects(ctype=pyomo.core.base.var.Var,
                                                        descend_into=False,
                                                        active=True,
                                                        sort=False):
                    var.stale = True
        if self._tee:
            def _process_stream(msg):
                sys.stdout.write(msg)
                sys.stdout.flush()
            self._solver_model.set_Stream(
                self._mosek.streamtype.log, _process_stream)

        if self._keepfiles:
            print("Solver log file: "+self._log_file)

        for key, option in self.options.items():

            param = self._mosek

            for sub_key in key.split('.'):
                param = getattr(param, sub_key)

            if 'sparam' in key.split('.'):
                self._solver_model.putstrparam(param, option)
            elif 'iparam' in key.split('.'):
                self._solver_model.putintparam(param, option)
            elif 'dparam' in key.split('.'):
                self._solver_model.putdouparam(param, option)
            else:
                raise AttributeError(
                    "Unknown parameter type: the option key is expected to "
                    "contain 'sparam', 'iparam' or 'dparam'.")

        self._termcode = self._solver_model.optimize()
        self._solver_model.solutionsummary(self._mosek.streamtype.msg)

        # FIXME: can we get a return code indicating if Mosek had a significant failure?
        return Bunch(rc=None, log=None)
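    # Descriptive note on the option handling above: option keys are dotted
    # attribute paths into the mosek module, and the 'sparam'/'iparam'/'dparam'
    # component of the key selects which put*param call is used. Illustrative
    # settings (parameter names shown only as examples):
    #     opt.options['iparam.log'] = 10
    #     opt.options['dparam.optimizer_max_time'] = 100.0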

    def _get_cone_data(self, con):
        # If the cone is not recognized, this function returns None for
        # cone_type and cone_members (and 0 for cone_param).
        cone_type = None
        cone_param = 0
        cone_members = None
        if isinstance(con, quadratic):
            assert con.has_ub() and \
                (con.ub == 0) and (not con.has_lb())
            assert con.check_convexity_conditions(relax=True)
            cone_type = self._mosek.conetype.quad
            cone_members = [con.r] + list(con.x)
        elif isinstance(con, rotated_quadratic):
            assert con.has_ub() and \
                (con.ub == 0) and (not con.has_lb())
            assert con.check_convexity_conditions(relax=True)
            cone_type = self._mosek.conetype.rquad
            cone_members = [con.r1, con.r2] + list(con.x)
        elif self._version >= (9, 0, 0):
            if isinstance(con, primal_exponential):
                assert con.has_ub() and \
                    (con.ub == 0) and (not con.has_lb())
                assert con.check_convexity_conditions(
                    relax=False)
                cone_type = self._mosek.conetype.pexp
                cone_members = [con.r, con.x1, con.x2]
            elif isinstance(con, primal_power):
                assert con.has_ub() and \
                    (con.ub == 0) and (not con.has_lb())
                assert con.check_convexity_conditions(
                    relax=False)
                cone_type = self._mosek.conetype.ppow
                cone_param = value(con.alpha)
                cone_members = [con.r1, con.r2] + list(con.x)
            elif isinstance(con, dual_exponential):
                assert con.has_ub() and \
                    (con.ub == 0) and (not con.has_lb())
                assert con.check_convexity_conditions(
                    relax=False)
                cone_type = self._mosek.conetype.dexp
                cone_members = [con.r, con.x1, con.x2]
            elif isinstance(con, dual_power):
                assert con.has_ub() and \
                    (con.ub == 0) and (not con.has_lb())
                assert con.check_convexity_conditions(
                    relax=False)
                cone_type = self._mosek.conetype.dpow
                cone_param = value(con.alpha)
                cone_members = [con.r1, con.r2] + list(con.x)

        return cone_type, cone_param, cone_members
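    # Descriptive note: the recognition above covers the conic constraint
    # classes from pyomo.core.kernel.conic (quadratic and rotated_quadratic,
    # plus the exponential and power cones for Mosek >= 9). Anything else
    # falls through with cone_type=None and is treated as an ordinary
    # constraint by _add_constraint.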

    def _get_expr_from_pyomo_repn(self, repn, max_degree=2):
        referenced_vars = ComponentSet()

        degree = repn.polynomial_degree()
        if (degree is None) or (degree > max_degree):
            raise DegreeError(
                'Mosek does not support expressions of degree {0}.'.format(degree))

        referenced_vars.update(repn.linear_vars)

        indexes = [self._pyomo_var_to_solver_var_map[i]
                   for i in repn.linear_vars]

        new_expr = [list(repn.linear_coefs), indexes, repn.constant]

        qsubi = []
        qsubj = []
        qval = []
        for i, v in enumerate(repn.quadratic_vars):
            x, y = v
            x_index = self._pyomo_var_to_solver_var_map[x]
            y_index = self._pyomo_var_to_solver_var_map[y]
            qsubj.append(x_index)
            qsubi.append(y_index)
            # Mosek's quadratic term is (1/2) x'Qx with only one triangle of Q
            # stored, so coefficients of diagonal entries (x and y map to the
            # same column) must be doubled.
            qval.append(repn.quadratic_coefs[i] * ((x_index == y_index) + 1))
            referenced_vars.add(x)
            referenced_vars.add(y)
        new_expr.extend([qval, qsubi, qsubj])

        return new_expr, referenced_vars
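    # Descriptive note on the positional list layout returned above, which
    # later methods index into directly:
    #   new_expr[0]  linear coefficients
    #   new_expr[1]  linear variable (column) indices
    #   new_expr[2]  constant term
    #   new_expr[3]  quadratic coefficients (qval)
    #   new_expr[4]  quadratic subscripts i (qsubi)
    #   new_expr[5]  quadratic subscripts j (qsubj)
    # These positions match the putarow/putqconk/putqobj/putcfix calls in
    # _add_constraint and _set_objective.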

    def _get_expr_from_pyomo_expr(self, expr, max_degree=2):
        if max_degree == 2:
            repn = generate_standard_repn(expr, quadratic=True)
        else:
            repn = generate_standard_repn(expr, quadratic=False)

        try:
            mosek_expr, referenced_vars = self._get_expr_from_pyomo_repn(
                repn, max_degree)
        except DegreeError as e:
            msg = e.args[0]
            msg += '\nexpr: {0}'.format(expr)
            raise DegreeError(msg)

        return mosek_expr, referenced_vars

    def _add_var(self, var):
        varname = self._symbol_map.getSymbol(var, self._labeler)
        vtype = self._mosek_vtype_from_var(var)
        # The string '0' is used as a sentinel meaning "no bound"; it is what
        # set_var_boundtype checks for, and float('0') == 0.0 is passed for
        # bound values that Mosek ignores under the fr/lo/up bound keys.
        if var.has_lb():
            lb = value(var.lb)
        else:
            lb = '0'
        if var.has_ub():
            ub = value(var.ub)
        else:
            ub = '0'

        bound_type = self.set_var_boundtype(var, ub, lb)
        self._solver_model.appendvars(1)
        index = self._solver_model.getnumvar()-1
        self._solver_model.putvarbound(index, bound_type, float(lb), float(ub))
        self._solver_model.putvartype(index, vtype)
        self._solver_model.putvarname(index, varname)

        self._pyomo_var_to_solver_var_map[var] = index
        self._solver_var_to_pyomo_var_map[index] = var
        self._referenced_variables[var] = 0

    def _set_instance(self, model, kwds={}):
        self._range_constraints = set()
        DirectOrPersistentSolver._set_instance(self, model, kwds)
        self._pyomo_con_to_solver_con_map = dict()
        self._solver_con_to_pyomo_con_map = ComponentMap()
        self._pyomo_cone_to_solver_cone_map = dict()
        self._solver_cone_to_pyomo_cone_map = ComponentMap()
        self._pyomo_var_to_solver_var_map = ComponentMap()
        self._solver_var_to_pyomo_var_map = ComponentMap()
        self._whichsol = getattr(
            self._mosek.soltype, kwds.pop('soltype', 'bas'))

        try:
            self._solver_model = self._mosek_env.Task(0, 0)
        except Exception:
            e = sys.exc_info()[1]
            msg = ("Unable to create Mosek Task. "
                   "Have you installed the Python "
                   "bindings for Mosek?\n\n\t" +
                   "Error message: {0}".format(e))
            raise Exception(msg)

        self._add_block(model)

    def _add_block(self, block):
        DirectOrPersistentSolver._add_block(self, block)

    def _add_constraint(self, con):
        if not con.active:
            return None

        if is_fixed(con.body):
            if self._skip_trivial_constraints:
                return None

        conname = self._symbol_map.getSymbol(con, self._labeler)

        mosek_expr = None
        referenced_vars = None
        cone_type = None
        cone_param = 0
        cone_members = None
        if con._linear_canonical_form:
            mosek_expr, referenced_vars = self._get_expr_from_pyomo_repn(
                con.canonical_form(),
                self._max_constraint_degree)
        elif isinstance(con, _ConicBase):
            cone_type, cone_param, cone_members = \
                self._get_cone_data(con)
            if cone_type is not None:
                assert cone_members is not None
                referenced_vars = ComponentSet(cone_members)
            else:
                logger.warning("Cone %s was not recognized by Mosek"
                               % (str(con)))
                # the cone was not recognized, treat
                # it like a standard constraint, which
                # will in all likelihood lead to Mosek
                # reporting a helpful error message
                assert mosek_expr is None
        if (mosek_expr is None) and (cone_type is None):
            mosek_expr, referenced_vars = \
                self._get_expr_from_pyomo_expr(
                    con.body,
                    self._max_constraint_degree)

        assert referenced_vars is not None
        if mosek_expr is not None:
            assert cone_type is None
            self._solver_model.appendcons(1)
            con_index = self._solver_model.getnumcon()-1
            con_type, ub, lb = self.set_con_bounds(con, mosek_expr[2])

            if con.has_lb():
                if not is_fixed(con.lower):
                    raise ValueError("Lower bound of constraint {0} "
                                     "is not constant.".format(con))
            if con.has_ub():
                if not is_fixed(con.upper):
                    raise ValueError("Upper bound of constraint {0} "
                                     "is not constant.".format(con))

            self._solver_model.putarow(con_index, mosek_expr[1], mosek_expr[0])
            self._solver_model.putqconk(
                con_index, mosek_expr[4], mosek_expr[5], mosek_expr[3])
            self._solver_model.putconbound(con_index, con_type, lb, ub)
            self._solver_model.putconname(con_index, conname)
            self._pyomo_con_to_solver_con_map[con] = con_index
            self._solver_con_to_pyomo_con_map[con_index] = con
        else:
            assert cone_type is not None
            members = [self._pyomo_var_to_solver_var_map[v_]
                       for v_ in cone_members]
            self._solver_model.appendcone(cone_type,
                                          cone_param,
                                          members)
            cone_index = self._solver_model.getnumcone()-1
            self._solver_model.putconename(cone_index, conname)
            self._pyomo_cone_to_solver_cone_map[con] = cone_index
            self._solver_cone_to_pyomo_cone_map[cone_index] = con

        for var in referenced_vars:
            self._referenced_variables[var] += 1
        self._vars_referenced_by_con[con] = referenced_vars

    def _mosek_vtype_from_var(self, var):
        """
        This function takes a pyomo variable and returns the appropriate mosek variable type
        :param var: pyomo.core.base.var.Var
        :return: mosek.variabletype.type_int or mosek.variabletype.type_cont
        """

        if var.is_integer() or var.is_binary():
            vtype = self._mosek.variabletype.type_int
        elif var.is_continuous():
            vtype = self._mosek.variabletype.type_cont
        else:
            raise ValueError(
                'Variable domain type is not recognized for {0}'.format(var.domain))
        return vtype

    def set_var_boundtype(self, var, ub, lb):

        if var.is_fixed():
            return self._mosek.boundkey.fx
        elif ub != '0' and lb != '0':
            return self._mosek.boundkey.ra
        elif ub == '0' and lb == '0':
            return self._mosek.boundkey.fr
        elif ub != '0' and lb == '0':
            return self._mosek.boundkey.up
        return self._mosek.boundkey.lo
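    # Summary of the bound-key mapping above (descriptive note; '0' is the
    # string sentinel for "no bound" assigned in _add_var):
    #   fixed variable         -> boundkey.fx
    #   both bounds present    -> boundkey.ra
    #   neither bound present  -> boundkey.fr
    #   only an upper bound    -> boundkey.up
    #   only a lower bound     -> boundkey.lo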

    def set_con_bounds(self, con, constant):

        if con.equality:
            ub = value(con.upper) - constant
            lb = value(con.lower) - constant
            con_type = self._mosek.boundkey.fx
        elif con.has_lb() and con.has_ub():
            ub = value(con.upper) - constant
            lb = value(con.lower) - constant
            con_type = self._mosek.boundkey.ra
        elif con.has_lb():
            ub = 0
            lb = value(con.lower) - constant
            con_type = self._mosek.boundkey.lo
        elif con.has_ub():
            ub = value(con.upper) - constant
            lb = 0
            con_type = self._mosek.boundkey.up
        else:
            ub = 0
            lb = 0
            con_type = self._mosek.boundkey.fr
        return con_type, ub, lb
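    # Descriptive note: the expression constant is folded into the bounds here
    # because putarow receives only the linear terms. For example (sketch),
    # the constraint 2*x + 3 <= 7 becomes a row with coefficient 2.0 on x and
    # putconbound(..., boundkey.up, 0, 4.0).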

    def _set_objective(self, obj):

        if self._objective is not None:
            for var in self._vars_referenced_by_obj:
                self._referenced_variables[var] -= 1
            self._vars_referenced_by_obj = ComponentSet()
            self._objective = None

        if obj.active is False:
            raise ValueError('Cannot add inactive objective to solver.')

        if obj.sense == minimize:
            self._solver_model.putobjsense(self._mosek.objsense.minimize)
        elif obj.sense == maximize:
            self._solver_model.putobjsense(self._mosek.objsense.maximize)
        else:
            raise ValueError(
                'Objective sense is not recognized: {0}'.format(obj.sense))

        mosek_expr, referenced_vars = self._get_expr_from_pyomo_expr(
            obj.expr, self._max_obj_degree)

        for var in referenced_vars:
            self._referenced_variables[var] += 1

        for i, j in enumerate(mosek_expr[1]):
            self._solver_model.putcj(j, mosek_expr[0][i])

        self._solver_model.putqobj(mosek_expr[4], mosek_expr[5], mosek_expr[3])
        self._solver_model.putcfix(mosek_expr[2])
        self._objective = obj
        self._vars_referenced_by_obj = referenced_vars

    def _postsolve(self):

        extract_duals = False
        extract_slacks = False
        extract_reduced_costs = False
        for suffix in self._suffixes:
            flag = False
            if re.match(suffix, "dual"):
                extract_duals = True
                flag = True
            if re.match(suffix, "slack"):
                extract_slacks = True
                flag = True
            if re.match(suffix, "rc"):
                extract_reduced_costs = True
                flag = True
            if not flag:
                raise RuntimeError(
                    "***The mosek solver plugin cannot extract solution suffix="+suffix)

        msk_task = self._solver_model
        msk = self._mosek

        itr_soltypes = [msk.problemtype.qo,
                        msk.problemtype.qcqo,
                        msk.problemtype.conic]

        if (msk_task.getnumintvar() >= 1):
            self._whichsol = msk.soltype.itg
            if extract_reduced_costs:
                logger.warning("Cannot get reduced costs for MIP.")
            if extract_duals:
                logger.warning("Cannot get duals for MIP.")
            extract_reduced_costs = False
            extract_duals = False
        elif (msk_task.getprobtype() in itr_soltypes):
            self._whichsol = msk.soltype.itr

        whichsol = self._whichsol
        sol_status = msk_task.getsolsta(whichsol)
        pro_status = msk_task.getprosta(whichsol)

        self.results = SolverResults()
        soln = Solution()

        self.results.solver.name = self._name
        self.results.solver.wallclock_time = msk_task.getdouinf(
            msk.dinfitem.optimizer_time)

        SOLSTA_MAP = {
            msk.solsta.unknown: 'unknown',
            msk.solsta.optimal: 'optimal',
            msk.solsta.prim_and_dual_feas: 'pd_feas',
            msk.solsta.prim_feas: 'p_feas',
            msk.solsta.dual_feas: 'd_feas',
            msk.solsta.prim_infeas_cer: 'p_infeas',
            msk.solsta.dual_infeas_cer: 'd_infeas',
            msk.solsta.prim_illposed_cer: 'p_illposed',
            msk.solsta.dual_illposed_cer: 'd_illposed',
            msk.solsta.integer_optimal: 'optimal'
        }
        PROSTA_MAP = {
            msk.prosta.unknown: 'unknown',
            msk.prosta.prim_and_dual_feas: 'pd_feas',
            msk.prosta.prim_feas: 'p_feas',
            msk.prosta.dual_feas: 'd_feas',
            msk.prosta.prim_infeas: 'p_infeas',
            msk.prosta.dual_infeas: 'd_infeas',
            msk.prosta.prim_and_dual_infeas: 'pd_infeas',
            msk.prosta.ill_posed: 'illposed',
            msk.prosta.prim_infeas_or_unbounded: 'p_inf_unb'
        }

        if self._version_major < 9:
            SOLSTA_OLD = {
                msk.solsta.near_optimal: 'optimal',
                msk.solsta.near_integer_optimal: 'optimal',
                msk.solsta.near_prim_feas: 'p_feas',
                msk.solsta.near_dual_feas: 'd_feas',
                msk.solsta.near_prim_and_dual_feas: 'pd_feas',
                msk.solsta.near_prim_infeas_cer: 'p_infeas',
                msk.solsta.near_dual_infeas_cer: 'd_infeas'
            }
            PROSTA_OLD = {
                msk.prosta.near_prim_and_dual_feas: 'pd_feas',
                msk.prosta.near_prim_feas: 'p_feas',
                msk.prosta.near_dual_feas: 'd_feas'
            }
            SOLSTA_MAP.update(SOLSTA_OLD)
            PROSTA_MAP.update(PROSTA_OLD)

        if self._termcode == msk.rescode.ok:
            self.results.solver.status = SolverStatus.ok
            self.results.solver.termination_message = ""

        elif self._termcode == msk.rescode.trm_max_iterations:
            self.results.solver.status = SolverStatus.ok
            self.results.solver.termination_message = "Optimization terminated because the total number " \
                "iterations performed exceeded the value specified in the " \
                "IterationLimit parameter."
            self.results.solver.termination_condition = TerminationCondition.maxIterations
            soln.status = SolutionStatus.stoppedByLimit

        elif self._termcode == msk.rescode.trm_max_time:
            self.results.solver.status = SolverStatus.ok
            self.results.solver.termination_message = "Optimization terminated because the time expended exceeded " \
                "the value specified in the TimeLimit parameter."
            self.results.solver.termination_condition = TerminationCondition.maxTimeLimit
            soln.status = SolutionStatus.stoppedByLimit

        elif self._termcode == msk.rescode.trm_user_callback:
            self.results.solver.status = SolverStatus.aborted
            self.results.solver.termination_message = "Optimization terminated because of the user callback."
            self.results.solver.termination_condition = TerminationCondition.userInterrupt
            soln.status = SolutionStatus.unknown

        elif self._termcode in [msk.rescode.trm_mio_num_relaxs,
                                msk.rescode.trm_mio_num_branches,
                                msk.rescode.trm_num_max_num_int_solutions]:
            self.results.solver.status = SolverStatus.ok
            self.results.solver.termination_message = "Optimization terminated because maximum number of relaxations" \
                " / branches / integer solutions exceeded " \
                "the value specified in the TimeLimit parameter."
            self.results.solver.termination_condition = TerminationCondition.maxEvaluations
            soln.status = SolutionStatus.stoppedByLimit

        else:
            self.results.solver.termination_message = " Optimization terminated %s response code." \
                "Check Mosek response code documentation for further explanation." % self._termcode
            self.results.solver.termination_condition = TerminationCondition.unknown

        if SOLSTA_MAP[sol_status] == 'unknown':
            self.results.solver.status = SolverStatus.warning
            self.results.solver.termination_message += " Unknown solution status."
            self.results.solver.Message = self.results.solver.termination_message
            self.results.solver.termination_condition = TerminationCondition.unknown
            soln.status = SolutionStatus.unknown

        if PROSTA_MAP[pro_status] == 'd_infeas':
            self.results.solver.status = SolverStatus.warning
            self.results.solver.termination_message += " Problem proven to be dual infeasible"
            self.results.solver.Message = self.results.solver.termination_message
            self.results.solver.termination_condition = TerminationCondition.unbounded
            soln.status = SolutionStatus.unbounded

        elif PROSTA_MAP[pro_status] == 'p_infeas':
            self.results.solver.status = SolverStatus.warning
            self.results.solver.termination_message += " Problem proven to be primal infeasible."
            self.results.solver.Message = self.results.solver.termination_message
            self.results.solver.termination_condition = TerminationCondition.infeasible
            soln.status = SolutionStatus.infeasible

        elif PROSTA_MAP[pro_status] == 'pd_infeas':
            self.results.solver.status = SolverStatus.warning
            self.results.solver.termination_message += " Problem proven to be primal and dual infeasible."
            self.results.solver.Message = self.results.solver.termination_message
            self.results.solver.termination_condition = TerminationCondition.infeasible
            soln.status = SolutionStatus.infeasible

        elif PROSTA_MAP[pro_status] == 'p_inf_unb':
            self.results.solver.status = SolverStatus.warning
            self.results.solver.termination_message += " Problem proven to be infeasible or unbounded."
            self.results.solver.Message = self.results.solver.termination_message
            self.results.solver.termination_condition = TerminationCondition.infeasibleOrUnbounded
            soln.status = SolutionStatus.unsure

        if SOLSTA_MAP[sol_status] == 'optimal':
            self.results.solver.status = SolverStatus.ok
            self.results.solver.termination_message += " Model was solved to optimality, " \
                "and an optimal solution is available."
            self.results.solver.termination_condition = TerminationCondition.optimal
            soln.status = SolutionStatus.optimal

        elif SOLSTA_MAP[sol_status] == 'pd_feas':
            self.results.solver.status = SolverStatus.ok
            self.results.solver.termination_message += " The solution is both primal and dual feasible"
            self.results.solver.termination_condition = TerminationCondition.feasible
            soln.status = SolutionStatus.feasible

        elif SOLSTA_MAP[sol_status] == 'p_feas':
            self.results.solver.status = SolverStatus.ok
            self.results.solver.termination_message += " Primal feasible solution is available."
            self.results.solver.termination_condition = TerminationCondition.feasible
            soln.status = SolutionStatus.feasible

        elif SOLSTA_MAP[sol_status] == 'd_feas':
            self.results.solver.status = SolverStatus.ok
            self.results.solver.termination_message += " Dual feasible solution is available."
            self.results.solver.termination_condition = TerminationCondition.feasible
            soln.status = SolutionStatus.feasible

        elif SOLSTA_MAP[sol_status] == 'd_infeas':
            self.results.solver.status = SolverStatus.warning
            self.results.solver.termination_message += " The solution is dual infeasible."
            self.results.solver.Message = self.results.solver.termination_message
            self.results.solver.termination_condition = TerminationCondition.unbounded
            soln.status = SolutionStatus.infeasible

        elif SOLSTA_MAP[sol_status] == 'p_infeas':
            self.results.solver.status = SolverStatus.warning
            self.results.solver.termination_message += " The solution is primal infeasible."
            self.results.solver.Message = self.results.solver.termination_message
            self.results.solver.termination_condition = TerminationCondition.infeasible
            soln.status = SolutionStatus.infeasible

        self.results.problem.name = msk_task.gettaskname()

        if msk_task.getobjsense() == msk.objsense.minimize:
            self.results.problem.sense = minimize
        elif msk_task.getobjsense() == msk.objsense.maximize:
            self.results.problem.sense = maximize
        else:
            raise RuntimeError(
                'Unrecognized Mosek objective sense: {0}'.format(msk_task.getobjsense()))

        self.results.problem.upper_bound = None
        self.results.problem.lower_bound = None

        if msk_task.getnumintvar() == 0:
            try:
                if msk_task.getobjsense() == msk.objsense.minimize:
                    self.results.problem.upper_bound = msk_task.getprimalobj(
                        whichsol)
                    self.results.problem.lower_bound = msk_task.getdualobj(
                        whichsol)
                elif msk_task.getobjsense() == msk.objsense.maximize:
                    self.results.problem.upper_bound = msk_task.getdualobj(
                        whichsol)
                    self.results.problem.lower_bound = msk_task.getprimalobj(
                        whichsol)

            except (msk.MosekException, AttributeError):
                pass
        elif msk_task.getobjsense() == msk.objsense.minimize:  # minimizing
            try:
                self.results.problem.upper_bound = msk_task.getprimalobj(
                    whichsol)
            except (msk.MosekException, AttributeError):
                pass
            try:
                self.results.problem.lower_bound = msk_task.getdouinf(
                    msk.dinfitem.mio_obj_bound)
            except (msk.MosekException, AttributeError):
                pass
        elif msk_task.getobjsense() == msk.objsense.maximize:  # maximizing
            try:
                self.results.problem.upper_bound = msk_task.getdouinf(
                    msk.dinfitem.mio_obj_bound)
            except (msk.MosekException, AttributeError):
                pass
            try:
                self.results.problem.lower_bound = msk_task.getprimalobj(
                    whichsol)
            except (msk.MosekException, AttributeError):
                pass
        else:
            raise RuntimeError(
                'Unrecognized Mosek objective sense: {0}'.format(msk_task.getobjsense()))

        try:
            soln.gap = self.results.problem.upper_bound - self.results.problem.lower_bound
        except TypeError:
            soln.gap = None

        self.results.problem.number_of_constraints = msk_task.getnumcon()
        self.results.problem.number_of_nonzeros = msk_task.getnumanz()
        self.results.problem.number_of_variables = msk_task.getnumvar()
        self.results.problem.number_of_integer_variables = msk_task.getnumintvar()
        self.results.problem.number_of_continuous_variables = msk_task.getnumvar() - \
            msk_task.getnumintvar()
        self.results.problem.number_of_objectives = 1
        self.results.problem.number_of_solutions = 1

        # if a solve was stopped by a limit, we still need to check to
        # see if there is a solution available - this may not always
        # be the case, both in LP and MIP contexts.
        if self._save_results:
            """
            The code in this block is only needed for backwards compatibility. It is
            more efficient to set _save_results to False and use load_vars,
            load_duals, etc.
            """
            if self.results.problem.number_of_solutions > 0:
                soln_variables = soln.variable
                soln_constraints = soln.constraint

                mosek_vars = list(range(msk_task.getnumvar()))
                mosek_vars = list(set(mosek_vars).intersection(
                    set(self._pyomo_var_to_solver_var_map.values())))
                var_vals = [0.0] * len(mosek_vars)
                self._solver_model.getxx(whichsol, var_vals)
                names = []
                for i in mosek_vars:
                    names.append(msk_task.getvarname(i))

                for mosek_var, val, name in zip(mosek_vars, var_vals, names):
                    pyomo_var = self._solver_var_to_pyomo_var_map[mosek_var]
                    if self._referenced_variables[pyomo_var] > 0:
                        pyomo_var.stale = False
                        soln_variables[name] = {"Value": val}

                if extract_reduced_costs:
                    vals = [0.0]*len(mosek_vars)
                    msk_task.getreducedcosts(
                        whichsol, 0, len(mosek_vars), vals)
                    for mosek_var, val, name in zip(mosek_vars, vals, names):
                        pyomo_var = self._solver_var_to_pyomo_var_map[mosek_var]
                        if self._referenced_variables[pyomo_var] > 0:
                            soln_variables[name]["Rc"] = val

                if extract_duals or extract_slacks:
                    mosek_cons = list(range(msk_task.getnumcon()))
                    con_names = []
                    for con in mosek_cons:
                        con_names.append(msk_task.getconname(con))
                    for name in con_names:
                        soln_constraints[name] = {}
                    """TODO wrong length, needs to be getnumvars()
                    mosek_cones = list(range(msk_task.getnumcone()))
                    cone_names = []
                    for cone in mosek_cones:
                        cone_names.append(msk_task.getconename(cone))
                    for name in cone_names:
                        soln_constraints[name] = {}
                    """

                if extract_duals:
                    ncon = msk_task.getnumcon()
                    if ncon > 0:
                        vals = [0.0]*ncon
                        msk_task.gety(whichsol, vals)
                        for val, name in zip(vals, con_names):
                            soln_constraints[name]["Dual"] = val
                    """TODO: wrong length, needs to be getnumvars()
                    ncone = msk_task.getnumcone()
                    if ncone > 0:
                        vals = [0.0]*ncone
                        msk_task.getsnx(whichsol, vals)
                        for val, name in zip(vals, cone_names):
                            soln_constraints[name]["Dual"] = val
                    """

                if extract_slacks:
                    Ax = [0]*len(mosek_cons)
                    msk_task.getxc(self._whichsol, Ax)
                    for con, name in zip(mosek_cons, con_names):
                        Us = Ls = 0

                        bk, lb, ub = msk_task.getconbound(con)

                        if bk in [msk.boundkey.fx, msk.boundkey.ra, msk.boundkey.up]:
                            Us = ub - Ax[con]
                        if bk in [msk.boundkey.fx, msk.boundkey.ra, msk.boundkey.lo]:
                            Ls = Ax[con] - lb

                        if Us > Ls:
                            soln_constraints[name]["Slack"] = Us
                        else:
                            soln_constraints[name]["Slack"] = -Ls

        elif self._load_solutions:
            if self.results.problem.number_of_solutions > 0:

                self._load_vars()

                if extract_reduced_costs:
                    self._load_rc()

                if extract_duals:
                    self._load_duals()

                if extract_slacks:
                    self._load_slacks()

        self.results.solution.insert(soln)

        # finally, clean any temporary files registered with the temp file
        # manager, created/populated *directly* by this plugin.
        TempfileManager.pop(remove=not self._keepfiles)

        return DirectOrPersistentSolver._postsolve(self)

    def warm_start_capable(self):
        return True

    def _warm_start(self):
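        # Push the current Pyomo variable values into the starting point of
        # every MOSEK solution type; variables without a value are skipped.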
        for pyomo_var, mosek_var in self._pyomo_var_to_solver_var_map.items():
            if pyomo_var.value is not None:
                for solType in self._mosek.soltype._values:
                    self._solver_model.putxxslice(
                        solType, mosek_var, mosek_var+1, [(pyomo_var.value)])

    def _load_vars(self, vars_to_load=None):
        var_map = self._pyomo_var_to_solver_var_map
        ref_vars = self._referenced_variables
        if vars_to_load is None:
            vars_to_load = var_map.keys()

        mosek_vars_to_load = [var_map[pyomo_var] for pyomo_var in vars_to_load]
        var_vals = [0.0] * len(mosek_vars_to_load)
        self._solver_model.getxx(self._whichsol, var_vals)

        for var, val in zip(vars_to_load, var_vals):
            if ref_vars[var] > 0:
                var.stale = False
                var.value = val

    def _load_rc(self, vars_to_load=None):
        if not hasattr(self._pyomo_model, 'rc'):
            self._pyomo_model.rc = Suffix(direction=Suffix.IMPORT)
        var_map = self._pyomo_var_to_solver_var_map
        ref_vars = self._referenced_variables
        rc = self._pyomo_model.rc
        if vars_to_load is None:
            vars_to_load = var_map.keys()

        mosek_vars_to_load = [var_map[pyomo_var] for pyomo_var in vars_to_load]
        vals = [0.0]*len(mosek_vars_to_load)
        self._solver_model.getreducedcosts(
            self._whichsol, 0, len(mosek_vars_to_load), vals)

        for var, val in zip(vars_to_load, vals):
            if ref_vars[var] > 0:
                rc[var] = val

    def _load_duals(self, objs_to_load=None):
        if not hasattr(self._pyomo_model, 'dual'):
            self._pyomo_model.dual = Suffix(direction=Suffix.IMPORT)
        con_map = self._pyomo_con_to_solver_con_map
        reverse_con_map = self._solver_con_to_pyomo_con_map
        cone_map = self._pyomo_cone_to_solver_cone_map
        reverse_cone_map = self._solver_cone_to_pyomo_cone_map
        dual = self._pyomo_model.dual

        if objs_to_load is None:
            # constraints
            mosek_cons_to_load = range(self._solver_model.getnumcon())
            vals = [0.0]*len(mosek_cons_to_load)
            self._solver_model.gety(self._whichsol, vals)
            for mosek_con, val in zip(mosek_cons_to_load, vals):
                pyomo_con = reverse_con_map[mosek_con]
                dual[pyomo_con] = val
            """TODO wrong length, needs to be getnumvars()
            # cones
            mosek_cones_to_load = range(self._solver_model.getnumcone())
            vals = [0.0]*len(mosek_cones_to_load)
            self._solver_model.getsnx(self._whichsol, vals)
            for mosek_cone, val in zip(mosek_cones_to_load, vals):
                pyomo_cone = reverse_cone_map[mosek_cone]
                dual[pyomo_cone] = val
            """
        else:
            mosek_cons_to_load = []
            mosek_cones_to_load = []
            for obj in objs_to_load:
                if obj in con_map:
                    mosek_cons_to_load.append(con_map[obj])
                else:
                    # assume it is a cone
                    mosek_cones_to_load.append(cone_map[obj])
            # constraints
            mosek_cons_first = min(mosek_cons_to_load)
            mosek_cons_last = max(mosek_cons_to_load)
            vals = [0.0]*(mosek_cons_last - mosek_cons_first + 1)
            self._solver_model.getyslice(self._whichsol,
                                         mosek_cons_first,
                                         mosek_cons_last,
                                         vals)
            for mosek_con in mosek_cons_to_load:
                slice_index = mosek_con - mosek_cons_first
                val = vals[slice_index]
                pyomo_con = reverse_con_map[mosek_con]
                dual[pyomo_con] = val
            """TODO wrong length, needs to be getnumvars()
            # cones
            mosek_cones_first = min(mosek_cones_to_load)
            mosek_cones_last = max(mosek_cones_to_load)
            vals = [0.0]*(mosek_cones_last - mosek_cones_first + 1)
            self._solver_model.getsnxslice(self._whichsol,
                                           mosek_cones_first,
                                           mosek_cones_last,
                                           vals)
            for mosek_cone in mosek_cones_to_load:
                slice_index = mosek_cone - mosek_cones_first
                val = vals[slice_index]
                pyomo_cone = reverse_cone_map[mosek_cone]
                dual[pyomo_cone] = val
            """

    def _load_slacks(self, cons_to_load=None):
        if not hasattr(self._pyomo_model, 'slack'):
            self._pyomo_model.slack = Suffix(direction=Suffix.IMPORT)
        con_map = self._pyomo_con_to_solver_con_map
        reverse_con_map = self._solver_con_to_pyomo_con_map
        slack = self._pyomo_model.slack
        msk = self._mosek

        if cons_to_load is None:
            mosek_cons_to_load = range(self._solver_model.getnumcon())
        else:
            mosek_cons_to_load = set([con_map[pyomo_con]
                                      for pyomo_con in cons_to_load])

        Ax = [0]*len(mosek_cons_to_load)
        self._solver_model.getxc(self._whichsol, Ax)
        for con in mosek_cons_to_load:
            pyomo_con = reverse_con_map[con]
            Us = Ls = 0
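            # Us is the distance from the constraint activity (getxc) to the
            # upper bound and Ls the distance to the lower bound; the stored
            # slack is Us when Us > Ls and -Ls otherwise.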

            bk, lb, ub = self._solver_model.getconbound(con)

            if bk in [msk.boundkey.fx, msk.boundkey.ra, msk.boundkey.up]:
                Us = ub - Ax[con]
            if bk in [msk.boundkey.fx, msk.boundkey.ra, msk.boundkey.lo]:
                Ls = Ax[con] - lb

            if Us > Ls:
                slack[pyomo_con] = Us
            else:
                slack[pyomo_con] = -Ls

    def load_duals(self, cons_to_load=None):
        """
        Load the duals into the 'dual' suffix. The 'dual' suffix must live on the parent model.

        Parameters
        ----------
        cons_to_load: list of Constraint
        """
        self._load_duals(cons_to_load)

    def load_rc(self, vars_to_load):
        """
        Load the reduced costs into the 'rc' suffix. The 'rc' suffix must live on the parent model.

        Parameters
        ----------
        vars_to_load: list of Var
        """
        self._load_rc(vars_to_load)

    def load_slacks(self, cons_to_load=None):
        """
        Load the values of the slack variables into the 'slack' suffix. The 'slack' suffix must live on the parent
        model.

        Parameters
        ----------
        cons_to_load: list of Constraint
        """
        self._load_slacks(cons_to_load)
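
# A minimal usage sketch for the load_duals/load_rc/load_slacks helpers above,
# following the pattern suggested by the _save_results docstring (solve without
# saving results, then load only what is needed). The factory name
# 'mosek_persistent' and the save_results/load_solutions keywords are
# assumptions that may vary across Pyomo versions; treat this as illustrative.
import pyomo.environ as pyo

m = pyo.ConcreteModel()
m.x = pyo.Var(bounds=(0, 10))
m.y = pyo.Var(bounds=(0, 10))
m.con = pyo.Constraint(expr=m.x + m.y >= 4)
m.obj = pyo.Objective(expr=2 * m.x + 3 * m.y)

# The suffixes must live on the model, as the docstrings above require.
m.dual = pyo.Suffix(direction=pyo.Suffix.IMPORT)
m.rc = pyo.Suffix(direction=pyo.Suffix.IMPORT)
m.slack = pyo.Suffix(direction=pyo.Suffix.IMPORT)

opt = pyo.SolverFactory('mosek_persistent')   # assumed plugin name
opt.set_instance(m)
opt.solve(save_results=False, load_solutions=False)

opt.load_vars()           # primal variable values
opt.load_duals()          # populates m.dual
opt.load_rc([m.x, m.y])   # populates m.rc for the listed variables
opt.load_slacks()         # populates m.slack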

    def test_enth(self):
        m = self._make_model()
        state = m.fs.state

        n_scen = 4
        K = pyunits.K
        state_values = {
            "flow_mass": [1.0 * pyunits.kg / pyunits.s] * n_scen,
            "temperature": [1000.0 * K, 1100 * K, 1200 * K, 1300 * K],
            "particle_porosity": [0.27] * n_scen,
            "mass_frac_comp[Fe2O3]": [1.0 / 3.0] * n_scen,
            "mass_frac_comp[Fe3O4]": [1.0 / 3.0] * n_scen,
            "mass_frac_comp[Al2O3]": [1.0 / 3.0] * n_scen,
        }
        state_values = ComponentMap((state.find_component(name), values)
                                    for name, values in state_values.items())
        kJmol = pyunits.kJ / pyunits.mol
        kJkg = pyunits.kJ / pyunits.kg
        target_values = {
            "enth_mol_comp[Fe2O3]": [
                101.043 * kJmol,
                115.086 * kJmol,
                129.198 * kJmol,
                143.385 * kJmol,
            ],
            "enth_mol_comp[Fe3O4]": [
                147.591 * kJmol,
                167.674 * kJmol,
                187.757 * kJmol,
                207.841 * kJmol,
            ],
            "enth_mol_comp[Al2O3]": [
                77.925 * kJmol,
                90.513 * kJmol,
                103.279 * kJmol,
                116.199 * kJmol,
            ],
            "enth_mass": [
                678.156 * kJkg,
                777.534 * kJkg,
                877.640 * kJkg,
                978.408 * kJkg,
            ],
        }
        target_values = ComponentMap((state.find_component(name), values)
                                     for name, values in target_values.items())

        param_sweeper = ParamSweeper(
            n_scen,
            state_values,
            output_values=target_values,
        )
        with param_sweeper:
            for inputs, outputs in param_sweeper:
                solve_strongly_connected_components(state)

                # Check that we have eliminated infeasibility
                assert number_large_residuals(state, tol=1e-8) == 0

                # Sanity check that inputs have been set properly
                for var, val in inputs.items():
                    val = value(pyunits.convert(val, var.get_units()))
                    assert var.value == pytest.approx(value(val), abs=1e-3)

                for var, val in outputs.items():
                    val = value(pyunits.convert(val, var.get_units()))
                    assert var.value == pytest.approx(value(val), abs=1e-3)
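
# A small, self-contained sketch of the unit-handling idiom used in the checks
# above: build a quantity with Pyomo units, convert it to the target units, and
# compare the numeric value. Only pyomo.environ and pytest are assumed here.
import pytest
from pyomo.environ import units as pyunits, value

q = 1.5 * pyunits.kJ / pyunits.kg                   # quantity with units
q_si = pyunits.convert(q, pyunits.J / pyunits.kg)   # kJ/kg -> J/kg
assert value(q_si) == pytest.approx(1500.0)         # scaled by 1000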
Example #28
    def test_oc_conv(self):
        m = self._make_model()
        rxn_block = m.fs.reaction_block

        n_scen = 3
        mols = pyunits.mol/pyunits.s
        kgs = pyunits.kg/pyunits.s
        K = pyunits.K
        bar = pyunits.bar
        state_values = {
            "gas_state.flow_mol": [1.0*mols]*n_scen,
            "solid_state.flow_mass": [1.0*kgs]*n_scen,
            "gas_state.temperature": [1273.0*K]*n_scen,
            "solid_state.temperature": [1273.0*K]*n_scen,
            "gas_state.pressure": [1.0*bar]*n_scen,
            "solid_state.particle_porosity": [0.27]*n_scen,
            "gas_state.mole_frac_comp[O2]": [0.25]*n_scen,
            "gas_state.mole_frac_comp[N2]": [0.25]*n_scen,
            "gas_state.mole_frac_comp[H2O]": [0.25]*n_scen,
            "gas_state.mole_frac_comp[CO2]": [0.25]*n_scen,
            "solid_state.mass_frac_comp[Fe2O3]": [2/3, 0.0, 1/3],
            "solid_state.mass_frac_comp[Fe3O4]": [0.0, 2/3, 1/3],
            "solid_state.mass_frac_comp[Al2O3]": [1/3]*n_scen,
            }
        state_values = ComponentMap((m.fs.find_component(name), values)
                for name, values in state_values.items())

        target_values = {
                "reaction_block.OC_conv": [
                    1.0,
                    0.0,
                    0.4915,
                    ],
                "reaction_block.OC_conv_temp": [
                    2.005e-4,
                    1.0,
                    0.6371,
                    ],
                }
        target_values = ComponentMap((m.fs.find_component(name), values)
                for name, values in target_values.items())

        assert degrees_of_freedom(m.fs) == 0

        param_sweeper = ParamSweeper(n_scen, state_values,
                output_values=target_values)
        with param_sweeper:
            for inputs, outputs in param_sweeper:
                solve_strongly_connected_components(m.fs)

                # Make sure property equalities have been converged
                assert number_large_residuals(m.fs, tol=1e-8) == 0

                # Sanity checks that inputs are properly set
                for var, val in inputs.items():
                    val = value(pyunits.convert(val, var.get_units()))
                    assert var.value == pytest.approx(value(val), abs=1e-3)

                # Make sure properties have been calculated as expected
                for var, val in outputs.items():
                    val = value(pyunits.convert(val, var.get_units()))
                    assert var.value == pytest.approx(value(val), abs=1e-3)
Example #29
class Ipopt(PersistentSolver):
    def __init__(self):
        self._config = IpoptConfig()
        self._solver_options = dict()
        self._writer = NLWriter()
        self._filename = None
        self._dual_sol = dict()
        self._primal_sol = ComponentMap()
        self._reduced_costs = ComponentMap()
        self._last_results_object: Optional[Results] = None

    def available(self):
        if self.config.executable.path() is None:
            return self.Availability.NotFound
        return self.Availability.FullLicense

    def version(self):
        results = subprocess.run([str(self.config.executable), '--version'],
                                 timeout=1,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.STDOUT,
                                 universal_newlines=True)
        version = results.stdout.splitlines()[0]
        version = version.split(' ')[1]
        version = version.strip()
        version = tuple(int(i) for i in version.split('.'))
        return version

    def nl_filename(self):
        if self._filename is None:
            return None
        else:
            return self._filename + '.nl'

    def sol_filename(self):
        if self._filename is None:
            return None
        else:
            return self._filename + '.sol'

    def options_filename(self):
        if self._filename is None:
            return None
        else:
            return self._filename + '.opt'

    @property
    def config(self):
        return self._config

    @config.setter
    def config(self, val):
        self._config = val

    @property
    def ipopt_options(self):
        """
        Returns
        -------
        ipopt_options: dict
            A dictionary mapping solver options to values for those options. These
            are solver specific.
        """
        return self._solver_options

    @ipopt_options.setter
    def ipopt_options(self, val: Dict):
        self._solver_options = val

    @property
    def update_config(self):
        return self._writer.update_config

    @property
    def writer(self):
        return self._writer

    @property
    def symbol_map(self):
        return self._writer.symbol_map

    def set_instance(self, model):
        self._writer.set_instance(model)

    def add_variables(self, variables: List[_GeneralVarData]):
        self._writer.add_variables(variables)

    def add_params(self, params: List[_ParamData]):
        self._writer.add_params(params)

    def add_constraints(self, cons: List[_GeneralConstraintData]):
        self._writer.add_constraints(cons)

    def add_block(self, block: _BlockData):
        self._writer.add_block(block)

    def remove_variables(self, variables: List[_GeneralVarData]):
        self._writer.remove_variables(variables)

    def remove_params(self, params: List[_ParamData]):
        self._writer.remove_params(params)

    def remove_constraints(self, cons: List[_GeneralConstraintData]):
        self._writer.remove_constraints(cons)

    def remove_block(self, block: _BlockData):
        self._writer.remove_block(block)

    def set_objective(self, obj: _GeneralObjectiveData):
        self._writer.set_objective(obj)

    def update_variables(self, variables: List[_GeneralVarData]):
        self._writer.update_variables(variables)

    def update_params(self):
        self._writer.update_params()

    def _write_options_file(self):
        f = open(self._filename + '.opt', 'w')
        for k, val in self.ipopt_options.items():
            if k not in ipopt_command_line_options:
                f.write(str(k) + ' ' + str(val) + '\n')
        f.close()

    def solve(self, model, timer: HierarchicalTimer = None):
        avail = self.available()
        if not avail:
            raise PyomoException(
                f'Solver {self.__class__} is not available ({avail}).')
        if self._last_results_object is not None:
            self._last_results_object.solution_loader.invalidate()
        if timer is None:
            timer = HierarchicalTimer()
        try:
            TempfileManager.push()
            if self.config.filename is None:
                nl_filename = TempfileManager.create_tempfile(suffix='.nl')
                self._filename = nl_filename.split('.')[0]
            else:
                self._filename = self.config.filename
                TempfileManager.add_tempfile(self._filename + '.nl',
                                             exists=False)
            TempfileManager.add_tempfile(self._filename + '.sol', exists=False)
            TempfileManager.add_tempfile(self._filename + '.opt', exists=False)
            self._write_options_file()
            timer.start('write nl file')
            self._writer.write(model, self._filename + '.nl', timer=timer)
            timer.stop('write nl file')
            res = self._apply_solver(timer)
            self._last_results_object = res
            if self.config.report_timing:
                logger.info('\n' + str(timer))
            return res
        finally:
            # finally, clean any temporary files registered with the
            # temp file manager, created/populated *directly* by this
            # plugin.
            TempfileManager.pop(remove=not self.config.keepfiles)
            if not self.config.keepfiles:
                self._filename = None

    def _parse_sol(self):
        solve_vars = self._writer.get_ordered_vars()
        solve_cons = self._writer.get_ordered_cons()
        results = Results()

        f = open(self._filename + '.sol', 'r')
        all_lines = list(f.readlines())
        f.close()

        termination_line = all_lines[1]
        if 'Optimal Solution Found' in termination_line:
            results.termination_condition = TerminationCondition.optimal
        elif 'Problem may be infeasible' in termination_line:
            results.termination_condition = TerminationCondition.infeasible
        elif 'problem might be unbounded' in termination_line:
            results.termination_condition = TerminationCondition.unbounded
        elif 'Maximum Number of Iterations Exceeded' in termination_line:
            results.termination_condition = TerminationCondition.maxIterations
        elif 'Maximum CPU Time Exceeded' in termination_line:
            results.termination_condition = TerminationCondition.maxTimeLimit
        else:
            results.termination_condition = TerminationCondition.unknown

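        # The parsing below assumes the AMPL .sol layout written by Ipopt: the
        # dual values start at line index 12 (one per constraint), followed by
        # one primal value per variable, and then the 'ipopt_zU_out' and
        # 'ipopt_zL_out' suffix sections holding the bound multipliers that are
        # reported as reduced costs.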
        n_cons = len(solve_cons)
        n_vars = len(solve_vars)
        dual_lines = all_lines[12:12 + n_cons]
        primal_lines = all_lines[12 + n_cons:12 + n_cons + n_vars]

        rc_upper_info_line = all_lines[12 + n_cons + n_vars + 1]
        assert rc_upper_info_line.startswith('suffix')
        n_rc_upper = int(rc_upper_info_line.split()[2])
        assert 'ipopt_zU_out' in all_lines[12 + n_cons + n_vars + 2]
        upper_rc_lines = all_lines[12 + n_cons + n_vars + 3:12 + n_cons +
                                   n_vars + 3 + n_rc_upper]

        rc_lower_info_line = all_lines[12 + n_cons + n_vars + 3 + n_rc_upper]
        assert rc_lower_info_line.startswith('suffix')
        n_rc_lower = int(rc_lower_info_line.split()[2])
        assert 'ipopt_zL_out' in all_lines[12 + n_cons + n_vars + 3 +
                                           n_rc_upper + 1]
        lower_rc_lines = all_lines[12 + n_cons + n_vars + 3 + n_rc_upper +
                                   2:12 + n_cons + n_vars + 3 + n_rc_upper +
                                   2 + n_rc_lower]

        self._dual_sol = dict()
        self._primal_sol = ComponentMap()
        self._reduced_costs = ComponentMap()

        for ndx, dual in enumerate(dual_lines):
            dual = float(dual)
            con = solve_cons[ndx]
            self._dual_sol[con] = dual

        for ndx, primal in enumerate(primal_lines):
            primal = float(primal)
            var = solve_vars[ndx]
            self._primal_sol[var] = primal

        for rcu_line in upper_rc_lines:
            split_line = rcu_line.split()
            var_ndx = int(split_line[0])
            rcu = float(split_line[1])
            var = solve_vars[var_ndx]
            self._reduced_costs[var] = rcu

        for rcl_line in lower_rc_lines:
            split_line = rcl_line.split()
            var_ndx = int(split_line[0])
            rcl = float(split_line[1])
            var = solve_vars[var_ndx]
            if var in self._reduced_costs:
                if abs(rcl) > abs(self._reduced_costs[var]):
                    self._reduced_costs[var] = rcl
            else:
                self._reduced_costs[var] = rcl

        for var in solve_vars:
            if var not in self._reduced_costs:
                self._reduced_costs[var] = 0

        if results.termination_condition == TerminationCondition.optimal and self.config.load_solution:
            for v, val in self._primal_sol.items():
                v.set_value(val, skip_validation=True)

            if self._writer.get_active_objective() is None:
                results.best_feasible_objective = None
            else:
                results.best_feasible_objective = value(
                    self._writer.get_active_objective().expr)
        elif results.termination_condition == TerminationCondition.optimal:
            if self._writer.get_active_objective() is None:
                results.best_feasible_objective = None
            else:
                obj_expr_evaluated = replace_expressions(
                    self._writer.get_active_objective().expr,
                    substitution_map={
                        id(v): val
                        for v, val in self._primal_sol.items()
                    },
                    descend_into_named_expressions=True,
                    remove_named_expressions=True)
                results.best_feasible_objective = value(obj_expr_evaluated)
        elif self.config.load_solution:
            raise RuntimeError(
                'A feasible solution was not found, so no solution can be loaded. '
                'Please set opt.config.load_solution=False and check '
                'results.termination_condition and '
                'results.best_feasible_objective before loading a solution.')

        results.solution_loader = PersistentSolutionLoader(solver=self)

        return results

    def _apply_solver(self, timer: HierarchicalTimer):
        config = self.config

        if config.time_limit is not None:
            timeout = config.time_limit + min(max(1, 0.01 * config.time_limit),
                                              100)
        else:
            timeout = None

        ostreams = [
            LogStream(level=self.config.log_level,
                      logger=self.config.solver_output_logger)
        ]
        if self.config.stream_solver:
            ostreams.append(sys.stdout)

        cmd = [
            str(config.executable), self._filename + '.nl', '-AMPL',
            'option_file_name=' + self._filename + '.opt'
        ]
        if 'option_file_name' in self.ipopt_options:
            raise ValueError(
                'Use Ipopt.config.filename to specify the name of the options file. '
                'Do not use Ipopt.ipopt_options["option_file_name"].')
        for k, v in self.ipopt_options.items():
            cmd.append(str(k) + '=' + str(v))

        env = os.environ.copy()
        if 'PYOMO_AMPLFUNC' in env:
            env['AMPLFUNC'] = "\n".join(
                filter(None, (env.get('AMPLFUNC',
                                      None), env.get('PYOMO_AMPLFUNC', None))))

        with TeeStream(*ostreams) as t:
            timer.start('subprocess')
            cp = subprocess.run(cmd,
                                timeout=timeout,
                                stdout=t.STDOUT,
                                stderr=t.STDERR,
                                env=env,
                                universal_newlines=True)
            timer.stop('subprocess')

        if cp.returncode != 0:
            if self.config.load_solution:
                raise RuntimeError(
                    'A feasible solution was not found, so no solution can be loaded. '
                    'Please set opt.config.load_solution=False and check '
                    'results.termination_condition and '
                    'results.best_feasible_objective before loading a solution.'
                )
            results = Results()
            results.termination_condition = TerminationCondition.error
            results.best_feasible_objective = None
            self._primal_sol = None
            self._dual_sol = None
        else:
            timer.start('parse solution')
            results = self._parse_sol()
            timer.stop('parse solution')

        if self._writer.get_active_objective() is None:
            results.best_objective_bound = None
        else:
            if self._writer.get_active_objective().sense == minimize:
                results.best_objective_bound = -math.inf
            else:
                results.best_objective_bound = math.inf

        return results

    def get_primals(
        self,
        vars_to_load: Optional[Sequence[_GeneralVarData]] = None
    ) -> Mapping[_GeneralVarData, float]:
        res = ComponentMap()
        if vars_to_load is None:
            for v, val in self._primal_sol.items():
                res[v] = val
        else:
            for v in vars_to_load:
                res[v] = self._primal_sol[v]
        return res

    def get_duals(self, cons_to_load=None):
        if cons_to_load is None:
            return {k: v for k, v in self._dual_sol.items()}
        else:
            return {c: self._dual_sol[c] for c in cons_to_load}

    def get_reduced_costs(
        self,
        vars_to_load: Optional[Sequence[_GeneralVarData]] = None
    ) -> Mapping[_GeneralVarData, float]:
        if vars_to_load is None:
            return ComponentMap((k, v) for k, v in self._reduced_costs.items())
        else:
            return ComponentMap(
                (v, self._reduced_costs[v]) for v in vars_to_load)
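
# A minimal usage sketch for the Ipopt wrapper above. It assumes an 'ipopt'
# executable is on the PATH and that the Ipopt class defined in this example is
# in scope; the option names are ordinary Ipopt options.
import pyomo.environ as pyo

m = pyo.ConcreteModel()
m.x = pyo.Var(initialize=1.0)
m.y = pyo.Var(initialize=1.0)
m.con = pyo.Constraint(expr=m.x + m.y == 1)
m.obj = pyo.Objective(expr=(m.x - 2) ** 2 + (m.y + 1) ** 2)

opt = Ipopt()
opt.ipopt_options = {'max_iter': 500, 'tol': 1e-8}  # forwarded to Ipopt
results = opt.solve(m)          # writes the .nl file, runs ipopt, parses the .sol file

if results.best_feasible_objective is not None:
    primals = opt.get_primals()     # ComponentMap: Var -> value
    duals = opt.get_duals()         # dict: Constraint -> multiplier
    rcs = opt.get_reduced_costs()   # ComponentMap: Var -> reduced cost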
Example #30
    def __init__(self, reference_model):

        self.reference_model = None
        self.objective = None
        self.time_stages = None

        self.stage_to_variables_map = {}
        self.variable_to_stage_map = {}

        # the set of stochastic data objects
        # (possibly mapped to some distribution)
        self.stochastic_data = None

        # maps between variables and objectives
        self.variable_to_objectives_map = ComponentMap()
        self.objective_to_variables_map = ComponentMap()

        # maps between variables and constraints
        self.variable_to_constraints_map = ComponentMap()
        self.constraint_to_variables_map = ComponentMap()

        # maps between stochastic data and objectives
        self.stochastic_data_to_objectives_map = ComponentMap()
        self.objective_to_stochastic_data_map = ComponentMap()

        # maps between stochastic data and constraints
        self.stochastic_data_to_constraints_map = ComponentMap()
        self.constraint_to_stochastic_data_map = ComponentMap()

        # maps between stochastic data and variable lower and upper bounds
        self.stochastic_data_to_variables_lb_map = ComponentMap()
        self.variable_to_stochastic_data_lb_map = ComponentMap()

        self.stochastic_data_to_variables_ub_map = ComponentMap()
        self.variable_to_stochastic_data_ub_map = ComponentMap()

        self.variable_symbols = ComponentMap()

        if not isinstance(reference_model, Block):
            raise TypeError("reference model input must be a Pyomo model")
        self.reference_model = reference_model

        #
        # Extract stochastic parameters from the
        # StochasticDataAnnotation object
        #
        self.stochastic_data = \
            _extract_stochastic_data(self.reference_model)

        #
        # Get the variable stages from the
        # VariableStageAnnotation object
        #
        (self.stage_to_variables_map,
         self.variable_to_stage_map,
         self._variable_stage_assignments) = \
            _map_variable_stages(self.reference_model)
        self.time_stages = tuple(sorted(self.stage_to_variables_map))
        assert self.time_stages[0] == 1
        self.variable_symbols = ComponentUID.generate_cuid_string_map(
            self.reference_model, ctype=Var,
            repr_version=tree_structure.CUID_repr_version)
        # remove the parent blocks from this map
        keys_to_delete = []
        for var in self.variable_symbols:
            if var.parent_component().ctype is not Var:
                keys_to_delete.append(var)
        for key in keys_to_delete:
            del self.variable_symbols[key]

        #
        # Get the stage cost components from the StageCostAnnotation
        # and generate a dummy single-scenario scenario tree
        #
        stage_cost_annotation = locate_annotations(
            self.reference_model,
            StageCostAnnotation,
            max_allowed=1)
        if len(stage_cost_annotation) == 0:
            raise ValueError(
                "Reference model is missing stage cost "
                "annotation: %s" % (StageCostAnnotation.__name__))
        else:
            assert len(stage_cost_annotation) == 1
            stage_cost_annotation = stage_cost_annotation[0][1]
        stage_cost_assignments = ComponentMap(
            stage_cost_annotation.expand_entries())

        stage1_cost = None
        stage2_cost = None
        for cdata, stagenum in stage_cost_assignments.items():
            if stagenum == 1:
                stage1_cost = cdata
            elif stagenum == 2:
                stage2_cost = cdata
        if stage1_cost is None:
            raise ValueError("Missing stage cost annotation "
                             "for time stage: 1")
        if stage2_cost is None:
            raise ValueError("Missing stage cost annotation "
                             "for time stage: 2")
        assert stage1_cost is not stage2_cost
        self._stage1_cost = stage1_cost
        self._stage2_cost = stage2_cost

        #
        # Extract the locations of variables and stochastic data
        # within the model
        #
        sto_obj = StochasticObjectiveAnnotation()
        for objcntr, obj in enumerate(
                  self.reference_model.component_data_objects(
                Objective,
                active=True,
                descend_into=True), 1):

            if objcntr > 1:
                raise ValueError(
                    "Reference model can not contain more than one "
                    "active objective")

            self.objective = obj
            self.objective_sense = obj.sense

            obj_params = tuple(
                self._collect_mutable_parameters(obj.expr).values())
            self.objective_to_stochastic_data_map[obj] = []
            for paramdata in obj_params:
                if paramdata in self.stochastic_data:
                    self.stochastic_data_to_objectives_map.\
                        setdefault(paramdata, []).append(obj)
                    self.objective_to_stochastic_data_map[obj].\
                        append(paramdata)
            if len(self.objective_to_stochastic_data_map[obj]) == 0:
                del self.objective_to_stochastic_data_map[obj]
            else:
                # TODO: Can we make this declaration sparse
                #       by identifying which variables have
                #       stochastic coefficients? How to handle
                #       non-linear expressions?
                sto_obj.declare(obj)

            obj_variables = tuple(
                self._collect_variables(obj.expr).values())
            self.objective_to_variables_map[obj] = []
            for var in obj_variables:
                self.variable_to_objectives_map.\
                    setdefault(var, []).append(obj)
                self.objective_to_variables_map[obj].append(var)
            if len(self.objective_to_variables_map[obj]) == 0:
                del self.objective_to_variables_map[obj]

        sto_conbounds = StochasticConstraintBoundsAnnotation()
        sto_conbody = StochasticConstraintBodyAnnotation()
        for con in self.reference_model.component_data_objects(
                Constraint,
                active=True,
                descend_into=True):

            lower_params = tuple(
                self._collect_mutable_parameters(con.lower).values())
            body_params = tuple(
                self._collect_mutable_parameters(con.body).values())
            upper_params = tuple(
                self._collect_mutable_parameters(con.upper).values())

            # TODO: Can we make this declaration sparse
            #       by identifying which variables have
            #       stochastic coefficients? How to handle
            #       non-linear expressions? Currently, this
            #       code also fails to detect that mutable
            #       "constant" expressions might fall out
            #       of the body and into the bounds.
            if len(body_params):
                sto_conbody.declare(con)
            if len(body_params) or \
               len(lower_params) or \
               len(upper_params):
                sto_conbounds.declare(con,
                                      lb=bool(len(lower_params) or len(body_params)),
                                      ub=bool(len(upper_params) or len(body_params)))

            all_stochastic_params = {}
            for param in itertools.chain(lower_params,
                                         body_params,
                                         upper_params):
                if param in self.stochastic_data:
                    all_stochastic_params[id(param)] = param

            if len(all_stochastic_params) > 0:
                self.constraint_to_stochastic_data_map[con] = []
                # no params will appear twice in this iteration
                for param in all_stochastic_params.values():
                    self.stochastic_data_to_constraints_map.\
                        setdefault(param, []).append(con)
                    self.constraint_to_stochastic_data_map[con].\
                        append(param)

            body_variables = tuple(
                self._collect_variables(con.body).values())
            self.constraint_to_variables_map[con] = []
            for var in body_variables:
                self.variable_to_constraints_map.\
                    setdefault(var, []).append(con)
                self.constraint_to_variables_map[con].append(var)

        # For now, it is okay to have SOSConstraints in the
        # representation of a problem, but the SOS
        # constraints can't have custom weights that
        # represent stochastic data
        for soscon in self.reference_model.component_data_objects(
                SOSConstraint,
                active=True,
                descend_into=True):
            for var, weight in soscon.get_items():
                weight_params = tuple(
                    self._collect_mutable_parameters(weight).values())
                for param in weight_params:
                    if param in self.stochastic_data:
                        raise ValueError(
                            "SOSConstraints with stochastic data are currently"
                            " not supported in embedded stochastic programs. "
                            "The SOSConstraint component '%s' has a weight "
                            "term for variable '%s' that references stochastic"
                            " parameter '%s'"
                            % (soscon.name,
                               var.name,
                               param.name))
                self.variable_to_constraints_map.\
                    setdefault(var, []).append(soscon)
                self.constraint_to_variables_map.\
                    setdefault(soscon, []).append(var)

        sto_varbounds = StochasticVariableBoundsAnnotation()
        for var in self.reference_model.component_data_objects(
                Var,
                descend_into=True):

            lower_params = tuple(
                self._collect_mutable_parameters(var.lb).values())
            upper_params = tuple(
                self._collect_mutable_parameters(var.ub).values())

            if (len(lower_params) > 0) or \
               (len(upper_params) > 0):
                sto_varbounds.declare(var,
                                      lb=bool(len(lower_params) > 0),
                                      ub=bool(len(upper_params) > 0))

            self.variable_to_stochastic_data_lb_map[var] = []
            for param in lower_params:
                if param in self.stochastic_data:
                    self.stochastic_data_to_variables_lb_map.\
                        setdefault(param, []).append(var)
                    self.variable_to_stochastic_data_lb_map[var].\
                        append(param)
            if len(self.variable_to_stochastic_data_lb_map[var]) == 0:
                del self.variable_to_stochastic_data_lb_map[var]

            self.variable_to_stochastic_data_ub_map[var] = []
            for param in upper_params:
                if param in self.stochastic_data:
                    self.stochastic_data_to_variables_ub_map.\
                        setdefault(param, []).append(var)
                    self.variable_to_stochastic_data_ub_map[var].\
                        append(param)
            if len(self.variable_to_stochastic_data_ub_map[var]) == 0:
                del self.variable_to_stochastic_data_ub_map[var]

        #
        # Generate the explicit annotations
        #

        # first make sure these annotations do not already exist
        if len(locate_annotations(self.reference_model,
                                  StochasticConstraintBoundsAnnotation)) > 0:
            raise ValueError("Reference model can not contain "
                             "a StochasticConstraintBoundsAnnotation declaration.")
        if len(locate_annotations(self.reference_model,
                                  StochasticConstraintBodyAnnotation)) > 0:
            raise ValueError("Reference model can not contain "
                             "a StochasticConstraintBodyAnnotation declaration.")
        if len(locate_annotations(self.reference_model,
                                  StochasticObjectiveAnnotation)) > 0:
            raise ValueError("Reference model can not contain "
                             "a StochasticObjectiveAnnotation declaration.")

        # now add any necessary annotations
        if sto_obj.has_declarations:
            assert not hasattr(self.reference_model,
                               ".pyspembeddedsp_stochastic_objective_annotation")
            setattr(self.reference_model,
                    ".pyspembeddedsp_stochastic_objective_annotation",
                    sto_obj)
        if sto_conbody.has_declarations:
            assert not hasattr(self.reference_model,
                               ".pyspembeddedsp_stochastic_constraint_body_annotation")
            setattr(self.reference_model,
                    ".pyspembeddedsp_stochastic_constraint_body_annotation",
                    sto_conbody)
        if sto_conbounds.has_declarations:
            assert not hasattr(self.reference_model,
                               ".pyspembeddedsp_stochastic_constraint_bounds_annotation")
            setattr(self.reference_model,
                    ".pyspembeddedsp_stochastic_constraint_bounds_annotation",
                    sto_conbounds)
        if sto_varbounds.has_declarations:
            assert not hasattr(self.reference_model,
                               ".pyspembeddedsp_stochastic_variable_bounds_annotation")
            setattr(self.reference_model,
                    ".pyspembeddedsp_stochastic_variable_bounds_annotation",
                    sto_varbounds)
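
# A heavily hedged sketch of how a reference model for the constructor above
# might be annotated. The annotation class names match those referenced in the
# code; the import paths, declare(...) signatures, TableDistribution helper,
# and the EmbeddedSP class name are assumptions based on the PySP package and
# may differ between releases.
import pyomo.environ as pyo
from pyomo.pysp.annotations import (StochasticDataAnnotation,
                                    VariableStageAnnotation,
                                    StageCostAnnotation)
from pyomo.pysp.embeddedsp import EmbeddedSP, TableDistribution

model = pyo.ConcreteModel()
model.d = pyo.Param(mutable=True, initialize=10.0)   # stochastic demand
model.x = pyo.Var(within=pyo.NonNegativeReals)       # first-stage decision
model.y = pyo.Var(within=pyo.NonNegativeReals)       # second-stage recourse

model.FirstStageCost = pyo.Expression(expr=2.0 * model.x)
model.SecondStageCost = pyo.Expression(expr=3.0 * model.y)
model.obj = pyo.Objective(expr=model.FirstStageCost + model.SecondStageCost)
model.demand = pyo.Constraint(expr=model.x + model.y >= model.d)

# Declare the stochastic data, the stage of each variable, and the per-stage
# cost expressions (declare signatures are assumed, not verified).
model.stoch_data = StochasticDataAnnotation()
model.stoch_data.declare(model.d,
                         distribution=TableDistribution([8.0, 10.0, 12.0]))
model.var_stage = VariableStageAnnotation()
model.var_stage.declare(model.x, 1)
model.var_stage.declare(model.y, 2)
model.stage_cost = StageCostAnnotation()
model.stage_cost.declare(model.FirstStageCost, 1)
model.stage_cost.declare(model.SecondStageCost, 2)

sp = EmbeddedSP(model)   # assuming the constructor above belongs to EmbeddedSP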