Example no. 1
def make_model_2():
    m = ConcreteModel()
    m.x = Var(initialize=0.1, bounds=(0, 1))
    m.y = Var(initialize=0.1, bounds=(0, 1))
    m.obj = Objective(expr=-m.x**2 - m.y**2)
    m.c = Constraint(expr=m.y <= pe.exp(-m.x))
    return m
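A minimal usage sketch for the model above, assuming the snippet's names come from "from pyomo.environ import *" together with "import pyomo.environ as pe" (needed for pe.exp), and that an Ipopt executable is available; the solver choice is an illustrative assumption.

import pyomo.environ as pe
from pyomo.environ import SolverFactory, value

m = make_model_2()
# Any NLP solver registered with Pyomo works here; ipopt is assumed installed.
SolverFactory('ipopt').solve(m)
print(value(m.x), value(m.y), value(m.obj))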
Example no. 2
        def b3(b):
            b.v = Var()
            b.v1 = Var(m.space)

            @b.Block(m.space)
            def b(bb):
                bb.v = Var(m.set)
Example no. 3
        def b2(b):
            b.v = Var()
            b.v1 = Var(m.set)

            @b.Block()
            def b(bl):
                bl.v = Var()
                bl.v1 = Var(m.set)
                bl.v2 = Var(m.time)
Example no. 4
def add_decision_rule_variables(model_data, config):
    '''
    Function to add decision rule (DR) variables to the working model. DR variables become first-stage design
    variables, which do not get copied at each iteration. Currently supports static_approx (no DR), affine DR,
    and quadratic DR.
    :param model_data: the data container for the working model
    :param config: the config block
    :return:
    '''
    second_stage_variables = model_data.working_model.util.second_stage_variables
    first_stage_variables = model_data.working_model.util.first_stage_variables
    uncertain_params = model_data.working_model.util.uncertain_params
    decision_rule_vars = []
    degree = config.decision_rule_order
    bounds = (None, None)
    if degree == 0:
        for i in range(len(second_stage_variables)):
            model_data.working_model.add_component(
                    "decision_rule_var_" + str(i),
                    Var(initialize=value(second_stage_variables[i], exception=False),
                        bounds=bounds, domain=Reals)
            )
            first_stage_variables.extend(getattr(model_data.working_model, "decision_rule_var_" + str(i)).values())
            decision_rule_vars.append(getattr(model_data.working_model, "decision_rule_var_" + str(i)))
    elif degree == 1:
        for i in range(len(second_stage_variables)):
            index_set = list(range(len(uncertain_params) + 1))
            model_data.working_model.add_component("decision_rule_var_" + str(i),
                    Var(index_set,
                        initialize=0,
                        bounds=bounds,
                        domain=Reals))
            # === For affine drs, the [0]th constant term is initialized to the control variable values, all other terms are initialized to 0
            getattr(model_data.working_model, "decision_rule_var_" + str(i))[0].set_value(value(second_stage_variables[i], exception=False), skip_validation=True)
            first_stage_variables.extend(list(getattr(model_data.working_model, "decision_rule_var_" + str(i)).values()))
            decision_rule_vars.append(getattr(model_data.working_model, "decision_rule_var_" + str(i)))
    elif degree == 2 or degree == 3 or degree == 4:
        for i in range(len(second_stage_variables)):
            num_vars = int(sp.special.comb(N=len(uncertain_params) + degree, k=degree))
            dict_init = {}
            for r in range(num_vars):
                if r == 0:
                    dict_init.update({r: value(second_stage_variables[i], exception=False)})
                else:
                    dict_init.update({r: 0})
            model_data.working_model.add_component("decision_rule_var_" + str(i),
                                                   Var(list(range(num_vars)), initialize=dict_init, bounds=bounds,
                                                       domain=Reals))
            first_stage_variables.extend(
                list(getattr(model_data.working_model, "decision_rule_var_" + str(i)).values()))
            decision_rule_vars.append(getattr(model_data.working_model, "decision_rule_var_" + str(i)))
    else:
        raise ValueError(
            "Decision rule order " + str(config.decision_rule_order) +
            " is not yet supported. PyROS supports polynomials of degree 0 (static approximation), 1, 2.")
    model_data.working_model.util.decision_rule_vars = decision_rule_vars
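A small counting sketch (not part of PyROS) that mirrors the branches above: one DR coefficient per second-stage variable for the static case, len(uncertain_params) + 1 for affine rules, and comb(n_q + d, d) monomial coefficients for degree d >= 2.

import scipy.special as sp_special

def num_dr_coefficients(num_uncertain_params, degree):
    # One coefficient per monomial of the polynomial decision rule in the
    # uncertain parameters, matching the index sets built above.
    if degree == 0:
        return 1
    return int(sp_special.comb(N=num_uncertain_params + degree, k=degree))

# Example: 2 uncertain parameters, quadratic decision rule -> comb(4, 2) = 6
print(num_dr_coefficients(2, 2))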
Example no. 5
def two_objective_model():
    model = ConcreteModel()

    # Define variables
    model.x1 = Var(within=NonNegativeReals)
    model.x2 = Var(within=NonNegativeReals)

    # --------------------------------------
    #   Define the objective functions
    # --------------------------------------

    def objective1(model):
        return model.x1

    def objective2(model):
        return 3 * model.x1 + 4 * model.x2

    # --------------------------------------
    #   Define the regular constraints
    # --------------------------------------

    def constraint1(model):
        return model.x1 <= 20

    def constraint2(model):
        return model.x2 <= 40

    def constraint3(model):
        return 5 * model.x1 + 4 * model.x2 <= 200

    # --------------------------------------
    #   Add components to the model
    # --------------------------------------

    # Add the constraints to the model
    model.con1 = Constraint(rule=constraint1)
    model.con2 = Constraint(rule=constraint2)
    model.con3 = Constraint(rule=constraint3)

    # Add the objective functions to the model using ObjectiveList(). Note
    # that the first index is 1 instead of 0!
    model.obj_list = ObjectiveList()
    model.obj_list.add(expr=objective1(model), sense=maximize)
    model.obj_list.add(expr=objective2(model), sense=maximize)

    # By default deactivate all the objective functions
    for o in range(len(model.obj_list)):
        model.obj_list[o + 1].deactivate()

    return model
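A hedged usage sketch for the model above: the objectives stored in obj_list are deactivated by default, so one is activated before solving. The solver name glpk is an assumption; any LP solver visible to Pyomo works.

from pyomo.environ import SolverFactory, value

model = two_objective_model()
model.obj_list[1].activate()        # optimize the first objective only
SolverFactory('glpk').solve(model)  # glpk assumed installed
print(value(model.x1), value(model.x2))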
Example no. 6
        def b3(b, c):
            b.v = Var(m.set2, initialize=3)

            @b.Constraint(m.set2)
            def con(b, s):
                return (5*b.v[s] ==
                        m.fs.b2[m.time.first(), m.space.first()].v[c])
Example no. 7
    def convert_prob(self):
        self.logger.info("Converting optimization problem")

        self.model.con_list = ConstraintList()

        # Set of objective functions
        self.model.Os = Set(ordered=True, initialize=[o + 2 for o in self.iter_obj2])

        # Slack for objectives introduced as constraints
        self.model.Slack = Var(self.model.Os, within=NonNegativeReals)
        self.model.e = Param(
            self.model.Os,
            initialize=[np.nan for _ in self.model.Os],
            within=Any,
            mutable=True,
        )  # RHS of constraints

        # Add p-1 objective functions as constraints
        for o in range(1, self.n_obj):
            self.model.obj_list[1].expr += self.opts.eps * (
                10 ** (-1 * (o - 1)) * self.model.Slack[o + 1] / self.obj_range[o - 1]
            )

            self.model.con_list.add(
                expr=self.model.obj_list[o + 1].expr - self.model.Slack[o + 1]
                == self.model.e[o + 1]
            )
Example no. 8
    def _discretize_bilinear(self, b, v, v_idx, u, u_idx):
        _z = b.z
        _dv = b.dv[v_idx]
        _u = Var(b.DISCRETIZATION, within=u.domain, bounds=u.bounds)
        logger.info("Discretizing (v=%s)*(u=%s) as u%s_v%s" %
                    (v.name, u.name, u_idx, v_idx))
        b.add_component("u%s_v%s" % (u_idx, v_idx), _u)
        _lb, _ub = u.bounds
        if _lb is None or _ub is None:
            raise RuntimeError("Couldn't relax variable %s: missing "
                               "finite lower/upper bounds." % (u.name))
        _c = ConstraintList()
        b.add_component("c_disaggregate_u%s_v%s" % (u_idx, v_idx), _c)
        for k in b.DISCRETIZATION:
            # _lb * z[v_idx,k] <= _u[k] <= _ub * z[v_idx,k]
            _c.add(expr=_lb * _z[v_idx, k] <= _u[k])
            _c.add(expr=_u[k] <= _ub * _z[v_idx, k])
            # _lb * (1-z[v_idx,k]) <= u - _u[k] <= _ub * (1-z[v_idx,k])
            _c.add(expr=_lb * (1 - _z[v_idx, k]) <= u - _u[k])
            _c.add(expr=u - _u[k] <= _ub * (1 - _z[v_idx, k]))

        _v_lb, _v_ub = v.bounds
        _bnd_rng = (_v_lb * _lb, _v_lb * _ub, _v_ub * _lb, _v_ub * _ub)
        _w = Var(bounds=(min(_bnd_rng), max(_bnd_rng)))
        b.add_component("w%s_v%s" % (u_idx, v_idx), _w)

        K = max(b.DISCRETIZATION)

        _dw = Var(bounds=(min(0, _lb * 2**-K, _ub * 2**-K),
                          max(0, _lb * 2**-K, _ub * 2**-K)))
        b.add_component("dw%s_v%s" % (u_idx, v_idx), _dw)

        _c = Constraint(expr=_w == _v_lb * u + (_v_ub - _v_lb) *
                        (sum(2**-k * _u[k] for k in b.DISCRETIZATION) + _dw))
        b.add_component("c_bilinear_u%s_v%s" % (u_idx, v_idx), _c)

        _c = ConstraintList()
        b.add_component("c_mccormick_u%s_v%s" % (u_idx, v_idx), _c)
        # u_lb * dv <= dw <= u_ub * dv
        _c.add(expr=_lb * _dv <= _dw)
        _c.add(expr=_dw <= _ub * _dv)
        # (u-u_ub)*2^-K + u_ub*dv <= dw <= (u-u_lb)*2^-K + u_lb*dv
        _c.add(expr=(u - _ub) * 2**-K + _ub * _dv <= _dw)
        _c.add(expr=_dw <= (u - _lb) * 2**-K + _lb * _dv)

        return _w
Example no. 9
def make_model():
    m = ConcreteModel()
    m.x = Var([1,2,3], initialize=0)
    m.f = Var([1,2,3], initialize=0)
    m.F = Var(initialize=0)
    m.f[1].fix(1)
    m.f[2].fix(2)

    m.sum_con = Constraint(expr= 
            (1 == m.x[1] + m.x[2] + m.x[3]))
    def bilin_rule(m, i):
        return m.F*m.x[i] == m.f[i]
    m.bilin_con = Constraint([1,2,3], rule=bilin_rule)

    m.obj = Objective(expr=m.F**2)

    return m
Example no. 10
def make_model():
    m = ConcreteModel()
    m.time = ContinuousSet(bounds=(0, 10))
    m.space = ContinuousSet(bounds=(0, 5))
    m.set1 = Set(initialize=['a', 'b', 'c'])
    m.set2 = Set(initialize=['d', 'e', 'f'])
    m.fs = Block()

    m.fs.v0 = Var(m.space, initialize=1)

    @m.fs.Block()
    def b1(b):
        b.v = Var(m.time, m.space, initialize=1)
        b.dv = DerivativeVar(b.v, wrt=m.time, initialize=0)

        b.con = Constraint(m.time,
                           m.space,
                           rule=lambda b, t, x: b.dv[t, x] == 7 - b.v[t, x])
        # Inconsistent

        @b.Block(m.time)
        def b2(b, t):
            b.v = Var(initialize=2)

    @m.fs.Block(m.time, m.space)
    def b2(b, t, x):
        b.v = Var(m.set1, initialize=2)

        @b.Block(m.set1)
        def b3(b, c):
            b.v = Var(m.set2, initialize=3)

            @b.Constraint(m.set2)
            def con(b, s):
                return (5 * b.v[s] == m.fs.b2[m.time.first(),
                                              m.space.first()].v[c])
                # inconsistent

    @m.fs.Constraint(m.time)
    def con1(fs, t):
        return fs.b1.v[t, m.space.last()] == 5

    # Will be inconsistent

    @m.fs.Constraint(m.space)
    def con2(fs, x):
        return fs.b1.v[m.time.first(), x] == fs.v0[x]

    # will be consistent

    disc = TransformationFactory('dae.collocation')
    disc.apply_to(m, wrt=m.time, nfe=5, ncp=2, scheme='LAGRANGE-RADAU')
    disc.apply_to(m, wrt=m.space, nfe=5, ncp=2, scheme='LAGRANGE-RADAU')

    return m
Example no. 11
def two_kp_model(type):
    model = ConcreteModel()

    # Define input files
    xlsx = pd.ExcelFile(
        f"{Path(__file__).parent.absolute()}/input/{type}.xlsx",
        engine="openpyxl")
    a = pd.read_excel(xlsx, index_col=0, sheet_name="a").to_numpy()
    b = pd.read_excel(xlsx, index_col=0, sheet_name="b").to_numpy()
    c = pd.read_excel(xlsx, index_col=0, sheet_name="c").to_numpy()

    # Define variables
    model.ITEMS = Set(initialize=range(len(a[0])))
    model.x = Var(model.ITEMS, within=Binary)

    # --------------------------------------
    #   Define the objective functions
    # --------------------------------------

    def objective1(model):
        return sum(c[0][i] * model.x[i] for i in model.ITEMS)

    def objective2(model):
        return sum(c[1][i] * model.x[i] for i in model.ITEMS)

    # --------------------------------------
    #   Define the regular constraints
    # --------------------------------------

    def constraint1(model):
        return sum(a[0][i] * model.x[i] for i in model.ITEMS) <= b[0][0]

    def constraint2(model):
        return sum(a[1][i] * model.x[i] for i in model.ITEMS) <= b[1][0]

    # --------------------------------------
    #   Add components to the model
    # --------------------------------------

    # Add the constraints to the model
    model.con1 = Constraint(rule=constraint1)
    model.con2 = Constraint(rule=constraint2)

    # Add the objective functions to the model using ObjectiveList(). Note
    # that the first index is 1 instead of 0!
    model.obj_list = ObjectiveList()
    model.obj_list.add(expr=objective1(model), sense=maximize)
    model.obj_list.add(expr=objective2(model), sense=maximize)

    # By default deactivate all the objective functions
    for o in range(len(model.obj_list)):
        model.obj_list[o + 1].deactivate()

    return model
Example no. 12
    def b1(b):
        b.v = Var(m.time, m.space, initialize=1)
        b.dv = DerivativeVar(b.v, wrt=m.time, initialize=0)

        b.con = Constraint(m.time, m.space,
                rule=lambda b, t, x: b.dv[t, x] == 7 - b.v[t, x])
        # Inconsistent

        @b.Block(m.time)
        def b2(b, t):
            b.v = Var(initialize=2)
Example no. 13
def make_model_tri(n, small_val=1e-7, big_val=1e2):
    m = ConcreteModel()
    m.x = Var(range(n), initialize=0.5)

    def c_rule(m, i):
        return big_val*m.x[i-1] + small_val*m.x[i] + big_val*m.x[i+1] == 1
    
    m.c = Constraint(range(1,n-1), rule=c_rule)

    m.obj = Objective(expr=small_val*sum((m.x[i]-1)**2 for i in range(n)))

    return m
Example no. 14
    def compile(self, model, start_time):
        """
        Compile the optimization model

        :param model: The entire optimization model
        :param start_time: The optimization start_time

        :return:
        """
        Component.compile(self, model, start_time)

        if not self.compiled:
            if self.repr_days is None:
                self.block.heat_flow_in = Var(self.TIME)
                self.block.heat_flow_out = Var(self.TIME)
                self.block.mass_flow = Var(self.TIME)

                def _heat_flow(b, t):
                    return b.heat_flow_in[t] == b.heat_flow_out[t]

                self.block.heat_flow = Constraint(self.TIME, rule=_heat_flow)
            else:
                self.block.heat_flow_in = Var(self.TIME, self.REPR_DAYS)
                self.block.heat_flow_out = Var(self.TIME, self.REPR_DAYS)
                self.block.mass_flow = Var(self.TIME, self.REPR_DAYS)

                def _heat_flow(b, t, c):
                    return b.heat_flow_in[t, c] == b.heat_flow_out[t, c]

                self.block.heat_flow = Constraint(self.TIME,
                                                  self.REPR_DAYS,
                                                  rule=_heat_flow)

        self.compiled = True
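A standalone reminder (not modesto code) of the indexing pattern used above: a Constraint declared over two sets passes both indices to its rule, and Vars indexed over both sets must be subscripted with both.

from pyomo.environ import ConcreteModel, Set, Var, Constraint

m = ConcreteModel()
m.TIME = Set(initialize=[0, 1, 2])
m.REPR_DAYS = Set(initialize=['d1', 'd2'])
m.heat_flow_in = Var(m.TIME, m.REPR_DAYS)
m.heat_flow_out = Var(m.TIME, m.REPR_DAYS)

def _heat_flow(b, t, c):
    # Both indices are needed to address the two-dimensional Vars.
    return b.heat_flow_in[t, c] == b.heat_flow_out[t, c]

m.heat_flow = Constraint(m.TIME, m.REPR_DAYS, rule=_heat_flow)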
Example no. 15
    def set_external_model(self, external_grey_box_model):
        self._ex_model = ex_model = external_grey_box_model
        if ex_model is None:
            self._input_names = self._output_names = None
            self.inputs = self.outputs = None
            return

        self._input_names = ex_model.input_names()
        if self._input_names is None or len(self._input_names) == 0:
            raise ValueError(
                'No input_names specified for external_grey_box_model.'
                ' Must specify at least one input.')
        self.inputs = Var(self._input_names)

        self._equality_constraint_names = ex_model.equality_constraint_names()
        self._output_names = ex_model.output_names()

        # Note, this works even if output_names is an empty list
        self.outputs = Var(self._output_names)

        # call the callback so the model can set initialization, bounds, etc.
        external_grey_box_model.finalize_block_construction(self)
Example no. 16
 def getvar(name, ndx=None):
     v = _vars.get((name, ndx), None)
     if v is None:
         v = Var()
         if ndx is None:
             v_name = name
         elif type(ndx) is tuple:
             v_name = "%s[%s]" % (name, ','.join(map(str, ndx)))
         else:
             v_name = "%s[%s]" % (name, str(ndx))
         setattr(dual, v_name, v)
         _vars[name, ndx] = v
     return v
Example no. 17
    def make_noisy(self, cov_dict, conf_level=2):
        self.d1.name = "Noisy plant (d1)"
        k = 0
        for x in self.states:
            s = getattr(self.d1, x)  #: state
            xicc = getattr(self.d1, x + "_icc")
            xicc.deactivate()
            for j in self.state_vars[x]:
                self.xp_l.append(s[(1, 0) + j])
                self.xp_key[(x, j)] = k
                k += 1

        self.d1.xS_pnoisy = Set(initialize=[
            i for i in range(0, len(self.xp_l))
        ])  #: Create set of noisy_states
        self.d1.w_pnoisy = Var(self.d1.xS_pnoisy,
                               initialize=0.0)  #: Model disturbance
        self.d1.Q_pnoisy = Param(self.d1.xS_pnoisy, initialize=1, mutable=True)
        self.d1.obj_fun_noisy = Objective(
            sense=maximize,
            expr=0.5 * sum(self.d1.Q_pnoisy[k] * self.d1.w_pnoisy[k]**2
                           for k in self.d1.xS_pnoisy))
        self.d1.ics_noisy = ConstraintList()

        k = 0
        for x in self.states:
            s = getattr(self.d1, x)  #: state
            xic = getattr(self.d1, x + "_ic")
            for j in self.state_vars[x]:
                expr = s[(1, 1) + j] == xic[j] + self.d1.w_pnoisy[k]
                self.d1.ics_noisy.add(expr)
                k += 1

        for key in cov_dict:
            vni = key
            v_i = self.xp_key[vni]
            self.d1.Q_pnoisy[v_i].value = cov_dict[vni]
            self.d1.w_pnoisy[v_i].setlb(-conf_level * cov_dict[vni])
            self.d1.w_pnoisy[v_i].setub(conf_level * cov_dict[vni])

        with open("debug.txt", "w") as f:
            self.d1.Q_pnoisy.display(ostream=f)
            self.d1.obj_fun_noisy.pprint(ostream=f)
            self.d1.ics_noisy.pprint(ostream=f)
            self.d1.w_pnoisy.display(ostream=f)
Example no. 18
    def _relax_bilinear(self, b, u, v):
        u_lb, u_ub = u.bounds
        v_lb, v_ub = v.bounds
        if u_lb is None or u_ub is None:
            raise RuntimeError("Couldn't relax variable %s: missing "
                               "finite lower/upper bounds." % (u.cname(True)))
        if v_lb is None or v_ub is None:
            raise RuntimeError("Couldn't relax variable %s: missing "
                               "finite lower/upper bounds." % (v.cname(True)))
        w = Var(bounds=(min(u_lb*v_lb, u_lb*v_ub, u_ub*v_lb, u_ub*v_ub),
                        max(u_lb*v_lb, u_lb*v_ub, u_ub*v_lb, u_ub*v_ub)))
        b.add_component("w_%s_%s" % (u.cname(), v.cname()), w)

        _c = ConstraintList(noruleinit=True)
        b.add_component( "c_mccormick_%s_%s" % (u.cname(), v.cname()), _c )

        _c.add(expr=w >= u * v_lb + u_lb * v - u_lb*v_lb)
        _c.add(expr=w >= u * v_ub + u_ub * v - u_ub*v_ub)
        _c.add(expr=w <= u * v_lb + u_ub * v - u_ub*v_lb)
        _c.add(expr=w <= u * v_ub + u_lb * v - u_lb*v_ub)

        return w
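A self-contained sketch of the four McCormick envelope inequalities added above for a product term w = u*v; the bounds chosen here are illustrative only.

from pyomo.environ import ConcreteModel, Var, Constraint

m = ConcreteModel()
m.u = Var(bounds=(0, 2))
m.v = Var(bounds=(1, 3))
u_lb, u_ub = m.u.bounds
v_lb, v_ub = m.v.bounds

# w relaxes the bilinear product u*v over the box [u_lb, u_ub] x [v_lb, v_ub]
m.w = Var(bounds=(min(u_lb*v_lb, u_lb*v_ub, u_ub*v_lb, u_ub*v_ub),
                  max(u_lb*v_lb, u_lb*v_ub, u_ub*v_lb, u_ub*v_ub)))
m.mc1 = Constraint(expr=m.w >= m.u*v_lb + u_lb*m.v - u_lb*v_lb)
m.mc2 = Constraint(expr=m.w >= m.u*v_ub + u_ub*m.v - u_ub*v_ub)
m.mc3 = Constraint(expr=m.w <= m.u*v_lb + u_ub*m.v - u_ub*v_lb)
m.mc4 = Constraint(expr=m.w <= m.u*v_ub + u_lb*m.v - u_lb*v_ub)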
Example no. 19
 def setUp(self):
     self.model = ConcreteModel()
     self.model.x = Var()
     # set by derived class
     self._arg = None
Example no. 20
def make_separation_problem(model_data, config):
    """
    Swap out uncertain param Param objects for Vars
    Add uncertainty set constraints and separation objectives
    """
    separation_model = model_data.original.clone()
    separation_model.del_component("coefficient_matching_constraints")
    separation_model.del_component("coefficient_matching_constraints_index")

    uncertain_params = separation_model.util.uncertain_params
    separation_model.util.uncertain_param_vars = param_vars = Var(
        range(len(uncertain_params)))
    map_new_constraint_list_to_original_con = ComponentMap()

    if config.objective_focus is ObjectiveType.worst_case:
        separation_model.util.zeta = Param(initialize=0, mutable=True)
        constr = Constraint(expr=separation_model.first_stage_objective +
                            separation_model.second_stage_objective -
                            separation_model.util.zeta <= 0)
        separation_model.add_component("epigraph_constr", constr)

    substitution_map = {}
    #Separation problem initialized to nominal uncertain parameter values
    for idx, var in enumerate(list(param_vars.values())):
        param = uncertain_params[idx]
        var.set_value(param.value, skip_validation=True)
        substitution_map[id(param)] = var

    separation_model.util.new_constraints = constraints = ConstraintList()

    uncertain_param_set = ComponentSet(uncertain_params)
    for c in separation_model.component_data_objects(Constraint):
        if any(v in uncertain_param_set
               for v in identify_mutable_parameters(c.expr)):
            if c.equality:
                constraints.add(
                    replace_expressions(expr=c.lower,
                                        substitution_map=substitution_map) ==
                    replace_expressions(expr=c.body,
                                        substitution_map=substitution_map))
            elif c.lower is not None:
                constraints.add(
                    replace_expressions(expr=c.lower,
                                        substitution_map=substitution_map) <=
                    replace_expressions(expr=c.body,
                                        substitution_map=substitution_map))
            elif c.upper is not None:
                constraints.add(
                    replace_expressions(expr=c.upper,
                                        substitution_map=substitution_map) >=
                    replace_expressions(expr=c.body,
                                        substitution_map=substitution_map))
            else:
                raise ValueError(
                    "Unable to parse constraint for building the separation problem."
                )
            c.deactivate()
            map_new_constraint_list_to_original_con[constraints[
                constraints.index_set().last()]] = c

    separation_model.util.map_new_constraint_list_to_original_con = map_new_constraint_list_to_original_con

    # === Add objectives first so that the uncertainty set
    #     constraints do not get picked up into the set
    #     of performance constraints, which become objectives
    make_separation_objective_functions(separation_model, config)
    add_uncertainty_set_constraints(separation_model, config)

    # === Deactivate h(x,q) == 0 constraints
    for c in separation_model.util.h_x_q_constraints:
        c.deactivate()

    return separation_model
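A minimal sketch (assuming a recent Pyomo where replace_expressions is available from pyomo.core.expr.visitor) of the Param-to-Var swap performed above when building the separation problem.

from pyomo.environ import ConcreteModel, Param, Var
from pyomo.core.expr.visitor import replace_expressions

m = ConcreteModel()
m.q = Param(initialize=0.5, mutable=True)   # uncertain parameter
m.q_var = Var(initialize=m.q.value)         # its stand-in Var in the separation problem
m.x = Var()

e = m.x + 2 * m.q
# The substitution map is keyed by id(), as in make_separation_problem above.
e_sep = replace_expressions(e, substitution_map={id(m.q): m.q_var})
# e_sep now references m.q_var in place of the mutable Param m.q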
Example no. 21
def coefficient_matching(model, constraint, uncertain_params, config):
    '''
    :param model: master problem model
    :param constraint: the constraint from the master problem model
    :param uncertain_params: the list of uncertain parameters
    :param config: the config block (the effective first-stage variables, which include the ssv if decision_rule_order = 0, are taken from model.util)
    :return: True if the coefficient matching was successful, False if it is proven robust_infeasible due to
             constraints of the form 1 == 0
    '''
    # === Returned flags
    successful_matching = True
    robust_infeasible = False

    # === Efficiency for q_LB = q_UB
    actual_uncertain_params = []

    for i in range(len(uncertain_params)):
        if not is_certain_parameter(uncertain_param_index=i, config=config):
            actual_uncertain_params.append(uncertain_params[i])

    # === Add coefficient matching constraint list
    if not hasattr(model, "coefficient_matching_constraints"):
        model.coefficient_matching_constraints = ConstraintList()
    if not hasattr(model, "swapped_constraints"):
        model.swapped_constraints = ConstraintList()

    variables_in_constraint = ComponentSet(identify_variables(constraint.expr))
    params_in_constraint = ComponentSet(identify_mutable_parameters(constraint.expr))
    first_stage_variables = model.util.first_stage_variables
    second_stage_variables = model.util.second_stage_variables

    # === Determine if we need to do DR expression/ssv substitution to
    #     make h(x,z,q) == 0 into h(x,d,q) == 0 (which is just h(x,q) == 0)
    if all(v in ComponentSet(first_stage_variables) for v in variables_in_constraint) and \
            any(q in ComponentSet(actual_uncertain_params) for q in params_in_constraint):
        # h(x, q) == 0
        pass
    elif all(v in ComponentSet(first_stage_variables + second_stage_variables) for v in variables_in_constraint) and \
            any(q in ComponentSet(actual_uncertain_params) for q in params_in_constraint):
        constraint = substitute_ssv_in_dr_constraints(model=model, constraint=constraint)
        variables_in_constraint = ComponentSet(identify_variables(constraint.expr))
        params_in_constraint = ComponentSet(identify_mutable_parameters(constraint.expr))
    else:
        pass

    if all(v in ComponentSet(first_stage_variables) for v in variables_in_constraint) and \
            any(q in ComponentSet(actual_uncertain_params) for q in params_in_constraint):

        # Swap param objects for variable objects in this constraint
        model.param_set = []
        for i in range(len(list(variables_in_constraint))):
            # Initialize Params to non-zero value due to standard_repn bug
            model.add_component("p_%s" % i, Param(initialize=1, mutable=True))
            model.param_set.append(getattr(model, "p_%s" % i))

        model.variable_set = []
        for i in range(len(list(actual_uncertain_params))):
            model.add_component("x_%s" % i, Var(initialize=1))
            model.variable_set.append(getattr(model, "x_%s" % i))

        original_var_to_param_map = list(zip(list(variables_in_constraint), model.param_set))
        original_param_to_var_map = list(zip(list(actual_uncertain_params), model.variable_set))

        # Map each original variable (by id) to its placeholder Param
        var_to_param_substitution_map_forward = {}
        for var, param in original_var_to_param_map:
            var_to_param_substitution_map_forward[id(var)] = param

        # Map each uncertain Param (by id) to its placeholder Var
        param_to_var_substitution_map_forward = {}
        for param, var in original_param_to_var_map:
            param_to_var_substitution_map_forward[id(param)] = var

        # Reverse map: placeholder Param (by id) back to the original variable
        var_to_param_substitution_map_reverse = {}
        for var, param in original_var_to_param_map:
            var_to_param_substitution_map_reverse[id(param)] = var

        # Reverse map: placeholder Var (by id) back to the original uncertain Param
        param_to_var_substitution_map_reverse = {}
        for param, var in original_param_to_var_map:
            param_to_var_substitution_map_reverse[id(var)] = param

        model.swapped_constraints.add(
            replace_expressions(
                expr=replace_expressions(expr=constraint.lower,
                                         substitution_map=param_to_var_substitution_map_forward),
                substitution_map=var_to_param_substitution_map_forward) ==
            replace_expressions(
                expr=replace_expressions(expr=constraint.body,
                                         substitution_map=param_to_var_substitution_map_forward),
                substitution_map=var_to_param_substitution_map_forward))

        swapped = model.swapped_constraints[max(model.swapped_constraints.keys())]

        val = generate_standard_repn(swapped.body, compute_values=False)

        if val.constant is not None:
            if type(val.constant) not in native_types:
                temp_expr = replace_expressions(val.constant, substitution_map=var_to_param_substitution_map_reverse)
                if temp_expr.is_potentially_variable():
                    model.coefficient_matching_constraints.add(expr=temp_expr == 0)
                elif math.isclose(value(temp_expr), 0, rel_tol=COEFF_MATCH_REL_TOL, abs_tol=COEFF_MATCH_ABS_TOL):
                    pass
                else:
                    successful_matching = False
                    robust_infeasible = True
            elif math.isclose(value(val.constant), 0, rel_tol=COEFF_MATCH_REL_TOL, abs_tol=COEFF_MATCH_ABS_TOL):
                pass
            else:
                successful_matching = False
                robust_infeasible = True
        if val.linear_coefs is not None:
            for coeff in val.linear_coefs:
                if type(coeff) not in native_types:
                    temp_expr = replace_expressions(coeff, substitution_map=var_to_param_substitution_map_reverse)
                    if temp_expr.is_potentially_variable():
                        model.coefficient_matching_constraints.add(expr=temp_expr == 0)
                    elif math.isclose(value(temp_expr), 0, rel_tol=COEFF_MATCH_REL_TOL, abs_tol=COEFF_MATCH_ABS_TOL):
                        pass
                    else:
                        successful_matching = False
                        robust_infeasible = True
                elif math.isclose(value(coeff), 0, rel_tol=COEFF_MATCH_REL_TOL, abs_tol=COEFF_MATCH_ABS_TOL):
                    pass
                else:
                    successful_matching = False
                    robust_infeasible = True
        if val.quadratic_coefs:
            for coeff in val.quadratic_coefs:
                if type(coeff) not in native_types:
                    temp_expr = replace_expressions(coeff, substitution_map=var_to_param_substitution_map_reverse)
                    if temp_expr.is_potentially_variable():
                        model.coefficient_matching_constraints.add(expr=temp_expr == 0)
                    elif math.isclose(value(temp_expr), 0, rel_tol=COEFF_MATCH_REL_TOL, abs_tol=COEFF_MATCH_ABS_TOL):
                        pass
                    else:
                        successful_matching = False
                        robust_infeasible = True
                elif math.isclose(value(coeff), 0, rel_tol=COEFF_MATCH_REL_TOL, abs_tol=COEFF_MATCH_ABS_TOL):
                    pass
                else:
                    successful_matching = False
                    robust_infeasible = True
        if val.nonlinear_expr is not None:
            successful_matching = False
            robust_infeasible = False

        if successful_matching:
            model.util.h_x_q_constraints.add(constraint)

    for i in range(len(list(variables_in_constraint))):
        model.del_component("p_%s" % i)

    for i in range(len(list(params_in_constraint))):
        model.del_component("x_%s" % i)

    model.del_component("swapped_constraints")
    model.del_component("swapped_constraints_index")

    return successful_matching, robust_infeasible
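A small illustration (not PyROS code) of the idea implemented above: after swapping, generate_standard_repn exposes the coefficients of each monomial in the uncertain parameters, and each non-constant coefficient must vanish for the equality to hold for every realization.

from pyomo.environ import ConcreteModel, Param, Var
from pyomo.repn import generate_standard_repn

m = ConcreteModel()
m.x1 = Param(initialize=1, mutable=True)  # first-stage quantity, treated as a Param after the swap
m.x2 = Param(initialize=1, mutable=True)
m.q = Var(initialize=1)                   # uncertain parameter, treated as a Var after the swap

# h(x, q) = (x1 - 1)*q + x2 == 0 must hold for all q, so both coefficients must be zero
repn = generate_standard_repn((m.x1 - 1) * m.q + m.x2, compute_values=False)
print(repn.constant)      # expected: the coefficient matching condition x2 == 0
print(repn.linear_coefs)  # expected: the coefficient matching condition x1 - 1 == 0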
Example no. 22
    def _validate_and_expand_connector_set(self, connectors):
        ref = {}
        # First, go through the connectors and get the superset of all fields
        for c in connectors:
            for k, v in iteritems(c.vars):
                if k in ref:
                    # We have already seen this var
                    continue
                if v is None:
                    # This is an implicit var
                    continue
                # OK: New var, so add it to the reference list
                _len = (
                    #-3 if v is None else
                    -2 if k in c.aggregators else
                    -1 if not hasattr(v, 'is_indexed') or not v.is_indexed()
                    else len(v))
                ref[k] = (v, _len, c)

        if not ref:
            logger.warning(
                "Cannot identify a reference connector: no connectors "
                "in the connector set have assigned variables:\n\t(%s)" %
                (', '.join(sorted(c.name for c in connectors)), ))
            return ref

        # Now make sure that connectors match
        empty_or_partial = []
        for c in connectors:
            c_is_partial = False
            if not c.vars:
                # This is an empty connector and should be defined with
                # "auto" vars
                empty_or_partial.append(c)
                continue

            for k, v in iteritems(ref):
                if k not in c.vars:
                    raise ValueError(
                        "Connector mismatch: Connector '%s' missing variable "
                        "'%s' (appearing in reference connector '%s')" %
                        (c.name, k, v[2].name))
                _v = c.vars[k]
                if _v is None:
                    if not c_is_partial:
                        empty_or_partial.append(c)
                        c_is_partial = True
                    continue
                _len = (-3 if _v is None else -2 if k in c.aggregators else
                        -1 if not hasattr(_v, 'is_indexed')
                        or not _v.is_indexed() else len(_v))
                if (_len >= 0) ^ (v[1] >= 0):
                    raise ValueError(
                        "Connector mismatch: Connector variable '%s' mixing "
                        "indexed and non-indexed targets on connectors '%s' "
                        "and '%s'" % (k, v[2].name, c.name))
                if _len >= 0 and _len != v[1]:
                    raise ValueError(
                        "Connector mismatch: Connector variable '%s' index "
                        "mismatch (%s elements in reference connector '%s', "
                        "but %s elements in connector '%s')" %
                        (k, v[1], v[2].name, _len, c.name))
                if v[1] >= 0 and len(v[0].index_set() ^ _v.index_set()):
                    raise ValueError(
                        "Connector mismatch: Connector variable '%s' has "
                        "mismatched indices on connectors '%s' and '%s'" %
                        (k, v[2].name, c.name))

        # as we are adding things to the model, sort by key so that
        # the order things are added is deterministic
        sorted_refs = sorted(iteritems(ref))
        if len(empty_or_partial) > 1:
            # This is expensive (names aren't cheap), but does result in
            # a deterministic ordering
            empty_or_partial.sort(key=lambda x: x.getname(
                fully_qualified=True, name_buffer=self._name_buffer))

        # Fill in any empty connectors
        for c in empty_or_partial:
            block = c.parent_block()
            for k, v in sorted_refs:
                if k in c.vars and c.vars[k] is not None:
                    continue

                if v[1] >= 0:
                    idx = (v[0].index_set(), )
                else:
                    idx = ()
                var_args = {}
                try:
                    var_args['domain'] = v[0].domain
                except AttributeError:
                    pass
                try:
                    var_args['bounds'] = v[0].bounds
                except AttributeError:
                    pass
                new_var = Var(*idx, **var_args)
                block.add_component(
                    '%s.auto.%s' %
                    (c.getname(fully_qualified=True,
                               name_buffer=self._name_buffer), k), new_var)
                if idx:
                    for i in idx[0]:
                        new_var[i].domain = v[0][i].domain
                        new_var[i].setlb(v[0][i].lb)
                        new_var[i].setub(v[0][i].ub)
                c.vars[k] = new_var

        return ref
Example no. 23
def ROSolver_iterative_solve(model_data, config):
    '''
    GRCS algorithm implementation
    :param model_data: ROSolveData object with deterministic model information
    :param config: ConfigBlock for the instance being solved
    '''

    # === The "violation" e.g. uncertain parameter values added to the master problem are nominal in iteration 0
    #     User can supply a nominal_uncertain_param_vals if they want to set nominal to a certain point,
    #     Otherwise, the default init value for the params is used as nominal_uncertain_param_vals
    violation = list(p for p in config.nominal_uncertain_param_vals)

    # === Do coefficient matching
    constraints = [
        c for c in model_data.working_model.component_data_objects(Constraint)
        if c.equality and c not in ComponentSet(
            model_data.working_model.util.decision_rule_eqns)
    ]
    model_data.working_model.util.h_x_q_constraints = ComponentSet()
    for c in constraints:
        coeff_matching_success, robust_infeasible = coefficient_matching(
            model=model_data.working_model,
            constraint=c,
            uncertain_params=model_data.working_model.util.uncertain_params,
            config=config)
        if not coeff_matching_success and not robust_infeasible:
            raise ValueError(
                "Equality constraint \"%s\" cannot be guaranteed to be robustly feasible, "
                "given the current partitioning between first-stage, second-stage and state variables. "
                "You might consider editing this constraint to reference some second-stage "
                "and/or state variable(s)." % c.name)
        elif not coeff_matching_success and robust_infeasible:
            config.progress_logger.info(
                "PyROS has determined that the model is robust infeasible. "
                "One reason for this is that equality constraint \"%s\" cannot be satisfied "
                "against all realizations of uncertainty, "
                "given the current partitioning between first-stage, second-stage and state variables. "
                "You might consider editing this constraint to reference some (additional) second-stage "
                "and/or state variable(s)." % c.name)
            return None, None
        else:
            pass

    # h(x,q) == 0 becomes h'(x) == 0
    for c in model_data.working_model.util.h_x_q_constraints:
        c.deactivate()

    # === Build the master problem and master problem data container object
    master_data = master_problem_methods.initial_construct_master(model_data)

    # === If using p_robustness, add ConstraintList for additional constraints
    if config.p_robustness:
        master_data.master_model.p_robust_constraints = ConstraintList()

    # === Add scenario_0
    master_data.master_model.scenarios[0, 0].transfer_attributes_from(
        master_data.original.clone())
    if len(master_data.master_model.scenarios[
            0, 0].util.uncertain_params) != len(violation):
        raise ValueError

    # === Set the nominal uncertain parameters to the violation values
    for i, v in enumerate(violation):
        master_data.master_model.scenarios[
            0, 0].util.uncertain_params[i].value = v

    # === Add objective function (assuming minimization of costs) with nominal second-stage costs
    if config.objective_focus is ObjectiveType.nominal:
        master_data.master_model.obj = Objective(
            expr=master_data.master_model.scenarios[0,
                                                    0].first_stage_objective +
            master_data.master_model.scenarios[0, 0].second_stage_objective)
    elif config.objective_focus is ObjectiveType.worst_case:
        # === Worst-case cost objective
        master_data.master_model.zeta = Var(initialize=value(
            master_data.master_model.scenarios[0, 0].first_stage_objective +
            master_data.master_model.scenarios[0, 0].second_stage_objective))
        master_data.master_model.obj = Objective(
            expr=master_data.master_model.zeta)
        master_data.master_model.scenarios[0, 0].epigraph_constr = Constraint(
            expr=master_data.master_model.scenarios[0,
                                                    0].first_stage_objective +
            master_data.master_model.scenarios[0, 0].second_stage_objective <=
            master_data.master_model.zeta)
        master_data.master_model.scenarios[
            0,
            0].util.first_stage_variables.append(master_data.master_model.zeta)

    # === Add deterministic constraints to ComponentSet on original so that these become part of separation model
    master_data.original.util.deterministic_constraints = \
        ComponentSet(c for c in master_data.original.component_data_objects(Constraint, descend_into=True))

    # === Make separation problem model once before entering the solve loop
    separation_model = separation_problem_methods.make_separation_problem(
        model_data=master_data, config=config)

    # === Create separation problem data container object and add information to catalog during solve
    separation_data = SeparationProblemData()
    separation_data.separation_model = separation_model
    separation_data.points_separated = [
    ]  # contains last point separated in the separation problem
    separation_data.points_added_to_master = [
        config.nominal_uncertain_param_vals
    ]  # explicitly robust against in master
    separation_data.constraint_violations = [
    ]  # list of constraint violations for each iteration
    separation_data.total_global_separation_solves = 0  # number of times global solve is used
    separation_data.timing = master_data.timing  # timing object

    # === Keep track of subsolver termination statuses from each iteration
    separation_data.separation_problem_subsolver_statuses = []

    # === Nominal information
    nominal_data = Block()
    nominal_data.nom_fsv_vals = []
    nominal_data.nom_ssv_vals = []
    nominal_data.nom_first_stage_cost = 0
    nominal_data.nom_second_stage_cost = 0
    nominal_data.nom_obj = 0

    # === Time information
    timing_data = Block()
    timing_data.total_master_solve_time = 0
    timing_data.total_separation_local_time = 0
    timing_data.total_separation_global_time = 0
    timing_data.total_dr_polish_time = 0

    dr_var_lists_original = []
    dr_var_lists_polished = []

    k = 0
    while config.max_iter == -1 or k < config.max_iter:
        master_data.iteration = k

        # === Add p-robust constraint if iteration > 0
        if k > 0 and config.p_robustness:
            master_problem_methods.add_p_robust_constraint(
                model_data=master_data, config=config)

        # === Solve Master Problem
        config.progress_logger.info("PyROS working on iteration %s..." % k)
        master_soln = master_problem_methods.solve_master(
            model_data=master_data, config=config)
        #config.progress_logger.info("Done solving Master Problem!")
        master_soln.master_problem_subsolver_statuses = []

        # === Keep track of total time and subsolver termination conditions
        timing_data.total_master_solve_time += get_time_from_solver(
            master_soln.results)
        timing_data.total_master_solve_time += get_time_from_solver(
            master_soln.feasibility_problem_results)

        master_soln.master_problem_subsolver_statuses.append(
            master_soln.results.solver.termination_condition)

        # === Check for robust infeasibility or error or time-out in master problem solve
        if master_soln.master_subsolver_results[
                1] is pyrosTerminationCondition.robust_infeasible:
            term_cond = pyrosTerminationCondition.robust_infeasible
            output_logger(config=config, robust_infeasible=True)
        elif master_soln.pyros_termination_condition is pyrosTerminationCondition.subsolver_error:
            term_cond = pyrosTerminationCondition.subsolver_error
        else:
            term_cond = None
        if term_cond == pyrosTerminationCondition.subsolver_error or \
                term_cond == pyrosTerminationCondition.robust_infeasible:
            update_grcs_solve_data(pyros_soln=model_data,
                                   k=k,
                                   term_cond=term_cond,
                                   nominal_data=nominal_data,
                                   timing_data=timing_data,
                                   separation_data=separation_data,
                                   master_soln=master_soln)
            return model_data, []
        # === Check if time limit reached
        elapsed = get_main_elapsed_time(model_data.timing)
        if config.time_limit:
            if elapsed >= config.time_limit:
                output_logger(config=config, time_out=True, elapsed=elapsed)
                update_grcs_solve_data(
                    pyros_soln=model_data,
                    k=k,
                    term_cond=pyrosTerminationCondition.time_out,
                    nominal_data=nominal_data,
                    timing_data=timing_data,
                    separation_data=separation_data,
                    master_soln=master_soln)
                return model_data, []

        # === Save nominal information
        if k == 0:
            for val in master_soln.fsv_vals:
                nominal_data.nom_fsv_vals.append(val)

            for val in master_soln.ssv_vals:
                nominal_data.nom_ssv_vals.append(val)

            nominal_data.nom_first_stage_cost = master_soln.first_stage_objective
            nominal_data.nom_second_stage_cost = master_soln.second_stage_objective
            nominal_data.nom_obj = value(master_data.master_model.obj)

        if (
                # === Decision rule polishing (do not polish on first iteration if no ssv or if decision_rule_order = 0)
            (config.decision_rule_order != 0
             and len(config.second_stage_variables) > 0 and k != 0)):
            # === Save initial values of DR vars to file
            for varslist in master_data.master_model.scenarios[
                    0, 0].util.decision_rule_vars:
                vals = []
                for dvar in varslist.values():
                    vals.append(dvar.value)
                dr_var_lists_original.append(vals)

            polishing_results = master_problem_methods.minimize_dr_vars(
                model_data=master_data, config=config)
            timing_data.total_dr_polish_time += get_time_from_solver(
                polishing_results)

            #=== Save after polish
            for varslist in master_data.master_model.scenarios[
                    0, 0].util.decision_rule_vars:
                vals = []
                for dvar in varslist.values():
                    vals.append(dvar.value)
                dr_var_lists_polished.append(vals)

        # === Set up for the separation problem
        separation_data.opt_fsv_vals = [
            v.value for v in master_soln.master_model.scenarios[
                0, 0].util.first_stage_variables
        ]
        separation_data.opt_ssv_vals = master_soln.ssv_vals

        # === Provide master model scenarios to separation problem for initialization options
        separation_data.master_scenarios = master_data.master_model.scenarios

        if config.objective_focus is ObjectiveType.worst_case:
            separation_model.util.zeta = value(master_soln.master_model.obj)

        # === Solve Separation Problem
        separation_data.iteration = k
        separation_data.master_nominal_scenario = master_data.master_model.scenarios[
            0, 0]

        separation_data.master_model = master_data.master_model

        separation_solns, violating_realizations, constr_violations, is_global, \
            local_sep_time, global_sep_time = \
                separation_problem_methods.solve_separation_problem(model_data=separation_data, config=config)

        for sep_soln_list in separation_solns:
            for s in sep_soln_list:
                separation_data.separation_problem_subsolver_statuses.append(
                    s.termination_condition)

        if is_global:
            separation_data.total_global_separation_solves += 1

        timing_data.total_separation_local_time += local_sep_time
        timing_data.total_separation_global_time += global_sep_time

        separation_data.constraint_violations.append(constr_violations)

        if not any(s.found_violation for solve_data_list in separation_solns
                   for s in solve_data_list):
            separation_data.points_separated = []
        else:
            separation_data.points_separated = violating_realizations

        # === Check if time limit reached
        elapsed = get_main_elapsed_time(model_data.timing)
        if config.time_limit:
            if elapsed >= config.time_limit:
                output_logger(config=config, time_out=True, elapsed=elapsed)
                termination_condition = pyrosTerminationCondition.time_out
                update_grcs_solve_data(pyros_soln=model_data,
                                       k=k,
                                       term_cond=termination_condition,
                                       nominal_data=nominal_data,
                                       timing_data=timing_data,
                                       separation_data=separation_data,
                                       master_soln=master_soln)
                return model_data, separation_solns

        # === Check if we exit due to solver returning unsatisfactory statuses (not in permitted_termination_conditions)
        local_solve_term_conditions = {
            TerminationCondition.optimal, TerminationCondition.locallyOptimal,
            TerminationCondition.globallyOptimal
        }
        global_solve_term_conditions = {
            TerminationCondition.optimal, TerminationCondition.globallyOptimal
        }
        if (is_global and any((s.termination_condition not in global_solve_term_conditions)
                                  for sep_soln_list in separation_solns for s in sep_soln_list)) or \
            (not is_global and any((s.termination_condition not in local_solve_term_conditions)
                                  for sep_soln_list in separation_solns for s in sep_soln_list)):
            termination_condition = pyrosTerminationCondition.subsolver_error
            update_grcs_solve_data(pyros_soln=model_data,
                                   k=k,
                                   term_cond=termination_condition,
                                   nominal_data=nominal_data,
                                   timing_data=timing_data,
                                   separation_data=separation_data,
                                   master_soln=master_soln)
            return model_data, separation_solns

        # === Check if we terminate due to robust optimality or feasibility
        if not any(s.found_violation for sep_soln_list in separation_solns
                   for s in sep_soln_list) and is_global:
            if config.solve_master_globally and config.objective_focus is ObjectiveType.worst_case:
                output_logger(config=config, robust_optimal=True)
                termination_condition = pyrosTerminationCondition.robust_optimal
            else:
                output_logger(config=config, robust_feasible=True)
                termination_condition = pyrosTerminationCondition.robust_feasible
            update_grcs_solve_data(pyros_soln=model_data,
                                   k=k,
                                   term_cond=termination_condition,
                                   nominal_data=nominal_data,
                                   timing_data=timing_data,
                                   separation_data=separation_data,
                                   master_soln=master_soln)
            return model_data, separation_solns

        # === Add block to master at violation
        master_problem_methods.add_scenario_to_master(master_data,
                                                      violating_realizations)
        separation_data.points_added_to_master.append(violating_realizations)

        k += 1

    output_logger(config=config, max_iter=True)
    update_grcs_solve_data(pyros_soln=model_data,
                           k=k,
                           term_cond=pyrosTerminationCondition.max_iter,
                           nominal_data=nominal_data,
                           timing_data=timing_data,
                           separation_data=separation_data,
                           master_soln=master_soln)

    # === In this case we still return the final solution objects for the last iteration
    return model_data, separation_solns
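A hedged, user-level sketch of how this GRCS loop is normally driven through the documented PyROS interface; the tiny model, the solver names (ipopt, baron), and the box uncertainty set are illustrative assumptions, not part of the code above.

import pyomo.environ as pyo
import pyomo.contrib.pyros as pyros

m = pyo.ConcreteModel()
m.q = pyo.Param(initialize=1.0, mutable=True)   # uncertain parameter
m.x = pyo.Var(bounds=(0, 10))                   # first-stage variable
m.z = pyo.Var(bounds=(0, 10))                   # second-stage variable
m.con = pyo.Constraint(expr=m.z >= m.q * m.x + 1)
m.obj = pyo.Objective(expr=m.x + m.z)

results = pyo.SolverFactory("pyros").solve(
    model=m,
    first_stage_variables=[m.x],
    second_stage_variables=[m.z],
    uncertain_params=[m.q],
    uncertainty_set=pyros.BoxSet(bounds=[(0.8, 1.2)]),
    local_solver=pyo.SolverFactory("ipopt"),   # assumed available
    global_solver=pyo.SolverFactory("baron"),  # assumed available
    objective_focus=pyros.ObjectiveType.worst_case,
)
print(results.pyros_termination_condition)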
Example no. 24
 def _dualize(self, block, unfixed=[]):
     """
     Generate the dual of a block
     """
     #
     # Collect linear terms from the block
     #
     A, b_coef, c_rhs, c_sense, d_sense, vnames, cnames, v_domain = collect_linear_terms(
         block, unfixed)
     #
     # Construct the block
     #
     if isinstance(block, Model):
         dual = ConcreteModel()
     else:
         dual = Block()
     for v, is_indexed in vnames:
         if is_indexed:
             setattr(dual, v + '_Index', Set(dimen=None))
             setattr(dual, v, Var(getattr(dual, v + '_Index')))
         else:
             setattr(dual, v, Var())
     for cname, is_indexed in cnames:
         if is_indexed:
             setattr(dual, cname + '_Index', Set(dimen=None))
             setattr(dual, cname,
                     Constraint(getattr(dual, cname + '_Index')))
             setattr(dual, cname + '_lower_',
                     Var(getattr(dual, cname + '_Index')))
             setattr(dual, cname + '_upper_',
                     Var(getattr(dual, cname + '_Index')))
         else:
             setattr(dual, cname, Constraint())
             setattr(dual, cname + '_lower_', Var())
             setattr(dual, cname + '_upper_', Var())
     dual.construct()
     #
     # Add variables
     #
     # TODO: revisit this hack.  We shouldn't be calling
     # _getitem_when_not_present()
     #
     for name, ndx in b_coef:
         v = getattr(dual, name)
         if not ndx in v:
             v._getitem_when_not_present(ndx)
     #
     # Construct the objective
     #
     if d_sense == minimize:
         dual.o = Objective(expr=sum(-b_coef[name, ndx] *
                                     getattr(dual, name)[ndx]
                                     for name, ndx in b_coef),
                            sense=d_sense)
     else:
         dual.o = Objective(expr=sum(b_coef[name, ndx] *
                                     getattr(dual, name)[ndx]
                                     for name, ndx in b_coef),
                            sense=d_sense)
     #
     # Construct the constraints
     #
     for cname in A:
         c = getattr(dual, cname)
         c_index = getattr(dual, cname +
                           "_Index") if c.is_indexed() else None
         for ndx, terms in iteritems(A[cname]):
             if not c_index is None and not ndx in c_index:
                 c_index.add(ndx)
             expr = 0
             for term in terms:
                 v = getattr(dual, term.var)
                 if not term.ndx in v:
                     v.add(term.ndx)
                 expr += term.coef * v[term.ndx]
             if not (cname, ndx) in c_rhs:
                 c_rhs[cname, ndx] = 0.0
             if c_sense[cname, ndx] == 'e':
                 c.add(ndx, expr - c_rhs[cname, ndx] == 0)
             elif c_sense[cname, ndx] == 'l':
                 c.add(ndx, expr - c_rhs[cname, ndx] <= 0)
             else:
                 c.add(ndx, expr - c_rhs[cname, ndx] >= 0)
         for (name, ndx), domain in iteritems(v_domain):
             v = getattr(dual, name)
             flag = type(ndx) is tuple and (ndx[-1] == 'lb'
                                            or ndx[-1] == 'ub')
             if domain == 1:
                 if flag:
                     v[ndx].domain = NonNegativeReals
                 else:
                     v.domain = NonNegativeReals
             elif domain == -1:
                 if flag:
                     v[ndx].domain = NonPositiveReals
                 else:
                     v.domain = NonPositiveReals
             else:
                 if flag:
                     # TODO: verify that this case is possible
                     v[ndx].domain = Reals
                 else:
                     v.domain = Reals
     return dual
Example no. 25
                  initialize=lambda m, k: 0.62 if k <= 21 else 0.35)

# --------------------------------------------------------------------------------------------------------------
#: First define differential state variables (state: x, ic-Param: x_ic, derivative-Var: xdot)
#: States (differential) section

def __m_init(m, i, k):
    #: Liquid hold-up initial guess: large on tray 1, small on the top tray,
    #: and an intermediate value on the internal trays.
    if k == 1:
        return 104340.
    elif k == m.Ntray:
        return 5000.
    else:
        return 4000.

#: Liquid hold-up
mod.M = Var(mod.t, mod.tray, initialize=__m_init)
#: Mole-fraction
mod.x = Var(mod.t, mod.tray, initialize=lambda m, i, k: 0.999 * k / m.Ntray)

#: Initial state-Param
mod.M_ic = Param(mod.tray, default=0.0, mutable=True)
mod.x_ic = Param(mod.tray, default=0.0, mutable=True)

#:  Derivative-var
mod.Mdot = DerivativeVar(mod.M, initialize=0.0)
mod.xdot = DerivativeVar(mod.x, initialize=0.0)
# --------------------------------------------------------------------------------------------------------------
# States (algebraic) section
# Tray temperature
mod.T = Var(mod.t, mod.tray,
            initialize=lambda m, i, k: ((370.781 - 335.753) / m.Ntray) * k + 370.781)
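As a minimal, self-contained sketch of the same differential-state pattern (all names here are illustrative and not taken from the column model above): declare a state on a ContinuousSet, attach a DerivativeVar, and then discretize in time with collocation.

from pyomo.environ import ConcreteModel, Var, Constraint, TransformationFactory
from pyomo.dae import ContinuousSet, DerivativeVar

m = ConcreteModel()
m.t = ContinuousSet(bounds=(0, 1))
m.M = Var(m.t, initialize=4000.0)        # a hold-up-like differential state
m.Mdot = DerivativeVar(m.M, wrt=m.t)     # its time derivative

# Simple first-order dynamics as a stand-in: dM/dt = -0.1 * M
m.ode = Constraint(m.t, rule=lambda mod, t: mod.Mdot[t] == -0.1 * mod.M[t])

# Discretize in time with orthogonal collocation on Radau points
TransformationFactory('dae.collocation').apply_to(
    m, wrt=m.t, nfe=10, ncp=3, scheme='LAGRANGE-RADAU')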
Example n. 26
0
def solveropfnlp_2(ppc, solver="ipopt"):
    if solver == "ipopt":
        opt = SolverFactory("ipopt", executable="/home/iso/PycharmProjects/opfLC_python3/Python3/py_solvers/ipopt-linux64/ipopt")
    if solver == "bonmin":
        opt = SolverFactory("bonmin", executable="/home/iso/PycharmProjects/opfLC_python3/Python3/py_solvers/bonmin-linux64/bonmin")
    if solver == "knitro":
        opt = SolverFactory("knitro", executable="D:/ICT/Artelys/Knitro 10.2.1/knitroampl/knitroampl")

    ppc = ext2int(ppc)      # convert to continuous indexing starting from 0

    # Gather information about the system
    # =============================================================
    baseMVA, bus, gen, branch = \
        ppc["baseMVA"], ppc["bus"], ppc["gen"], ppc["branch"]

    nb = bus.shape[0]       # number of buses
    ng = gen.shape[0]       # number of generators
    nl = branch.shape[0]    # number of lines

    # generator buses
    gb = tolist(np.array(gen[:, GEN_BUS]).astype(int))

    sb = find((bus[:, BUS_TYPE] == REF))    # slack bus index
    fr = branch[:, F_BUS].astype(int)       # from bus indices
    to = branch[:, T_BUS].astype(int)       # to bus indices

    tr = branch[:, TAP]     # transformation ratios
    tr[find(tr == 0)] = 1   # set transformation ratios that are 0 to 1

    r = branch[:, BR_R]     # branch resistances
    x = branch[:, BR_X]     # branch reactances
    b = branch[:, BR_B]     # branch susceptances

    start_time = time.perf_counter()    # time.clock() was removed in Python 3.8

    # Admittance matrix computation
    # =============================================================
    y = makeYbus(baseMVA, bus, branch)[0]   # admittance matrix

    yk = 1./(r+x*1j)                        # branch admittance
    yft = yk + 0.5j*b                       # branch admittance + susceptance
    gk = yk.real                            # branch resistance
    yk = yk/tr                              # include /tr in yk

    # Optimization
    # =============================================================
    branch[find(branch[:, RATE_A] == 0), RATE_A] = 9999     # set undefined Sflow limit to 9999
    Smax = branch[:, RATE_A] / baseMVA                      # Max. Sflow

    # Power demand parameters
    Pd = bus[:, PD] / baseMVA
    Qd = bus[:, QD] / baseMVA

    # Max and min Pg and Qg
    Pg_max = zeros(nb)
    Pg_max[gb] = gen[:, PMAX] / baseMVA
    Pg_min = zeros(nb)
    Pg_min[gb] = gen[:, PMIN] / baseMVA
    Qg_max = zeros(nb)
    Qg_max[gb] = gen[:, QMAX] / baseMVA
    Qg_min = zeros(nb)
    Qg_min[gb] = gen[:, QMIN] / baseMVA

    # Vmax and Vmin vectors
    Vmax = bus[:, VMAX]
    Vmin = bus[:, VMIN]

    vm = bus[:, VM]
    va = bus[:, VA]*pi/180

    # create a new optimization model
    model = ConcreteModel()

    # Define sets
    # ------------
    model.bus = Set(ordered=True, initialize=range(nb))     # Set of all buses
    model.gen = Set(ordered=True, initialize=gb)                # Set of buses with generation
    model.line = Set(ordered=True, initialize=range(nl))    # Set of all lines

    # Define variables
    # -----------------
    # Voltage magnitudes vector (vm)
    model.vm = Var(model.bus)

    # Voltage angles vector (va)
    model.va = Var(model.bus)

    # Reactive power generation of synchronous machines (SM) (Qg)
    model.Qg = Var(model.gen)
    Qg0 = zeros(nb)
    Qg0[gb] = gen[:, QG]/baseMVA

    # Active power generation of synchronous machines (SM) (Pg)
    model.Pg = Var(model.gen)
    Pg0 = zeros(nb)
    Pg0[gb] = gen[:, PG] / baseMVA

    # Active and reactive power at the "from" end of all branches
    model.Pf = Var(model.line)
    model.Qf = Var(model.line)

    # Active and reactive power at the "to" end of all branches
    model.Pt = Var(model.line)
    model.Qt = Var(model.line)

    # Warm start the problem
    # ------------------------
    for i in range(nb):
        model.vm[i] = vm[i]
        model.va[i] = va[i]
        if i in gb:
            model.Pg[i] = Pg0[i]
            model.Qg[i] = Qg0[i]
    for i in range(nl):
        model.Pf[i] = vm[fr[i]] ** 2 * abs(yft[i]) / (tr[i] ** 2) * np.cos(-ang(yft[i])) -\
                      vm[fr[i]] * vm[to[i]] * abs(yk[i]) * np.cos(va[fr[i]] - va[to[i]] - ang(yk[i]))
        model.Qf[i] = vm[fr[i]] ** 2 * abs(yft[i]) / (tr[i] ** 2) * np.sin(-ang(yft[i])) -\
                      vm[fr[i]] * vm[to[i]] * abs(yk[i]) * np.sin(va[fr[i]] - va[to[i]] - ang(yk[i]))
        model.Pt[i] = vm[to[i]] ** 2 * abs(yft[i]) * np.cos(-ang(yft[i])) -\
                      vm[to[i]] * vm[fr[i]] * abs(yk[i]) * np.cos(va[to[i]] - va[fr[i]] - ang(yk[i]))
        model.Qt[i] = vm[to[i]] ** 2 * abs(yft[i]) * np.sin(-ang(yft[i])) -\
                      vm[to[i]] * vm[fr[i]] * abs(yk[i]) * np.sin(va[to[i]] - va[fr[i]] - ang(yk[i]))

    # Define constraints
    # ----------------------------

    # Equalities:
    # ------------

    # Active power flow equalities
    def powerflowact(model, i):
        if i in gb:
            return model.Pg[i]-Pd[i] == sum(model.vm[i]*model.vm[j]*abs(y[i, j]) *
                                            cos(model.va[i] - model.va[j] - ang(y[i, j])) for j in range(nb))
        else:
            return sum(model.vm[i]*model.vm[j]*abs(y[i, j]) * cos(model.va[i] - model.va[j] -
                                                                  ang(y[i, j])) for j in range(nb)) == -Pd[i]

    model.const1 = Constraint(model.bus, rule=powerflowact)

    # Reactive power flow equalities
    def powerflowreact(model, i):
        if i in gb:
            return model.Qg[i]-Qd[i] == sum(model.vm[i]*model.vm[j]*abs(y[i, j]) *
                                            sin(model.va[i] - model.va[j] - ang(y[i, j])) for j in range(nb))
        else:
            return sum(model.vm[i]*model.vm[j]*abs(y[i, j]) * sin(model.va[i] - model.va[j] -
                                                                  ang(y[i, j])) for j in range(nb)) == -Qd[i]

    model.const2 = Constraint(model.bus, rule=powerflowreact)

    # Active power from
    def pfrom(model, i):
        return model.Pf[i] == model.vm[fr[i]] ** 2 * abs(yft[i]) / (tr[i] ** 2) * np.cos(-ang(yft[i])) - \
                              model.vm[fr[i]] * model.vm[to[i]] * abs(yk[i]) * \
                              cos(model.va[fr[i]] - model.va[to[i]] - ang(yk[i]))

    model.const3 = Constraint(model.line, rule=pfrom)

    # Reactive power from
    def qfrom(model, i):
        return model.Qf[i] == model.vm[fr[i]] ** 2 * abs(yft[i]) / (tr[i] ** 2) * np.sin(-ang(yft[i])) - \
                              model.vm[fr[i]] * model.vm[to[i]] * abs(yk[i]) * \
                              sin(model.va[fr[i]] - model.va[to[i]] - ang(yk[i]))

    model.const4 = Constraint(model.line, rule=qfrom)

    # Active power to
    def pto(model, i):
        return model.Pt[i] == model.vm[to[i]] ** 2 * abs(yft[i]) * np.cos(-ang(yft[i])) - \
                              model.vm[to[i]] * model.vm[fr[i]] * abs(yk[i]) * \
                              cos(model.va[to[i]] - model.va[fr[i]] - ang(yk[i]))

    model.const5 = Constraint(model.line, rule=pto)

    # Reactive power to
    def qto(model, i):
        return model.Qt[i] == model.vm[to[i]] ** 2 * abs(yft[i]) * np.sin(-ang(yft[i])) - \
                              model.vm[to[i]] * model.vm[fr[i]] * abs(yk[i]) * \
                              sin(model.va[to[i]] - model.va[fr[i]] - ang(yk[i]))

    model.const6 = Constraint(model.line, rule=qto)

    # Slack bus phase angle
    model.const7 = Constraint(expr=model.va[sb[0]] == 0)

    # Inequalities:
    # ----------------

    # Active power generator limits Pg_min <= Pg <= Pg_max
    def genplimits(model, i):
        return Pg_min[i] <= model.Pg[i] <= Pg_max[i]

    model.const8 = Constraint(model.gen, rule=genplimits)

    # Reactive power generator limits Qg_min <= Qg <= Qg_max
    def genqlimits(model, i):
        return Qg_min[i] <= model.Qg[i] <= Qg_max[i]

    model.const9 = Constraint(model.gen, rule=genqlimits)

    # Voltage constraints ( Vmin <= V <= Vmax )
    def vlimits(model, i):
        return Vmin[i] <= model.vm[i] <= Vmax[i]

    model.const10 = Constraint(model.bus, rule=vlimits)

    # Sfrom line limit
    def sfrommax(model, i):
        return model.Pf[i]**2 + model.Qf[i]**2 <= Smax[i]**2

    model.const11 = Constraint(model.line, rule=sfrommax)

    # Sto line limit
    def stomax(model, i):
        return model.Pt[i]**2 + model.Qt[i]**2 <= Smax[i]**2

    model.const12 = Constraint(model.line, rule=stomax)

    # Set objective function
    # ------------------------
    def obj_fun(model):
        return sum(gk[i] * ((model.vm[fr[i]] / tr[i])**2 + model.vm[to[i]]**2 -
                         2/tr[i] * model.vm[fr[i]] * model.vm[to[i]] *
                         cos(model.va[fr[i]] - model.va[to[i]])) for i in range(nl))

    model.obj = Objective(rule=obj_fun, sense=minimize)

    mt = time.perf_counter() - start_time           # Modeling time

    # Execute solve command with the selected solver
    # ------------------------------------------------
    start_time = time.perf_counter()
    results = opt.solve(model, tee=True)
    et = time.perf_counter() - start_time           # Elapsed time
    print(results)

    # Update the case info with the optimized variables
    # ==================================================
    for i in range(nb):
        bus[i, VM] = model.vm[i].value              # Bus voltage magnitudes
        bus[i, VA] = model.va[i].value*180/pi       # Bus voltage angles
    # Include Pf - Qf - Pt - Qt in the branch matrix
    branchsol = zeros((nl, 17))
    branchsol[:, :-4] = branch
    for i in range(nl):
        branchsol[i, PF] = model.Pf[i].value * baseMVA
        branchsol[i, QF] = model.Qf[i].value * baseMVA
        branchsol[i, PT] = model.Pt[i].value * baseMVA
        branchsol[i, QT] = model.Qt[i].value * baseMVA
    # Update gen matrix variables
    for i in range(ng):
        gen[i, PG] = model.Pg[gb[i]].value * baseMVA
        gen[i, QG] = model.Qg[gb[i]].value * baseMVA
        gen[i, VG] = bus[gb[i], VM]
    # Convert to external (original) numbering and save case results
    ppc = int2ext(ppc)
    ppc['bus'][:, 1:] = bus[:, 1:]
    branchsol[:, 0:2] = ppc['branch'][:, 0:2]
    ppc['branch'] = branchsol
    ppc['gen'][:, 1:] = gen[:, 1:]
    ppc['obj'] = value(obj_fun(model))
    ppc['ploss'] = value(obj_fun(model)) * baseMVA
    ppc['et'] = et
    ppc['mt'] = mt
    ppc['success'] = 1

    # ppc solved case is returned
    return ppc
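A possible driver for this function, assuming a PYPOWER-style case dict such as the bundled case9 and that the hard-coded solver executables above point to solvers actually installed locally (the call below is a sketch, not taken from the original script):

from pypower.api import case9   # small 9-bus test case shipped with PYPOWER

ppc_out = solveropfnlp_2(case9(), solver="ipopt")
print("Loss objective (p.u.):", ppc_out['obj'])
print("Active power losses (MW):", ppc_out['ploss'])
print("Solver time (s):", ppc_out['et'])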
Example n. 27
0
#: if steady == True, fall back to a steady-state computation
mod = ConcreteModel()
#: mod.nfe_t = nfe_t  #:
#: mod.ncp_t = ncp_t
mod.discretized = False
ncstr = 1
mod.ncstr = Set(initialize=[i for i in range(0, ncstr)])

mod.t = ContinuousSet(bounds=(0, 1))

mod.Cainb = Param(default=1.0)
mod.Tinb = Param(default=275.0)
# mod.Tjinb = Param(default=250.0)

#: Our control var
mod.Tjinb = Var(mod.t, initialize=250)
mod.u1 = Param(mod.t, default=250,
               mutable=True)  #: We are making a sort-of port


def u1_rule(m, i):
    return m.Tjinb[i] == m.u1[i]


# mod.u1_cdummy = Constraint(mod.t, rule=lambda m, i: m.Tjinb[i] == mod.u1[i])
mod.u1_cdummy = Constraint(mod.t, rule=u1_rule)
#: u1 will contain the information from the NMPC problem. This is what drives the plant.
#: how about something like nmpc_u1 or u1_nmpc

mod.V = Param(initialize=100)
mod.UA = Param(initialize=20000 * 60)
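A short sketch (an assumed continuation, with illustrative values) of how a mutable input Param such as u1 is typically driven once the model is complete: discretize the time set, then write the controller's values into u1, which pins Tjinb through the u1_cdummy constraint at solve time.

from pyomo.environ import TransformationFactory

# Discretize the continuous time set (backward finite differences as an example).
TransformationFactory('dae.finite_difference').apply_to(
    mod, wrt=mod.t, nfe=10, scheme='BACKWARD')

# Push an input trajectory (here a constant) into the mutable parameter.
for t in mod.t:
    mod.u1[t] = 260.0   # e.g. a jacket inlet temperature supplied by the NMPC layer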
Example n. 28
0
    def __init__(self, **kwargs):
        NmpcGen.__init__(self, **kwargs)
        self.int_file_mhe_suf = int(time.time())-1

        # Need a list of relevant measurements y

        self.y = kwargs.pop('y', [])
        self.y_vars = kwargs.pop('y_vars', {})

        # Need a list of relevant noisy states z

        self.x_noisy = kwargs.pop('x_noisy', [])
        self.x_vars = kwargs.pop('x_vars', {})
        self.deact_ics = kwargs.pop('del_ics', True)
        self.diag_Q_R = kwargs.pop('diag_QR', True)  #: By default use diagonal matrices for Q and R matrices
        self.u = kwargs.pop('u', [])
        self.IgnoreProcessNoise = kwargs.pop('IgnoreProcessNoise', False)


        print("-" * 120)
        print("I[[create_lsmhe]] lsmhe (full) model created.")
        print("-" * 120)
        nstates = sum(len(self.x_vars[x]) for x in self.x_noisy)

        self.journalizer("I", self._c_it, "MHE with \t", str(nstates) + " states")
        self.journalizer("I", self._c_it, "MHE with \t", str(nstates*self.nfe_t*self.ncp_t) + " noise vars")
        self.lsmhe = self.d_mod(self.nfe_t, self.ncp_t, _t=self._t)
        self.lsmhe.name = "LSMHE (Least-Squares MHE)"
        self.lsmhe.create_bounds()
        #: create x_pi constraint

        #: Create list of noisy-states vars
        self.xkN_l = []
        self.xkN_nexcl = []
        self.xkN_key = {}
        k = 0
        for x in self.x_noisy:
            n_s = getattr(self.lsmhe, x)  #: Noisy-state
            for jth in self.x_vars[x]:  #: the jth variable
                self.xkN_l.append(n_s[(1, 0) + jth])
                self.xkN_nexcl.append(1)  #: non-exclusion list for active bounds
                self.xkN_key[(x, jth)] = k
                k += 1

        self.lsmhe.xkNk_mhe = Set(initialize=[i for i in range(0, len(self.xkN_l))])  #: Create set of noisy_states
        self.lsmhe.x_0_mhe = Param(self.lsmhe.xkNk_mhe, initialize=0.0, mutable=True)  #: Prior-state
        self.lsmhe.wk_mhe = Param(self.lsmhe.fe_t, self.lsmhe.cp_ta, self.lsmhe.xkNk_mhe, initialize=0.0) \
            if self.IgnoreProcessNoise else Expression(self.lsmhe.fe_t, self.lsmhe.cp_ta, self.lsmhe.xkNk_mhe)  #: Model disturbance
        self.lsmhe.PikN_mhe = Param(self.lsmhe.xkNk_mhe, self.lsmhe.xkNk_mhe,
                                initialize=lambda m, i, ii: 1. if i == ii else 0.0, mutable=True)  #: Prior-Covariance
        self.lsmhe.Q_mhe = Param(range(1, self.nfe_t), self.lsmhe.xkNk_mhe, initialize=1, mutable=True) if self.diag_Q_R\
            else Param(range(1, self.nfe_t), self.lsmhe.xkNk_mhe, self.lsmhe.xkNk_mhe,
                             initialize=lambda m, t, i, ii: 1. if i == ii else 0.0, mutable=True)  #: Disturbance-weight
        j = 0
        for i in self.x_noisy:
            de_exp = getattr(self.lsmhe, "de_" + i)
            for k in self.x_vars[i]:
                for tfe in range(1, self.nfe_t+1):
                    for tcp in range(1, self.ncp_t + 1):
                        self.lsmhe.wk_mhe[tfe, tcp, j].set_value(de_exp[(tfe, tcp) + k]._body)
                        de_exp[(tfe, tcp) + k].deactivate()
                j += 1



        #: Create list of measurements vars
        self.yk_l = {}
        self.yk_key = {}
        k = 0
        self.yk_l[1] = []
        for y in self.y:
            m_v = getattr(self.lsmhe, y)  #: Measured "state"
            for jth in self.y_vars[y]:  #: the jth variable
                self.yk_l[1].append(m_v[(1, self.ncp_t) + jth])
                self.yk_key[(y, jth)] = k  #: The key needs to be created only once, that is why the loop was split
                k += 1

        for t in range(2, self.nfe_t + 1):
            self.yk_l[t] = []
            for y in self.y:
                m_v = getattr(self.lsmhe, y)  #: Measured "state"
                for jth in self.y_vars[y]:  #: the jth variable
                    self.yk_l[t].append(m_v[(t, self.ncp_t) + jth])

        self.lsmhe.ykk_mhe = Set(initialize=[i for i in range(0, len(self.yk_l[1]))])  #: Create set of measured_vars
        self.lsmhe.nuk_mhe = Var(self.lsmhe.fe_t, self.lsmhe.ykk_mhe, initialize=0.0)   #: Measurement noise
        self.lsmhe.yk0_mhe = Param(self.lsmhe.fe_t, self.lsmhe.ykk_mhe, initialize=1.0, mutable=True)
        self.lsmhe.hyk_c_mhe = Constraint(self.lsmhe.fe_t, self.lsmhe.ykk_mhe,
                                          rule=lambda mod, t, i:
                                          mod.yk0_mhe[t, i] - self.yk_l[t][i] - mod.nuk_mhe[t, i] == 0.0)
        self.lsmhe.hyk_c_mhe.deactivate()
        self.lsmhe.R_mhe = Param(self.lsmhe.fe_t, self.lsmhe.ykk_mhe, initialize=1.0, mutable=True) if self.diag_Q_R else \
            Param(self.lsmhe.fe_t, self.lsmhe.ykk_mhe, self.lsmhe.ykk_mhe,
                             initialize=lambda mod, t, i, ii: 1.0 if i == ii else 0.0, mutable=True)
        f = open("file_cv.txt", "w")
        f.close()

        #: Constraints for the input noise
        for u in self.u:
            # cv = getattr(self.lsmhe, u)  #: Get the param
            # c_val = [value(cv[i]) for i in cv.keys()]  #: Current value
            # self.lsmhe.del_component(cv)  #: Delete the param
            # self.lsmhe.add_component(u + "_mhe", Var(self.lsmhe.fe_t, initialize=lambda m, i: c_val[i-1]))
            self.lsmhe.add_component("w_" + u + "_mhe", Var(self.lsmhe.fe_t, initialize=0.0))  #: Noise for input
            self.lsmhe.add_component("w_" + u + "c_mhe", Constraint(self.lsmhe.fe_t))
            self.lsmhe.equalize_u(direction="r_to_u")
            # cc = getattr(self.lsmhe, u + "_c")  #: Get the constraint for input
            con_w = getattr(self.lsmhe, "w_" + u + "c_mhe")  #: Get the constraint-noisy
            var_w = getattr(self.lsmhe, "w_" + u + "_mhe")  #: Get the constraint-noisy
            ce = getattr(self.lsmhe, u + "_e")  #: Get the expression
            cp = getattr(self.lsmhe, u)  #: Get the param

            con_w.rule = lambda m, i: cp[i] == ce[i] + var_w[i]
            con_w.reconstruct()
            con_w.deactivate()

            # con_w.rule = lambda m, i: cp[i] == cv[i] + var_w[i]
            # con_w.reconstruct()
            # with open("file_cv.txt", "a") as f:
            #     cc.pprint(ostream=f)
            #     con_w.pprint(ostream=f)
                # f.close()

        self.lsmhe.U_mhe = Param(range(1, self.nfe_t + 1), self.u, initialize=1, mutable=True)

        #: Deactivate icc constraints
        if self.deact_ics:
            pass
            # for i in self.states:
            #     self.lsmhe.del_component(i + "_icc")
        #: Maybe only for a subset of the states
        else:
            for i in self.states:
                if i in self.x_noisy:
                    ic_con = getattr(self.lsmhe, i + "_icc")
                    for k in self.x_vars[i]:
                        ic_con[k].deactivate()

        #: Put the noise in the continuation equations (finite-element)
        j = 0
        self.lsmhe.noisy_cont = ConstraintList()
        for i in self.x_noisy:
            # cp_con = getattr(self.lsmhe, "cp_" + i)
            cp_exp = getattr(self.lsmhe, "noisy_" + i)
            # self.lsmhe.del_component(cp_con)
            for k in self.x_vars[i]:  #: This should keep the same order
                for t in range(1, self.nfe_t):
                    self.lsmhe.noisy_cont.add(cp_exp[t, k] == 0.0)
                    # self.lsmhe.noisy_cont.add(cp_exp[t, k] == 0.0)
                j += 1
            # cp_con.reconstruct()
        j = 0
        self.lsmhe.noisy_cont.deactivate()

        #: Expressions for the objective function (least-squares)
        self.lsmhe.Q_e_mhe = 0.0 if self.IgnoreProcessNoise else Expression(
            expr=0.5 * sum(
                sum(
                    sum(self.lsmhe.Q_mhe[1, k] * self.lsmhe.wk_mhe[i, j, k]**2 for k in self.lsmhe.xkNk_mhe) for j in range(1, self.ncp_t +1))
                for i in range(1, self.nfe_t+1))) if self.diag_Q_R else Expression(
            expr=sum(sum(self.lsmhe.wk_mhe[i, j] *
                         sum(self.lsmhe.Q_mhe[i, j, k] * self.lsmhe.wk_mhe[i, 1, k] for k in self.lsmhe.xkNk_mhe)
                         for j in self.lsmhe.xkNk_mhe) for i in range(1, self.nfe_t)))

        self.lsmhe.R_e_mhe = Expression(
            expr=0.5 * sum(
                sum(
                    self.lsmhe.R_mhe[i, k] * self.lsmhe.nuk_mhe[i, k]**2 for k in self.lsmhe.ykk_mhe)
                for i in self.lsmhe.fe_t)) if self.diag_Q_R else Expression(
            expr=sum(sum(self.lsmhe.nuk_mhe[i, j] *
                         sum(self.lsmhe.R_mhe[i, j, k] * self.lsmhe.nuk_mhe[i, k] for k in self.lsmhe.ykk_mhe)
                         for j in self.lsmhe.ykk_mhe) for i in self.lsmhe.fe_t))
        expr_u_obf = 0
        for i in self.lsmhe.fe_t:
            for u in self.u:
                var_w = getattr(self.lsmhe, "w_" + u + "_mhe")  #: Get the constraint-noisy
                expr_u_obf += self.lsmhe.U_mhe[i, u] * var_w[i] ** 2

        self.lsmhe.U_e_mhe = Expression(expr=0.5 * expr_u_obf)  # how about this
        # with open("file_cv.txt", "a") as f:
        #     self.lsmhe.U_e_mhe.pprint(ostream=f)
        #     f.close()

        self.lsmhe.Arrival_e_mhe = Expression(
            expr=0.5 * sum((self.xkN_l[j] - self.lsmhe.x_0_mhe[j]) *
                     sum(self.lsmhe.PikN_mhe[j, k] * (self.xkN_l[k] - self.lsmhe.x_0_mhe[k]) for k in self.lsmhe.xkNk_mhe)
                     for j in self.lsmhe.xkNk_mhe))

        self.lsmhe.Arrival_dummy_e_mhe = Expression(
            expr=100000.0 * sum((self.xkN_l[j] - self.lsmhe.x_0_mhe[j]) ** 2 for j in self.lsmhe.xkNk_mhe))

        self.lsmhe.obfun_dum_mhe_deb = Objective(sense=minimize,
                                             expr=self.lsmhe.Q_e_mhe)
        self.lsmhe.obfun_dum_mhe = Objective(sense=minimize,
                                             expr=self.lsmhe.R_e_mhe + self.lsmhe.Q_e_mhe + self.lsmhe.U_e_mhe) # no arrival
        self.lsmhe.obfun_dum_mhe.deactivate()

        self.lsmhe.obfun_mhe_first = Objective(sense=minimize,
                                         expr=self.lsmhe.Arrival_dummy_e_mhe + self.lsmhe.Q_e_mhe)
        self.lsmhe.obfun_mhe_first.deactivate()


        self.lsmhe.obfun_mhe = Objective(sense=minimize,
                                         expr=self.lsmhe.Arrival_dummy_e_mhe + self.lsmhe.R_e_mhe + self.lsmhe.Q_e_mhe + self.lsmhe.U_e_mhe)
        self.lsmhe.obfun_mhe.deactivate()

        # with open("file_cv.txt", "a") as f:
        #     self.lsmhe.obfun_mhe.pprint(ostream=f)
        #     f.close()

        self._PI = {}  #: Container of the KKT matrix
        self.xreal_W = {}
        self.curr_m_noise = {}   #: Current measurement noise
        self.curr_y_offset = {}  #: Current offset of measurement
        for y in self.y:
            for j in self.y_vars[y]:
                self.curr_m_noise[(y, j)] = 0.0
                self.curr_y_offset[(y, j)] = 0.0

        self.s_estimate = {}
        self.s_real = {}
        for x in self.x_noisy:
            self.s_estimate[x] = []
            self.s_real[x] = []

        self.y_estimate = {}
        self.y_real = {}
        self.y_noise_jrnl = {}
        self.yk0_jrnl = {}
        for y in self.y:
            self.y_estimate[y] = []
            self.y_real[y] = []
            self.y_noise_jrnl[y] = []
            self.yk0_jrnl[y] = []
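In compact form, the least-squares pieces assembled above combine (with diagonal Q, R, U weights and prior weight \Pi) into the usual MHE cost; this is a paraphrase of the code, not a quote from a reference:

    J = 1/2 (x_0 - \bar{x}_0)^T \Pi (x_0 - \bar{x}_0)
        + 1/2 \sum_{i,j,k} Q_k \, w_{i,j,k}^2
        + 1/2 \sum_{i,k} R_{i,k} \, \nu_{i,k}^2
        + 1/2 \sum_{i,u} U_{i,u} \, (w^u_i)^2

with i over finite elements, j over collocation points, k over noisy states (for w) or measured variables (for \nu), and u over inputs. Note that obfun_mhe uses the simpler quadratic Arrival_dummy_e_mhe term in place of the \Pi-weighted arrival cost.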
Example n. 29
0
    def test_indexed_by(self):
        m = ConcreteModel()
        m.time = ContinuousSet(bounds=(0, 10))
        m.space = ContinuousSet(bounds=(0, 10))
        m.set = Set(initialize=['a', 'b', 'c'])
        m.set2 = Set(initialize=[('a', 1), ('b', 2)])
        m.v = Var()
        m.v1 = Var(m.time)
        m.v2 = Var(m.time, m.space)
        m.v3 = Var(m.set, m.space, m.time)
        m.v4 = Var(m.time, m.set2)
        m.v5 = Var(m.set2, m.time, m.space)

        @m.Block()
        def b(b):
            b.v = Var()
            b.v1 = Var(m.time)
            b.v2 = Var(m.time, m.space)
            b.v3 = Var(m.set, m.space, m.time)

        @m.Block(m.time)
        def b1(b):
            b.v = Var()
            b.v1 = Var(m.space)
            b.v2 = Var(m.space, m.set)

        @m.Block(m.time, m.space)
        def b2(b):
            b.v = Var()
            b.v1 = Var(m.set)

            @b.Block()
            def b(bl):
                bl.v = Var()
                bl.v1 = Var(m.set)
                bl.v2 = Var(m.time)

        @m.Block(m.set2, m.time)
        def b3(b):
            b.v = Var()
            b.v1 = Var(m.space)

            @b.Block(m.space)
            def b(bb):
                bb.v = Var(m.set)

        disc = TransformationFactory('dae.collocation')
        disc.apply_to(m, wrt=m.time, nfe=5, ncp=2, scheme='LAGRANGE-RADAU')
        disc.apply_to(m, wrt=m.space, nfe=5, ncp=2, scheme='LAGRANGE-RADAU')

        self.assertFalse(is_explicitly_indexed_by(m.v, m.time))
        self.assertTrue(is_explicitly_indexed_by(m.b.v2, m.space))
        self.assertTrue(is_explicitly_indexed_by(m.b.v3, m.time, m.space))

        self.assertFalse(is_in_block_indexed_by(m.v1, m.time))
        self.assertFalse(is_in_block_indexed_by(m.v2, m.set))
        self.assertTrue(is_in_block_indexed_by(m.b1[m.time[1]].v2, m.time))

        self.assertTrue(is_in_block_indexed_by(
            m.b2[m.time[1], m.space[1]].b.v1, m.time))
        self.assertTrue(is_in_block_indexed_by(
            m.b2[m.time[1], m.space[1]].b.v2, m.time))
        self.assertTrue(is_explicitly_indexed_by(
            m.b2[m.time[1], m.space[1]].b.v2, m.time))
        self.assertFalse(is_in_block_indexed_by(
            m.b2[m.time[1], m.space[1]].b.v1, m.set))

        self.assertFalse(is_in_block_indexed_by(
            m.b2[m.time[1], m.space[1]].b.v1, 
            m.space, stop_at=m.b2[m.time[1], m.space[1]]))

        # Explicit indexing with multi-dimensional set:
        self.assertTrue(is_explicitly_indexed_by(m.v4, m.time, m.set2))
        self.assertTrue(is_explicitly_indexed_by(m.v5, m.time, m.set2, m.space))

        # Implicit indexing with multi-dimensional set:
        self.assertTrue(is_in_block_indexed_by(
            m.b3['a', 1, m.time[1]].v, m.set2))
        self.assertTrue(is_in_block_indexed_by(
            m.b3['a', 1, m.time[1]].v, m.time))
        self.assertTrue(is_in_block_indexed_by(
            m.b3['a', 1, m.time[1]].v1[m.space[1]], m.set2))
        self.assertFalse(is_in_block_indexed_by(
            m.b3['a', 1, m.time[1]].v1[m.space[1]], m.space))
        self.assertTrue(is_in_block_indexed_by(
            m.b3['b', 2, m.time[2]].b[m.space[2]].v['b'], m.set2))
        self.assertTrue(is_in_block_indexed_by(
            m.b3['b', 2, m.time[2]].b[m.space[2]].v['b'], m.time))
        self.assertTrue(is_in_block_indexed_by(
            m.b3['b', 2, m.time[2]].b[m.space[2]].v['b'], m.space))
        self.assertFalse(is_in_block_indexed_by(
            m.b3['b', 2, m.time[2]].b[m.space[2]].v['b'], m.set))
        self.assertFalse(is_in_block_indexed_by(
            m.b3['b', 2, m.time[2]].b[m.space[2]].v['b'], m.time,
            stop_at=m.b3['b', 2, m.time[2]]))
        self.assertFalse(is_in_block_indexed_by(
            m.b3['b', 2, m.time[2]].b[m.space[2]].v['b'], m.time,
            stop_at=m.b3))
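The two helpers exercised above each take a component plus one or more sets; a tiny standalone sketch follows (the import path pyomo.dae.set_utils is an assumption and may differ between Pyomo versions):

from pyomo.environ import ConcreteModel, Var, Set
from pyomo.dae import ContinuousSet
from pyomo.dae.set_utils import is_explicitly_indexed_by, is_in_block_indexed_by  # assumed path

m = ConcreteModel()
m.time = ContinuousSet(bounds=(0, 1))
m.s = Set(initialize=['a', 'b'])
m.v = Var(m.time, m.s)

assert is_explicitly_indexed_by(m.v, m.time)      # v is indexed directly by time
assert not is_in_block_indexed_by(m.v, m.time)    # v does not live on a time-indexed block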
Example n. 30
0
 def b2(b, t):
     b.v = Var(initialize=2)