Example #1
    def set_as_constraint(self, uncertain_params, **kwargs):
        """
        Function to generate constraints for the FactorModelSet uncertainty set.

        Args:
            uncertain_params: uncertain parameter objects for writing constraint objects
        """
        model = kwargs['model']

        # === Ensure dimensions
        if len(uncertain_params) != len(self.origin):
                raise AttributeError("Dimensions of origin and uncertain_param lists must be equal.")

        # Create the auxiliary variable 'cassi', with one entry per factor
        n = list(range(self.number_of_factors))
        model.util.cassi = Var(n, initialize=0, bounds=(-1, 1))

        conlist = ConstraintList()
        conlist.construct()

        disturbances = [sum(self.psi_mat[i][j] * model.util.cassi[j] for j in n)
                        for i in range(len(uncertain_params))]

        # Make n equality constraints
        for i in range(len(uncertain_params)):
            conlist.add(self.origin[i] + disturbances[i] == uncertain_params[i])
        conlist.add(sum(model.util.cassi[i] for i in n) <= +self.beta * self.number_of_factors)
        conlist.add(sum(model.util.cassi[i] for i in n) >= -self.beta * self.number_of_factors)
        return conlist
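A minimal usage sketch for the snippet above, assuming a PyROS-style FactorModelSet whose constructor populates the origin, number_of_factors, psi_mat, and beta attributes used here; the model must carry a util block, since set_as_constraint stores the auxiliary variable there:

from pyomo.environ import Block, ConcreteModel, Var

m = ConcreteModel()
m.util = Block()  # set_as_constraint attaches model.util.cassi here
m.q = Var(range(2), initialize=0)
fset = FactorModelSet(origin=[0.0, 0.0], number_of_factors=2,
                      psi_mat=[[1.0, 0.0], [0.0, 1.0]], beta=0.5)
m.uncertainty_cons = fset.set_as_constraint(
    uncertain_params=[m.q[0], m.q[1]], model=m)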
Example #2
    def set_as_constraint(self, uncertain_params, **kwargs):
        """
        Function to generate constraints for the CardinalitySet uncertainty set.

        Args:
            uncertain_params: uncertain parameter objects for writing constraint objects
        """

        # === Ensure dimensions
        if len(uncertain_params) != len(self.origin):
            raise AttributeError("Dimensions of origin and uncertain_param lists must be equal.")

        model = kwargs['model']
        set_i = list(range(len(uncertain_params)))
        model.util.cassi = Var(set_i, initialize=0, bounds=(0, 1))

        # Make n equality constraints
        conlist = ConstraintList()
        conlist.construct()
        for i in set_i:
            conlist.add(self.origin[i] + self.positive_deviation[i] * model.util.cassi[i] == uncertain_params[i])

        conlist.add(sum(model.util.cassi[i] for i in set_i) <= self.gamma)

        return conlist
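The same calling pattern applies here; a hedged sketch, with the constructor assumed to populate origin, positive_deviation, and gamma (the budget on how many parameters may deviate from the origin at once):

from pyomo.environ import Block, ConcreteModel, Var

m = ConcreteModel()
m.util = Block()  # holds model.util.cassi, as above
m.q = Var(range(2), initialize=0)
cset = CardinalitySet(origin=[0.0, 0.0], positive_deviation=[1.0, 2.0], gamma=1)
m.uncertainty_cons = cset.set_as_constraint(
    uncertain_params=[m.q[0], m.q[1]], model=m)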
Example #3
def generate_norm_constraint(fp_nlp, solve_data, config):
    """Generate the norm constraint for the FP-NLP subproblem.

    Parameters
    ----------
    fp_nlp : Pyomo model
        The feasibility pump NLP subproblem.
    solve_data : MindtPySolveData
        Data container that holds solve-instance data.
    config : ConfigBlock
        The specific configurations for MindtPy.
    """
    if config.fp_main_norm == 'L1':
        # TODO: check if we can access the block defined in FP-main problem
        generate_norm1_norm_constraint(fp_nlp,
                                       solve_data.mip,
                                       config,
                                       discrete_only=True)
    elif config.fp_main_norm == 'L2':
        fp_nlp.norm_constraint = Constraint(expr=sum(
            (nlp_var - mip_var.value)**2 - config.fp_norm_constraint_coef *
            (nlp_var.value - mip_var.value)**2 for nlp_var, mip_var in zip(
                fp_nlp.MindtPy_utils.discrete_variable_list,
                solve_data.mip.MindtPy_utils.discrete_variable_list)) <= 0)
    elif config.fp_main_norm == 'L_infinity':
        fp_nlp.norm_constraint = ConstraintList()
        rhs = config.fp_norm_constraint_coef * max(
            nlp_var.value - mip_var.value for nlp_var, mip_var in zip(
                fp_nlp.MindtPy_utils.discrete_variable_list,
                solve_data.mip.MindtPy_utils.discrete_variable_list))
        for nlp_var, mip_var in zip(
                fp_nlp.MindtPy_utils.discrete_variable_list,
                solve_data.mip.MindtPy_utils.discrete_variable_list):
            fp_nlp.norm_constraint.add(nlp_var - mip_var.value <= rhs)
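A standalone sketch of the L_infinity branch's linearization, with toy values standing in for the MindtPy variable lists and for config.fp_norm_constraint_coef (all names below are hypothetical):

from pyomo.environ import ConcreteModel, ConstraintList, Var

m = ConcreteModel()
m.y = Var(range(3), initialize=0)
mip_values = [1.0, 2.0, 0.0]   # stands in for mip_var.value
deviations = [0.4, 0.1, 0.2]   # stands in for nlp_var.value - mip_var.value
rhs = 0.9 * max(deviations)    # coefficient * max deviation, as in the code
m.norm_constraint = ConstraintList()
for i, target in enumerate(mip_values):
    m.norm_constraint.add(m.y[i] - target <= rhs)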
Example #4
    def __init__(self, maximize=False):
        """
        Parameters
        ----------
        maximize : Optional[bool]
            If True, then the objective sense is maximization.
        """
        import pyomo.environ
        self.model = ConcreteModel()
        self.model.o = ObjectiveList()
        self.model.c = ConstraintList()
        self._maximize = maximize
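Hypothetical use of the wrapper above; the enclosing class is not shown in the snippet, so the name _Model is an assumption:

from pyomo.environ import Var

builder = _Model(maximize=True)      # class name assumed
builder.model.x = Var(bounds=(0, 4))
builder.model.o.add(builder.model.x)        # ObjectiveList entry
builder.model.c.add(builder.model.x <= 3)   # ConstraintList entry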
Example #5
    def set_as_constraint(self, uncertain_params, **kwargs):
        """
        Function to generate constraints for the EllipsoidalSet uncertainty set.

        Args:
           uncertain_params: uncertain parameter objects for writing constraint objects
        """
        inv_covar = np.linalg.inv(self.shape_matrix)

        if len(uncertain_params) != len(self.center):
               raise AttributeError("Center of ellipsoid must be same dimensions as vector of uncertain parameters.")

        # Calculate row vector of differences
        diff = []
        # === Assume VarList uncertain_param_vars
        for idx, i in enumerate(uncertain_params):
            if uncertain_params[idx].is_indexed():
                for index in uncertain_params[idx]:
                    diff.append(uncertain_params[idx][index] - self.center[idx])
            else:
                diff.append(uncertain_params[idx] - self.center[idx])

        # Calculate inner product of difference vector and covar matrix
        product1 = [sum([x * y for x, y in zip(diff, column(inv_covar, i))]) for i in range(len(inv_covar))]
        constraint = sum([x * y for x, y in zip(product1, diff)])

        conlist = ConstraintList()
        conlist.construct()
        conlist.add(constraint <= self.scale)
        return conlist
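The same quadratic form, checked numerically for a single point; a sketch assuming scale 1 and the attribute names used above:

import numpy as np

center = np.array([1.0, 2.0])
shape_matrix = np.array([[2.0, 0.5], [0.5, 1.0]])
point = np.array([1.5, 2.5])
diff = point - center
lhs = diff @ np.linalg.inv(shape_matrix) @ diff
print(lhs <= 1.0)  # True: the point lies inside the scale-1 ellipsoid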
Example #6
    def set_as_constraint(self, uncertain_params, **kwargs):
        """
        Function to generate constraints for the AxisAlignedEllipsoid uncertainty set.

        Args:
           uncertain_params: uncertain parameter objects for writing constraint objects
        """
        if len(uncertain_params) != len(self.center):
               raise AttributeError("Center of ellipsoid must be same dimensions as vector of uncertain parameters.")
        # square and invert half lengths
        inverse_squared_half_lengths = list(1.0/(a**2) for a in self.half_lengths)
        # Calculate row vector of differences
        diff_squared = []
        # === Assume VarList uncertain_param_vars
        for idx, i in enumerate(uncertain_params):
            if uncertain_params[idx].is_indexed():
                for index in uncertain_params[idx]:
                    diff_squared.append((uncertain_params[idx][index] - self.center[idx])**2)
            else:
                diff_squared.append((uncertain_params[idx] - self.center[idx])**2)

        # Calculate inner product of difference vector and variance matrix
        constraint = sum([x * y for x, y in zip(inverse_squared_half_lengths, diff_squared)])

        conlist = ConstraintList()
        conlist.construct()
        conlist.add(constraint <= 1)
        return conlist
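The axis-aligned set is the diagonal special case of the general ellipsoid above; a quick sketch of the correspondence (names taken from the two snippets):

import numpy as np

half_lengths = [2.0, 0.5]
# Equivalent shape_matrix for the general EllipsoidalSet with scale=1:
shape_matrix = np.diag([a**2 for a in half_lengths])
print(np.linalg.inv(shape_matrix).diagonal())  # the inverse squared half-lengths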
Example #7
    def set_as_constraint(self, uncertain_params, **kwargs):
        """
        Function to generate constraints for the PolyhedralSet uncertainty set.

        Args:
            uncertain_params: uncertain parameter objects for writing constraint objects
        """

        # === Ensure valid dimensions of lhs and rhs w.r.t uncertain_params
        if np.asarray(self.coefficients_mat).shape[1] != len(uncertain_params):
            raise AttributeError("Columns of coefficients_mat matrix "
                                 "must equal length of uncertain parameters list.")

        set_i = list(range(len(self.coefficients_mat)))

        conlist = ConstraintList()
        conlist.construct()

        for i in set_i:
            constraint = 0
            for j in range(len(uncertain_params)):
                constraint += float(self.coefficients_mat[i][j]) * uncertain_params[j]
            conlist.add(constraint <= float(self.rhs_vec[i]))

        return conlist
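The polyhedral constraints encode the rows of A q <= b; a numeric membership check with toy data (array names mirror the attributes above):

import numpy as np

coefficients_mat = np.array([[1.0, 1.0], [-1.0, 0.0]])
rhs_vec = np.array([2.0, 0.0])
q = np.array([0.5, 1.0])
print(np.all(coefficients_mat @ q <= rhs_vec))  # True: q satisfies every row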
Example #8
    def set_as_constraint(self, uncertain_params, **kwargs):
        """
        Function to generate constraints for the IntersectedSet uncertainty set.
        Args:
            uncertain_params: list of uncertain param objects participating in the sets to be intersected
        """
        try:
            nlp_solver = kwargs["config"].global_solver
        except (AttributeError, KeyError):
            raise AttributeError("set_as_constraint for SetIntersection requires access to an NLP solver via "
                                 "the PyROS Solver config.")
        is_empty_intersection = self.is_empty_intersection(uncertain_params=uncertain_params, nlp_solver=nlp_solver)

        def _intersect(Q1, Q2):
            return self.intersect(Q1, Q2)

        if not is_empty_intersection:
            Qint = functools.reduce(_intersect, self.all_sets)

            if Qint.type == "discrete":
                return Qint.set_as_constraint(uncertain_params=uncertain_params)
            else:
                conlist = ConstraintList()
                conlist.construct()
                for q_set in Qint.all_sets:
                    for con in list(q_set.set_as_constraint(uncertain_params=uncertain_params).values()):
                        conlist.add(con.expr)
                return conlist
        else:
            raise AttributeError("Set intersection is empty, cannot proceed with PyROS robust optimization.")
Example #9
def generate_norm_constraint(fp_nlp, solve_data, config):
    if config.fp_main_norm == 'L1':
        # TODO: check if we can access the block defined in FP-main problem
        generate_norm1_norm_constraint(
            fp_nlp, solve_data.mip, config, discrete_only=True)
    elif config.fp_main_norm == 'L2':
        fp_nlp.norm_constraint = Constraint(expr=sum(
            (nlp_var - mip_var.value)**2 - config.fp_norm_constraint_coef *
            (nlp_var.value - mip_var.value)**2 for nlp_var, mip_var in zip(
                fp_nlp.MindtPy_utils.discrete_variable_list,
                solve_data.mip.MindtPy_utils.discrete_variable_list)) <= 0)
    elif config.fp_main_norm == 'L_infinity':
        fp_nlp.norm_constraint = ConstraintList()
        rhs = config.fp_norm_constraint_coef * max(
            nlp_var.value - mip_var.value for nlp_var, mip_var in zip(
                fp_nlp.MindtPy_utils.discrete_variable_list,
                solve_data.mip.MindtPy_utils.discrete_variable_list))
        for nlp_var, mip_var in zip(
                fp_nlp.MindtPy_utils.discrete_variable_list,
                solve_data.mip.MindtPy_utils.discrete_variable_list):
            fp_nlp.norm_constraint.add(nlp_var - mip_var.value <= rhs)
Example #10
    def intersect(Q1, Q2):
        """
        Binary function intersecting two UncertaintySet objects
        Args:
            Q1: uncertainty set 1
            Q2: uncertainty set 2
        """
        # === If either set is discrete, the intersection is the subset of its
        #     scenarios that also lie in the other set
        for a_set in (Q1, Q2):
            other = Q1 if a_set is Q2 else Q2
            if a_set.type == "discrete":
                intersected_scenarios = []
                for point in a_set.scenarios:
                    if other.point_in_set(point=point):
                        intersected_scenarios.append(point)
                return DiscreteScenarioSet(scenarios=intersected_scenarios)

        # === This case is if both sets are continuous
        return IntersectionSet(set1=Q1, set2=Q2)
Example #11
    def set_as_constraint(self, uncertain_params, **kwargs):
        """
        Function to generate constraints for the DiscreteScenarioSet uncertainty set.

        Args:
           uncertain_params: uncertain parameter objects for writing constraint objects
        """
        # === Ensure each scenario has the same dimensions as the uncertain parameters
        dim = len(uncertain_params)
        if any(len(d) != dim for d in self.scenarios):
            raise AttributeError("All scenarios must have same dimensions as uncertain parameters.")

        conlist = ConstraintList()
        conlist.construct()

        for n in range(len(self.scenarios)):
            for i in range(len(uncertain_params)):
                conlist.add(uncertain_params[i] == self.scenarios[n][i])

        conlist.deactivate()
        return conlist
Example #12
    def set_as_constraint(self, uncertain_params, **kwargs):
        """
        Function to generate constraints for the BoxSet uncertainty set.

        Args:
            uncertain_params: uncertain parameter objects for writing constraint objects
        """

        conlist = ConstraintList()
        conlist.construct()

        set_i = list(range(len(uncertain_params)))

        for i in set_i:
            conlist.add(uncertain_params[i] >= self.bounds[i][0])
            conlist.add(uncertain_params[i] <= self.bounds[i][1])

        return conlist
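A hedged usage sketch; a BoxSet whose bounds attribute holds (lower, upper) pairs matches the access pattern above, and attaching the returned ConstraintList follows the same idiom as the other sets:

from pyomo.environ import ConcreteModel, Var

m = ConcreteModel()
m.q = Var(range(2))
box = BoxSet(bounds=[(0.0, 1.0), (2.0, 3.0)])
m.box_cons = box.set_as_constraint(uncertain_params=[m.q[0], m.q[1]])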
Example #13
def build_model(skel_dict, project_dir) -> ConcreteModel:
    """
    Builds a pyomo model from a given saved skeleton dictionary
    """
    links = skel_dict["links"]
    positions = skel_dict["positions"]
    dofs = skel_dict["dofs"]
    markers = skel_dict["markers"]
    rot_dict = {}
    pose_dict = {}
    L = len(positions)

    phi = [sp.symbols(f"\\phi_{{{l}}}") for l in range(L)]
    theta = [sp.symbols(f"\\theta_{{{l}}}") for l in range(L)]
    psi = [sp.symbols(f"\\psi_{{{l}}}") for l in range(L)]

    i = 0
    for part in dofs:
        rot_dict[part] = sp.eye(3)
        if dofs[part][1]:
            rot_dict[part] = rot_y(theta[i]) @ rot_dict[part]
        if dofs[part][0]:
            rot_dict[part] = rot_x(phi[i]) @ rot_dict[part]
        if dofs[part][2]:
            rot_dict[part] = rot_z(psi[i]) @ rot_dict[part]

        rot_dict[part + "_i"] = rot_dict[part].T
        i += 1

    x, y, z = sp.symbols("x y z")
    dx, dy, dz = sp.symbols("\\dot{x} \\dot{y} \\dot{z}")
    ddx, ddy, ddz = sp.symbols("\\ddot{x} \\ddot{y} \\ddot{z}")

    for link in links:
        if len(link) == 1:
            pose_dict[link[0]] = sp.Matrix([x, y, z])
        else:
            if link[0] not in pose_dict:
                pose_dict[link[0]] = sp.Matrix([x, y, z])

            translation_vec = sp.Matrix([
                positions[link[1]][0] - positions[link[0]][0],
                positions[link[1]][1] - positions[link[0]][1],
                positions[link[1]][2] - positions[link[0]][2]
            ])
            rot_dict[link[1]] = rot_dict[link[1]] @ rot_dict[link[0]]
            rot_dict[link[1] + "_i"] = rot_dict[link[1] + "_i"].T
            pose_dict[link[1]] = pose_dict[
                link[0]] + rot_dict[link[0] + "_i"] @ translation_vec

    t_poses = []
    for pose in pose_dict:
        t_poses.append(pose_dict[pose].T)

    t_poses_mat = sp.Matrix(t_poses)

    func_map = {"sin": sin, "cos": cos, "ImmutableDenseMatrix": np.array}
    sym_list = [x, y, z, *phi, *theta, *psi]
    pose_to_3d = sp.lambdify(sym_list, t_poses_mat, modules=[func_map])
    pos_funcs = []

    for i in range(t_poses_mat.shape[0]):
        lamb = sp.lambdify(sym_list, t_poses_mat[i, :], modules=[func_map])
        pos_funcs.append(lamb)

    scene_path = os.path.join(project_dir, "4_cam_scene_static_sba.json")

    K_arr, D_arr, R_arr, t_arr, _ = utils.load_scene(scene_path)
    D_arr = D_arr.reshape((-1, 4))

    markers_dict = dict(enumerate(markers))

    print(f"\n\n\nLoading data")

    df_paths = sorted(glob.glob(os.path.join(project_dir, '*.h5')))
    points_2d_df = utils.create_dlc_points_2d_file(df_paths)
    print("2d df points:")
    print(points_2d_df)

    def get_meas_from_df(n, c, l, d):
        n_mask = points_2d_df["frame"] == n - 1
        l_mask = points_2d_df["marker"] == markers[l - 1]
        c_mask = points_2d_df["camera"] == c - 1
        d_idx = {1: "x", 2: "y"}
        val = points_2d_df[n_mask & l_mask & c_mask]
        return val[d_idx[d]].values[0]

    def get_likelihood_from_df(n, c, l):
        n_mask = points_2d_df["frame"] == n - 1
        if (markers[l - 1] == "neck"):
            return 0
        else:
            l_mask = points_2d_df["marker"] == markers[l - 1]
        c_mask = points_2d_df["camera"] == c - 1
        val = points_2d_df[n_mask & l_mask & c_mask]
        return val["likelihood"].values[0]

    h = 1 / 120  #timestep
    start_frame = 1600  # 50
    N = 50
    P = 3 + len(phi) + len(theta) + len(psi)
    L = len(pos_funcs)
    C = len(K_arr)
    D2 = 2
    D3 = 3

    proj_funcs = [pt3d_to_x2d, pt3d_to_y2d]

    R = 5  # measurement standard deviation
    Q = np.array([  # model parameters variance
        4.0, 7.0, 5.0, 13.0, 32.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 9.0, 18.0, 43.0, 53.0, 90.0, 118.0, 247.0, 186.0, 194.0,
        164.0, 295.0, 243.0, 334.0, 149.0, 26.0, 12.0, 0.0, 34.0, 43.0, 51.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
    ])**2

    triangulate_func = calib.triangulate_points_fisheye
    points_2d_filtered_df = points_2d_df[points_2d_df['likelihood'] > 0.5]
    points_3d_df = calib.get_pairwise_3d_points_from_df(
        points_2d_filtered_df, K_arr, D_arr, R_arr, t_arr, triangulate_func)
    print("3d points")
    print(points_3d_df)

    # estimate initial points
    nose_pts = points_3d_df[points_3d_df["marker"] == "forehead"][[
        "x", "y", "z", "frame"
    ]].values
    print(nose_pts[:, 3])
    print(nose_pts[:, 0])
    xs = stats.linregress(nose_pts[:, 3], nose_pts[:, 0])
    ys = stats.linregress(nose_pts[:, 3], nose_pts[:, 1])
    zs = stats.linregress(nose_pts[:, 3], nose_pts[:, 2])
    frame_est = np.arange(N)

    x_est = np.array([
        frame_est[i] * (xs.slope) + (xs.intercept)
        for i in range(len(frame_est))
    ])
    y_est = np.array([
        frame_est[i] * (ys.slope) + (ys.intercept)
        for i in range(len(frame_est))
    ])
    z_est = np.array([
        frame_est[i] * (zs.slope) + (zs.intercept)
        for i in range(len(frame_est))
    ])
    print(x_est)
    #print("x est shape:")
    #print(x_est.shape)
    psi_est = np.arctan2(ys.slope, xs.slope)

    print("Started Optimisation")
    m = ConcreteModel(name="Skeleton")

    # ===== SETS =====
    m.N = RangeSet(N)  #number of timesteps in trajectory
    m.P = RangeSet(P)  # number of pose parameters (x, y, z, phi_1..n, theta_1..n, psi_1..n)
    m.L = RangeSet(L)  #number of labels
    m.C = RangeSet(C)  #number of cameras
    m.D2 = RangeSet(D2)  # dimensionality of measurements
    m.D3 = RangeSet(D3)  # dimensionality of 3D points

    def init_meas_weights(model, n, c, l):
        likelihood = get_likelihood_from_df(n + start_frame, c, l)
        if likelihood > 0.5:
            return 1 / R
        else:
            return 0

    m.meas_err_weight = Param(
        m.N, m.C, m.L, initialize=init_meas_weights, mutable=True, within=Any
    )

    def init_model_weights(m, p):
        #if Q[p-1] != 0.0:
        #return 1/Q[p-1]
        #else:
        return 0.01

    m.model_err_weight = Param(m.P, initialize=init_model_weights, within=Any)

    m.h = h

    def init_measurements_df(m, n, c, l, d2):
        if (markers[l - 1] == "neck"):
            return Constraint.Skip
        else:
            return get_meas_from_df(n + start_frame, c, l, d2)

    m.meas = Param(m.N,
                   m.C,
                   m.L,
                   m.D2,
                   initialize=init_measurements_df,
                   within=Any)

    # ===== VARIABLES =====
    m.x = Var(m.N, m.P)  #position
    m.dx = Var(m.N, m.P)  #velocity
    m.ddx = Var(m.N, m.P)  #acceleration
    m.poses = Var(m.N, m.L, m.D3)
    m.slack_model = Var(m.N, m.P)
    m.slack_meas = Var(m.N, m.C, m.L, m.D2, initialize=0.0)

    # ===== VARIABLES INITIALIZATION =====
    init_x = np.zeros((N, P))
    init_x[:, 0] = x_est  #x
    init_x[:, 1] = y_est  #y
    init_x[:, 2] = z_est  #z
    #init_x[:,(3+len(pos_funcs)*2)] = psi_est #yaw - psi
    init_dx = np.zeros((N, P))
    init_ddx = np.zeros((N, P))
    for n in range(1, N + 1):
        for p in range(1, P + 1):
            if n < len(init_x):  #init using known values
                m.x[n, p].value = init_x[n - 1, p - 1]
                m.dx[n, p].value = init_dx[n - 1, p - 1]
                m.ddx[n, p].value = init_ddx[n - 1, p - 1]
            else:  #init using last known value
                m.x[n, p].value = init_x[-1, p - 1]
                m.dx[n, p].value = init_dx[-1, p - 1]
                m.ddx[n, p].value = init_ddx[-1, p - 1]
        #init pose
        var_list = [m.x[n, p].value for p in range(1, P + 1)]
        for l in range(1, L + 1):
            [pos] = pos_funcs[l - 1](*var_list)
            for d3 in range(1, D3 + 1):
                m.poses[n, l, d3].value = pos[d3 - 1]

    # ===== CONSTRAINTS =====
    # 3D POSE
    def pose_constraint(m, n, l, d3):
        #get 3d points
        var_list = [m.x[n, p] for p in range(1, P + 1)]
        [pos] = pos_funcs[l - 1](*var_list)
        return pos[d3 - 1] == m.poses[n, l, d3]

    m.pose_constraint = Constraint(m.N, m.L, m.D3, rule=pose_constraint)

    def backwards_euler_pos(m, n, p):  # position
        if n > 1:
            #             return m.x[n,p] == m.x[n-1,p] + m.h*m.dx[n-1,p] + m.h**2 * m.ddx[n-1,p]/2
            return m.x[n, p] == m.x[n - 1, p] + m.h * m.dx[n, p]

        else:
            return Constraint.Skip

    m.integrate_p = Constraint(m.N, m.P, rule=backwards_euler_pos)

    def backwards_euler_vel(m, n, p):  # velocity
        if n > 1:
            return m.dx[n, p] == m.dx[n - 1, p] + m.h * m.ddx[n, p]
        else:
            return Constraint.Skip

    m.integrate_v = Constraint(m.N, m.P, rule=backwards_euler_vel)

    m.angs = ConstraintList()
    for n in range(1, N):
        for i in range(3, 3 * len(positions)):
            m.angs.add(expr=(abs(m.x[n, i]) <= np.pi / 2))

    # MODEL
    def constant_acc(m, n, p):
        if n > 1:
            return m.ddx[n, p] == m.ddx[n - 1, p] + m.slack_model[n, p]
        else:
            return Constraint.Skip

    m.constant_acc = Constraint(m.N, m.P, rule=constant_acc)

    # MEASUREMENT
    def measurement_constraints(m, n, c, l, d2):
        #project
        K, D, R, t = K_arr[c - 1], D_arr[c - 1], R_arr[c - 1], t_arr[c - 1]
        x, y, z = m.poses[n, l, 1], m.poses[n, l, 2], m.poses[n, l, 3]
        if (markers[l - 1] == "neck"):
            return Constraint.Skip
        else:
            return proj_funcs[d2 - 1](
                x, y, z, K, D, R,
                t) - m.meas[n, c, l, d2] - m.slack_meas[n, c, l, d2] == 0

    m.measurement = Constraint(m.N,
                               m.C,
                               m.L,
                               m.D2,
                               rule=measurement_constraints)

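    # The objective below trades off model error (slack_model, i.e. deviations
    # from the constant-acceleration assumption) against reprojection error
    # (slack_meas), with the redescending loss down-weighting outliers.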
    def obj(m):
        slack_model_err = 0.0
        slack_meas_err = 0.0
        for n in range(1, N + 1):
            #Model Error
            for p in range(1, P + 1):
                slack_model_err += m.model_err_weight[p] * m.slack_model[n, p]**2
            #Measurement Error
            for l in range(1, L + 1):
                for c in range(1, C + 1):
                    for d2 in range(1, D2 + 1):
                        slack_meas_err += redescending_loss(
                            m.meas_err_weight[n, c, l] *
                            m.slack_meas[n, c, l, d2], 3, 10, 20)
        return slack_meas_err + slack_model_err

    m.obj = Objective(rule=obj)

    return (m, pose_to_3d)
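A hypothetical driver for the model above, assuming skel_dict and project_dir are already loaded; the solver choice and options are assumptions (IPOPT is a common pick for NLPs like this, though the abs() terms in m.angs make the problem non-smooth, which some NLP solvers handle poorly):

from pyomo.environ import SolverFactory

m, pose_to_3d = build_model(skel_dict, project_dir)
opt = SolverFactory("ipopt")
opt.options["max_iter"] = 10000
results = opt.solve(m, tee=True)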
Example #14
def solve_fp_subproblem(solve_data, config):
    """
    Solves the feasibility pump NLP subproblem.

    This function sets up the 'fp_nlp' by relaxing integer variables,
    precomputing dual values, deactivating trivial constraints, and then
    solving the NLP model.

    Parameters
    ----------
    solve_data : MindtPySolveData
        Data container that holds solve-instance data.
    config : ConfigBlock
        The specific configurations for MindtPy.

    Returns
    -------
    fp_nlp : Pyomo model
        The feasibility pump NLP subproblem.
    results : Pyomo results object
        Result from solving the FP-NLP subproblem.
    """

    fp_nlp = solve_data.working_model.clone()
    MindtPy = fp_nlp.MindtPy_utils
    config.logger.info('FP-NLP %s: Solve feasibility pump NLP subproblem.'
                       % (solve_data.fp_iter,))

    # Set up NLP
    fp_nlp.MindtPy_utils.objective_list[-1].deactivate()
    if solve_data.objective_sense == minimize:
        fp_nlp.improving_objective_cut = Constraint(
            expr=fp_nlp.MindtPy_utils.objective_value <= solve_data.UB)
    else:
        fp_nlp.improving_objective_cut = Constraint(
            expr=fp_nlp.MindtPy_utils.objective_value >= solve_data.LB)

    # Add norm_constraint, which guarantees the monotonicity of the norm
    # objective value sequence across iterations.
    # Ref: paper 'A storm of feasibility pumps for nonconvex MINLP'.
    # The norm type is consistent with the norm objective of the FP-main problem.
    if config.fp_norm_constraint:
        if config.fp_main_norm == 'L1':
            # TODO: check if we can access the block defined in FP-main problem
            generate_norm1_norm_constraint(
                fp_nlp, solve_data.mip, config, discrete_only=True)
        elif config.fp_main_norm == 'L2':
            fp_nlp.norm_constraint = Constraint(expr=sum(
                (nlp_var - mip_var.value)**2 - config.fp_norm_constraint_coef *
                (nlp_var.value - mip_var.value)**2 for nlp_var, mip_var in zip(
                    fp_nlp.MindtPy_utils.discrete_variable_list,
                    solve_data.mip.MindtPy_utils.discrete_variable_list)) <= 0)
        elif config.fp_main_norm == 'L_infinity':
            fp_nlp.norm_constraint = ConstraintList()
            rhs = config.fp_norm_constraint_coef * max(
                nlp_var.value - mip_var.value for nlp_var, mip_var in zip(
                    fp_nlp.MindtPy_utils.discrete_variable_list,
                    solve_data.mip.MindtPy_utils.discrete_variable_list))
            for nlp_var, mip_var in zip(
                    fp_nlp.MindtPy_utils.discrete_variable_list,
                    solve_data.mip.MindtPy_utils.discrete_variable_list):
                fp_nlp.norm_constraint.add(nlp_var - mip_var.value <= rhs)

    MindtPy.fp_nlp_obj = generate_norm2sq_objective_function(
        fp_nlp, solve_data.mip, discrete_only=config.fp_discrete_only)

    MindtPy.cuts.deactivate()
    TransformationFactory('core.relax_integer_vars').apply_to(fp_nlp)
    try:
        TransformationFactory('contrib.deactivate_trivial_constraints').apply_to(
            fp_nlp, tmp=True, ignore_infeasible=False, tolerance=config.constraint_tolerance)
    except ValueError:
        config.logger.warning(
            'infeasibility detected in deactivate_trivial_constraints')
        results = SolverResults()
        results.solver.termination_condition = tc.infeasible
        return fp_nlp, results
    # Solve the NLP
    nlpopt = SolverFactory(config.nlp_solver)
    nlp_args = dict(config.nlp_solver_args)
    set_solver_options(nlpopt, solve_data, config, solver_type='nlp')
    with SuppressInfeasibleWarning():
        with time_code(solve_data.timing, 'fp subproblem'):
            results = nlpopt.solve(
                fp_nlp, tee=config.nlp_solver_tee, **nlp_args)
    return fp_nlp, results
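The relaxation step in isolation, as a standalone sketch of the 'core.relax_integer_vars' transformation the function applies:

from pyomo.environ import ConcreteModel, Integers, TransformationFactory, Var

m = ConcreteModel()
m.y = Var(domain=Integers, bounds=(0, 5))
TransformationFactory('core.relax_integer_vars').apply_to(m)
print(m.y.domain)  # integrality is dropped; the bounds remain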
Example #15
def make_separation_problem(model_data, config):
    """
    Swap out uncertain param Param objects for Vars
    Add uncertainty set constraints and separation objectives
    """
    separation_model = model_data.original.clone()
    separation_model.del_component("coefficient_matching_constraints")
    separation_model.del_component("coefficient_matching_constraints_index")

    uncertain_params = separation_model.util.uncertain_params
    separation_model.util.uncertain_param_vars = param_vars = Var(
        range(len(uncertain_params)))
    map_new_constraint_list_to_original_con = ComponentMap()

    if config.objective_focus is ObjectiveType.worst_case:
        separation_model.util.zeta = Param(initialize=0, mutable=True)
        constr = Constraint(expr=separation_model.first_stage_objective +
                            separation_model.second_stage_objective -
                            separation_model.util.zeta <= 0)
        separation_model.add_component("epigraph_constr", constr)

    substitution_map = {}
    #Separation problem initialized to nominal uncertain parameter values
    for idx, var in enumerate(list(param_vars.values())):
        param = uncertain_params[idx]
        var.set_value(param.value, skip_validation=True)
        substitution_map[id(param)] = var

    separation_model.util.new_constraints = constraints = ConstraintList()

    uncertain_param_set = ComponentSet(uncertain_params)
    for c in separation_model.component_data_objects(Constraint):
        if any(v in uncertain_param_set
               for v in identify_mutable_parameters(c.expr)):
            if c.equality:
                constraints.add(
                    replace_expressions(expr=c.lower,
                                        substitution_map=substitution_map) ==
                    replace_expressions(expr=c.body,
                                        substitution_map=substitution_map))
            elif c.lower is not None:
                constraints.add(
                    replace_expressions(expr=c.lower,
                                        substitution_map=substitution_map) <=
                    replace_expressions(expr=c.body,
                                        substitution_map=substitution_map))
            elif c.upper is not None:
                constraints.add(
                    replace_expressions(expr=c.upper,
                                        substitution_map=substitution_map) >=
                    replace_expressions(expr=c.body,
                                        substitution_map=substitution_map))
            else:
                raise ValueError(
                    "Unable to parse constraint for building the separation problem."
                )
            c.deactivate()
            map_new_constraint_list_to_original_con[constraints[
                constraints.index_set().last()]] = c

    separation_model.util.map_new_constraint_list_to_original_con = map_new_constraint_list_to_original_con

    # === Add objectives first so that the uncertainty set
    #     constraints do not get picked up into the set
    #     of performance constraints which become objectives
    make_separation_objective_functions(separation_model, config)
    add_uncertainty_set_constraints(separation_model, config)

    # === Deactivate h(x,q) == 0 constraints
    for c in separation_model.util.h_x_q_constraints:
        c.deactivate()

    return separation_model
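The substitution idiom used above, in isolation; note the import path of replace_expressions has moved between Pyomo versions, so treat it as an assumption:

from pyomo.core.expr.visitor import replace_expressions
from pyomo.environ import ConcreteModel, Param, Var

m = ConcreteModel()
m.p = Param(initialize=1.0, mutable=True)
m.x = Var()
m.q = Var()
new_expr = replace_expressions(m.p * m.x, substitution_map={id(m.p): m.q})
# new_expr now references m.q where the mutable Param m.p appeared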
Example #16
    def apply(self, **kwds):
        if __debug__ and logger.isEnabledFor(logging.DEBUG):   #pragma:nocover
            logger.debug("Calling ConnectorExpander")
                
        instance = kwds['instance']
        blockList = list(instance.block_data_objects(active=True))
        noConnectors = True
        for b in blockList:
            if b.component_map(Connector):
                noConnectors = False
                break
        if noConnectors:
            return

        if __debug__ and logger.isEnabledFor(logging.DEBUG):   #pragma:nocover
            logger.debug("   Connectors found!")

        #
        # At this point, there are connectors in the model, so we must
        # look for constraints that involve connectors and expand them.
        #
        #options = kwds['options']
        #model = kwds['model']

        # In general, blocks should be relatively self-contained, so we
        # should build the connectors from the "bottom up":
        blockList.reverse()

        # Expand each constraint involving a connector
        for block in blockList:
            if __debug__ and logger.isEnabledFor(logging.DEBUG):   #pragma:nocover
                logger.debug("   block: " + block.cname())

            CCC = {}
            for name, constraint in itertools.chain\
                    ( iteritems(block.component_map(Constraint)), 
                      iteritems(block.component_map(ConstraintList)) ):
                cList = []
                CCC[name+'.expanded'] = cList
                for idx, c in iteritems(constraint._data):
                    if __debug__ and logger.isEnabledFor(logging.DEBUG):   #pragma:nocover
                        logger.debug("   (looking at constraint %s[%s])", name, idx)
                    connectors = []
                    self._gather_connectors(c.body, connectors)
                    if len(connectors) == 0:
                        continue
                    if __debug__ and logger.isEnabledFor(logging.DEBUG):   #pragma:nocover
                        logger.debug("   (found connectors in constraint)")
                    
                    # Validate that all connectors match
                    errors, ref, skip = self._validate_connectors(connectors)
                    if errors:
                        logger.error(
                            ( "Connector mismatch: errors detected when "
                              "constructing constraint %s\n    " %
                              (name + (idx and '[%s]' % idx or '')) ) +
                            '\n    '.join(reversed(errors)) )
                        raise ValueError(
                            "Connector mismatch in constraint %s" % \
                            name + (idx and '[%s]' % idx or ''))
                    
                    if __debug__ and logger.isEnabledFor(logging.DEBUG):   #pragma:nocover
                        logger.debug("   (connectors valid)")

                    # Fill in any empty connectors
                    for conn in connectors:
                        if conn.vars:
                            continue
                        for var in ref.vars:
                            if var in skip:
                                continue
                            v = Var()
                            block.add_component(conn.cname() + '.auto.' + var, v)
                            conn.vars[var] = v
                            v.construct()
                    
                    # OK - expand this constraint
                    self._expand_constraint(block, name, idx, c, ref, skip, cList)
                    # Now deactivate the original constraint
                    c.deactivate()
            for name, exprs in iteritems(CCC):
                cList = ConstraintList()
                block.add_component( name, cList )
                cList.construct()
                for expr in exprs:
                    cList.add(expr)
                

        # Now, go back and implement VarList aggregators
        for block in blockList:
            for conn in itervalues(block.component_map(Connector)):
                for var, aggregator in iteritems(conn.aggregators):
                    c = Constraint(expr=aggregator(block, var))
                    block.add_component(
                        conn.cname() + '.' + var.cname() + '.aggregate', c)
                    c.construct()
Example #17
    def apply(self, **kwds):
        if __debug__ and logger.isEnabledFor(logging.DEBUG):   #pragma:nocover
            logger.debug("Calling ConnectorExpander")

        instance = kwds['instance']
        blockList = list(instance.block_data_objects(active=True))
        noConnectors = True
        for b in blockList:
            if b.component_map(Connector):
                noConnectors = False
                break
        if noConnectors:
            return

        if __debug__ and logger.isEnabledFor(logging.DEBUG):   #pragma:nocover
            logger.debug("   Connectors found!")

        #
        # At this point, there are connectors in the model, so we must
        # look for constraints that involve connectors and expand them.
        #
        #options = kwds['options']
        #model = kwds['model']

        # In general, blocks should be relatively self-contained, so we
        # should build the connectors from the "bottom up":
        blockList.reverse()

        # Expand each constraint involving a connector
        for block in blockList:
            if __debug__ and logger.isEnabledFor(logging.DEBUG): #pragma:nocover
                logger.debug("   block: " + block.name)

            CCC = {}
            for name, constraint in itertools.chain\
                    ( iteritems(block.component_map(Constraint)),
                      iteritems(block.component_map(ConstraintList)) ):
                cList = []
                CCC[name+'.expanded'] = cList
                for idx, c in iteritems(constraint._data):
                    if __debug__ and logger.isEnabledFor(logging.DEBUG):   #pragma:nocover
                        logger.debug("   (looking at constraint %s[%s])", name, idx)
                    connectors = []
                    self._gather_connectors(c.body, connectors)
                    if len(connectors) == 0:
                        continue
                    if __debug__ and logger.isEnabledFor(logging.DEBUG):   #pragma:nocover
                        logger.debug("   (found connectors in constraint)")

                    # Validate that all connectors match
                    errors, ref, skip = self._validate_connectors(connectors)
                    if errors:
                        logger.error(
                            ( "Connector mismatch: errors detected when "
                              "constructing constraint %s\n    " %
                              (name + (idx and '[%s]' % idx or '')) ) +
                            '\n    '.join(reversed(errors)) )
                        raise ValueError(
                            "Connector mismatch in constraint %s" % \
                            name + (idx and '[%s]' % idx or ''))

                    if __debug__ and logger.isEnabledFor(logging.DEBUG):   #pragma:nocover
                        logger.debug("   (connectors valid)")

                    # Fill in any empty connectors
                    for conn in connectors:
                        if conn.vars:
                            continue
                        for var in ref.vars:
                            if var in skip:
                                continue
                            v = Var()
                            block.add_component(conn.local_name + '.auto.' + var, v)
                            conn.vars[var] = v
                            v.construct()

                    # OK - expand this constraint
                    self._expand_constraint(block, name, idx, c, ref, skip, cList)
                    # Now deactivate the original constraint
                    c.deactivate()
            for name, exprs in iteritems(CCC):
                cList = ConstraintList()
                block.add_component( name, cList )
                cList.construct()
                for expr in exprs:
                    cList.add(expr)

        # Now, go back and implement VarList aggregators
        for block in blockList:
            for conn in itervalues(block.component_map(Connector)):
                for var, aggregator in iteritems(conn.aggregators):
                    c = Constraint(expr=aggregator(block, var))
                    block.add_component(
                        conn.local_name + '.' + var.local_name + '.aggregate', c)
                    c.construct()