def setup(self, dim, p, A=None):
     """
     Parameters
     ----------
     dim : int
         Dimension.
     p : {1, 2, np.inf}
         Which p-norm to use.
     A : array, optional
         Direct mapping matrix, must be square and invertible. Identity by
         default.
     """
     A = np.eye(dim) if A is None else A
     self.p = p
     if self.p==1:
         # Polytope
         # Generate the matrix of facet normals
         P = np.array(list(itertools.product([-1,1], repeat=dim)))
         P = P.dot(la.inv(A))
         # Make the set with a default facet distance
         p = np.ones(P.shape[0])  # one facet per row of P (2**dim sign patterns)
         self.set = Polytope(P,p)
     elif self.p==2:
         # Ellipsoid
         self.set = Ellipsoid(la.matrix_power(la.inv(A),2))
         #sampling_function = lambda x,u: E(phi(x,u).value**2)
     else:
         # Polytope
         # Generate the matrix of facet normals
         P = Polytope(R=[(-1.,1.) for i in range(dim)]).A
         P = P.dot(la.inv(A))
         # Make the set with a default facet distance
         p = np.ones(dim*2)
         self.set = Polytope(P,p)
Example #2
def pendulum_example(abs_frac=0.5, abs_err=None, rel_err=2.0):
    # Plant
    pendulum = mpc_library.InvertedPendulumOnCart()
    # Create the set to be partitioned
    # --------------------------------
    # Problem: overlap=0 for the ECC feasibility algorithm if we use the
    # vanilla full set. Reason: when dxdt<-v_eps at some simplex vertices and
    # dxdt>v_eps at others, the commutation is forced to have the first two
    # elements zero in the first case and the last three elements zero in the
    # second case. Hence, ECC will not converge.
    # Solution: manually divide the set into three sections:
    #   1) dxdt>=v_eps
    #   2) dxdt<=-v_eps
    #   3) -v_eps<=dxdt<=v_eps
    full_set = Polytope(R=[(-pendulum.p_err, pendulum.p_err),
                           (-pendulum.ang_err, pendulum.ang_err),
                           (-pendulum.v_err, pendulum.v_err),
                           (-pendulum.rate_err, pendulum.rate_err)])
    set_1 = Polytope(R=[(-pendulum.p_err, pendulum.p_err),
                        (-pendulum.ang_err, pendulum.ang_err),
                        (pendulum.v_eps, pendulum.v_err),
                        (-pendulum.rate_err, pendulum.rate_err)])
    set_2 = Polytope(R=[(-pendulum.p_err, pendulum.p_err),
                        (-pendulum.ang_err, pendulum.ang_err),
                        (-pendulum.v_err, -pendulum.v_eps),
                        (-pendulum.rate_err, pendulum.rate_err)])
    set_3 = Polytope(R=[(-pendulum.p_err, pendulum.p_err),
                        (-pendulum.ang_err, pendulum.ang_err),
                        (-pendulum.v_eps, pendulum.v_eps),
                        (-pendulum.rate_err, pendulum.rate_err)])
    full_set_vrep = np.row_stack(full_set.V)
    set_1_vrep = np.row_stack(set_1.V)
    set_2_vrep = np.row_stack(set_2.V)
    set_3_vrep = np.row_stack(set_3.V)
    # Create the optimization problem oracle
    oracle = create_oracle(pendulum, full_set_vrep, abs_frac, abs_err, rel_err)
    # Initial triangulation
    partition_set_1, _, _ = tools.delaunay(set_1_vrep)
    partition_set_2, _, _ = tools.delaunay(set_2_vrep)
    partition_set_3, _, _ = tools.delaunay(set_3_vrep)
    partition = partition_set_1
    tools.join_triangulation(partition_set_2, partition_set_3)
    tools.join_triangulation(partition_set_1, partition_set_2)
    return full_set, partition_set_1, oracle
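# The initial-triangulation step above goes through tools.delaunay and
# tools.join_triangulation, which are not shown in this listing. As a rough,
# standalone sketch of the same idea (triangulate each velocity section
# separately so that no simplex straddles +/- v_eps), here is a 2D analogue
# using scipy.spatial.Delaunay; the box bounds are arbitrary placeholders,
# not the pendulum's actual error bounds.
import numpy as np
from scipy.spatial import Delaunay

def box_vertices(lo, hi):
    """Vertices of the axis-aligned 2D box [lo[0],hi[0]] x [lo[1],hi[1]]."""
    return np.array([[lo[0], lo[1]], [lo[0], hi[1]],
                     [hi[0], lo[1]], [hi[0], hi[1]]])

p_err, v_err, v_eps = 1.0, 1.0, 0.1
sections = [box_vertices([-p_err, v_eps], [p_err, v_err]),    # dxdt >= v_eps
            box_vertices([-p_err, -v_err], [p_err, -v_eps]),  # dxdt <= -v_eps
            box_vertices([-p_err, -v_eps], [p_err, v_eps])]   # |dxdt| <= v_eps
triangulations = [Delaunay(V) for V in sections]
for tri in triangulations:
    print(tri.simplices)  # simplex vertex indices for each section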
Example #3
def compute_polygon_hull(B, c):
    """
    Compute the vertex representation of a polygon defined by:

    .. math::

        B x \leq c

    where `x` is a 2D vector.

    Parameters
    ----------
    B : array, shape=(2, K)
        Linear inequality matrix.
    c : array, shape=(K,)
        Linear inequality vector.

    Returns
    -------
    vertices : list of arrays
        List of 2D vertices in counterclockwise order.
    """
    x = None
    if not all(c > 0):
        x = Polytope.compute_chebyshev_center(B, c)
        c = c - dot(B, x)
    if not all(c > 0):
        raise Exception("Polygon is empty (min. dist. to edge %.2f)" % min(c))
    vertices = __compute_polygon_hull(B, c)
    if x is not None:
        vertices = [v + x for v in vertices]
    return vertices
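# Minimal usage sketch for compute_polygon_hull above. It assumes the function
# and its helpers (__compute_polygon_hull, Polytope.compute_chebyshev_center)
# are importable from the surrounding module; the unit square ||x||_inf <= 1
# is written as B x <= c with one inequality per edge.
import numpy as np

B = np.array([[ 1.,  0.],
              [-1.,  0.],
              [ 0.,  1.],
              [ 0., -1.]])
c = np.array([1., 1., 1., 1.])
square_vertices = compute_polygon_hull(B, c)  # the four corners (+-1, +-1)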
Example #4
def compute_polygon_hull(B, c):
    """
    Compute the vertex representation of a polygon defined by:

        B * x <= c

    where x is a 2D vector.

    INPUT:

    - ``B`` -- (2 x K) matrix
    - ``c`` -- vector of length K and positive coordinates

    OUTPUT:

    List of 2D vertices in counterclockwise order.
    """
    x = None
    if not all(c > 0):
        x = Polytope.compute_chebyshev_center(B, c)
        c = c - dot(B, x)
    vertices = __compute_polygon_hull(B, c)
    if x is not None:
        vertices = [v + x for v in vertices]
    return vertices
 def convertToPolytope(self, r):
     if self.p==2:
         H = self.set.convertToHyperrectangle(r**2)
         A,b,poly = H.convertToPolytope()
     else:
         # 1- or infinity-norm
         A,b = self.set.A, self.set.b*r
         poly = Polytope(A,b)
     return A,b,poly
Example #6
def satellite_z_example(abs_frac=0.5, abs_err=None, rel_err=2.0):
    # Plant
    sat = mpc_library.SatelliteZ()
    # The set to partition
    full_set = Polytope(R=[(-sat.pars['pos_err_max'], sat.pars['pos_err_max']),
                           (-sat.pars['vel_err_max'], sat.pars['vel_err_max'])])
    full_set_vrep = np.row_stack(full_set.V)
    # Create the optimization problem oracle
    oracle = create_oracle(sat, full_set_vrep, abs_frac, abs_err, rel_err)
    # Initial triangulation
    partition, number_init_simplices, vol = tools.delaunay(full_set_vrep)
    return full_set, partition, oracle
    def setup_RMPC(self, Q_coeff):
        """
        Setup robust MPC optimization problem.
        
        Parameters
        ----------
        Q_coeff : float
            Coefficient to multiply identity state weight by in the cost.
        """
        # Setup optimization problem matrices and other values
        W = self.specs.P.W
        R = self.specs.P.R
        r = self.specs.P.r
        U_partitions = subtractHyperrectangles(self.specs.U_int,
                                               self.specs.U_ext)
        H = [Up.A for Up in U_partitions]
        h = [Up.b for Up in U_partitions]
        # Number of convex sets whose union makes the control set
        # (**excluding** the {0} set)
        self.delta_size = len(H)
        qq = [(1. / (1 - 1. / dep.pq) if dep.pq != 1 else np.inf)
              for dep in self.specs.P.dependency]
        D = self.plant.D(self.specs)
        n_q = len(self.specs.P.L)
        # scaling
        D_w = Polytope(self.specs.P.R, self.specs.P.r).computeScalingMatrix()
        self.D_x = self.specs.X.computeScalingMatrix()
        self.p_u = [None for _ in range(self.delta_size)]
        self.D_u = [None for _ in range(self.delta_size)]
        for i in range(self.delta_size):
            # Scale to unity and re-center with respect to set {u_i: H[i]*u_i<=h[i]}
            self.p_u[i] = np.mean(Polytope(A=H[i], b=h[i]).V,
                                  axis=0)  # barycenter
            self.D_u[i] = np.empty(H[i].shape)
            for j in range(H[i].shape[0]):
                alpha = cvx.Variable()
                center = self.p_u[i]
                ray = H[i][j]
                cost = cvx.Maximize(alpha)
                constraints = [H[i] * (center + alpha * ray) <= h[i]]
                problem = cvx.Problem(cost, constraints)
                problem.solve(**global_vars.SOLVER_OPTIONS)
                self.D_u[i][j] = alpha.value * ray
            self.D_u[i] = self.D_u[i].T
        self.D_u_box = self.specs.U_ext.computeScalingMatrix()

        # Robust term for polytopic noise computation
        G, n_g = self.specs.X.A, self.specs.X.b.size

        def robust_term(i, j, k):
            """
            Computes \max_{R*w<=r} G_j^T*A^{k-1-i}*D*W*w which is the effect of
            the worst case independent disturbance at time step i when looking
            at its propagated effect at time step k, along the j-th facet of
            the invariant set.
            
            Parameters
            ----------
            i : int
                Time step in the k-step horizon.
            j : int
                Invariant polytope facet index.
            k : int
                Horizon length, k.
                
            Returns
            -------
            : float
                \max_{R*w<=r} G_j^T*A^{k-1-i}*D*W*w.
            """
            w = cvx.Variable(R.shape[1])
            cost = cvx.Maximize(G[j].dot(mpow(
                self.plant.A, k - 1 - i)).dot(D).dot(W).dot(D_w) * w)
            constraints = [R.dot(D_w) * w <= r]
            problem = cvx.Problem(cost, constraints)
            return problem.solve(**global_vars.SOLVER_OPTIONS)

        sum_sigma = []
        for k in tools.fullrange(self.N):
            sum_sigma_facet = []
            for j in range(n_g):
                sum_sigma_facet.append(
                    sum([robust_term(i, j, k) for i in range(k)]))
            sum_sigma += sum_sigma_facet
        sum_sigma = np.array(sum_sigma)

        # Setup MPC optimization problem
        def make_ux():
            """
            Make input and state optimization variables.
            
            Returns
            -------
            variables : dict
                Dictionary of variable lists. 'x' and 'u' are dimensional
                (unscaled) states and inputs; 'xhat' and 'uhat' are
                dimensionless (scaled) states and inputs.
            """
            uhat = [[
                cvx.Variable(self.D_u[i].shape[1]) for k in range(self.N)
            ] for i in range(self.delta_size)]
            u = [[
                self.p_u[i] + self.D_u[i] * uhat[i][k] for k in range(self.N)
            ] for i in range(self.delta_size)]
            xhat = [cvx.Variable(self.n_x) for k in range(self.N + 1)]
            x = [self.D_x * xhat[k] for k in range(self.N + 1)]
            variables = dict(u=u, x=x, uhat=uhat, xhat=xhat)
            return variables

        self.make_ux = make_ux

        self.x0 = cvx.Parameter(self.n_x)
        self.delta = cvx.Variable(self.delta_size * self.N, boolean=True)
        xu = self.make_ux()
        self.u = xu['u']
        self.x = xu['x']

        def get_u0_value():
            """
            Get the optimal value of the first input in the MPC horizon.
            
            Returns
            -------
            u0_opt : array
                Optimal value of the first input in the MPC horizon.
            """
            return sum(
                [self.u[__i][0].value for __i in range(self.delta_size)])

        self.get_u0_value = get_u0_value

        Q = Q_coeff * np.eye(self.n_x)
        R = np.eye(self.n_u)
        self.V = (sum([
            cvx.quad_form(
                la.inv(self.D_u_box) *
                sum([self.u[i][k] for i in range(self.delta_size)]), R)
            for k in range(self.N)
        ]) + sum([
            cvx.quad_form(xu['xhat'][k], Q)
            for k in tools.fullrange(1, self.N)
        ]))
        self.cost = cvx.Minimize(self.V)

        def make_constraints(theta, x, u, delta, delta_sum_constraint=True):
            constraints = []
            # Nominal dynamics
            sum_u = lambda k: sum(
                [u[__i][k] for __i in range(self.delta_size)])
            constraints += [
                x[k + 1] == self.plant.A * x[k] + self.plant.B * sum_u(k)
                for k in range(self.N)
            ]
            constraints += [x[0] == theta]
            # Robustness constraint tightening
            G, g, n_g = self.specs.X.A, self.specs.X.b, self.specs.X.b.size
            constraints += [
                G * x[k] + sum_sigma[(k - 1) * n_g:k * n_g] + sum([
                    sum([
                        np.array([
                            la.norm(G[j].dot(mpow(self.plant.A, k - 1 - i)).
                                    dot(D).dot(self.specs.P.L[l]),
                                    ord=qq[l]) for j in range(n_g)
                        ]) *
                        self.specs.P.dependency[l].phi_direct(x[i], sum_u(i))
                        for l in range(n_q)
                    ]) for i in range(k)
                ]) <= g for k in tools.fullrange(self.N)
            ]
            # Input constraint
            for i in range(self.delta_size):
                constraints += [
                    H[i] * u[i][k] <= h[i] * delta[self.delta_size * k + i]
                    for k in range(self.N)
                ]
            # input is in at least one of the convex subsets
            if delta_sum_constraint:
                constraints += [
                    sum([
                        delta[self.delta_size * k + i]
                        for i in range(self.delta_size)
                    ]) <= 1 for k in range(self.N)
                ]
            return constraints

        self.make_constraints = make_constraints
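# Standalone sketch of the inner linear program solved by robust_term inside
# setup_RMPC above: maximize a linear function of the disturbance w over the
# polytope {w : R w <= r}. Written with current cvxpy syntax (@ for matrix
# products) and small made-up data; in setup_RMPC the objective row is
# G_j^T A^{k-1-i} D W D_w and the constraint uses the scaled disturbance set.
import numpy as np
import cvxpy as cvx

R = np.vstack((np.eye(2), -np.eye(2)))  # box disturbance set |w_i| <= 0.5
r = 0.5 * np.ones(4)
row = np.array([1.0, -2.0])             # stand-in for G_j^T A^{k-1-i} D W D_w

w = cvx.Variable(2)
problem = cvx.Problem(cvx.Maximize(row @ w), [R @ w <= r])
worst_case = problem.solve()            # 0.5*|1.0| + 0.5*|-2.0| = 1.5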
    def __init__(self):
        super().__init__()
        # Parameters
        self.N = global_vars.MPC_HORIZON  # Prediction horizon length
        self.pars = satellite_parameters()
        self.T_s = self.pars['T_s']

        # Specifications
        X = Polytope(R=[(-self.pars['pos_err_max'], self.pars['pos_err_max']),
                        (-self.pars['pos_err_max'], self.pars['pos_err_max']),
                        (-self.pars['pos_err_max'], self.pars['pos_err_max']),
                        (-self.pars['vel_err_max'], self.pars['vel_err_max']),
                        (-self.pars['vel_err_max'], self.pars['vel_err_max']),
                        (-self.pars['vel_err_max'], self.pars['vel_err_max'])])
        U_ext = Polytope(R=[(-self.pars['delta_v_max'], self.pars['delta_v_max']),
                            (-self.pars['delta_v_max'], self.pars['delta_v_max']),
                            (-self.pars['delta_v_max'], self.pars['delta_v_max'])])
        U_int = Polytope(R=[(-self.pars['delta_v_min'], self.pars['delta_v_min']),
                            (-self.pars['delta_v_min'], self.pars['delta_v_min']),
                            (-self.pars['delta_v_min'], self.pars['delta_v_min'])])
        # uncertainty set
        P = uc.UncertaintySet()  # Left empty for chosen_spec=='nominal'
        n = 3  # Position dimension
        one, I, O = np.ones(n), np.eye(n), np.zeros((n, n))
        P.addIndependentTerm('process',
                             lb=-self.pars['w_max'] * one,
                             ub=self.pars['w_max'] * one)
        P.addIndependentTerm(
            'state',
            lb=-np.concatenate(
                [self.pars['p_max'] * one, self.pars['v_max'] * one]),
            ub=np.concatenate(
                [self.pars['p_max'] * one, self.pars['v_max'] * one]))
        P.addDependentTerm('input',
                           uc.DependencyDescription(
                               lambda: cvx.Constant(self.pars['sigma_fix']),
                               pq=2),
                           dim=n)
        P.addDependentTerm('state',
                           uc.DependencyDescription(
                               lambda nfx: self.pars['sigma_pos'] * nfx,
                               pq=np.inf,
                               px=2,
                               Fx=np.hstack((I, O))),
                           dim=n,
                           L=np.vstack((I, O)))
        P.addDependentTerm('state',
                           uc.DependencyDescription(
                               lambda nfx: self.pars['sigma_vel'] * nfx,
                               pq=np.inf,
                               px=2,
                               Fx=np.hstack((O, I))),
                           dim=n,
                           L=np.vstack((O, I)))
        P.addDependentTerm('input',
                           uc.DependencyDescription(
                               lambda nfu: self.pars['sigma_rcs'] * nfu,
                               pq=2,
                               pu=2),
                           dim=n)
        self.specs = Specifications(X, (U_int, U_ext), P)

        # Make plant
        # continuous-time
        self.n_x, self.n_u = 6, 3
        A_c = np.array(
            [[0., 0., 0., 1., 0., 0.], [0., 0., 0., 0., 1., 0.],
             [0., 0., 0., 0., 0., 1.],
             [3. * self.pars['wo']**2, 0., 0., 0., 2. * self.pars['wo'], 0.],
             [0., 0., 0., -2. * self.pars['wo'], 0., 0.],
             [0., 0., -self.pars['wo']**2, 0., 0., 0.]])
        B_c = np.array([[0., 0., 0.], [0., 0., 0.], [0., 0., 0.], [1., 0., 0.],
                        [0., 1., 0.], [0., 0., 1.]])
        E_c = B_c.copy()
        # discrete-time
        A = sla.expm(A_c * self.T_s)
        B = A.dot(B_c)
        M = np.block([[A_c, E_c], [np.zeros((self.n_u, self.n_x + self.n_u))]])
        E = sla.expm(M * self.T_s)[:self.n_x, self.n_x:]
        self.plant = LinearPlant(self.T_s, A, B, E)

        # Setup the RMPC problem
        self.setup_RMPC(Q_coeff=1e-2)
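# Sketch of the discretization used in __init__ above: the zero-order-hold
# disturbance matrix E comes out of the upper-right block of the exponential
# of the augmented matrix [[A_c, E_c], [0, 0]] (Van Loan's trick). The double
# integrator below is a placeholder, not the satellite dynamics.
import numpy as np
import scipy.linalg as sla

T_s = 0.1
A_c = np.array([[0., 1.],
                [0., 0.]])
E_c = np.array([[0.],
                [1.]])
n_x, n_w = A_c.shape[0], E_c.shape[1]
M = np.block([[A_c, E_c],
              [np.zeros((n_w, n_x + n_w))]])
Phi = sla.expm(M * T_s)
A_d = Phi[:n_x, :n_x]   # same as sla.expm(A_c * T_s)
E_d = Phi[:n_x, n_x:]   # discrete-time disturbance-to-state matrix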
class NormBall(Set):
    """
    A p-norm ball {A*x : ||x||_p <= b} \subset R^n where b can be variable.
    """
    def setup(self, dim, p, A=None):
        """
        Parameters
        ----------
        dim : int
            Dimension.
        p : {1, 2, np.inf}
            Which p-norm to use.
        A : array, optional
            Direct mapping matrix, must be square and invertible. Identity by
            default.
        """
        A = np.eye(dim) if A is None else A
        self.p = p
        if self.p==1:
            # Polytope
            # Generate the matrix of facet normals
            P = np.array(list(itertools.product([-1,1], repeat=dim)))
            P = P.dot(la.inv(A))
            # Make the set with a default facet distance
            p = np.ones(P.shape[0])  # one facet per row of P (2**dim sign patterns)
            self.set = Polytope(P,p)
        elif self.p==2:
            # Ellipsoid
            self.set = Ellipsoid(la.matrix_power(la.inv(A),2))
            #sampling_function = lambda x,u: E(phi(x,u).value**2)
        else:
            # Polytope
            # Generate the matrix of facet normals
            P = Polytope(R=[(-1.,1.) for i in range(dim)]).A
            P = P.dot(la.inv(A))
            # Make the set with a default facet distance
            p = np.ones(dim*2)
            self.set = Polytope(P,p)

    def generateRandomPoint(self, r):
        """
        Generates a random point inside the norm ball.
        
        Parameters
        ----------
        r : float
            Ball radius.
            
        Returns
        -------
        random_point : array
            A random point in the norm ball.
        """
        if self.p==2:
            random_point = self.set(r**2)
        else:
            # 1- or infinity-norm
            p_default = np.copy(self.set.b)
            self.set.b *= r
            random_point = self.set.randomPoint()
            self.set.b = p_default
        return random_point
        
    def convertToPolytope(self, r):
        if self.p==2:
            H = self.set.convertToHyperrectangle(r**2)
            A,b,poly = H.convertToPolytope()
        else:
            # 1- or infinity-norm
            A,b = self.set.A, self.set.b*r
            poly = Polytope(A,b)
        return A,b,poly
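# Minimal usage sketch for the NormBall class above. How the Set base class is
# constructed is not shown in this listing, so instantiating NormBall() with no
# arguments and calling setup() directly is an assumption; the intent is only
# to illustrate the infinity-norm branch and the conversion to a polytope of
# "radius" r.
import numpy as np

ball = NormBall()                # assumes Set() takes no required arguments
ball.setup(dim=2, p=np.inf)      # unit infinity-norm ball, A = identity
A, b, poly = ball.convertToPolytope(r=0.5)   # the box |x_i| <= 0.5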
 def convertToPolytope(self):
     R = []
     for lb,ub in zip(self.lb,self.ub):
         R.append((lb,ub))
     poly = Polytope(R=R)
     return poly.A, poly.b, poly
 def __iter__(self):
     for polytope in product(*([self.cube] * self.vertex_count)):
         yield Polytope(tuple(polytope))
import torch
from polytope import Polytope
from dataLoader import loadData
from network2 import Net

imbedDim = 6

poly0 = Polytope([(0.,1.),(-1.,-1.), (1.,0.)], 0, imbedDim)
poly1 = Polytope([(-1.,2.),(-1.,-1.),(2.,-1.)], 1, imbedDim)
poly2 = Polytope([(0.,1.),(-1.,0.),(-1.,-1.),(1.,0.)], 2, imbedDim)
poly3 = Polytope([(-1.,2.),(-1.,-1.),(1.,-1.),(1.,0.)], 3, imbedDim)
poly4 = Polytope([(0.,1.),(-1.,1.),(-1.,-1.),(1.,0.)], 4, imbedDim)
poly5 = Polytope([(-1.,2.),(-1.,-1.),(-1.,0.),(1.,0.)], 5, imbedDim)
poly6 = Polytope([(0.,1.),(-1.,0.),(-1.,-1.),(0.,-1.),(1.,0.)], 6, imbedDim)
poly7 = Polytope([(0.,1.),(-1.,1.),(-1.,-1.),(1.,-1.),(1.,0.)], 7, imbedDim)

# poly8 = polytope.Polytope([], 0, imbedDim)
# poly9 = polytope.Polytope([(-1.,2.),(-1.,-1.),(2.,-1.)], 1, imbedDim)
# poly10 = polytope.Polytope([(0.,1.),(-1.,0.),(-1.,-1.),(1.,0.)], 2, imbedDim)
# poly11 = polytope.Polytope([(-1.,2.),(-1.,-1.),(1.,-1.),(1.,0.)], 3, imbedDim)
# poly12 = polytope.Polytope([(0.,1.),(-1.,1.),(-1.,-1.),(1.,0.)], 4, imbedDim)
# poly13 = polytope.Polytope([(-1.,2.),(-1.,-1.),(-1.,0.),(1.,0.)], 5, imbedDim)
# poly14 = polytope.Polytope([(0.,1.),(-1.,0.),(-1.,-1.),(0.,-1.),(1.,0.)], 6, imbedDim)
# poly15 = polytope.Polytope([(0.,1.),(-1.,1.),(-1.,-1.),(1.,-1.),(1.,0.)], 7, imbedDim)

polytopes = [poly0, poly1, poly2, poly3, poly4, poly5, poly6, poly7]

## Note batch_size must divide evenly into len(train_data_raw)
train_data, test_data = loadData(polytopes, batch_size=12)

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")