Example #1
File: LRC.py  Project: monimoyb/kendama
def sys_load():

    dt = 0.01
    N = 25

    A = eye(2)
    B = dt * eye(2)

    nx = A.shape[1]
    nu = B.shape[1]

    Q = np.diag([500, 500])
    R = np.diag([0.4, 0.4])

    ua = 8
    uxmin = -1.0 * ua
    uxmax = 1.0 * ua
    uzmin = -1.0 * ua
    uzmax = 1.0 * ua
    U = Polytope(lb=np.array([[uxmin], [uzmin]]),
                 ub=np.array([[uxmax], [uzmax]]))

    # Defining process noise bounds
    wlb_true = -0.00  # lower bound of additive noise value
    wub_true = -wlb_true  # upper bound of additive noise value

    y_0 = Polytope(lb=np.array([-0.3492, -0.2457]),
                   ub=np.array([-0.3158, -0.2095]))

    _, Pinf, _ = dlqr(A, B, Q, R)
    return A, B, U, nx, nu, wub_true, wlb_true, y_0, Q, R, N, dt, Pinf
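For context, the matrices above describe a 2-D single integrator sampled at dt = 0.01 with a box input constraint U. Below is a minimal standalone sketch (not part of LRC.py; only numpy and pytope are assumed) of how these pieces fit together; the halfspace check uses the U.A and U.b attributes that the later examples also rely on.

import numpy as np
from pytope import Polytope

dt = 0.01
A, B = np.eye(2), dt * np.eye(2)           # x[k+1] = A x[k] + B u[k]
U = Polytope(lb=np.array([[-8.0], [-8.0]]),
             ub=np.array([[8.0], [8.0]]))  # input box |u_i| <= 8

x = np.array([[-0.33], [-0.22]])           # a state inside y_0 above
u = np.array([[5.0], [-2.0]])              # a candidate input

assert np.all(U.A @ u <= U.b + 1e-12)      # u satisfies A_U u <= b_U
x_next = A @ x + B @ u                     # one step of the nominal model
print(x_next.ravel())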
Example #2
 def test_determine_H_rep(self):
     # Create a polytope from a vertex list, determine its H-rep, use that H-rep
     # to create a new polytope, determine the vertices of the new polytope, and
     # ascertain that the vertex lists are the same.
     V1 = np.array([[
         -1.42, -1.87, -1.53, -1.38, -0.80, 1.88, 1.93, 1.90, 1.59, 0.28
     ], [1.96, -0.26, -1.53, -1.78, -1.76, -1.48, -0.49, 1.18, 1.79,
         1.89]]).T
     P1 = Polytope(V1)
     P1.determine_H_rep()
     P2 = Polytope(P1.A, P1.b)
     P2.determine_V_rep()
     self.assertTrue(np.allclose(P1.V_sorted(), P2.V_sorted()))
Example #3
 def test_scale(self):
     # Create a polytope from a vertex list, scale, and check resulting vertices
     V = np.array([[1.9, 0.2], [0, -2], [-0.3, 0.17], [3, 4.01]])
     PV = Polytope(V)
     factorV = 1.3
     # Create a polytope from inequalities, scale, and check resulting (A, b)
     A = np.array([[-1, -2], [2, 0.9], [-1.3, 3]])
     b = np.array([[-0.6, 3.1, 4]]).T
     PH = Polytope(A, b)
     factorH = -0.8
     # Check that scaling is commutative
     self.assertTrue(np.allclose((PV * factorV).V, (factorV * PV).V))
     self.assertTrue(np.allclose((PH * factorH).H, (factorH * PH).H))
     # Check that scaling only changes V and b, not A
     self.assertTrue(np.allclose((PV * factorV).V, V * factorV))
     self.assertTrue(np.allclose((PH * factorH).A, A))
     self.assertTrue(np.allclose((PH * factorH).b, b * factorH))
Example #4
 def test_minimize_H_rep(self):
     # Create a polytope with four redundant constraints. The non-redundant
     # constraints specify a square with vertices (+- 1, -+ 1), the redundant
     # constraints an outer square with vertices (+- 2, -+ 2). Check that the
     # polytope first is specified with eight halfspaces, minimize the H-rep and
     # verify the number goes down to four (corresponding to the inner square),
     # and finally check that the correct (outer) halfspaces are removed from the
     # inequality set.
     n = 2
     lb_inner = np.array([[-1, -1]]).T
     ub_inner = -lb_inner
     A_inner = np.vstack((-np.eye(n), np.eye(n)))
     b_inner = np.vstack((-lb_inner, ub_inner))
     H_inner = np.hstack((A_inner, b_inner))
     A_outer = A_inner
     b_outer = 2 * b_inner
     A = np.vstack((A_inner, A_outer))
     b = np.vstack((b_inner, b_outer))
     P = Polytope(A, b)
     self.assertTrue(P.H.shape[0] == 8)
     P.minimize_H_rep()
     self.assertTrue(P.H.shape[0] == H_inner.shape[0] == 4)
     self.assertTrue(all(h in H_inner for h in P.H))
Example #5
 def test_P_plus_p(self):
     # Test that a 2D Polytope plus a few different 2D vectors give the
     # correct shift when using the + and - operators. Test by manually
     # computing the shifted vertices and comparing the result to the vertices
     # of the Polytope that results from the addition using the +/- operators.
     # Test that the vector can be in a variety of formats, including tuple,
     # list, list of lists, 1D numpy array, and 2D numpy row vector (array). Test
     # by constructing the Polytope both from the vertex list V and from a half-
     # space representation (A, b)
     V = np.array([[-1, 0], [1, 0], [0, 1]])
     PV = Polytope(V)
     A = [[-1, 0], [0, -1], [1, 1]]
     b = [-2, -3, 8]
     PH = Polytope(A, b)
     PH_V = PH.V
     points = [(1, 1), [-1, 2], [[1.5], [-0.5]],
               np.array([-2, -0.1]),
               np.array([[-2], [-0.1]])]
     p_columns = [
         np.array(np.squeeze(p), dtype=float)[:, np.newaxis] for p in points
     ]
     PV_plus_p_results = [PV + p for p in points]
     PV_minus_p_results = [PV - p for p in points]
     PH_plus_p_results = [PH + p for p in points]
     PH_minus_p_results = [PH - p for p in points]
     self.assertTrue(
         all([(PVpp.V == V + p.T).all()
              for PVpp, p in zip(PV_plus_p_results, p_columns)]))
     self.assertTrue(
         all([(PVmp.V == V - p.T).all()
              for PVmp, p in zip(PV_minus_p_results, p_columns)]))
     self.assertTrue(
         all([(PHpp.V == PH_V + p.T).all()
              for PHpp, p in zip(PH_plus_p_results, p_columns)]))
     self.assertTrue(
         all([(PHmp.V == PH_V - p.T).all()
              for PHmp, p in zip(PH_minus_p_results, p_columns)]))
Example #6
 def test_minimal_V_rep(self):
     # Create a polytope from a minimal set of vertices, vertices on the convex
     # hull of those vertices, and random vertices in the interior of the convex
     # hull. Compute the minimal V-representation and test whether it matches the
     # minimal vertex list.
     x_lb = (-3, 0.9)
     x_ub = (0.6, 4)
     # Set of vertices that form the convex hull:
     V_minimal = np.array([[x_lb[0], x_lb[1]], [x_lb[0], x_ub[1]],
                           [x_ub[0], x_ub[1]], [x_ub[0], x_lb[1]]])
     # Points that are redundant in the sense that they lie on edges of the
     # convex hull but are not themselves vertices:
     V_redundant = np.array([[(x_ub[0] + x_lb[0]) / 2, x_lb[1]],
                             [x_ub[0], (x_ub[1] + x_lb[1]) / 2]])
     # Random points in the interior of the convex hull:
     V_random = np.random.uniform(x_lb, x_ub, (40, len(x_lb)))
     V = np.vstack((V_minimal, V_redundant, V_random))
     P_min = Polytope(V_minimal)
     P = Polytope(V)
     self.assertTrue(P.nV == V.shape[0])
     P.minimize_V_rep()
     self.assertTrue(P.nV == V_minimal.shape[0])
     self.assertTrue(np.allclose(P.V_sorted(), P_min.V_sorted()))
Example #7
File: LRC.py  Project: monimoyb/kendama
def v_constructGauss(v_samples, W, conf, nx, nu, A, B, Q, R, U, N, y_0, Ks, Ke,
                     L):

    bsSize = 1000  # number of bootstrap copies
    vbs = zeros([bsSize, nx, size(v_samples, 2)])
    ## First construct the convex hull vertices
    mincvxH = amin(v_samples, axis=1).reshape(nx, 1)
    maxcvxH = amax(v_samples, axis=1).reshape(nx, 1)

    ## Start Bootstrap here
    meanEmp = mean(v_samples, 1)
    stdEmp = std(v_samples, 1)

    for j in range(bsSize):
        for i in range(size(v_samples, 2)):
            entr = randint(0, size(v_samples, 2))
            vbs[j, :, i] = v_samples[:, entr]

    meanBatch = zeros([nx, bsSize])
    stdBatch = zeros([nx, bsSize])

    for j in range(bsSize):
        meanBatch[:, j] = mean(vbs[j, :, :], 1)
        stdBatch[:, j] = std(vbs[j, :, :], 1)

    ###### Take the confidence set #########
    minMu = zeros([nx, 1])
    maxMu = zeros([nx, 1])
    maxStd = zeros([nx, 1])

    ###########################
    ##
    feas_conf = 0
    count = 0
    count2 = 0

    print("Adding Constraints")

    while feas_conf == 0:
        print(
            count,
            "========================================================================================================================================================="
        )
        if conf >= 0.001:
            conf = conf * (0.9999**count)
            for i in range(nx):
                minMu[i, 0] = quantile(meanBatch[i, :], (1 - conf) / 2)
                maxMu[i, 0] = quantile(meanBatch[i, :], 1 - (1 - conf) / 2)
                maxStd[i, 0] = quantile(stdBatch[i, :], 1 - (1 - conf) / 2)

            ###########################

            v_lb = minimum(mincvxH, minMu - 3.08 * maxStd)
            v_ub = maximum(maxcvxH, maxMu + 3.08 * maxStd)

            # v_lb = minMu - 3.08*maxStd;
            # v_ub = maxMu + 3.08*maxStd;                       # NO CVX HULL UNION
            Vmn = Polytope(lb=v_lb, ub=v_ub)  # CVX HULL OF UNION

        else:
            # elementwise bounds, mirroring the branch above
            v_lb = minimum(mincvxH,
                           (meanEmp - (3.08 - 0.1 * count2) * stdEmp).reshape(nx, 1))
            v_ub = maximum(maxcvxH,
                           (meanEmp + (3.08 - 0.1 * count2) * stdEmp).reshape(nx, 1))

            # v_lb = meanEmp - (3.08-0.1*count2)*stdEmp;
            # v_ub = meanEmp + (3.08-0.1*count2)*stdEmp;                          # NO CVX HULL UNION

            Vmn = Polytope(lb=v_lb, ub=v_ub)  # CVX HULL OF UNION
            count2 = count2 + 1

        ### ALL CHECKS MUST GO THROUGH WITH THIS SET

    ####################### MAYNE APPROACH (1) ######################
        ALcl = A - L  # FOLLOW MAYNE NOTATIONS HERE
        LVsc = (-L) * Vmn
        DeltaTilde = W + LVsc
        minRTilde = computeInvariantUgo(ALcl, DeltaTilde)

        ### second piece
        Acl = A + B @ Ke
        DeltaBar = L * minRTilde + L * Vmn

        Vlist = DeltaBar.V
        tolV = 300  # above this many vertices, fall back to an outer bounding box

        if size(Vlist, 1) < tolV:
            print(
                '***** NOT FITTING BOX. EXACT MIN INVARIANT SET ATTEMPT******')
            l = np.array([[min(Vlist[:, 0])], [min(Vlist[:, 1])]])
            u = np.array([[max(Vlist[:, 0])], [max(Vlist[:, 1])]])
            polOut = Polytope(lb=l, ub=u)
            minRBar = computeInvariantUgo(Acl, polOut)
            print("UGO COMPUTED")
        else:
            print('***** FITTING BOX. APPROX MIN INVARIANT SET ******')
            minRBar = computeInvariantUgo(Acl, DeltaBar)
            print("UGO COMPUTED")

        minR = minRTilde + minRBar  # NET PIECE
        print("minRTilde", minRTilde.V)
        print("minRBar", minRBar.V)
        print("minr", minR.V)

        # Compute the Tightened Ubar
        Ubar = U - Ke * minRBar
        Hubar = Ubar.A
        hubar = Ubar.b

        # Terminal Condition = 0
        Xn_nom = Polytope(lb=zeros(nx), ub=zeros(nx))
        Hxn_nom = Xn_nom.A
        hxn_nom = Xn_nom.b

        ## Checking if x_hat exists
        maxB = np.linalg.norm([v_ub, -v_lb], np.inf, 0)
        vmnN0 = Polytope(lb=-maxB, ub=maxB)
        mthX0 = y_0 + (-vmnN0)
        try:
            polxhat0 = mthX0 - minRTilde
        except:
            polxhat0 = -(minRTilde - mthX0)

        if isEmptySet(polxhat0) == 0 and isEmptySet(Ubar) == 0:
            gamma_hat = np.linalg.norm((polxhat0 + (-minRBar)).V, np.inf, 0)
            Xbar = Polytope(lb=-gamma_hat, ub=gamma_hat)
            Hxbar = Xbar.A
            hxbar = Xbar.b
            if isEmptySet(Xbar) == 0:
                print("Xbar", Xbar.V)
                print("Ubar", Ubar.V)
                print("Xn_nom", Xn_nom.V)
                print("Vmn", Vmn.V)
                feas_conf = 1
            else:
                feas_conf = 0

        else:
            print("no v gauss - lower confidence")
            feas_conf = 0
        count = count + 1

    return Vmn, v_lb, v_ub, minRTilde, minRBar, minR, Hxn_nom, hxn_nom, Hxbar, hxbar, Hubar, hubar, Xbar, Ubar, Xn_nom, polxhat0
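The core of the routine above is a nonparametric bootstrap: resample the noise columns with replacement, recompute the per-dimension mean and standard deviation for every resample, and take quantiles of those batch statistics to form a box-shaped confidence set. Below is a standalone sketch of that idea (numpy only; the sample data and variable names are illustrative and not taken from LRC.py).

import numpy as np

rng = np.random.default_rng(0)
nx, m, n_boot, conf = 2, 50, 1000, 0.9975

v_samples = rng.normal(0.0, 0.03, size=(nx, m))   # stand-in noise samples

# Bootstrap: resample the columns with replacement and record batch statistics
mean_batch = np.empty((nx, n_boot))
std_batch = np.empty((nx, n_boot))
for j in range(n_boot):
    idx = rng.integers(0, m, size=m)
    mean_batch[:, j] = v_samples[:, idx].mean(axis=1)
    std_batch[:, j] = v_samples[:, idx].std(axis=1)

# Two-sided quantiles of the bootstrapped means, upper quantile of the std
lo = np.quantile(mean_batch, (1 - conf) / 2, axis=1)
hi = np.quantile(mean_batch, 1 - (1 - conf) / 2, axis=1)
sd = np.quantile(std_batch, 1 - (1 - conf) / 2, axis=1)

# Box-shaped confidence set for the noise, as in the routine above
v_lb, v_ub = lo - 3.08 * sd, hi + 3.08 * sd
print(v_lb, v_ub)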
Example #8
import numpy as np

from pytope import Polytope

import matplotlib.pyplot as plt

np.random.seed(1)

# Create a polytope in R^2 with -1 <= x1 <= 4, -2 <= x2 <= 3
lower_bound1 = (-1, -2)  # [-1, -2]' <= x
upper_bound1 = (4, 3)  # x <= [4, 3]'
P1 = Polytope(lb=lower_bound1, ub=upper_bound1)
# Print the halfspace representation A*x <= b and H = [A b]
print('P1: ', repr(P1))
print('A =\n', P1.A)
print('b =\n', P1.b)
print('H =\n', P1.H)

# Create a square polytope in R^2 by specifying its four vertices
V2 = np.array([[1, 0], [0, -1], [-1, 0], [0, 1]])
P2 = Polytope(V2)
# Print the array of vertices:
print('P2: ', repr(P2))
print('V =\n', P2.V)

# Create a triangle in R^2 by specifying three half-spaces (inequalities)
A3 = [[1, 0], [0, 1], [-1, -1]]
b3 = (2, 1, -1.5)
P3 = Polytope(A3, b3)
# Print the halfspace representation A*x <= b and H = [A b]
print('P3: ', repr(P3))
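Since matplotlib is imported above but not used in this excerpt, here is a quick way to visualize the three polytopes. This sketch continues the script above (P1, P2, P3, and plt are already in scope), assumes scipy is available for ordering the vertices, and deliberately plots the vertex arrays P.V directly rather than relying on any pytope plotting helper.

from scipy.spatial import ConvexHull

fig, ax = plt.subplots()
for P, color in zip((P1, P2, P3), ('tab:blue', 'tab:orange', 'tab:green')):
    V = P.V                  # vertices (computed on demand for P1 and P3)
    hull = ConvexHull(V)     # indices of the vertices in counterclockwise order
    ax.fill(V[hull.vertices, 0], V[hull.vertices, 1],
            facecolor='none', edgecolor=color)
ax.set_xlabel('x1')
ax.set_ylabel('x2')
plt.show()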
Example #9
    def test___init__(self):

        # Create an R^2 Polytope in H-representation from upper and lower bounds.
        # Check that dimension n and the matrices A, b, and H = [A b] are all
        # set correctly.
        lb1 = (1, -4)
        ub1 = (3, -2)

        n1 = len(ub1)
        A1 = np.vstack((-np.eye(n1), np.eye(n1)))
        b1 = np.concatenate((-np.asarray(lb1), np.asarray(ub1)))[:, np.newaxis]
        V1 = [[1, -4], [1, -2], [3, -4], [3, -2]]

        P1 = Polytope(lb=lb1, ub=ub1)

        self.assertTrue(P1.in_H_rep)
        self.assertFalse(P1.in_V_rep)
        self.assertEqual(P1.n, n1)
        self.assertTrue(np.all(P1.A == A1))
        self.assertTrue(np.all(P1.b == b1))
        self.assertTrue(np.all(P1.H == np.hstack((A1, b1))))
        self.assertTrue(all(v in P1.V.tolist() for v in V1))
        self.assertTrue(P1.in_V_rep)
        self.assertTrue(np.issubdtype(P1.A.dtype, np.floating))
        self.assertTrue(np.issubdtype(P1.b.dtype, np.floating))
        self.assertTrue(np.issubdtype(P1.H.dtype, np.floating))
        self.assertTrue(np.issubdtype(P1.V.dtype, np.floating))

        # Create an R^2 Polytope in V-representation from a list of four vertices
        # Check that dimension n and vertex list V are set correctly
        V2 = np.array([[1, 1], [-1, 1], [-1, -1], [1, -1]])
        n2 = V2.shape[1]

        P2 = Polytope(V2)

        self.assertTrue(P2.in_V_rep)
        self.assertFalse(P2.in_H_rep)
        self.assertEqual(P2.n, n2)
        self.assertTrue(all(v in P2.V.tolist() for v in V2.tolist()))
        self.assertTrue(np.issubdtype(P2.V.dtype, np.floating))

        # Create an R^2 Polytope in H-representation by specifying A and b in
        # Ax <= b. Check that dimension n and vertex list V are set correctly
        A3 = [[-1, 0], [0, -1], [1, 1]]
        b3 = (0, 0, 2)
        n3 = 2
        H3 = np.hstack((A3, np.asarray(b3, dtype=float)[:, np.newaxis]))
        V3 = [[0, 0], [0, 2], [2, 0]]

        P3 = Polytope(A3, b3)

        self.assertTrue(P3.in_H_rep)
        self.assertFalse(P3.in_V_rep)
        self.assertEqual(P3.n, n3)
        self.assertTrue(np.all(P3.A == np.asarray(A3, dtype=float)))
        self.assertTrue(
            np.all(P3.b == np.asarray(b3, dtype=float)[:, np.newaxis]))
        self.assertTrue(np.all(P3.H == H3))
        self.assertTrue(all(v in P3.V.tolist() for v in V3))
        self.assertTrue(P3.in_V_rep)
        self.assertTrue(np.issubdtype(P3.A.dtype, np.floating))
        self.assertTrue(np.issubdtype(P3.b.dtype, np.floating))
        self.assertTrue(np.issubdtype(P3.H.dtype, np.floating))
        self.assertTrue(np.issubdtype(P3.V.dtype, np.floating))

        # Ensure illegal use of the constructor raises an error.
        with self.assertRaises(ValueError):
            Polytope(V2, A=A3, b=b3)
        with self.assertRaises(ValueError):
            Polytope(A3, b3, V=V2)
        with self.assertRaises(ValueError):
            Polytope(A=A3)
        with self.assertRaises(ValueError):
            Polytope(b=b3)
        with self.assertRaises(ValueError):
            Polytope(V2, lb=lb1, ub=ub1)
        with self.assertRaises(ValueError):
            Polytope(A3, b3, lb=lb1, ub=ub1)
Example #10
 
print("Loading parameters")
# #  Loading all system parameters
A, B, U, nx, nu, wub_true, wlb_true, y_0, Q, R, N, dt, Pinf = sys_load()  # loads relevant system dynamics, cost, and noise matrices

nsamples = 50 # Number of samples (n) used to construct V_hat(n)
num_iterations = 1000 # Number of rollouts to test for each V_hat(n)


# Simulated noise parameters for mujoco (not needed in experiment)
lower = -0.05
upper = 0.05
mu = 0.0
sigma = 0.03 
v_samples = scipy.stats.truncnorm.rvs(
    (lower - mu) / sigma, (upper - mu) / sigma,
    loc=mu, scale=sigma, size=(2, 20000))  # full collection of noise samples
trueV = Polytope(lb=np.ones(2) * lower, ub=np.ones(2) * upper)  # true noise support, which is unknown in experiments

W = Polytope(lb=wlb_true * ones([nx, 1]), ub=wub_true * ones([nx, 1]))  # additive uncertainty, not currently used

print("Loading Stabilizing Controller")
Ks, Ke = DefineStabilizingController(A, B, Q, R)  # gains for the feedback controller

 
# L,_,_ = dlqr(A,eye(nx),eye(nx),eye(nx)*10);
L = block_diag(0.25,0.267) # Gain for observer


conf = 0.9975  # desired confidence level
 
print("Startin constructing V") 
Example #11
def eps_MRPI(A, W, epsilon, s_max=20):
    """ Determines an outer epsilon-approximation of a minimal RPI set.
  
  Implements Algorithm 1 of Raković et al. [1] for determining an outer
  epsilon-approximation of the minimal RPI set for the autonomous system
    x+ = A*x + w
  using the following algorithm (copied directly from the paper and referenced 
  throughout the code):
  
    ALGORITHM 1: Computation of an RPI, outer epsilon-approximation of the MRPI 
    set F_infinity
    REQUIRE: A, W, and epsilon > 0
    ENSURE: F(alpha, s) such that 
        F_infinity <= F(alpha, s) <= F_infinity + B_infinity^n(epsilon)
      1: Choose any s in N (ideally, set s <- 0).
      2: REPEAT
      3:   Increment s by one.
      4:   Compute alpha^o(s) as in (11) and set alpha <- alpha^o(s).
      5:   Compute M(s) as in (13).
      6: UNTIL alpha <= epsilon / (epsilon + M(s))
      7: Compute F_s as the Minkowski sum (2) and scale it to give 
         F(alpha, s) := (1 - alpha)^(-1) * F_s.

  The s-term Minkowski sum (2) is computed in V-rep; computing the sum in
  H-rep can be both slow and numerically more challenging.

  Args:
    A: A numpy array (the state transition matrix --  must be strictly stable).
    W: A Polytope instance that bounds the disturbance w (must be compact and
      contain the origin).
    epsilon: A (positive) error bound (the radius of the infinity-norm ball).
    s_max: An optional maximum value of s, at which the algorithm terminates.

  Returns:
    F_alpha_s: A Polytope instance that is the outer-epsilon approximation of
      the MRPI set for (A, W).
    result: A dict with keys
      alpha: A scalar in [0, 1]:  A^s W subset alpha W  (Eq. (4)).
      s: A positive integer:  F_alpha_s := (1 - alpha)^(-1) F_s  (Eq. (5)).
      M: A numpy array (shape (s + 1,)) of the numbers M(k), k = 0, ..., s.
        The last element, M[-1], is M(s), which satisfies
        alpha <= epsilon / (epsilon + M(s))  (Eq. (14)).
      status: 0 if the algorithm terminated successfully, otherwise -1.
      alpha_o_s: A numpy array (shape (s + 1,)) of the number alpha at
        every iteration k = 0, ..., s.
      F_s: A numpy array of Polytope instances: F_s[s] is the s-term Minkowski
        sum from i = 0 to s over A^i W  (Eq. (2)).
      eps_min: The minimal epsilon that does not require increasing s.

  Raises:
    ValueError: An argument did not satisfy a necessary condition or the support
      function could not be evaluated successfully.

  Paper reference:
  [1] Raković, S.V., Kerrigan, E.C., Kouramas, K.I., & Mayne, D.Q. (2005).
  Invariant approximations of the minimal robust positively invariant set. IEEE
  Transactions on Automatic Control, 50(3), 406-410.
  """

    status = -1  # set to 0 at successful termination (as in SciPy's linprog)

    m, n = A.shape
    if m != n:
        raise ValueError('A must be a square matrix')

    # The disturbance set W is in the form
    # W := {w in R^n | f_i' * w <= g_i, i in I}
    W.minimize_V_rep()
    F = W.A  # the rows of A are the f_i' in the H-rep [A b] of W
    g = W.b  # the right-hand side b in the H-rep [A b] of W
    I = g.size  # the number of inequalities in the H-rep of W

    if not all(g > 0):
        raise ValueError(
            'W does not contain the origin: g > 0 is not satisfied')

    # array of upper bounds on alpha values -- the scaling factor in the subset
    # condition A^s W subset alpha * W  (Eq. (10))
    alpha_o_s = np.full(s_max, np.nan)

    # To determine M(s) (used to bound the approximation error on F(alpha, s)):
    # Store support functions for each power of A, A^(s-1),
    # and each direction j = 1, ..., n. One row per s, each row has n support
    # functions for A^(s-1) positive and n for A^(s-1) negative; see (13).
    # M(s) is the maximum of all elements of each row s.
    # Store all values used to determine M(s) -- this is not necessary but useful
    # when debugging numerically challenging cases. Note that the first row
    # (s = 0) remains all zero (which is OK).
    M_s_row = np.zeros((s_max, 2 * n))  # each M(s) is the max over 2n values
    M = np.full(s_max, np.nan)  # M[s] is that maximum for each s

    # Pre-compute the powers of A: A^s for s = 0, ..., s_max - 1
    A_pwr = np.stack([np.linalg.matrix_power(A, i) for i in range(s_max)])

    alpha_o = np.full(I, np.nan)

    # Step 1: Choose any s in N [natural numbers] (ideally, set s <- 0).
    s = 0

    # Step 2: repeat
    while s < s_max - 1:

        # Step 3: Increment s by one.
        s += 1

        # Step 4: Compute alpha^o(s) as in (11) and set alpha <- alpha^o(s).
        # alpha^o(s) = max_{i in I} h_W((A^s)' f_i) / g_i
        for i in range(I):
            fi = F[i, :].T
            h_W_i, status = W.support(A_pwr[s].T @ fi)
            if not status.success:
                print(f'Unsuccessful evaluation of the support function '
                      f"h_W((A^{s})' * f_{i}): {status.message}")
            alpha_o[i] = h_W_i / g[i]
        alpha_o_s[s] = np.max(alpha_o)
        alpha = alpha_o_s[s]

        # Step 5: Compute M(s) as in (13).
        # M(s) = max_j {sum_i(h_W_sm1_pos_j), sum_i(h_W_sm1_neg_j)}  (Eq. (13))
        # At iteration s, evaluate the support for the rows of A^(s-1) and use the
        # supports evaluated at previous iterations s to evaluate the sum over i,
        # i = 0, ..., s - 1.
        h_W_sm1_pos_j = np.full(
            n, np.nan)  # h_W((A^(s-1))' * e_j), j = 0, ..., n-1
        h_W_sm1_neg_j = np.full(
            n, np.nan)  # h_W((-A^(s-1))' * e_j), j = 0, ..., n-1
        # Evaluate support in direction +- (A^i)' * e_j, with e_j the jth standard
        # basis vector in R^n. That is, (A^i)' * e_j is the jth column of (A^i)', or
        # the jth row of A^i (A_pwr_i[j])
        for j in range(n):
            A_pwr_i = A_pwr[s - 1]  # i = 0, ..., s - 1
            h_W_sm1_pos_j[j], status_lhs = W.support(A_pwr_i[j])
            h_W_sm1_neg_j[j], status_rhs = W.support(-A_pwr_i[j])
            if not all(status.success for status in (status_lhs, status_rhs)):
                raise ValueError(
                    f'Unsuccessful evaluation of the support function in '
                    f'the direction of row {j} of A^{s - 1} (s = {s})')
        # Accumulate the running sum over i = 0, ..., s - 1 of the 2n support-
        # function values h_W((A^i)' * e_j) and h_W((-A^i)' * e_j), j = 0, ..., n-1:
        M_s_row[s] = M_s_row[s - 1] + np.concatenate(
            (h_W_sm1_pos_j, h_W_sm1_neg_j))
        # M(s) is the maximum over these 2n accumulated sums:
        M[s] = np.max(M_s_row[s])  # Eq. (13), see above

        # Step 6: until alpha <= epsilon / (epsilon + M(s))
        if alpha <= epsilon / (epsilon + M[s]):
            status = 0  # success
            break

    s_final = s

    # Step 7: Compute F_s as the Minkowski sum (2) and scale it to give
    # F(alpha, s) = (1 - alpha)^(-1) F_s.
    # F_s = sum_{i = 0}^{s - 1} A^i W,  F_0 = {0}  (Eq. (2))
    F_s = np.full(s_final + 1, Polytope(n=n))  # F_s, s = 0, ..., s_final
    for s in range(1, s_final + 1):  # determine F_s for s = 1, ..., s_final
        F_s[s] = F_s[s - 1] + A_pwr[s - 1] * W  # F_s[0] is empty
        F_s[s].minimize_V_rep()  # critical when s_final is large
    # Scale to obtain the epsilon-approximation of the minimal RPI:
    F_alpha_s = F_s[s_final] * (1 / (1 - alpha))
    # TODO: Improve performance for large s_final by not constructing polytopes
    # for every s -- instead compute the vertices directly for every power of A
    # and add them together at the end (and finally remove redundant vertices)

    # The smallest epsilon for s_final terms in the Minkowski sum:
    eps_min = M[s_final] * alpha / (1 - alpha)

    result = {
        'alpha': alpha,
        's': s_final,
        'M': M[:s_final + 1],
        'status': status,
        'alpha_o_s': alpha_o_s[:s_final + 1],
        'F_s': F_s,
        'eps_min': eps_min
    }

    return F_alpha_s, result
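A minimal usage sketch for eps_MRPI (not part of the original file, and assuming the function above and pytope are importable): the system matrix and disturbance box below are made-up illustrative values that satisfy the requirements stated in the docstring (A strictly stable, W compact and containing the origin).

import numpy as np
from pytope import Polytope

A = np.array([[0.5, 0.2],
              [0.0, 0.4]])                    # spectral radius < 1
W = Polytope(lb=(-0.1, -0.1), ub=(0.1, 0.1))  # box disturbance set containing 0

F_alpha_s, result = eps_MRPI(A, W, epsilon=1e-3, s_max=30)

print('status:', result['status'], ' s =', result['s'], ' alpha =', result['alpha'])
print('smallest epsilon achievable with this s:', result['eps_min'])
print('vertices of the outer approximation:\n', F_alpha_s.V)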