Code example #1
def plot_polytope_3d(A, b, ax=None, color='red', trans=0.2):
    verts = np.array(ppm.compute_polytope_vertices(A, b))
    # compute the triangles that make up the convex hull of the data points
    hull = ConvexHull(verts)
    triangles = [verts[s] for s in hull.simplices]
    # combine co-planar triangles into a single face
    faces = Faces(triangles, sig_dig=1).simplify()
    # plot
    if ax is None:
        ax = a3.Axes3D(plt.figure())

    pc = a3.art3d.Poly3DCollection(faces,
                                   facecolor=color,
                                   edgecolor="k",
                                   alpha=trans)
    ax.add_collection3d(pc)
    # define view
    yllim, ytlim = ax.get_ylim()
    xllim, xtlim = ax.get_xlim()
    zllim, ztlim = ax.get_zlim()
    x = verts[:, 0]
    x = np.append(x, [xllim, xtlim])
    y = verts[:, 1]
    y = np.append(y, [yllim, ytlim])
    z = verts[:, 2]
    z = np.append(z, [zllim, ztlim])
    ax.set_xlim(np.min(x) - 1, np.max(x) + 1)
    ax.set_ylim(np.min(y) - 1, np.max(y) + 1)
    ax.set_zlim(np.min(z) - 1, np.max(z) + 1)
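
A minimal, self-contained usage sketch for the function above (an illustration only, assuming numpy, scipy, matplotlib and pypoman are installed); it skips the coplanar-face merging done by the Faces helper and plots the raw hull triangles of a unit cube:

import numpy as np
import pypoman
import matplotlib.pyplot as plt
from scipy.spatial import ConvexHull
from mpl_toolkits.mplot3d.art3d import Poly3DCollection

# Unit cube 0 <= x, y, z <= 1 in H-representation A x <= b
A = np.vstack([np.eye(3), -np.eye(3)])
b = np.hstack([np.ones(3), np.zeros(3)])

verts = np.array(pypoman.compute_polytope_vertices(A, b))
hull = ConvexHull(verts)

ax = plt.figure().add_subplot(projection="3d")
ax.add_collection3d(Poly3DCollection([verts[s] for s in hull.simplices],
                                     facecolor="red", edgecolor="k", alpha=0.2))
ax.set_xlim(-1, 2); ax.set_ylim(-1, 2); ax.set_zlim(-1, 2)
plt.show()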
Code example #2
    def vertices(self):
        # Flip the sign of the inequality constraints so that they are in the
        # A x <= b form expected by pypoman.
        A_ineq = -1 * self.A_ineq
        b_ineq = -1 * self.b_ineq
        A_eq = self.A_eq
        b_eq = self.b_eq
        # Encode each equality A_eq x = b_eq as the pair of inequalities
        # A_eq x <= b_eq and -A_eq x <= -b_eq.
        A = np.vstack((A_ineq, A_eq, -1 * A_eq))
        b = np.concatenate((b_ineq, b_eq, -1 * b_eq))

        vertices = pypoman.compute_polytope_vertices(A, b)

        return np.array(vertices)
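
The same stacking pattern in standalone form (a sketch with made-up constraint values, and with the inequalities already given as A_ineq x <= b_ineq, so no sign flip is needed):

import numpy as np
import pypoman

A_ineq = np.array([[1.0, 0.0], [0.0, 1.0]])  # x <= 1, y <= 1
b_ineq = np.array([1.0, 1.0])
A_eq = np.array([[1.0, 1.0]])                # x + y = 1
b_eq = np.array([1.0])

# Each equality becomes a pair of opposite inequalities.
A = np.vstack((A_ineq, A_eq, -A_eq))
b = np.concatenate((b_ineq, b_eq, -b_eq))
print(pypoman.compute_polytope_vertices(A, b))  # the segment endpoints (1, 0) and (0, 1)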
Code example #3
    def __init__(self,
                 start,
                 goal,
                 obstacle_list,
                 rand_area,
                 expand_dis=3.0,
                 path_resolution=0.01,
                 goal_sample_rate=5,
                 max_iter=5000):
        """
        Setting Parameters

        start: start set [A_start, b_start]; its Chebyshev center is used as the start point
        goal: goal set [A_goal, b_goal]; its Chebyshev center is used as the goal point
        obstacle_list: obstacle polytopes [(A, b), ...]
        rand_area: random sampling area [min, max]
        """
        start_poly = pc.Polytope(start[0], start[1])
        end_poly = pc.Polytope(goal[0], goal[1])
        self.start_vtc = ppm.compute_polytope_vertices(start[0], start[1])
        self.end_vtc = ppm.compute_polytope_vertices(goal[0], goal[1])
        start_pt = start_poly.chebXc
        end_pt = end_poly.chebXc

        self.start = self.Node(start_pt[0], start_pt[1])
        self.end = self.Node(end_pt[0], end_pt[1])
        self.min_rand = rand_area[0]
        self.max_rand = rand_area[1]
        self.expand_dis = expand_dis
        self.path_resolution = path_resolution
        self.goal_sample_rate = goal_sample_rate
        self.max_iter = max_iter
        self.obstacle_list = obstacle_list
        self.node_list = []
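
A reduced sketch of the start/goal handling above (hypothetical A_start/b_start for a unit square), relying on the polytope package's chebXc attribute and pypoman exactly as in the constructor:

import numpy as np
import polytope as pc
import pypoman as ppm

# Start set 0 <= x, y <= 1 given as [A_start, b_start]
A_start = np.vstack([np.eye(2), -np.eye(2)])
b_start = np.hstack([np.ones(2), np.zeros(2)])

start_poly = pc.Polytope(A_start, b_start)
start_pt = start_poly.chebXc                                 # Chebyshev center, here (0.5, 0.5)
start_vtc = ppm.compute_polytope_vertices(A_start, b_start)  # corners, kept for plotting
print(start_pt, start_vtc)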
Code example #4
def vertex_fun(env):
    S = env.S
    C = env.C
    custs = env.customers
    num_consts = 1 + 2 * C

    # pypoman (pm) for vertices
    # uncertainty set is C-dimensional
    A = np.concatenate((np.ones([1, C]), np.eye(C), -1 * np.eye(C)), axis=0)
    b = np.concatenate((np.array([env.gamma]).reshape(
        1, 1), env.z_hat * np.ones([1, C]), np.zeros([1, C])),
                       axis=1).reshape(num_consts)
    vertices_array = np.array(pm.compute_polytope_vertices(A, b))
    max_sum_vertices = max(np.sum(vertices_array, axis=1))
    needed_vertices = vertices_array[np.where(
        np.sum(vertices_array, axis=1) >= max_sum_vertices)]
    return needed_vertices
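
For concreteness, a tiny standalone version of the same construction with hypothetical values gamma = 1.5, z_hat = 1 and C = 2: the rows of A encode the budget constraint sum(z) <= gamma together with the box 0 <= z <= z_hat, and only the budget-exhausting vertices are kept.

import numpy as np
import pypoman as pm

C, gamma, z_hat = 2, 1.5, 1.0
A = np.concatenate((np.ones([1, C]), np.eye(C), -1 * np.eye(C)), axis=0)
b = np.concatenate((np.array([gamma]), z_hat * np.ones(C), np.zeros(C)))

vertices_array = np.array(pm.compute_polytope_vertices(A, b))
max_sum = np.sum(vertices_array, axis=1).max()
# Keep the vertices whose coordinates sum to the maximum, e.g. (1, 0.5) and (0.5, 1)
print(vertices_array[np.sum(vertices_array, axis=1) >= max_sum])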
Code example #5
File: pymanoid_common.py (project: ytwboxing/jet-leg)
    def compute(self, draw_height=None):
        assert len(self.working_set) > 0 and len(self.working_set[0]) == 2
        self.all_vertices = []
        for i_cur, p_cur in enumerate(self.working_set):
            p_cur = array(p_cur)
            A_voronoi, b_voronoi = [], []
            for i_other, p_other in enumerate(self.working_set):
                if i_other == i_cur:
                    continue
                p_other = array(p_other)
                p_mid = 0.5 * (p_other + p_cur)
                a = p_other - p_cur
                A_voronoi.append(a)
                b_voronoi.append(dot(a, p_mid))
            A_voronoi = vstack(A_voronoi)
            b_voronoi = hstack(b_voronoi)

            self.stance.com.set_x(p_cur[0])
            self.stance.com.set_y(p_cur[1])
            self.robot.ik.solve(warm_start=True)
            proj_vertices = compute_local_actuation_dependent_polygon(
                self.robot, self.stance, method="bretl")
            A_proj, b_proj = compute_polytope_halfspaces(proj_vertices)
            A = vstack([A_proj, A_voronoi])
            b = hstack([b_proj, b_voronoi])
            if draw_height is not None and (dot(A, p_cur) > b).any():
                self.sample_handles.append(
                    draw_point([p_cur[0], p_cur[1], draw_height],
                               color='r',
                               pointsize=5e-3))
                continue
            elif draw_height is not None:
                self.sample_handles.append(
                    draw_point([p_cur[0], p_cur[1], draw_height],
                               color='g',
                               pointsize=5e-3))
            vertices = pypoman.compute_polytope_vertices(A, b)
            if draw_height is not None:
                self.polygons.append(
                    draw_horizontal_polygon(vertices,
                                            draw_height,
                                            combined='b-#'))
            self.all_vertices.extend(vertices)
        return self.all_vertices
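
The Voronoi-cell construction in the loop above, isolated into a short sketch (hypothetical 2-D points, no robot or stance): each neighbour contributes one half-space a . x <= a . p_mid, and pypoman enumerates the vertices of the resulting cell.

import numpy as np
import pypoman

points = np.array([[0.0, 0.0], [1.0, 0.0], [-1.0, 0.0], [0.0, 1.0], [0.0, -1.0]])
p_cur = points[0]  # cell of the central point, which is bounded here

A_voronoi, b_voronoi = [], []
for p_other in points[1:]:
    a = p_other - p_cur               # outward normal of the bisecting half-space
    p_mid = 0.5 * (p_other + p_cur)   # midpoint lies on the bisector
    A_voronoi.append(a)
    b_voronoi.append(np.dot(a, p_mid))

A = np.vstack(A_voronoi)
b = np.hstack(b_voronoi)
print(pypoman.compute_polytope_vertices(A, b))  # the four corners (+/-0.5, +/-0.5)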
Code example #6
    def draw_graph(self, rnd=None):
        plt.clf()
        if rnd is not None:
            plt.plot(rnd.x, rnd.y, "^k")
        for node in self.node_list:
            if node.parent:
                plt.plot(node.path_x, node.path_y, "-g")

        for (A, b) in self.obstacle_list:
            vtc = ppm.compute_polytope_vertices(A, b)
            ppm.plot_polygon(vtc, color='r')
            '''
            plt.plot(ox, oy, "ok", ms=30 * size)
            '''

        ppm.plot_polygon(self.start_vtc, color='b')
        ppm.plot_polygon(self.end_vtc, color='g')
        # plt.axis([-2, 15, -2, 15])
        plt.grid(True)
        plt.pause(0.01)
Code example #7
    def get_inter_prob(self, X, scaling_factors):
        """
        Compute intersection probability
        In the notation of Matthias' thesis, the scaling_factors are:
            gamma=m(0), m(1), ..., m(k-1)
        """
        n = self.G.shape[0]
        assert n == 2  # what follows is only valid in 2D
        scaling_factors = np.array(scaling_factors)
        k = scaling_factors.shape[0]

        # calling pc.extreme on this one is fast, maybe because it is a plain rectangle?
        X = Polygon(pc.extreme(X.to_poly()))

        confid_sets = self.get_confidence_sets(scaling_factors)
        # append m=0 to the set of scaling factors
        scaling_factors = np.append(scaling_factors, 0.0)

        h = ((2.0 * np.pi)**(-n / 2.0) * np.linalg.det(self.Sig)**(-0.5) *
             np.exp(-0.5 * np.array(scaling_factors)**2.0))

        # compute intersection volumes
        V = np.zeros((k, ))
        for i in range(k):
            A, b = confid_sets[i].to_H()
            vertices = compute_polytope_vertices(
                A, b)  # fast C code for vertex enumeration
            vertices.sort(
                key=lambda c: math.atan2(c[0], c[1]))  # sort those vertices
            X2 = Polygon(vertices)  # construct a Polygon
            V[i] = X2.intersection(
                X).area  # this is faster than intersect of polytope

        # compute intersection prob
        prob = 1 - erf(scaling_factors[0] / np.sqrt(2.0))**(2.0 * n)
        prob += h[0] * V[0]
        for i in range(k):
            prob += (h[i + 1] - h[i]) * V[i]

        return min(prob, 1.0)
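
The polytope-to-Polygon conversion used inside the loop, as a standalone sketch (assuming shapely is available); here the vertices are sorted by angle around their centroid rather than around the origin, which is safer when the polytope does not contain the origin:

import math
import numpy as np
import pypoman
from shapely.geometry import Polygon

# Unit square centered at (2, 2): 1.5 <= x, y <= 2.5
A = np.vstack([np.eye(2), -np.eye(2)])
b = np.array([2.5, 2.5, -1.5, -1.5])

vertices = pypoman.compute_polytope_vertices(A, b)
center = np.mean(vertices, axis=0)
vertices.sort(key=lambda v: math.atan2(v[1] - center[1], v[0] - center[0]))
print(Polygon(vertices).area)  # 1.0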
Code example #8
File: Problem.py (project: rte-france/learning-lp)
    def compute_vertices_poly(self):
        """
        Computes the vertices of the domain of the linear
        optimisation problem and saves them in self.domain.

        This method is only used when self.is_simple is True. In that case, the computed
        vertices are used by the module problem_generator.py to generate new problems
        out of this Problem instance.
        """
        if self.domain.domain_vertices is None:
            matrix, rhs = self.get_matrix()
            index = len(matrix[0])  # number of decision variables

            # Append the nonnegativity constraints -x <= 0
            identity = -np.identity(index)
            zeros = np.zeros(index)

            matrix = np.concatenate((matrix, identity))
            rhs = np.concatenate((rhs, zeros))

            vertices = pypoman.compute_polytope_vertices(matrix, rhs)

            self.domain.set_domain_vertices(vertices)
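
A stripped-down version of the same idea with a hypothetical constraint matrix in place of self.get_matrix(): the feasible domain {x >= 0, matrix x <= rhs} is closed by appending -I x <= 0 before enumerating its vertices.

import numpy as np
import pypoman

matrix = np.array([[1.0, 1.0]])  # x + y <= 1
rhs = np.array([1.0])
index = matrix.shape[1]          # number of decision variables

matrix = np.concatenate((matrix, -np.identity(index)))
rhs = np.concatenate((rhs, np.zeros(index)))

print(pypoman.compute_polytope_vertices(matrix, rhs))  # (0, 0), (1, 0) and (0, 1)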
Code example #9
    def __call__(self):
        if self.mode == Mode.CHANNEL:
            raise NotImplementedError(
                "Wang interval works only for state tomography")
        EPS = 1e-15
        rho = self.tmg.point_estimate('lin', physical=False)
        dim = 2**self.tmg.state.n_qubits
        bloch_dim = dim**2 - 1

        frequencies = np.clip(
            self.tmg.raw_results / self.tmg.n_measurements[:, None], EPS,
            1 - EPS)

        povm_matrix = (np.reshape(
            self.tmg.povm_matrix * self.tmg.n_measurements[:, None, None] /
            np.sum(self.tmg.n_measurements),
            (-1, self.tmg.povm_matrix.shape[-1])) * dim *
                       self.tmg.povm_matrix.shape[0])

        max_delta = self._count_delta(self.max_confidence, frequencies)

        if self.method == 'coarse':
            A = _left_inv(povm_matrix)
            prob_dim = povm_matrix.shape[0]
            coef1 = (np.linalg.norm(A, ord=2) *
                     (self.tmg.povm_matrix.shape[1] - 1) * np.sqrt(prob_dim))
            coef2 = np.linalg.norm(A, ord=2)**2 * prob_dim
            max_dist = max(max_delta * coef1, np.sqrt(max_delta * coef2))
            dist_dummy = np.linspace(0, max_dist, self.n_points)
            deltas = np.maximum(dist_dummy / coef1, dist_dummy**2 / coef2)
        else:
            deltas = np.linspace(0, max_delta, self.n_points)
            dist_dummy = []
            A = np.ascontiguousarray(povm_matrix[:, 1:])
            for delta in deltas:
                b = np.clip(np.hstack(frequencies) + delta, EPS,
                            1 - EPS) - povm_matrix[:, 0] / dim
                if self.method == 'exact':
                    vertices = pypoman.compute_polytope_vertices(A, b)
                    vertex_states = [
                        _make_feasible(Qobj(vertex)) for vertex in vertices
                    ]
                    if vertices:
                        radius = max([
                            self.tmg.dst(vertex_state, rho)
                            for vertex_state in vertex_states
                        ])
                    else:
                        radius = 0
                elif self.method == 'bbox':
                    lb, ub = pc.Polytope(A, b).bounding_box
                    volume = np.prod(ub - lb)
                    radius = ((volume * math.gamma(bloch_dim / 2 + 1))
                              **(1 / bloch_dim) / math.sqrt(math.pi))
                elif self.method == 'approx':
                    volume = compute_polytope_volume(pc.Polytope(A, b))
                    radius = ((volume * math.gamma(bloch_dim / 2 + 1))
                              **(1 / bloch_dim) / math.sqrt(math.pi))
                elif self.method == 'hit_and_run':
                    rho_bloch = rho.bloch[1:]
                    radius = find_max_distance_to_polytope(
                        A, b, rho_bloch, rho_bloch)
                else:
                    raise ValueError("Invalid value for argument `method`.")
                dist_dummy.append(radius)

        CLs_dummy = []
        for delta in deltas:
            CLs_dummy.append(self._count_confidence(delta, frequencies))
        cl_to_dist = interp1d(CLs_dummy, dist_dummy)
        CLs = np.linspace(0, self.max_confidence, self.n_points)
        dist = cl_to_dist(CLs)
        return dist, CLs
Code example #10
File: solver.py (project: vismayagrawal/syndisc)
def extreme_points(P, Px, SVDmethod='standard'):
    """
    Calculation of extreme points of the polytope S
    Parameters
    ----------
    P : np.ndarray
        binary matrix of transitions
    Px : np.ndarray
        distribution of the dataset
    SVDmethod : str
        'standard' does the full SVD
        'fast' uses a trick described in Appendix XX of the journal paper

    Returns
    -------
    sols : np.ndarray
        N-by-D array with N vertices of the polytope
    """
    if SVDmethod == 'standard':
        U, S, Vh = svd(P)

    elif SVDmethod == 'fast':
        PtimesPt = P * P.transpose()

        # multiplication by 1+epsilon for numerical stability
        U0, S0, Vh0 = svd((1 + 1e-20) * PtimesPt)
        S = np.sqrt(S0)

        # Faster than using U.transpose() as it avoids the transpose operation
        Vh = Vh0 * P

    else:
        raise ValueError("SVDMethod not recognised. Must be either \
                'standard' or 'fast'.")

    # Extract reduced A matrix using a small threshold to avoid numerical
    # problems
    rankP = np.sum(S > 1e-6)
    A = Vh[:rankP, :]

    b = np.matmul(A, Px)

    # Turn np.matrix into np.array for polytope computations
    A = np.array(A)
    b = np.array(b).flatten()

    # Build polytope and find extremes
    A = np.vstack([A, -A, -np.eye(A.shape[1])])
    b = np.hstack([b, -b, np.zeros(A.shape[1])])
    V = pm.compute_polytope_vertices(A, b)  # <-- magic happens here
    V = np.vstack(V)

    # To avoid numerical problems, manually cast all small negative values to 0
    if np.any(V < -1e-6):
        raise RuntimeError("Polytope vertices computation failed \
                (found negative vertices).")
    V[V < 0] = 0

    # Normalise the rows of V to 1, since they are conditional PMFs
    V = V / (V.sum(axis=1)[:, np.newaxis])

    # Eliminate vertices that are too similar (distance less than 1e-10)
    SS = cdist(V, V)
    BB = SS < 1e-10
    indices = [BB[:i, i].sum() for i in range(len(V))]
    idx = [bool(1 - indices[i]) for i in range(len(indices))]

    return V[idx]
Code example #11
def compute_volume(template: np.ndarray, item: np.ndarray):
    vertices = pypoman.compute_polytope_vertices(template, item)
    volume = ConvexHull(vertices).volume
    return volume
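
For instance, with the cube [-1, 1]^3 as a hypothetical template, the helper above should return a volume close to 8:

import numpy as np

template = np.vstack([np.eye(3), -np.eye(3)])  # |x_i| <= 1
item = np.ones(6)
print(compute_volume(template, item))          # approximately 8.0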
Code example #12
    def get_one_step_reachable_set(self, input_constraint, output_constraint):

        if isinstance(input_constraint, constraints.PolytopeConstraint):
            A_inputs = input_constraint.A
            b_inputs = input_constraint.b

            # Get bounds on each state from A_inputs, b_inputs
            try:
                vertices = np.stack(
                    pypoman.compute_polytope_vertices(A_inputs, b_inputs))
            except Exception:
                # Sometimes get arithmetic error... this may fix it
                vertices = np.stack(
                    pypoman.compute_polytope_vertices(A_inputs,
                                                      b_inputs + 1e-6))
            x_max = np.max(vertices, 0)
            x_min = np.min(vertices, 0)
            norm = np.inf
        elif isinstance(input_constraint, constraints.LpConstraint):
            x_min = input_constraint.range[..., 0]
            x_max = input_constraint.range[..., 1]
            norm = input_constraint.p
            A_inputs = None
            b_inputs = None
        else:
            raise NotImplementedError

        if isinstance(output_constraint, constraints.PolytopeConstraint):
            A_out = output_constraint.A
            num_facets = A_out.shape[0]
            bs = np.zeros((num_facets))
        elif isinstance(output_constraint, constraints.LpConstraint):
            A_out = np.eye(x_min.shape[0])
            num_facets = A_out.shape[0]
            ranges = np.zeros((num_facets, 2))
        else:
            raise NotImplementedError

        # Because there might be sensor noise, the NN could see a different set of
        # states than the system is actually in
        prev_state_max = torch.Tensor([x_max])
        prev_state_min = torch.Tensor([x_min])
        nn_input_max = prev_state_max
        nn_input_min = prev_state_min
        if self.dynamics.sensor_noise is not None:
            nn_input_max += torch.Tensor([self.dynamics.sensor_noise[:, 1]])
            nn_input_min += torch.Tensor([self.dynamics.sensor_noise[:, 0]])

        # Compute the NN output matrices (for the input constraints)
        num_control_inputs = self.dynamics.bt.shape[1]
        C = torch.eye(num_control_inputs).unsqueeze(0)
        lower_A, upper_A, lower_sum_b, upper_sum_b = self.network(
            method_opt=self.method_opt,
            norm=norm,
            x_U=nn_input_max,
            x_L=nn_input_min,
            upper=True,
            lower=True,
            C=C,
            return_matrices=True,
        )

        for i in range(num_facets):
            # For each dimension of the output constraint (facet/lp-dimension):
            # compute a bound of the NN output using the pre-computed matrices
            if A_out is None:
                A_out_torch = None
            else:
                A_out_torch = torch.Tensor([A_out[i, :]])

            # CROWN was initialized knowing dynamics, no need to pass them here
            # (unless they've changed, e.g., time-varying At matrix)
            (
                A_out_xt1_max,
                A_out_xt1_min,
            ) = self.network.compute_bound_from_matrices(
                lower_A,
                lower_sum_b,
                upper_A,
                upper_sum_b,
                prev_state_max,
                prev_state_min,
                norm,
                A_out_torch,
                A_in=A_inputs,
                b_in=b_inputs,
            )

            if isinstance(output_constraint, constraints.PolytopeConstraint):
                bs[i] = A_out_xt1_max
            elif isinstance(output_constraint, constraints.LpConstraint):
                ranges[i, 0] = A_out_xt1_min
                ranges[i, 1] = A_out_xt1_max
            else:
                raise NotImplementedError

        if isinstance(output_constraint, constraints.PolytopeConstraint):
            output_constraint.b = bs
        elif isinstance(output_constraint, constraints.LpConstraint):
            output_constraint.range = ranges
        else:
            raise NotImplementedError
        return output_constraint, {}
Code example #13
    def get_one_step_backprojection_set(self,
                                        output_constraint,
                                        input_constraint,
                                        num_partitions=None):
        # Given an output_constraint, compute the input_constraint
        # that ensures that starting from within the input_constraint
        # will lead to a state within the output_constraint

        # Extract elementwise bounds on xt1 from the lp-ball or polytope constraint
        if isinstance(output_constraint, constraints.PolytopeConstraint):
            A_t1 = output_constraint.A
            b_t1 = output_constraint.b[0]

            # Get bounds on each state from A_t1, b_t1
            try:
                vertices = np.stack(
                    pypoman.compute_polytope_vertices(A_t1, b_t1))
            except Exception:
                # Sometimes get arithmetic error... this may fix it
                vertices = np.stack(
                    pypoman.compute_polytope_vertices(A_t1, b_t1 + 1e-6))
            xt1_max = np.max(vertices, 0)
            xt1_min = np.min(vertices, 0)
            norm = np.inf
        elif isinstance(output_constraint, constraints.LpConstraint):
            xt1_min = output_constraint.range[..., 0]
            xt1_max = output_constraint.range[..., 1]
            norm = output_constraint.p
            A_t1 = None
            b_t1 = None
        else:
            raise NotImplementedError
        '''
        Step 1: 
        Find backreachable set: all the xt for which there is
        some u in U that leads to a state xt1 in output_constraint
        '''

        if self.dynamics.u_limits is None:
            u_min = -np.inf
            u_max = np.inf
        else:
            u_min = self.dynamics.u_limits[:, 0]
            u_max = self.dynamics.u_limits[:, 1]

        num_states = xt1_min.shape[0]
        num_control_inputs = 1
        xt = cp.Variable(xt1_min.shape + (2, ))
        ut = cp.Variable(num_control_inputs)

        A_t = np.eye(xt1_min.shape[0])
        num_facets = A_t.shape[0]
        coords = np.empty((2 * num_states, num_states))

        # For each dimension of the output constraint (facet/lp-dimension):
        # compute a bound of the NN output using the pre-computed matrices
        for i in range(num_facets):
            xt = cp.Variable(xt1_min.shape)
            ut = cp.Variable(num_control_inputs)

            constrs = []
            constrs += [u_min <= ut]
            constrs += [ut <= u_max]
            constrs += [
                self.dynamics.At @ xt + self.dynamics.bt @ ut <= xt1_max
            ]
            constrs += [
                self.dynamics.At @ xt + self.dynamics.bt @ ut >= xt1_min
            ]

            obj = A_t[i, :] @ xt
            prob = cp.Problem(cp.Minimize(obj), constrs)
            prob.solve()
            coords[2 * i, :] = xt.value
            prob = cp.Problem(cp.Maximize(obj), constrs)
            prob.solve()
            coords[2 * i + 1, :] = xt.value

        # min/max of each element of xt in the backreachable set
        ranges = np.vstack([coords.min(axis=0), coords.max(axis=0)]).T
        '''
        Step 2: 
        Partition the backreachable set (xt).
        For each cell in the partition:
        - relax the NN (use CROWN to compute matrices for affine bounds)
        - use the relaxed NN to compute bounds on xt1
        - use those bounds to define constraints on xt, and if valid, add
            to input_constraint
        '''

        # Setup the partitions
        if num_partitions is None:
            num_partitions = np.array([10, 10])
        input_range = ranges
        input_shape = input_range.shape[:-1]
        slope = np.divide((input_range[..., 1] - input_range[..., 0]),
                          num_partitions)

        # Set an empty Constraint that will get filled in
        input_constraint = constraints.PolytopeConstraint(A=[], b=[])

        # Iterate through each partition
        for element in product(
                *[range(num) for num in num_partitions.flatten()]):
            # Compute this partition's min/max xt values
            element_ = np.array(element).reshape(input_shape)
            input_range_ = np.empty_like(input_range)
            input_range_[..., 0] = input_range[..., 0] + np.multiply(
                element_, slope)
            input_range_[..., 1] = input_range[..., 0] + np.multiply(
                element_ + 1, slope)
            ranges = input_range_

            # Because there might be sensor noise, the NN could see a different
            # set of states than the system is actually in
            xt_min = ranges[..., 0]
            xt_max = ranges[..., 1]
            prev_state_max = torch.Tensor([xt_max])
            prev_state_min = torch.Tensor([xt_min])
            nn_input_max = prev_state_max
            nn_input_min = prev_state_min
            if self.dynamics.sensor_noise is not None:
                raise NotImplementedError
                # nn_input_max += torch.Tensor([self.dynamics.sensor_noise[:, 1]])
                # nn_input_min += torch.Tensor([self.dynamics.sensor_noise[:, 0]])

            # Compute the NN output matrices (for this xt partition)
            num_control_inputs = self.dynamics.bt.shape[1]
            C = torch.eye(num_control_inputs).unsqueeze(0)
            lower_A, upper_A, lower_sum_b, upper_sum_b = self.network(
                method_opt=self.method_opt,
                norm=norm,
                x_U=nn_input_max,
                x_L=nn_input_min,
                upper=True,
                lower=True,
                C=C,
                return_matrices=True,
            )

            # Extract numpy array from pytorch tensors
            upper_A = upper_A.detach().numpy()[0]
            lower_A = lower_A.detach().numpy()[0]
            upper_sum_b = upper_sum_b.detach().numpy()[0]
            lower_sum_b = lower_sum_b.detach().numpy()[0]

            # The NN matrices define three types of constraints:
            # - NN's resulting lower bnds on xt1 >= lower bnds on xt1
            # - NN's resulting upper bnds on xt1 <= upper bnds on xt1
            # - NN matrices are only valid within the partition
            A_NN, b_NN = range_to_polytope(ranges)
            A_ = np.vstack([(self.dynamics.At + self.dynamics.bt @ upper_A),
                            -(self.dynamics.At + self.dynamics.bt @ lower_A),
                            A_NN])
            b_ = np.hstack([
                xt1_max - self.dynamics.bt @ upper_sum_b,
                -xt1_min + self.dynamics.bt @ lower_sum_b, b_NN
            ])

            # If those constraints to a non-empty set, then add it to
            # the list of input_constraints. Otherwise, skip it.
            try:
                pypoman.polygon.compute_polygon_hull(A_, b_ + 1e-10)
                input_constraint.A.append(A_)
                input_constraint.b.append(b_)
            except Exception:
                continue

        # input_constraint contains lists for A, b
        return input_constraint, {}
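
The pattern of extracting elementwise state bounds from an H-polytope, used several times above, in isolation (hypothetical A_inputs/b_inputs; the retry with a slightly inflated b mirrors the workaround for occasional arithmetic errors in the vertex enumeration):

import numpy as np
import pypoman

A_inputs = np.vstack([np.eye(2), -np.eye(2)])  # box 1 <= x <= 2, -1 <= y <= 1
b_inputs = np.array([2.0, 1.0, -1.0, 1.0])

try:
    vertices = np.stack(pypoman.compute_polytope_vertices(A_inputs, b_inputs))
except Exception:
    # Occasionally the enumeration hits an arithmetic error; a tiny relaxation may help.
    vertices = np.stack(pypoman.compute_polytope_vertices(A_inputs, b_inputs + 1e-6))

x_min = np.min(vertices, 0)  # [ 1. -1.]
x_max = np.max(vertices, 0)  # [2. 1.]
print(x_min, x_max)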
Code example #14
File: test_pypoman.py (project: ytwboxing/jet-leg)
import numpy
import pypoman
import unittest

#class TestPypoman(unittest.TestCase):
#    def testPolytopeProjection(self):
A = numpy.array([[-1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                 [0, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                 [0, 0, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                 [0, 0, 0, -1, 0, 0, 0, 0, 0, 0, 0, 0],
                 [0, 0, 0, 0, -1, 0, 0, 0, 0, 0, 0, 0],
                 [0, 0, 0, 0, 0, -1, 0, 0, 0, 0, 0, 0],
                 [0, 0, 0, 0, 0, 0, -1, 0, 0, 0, 0, 0],
                 [0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0, 0],
                 [0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0],
                 [0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0],
                 [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0],
                 [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1],
                 [1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                 [0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0],
                 [0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0],
                 [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1],
                 [1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0],
                 [0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0],
                 [0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1]])
b = numpy.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 1, 2, 2, 1, 2, 3])
vertices = pypoman.compute_polytope_vertices(A, b)
print(vertices)
#unitTest = unittest.TestCase()
#unitTest.assertAlmostEqual(388, numpy.size(vertices, 0))
Code example #15
    def get_one_step_reachable_set(self, input_constraint, output_constraint):

        if isinstance(input_constraint, constraints.PolytopeConstraint):
            A_inputs = input_constraint.A
            b_inputs = input_constraint.b

            # Get bounds on each state from A_inputs, b_inputs
            try:
                vertices = np.stack(
                    pypoman.compute_polytope_vertices(A_inputs, b_inputs))
            except Exception:
                # Sometimes get arithmetic error... this may fix it
                vertices = np.stack(
                    pypoman.compute_polytope_vertices(A_inputs,
                                                      b_inputs + 1e-6))
            x_max = np.max(vertices, 0)
            x_min = np.min(vertices, 0)
            norm = np.inf
        elif isinstance(input_constraint, constraints.LpConstraint):
            x_min = input_constraint.range[..., 0]
            x_max = input_constraint.range[..., 1]
            norm = input_constraint.p
            A_inputs = None
            b_inputs = None
        else:
            raise NotImplementedError

        if isinstance(output_constraint, constraints.PolytopeConstraint):
            A_out = output_constraint.A
            num_facets = A_out.shape[0]
            bs = np.zeros((num_facets))
        elif isinstance(output_constraint, constraints.LpConstraint):
            A_out = np.eye(x_min.shape[0])
            num_facets = A_out.shape[0]
            ranges = np.zeros((num_facets, 2))
        else:
            raise NotImplementedError

        # Because there might be sensor noise, the NN could see a different set of
        # states than the system is actually in
        prev_state_max = torch.Tensor([x_max])
        prev_state_min = torch.Tensor([x_min])
        nn_input_max = prev_state_max
        nn_input_min = prev_state_min
        if self.dynamics.sensor_noise is not None:
            nn_input_max += torch.Tensor([self.dynamics.sensor_noise[:, 1]])
            nn_input_min += torch.Tensor([self.dynamics.sensor_noise[:, 0]])

        # Compute the NN output matrices (for the input constraints)
        num_control_inputs = self.dynamics.bt.shape[1]
        C = torch.eye(num_control_inputs).unsqueeze(0)

        u_min, u_max = self.get_min_max_controls(nn_input_max, nn_input_min, C)

        # # Sample a grid of pts from the input set, to get exact NN output polytope
        # x0 = np.linspace(x_min[0], x_max[0], num=10)
        # x1 = np.linspace(x_min[1], x_max[1], num=10)
        # xx, yy = np.meshgrid(x0, x1)
        # pts = np.reshape(np.dstack([xx, yy]), (-1, 2))
        # sampled_outputs = self.network.forward(torch.Tensor(pts))

        # # Print and compare the two bounds numerically
        # sampled_output_min = np.min(sampled_outputs.data.numpy())
        # sampled_output_max = np.max(sampled_outputs.data.numpy())
        # print("(u_min, u_max): ({:.4f}, {:.4f})".format(u_min, u_max))
        # print("(sampled_min, sampled_max): ({:.4f}, {:.4f})".format(sampled_output_min, sampled_output_max))

        for i in range(num_facets):
            # For each dimension of the output constraint (facet/lp-dimension):
            # compute a bound of the NN output using the pre-computed matrices

            (
                A_out_xt1_max,
                A_out_xt1_min,
            ) = self.compute_bound_cf(
                u_min,
                u_max,
                x_max,
                x_min,
                A_out[i, :],
                A_in=A_inputs,
                b_in=b_inputs,
            )

            if isinstance(output_constraint, constraints.PolytopeConstraint):
                bs[i] = A_out_xt1_max
            elif isinstance(output_constraint, constraints.LpConstraint):
                ranges[i, 0] = A_out_xt1_min
                ranges[i, 1] = A_out_xt1_max
            else:
                raise NotImplementedError

        if isinstance(output_constraint, constraints.PolytopeConstraint):
            output_constraint.b = bs
        elif isinstance(output_constraint, constraints.LpConstraint):
            output_constraint.range = ranges
        else:
            raise NotImplementedError
        return output_constraint, {}
Code example #16
def vertices(A, b):
    # Enumerate the vertices of the polytope {x | A x <= b}.
    vertices = pypoman.compute_polytope_vertices(A, b)

    return np.array(vertices)
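
As a closing check (a sketch only), the vertex enumeration can be verified against pypoman's dual routine compute_polytope_halfspaces, the same function already used in code example #5:

import numpy as np
import pypoman

# Triangle with vertices (0, 0), (1, 0), (0, 1)
V = [np.array([0.0, 0.0]), np.array([1.0, 0.0]), np.array([0.0, 1.0])]
A, b = pypoman.compute_polytope_halfspaces(V)     # V-representation -> H-representation
V_back = pypoman.compute_polytope_vertices(A, b)  # H-representation -> V-representation
print(sorted(tuple(np.round(v, 6)) for v in V_back))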