def set_birds_eye_view_spectator(spectator: carla.Actor,
                                 followed_location: carla.Location,
                                 above: float):
    birds_eye_view = carla.Transform(
        carla.Location(x=followed_location.x, y=followed_location.y, z=above),
        carla.Rotation(pitch=-90),
    )
    spectator.set_transform(birds_eye_view)
def set_spectator_above_actor(spectator: carla.Actor,
                              transform: np.ndarray) -> None:
    '''
    Places the spectator above the actor's position, pitched slightly downward.

    :param spectator: spectator actor to reposition
    :param transform: actor transform as a numpy array, converted via `numpy_to_transform`
    :return: None
    '''
    transform = numpy_to_transform(transform + [0, 0, 10, 0])
    transform.rotation.pitch = -15
    spectator.set_transform(transform)
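A minimal usage sketch for the two helpers above, assuming a CARLA server reachable on localhost:2000 and at least one spawned vehicle (the host, port, and actor filter below are illustrative, not part of the original snippet):

import carla

client = carla.Client("localhost", 2000)  # assumed host/port
client.set_timeout(5.0)
world = client.get_world()

spectator = world.get_spectator()
vehicle = world.get_actors().filter("vehicle.*")[0]  # any spawned vehicle

# Hover 50 m above the vehicle, looking straight down.
set_birds_eye_view_spectator(spectator, vehicle.get_location(), above=50.0)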
Example #3
def map_dynamic_actor(actor: carla.Actor,
                      world_map: carla.Map) -> DynamicActor:
    location: carla.Location = actor.get_location()
    gnss: carla.GeoLocation = world_map.transform_to_geolocation(location)
    velocity: carla.Vector3D = actor.get_velocity()
    acceleration: carla.Vector3D = actor.get_acceleration()
    color: str = str(
        actor.attributes['color']) if 'color' in actor.attributes else None
    extent: carla.Vector3D = actor.bounding_box.extent

    transform: carla.Transform = carla.Transform(
        rotation=actor.get_transform().rotation)

    transformed_extent: Tuple[carla.Location, carla.Location,
                              carla.Location, carla.Location] = (
        transform.transform(carla.Location(+extent.x, +extent.y, 0)),
        transform.transform(carla.Location(+extent.x, -extent.y, 0)),
        transform.transform(carla.Location(-extent.x, -extent.y, 0)),
        transform.transform(carla.Location(-extent.x, +extent.y, 0)),
    )

    bbox: Tuple[Point2D, Point2D, Point2D, Point2D] = cast(
        Tuple[Point2D, Point2D, Point2D, Point2D],
        tuple(
            map(
                lambda t: geoloc2point2d(
                    world_map.transform_to_geolocation(
                        carla.Vector3D(location.x + t.x, location.y + t.y,
                                       location.z))), transformed_extent)))

    return DynamicActor(
        id=actor.id,
        type=UncertainProperty(1., resolve_carla_type(actor.type_id)),
        type_id=actor.type_id,
        location=UncertainProperty(1.,
                                   Point3D(location.x, location.y,
                                           location.z)),
        gnss=UncertainProperty(
            1., Point3D(gnss.latitude, gnss.longitude, gnss.altitude)),
        dynamics=ActorDynamics(velocity=UncertainProperty(
            1., Point3D(velocity.x, velocity.y, velocity.z)),
                               acceleration=UncertainProperty(
                                   1.,
                                   Point3D(acceleration.x, acceleration.y,
                                           acceleration.z))),
        props=ActorProperties(color=UncertainProperty(1., color),
                              extent=UncertainProperty(
                                  1., Point3D(extent.x, extent.y, extent.z)),
                              bbox=UncertainProperty(1., bbox)))
Example #4
def compute_all_paths_for_actor(
        vehicle: carla.Actor,
        dist_between_waypoints: float,
        path_length: int = 10,
        lane_type: carla.LaneType = carla.LaneType.Driving,
        verbose: bool = False):
    world = vehicle.get_world()
    location = vehicle.get_location()
    paths = compute_all_paths_from_location(location, world,
                                            dist_between_waypoints,
                                            path_length, lane_type, verbose)

    return paths
Example #5
    def agent_vehicle_mask(self, agent: carla.Actor) -> Mask:
        canvas = self.make_empty_mask()
        bb = agent.bounding_box.extent
        corners = [
            carla.Location(x=-bb.x, y=-bb.y),
            carla.Location(x=bb.x, y=-bb.y),
            carla.Location(x=bb.x, y=bb.y),
            carla.Location(x=-bb.x, y=bb.y),
        ]

        agent.get_transform().transform(corners)
        corners = [self.location_to_pixel(loc) for loc in corners]
        cv.fillPoly(img=canvas, pts=np.int32([corners]), color=COLOR_ON)
        return canvas
Example #6
def actor_to_corners(actor: carla.Actor) -> Sequence[Tuple[float, float]]:  # pylint: disable=no-member
    """Draws a rectangular for the bounding box of `actor`."""
    bb = actor.bounding_box.extent
    # Represent the actor as a rectangle.
    corners = [
        carla.Location(x=-bb.x, y=-bb.y),  # pylint: disable=no-member
        carla.Location(x=-bb.x, y=+bb.y),  # pylint: disable=no-member
        carla.Location(x=+bb.x, y=+bb.y),  # pylint: disable=no-member
        carla.Location(x=+bb.x, y=-bb.y),  # pylint: disable=no-member
    ]
    # Convert to global coordinates.
    actor.get_transform().transform(corners)

    return corners
    def apply_agent_following_transformation_to_masks(
            self, agent_vehicle: carla.Actor, masks: np.ndarray) -> np.ndarray:
        """Returns image of shape: height, width, channels"""
        agent_transform = agent_vehicle.get_transform()
        # Add 90 degrees so the vehicle's front points to the top of the image.
        angle = agent_transform.rotation.yaw + 90

        # Rotating around the center
        crop_with_car_in_the_center = masks
        masks_n, h, w = crop_with_car_in_the_center.shape
        rotation_center = Coord(x=w // 2, y=h // 2)

        # warpAffine from OpenCV expects the array in (height, width, channels) order
        crop_with_centered_car = np.transpose(crop_with_car_in_the_center,
                                              axes=(1, 2, 0))
        rotated = rotate(crop_with_centered_car, angle, center=rotation_center)

        half_width = self.target_size.width // 2
        hslice = slice(rotation_center.x - half_width,
                       rotation_center.x + half_width)

        if self._crop_type is BirdViewCropType.FRONT_AREA_ONLY:
            vslice = slice(rotation_center.y - self.target_size.height,
                           rotation_center.y)
        elif self._crop_type is BirdViewCropType.FRONT_AND_REAR_AREA:
            half_height = self.target_size.height // 2
            vslice = slice(rotation_center.y - half_height,
                           rotation_center.y + half_height)
        else:
            raise NotImplementedError
        assert (
            vslice.start > 0 and hslice.start > 0
        ), "Trying to access negative indexes is not allowed, check for calculation errors!"
        car_on_the_bottom = rotated[vslice, hslice]
        return car_on_the_bottom
    def produce(self, agent_vehicle: carla.Actor) -> BirdView:

        all_actors = actors.query_all(world=self._world)
        segregated_actors = actors.segregate_by_type(actors=all_actors)
        agent_vehicle_loc = agent_vehicle.get_location()

        # Reusing already generated static masks for whole map
        self.masks_generator.disable_local_rendering_mode()
        agent_global_px_pos = self.masks_generator.location_to_pixel(
            agent_vehicle_loc)

        cropping_rect = CroppingRect(
            x=int(agent_global_px_pos.x - self.rendering_area.width / 2),
            y=int(agent_global_px_pos.y - self.rendering_area.height / 2),
            width=self.rendering_area.width,
            height=self.rendering_area.height,
        )

        masks = np.zeros(
            shape=(
                len(BirdViewMasks),
                self.rendering_area.height,
                self.rendering_area.width,
            ),
            dtype=np.uint8,
        )
        masks[BirdViewMasks.ROAD.value] = self.full_road_cache[
            cropping_rect.vslice, cropping_rect.hslice]
        masks[BirdViewMasks.LANES.value] = self.full_lanes_cache[
            cropping_rect.vslice, cropping_rect.hslice]
        masks[BirdViewMasks.CENTERLINES.value] = self.full_centerlines_cache[
            cropping_rect.vslice, cropping_rect.hslice]
        masks[BirdViewMasks.BUILDINGS.value] = self.full_buildings_cache[
            cropping_rect.vslice, cropping_rect.hslice]

        # Dynamic masks
        rendering_window = RenderingWindow(origin=agent_vehicle_loc,
                                           area=self.rendering_area)
        self.masks_generator.enable_local_rendering_mode(rendering_window)
        masks = self._render_actors_masks(agent_vehicle, segregated_actors,
                                          masks)
        cropped_masks = self.apply_agent_following_transformation_to_masks(
            agent_vehicle, masks)
        ordered_indices = [
            mask.value for mask in BirdViewMasks.bottom_to_top()
        ]
        return cropped_masks[:, :, ordered_indices]
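The `rotate` helper called in `apply_agent_following_transformation_to_masks` is not shown in this snippet; a plausible OpenCV-based sketch (not necessarily the library's exact implementation), assuming `center` exposes integer `x`/`y` pixel coordinates:

import cv2 as cv
import numpy as np

def rotate(image: np.ndarray, angle: float, center) -> np.ndarray:
    """Rotates an (H, W, C) image by `angle` degrees around `center`, without scaling."""
    height, width = image.shape[:2]
    rotation_matrix = cv.getRotationMatrix2D((center.x, center.y), angle, 1.0)
    return cv.warpAffine(image, rotation_matrix, (width, height),
                         flags=cv.INTER_NEAREST, borderMode=cv.BORDER_CONSTANT)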
Example #9
    def curved_road_segments_enclosure_from_actor(
            self,
            actor: carla.Actor,
            max_distance: float,
            choices=[],
            flip_x=False,
            flip_y=False) -> util.AttrDict:
        """Get segmented enclosure of fixed size from curved road.

        Parameters
        ==========
        actor : carla.Actor
            Actor whose position is used to obtain the starting waypoint.
        max_distance : float
            The max distance of the path starting from the waypoint nearest to the actor.
            Used to specify the length of the path.
        choices : list of int
            The indices of the turns to take when reaching a fork in the road network.
            `choices[0]` is the index of the first turn, `choices[1]` the index of the second turn, and so on.

        Returns
        =======
        util.AttrDict
            Container of road segment properties.
            - spline : scipy.interpolate.CubicSpline
                The spline representing the path the vehicle should motion plan on.
            - polytopes : list of (ndarray, ndarray)
                List of polytopes in H-representation (A, b)
                where x is in polytope if Ax <= b.
            - distances : ndarray
                The distances along the spline to follow from nearest endpoint
                before encountering corresponding covering polytope in index.
            - positions : ndarray
                The 2D positions of center of the covering polytope in index.
        """
        t = actor.get_transform()
        wp = self.carla_map.get_waypoint(t.location)
        wp = wp.previous(5)[0]
        lane_width = wp.lane_width
        return cover_along_waypoints_fixedsize(wp,
                                               choices,
                                               max_distance + 7,
                                               lane_width,
                                               flip_x=flip_x,
                                               flip_y=flip_y)
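The H-representation mentioned in the docstring means each covering polytope is the set {x : Ax <= b}. A minimal membership-test sketch against the returned container, assuming the `polytopes` attribute is laid out exactly as documented (`map_querier` and the sample point are illustrative):

import numpy as np

def point_in_polytope(x: np.ndarray, A: np.ndarray, b: np.ndarray) -> bool:
    """True if the 2D point x satisfies every half-space constraint A x <= b."""
    return bool(np.all(A @ x <= b))

# enclosure = map_querier.curved_road_segments_enclosure_from_actor(actor, max_distance=50.0)
# A, b = enclosure.polytopes[0]
# inside = point_in_polytope(np.array([10.0, -3.5]), A, b)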
Example #10
def getActorWorldBBox(actor: carla.Actor):
    actorBBox = actor.bounding_box
    cords = np.zeros((8, 4))

    # Get the box extent
    extent = actorBBox.extent
    cords[0, :] = np.array([extent.x, extent.y, -extent.z, 1])
    cords[1, :] = np.array([-extent.x, extent.y, -extent.z, 1])
    cords[2, :] = np.array([-extent.x, -extent.y, -extent.z, 1])
    cords[3, :] = np.array([extent.x, -extent.y, -extent.z, 1])
    cords[4, :] = np.array([extent.x, extent.y, extent.z, 1])
    cords[5, :] = np.array([-extent.x, extent.y, extent.z, 1])
    cords[6, :] = np.array([-extent.x, -extent.y, extent.z, 1])
    cords[7, :] = np.array([extent.x, -extent.y, extent.z, 1])

    bb_transform = carla.Transform(actorBBox.location)
    bb_matrix = get_matrix(bb_transform)
    actor_world_matrix = get_matrix(actor.get_transform())
    bb_world_matrix = np.dot(actor_world_matrix, bb_matrix)
    world_cords = np.dot(bb_world_matrix, np.transpose(cords))
    return world_cords
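`get_matrix` is not included in this snippet; a sketch of the conventional implementation that builds a 4x4 homogeneous matrix (yaw-pitch-roll rotation plus translation) from a carla.Transform, which is what `getActorWorldBBox` assumes:

import numpy as np
import carla

def get_matrix(transform: carla.Transform) -> np.ndarray:
    """Builds a 4x4 homogeneous transformation matrix from a carla.Transform."""
    rotation = transform.rotation
    location = transform.location
    c_y, s_y = np.cos(np.radians(rotation.yaw)), np.sin(np.radians(rotation.yaw))
    c_r, s_r = np.cos(np.radians(rotation.roll)), np.sin(np.radians(rotation.roll))
    c_p, s_p = np.cos(np.radians(rotation.pitch)), np.sin(np.radians(rotation.pitch))
    matrix = np.identity(4)
    matrix[0, 3], matrix[1, 3], matrix[2, 3] = location.x, location.y, location.z
    matrix[0, 0] = c_p * c_y
    matrix[0, 1] = c_y * s_p * s_r - s_y * c_r
    matrix[0, 2] = -c_y * s_p * c_r - s_y * s_r
    matrix[1, 0] = s_y * c_p
    matrix[1, 1] = s_y * s_p * s_r + c_y * c_r
    matrix[1, 2] = -s_y * s_p * c_r + c_y * s_r
    matrix[2, 0] = s_p
    matrix[2, 1] = -c_p * s_r
    matrix[2, 2] = c_p * c_r
    return matrix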
    def __init__(self, ego_veh: carla.Actor, gt_config: dict, debug=False):
        """
        Constructor method. 

        Input:
            ego_veh: carla.Actor object of the ego vehicle. carla.Actor provides a get_world() method to
                     retrieve the carla.World it belongs to, so the ego vehicle actor is sufficient to obtain other info.
            gt_config (dict): Configurations.
            debug (bool): True to turn on debug features.
        """
        self.debug = debug
        # Retrieve carla.World object from ego vehicle carla.Actor object
        carla_world = ego_veh.get_world()

        # Dict used as a buffer to store all ground truth data of interest.
        # Using a dict helps automate data selection during recording since data can be queried by key.
        # This buffer is automatically updated when the child ground truth extractors update their buffer
        self.all_gt = {'static': {}, 'seq': {}}

        # Traffic signs
        self.all_gt['static']['traffic_sign'] = self.get_traffic_signs(
            carla_world, self.debug)

        # Front bumper's transform in Carla's coordinate system.
        # Kept for convenience when querying lane waypoints using Carla's APIs.
        # TODO: Try putting this frame's origin at the intersection of the camera FOV and the ground surface?
        self._fbumper_carla_tform = None

        # Pose ground truth extractor
        self.pose_gt = PoseGTExtractor(ego_veh, gt_config['pose'])
        # Lane ground truth extractor
        self.lane_gt = LaneGTExtractor(carla_world, gt_config['lane'],
                                       self.debug)

        # Set up buffer for sequential data
        # Refer to pose ground truth extractor's gt buffer
        self.all_gt['seq']['pose'] = self.pose_gt.gt
        # Refer to lane ground truth extractor's gt buffer
        self.all_gt['seq']['lane'] = self.lane_gt.gt
Example #12
    def road_boundary_constraints_from_actor(
            self,
            actor: carla.Actor,
            max_distance: float,
            choices=[],
            flip_x=False,
            flip_y=False) -> RoadBoundaryConstraint:
        """Get segmented enclosure of fixed size from curved road.

        Parameters
        ==========
        actor : carla.Actor
            Actor whose position is used to obtain the starting waypoint.
        max_distance : float
            The max distance of the path starting from the waypoint nearest to the actor.
            Used to specify the length of the path.
        choices : list of int
            The indices of the turns to take when reaching a fork in the road network.
            `choices[0]` is the index of the first turn, `choices[1]` the index of the second turn, and so on.
        
        Returns
        =======
        RoadBoundaryConstraint
            The road boundary constraints.
        
        TODO: MapQuerier.road_segment_enclosure_from_actor() and
        MapQuerier.curved_road_segments_enclosure_from_actor()
        should be refactored to use RoadBoundaryConstraint as a factory.
        """
        t = actor.get_transform()
        wp = self.carla_map.get_waypoint(t.location)
        wp = wp.previous(5)[0]
        lane_width = wp.lane_width
        return RoadBoundaryConstraint(wp,
                                      max_distance + 7,
                                      lane_width,
                                      choices,
                                      flip_x=flip_x,
                                      flip_y=flip_y)
    def __init__(self, ego_veh: carla.Actor, pose_gt_config: dict):
        # Ego vehicle
        self.ego_veh = ego_veh
        self.ego_veh_tform = ego_veh.get_transform()
        # Distance from rear axle to front bumper
        self.raxle_to_fbumper = pose_gt_config['raxle_to_fbumper']
        # Distance from rear axle to center of gravity
        self.raxle_to_cg = pose_gt_config['raxle_to_cg']

        self.gt = {}
        # Rear axle in Carla's coordinate system (left-handed z-up) as a carla.Vector3D object
        raxle_location = self.ego_veh_tform.transform(
            carla.Location(x=-self.raxle_to_cg))
        # Rear axle's location in our coordinate system (right-handed z-up) as a numpy array
        # This is the ground truth of the rear axle's location
        self.gt['raxle_location'] = np.array(
            [raxle_location.x, -raxle_location.y, raxle_location.z])
        # Rear axle's orientation in our coordinate system (right-handed z-up) as a numpy array (roll, pitch, yaw)
        # This is the ground truth of the rear axle's orientation (rad)
        self.gt['raxle_orientation'] = np.array([
            self.ego_veh_tform.rotation.roll,
            -self.ego_veh_tform.rotation.pitch,
            -self.ego_veh_tform.rotation.yaw
        ]) * np.pi / 180
Example #14
def get_speed(actor: carla.Actor) -> float:
    """in km/h"""
    vector: carla.Vector3D = actor.get_velocity()
    MPS_TO_KMH = 3.6
    return MPS_TO_KMH * math.sqrt(vector.x**2 + vector.y**2 + vector.z**2)
Example #15
def speed(actor: carla.Actor) -> float:
    """Returns the speed of the given actor in km/h."""
    return 3.6 * vector_norm(actor.get_velocity())
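`vector_norm` is not defined in this snippet; a minimal sketch of what it presumably computes, the Euclidean norm of a carla.Vector3D (m/s when applied to a velocity):

import math
import carla

def vector_norm(v: carla.Vector3D) -> float:
    """Euclidean norm of a carla.Vector3D."""
    return math.sqrt(v.x ** 2 + v.y ** 2 + v.z ** 2)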