def agent_vehicle_mask(self, agent: carla.Actor) -> Mask:
    """Rasterizes the agent vehicle's bounding box onto an empty mask."""
    canvas = self.make_empty_mask()
    bb = agent.bounding_box.extent
    corners = [
        carla.Location(x=-bb.x, y=-bb.y),
        carla.Location(x=bb.x, y=-bb.y),
        carla.Location(x=bb.x, y=bb.y),
        carla.Location(x=-bb.x, y=bb.y),
    ]
    # Transform the local-frame corners to world coordinates (in place),
    # then project them onto the pixel grid and fill the footprint.
    agent.get_transform().transform(corners)
    corners = [self.location_to_pixel(loc) for loc in corners]
    cv.fillPoly(img=canvas, pts=np.int32([corners]), color=COLOR_ON)
    return canvas
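# Usage sketch (illustration only, not part of the original module): the fillPoly
# call in agent_vehicle_mask rasterizes the transformed corners onto the mask.
# The snippet below reproduces just that step with made-up pixel coordinates
# standing in for the output of self.location_to_pixel().
import cv2 as cv
import numpy as np

canvas = np.zeros((160, 160), dtype=np.uint8)             # empty single-channel mask
corners_px = [(60, 70), (100, 70), (100, 86), (60, 86)]   # hypothetical pixel-space corners (x, y)
cv.fillPoly(img=canvas, pts=np.int32([corners_px]), color=1)
assert canvas[78, 80] == 1                                # a pixel inside the footprint is set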
def actor_to_corners(actor: carla.Actor) -> Sequence[carla.Location]:  # pylint: disable=no-member
    """Returns the world-frame corner locations of the bounding box of `actor`."""
    bb = actor.bounding_box.extent
    # Describe the actor footprint as a rectangle in its local frame.
    corners = [
        carla.Location(x=-bb.x, y=-bb.y),  # pylint: disable=no-member
        carla.Location(x=-bb.x, y=+bb.y),  # pylint: disable=no-member
        carla.Location(x=+bb.x, y=+bb.y),  # pylint: disable=no-member
        carla.Location(x=+bb.x, y=-bb.y),  # pylint: disable=no-member
    ]
    # Convert to global coordinates (in place).
    actor.get_transform().transform(corners)
    return corners
def apply_agent_following_transformation_to_masks(
        self, agent_vehicle: carla.Actor, masks: np.ndarray) -> np.ndarray:
    """Returns an image of shape: height, width, channels."""
    agent_transform = agent_vehicle.get_transform()
    # Rotate so that the vehicle's front points to the top of the image.
    angle = agent_transform.rotation.yaw + 90

    # Rotate around the center of the crop.
    crop_with_car_in_the_center = masks
    masks_n, h, w = crop_with_car_in_the_center.shape
    rotation_center = Coord(x=w // 2, y=h // 2)

    # warpAffine from OpenCV expects the axes ordered as: height, width, channels.
    crop_with_centered_car = np.transpose(crop_with_car_in_the_center,
                                          axes=(1, 2, 0))
    rotated = rotate(crop_with_centered_car, angle, center=rotation_center)

    half_width = self.target_size.width // 2
    hslice = slice(rotation_center.x - half_width,
                   rotation_center.x + half_width)

    if self._crop_type is BirdViewCropType.FRONT_AREA_ONLY:
        vslice = slice(rotation_center.y - self.target_size.height,
                       rotation_center.y)
    elif self._crop_type is BirdViewCropType.FRONT_AND_REAR_AREA:
        half_height = self.target_size.height // 2
        vslice = slice(rotation_center.y - half_height,
                       rotation_center.y + half_height)
    else:
        raise NotImplementedError
    assert (
        vslice.start > 0 and hslice.start > 0
    ), "Trying to access negative indexes is not allowed, check for calculation errors!"
    car_on_the_bottom = rotated[vslice, hslice]
    return car_on_the_bottom
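# Assumed helper (sketch, not the library's actual implementation): the method
# above calls a `rotate` function that is not shown here. A minimal version,
# assuming it rotates an HxWxC image about `center` by `angle` degrees using
# OpenCV, with nearest-neighbour interpolation so binary masks stay binary:
import cv2 as cv
import numpy as np

def rotate(image: np.ndarray, angle: float, center=None, scale: float = 1.0) -> np.ndarray:
    h, w = image.shape[:2]
    if center is None:
        center = (w // 2, h // 2)
    # 2x3 affine matrix for a counter-clockwise rotation (in degrees) about `center`.
    M = cv.getRotationMatrix2D((float(center[0]), float(center[1])), angle, scale)
    # Pixels rotated in from outside the frame are filled with zeros.
    return cv.warpAffine(image, M, (w, h), flags=cv.INTER_NEAREST,
                         borderMode=cv.BORDER_CONSTANT, borderValue=0)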
def map_dynamic_actor(actor: carla.Actor, world_map: carla.Map) -> DynamicActor:
    """Converts a carla.Actor into a DynamicActor snapshot of its current state."""
    location: carla.Location = actor.get_location()
    gnss: carla.GeoLocation = world_map.transform_to_geolocation(location)
    velocity: carla.Vector3D = actor.get_velocity()
    acceleration: carla.Vector3D = actor.get_acceleration()
    color: str = str(actor.attributes['color']) if 'color' in actor.attributes else None
    extent: carla.Vector3D = actor.bounding_box.extent

    # Rotate the bounding-box extent into the actor's orientation (translation is applied below).
    transform: carla.Transform = carla.Transform(rotation=actor.get_transform().rotation)
    transformed_extent: Tuple[carla.Location, carla.Location, carla.Location, carla.Location] = (
        transform.transform(carla.Location(+extent.x, +extent.y, 0)),
        transform.transform(carla.Location(+extent.x, -extent.y, 0)),
        transform.transform(carla.Location(-extent.x, -extent.y, 0)),
        transform.transform(carla.Location(-extent.x, +extent.y, 0)),
    )

    # Project the four corners to geolocations and then to 2D points.
    bbox: Tuple[Point2D, Point2D, Point2D, Point2D] = cast(
        Tuple[Point2D, Point2D, Point2D, Point2D],
        tuple(
            map(
                lambda t: geoloc2point2d(
                    world_map.transform_to_geolocation(
                        carla.Vector3D(location.x + t.x, location.y + t.y, location.z))),
                transformed_extent)))

    return DynamicActor(
        id=actor.id,
        type=UncertainProperty(1., resolve_carla_type(actor.type_id)),
        type_id=actor.type_id,
        location=UncertainProperty(1., Point3D(location.x, location.y, location.z)),
        gnss=UncertainProperty(1., Point3D(gnss.latitude, gnss.longitude, gnss.altitude)),
        dynamics=ActorDynamics(
            velocity=UncertainProperty(1., Point3D(velocity.x, velocity.y, velocity.z)),
            acceleration=UncertainProperty(
                1., Point3D(acceleration.x, acceleration.y, acceleration.z))),
        props=ActorProperties(
            color=UncertainProperty(1., color),
            extent=UncertainProperty(1., Point3D(extent.x, extent.y, extent.z)),
            bbox=UncertainProperty(1., bbox)))
def curved_road_segments_enclosure_from_actor(
        self, actor: carla.Actor, max_distance: float,
        choices=[], flip_x=False, flip_y=False) -> util.AttrDict:
    """Get segmented enclosure of fixed size from a curved road.

    Parameters
    ==========
    actor : carla.Actor
        Actor whose position is used to get the starting waypoint.
    max_distance : float
        The max distance of the path starting from `start_wp`.
        Used to specify the length of the path.
    choices : list of int
        The indices of the turns to make when reaching a fork on the road
        network. `choices[0]` is the index of the first turn, `choices[1]`
        is the index of the second turn, etc.

    Returns
    =======
    util.AttrDict
        Container of road segment properties.

        - spline : scipy.interpolate.CubicSpline
            The spline representing the path the vehicle should motion plan on.
        - polytopes : list of (ndarray, ndarray)
            List of polytopes in H-representation (A, b) where x is in the
            polytope if Ax <= b.
        - distances : ndarray
            The distances along the spline to follow from the nearest endpoint
            before encountering the covering polytope at the corresponding index.
        - positions : ndarray
            The 2D positions of the center of the covering polytope at the
            corresponding index.
    """
    t = actor.get_transform()
    # Start a few meters behind the actor's current waypoint.
    wp = self.carla_map.get_waypoint(t.location)
    wp = wp.previous(5)[0]
    lane_width = wp.lane_width
    return cover_along_waypoints_fixedsize(wp, choices, max_distance + 7,
                                           lane_width, flip_x=flip_x, flip_y=flip_y)
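# Usage sketch (illustration only): the returned polytopes are in H-representation
# (A, b), i.e. a 2D point x lies inside a covering polytope iff A @ x <= b holds
# componentwise. A hypothetical membership check:
import numpy as np

def point_in_polytope(x: np.ndarray, A: np.ndarray, b: np.ndarray) -> bool:
    """True iff the point `x` satisfies every half-space constraint of A x <= b."""
    return bool(np.all(A @ x <= b))

# e.g. enclosure = map_querier.curved_road_segments_enclosure_from_actor(actor, 50.0)
#      inside_any = any(point_in_polytope(xy, A, b) for A, b in enclosure.polytopes)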
def getActorWorldBBox(actor: carla.Actor) -> np.ndarray:
    """Returns the 8 corners of the actor's bounding box in world coordinates,
    as a 4x8 array of homogeneous column vectors."""
    actorBBox = actor.bounding_box
    cords = np.zeros((8, 4))

    # Enumerate the box corners from its extent (half-sizes), in homogeneous coordinates.
    extent = actorBBox.extent
    cords[0, :] = np.array([extent.x, extent.y, -extent.z, 1])
    cords[1, :] = np.array([-extent.x, extent.y, -extent.z, 1])
    cords[2, :] = np.array([-extent.x, -extent.y, -extent.z, 1])
    cords[3, :] = np.array([extent.x, -extent.y, -extent.z, 1])
    cords[4, :] = np.array([extent.x, extent.y, extent.z, 1])
    cords[5, :] = np.array([-extent.x, extent.y, extent.z, 1])
    cords[6, :] = np.array([-extent.x, -extent.y, extent.z, 1])
    cords[7, :] = np.array([extent.x, -extent.y, extent.z, 1])

    # Chain the transforms: bounding-box frame -> actor frame -> world frame.
    bb_transform = carla.Transform(actorBBox.location)
    bb_matrix = get_matrix(bb_transform)
    actor_world_matrix = get_matrix(actor.get_transform())
    bb_world_matrix = np.dot(actor_world_matrix, bb_matrix)
    world_cords = np.dot(bb_world_matrix, np.transpose(cords))
    return world_cords
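# Assumed helper (sketch): getActorWorldBBox relies on a `get_matrix` function that
# builds a 4x4 homogeneous transform from a carla.Transform. The original helper is
# not shown; in CARLA >= 0.9.8 it could simply wrap the built-in
# carla.Transform.get_matrix():
import numpy as np
import carla

def get_matrix(transform: carla.Transform) -> np.ndarray:
    """4x4 homogeneous matrix mapping points from the transform's local frame to its parent frame."""
    return np.array(transform.get_matrix())

# world_cords is then a 4x8 array of homogeneous column vectors;
# world_cords[:3, i] is the (x, y, z) position of the i-th corner.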
def road_boundary_constraints_from_actor(
        self, actor: carla.Actor, max_distance: float,
        choices=[], flip_x=False, flip_y=False) -> RoadBoundaryConstraint:
    """Get road boundary constraints for a segmented enclosure of fixed size
    from a curved road.

    Parameters
    ==========
    actor : carla.Actor
        Actor whose position is used to get the starting waypoint.
    max_distance : float
        The max distance of the path starting from `start_wp`.
        Used to specify the length of the path.
    choices : list of int
        The indices of the turns to make when reaching a fork on the road
        network. `choices[0]` is the index of the first turn, `choices[1]`
        is the index of the second turn, etc.

    Returns
    =======
    RoadBoundaryConstraint
        The road boundary constraints.

    TODO: MapQuerier.road_segment_enclosure_from_actor() and
    MapQuerier.curved_road_segments_enclosure_from_actor() should be
    refactored to use RoadBoundaryConstraint as a factory.
    """
    t = actor.get_transform()
    # Start a few meters behind the actor's current waypoint.
    wp = self.carla_map.get_waypoint(t.location)
    wp = wp.previous(5)[0]
    lane_width = wp.lane_width
    return RoadBoundaryConstraint(wp, max_distance + 7, lane_width, choices,
                                  flip_x=flip_x, flip_y=flip_y)
def __init__(self, ego_veh: carla.Actor, pose_gt_config: dict):
    # Ego vehicle
    self.ego_veh = ego_veh
    self.ego_veh_tform = ego_veh.get_transform()

    # Distance from rear axle to front bumper
    self.raxle_to_fbumper = pose_gt_config['raxle_to_fbumper']
    # Distance from rear axle to center of gravity
    self.raxle_to_cg = pose_gt_config['raxle_to_cg']

    self.gt = {}

    # Rear axle in Carla's coordinate system (left-handed z-up) as a carla.Vector3D object
    raxle_location = self.ego_veh_tform.transform(
        carla.Location(x=-self.raxle_to_cg))

    # Rear axle's location in our coordinate system (right-handed z-up) as a numpy array.
    # This is the ground truth of the rear axle's location.
    self.gt['raxle_location'] = np.array(
        [raxle_location.x, -raxle_location.y, raxle_location.z])

    # Rear axle's orientation in our coordinate system (right-handed z-up) as a numpy array
    # (roll, pitch, yaw). This is the ground truth of the rear axle's orientation (rad).
    self.gt['raxle_orientation'] = np.array([
        self.ego_veh_tform.rotation.roll,
        -self.ego_veh_tform.rotation.pitch,
        -self.ego_veh_tform.rotation.yaw
    ]) * np.pi / 180
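# Illustration (same convention as above): the negated y, pitch, and yaw convert
# CARLA's left-handed z-up frame into a right-handed z-up frame. A small helper
# capturing that conversion for any carla.Transform (hypothetical name, not part
# of the original class):
import numpy as np
import carla

def carla_transform_to_right_handed(tform: carla.Transform):
    """Returns (xyz, rpy_rad) of `tform` expressed in a right-handed z-up frame."""
    loc, rot = tform.location, tform.rotation
    xyz = np.array([loc.x, -loc.y, loc.z])                  # flip the y axis
    rpy_rad = np.deg2rad([rot.roll, -rot.pitch, -rot.yaw])  # flip pitch and yaw, degrees -> radians
    return xyz, rpy_rad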