def __init__(self, carla_world: carla.World, ego_vehicle: carla.Vehicle):
    """Create the image buffers and attach all camera sensors to the ego vehicle.

    Spawns four cameras: a trailing "spectator" rgb camera (visualization only)
    plus rgb, depth and semantic-segmentation cameras sharing one calibrated
    mount point. Each sensor's callback fills the corresponding image buffer.

    Args:
        carla_world: The world object to spawn the sensor actors in.
        ego_vehicle: The vehicle every sensor is attached to.
    """
    # Params for images
    self.img_width = 1280  # in [px]
    self.img_height = 720  # in [px]
    self.fov_horizontal = 120  # in [deg]
    self.display_width = 640  # in [px]
    self.display_height = 360  # in [px]
    # Params for static camera calibration
    self.translation_x = 0.4  # in [m] from center of car
    self.translation_z = 1.5  # in [m] from bottom of car
    self.translation_y = 0.0  # in [m] from center of car
    self.pitch = 0.0  # in [deg]
    self.roll = 0.0  # in [deg]
    self.yaw = 0.0  # in [deg]
    # Images that are stored on disk per frame.
    # NOTE: annotated as np.ndarray -- np.array is a factory function, not a type.
    self.rgb_img: np.ndarray = np.zeros((self.img_height, self.img_width, 3), dtype=np.uint8)
    self.depth_img: np.ndarray = np.zeros((self.img_height, self.img_width, 3), dtype=np.uint8)
    self.semseg_img: np.ndarray = np.zeros((self.img_height, self.img_width, 3), dtype=np.uint8)
    # Images that are displayed in pygame for visualization (note spectator img is just needed for visu)
    self.display_spectator_img: np.ndarray = np.zeros((self.display_height, self.display_width, 3), dtype=np.uint8)
    self.display_rgb_img: np.ndarray = np.zeros((self.display_height, self.display_width, 3), dtype=np.uint8)
    self.display_depth_img: np.ndarray = np.zeros((self.display_height, self.display_width, 3), dtype=np.uint8)
    self.display_semseg_img: np.ndarray = np.zeros((self.display_height, self.display_width, 3), dtype=np.uint8)
    # Create sensors
    blueprint_library: carla.BlueprintLibrary = carla_world.get_blueprint_library()
    cam_transform = carla.Transform(
        carla.Location(x=self.translation_x, z=self.translation_z, y=self.translation_y),
        carla.Rotation(pitch=self.pitch, yaw=self.yaw, roll=self.roll)
    )

    def _spawn_camera(bp_name: str, transform: carla.Transform, callback) -> carla.Actor:
        # One place for the repeated find/configure/spawn/listen sequence.
        bp = blueprint_library.find(bp_name)
        self._add_attr_to_blueprint(bp)
        sensor: carla.Actor = carla_world.spawn_actor(bp, transform, attach_to=ego_vehicle)
        sensor.listen(callback)
        return sensor

    # Attach dummy rgb sensor to follow the ego vehicle (only for visualization)
    spectator_transform = carla.Transform(carla.Location(x=-6, z=3))
    self.spectator: carla.Actor = _spawn_camera(
        'sensor.camera.rgb', spectator_transform, self._read_spectator_img)
    # Attach rgb cam sensor to ego vehicle
    self.cam_rgb: carla.Actor = _spawn_camera(
        'sensor.camera.rgb', cam_transform, self._read_rgb_img)
    # Attach depth sensor to ego vehicle
    self.cam_depth: carla.Actor = _spawn_camera(
        "sensor.camera.depth", cam_transform, self._read_depth_img)
    # Attach semseg sensor to ego vehicle
    self.cam_semseg: carla.Actor = _spawn_camera(
        "sensor.camera.semantic_segmentation", cam_transform, self._read_semseg_img)
def spawn_hero(
        world: carla.World,  # pylint: disable=no-member
        spawn_point: carla.Transform,  # pylint: disable=no-member
        vehicle_id: Optional[str] = None,
) -> carla.Vehicle:  # pylint: disable=no-member
    """Spawns `hero` in `spawn_point`.

    Args:
        world: The world object associated with the simulation.
        spawn_point: The point to spawn the hero actor.
        vehicle_id: An (optional) valid id for the vehicle object.

    Returns:
        The actor (vehicle) object.
    """
    # Blueprints library.
    bl = world.get_blueprint_library()
    if vehicle_id is not None:
        # Get the specific vehicle from the library.
        hero_bp = bl.find(vehicle_id)
    else:
        # Randomly choose a vehicle from the list.
        hero_bp = random.choice(bl.filter("vehicle.*"))
    # Rename the actor to `hero`.
    hero_bp.set_attribute("role_name", "hero")
    # Lazy %-style args: the conversion only runs when DEBUG logging is enabled.
    logging.debug("Spawns hero actor at %s",
                  carla_xyz_to_ndarray(spawn_point.location))
    hero = world.spawn_actor(hero_bp, spawn_point)
    return hero
def spawn_sensors(world: carla.World, actor: carla.Actor) -> Dict[str, carla.Sensor]:
    """Attach rgb, semantic-segmentation and depth cameras to `actor`.

    All three cameras share one transform (3 m above the actor, with a random
    yaw drawn from N(0, 45) deg) and one random horizontal fov in [60, 110] deg.

    Args:
        world: The world object to spawn the sensor actors in.
        actor: The actor the sensors are attached to.

    Returns:
        Mapping from sensor name ("rgb"/"segmentation"/"depth") to the sensor.
    """
    # Hoisted: one blueprint-library lookup instead of three.
    bl = world.get_blueprint_library()
    blueprints = {
        "rgb": bl.find('sensor.camera.rgb'),
        "segmentation": bl.find('sensor.camera.semantic_segmentation'),
        "depth": bl.find('sensor.camera.depth'),
    }
    fov = random.randint(60, 110)
    yaw = random.gauss(0, 45)
    sensor_transform = carla.Transform(carla.Location(0, 0, 3), carla.Rotation(0, yaw, 0))
    sensors = {}
    for sensor_name, blueprint in blueprints.items():
        # BUGFIX: 1920 / 2 is the float 960.0, so str(...) produced "960.0",
        # which CARLA cannot parse for the integer image_size attributes.
        # Integer division yields the intended "960"/"540".
        blueprint.set_attribute("image_size_x", str(1920 // 2))
        blueprint.set_attribute("image_size_y", str(1080 // 2))
        blueprint.set_attribute("fov", str(fov))
        blueprint.set_attribute(
            "sensor_tick", str(SENSOR_TICK))  # Get new data every x second
        sensor: carla.Sensor = world.spawn_actor(blueprint, sensor_transform,
                                                 attach_to=actor)
        sensors[sensor_name] = sensor
    return sensors
def __init__(self, world: carla.World, carla_vehicle: carla.Vehicle):
    """Attach a collision sensor to `carla_vehicle` and track collisions.

    `self.has_collided` flips to True on the first collision event and is
    never reset here.

    Args:
        world: The world object to spawn the sensor actor in.
        carla_vehicle: The vehicle to monitor for collisions.
    """
    self.has_collided = False

    def on_collision(e):
        self.has_collided = True

    blueprint_library = world.get_blueprint_library()
    blueprint = blueprint_library.find('sensor.other.collision')
    # BUGFIX: with attach_to=, CARLA interprets the spawn transform as
    # RELATIVE to the parent actor. Passing the vehicle's own world
    # transform offset the sensor by the vehicle's absolute position;
    # an identity transform mounts it at the vehicle's origin.
    self._collision_sensor = world.spawn_actor(
        blueprint, carla.Transform(), attach_to=carla_vehicle)
    self._collision_sensor.listen(on_collision)
def prepare_ego_vehicle(world: carla.World) -> carla.Actor:
    """Spawn the ego vehicle (an Audi A2 with role_name "hero") and return it."""
    blueprint = world.get_blueprint_library().find("vehicle.audi.a2")
    # This will allow external scripts like manual_control.py or no_rendering_mode.py
    # from the official CARLA examples to take control over the ego agent
    blueprint.set_attribute("role_name", "hero")
    # spawn points doesnt matter - scenario sets up position in reset
    spawn_transform = carla.Transform(carla.Location(0, 0, 500), carla.Rotation())
    ego_vehicle = world.spawn_actor(blueprint, spawn_transform)
    assert ego_vehicle is not None, "Ego vehicle could not be spawned"
    # Setup any car sensors you like, collect observations and then use them as input to your model
    return ego_vehicle