Example #1
def process_depth_images(msg,
                         depth_camera_setup,
                         ego_vehicle,
                         speed,
                         csv,
                         surface,
                         visualize=False):
    print("Received a message for the time: {}".format(msg.timestamp))

    # If we are within 5 meters of the destination, stop and exit with success.
    if ego_vehicle.get_location().distance(VEHICLE_DESTINATION) <= 5:
        ego_vehicle.set_velocity(carla.Vector3D())
        CLEANUP_FUNCTION()
        sys.exit(0)

    # Get the RGB image corresponding to the given depth image timestamp.
    rgb_image = retrieve_rgb_image(msg.timestamp)

    # Visualize the image and the bounding boxes if needed.
    if visualize:
        draw_image(rgb_image, surface)

    # Convert the pedestrians into detected objects relative to the
    # current frame.
    bb_surface = None
    resolution = (depth_camera_setup.width, depth_camera_setup.height)
    if visualize:
        bb_surface = pygame.Surface(resolution)
        bb_surface.set_colorkey((0, 0, 0))

    detected_pedestrians = []
    for pedestrian in ego_vehicle.get_world().get_actors().filter('walker.*'):
        bbox = get_2d_bbox_from_3d_box(
            depth_to_array(msg),
            to_pylot_transform(ego_vehicle.get_transform()),
            to_pylot_transform(pedestrian.get_transform()),
            BoundingBox(pedestrian.bounding_box),
            depth_camera_setup.get_unreal_transform(),
            depth_camera_setup.get_intrinsic(), resolution, 1.0, 3.0)
        if bbox is not None:
            detected_pedestrians.append(bbox)
            if visualize:
                draw_bounding_box(bbox, bb_surface)

    # We have drawn all the bounding boxes on the bb_surface, now put it on
    # the RGB image surface.
    if visualize:
        surface.blit(bb_surface, (0, 0))
        pygame.display.flip()

    # Compute the mAP.
    print("We detected a total of {} pedestrians.".format(len(detected_pedestrians)))
    compute_and_log_map(detected_pedestrians, msg.timestamp, csv)

    # Move the ego_vehicle according to the given speed.
    ego_vehicle.set_velocity(carla.Vector3D(x=-speed))

    ego_vehicle.get_world().tick()
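
Every example on this page calls to_pylot_transform to convert a carla.Transform into Pylot's own transform type, but the helper itself is not shown. A minimal sketch of what such a converter might look like, assuming the pylot.simulation.utils.Location/Rotation/Transform constructors used in Example #2, is:

import pylot.simulation.utils

def to_pylot_transform(transform):
    # Hypothetical sketch: copy the carla.Transform fields into the Pylot
    # Location/Rotation/Transform types used throughout these examples.
    # The real Pylot helper may store additional state (e.g. a 4x4 matrix).
    location = pylot.simulation.utils.Location(x=transform.location.x,
                                               y=transform.location.y,
                                               z=transform.location.z)
    rotation = pylot.simulation.utils.Rotation(pitch=transform.rotation.pitch,
                                               yaw=transform.rotation.yaw,
                                               roll=transform.rotation.roll)
    return pylot.simulation.utils.Transform(location, rotation)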
Example #2
def log_speed_limits(world):
    world_map = world.get_map()
    (_, traffic_lights, traffic_stops, speed_signs) = get_actors(world)
    transforms_of_interest = []
    # Add transforms that are close to speed limit signs.
    for speed_sign in speed_signs:
        for offset in range(10, 25, 5):
            # Speed signs have a different coordinate system, hence we need
            # to offset along y instead of x.
            offset_loc = pylot.simulation.utils.Location(x=0, y=offset, z=0)
            offset_rot = pylot.simulation.utils.Rotation(pitch=0,
                                                         yaw=0,
                                                         roll=0)
            offset_trans = pylot.simulation.utils.Transform(
                offset_loc, offset_rot)
            transform = speed_sign.transform * offset_trans
            location = to_carla_location(transform.location)
            w = world_map.get_waypoint(location,
                                       project_to_road=True,
                                       lane_type=carla.LaneType.Driving)
            camera_transform = to_pylot_transform(w.transform)
            camera_transform.location.z += 2.0
            transform = to_carla_transform(camera_transform)
            transforms_of_interest.append(transform)
    # Ensure all traffic lights are red.
    change_traffic_light_colors(world, carla.TrafficLightState.Red)
    world.tick()
    time.sleep(1)
    (_, traffic_lights, traffic_stops, speed_signs) = get_actors(world)
    for weather in find_weather_presets():
        change_weather(world, weather)
        log_obstacles(world, transforms_of_interest, traffic_lights,
                      carla.TrafficLightState.Red, speed_signs, traffic_stops,
                      weather, world_map.name)
Example #3
def on_depth_msg(carla_image):
    game_time = int(carla_image.timestamp * 1000)
    print("Received depth camera msg {}".format(game_time))

    depth_camera_transform = to_pylot_transform(carla_image.transform)

    depth_msg = pylot.simulation.messages.DepthFrameMessage(
        depth_to_array(carla_image), depth_camera_transform, carla_image.fov,
        None)

    for (x, y) in pixels_to_check:
        print("{} Depth at pixel {}".format((x, y), depth_msg.frame[y][x]))
        pos3d_depth = get_3d_world_position_with_depth_map(
            x, y, depth_msg.frame, depth_msg.width, depth_msg.height,
            depth_msg.fov, depth_camera_transform)
        print("{} Computed using depth map {}".format((x, y), pos3d_depth))

    depth_point_cloud = pylot.simulation.utils.depth_to_local_point_cloud(
        depth_msg.frame,
        depth_msg.width,
        depth_msg.height,
        depth_msg.fov,
        max_depth=1.0)
    # Transform the depth cloud to world coordinates.
    transform = camera_to_unreal_transform(depth_camera_transform)
    depth_point_cloud = transform.transform_points(depth_point_cloud)

    global depth_pc
    depth_pc = depth_point_cloud.tolist()
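
Example #3 relies on depth_to_array to turn the raw CARLA depth image into a per-pixel depth array. A minimal sketch of such a decoder, assuming CARLA's documented depth encoding (depth packed into the R, G and B channels of a BGRA frame), could look like the following; it returns normalized depth in [0, 1], which matches the max_depth=1.0 used above:

import numpy as np

def depth_to_array(carla_image):
    # Hypothetical sketch of a CARLA depth decoder. Assumes the standard
    # BGRA pixel layout and the documented encoding:
    #   normalized = (R + G * 256 + B * 256 * 256) / (256^3 - 1)
    array = np.frombuffer(carla_image.raw_data, dtype=np.uint8)
    array = np.reshape(array, (carla_image.height, carla_image.width, 4))
    array = array.astype(np.float32)
    normalized = (array[:, :, 2] + array[:, :, 1] * 256.0 +
                  array[:, :, 0] * 65536.0) / 16777215.0
    return normalized  # multiply by 1000.0 to obtain depth in meters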
Example #4
    def compute_waypoints(self, source_loc, destination_loc):
        """ Computes waypoints between two locations.

        Assumes that the ego vehicle has the same orientation as the lane
        it is on.

        Args:
            source_loc: Source world location.
            destination_loc: Destination world location.
        """
        start_waypoint = self._map.get_waypoint(
            source_loc,
            project_to_road=True,
            lane_type=carla.LaneType.Driving)
        end_waypoint = self._map.get_waypoint(
            destination_loc,
            project_to_road=True,
            lane_type=carla.LaneType.Driving)
        assert start_waypoint and end_waypoint, 'Map could not find waypoints'
        route = self._grp.trace_route(
            start_waypoint.transform.location,
            end_waypoint.transform.location)
        # TODO(ionel): The planner returns several options in intersections.
        # We always take the first one, but this is not correct.
        return deque([to_pylot_transform(waypoint[0].transform)
                      for waypoint in route])
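
The returned deque of transforms is meant to be consumed from the front as the vehicle progresses; a hypothetical usage sketch (planner, source_loc and destination_loc are assumed to exist in the caller's scope):

# Hypothetical usage of compute_waypoints.
waypoints = planner.compute_waypoints(source_loc, destination_loc)
while waypoints:
    next_transform = waypoints.popleft()
    # ... hand next_transform.location to the control / steering logic ...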
Example #5
def log_stop_signs(world):
    world_map = world.get_map()
    (_, traffic_lights, traffic_stops, speed_signs) = get_actors(world)
    transforms_of_interest = []
    # Add transforms that are close to stop signs.
    for stop_sign in traffic_stops:
        for offset in range(10, 25, 5):
            offset_loc = pylot.simulation.utils.Location(x=-offset, y=0, z=0)
            offset_rot = pylot.simulation.utils.Rotation(
                pitch=0, yaw=0, roll=0)
            offset_trans = pylot.simulation.utils.Transform(
                offset_loc, offset_rot)
            transform = stop_sign.transform * offset_trans
            location = to_carla_location(transform.location)
            w = world_map.get_waypoint(
                location,
                project_to_road=True,
                lane_type=carla.LaneType.Driving)
            camera_transform = to_pylot_transform(w.transform)
            camera_transform.location.z += 2.0
            transform = to_carla_transform(camera_transform)
            transforms_of_interest.append(transform)
    # Ensure all traffic lights are red.
    change_traffic_light_colors(world, carla.TrafficLightState.Red)
    world.tick()
    time.sleep(3)
    (_, traffic_lights, traffic_stops, speed_signs) = get_actors(world)
    log_obstacles(world,
                  transforms_of_interest,
                  traffic_lights,
                  carla.TrafficLightState.Red,
                  speed_signs,
                  traffic_stops)
Example #6
File: ERDOSAgent.py Project: Rmao99/pylot
 def send_can_bus_reading(self, data, timestamp, watermark_msg):
     # The can bus dict contains other fields as well, but we don't
     # currently use them.
     vehicle_transform = to_pylot_transform(data['transform'])
     forward_speed = data['speed']
     can_bus = pylot.simulation.utils.CanBus(vehicle_transform,
                                             forward_speed)
     self._can_bus_stream.send(Message(can_bus, timestamp))
Example #7
 def __publish_hero_vehicle_data(self, timestamp, watermark_msg):
     vec_transform = to_pylot_transform(
         self._driving_vehicle.get_transform())
     forward_speed = pylot.simulation.utils.get_speed(
         self._driving_vehicle.get_velocity())
     can_bus = pylot.simulation.utils.CanBus(vec_transform, forward_speed)
     self.get_output_stream('can_bus').send(Message(can_bus, timestamp))
     self.get_output_stream('can_bus').send(watermark_msg)
Example #8
def log_bounding_boxes(carla_image, depth_frame, segmented_frame,
                       traffic_lights, tl_color, speed_signs, stop_signs,
                       weather, town):
    game_time = int(carla_image.timestamp * 1000)
    print("Processing game time {} in {} with weather {}".format(
        game_time, town, weather))
    frame = bgra_to_bgr(to_bgra_array(carla_image))
    # Copy the frame to ensure it's on the heap.
    frame = copy.deepcopy(frame)
    transform = to_pylot_transform(carla_image.transform)
    _, world = get_world()
    town_name = world.get_map().name

    speed_limit_det_objs = []
    if speed_signs:
        speed_limit_det_objs = pylot.simulation.utils.get_speed_limit_det_objs(
            speed_signs, transform, transform, depth_frame, FLAGS.frame_width,
            FLAGS.frame_height, FLAGS.camera_fov, segmented_frame)

    traffic_stop_det_objs = []
    if stop_signs:
        traffic_stop_det_objs = pylot.simulation.utils.get_traffic_stop_det_objs(
            stop_signs, transform, depth_frame, FLAGS.frame_width,
            FLAGS.frame_height, FLAGS.camera_fov)

    traffic_light_det_objs = []
    if traffic_lights:
        traffic_light_det_objs = get_traffic_light_objs(
            traffic_lights, transform, depth_frame, segmented_frame,
            FLAGS.frame_width, FLAGS.frame_height, tl_color, town_name)

    det_objs = (speed_limit_det_objs + traffic_stop_det_objs +
                traffic_light_det_objs)

    if FLAGS.visualize_bboxes:
        visualize_ground_bboxes('bboxes', game_time, frame, det_objs)

    # Log the frame.
    rgb_frame = bgr_to_rgb(frame)
    file_name = '{}signs-{}_{}_{}.png'.format(FLAGS.data_path, game_time,
                                              weather, town)
    rgb_img = Image.fromarray(np.uint8(rgb_frame))
    rgb_img.save(file_name)

    if FLAGS.log_bbox_images:
        annotate_image_with_bboxes(game_time, frame, det_objs)
        rgb_frame = bgr_to_rgb(frame)
        file_name = '{}annotated-signs-{}_{}_{}.png'.format(
            FLAGS.data_path, game_time, weather, town)
        rgb_img = Image.fromarray(np.uint8(rgb_frame))
        rgb_img.save(file_name)

    # Log the bounding boxes.
    bboxes = [det_obj.get_bbox_label() for det_obj in det_objs]
    file_name = '{}bboxes-{}_{}_{}.json'.format(FLAGS.data_path, game_time,
                                                weather, town)
    with open(file_name, 'w') as outfile:
        json.dump(bboxes, outfile)
Example #9
 def send_waypoints(self, timestamp):
     # Send the global waypoints once.
     if self._waypoints is None:
         self._waypoints = self._global_plan_world_coord
         data = [(to_pylot_transform(transform), road_option)
                 for (transform, road_option) in self._waypoints]
         self._global_trajectory_stream.send(Message(data, timestamp))
         #self._global_trajectory_stream.send(self._top_watermark)
     assert self._waypoints == self._global_plan_world_coord,\
         'Global plan has been updated.'
Example #10
def convert_speed_limit_actors(speed_limit_actors):
    """ Converts a Carla speed limit actor into a Pylot speed limit object."""
    speed_limits = []
    for ts_actor in speed_limit_actors:
        transform = to_pylot_transform(ts_actor.get_transform())
        speed_limit = int(ts_actor.type_id.split('.')[-1])
        speed_sign = pylot.simulation.utils.SpeedLimitSign(
            transform, speed_limit)
        speed_limits.append(speed_sign)
    return speed_limits
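
A hypothetical call site for the converter above: CARLA exposes speed limit signs as actors whose type_id looks like 'traffic.speed_limit.30', which is exactly what the split('.')[-1] parses.

# Hypothetical usage, assuming an already-connected world object.
speed_limit_actors = world.get_actors().filter('traffic.speed_limit*')
speed_signs = convert_speed_limit_actors(speed_limit_actors)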
Example #11
 def send_can_bus_reading(self, data, timestamp, watermark_msg):
     # The can bus dict contains other fields as well, but we don't
     # currently use them.
     vehicle_transform = to_pylot_transform(data['transform'])
     # TODO(ionel): Scenario runner computes speed differently from
     # the way we do it in the CARLA operator. This affects
     # agent stopping constants. Check!
     forward_speed = data['speed']
     can_bus = pylot.simulation.utils.CanBus(vehicle_transform,
                                             forward_speed)
     self._can_bus_stream.send(Message(can_bus, timestamp))
Example #12
def convert_pedestrian_actors(pedestrian_actors):
    """ Converts a Carla pedestrian actor into a Pylot pedestrian object."""
    pedestrians = []
    for ped_actor in pedestrian_actors:
        transform = to_pylot_transform(ped_actor.get_transform())
        bounding_box = pylot.simulation.utils.BoundingBox(
            ped_actor.bounding_box)
        speed = pylot.simulation.utils.get_speed(ped_actor.get_velocity())
        pedestrian = pylot.simulation.utils.Pedestrian(ped_actor.id, transform,
                                                       bounding_box, speed)
        pedestrians.append(pedestrian)
    return pedestrians
Example #13
    def are_on_same_lane(self, location1, location2):
        """ Returns True if the two locations are on the same lane.

        Args:
            location1: Location in world coordinates.
            location2: Location in world coordinates.
        """
        loc1 = to_carla_location(location1)
        waypoint1 = self._map.get_waypoint(loc1,
                                           project_to_road=False,
                                           lane_type=carla.LaneType.Driving)
        if not waypoint1:
            # First location is not on a drivable lane.
            return False
        loc2 = to_carla_location(location2)
        waypoint2 = self._map.get_waypoint(loc2,
                                           project_to_road=False,
                                           lane_type=carla.LaneType.Driving)
        if not waypoint2:
            # Second location is not on a drivable lane.
            return False
        w_t1 = to_pylot_transform(waypoint1.transform)
        w_t2 = to_pylot_transform(waypoint2.transform)
        self._logger.info('same_lane location1 {} to waypoint1 {}'.format(
            location1, w_t1.location))
        self._logger.info('same_lane location2 {} to waypoint2 {}'.format(
            location2, w_t2.location))
        if waypoint1.road_id == waypoint2.road_id:
            return waypoint1.lane_id == waypoint2.lane_id
        else:
            # Return False if we're in intersection and the other
            # obstacle isn't.
            if waypoint1.is_intersection and not waypoint2.is_intersection:
                return False
            if waypoint2.lane_type == carla.LaneType.Driving:
                # This may return True even when the locations are on
                # different lanes, because lane ids cannot be compared
                # across different road ids.
                # TODO(ionel): Figure out how lane ids map across road ids.
                return True
        return False
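
A hypothetical usage, e.g. to decide whether a detected obstacle shares the ego vehicle's lane (hd_map is assumed to be an instance of the class that defines are_on_same_lane):

# Hypothetical usage; both locations are world coordinates.
if hd_map.are_on_same_lane(ego_vehicle_location, obstacle_location):
    print("Obstacle is on the ego vehicle's lane.")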
Example #14
def convert_traffic_stop_actors(traffic_stop_actors):
    """ Converts a Carla traffic stop actor into a Pylot stop sign object."""
    stop_signs = []
    for ts_actor in traffic_stop_actors:
        transform = to_pylot_transform(ts_actor.get_transform())
        world_trigger_volume = ts_actor.get_transform().transform(
            ts_actor.trigger_volume.location)
        bbox = pylot.simulation.utils.BoundingBox(
            carla.BoundingBox(world_trigger_volume,
                              ts_actor.trigger_volume.extent))
        stop_sign = pylot.simulation.utils.StopSign(transform, bbox)
        stop_signs.append(stop_sign)
    return stop_signs
Example #15
def convert_vehicle_actors(vec_actors):
    """ Converts a Carla vehicle actor into a Pylot Vehicle object."""
    vehicles = []
    # TODO(ionel): Handle hero vehicle!
    for vec_actor in vec_actors:
        transform = to_pylot_transform(vec_actor.get_transform())
        bounding_box = pylot.simulation.utils.BoundingBox(
            vec_actor.bounding_box)
        speed = pylot.simulation.utils.get_speed(vec_actor.get_velocity())
        vehicle = pylot.simulation.utils.Vehicle(vec_actor.id, transform,
                                                 bounding_box, speed)
        vehicles.append(vehicle)
    return vehicles
Example #16
    def __publish_hero_vehicle_data(self, timestamp, watermark_msg):
        vec_transform = to_pylot_transform(
            self._driving_vehicle.get_transform())
        forward_speed = pylot.simulation.utils.get_speed(
            self._driving_vehicle.get_velocity())
        can_bus = pylot.simulation.utils.CanBus(vec_transform, forward_speed)
        self.get_output_stream('can_bus').send(Message(can_bus, timestamp))
        self.get_output_stream('can_bus').send(watermark_msg)

        # Set the world simulation view with respect to the vehicle.
        v_pose = self._driving_vehicle.get_transform()
        v_pose.location -= 10 * carla.Location(v_pose.get_forward_vector())
        v_pose.location.z = 5
        self._world.get_spectator().set_transform(v_pose)
Example #17
def main():
    world = setup_world()
    transform = get_spawnpoint(world, 0)
    print('Spawning at {}'.format(to_pylot_transform(transform)))
    try:
        camera = add_camera(world, transform, on_camera_msg)

        # Move the spectator view to the camera position.
        world.get_spectator().set_transform(transform)
        world.tick()

        explore_api(world, transform)
    finally:
        # Destroy the actors.
        camera.destroy()
Example #18
def convert_traffic_light_actor(tl_actor):
    transform = to_pylot_transform(tl_actor.get_transform())
    tl_state = tl_actor.get_state()
    erdos_tl_state = None
    if tl_state == carla.TrafficLightState.Red:
        erdos_tl_state = TrafficLightColor.RED
    elif tl_state == carla.TrafficLightState.Yellow:
        erdos_tl_state = TrafficLightColor.YELLOW
    elif tl_state == carla.TrafficLightState.Green:
        erdos_tl_state = TrafficLightColor.GREEN
    else:
        erdos_tl_state = TrafficLightColor.OFF
    extent = pylot.simulation.utils.Extent(tl_actor.trigger_volume.extent.x,
                                           tl_actor.trigger_volume.extent.y,
                                           tl_actor.trigger_volume.extent.z)
    traffic_light = pylot.simulation.utils.TrafficLight(
        tl_actor.id, transform, erdos_tl_state, extent)
    return traffic_light
Example #19
def on_lidar_msg(carla_pc):
    game_time = int(carla_pc.timestamp * 1000)
    print("Received lidar msg {}".format(game_time))
    points = np.frombuffer(carla_pc.raw_data, dtype=np.dtype('f4'))
    points = copy.deepcopy(points)
    points = np.reshape(points, (int(points.shape[0] / 3), 3))

    lidar_transform = to_pylot_transform(carla_pc.transform)

    # Transform lidar points from lidar coordinates to camera coordinates.
    points = lidar_point_cloud_to_camera_coordinates(points)
    for (x, y) in pixels_to_check:
        pos3d_pc = get_3d_world_position_with_point_cloud(
            x, y, points.tolist(), lidar_transform, 800, 600, 90.0)
        print("{} Computed using lidar {}".format((x, y), pos3d_pc))

    global lidar_pc
    lidar_pc = points.tolist()
Example #20
def convert_vehicle_actors(vec_actors):
    """ Converts a Carla vehicle object into a Pylot Vehicle object.

    Args:
        vec_actors: A list of Carla vehicle actors.

    Returns:
        A list of Pylot Vehicles.
    """
    vehicles = []
    for vec_actor in vec_actors:
        transform = to_pylot_transform(vec_actor.get_transform())
        bounding_box = pylot.simulation.utils.BoundingBox(
            vec_actor.bounding_box)
        speed = pylot.simulation.utils.get_speed(vec_actor.get_velocity())
        vehicle = pylot.simulation.utils.Vehicle(vec_actor.id, transform,
                                                 bounding_box, speed)
        vehicles.append(vehicle)
    return vehicles
Example #21
def main(args):
    """ The main function that orchestrates the setup of the world, connection
    to the scenario and the subsequent logging of the frames for computation
    of the mIoU.

    Args:
        args: The argparse.Namespace instance that is retrieved from parsing
        the arguments.
    """

    # Setup the world.
    world = setup_world(args.host, args.port, args.delta)

    # Retrieve the ego-vehicle from the simulation.
    ego_vehicle = None
    while ego_vehicle is None:
        print("Waiting for the scenario to be ready ...")
        sleep(1)
        ego_vehicle = retrieve_actor(world, 'vehicle.*', 'hero')
        world.tick()

    # Transform of the cameras.
    camera_transform = carla.Transform(location=carla.Location(1.5, 0.0, 1.4),
                                       rotation=carla.Rotation(0, 0, 0))

    # Connect the RGB camera to the vehicle.
    rgb_camera = spawn_camera('sensor.camera.rgb', camera_transform,
                              ego_vehicle, *args.res.split('x'))

    # Connect the depth camera to the vehicle.
    depth_camera = spawn_camera('sensor.camera.depth', camera_transform,
                                ego_vehicle, *args.res.split('x'))

    # Open the CSV file for writing.
    csv_file = open(args.output, 'w')
    csv_writer = csv.writer(csv_file)

    # Create the cleanup function.
    global CLEANUP_FUNCTION
    CLEANUP_FUNCTION = functools.partial(cleanup_function,
                                         world=world,
                                         cameras=[rgb_camera, depth_camera],
                                         csv_file=csv_file)

    # Create a PyGame surface for debugging purposes.
    width, height = map(int, args.res.split('x'))
    surface = None
    if args.visualize:
        surface = pygame.display.set_mode((width, height),
                                          pygame.HWSURFACE | pygame.DOUBLEBUF)

    # Register a callback function with the camera.
    rgb_camera.listen(process_rgb_images)

    depth_camera_setup = CameraSetup("depth_camera", 'sensor.camera.depth',
                                     width, height,
                                     to_pylot_transform(camera_transform))
    depth_camera.listen(
        functools.partial(process_depth_images,
                          depth_camera_setup=depth_camera_setup,
                          ego_vehicle=ego_vehicle,
                          speed=args.speed,
                          csv=csv_writer,
                          surface=surface,
                          visualize=args.visualize))

    try:
        # Keep the thread alive so that the images can be processed.
        while True:
            pass
    except KeyboardInterrupt:
        CLEANUP_FUNCTION()
        sys.exit(0)
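
Example #21 builds CLEANUP_FUNCTION from a cleanup_function helper that is not shown on this page. A minimal sketch of what such a helper might do, assuming it only has to release the sensors and close the CSV log, is:

def cleanup_function(world=None, cameras=None, csv_file=None):
    # Hypothetical helper matching the functools.partial call above:
    # stop and destroy the spawned sensors, then close the CSV file.
    for camera in cameras or []:
        camera.stop()
        camera.destroy()
    if csv_file is not None:
        csv_file.close()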
Example #22
def log_traffic_lights(world):
    world_map = world.get_map()
    (traffic_lights, _, traffic_stops, speed_signs) = get_actors(world)
    tl_colors = [
        carla.TrafficLightState.Yellow, carla.TrafficLightState.Green,
        carla.TrafficLightState.Red
    ]
    for light in traffic_lights:
        print("Working for traffic light {}".format(light.id))
        # For every traffic light, get the neighbouring lights except the one
        # directly opposite.
        group_lights = []
        for n_light in light.get_group_traffic_lights():
            if not check_lights_opposite(light, n_light):
                group_lights.append(convert_to_pylot_traffic_light(n_light))

        transforms_of_interest = []
        for offset in range(10, 40, 5):
            # Traffic lights have a different coordinate system, hence we
            # need to offset along y instead of x, and add that to the
            # trigger volume location.
            offset_loc = pylot.simulation.utils.Location(
                x=light.trigger_volume.location.x,
                y=light.trigger_volume.location.y + offset,
                z=light.trigger_volume.location.z)
            offset_rot = pylot.simulation.utils.Rotation(pitch=0,
                                                         yaw=0,
                                                         roll=0)
            offset_trans = pylot.simulation.utils.Transform(
                offset_loc, offset_rot)

            # Transform the offset relative to the traffic light.
            transform = to_pylot_transform(
                light.get_transform()) * offset_trans
            location = to_carla_location(transform.location)

            # Get the waypoint nearest to the transform.
            w = world_map.get_waypoint(location,
                                       project_to_road=True,
                                       lane_type=carla.LaneType.Driving)
            w_rotation = w.transform.rotation
            camera_transform = to_pylot_transform(w.transform)
            camera_transform.location.z += 2.0
            transform = to_carla_transform(camera_transform)
            transforms_of_interest.append(transform)

            # Get the right lanes.
            wp_right = w.get_right_lane()
            while wp_right and wp_right.lane_type == carla.LaneType.Driving \
                    and w_rotation == wp_right.transform.rotation:
                camera_transform = to_pylot_transform(wp_right.transform)
                camera_transform.location.z += 2.0
                transform = to_carla_transform(camera_transform)
                transforms_of_interest.append(transform)
                wp_right = wp_right.get_right_lane()

            # Get the left lanes.
            wp_left = w.get_left_lane()
            while wp_left and wp_left.lane_type == carla.LaneType.Driving and \
                    w_rotation == wp_left.transform.rotation:
                camera_transform = to_pylot_transform(wp_left.transform)
                camera_transform.location.z += 2.0
                transform = to_carla_transform(camera_transform)
                transforms_of_interest.append(transform)
                wp_left = wp_left.get_left_lane()

        print("The total number of transforms were: {}".format(
            len(transforms_of_interest)))
        for tl_color in tl_colors:
            change_traffic_light_colors(world, tl_color)
            world.tick()
            time.sleep(3)
            log_obstacles(world, transforms_of_interest, group_lights,
                          tl_color, speed_signs, traffic_stops)