Example #1
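# Assumed imports, based on the pylot codebase this example is drawn from
# (exact module paths may differ between pylot versions):
# from pylot.perception.detection.obstacle import Obstacle
# from pylot.perception.detection.speed_limit_sign import SpeedLimitSign
# from pylot.perception.detection.stop_sign import StopSign
# from pylot.perception.detection.traffic_light import TrafficLight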
def extract_data_in_pylot_format(actor_list):
    """Extracts actor information in pylot format from an actor list.

    Args:
        actor_list (carla.ActorList): An actor list object with all the
            simulation actors.

    Returns:
        A tuple that contains objects for all different types of actors.
    """
    # Note: the output will include the ego vehicle as well.
    vec_actors = actor_list.filter('vehicle.*')
    vehicles = [
        Obstacle.from_carla_actor(vec_actor) for vec_actor in vec_actors
    ]

    person_actors = actor_list.filter('walker.pedestrian.*')
    people = [
        Obstacle.from_carla_actor(ped_actor) for ped_actor in person_actors
    ]

    tl_actors = actor_list.filter('traffic.traffic_light*')
    traffic_lights = [
        TrafficLight.from_carla_actor(tl_actor) for tl_actor in tl_actors
    ]

    speed_limit_actors = actor_list.filter('traffic.speed_limit*')
    speed_limits = [
        SpeedLimitSign.from_carla_actor(ts_actor)
        for ts_actor in speed_limit_actors
    ]

    traffic_stop_actors = actor_list.filter('traffic.stop')
    traffic_stops = [
        StopSign.from_carla_actor(ts_actor) for ts_actor in traffic_stop_actors
    ]

    return (vehicles, people, traffic_lights, speed_limits, traffic_stops)
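
A minimal usage sketch for the function above, assuming a CARLA server running on localhost:2000; everything except the carla API calls and extract_data_in_pylot_format is illustrative:

import carla

client = carla.Client('localhost', 2000)
client.set_timeout(10.0)
world = client.get_world()

# The returned tuples hold pylot wrapper objects, not raw carla actors.
(vehicles, people, traffic_lights, speed_limits,
 traffic_stops) = extract_data_in_pylot_format(world.get_actors())
print('{} vehicles, {} people, {} traffic lights, {} speed limits, '
      '{} stop signs'.format(len(vehicles), len(people), len(traffic_lights),
                             len(speed_limits), len(traffic_stops)))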
Example #2
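# Assumed context: this callback comes from a pylot test script. The imports
# below follow the pylot layout (paths may differ between versions), and
# VEHICLE_DESTINATION, CLEANUP_FUNCTION, retrieve_rgb_image,
# retrieve_semantic_image, draw_image, draw_bounding_box and
# compute_and_log_map are globals/helpers defined elsewhere in that script.
# import sys
# import carla
# import pygame
# from pylot.perception.depth_frame import DepthFrame
# from pylot.perception.detection.obstacle import Obstacle
# from pylot.perception.segmentation.segmented_frame import SegmentedFrame
# from pylot.utils import Transform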
def process_depth_images(msg,
                         depth_camera_setup,
                         ego_vehicle,
                         speed,
                         csv,
                         surface,
                         visualize=False):
    print("Received a message for the time: {}".format(msg.timestamp))

    # If we are within 5 meters of the destination, stop the vehicle and
    # exit with success.
    if ego_vehicle.get_location().distance(VEHICLE_DESTINATION) <= 5:
        ego_vehicle.set_velocity(carla.Vector3D())
        CLEANUP_FUNCTION()
        sys.exit(0)

    # Get the RGB image corresponding to the given depth image timestamp.
    rgb_image = retrieve_rgb_image(msg.timestamp)

    # Construct the depth frame, and transform its static camera setup with
    # respect to the location of the vehicle in the world.
    vehicle_transform = Transform.from_carla_transform(
        ego_vehicle.get_transform())
    depth_frame = DepthFrame.from_carla_frame(msg, depth_camera_setup)
    depth_frame.camera_setup.set_transform(vehicle_transform *
                                           depth_frame.camera_setup.transform)

    # Get the semantic image corresponding to the given depth image timestamp.
    semantic_image = retrieve_semantic_image(msg.timestamp)
    semantic_frame = SegmentedFrame.from_carla_image(semantic_image,
                                                     depth_frame.camera_setup)

    # Visualize the image and the bounding boxes if needed.
    if visualize:
        draw_image(rgb_image, surface)

    # Create a transparent surface on which to draw the bounding boxes.
    bb_surface = None
    resolution = (depth_camera_setup.width, depth_camera_setup.height)
    if visualize:
        bb_surface = pygame.Surface(resolution)
        bb_surface.set_colorkey((0, 0, 0))

    # Transform the people in the world into obstacles relative to the
    # current frame, and compute their bounding boxes in the camera view.
    detected_people = []
    for person in ego_vehicle.get_world().get_actors().filter('walker.*'):
        obstacle = Obstacle.from_carla_actor(person)
        # Skip people that are too far away to be discernible in the frame.
        if obstacle.distance(vehicle_transform) > 125:
            continue
        bbox = obstacle.to_camera_view(depth_frame, semantic_frame.frame)
        if bbox is not None:
            detected_people.append(bbox)
            if visualize:
                draw_bounding_box(bbox, bb_surface)

    # All the bounding boxes have been drawn on bb_surface; overlay it on
    # the RGB image surface.
    if visualize:
        surface.blit(bb_surface, (0, 0))
        pygame.display.flip()

    # Compute and log the mAP of the detections.
    print("We detected a total of {} people.".format(len(detected_people)))
    compute_and_log_map(detected_people, msg.timestamp, csv)

    # Move the ego_vehicle according to the given speed.
    ego_vehicle.set_velocity(carla.Vector3D(x=-speed))

    ego_vehicle.get_world().tick()
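
A sketch of how this callback might be wired to a CARLA depth camera, assuming a running server; CAMERA_SETUP stands in for the pylot DepthCameraSetup the original script builds, and the sensor placement values are illustrative:

import functools

import carla
import pygame

client = carla.Client('localhost', 2000)
client.set_timeout(10.0)
world = client.get_world()

# Spawn a depth camera attached to an already-spawned ego vehicle.
ego_vehicle = world.get_actors().filter('vehicle.*')[0]
blueprint = world.get_blueprint_library().find('sensor.camera.depth')
depth_camera = world.spawn_actor(
    blueprint,
    carla.Transform(carla.Location(x=2.0, z=1.4)),
    attach_to=ego_vehicle)

# Feed every depth image into the callback; csv=None is a placeholder for
# the csv writer the original script logs mAP values to.
pygame.init()
surface = pygame.display.set_mode((800, 600))
depth_camera.listen(
    functools.partial(process_depth_images,
                      depth_camera_setup=CAMERA_SETUP,
                      ego_vehicle=ego_vehicle,
                      speed=10,
                      csv=None,
                      surface=surface,
                      visualize=True))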