Code Example #1
    def test_above_and_to_the_left(self):

        location = (40, 70)
        center_of_image_in_global = (50, 50)
        center_of_image_in_pixels = (300, 300)

        pixels = utils.convert_to_pixel_coords(location, center_of_image_in_global,
                                               center_of_image_in_pixels)
        answer = (100, 200)
        self.assertTupleEqual(pixels, answer)

        pixels = utils.convert_to_pixel_coords(location, center_of_image_in_global,
                                               center_of_image_in_pixels, resolution=0.2)
        answer = (200, 250)
        self.assertTupleEqual(pixels, answer)
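
The expected values above pin down the conversion convention: the offset of the location from the global image center is divided by the resolution (meters per pixel, 0.1 by default), the y component is negated because pixel rows grow downward, and the result is added to the pixel center as a (row, column) tuple. The following is a minimal sketch consistent with those expectations, not the devkit's actual implementation:

from typing import Tuple

def convert_to_pixel_coords_sketch(location: Tuple[float, float],
                                   center_of_image_in_global: Tuple[float, float],
                                   center_of_image_in_pixels: Tuple[float, float],
                                   resolution: float = 0.1) -> Tuple[int, int]:
    """Re-implementation consistent with the tests above (an assumption, not devkit source)."""
    x_offset = location[0] - center_of_image_in_global[0]
    y_offset = location[1] - center_of_image_in_global[1]
    # Convert meters to pixels by dividing by the resolution (meters/pixel).
    x_pixel = x_offset / resolution
    # Negate y: global y grows upward while pixel rows grow downward.
    y_pixel = -y_offset / resolution
    row_pixel = int(center_of_image_in_pixels[0] + y_pixel)
    column_pixel = int(center_of_image_in_pixels[1] + x_pixel)
    return row_pixel, column_pixel

# (40, 70) around center (50, 50) at pixel (300, 300) gives (100, 200) at 0.1 m/px
# and (200, 250) at 0.2 m/px, matching the assertions above.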
Code Example #2
def get_track_box(annotation: Dict[str, Any],
                  center_coordinates: Tuple[float, float],
                  center_pixels: Tuple[float, float],
                  resolution: float = 0.1) -> np.ndarray:
    """
    Get four corners of bounding box for agent in pixels.
    :param annotation: The annotation record of the agent.
    :param center_coordinates: (x, y) coordinates in global frame
        of the center of the image.
    :param center_pixels: (row_index, column_index) location of the center
        of the image in pixel coordinates.
    :param resolution: Resolution of the image in meters/pixel.
    :return: Four corners of the agent's bounding box in pixel coordinates.
    """

    assert resolution > 0

    location = annotation['translation'][:2]
    yaw_in_radians = quaternion_yaw(Quaternion(annotation['rotation']))

    row_pixel, column_pixel = convert_to_pixel_coords(location,
                                                      center_coordinates,
                                                      center_pixels,
                                                      resolution)

    width = annotation['size'][0] / resolution
    length = annotation['size'][1] / resolution

    # Width and length are switched here so that we can draw them along the x-axis as
    # opposed to the y. This makes rotation easier.
    return pixels_to_box_corners(row_pixel, column_pixel, length, width,
                                 yaw_in_radians)
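
A hedged usage sketch follows. The annotation record and the center values are illustrative assumptions; only the call signature comes from the definition above. The record mimics the nuScenes sample_annotation layout, with translation in meters, rotation as a w-x-y-z quaternion, and size as (width, length, height).

# Hypothetical sample_annotation record (values made up for illustration).
annotation = {
    'translation': [42.0, 58.0, 0.0],
    'rotation': [0.985, 0.0, 0.0, 0.174],  # roughly a 20-degree yaw about z
    'size': [2.0, 4.5, 1.5],               # width, length, height in meters
}

# get_track_box is assumed to be importable from the devkit module that defines it.
box = get_track_box(annotation,
                    center_coordinates=(50.0, 50.0),
                    center_pixels=(400, 250),
                    resolution=0.1)
print(box)  # four (row, column) corners of the rotated box in pixel coordinates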
Code Example #3
    def test_same_location(self):

        location = (50, 50)
        center_of_image_in_global = (50, 50)
        center_of_image_in_pixels = (400, 250)

        pixels = utils.convert_to_pixel_coords(location, center_of_image_in_global, center_of_image_in_pixels)
        self.assertTupleEqual(pixels, (400, 250))
Code Example #4
    def test_below_and_to_the_left(self):

        location = (30, 40)
        center_of_image_in_global = (50, 50)
        center_of_image_in_pixels = (400, 250)

        pixels = utils.convert_to_pixel_coords(location, center_of_image_in_global, center_of_image_in_pixels)
        answer = (500, 50)
        self.assertTupleEqual(pixels, answer)
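
The expected tuple follows the same convention as example #1: divide the offset from the global center by the resolution (0.1 m/px assumed as the default), negate the y component for the row, and add to the pixel center. A quick check of the arithmetic:

resolution = 0.1  # default meters/pixel assumed by the test
row = int(400 - (40 - 50) / resolution)     # 400 + 100 = 500
column = int(250 + (30 - 50) / resolution)  # 250 - 200 = 50
assert (row, column) == (500, 50)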
Code Example #5
def draw_lanes_on_image(
    image: np.ndarray,
    lanes: Dict[str, List[Tuple[float, float, float]]],
    agent_global_coords: Tuple[float, float],
    agent_yaw_in_radians: float,
    agent_pixels: Tuple[int, int],
    resolution: float,
    color_function: Callable[[float, float],
                             Color] = color_by_yaw) -> np.ndarray:
    """
    Draws lanes on image.
    :param image: Image to draw lanes on. Preferably all-black or all-white image.
    :param lanes: Mapping from lane id to list of coordinate tuples in global coordinate system.
    :param agent_global_coords: Location of the agent in the global coordinate frame.
    :param agent_yaw_in_radians: Yaw of agent in radians.
    :param agent_pixels: Location of the agent in the image as (row_pixel, column_pixel).
    :param resolution: Resolution in meters/pixel.
    :param color_function: By default, lanes are colored by the yaw difference between the pose
    on the lane and the agent yaw. However, you can supply your own function to color the lanes.
    :return: Image (represented as np.ndarray) with lanes drawn.
    """

    for poses_along_lane in lanes.values():

        for start_pose, end_pose in zip(poses_along_lane[:-1],
                                        poses_along_lane[1:]):

            start_pixels = convert_to_pixel_coords(start_pose[:2],
                                                   agent_global_coords,
                                                   agent_pixels, resolution)
            end_pixels = convert_to_pixel_coords(end_pose[:2],
                                                 agent_global_coords,
                                                 agent_pixels, resolution)

            # Flip (row, column) to (column, row) because cv2.line expects
            # points in (x, y) order.
            start_pixels = (start_pixels[1], start_pixels[0])
            end_pixels = (end_pixels[1], end_pixels[0])

            color = color_function(agent_yaw_in_radians, start_pose[2])

            cv2.line(image, start_pixels, end_pixels, color, thickness=5)

    return image
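
A minimal usage sketch, with a blank canvas and a single made-up lane (the lane id, poses, and pixel placement are illustrative assumptions; the third element of each pose is the yaw used by the default color_by_yaw coloring):

import numpy as np

image = np.zeros((500, 500, 3), dtype=np.uint8)
lanes = {
    'lane_0': [(40.0, 40.0, 0.0), (45.0, 45.0, 0.0), (50.0, 50.0, 0.0)],
}

# Agent at global (50, 50), drawn at pixel (row=250, column=250), 0.1 m/px.
image_with_lanes = draw_lanes_on_image(image,
                                       lanes,
                                       agent_global_coords=(50.0, 50.0),
                                       agent_yaw_in_radians=0.0,
                                       agent_pixels=(250, 250),
                                       resolution=0.1)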