Example #1
def play_depth(bag_file: Bag, camera: str, depth: str) -> None:
    """
    Play the ZED data in a bag file.

    Args:
        bag_file: the bag file to play
        camera: the topic to use to read compressed or raw camera data
        depth: the topic to use to read 32-bit floating point depth measurements

    Returns:
        None

    """
    # open windows to stream the camera and depth image data to
    camera_window = None
    depth_window = None
    # iterate over the messages
    for topic, msg, _ in bag_file.read_messages(topics=[camera, depth]):
        # if topic is depth, unwrap and send to the depth window
        if topic == depth:
            # if the depth window is not setup yet, open it
            if depth_window is None:
                # create a title for the window
                title = '{} ({})'.format(bag_file.filename, depth)
                # initialize the window
                depth_window = Window(title, msg.height, msg.width)
            # get a depth image from the data and dimensions
            img = get_depth_image(msg.data, depth_window.shape)
            # show the image on the depth window
            depth_window.show(img)
        # if topic is camera, unwrap and send to the camera window
        elif topic == camera:
            # if the camera window is not setup yet, open it
            if camera_window is None:
                # create a title for the window
                title = '{} ({})'.format(bag_file.filename, camera)
                # initialize the window
                camera_window = Window(title, msg.height, msg.width)
            # get an image from the data and dimensions
            img = get_camera_image(msg.data, camera_window.shape)
            # show the image on the camera window
            camera_window.show(img)
    # shut down the viewer windows if they were opened
    if camera_window is not None:
        camera_window.close()
    if depth_window is not None:
        depth_window.close()
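
A minimal usage sketch, not taken from the source: it assumes rosbag's Bag class is available and uses hypothetical ZED topic names and a placeholder file name; substitute the topics actually recorded in your bag.

# open a bag file and stream its camera and depth topics (all values below are assumptions)
from rosbag import Bag

bag_file = Bag('zed_recording.bag')
play_depth(
    bag_file,
    camera='/zed/rgb/image_rect_color',   # assumed camera topic
    depth='/zed/depth/depth_registered',  # assumed depth topic
)
bag_file.close()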
Example #2
def play_superpixel(bag_file: Bag, camera_info: str, camera: str, depth: str,
                    segmentation: str, downscale: int) -> None:
    """
    Play the camera data in a bag file through a superpixel algorithm.

    Args:
        bag_file: the bag file to play
        camera_info: the topic to use to read metadata about the camera
        camera: the topic to use to read compressed or raw camera data
        depth: the topic to use to read 32-bit floating point depth measurements
        segmentation: the algorithm to use for segmentation
        downscale: the factor to downscale the image by before segmentation

    Returns:
        None

    """
    # extract the camera dimensions from the bag
    dims = get_camera_dimensions(bag_file, camera_info)
    # open a window to stream the data to
    window = Window('{} ({})'.format(bag_file.filename, camera), *dims)
    # the most recent camera image, paired with the next depth message
    camera_img = None
    # iterate over the messages
    for topic, msg, _ in bag_file.read_messages(topics=[camera, depth]):
        # if topic is camera, unwrap the camera data
        if topic == camera:
            camera_img = get_camera_image(msg.data, dims)
        # if topic is depth, unwrap and calculate the segmentation
        elif topic == depth:
            # skip depth messages that arrive before the first camera image
            if camera_img is None:
                continue
            depth_img = get_depth_image(msg.data, dims)
            # combine the image with the depth channel (Red only)
            img = np.concatenate([camera_img, depth_img[..., 0:1]], axis=-1)
            # segment the image and get a copy of the segmented pixels
            img = segment(img, method=segmentation, downscale=downscale)
            # send the segmented image to the window
            window.show(img)
    # shut down the viewer windows
    window.close()
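
A minimal usage sketch, not taken from the source: the topic names, segmentation method, and downscale factor below are illustrative assumptions; pass whatever your bag and the segment() helper actually support.

# play a bag through the superpixel pipeline (all argument values are assumptions)
from rosbag import Bag

bag_file = Bag('zed_recording.bag')
play_superpixel(
    bag_file,
    camera_info='/zed/rgb/camera_info',   # assumed camera metadata topic
    camera='/zed/rgb/image_rect_color',   # assumed camera topic
    depth='/zed/depth/depth_registered',  # assumed depth topic
    segmentation='slic',                  # assumed method name accepted by segment()
    downscale=4,                          # assumed downscale factor
)
bag_file.close()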