Example No. 1
def visualize_trajectory(dataset, index, title="target_positions movement with draw_trajectory"):
    data = dataset[index]
    im = data["image"].transpose(1, 2, 0)
    im = dataset.rasterizer.to_rgb(im)
    target_positions_pixels = transform_points(data["target_positions"] + data["centroid"][:2], data["world_to_image"])
    draw_trajectory(im, target_positions_pixels, TARGET_POINTS_COLOR, radius=1, yaws=data["target_yaws"])

    plt.title(title)
    plt.imshow(im[::-1])
    plt.show()
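All of these snippets lean on the same l5kit scaffolding, which the listing leaves out. A minimal setup sketch follows; the data folder, config filename, and zarr key are placeholder assumptions, not values taken from any example on this page.

import os
import matplotlib.pyplot as plt

from l5kit.configs import load_config_data
from l5kit.data import ChunkedDataset, LocalDataManager
from l5kit.dataset import EgoDataset
from l5kit.geometry import transform_points
from l5kit.rasterization import build_rasterizer
from l5kit.visualization import TARGET_POINTS_COLOR, draw_trajectory

# Placeholder paths -- point these at your local copy of the Lyft dataset.
os.environ["L5KIT_DATA_FOLDER"] = "./lyft-data"
dm = LocalDataManager(None)
cfg = load_config_data("./visualisation_config.yaml")

zarr_dataset = ChunkedDataset(dm.require(cfg["val_data_loader"]["key"])).open()
rast = build_rasterizer(cfg, dm)
dataset = EgoDataset(cfg, zarr_dataset, rast)

visualize_trajectory(dataset, index=0)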
Example No. 2
    def visualizeAV(self):
        data = self.dataset[50]

        im = data["image"].transpose(1, 2, 0)
        im = self.dataset.rasterizer.to_rgb(im)
        target_positions_pixels = transform_points(data["target_positions"] + data["centroid"][:2],
                                                   data["world_to_image"])
        draw_trajectory(im, target_positions_pixels, data["target_yaws"], TARGET_POINTS_COLOR)

        plt.imshow(im[::-1])
        plt.show()
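Note the argument order: this snippet passes the yaws positionally before the color, matching the older l5kit signature draw_trajectory(on_image, positions, yaws, rgb_color). Example No. 1 uses the newer form, draw_trajectory(on_image, positions, rgb_color, radius=1, yaws=None), where yaws is an optional keyword; check which l5kit release you have installed before copying either call.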
Example No. 3
def plot_image(data_point, rasterizer):
    im = data_point["image"].transpose(1, 2, 0)
    im = rasterizer.to_rgb(im)
    target_positions_pixels = transform_points(data_point["target_positions"],
                                               data_point["raster_from_agent"])
    draw_trajectory(im,
                    target_positions_pixels,
                    TARGET_POINTS_COLOR,
                    yaws=data_point["target_yaws"])
    fig, ax = plt.subplots(figsize=(5, 5))
    ax.imshow(im[::-1])
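Unlike the first two examples, this one transforms target_positions with the raster_from_agent matrix, the key newer l5kit versions provide for mapping agent-frame coordinates straight to pixels. That is why no centroid offset is added before the transform, whereas the older snippets on this page add centroid[:2] to reach world coordinates and then apply world_to_image.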
Example No. 4
def visualize_trajectory(dataset, index):
    data = dataset[index]
    im = data['image'].transpose(1, 2, 0)
    im = dataset.rasterizer.to_rgb(im)
    target_position_pixels = transform_points(
        data['target_positions'] + data['centroid'][:2],
        data['world_to_image'])
    draw_trajectory(im, target_position_pixels, data['target_yaws'],
                    TARGET_POINTS_COLOR)

    plt.imshow(im[::-1])
    plt.show()
Example No. 5
def test_draw_trajectory() -> None:
    on_image = np.zeros((224, 244, 3), dtype=np.uint8)
    positions = np.asarray([(0, 0), (0, 10),
                            (0, 20)])  # XY notation, pixel positions
    draw_trajectory(on_image, positions, (255, 255, 255))

    assert np.all(on_image[0, 0] == (255, 255, 255))
    assert np.all(on_image[10, 0] == (255, 255, 255))
    assert np.all(on_image[20, 0] == (255, 255, 255))

    assert np.all(on_image[0, 20] == (0, 0, 0))
    assert np.all(on_image[0, 10] == (0, 0, 0))
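The asymmetric assertions are the whole point of this test: positions are given in XY order, while numpy indexes images as [row, col], i.e. [y, x]. A point drawn at (x=0, y=10) therefore lands at on_image[10, 0], and on_image[0, 10] stays black.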
Example No. 6
 def plt_show_agent_map(self, idx):
     zarr_dataset = self.chunked_dataset("scenes/train.zarr")
     agent_dataset = AgentDataset(self.cfg, zarr_dataset, self.rast)
     data = agent_dataset[idx]
     im = data["image"].transpose(1, 2, 0)
     im = self.rast.to_rgb(im)
     target_positions_pixels = transform_points(
         data["target_positions"] + data["centroid"][:2],
         data["world_to_image"])
     draw_trajectory(im, target_positions_pixels, TARGET_POINTS_COLOR, 1,
                     data["target_yaws"])
     plt.imshow(im[::-1])
     plt.savefig("filename.png")
Example No. 7
def create_animate_for_indexes(dataset, indexes):
    images = []
    timestamps = []

    for idx in indexes:
        data = dataset[idx]
        im = data["image"].transpose(1, 2, 0)
        im = dataset.rasterizer.to_rgb(im)
        target_positions_pixels = transform_points(data["target_positions"] + data["centroid"][:2], data["world_to_image"])
        center_in_pixels = np.asarray(cfg["raster_params"]["ego_center"]) * cfg["raster_params"]["raster_size"]
        draw_trajectory(im, target_positions_pixels, data["target_yaws"], TARGET_POINTS_COLOR)
        clear_output(wait=True)
        images.append(PIL.Image.fromarray(im[::-1]))
        timestamps.append(data["timestamp"])

    anim = animate_solution(images, timestamps)
    return anim
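animate_solution is defined elsewhere in the kernel this example was taken from. A minimal reimplementation sketch, assuming images is a list of PIL frames and timestamps holds one label per frame:

import matplotlib.pyplot as plt
from matplotlib import animation

def animate_solution(images, timestamps=None):
    # Flip through the pre-rendered frames with a FuncAnimation.
    fig, ax = plt.subplots()
    ax.set_axis_off()
    frame = ax.imshow(images[0])

    def update(i):
        frame.set_data(images[i])
        if timestamps is not None:
            ax.set_title(f"timestamp: {timestamps[i]}")
        return (frame,)

    return animation.FuncAnimation(fig, update, frames=len(images),
                                   interval=60)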
Example No. 8
 def plot_dataset(self,
                  agent_dataset: AgentDataset,
                  plot_num: int = 10) -> None:
     print("Ploting dataset")
     ind = np.random.randint(0, len(agent_dataset), size=plot_num)
     for i in range(plot_num):
         data = agent_dataset[ind[i]]
         im = data["image"].transpose(1, 2, 0)
         im = agent_dataset.rasterizer.to_rgb(im)
         target_positions_pixels = transform_points(
             data["target_positions"], data["raster_from_agent"])
         draw_trajectory(
             im,
             target_positions_pixels,
             TARGET_POINTS_COLOR,
             yaws=data["target_yaws"],
         )
         plt.imshow(im[::-1])
         if self.is_debug:
             plt.show()
Example No. 9
def test_draw_trajectory() -> None:
    on_image = np.zeros((224, 244, 3), dtype=np.uint8)
    positions = np.asarray([(0, 0), (0, 10),
                            (0, 20)])  # XY notation, pixel positions
    draw_trajectory(on_image, positions, (255, 255, 255))

    assert np.all(on_image[0, 0] == (255, 255, 255))
    assert np.all(on_image[10, 0] == (255, 255, 255))
    assert np.all(on_image[20, 0] == (255, 255, 255))

    assert np.all(on_image[0, 20] == (0, 0, 0))
    assert np.all(on_image[0, 10] == (0, 0, 0))

    # test also with arrowed lines
    on_image = np.zeros((224, 244, 3), dtype=np.uint8)
    yaws = np.asarray([[0.1], [-0.1], [0.0]])
    draw_trajectory(on_image, positions, (255, 255, 255), yaws=yaws)

    assert np.all(on_image[0, 0] == (255, 255, 255))
    assert np.all(on_image[10, 0] == (255, 255, 255))
    assert np.all(on_image[20, 0] == (255, 255, 255))
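Supplying yaws (an (N, 1) array of angles in radians) switches draw_trajectory from plain points to arrowed segments, as the inline comment notes; the test then verifies that the same start pixels are still painted.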
Example No. 10
def draw_single_image(
        rasterizer,
        image: np.ndarray,
        centroid: np.ndarray,
        world_to_image: np.ndarray,
        target_positions: np.ndarray,
        target_yaws: np.ndarray,
        predicted_positions: Optional[np.ndarray] = None,
        predicted_yaws: Optional[np.ndarray] = None,
        target_color: Optional[tuple] = TARGET_POINTS_COLOR,
        predicted_color: Optional[tuple] = PREDICTED_POINTS_COLOR,
) -> np.ndarray:
    """
    Produce a single RGB representation of the rasterized input image and its corresponding position prediction.

    :param rasterizer: rasterizer used to convert the model input back to an RGB image
    :param image: rasterized model input (channels-first tensor)
    :param centroid: agent centroid in world coordinates
    :param world_to_image: world-to-image transformation matrix
    :param target_positions: ground-truth future positions
    :param target_yaws: ground-truth future yaws
    :param predicted_positions: predicted future positions, if available
    :param predicted_yaws: predicted future yaws (defaults to the target yaws)
    :param target_color: RGB color for the ground-truth trajectory
    :param predicted_color: RGB color for the predicted trajectory
    :return: the rendered image as a channels-first uint8 array
    """
    predicted_yaws = predicted_yaws if predicted_yaws is not None else target_yaws
    im = _set_image_type(rasterizer.to_rgb(image.cpu().data.numpy().transpose(1, 2, 0)))  # Todo enhance
    draw_trajectory(im, transform_points(
        target_positions.cpu().data.numpy() + centroid[:2].cpu().data.numpy(), world_to_image.cpu().data.numpy()),
                    target_yaws.cpu().data.numpy(), target_color)
    if predicted_positions is not None:
        draw_trajectory(im, transform_points(
            predicted_positions.cpu().data.numpy() + centroid[:2].cpu().data.numpy(),
            world_to_image.cpu().data.numpy()), predicted_yaws.cpu().data.numpy(), predicted_color)
    return np.uint8(im).transpose(2, 0, 1)
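The returned frame is a channels-first uint8 array, which drops straight into image loggers. A hypothetical usage sketch, assuming an agent_dataset and rasterizer like those built in the earlier examples (the writer and tag names are assumptions, not from the source):

import torch
from torch.utils.tensorboard import SummaryWriter

# Pull one sample and convert the fields draw_single_image expects to tensors.
data = agent_dataset[0]
tensors = {k: torch.from_numpy(data[k]) for k in
           ("image", "centroid", "world_to_image",
            "target_positions", "target_yaws")}

writer = SummaryWriter("runs/trajectory-viz")  # assumed log directory
frame = draw_single_image(rasterizer, tensors["image"], tensors["centroid"],
                          tensors["world_to_image"],
                          tensors["target_positions"], tensors["target_yaws"])
writer.add_image("val/trajectory", frame, global_step=0)  # CHW uint8 layout
writer.close()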
Example No. 11
cfg["raster_params"]["map_type"] = "py_semantic"
rast = build_rasterizer(cfg, dm)
dataset = EgoDataset(cfg, zarr_dataset, rast)
scene_idx = 2
indexes = dataset.get_scene_indices(scene_idx)
images = []

for idx in indexes:
    
    data = dataset[idx]
    im = data["image"].transpose(1, 2, 0)
    im = dataset.rasterizer.to_rgb(im)
    target_positions_pixels = transform_points(data["target_positions"] + data["centroid"][:2], data["world_to_image"])
    center_in_pixels = np.asarray(cfg["raster_params"]["ego_center"]) * cfg["raster_params"]["raster_size"]
    draw_trajectory(im, target_positions_pixels, data["target_yaws"], TARGET_POINTS_COLOR)
    clear_output(wait=True)
    #display(PIL.Image.fromarray(im[::-1]))

# %% [markdown]
# So, there's a lot of information in this one image. I'll try my best to point everything out, but do notify me if I make any errors. OK, let's get started with dissecting the image:
# + We have a four-way intersection here.
# + The green blob represents the AV's motion; our task in this sample is to predict how the AV moves through these traffic conditions.

# %% [markdown]
# It's hard to say what other inferences we can make without more detail on this data, so let's try viewing these images in satellite format.

# %% [code]
import numpy as np
from IPython.display import display, clear_output
import PIL
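# %% [markdown]
# A minimal sketch of that satellite view, assuming the same cfg, dm, and zarr_dataset objects from earlier in this kernel: only the map_type changes; the rest of the pipeline is identical.

# %% [code]
cfg["raster_params"]["map_type"] = "py_satellite"
sat_rast = build_rasterizer(cfg, dm)
sat_dataset = EgoDataset(cfg, zarr_dataset, sat_rast)

data = sat_dataset[indexes[0]]
im = sat_dataset.rasterizer.to_rgb(data["image"].transpose(1, 2, 0))
display(PIL.Image.fromarray(im[::-1]))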
Example No. 12
                data_agent = eval_dataset[v_index]

                out_net = model(
                    torch.from_numpy(
                        data_agent["image"]).unsqueeze(0).to(device))
                out_pos = out_net[0].reshape(-1, 2).detach().cpu().numpy()
                # store absolute world coordinates
                predicted_positions.append(out_pos +
                                           data_agent["centroid"][:2])
                # retrieve target positions from the GT and store as absolute coordinates
                track_id, timestamp = data_agent["track_id"], data_agent[
                    "timestamp"]
                target_positions.append(gt_rows[str(track_id) +
                                                str(timestamp)] +
                                        data_agent["centroid"][:2])

            # convert coordinates to AV point-of-view so we can draw them
            predicted_positions = transform_points(
                np.concatenate(predicted_positions),
                data_ego["world_to_image"])
            target_positions = transform_points(
                np.concatenate(target_positions), data_ego["world_to_image"])

            yaws = np.zeros((len(predicted_positions), 1))
            draw_trajectory(im_ego, predicted_positions, yaws,
                            PREDICTED_POINTS_COLOR)
            draw_trajectory(im_ego, target_positions, yaws,
                            TARGET_POINTS_COLOR)

            plt.imshow(im_ego[::-1])
            plt.show()
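Since the model here predicts positions only, the yaws handed to draw_trajectory are zero-filled placeholders required by this (older) positional signature; both the predicted and ground-truth trajectories are drawn with the same dummy headings, in PREDICTED_POINTS_COLOR and TARGET_POINTS_COLOR respectively.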