Example #1
def plot_occs(static_map, h, ax1, ax2, ax3, traj_gt, traj1, traj2, occs_gt,
              occs1, occs2):
    num_points = static_map.shape[0]
    seq_length = 12
    for ii, ped in enumerate(traj_gt):

        # count how often the predicted trajectory of pedestrian ii comes within
        # minimum_distance of an occupied point of the static map
        cols1, index1 = on_occupied(traj1,
                                    ii,
                                    static_map,
                                    num_points,
                                    seq_length,
                                    minimum_distance=.1)
        cols2, index2 = on_occupied(traj2,
                                    ii,
                                    static_map,
                                    num_points,
                                    seq_length,
                                    minimum_distance=.1)

        # convert both predicted trajectories for pedestrian ii to pixel coordinates
        pixels1 = get_pixels_from_world(traj1[ii], h)
        pixels2 = get_pixels_from_world(traj2[ii], h)

        # mark the offending pixel and count one occupancy violation per model
        if cols1 > 0:
            plot_occ_pix(ax2, pixels1[index1])
            occs1 += 1

        if cols2 > 0:
            plot_occ_pix(ax3, pixels2[index2])
            occs2 += 1

    return occs_gt, occs1, occs2
Example #2
def plot_col_pix(ax, traj, index_agend_1, index_agend_2, index_time, h):
    # convert both agents' trajectories to pixel coordinates and mark their
    # positions at the collision time step (first agent in red, second in green)
    pixels_gt1 = get_pixels_from_world(traj[index_agend_1], h)
    pixels_gt2 = get_pixels_from_world(traj[index_agend_2], h)
    ax.scatter(pixels_gt1[index_time][0],
               pixels_gt1[index_time][1],
               marker='*',
               color='red',
               s=100)
    ax.scatter(pixels_gt2[index_time][0],
               pixels_gt2[index_time][1],
               marker='*',
               color='green',
               s=100)
Example #3
def plot_pixel(ax,
               trajectory,
               person,
               h,
               a=1,
               last=False,
               first=False,
               intermediate=True,
               size=10,
               colors=None,
               linestyle='-',
               label=False):
    if colors is None:
        colors = np.random.rand(trajectory.size(0), 3)
    # convert the selected person's trajectory to pixel coordinates
    pixels_obs = get_pixels_from_world(trajectory[person], h)

    if intermediate:
        ax.plot(pixels_obs[:, 0],
                pixels_obs[:, 1],
                marker='.',
                color=colors[person, :],
                markersize=1,
                alpha=a,
                linestyle=linestyle)
        ax.quiver(pixels_obs[-1, 0],
                  pixels_obs[-1, 1],
                  pixels_obs[-1, 0] - pixels_obs[-2, 0],
                  pixels_obs[-2, 1] - pixels_obs[-1, 1],
                  color=colors[person, :])

    if last:
        ax.scatter(pixels_obs[-1, 0],
                   pixels_obs[-1, 1],
                   marker='*',
                   color=colors[person, :],
                   s=20)
        ax.text(pixels_obs[0, 0] + 10,
                pixels_obs[0, 1] - 10,
                color=colors[person, :],
                s=str(person),
                fontsize=15)
    if first:
        ax.scatter(pixels_obs[0, 0],
                   pixels_obs[0, 1],
                   marker='p',
                   color=colors[person, :],
                   s=size)
Example #4
def visualize_attention_weights(scene_name,
                                encoded_image_size,
                                attention_weights,
                                curr_end_pos,
                                ax1,
                                ax2,
                                counter=0):
    """
    Function to visualize the attention weights on their relative scene image during inference time (training or testing).
    :param scene_name: the name of the SDD scene from which the attention weights were computed
    :param encoded_image_size: the width/height dimension of the scene image used as input for the Attention Encoder. The image should
                               be a squared image, thus the dimension should be (encoded_image_size, encoded_image_size)
    :param attention_weights: the weights computed by the attention module
    :param curr_end_pos: the current positions of all agents in a scene
    """
    ped_id = 0
    grid_size = 8.0
    # 'upscaling_factor' is used to increase the size of the scene image (to make it easier to visualize) as well as
    # the agents' position coordinates, adapting them to the new image size
    upscaling_factor = 1
    ax1.cla()
    ax2.cla()

    # 'dataset_path' represents the path with the SDD scene folders inside
    dataset_path = get_root_dir() + "/datasets/safegan_dataset/SDD/"
    # Load the raw scene image on which the attention weights will be plotted.
    # Here I assume it is the 'reference.jpg' file inside the scene folder
    image_original = Image.open(dataset_path + scene_name + "/reference.jpg")
    original_image_size = Image.open(dataset_path + scene_name +
                                     "/annotated_boundaries.jpg").size

    # Increase the dimension of the raw scene image
    image_original = image_original.resize([
        original_image_size[0] * upscaling_factor,
        original_image_size[1] * upscaling_factor
    ], Image.LANCZOS)

    # In order to plot the agents' coordinates on the scene image it is necessary to load the homography matrix of that
    # scene and then convert the world coordinates into pixel values
    h_matrix = pd.read_csv(dataset_path + scene_name +
                           '/{}_homography.txt'.format(scene_name),
                           delim_whitespace=True,
                           header=None).values
    pixels = get_pixels_from_world(curr_end_pos, h_matrix, True)
    #pixels = pixels * (encoded_image_size * upscaling_factor / original_image_size[0],
    #                   encoded_image_size * upscaling_factor / original_image_size[1])
    # The pixel coordinates of the agents' positions would also have to be rescaled according to the upscaling
    # factor and the original dimensions of the scene image (taken from the image with the annotated boundary points)

    w, h = image_original.size
    col = np.round(pixels[ped_id][0])
    row = np.round(pixels[ped_id][1])
    grid_left_upper_corner = curr_end_pos - torch.tensor(
        [grid_size / 2.0, grid_size / 2.0]).expand_as(curr_end_pos).to(device)
    pixels_grid = get_pixels_from_world(grid_left_upper_corner, h_matrix, True)

    col_grid = (col - np.round(pixels_grid[ped_id][0]))
    row_grid = (row - np.round(pixels_grid[ped_id][1]))

    if row - row_grid > 0 and row + row_grid < h and col - col_grid > 0 and col + col_grid < w:
        image = image_original.crop(
            (col - col_grid, row - row_grid, col + col_grid, row + row_grid))
    else:
        image = image_original.crop((w // 2 - col_grid, h // 2 - row_grid,
                                     w // 2 + col_grid, h // 2 + row_grid))
    #image = image.resize([20 * upscaling_factor, 20 * upscaling_factor], Image.LANCZOS)

    # Resize the attention weights to match the dimensions of the upscaled raw scene image.
    # The attention weights are expanded with skimage, which also smooths the pixel values during the expansion
    attention_weights = attention_weights.view(
        -1, encoded_image_size, encoded_image_size).detach().cpu().numpy()
    upscaling_factor = image.size[0] / encoded_image_size
    alpha = skimage.transform.pyramid_expand(attention_weights[ped_id],
                                             upscale=upscaling_factor,
                                             sigma=8)
    #pixels = np.expand_dims( np.array([w//2, h//2]), axis=0)

    rect = patches.Rectangle((pixels_grid[ped_id, 0], pixels_grid[ped_id, 1]),
                             2 * col_grid,
                             2 * row_grid,
                             linewidth=1,
                             edgecolor='white',
                             facecolor='none')

    # Plot raw scene image, the agents' positions and the attention weights
    ax1.imshow(image_original)
    ax1.scatter(pixels[:, 0], pixels[:, 1], marker='.', color="b")
    ax1.scatter(pixels[ped_id, 0], pixels[ped_id, 1], marker='X', color="r")
    ax1.add_patch(rect)
    ax1.axis('off')

    ax2.imshow(image)
    ax2.imshow(np.flipud(alpha), alpha=0.7)
    plt.set_cmap(cm.Greys_r)
    ax2.axis('off')

    directory = get_root_dir() + '/results/plots/SDD/safeGAN_DP/attention'
    files = len(os.listdir(directory))
    plt.savefig(directory + '/frame_{}.png'.format(files + 1))
    plt.draw()
    plt.waitforbuttonpress()
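A note on the upsampling step in Example #4: skimage.transform.pyramid_expand enlarges the (encoded_image_size, encoded_image_size) weight map to the size of the cropped scene image while smoothing it with a Gaussian. Below is a minimal standalone sketch of that step; the 8x8 attention map and 64x64 crop are illustrative assumptions, not values taken from the example.

import numpy as np
import skimage.transform

# Illustrative values (assumptions): an 8x8 attention map and a 64x64 image crop
attention = np.random.rand(8, 8)
crop_size = 64

# Expand the attention map to the crop size with Gaussian smoothing,
# mirroring the pyramid_expand call in Example #4
alpha = skimage.transform.pyramid_expand(attention,
                                         upscale=crop_size / attention.shape[0],
                                         sigma=8)
print(alpha.shape)  # (64, 64); suitable for ax.imshow(alpha, alpha=0.7)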
Example #5
def get_pixels(o, g, p, annotated_points, h1):
    # convert each set of world coordinates to pixel coordinates using homography h1
    op = get_pixels_from_world(o, h1)
    pg = get_pixels_from_world(g, h1)
    pp = get_pixels_from_world(p, h1)
    ap = get_pixels_from_world(annotated_points, h1)
    return op, pg, pp, ap
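Every example on this page relies on get_pixels_from_world to project world coordinates into pixel coordinates through a scene homography (Example #4 loads the 3x3 matrix from '{scene_name}_homography.txt'). The function itself is not listed here; the sketch below only illustrates what such a projection typically looks like, under the assumption that the homography file maps pixel coordinates to world coordinates, and may differ from the actual implementation.

import numpy as np

def world_to_pixels_sketch(world_points, h_matrix):
    # Hypothetical helper, not the real get_pixels_from_world.
    # world_points: (N, 2) array of world coordinates; h_matrix: 3x3 homography
    # assumed to map pixels to world, so its inverse is applied here.
    ones = np.ones((world_points.shape[0], 1))
    homogeneous = np.concatenate([world_points, ones], axis=1)  # (N, 3)
    projected = homogeneous @ np.linalg.inv(h_matrix).T
    return projected[:, :2] / projected[:, 2:3]  # divide by the homogeneous scale

# world_to_pixels_sketch(np.array([[1.0, 2.0]]), np.eye(3)) -> array([[1., 2.]])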
Example #6
def evaluate_test_pixel_fde(data_set,
                            model_name,
                            selected_scene=None,
                            selected_batch=-1):
    pred_traj_gt = load_pickle('pred_traj_gt', selected_scene, selected_batch,
                               data_set, model_name)
    seq_start_end = load_pickle('seq_start_end', selected_scene,
                                selected_batch, data_set, model_name)
    scene_name_list = load_pickle('scene_name_list', selected_scene,
                                  selected_batch, data_set, model_name)

    pred_traj_fake1_list = load_pickle('pred_traj_fake1_list', selected_scene,
                                       selected_batch, data_set, model_name)
    pred_traj_fake2_list = load_pickle('pred_traj_fake2_list', selected_scene,
                                       selected_batch, data_set, model_name)

    homography_list = load_pickle('homography_list', selected_scene,
                                  selected_batch, data_set, model_name)

    num_samples = len(pred_traj_fake1_list)
    ade1 = []
    ade2 = []
    scene_name = np.unique(scene_name_list)
    print(scene_name)

    for s in range(num_samples):  # seq_len, batch, 2
        traj1_pixels, traj2_pixels, traj_gt_pixels = [], [], []
        total_traj = pred_traj_gt.size(1)
        for i, (start, end) in enumerate(seq_start_end):
            # get homography of current scene
            h = homography_list[i]
            num_ped = end - start
            # calculate pixels for a sample in a scene
            pixels_t1 = get_pixels_from_world(
                pred_traj_fake1_list[s][:, start:end].contiguous().view(-1, 2),
                h)
            pixels_t2 = get_pixels_from_world(
                pred_traj_fake2_list[s][:, start:end].contiguous().view(-1, 2),
                h)
            pixels_gt = get_pixels_from_world(
                pred_traj_gt[:, start:end].contiguous().view(-1, 2), h)
            t1 = torch.from_numpy(pixels_t1).view(-1, num_ped,
                                                  2)  # time, peds in scene, 2
            t2 = torch.from_numpy(pixels_t2).view(-1, num_ped, 2)
            tg = torch.from_numpy(pixels_gt).view(-1, num_ped, 2)

            traj1_pixels.append(t1)
            traj2_pixels.append(t2)
            traj_gt_pixels.append(tg)

        # concatenate along the batch dimension
        traj1_pixels = torch.cat(traj1_pixels, dim=1)
        traj2_pixels = torch.cat(traj2_pixels, dim=1)
        traj_gt_pixels = torch.cat(traj_gt_pixels, dim=1)

        # (seq_len, batch, 2): for each sample, compute the final displacement error
        ade1.append(
            final_displacement_error(traj1_pixels.view(-1, total_traj, 2)[-1],
                                     traj_gt_pixels.view(-1, total_traj,
                                                         2)[-1],
                                     mode='raw'))
        ade2.append(
            final_displacement_error(traj2_pixels.view(-1, total_traj, 2)[-1],
                                     traj_gt_pixels.view(-1, total_traj,
                                                         2)[-1],
                                     mode='raw'))

    pred_len = pred_traj_gt.size(0)
    ade1 = evaluate_helper(ade1, seq_start_end) / (total_traj)
    ade2 = evaluate_helper(ade2, seq_start_end) / (total_traj)
    return ade1, ade2
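Example #6 evaluates the final displacement error (FDE) in pixel space: only the last time step of each predicted trajectory is compared against the ground truth. final_displacement_error itself is not shown on this page; the sketch below states the usual definition (per-pedestrian L2 distance at the final step, matching mode='raw') as an assumption about its behaviour.

import torch

def final_displacement_error_sketch(pred_final, gt_final):
    # pred_final, gt_final: (num_peds, 2) positions at the last time step.
    # Returns one L2 error per pedestrian (the common FDE definition).
    return torch.norm(pred_final - gt_final, dim=1)

# final_displacement_error_sketch(torch.tensor([[3.0, 4.0]]),
#                                 torch.tensor([[0.0, 0.0]]))  # tensor([5.])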
Example #7
def evaluate_trajectory_quality(data_set,
                                scene,
                                model_name,
                                batch=5,
                                selection=-1):
    obs_traj = load_pickle('obs_traj', scene, batch, data_set, model_name)
    pred_traj_gt = load_pickle('pred_traj_gt', scene, batch, data_set,
                               model_name)
    seq_start_end = load_pickle('seq_start_end', scene, batch, data_set,
                                model_name)

    pred_traj_fake1_list = load_pickle('pred_traj_fake1_list', scene, batch,
                                       data_set, model_name)
    pred_traj_fake2_list = load_pickle('pred_traj_fake2_list', scene, batch,
                                       data_set, model_name)

    homography_list = load_pickle('homography_list', scene, batch, data_set,
                                  model_name)
    photo_list = load_pickle('photo_list', scene, batch, data_set, model_name)
    annotated_points_list = load_pickle('annotated_points_list', scene, batch,
                                        data_set, model_name)
    scene_name_list = load_pickle('scene_name_list', scene, batch, data_set,
                                  model_name)

    fig, ((ax1, ax14), (ax15, ax4)) = plt.subplots(2,
                                                   2,
                                                   figsize=(32, 32),
                                                   num=1)

    num_samples = len(pred_traj_fake1_list)
    for i, (start, end) in enumerate(seq_start_end):
        print(batch * len(seq_start_end) + i)
        if not (selection == -1
                or batch * len(seq_start_end) + i == selection):
            continue
        start = start.item()
        end = end.item()
        num_ped = end - start

        photo = photo_list[i]
        h = homography_list[i]
        annotated_points = annotated_points_list[i]
        annotated_points = get_pixels_from_world(annotated_points, h)
        subsample = annotated_points.shape[0] // 500

        plt.cla()
        traj_obs = obs_traj.permute(1, 0, 2)[start:end]
        traj_gt = pred_traj_gt.permute(1, 0, 2)[start:end]

        scene_name = np.unique(scene_name_list)
        print(scene_name)
        #if not (scene_name == scene).all():
        #    return 0, 0
        plot_photo(ax1, photo, 'model1')
        plot_photo(ax4, photo, 'model2')
        for p in range(np.minimum(num_ped, 5)):
            plot_pixel(ax1,
                       traj_obs,
                       p,
                       h,
                       a=1,
                       last=False,
                       first=False,
                       intermediate=True,
                       size=10,
                       colors=colors)
            plot_pixel(ax1,
                       traj_gt,
                       p,
                       h,
                       a=.1,
                       last=True,
                       first=False,
                       intermediate=False,
                       size=10,
                       colors=colors)
            plot_pixel(ax4,
                       traj_gt,
                       p,
                       h,
                       a=1,
                       last=True,
                       first=False,
                       intermediate=True,
                       size=10,
                       colors=colors)

        plot_photo(ax14, photo, 'model1')
        plot_photo(ax15, photo, 'model2')

        for sample in range(1):
            traj_pred1 = pred_traj_fake1_list[sample + 0].permute(1, 0,
                                                                  2)[start:end]
            traj_pred2 = pred_traj_fake1_list[sample + 1].permute(1, 0,
                                                                  2)[start:end]
            traj_pred3 = pred_traj_fake1_list[sample + 2].permute(1, 0,
                                                                  2)[start:end]

            traj_pred11 = pred_traj_fake2_list[sample + 0].permute(
                1, 0, 2)[start:end]
            traj_pred22 = pred_traj_fake2_list[sample + 1].permute(
                1, 0, 2)[start:end]
            traj_pred33 = pred_traj_fake2_list[sample + 2].permute(
                1, 0, 2)[start:end]

            for p in range(np.minimum(num_ped, 3)):
                if True:  #p == 0 or p==1 or p==2:
                    plot_pixel(ax14,
                               traj_pred1,
                               p,
                               h,
                               a=1,
                               last=False,
                               first=False,
                               size=10,
                               colors=colors)
                    plot_pixel(ax14,
                               traj_pred2,
                               p,
                               h,
                               a=1,
                               last=False,
                               first=False,
                               size=10,
                               colors=colors)
                    plot_pixel(ax14,
                               traj_pred3,
                               p,
                               h,
                               a=1,
                               last=False,
                               first=False,
                               size=10,
                               colors=colors)

                    plot_pixel(ax15,
                               traj_pred11,
                               p,
                               h,
                               a=1,
                               last=False,
                               first=False,
                               size=10,
                               colors=colors,
                               linestyle='-')
                    plot_pixel(ax15,
                               traj_pred22,
                               p,
                               h,
                               a=1,
                               last=False,
                               first=False,
                               size=10,
                               colors=colors,
                               linestyle='-')
                    plot_pixel(ax15,
                               traj_pred33,
                               p,
                               h,
                               a=1,
                               last=False,
                               first=False,
                               size=10,
                               colors=colors,
                               linestyle='-')

        #ax3.scatter(annotated_points[::subsample, 0], annotated_points[::subsample, 1], marker='.', color='white', s=1)
        #_, _, _ = plot_cols(ax2, ax3, ax4, traj_gt, traj_pred1, traj_pred2, cols_gt, cols1, cols2, h)
        #_, _, _ = plot_occs(annotated_points, h, ax2, ax3, ax4, traj_gt, traj1, traj2, cols_gt, cols1, cols2)
        plt.waitforbuttonpress()
        plt.draw()
        #plt.pause(.001)
        directory = '/results/plots/{}/'.format(data_set)
        frame = batch * len(seq_start_end) + i
        save_sample(fig,
                    ax1,
                    directory,
                    model_name,
                    scene,
                    model_variant='OBS',
                    frame=frame,
                    sample='-')
        save_sample(fig,
                    ax4,
                    directory,
                    model_name,
                    scene,
                    model_variant='GT',
                    frame=frame,
                    sample='-')
        save_sample(fig,
                    ax14,
                    directory,
                    model_name,
                    scene,
                    model_variant='DP',
                    frame=frame,
                    sample='123')
        save_sample(fig,
                    ax15,
                    directory,
                    model_name,
                    scene,
                    model_variant='DP_SP',
                    frame=frame,
                    sample='123')

    return 0, 0