Example #1
def gen_top_down_labels(path, affine, img_w, img_h, map_w, map_h, incl_path=True, incl_endp=True):
    """
    Render a path (channel 0) and its endpoint (channel 1) as top-down label images,
    rotate them with the given affine transform, smooth and standardize each channel,
    and return the selected channels as a (1, C, img_w, img_h) float tensor.
    """
    seg_labels = np.zeros([img_w, img_h, 2]).astype(float)
    path_in_img = cf_to_img(path, np.array([map_w, map_h]))
    gauss_sigma = map_w / 96

    seg_labels[:, :, 0] = plot_path_on_img(seg_labels[:, :, 0], path_in_img)
    if len(path_in_img) > 1:
        seg_labels[:, :, 1] = plot_dot_on_img(seg_labels[:, :, 1], path_in_img[-1], gauss_sigma)

    seg_labels_rot = apply_affine(seg_labels, affine, img_w, img_h)
    seg_labels_rot[:, :, 0] = gaussian_filter(seg_labels_rot[:, :, 0], gauss_sigma)
    seg_labels_rot[:, :, 1] = gaussian_filter(seg_labels_rot[:, :, 1], gauss_sigma)

    # Standardize both channels separately (each has mean zero, unit variance)
    seg_labels_path = standardize_2d_prob_dist(seg_labels_rot[:, :, 0:1])
    seg_labels_endpt = standardize_2d_prob_dist(seg_labels_rot[:, :, 1:2])

    if DEBUG:
        cv2.imshow("l_traj", seg_labels_path[0, :, :])
        cv2.imshow("l_endpt", seg_labels_endpt[0, :, :])
        cv2.waitKey(0)

    if incl_path and not incl_endp:
        seg_labels_rot = seg_labels_path
    elif incl_endp and not incl_path:
        seg_labels_rot = seg_labels_endpt
    else:
        seg_labels_rot = np.concatenate((seg_labels_path, seg_labels_endpt), axis=0)

    seg_labels_t = torch.from_numpy(seg_labels_rot).unsqueeze(0).float()
    return seg_labels_t
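
A hedged usage sketch for this function, assuming the tdd helpers and load_path shown in Example #2 below are importable from the same module; the sizes and environment ID are placeholders:

# Placeholder sizes and environment ID, for illustration only
img_w, img_h, map_w, map_h = 64, 64, 64, 64
env_id = 0

path = load_path(env_id)
start_pt, dir_yaw = tdd.get_start_pt_and_yaw(path, 0, map_w, map_h, 0)
affine = tdd.get_affine_matrix(start_pt, dir_yaw, img_w, img_h)

labels = gen_top_down_labels(path, affine, img_w, img_h, map_w, map_h,
                             incl_path=True, incl_endp=True)
# With both channels kept, labels is expected to be a (1, 2, img_w, img_h) float tensor:
# channel 0 holds the smoothed path, channel 1 the smoothed endpoint.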
Example #2
def get_top_down_ground_truth_static_ego(env_id, start_idx, img_w, img_h,
                                         map_w, map_h):
    """
    Returns the ground-truth label oriented in the global map frame
    :param env_id:
    :param start_idx:
    :param img_w:
    :param img_h:
    :param map_w:
    :param map_h:
    :return:
    """
    path = load_path(env_id)
    #instruction_segments = [self.all_instr[env_id][set_idx]["instructions"][seg_idx]]

    start_pt, dir_yaw = tdd.get_start_pt_and_yaw(path, start_idx, map_w, map_h,
                                                 0)
    affine = tdd.get_affine_matrix(start_pt, dir_yaw, img_w, img_h)

    seg_labels = np.zeros([img_w, img_h, 2]).astype(float)
    path_in_img = cf_to_img(path, np.array([map_w, map_h]))

    #gauss_sigma = map_w / 96
    gauss_sigma = map_w / 32

    seg_labels[:, :, 0] = tdd.plot_path_on_img(seg_labels[:, :, 0],
                                               path_in_img)
    if len(path_in_img) > 1:
        seg_labels[:, :, 1] = tdd.plot_dot_on_img(seg_labels[:, :, 1],
                                                  path_in_img[-1], gauss_sigma)

    seg_labels_rot = tdd.apply_affine(seg_labels, affine, img_w, img_h)
    seg_labels_rot[:, :, 0] = gaussian_filter(seg_labels_rot[:, :, 0],
                                              gauss_sigma)
    seg_labels_rot[:, :, 1] = gaussian_filter(seg_labels_rot[:, :, 1],
                                              gauss_sigma)

    DEBUG = True  # NOTE: hardcoded on; the cv2.waitKey(0) below blocks until a key is pressed
    if DEBUG:
        cv2.imshow("l_traj", seg_labels_rot[:, :, 0])
        cv2.imshow("l_endpt", seg_labels_rot[:, :, 1])
        cv2.waitKey(0)

    # Standardize both channels separately (each has mean zero, unit variance)
    seg_labels_path = standardize_2d_prob_dist(seg_labels_rot[:, :, 0:1])
    seg_labels_endpt = standardize_2d_prob_dist(seg_labels_rot[:, :, 1:2])

    seg_labels_rot = np.concatenate((seg_labels_path, seg_labels_endpt),
                                    axis=0)

    seg_labels_t = torch.from_numpy(seg_labels_rot).unsqueeze(0).float()
    return seg_labels_t
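
A minimal call sketch; env_id and start_idx are placeholder values and the 64x64 sizes are illustrative only:

labels = get_top_down_ground_truth_static_ego(env_id=0, start_idx=0,
                                              img_w=64, img_h=64,
                                              map_w=64, map_h=64)
# labels is expected to be a (1, 2, 64, 64) float tensor (path and endpoint channels).
# Because DEBUG is hardcoded to True inside the function, the call opens two OpenCV
# windows and blocks on cv2.waitKey(0) until a key is pressed.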
Example #3
def get_top_down_ground_truth_dynamic_global(env_id, start_idx, end_idx,
                                             drone_pos_as, img_w, img_h, map_w,
                                             map_h):
    """
    Returns the ground-truth label oriented in the global map frame
    :param env_id:
    :param start_idx:
    :param img_w:
    :param img_h:
    :param map_w:
    :param map_h:
    :return:
    """
    PROFILE = False
    prof = SimpleProfiler(False, PROFILE)
    path = load_path(env_id, anno=True)
    #print(len(path), start_idx, end_idx)

    path = path[start_idx:end_idx]
    #instruction_segments = [self.all_instr[env_id][set_idx]["instructions"][seg_idx]]

    prof.tick("load_path")
    units = UnrealUnits(1.0)
    drone_pos_cf = units.pos3d_from_as(drone_pos_as)

    #print("Dynamic ground truth for ", env_id, start_idx, end_idx)
    # Compute the dynamic ground-truth path from the drone's current 2D position
    gt_dynamic = get_dynamic_ground_truth_v2(path, drone_pos_cf[:2])
    #Presenter().plot_path(env_id, [path[start_idx:end_idx], gt_dynamic])

    prof.tick("gen_gt_path")

    seg_labels = np.zeros([img_w, img_h, 2]).astype(float)
    path_in_img = cf_to_img(gt_dynamic, np.array([map_w, map_h]))
    gauss_sigma = map_w / 96

    seg_labels[:, :, 0] = tdd.plot_path_on_img(seg_labels[:, :, 0],
                                               path_in_img)
    if len(path_in_img) > 1:
        seg_labels[:, :, 1] = tdd.plot_dot_on_img(seg_labels[:, :, 1],
                                                  path_in_img[-1], gauss_sigma)

    prof.tick("plot_path")

    seg_labels[:, :, 0] = gaussian_filter(seg_labels[:, :, 0], gauss_sigma)
    seg_labels[:, :, 1] = gaussian_filter(seg_labels[:, :, 1], gauss_sigma)

    # Standardize both channels separately (each has mean zero, unit variance)
    seg_labels_path = standardize_2d_prob_dist(seg_labels[:, :, 0:1])
    seg_labels_endpt = standardize_2d_prob_dist(seg_labels[:, :, 1:2])

    prof.tick("process_img")

    DEBUG = False
    if DEBUG:
        gt_path_in_img = cf_to_img(path, np.asarray([map_w, map_h]))
        dbg_labels_gt = np.zeros([img_w, img_h, 1])
        dbg_labels_gt[:, :, 0] = tdd.plot_path_on_img(dbg_labels_gt[:, :, 0],
                                                      gt_path_in_img)
        Presenter().show_image(dbg_labels_gt,
                               "dbg",
                               torch=False,
                               waitkey=10,
                               scale=4)
        Presenter().show_image(torch.from_numpy(seg_labels_path),
                               "l_path",
                               torch=True,
                               waitkey=10,
                               scale=4)
        Presenter().show_image(torch.from_numpy(seg_labels_endpt),
                               "l_endp",
                               torch=True,
                               waitkey=100,
                               scale=4)

    seg_labels = np.concatenate((seg_labels_path, seg_labels_endpt), axis=0)

    seg_labels_t = torch.from_numpy(seg_labels).unsqueeze(0).float()

    prof.tick("prep_out")
    prof.print_stats()

    return seg_labels_t
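
A hedged call sketch; the environment ID, segment indices, and drone position are placeholders, and drone_pos_as is assumed to be a 3D position in the simulator frame (it is converted with units.pos3d_from_as inside the function):

drone_pos_as = np.array([0.0, 0.0, -1.0])  # placeholder 3D drone position
labels = get_top_down_ground_truth_dynamic_global(env_id=0, start_idx=0, end_idx=10,
                                                  drone_pos_as=drone_pos_as,
                                                  img_w=64, img_h=64,
                                                  map_w=64, map_h=64)
# labels is expected to be a (1, 2, 64, 64) float tensor in the global map frame
# (no egocentric rotation is applied in this variant).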