Code example #1
def keyPressEvent(obj, event):
    global frame_idx
    global refer_T
    key = obj.GetKeySym()
    if key == 'Right':
        # vis.clear_frame_poses()
        if frame_idx > 305:
            return

        cur_frame = frames.frames[frame_idx]
        cur_Tcw = cur_frame['extrinsic_Tcw']
        cur_name = cur_frame['file_name']
        cur_depth_name = cur_frame['depth_file_name']
        print(cur_name)

        K = K_from_frame(cur_frame)

        # Read image
        cur_img = cv2.imread(os.path.join(base_dir, cur_name)).astype(
            np.float32) / 255.0
        cur_depth = load_depth_from_png(os.path.join(base_dir, cur_depth_name),
                                        div_factor=5000)

        # Crop with new intrinsic
        cur_img = crop_by_intrinsic(cur_img, K, in_K, interp_method='nearest')
        # next_img = crop_by_intrinsic(next_img, K, in_K)
        cur_depth = crop_by_intrinsic(cur_depth,
                                      K,
                                      in_K,
                                      interp_method='nearest')
        h, w, c = cur_img.shape

        # rel_T = cam_opt.relateive_pose(cur_Tcw[:3, :3], cur_Tcw[:3, 3], next_Tcw[:3, :3], next_Tcw[:3, 3])
        # Back-project every pixel to a 3D point using the depth map
        X_3d = cam_opt.pi_inv(K, x_2d.reshape((h * w, 2)),
                              cur_depth.reshape((h * w, 1)))
        # Invert the world-to-camera pose and move the points into the world frame
        cur_Twc = cam_opt.camera_pose_inv(cur_Tcw[:3, :3], cur_Tcw[:3, 3])
        X_3d = cam_opt.transpose(cur_Twc[:3, :3], cur_Twc[:3, 3], X_3d)

        vis.set_point_cloud(X_3d, cur_img.reshape((h * w, 3)))
        vis.add_frame_pose(cur_Tcw[:3, :3],
                           cur_Tcw[:3, 3],
                           camera_obj_scale=0.01)

        frame_idx += 1

    return
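
The handler above also relies on module-level state that the snippet does not define: the frame list frames, the viewer vis, the base directory base_dir, the target intrinsic in_K, and a pixel-coordinate grid x_2d. As a rough sketch (the helper name below is illustrative, not part of the original code), such a grid could be built once for the image size and reused for every frame:

import numpy as np

def make_pixel_grid(h, w):
    # (x, y) coordinates of every pixel, shaped (h, w, 2), so that
    # x_2d.reshape((h * w, 2)) matches the pi_inv calls above.
    xs, ys = np.meshgrid(np.arange(w, dtype=np.float32),
                         np.arange(h, dtype=np.float32))
    return np.stack([xs, ys], axis=-1)
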
Code example #2
def keyPressEvent(obj, event):
    global frame_idx
    key = obj.GetKeySym()
    if key == 'Right':
        cur_frame = frames.frames[frame_idx]
        cur_Tcw = cur_frame['extrinsic_Tcw']
        cur_name = cur_frame['file_name']
        cur_depth_name = cur_frame['depth_file_name']

        next_frame = frames.frames[frame_idx + 1]
        next_Tcw = next_frame['extrinsic_Tcw']
        next_name = next_frame['file_name']

        K = K_from_frame(cur_frame)

        # Read image
        cur_img = cv2.imread(os.path.join(base_dir, cur_name)).astype(
            np.float32) / 255.0
        next_img = cv2.imread(os.path.join(base_dir, next_name)).astype(
            np.float32) / 255.0
        cur_depth = load_depth_from_png(os.path.join(base_dir, cur_depth_name))
        h, w, c = cur_img.shape

        # Relative pose between the current frame and the next frame
        rel_T = cam_opt.relateive_pose(cur_Tcw[:3, :3], cur_Tcw[:3, 3],
                                       next_Tcw[:3, :3], next_Tcw[:3, 3])
        # Back-project every pixel to a 3D point using the depth map
        X_3d = cam_opt.pi_inv(K, x_2d.reshape((h * w, 2)),
                              cur_depth.reshape((h * w, 1)))
        # Invert the world-to-camera pose and move the points into the world frame
        cur_Twc = cam_opt.camera_pose_inv(cur_Tcw[:3, :3], cur_Tcw[:3, 3])
        X_3d = cam_opt.transpose(cur_Twc[:3, :3], cur_Twc[:3, 3], X_3d)

        vis.set_point_cloud(X_3d, cur_img.reshape((h * w, 3)))
        vis.add_frame_pose(cur_Tcw[:3, :3], cur_Tcw[:3, 3])

        frame_idx += 20

    return
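
cam_opt.relateive_pose (the spelling follows the library) composes the two world-to-camera extrinsics into the pose of the next camera relative to the current one. A minimal NumPy equivalent, assuming each Tcw is a 3x4 [R|t] world-to-camera matrix:

import numpy as np

def relative_pose(R_a, t_a, R_b, t_b):
    # Maps points from camera a's frame into camera b's frame:
    # R = R_b @ R_a.T and t = t_b - R @ t_a
    R = R_b @ R_a.T
    t = t_b - R @ t_a
    return np.hstack([R, t.reshape(3, 1)])
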
Code example #3
seq = FrameSeqData(os.path.join(valid_set_dir, valid_seq_name, 'seq.json'))

frame_a = seq.frames[5]
frame_b = seq.frames[20]

Tcw_a = seq.get_Tcw(frame_a)
Tcw_b = seq.get_Tcw(frame_b)
K = seq.get_K_mat(frame_a)

img_a = cv2.imread(os.path.join(
    valid_set_dir, seq.get_image_name(frame_a))).astype(np.float32) / 255.0
img_b = cv2.imread(os.path.join(
    valid_set_dir, seq.get_image_name(frame_b))).astype(np.float32) / 255.0
depth_a = load_depth_from_png(os.path.join(valid_set_dir,
                                           seq.get_depth_name(frame_a)),
                              div_factor=5000.0)
depth_b = load_depth_from_png(os.path.join(valid_set_dir,
                                           seq.get_depth_name(frame_b)),
                              div_factor=5000.0)

# Relative pose from frame a to frame b
rel_T = cam_opt.relateive_pose(Tcw_a[:3, :3], Tcw_a[:3, 3], Tcw_b[:3, :3],
                               Tcw_b[:3, 3])
# Warp image b into frame a using frame a's depth and the relative pose
wrap_b2a, _ = cam_opt.wrapping(img_a, img_b, depth_a, K, rel_T[:3, :3],
                               rel_T[:3, 3])
# Dense a-to-b pixel correspondences and a mask of pixels that re-project
# outside image b
dense_a2b, _ = cam_opt.dense_corres_a2b(depth_a, K, Tcw_a, Tcw_b)
overlap_marks = cam_opt.mark_out_bound_pixels(dense_a2b, depth_a)
overlap_marks = overlap_marks.astype(np.float32)
# Fraction of frame a's valid pixels that land inside frame b
overlap_ratio = cam_opt.photometric_overlap(depth_a, K, Tcw_a, Tcw_b)
print(overlap_ratio)
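
photometric_overlap reports how much of frame a's valid depth re-projects inside frame b. A rough, self-contained sketch of that idea (a simplification, not the library's exact implementation), assuming a pinhole intrinsic K and a 3x4 relative pose rel_T from a to b:

import numpy as np

def overlap_ratio_sketch(depth_a, K, rel_T):
    h, w = depth_a.shape
    ys, xs = np.mgrid[0:h, 0:w]
    d = depth_a.reshape(1, -1)
    valid = d[0] > 1e-5
    # Back-project frame a's pixels to 3D points in camera a's frame
    pix = np.stack([xs.reshape(-1), ys.reshape(-1), np.ones(h * w)], axis=0)
    X_a = np.linalg.inv(K) @ (pix * d)
    # Move the points into camera b's frame and project with the same intrinsics
    X_b = rel_T[:3, :3] @ X_a + rel_T[:3, 3:4]
    uvw = K @ X_b
    u, v = uvw[0] / uvw[2], uvw[1] / uvw[2]
    in_view = (u >= 0) & (u < w) & (v >= 0) & (v < h) & (uvw[2] > 0) & valid
    return in_view.sum() / max(valid.sum(), 1)
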
Code example #4
    cur_frame = frames.frames[frame_idx]
    cur_Tcw = cur_frame['extrinsic_Tcw']
    cur_name = cur_frame['file_name']
    cur_depth_name = cur_frame['depth_file_name']

    next_frame = frames.frames[frame_idx + 10]
    next_Tcw = next_frame['extrinsic_Tcw']
    next_name = next_frame['file_name']

    K = K_from_frame(cur_frame)

    # Read image
    cur_img = cv2.imread(os.path.join(base_dir, cur_name)).astype(
        np.float32) / 255.0
    next_img = cv2.imread(os.path.join(base_dir, next_name)).astype(
        np.float32) / 255.0
    cur_depth = load_depth_from_png(os.path.join(base_dir, cur_depth_name))
    h, w, c = cur_img.shape

    rel_T = cam_opt.relateive_pose(cur_Tcw[:3, :3], cur_Tcw[:3, 3],
                                   next_Tcw[:3, :3], next_Tcw[:3, 3])

    # Translation: distance between the two camera centers (baseline)
    Cb = cam_opt.camera_center_from_Tcw(rel_T[:3, :3], rel_T[:3, 3])
    baseline = np.linalg.norm(Cb)

    # View angle
    # Round-trip through a quaternion to obtain a clean 4x4 rotation matrix,
    # then recover the axis-angle form of the relative rotation
    q = trans.quaternion_from_matrix(rel_T)
    R = trans.quaternion_matrix(q)
    rel_rad, rel_axis, _ = trans.rotation_from_matrix(R)
    rel_deg = np.rad2deg(rel_rad)
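
The same statistics can also be read directly off rel_T without the quaternion round-trip: the relative camera center is -R.T @ t, and the rotation angle follows from the trace of R. A small NumPy sketch under those assumptions:

import numpy as np

def pose_stats(rel_T):
    R, t = rel_T[:3, :3], rel_T[:3, 3]
    baseline = np.linalg.norm(-R.T @ t)  # distance between the two camera centers
    cos_angle = np.clip((np.trace(R) - 1.0) / 2.0, -1.0, 1.0)
    rel_deg = np.rad2deg(np.arccos(cos_angle))  # rotation angle in degrees
    return baseline, rel_deg
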