Example #1
def convert_rel_vo(seq: FrameSeqData, ref_T):
    # Re-express every pose in `seq` relative to the reference pose `ref_T`.
    for frame_idx in range(len(seq)):
        frame = seq.get_frame(frame_idx)
        Tcw = seq.get_Tcw(frame)
        rel_T = cam_opt.relateive_pose(ref_T[:3, :3], ref_T[:3, 3],
                                       Tcw[:3, :3], Tcw[:3, 3])
        frame['extrinsic_Tcw'] = rel_T
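
A note on the recurring helper: `cam_opt.relateive_pose` (the misspelling is the library's own identifier) is used with world-to-camera extrinsics throughout these examples, so it presumably returns the 3x4 transform mapping points from the first camera's frame into the second's. A minimal sketch of that computation, under that assumption:

import numpy as np

def relative_pose_sketch(R_a, t_a, R_b, t_b):
    # With x_a = R_a @ X + t_a and x_b = R_b @ X + t_b (world-to-camera),
    # eliminating X gives x_b = R_ab @ x_a + t_ab, where:
    R_ab = R_b @ R_a.T
    t_ab = t_b - R_ab @ t_a
    return np.hstack([R_ab, t_ab.reshape(3, 1)])  # 3x4 [R | t]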
Example #2
    def edge_rel_Rt(self, edge_idx):
        def build_E(R, t):
            E = np.zeros((3, 4), dtype=np.float32)
            E[:, :3] = R
            E[:, 3] = t
            return E

        e = self.matches[edge_idx]
        n1_name = e['ids_name'][0] + '.jpg'
        n2_name = e['ids_name'][1] + '.jpg'

        if n1_name not in self.frame_idx_map or n2_name not in self.frame_idx_map:
            return None

        n1 = self.frame_idx_map[n1_name]
        n2 = self.frame_idx_map[n2_name]

        # 3x4 extrinsics [R | t] of the two views
        n1_Rt = build_E(self.recon_cams[n1].R, self.recon_cams[n1].t)
        n2_Rt = build_E(self.recon_cams[n2].R, self.recon_cams[n2].t)

        rel_Rt_gt = cam_opt.relateive_pose(n1_Rt[:, :3], n1_Rt[:, 3],
                                           n2_Rt[:, :3], n2_Rt[:, 3])

        return rel_Rt_gt
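
`build_E` packs a rotation and translation into a 3x4 extrinsic [R | t]. When such poses need to be chained or inverted, the usual move (done inline with `np.eye(4)` in Example #11 below) is to pad to a 4x4 homogeneous matrix; a small helper sketch, not part of the original code:

import numpy as np

def to_homogeneous(Rt):
    # Pad a 3x4 [R | t] to a 4x4 homogeneous transform.
    T = np.eye(4, dtype=Rt.dtype)
    T[:3, :] = Rt
    return T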
Example #3
    def filtering(self):
        def rel_t_err(t1, t2):
            # Angle between two translation directions, in radians.
            t1 = t1 / np.linalg.norm(t1)
            t2 = t2 / np.linalg.norm(t2)

            dot = np.dot(t1, t2)
            dot = np.clip(dot, -1, 1)
            t_err = np.arccos(dot)
            return t_err

        inlier_R_threshold = 5       # deg
        inlier_t_threshold = 25      # deg
        outlier_R_threshold = 90     # deg
        outlier_t_threshold = 1      # deg
        inlier_covis_threshold = 50  # min. co-visibility count
        R_errs = []
        t_errs = []

        n_Cameras = len(self.recon_cams)
        inoutMat = np.zeros([n_Cameras, n_Cameras])
        for e_i, edge in enumerate(self.edge_pair):
            n1 = edge[0]
            n2 = edge[1]

            if self.covis_map[n1, n2] == 0:
                continue

            n1_E = self.recon_cam_Es(n1)
            n2_E = self.recon_cam_Es(n2)
            rel_Rt_gt = cam_opt.relateive_pose(n2_E[:3, :3], n2_E[:3, 3],
                                               n1_E[:3, :3], n1_E[:3, 3])
            rel_Rt_pred = self.edge_rel_Rt(e_i)

            rel_R_gt = rel_Rt_gt[:3, :3]
            rel_t_gt = rel_Rt_gt[:3, 3]
            rel_t_gt = rel_t_gt / np.linalg.norm(rel_t_gt)

            # Geodesic rotation error: angle of R_pred @ R_gt^T, in degrees.
            rel_R = np.matmul(rel_Rt_pred[:3, :3], rel_R_gt.T)
            R_err = np.rad2deg(np.arccos((np.trace(rel_R) - 1) / 2))
            R_errs.append(R_err)

            t_err = np.rad2deg(
                rel_t_err(rel_Rt_pred[:3, 3].ravel(), rel_t_gt.ravel()))
            t_errs.append(t_err)

            if self.covis_map[n1, n2] > inlier_covis_threshold:
                if R_err < inlier_R_threshold and t_err < inlier_t_threshold:
                    inoutMat[n1, n2] = inoutMat[n2, n1] = 1
                elif R_err > outlier_R_threshold or t_err > outlier_t_threshold:
                    inoutMat[n1, n2] = inoutMat[n2, n1] = -1

        return inoutMat, R_errs, t_errs
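
Note that `np.arccos((np.trace(rel_R) - 1) / 2)` is the geodesic angle between the predicted and ground-truth rotations; floating-point error can push the argument slightly outside [-1, 1] and produce NaN. A clipped variant, applying the same guard `rel_t_err` already uses on its dot product:

import numpy as np

def rot_err_deg(R_pred, R_gt):
    # Geodesic rotation error in degrees, with the cosine clipped for safety.
    cos_angle = (np.trace(R_pred @ R_gt.T) - 1.0) / 2.0
    return np.rad2deg(np.arccos(np.clip(cos_angle, -1.0, 1.0)))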
Example #4
    def buildInOutlierMat(self):
        R_errs = []
        t_errs = []
        fl = self.nC
        fl_st = 0
        self.inoutMat = np.zeros([fl, fl])
        for i in range(fl_st, fl):
            for j in range(fl_st, fl):
                if i == j:
                    self.inoutMat[i][j] = np.inf
                    continue
                elif i > j:
                    continue

                ti = i
                tj = j
                if (ti, tj) not in self.pairs:
                    ti, tj = tj, ti
                if (ti, tj) not in self.pairs:
                    self.inoutMat[ti][tj] = self.inoutMat[tj][ti] = np.inf
                    continue

                idxinpairs = self.pairs.index((ti, tj))

                Eij_R_pred = self.pred_rel_R[idxinpairs]
                Eij_t_pred = self.pred_rel_t[idxinpairs]

                Eij_Rt_pred = np.hstack((Eij_R_pred, Eij_t_pred.reshape(3, 1)))

                rel_Rt_gt = cam_opt.relateive_pose(self.Es[ti][:3, :3],
                                                   self.Es[ti][:3, 3],
                                                   self.Es[tj][:3, :3],
                                                   self.Es[tj][:3, 3])
                rel_R_gt = rel_Rt_gt[:3, :3]
                rel_t_gt = rel_Rt_gt[:3, 3]
                rel_t_gt = rel_t_gt / np.linalg.norm(rel_t_gt)

                c1 = self.Cs[i]  # camera centers (not used below)
                c2 = self.Cs[j]
                rel_R = np.matmul(Eij_Rt_pred[:3, :3], rel_Rt_gt[:3, :3].T)
                R_err = np.rad2deg(np.arccos((np.trace(rel_R) - 1) / 2))
                R_errs.append(R_err)

                t_err = np.rad2deg(rel_t_err(Eij_Rt_pred, rel_Rt_gt))
                t_errs.append(t_err)
                if self.mvis[ti][tj] > self.inlier_covis_threshold:
                    self.inoutMat[tj][ti] = self.inoutMat[ti][tj] = 1
                elif R_err > self.outlier_R_threshold or t_err > self.outlier_t_threshold:
                    self.inoutMat[ti][tj] = self.inoutMat[tj][ti] = -1
                else:
                    self.inoutMat[ti][tj] = self.inoutMat[tj][ti] = 0
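
The nested loop with the `i == j` and `i > j` branches visits each unordered camera pair exactly once; an equivalent, arguably clearer idiom, sketched assuming `fl` and `fl_st` as above:

import numpy as np
from itertools import combinations

inoutMat = np.zeros([fl, fl])
np.fill_diagonal(inoutMat, np.inf)               # replaces the i == j branch
for i, j in combinations(range(fl_st, fl), 2):   # replaces the i > j skip
    ...                                          # same per-pair handling as above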
Example #5
def rel_gt(edge_indices, cam_Es):
    gt_rel_Rts = list()
    for ei in range(len(edge_indices)):
        n1 = edge_indices[ei][0].item()
        n2 = edge_indices[ei][1].item()
        E_n1 = cam_Es[0, n1].cpu().numpy()
        E_n2 = cam_Es[0, n2].cpu().numpy()

        rel_Rt = cam_opt.relateive_pose(E_n1[:3, :3], E_n1[:3, 3],
                                        E_n2[:3, :3], E_n2[:3, 3])
        gt_rel_Rts.append(rel_Rt.reshape(1, 3, 4))

    gt_rel_Rts = np.vstack(gt_rel_Rts)
    return gt_rel_Rts
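
A usage sketch, assuming `e_node_idx` is an (E, 2) tensor of edge endpoints and `cam_Es` a (1, N, 3, 4) tensor of camera extrinsics, consistent with the indexing above:

gt_rel_Rts = rel_gt(e_node_idx, cam_Es)
assert gt_rel_Rts.shape == (len(e_node_idx), 3, 4)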
Example #6
def keyPressEvent(obj, event):
    global frame_idx
    key = obj.GetKeySym()
    if key == 'Right':
        cur_frame = frames.frames[frame_idx]
        cur_Tcw = cur_frame['extrinsic_Tcw']
        cur_name = cur_frame['file_name']
        cur_depth_name = cur_frame['depth_file_name']

        next_frame = frames.frames[frame_idx + 1]
        next_Tcw = next_frame['extrinsic_Tcw']
        next_name = next_frame['file_name']

        K = K_from_frame(cur_frame)

        # Read image
        cur_img = cv2.imread(os.path.join(base_dir, cur_name)).astype(
            np.float32) / 255.0
        next_img = cv2.imread(os.path.join(base_dir, next_name)).astype(
            np.float32) / 255.0
        cur_depth = load_depth_from_png(os.path.join(base_dir, cur_depth_name))
        h, w, c = cur_img.shape

        rel_T = cam_opt.relateive_pose(cur_Tcw[:3, :3], cur_Tcw[:3, 3],
                                       next_Tcw[:3, :3], next_Tcw[:3, 3])
        # `x_2d` is assumed to be a precomputed (h, w, 2) pixel grid (see the
        # sketch after this example).
        X_3d = cam_opt.pi_inv(K, x_2d.reshape((h * w, 2)),
                              cur_depth.reshape((h * w, 1)))
        cur_Twc = cam_opt.camera_pose_inv(cur_Tcw[:3, :3], cur_Tcw[:3, 3])
        X_3d = cam_opt.transpose(cur_Twc[:3, :3], cur_Twc[:3, 3], X_3d)

        vis.set_point_cloud(X_3d, cur_img.reshape((h * w, 3)))
        vis.add_frame_pose(cur_Tcw[:3, :3], cur_Tcw[:3, 3])

        frame_idx += 20

    return
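
The handler above assumes a precomputed pixel grid `x_2d` of shape (h, w, 2). A minimal sketch of building such a grid, assuming `pi_inv` expects (x, y) pixel coordinates:

import numpy as np

xx, yy = np.meshgrid(np.arange(w), np.arange(h))       # pixel coordinates
x_2d = np.stack([xx, yy], axis=-1).astype(np.float32)  # (h, w, 2), ordered (x, y)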
Example #7
Tcw_a = seq.get_Tcw(frame_a)
Tcw_b = seq.get_Tcw(frame_b)
K = seq.get_K_mat(frame_a)

img_a = cv2.imread(os.path.join(
    valid_set_dir, seq.get_image_name(frame_a))).astype(np.float32) / 255.0
img_b = cv2.imread(os.path.join(
    valid_set_dir, seq.get_image_name(frame_b))).astype(np.float32) / 255.0
depth_a = load_depth_from_png(os.path.join(valid_set_dir,
                                           seq.get_depth_name(frame_a)),
                              div_factor=5000.0)
depth_b = load_depth_from_png(os.path.join(valid_set_dir,
                                           seq.get_depth_name(frame_b)),
                              div_factor=5000.0)

rel_T = cam_opt.relateive_pose(Tcw_a[:3, :3], Tcw_a[:3, 3], Tcw_b[:3, :3],
                               Tcw_b[:3, 3])
wrap_b2a, _ = cam_opt.wrapping(img_a, img_b, depth_a, K, rel_T[:3, :3],
                               rel_T[:3, 3])
dense_a2b, _ = cam_opt.dense_corres_a2b(depth_a, K, Tcw_a, Tcw_b)
overlap_marks = cam_opt.mark_out_bound_pixels(dense_a2b, depth_a)
overlap_marks = overlap_marks.astype(np.float32)
overlap_ratio = cam_opt.photometric_overlap(depth_a, K, Tcw_a, Tcw_b)
print(overlap_ratio)

# show_multiple_img([{'img': img_a, 'title': 'a'},
#                    {'img': img_b, 'title': 'b'},
#                    {'img': wrap_b2a, 'title':'a2b'},
#                    {'img': overlap_marks, 'title':'overlap', 'cmap':'gray'}], title='View', num_cols=4)

H, W, C = img_a.shape
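
The overlap ratio printed above is presumably the fraction of valid pixels in view a whose correspondences land inside view b's image bounds; a minimal sketch under that assumption, taking `dense_a2b` as an (H, W, 2) array of (x, y) correspondences:

import numpy as np

def overlap_ratio_sketch(dense_a2b, depth_a):
    # Fraction of valid-depth pixels whose correspondence falls in-bounds.
    h, w = depth_a.shape[:2]
    x, y = dense_a2b[..., 0], dense_a2b[..., 1]
    valid = depth_a > 0
    inside = (x >= 0) & (x < w) & (y >= 0) & (y < h)
    return float(np.mean(inside[valid]))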
Example #8
    # (excerpt: `frames`, `frame_idx`, `cur_frame`, `cur_name` and
    # `cur_depth_name` are defined earlier in the original function)
    next_frame = frames.frames[frame_idx + 10]
    next_Tcw = next_frame['extrinsic_Tcw']
    next_name = next_frame['file_name']

    K = K_from_frame(cur_frame)

    # Read image
    cur_img = cv2.imread(os.path.join(base_dir, cur_name)).astype(
        np.float32) / 255.0
    next_img = cv2.imread(os.path.join(base_dir, next_name)).astype(
        np.float32) / 255.0
    cur_depth = load_depth_from_png(os.path.join(base_dir, cur_depth_name))
    h, w, c = cur_img.shape

    rel_T = cam_opt.relateive_pose(cur_Tcw[:3, :3], cur_Tcw[:3, 3],
                                   next_Tcw[:3, :3], next_Tcw[:3, 3])

    # Translation
    Cb = cam_opt.camera_center_from_Tcw(rel_T[:3, :3], rel_T[:3, 3])
    baseline = np.linalg.norm(Cb)

    # View angle
    q = trans.quaternion_from_matrix(rel_T)
    R = trans.quaternion_matrix(q)
    rel_rad, rel_axis, _ = trans.rotation_from_matrix(R)
    rel_deg = np.rad2deg(rel_rad)

    next2cur, _ = cam_opt.wrapping(cur_img, next_img, cur_depth, K,
                                   rel_T[:3, :3], rel_T[:3, 3])
    # The original snippet is truncated mid-call here; a plausible completion,
    # following the display pattern of Example #7:
    show_multiple_img([{'img': cur_img, 'title': 'cur'},
                       {'img': next_img, 'title': 'next'},
                       {'img': next2cur, 'title': 'next2cur'}],
                      title='View', num_cols=3)
Example #9
    # (excerpt from a batch-building loop: `gray_img`, `depth`, `img`,
    # `frame_a` and `pair` are defined earlier in the original code)
    sel_a_indices = select_nonzero_pixels(gray_img, depth,
                                          visualize=True)[:2000]

    img = img.transpose((2, 0, 1)) / 255.0
    pose = frame_a['extrinsic_Tcw']
    K = K_from_frame(frame_a)

    # Next frame
    frame_b = frames.frames[pair[1]]
    next_file_name = frame_b['file_name']
    next_img = cv2.imread(os.path.join(img_dir, next_file_name +
                                       '.jpg')).astype(np.float32)
    next_img = next_img.transpose((2, 0, 1)) / 255.0
    next_pose = frame_b['extrinsic_Tcw']

    T_gt = camera_op.relateive_pose(pose[:3, :3], pose[:3, 3],
                                    next_pose[:3, :3], next_pose[:3, 3])

    I_a_set.append(img)
    d_a_set.append(depth)
    I_b_set.append(next_img)
    K_set.append(K)
    sel_a_idx_set.append(sel_a_indices.reshape((1, 2000)))
    T_gt_set.append(T_gt)

    # Compute the pose
    # sel_a_indices = torch.from_numpy(sel_a_indices).cuda().view((1, -1)).long()

I_a_set = np.stack(I_a_set, axis=0)
d_a_set = np.stack(d_a_set, axis=0)
I_b_set = np.stack(I_b_set, axis=0)
K_set = np.stack(K_set, axis=0)
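
The commented-out `torch.from_numpy(...).cuda()` line above suggests these stacked batches feed a PyTorch model; a minimal conversion sketch, assuming float32 CUDA tensors are expected:

import torch

I_a = torch.from_numpy(I_a_set).float().cuda()   # (N, 3, H, W) images
d_a = torch.from_numpy(d_a_set).float().cuda()   # depths
I_b = torch.from_numpy(I_b_set).float().cuda()
K_t = torch.from_numpy(K_set).float().cuda()     # (N, 3, 3) intrinsics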
Example #10
        # (excerpt from an edge-filtering loop: `pair`, `rel_Rt` (the predicted
        # relative pose), `inliers` and the thresholds are defined earlier)
        assert n1 == pair[1] and n2 == pair[2]

        K1 = bundle_recon_cams[n1].K_matrix()
        K2 = bundle_recon_cams[n2].K_matrix()
        covis = max(int(covis_map[n1, n2]), int(covis_map[n2, n1]))

        n1_Rt = bundle_recon_cams[n1].recon_cam_Es()
        n2_Rt = bundle_recon_cams[n2].recon_cam_Es()

        n1_C = cam_opt.camera_center_from_Tcw(n1_Rt[:3, :3], n1_Rt[:3, 3])
        n2_C = cam_opt.camera_center_from_Tcw(n2_Rt[:3, :3], n2_Rt[:3, 3])

        if n1_Rt[2, 2] == 0 or n2_Rt[2, 2] == 0:
            continue
        rel_Rt_gt = cam_opt.relateive_pose(n1_Rt[:, :3], n1_Rt[:, 3],
                                           n2_Rt[:, :3], n2_Rt[:, 3])
        Rij = rel_Rt_gt[:, :3]
        tij = rel_Rt_gt[:, 3]

        tij = tij / np.sqrt(np.sum(np.square(tij)))

        rel_R = np.matmul(rel_Rt[:3, :3], Rij.T)
        R_err = np.rad2deg(np.arccos((np.trace(rel_R) - 1) / 2))
        t_err = np.rad2deg(rel_t_err(tij, rel_Rt[:3, 3]))

        if (inliers > inlier_match_threshold and R_err < inlier_r_threshold
                and t_err < inlier_t_threshold) or (
                    covis > 30 and R_err < inlier_r_threshold
                    and t_err < inlier_t_threshold):
            inlier_edges.append((e_i, n1, n2, covis, R_err, t_err))
Example #11
def add_drift_noise(seq, rot_noise_deg=10.0, displacement_dist_std=0.1):
    """
    Add gaussian noise for the pose of keyframes, here we update the noise-track with random noise to
    simulate drift error.
    :param seq: keyframe sequences, dim: (M, 3, 4), M is the number of keyframes
    :param rot_noise_deg: noise in rotation (unit: deg)
    :param displacement_dist_std: displacement factor in translation, the unit 1 is the avg. baseline among all cameras.
    :return: noise sequences with dim (M, 3, 4), displacement std.
    """
    n_frames = seq.shape[0]

    avg_frame_dist = 0
    R, t = cam_opt.Rt(seq[0])
    pre_frame_center = cam_opt.camera_center_from_Tcw(R, t)
    for frame_idx in range(1, n_frames):
        R, t = cam_opt.Rt(seq[frame_idx])
        frame_center = cam_opt.camera_center_from_Tcw(R, t)
        dist = np.linalg.norm(frame_center - pre_frame_center)
        pre_frame_center = frame_center
        avg_frame_dist += dist
    avg_frame_dist /= (n_frames - 1)  # n_frames - 1 consecutive-pair distances were summed

    # Set the translation noise; per the docstring, the std. is expressed in
    # units of the average inter-frame baseline.
    loc_disp_noise_sigma = displacement_dist_std * avg_frame_dist
    disp_noise = np.random.normal(0, loc_disp_noise_sigma, size=(n_frames, 3))

    # Set the rotation noise
    rot_noise_factor = np.deg2rad(rot_noise_deg)
    rot_noise = np.random.normal(0, rot_noise_factor, size=n_frames)

    # Add random noise. We keep two tracks: a 'ground-truth track' and a
    # 'noise track'. The ground-truth track provides the ground-truth relative
    # pose between consecutive frames; we perturb that relative pose and
    # accumulate the result onto the noise track.

    new_seq = seq.copy()
    pre_frame = seq[0]                                      # ground-truth track
    pre_noise_frame = np.eye(4)                             # noise track, initialized to the first ground-truth pose
    pre_noise_frame[:3, :] = pre_frame

    for frame_idx in range(1, n_frames):
        # Ground-truth-track
        # current frame:
        T = seq[frame_idx]
        R, t = cam_opt.Rt(T)

        # previous frame:
        pre_R, pre_t = cam_opt.Rt(pre_frame)
        pre_T = np.eye(4, dtype=np.float32)
        pre_T[:3, :3] = pre_R
        pre_T[:3, 3] = pre_t

        # inv_T = cam_opt.camera_pose_inv(R, t)
        # r_angle, r_axis, _ = trans.rotation_from_matrix(inv_T)
        # print('Old Rotation:', r_angle)

        # Compute ground-truth relative pose, and add random noise to translation
        rel_T = cam_opt.relateive_pose(pre_R, pre_t, R, t)
        rel_R, rel_t = cam_opt.Rt(rel_T)
        rel_C = cam_opt.camera_center_from_Tcw(rel_R, rel_t)
        rand_C = rel_C + disp_noise[frame_idx]

        # Add random noise to rotation
        temp_T = np.eye(4, dtype=np.float32)
        temp_T[:3, :3] = rel_R
        angle, axis, _ = trans.rotation_from_matrix(temp_T)  # GT relative rotation angle/axis (unused below)
        new_angle = rot_noise[frame_idx]
        new_axis = np.random.normal(0, 1.0, size=3)  # random axis; trans.rotation_matrix normalizes it
        noise_R = trans.rotation_matrix(new_angle, new_axis)[:3, :3]

        # print('New', np.rad2deg(new_angle))

        # Build new relative transformation matrix
        new_R = np.dot(noise_R, rel_R)
        new_t = cam_opt.translation_from_center(new_R, rand_C)
        temp_T[:3, :3] = new_R
        temp_T[:3, 3] = new_t

        # Add the noise relative transformation onto noise-track
        new_T = np.dot(temp_T, pre_noise_frame)
        new_seq[frame_idx][:3, :] = new_T[:3, :]

        # Update both ground-truth-track as well as noise-track
        pre_frame = T
        pre_noise_frame = new_T

        # inv_new_T = cam_opt.camera_pose_inv(new_T[:3, :3], new_T[:3, 3])
        # r_angle, r_axis, _ = trans.rotation_from_matrix(inv_new_T)
        # print('New Rotation:', r_angle)

    return new_seq, loc_disp_noise_sigma
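
A minimal usage sketch, assuming `poses` is an (M, 3, 4) array of world-to-camera keyframe poses:

noisy_poses, disp_sigma = add_drift_noise(poses, rot_noise_deg=10.0,
                                          displacement_dist_std=0.1)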