Example #1
    def relative_poses_to_rt(poses_list, trans_abs=False):
        import numpy as np
        import dsac_tools.utils_geo as utils_geo
        # from .dsac_tools import utils_geo as utils_geo
        rot_list = []
        trans_list = []
        for delta_Rtij_inv in poses_list:
            err_q = utils_geo.rot12_to_angle_error(np.identity(3),
                                                   delta_Rtij_inv[:3, :3])
            err_t = utils_geo.vector_angle(np.array([0, 0, -1]),
                                           delta_Rtij_inv[:3, 3:4])
            if trans_abs:
                err_t_r = utils_geo.vector_angle(np.array([0, 0, 1]),
                                                 delta_Rtij_inv[:3, 3:4])
                err_t = min(err_t, err_t_r)  # take the smaller angle to resolve the sign ambiguity

            rot_list.append(err_q)
            trans_list.append(err_t)
        print(f"rot_list: {len(rot_list)}, trans_list: {len(trans_list)}")
        return {"rot": rot_list, "trans": trans_list}
def _E_to_M_train(E_est_th,
                  K,
                  x1,
                  x2,
                  inlier_mask=None,
                  delta_Rt_gt_cam=None,
                  depth_thres=50.,
                  show_debug=False,
                  show_result=True,
                  method_name='ours'):
    if show_debug:
        print('--- Recovering pose from E...')
    count_N = x1.shape[0]
    R2s, t2s, M2s = _get_M2s(E_est_th)

    R1 = np.eye(3)
    t1 = np.zeros((3, 1))
    M1 = np.hstack((R1, t1))

    if inlier_mask is not None:
        x1 = x1[inlier_mask, :]
        x2 = x2[inlier_mask, :]
        if x1.shape[0] < 8:
            print('ERROR! Less than 8 points after inlier mask!')
            print(inlier_mask)
            return None
    # Cheirality check following OpenCV implementation: https://github.com/opencv/opencv/blob/808ba552c532408bddd5fe51784cf4209296448a/modules/calib3d/src/five-point.cpp#L513
    cheirality_checks = []
    M2_list = []
    error_Rt = []

    def within_mask(Z, thres_min, thres_max):
        return (Z > thres_min) & (Z < thres_max)

    for Rt_idx, M2 in enumerate(M2s):
        M2 = M2.detach().cpu().numpy()
        R2 = M2[:, :3]
        t2 = M2[:, 3:4]
        if show_debug:
            print(M2)
            print(np.linalg.det(R2))
        X_tri_homo = cv2.triangulatePoints(np.matmul(K, M1), np.matmul(K, M2),
                                           x1.T, x2.T)
        X_tri = X_tri_homo[:3, :] / X_tri_homo[-1, :]
        # C1 = -np.matmul(R1, t1) # https://math.stackexchange.com/questions/82602/how-to-find-camera-position-and-rotation-from-a-4x4-matrix
        # cheirality1 = np.matmul(R1[2:3, :], (X_tri-C1)).reshape(-1) # https://cmsc426.github.io/sfm/
        # if show_debug:
        #     print(X_tri[-1, :])
        cheirality_mask_1 = within_mask(X_tri[-1, :], 0., depth_thres)

        X_tri_cam2 = np.matmul(R2, X_tri) + t2
        # C2 = -np.matmul(R2, t2)
        # cheirality2 = np.matmul(R2[2:3, :], (X_tri_cam3-C2)).reshape(-1)
        cheirality_mask_2 = within_mask(X_tri_cam2[-1, :], 0., depth_thres)

        cheirality_mask_12 = cheirality_mask_1 & cheirality_mask_2
        cheirality_checks.append(cheirality_mask_12)

    if show_debug:
        print([np.sum(mask) for mask in cheirality_checks])
    good_M_index, non_zero_nums = max(enumerate(
        [np.sum(mask) for mask in cheirality_checks]),
                                      key=operator.itemgetter(1))
    if non_zero_nums > 0:
        # Rt_idx = cheirality_checks.index(True)
        # M_inv = utils_misc.Rt_depad(np.linalg.inv(utils_misc.Rt_pad(M2s[good_M_index].detach().cpu().numpy())))
        # M_inv = utils_misc.inv_Rt_np(M2s[good_M_index].detach().cpu().numpy())
        M_inv_th = utils_misc._inv_Rt(M2s[good_M_index])
        # print(M_inv, M_inv_th)
        if show_debug:
            print(
                'The %d-th (0-based) Rt meets the Cheirality Condition! with [R|t] (camera):\n'
                % good_M_index,
                M_inv_th.detach().cpu().numpy())

        if delta_Rt_gt_cam is not None:
            # R2 = M2s[good_M_index][:, :3].numpy()
            # t2 = M2s[good_M_index][:, 3:4].numpy()
            # error_R = min([utils_geo.rot12_to_angle_error(R2.numpy(), delta_R_gt) for R2 in R2s])
            # error_t = min(utils_geo.vector_angle(t2, delta_t_gt), utils_geo.vector_angle(-t2, delta_t_gt))
            M_inv = M_inv_th.detach().cpu().numpy()
            R2 = M_inv[:, :3]
            t2 = M_inv[:, 3:4]
            error_R = utils_geo.rot12_to_angle_error(
                R2, delta_Rt_gt_cam[:3, :3])  # [RUI] Both of camera motion
            error_t = utils_geo.vector_angle(t2, delta_Rt_gt_cam[:3, 3:4])
            if show_result:
                print(
                    'Recovered by %s (camera): The rotation error (degree) %.4f, and translation error (degree) %.4f'
                    % (method_name, error_R, error_t))
            error_Rt = [error_R, error_t]
        else:
            error_Rt = []
        Rt_cam = M_inv_th

    else:
        # raise ValueError('ERROR! 0 of qualified [R|t] found!')
        print('ERROR! No qualified [R|t] found!')
        error_Rt = []
        Rt_cam = None

    return M2_list, error_Rt, Rt_cam
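
# A NumPy sketch of the four-candidate decomposition that _get_M2s presumably
# performs (the standard Hartley-Zisserman result E = [t]_x R; the real helper
# works on torch tensors, so treat this as illustrative):
import numpy as np

def decompose_E_sketch(E):
    """Return the four candidate (3, 4) [R|t] matrices of an essential matrix."""
    U, _, Vt = np.linalg.svd(E)
    # enforce proper rotations (det = +1); E is only defined up to scale/sign
    if np.linalg.det(U) < 0:
        U = -U
    if np.linalg.det(Vt) < 0:
        Vt = -Vt
    W = np.array([[0., -1., 0.], [1., 0., 0.], [0., 0., 1.]])
    R1, R2 = U @ W @ Vt, U @ W.T @ Vt
    t = U[:, 2:3]  # unit-norm translation, defined up to sign
    return [np.hstack((R, s * t)) for R in (R1, R2) for s in (1., -1.)]
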
def _E_to_M(E_est_th,
            K,
            x1,
            x2,
            inlier_mask=None,
            delta_Rt_gt=None,
            depth_thres=50.,
            show_debug=False,
            show_result=True,
            method_name='ours'):
    if show_debug:
        print('--- Recovering pose from E...')
    count_N = x1.shape[0]
    R2s, t2s, M2s = _get_M2s(E_est_th)

    R1 = np.eye(3)
    t1 = np.zeros((3, 1))
    M1 = np.hstack((R1, t1))

    if inlier_mask is not None:
        x1 = x1[inlier_mask, :]
        x2 = x2[inlier_mask, :]
        if x1.shape[0] < 8:
            print('ERROR! Less than 8 points after inlier mask!')
            print(inlier_mask)
            return None
    # Cheirality check following OpenCV implementation: https://github.com/opencv/opencv/blob/808ba552c532408bddd5fe51784cf4209296448a/modules/calib3d/src/five-point.cpp#L513
    cheirality_checks = []
    M2_list = []
    error_Rt = []

    def within_mask(Z, thres_min, thres_max):
        return (Z > thres_min) & (Z < thres_max)

    for Rt_idx, M2 in enumerate(M2s):
        M2 = M2.numpy()
        R2 = M2[:, :3]
        t2 = M2[:, 3:4]
        if show_debug:
            print(M2)
            print(np.linalg.det(R2))
        X_tri_homo = cv2.triangulatePoints(np.matmul(K, M1), np.matmul(K, M2),
                                           x1.T, x2.T)
        X_tri = X_tri_homo[:3, :] / X_tri_homo[-1, :]
        # C1 = -np.matmul(R1, t1) # https://math.stackexchange.com/questions/82602/how-to-find-camera-position-and-rotation-from-a-4x4-matrix
        # cheirality1 = np.matmul(R1[2:3, :], (X_tri-C1)).reshape(-1) # https://cmsc426.github.io/sfm/
        # if show_debug:
        #     print(X_tri[-1, :])
        cheirality_mask_1 = within_mask(X_tri[-1, :], 0., depth_thres)

        X_tri_cam2 = np.matmul(R2, X_tri) + t2
        # C2 = -np.matmul(R2, t2)
        # cheirality2 = np.matmul(R2[2:3, :], (X_tri_cam3-C2)).reshape(-1)
        cheirality_mask_2 = within_mask(X_tri_cam2[-1, :], 0., depth_thres)

        cheirality_mask_12 = cheirality_mask_1 & cheirality_mask_2
        cheirality_checks.append(cheirality_mask_12)

    if show_debug:
        print([np.sum(mask) for mask in cheirality_checks])
    good_M_index, non_zero_nums = max(enumerate(
        [np.sum(mask) for mask in cheirality_checks]),
                                      key=operator.itemgetter(1))
    if non_zero_nums > 0:
        # Rt_idx = cheirality_checks.index(True)
        M_inv = utils_misc.Rt_depad(
            np.linalg.inv(utils_misc.Rt_pad(M2s[good_M_index].numpy())))
        if show_result:
            print(
                'The %d-th (0-based) Rt meets the Cheirality Condition! with [R|t] (camera):\n'
                % good_M_index, M_inv)

        R2 = M_inv[:, :3]
        t2 = M_inv[:, 3:4]
        Rt_cam = [R2, t2]  # always set here, so the return below is well-defined

        if delta_Rt_gt is not None:
            # error_R = min([utils_geo.rot12_to_angle_error(R2.numpy(), delta_R_gt) for R2 in R2s])
            # error_t = min(utils_geo.vector_angle(t2, delta_t_gt), utils_geo.vector_angle(-t2, delta_t_gt))
            error_R = utils_geo.rot12_to_angle_error(
                R2, delta_Rt_gt[:3, :3])  # [RUI] Both of camera motion
            error_t = utils_geo.vector_angle(t2, delta_Rt_gt[:3, 3:4])
            if show_result:
                print(
                    'Recovered by %s (camera): The rotation error (degree) %.4f, and translation error (degree) %.4f'
                    % (method_name, error_R, error_t))
            error_Rt = [error_R, error_t]
        else:
            error_Rt = []

    else:
        # raise ValueError('ERROR! 0 of qualified [R|t] found!')
        print('ERROR! No qualified [R|t] found!')
        error_Rt = []
        Rt_cam = []

        # # Get rid of small angle points. @Manmo: you should discard points that are beyond a depth threshold (say, more than 100m), or which subtend a small angle between the two cameras (say, less than 5 degrees).
        # v1s = (X_tri-C1).T
        # v2s = (X_tri-C2).T
        # angles_X1_C1C2 = utils_geo.vectors_angle(v1s, v2s).reshape(-1)

        # v1s = (X_tri_cam3-C1).T
        # v2s = (X_tri_cam3-C2).T
        # angles_X2_C1C2 = utils_geo.vectors_angle(v1s, v2s).reshape(-1)

        # # angles_thres = 0.5
        # # # angles_thres = np.median(angles_X1_C1C2)
        # # angles_mask = angles_X1_C1C2 > angles_thres
        # # if show_debug:
        # #     print('!!! Good angles %d/%d with threshold %.2f'%(np.sum(angles_mask), angles_X1_C1C2.shape[0], angles_thres))

        # depth_thres = 30.
        # # print(X_tri[-1, :] > 0.)
        # # depth_mask = np.logical_and(X_tri[-1, :] > 0., X_tri[-1, :] < depth_thres).reshape(-1)
        # depth_mask = (X_tri[-1, :] < depth_thres).reshape(-1)
        # # print(angles_mask.shape)

        # # if angles_mask is not None:
        # if not np.any(depth_mask):
        #     cheirality_check = False
        #     # print('ERROR! No corres above the threshold of %.2f degrees!'%angles_thres)
        #     if show_debug:
        #         print('No depth within the threshold of 0-%.2f!'%depth_thres)
        #     # print(angles_C1C2)
        # else:
        #     # cheirality_check = np.min(cheirality1[depth_mask])>0 and np.min(cheirality2[depth_mask])>0
        #     cheirality_check = np.min(X_tri[-1, :].reshape(-1)[depth_mask])>0 and np.min(X_tri_cam3[-1, :].reshape(-1)[depth_mask])>0

        # # else:
        # #     cheirality_check = np.min(cheirality1)>0 and np.min(cheirality2)>0
        # cheirality_checks.append(cheirality_check)
        # if cheirality_check:
        #     print('-- Good M (scene):', M2)
        #     M2_list.append(M2)

        # if show_debug: # for debugging prints
        #     # print(X_tri[-1, angles_mask.reshape([-1])])
        #     # print(X_tri_cam3[-1, angles_mask.reshape([-1])])
        #     # outliers1 = cheirality1[depth_mask] < 0
        #     # print(angles_X1_C1C2[angles_mask].shape, outliers1.shape)
        #     # print(outliers1.shape, 'Outlier angles: ', angles_X1_C1C2[angles_mask][outliers1])
        #     print(X_tri[-1, :].reshape(-1))
        #     print(X_tri[-1, :].reshape(-1)[depth_mask])
        # #     # print(angles_X1_C1C2.shape, outliers1.shape)
        #     # print(angles_X1_C1C2, angles_X1_C1C2[depth_mask][outliers1])
        # #     # print(angles_X2_C1C2)
        # #     # print(X_tri[-1, :])
        # #     # print(cheirality1)
        # #     # print(cheirality2)

    # if np.sum(cheirality_checks)==1:
    #     Rt_idx = cheirality_checks.index(True)
    #     M_inv = utils_misc.Rt_depad(np.linalg.inv(utils_misc.Rt_pad(M2s[Rt_idx].numpy())))
    #     print('The %d_th Rt meets the Cheirality Condition! with [R|t] (camera):\n'%Rt_idx, M_inv)

    #     if delta_Rt_gt is not None:
    #         R2 = M2s[Rt_idx][:, :3].numpy()
    #         t2 = M2s[Rt_idx][:, 3:4].numpy()
    #         # error_R = min([utils_geo.rot12_to_angle_error(R2.numpy(), delta_R_gt) for R2 in R2s])
    #         # error_t = min(utils_geo.vector_angle(t2, delta_t_gt), utils_geo.vector_angle(-t2, delta_t_gt))

    #         R2 = M_inv[:, :3]
    #         t2 = M_inv[:, 3:4]
    #         error_R = utils_geo.rot12_to_angle_error(R2, delta_Rt_gt[:, :3])
    #         error_t = utils_geo.vector_angle(t2, delta_Rt_gt[:, 3:4])
    #         print('Recovered by %s (camera): The rotation error (degree) %.4f, and translation error (degree) %.4f'%(method_name, error_R, error_t))
    #         error_Rt = (error_R, error_t)

    #     print(M_inv)
    # else:
    #     raise ValueError('ERROR! %d of qualified [R|t] found!'%np.sum(cheirality_checks))
    #     # print('ERROR! %d of qualified [R|t] found!'%np.sum(cheirality_checks))

    return M2_list, error_Rt, Rt_cam
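
# Hypothetical usage of _E_to_M (names illustrative): E_est_th is a torch
# (3, 3) essential matrix, K a (3, 3) numpy intrinsic matrix, and x1/x2 are
# (N, 2) numpy pixel coordinates; delta_Rt_gt is the (4, 4) ground-truth
# camera motion, used only for the error report.
#
#   M2_list, error_Rt, Rt_cam = _E_to_M(E_est_th, K, x1, x2,
#                                       delta_Rt_gt=delta_Rt_gt,
#                                       show_debug=False, show_result=True)
#   if Rt_cam:
#       R_cam, t_cam = Rt_cam
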
def goodCorr_eval_nondecompose(p1s,
                               p2s,
                               E_hat,
                               delta_Rtij_inv,
                               K,
                               scores,
                               if_my_decomp=False):
    # Use only the top 10% in terms of score to decompose, we can probably
    # implement a better way of doing this, but this should be just fine.
    if scores is not None:
        num_top = len(scores) // 10
        num_top = max(1, num_top)
        th = np.sort(scores)[::-1][
            num_top]  ## [RUI] Only evaluating the top 10% corres.
        mask = scores >= th

        p1s_good = p1s[mask]
        p2s_good = p2s[mask]
    else:
        p1s_good, p2s_good = p1s, p2s

    # Match types
    # E_hat = E_hat.reshape(3, 3).astype(p1s.dtype)
    if p1s_good.shape[0] >= 5:
        # Get the best E just in case we get multiple E from findEssentialMat
        # num_inlier, R, t, mask_new = cv2.recoverPose(
        #     E_hat, p1s_good, p2s_good)
        if if_my_decomp:
            M2_list, error_Rt, Rt_cam = _E_to_M(torch.from_numpy(E_hat),
                                                K,
                                                p1s_good,
                                                p2s_good,
                                                delta_Rt_gt=delta_Rtij_inv,
                                                show_debug=False,
                                                method_name='ours')
            if not Rt_cam:
                return None, None
            else:
                print(Rt_cam[0], Rt_cam[1])
        else:
            num_inlier, R, t, mask_new = cv2.recoverPose(E_hat,
                                                         p1s_good,
                                                         p2s_good,
                                                         focal=K[0, 0],
                                                         pp=(K[0, 2], K[1, 2]))
        try:
            R_cam, t_cam = utils_geo.invert_Rt(R, t)
            err_q = utils_geo.rot12_to_angle_error(R_cam,
                                                   delta_Rtij_inv[:3, :3])
            err_t = utils_geo.vector_angle(t_cam, delta_Rtij_inv[:3, 3:4])
            # err_q, err_t = evaluate_R_t(dR, dt, R, t) # (3, 3) (3,) (3, 3) (3, 1)
        except Exception:
            print("Failed in evaluation")
            print(R)
            print(t)
            err_q = 180.
            err_t = 90.
    else:
        err_q = 180.
        err_t = 90.
        R = np.eye(3, dtype=np.float32)
        t = np.zeros((3, 1), np.float32)

    return np.hstack((R, t)), (err_q, err_t)
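
# The pose inversion used above (utils_geo.invert_Rt) presumably converts the
# scene-motion [R|t] returned by cv2.recoverPose into camera motion; a minimal
# sketch under that assumption:
def invert_Rt_sketch(R, t):
    """Invert a rigid transform x' = R x + t, giving x = R^T x' - R^T t."""
    return R.T, -R.T @ t
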


# def compute_fundamental_scipy(x1,x2):
#     from scipy import linalg
#     """    Computes the fundamental matrix from corresponding points
#         (x1,x2 3*n arrays) using the 8 point algorithm.
#         Each row in the A matrix below is constructed as
#         [x'*x, x'*y, x', y'*x, y'*y, y', x, y, 1] """

#     n = x1.shape[1]
#     if x2.shape[1] != n:
#         raise ValueError("Number of points don't match.")

#     # build matrix for equations
#     A = zeros((n,9))
#     for i in range(n):
#         A[i] = [x1[0,i]*x2[0,i], x1[0,i]*x2[1,i], x1[0,i]*x2[2,i],
#                 x1[1,i]*x2[0,i], x1[1,i]*x2[1,i], x1[1,i]*x2[2,i],
#                 x1[2,i]*x2[0,i], x1[2,i]*x2[1,i], x1[2,i]*x2[2,i] ]

#     # compute linear least square solution
#     U,S,V = linalg.svd(A)
#     F = V[-1].reshape(3,3)

#     # constrain F
#     # make rank 2 by zeroing out last singular value
#     U,S,V = linalg.svd(F)
#     S[2] = 0
#     F = dot(U,dot(diag(S),V))

#     return F/F[2,2]

# def compute_fundamental_np(x1,x2):
#     """    Computes the fundamental matrix from corresponding points
#         (x1,x2 3*n arrays) using the 8 point algorithm.
#         Each row in the A matrix below is constructed as
#         [x'*x, x'*y, x', y'*x, y'*y, y', x, y, 1] """

#     n = x1.shape[1]
#     if x2.shape[1] != n:
#         raise ValueError("Number of points don't match.")

#     # build matrix for equations
#     A = zeros((n,9))
#     for i in range(n):
#         A[i] = [x1[0,i]*x2[0,i], x1[0,i]*x2[1,i], x1[0,i]*x2[2,i],
#                 x1[1,i]*x2[0,i], x1[1,i]*x2[1,i], x1[1,i]*x2[2,i],
#                 x1[2,i]*x2[0,i], x1[2,i]*x2[1,i], x1[2,i]*x2[2,i] ]

#     # compute linear least square solution
#     U,S,V = np.linalg.svd(A)
#     F = V[-1].reshape(3,3)

#     # # constrain F
#     # # make rank 2 by zeroing out last singular value
#     # U,S,V = np.linalg.svd(F)
#     # S[2] = 0
#     # F = dot(U,dot(diag(S),V))

#     return F/F[2,2], A
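
# The two commented-out helpers above implement the (unnormalized) 8-point
# algorithm; a compact runnable consolidation for reference (same A-matrix
# construction, with the rank-2 constraint enforced):
import numpy as np

def compute_fundamental_sketch(x1, x2):
    """x1, x2: (3, n) homogeneous correspondences. Returns a rank-2 F."""
    n = x1.shape[1]
    if x2.shape[1] != n:
        raise ValueError("Number of points don't match.")
    A = np.zeros((n, 9))
    for i in range(n):
        A[i] = [x1[0, i] * x2[0, i], x1[0, i] * x2[1, i], x1[0, i] * x2[2, i],
                x1[1, i] * x2[0, i], x1[1, i] * x2[1, i], x1[1, i] * x2[2, i],
                x1[2, i] * x2[0, i], x1[2, i] * x2[1, i], x1[2, i] * x2[2, i]]
    _, _, V = np.linalg.svd(A)  # least-squares solution: last right singular vector
    F = V[-1].reshape(3, 3)
    U, S, V = np.linalg.svd(F)  # enforce rank 2 by zeroing the smallest singular value
    F = U @ np.diag([S[0], S[1], 0.]) @ V
    return F / F[2, 2]
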
Example #5
def get_Rt_loss(
    E_ests_layers, Ks_cpu, x1_cpu, x2_cpu, delta_Rtijs_4_4_cpu, qs_cam, ts_cam,
    device='cpu'
):
    """ losses from essential matrix and ground truth R,t
    params:
        E_ests_layers -> [B, 3, 3]: batch essential matrices
        Ks_cpu -> [B, 3, 3]: batch intrinsics
        x1_cpu, x2_cpu: no use
        delta_Rtijs_4_4_cpu -> [B, 4, 4]: ground truth transformation matrices
        qs_cam -> [B, 4]: ground truth rotation
        ts_cam -> [B, 3]: ground truth translation

    return:
        dict: with l2 loss, Rt angle error
    
    """
    # Differentiable R t decomposition from E_est
    K_np = Ks_cpu.numpy()
    x1_np, x2_np = x1_cpu.numpy(), x2_cpu.numpy()
    # delta_Rtijs_4_4_cpu_np = delta_Rtijs_4_4_cpu.numpy()

    R_angle_error_layers_list = []
    t_angle_error_layers_list = []
    t_l2_error_layers_list = []
    q_l2_error_layers_list = []
    R_angle_error_mean_layers_list = []
    t_angle_error_mean_layers_list = []
    t_l2_error_mean_layers_list = []
    q_l2_error_mean_layers_list = []

    ## many layers of essential matrices
    for layer_idx, E_ests in enumerate(E_ests_layers):
        R_angle_error_list = []
        t_angle_error_list = []
        t_l2_error_list = []
        q_l2_error_list = []

        # ================= method 1/2 ===============
        ## convert E mat to R, t
        R12s_list = []
        t12s_list = []
        for idx, E_cam in enumerate(E_ests.cpu().transpose(1, 2)):
            # FU, FD, FV= torch.svd(E_cam, some=True)
            # # print('[info.Debug @_E_from_XY] Singular values for recovered E(F):\n', FD.detach().numpy())
            # S_110 = torch.diag(torch.tensor([1., 1., 0.], dtype=FU.dtype, device=FU.device))
            # E_cam = torch.mm(FU, torch.mm(S_110, FV.t()))

            R12s, t12s, M12s = utils_F._get_M2s(E_cam)
            R12s_list.append(R12s)
            t12s_list.append(t12s)
        R12s_batch_cam = [
            torch.stack([R12s[0] for R12s in R12s_list]).to(device),
            torch.stack([R12s[1] for R12s in R12s_list]).to(device),
        ]
        t12s_batch_cam = [
            torch.stack([t12s[0] for t12s in t12s_list]).to(device),
            torch.stack([t12s[1] for t12s in t12s_list]).to(device),
        ]  # already unit norm

        for (
            R1_est_cam,
            R2_est_cam,
            t1_est_cam,
            t2_est_cam,
            q_gt_cam,
            t_gt_cam,
            E_hat_single,
            K_single_np,
            x1_single_np,
            x2_single_np,
            delta_Rtijs_4_4_inv,
        ) in zip(
            R12s_batch_cam[0],
            R12s_batch_cam[1],
            t12s_batch_cam[0],
            t12s_batch_cam[1],
            qs_cam,
            ts_cam,
            E_ests,
            K_np,
            x1_np,
            x2_np,
            torch.inverse(delta_Rtijs_4_4_cpu.to(device)),
        ):
            q1_est_cam = utils_geo._R_to_q(R1_est_cam)
            q2_est_cam = utils_geo._R_to_q(R2_est_cam)
            t_gt_cam = F.normalize(t_gt_cam, p=2, dim=0) # normed translation
            q12_error = [
                utils_geo._l2_error(q1_est_cam, q_gt_cam),
                utils_geo._l2_error(q2_est_cam, q_gt_cam),
            ]
            t12_error = [
                utils_geo._l2_error(t1_est_cam, t_gt_cam),
                utils_geo._l2_error(t2_est_cam, t_gt_cam),
            ]
            q12_who_is_small = q12_error[0] < q12_error[1]
            t12_who_is_small = t12_error[0] < t12_error[1]

            R_est = (
                q12_who_is_small * R1_est_cam + (~q12_who_is_small) * R2_est_cam
            )
            t_est = (
                t12_who_is_small * t1_est_cam + (~t12_who_is_small) * t2_est_cam
            )

            R_gt = delta_Rtijs_4_4_inv[:3, :3]

            # R_angle_error = utils_geo._rot_angle_error(R_est, R_gt)
            R_angle_error = utils_geo.rot12_to_angle_error(
                R_est.detach().cpu().numpy(), R_gt.detach().cpu().numpy()
            )
            t_angle_error = utils_geo.vector_angle(
                t_est.detach().cpu().numpy(), t_gt_cam.detach().cpu().numpy()
            )

            ## calculate l2 loss
            q_l2_error = (
                q12_who_is_small * q12_error[0]
                + (~q12_who_is_small) * q12_error[1]
            )
            t_l2_error = (
                t12_who_is_small * t12_error[0]
                + (~t12_who_is_small) * t12_error[1]
            )

            # print('--1', layer_idx, R_est.cpu().detach().numpy(), R_gt.cpu().detach().numpy(), R_angle_error)
            # print('--1', layer_idx, R_angle_error, t_angle_error)

            # ================= method 3/2: OpenCV ===============
            # if layer_idx == len(E_ests_layers)-1:
            #     print('---3', E_hat_single)
            #     FU, FD, FV= torch.svd(E_hat_single, some=True)
            #     # print('[info.Debug @_E_from_XY] Singular values for recovered E(F):\n', FD.detach().numpy())
            #     S_110 = torch.diag(torch.tensor([1., 1., 0.], dtype=FU.dtype, device=FU.device))
            #     E_hat_single = torch.mm(FU, torch.mm(S_110, FV.t()))

            #     M_estW, error_Rt_estW, M_estW_cam = utils_F.goodCorr_eval_nondecompose(x1_single_np, x2_single_np, E_hat_single.cpu().detach().numpy().astype(np.float64), delta_Rtijs_4_4_inv.cpu().detach().numpy(), K_single_np, None)
            #     print('--3', M_estW_cam[:3, :3], delta_Rtijs_4_4_inv.cpu().detach().numpy()[:3, :3], error_Rt_estW[0])
            #     # print('--3', layer_idx, error_Rt_estW[0], error_Rt_estW[1])

            # # R2s_batch, t2s_batch = utils_F._get_M2s_batch(E_ests[:2])

            # ================= method 2/2 ===============
            # for E_hat_single, K_single_np, x1_single_np, x2_single_np, delta_Rtijs_4_4_inv, q_gt_cam, t_gt_cam in zip(E_ests, K_np, x1_np, x2_np, torch.inverse(delta_Rtijs_4_4_cpu.cuda()), qs_cam, ts_cam):
            #     M2_list, error_Rt, Rt_cam = utils_F._E_to_M_train(E_hat_single, K_single_np, x1_single_np, x2_single_np, delta_Rt_gt_cam=None, show_debug=False, show_result=False)
            #     if Rt_cam is None:
            #         R_angle_error_list.append(0.)
            #         t_angle_error_list.append(0.)
            #         t_l2_error_list.append(torch.tensor(0.).float().cuda())
            #         q_l2_error_list.append(torch.tensor(0.).float().cuda())
            #         continue

            #     R_est = Rt_cam[:, :3]
            #     # t_est = F.normalize(Rt_cam[:, 3:4], p=2, dim=0)  # already unit norm
            #     t_est = Rt_cam[:, 3:4]
            #     R_gt = delta_Rtijs_4_4_inv[:3, :3]
            #     # t_gt = F.normalize(delta_Rtijs_4_4_inv[:3, 3:4], p=2, dim=0)
            #     t_gt = F.normalize(t_gt_cam, p=2, dim=0)
            #     # t_gt = delta_Rtijs_4_4_inv[:3, 3:4]

            #     R_angle_error = utils_geo._rot_angle_error(R_est, R_gt)
            #     t_angle_error = utils_geo.vector_angle(t_est.detach().cpu().numpy(), t_gt.detach().cpu().numpy())

            #     q_est = utils_geo._R_to_q(R_est)
            #     q_gt = q_gt_cam

            #     q_l2_error = utils_geo._l2_error(q_est, q_gt)
            #     q_l2_error = q_l2_error * (R_angle_error < 30.)
            #     t_l2_error = utils_geo._l2_error(t_est, t_gt)
            #     t_l2_error = t_l2_error * (t_angle_error < 30.)

            R_angle_error_list.append(R_angle_error)
            t_angle_error_list.append(t_angle_error)
            t_l2_error_list.append(t_l2_error)
            q_l2_error_list.append(q_l2_error)

        #     print('--2', layer_idx, R_est.cpu().detach().numpy())

        #     num_inlier, R, t, mask_new = cv2.recoverPose(E_hat_single.cpu().detach().numpy().astype(np.float64), x1_single_np, x2_single_np, focal=K_single_np[0, 0], pp=(K_single_np[0, 2], K_single_np[1, 2]))
        #     print('--3', layer_idx, R)

        # ================================

        # if layer_idx == len(E_ests_layers)-1:
        #     print('R_angle_error_list', R_angle_error_list)
        #     print('t_angle_error_list', t_angle_error_list)

        R_angle_error_mean = sum(R_angle_error_list) / len(R_angle_error_list)
        t_angle_error_mean = sum(t_angle_error_list) / len(t_angle_error_list)
        t_l2_error_mean = sum(t_l2_error_list) / len(t_l2_error_list)
        q_l2_error_mean = sum(q_l2_error_list) / len(q_l2_error_list)

        R_angle_error_layers_list.append(np.array(R_angle_error_list))
        t_angle_error_layers_list.append(np.array(t_angle_error_list))
        t_l2_error_layers_list.append(torch.stack(t_l2_error_list))
        q_l2_error_layers_list.append(torch.stack(q_l2_error_list))

        R_angle_error_mean_layers_list.append(R_angle_error_mean)
        t_angle_error_mean_layers_list.append(t_angle_error_mean)
        t_l2_error_mean_layers_list.append(t_l2_error_mean)
        q_l2_error_mean_layers_list.append(q_l2_error_mean)

    R_angle_error_mean_all = mean_list(R_angle_error_mean_layers_list)
    t_angle_error_mean_all = mean_list(t_angle_error_mean_layers_list)
    t_l2_error_mean_all = mean_list(t_l2_error_mean_layers_list)
    q_l2_error_mean_all = mean_list(q_l2_error_mean_layers_list)

    return_list = {
        "t_l2_error_mean": t_l2_error_mean_all,
        "q_l2_error_mean": q_l2_error_mean_all,
        "t_l2_error_list": torch.stack(t_l2_error_mean_layers_list),
        "q_l2_error_list": torch.stack(t_l2_error_mean_layers_list),
    }
    return_list.update(
        {
            "R_angle_error_mean": R_angle_error_mean_all,
            "R_angle_error_list": np.array(R_angle_error_mean_layers_list),
            "t_angle_error_mean": t_angle_error_mean_all,
            "t_angle_error_list": np.array(t_angle_error_mean_layers_list),
        }
    )
    return_list.update(
        {
            "R_angle_error_layers_list": R_angle_error_layers_list,
            "t_angle_error_layers_list": t_angle_error_layers_list,
            "t_l2_error_layers_list": t_l2_error_layers_list,
            "q_l2_error_layers_list": q_l2_error_layers_list,
        }
    )

    return return_list
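
# The candidate-selection trick used in get_Rt_loss, in isolation: a boolean
# 0/1 blend picks whichever of the two decomposition candidates is closer to
# ground truth while keeping the graph differentiable w.r.t. both candidates
# (minimal torch sketch; names are illustrative):
import torch

def pick_closer_sketch(cand_a, cand_b, err_a, err_b):
    """cand_a/cand_b: candidate tensors; err_a/err_b: 0-dim torch error tensors."""
    a_is_better = err_a < err_b  # bool tensor; the comparison itself carries no gradient
    return a_is_better * cand_a + (~a_is_better) * cand_b
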
Example #6
    def eval_one_sample(self, sample):
        import torch
        # If the dsac_tools imports below cannot be found:
        # export KITTI_UTILS_PATH='/home/ruizhu/Documents/Projects/kitti_instance_RGBD_utils'
        import dsac_tools.utils_F as utils_F
        import dsac_tools.utils_opencv as utils_opencv
        import dsac_tools.utils_vis as utils_vis
        import dsac_tools.utils_misc as utils_misc
        import dsac_tools.utils_geo as utils_geo
        from train_good_utils import val_rt, get_matches_from_SP

        # params
        config = self.config
        net_dict = self.net_dict
        if_SP = self.config["model"]["if_SP"]
        if_quality = self.config["model"]["if_quality"]
        device = self.device
        net_SP_helper = self.net_SP_helper

        task = "validating"
        imgs = sample["imgs"]  # [batch_size, H, W, 3]
        Ks = sample["K"].to(device)  # [batch_size, 3, 3]
        K_invs = sample["K_inv"].to(device)  # [batch_size, 3, 3]
        batch_size = Ks.size(0)
        scene_names = sample["scene_name"]
        frame_ids = sample["frame_ids"]
        scene_poses = sample[
            "relative_scene_poses"]  # list of sequence_length tensors, which with size [batch_size, 4, 4]; the first being identity, the rest are [[R; t], [0, 1]]
        if config["data"]["read_what"]["with_X"]:
            Xs = sample[
                "X_cam2s"]  # list of [batch_size, 3, Ni]; only support batch_size=1 because of variable points Ni for each sample
        # sift_kps, sift_deses = sample['sift_kps'], sample['sift_deses']
        assert sample["get_flags"]["have_matches"][0].numpy(
        ), "Did not find the corres files!"
        matches_all, matches_good = sample["matches_all"], sample[
            "matches_good"]
        quality_all, quality_good = sample["quality_all"], sample[
            "quality_good"]

        delta_Rtijs_4_4 = scene_poses[1].float(
        )  # [batch_size, 4, 4], asserting we have 2 frames where scene_poses[0] are all identities
        E_gts, F_gts = sample["E"], sample["F"]
        pts1_virt_normalizedK, pts2_virt_normalizedK = (
            sample["pts1_virt_normalized"].to(device),
            sample["pts2_virt_normalized"].to(device),
        )
        pts1_virt_ori, pts2_virt_ori = (
            sample["pts1_virt"].to(device),
            sample["pts2_virt"].to(device),
        )
        # pts1_virt_ori, pts2_virt_ori = sample['pts1_velo'].to(device), sample['pts2_velo'].to(device)

        # Get and Normalize points
        if if_SP:
            net_SP = net_dict["net_SP"]
            SP_processer, SP_tracker = (
                net_SP_helper["SP_processer"],
                net_SP_helper["SP_tracker"],
            )
            xs, offsets, quality = get_matches_from_SP(sample["imgs_grey"],
                                                       net_SP, SP_processer,
                                                       SP_tracker)
            matches_use = xs + offsets
            quality_use = quality
        else:
            # Get and Normalize points
            matches_use = matches_good  # [SWITCH!!!]
            quality_use = quality_good.to(
                device) if if_quality else None  # [SWITCH!!!]

        ## process x1, x2
        matches_use = matches_use.to(device)

        N_corres = matches_use.shape[
            1]  # 1311 for matches_good, 2000 for matches_all
        x1, x2 = (
            matches_use[:, :, :2],
            matches_use[:, :, 2:],
        )  # [batch_size, N, 2(W, H)]
        x1_normalizedK = utils_misc._de_homo(
            torch.matmul(
                torch.inverse(Ks),
                utils_misc._homo(x1).transpose(1, 2)).transpose(
                    1,
                    2))  # [batch_size, N, 2(W, H)], min/max_X=[-W/2/f, W/2/f]
        x2_normalizedK = utils_misc._de_homo(
            torch.matmul(
                torch.inverse(Ks),
                utils_misc._homo(x2).transpose(1, 2)).transpose(
                    1,
                    2))  # [batch_size, N, 2(W, H)], min/max_X=[-W/2/f, W/2/f]
        matches_use_normalizedK = torch.cat((x1_normalizedK, x2_normalizedK),
                                            2)

        matches_use_ori = torch.cat((x1, x2), 2)

        # Get image feats
        if config["model"]["if_img_feat"]:
            imgs = sample["imgs"]  # [batch_size, H, W, 3]
            imgs_stack = ((torch.cat(imgs, 3).float() - 127.5) /
                          127.5).permute(0, 3, 1, 2)

        qs_scene = sample["q_scene"].to(device)  # [B, 4, 1]
        ts_scene = sample["t_scene"].to(device)  # [B, 3, 1]
        qs_cam = sample["q_cam"].to(device)  # [B, 4, 1]
        ts_cam = sample["t_cam"].to(device)  # [B, 3, 1]

        t_scene_scale = torch.norm(ts_scene, p=2, dim=1, keepdim=True)

        # image_height, image_width = config['data']['image']['size'][0], config['data']['image']['size'][1]
        # mask_x1 = (matches_use_ori[:, :, 0] > (image_width/8.*3.)).byte() & (matches_use_ori[:, :, 0] < (image_width/8.*5.)).byte()
        # mask_x2 = (matches_use_ori[:, :, 2] > (image_width/8.*3.)).byte() & (matches_use_ori[:, :, 2] < (image_width/8.*5.)).byte()
        # mask_y1 = (matches_use_ori[:, :, 1] > (image_height/8.*3.)).byte() & (matches_use_ori[:, :, 1] < (image_height/8.*5.)).byte()
        # mask_y2 = (matches_use_ori[:, :, 3] > (image_height/8.*3.)).byte() & (matches_use_ori[:, :, 3] < (image_height/8.*5.)).byte()
        # mask_center = (~(mask_x1 & mask_y1)) & (~(mask_x2 & mask_y2))
        # matches_use_ori = (mask_center.float()).unsqueeze(-1) * matches_use_ori + torch.tensor([image_width/2., image_height/2., image_width/2., image_height/2.]).to(device).unsqueeze(0).unsqueeze(0) * (1- (mask_center.float()).unsqueeze(-1))
        # x1, x2 = matches_use_ori[:, :, :2], matches_use_ori[:, :, 2:] # [batch_size, N, 2(W, H)]

        data_batch = {
            "matches_xy_ori": matches_use_ori,
            "quality": quality_use,
            "x1_normalizedK": x1_normalizedK,
            "x2_normalizedK": x2_normalizedK,
            "Ks": Ks,
            "K_invs": K_invs,
            "matches_good_unique_nums": sample["matches_good_unique_nums"],
            "t_scene_scale": t_scene_scale,
        }
        # loss_params = {'model': config['model']['name'], 'clamp_at':config['model']['clamp_at'], 'depth': config['model']['depth']}
        loss_params = {
            "model": config["model"]["name"],
            "clamp_at": config["model"]["clamp_at"],
            "depth": config["model"]["depth"],
        }

        with torch.no_grad():
            outs = net_dict["net_deepF"](data_batch)

            pts1_eval, pts2_eval = pts1_virt_ori, pts2_virt_ori

            #     logits = outs['logits'] # [batch_size, N]
            #     logits_weights = F.softmax(logits, dim=1)
            logits_weights = outs["weights"]
            loss_E = 0.0

            F_out, T1, T2, out_a = (
                outs["F_est"],
                outs["T1"],
                outs["T2"],
                outs["out_layers"],
            )
            pts1_eval = torch.bmm(T1,
                                  pts1_virt_ori.permute(0, 2,
                                                        1)).permute(0, 2, 1)
            pts2_eval = torch.bmm(T2,
                                  pts2_virt_ori.permute(0, 2,
                                                        1)).permute(0, 2, 1)

            # pts1_eval = utils_misc._homo(F.normalize(pts1_eval[:, :, :2], dim=2))
            # pts2_eval = utils_misc._homo(F.normalize(pts2_eval[:, :, :2], dim=2))

            loss_layers = []
            losses_layers = []
            # losses = utils_F.compute_epi_residual(pts1_eval, pts2_eval, F_est, loss_params['clamp_at']) #- res.mean()
            # losses_layers.append(losses)
            # loss_all = losses.mean()
            # loss_layers.append(loss_all)
            out_a.append(F_out)
            loss_all = 0.0
            for layer_i in range(loss_params["depth"]):
                losses = utils_F.compute_epi_residual(pts1_eval, pts2_eval,
                                                      out_a[layer_i],
                                                      loss_params["clamp_at"])
                # losses = utils_F._YFX(pts1_eval, pts2_eval, out_a[iter], if_homo=True, clamp_at=loss_params['clamp_at'])
                losses_layers.append(losses)
                loss = losses.mean()
                loss_layers.append(loss)
                loss_all += loss

            loss_all = loss_all / len(loss_layers)

            F_ests = T2.permute(0, 2, 1).bmm(F_out.bmm(T1))
            E_ests = Ks.transpose(1, 2) @ F_ests @ Ks

            last_losses = losses_layers[-1].detach().cpu().numpy()
            print(last_losses)
            print(np.amax(last_losses, axis=1))

        # E_ests_list = []
        # for x1_single, x2_single, K, w in zip(x1, x2, Ks, logits_weights):
        #     E_est = utils_F._E_from_XY(x1_single, x2_single, K, torch.diag(w))
        #     E_ests_list.append(E_est)
        # E_ests = torch.stack(E_ests_list).to(device)
        # F_ests = utils_F._E_to_F(E_ests, Ks)
        K_np = Ks.cpu().detach().numpy()
        x1_np, x2_np = x1.cpu().detach().numpy(), x2.cpu().detach().numpy()
        E_est_np = E_ests.cpu().detach().numpy()
        F_est_np = F_ests.cpu().detach().numpy()
        delta_Rtijs_4_4_cpu_np = delta_Rtijs_4_4.cpu().numpy()

        # Tests and vis
        idx = 0
        img1 = imgs[0][idx].numpy().astype(np.uint8)
        img2 = imgs[1][idx].numpy().astype(np.uint8)
        img1_rgb, img2_rgb = img1, img2
        img1_rgb_np, img2_rgb_np = img1, img2
        im_shape = img1.shape
        x1 = x1_np[idx]
        x2 = x2_np[idx]
        #         utils_vis.draw_corr(img1, img2, x1, x2)

        delta_Rtij = delta_Rtijs_4_4_cpu_np[idx]
        print("----- delta_Rtij", delta_Rtij)
        delta_Rtij_inv = np.linalg.inv(delta_Rtij)
        K = K_np[idx]
        F_gt_th = F_gts[idx].cpu()
        F_gt = F_gt_th.numpy()
        E_gt_th = E_gts[idx].cpu()
        E_gt = E_gt_th.numpy()
        F_est = F_est_np[idx]
        E_est = E_est_np[idx]

        unique_rows_all, unique_rows_all_idxes = np.unique(np.hstack((x1, x2)),
                                                           axis=0,
                                                           return_index=True)
        mask_sample = np.random.choice(x1.shape[0], 100)
        angle_R = utils_geo.rot12_to_angle_error(np.eye(3),
                                                 delta_Rtij_inv[:3, :3])
        angle_t = utils_geo.vector_angle(np.array([[0.0], [0.0], [1.0]]),
                                         delta_Rtij_inv[:3, 3:4])
        print(
            ">>>>>>>>>>>>>>>> Between frames: The rotation angle (degree) %.4f, and translation angle (degree) %.4f"
            % (angle_R, angle_t))
        utils_vis.draw_corr(
            img1_rgb,
            img2_rgb,
            x1[mask_sample],
            x2[mask_sample],
            linewidth=2.0,
            title="Sample of 100 corres.",
        )

        #         ## Baseline: 8-points
        #         M_8point, error_Rt_8point, mask2_8point, E_est_8point = utils_opencv.recover_camera_opencv(K, x1, x2, delta_Rtij_inv, five_point=False, threshold=0.01)

        ## Baseline: OpenCV 5-point or 8-point (controlled by the five_point flag below)
        five_point = False
        M_opencv, error_Rt_opencv, mask2, E_return = utils_opencv.recover_camera_opencv(
            K, x1, x2, delta_Rtij_inv, five_point=five_point, threshold=0.01)

        if five_point:
            E_est_opencv = E_return
            F_est_opencv = utils_F.E_to_F_np(E_est_opencv, K)
        else:
            E_est_opencv, F_est_opencv = E_return[0], E_return[1]

        ## Check geo dists
        print(f"K: {K}")
        x1_normalizedK = utils_misc.de_homo_np(
            (np.linalg.inv(K) @ utils_misc.homo_np(x1).T).T)
        x2_normalizedK = utils_misc.de_homo_np(
            (np.linalg.inv(K) @ utils_misc.homo_np(x2).T).T)
        K_th = torch.from_numpy(K)
        F_gt_normalized = K_th.t() @ F_gt_th @ K_th  # Should be identical to E_gts[idx]

        geo_dists = utils_F._sym_epi_dist(
            F_gt_normalized,
            torch.from_numpy(x1_normalizedK),
            torch.from_numpy(x2_normalizedK),
        ).numpy()
        geo_thres = 1e-4
        mask_in = geo_dists < geo_thres
        mask_out = geo_dists >= geo_thres

        mask_sample = mask2
        print(mask2.shape)
        np.set_printoptions(precision=8, suppress=True)

        ## Ours: Some analysis
        print("----- Oursssssssssss")
        scores_ori = logits_weights.cpu().numpy().flatten()
        import matplotlib.pyplot as plt

        plt.hist(scores_ori, 100)
        plt.show()
        sort_idxes = np.argsort(scores_ori[unique_rows_all_idxes])[::-1]
        scores = scores_ori[unique_rows_all_idxes][sort_idxes]
        num_corr = 100
        mask_conf = sort_idxes[:num_corr]
        # mask_sample = np.array(range(x1.shape[0]))[mask_sample][:20]

        utils_vis.draw_corr(
            img1_rgb,
            img2_rgb,
            x1[unique_rows_all_idxes],
            x2[unique_rows_all_idxes],
            linewidth=2.0,
            title=f"All {unique_rows_all_idxes.shape[0]} correspondences",
        )

        utils_vis.draw_corr(
            img1_rgb,
            img2_rgb,
            x1[unique_rows_all_idxes][mask_conf, :],
            x2[unique_rows_all_idxes][mask_conf, :],
            linewidth=2.0,
            title=f"Ours top {num_corr} confidents",
        )
        #         print('(%d unique corres)'%scores.shape[0])
        utils_vis.show_epipolar_rui_gtEst(
            x2[unique_rows_all_idxes][mask_conf, :],
            x1[unique_rows_all_idxes][mask_conf, :],
            img2_rgb,
            img1_rgb,
            F_gt.T,
            F_est.T,
            weights=scores_ori[unique_rows_all_idxes][mask_conf],
            im_shape=im_shape,
            title_append="Ours top %d with largest score points" %
            mask_conf.shape[0],
        )
        print(f"F_gt: {F_gt/F_gt[2, 2]}")
        print(f"F_est: {F_est/F_est[2, 2]}")
        error_Rt_est_ours, epi_dist_mean_est_ours, _, _, _, _, _, M_estW = val_rt(
            idx,
            K,
            x1,
            x2,
            E_est,
            E_gt,
            F_est,
            F_gt,
            delta_Rtij,
            five_point=False,
            if_opencv=False,
        )
        print(
            "Recovered by ours (camera): The rotation error (degree) %.4f, and translation error (degree) %.4f"
            % (error_Rt_est_ours[0], error_Rt_est_ours[1]))
        #         print(epi_dist_mean_est_ours, np.mean(epi_dist_mean_est_ours))
        print("%.2f, %.2f" % (
            np.sum(epi_dist_mean_est_ours < 0.1) /
            epi_dist_mean_est_ours.shape[0],
            np.sum(epi_dist_mean_est_ours < 1) /
            epi_dist_mean_est_ours.shape[0],
        ))

        ## OpenCV: Some analysis
        corres = np.hstack((x1[mask_sample, :], x2[mask_sample, :]))

        unique_rows = np.unique(corres,
                                axis=0) if corres.shape[0] > 0 else corres

        opencv_name = "5-point" if five_point else "8-point"
        utils_vis.draw_corr(
            img1_rgb,
            img2_rgb,
            x1[mask_sample, :],
            x2[mask_sample, :],
            linewidth=2.0,
            title=f"OpenCV {opencv_name} inliers",
        )

        print("----- OpenCV %s (%d unique inliers)" %
              (opencv_name, unique_rows.shape[0]))
        utils_vis.show_epipolar_rui_gtEst(
            x2[mask_sample, :],
            x1[mask_sample, :],
            img2_rgb,
            img1_rgb,
            F_gt.T,
            F_est_opencv.T,
            weights=scores_ori[mask_sample],
            im_shape=im_shape,
            title_append="OpenCV 5-point with its inliers",
        )
        print(F_gt / F_gt[2, 2])
        print(F_est_opencv / F_est_opencv[2, 2])
        error_Rt_est_5p, epi_dist_mean_est_5p, _, _, _, _, _, M_estOpenCV = val_rt(
            idx,
            K,
            x1,
            x2,
            E_est_opencv,
            E_gt,
            F_est_opencv,
            F_gt,
            delta_Rtij,
            five_point=False,
            if_opencv=False,
        )
        print(
            "Recovered by OpenCV %s (camera): The rotation error (degree) %.4f, and translation error (degree) %.4f"
            % (opencv_name, error_Rt_est_5p[0], error_Rt_est_5p[1]))
        print("%.2f, %.2f" % (
            np.sum(epi_dist_mean_est_5p < 0.1) / epi_dist_mean_est_5p.shape[0],
            np.sum(epi_dist_mean_est_5p < 1) / epi_dist_mean_est_5p.shape[0],
        ))
        # dict_of_lists['opencv5p'].append((np.sum(epi_dist_mean_est_5p<0.1)/epi_dist_mean_est_5p.shape[0], np.sum(epi_dist_mean_est_5p<1)/epi_dist_mean_est_5p.shape[0]))
        # dict_of_lists['ours'].append((np.sum(epi_dist_mean_est_ours<0.1)/epi_dist_mean_est_ours.shape[0], np.sum(epi_dist_mean_est_ours<1)/epi_dist_mean_est_ours.shape[0]))

        print("+++ GT, Opencv_5p, Ours")
        np.set_printoptions(precision=4, suppress=True)
        print(delta_Rtij_inv[:3])
        print(
            np.hstack((
                M_opencv[:, :3],
                M_opencv[:, 3:4] / M_opencv[2, 3] * delta_Rtij_inv[2, 3],
            )))
        print(
            np.hstack((M_estW[:, :3],
                       M_estW[:, 3:4] / M_estW[2, 3] * delta_Rtij_inv[2, 3])))

        return {
            "img1_rgb": img1_rgb,
            "img2_rgb": img2_rgb,
            "delta_Rtij": delta_Rtij
        }
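
# The per-batch keypoint normalization done above, in plain NumPy for
# reference (the in-method version does the same with batched torch matmuls):
import numpy as np

def normalize_pts_sketch(x, K):
    """Map (N, 2) pixel coordinates to normalized camera coordinates via K^-1."""
    x_homo = np.hstack([x, np.ones((x.shape[0], 1))])  # (N, 3) homogeneous
    xn = (np.linalg.inv(K) @ x_homo.T).T
    return xn[:, :2] / xn[:, 2:3]                      # de-homogenize
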
Example #7
    def get_ij(self, i, j, visualize=False):
        """ Return frame i and j with point cloud from i, and relative camera pose [R|t] """
        # Rt0 = self.scene_data['pose'][0] # Identity, or = utils_misc.identity_Rt()
        # Rti = self.scene_data['pose'][i]
        # Rtj = self.scene_data['pose'][j]

        # print('Rti', Rti)
        # print('Rti', Rtj)

        X_rect_i = self.X_rect_list[i]

        np.set_printoptions(precision=8, suppress=True)

        # delta_Rtij = utils_misc.Rt_depad(np.linalg.inv(utils_misc.Rt_pad(Rti)) @ utils_misc.Rt_pad(Rtj))
        odo_pose = self.imu2cam @ np.linalg.inv(
            self.scene_data['imu_pose_matrix']
            [i]) @ self.scene_data['imu_pose_matrix'][j] @ np.linalg.inv(
                self.imu2cam)  # camera motion
        # delta_Rtij = utils_misc.Rt_depad(np.linalg.inv(odo_pose)) # scene motion;  [RUI] Cam 0

        print(
            self.imu2cam @ np.linalg.inv(self.scene_data['imu_pose_matrix'][j])
            @ self.scene_data['imu_pose_matrix'][i] @ np.linalg.inv(
                self.imu2cam))

        delta_Rtij = utils_misc.Rt_depad(
            self.Rtl_gt @ np.linalg.inv(odo_pose) @ np.linalg.inv(
                self.Rtl_gt))  # [RUI] Cam 2
        val_inds_i, _ = utils_vis.reproj_and_scatter(
            utils_misc.identity_Rt(),
            X_rect_i.T,
            self.dataset_rgb[i][0],
            self,
            visualize=visualize,
            title_appendix='frame %d (left)' % i,
            set_lim=True)
        val_inds_j, _ = utils_vis.reproj_and_scatter(
            delta_Rtij,
            X_rect_i.T,
            self.dataset_rgb[j][0],
            self,
            visualize=visualize,
            title_appendix='frame %d (left)' % j,
            set_lim=True)
        X_rect_j = self.X_rect_list[j]
        # val_inds_j = utils_vis.reproj_and_scatter(Rt0, X_rect_j, self.dataset_rgb[j][0], self, visualize=visualize)
        val_idxes = utils_misc.vis_masks_to_inds(val_inds_i, val_inds_j)
        X_rect_i_vis = X_rect_i[:, val_idxes]

        delta_Rtij_inv = utils_misc.Rt_depad(odo_pose)  # camera motion

        # print(delta_Rtij_inv)

        angle_R = utils_geo.rot12_to_angle_error(np.eye(3),
                                                 delta_Rtij_inv[:, :3])
        angle_t = utils_geo.vector_angle(np.array([[0.], [0.], [1.]]),
                                         delta_Rtij_inv[:, 3:4])

        print(
            '>>>>>>>>>>>>>>>> Between frame %d and %d: \nThe rotation angle (degree) %.4f, and translation angle (degree) %.4f'
            % (i, j, angle_R, angle_t))

        return X_rect_i, X_rect_i_vis, delta_Rtij, delta_Rtij_inv, self.dataset_rgb[
            i][0], self.dataset_rgb[j][0]
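
# For reference, the Rt_pad / Rt_depad helpers used above presumably convert
# between 3x4 [R|t] and homogeneous 4x4 form (a sketch under that assumption):
import numpy as np

def Rt_pad_sketch(Rt):
    """(3, 4) [R|t] -> (4, 4) homogeneous transform."""
    return np.vstack([Rt, np.array([[0., 0., 0., 1.]])])

def Rt_depad_sketch(Rt4):
    """(4, 4) homogeneous transform -> (3, 4) [R|t]."""
    return Rt4[:3, :]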