Example #1
def relative_pose_ransac(b1, b2, method, threshold, iterations, probability):
    # In-house estimation (pyrobust)
    if in_house_multiview:  # flag resolved at module scope
        # Convert the opengv-style 1 - cos(angle) threshold back to an angle.
        threshold = np.arccos(1 - threshold)
        params = pyrobust.RobustEstimatorParams()
        params.iterations = 1000  # note: hard-coded; ignores the `iterations` argument
        result = pyrobust.ransac_relative_pose(b1, b2, threshold, params,
                                               pyrobust.RansacType.RANSAC)

        # Invert the estimated pose: [R | t] -> [R.T | -R.T t]
        Rt = result.lo_model.copy()
        R, t = Rt[:3, :3].copy(), Rt[:, 3].copy()
        Rt[:3, :3] = R.T
        Rt[:, 3] = -R.T.dot(t)
        return Rt

    # fallback to opengv
    else:
        try:
            return pyopengv.relative_pose_ransac(b1,
                                                 b2,
                                                 method,
                                                 threshold,
                                                 iterations=iterations,
                                                 probability=probability)
        except Exception:
            # Older versions of pyopengv do not accept the probability argument.
            return pyopengv.relative_pose_ransac(b1, b2, method, threshold,
                                                 iterations)
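
The tail of this variant inverts the estimated pose in place. A minimal numpy sketch (synthetic data, nothing project-specific) verifying that [R | t] -> [R.T | -R.T t] really is the inverse transform:

import numpy as np

Q, _ = np.linalg.qr(np.random.randn(3, 3))
R = Q * np.sign(np.linalg.det(Q))   # proper rotation, det(R) = +1
t = np.random.randn(3)

T = np.eye(4)
T[:3, :3], T[:3, 3] = R, t
T_inv = np.eye(4)
T_inv[:3, :3], T_inv[:3, 3] = R.T, -R.T @ t

assert np.allclose(T @ T_inv, np.eye(4))  # composes to the identity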
Example #2
def relative_pose_ransac(b1, b2, method, threshold, iterations, probability):
    try:
        return pyopengv.relative_pose_ransac(b1, b2, method, threshold,
                                             iterations=iterations,
                                             probability=probability)
    except Exception:
        # Older versions of pyopengv do not accept the probability argument.
        return pyopengv.relative_pose_ransac(b1, b2, method, threshold,
                                             iterations)
Example #3
def relative_pose_ransac(b1, b2, method, threshold, iterations, probability):
    try:
        return pyopengv.relative_pose_ransac(b1,
                                             b2,
                                             method,
                                             threshold,
                                             iterations=iterations,
                                             probability=probability)
    except Exception:
        # Older versions of pyopengv do not accept the probability argument.
        return pyopengv.relative_pose_ransac(b1, b2, method, threshold,
                                             iterations)
Example #4
def test_relative_pose_ransac():
    print("Testing relative pose ransac")

    d = RelativePoseDataset(100, 0.0, 0.3)

    ransac_transformation = pyopengv.relative_pose_ransac(
        d.bearing_vectors1, d.bearing_vectors2, "NISTER", 0.01, 1000)

    assert same_transformation(d.position, d.rotation, ransac_transformation)

    print("Done testing relative pose ransac")
Example #5
def test_relative_pose_ransac():
    print "Testing relative pose ransac"

    d = RelativePoseDataset(100, 0.0, 0.3)

    ransac_transformation = pyopengv.relative_pose_ransac(
        d.bearing_vectors1, d.bearing_vectors2, "NISTER", 0.01, 1000)

    assert same_transformation(d.position, d.rotation, ransac_transformation)

    print "Done testing relative pose ransac"
Example #6
File: tests.py Project: ferreram/opengv
def test_relative_pose_ransac():
    print "Testing relative pose ransac"

    d = RelativePoseDataset(100, 0.0, 0.3)

    ransac_transformation = pyopengv.relative_pose_ransac(
        d.bearing_vectors1, d.bearing_vectors2, "NISTER", 0.01, 1000)

    assert same_transformation(d.position, d.rotation,
                               ransac_transformation[0])

    print("Test Relative Pose RANSAC \n")

    ransac_transformation = pyopengv.relative_pose_ransac(
        d.bearing_vectors1, d.bearing_vectors2, "STEWENIUS", 0.01, 1000)

    print("Result : \n %s \n" % ransac_transformation[0])
    print("Inliers : \n %s \n" % ransac_transformation[1])

    print "Done testing relative pose ransac"
Example #7
File: register.py Project: gilbert1991/map
def twoViewReconstruction(b1, b2, c1, c2, threshold):
    # 1000 is the number of RANSAC iterations
    T = pyopengv.relative_pose_ransac(b1, b2, "NISTER", threshold, 1000)
    R = T[:, :3]
    t = T[:, 3]
    inliers = _two_view_reconstruction_inliers(b1, b2, R, t, threshold)

    T = run_relative_pose_optimize_nonlinear(b1[inliers], b2[inliers], t, R)
    R = T[:, :3]
    t = T[:, 3]
    inliers = _two_view_reconstruction_inliers(b1, b2, R, t, threshold)

    return cv2.Rodrigues(R.T)[0].ravel(), -R.T.dot(t), inliers
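
_two_view_reconstruction_inliers is a project helper not shown here. Judged purely by its call shape, one plausible stand-in is an algebraic epipolar test on the bearing vectors (hypothetical, not the project's actual code):

import numpy as np

def epipolar_inliers(b1, b2, R, t, threshold):
    """Hypothetical inlier test: algebraic epipolar error |b2.T @ E @ b1|."""
    E = np.cross(np.eye(3), t) @ R   # E = [t]_x R under one common convention
    err = np.abs(np.einsum('ij,jk,ik->i', b2, E, b1))
    return err < threshold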
Example #8
def test_relative_pose_ransac():
    print("Testing relative pose ransac")

    d = RelativePoseDataset(100, 0.0, 0.3)

    ransac_transformation, inliers = pyopengv.relative_pose_ransac(
        d.bearing_vectors1, d.bearing_vectors2, "NISTER", 0.01, 1000, 0.99,
        True)  # extra args: success probability and a flag to also return inliers (cf. Example #13)
    print('Inliers: {} out of {}'.format(len(inliers), 100))

    assert same_transformation(d.position, d.rotation, ransac_transformation)

    print("Done testing relative pose ransac")
Example #9
def robust_match_calibrated(p1, p2, camera1, camera2, matches, config):
    """Filter matches by estimating the Essential matrix via RANSAC."""

    if len(matches) < 8:
        return np.array([])

    p1 = p1[matches[:, 0]][:, :2].copy()
    p2 = p2[matches[:, 1]][:, :2].copy()
    b1 = camera1.pixel_bearing_many(p1)
    b2 = camera2.pixel_bearing_many(p2)

    threshold = config['robust_matching_threshold']
    # opengv scores bearing-vector residuals as 1 - cos(angle), so map the
    # angular threshold through the same function.
    T = pyopengv.relative_pose_ransac(b1, b2, "STEWENIUS", 1 - np.cos(threshold), 1000)

    inliers = _compute_inliers_bearings(b1, b2, T)

    return matches[inliers]
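
The 1 - np.cos(threshold) conversion reflects how opengv scores bearing-vector residuals: as 1 minus the cosine of the angle between matched rays (Example #1 converts in the opposite direction with np.arccos(1 - threshold)). A quick small-angle sanity check:

import numpy as np

theta = 0.004                        # angular threshold in radians
opengv_threshold = 1 - np.cos(theta)
# For small angles, 1 - cos(theta) ~ theta**2 / 2.
assert np.isclose(opengv_threshold, theta**2 / 2, rtol=1e-4)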
Example #10
def robust_match_calibrated(p1, p2, camera1, camera2, matches, config):
    """Compute robust matches by estimating the Essential matrix via RANSAC."""

    if len(matches) < 8:
        return np.array([])

    p1 = p1[matches[:, 0]][:, :2].copy()
    p2 = p2[matches[:, 1]][:, :2].copy()
    b1 = camera1.pixel_bearings(p1)
    b2 = camera2.pixel_bearings(p2)

    threshold = config['robust_matching_threshold']
    T = pyopengv.relative_pose_ransac(b1, b2, "STEWENIUS", 1 - np.cos(threshold), 1000)

    inliers = compute_inliers_bearings(b1, b2, T)

    return matches[inliers]
Example #11
File: register.py Project: gilbert1991/map
def relativePositionEstimation(imgPath1, imgPath2):
	img1, img2 = cv2.imread(imgPath1, 0), cv2.imread(imgPath2, 0)

	kps1, des1 = feature.siftExtraction(img1)
	kps2, des2 = feature.siftExtraction(img2)

	matches = bruteForceMatch(des1, des2, 0.3)

	point1, point2 = extractMatchPoints(kps1, kps2, matches)

	point1 = np.array(point1)
	point2 = np.array(point2)

	# print point1
	# print point1-point2

	T = pyopengv.relative_pose_ransac(point1, point2, "NISTER", 0.01, 1000)
	R = T[:, :3]
	t = T[:, 3]

	return R, t
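
Note that this example feeds raw pixel coordinates into relative_pose_ransac, whereas the other examples pass unit bearing vectors. A back-projection step along these lines (K being a hypothetical 3x3 intrinsic matrix) would normally come first:

import numpy as np

def pixels_to_bearings(points, K):
    """Back-project Nx2 pixel coordinates to unit bearing vectors."""
    ones = np.ones((len(points), 1))
    rays = np.hstack([points, ones]) @ np.linalg.inv(K).T   # rows of K^-1 @ p
    return rays / np.linalg.norm(rays, axis=1)[:, None]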
Example #12
File: test.py Project: gilbert1991/map
def fivePointMatchTest(point1, point2):
	result = gv.relative_pose_ransac(point1, point2, "NISTER", 0.01, 1000)

	return result
Example #13
def estimate_rotation_procrustes_ransac(x, y, camera, threshold, inlier_ratio=0.75, do_translation=False):
    """Calculate the rotation between two sets of image coordinates using RANSAC.

    The inlier criterion is the reprojection error of y in image 1.

    Parameters
    -------------------------
    x : array 2xN image coordinates in image 1
    y : array 2xN image coordinates in image 2
    camera : Camera model
    threshold : float pixel distance threshold to accept as inlier
    do_translation : bool Try to estimate the translation as well

    Returns
    ------------------------
    R : array 3x3 The rotation that best fulfills X = RY
    t : array 3x1 translation if do_translation is True
    residual : array pixel distances ||x - xhat|| where xhat ~ KRY (and lens distortion)
    inliers : array Indices of the points (in X and Y) that are RANSAC inliers
    """
    assert x.shape == y.shape
    assert x.shape[0] == 2
    
    X = camera.unproject(x)
    Y = camera.unproject(y)

    data = np.vstack((X, Y, x))
    assert data.shape[0] == 8

    model_func = lambda data: procrustes(data[:3], data[3:6], remove_mean=do_translation)

    def eval_func(model, data):
        Y = data[3:6].reshape(3,-1)
        x = data[6:].reshape(2,-1)
        R, t = model

        Xhat = np.dot(R, Y) if t is None else np.dot(R, Y) + t
        xhat = camera.project(Xhat)
        dist = np.sqrt(np.sum((x-xhat)**2, axis=0))

        return dist

    inlier_selection_prob = 0.99999
    model_points = 2
    ransac_iterations = int(np.log(1 - inlier_selection_prob) / np.log(1-inlier_ratio**model_points))

    ransac_transformation = None
    model_est = None

    if X[2, 0] == 1:  # normal (pinhole) camera; otherwise fall back to opengv
        model_est, ransac_consensus_idx = ransac.RANSAC(model_func, eval_func, data, model_points, ransac_iterations, threshold, recalculate=True)
    else:
        ransac_transformation, ransac_consensus_idx = pyopengv.relative_pose_ransac(Y.T, X.T, "STEWENIUS", threshold, ransac_iterations, inlier_selection_prob, True)
    if ransac_transformation is not None:
        R = ransac_transformation[:, :3]
        t = ransac_transformation[:, 3]
        dist = eval_func((R, None), data)
    elif model_est is not None:
        (R, t) = model_est
        dist = eval_func((R, t), data)
    else:
        dist = None
        R, t = None, None
        ransac_consensus_idx = []
    return R, t, dist, ransac_consensus_idx
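
The iteration budget above is the standard RANSAC formula N = log(1 - p) / log(1 - w^s), with success probability p, inlier ratio w, and minimal sample size s. Plugging in the defaults used here:

import numpy as np

p, w, s = 0.99999, 0.75, 2
N = int(np.log(1 - p) / np.log(1 - w**s))
print(N)  # 13 iterations under these assumptions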
Example #14
def run_relative_pose_ransac(b1, b2, method, threshold, iterations):
    return pyopengv.relative_pose_ransac(b1, b2, method, threshold, iterations)
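
A hypothetical call to this wrapper, reusing the make_synthetic_pair sketch from Example #4 (illustrative names only):

b1, b2, R_true, t_true = make_synthetic_pair(200, outlier_fraction=0.3)
T = run_relative_pose_ransac(b1, b2, "STEWENIUS", 1e-5, 1000)
print(T.shape)  # (3, 4): the estimated [R | t]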
Example #15
def rPnP(pc_to_align, pc_refs, desc_to_align, desc_refs, inits_T, K, **kwargs):
    match_function = kwargs.pop('match_function', None)
    desc_function = kwargs.pop('desc_function', None)
    fit_pc = kwargs.pop('fit_pc', False)
    pnp_algo = kwargs.pop('pnp_algo', 'NISTER')
    ransac_threshold = kwargs.pop('ransac_threshold', 0.0002)
    iterations = kwargs.pop('iterations', 1000)
    # Available algorithms: STEWENIUS, NISTER, SEVENPT, EIGHTPT

    if kwargs:
        raise TypeError('Unexpected **kwargs: %r' % kwargs)

    Rr = list()
    u_dir = list()
    x_r = list()
    inliers = list()

    for n_pc, pc_ref in enumerate(pc_refs):
        desc_ref = desc_refs[n_pc]
        init_T = inits_T[n_pc]

        if desc_function is not None:
            desc_ref = desc_function(pc_ref, desc_ref)
        else:
            desc_ref = pc_ref

        if fit_pc:
            match_function.fit(pc_ref[0])
        else:
            match_function.fit(desc_ref[0])

        pc_rec = inits_T[0].matmul(pc_to_align)

        if desc_function is not None:
            desc_ta = desc_function(pc_rec, desc_to_align)
        else:
            desc_ta = pc_rec

        filtered_pc_to_align = pc_to_align.clone().detach()
        res_match = match_function(pc_rec, pc_ref, desc_ta, desc_ref)
        if 'inliers' in res_match.keys():
            filtered_pc_to_align = pc_to_align[
                0, :, res_match['inliers'][0].byte()].unsqueeze(0)
            res_match['nn'] = res_match['nn'][
                0, :, res_match['inliers'][0].byte()].unsqueeze(0)

            if filtered_pc_to_align.size(2) < 6:
                logger.warning(
                    "Fewer than 6 inliers found, returning initial pose")
                return {'T': init_T}

        keypoints_1 = reproject_back(filtered_pc_to_align, K.squeeze())
        bearing_vector_1 = keypoints_to_bearing(keypoints_1, K.squeeze())
        nn_match = init_T.inverse().matmul(res_match['nn'])
        keypoints_2 = reproject_back(nn_match, K.squeeze())
        bearing_vector_2 = keypoints_to_bearing(keypoints_2, K.squeeze())

        # Keep only columns free of NaNs (x == x is False for NaN entries).
        non_nan_idx, _ = torch.min(bearing_vector_1 == bearing_vector_1, dim=0)
        bearing_vector_1 = bearing_vector_1[:, non_nan_idx]
        bearing_vector_2 = bearing_vector_2[:, non_nan_idx]

        Tr = pyopengv.relative_pose_ransac(bearing_vector_1.t().cpu().numpy(),
                                           bearing_vector_2.t().cpu().numpy(),
                                           algo_name=pnp_algo,
                                           threshold=ransac_threshold,
                                           iterations=iterations)
        match_function.unfit()

        with open("ransac_inliers.txt", 'r') as f:
            inlier = int(f.read())
        inliers.append(inlier)
        Tr_w = np.eye(4)
        Tr_w[:3, :] = Tr
        Tr_w = np.matmul(np.linalg.inv(Tr_w), init_T[0].cpu().numpy())
        Rr.append(Tr_w[:3, :3])
        x_r.append(init_T[0, :3, 3].cpu().numpy())
        u_dir.append(
            (np.matmul(init_T[0, :3, :3].cpu().numpy(), -Tr[:3, 3])) /
            np.linalg.norm(
                np.matmul(init_T[0, :3, :3].cpu().numpy(), -Tr[:3, 3])))

    inliers_norm = inliers
    '''
    med_inlier = np.median(inliers)
    x_r = [x for i, x in enumerate(x_r) if inliers[i] > med_inlier]
    u_dir = [u for i, u in enumerate(u_dir) if inliers[i] > med_inlier]
    Rr = [R for i, R in enumerate(Rr) if inliers[i] > med_inlier]
    inliers_norm = [inlier for i, inlier in enumerate(inliers) if inlier > med_inlier]
    '''
    t = intersec(x_r, u_dir, weights=inliers_norm)
    T = np.eye(4)

    T[:3, :3] = average_rotation(Rr, weights=inliers_norm)
    T[:3, 3] = t
    T_torch = pc_to_align.new_tensor(T)
    return {'T': T_torch.unsqueeze(0)}
Example #16
def run_relative_pose_ransac(b1, b2, method, threshold, iterations):
    return pyopengv.relative_pose_ransac(b1, b2, method, threshold, iterations)
Example #17
def test_relative_pose_ransac():
    print("Testing relative pose ransac")

    d = RelativePoseDataset(8000, 0.0, 0.3)

    ransac_transformation = pyopengv.relative_pose_ransac(
        d.bearing_vectors1, d.bearing_vectors2, "NISTER", 0.01, 1000)

    # Project the bearing vectors to the z = 1 plane to get normalized image points.
    p1 = np.divide(d.bearing_vectors1, d.bearing_vectors1[:, 2][:, None])[:, :2]
    p2 = np.divide(d.bearing_vectors2, d.bearing_vectors2[:, 2][:, None])[:, :2]

    camIntrinsic = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
    E, mask = cv2.findEssentialMat(p1,
                                   p2,
                                   cameraMatrix=camIntrinsic,
                                   method=cv2.RANSAC,
                                   prob=0.999,
                                   threshold=0.1)
    points, Re, te, mask = cv2.recoverPose(E, p1, p2, mask=mask)

    F, mask = cv2.findFundamentalMat(p1, p2, method=cv2.RANSAC)
    points, Rf, tf, mask_ = cv2.recoverPose(F, p1, p2, mask=mask)

    gt_normed = np.eye(4)
    gt_normed[:3, :3] = d.rotation
    gt_normed[:3, 3] = normalized(d.position)

    e_normed = np.eye(4)
    e_normed[:3, :3] = Re.T
    e_normed[:3, 3] = -np.dot(Re.T, normalized(te.squeeze()))

    e_normed_inv = np.eye(4)
    e_normed_inv[:3, :3] = Re
    e_normed_inv[:3, 3] = normalized(te.squeeze())

    f_normed = np.eye(4)
    f_normed[:3, :3] = Rf.T
    f_normed[:3, 3] = -np.dot(Rf.T, normalized(tf.squeeze()))

    f_normed_inv = np.eye(4)
    # Note: despite the name, this block is built from the opengv RANSAC result,
    # not from the fundamental-matrix pose.
    f_normed_inv[:3, :3] = ransac_transformation[:, :3].T
    f_normed_inv[:3, 3] = -ransac_transformation[:, :3].T @ normalized(
        ransac_transformation[:, 3].squeeze())

    # print(np.linalg.norm((gt_normed @ e_normed_inv)[:3, 3]))
    # print(f_normed_inv)
    print(np.linalg.norm((gt_normed @ f_normed_inv)[:3, 3]))
    raise  # debug halt; the comparisons below are unreachable scratch code

    # print(d.position)
    # print(d.rotation)
    print(ransac_transformation[:, 0:3])
    # print(d.rotation - ransac_transformation[:, :3])
    # Assumption: Re and Rf (from the recoverPose calls above) are the
    # rotations these comparisons are meant to use.
    print(ransac_transformation[:, 0:3] - Re.T)
    print(ransac_transformation[:, 0:3] - Rf.T)
    print(np.sqrt(np.mean((ransac_transformation[:, 0:3] - Re.T)**2)))
    print(np.sqrt(np.mean((ransac_transformation[:, 0:3] - Rf.T)**2)))

    raise  # debug halt

    t = np.dot(-Re.T, te)
    t_ = np.dot(-Rf.T, tf)
    gt_t_normed = normalized(d.position)
    cv_t_normed = normalized(t)
    gv_t_normed = normalized(ransac_transformation[:, 3])

    print()
    print(gt_t_normed)
    print(gt_t_normed - cv_t_normed.squeeze())
    print(gt_t_normed - gv_t_normed)
    raise  # debug halt

    assert same_transformation(d.position, d.rotation, ransac_transformation)

    print("Done testing relative pose ransac")
Example #18
def main():
    args = configs()

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    log_interval = args.logging_interval

    if args.training_instance:
        args.load_path = os.path.join(args.load_path, args.training_instance)
    else:
        args.load_path = os.path.join(args.load_path, "evflownet_{}".format(datetime.now().strftime("%m%d_%H%M%S")))
    if not os.path.exists(args.load_path):
        os.makedirs(args.load_path)



    for ep in experiment_params:

        rpe_stats = []
        rre_stats = []
        trans_e_stats = []


        rpe_rre_info = []
        trans_e_info = []
        gt_interpolated = []
        predict_camera_frame = []
        predict_ts = []


        print(f"{ep['name']}")
        base_path = f"results/{ep['name']}"
        if not os.path.exists(base_path):
            os.makedirs(base_path)


        dataset_param = {
            'outdoor1': outdoor1_params,
            'outdoor2': outdoor2_params,
            'poster_6dof': poster_6dof_params,
            'hdr_boxes': hdr_boxes_params,
            'poster_translation': poster_translation_params,
            'indoor1': indoor1_params,
            'indoor2': indoor2_params,
            'indoor3': indoor3_params,
            'indoor4': indoor4_params,
            'outdoor_night1': outdoor_night1_params,
            'outdoor_night2': outdoor_night2_params,
            'outdoor_night3': outdoor_night3_params,
        }[ep['dataset']]


        with open(f"{base_path}/config.txt", "w") as f:
            f.write("experiment params:\n")
            f.write(pprint.pformat(ep))
            f.write("\n\n\n")
            f.write("dataset params:\n")
            f.write(pprint.pformat(dataset_param))



        print("Load Data Begin. ")
        start_frame = ep['start_frame']
        end_frame = ep['end_frame']
        model_path = ep['model']
        voxel_method = ep['voxel_method']
        select_events = ep['select_events']
        voxel_threshold = ep['voxel_threshold']
        findE_threshold = ep['findE_threshold']
        findE_prob = ep['findE_prob']
        reproject_err_threshold = ep['reproject_err_threshold']


        # Set parameters
        gt_path = dataset_param['gt_path']
        sensor_size = dataset_param['sensor_size']
        camIntrinsic = dataset_param['camera_intrinsic']
        h5Dataset = DynamicH5Dataset(dataset_param['dataset_path'], voxel_method=voxel_method)
        h5DataLoader = torch.utils.data.DataLoader(dataset=h5Dataset, batch_size=1, num_workers=6, shuffle=False)
        
        # model
        print("Load Model Begin. ")
        EVFlowNet_model = EVFlowNet(args).to(device)
        EVFlowNet_model.load_state_dict(torch.load(model_path))
        EVFlowNet_model.eval()
        # EVFlowNet_model.load_state_dict(torch.load('data/model/evflownet_1001_113912_outdoor2_5k/model0'))

        # optimizer
        optimizer = torch.optim.Adam(EVFlowNet_model.parameters(), lr=args.initial_learning_rate, weight_decay=args.weight_decay)
        scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer=optimizer, gamma=args.learning_rate_decay)
        loss_fun = TotalLoss(args.smoothness_weight, args.photometric_loss_weight)



        print("Start Evaluation. ")    
        for iteration, item in enumerate(tqdm(h5DataLoader)):

            if iteration < start_frame:
                continue

            if iteration > end_frame:
                break

            voxel = item['voxel'].to(device)
            events = item['events'].to(device)
            frame = item['frame'].to(device)
            frame_ = item['frame_'].to(device)
            num_events = item['num_events'].to(device)
            image_name = "{}/img_{:07d}.png".format(base_path, iteration)

            events_vis = events[0].detach().cpu()
            flow_dict = EVFlowNet_model(voxel)
            flow_vis = flow_dict["flow3"][0].detach().cpu()

            # Compose the event images and warp the events with flow
            if select_events == 'only_pos':
                ev_bgn, ev_end, ev_img, timestamps = get_forward_backward_flow_torch(events_vis, flow_vis, voxel_threshold, 1, sensor_size)

            elif select_events == 'only_neg':
                ev_bgn, ev_end, ev_img, timestamps = get_forward_backward_flow_torch(events_vis, flow_vis, voxel_threshold, -1, sensor_size)

            elif select_events == 'mixed':
                ev_bgn_pos, ev_end_pos, ev_img, timestamps = get_forward_backward_flow_torch(events_vis, flow_vis, voxel_threshold, 1, sensor_size)
                ev_bgn_neg, ev_end_neg, ev_img_neg, timestamps_neg = get_forward_backward_flow_torch(events_vis, flow_vis, voxel_threshold, -1, sensor_size)

                ev_bgn_x = torch.cat([ev_bgn_pos[0], ev_bgn_neg[0]])
                ev_bgn_y = torch.cat([ev_bgn_pos[1], ev_bgn_neg[1]])
                ev_end_x = torch.cat([ev_end_pos[0], ev_end_neg[0]])
                ev_end_y = torch.cat([ev_end_pos[1], ev_end_neg[1]])
                ev_bgn = (ev_bgn_x, ev_bgn_y)
                ev_end = (ev_end_x, ev_end_y)


            start_t = item['timestamp_begin'].cpu().numpy()[0]
            end_t = item['timestamp_end'].cpu().numpy()[0]

            # Convert to numpy format
            ev_img_raw = torch_to_numpy(ev_img[0])
            ev_img_bgn = torch_to_numpy(ev_img[1])
            ev_img_end = torch_to_numpy(ev_img[2])
            ev_bgn_xs = torch_to_numpy(ev_bgn[0])
            ev_bgn_ys = torch_to_numpy(ev_bgn[1])
            ev_end_xs = torch_to_numpy(ev_end[0])
            ev_end_ys = torch_to_numpy(ev_end[1])

            timestamps_before = torch_to_numpy(timestamps[0])
            timestamps_after = torch_to_numpy(timestamps[1])
            frame_vis = torch_to_numpy(item['frame'][0])
            frame_vis_ = torch_to_numpy(item['frame_'][0])
            flow_vis = torch_to_numpy(flow_dict["flow3"][0])


            METHOD = "opencv"  # switch to "opengv" for the bearing-vector path below

            if METHOD == "opencv":

                ######### Opencv (calculate R and t) #########
                p1 = np.dstack([ev_bgn_xs, ev_bgn_ys]).squeeze()
                p2 = np.dstack([ev_end_xs, ev_end_ys]).squeeze()

                E, mask = cv2.findEssentialMat(p1, p2, cameraMatrix=camIntrinsic, method=cv2.RANSAC, prob=findE_prob, threshold=findE_threshold)
                points, R, t, mask1, triPoints = cv2.recoverPose(E, p1, p2, cameraMatrix=camIntrinsic, mask=mask, distanceThresh=5000)

            elif METHOD == "opengv":

                #### Compute bearing vectors manually ####
                # Hard-coded pinhole intrinsics (pixels): cx = 170.768, cy = 128.187,
                # fx = 223.994, fy = 223.618. No lens-distortion correction is
                # applied, despite the `_undistorted` names.
                ev_bgn_xs_undistorted = (ev_bgn_xs - 170.7684322973841) / 223.9940010790056
                ev_bgn_ys_undistorted = (ev_bgn_ys - 128.18711828338436) / 223.61783486959376
                ev_end_xs_undistorted = (ev_end_xs - 170.7684322973841) / 223.9940010790056
                ev_end_ys_undistorted = (ev_end_ys - 128.18711828338436) / 223.61783486959376

                bearing_p1 = np.dstack([ev_bgn_xs_undistorted, ev_bgn_ys_undistorted, np.ones_like(ev_bgn_xs)]).squeeze()
                bearing_p2 = np.dstack([ev_end_xs_undistorted, ev_end_ys_undistorted, np.ones_like(ev_end_xs)]).squeeze()

                bearing_p1 /= np.linalg.norm(bearing_p1, axis=1)[:, None]
                bearing_p2 /= np.linalg.norm(bearing_p2, axis=1)[:, None]
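                # (The row-wise division above yields unit bearing vectors,
                # which is what pyopengv expects.)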

                bearing_p1 = bearing_p1.astype('float64')
                bearing_p2 = bearing_p2.astype('float64')

                # focal_length = 223.75
                # reproject_err_threshold = 0.1
                ransac_threshold = 1e-6
                ransac_transformation = pyopengv.relative_pose_ransac(bearing_p1, bearing_p2, "NISTER", threshold=ransac_threshold, iterations=1000, probability=0.999)
                R = ransac_transformation[:, 0:3]
                t = ransac_transformation[:, 3]

            # Interpolate Tw1 and Tw2
            Tw1 = get_interpolated_gt_pose(gt_path, start_t)
            Tw2 = get_interpolated_gt_pose(gt_path, end_t)
            Tw2_inv = inverse_se3_matrix(Tw2)

            # r1 = np.array([[1, 0, 0], [0, 0, -1], [0, 1, 0]])
            # r2 = np.array([[1, 0, 0], [0, 0, 1], [0, -1, 0]])
            # r3 = np.array([[0, 0, 1], [0, 1, 0], [-1, 0, 0]])
            # r4 = np.array([[0, 0, -1], [0, 1, 0], [1, 0, 0]])
            # r5 = np.array([[0, -1, 0], [1, 0, 0], [0, 0, 1]])
            # r6 = np.array([[0, 1, 0], [-1, 0, 0], [0, 0, 1]])
            # t = r5 @ t

            normed_t = t.squeeze() / np.linalg.norm(t)
            gt_t = Tw2[0:3, 3] - Tw1[0:3, 3]
            normed_gt = gt_t / np.linalg.norm(gt_t)

            # print()
            # print(np.rad2deg(np.arccos(np.dot(normed_t, normed_gt))))
            # print(np.rad2deg(np.arccos(np.dot(r1@normed_t, normed_gt))))
            # print(np.rad2deg(np.arccos(np.dot(r2@normed_t, normed_gt))))
            # print(np.rad2deg(np.arccos(np.dot(r3@normed_t, normed_gt))))
            # print(np.rad2deg(np.arccos(np.dot(r4@normed_t, normed_gt))))
            # print(np.rad2deg(np.arccos(np.dot(r5@normed_t, normed_gt))))
            # print(np.rad2deg(np.arccos(np.dot(r6@normed_t, normed_gt))))


            rpe = np.rad2deg(np.arccos(np.dot(normed_t, normed_gt)))
            # raise

            # if iteration == 121:
            #     print(Tw1)
            #     print(Tw2)
            #     print(Tw2_inv)
            #     print(Tw2_inv @ Tw1)

            predict_ts.append(start_t)

            # Store the gt vector for later visualization
            gt_interpolated.append(Tw1)
            gt_scale = np.linalg.norm(Tw2[0:3, 3] - Tw1[0:3, 3])
            pd_scale = np.linalg.norm(t)

            # Monocular relative pose is only defined up to scale, so rescale t
            # by the ground-truth baseline before composing SE(3) matrices.
            t *= gt_scale / pd_scale

            # Predicted relative pose 
            P = create_se3_matrix_with_R_t(R, t)
            P_inv = inverse_se3_matrix(P)
            # print(P @ P_inv)




            # Relative pose error: compose the ground-truth relative motion
            # (Tw2^-1 @ Tw1) with the prediction; a perfect estimate gives the identity.
            E = Tw2_inv @ Tw1 @ P
            trans_e = np.linalg.norm(E[0:3, 3])

            E_inv = Tw2_inv @ Tw1 @ P_inv
            trans_e_inv = np.linalg.norm(E_inv[0:3, 3])


            # print(Tw2_inv @ Tw1)
            # print(P_inv)
            # print(E_inv)
            # print()

            # print() 
            # print(t)
            # print(Tw1[0:3, 3] - Tw2[0:3, 3])
            # print(Tw1[0:3, 3] - Tw2[0:3, 3] - t.T)

            # print()


            # print(t)
            # print(Tw1[0:3, 3] - Tw2[0:3, 3])
            # print(np.dot(np.linalg.norm(t), np.linalg.norm(Tw1[0:3, 3] - Tw2[0:3, 3])))
            # print(np.arccos(np.dot(np.linalg.norm(t), np.linalg.norm(Tw1[0:3, 3] - Tw2[0:3, 3]))))
            # raise

            # Rotation error as the geodesic angle ||log(R)|| of the residual rotation.
            rre = np.linalg.norm(logm(E[:3, :3]))
            rre_inv = np.linalg.norm(logm(E_inv[:3, :3]))

            rpe_stats.append(rpe)
            rre_stats.append(rre_inv)
            rpe_rre_info.append([rpe, rre, rre_inv])


            # Heuristic direction disambiguation: prefer P_inv unless its
            # normalized translation error is very large (> 1.85), then fall back to P.
            if trans_e_inv/gt_scale > 1.85:
                predict_camera_frame.append(P)

                trans_e_info.append([trans_e, trans_e_inv, gt_scale, trans_e/gt_scale, trans_e_inv/gt_scale, trans_e/gt_scale])
                print(trans_e/gt_scale, trans_e_inv/gt_scale, trans_e/gt_scale, " || ", rpe, rre, rre_inv)

                trans_e_stats.append(trans_e/gt_scale)
            else:                
                trans_e_info.append([trans_e, trans_e_inv, gt_scale, trans_e/gt_scale, trans_e_inv/gt_scale, trans_e_inv/gt_scale])
                print(trans_e/gt_scale, trans_e_inv/gt_scale, trans_e_inv/gt_scale, " || ", rpe, rre, rre_inv)

                trans_e = trans_e_inv
                predict_camera_frame.append(P_inv)

                trans_e_stats.append(trans_e_inv/gt_scale)

            # raise

            # if trans_e/gt_scale > 1.85:

            #     trans_e_info.append([trans_e, trans_e_inv, gt_scale, trans_e/gt_scale, trans_e_inv/gt_scale, trans_e_inv/gt_scale])
            #     print(trans_e, trans_e_inv, gt_scale, trans_e/gt_scale, trans_e_inv/gt_scale, trans_e_inv/gt_scale)


            #     trans_e = trans_e_inv
            #     predict_camera_frame.append(P_inv)
            # else:
            #     predict_camera_frame.append(P)


            #     trans_e_info.append([trans_e, trans_e_inv, gt_scale, trans_e/gt_scale, trans_e_inv/gt_scale, trans_e/gt_scale])
            #     print(trans_e, trans_e_inv, gt_scale, trans_e/gt_scale, trans_e_inv/gt_scale, trans_e/gt_scale)


            cvshow_all_eval(ev_img_raw, ev_img_bgn, ev_img_end, (ev_bgn_xs, ev_bgn_ys), \
                (ev_end_xs, ev_end_ys), timestamps_before, timestamps_after, frame_vis, \
                frame_vis_, flow_vis, image_name, sensor_size, trans_e, gt_scale)


            predict_world_frame = relative_to_absolute_pose(np.array(predict_camera_frame))
            visualize_trajectory(predict_world_frame, "{}/path_{:07d}.png".format(base_path, iteration))
            visualize_trajectory(np.array(gt_interpolated), "{}/gt_path_{:07d}.png".format(base_path, iteration))


        rpe_stats = np.array(rpe_stats)
        rre_stats = np.array(rre_stats)
        trans_e_stats = np.array(trans_e_stats)

        with open(f"{base_path}/final_stats.txt", "w") as f:
            f.write("rpe_median, arpe_deg, arpe_outliner_10, arpe_outliner_15\n")
            f.write(f"{np.median(rpe_stats)}, {np.mean(rpe_stats)}, {100*len(rpe_stats[rpe_stats>10])/len(rpe_stats)}, {100*len(rpe_stats[rpe_stats>15])/len(rpe_stats)}\n\n")
            
            print("rpe_median, arpe_deg, arpe_outliner_10, arpe_outliner_15")
            print(f"{np.median(rpe_stats)}, {np.mean(rpe_stats)}, {100*len(rpe_stats[rpe_stats>10])/len(rpe_stats)}, {100*len(rpe_stats[rpe_stats>15])/len(rpe_stats)}\n")

            f.write("rre_median, arre_rad, arre_outliner_0.05, arpe_outliner_0.1\n")
            f.write(f"{np.median(rre_stats)}, {np.mean(rre_stats)}, {100*len(rre_stats[rre_stats>0.05])/len(rre_stats)}, {100*len(rre_stats[rre_stats>0.1])/len(rre_stats)}\n\n")

            print("rre_median, arre_rad, arre_outliner_0.05, arpe_outliner_0.1")
            print(f"{np.median(rre_stats)}, {np.mean(rre_stats)}, {100*len(rre_stats[rre_stats>0.05])/len(rre_stats)}, {100*len(rre_stats[rre_stats>0.1])/len(rre_stats)}\n\n")

            f.write("trans_e_median, trans_e_avg, trans_e_outliner_0.5, trans_e_outliner_1.0\n")
            f.write(f"{np.median(trans_e_stats)}, {np.mean(trans_e_stats)}, {100*len(trans_e_stats[trans_e_stats>0.5])/len(trans_e_stats)}, {100*len(trans_e_stats[trans_e_stats>1.0])/len(trans_e_stats)}\n")

            print("trans_e_median, trans_e_avg, trans_e_outliner_0.5, trans_e_outliner_1.0\n")
            print(f"{np.median(trans_e_stats)}, {np.mean(trans_e_stats)}, {100*len(trans_e_stats[trans_e_stats>0.5])/len(trans_e_stats)}, {100*len(trans_e_stats[trans_e_stats>1.0])/len(trans_e_stats)}\n")




        with open(f"{base_path}/rpe_rre.txt", "w") as f:
            for row in rpe_rre_info:
                for item in row:
                    f.write(f"{item}, ")
                f.write("\n")

        with open(f"{base_path}/trans_e.txt", "w") as f:
            for row in trans_e_info:
                for item in row:
                    f.write(f"{item}, ")
                f.write("\n")
        
        with open(f"{base_path}/predict_pose.txt", "w") as f:
            for p in predict_world_frame:
                f.write(f"{p}\n")
        
        with open(f"{base_path}/gt_pose.txt", "w") as f:
            for p in np.array(gt_interpolated):
                f.write(f"{p}\n")

        with open(f"{base_path}/predict_tum.txt", "w") as f:
            for ts, p in zip(predict_ts, predict_world_frame):
                qx, qy, qz, qw = rotation_matrix_to_quaternion(p[:3, :3])
                tx, ty, tz = p[:3, 3]
                f.write(f"{ts} {tx} {ty} {tz} {qx} {qy} {qz} {qw}\n")

        with open(f"{base_path}/gt_tum.txt", "w") as f:
            for ts, p in zip(predict_ts, np.array(gt_interpolated)):
                qx, qy, qz, qw = rotation_matrix_to_quaternion(p[:3, :3])
                tx, ty, tz = p[:3, 3]
                f.write(f"{ts} {tx} {ty} {tz} {qx} {qy} {qz} {qw}\n")


        predict_world_frame = relative_to_absolute_pose(np.array(predict_camera_frame))
        visualize_trajectory(predict_world_frame, f"{base_path}/final_path00.png", show=True)
        visualize_trajectory(predict_world_frame, f"{base_path}/final_path01.png", rotate='x')
        visualize_trajectory(predict_world_frame, f"{base_path}/final_path02.png", rotate='y')
        visualize_trajectory(predict_world_frame, f"{base_path}/final_path03.png", rotate='z')